From bfacc172f0aed85fde1e01ac1220c3ce90c90dea Mon Sep 17 00:00:00 2001
From: coolsnowwolf
Date: Thu, 19 Oct 2017 22:23:41 +0800
Subject: [PATCH] update kernel to 4.9.57

---
 include/kernel-version.mk | 4 +-
 package/network/services/hostapd/Makefile | 2 +-
 .../network/services/hostapd/files/hostapd.sh | 5 +
 ...e-against-PTK-reinstalls-in-4-way-ha.patch | 34 +
 ...-and-check-for-this-when-deriving-PT.patch | 53 +
 ...-workaround-for-key-reinstallation-a.patch | 221 +
 ...stentcy-checks-for-PTK-component-len.patch | 100 +
 ...rmation-in-supplicant-state-machine-.patch | 25 +
 .../services/hostapd/patches/300-noscan.patch | 4 +-
 .../patches/380-disable_ctrl_iface_mib.patch | 8 +-
 .../patches/390-wpa_ie_cap_workaround.patch | 4 +-
 .../hostapd/patches/600-ubus_support.patch | 2 +-
 ...mware-loader-for-uPD720201-and-uPD72.patch | 6 +-
 .../802-usb-xhci-force-msi-renesas-xhci.patch | 2 +-
 ...support-for-performing-fake-doorbell.patch | 2 +-
 ...fault-mouse-polling-interval-to-60Hz.patch | 2 +-
 ...ate-for-rename-of-page_cache_release.patch | 2 +-
 .../patches-4.9/200-broadcom_phy_reinit.patch | 2 +-
 ...024-1-tcp-tsq-add-tsq_flags-tsq_enum.patch | 90 +
 ...ve-one-locked-operation-in-tcp_wfree.patch | 48 +
 ...tsq-add-shortcut-in-tcp_tasklet_func.patch | 71 +
 ...cp-tsq-avoid-one-atomic-in-tcp_wfree.patch | 38 +
 ...-a-shortcut-in-tcp_small_queue_check.patch | 37 +
 ...cp_mtu_probe-is-likely-to-exit-early.patch | 55 +
 ...struct-sock-for-better-data-locality.patch | 157 +
 ...ove-tsq_flags-close-to-sk_wmem_alloc.patch | 176 +
 ...-missing-barrier-in-tcp_tasklet_func.patch | 40 +
 ...90-net-generalize-napi_complete_done.patch | 1412 +
 .../661-use_fq_codel_by_default.patch | 2 +-
 .../generic/hack-4.9/902-debloat_proc.patch | 2 +-
 .../pending-4.9/630-packet_socket_type.patch | 16 +-
 ...Add-support-for-MAP-E-FMRs-mesh-mode.patch | 22 +-
 target/linux/layerscape/32b/config-default | 152 -
 .../layerscape/32b/profiles/00-default.mk | 1 -
 target/linux/layerscape/64b/config-default | 183 -
 target/linux/layerscape/Makefile | 13 +-
 target/linux/layerscape/armv8_32b/config-4.9 | 1444 +
 .../{64b => armv8_32b}/profiles/00-default.mk | 0
 .../layerscape/{32b => armv8_32b}/target.mk | 8 +-
 target/linux/layerscape/armv8_64b/config-4.9 | 1346 +
 .../armv8_64b/profiles/00-default.mk | 18 +
 .../layerscape/{64b => armv8_64b}/target.mk | 6 +-
 target/linux/layerscape/config-4.4 | 310 -
 target/linux/layerscape/image/Makefile | 75 +-
 ...nsure-ATU-is-enabled-before-IO-conf-.patch | 43 -
 ...PCI-designware-Simplify-control-flow.patch | 121 -
 ...ake-config-accessor-override-checkin.patch | 71 -
 ...xplain-why-we-don-t-program-ATU-for-.patch | 34 -
 ...nware-Remove-PCI_PROBE_ONLY-handling.patch | 41 -
 ...re-Add-generic-dw_pcie_wait_for_link.patch | 249 -
 ...dd-default-link-up-check-if-sub-driv.patch | 46 -
 ...ove-Root-Complex-setup-code-to-dw_pc.patch | 109 -
 ...emove-incorrect-RC-memory-base-limit.patch | 45 -
 ...onfig-add-freescale-config-for-amr64.patch | 148 -
 ...NFIG_EEPROM_AT24-for-freescale.confi.patch | 24 -
 ...39-ARM-dts-ls1021a-add-PCIe-dts-node.patch | 68 -
 ...RM-dts-ls1021a-add-SCFG-MSI-dts-node.patch | 56 -
 ...Add-bindings-for-Layerscape-SCFG-MSI.patch | 53 -
 ...-mtd-nand-spi-nor-assign-MTD-of_node.patch | 31 -
 ...vert-to-spi_nor_-get-set-_flash_node.patch | 80 -
 ...op-unnecessary-partition-parser-data.patch | 83 -
 ...d-get-set-of_node-flash_node-helpers.patch | 62 -
 ...78-mtd-spi-nor-drop-flash_node-field.patch | 57 -
 ...ve-unnecessary-leading-space-from-db.patch | 27 -
 ...sl-quadspi-possible-NULL-dereference.patch | 50 -
 ...ide-default-erase_sector-implementat.patch | 105 -
 ...error-message-with-unrecognized-JEDE.patch | 31 -
 ...-fix-error-handling-in-spi_nor_erase.patch | 39 -
 ...-Check-the-return-value-from-read_sr.patch | 58 -
 ...-until-lock-unlock-operations-are-re.patch | 66 -
 ...r-fsl-quadspi-add-big-endian-support.patch | 400 -
 ...-fsl-quadspi-add-support-for-ls1021a.patch | 63 -
 ...l-quadspi-add-support-for-layerscape.patch | 28 -
 ...spi-nor-Add-SPI-NOR-layer-PM-support.patch | 138 -
 ...or-change-return-value-of-read-write.patch | 82 -
 ...return-amount-of-data-read-written-o.patch | 73 -
 ...r-check-return-value-from-read-write.patch | 127 -
 ...d-spi-nor-stop-passing-around-retlen.patch | 215 -
 ...1095-mtd-spi-nor-simplify-write-loop.patch | 100 -
 .../1096-mtd-spi-nor-add-read-loop.patch | 46 -
 ...i-use-the-property-fields-of-SPI-NOR.patch | 87 -
 ...Rename-SEQID_QUAD_READ-to-SEQID_READ.patch | 46 -
 ...l-quadspi-Add-fast-read-mode-support.patch | 72 -
 ...r-Disable-Micron-flash-HW-protection.patch | 41 -
 ...quadspi-extend-support-for-some-spec.patch | 122 -
 ...fsl-quadspi-Support-qspi-for-ls2080a.patch | 83 -
 ...Support-R-W-for-S25FS-S-family-flash.patch | 110 -
 ...dspi-Add-quad-mode-for-flash-n25q128.patch | 112 -
 ...td-spi-nor-add-DDR-quad-read-support.patch | 181 -
 ...adspi-add-DDR-quad-read-for-Spansion.patch | 122 -
 ...-quadspi-disable-AHB-buffer-prefetch.patch | 67 -
 ...-add-multi-flash-chip-R-W-on-ls2080a.patch | 42 -
 ...-spi-nor-Enable-QSPI-Flash-in-Kernel.patch | 36 -
 ...l-quad-add-flash-S25FS-extra-support.patch | 157 -
 ...isable-4kb-sector-erase-for-s25fl128.patch | 27 -
 ...uad-Hang-memcpy-Unhandled-fault-alig.patch | 29 -
 ...quad-move-mtd_device_register-to-the.patch | 49 -
 ...ap_of-to-let-the-device-tree-specify.patch | 85 -
 ...d-the-default-config-ls_aarch32_defc.patch | 209 -
 ...-update-defconfig-for-LayerScape-SoC.patch | 101 -
 ...arch32-defconfig-Enable-CAAM-support.patch | 31 -
 ...32-defconfig-Enable-firmware-loading.patch | 23 -
 ...fconfig-Enable-support-for-AHCI-SATA.patch | 29 -
 ...fconfig-Enable-USB-and-related-confi.patch | 31 -
 ...fconfig-Enable-KVM-related-configura.patch | 59 -
 ...2-defconfig-Enable-FTM-alarm-support.patch | 23 -
 ...3a-add-DTS-for-Freescale-LS1043A-SoC.patch | 552 -
 ...ls1043a-add-LS1043ARDB-board-support.patch | 150 -
 ...-address-cells-and-reg-properties-of.patch | 141 -
 ...d-ITS-file-for-AArch32-Linux-on-LS10.patch | 71 -
 ...8-aarch32-change-FS-file-name-in-ITS.patch | 21 -
 ...n-32-bit-Linux-in-AArch32-execution-.patch | 79 -
 ...h32-Add-SMP-support-for-32-bit-Linux.patch | 103 -
 ...low-RAM-to-be-mapped-for-LayerScape-.patch | 31 -
 ..._cached-and-pgprot_cached_ns-support.patch | 26 -
 ...11-arm-add-new-non-shareable-ioremap.patch | 99 -
 ...3a-add-fman-bman-qman-ethernet-nodes.patch | 747 -
 ...13-dts-ls1043ardb-add-mdio-phy-nodes.patch | 81 -
 ...icetree-doc-out-of-powerpc-directory.patch | 108 -
 ...-move-mpc85xx.h-to-include-linux-fsl.patch | 283 -
 ...-dts-align-to-the-new-clocking-model.patch | 25 -
 ...028-dts-ls1043-update-dts-for-ls1043.patch | 523 -
 ...-arm64-Add-pdev_archdata-for-dmamask.patch | 51 -
 ...p-for-normal-cacheable-non-shareable.patch | 42 -
 ...t-to-remap-kernel-cacheable-memory-t.patch | 28 -
 ...d-support-to-map-cacheable-and-non-s.patch | 22 -
 ...-specific-fucntions-required-for-ehc.patch | 79 -
 .../3063-arm64-add-NO_IRQ-macro.patch | 27 -
 ...vice-tree-for-ls1012a-SoC-and-boards.patch | 880 -
 ...Run-32-bit-Linux-for-LayerScape-SoCs.patch | 49 -
 ...Add-KVM-support-for-AArch32-on-ARMv8.patch | 34 -
 ...6a-add-DTS-for-Freescale-LS1046A-SoC.patch | 1056 -
 ...ls1046a-add-LS1046ARDB-board-support.patch | 557 -
 .../3133-ls1046ardb-add-ITS-file.patch | 69 -
 ...Add-DTS-support-for-FSL-s-LS1088ARDB.patch | 790 -
 .../3139-ls1088ardb-add-ITS-file.patch | 69 -
 .../3141-caam-add-caam-node-for-ls1088a.patch | 62 -
 ...h32-Execute-32-bit-Linux-for-ls1046a.patch | 27 -
 ...quadspi-Enable-fast-read-for-LS1088A.patch | 43 -
 .../3227-ls2088a-dts-add-ls2088a-dts.patch | 1338 -
 .../3228-ls2088a-add-ls2088a-its.patch | 129 -
 ...1a-fix-typo-of-MSI-compatible-string.patch | 35 -
 ...3a-fix-typo-of-MSI-compatible-string.patch | 45 -
 .../3231-arm-dts-ls1021a-share-all-MSIs.patch | 37 -
 ...232-arm64-dts-ls1043a-share-all-MSIs.patch | 46 -
 ...rm64-dts-ls1046a-update-MSI-dts-node.patch | 113 -
 ...1043a-change-GIC-register-for-rev1.1.patch | 32 -
 ...-memory-Removal-of-deprecated-NO_IRQ.patch | 24 -
 ...emory-Add-deep-sleep-support-for-IFC.patch | 233 -
 ...date-dependency-of-IFC-for-Layerscap.patch | 51 -
 ...regate-IFC-fcm-and-runtime-registers.patch | 705 -
 ...ers-memory-Fix-build-error-for-arm64.patch | 53 -
 ...ilation-error-when-COMPAT-not-enable.patch | 29 -
 ...14-temp-QE-headers-are-needed-by-FMD.patch | 1317 -
 .../7016-dpa-add-dpaa_eth-driver.patch | 19160 --------
 .../7017-fsl_qbman-add-qbman-driver.patch | 24828 ----------
 .../7018-devres-add-devm_alloc_percpu.patch | 138 -
 .../7019-net-readd-skb_recycle.patch | 59 -
 .../7020-net-add-custom-NETIF-flags.patch | 40 -
 ...dev-watchdog-aware-of-hardware-multi.patch | 48 -
 ...to-setup-HugeTLB-mappings-for-USDPAA.patch | 59 -
 ...needs-to-be-reprogrammed-after-sleep.patch | 228 -
 ...md-use-kernel-api-for-64bit-division.patch | 178 -
 ...able-DPAA1-QBMan-for-ARM64-platforms.patch | 31 -
 ...r-issue-introduced-with-2.5G-support.patch | 22 -
 ...a_eth-replace-sgmii-2500-with-qsgmii.patch | 39 -
 .../7066-fmd-add-2.5G-SGMII-mode-suport.patch | 38 -
 .../7067-net-phy-add-SGMII-2500-PHY.patch | 20 -
 ...x-link-state-detect-for-10G-interfac.patch | 63 -
 ...7072-LS1012-Add-PPFE-driver-in-Linux.patch | 15167 ------
 ...d-driver-for-aquantia-AQR106-107-phy.patch | 63 -
 ...h_setup_dma_ops-before-using-dma_ops.patch | 53 -
 ...dded-generic-MSI-support-for-FSL-MC-.patch | 400 -
 ...dded-GICv3-ITS-support-for-FSL-MC-MS.patch | 167 -
 ...xtended-MC-bus-allocator-to-include-.patch | 326 -
 ...hanged-DPRC-built-in-portal-s-mc_io-.patch | 44 -
 ...opulate-the-IRQ-pool-for-an-MC-bus-i.patch | 109 -
 ...l-mc-set-MSI-domain-for-DPRC-objects.patch | 103 -
 ...c-Fixed-bug-in-dprc_probe-error-path.patch | 72 -
 ...-fsl-mc-Added-DPRC-interrupt-handler.patch | 301 -
 ...dded-MSI-support-to-the-MC-bus-drive.patch | 59 -
 ...g-fsl-mc-Remove-unneeded-parentheses.patch | 39 -
 ...mc-Do-not-allow-building-as-a-module.patch | 30 -
 ...taging-fsl-mc-Avoid-section-mismatch.patch | 43 -
 ...emove-unneeded-else-following-a-retu.patch | 45 -
 ...l-mc-Drop-unneeded-void-pointer-cast.patch | 43 -
 ...c-bus-Eliminate-double-function-call.patch | 73 -
 ...fsl-mc-Replace-pr_debug-with-dev_dbg.patch | 96 -
 ...g-fsl-mc-Replace-pr_err-with-dev_err.patch | 83 -
 ...ix-incorrect-type-passed-to-dev_dbg-.patch | 48 -
 ...ix-incorrect-type-passed-to-dev_err-.patch | 38 -
 ...mc-get-rid-of-mutex_locked-variables.patch | 207 -
 .../7165-staging-fsl-mc-TODO-updates.patch | 49 -
 ...-fsl-mc-DPAA2-overview-readme-update.patch | 279 -
 ...pdate-dpmcp-binary-interface-to-v3.0.patch | 123 -
 ...update-dpbp-binary-interface-to-v2.2.patch | 208 -
 ...update-dprc-binary-interface-to-v5.1.patch | 206 -
 ...on-t-use-object-versions-to-make-bin.patch | 136 -
 ...et-up-coherent-dma-ops-for-added-dev.patch | 29 -
 ...et-cacheable-flag-for-added-devices-.patch | 30 -
 ...et-version-of-root-dprc-from-MC-hard.patch | 106 -
 ...taging-fsl-mc-add-dprc-version-check.patch | 90 -
 ...dd-quirk-handling-for-dpseci-objects.patch | 38 -
 ...aging-fsl-mc-add-dpmcp-version-check.patch | 56 -
 ...eturn-EINVAL-for-all-fsl_mc_portal_a.patch | 30 -
 ...7178-staging-fsl-mc-bus-Drop-warning.patch | 47 -
 ...dd-support-for-the-modalias-sysfs-at.patch | 54 -
 ...mplement-uevent-callback-and-set-the.patch | 32 -
 ...fsl-mc-clean-up-the-device-id-struct.patch | 85 -
 ...dd-support-for-device-table-matching.patch | 98 -
 ...staging-fsl-mc-export-mc_get_version.patch | 23 -
 ...l-mc-make-fsl_mc_is_root_dprc-global.patch | 77 -
 ...mc-fix-asymmetry-in-destroy-of-mc_io.patch | 62 -
 ...ing-fsl-mc-dprc-add-missing-irq-free.patch | 28 -
 ...prc-fix-ordering-problem-freeing-res.patch | 41 -
 ...c-properly-set-hwirq-in-msi-set_desc.patch | 48 -
 ...pdate-dpcon-binary-interface-to-v2.2.patch | 964 -
 ...oot-dprc-rescan-attribute-to-sync-ke.patch | 59 -
 ...us-rescan-attribute-to-sync-kernel-w.patch | 78 -
 ...ropagate-driver_override-for-a-child.patch | 193 -
 ...dd-device-binding-path-driver_overri.patch | 111 -
 ...c-export-irq-cleanup-for-vfio-to-use.patch | 47 -
 ...crement-MC_CMD_COMPLETION_TIMEOUT_MS.patch | 88 -
 ...-mc-make-fsl_mc_get_root_dprc-public.patch | 45 -
 ...mc-Management-Complex-restool-driver.patch | 489 -
 ...-staging-fsl-mc-dpio-services-driver.patch | 8943 ----
 .../7199-dpaa2-dpio-Cosmetic-cleanup.patch | 35 -
 ...-fsl-mc-dpio-driver-match-id-cleanup.patch | 26 -
 ...h-initial-commit-of-dpaa2-eth-driver.patch | 12268 -----
 ...aa2-eth-code-cleanup-for-upstreaming.patch | 3257 --
 ...-Update-description-of-DPNI-counters.patch | 37 -
 ...aa2-eth-dpni-Clear-compiler-warnings.patch | 38 -
 ...eth-sanitize-supported-private-flags.patch | 57 -
 .../7206-fsl-dpaa2-eth-match-id-cleanup.patch | 26 -
 ...dpaa2-eth-add-device-table-to-driver.patch | 22 -
 ...2-mac-Added-MAC-PHY-interface-driver.patch | 2347 -
 ...fsl-dpaa2-mac-Interrupt-code-cleanup.patch | 182 -
 ...paa2-mac-Fix-unregister_netdev-issue.patch | 42 -
 ...l-dpaa2-mac-Don-t-call-devm_free_irq.patch | 42 -
 ...sl-dpaa2-mac-Use-of_property_read_32.patch | 43 -
 ...-fsl-dpaa2-mac-Remove-version-checks.patch | 61 -
 ...aging-fsl-dpaa2-mac-match-id-cleanup.patch | 26 -
 ...evb-Added-Edge-Virtual-Bridge-driver.patch | 2918 --
 ...216-dpaa2-evb-Fix-interrupt-handling.patch | 69 -
 ...7-dpaa2-evb-Add-object-version-check.patch | 43 -
 .../7218-dpaa2-evb-Cosmetic-cleanup.patch | 20 -
 .../7219-dpaa2-evb-match-id-cleanup.patch | 26 -
 ...0-dpaa2-ethsw-Ethernet-Switch-driver.patch | 6605 ---
 .../7221-dpaa2-ethsw-match-id-cleanup.patch | 26 -
 ...fix-compile-error-on-backport-to-4.4.patch | 21 -
 ...domain-bus-token-DOMAIN_BUS_FSL_MC_M.patch | 26 -
 ...-FSL-MC-specific-member-to-the-msi_d.patch | 40 -
 ...2-evb-fix-4.4-backport-compile-error.patch | 21 -
 ...paa_eth-fix-adjust_link-for-10G-2.5G.patch | 43 -
 .../8036-ls2085a-Add-support-for-reset.patch | 135 -
 .../8037-ls1043a-Add-support-for-reset.patch | 56 -
 ...nfig-Change-define-to-ARCH_LAYERSCAP.patch | 22 -
 ...t-gpio-driver-to-support-layerscape-.patch | 289 -
 ...hc-add-remove-some-quirks-according-.patch | 60 -
 ...e-Add-fsl-ls2085a-pcie-compatible-ID.patch | 25 -
 ...-layerscape-Fix-MSG-TLP-drop-setting.patch | 66 -
 ...yerscape-SCFG-MSI-controller-support.patch | 285 -
 ...ayerscape-Enable-PCIe-for-Layerscape.patch | 22 -
 ...2-enable-pci_domains-for-armv8-32bit.patch | 20 -
 ...73-ls1012a-added-clock-configuration.patch | 61 -
 .../8114-drivers-PCIE-enable-for-Linux.patch | 49 -
 ...all-dw_pcie_setup_rc-in-host-initial.patch | 30 -
 ...25-rtc-pcf2127-add-pcf2129-device-id.patch | 63 -
 ...-ls1046a-msi-Add-LS1046A-MSI-support.patch | 25 -
 ...8-pci-layerscape-add-LS1046A-support.patch | 38 -
 .../8129-clk-qoriq-add-ls1046a-support.patch | 75 -
 ...s1046a-sata-Add-LS1046A-sata-support.patch | 53 -
 ...e-add-LUT-DBG-reigster-offset-member.patch | 67 -
 ...mc-Add-compatible-string-for-LS1088A.patch | 24 -
 ...37-armv8-ls1088a-Add-PCIe-compatible.patch | 38 -
 ...layerscape-add-MSI-interrupt-support.patch | 259 -
 ...mc-Add-compatible-string-for-LS1046A.patch | 20 -
 ...iq-Add-ls2088a-key-to-chipinfo-table.patch | 30 -
 ...8230-layerscape-pci-fix-linkup-issue.patch | 37 -
 ...er-clk-qoriq-Add-ls2088a-clk-support.patch | 20 -
 ...option-to-skip-disabling-PCA954x-Mux.patch | 105 -
 ...-layerscape-fix-pci-lut-offset-issue.patch | 33 -
 .../8236-clk-add-API-of-clks.patch | 75 -
 ...unified-compatible-fsl-ls2080a-pcie-.patch | 96 -
 ...msi-fix-typo-of-MSI-compatible-strin.patch | 55 -
 ...-ls-scfg-msi-add-LS1046a-MSI-support.patch | 293 -
 ...cfg-msi-add-LS1043a-v1.1-MSI-support.patch | 134 -
 ...ls-scfg-msi-add-MSI-affinity-support.patch | 152 -
 ...69-Revert-arm64-simplify-dma_get_ops.patch | 93 -
 ...-fixmap-region-for-permanent-FDT-map.patch | 304 -
 .../201-config-support-layerscape.patch | 486 +
 .../301-arch-support-layerscape.patch | 428 +
 .../302-dts-support-layercape.patch | 9944 ++++
 .../401-mtd-spi-nor-support-layerscape.patch | 1030 +
 .../402-mtd-support-layerscape.patch | 397 +
 .../601-net-support-layerscape.patch | 2365 +
 .../701-sdk_dpaa-support-layerscape.patch} | 38810 +++++++++++++++-
 .../702-pci-support-layerscape.patch | 2036 +
 .../703-phy-support-layerscape.patch | 1753 +
 .../704-fsl-mc-layerscape-support.patch | 11444 +++++
 .../705-dpaa2-support-layerscape.patch | 22907 +++++
 .../801-ata-support-layerscape.patch | 144 +
 .../802-clk-support-layerscape.patch | 307 +
 .../803-cpufreq-support-layerscape.patch} | 171 +-
 .../804-crypto-support-layerscape.patch | 26717 +++++++++++
 .../805-dma-support-layerscape.patch | 3750 ++
 .../806-flextimer-support-layerscape.patch | 323 +
 .../807-gpu-support-layerscape.patch | 68 +
 .../808-guts-support-layerscape.patch | 452 +
 .../809-i2c-support-layerscape.patch | 133 +
 .../810-iommu-support-layerscape.patch | 1314 +
 .../811-irqchip-support-layerscape.patch | 169 +
 .../812-mmc-layerscape-support.patch | 599 +
 .../813-qe-support-layerscape.patch | 1976 +
 .../814-rtc-support-layerscape.patch | 682 +
 .../815-spi-support-layerscape.patch | 438 +
 .../816-tty-serial-support-layerscape.patch | 158 +
 .../817-usb-support-layerscape.patch | 1434 +
 .../818-vfio-support-layerscape.patch | 1166 +
 ...ig-remove-dependency-FSL_SOC-for-ehc.patch | 28 +
 .../patches-4.9/0063-atomic-sleep.patch | 38 +
 ...25-pinctrl-ralink-add-pinctrl-driver.patch | 2 +-
 target/linux/sunxi/Makefile | 5 +-
 target/linux/sunxi/config-4.9 | 573 +
 target/linux/sunxi/cortexa53/config-default | 101 +
 target/linux/sunxi/cortexa53/target.mk | 13 +
 target/linux/sunxi/cortexa7/config-default | 8 +
 target/linux/sunxi/cortexa7/target.mk | 12 +
 target/linux/sunxi/cortexa8/config-default | 22 +
 target/linux/sunxi/cortexa8/target.mk | 12 +
 target/linux/sunxi/image/Makefile | 152 +-
 target/linux/sunxi/image/cortex-a53.mk | 20 +
 target/linux/sunxi/image/cortex-a7.mk | 149 +
 target/linux/sunxi/image/cortex-a8.mk | 59 +
 target/linux/sunxi/modules.mk | 4 +-
 ...sunxi-always-enable-reset-controller.patch | 39 +
 ...xi-ng-Rename-the-internal-structures.patch | 239 +
 ...ove-the-use-of-rational-computations.patch | 239 +
 ...ish-to-convert-to-structures-for-arg.patch | 182 +
 ...-minimums-for-all-the-relevant-struc.patch | 256 +
 ...ng-Implement-minimum-for-multipliers.patch | 132 +
 .../0007-clk-sunxi-ng-Add-A64-clocks.patch | 1295 +
 ...rm64-dts-add-Allwinner-A64-SoC-.dtsi.patch | 311 +
 .../0011-arm64-dts-add-Pine64-support.patch | 176 +
 ...ild-errors-from-missing-dependencies.patch | 134 +
 ...ner-add-USB1-related-nodes-of-Allwin.patch | 84 +
 ...ner-sort-the-nodes-in-sun50i-a64-pin.patch | 40 +
 ...ner-enable-EHCI1-OHCI1-and-USB-PHY-n.patch | 47 +
 ...-add-MUSB-node-to-Allwinner-A64-dtsi.patch | 42 +
 ...-the-MUSB-controller-of-Pine64-in-ho.patch | 32 +
 ...ner-Remove-no-longer-used-pinctrl-su.patch | 31 +
 ...19-arm64-allwinner-a64-Add-MMC-nodes.patch | 69 +
 ...-allwinner-a64-Add-MMC-pinctrl-nodes.patch | 50 +
 ...m64-allwinner-pine64-add-MMC-support.patch | 62 +
 ...64-allwinner-a64-add-UART1-pin-nodes.patch | 35 +
 ...4-arm64-allwinner-a64-add-r_ccu-node.patch | 52 +
 ...allwinner-a64-add-R_PIO-pinctrl-node.patch | 35 +
 ...winner-a64-add-pmu0-regs-for-USB-PHY.patch | 29 +
 ...a64-Add-PLL_PERIPH0-clock-to-the-R_C.patch | 32 +
 ...-Rework-the-pin-config-building-code.patch | 251 +
 ...e-macros-from-bindings-header-file-f.patch | 38 +
 ...32-pinctrl-sunxi-Handle-bias-disable.patch | 42 +
 ...inctrl-sunxi-Support-generic-binding.patch | 106 +
 ...ctrl-sunxi-Deal-with-configless-pins.patch | 128 +
 ...ke-bool-drivers-explicitly-non-modul.patch | 437 +
 ...ee-configs-in-pinctrl_map-only-if-it.patch | 51 +
 ...x-PIN_CONFIG_BIAS_PULL_-DOWN-UP-argu.patch | 40 +
 ...d-support-for-fetching-pinconf-setti.patch | 158 +
 ...ke-sunxi_pconf_group_set-use-sunxi_p.patch | 122 +
 ...Add-support-for-interrupt-debouncing.patch | 171 +
 ...x-theoretical-uninitialized-variable.patch | 40 +
 ...trl-sunxi-Testing-the-wrong-variable.patch | 35 +
 ...i-Don-t-enforce-bias-disable-for-now.patch | 42 +
 .../patches-4.9/0050-stmmac-form-4-10.patch | 3497 ++
 .../patches-4.9/0051-stmmac-form-4-11.patch | 2296 +
 .../patches-4.9/0052-stmmac-form-4-12.patch | 5974 +++
 .../patches-4.9/0053-stmmac-form-4-13.patch | 1924 +
 ...-ss_support_the_Security_System_PRNG.patch | 206 +
 ...sun50i-a64-Add-dt-node-for-the-sysco.patch | 33 +
 ...sun50i-a64-add-dwmac-sun8i-Ethernet-.patch | 69 +
 ...-allwinner-pine64-Enable-dwmac-sun8i.patch | 46 +
 ...inner-pine64-plus-Enable-dwmac-sun8i.patch | 38 +
 ...sun50i-a64-Correct-emac-register-siz.patch | 26 +
 ...a64-pine64-add-missing-ethernet0-ali.patch | 29 +
 ...h3-h5-Add-dt-node-for-the-syscon-con.patch | 32 +
 ...h3-h5-add-dwmac-sun8i-ethernet-drive.patch | 67 +
 ...-sun8i-orangepi-2-Enable-dwmac-sun8i.patch | 40 +
 ...n8i-orangepi-plus-Enable-dwmac-sun8i.patch | 64 +
 ...nxi-h3-h5-Correct-emac-register-size.patch | 26 +
 ...-sunxi-nanopi-neo-Enable-dwmac-sun8i.patch | 24 +
 ...nopi-neo-enable-UART-USB-and-I2C-pin.patch | 80 +
 .../115-musb-ignore-vbus-errors.patch | 26 +
 .../patches-4.9/131-reset-add-h3-resets.patch | 92 +
 ...i-add-support-for-Orange-Pi-R1-board.patch | 196 +
 394 files changed, 157726 insertions(+), 124645 deletions(-)
 create mode 100644 package/network/services/hostapd/patches/008-WPA-Extra-defense-against-PTK-reinstalls-in-4-way-ha.patch
 create mode 100644 package/network/services/hostapd/patches/009-Clear-PMK-length-and-check-for-this-when-deriving-PT.patch
 create mode 100644 package/network/services/hostapd/patches/010-Optional-AP-side-workaround-for-key-reinstallation-a.patch
 create mode 100644 package/network/services/hostapd/patches/011-Additional-consistentcy-checks-for-PTK-component-len.patch
 create mode 100644 package/network/services/hostapd/patches/012-Clear-BSSID-information-in-supplicant-state-machine-.patch
 create mode 100644 target/linux/generic/backport-4.9/024-1-tcp-tsq-add-tsq_flags-tsq_enum.patch
 create mode 100644 target/linux/generic/backport-4.9/024-2-tcp-tsq-remove-one-locked-operation-in-tcp_wfree.patch
 create mode 100644 target/linux/generic/backport-4.9/024-3-tcp-tsq-add-shortcut-in-tcp_tasklet_func.patch
 create mode 100644 target/linux/generic/backport-4.9/024-4-tcp-tsq-avoid-one-atomic-in-tcp_wfree.patch
 create mode 100644 target/linux/generic/backport-4.9/024-5-tcp-tsq-add-a-shortcut-in-tcp_small_queue_check.patch
 create mode 100644 target/linux/generic/backport-4.9/024-6-tcp-tcp_mtu_probe-is-likely-to-exit-early.patch
 create mode 100644 target/linux/generic/backport-4.9/024-7-net-reorganize-struct-sock-for-better-data-locality.patch
 create mode 100644 target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch
 create mode 100644 target/linux/generic/backport-4.9/024-9-tcp-add-a-missing-barrier-in-tcp_tasklet_func.patch
 create mode 100644 target/linux/generic/backport-4.9/090-net-generalize-napi_complete_done.patch
 delete mode 100644 target/linux/layerscape/32b/config-default
 delete mode 120000 target/linux/layerscape/32b/profiles/00-default.mk
 delete mode 100644 target/linux/layerscape/64b/config-default
 create mode 100644 target/linux/layerscape/armv8_32b/config-4.9
 rename target/linux/layerscape/{64b => armv8_32b}/profiles/00-default.mk (100%)
 rename target/linux/layerscape/{32b => armv8_32b}/target.mk (56%)
 create mode 100644 target/linux/layerscape/armv8_64b/config-4.9
 create mode 100644 target/linux/layerscape/armv8_64b/profiles/00-default.mk
 rename target/linux/layerscape/{64b => armv8_64b}/target.mk (63%)
 delete mode 100644 target/linux/layerscape/config-4.4
 delete mode 100644 target/linux/layerscape/patches-4.4/0051-PCI-designware-Ensure-ATU-is-enabled-before-IO-conf-.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/0052-PCI-designware-Simplify-control-flow.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/0053-PCI-designware-Make-config-accessor-override-checkin.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/0054-PCI-designware-Explain-why-we-don-t-program-ATU-for-.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/0055-PCI-designware-Remove-PCI_PROBE_ONLY-handling.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/0056-PCI-designware-Add-generic-dw_pcie_wait_for_link.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/0057-PCI-designware-Add-default-link-up-check-if-sub-driv.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/0058-PCI-designware-Move-Root-Complex-setup-code-to-dw_pc.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/0059-PCI-designware-Remove-incorrect-RC-memory-base-limit.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/0140-config-add-freescale-config-for-amr64.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/0238-arm64-disable-CONFIG_EEPROM_AT24-for-freescale.confi.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/0239-ARM-dts-ls1021a-add-PCIe-dts-node.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/0240-ARM-dts-ls1021a-add-SCFG-MSI-dts-node.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/0241-dt-bindings-Add-bindings-for-Layerscape-SCFG-MSI.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1074-mtd-nand-spi-nor-assign-MTD-of_node.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1075-mtd-spi-nor-convert-to-spi_nor_-get-set-_flash_node.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1076-mtd-spi-nor-drop-unnecessary-partition-parser-data.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1077-mtd-add-get-set-of_node-flash_node-helpers.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1078-mtd-spi-nor-drop-flash_node-field.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1079-mtd-spi-nor-remove-unnecessary-leading-space-from-db.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1080-mtd-fsl-quadspi-possible-NULL-dereference.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1081-mtd-spi-nor-provide-default-erase_sector-implementat.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1083-mtd-spi-nor-Fix-error-message-with-unrecognized-JEDE.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1084-mtd-spi-nor-fix-error-handling-in-spi_nor_erase.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1085-mtd-spi-nor-Check-the-return-value-from-read_sr.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1086-mtd-spi-nor-wait-until-lock-unlock-operations-are-re.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1087-mtd-spi-nor-fsl-quadspi-add-big-endian-support.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1088-mtd-spi-nor-fsl-quadspi-add-support-for-ls1021a.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1089-mtd-spi-nor-fsl-quadspi-add-support-for-layerscape.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1090-mtd-spi-nor-Add-SPI-NOR-layer-PM-support.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1091-mtd-spi-nor-change-return-value-of-read-write.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1092-mtd-fsl-quadspi-return-amount-of-data-read-written-o.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1093-mtd-spi-nor-check-return-value-from-read-write.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1094-mtd-spi-nor-stop-passing-around-retlen.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1095-mtd-spi-nor-simplify-write-loop.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1096-mtd-spi-nor-add-read-loop.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1097-mtd-fsl-quadspi-use-the-property-fields-of-SPI-NOR.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1098-mtd-fsl-quadspi-Rename-SEQID_QUAD_READ-to-SEQID_READ.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1099-mtd-spi-nor-fsl-quadspi-Add-fast-read-mode-support.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1100-mtd-spi_nor-Disable-Micron-flash-HW-protection.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1101-mtd-spi-nor-fsl-quadspi-extend-support-for-some-spec.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1102-mtd-spi-nor-fsl-quadspi-Support-qspi-for-ls2080a.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1103-mtd-spi-nor-Support-R-W-for-S25FS-S-family-flash.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1104-mtd-fsl-quadspi-Add-quad-mode-for-flash-n25q128.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1105-mtd-spi-nor-add-DDR-quad-read-support.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1106-mtd-fsl-quadspi-add-DDR-quad-read-for-Spansion.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1107-mtd-fsl-quadspi-disable-AHB-buffer-prefetch.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1108-mtd-fsl-quadspi-add-multi-flash-chip-R-W-on-ls2080a.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1109-drivers-mtd-spi-nor-Enable-QSPI-Flash-in-Kernel.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1110-mtd-spi-nor-fsl-quad-add-flash-S25FS-extra-support.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1111-mtd-spi-nor-disable-4kb-sector-erase-for-s25fl128.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1112-driver-spi-fsl-quad-Hang-memcpy-Unhandled-fault-alig.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1113-mtd-spi-nor-fsl-quad-move-mtd_device_register-to-the.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/1239-mtd-extend-physmap_of-to-let-the-device-tree-specify.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/2006-armv8-aarch32-Add-the-default-config-ls_aarch32_defc.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/2027-armv8-aarch32-update-defconfig-for-LayerScape-SoC.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/2119-armv8-aarch32-defconfig-Enable-CAAM-support.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/2120-armv8-aarch32-defconfig-Enable-firmware-loading.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/2121-armv8-aarch32-defconfig-Enable-support-for-AHCI-SATA.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/2122-armv8-aarch32-defconfig-Enable-USB-and-related-confi.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/2123-armv8-aarch32-defconfig-Enable-KVM-related-configura.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/2124-armv8-aarch32-defconfig-Enable-FTM-alarm-support.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3001-arm64-ls1043a-add-DTS-for-Freescale-LS1043A-SoC.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3002-dts-ls1043a-add-LS1043ARDB-board-support.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3003-arm64-dts-Update-address-cells-and-reg-properties-of.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3004-armv8-aarch32-Add-ITS-file-for-AArch32-Linux-on-LS10.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3005-armv8-aarch32-change-FS-file-name-in-ITS.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3007-armv8-aarch32-Run-32-bit-Linux-in-AArch32-execution-.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3008-armv8-aarch32-Add-SMP-support-for-32-bit-Linux.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3009-armv8-aarch32-Allow-RAM-to-be-mapped-for-LayerScape-.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3010-arm-add-pgprot_cached-and-pgprot_cached_ns-support.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3011-arm-add-new-non-shareable-ioremap.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3012-dts-ls1043a-add-fman-bman-qman-ethernet-nodes.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3013-dts-ls1043ardb-add-mdio-phy-nodes.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3022-dt-move-guts-devicetree-doc-out-of-powerpc-directory.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3023-powerpc-fsl-move-mpc85xx.h-to-include-linux-fsl.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3025-arm64-dts-align-to-the-new-clocking-model.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3028-dts-ls1043-update-dts-for-ls1043.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3032-arm64-Add-pdev_archdata-for-dmamask.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3033-arm64-add-ioremap-for-normal-cacheable-non-shareable.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3034-arm64-add-support-to-remap-kernel-cacheable-memory-t.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3035-arm64-pgtable-add-support-to-map-cacheable-and-non-s.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3039-arch-arm-add-ARM-specific-fucntions-required-for-ehc.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3063-arm64-add-NO_IRQ-macro.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3071-arm64-dts-add-device-tree-for-ls1012a-SoC-and-boards.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3117-armv8-aarch32-Run-32-bit-Linux-for-LayerScape-SoCs.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3118-armv8-aarch32-Add-KVM-support-for-AArch32-on-ARMv8.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3131-arm64-ls1046a-add-DTS-for-Freescale-LS1046A-SoC.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3132-dts-ls1046a-add-LS1046ARDB-board-support.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3133-ls1046ardb-add-ITS-file.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3135-arm64-Add-DTS-support-for-FSL-s-LS1088ARDB.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3139-ls1088ardb-add-ITS-file.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3141-caam-add-caam-node-for-ls1088a.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3143-armv8-aarch32-Execute-32-bit-Linux-for-ls1046a.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3226-mtd-spi-nor-fsl-quadspi-Enable-fast-read-for-LS1088A.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3227-ls2088a-dts-add-ls2088a-dts.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3228-ls2088a-add-ls2088a-its.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3229-arm-dts-ls1021a-fix-typo-of-MSI-compatible-string.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3230-arm64-dts-ls1043a-fix-typo-of-MSI-compatible-string.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3231-arm-dts-ls1021a-share-all-MSIs.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3232-arm64-dts-ls1043a-share-all-MSIs.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3233-arm64-dts-ls1046a-update-MSI-dts-node.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/3234-dts-ls1043a-change-GIC-register-for-rev1.1.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/4043-driver-memory-Removal-of-deprecated-NO_IRQ.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/4044-drivers-memory-Add-deep-sleep-support-for-IFC.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/4045-driver-memory-Update-dependency-of-IFC-for-Layerscap.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/4046-mtd-ifc-Segregate-IFC-fcm-and-runtime-registers.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/4047-drivers-memory-Fix-build-error-for-arm64.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/4234-fsl-ifc-fix-compilation-error-when-COMPAT-not-enable.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7014-temp-QE-headers-are-needed-by-FMD.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7016-dpa-add-dpaa_eth-driver.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7017-fsl_qbman-add-qbman-driver.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7018-devres-add-devm_alloc_percpu.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7019-net-readd-skb_recycle.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7020-net-add-custom-NETIF-flags.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7021-net-Make-the-netdev-watchdog-aware-of-hardware-multi.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7024-Add-APIs-to-setup-HugeTLB-mappings-for-USDPAA.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7029-fmd-SGMII-PCS-needs-to-be-reprogrammed-after-sleep.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7030-fmd-use-kernel-api-for-64bit-division.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7031-fsl_qbman-Enable-DPAA1-QBMan-for-ARM64-platforms.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7064-dpaa_eth-repair-issue-introduced-with-2.5G-support.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7065-dpaa_eth-replace-sgmii-2500-with-qsgmii.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7066-fmd-add-2.5G-SGMII-mode-suport.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7067-net-phy-add-SGMII-2500-PHY.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7068-dpaa_ethernet-fix-link-state-detect-for-10G-interfac.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7072-LS1012-Add-PPFE-driver-in-Linux.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7126-net-phy-add-driver-for-aquantia-AQR106-107-phy.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7144-dpaa-call-arch_setup_dma_ops-before-using-dma_ops.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7145-staging-fsl-mc-Added-generic-MSI-support-for-FSL-MC-.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7146-staging-fsl-mc-Added-GICv3-ITS-support-for-FSL-MC-MS.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7147-staging-fsl-mc-Extended-MC-bus-allocator-to-include-.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7148-staging-fsl-mc-Changed-DPRC-built-in-portal-s-mc_io-.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7149-staging-fsl-mc-Populate-the-IRQ-pool-for-an-MC-bus-i.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7150-staging-fsl-mc-set-MSI-domain-for-DPRC-objects.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7151-staging-fsl-mc-Fixed-bug-in-dprc_probe-error-path.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7152-staging-fsl-mc-Added-DPRC-interrupt-handler.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7153-staging-fsl-mc-Added-MSI-support-to-the-MC-bus-drive.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7154-staging-fsl-mc-Remove-unneeded-parentheses.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7155-staging-fsl-mc-Do-not-allow-building-as-a-module.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7156-staging-fsl-mc-Avoid-section-mismatch.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7157-staging-fsl-mc-Remove-unneeded-else-following-a-retu.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7158-staging-fsl-mc-Drop-unneeded-void-pointer-cast.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7159-staging-fsl-mc-bus-Eliminate-double-function-call.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7160-Staging-fsl-mc-Replace-pr_debug-with-dev_dbg.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7161-Staging-fsl-mc-Replace-pr_err-with-dev_err.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7162-staging-fsl-mc-fix-incorrect-type-passed-to-dev_dbg-.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7163-staging-fsl-mc-fix-incorrect-type-passed-to-dev_err-.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7164-staging-fsl-mc-get-rid-of-mutex_locked-variables.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7165-staging-fsl-mc-TODO-updates.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7166-staging-fsl-mc-DPAA2-overview-readme-update.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7167-staging-fsl-mc-update-dpmcp-binary-interface-to-v3.0.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7168-staging-fsl-mc-update-dpbp-binary-interface-to-v2.2.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7169-staging-fsl-mc-update-dprc-binary-interface-to-v5.1.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7170-staging-fsl-mc-don-t-use-object-versions-to-make-bin.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7171-staging-fsl-mc-set-up-coherent-dma-ops-for-added-dev.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7172-staging-fsl-mc-set-cacheable-flag-for-added-devices-.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7173-staging-fsl-mc-get-version-of-root-dprc-from-MC-hard.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7174-staging-fsl-mc-add-dprc-version-check.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7175-staging-fsl-mc-add-quirk-handling-for-dpseci-objects.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7176-staging-fsl-mc-add-dpmcp-version-check.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7177-staging-fsl-mc-return-EINVAL-for-all-fsl_mc_portal_a.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7178-staging-fsl-mc-bus-Drop-warning.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7179-staging-fsl-mc-add-support-for-the-modalias-sysfs-at.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7180-staging-fsl-mc-implement-uevent-callback-and-set-the.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7181-staging-fsl-mc-clean-up-the-device-id-struct.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7182-staging-fsl-mc-add-support-for-device-table-matching.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7183-staging-fsl-mc-export-mc_get_version.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7184-staging-fsl-mc-make-fsl_mc_is_root_dprc-global.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7185-staging-fsl-mc-fix-asymmetry-in-destroy-of-mc_io.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7186-staging-fsl-mc-dprc-add-missing-irq-free.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7187-staging-fsl-mc-dprc-fix-ordering-problem-freeing-res.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7188-staging-fsl-mc-properly-set-hwirq-in-msi-set_desc.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7189-staging-fsl-mc-update-dpcon-binary-interface-to-v2.2.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7190-staging-fsl-mc-root-dprc-rescan-attribute-to-sync-ke.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7191-staging-fsl-mc-bus-rescan-attribute-to-sync-kernel-w.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7192-staging-fsl-mc-Propagate-driver_override-for-a-child.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7193-staging-fsl-mc-add-device-binding-path-driver_overri.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7194-staging-fsl-mc-export-irq-cleanup-for-vfio-to-use.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7195-increment-MC_CMD_COMPLETION_TIMEOUT_MS.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7196-staging-fsl-mc-make-fsl_mc_get_root_dprc-public.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7197-staging-fsl-mc-Management-Complex-restool-driver.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7198-staging-fsl-mc-dpio-services-driver.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7199-dpaa2-dpio-Cosmetic-cleanup.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7200-staging-fsl-mc-dpio-driver-match-id-cleanup.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7201-staging-dpaa2-eth-initial-commit-of-dpaa2-eth-driver.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7202-staging-fsl-dpaa2-eth-code-cleanup-for-upstreaming.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7203-fsl-dpaa2-eth-Update-description-of-DPNI-counters.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7204-fsl-dpaa2-eth-dpni-Clear-compiler-warnings.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7205-fsl-dpaa2-eth-sanitize-supported-private-flags.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7206-fsl-dpaa2-eth-match-id-cleanup.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7207-fsl-dpaa2-eth-add-device-table-to-driver.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7208-staging-fsl-dpaa2-mac-Added-MAC-PHY-interface-driver.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7209-staging-fsl-dpaa2-mac-Interrupt-code-cleanup.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7210-staging-fsl-dpaa2-mac-Fix-unregister_netdev-issue.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7211-staging-fsl-dpaa2-mac-Don-t-call-devm_free_irq.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7212-staging-fsl-dpaa2-mac-Use-of_property_read_32.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7213-staging-fsl-dpaa2-mac-Remove-version-checks.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7214-staging-fsl-dpaa2-mac-match-id-cleanup.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7215-dpaa2-evb-Added-Edge-Virtual-Bridge-driver.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7216-dpaa2-evb-Fix-interrupt-handling.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7217-dpaa2-evb-Add-object-version-check.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7218-dpaa2-evb-Cosmetic-cleanup.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7219-dpaa2-evb-match-id-cleanup.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7220-dpaa2-ethsw-Ethernet-Switch-driver.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7221-dpaa2-ethsw-match-id-cleanup.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7222-dpaa2-ethsw-fix-compile-error-on-backport-to-4.4.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7223-irqdomain-Added-domain-bus-token-DOMAIN_BUS_FSL_MC_M.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7224-fsl-mc-msi-Added-FSL-MC-specific-member-to-the-msi_d.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7225-dpaa2-evb-fix-4.4-backport-compile-error.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/7226-dpaa_eth-fix-adjust_link-for-10G-2.5G.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8036-ls2085a-Add-support-for-reset.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8037-ls1043a-Add-support-for-reset.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8038-reset-driver-Kconfig-Change-define-to-ARCH_LAYERSCAP.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8042-drivers-gpio-Port-gpio-driver-to-support-layerscape-.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8048-mmc-sdhci-of-esdhc-add-remove-some-quirks-according-.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8049-PCI-layerscape-Add-fsl-ls2085a-pcie-compatible-ID.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8050-PCI-layerscape-Fix-MSG-TLP-drop-setting.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8060-irqchip-Add-Layerscape-SCFG-MSI-controller-support.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8061-arm64-layerscape-Enable-PCIe-for-Layerscape.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8062-armv8-aarch32-enable-pci_domains-for-armv8-32bit.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8073-ls1012a-added-clock-configuration.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8114-drivers-PCIE-enable-for-Linux.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8115-PCI-layerscape-call-dw_pcie_setup_rc-in-host-initial.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8125-rtc-pcf2127-add-pcf2129-device-id.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8127-ls1046a-msi-Add-LS1046A-MSI-support.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8128-pci-layerscape-add-LS1046A-support.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8129-clk-qoriq-add-ls1046a-support.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8130-ls1046a-sata-Add-LS1046A-sata-support.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8134-pci-layerscape-add-LUT-DBG-reigster-offset-member.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8136-drivers-mmc-Add-compatible-string-for-LS1088A.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8137-armv8-ls1088a-Add-PCIe-compatible.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8138-pci-layerscape-add-MSI-interrupt-support.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8142-drivers-mmc-Add-compatible-string-for-LS1046A.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8229-drivers-clk-qoriq-Add-ls2088a-key-to-chipinfo-table.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8230-layerscape-pci-fix-linkup-issue.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8231-driver-clk-qoriq-Add-ls2088a-clk-support.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8233-i2c-pca954x-Add-option-to-skip-disabling-PCA954x-Mux.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8235-pci-layerscape-fix-pci-lut-offset-issue.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8236-clk-add-API-of-clks.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8237-pcie-ls208x-use-unified-compatible-fsl-ls2080a-pcie-.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8238-irqchip-ls-scfg-msi-fix-typo-of-MSI-compatible-strin.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8239-irqchip-ls-scfg-msi-add-LS1046a-MSI-support.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8240-irqchip-ls-scfg-msi-add-LS1043a-v1.1-MSI-support.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/8241-irqchip-ls-scfg-msi-add-MSI-affinity-support.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/9069-Revert-arm64-simplify-dma_get_ops.patch
 delete mode 100644 target/linux/layerscape/patches-4.4/9070-Revert-arm64-use-fixmap-region-for-permanent-FDT-map.patch
 create mode 100644 target/linux/layerscape/patches-4.9/201-config-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/301-arch-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/302-dts-support-layercape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/401-mtd-spi-nor-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/402-mtd-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/601-net-support-layerscape.patch
 rename target/linux/layerscape/{patches-4.4/7015-fmd-add-fman-driver.patch => patches-4.9/701-sdk_dpaa-support-layerscape.patch} (80%)
 create mode 100644 target/linux/layerscape/patches-4.9/702-pci-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/703-phy-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/704-fsl-mc-layerscape-support.patch
 create mode 100644 target/linux/layerscape/patches-4.9/705-dpaa2-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/801-ata-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/802-clk-support-layerscape.patch
 rename target/linux/layerscape/{patches-4.4/8026-cpufreq-qoriq-Don-t-look-at-clock-implementation-det.patch => patches-4.9/803-cpufreq-support-layerscape.patch} (57%)
 create mode 100644 target/linux/layerscape/patches-4.9/804-crypto-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/806-flextimer-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/807-gpu-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/808-guts-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/809-i2c-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/811-irqchip-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/812-mmc-layerscape-support.patch
 create mode 100644 target/linux/layerscape/patches-4.9/813-qe-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/814-rtc-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/815-spi-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/816-tty-serial-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/817-usb-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/818-vfio-support-layerscape.patch
 create mode 100644 target/linux/layerscape/patches-4.9/819-Revert-usb-kconfig-remove-dependency-FSL_SOC-for-ehc.patch
 create mode 100644 target/linux/mediatek/patches-4.9/0063-atomic-sleep.patch
 create mode 100644 target/linux/sunxi/config-4.9
 create mode 100644 target/linux/sunxi/cortexa53/config-default
 create mode 100644 target/linux/sunxi/cortexa53/target.mk
 create mode 100644 target/linux/sunxi/cortexa7/config-default
 create mode 100644 target/linux/sunxi/cortexa7/target.mk
 create mode 100644 target/linux/sunxi/cortexa8/config-default
 create mode 100644 target/linux/sunxi/cortexa8/target.mk
 create mode 100644 target/linux/sunxi/image/cortex-a53.mk
 create mode 100644 target/linux/sunxi/image/cortex-a7.mk
 create mode 100644 target/linux/sunxi/image/cortex-a8.mk
 create mode 100644 target/linux/sunxi/patches-4.9/0001-arm64-sunxi-always-enable-reset-controller.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0002-clk-sunxi-ng-Rename-the-internal-structures.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0003-clk-sunxi-ng-Remove-the-use-of-rational-computations.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0004-clk-sunxi-ng-Finish-to-convert-to-structures-for-arg.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0005-clk-sunxi-ng-Add-minimums-for-all-the-relevant-struc.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0006-clk-sunxi-ng-Implement-minimum-for-multipliers.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0007-clk-sunxi-ng-Add-A64-clocks.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0010-arm64-dts-add-Allwinner-A64-SoC-.dtsi.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0011-arm64-dts-add-Pine64-support.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0012-arm64-dts-fix-build-errors-from-missing-dependencies.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0013-arm64-dts-allwinner-add-USB1-related-nodes-of-Allwin.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0014-arm64-dts-allwinner-sort-the-nodes-in-sun50i-a64-pin.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0015-arm64-dts-allwinner-enable-EHCI1-OHCI1-and-USB-PHY-n.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0016-arm64-dts-add-MUSB-node-to-Allwinner-A64-dtsi.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0017-arm64-dts-enable-the-MUSB-controller-of-Pine64-in-ho.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0018-arm64-dts-allwinner-Remove-no-longer-used-pinctrl-su.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0019-arm64-allwinner-a64-Add-MMC-nodes.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0020-arm64-allwinner-a64-Add-MMC-pinctrl-nodes.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0022-arm64-allwinner-pine64-add-MMC-support.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0023-arm64-allwinner-a64-add-UART1-pin-nodes.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0024-arm64-allwinner-a64-add-r_ccu-node.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0025-arm64-allwinner-a64-add-R_PIO-pinctrl-node.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0026-arm64-allwinner-a64-add-pmu0-regs-for-USB-PHY.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0027-arm64-allwinner-a64-Add-PLL_PERIPH0-clock-to-the-R_C.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0030-pinctrl-sunxi-Rework-the-pin-config-building-code.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0031-pinctrl-sunxi-Use-macros-from-bindings-header-file-f.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0032-pinctrl-sunxi-Handle-bias-disable.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0033-pinctrl-sunxi-Support-generic-binding.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0034-pinctrl-sunxi-Deal-with-configless-pins.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0035-pinctrl-sunxi-make-bool-drivers-explicitly-non-modul.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0036-pinctrl-sunxi-Free-configs-in-pinctrl_map-only-if-it.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0037-pinctrl-sunxi-Fix-PIN_CONFIG_BIAS_PULL_-DOWN-UP-argu.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0038-pinctrl-sunxi-Add-support-for-fetching-pinconf-setti.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0039-pinctrl-sunxi-Make-sunxi_pconf_group_set-use-sunxi_p.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0040-pinctrl-sunxi-Add-support-for-interrupt-debouncing.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0041-pinctrl-sunxi-fix-theoretical-uninitialized-variable.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0042-pinctrl-sunxi-Testing-the-wrong-variable.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0043-pinctrl-sunxi-Don-t-enforce-bias-disable-for-now.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0051-stmmac-form-4-11.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0053-stmmac-form-4-13.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0054-crypto-sun4i-ss_support_the_Security_System_PRNG.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0060-arm64-allwinner-sun50i-a64-Add-dt-node-for-the-sysco.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0061-arm64-allwinner-sun50i-a64-add-dwmac-sun8i-Ethernet-.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0062-arm64-allwinner-pine64-Enable-dwmac-sun8i.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0063-arm64-allwinner-pine64-plus-Enable-dwmac-sun8i.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0064-arm64-allwinner-sun50i-a64-Correct-emac-register-siz.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0065-arm64-allwinner-a64-pine64-add-missing-ethernet0-ali.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0070-arm-sun8i-sunxi-h3-h5-Add-dt-node-for-the-syscon-con.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0071-arm-sun8i-sunxi-h3-h5-add-dwmac-sun8i-ethernet-drive.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0072-arm-sun8i-orangepi-2-Enable-dwmac-sun8i.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0073-ARM-sun8i-orangepi-plus-Enable-dwmac-sun8i.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0074-ARM-dts-sunxi-h3-h5-Correct-emac-register-size.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0080-ARM-dts-sunxi-nanopi-neo-Enable-dwmac-sun8i.patch
 create mode 100644 target/linux/sunxi/patches-4.9/0081-ARM-dts-sun8i-nanopi-neo-enable-UART-USB-and-I2C-pin.patch
target/linux/sunxi/patches-4.9/115-musb-ignore-vbus-errors.patch create mode 100644 target/linux/sunxi/patches-4.9/131-reset-add-h3-resets.patch create mode 100644 target/linux/sunxi/patches-4.9/200-ARM-dts-sunxi-add-support-for-Orange-Pi-R1-board.patch diff --git a/include/kernel-version.mk b/include/kernel-version.mk index 405e5fd52..2d3e8e6fb 100644 --- a/include/kernel-version.mk +++ b/include/kernel-version.mk @@ -4,11 +4,11 @@ LINUX_RELEASE?=1 LINUX_VERSION-3.18 = .71 LINUX_VERSION-4.4 = .92 -LINUX_VERSION-4.9 = .54 +LINUX_VERSION-4.9 = .57 LINUX_KERNEL_HASH-3.18.71 = 5abc9778ad44ce02ed6c8ab52ece8a21c6d20d21f6ed8a19287b4a38a50c1240 LINUX_KERNEL_HASH-4.4.92 = 53f8cd8b024444df0f242f8e6ab5147b0b009d7a30e8b2ed3854e8d17937460d -LINUX_KERNEL_HASH-4.9.54 = 651005db6efbce4fcd607415ebd697dd8d2f5a2abc2c632b11ece03a1a210fc5 +LINUX_KERNEL_HASH-4.9.57 = 09230554ec6a34a12e2d2a6b32733aed3c9bc90b1662cc1b06dd67bf726c96a6 ifdef KERNEL_PATCHVER LINUX_VERSION:=$(KERNEL_PATCHVER)$(strip $(LINUX_VERSION-$(KERNEL_PATCHVER))) diff --git a/package/network/services/hostapd/Makefile b/package/network/services/hostapd/Makefile index 344896ca6..5a353e67e 100644 --- a/package/network/services/hostapd/Makefile +++ b/package/network/services/hostapd/Makefile @@ -7,7 +7,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=hostapd -PKG_RELEASE:=1 +PKG_RELEASE:=3 PKG_SOURCE_URL:=http://w1.fi/hostap.git PKG_SOURCE_PROTO:=git diff --git a/package/network/services/hostapd/files/hostapd.sh b/package/network/services/hostapd/files/hostapd.sh index 3766b7a7c..16925d58a 100644 --- a/package/network/services/hostapd/files/hostapd.sh +++ b/package/network/services/hostapd/files/hostapd.sh @@ -149,6 +149,7 @@ hostapd_common_add_bss_config() { config_add_int \ wep_rekey eap_reauth_period \ wpa_group_rekey wpa_pair_rekey wpa_master_rekey + config_add_boolean wpa_disable_eapol_key_retries config_add_boolean rsn_preauth auth_cache config_add_int ieee80211w @@ -214,6 +215,7 @@ hostapd_set_bss_options() { json_get_vars \ wep_rekey wpa_group_rekey wpa_pair_rekey wpa_master_rekey \ + wpa_disable_eapol_key_retries \ maxassoc max_inactivity disassoc_low_ack isolate auth_cache \ wps_pushbutton wps_label ext_registrar wps_pbc_in_m1 wps_ap_setup_locked \ wps_independent wps_device_type wps_device_name wps_manufacturer wps_pin \ @@ -229,6 +231,7 @@ hostapd_set_bss_options() { set_default hidden 0 set_default wmm 1 set_default uapsd 1 + set_default wpa_disable_eapol_key_retries 0 set_default eapol_version 0 set_default acct_port 1813 @@ -416,6 +419,8 @@ hostapd_set_bss_options() { done fi + append bss_conf "wpa_disable_eapol_key_retries=$wpa_disable_eapol_key_retries" "$N" + hostapd_append_wpa_key_mgmt [ -n "$wpa_key_mgmt" ] && append bss_conf "wpa_key_mgmt=$wpa_key_mgmt" "$N" fi diff --git a/package/network/services/hostapd/patches/008-WPA-Extra-defense-against-PTK-reinstalls-in-4-way-ha.patch b/package/network/services/hostapd/patches/008-WPA-Extra-defense-against-PTK-reinstalls-in-4-way-ha.patch new file mode 100644 index 000000000..40f6b5696 --- /dev/null +++ b/package/network/services/hostapd/patches/008-WPA-Extra-defense-against-PTK-reinstalls-in-4-way-ha.patch @@ -0,0 +1,34 @@ +From a00e946c1c9a1f9cc65c72900d2a444ceb1f872e Mon Sep 17 00:00:00 2001 +From: Mathy Vanhoef +Date: Thu, 5 Oct 2017 23:53:01 +0200 +Subject: [PATCH] WPA: Extra defense against PTK reinstalls in 4-way handshake + +Currently, reinstallations of the PTK are prevented by (1) assuring the +same TPTK is only set once as the PTK, and (2) that one particular PTK +is only installed 
once. This patch makes it more explicit that point (1) +is required to prevent key reinstallations. At the same time, this patch +hardens wpa_supplicant such that future changes do not accidentally +break this property. + +Signed-off-by: Mathy Vanhoef +--- + src/rsn_supp/wpa.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +--- a/src/rsn_supp/wpa.c ++++ b/src/rsn_supp/wpa.c +@@ -1728,6 +1728,14 @@ static int wpa_supplicant_verify_eapol_k + sm->ptk_set = 1; + os_memcpy(&sm->ptk, &sm->tptk, sizeof(sm->ptk)); + os_memset(&sm->tptk, 0, sizeof(sm->tptk)); ++ /* ++ * This assures the same TPTK in sm->tptk can never be ++ * copied twice to sm->ptk as the new PTK. In ++ * combination with the installed flag in the wpa_ptk ++ * struct, this assures the same PTK is only installed ++ * once. ++ */ ++ sm->renew_snonce = 1; + } + } + diff --git a/package/network/services/hostapd/patches/009-Clear-PMK-length-and-check-for-this-when-deriving-PT.patch b/package/network/services/hostapd/patches/009-Clear-PMK-length-and-check-for-this-when-deriving-PT.patch new file mode 100644 index 000000000..ed7d79ec1 --- /dev/null +++ b/package/network/services/hostapd/patches/009-Clear-PMK-length-and-check-for-this-when-deriving-PT.patch @@ -0,0 +1,53 @@ +From b488a12948751f57871f09baa345e59b23959a41 Mon Sep 17 00:00:00 2001 +From: Jouni Malinen +Date: Sun, 8 Oct 2017 13:18:02 +0300 +Subject: [PATCH] Clear PMK length and check for this when deriving PTK + +Instead of setting the default PMK length for the cleared PMK, set the +length to 0 and explicitly check for this when deriving PTK to avoid +unexpected key derivation with an all-zeroes key should it be possible +to somehow trigger PTK derivation to happen before PMK derivation. + +Signed-off-by: Jouni Malinen +--- + src/common/wpa_common.c | 5 +++++ + src/rsn_supp/wpa.c | 7 ++++--- + 2 files changed, 9 insertions(+), 3 deletions(-) + +--- a/src/common/wpa_common.c ++++ b/src/common/wpa_common.c +@@ -225,6 +225,11 @@ int wpa_pmk_to_ptk(const u8 *pmk, size_t + u8 tmp[WPA_KCK_MAX_LEN + WPA_KEK_MAX_LEN + WPA_TK_MAX_LEN]; + size_t ptk_len; + ++ if (pmk_len == 0) { ++ wpa_printf(MSG_ERROR, "WPA: No PMK set for PTK derivation"); ++ return -1; ++ } ++ + if (os_memcmp(addr1, addr2, ETH_ALEN) < 0) { + os_memcpy(data, addr1, ETH_ALEN); + os_memcpy(data + ETH_ALEN, addr2, ETH_ALEN); +--- a/src/rsn_supp/wpa.c ++++ b/src/rsn_supp/wpa.c +@@ -584,7 +584,8 @@ static void wpa_supplicant_process_1_of_ + /* Calculate PTK which will be stored as a temporary PTK until it has + * been verified when processing message 3/4. 
*/ + ptk = &sm->tptk; +- wpa_derive_ptk(sm, src_addr, key, ptk); ++ if (wpa_derive_ptk(sm, src_addr, key, ptk) < 0) ++ goto failed; + if (sm->pairwise_cipher == WPA_CIPHER_TKIP) { + u8 buf[8]; + /* Supplicant: swap tx/rx Mic keys */ +@@ -2705,8 +2706,8 @@ void wpa_sm_set_pmk_from_pmksa(struct wp + sm->pmk_len = sm->cur_pmksa->pmk_len; + os_memcpy(sm->pmk, sm->cur_pmksa->pmk, sm->pmk_len); + } else { +- sm->pmk_len = PMK_LEN; +- os_memset(sm->pmk, 0, PMK_LEN); ++ sm->pmk_len = 0; ++ os_memset(sm->pmk, 0, PMK_LEN_MAX); + } + } + diff --git a/package/network/services/hostapd/patches/010-Optional-AP-side-workaround-for-key-reinstallation-a.patch b/package/network/services/hostapd/patches/010-Optional-AP-side-workaround-for-key-reinstallation-a.patch new file mode 100644 index 000000000..19165cce2 --- /dev/null +++ b/package/network/services/hostapd/patches/010-Optional-AP-side-workaround-for-key-reinstallation-a.patch @@ -0,0 +1,221 @@ +From 6f234c1e2ee1ede29f2412b7012b3345ed8e52d3 Mon Sep 17 00:00:00 2001 +From: Jouni Malinen +Date: Mon, 16 Oct 2017 18:37:43 +0300 +Subject: [PATCH] Optional AP side workaround for key reinstallation attacks + +This adds a new hostapd configuration parameter +wpa_disable_eapol_key_retries=1 that can be used to disable +retransmission of EAPOL-Key frames that are used to install +keys (EAPOL-Key message 3/4 and group message 1/2). This is +similar to setting wpa_group_update_count=1 and +wpa_pairwise_update_count=1, but with no impact on message 1/4 +retries and with an extended timeout for messages 4/4 and group +message 2/2 to avoid causing issues with stations that may use +aggressive power saving and take a very long time to reply to the +EAPOL-Key messages. + +This option can be used to work around key reinstallation attacks +on the station (supplicant) side in cases where those station devices +cannot be updated for some reason. By removing the +retransmissions, the attacker cannot cause key reinstallation with +a delayed frame transmission. This is related to the station side +vulnerabilities CVE-2017-13077, CVE-2017-13078, CVE-2017-13079, +CVE-2017-13080, and CVE-2017-13081. + +This workaround might cause interoperability issues and reduced +robustness of key negotiation especially in environments with +heavy traffic load, because the number of attempts to perform the +key exchange is reduced significantly. As such, this workaround +is disabled by default (unless overridden in build +configuration). To enable this, set the parameter to 1. 
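+
+As an illustration (every setting here other than the new parameter is
+a generic example, not part of this change), a minimal hostapd.conf
+enabling the workaround might look like:
+
+	interface=wlan0
+	ssid=example-ap
+	wpa=2
+	wpa_key_mgmt=WPA-PSK
+	rsn_pairwise=CCMP
+	wpa_passphrase=example-passphrase
+	wpa_disable_eapol_key_retries=1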
+ +It is also possible to enable this in the build by default by +adding the following to the build configuration: + +CFLAGS += -DDEFAULT_WPA_DISABLE_EAPOL_KEY_RETRIES=1 + +Signed-off-by: Jouni Malinen +--- + hostapd/config_file.c | 2 ++ + hostapd/defconfig | 4 ++++ + hostapd/hostapd.conf | 24 ++++++++++++++++++++++++ + src/ap/ap_config.c | 6 ++++++ + src/ap/ap_config.h | 1 + + src/ap/wpa_auth.c | 22 ++++++++++++++++++++-- + src/ap/wpa_auth.h | 1 + + src/ap/wpa_auth_glue.c | 2 ++ + 8 files changed, 60 insertions(+), 2 deletions(-) + +--- a/hostapd/config_file.c ++++ b/hostapd/config_file.c +@@ -2542,6 +2542,8 @@ static int hostapd_config_fill(struct ho + return 1; + } + bss->wpa_pairwise_update_count = (u32) val; ++ } else if (os_strcmp(buf, "wpa_disable_eapol_key_retries") == 0) { ++ bss->wpa_disable_eapol_key_retries = atoi(pos); + } else if (os_strcmp(buf, "wpa_passphrase") == 0) { + int len = os_strlen(pos); + if (len < 8 || len > 63) { +--- a/hostapd/defconfig ++++ b/hostapd/defconfig +@@ -372,3 +372,7 @@ CONFIG_IPV6=y + # Opportunistic Wireless Encryption (OWE) + # Experimental implementation of draft-harkins-owe-07.txt + #CONFIG_OWE=y ++ ++# Override default value for the wpa_disable_eapol_key_retries configuration ++# parameter. See that parameter in hostapd.conf for more details. ++#CFLAGS += -DDEFAULT_WPA_DISABLE_EAPOL_KEY_RETRIES=1 +--- a/hostapd/hostapd.conf ++++ b/hostapd/hostapd.conf +@@ -1315,6 +1315,30 @@ own_ip_addr=127.0.0.1 + # Range 1..4294967295; default: 4 + #wpa_pairwise_update_count=4 + ++# Workaround for key reinstallation attacks ++# ++# This parameter can be used to disable retransmission of EAPOL-Key frames that ++# are used to install keys (EAPOL-Key message 3/4 and group message 1/2). This ++# is similar to setting wpa_group_update_count=1 and ++# wpa_pairwise_update_count=1, but with no impact on message 1/4 and with ++# an extended timeout on the response to avoid causing issues with stations that ++# may use aggressive power saving and take a very long time to reply to the ++# EAPOL-Key messages. ++# ++# This option can be used to work around key reinstallation attacks on the ++# station (supplicant) side in cases where those station devices cannot be updated ++# for some reason. By removing the retransmissions, the attacker cannot cause ++# key reinstallation with a delayed frame transmission. This is related to the ++# station side vulnerabilities CVE-2017-13077, CVE-2017-13078, CVE-2017-13079, ++# CVE-2017-13080, and CVE-2017-13081. ++# ++# This workaround might cause interoperability issues and reduced robustness of ++# key negotiation especially in environments with heavy traffic load, because the ++# number of attempts to perform the key exchange is reduced significantly. As ++# such, this workaround is disabled by default (unless overridden in build ++# configuration). To enable this, set the parameter to 1. ++#wpa_disable_eapol_key_retries=1 ++ + # Enable IEEE 802.11i/RSN/WPA2 pre-authentication. This is used to speed up + # roaming be pre-authenticating IEEE 802.1X/EAP part of the full RSN + # authentication and key handshake before actually associating with a new AP. 
+--- a/src/ap/ap_config.c ++++ b/src/ap/ap_config.c +@@ -37,6 +37,10 @@ static void hostapd_config_free_vlan(str + } + + ++#ifndef DEFAULT_WPA_DISABLE_EAPOL_KEY_RETRIES ++#define DEFAULT_WPA_DISABLE_EAPOL_KEY_RETRIES 0 ++#endif /* DEFAULT_WPA_DISABLE_EAPOL_KEY_RETRIES */ ++ + void hostapd_config_defaults_bss(struct hostapd_bss_config *bss) + { + dl_list_init(&bss->anqp_elem); +@@ -58,6 +62,8 @@ void hostapd_config_defaults_bss(struct + bss->wpa_gmk_rekey = 86400; + bss->wpa_group_update_count = 4; + bss->wpa_pairwise_update_count = 4; ++ bss->wpa_disable_eapol_key_retries = ++ DEFAULT_WPA_DISABLE_EAPOL_KEY_RETRIES; + bss->wpa_key_mgmt = WPA_KEY_MGMT_PSK; + bss->wpa_pairwise = WPA_CIPHER_TKIP; + bss->wpa_group = WPA_CIPHER_TKIP; +--- a/src/ap/ap_config.h ++++ b/src/ap/ap_config.h +@@ -333,6 +333,7 @@ struct hostapd_bss_config { + int wpa_ptk_rekey; + u32 wpa_group_update_count; + u32 wpa_pairwise_update_count; ++ int wpa_disable_eapol_key_retries; + int rsn_pairwise; + int rsn_preauth; + char *rsn_preauth_interfaces; +--- a/src/ap/wpa_auth.c ++++ b/src/ap/wpa_auth.c +@@ -65,6 +65,7 @@ static u8 * ieee80211w_kde_add(struct wp + static const u32 eapol_key_timeout_first = 100; /* ms */ + static const u32 eapol_key_timeout_subseq = 1000; /* ms */ + static const u32 eapol_key_timeout_first_group = 500; /* ms */ ++static const u32 eapol_key_timeout_no_retrans = 4000; /* ms */ + + /* TODO: make these configurable */ + static const int dot11RSNAConfigPMKLifetime = 43200; +@@ -1653,6 +1654,9 @@ static void wpa_send_eapol(struct wpa_au + eapol_key_timeout_first_group; + else + timeout_ms = eapol_key_timeout_subseq; ++ if (wpa_auth->conf.wpa_disable_eapol_key_retries && ++ (!pairwise || (key_info & WPA_KEY_INFO_MIC))) ++ timeout_ms = eapol_key_timeout_no_retrans; + if (pairwise && ctr == 1 && !(key_info & WPA_KEY_INFO_MIC)) + sm->pending_1_of_4_timeout = 1; + wpa_printf(MSG_DEBUG, "WPA: Use EAPOL-Key timeout of %u ms (retry " +@@ -2882,6 +2886,11 @@ SM_STATE(WPA_PTK, PTKINITNEGOTIATING) + sm->TimeoutEvt = FALSE; + + sm->TimeoutCtr++; ++ if (sm->wpa_auth->conf.wpa_disable_eapol_key_retries && ++ sm->TimeoutCtr > 1) { ++ /* Do not allow retransmission of EAPOL-Key msg 3/4 */ ++ return; ++ } + if (sm->TimeoutCtr > sm->wpa_auth->conf.wpa_pairwise_update_count) { + /* No point in sending the EAPOL-Key - we will disconnect + * immediately following this. */ +@@ -3220,7 +3229,9 @@ SM_STEP(WPA_PTK) + sm->EAPOLKeyPairwise && sm->MICVerified) + SM_ENTER(WPA_PTK, PTKINITDONE); + else if (sm->TimeoutCtr > +- sm->wpa_auth->conf.wpa_pairwise_update_count) { ++ sm->wpa_auth->conf.wpa_pairwise_update_count || ++ (sm->wpa_auth->conf.wpa_disable_eapol_key_retries && ++ sm->TimeoutCtr > 1)) { + wpa_auth->dot11RSNA4WayHandshakeFailures++; + wpa_auth_vlogger( + sm->wpa_auth, sm->addr, LOGGER_DEBUG, +@@ -3260,6 +3271,11 @@ SM_STATE(WPA_PTK_GROUP, REKEYNEGOTIATING + SM_ENTRY_MA(WPA_PTK_GROUP, REKEYNEGOTIATING, wpa_ptk_group); + + sm->GTimeoutCtr++; ++ if (sm->wpa_auth->conf.wpa_disable_eapol_key_retries && ++ sm->GTimeoutCtr > 1) { ++ /* Do not allow retransmission of EAPOL-Key group msg 1/2 */ ++ return; ++ } + if (sm->GTimeoutCtr > sm->wpa_auth->conf.wpa_group_update_count) { + /* No point in sending the EAPOL-Key - we will disconnect + * immediately following this. 
*/ +@@ -3363,7 +3379,9 @@ SM_STEP(WPA_PTK_GROUP) + !sm->EAPOLKeyPairwise && sm->MICVerified) + SM_ENTER(WPA_PTK_GROUP, REKEYESTABLISHED); + else if (sm->GTimeoutCtr > +- sm->wpa_auth->conf.wpa_group_update_count) ++ sm->wpa_auth->conf.wpa_group_update_count || ++ (sm->wpa_auth->conf.wpa_disable_eapol_key_retries && ++ sm->GTimeoutCtr > 1)) + SM_ENTER(WPA_PTK_GROUP, KEYERROR); + else if (sm->TimeoutEvt) + SM_ENTER(WPA_PTK_GROUP, REKEYNEGOTIATING); +--- a/src/ap/wpa_auth.h ++++ b/src/ap/wpa_auth.h +@@ -165,6 +165,7 @@ struct wpa_auth_config { + int wpa_ptk_rekey; + u32 wpa_group_update_count; + u32 wpa_pairwise_update_count; ++ int wpa_disable_eapol_key_retries; + int rsn_pairwise; + int rsn_preauth; + int eapol_version; +--- a/src/ap/wpa_auth_glue.c ++++ b/src/ap/wpa_auth_glue.c +@@ -45,6 +45,8 @@ static void hostapd_wpa_auth_conf(struct + wconf->wpa_gmk_rekey = conf->wpa_gmk_rekey; + wconf->wpa_ptk_rekey = conf->wpa_ptk_rekey; + wconf->wpa_group_update_count = conf->wpa_group_update_count; ++ wconf->wpa_disable_eapol_key_retries = ++ conf->wpa_disable_eapol_key_retries; + wconf->wpa_pairwise_update_count = conf->wpa_pairwise_update_count; + wconf->rsn_pairwise = conf->rsn_pairwise; + wconf->rsn_preauth = conf->rsn_preauth; diff --git a/package/network/services/hostapd/patches/011-Additional-consistentcy-checks-for-PTK-component-len.patch b/package/network/services/hostapd/patches/011-Additional-consistentcy-checks-for-PTK-component-len.patch new file mode 100644 index 000000000..5cc2f7b17 --- /dev/null +++ b/package/network/services/hostapd/patches/011-Additional-consistentcy-checks-for-PTK-component-len.patch @@ -0,0 +1,100 @@ +From a6ea665300919d6a3af22b1f4237203647fda93a Mon Sep 17 00:00:00 2001 +From: Jouni Malinen +Date: Tue, 17 Oct 2017 00:01:11 +0300 +Subject: [PATCH] Additional consistentcy checks for PTK component lengths + +Verify that TK, KCK, and KEK lengths are set to consistent values within +struct wpa_ptk before using them in supplicant. This is an additional +layer of protection against unexpected states. 
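+
+A condensed sketch of the pattern (illustrative only - simplified names,
+not the exact hostapd code shown below):
+
+	struct tk_state {
+		u8 tk[32];     /* pairwise temporal key material */
+		size_t tk_len; /* set to 0 once the key is consumed */
+	};
+
+	static int install_tk(struct tk_state *s, size_t required_len)
+	{
+		/* Reject a TK whose recorded length does not match what
+		 * the negotiated cipher requires; because tk_len is
+		 * zeroed below, this also rejects a second install. */
+		if (required_len == 0 || s->tk_len != required_len)
+			return -1;
+		/* ... hand s->tk to the driver here ... */
+		os_memset(s->tk, 0, sizeof(s->tk)); /* wipe key material */
+		s->tk_len = 0;
+		return 0;
+	}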
+ +Signed-off-by: Jouni Malinen +--- + src/common/wpa_common.c | 6 ++++++ + src/rsn_supp/wpa.c | 26 ++++++++++++++++++++------ + 2 files changed, 26 insertions(+), 6 deletions(-) + +--- a/src/common/wpa_common.c ++++ b/src/common/wpa_common.c +@@ -100,6 +100,12 @@ int wpa_eapol_key_mic(const u8 *key, siz + { + u8 hash[SHA512_MAC_LEN]; + ++ if (key_len == 0) { ++ wpa_printf(MSG_DEBUG, ++ "WPA: KCK not set - cannot calculate MIC"); ++ return -1; ++ } ++ + switch (ver) { + #ifndef CONFIG_FIPS + case WPA_KEY_INFO_TYPE_HMAC_MD5_RC4: +--- a/src/rsn_supp/wpa.c ++++ b/src/rsn_supp/wpa.c +@@ -725,6 +725,11 @@ static int wpa_supplicant_install_ptk(st + + alg = wpa_cipher_to_alg(sm->pairwise_cipher); + keylen = wpa_cipher_key_len(sm->pairwise_cipher); ++ if (keylen <= 0 || (unsigned int) keylen != sm->ptk.tk_len) { ++ wpa_printf(MSG_DEBUG, "WPA: TK length mismatch: %d != %lu", ++ keylen, (long unsigned int) sm->ptk.tk_len); ++ return -1; ++ } + rsclen = wpa_cipher_rsc_len(sm->pairwise_cipher); + + if (sm->proto == WPA_PROTO_RSN || sm->proto == WPA_PROTO_OSEN) { +@@ -745,6 +750,7 @@ static int wpa_supplicant_install_ptk(st + + /* TK is not needed anymore in supplicant */ + os_memset(sm->ptk.tk, 0, WPA_TK_MAX_LEN); ++ sm->ptk.tk_len = 0; + sm->ptk.installed = 1; + + if (sm->wpa_ptk_rekey) { +@@ -1717,9 +1723,10 @@ static int wpa_supplicant_verify_eapol_k + os_memcpy(mic, key + 1, mic_len); + if (sm->tptk_set) { + os_memset(key + 1, 0, mic_len); +- wpa_eapol_key_mic(sm->tptk.kck, sm->tptk.kck_len, sm->key_mgmt, +- ver, buf, len, (u8 *) (key + 1)); +- if (os_memcmp_const(mic, key + 1, mic_len) != 0) { ++ if (wpa_eapol_key_mic(sm->tptk.kck, sm->tptk.kck_len, ++ sm->key_mgmt, ++ ver, buf, len, (u8 *) (key + 1)) < 0 || ++ os_memcmp_const(mic, key + 1, mic_len) != 0) { + wpa_msg(sm->ctx->msg_ctx, MSG_WARNING, + "WPA: Invalid EAPOL-Key MIC " + "when using TPTK - ignoring TPTK"); +@@ -1742,9 +1749,10 @@ static int wpa_supplicant_verify_eapol_k + + if (!ok && sm->ptk_set) { + os_memset(key + 1, 0, mic_len); +- wpa_eapol_key_mic(sm->ptk.kck, sm->ptk.kck_len, sm->key_mgmt, +- ver, buf, len, (u8 *) (key + 1)); +- if (os_memcmp_const(mic, key + 1, mic_len) != 0) { ++ if (wpa_eapol_key_mic(sm->ptk.kck, sm->ptk.kck_len, ++ sm->key_mgmt, ++ ver, buf, len, (u8 *) (key + 1)) < 0 || ++ os_memcmp_const(mic, key + 1, mic_len) != 0) { + wpa_msg(sm->ctx->msg_ctx, MSG_WARNING, + "WPA: Invalid EAPOL-Key MIC - " + "dropping packet"); +@@ -4167,6 +4175,11 @@ int fils_process_assoc_resp(struct wpa_s + + alg = wpa_cipher_to_alg(sm->pairwise_cipher); + keylen = wpa_cipher_key_len(sm->pairwise_cipher); ++ if (keylen <= 0 || (unsigned int) keylen != sm->ptk.tk_len) { ++ wpa_printf(MSG_DEBUG, "FILS: TK length mismatch: %u != %lu", ++ keylen, (long unsigned int) sm->ptk.tk_len); ++ goto fail; ++ } + rsclen = wpa_cipher_rsc_len(sm->pairwise_cipher); + wpa_hexdump_key(MSG_DEBUG, "FILS: Set TK to driver", + sm->ptk.tk, keylen); +@@ -4183,6 +4196,7 @@ int fils_process_assoc_resp(struct wpa_s + * takes care of association frame encryption/decryption. 
*/ + /* TK is not needed anymore in supplicant */ + os_memset(sm->ptk.tk, 0, WPA_TK_MAX_LEN); ++ sm->ptk.tk_len = 0; + sm->ptk.installed = 1; + + /* FILS HLP Container */ diff --git a/package/network/services/hostapd/patches/012-Clear-BSSID-information-in-supplicant-state-machine-.patch b/package/network/services/hostapd/patches/012-Clear-BSSID-information-in-supplicant-state-machine-.patch new file mode 100644 index 000000000..808d34586 --- /dev/null +++ b/package/network/services/hostapd/patches/012-Clear-BSSID-information-in-supplicant-state-machine-.patch @@ -0,0 +1,25 @@ +From c0fe5f125a9d4a6564e1f4956ccc3809bf2fd69d Mon Sep 17 00:00:00 2001 +From: Jouni Malinen +Date: Tue, 17 Oct 2017 01:15:24 +0300 +Subject: [PATCH] Clear BSSID information in supplicant state machine on + disconnection + +This fixes a corner case where RSN pre-authentication candidate from +scan results was ignored if the station was associated with that BSS +just before running the new scan for the connection. + +Signed-off-by: Jouni Malinen +--- + src/rsn_supp/wpa.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/src/rsn_supp/wpa.c ++++ b/src/rsn_supp/wpa.c +@@ -2662,6 +2662,7 @@ void wpa_sm_notify_disassoc(struct wpa_s + wpa_sm_drop_sa(sm); + + sm->msg_3_of_4_ok = 0; ++ os_memset(sm->bssid, 0, ETH_ALEN); + } + + diff --git a/package/network/services/hostapd/patches/300-noscan.patch b/package/network/services/hostapd/patches/300-noscan.patch index 6db16c941..c8ca3694c 100644 --- a/package/network/services/hostapd/patches/300-noscan.patch +++ b/package/network/services/hostapd/patches/300-noscan.patch @@ -1,6 +1,6 @@ --- a/hostapd/config_file.c +++ b/hostapd/config_file.c -@@ -3014,6 +3014,10 @@ static int hostapd_config_fill(struct ho +@@ -3016,6 +3016,10 @@ static int hostapd_config_fill(struct ho } #endif /* CONFIG_IEEE80211W */ #ifdef CONFIG_IEEE80211N @@ -13,7 +13,7 @@ } else if (os_strcmp(buf, "ht_capab") == 0) { --- a/src/ap/ap_config.h +++ b/src/ap/ap_config.h -@@ -734,6 +734,8 @@ struct hostapd_config { +@@ -735,6 +735,8 @@ struct hostapd_config { int ht_op_mode_fixed; u16 ht_capab; diff --git a/package/network/services/hostapd/patches/380-disable_ctrl_iface_mib.patch b/package/network/services/hostapd/patches/380-disable_ctrl_iface_mib.patch index 908641f63..e977f00a2 100644 --- a/package/network/services/hostapd/patches/380-disable_ctrl_iface_mib.patch +++ b/package/network/services/hostapd/patches/380-disable_ctrl_iface_mib.patch @@ -129,7 +129,7 @@ static void ieee802_1x_wnm_notif_send(void *eloop_ctx, void *timeout_ctx) --- a/src/ap/wpa_auth.c +++ b/src/ap/wpa_auth.c -@@ -3762,6 +3762,7 @@ static const char * wpa_bool_txt(int val +@@ -3780,6 +3780,7 @@ static const char * wpa_bool_txt(int val return val ? 
"TRUE" : "FALSE"; } @@ -137,7 +137,7 @@ #define RSN_SUITE "%02x-%02x-%02x-%d" #define RSN_SUITE_ARG(s) \ -@@ -3906,7 +3907,7 @@ int wpa_get_mib_sta(struct wpa_state_mac +@@ -3924,7 +3925,7 @@ int wpa_get_mib_sta(struct wpa_state_mac return len; } @@ -148,7 +148,7 @@ { --- a/src/rsn_supp/wpa.c +++ b/src/rsn_supp/wpa.c -@@ -2339,6 +2339,8 @@ static u32 wpa_key_mgmt_suite(struct wpa +@@ -2356,6 +2356,8 @@ static u32 wpa_key_mgmt_suite(struct wpa } @@ -157,7 +157,7 @@ #define RSN_SUITE "%02x-%02x-%02x-%d" #define RSN_SUITE_ARG(s) \ ((s) >> 24) & 0xff, ((s) >> 16) & 0xff, ((s) >> 8) & 0xff, (s) & 0xff -@@ -2422,6 +2424,7 @@ int wpa_sm_get_mib(struct wpa_sm *sm, ch +@@ -2439,6 +2441,7 @@ int wpa_sm_get_mib(struct wpa_sm *sm, ch return (int) len; } diff --git a/package/network/services/hostapd/patches/390-wpa_ie_cap_workaround.patch b/package/network/services/hostapd/patches/390-wpa_ie_cap_workaround.patch index bdbae9b48..8f7a6879c 100644 --- a/package/network/services/hostapd/patches/390-wpa_ie_cap_workaround.patch +++ b/package/network/services/hostapd/patches/390-wpa_ie_cap_workaround.patch @@ -1,6 +1,6 @@ --- a/src/common/wpa_common.c +++ b/src/common/wpa_common.c -@@ -1664,6 +1664,31 @@ u32 wpa_akm_to_suite(int akm) +@@ -1675,6 +1675,31 @@ u32 wpa_akm_to_suite(int akm) } @@ -32,7 +32,7 @@ int wpa_compare_rsn_ie(int ft_initial_assoc, const u8 *ie1, size_t ie1len, const u8 *ie2, size_t ie2len) -@@ -1671,8 +1696,19 @@ int wpa_compare_rsn_ie(int ft_initial_as +@@ -1682,8 +1707,19 @@ int wpa_compare_rsn_ie(int ft_initial_as if (ie1 == NULL || ie2 == NULL) return -1; diff --git a/package/network/services/hostapd/patches/600-ubus_support.patch b/package/network/services/hostapd/patches/600-ubus_support.patch index 0c85a27ca..31e3e7999 100644 --- a/package/network/services/hostapd/patches/600-ubus_support.patch +++ b/package/network/services/hostapd/patches/600-ubus_support.patch @@ -298,7 +298,7 @@ } --- a/src/ap/wpa_auth_glue.c +++ b/src/ap/wpa_auth_glue.c -@@ -173,6 +173,7 @@ static void hostapd_wpa_auth_psk_failure +@@ -175,6 +175,7 @@ static void hostapd_wpa_auth_psk_failure struct hostapd_data *hapd = ctx; wpa_msg(hapd->msg_ctx, MSG_INFO, AP_STA_POSSIBLE_PSK_MISMATCH MACSTR, MAC2STR(addr)); diff --git a/target/linux/apm821xx/patches-4.9/801-usb-xhci-add-firmware-loader-for-uPD720201-and-uPD72.patch b/target/linux/apm821xx/patches-4.9/801-usb-xhci-add-firmware-loader-for-uPD720201-and-uPD72.patch index 256344139..96a5940e7 100644 --- a/target/linux/apm821xx/patches-4.9/801-usb-xhci-add-firmware-loader-for-uPD720201-and-uPD72.patch +++ b/target/linux/apm821xx/patches-4.9/801-usb-xhci-add-firmware-loader-for-uPD720201-and-uPD72.patch @@ -44,7 +44,7 @@ Signed-off-by: Christian Lamparter #include "xhci.h" #include "xhci-trace.h" -@@ -248,6 +250,458 @@ static void xhci_pme_acpi_rtd3_enable(st +@@ -236,6 +238,458 @@ static void xhci_pme_acpi_rtd3_enable(st static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { } #endif /* CONFIG_ACPI */ @@ -503,7 +503,7 @@ Signed-off-by: Christian Lamparter /* called during probe() after chip reset completes */ static int xhci_pci_setup(struct usb_hcd *hcd) { -@@ -287,6 +741,22 @@ static int xhci_pci_probe(struct pci_dev +@@ -275,6 +729,22 @@ static int xhci_pci_probe(struct pci_dev struct hc_driver *driver; struct usb_hcd *hcd; @@ -526,7 +526,7 @@ Signed-off-by: Christian Lamparter driver = (struct hc_driver *)id->driver_data; /* Prevent runtime suspending between USB-2 and USB-3 initialization */ -@@ -344,6 +814,16 @@ static void xhci_pci_remove(struct pci_d 
+@@ -332,6 +802,16 @@ static void xhci_pci_remove(struct pci_d { struct xhci_hcd *xhci; diff --git a/target/linux/apm821xx/patches-4.9/802-usb-xhci-force-msi-renesas-xhci.patch b/target/linux/apm821xx/patches-4.9/802-usb-xhci-force-msi-renesas-xhci.patch index 76141a068..4d7c68661 100644 --- a/target/linux/apm821xx/patches-4.9/802-usb-xhci-force-msi-renesas-xhci.patch +++ b/target/linux/apm821xx/patches-4.9/802-usb-xhci-force-msi-renesas-xhci.patch @@ -13,7 +13,7 @@ produce a noisy warning. --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c -@@ -205,7 +205,7 @@ static void xhci_pci_quirks(struct devic +@@ -193,7 +193,7 @@ static void xhci_pci_quirks(struct devic } if (pdev->vendor == PCI_VENDOR_ID_RENESAS && pdev->device == 0x0015) diff --git a/target/linux/bcm53xx/patches-4.9/180-usb-xhci-add-support-for-performing-fake-doorbell.patch b/target/linux/bcm53xx/patches-4.9/180-usb-xhci-add-support-for-performing-fake-doorbell.patch index a4163fc32..f7b34941c 100644 --- a/target/linux/bcm53xx/patches-4.9/180-usb-xhci-add-support-for-performing-fake-doorbell.patch +++ b/target/linux/bcm53xx/patches-4.9/180-usb-xhci-add-support-for-performing-fake-doorbell.patch @@ -129,7 +129,7 @@ it on BCM4708 family. +++ b/drivers/usb/host/xhci.h @@ -1662,6 +1662,7 @@ struct xhci_hcd { #define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26) - #define XHCI_U2_DISABLE_WAKE (1 << 27) + /* Reserved. It was XHCI_U2_DISABLE_WAKE */ #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28) +#define XHCI_FAKE_DOORBELL (1 << 29) diff --git a/target/linux/brcm2708/patches-4.9/950-0061-hid-Reduce-default-mouse-polling-interval-to-60Hz.patch b/target/linux/brcm2708/patches-4.9/950-0061-hid-Reduce-default-mouse-polling-interval-to-60Hz.patch index 444c64e47..4a05d3131 100644 --- a/target/linux/brcm2708/patches-4.9/950-0061-hid-Reduce-default-mouse-polling-interval-to-60Hz.patch +++ b/target/linux/brcm2708/patches-4.9/950-0061-hid-Reduce-default-mouse-polling-interval-to-60Hz.patch @@ -19,7 +19,7 @@ Reduces overhead when using X module_param_named(mousepoll, hid_mousepoll_interval, uint, 0644); MODULE_PARM_DESC(mousepoll, "Polling interval of mice"); -@@ -1083,8 +1083,12 @@ static int usbhid_start(struct hid_devic +@@ -1093,8 +1093,12 @@ static int usbhid_start(struct hid_devic } /* Change the polling interval of mice. 
*/ diff --git a/target/linux/brcm2708/patches-4.9/950-0101-staging-vchi-Update-for-rename-of-page_cache_release.patch b/target/linux/brcm2708/patches-4.9/950-0101-staging-vchi-Update-for-rename-of-page_cache_release.patch index eccb792ee..8979bc2f6 100644 --- a/target/linux/brcm2708/patches-4.9/950-0101-staging-vchi-Update-for-rename-of-page_cache_release.patch +++ b/target/linux/brcm2708/patches-4.9/950-0101-staging-vchi-Update-for-rename-of-page_cache_release.patch @@ -21,7 +21,7 @@ Signed-off-by: Eric Anholt } kfree(pagelist); if (actual_pages == 0) -@@ -577,7 +577,7 @@ free_pagelist(PAGELIST_T *pagelist, int +@@ -579,7 +579,7 @@ free_pagelist(PAGELIST_T *pagelist, int offset = 0; set_page_dirty(pg); } diff --git a/target/linux/cns3xxx/patches-4.9/200-broadcom_phy_reinit.patch b/target/linux/cns3xxx/patches-4.9/200-broadcom_phy_reinit.patch index f3ae5e6f2..0352a89f9 100644 --- a/target/linux/cns3xxx/patches-4.9/200-broadcom_phy_reinit.patch +++ b/target/linux/cns3xxx/patches-4.9/200-broadcom_phy_reinit.patch @@ -1,6 +1,6 @@ --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c -@@ -414,6 +414,11 @@ static int bcm5481_config_aneg(struct ph +@@ -420,6 +420,11 @@ static int bcm5481_config_aneg(struct ph /* Write bits 14:0. */ reg |= (1 << 15); phy_write(phydev, 0x18, reg); diff --git a/target/linux/generic/backport-4.9/024-1-tcp-tsq-add-tsq_flags-tsq_enum.patch b/target/linux/generic/backport-4.9/024-1-tcp-tsq-add-tsq_flags-tsq_enum.patch new file mode 100644 index 000000000..20311498a --- /dev/null +++ b/target/linux/generic/backport-4.9/024-1-tcp-tsq-add-tsq_flags-tsq_enum.patch @@ -0,0 +1,90 @@ +From 40fc3423b983b864bf70b03199191260ae9b2ea6 Mon Sep 17 00:00:00 2001 +From: Eric Dumazet +Date: Sat, 3 Dec 2016 11:14:50 -0800 +Subject: [PATCH 01/10] tcp: tsq: add tsq_flags / tsq_enum + +This is a cleanup, to ease code review of following patches. + +Old 'enum tsq_flags' is renamed, and a new enumeration is added +with the flags used in cmpxchg() operations as opposed to +single bit operations. + +Signed-off-by: Eric Dumazet +Signed-off-by: David S. 
Miller +--- + include/linux/tcp.h | 11 ++++++++++- + net/ipv4/tcp_output.c | 16 ++++++++-------- + 2 files changed, 18 insertions(+), 9 deletions(-) + +--- a/include/linux/tcp.h ++++ b/include/linux/tcp.h +@@ -367,7 +367,7 @@ struct tcp_sock { + u32 *saved_syn; + }; + +-enum tsq_flags { ++enum tsq_enum { + TSQ_THROTTLED, + TSQ_QUEUED, + TCP_TSQ_DEFERRED, /* tcp_tasklet_func() found socket was owned */ +@@ -378,6 +378,15 @@ enum tsq_flags { + */ + }; + ++enum tsq_flags { ++ TSQF_THROTTLED = (1UL << TSQ_THROTTLED), ++ TSQF_QUEUED = (1UL << TSQ_QUEUED), ++ TCPF_TSQ_DEFERRED = (1UL << TCP_TSQ_DEFERRED), ++ TCPF_WRITE_TIMER_DEFERRED = (1UL << TCP_WRITE_TIMER_DEFERRED), ++ TCPF_DELACK_TIMER_DEFERRED = (1UL << TCP_DELACK_TIMER_DEFERRED), ++ TCPF_MTU_REDUCED_DEFERRED = (1UL << TCP_MTU_REDUCED_DEFERRED), ++}; ++ + static inline struct tcp_sock *tcp_sk(const struct sock *sk) + { + return (struct tcp_sock *)sk; +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -784,10 +784,10 @@ static void tcp_tasklet_func(unsigned lo + } + } + +-#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) | \ +- (1UL << TCP_WRITE_TIMER_DEFERRED) | \ +- (1UL << TCP_DELACK_TIMER_DEFERRED) | \ +- (1UL << TCP_MTU_REDUCED_DEFERRED)) ++#define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED | \ ++ TCPF_WRITE_TIMER_DEFERRED | \ ++ TCPF_DELACK_TIMER_DEFERRED | \ ++ TCPF_MTU_REDUCED_DEFERRED) + /** + * tcp_release_cb - tcp release_sock() callback + * @sk: socket +@@ -808,7 +808,7 @@ void tcp_release_cb(struct sock *sk) + nflags = flags & ~TCP_DEFERRED_ALL; + } while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags); + +- if (flags & (1UL << TCP_TSQ_DEFERRED)) ++ if (flags & TCPF_TSQ_DEFERRED) + tcp_tsq_handler(sk); + + /* Here begins the tricky part : +@@ -822,15 +822,15 @@ void tcp_release_cb(struct sock *sk) + */ + sock_release_ownership(sk); + +- if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) { ++ if (flags & TCPF_WRITE_TIMER_DEFERRED) { + tcp_write_timer_handler(sk); + __sock_put(sk); + } +- if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) { ++ if (flags & TCPF_DELACK_TIMER_DEFERRED) { + tcp_delack_timer_handler(sk); + __sock_put(sk); + } +- if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) { ++ if (flags & TCPF_MTU_REDUCED_DEFERRED) { + inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); + __sock_put(sk); + } diff --git a/target/linux/generic/backport-4.9/024-2-tcp-tsq-remove-one-locked-operation-in-tcp_wfree.patch b/target/linux/generic/backport-4.9/024-2-tcp-tsq-remove-one-locked-operation-in-tcp_wfree.patch new file mode 100644 index 000000000..914be607e --- /dev/null +++ b/target/linux/generic/backport-4.9/024-2-tcp-tsq-remove-one-locked-operation-in-tcp_wfree.patch @@ -0,0 +1,48 @@ +From 408f0a6c21e124cc4f6c7aa370b38aa47e55428d Mon Sep 17 00:00:00 2001 +From: Eric Dumazet +Date: Sat, 3 Dec 2016 11:14:51 -0800 +Subject: [PATCH 02/10] tcp: tsq: remove one locked operation in tcp_wfree() + +Instead of atomically clear TSQ_THROTTLED and atomically set TSQ_QUEUED +bits, use one cmpxchg() to perform a single locked operation. + +Since the following patch will also set TCP_TSQ_DEFERRED here, +this cmpxchg() will make this addition free. + +Signed-off-by: Eric Dumazet +Signed-off-by: David S. 
Miller +--- + net/ipv4/tcp_output.c | 13 ++++++++++--- + 1 file changed, 10 insertions(+), 3 deletions(-) + +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -860,6 +860,7 @@ void tcp_wfree(struct sk_buff *skb) + { + struct sock *sk = skb->sk; + struct tcp_sock *tp = tcp_sk(sk); ++ unsigned long flags, nval, oval; + int wmem; + + /* Keep one reference on sk_wmem_alloc. +@@ -877,11 +878,17 @@ void tcp_wfree(struct sk_buff *skb) + if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current) + goto out; + +- if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) && +- !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) { +- unsigned long flags; ++ for (oval = READ_ONCE(tp->tsq_flags);; oval = nval) { + struct tsq_tasklet *tsq; + ++ if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED)) ++ goto out; ++ ++ nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED; ++ nval = cmpxchg(&tp->tsq_flags, oval, nval); ++ if (nval != oval) ++ continue; ++ + /* queue this socket to tasklet queue */ + local_irq_save(flags); + tsq = this_cpu_ptr(&tsq_tasklet); diff --git a/target/linux/generic/backport-4.9/024-3-tcp-tsq-add-shortcut-in-tcp_tasklet_func.patch b/target/linux/generic/backport-4.9/024-3-tcp-tsq-add-shortcut-in-tcp_tasklet_func.patch new file mode 100644 index 000000000..d3db74252 --- /dev/null +++ b/target/linux/generic/backport-4.9/024-3-tcp-tsq-add-shortcut-in-tcp_tasklet_func.patch @@ -0,0 +1,71 @@ +From b223feb9de2a65c533ff95c08e834fa732906ea5 Mon Sep 17 00:00:00 2001 +From: Eric Dumazet +Date: Sat, 3 Dec 2016 11:14:52 -0800 +Subject: [PATCH 03/10] tcp: tsq: add shortcut in tcp_tasklet_func() + +Under high stress, I've seen tcp_tasklet_func() consuming +~700 usec, handling ~150 tcp sockets. + +By setting TCP_TSQ_DEFERRED in tcp_wfree(), we give a chance +for other cpus/threads entering tcp_write_xmit() to grab it, +allowing tcp_tasklet_func() to skip sockets that already did +an xmit cycle. + +In the future, we might give to ACK processing an increased +budget to reduce even more tcp_tasklet_func() amount of work. + +Signed-off-by: Eric Dumazet +Signed-off-by: David S. 
Miller +--- + net/ipv4/tcp_output.c | 22 ++++++++++++---------- + 1 file changed, 12 insertions(+), 10 deletions(-) + +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -767,19 +767,19 @@ static void tcp_tasklet_func(unsigned lo + list_for_each_safe(q, n, &list) { + tp = list_entry(q, struct tcp_sock, tsq_node); + list_del(&tp->tsq_node); ++ clear_bit(TSQ_QUEUED, &tp->tsq_flags); + + sk = (struct sock *)tp; +- bh_lock_sock(sk); +- +- if (!sock_owned_by_user(sk)) { +- tcp_tsq_handler(sk); +- } else { +- /* defer the work to tcp_release_cb() */ +- set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags); ++ if (!sk->sk_lock.owned && ++ test_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags)) { ++ bh_lock_sock(sk); ++ if (!sock_owned_by_user(sk)) { ++ clear_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags); ++ tcp_tsq_handler(sk); ++ } ++ bh_unlock_sock(sk); + } +- bh_unlock_sock(sk); + +- clear_bit(TSQ_QUEUED, &tp->tsq_flags); + sk_free(sk); + } + } +@@ -884,7 +884,7 @@ void tcp_wfree(struct sk_buff *skb) + if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED)) + goto out; + +- nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED; ++ nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED; + nval = cmpxchg(&tp->tsq_flags, oval, nval); + if (nval != oval) + continue; +@@ -2182,6 +2182,8 @@ static bool tcp_write_xmit(struct sock * + unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) + break; + ++ if (test_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags)) ++ clear_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags); + if (tcp_small_queue_check(sk, skb, 0)) + break; + diff --git a/target/linux/generic/backport-4.9/024-4-tcp-tsq-avoid-one-atomic-in-tcp_wfree.patch b/target/linux/generic/backport-4.9/024-4-tcp-tsq-avoid-one-atomic-in-tcp_wfree.patch new file mode 100644 index 000000000..a25cdb571 --- /dev/null +++ b/target/linux/generic/backport-4.9/024-4-tcp-tsq-avoid-one-atomic-in-tcp_wfree.patch @@ -0,0 +1,38 @@ +From a9b204d1564702b704ad6fe74f10a102c7b87ba3 Mon Sep 17 00:00:00 2001 +From: Eric Dumazet +Date: Sat, 3 Dec 2016 11:14:53 -0800 +Subject: [PATCH 04/10] tcp: tsq: avoid one atomic in tcp_wfree() + +Under high load, tcp_wfree() has an atomic operation trying +to schedule a tasklet over and over. + +We can schedule it only if our per cpu list was empty. + +Signed-off-by: Eric Dumazet +Signed-off-by: David S. 
Miller +--- + net/ipv4/tcp_output.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -880,6 +880,7 @@ void tcp_wfree(struct sk_buff *skb) + + for (oval = READ_ONCE(tp->tsq_flags);; oval = nval) { + struct tsq_tasklet *tsq; ++ bool empty; + + if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED)) + goto out; +@@ -892,8 +893,10 @@ void tcp_wfree(struct sk_buff *skb) + /* queue this socket to tasklet queue */ + local_irq_save(flags); + tsq = this_cpu_ptr(&tsq_tasklet); ++ empty = list_empty(&tsq->head); + list_add(&tp->tsq_node, &tsq->head); +- tasklet_schedule(&tsq->tasklet); ++ if (empty) ++ tasklet_schedule(&tsq->tasklet); + local_irq_restore(flags); + return; + } diff --git a/target/linux/generic/backport-4.9/024-5-tcp-tsq-add-a-shortcut-in-tcp_small_queue_check.patch b/target/linux/generic/backport-4.9/024-5-tcp-tsq-add-a-shortcut-in-tcp_small_queue_check.patch new file mode 100644 index 000000000..925d2dcea --- /dev/null +++ b/target/linux/generic/backport-4.9/024-5-tcp-tsq-add-a-shortcut-in-tcp_small_queue_check.patch @@ -0,0 +1,37 @@ +From 75eefc6c59fd2c5f1ab95a3a113c217237d12a31 Mon Sep 17 00:00:00 2001 +From: Eric Dumazet +Date: Sat, 3 Dec 2016 11:14:54 -0800 +Subject: [PATCH 05/10] tcp: tsq: add a shortcut in tcp_small_queue_check() + +Always allow the two first skbs in write queue to be sent, +regardless of sk_wmem_alloc/sk_pacing_rate values. + +This helps a lot in situations where TX completions are delayed either +because of driver latencies or softirq latencies. + +Test is done with no cache line misses. + +Signed-off-by: Eric Dumazet +Signed-off-by: David S. Miller +--- + net/ipv4/tcp_output.c | 9 +++++++++ + 1 file changed, 9 insertions(+) + +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -2087,6 +2087,15 @@ static bool tcp_small_queue_check(struct + limit <<= factor; + + if (atomic_read(&sk->sk_wmem_alloc) > limit) { ++ /* Always send the 1st or 2nd skb in write queue. ++ * No need to wait for TX completion to call us back, ++ * after softirq/tasklet schedule. ++ * This helps when TX completions are delayed too much. ++ */ ++ if (skb == sk->sk_write_queue.next || ++ skb->prev == sk->sk_write_queue.next) ++ return false; ++ + set_bit(TSQ_THROTTLED, &tcp_sk(sk)->tsq_flags); + /* It is possible TX completion already happened + * before we set TSQ_THROTTLED, so we must diff --git a/target/linux/generic/backport-4.9/024-6-tcp-tcp_mtu_probe-is-likely-to-exit-early.patch b/target/linux/generic/backport-4.9/024-6-tcp-tcp_mtu_probe-is-likely-to-exit-early.patch new file mode 100644 index 000000000..c5bb42d44 --- /dev/null +++ b/target/linux/generic/backport-4.9/024-6-tcp-tcp_mtu_probe-is-likely-to-exit-early.patch @@ -0,0 +1,55 @@ +From 12a59abc22d6664f7d3944f625ceefee92de8820 Mon Sep 17 00:00:00 2001 +From: Eric Dumazet +Date: Sat, 3 Dec 2016 11:14:55 -0800 +Subject: [PATCH 06/10] tcp: tcp_mtu_probe() is likely to exit early + +Adding a likely() in tcp_mtu_probe() moves its code which used to +be inlined in front of tcp_write_xmit() + +We still have a cache line miss to access icsk->icsk_mtup.enabled, +we will probably have to reorganize fields to help data locality. + +Signed-off-by: Eric Dumazet +Signed-off-by: David S. 
Miller +--- + net/ipv4/tcp_output.c | 18 +++++++++--------- + 1 file changed, 9 insertions(+), 9 deletions(-) + +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -1928,26 +1928,26 @@ static inline void tcp_mtu_check_reprobe + */ + static int tcp_mtu_probe(struct sock *sk) + { +- struct tcp_sock *tp = tcp_sk(sk); + struct inet_connection_sock *icsk = inet_csk(sk); ++ struct tcp_sock *tp = tcp_sk(sk); + struct sk_buff *skb, *nskb, *next; + struct net *net = sock_net(sk); +- int len; + int probe_size; + int size_needed; +- int copy; ++ int copy, len; + int mss_now; + int interval; + + /* Not currently probing/verifying, + * not in recovery, + * have enough cwnd, and +- * not SACKing (the variable headers throw things off) */ +- if (!icsk->icsk_mtup.enabled || +- icsk->icsk_mtup.probe_size || +- inet_csk(sk)->icsk_ca_state != TCP_CA_Open || +- tp->snd_cwnd < 11 || +- tp->rx_opt.num_sacks || tp->rx_opt.dsack) ++ * not SACKing (the variable headers throw things off) ++ */ ++ if (likely(!icsk->icsk_mtup.enabled || ++ icsk->icsk_mtup.probe_size || ++ inet_csk(sk)->icsk_ca_state != TCP_CA_Open || ++ tp->snd_cwnd < 11 || ++ tp->rx_opt.num_sacks || tp->rx_opt.dsack)) + return -1; + + /* Use binary search for probe_size between tcp_mss_base, diff --git a/target/linux/generic/backport-4.9/024-7-net-reorganize-struct-sock-for-better-data-locality.patch b/target/linux/generic/backport-4.9/024-7-net-reorganize-struct-sock-for-better-data-locality.patch new file mode 100644 index 000000000..e8c1915e1 --- /dev/null +++ b/target/linux/generic/backport-4.9/024-7-net-reorganize-struct-sock-for-better-data-locality.patch @@ -0,0 +1,157 @@ +From 9115e8cd2a0c6eaaa900c462721f12e1d45f326c Mon Sep 17 00:00:00 2001 +From: Eric Dumazet +Date: Sat, 3 Dec 2016 11:14:56 -0800 +Subject: [PATCH 07/10] net: reorganize struct sock for better data locality + +Group fields used in TX path, and keep some cache lines mostly read +to permit sharing among cpus. + +Gained two 4 bytes holes on 64bit arches. + +Added a place holder for tcp tsq_flags, next to sk_wmem_alloc +to speed up tcp_wfree() in the following patch. + +I have not added ____cacheline_aligned_in_smp, this might be done later. +I prefer doing this once inet and tcp/udp sockets reorg is also done. + +Tested with both TCP and UDP. + +UDP receiver performance under flood increased by ~20 % : +Accessing sk_filter/sk_wq/sk_napi_id no longer stalls because sk_drops +was moved away from a critical cache line, now mostly read and shared. + + /* --- cacheline 4 boundary (256 bytes) --- */ + unsigned int sk_napi_id; /* 0x100 0x4 */ + int sk_rcvbuf; /* 0x104 0x4 */ + struct sk_filter * sk_filter; /* 0x108 0x8 */ + union { + struct socket_wq * sk_wq; /* 0x8 */ + struct socket_wq * sk_wq_raw; /* 0x8 */ + }; /* 0x110 0x8 */ + struct xfrm_policy * sk_policy[2]; /* 0x118 0x10 */ + struct dst_entry * sk_rx_dst; /* 0x128 0x8 */ + struct dst_entry * sk_dst_cache; /* 0x130 0x8 */ + atomic_t sk_omem_alloc; /* 0x138 0x4 */ + int sk_sndbuf; /* 0x13c 0x4 */ + /* --- cacheline 5 boundary (320 bytes) --- */ + int sk_wmem_queued; /* 0x140 0x4 */ + atomic_t sk_wmem_alloc; /* 0x144 0x4 */ + long unsigned int sk_tsq_flags; /* 0x148 0x8 */ + struct sk_buff * sk_send_head; /* 0x150 0x8 */ + struct sk_buff_head sk_write_queue; /* 0x158 0x18 */ + __s32 sk_peek_off; /* 0x170 0x4 */ + int sk_write_pending; /* 0x174 0x4 */ + long int sk_sndtimeo; /* 0x178 0x8 */ + +Signed-off-by: Eric Dumazet +Tested-by: Paolo Abeni +Signed-off-by: David S. 
Miller +--- + include/net/sock.h | 51 +++++++++++++++++++++++++++------------------------ + 1 file changed, 27 insertions(+), 24 deletions(-) + +--- a/include/net/sock.h ++++ b/include/net/sock.h +@@ -343,6 +343,9 @@ struct sock { + #define sk_rxhash __sk_common.skc_rxhash + + socket_lock_t sk_lock; ++ atomic_t sk_drops; ++ int sk_rcvlowat; ++ struct sk_buff_head sk_error_queue; + struct sk_buff_head sk_receive_queue; + /* + * The backlog queue is special, it is always used with +@@ -359,14 +362,13 @@ struct sock { + struct sk_buff *tail; + } sk_backlog; + #define sk_rmem_alloc sk_backlog.rmem_alloc +- int sk_forward_alloc; + +- __u32 sk_txhash; ++ int sk_forward_alloc; + #ifdef CONFIG_NET_RX_BUSY_POLL +- unsigned int sk_napi_id; + unsigned int sk_ll_usec; ++ /* ===== mostly read cache line ===== */ ++ unsigned int sk_napi_id; + #endif +- atomic_t sk_drops; + int sk_rcvbuf; + + struct sk_filter __rcu *sk_filter; +@@ -379,11 +381,30 @@ struct sock { + #endif + struct dst_entry *sk_rx_dst; + struct dst_entry __rcu *sk_dst_cache; +- /* Note: 32bit hole on 64bit arches */ +- atomic_t sk_wmem_alloc; + atomic_t sk_omem_alloc; + int sk_sndbuf; ++ ++ /* ===== cache line for TX ===== */ ++ int sk_wmem_queued; ++ atomic_t sk_wmem_alloc; ++ unsigned long sk_tsq_flags; ++ struct sk_buff *sk_send_head; + struct sk_buff_head sk_write_queue; ++ __s32 sk_peek_off; ++ int sk_write_pending; ++ long sk_sndtimeo; ++ struct timer_list sk_timer; ++ __u32 sk_priority; ++ __u32 sk_mark; ++ u32 sk_pacing_rate; /* bytes per second */ ++ u32 sk_max_pacing_rate; ++ struct page_frag sk_frag; ++ netdev_features_t sk_route_caps; ++ netdev_features_t sk_route_nocaps; ++ int sk_gso_type; ++ unsigned int sk_gso_max_size; ++ gfp_t sk_allocation; ++ __u32 sk_txhash; + + /* + * Because of non atomicity rules, all +@@ -399,41 +420,23 @@ struct sock { + #define SK_PROTOCOL_MAX U8_MAX + kmemcheck_bitfield_end(flags); + +- int sk_wmem_queued; +- gfp_t sk_allocation; +- u32 sk_pacing_rate; /* bytes per second */ +- u32 sk_max_pacing_rate; +- netdev_features_t sk_route_caps; +- netdev_features_t sk_route_nocaps; +- int sk_gso_type; +- unsigned int sk_gso_max_size; + u16 sk_gso_max_segs; +- int sk_rcvlowat; + unsigned long sk_lingertime; +- struct sk_buff_head sk_error_queue; + struct proto *sk_prot_creator; + rwlock_t sk_callback_lock; + int sk_err, + sk_err_soft; + u32 sk_ack_backlog; + u32 sk_max_ack_backlog; +- __u32 sk_priority; +- __u32 sk_mark; + struct pid *sk_peer_pid; + const struct cred *sk_peer_cred; + long sk_rcvtimeo; +- long sk_sndtimeo; +- struct timer_list sk_timer; + ktime_t sk_stamp; + u16 sk_tsflags; + u8 sk_shutdown; + u32 sk_tskey; + struct socket *sk_socket; + void *sk_user_data; +- struct page_frag sk_frag; +- struct sk_buff *sk_send_head; +- __s32 sk_peek_off; +- int sk_write_pending; + #ifdef CONFIG_SECURITY + void *sk_security; + #endif diff --git a/target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch b/target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch new file mode 100644 index 000000000..6604a20fc --- /dev/null +++ b/target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch @@ -0,0 +1,176 @@ +From 7aa5470c2c09265902b5e4289afa82e4e7c2987e Mon Sep 17 00:00:00 2001 +From: Eric Dumazet +Date: Sat, 3 Dec 2016 11:14:57 -0800 +Subject: [PATCH 08/10] tcp: tsq: move tsq_flags close to sk_wmem_alloc + +tsq_flags being in the same cache line than sk_wmem_alloc +makes a lot of sense. 
Both fields are changed from tcp_wfree() +and more generally by various TSQ related functions. + +Prior patch made room in struct sock and added sk_tsq_flags, +this patch deletes tsq_flags from struct tcp_sock. + +Signed-off-by: Eric Dumazet +Signed-off-by: David S. Miller +--- + include/linux/tcp.h | 1 - + net/ipv4/tcp.c | 4 ++-- + net/ipv4/tcp_ipv4.c | 2 +- + net/ipv4/tcp_output.c | 24 +++++++++++------------- + net/ipv4/tcp_timer.c | 4 ++-- + net/ipv6/tcp_ipv6.c | 2 +- + 6 files changed, 17 insertions(+), 20 deletions(-) + +--- a/include/linux/tcp.h ++++ b/include/linux/tcp.h +@@ -192,7 +192,6 @@ struct tcp_sock { + u32 tsoffset; /* timestamp offset */ + + struct list_head tsq_node; /* anchor in tsq_tasklet.head list */ +- unsigned long tsq_flags; + + /* Data for direct copy to user */ + struct { +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -665,9 +665,9 @@ static void tcp_push(struct sock *sk, in + if (tcp_should_autocork(sk, skb, size_goal)) { + + /* avoid atomic op if TSQ_THROTTLED bit is already set */ +- if (!test_bit(TSQ_THROTTLED, &tp->tsq_flags)) { ++ if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING); +- set_bit(TSQ_THROTTLED, &tp->tsq_flags); ++ set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); + } + /* It is possible TX completion already happened + * before we set TSQ_THROTTLED. +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -446,7 +446,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb + if (!sock_owned_by_user(sk)) { + tcp_v4_mtu_reduced(sk); + } else { +- if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags)) ++ if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags)) + sock_hold(sk); + } + goto out; +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -767,14 +767,15 @@ static void tcp_tasklet_func(unsigned lo + list_for_each_safe(q, n, &list) { + tp = list_entry(q, struct tcp_sock, tsq_node); + list_del(&tp->tsq_node); +- clear_bit(TSQ_QUEUED, &tp->tsq_flags); + + sk = (struct sock *)tp; ++ clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags); ++ + if (!sk->sk_lock.owned && +- test_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags)) { ++ test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) { + bh_lock_sock(sk); + if (!sock_owned_by_user(sk)) { +- clear_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags); ++ clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags); + tcp_tsq_handler(sk); + } + bh_unlock_sock(sk); +@@ -797,16 +798,15 @@ static void tcp_tasklet_func(unsigned lo + */ + void tcp_release_cb(struct sock *sk) + { +- struct tcp_sock *tp = tcp_sk(sk); + unsigned long flags, nflags; + + /* perform an atomic operation only if at least one flag is set */ + do { +- flags = tp->tsq_flags; ++ flags = sk->sk_tsq_flags; + if (!(flags & TCP_DEFERRED_ALL)) + return; + nflags = flags & ~TCP_DEFERRED_ALL; +- } while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags); ++ } while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags); + + if (flags & TCPF_TSQ_DEFERRED) + tcp_tsq_handler(sk); +@@ -878,7 +878,7 @@ void tcp_wfree(struct sk_buff *skb) + if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current) + goto out; + +- for (oval = READ_ONCE(tp->tsq_flags);; oval = nval) { ++ for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) { + struct tsq_tasklet *tsq; + bool empty; + +@@ -886,7 +886,7 @@ void tcp_wfree(struct sk_buff *skb) + goto out; + + nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED; +- nval = cmpxchg(&tp->tsq_flags, oval, nval); ++ nval = cmpxchg(&sk->sk_tsq_flags, oval, nval); + if (nval != oval) + continue; 
+ +@@ -2096,7 +2096,7 @@ static bool tcp_small_queue_check(struct + skb->prev == sk->sk_write_queue.next) + return false; + +- set_bit(TSQ_THROTTLED, &tcp_sk(sk)->tsq_flags); ++ set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); + /* It is possible TX completion already happened + * before we set TSQ_THROTTLED, so we must + * test again the condition. +@@ -2194,8 +2194,8 @@ static bool tcp_write_xmit(struct sock * + unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) + break; + +- if (test_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags)) +- clear_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags); ++ if (test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) ++ clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags); + if (tcp_small_queue_check(sk, skb, 0)) + break; + +@@ -3508,8 +3508,6 @@ void tcp_send_ack(struct sock *sk) + /* We do not want pure acks influencing TCP Small Queues or fq/pacing + * too much. + * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784 +- * We also avoid tcp_wfree() overhead (cache line miss accessing +- * tp->tsq_flags) by using regular sock_wfree() + */ + skb_set_tcp_pure_ack(buff); + +--- a/net/ipv4/tcp_timer.c ++++ b/net/ipv4/tcp_timer.c +@@ -311,7 +311,7 @@ static void tcp_delack_timer(unsigned lo + inet_csk(sk)->icsk_ack.blocked = 1; + __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); + /* deleguate our work to tcp_release_cb() */ +- if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags)) ++ if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags)) + sock_hold(sk); + } + bh_unlock_sock(sk); +@@ -594,7 +594,7 @@ static void tcp_write_timer(unsigned lon + tcp_write_timer_handler(sk); + } else { + /* delegate our work to tcp_release_cb() */ +- if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags)) ++ if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags)) + sock_hold(sk); + } + bh_unlock_sock(sk); +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -404,7 +404,7 @@ static void tcp_v6_err(struct sk_buff *s + if (!sock_owned_by_user(sk)) + tcp_v6_mtu_reduced(sk); + else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, +- &tp->tsq_flags)) ++ &sk->sk_tsq_flags)) + sock_hold(sk); + goto out; + } diff --git a/target/linux/generic/backport-4.9/024-9-tcp-add-a-missing-barrier-in-tcp_tasklet_func.patch b/target/linux/generic/backport-4.9/024-9-tcp-add-a-missing-barrier-in-tcp_tasklet_func.patch new file mode 100644 index 000000000..d2b8de6a0 --- /dev/null +++ b/target/linux/generic/backport-4.9/024-9-tcp-add-a-missing-barrier-in-tcp_tasklet_func.patch @@ -0,0 +1,40 @@ +From 0a9648f1293966c838dc570da73c15a76f4c89d6 Mon Sep 17 00:00:00 2001 +From: Eric Dumazet +Date: Wed, 21 Dec 2016 05:42:43 -0800 +Subject: [PATCH 09/10] tcp: add a missing barrier in tcp_tasklet_func() + +Madalin reported crashes happening in tcp_tasklet_func() on powerpc64 + +Before TSQ_QUEUED bit is cleared, we must ensure the changes done +by list_del(&tp->tsq_node); are committed to memory, otherwise +corruption might happen, as an other cpu could catch TSQ_QUEUED +clearance too soon. + +We can notice that old kernels were immune to this bug, because +TSQ_QUEUED was cleared after a bh_lock_sock(sk)/bh_unlock_sock(sk) +section, but they could have missed a kick to write additional bytes, +when NIC interrupts for a given flow are spread to multiple cpus. + +Affected TCP flows would need an incoming ACK or RTO timer to add more +packets to the pipe. So overall situation should be better now. 
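+
+A condensed view of the ordering the one-line fix below enforces
+(illustrative; tp/sk as in tcp_tasklet_func()):
+
+	list_del(&tp->tsq_node);   /* must be globally visible first */
+	smp_mb__before_atomic();   /* order list_del() before the RMW */
+	clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);
+
+Without the barrier, another CPU may observe TSQ_QUEUED cleared while
+the list_del() stores are still pending, win the cmpxchg() in
+tcp_wfree() and re-link tsq_node into its per-cpu list, corrupting it.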
+ +Fixes: b223feb9de2a ("tcp: tsq: add shortcut in tcp_tasklet_func()") +Signed-off-by: Eric Dumazet +Reported-by: Madalin Bucur +Tested-by: Madalin Bucur +Tested-by: Xing Lei +Signed-off-by: David S. Miller +--- + net/ipv4/tcp_output.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -769,6 +769,7 @@ static void tcp_tasklet_func(unsigned lo + list_del(&tp->tsq_node); + + sk = (struct sock *)tp; ++ smp_mb__before_atomic(); + clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags); + + if (!sk->sk_lock.owned && diff --git a/target/linux/generic/backport-4.9/090-net-generalize-napi_complete_done.patch b/target/linux/generic/backport-4.9/090-net-generalize-napi_complete_done.patch new file mode 100644 index 000000000..e1541e90a --- /dev/null +++ b/target/linux/generic/backport-4.9/090-net-generalize-napi_complete_done.patch @@ -0,0 +1,1412 @@ +From 6ad20165d376fa07919a70e4f43dfae564601829 Mon Sep 17 00:00:00 2001 +From: Eric Dumazet +Date: Mon, 30 Jan 2017 08:22:01 -0800 +Subject: drivers: net: generalize napi_complete_done() + +napi_complete_done() allows to opt-in for gro_flush_timeout, +added back in linux-3.19, commit 3b47d30396ba +("net: gro: add a per device gro flush timer") + +This allows for more efficient GRO aggregation without +sacrifying latencies. + +Signed-off-by: Eric Dumazet +Signed-off-by: David S. Miller +--- + drivers/net/can/at91_can.c | 2 +- + drivers/net/can/c_can/c_can.c | 2 +- + drivers/net/can/flexcan.c | 2 +- + drivers/net/can/ifi_canfd/ifi_canfd.c | 2 +- + drivers/net/can/janz-ican3.c | 2 +- + drivers/net/can/m_can/m_can.c | 2 +- + drivers/net/can/rcar/rcar_can.c | 2 +- + drivers/net/can/rcar/rcar_canfd.c | 2 +- + drivers/net/can/xilinx_can.c | 2 +- + drivers/net/ethernet/3com/typhoon.c | 2 +- + drivers/net/ethernet/adi/bfin_mac.c | 2 +- + drivers/net/ethernet/agere/et131x.c | 2 +- + drivers/net/ethernet/altera/altera_tse_main.c | 2 +- + drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 2 +- + drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 2 +- + drivers/net/ethernet/arc/emac_main.c | 2 +- + drivers/net/ethernet/atheros/alx/main.c | 2 +- + drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 2 +- + drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 2 +- + drivers/net/ethernet/atheros/atlx/atl1.c | 2 +- + drivers/net/ethernet/broadcom/b44.c | 2 +- + drivers/net/ethernet/broadcom/bcm63xx_enet.c | 2 +- + drivers/net/ethernet/broadcom/bgmac.c | 2 +- + drivers/net/ethernet/broadcom/bnx2.c | 4 ++-- + drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2 +- + drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- + drivers/net/ethernet/broadcom/sb1250-mac.c | 2 +- + drivers/net/ethernet/brocade/bna/bnad.c | 2 +- + drivers/net/ethernet/cadence/macb.c | 2 +- + drivers/net/ethernet/calxeda/xgmac.c | 2 +- + drivers/net/ethernet/cavium/liquidio/lio_main.c | 2 +- + drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 2 +- + drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 2 +- + drivers/net/ethernet/cavium/thunder/nicvf_main.c | 2 +- + drivers/net/ethernet/chelsio/cxgb/sge.c | 2 +- + drivers/net/ethernet/chelsio/cxgb3/sge.c | 4 ++-- + drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 2 +- + drivers/net/ethernet/cisco/enic/enic_main.c | 4 ++-- + drivers/net/ethernet/dec/tulip/interrupt.c | 6 +++--- + drivers/net/ethernet/dnet.c | 2 +- + drivers/net/ethernet/emulex/benet/be_main.c | 2 +- + drivers/net/ethernet/ethoc.c | 2 +- + drivers/net/ethernet/ezchip/nps_enet.c | 2 +- + drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2 +- + 
drivers/net/ethernet/freescale/fec_main.c | 2 +- + .../net/ethernet/freescale/fs_enet/fs_enet-main.c | 2 +- + drivers/net/ethernet/freescale/gianfar.c | 4 ++-- + drivers/net/ethernet/freescale/ucc_geth.c | 2 +- + drivers/net/ethernet/hisilicon/hip04_eth.c | 2 +- + drivers/net/ethernet/hisilicon/hisi_femac.c | 2 +- + drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 2 +- + drivers/net/ethernet/ibm/ibmveth.c | 2 +- + drivers/net/ethernet/ibm/ibmvnic.c | 2 +- + drivers/net/ethernet/intel/e100.c | 2 +- + drivers/net/ethernet/intel/ixgb/ixgb_main.c | 2 +- + drivers/net/ethernet/korina.c | 2 +- + drivers/net/ethernet/lantiq_etop.c | 21 +++++++++------------ + drivers/net/ethernet/marvell/mv643xx_eth.c | 2 +- + drivers/net/ethernet/marvell/mvneta.c | 6 ++---- + drivers/net/ethernet/marvell/mvpp2.c | 2 +- + drivers/net/ethernet/marvell/pxa168_eth.c | 2 +- + drivers/net/ethernet/moxa/moxart_ether.c | 2 +- + drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 2 +- + drivers/net/ethernet/natsemi/natsemi.c | 2 +- + drivers/net/ethernet/neterion/s2io.c | 4 ++-- + drivers/net/ethernet/neterion/vxge/vxge-main.c | 6 +++--- + drivers/net/ethernet/nvidia/forcedeth.c | 2 +- + drivers/net/ethernet/nxp/lpc_eth.c | 2 +- + .../net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 2 +- + drivers/net/ethernet/pasemi/pasemi_mac.c | 2 +- + .../net/ethernet/qlogic/netxen/netxen_nic_main.c | 2 +- + drivers/net/ethernet/qlogic/qede/qede_fp.c | 2 +- + drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 10 +++++----- + drivers/net/ethernet/qlogic/qlge/qlge_main.c | 2 +- + drivers/net/ethernet/qualcomm/emac/emac.c | 2 +- + drivers/net/ethernet/realtek/r8169.c | 2 +- + drivers/net/ethernet/rocker/rocker_main.c | 2 +- + drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 2 +- + drivers/net/ethernet/sfc/efx.c | 2 +- + drivers/net/ethernet/sfc/falcon/efx.c | 2 +- + drivers/net/ethernet/smsc/smsc9420.c | 2 +- + drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2 +- + drivers/net/ethernet/sun/niu.c | 2 +- + drivers/net/ethernet/sun/sungem.c | 2 +- + drivers/net/ethernet/sun/sunvnet_common.c | 2 +- + drivers/net/ethernet/tehuti/tehuti.c | 2 +- + drivers/net/ethernet/ti/cpsw.c | 2 +- + drivers/net/ethernet/ti/davinci_emac.c | 2 +- + drivers/net/ethernet/ti/netcp_core.c | 2 +- + drivers/net/ethernet/tile/tilegx.c | 2 +- + drivers/net/ethernet/tile/tilepro.c | 2 +- + drivers/net/ethernet/toshiba/ps3_gelic_net.c | 2 +- + drivers/net/ethernet/toshiba/spider_net.c | 2 +- + drivers/net/ethernet/toshiba/tc35815.c | 2 +- + drivers/net/ethernet/tundra/tsi108_eth.c | 2 +- + drivers/net/ethernet/via/via-rhine.c | 2 +- + drivers/net/ethernet/via/via-velocity.c | 2 +- + drivers/net/ethernet/wiznet/w5100.c | 2 +- + drivers/net/ethernet/wiznet/w5300.c | 2 +- + drivers/net/fjes/fjes_main.c | 2 +- + drivers/net/vmxnet3/vmxnet3_drv.c | 4 ++-- + drivers/net/wan/fsl_ucc_hdlc.c | 2 +- + drivers/net/wan/hd64572.c | 2 +- + drivers/net/wireless/ath/ath10k/pci.c | 2 +- + drivers/net/wireless/ath/wil6210/netdev.c | 2 +- + drivers/net/xen-netback/interface.c | 2 +- + drivers/net/xen-netfront.c | 2 +- + drivers/staging/octeon/ethernet-rx.c | 2 +- + drivers/staging/unisys/visornic/visornic_main.c | 2 +- + 109 files changed, 132 insertions(+), 137 deletions(-) + +--- a/drivers/net/can/at91_can.c ++++ b/drivers/net/can/at91_can.c +@@ -813,7 +813,7 @@ static int at91_poll(struct napi_struct + u32 reg_ier = AT91_IRQ_ERR_FRAME; + reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next); + +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + at91_write(priv, 
AT91_IER, reg_ier); + } + +--- a/drivers/net/can/c_can/c_can.c ++++ b/drivers/net/can/c_can/c_can.c +@@ -1070,7 +1070,7 @@ static int c_can_poll(struct napi_struct + + end: + if (work_done < quota) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + /* enable all IRQs if we are not in bus off state */ + if (priv->can.state != CAN_STATE_BUS_OFF) + c_can_irq_control(priv, true); +--- a/drivers/net/can/flexcan.c ++++ b/drivers/net/can/flexcan.c +@@ -703,7 +703,7 @@ static int flexcan_poll(struct napi_stru + work_done += flexcan_poll_bus_err(dev, reg_esr); + + if (work_done < quota) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + /* enable IRQs */ + flexcan_write(FLEXCAN_IFLAG_DEFAULT, ®s->imask1); + flexcan_write(priv->reg_ctrl_default, ®s->ctrl); +--- a/drivers/net/can/ifi_canfd/ifi_canfd.c ++++ b/drivers/net/can/ifi_canfd/ifi_canfd.c +@@ -578,7 +578,7 @@ static int ifi_canfd_poll(struct napi_st + work_done += ifi_canfd_do_rx_poll(ndev, quota - work_done); + + if (work_done < quota) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + ifi_canfd_irq_enable(ndev, 1); + } + +--- a/drivers/net/can/janz-ican3.c ++++ b/drivers/net/can/janz-ican3.c +@@ -1475,7 +1475,7 @@ static int ican3_napi(struct napi_struct + /* We have processed all packets that the adapter had, but it + * was less than our budget, stop polling */ + if (received < budget) +- napi_complete(napi); ++ napi_complete_done(napi, received); + + spin_lock_irqsave(&mod->lock, flags); + +--- a/drivers/net/can/m_can/m_can.c ++++ b/drivers/net/can/m_can/m_can.c +@@ -730,7 +730,7 @@ static int m_can_poll(struct napi_struct + work_done += m_can_do_rx_poll(dev, (quota - work_done)); + + if (work_done < quota) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + m_can_enable_all_interrupts(priv); + } + +--- a/drivers/net/can/rcar/rcar_can.c ++++ b/drivers/net/can/rcar/rcar_can.c +@@ -695,7 +695,7 @@ static int rcar_can_rx_poll(struct napi_ + } + /* All packets processed */ + if (num_pkts < quota) { +- napi_complete(napi); ++ napi_complete_done(napi, num_pkts); + priv->ier |= RCAR_CAN_IER_RXFIE; + writeb(priv->ier, &priv->regs->ier); + } +--- a/drivers/net/can/rcar/rcar_canfd.c ++++ b/drivers/net/can/rcar/rcar_canfd.c +@@ -1512,7 +1512,7 @@ static int rcar_canfd_rx_poll(struct nap + + /* All packets processed */ + if (num_pkts < quota) { +- napi_complete(napi); ++ napi_complete_done(napi, num_pkts); + /* Enable Rx FIFO interrupts */ + rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), + RCANFD_RFCC_RFIE); +--- a/drivers/net/can/xilinx_can.c ++++ b/drivers/net/can/xilinx_can.c +@@ -726,7 +726,7 @@ static int xcan_rx_poll(struct napi_stru + can_led_event(ndev, CAN_LED_EVENT_RX); + + if (work_done < quota) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + ier = priv->read_reg(priv, XCAN_IER_OFFSET); + ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK); + priv->write_reg(priv, XCAN_IER_OFFSET, ier); +--- a/drivers/net/ethernet/3com/typhoon.c ++++ b/drivers/net/ethernet/3com/typhoon.c +@@ -1748,7 +1748,7 @@ typhoon_poll(struct napi_struct *napi, i + } + + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + iowrite32(TYPHOON_INTR_NONE, + tp->ioaddr + TYPHOON_REG_INTR_MASK); + typhoon_post_pci_writes(tp->ioaddr); +--- a/drivers/net/ethernet/adi/bfin_mac.c ++++ b/drivers/net/ethernet/adi/bfin_mac.c +@@ -1274,7 +1274,7 @@ static int bfin_mac_poll(struct napi_str + } + + if (i < budget) { +- napi_complete(napi); ++ 
napi_complete_done(napi, i); + if (test_and_clear_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags)) + enable_irq(IRQ_MAC_RX); + } +--- a/drivers/net/ethernet/agere/et131x.c ++++ b/drivers/net/ethernet/agere/et131x.c +@@ -3573,7 +3573,7 @@ static int et131x_poll(struct napi_struc + et131x_handle_send_pkts(adapter); + + if (work_done < budget) { +- napi_complete(&adapter->napi); ++ napi_complete_done(&adapter->napi, work_done); + et131x_enable_interrupts(adapter); + } + +--- a/drivers/net/ethernet/altera/altera_tse_main.c ++++ b/drivers/net/ethernet/altera/altera_tse_main.c +@@ -491,7 +491,7 @@ static int tse_poll(struct napi_struct * + + if (rxcomplete < budget) { + +- napi_complete(napi); ++ napi_complete_done(napi, rxcomplete); + + netdev_dbg(priv->dev, + "NAPI Complete, did %d packets with budget %d\n", +--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c ++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +@@ -651,7 +651,7 @@ static int xgene_enet_napi(struct napi_s + processed = xgene_enet_process_ring(ring, budget); + + if (processed != budget) { +- napi_complete(napi); ++ napi_complete_done(napi, processed); + enable_irq(ring->irq); + } + +--- a/drivers/net/ethernet/arc/emac_main.c ++++ b/drivers/net/ethernet/arc/emac_main.c +@@ -275,7 +275,7 @@ static int arc_emac_poll(struct napi_str + + work_done = arc_emac_rx(ndev, budget); + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK); + } + +--- a/drivers/net/ethernet/atheros/alx/main.c ++++ b/drivers/net/ethernet/atheros/alx/main.c +@@ -292,7 +292,7 @@ static int alx_poll(struct napi_struct * + if (!tx_complete || work == budget) + return budget; + +- napi_complete(&alx->napi); ++ napi_complete_done(&alx->napi, work); + + /* enable interrupt */ + if (alx->flags & ALX_FLAG_USING_MSIX) { +--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c ++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +@@ -1885,7 +1885,7 @@ static int atl1c_clean(struct napi_struc + + if (work_done < budget) { + quit_polling: +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + adapter->hw.intr_mask |= ISR_RX_PKT; + AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); + } +--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c ++++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +@@ -1532,7 +1532,7 @@ static int atl1e_clean(struct napi_struc + /* If no Tx and not enough Rx work done, exit the polling mode */ + if (work_done < budget) { + quit_polling: +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + imr_data = AT_READ_REG(&adapter->hw, REG_IMR); + AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT); + /* test debug */ +--- a/drivers/net/ethernet/atheros/atlx/atl1.c ++++ b/drivers/net/ethernet/atheros/atlx/atl1.c +@@ -2457,7 +2457,7 @@ static int atl1_rings_clean(struct napi_ + if (work_done >= budget) + return work_done; + +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + /* re-enable Interrupt */ + if (likely(adapter->int_enabled)) + atlx_imr_set(adapter, IMR_NORMAL_MASK); +--- a/drivers/net/ethernet/broadcom/b44.c ++++ b/drivers/net/ethernet/broadcom/b44.c +@@ -902,7 +902,7 @@ static int b44_poll(struct napi_struct * + } + + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + b44_enable_ints(bp); + } + +--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c ++++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c +@@ -511,7 +511,7 @@ static int bcm_enet_poll(struct napi_str + 
+ /* no more packet in rx/tx queue, remove device from poll + * queue */ +- napi_complete(napi); ++ napi_complete_done(napi, rx_work_done); + + /* restore rx/tx interrupt */ + enet_dmac_writel(priv, priv->dma_chan_int_mask, +--- a/drivers/net/ethernet/broadcom/bgmac.c ++++ b/drivers/net/ethernet/broadcom/bgmac.c +@@ -1145,7 +1145,7 @@ static int bgmac_poll(struct napi_struct + return weight; + + if (handled < weight) { +- napi_complete(napi); ++ napi_complete_done(napi, handled); + bgmac_chip_intrs_on(bgmac); + } + +--- a/drivers/net/ethernet/broadcom/bnx2.c ++++ b/drivers/net/ethernet/broadcom/bnx2.c +@@ -3522,7 +3522,7 @@ static int bnx2_poll_msix(struct napi_st + rmb(); + if (likely(!bnx2_has_fast_work(bnapi))) { + +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + bnapi->last_status_idx); +@@ -3559,7 +3559,7 @@ static int bnx2_poll(struct napi_struct + + rmb(); + if (likely(!bnx2_has_work(bnapi))) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) { + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +@@ -3229,7 +3229,7 @@ static int bnx2x_poll(struct napi_struct + * has been updated when NAPI was scheduled. + */ + if (IS_FCOE_FP(fp)) { +- napi_complete(napi); ++ napi_complete_done(napi, rx_work_done); + } else { + bnx2x_update_fpsb_idx(fp); + /* bnx2x_has_rx_work() reads the status block, +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -1768,7 +1768,7 @@ static int bnxt_poll_nitroa0(struct napi + } + + if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, rx_pkts); + BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); + } + return rx_pkts; +--- a/drivers/net/ethernet/broadcom/sb1250-mac.c ++++ b/drivers/net/ethernet/broadcom/sb1250-mac.c +@@ -2545,7 +2545,7 @@ static int sbmac_poll(struct napi_struct + sbdma_tx_process(sc, &(sc->sbm_txdma), 1); + + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + + #ifdef CONFIG_SBMAC_COALESCE + __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | +--- a/drivers/net/ethernet/brocade/bna/bnad.c ++++ b/drivers/net/ethernet/brocade/bna/bnad.c +@@ -1881,7 +1881,7 @@ bnad_napi_poll_rx(struct napi_struct *na + return rcvd; + + poll_exit: +- napi_complete(napi); ++ napi_complete_done(napi, rcvd); + + rx_ctrl->rx_complete++; + +--- a/drivers/net/ethernet/cadence/macb.c ++++ b/drivers/net/ethernet/cadence/macb.c +@@ -1069,7 +1069,7 @@ static int macb_poll(struct napi_struct + + work_done = bp->macbgem_ops.mog_rx(bp, budget); + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + + /* Packets received while interrupts were disabled */ + status = macb_readl(bp, RSR); +--- a/drivers/net/ethernet/calxeda/xgmac.c ++++ b/drivers/net/ethernet/calxeda/xgmac.c +@@ -1247,7 +1247,7 @@ static int xgmac_poll(struct napi_struct + work_done = xgmac_rx(priv, budget); + + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + __raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA); + } + return work_done; +--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c ++++ 
b/drivers/net/ethernet/cavium/liquidio/lio_main.c +@@ -2433,7 +2433,7 @@ static int liquidio_napi_poll(struct nap + } + + if ((work_done < budget) && (tx_done)) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, + POLL_EVENT_ENABLE_INTR, 0); + return 0; +--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c ++++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +@@ -501,7 +501,7 @@ static int octeon_mgmt_napi_poll(struct + + if (work_done < budget) { + /* We stopped because no more packets were available. */ +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + octeon_mgmt_enable_rx_irq(p); + } + octeon_mgmt_update_rx_stats(netdev); +--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c ++++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c +@@ -737,7 +737,7 @@ static int nicvf_poll(struct napi_struct + + if (work_done < budget) { + /* Slow packet rate, exit polling */ +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + /* Re-enable interrupts */ + cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, + cq->cq_idx); +--- a/drivers/net/ethernet/chelsio/cxgb/sge.c ++++ b/drivers/net/ethernet/chelsio/cxgb/sge.c +@@ -1605,7 +1605,7 @@ int t1_poll(struct napi_struct *napi, in + int work_done = process_responses(adapter, budget); + + if (likely(work_done < budget)) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + writel(adapter->sge->respQ.cidx, + adapter->regs + A_SG_SLEEPING); + } +--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c ++++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c +@@ -1843,7 +1843,7 @@ static int ofld_poll(struct napi_struct + __skb_queue_head_init(&queue); + skb_queue_splice_init(&q->rx_queue, &queue); + if (skb_queue_empty(&queue)) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + spin_unlock_irq(&q->lock); + return work_done; + } +@@ -2414,7 +2414,7 @@ static int napi_rx_handler(struct napi_s + int work_done = process_responses(adap, qs, budget); + + if (likely(work_done < budget)) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + + /* + * Because we don't atomically flush the following +--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c ++++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +@@ -1889,7 +1889,7 @@ static int napi_rx_handler(struct napi_s + u32 val; + + if (likely(work_done < budget)) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + intr_params = rspq->next_intr_params; + rspq->next_intr_params = rspq->intr_params; + } else +--- a/drivers/net/ethernet/cisco/enic/enic_main.c ++++ b/drivers/net/ethernet/cisco/enic/enic_main.c +@@ -1339,7 +1339,7 @@ static int enic_poll(struct napi_struct + * exit polling + */ + +- napi_complete(napi); ++ napi_complete_done(napi, rq_work_done); + if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) + enic_set_int_moderation(enic, &enic->rq[0]); + vnic_intr_unmask(&enic->intr[intr]); +@@ -1496,7 +1496,7 @@ static int enic_poll_msix_rq(struct napi + * exit polling + */ + +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) + enic_set_int_moderation(enic, &enic->rq[rq]); + vnic_intr_unmask(&enic->intr[intr]); +--- a/drivers/net/ethernet/dec/tulip/interrupt.c ++++ b/drivers/net/ethernet/dec/tulip/interrupt.c +@@ -319,8 +319,8 @@ int tulip_poll(struct napi_struct *napi, + + /* Remove us from polling list and enable RX intr. 
*/ + +- napi_complete(napi); +- iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); ++ napi_complete_done(napi, work_done); ++ iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); + + /* The last op happens after poll completion. Which means the following: + * 1. it can race with disabling irqs in irq handler +@@ -355,7 +355,7 @@ int tulip_poll(struct napi_struct *napi, + * before we did napi_complete(). See? We would lose it. */ + + /* remove ourselves from the polling list */ +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + + return work_done; + } +--- a/drivers/net/ethernet/dnet.c ++++ b/drivers/net/ethernet/dnet.c +@@ -415,7 +415,7 @@ static int dnet_poll(struct napi_struct + /* We processed all packets available. Tell NAPI it can + * stop polling then re-enable rx interrupts. + */ +- napi_complete(napi); ++ napi_complete_done(napi, npackets); + int_enable = dnet_readl(bp, INTR_ENB); + int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; + dnet_writel(bp, int_enable, INTR_ENB); +--- a/drivers/net/ethernet/emulex/benet/be_main.c ++++ b/drivers/net/ethernet/emulex/benet/be_main.c +@@ -3346,7 +3346,7 @@ int be_poll(struct napi_struct *napi, in + be_process_mcc(adapter); + + if (max_work < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, max_work); + + /* Skyhawk EQ_DB has a provision to set the rearm to interrupt + * delay via a delay multiplier encoding value +--- a/drivers/net/ethernet/ethoc.c ++++ b/drivers/net/ethernet/ethoc.c +@@ -614,7 +614,7 @@ static int ethoc_poll(struct napi_struct + tx_work_done = ethoc_tx(priv->netdev, budget); + + if (rx_work_done < budget && tx_work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, rx_work_done); + ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX); + } + +--- a/drivers/net/ethernet/ezchip/nps_enet.c ++++ b/drivers/net/ethernet/ezchip/nps_enet.c +@@ -192,7 +192,7 @@ static int nps_enet_poll(struct napi_str + if (work_done < budget) { + u32 buf_int_enable_value = 0; + +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + + /* set tx_done and rx_rdy bits */ + buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT; +--- a/drivers/net/ethernet/freescale/fec_main.c ++++ b/drivers/net/ethernet/freescale/fec_main.c +@@ -1615,7 +1615,7 @@ static int fec_enet_rx_napi(struct napi_ + fec_enet_tx(ndev); + + if (pkts < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, pkts); + writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); + } + return pkts; +--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c ++++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +@@ -301,7 +301,7 @@ static int fs_enet_napi(struct napi_stru + + if (received < budget && tx_left) { + /* done */ +- napi_complete(napi); ++ napi_complete_done(napi, received); + (*fep->ops->napi_enable)(dev); + + return received; +--- a/drivers/net/ethernet/freescale/gianfar.c ++++ b/drivers/net/ethernet/freescale/gianfar.c +@@ -3186,7 +3186,7 @@ static int gfar_poll_rx_sq(struct napi_s + + if (work_done < budget) { + u32 imask; +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + /* Clear the halt bit in RSTAT */ + gfar_write(®s->rstat, gfargrp->rstat); + +@@ -3275,7 +3275,7 @@ static int gfar_poll_rx(struct napi_stru + + if (!num_act_queues) { + u32 imask; +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + + /* Clear the halt bit in RSTAT */ + gfar_write(®s->rstat, gfargrp->rstat); +--- a/drivers/net/ethernet/freescale/ucc_geth.c ++++ 
b/drivers/net/ethernet/freescale/ucc_geth.c +@@ -3303,7 +3303,7 @@ static int ucc_geth_poll(struct napi_str + howmany += ucc_geth_rx(ugeth, i, budget - howmany); + + if (howmany < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, howmany); + setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS); + } + +--- a/drivers/net/ethernet/hisilicon/hip04_eth.c ++++ b/drivers/net/ethernet/hisilicon/hip04_eth.c +@@ -555,7 +555,7 @@ refill: + priv->reg_inten |= RCV_INT; + writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN); + } +- napi_complete(napi); ++ napi_complete_done(napi, rx); + done: + /* clean up tx descriptors and start a new timer if necessary */ + tx_remaining = hip04_tx_reclaim(ndev, false); +--- a/drivers/net/ethernet/hisilicon/hisi_femac.c ++++ b/drivers/net/ethernet/hisilicon/hisi_femac.c +@@ -330,7 +330,7 @@ static int hisi_femac_poll(struct napi_s + } while (ints & DEF_INT_MASK); + + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + hisi_femac_irq_enable(priv, DEF_INT_MASK & + (~IRQ_INT_TX_PER_PACKET)); + } +--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c ++++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +@@ -578,7 +578,7 @@ static int hix5hd2_poll(struct napi_stru + } while (ints & DEF_INT_MASK); + + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + hix5hd2_irq_enable(priv); + } + +--- a/drivers/net/ethernet/ibm/ibmveth.c ++++ b/drivers/net/ethernet/ibm/ibmveth.c +@@ -1320,7 +1320,7 @@ restart_poll: + ibmveth_replenish_task(adapter); + + if (frames_processed < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, frames_processed); + + /* We think we are done - reenable interrupts, + * then check once more to make sure we are done. 
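Every driver conversion in this patch has the same shape; only the work-accounting variable differs. As a sketch, a hypothetical driver's poll routine looks like this after the change (the foo_* names are invented for the example and belong to no real driver):

    static int foo_poll(struct napi_struct *napi, int budget)
    {
        struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
        int work_done = foo_rx(priv, budget);  /* hypothetical RX processing */

        if (work_done < budget) {
            /* under budget: leave polling and re-enable the RX interrupt */
            napi_complete_done(napi, work_done);
            foo_enable_irq(priv);              /* hypothetical */
        }
        return work_done;
    }

Reporting the real work_done count lets the core honor the per-device gro_flush_timeout mentioned in the changelog, instead of unconditionally flushing GRO state whenever polling ends.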
+--- a/drivers/net/ethernet/ibm/ibmvnic.c ++++ b/drivers/net/ethernet/ibm/ibmvnic.c +@@ -999,7 +999,7 @@ restart_poll: + + if (frames_processed < budget) { + enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); +- napi_complete(napi); ++ napi_complete_done(napi, frames_processed); + if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) && + napi_reschedule(napi)) { + disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); +--- a/drivers/net/ethernet/intel/e100.c ++++ b/drivers/net/ethernet/intel/e100.c +@@ -2253,7 +2253,7 @@ static int e100_poll(struct napi_struct + + /* If budget not fully consumed, exit the polling mode */ + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + e100_enable_irq(nic); + } + +--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c ++++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c +@@ -1825,7 +1825,7 @@ ixgb_clean(struct napi_struct *napi, int + + /* If budget not fully consumed, exit the polling mode */ + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + if (!test_bit(__IXGB_DOWN, &adapter->flags)) + ixgb_irq_enable(adapter); + } +--- a/drivers/net/ethernet/korina.c ++++ b/drivers/net/ethernet/korina.c +@@ -464,7 +464,7 @@ static int korina_poll(struct napi_struc + + work_done = korina_rx(dev, budget); + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + + writel(readl(&lp->rx_dma_regs->dmasm) & + ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR), +--- a/drivers/net/ethernet/lantiq_etop.c ++++ b/drivers/net/ethernet/lantiq_etop.c +@@ -156,24 +156,21 @@ ltq_etop_poll_rx(struct napi_struct *nap + { + struct ltq_etop_chan *ch = container_of(napi, + struct ltq_etop_chan, napi); +- int rx = 0; +- int complete = 0; ++ int work_done = 0; + +- while ((rx < budget) && !complete) { ++ while (work_done < budget) { + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; + +- if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { +- ltq_etop_hw_receive(ch); +- rx++; +- } else { +- complete = 1; +- } ++ if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C) ++ break; ++ ltq_etop_hw_receive(ch); ++ work_done++; + } +- if (complete || !rx) { +- napi_complete(&ch->napi); ++ if (work_done < budget) { ++ napi_complete_done(&ch->napi, work_done); + ltq_dma_ack_irq(&ch->dma); + } +- return rx; ++ return work_done; + } + + static int +--- a/drivers/net/ethernet/marvell/mv643xx_eth.c ++++ b/drivers/net/ethernet/marvell/mv643xx_eth.c +@@ -2312,7 +2312,7 @@ static int mv643xx_eth_poll(struct napi_ + if (work_done < budget) { + if (mp->oom) + mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + wrlp(mp, INT_MASK, mp->int_mask); + } + +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -2696,11 +2696,9 @@ static int mvneta_poll(struct napi_struc + rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]); + } + +- budget -= rx_done; +- +- if (budget > 0) { ++ if (rx_done < budget) { + cause_rx_tx = 0; +- napi_complete(&port->napi); ++ napi_complete_done(&port->napi, rx_done); + enable_percpu_irq(pp->dev->irq, 0); + } + +--- a/drivers/net/ethernet/marvell/mvpp2.c ++++ b/drivers/net/ethernet/marvell/mvpp2.c +@@ -5405,7 +5405,7 @@ static int mvpp2_poll(struct napi_struct + + if (budget > 0) { + cause_rx = 0; +- napi_complete(napi); ++ napi_complete_done(napi, rx_done); + + mvpp2_interrupts_enable(port); + } +--- a/drivers/net/ethernet/marvell/pxa168_eth.c ++++ 
b/drivers/net/ethernet/marvell/pxa168_eth.c +@@ -1264,7 +1264,7 @@ static int pxa168_rx_poll(struct napi_st + } + work_done = rxq_process(dev, budget); + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + wrl(pep, INT_MASK, ALL_INTS); + } + +--- a/drivers/net/ethernet/moxa/moxart_ether.c ++++ b/drivers/net/ethernet/moxa/moxart_ether.c +@@ -269,7 +269,7 @@ rx_next: + } + + if (rx < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, rx); + } + + priv->reg_imr |= RPKT_FINISH_M; +--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c ++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +@@ -1678,7 +1678,7 @@ static int myri10ge_poll(struct napi_str + + myri10ge_ss_unlock_napi(ss); + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + put_be32(htonl(3), ss->irq_claim); + } + return work_done; +--- a/drivers/net/ethernet/natsemi/natsemi.c ++++ b/drivers/net/ethernet/natsemi/natsemi.c +@@ -2261,7 +2261,7 @@ static int natsemi_poll(struct napi_stru + np->intr_status = readl(ioaddr + IntrStatus); + } while (np->intr_status); + +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + + /* Reenable interrupts providing nothing is trying to shut + * the chip down. */ +--- a/drivers/net/ethernet/neterion/s2io.c ++++ b/drivers/net/ethernet/neterion/s2io.c +@@ -2783,7 +2783,7 @@ static int s2io_poll_msix(struct napi_st + s2io_chk_rx_buffers(nic, ring); + + if (pkts_processed < budget_org) { +- napi_complete(napi); ++ napi_complete_done(napi, pkts_processed); + /*Re Enable MSI-Rx Vector*/ + addr = (u8 __iomem *)&bar0->xmsi_mask_reg; + addr += 7 - ring->ring_no; +@@ -2817,7 +2817,7 @@ static int s2io_poll_inta(struct napi_st + break; + } + if (pkts_processed < budget_org) { +- napi_complete(napi); ++ napi_complete_done(napi, pkts_processed); + /* Re enable the Rx interrupts for the ring */ + writeq(0, &bar0->rx_traffic_mask); + readl(&bar0->rx_traffic_mask); +--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c ++++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c +@@ -1823,8 +1823,8 @@ static int vxge_poll_msix(struct napi_st + vxge_hw_vpath_poll_rx(ring->handle); + pkts_processed = ring->pkts_processed; + +- if (ring->pkts_processed < budget_org) { +- napi_complete(napi); ++ if (pkts_processed < budget_org) { ++ napi_complete_done(napi, pkts_processed); + + /* Re enable the Rx interrupts for the vpath */ + vxge_hw_channel_msix_unmask( +@@ -1863,7 +1863,7 @@ static int vxge_poll_inta(struct napi_st + VXGE_COMPLETE_ALL_TX(vdev); + + if (pkts_processed < budget_org) { +- napi_complete(napi); ++ napi_complete_done(napi, pkts_processed); + /* Re enable the Rx interrupts for the ring */ + vxge_hw_device_unmask_all(hldev); + vxge_hw_device_flush_io(hldev); +--- a/drivers/net/ethernet/nvidia/forcedeth.c ++++ b/drivers/net/ethernet/nvidia/forcedeth.c +@@ -3756,7 +3756,7 @@ static int nv_napi_poll(struct napi_stru + if (rx_work < budget) { + /* re-enable interrupts + (msix not enabled in napi) */ +- napi_complete(napi); ++ napi_complete_done(napi, rx_work); + + writel(np->irqmask, base + NvRegIrqMask); + } +--- a/drivers/net/ethernet/nxp/lpc_eth.c ++++ b/drivers/net/ethernet/nxp/lpc_eth.c +@@ -999,7 +999,7 @@ static int lpc_eth_poll(struct napi_stru + rx_done = __lpc_handle_recv(ndev, budget); + + if (rx_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, rx_done); + lpc_eth_enable_int(pldat->net_base); + } + +--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c ++++ 
b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +@@ -2391,7 +2391,7 @@ static int pch_gbe_napi_poll(struct napi + poll_end_flag = true; + + if (poll_end_flag) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + pch_gbe_irq_enable(adapter); + } + +--- a/drivers/net/ethernet/pasemi/pasemi_mac.c ++++ b/drivers/net/ethernet/pasemi/pasemi_mac.c +@@ -1575,7 +1575,7 @@ static int pasemi_mac_poll(struct napi_s + pkts = pasemi_mac_clean_rx(rx_ring(mac), budget); + if (pkts < budget) { + /* all done, no more packets present */ +- napi_complete(napi); ++ napi_complete_done(napi, pkts); + + pasemi_mac_restart_rx_intr(mac); + pasemi_mac_restart_tx_intr(mac); +--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c ++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +@@ -2391,7 +2391,7 @@ static int netxen_nic_poll(struct napi_s + work_done = budget; + + if (work_done < budget) { +- napi_complete(&sds_ring->napi); ++ napi_complete_done(&sds_ring->napi, work_done); + if (test_bit(__NX_DEV_UP, &adapter->state)) + netxen_nic_enable_int(sds_ring); + } +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +@@ -975,7 +975,7 @@ static int qlcnic_poll(struct napi_struc + work_done = budget; + + if (work_done < budget) { +- napi_complete(&sds_ring->napi); ++ napi_complete_done(&sds_ring->napi, work_done); + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { + qlcnic_enable_sds_intr(adapter, sds_ring); + qlcnic_enable_tx_intr(adapter, tx_ring); +@@ -1019,7 +1019,7 @@ static int qlcnic_rx_poll(struct napi_st + work_done = qlcnic_process_rcv_ring(sds_ring, budget); + + if (work_done < budget) { +- napi_complete(&sds_ring->napi); ++ napi_complete_done(&sds_ring->napi, work_done); + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) + qlcnic_enable_sds_intr(adapter, sds_ring); + } +@@ -1966,7 +1966,7 @@ static int qlcnic_83xx_msix_sriov_vf_pol + work_done = budget; + + if (work_done < budget) { +- napi_complete(&sds_ring->napi); ++ napi_complete_done(&sds_ring->napi, work_done); + qlcnic_enable_sds_intr(adapter, sds_ring); + } + +@@ -1994,7 +1994,7 @@ static int qlcnic_83xx_poll(struct napi_ + work_done = budget; + + if (work_done < budget) { +- napi_complete(&sds_ring->napi); ++ napi_complete_done(&sds_ring->napi, work_done); + qlcnic_enable_sds_intr(adapter, sds_ring); + } + +@@ -2032,7 +2032,7 @@ static int qlcnic_83xx_rx_poll(struct na + adapter = sds_ring->adapter; + work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); + if (work_done < budget) { +- napi_complete(&sds_ring->napi); ++ napi_complete_done(&sds_ring->napi, work_done); + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) + qlcnic_enable_sds_intr(adapter, sds_ring); + } +--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c ++++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c +@@ -2334,7 +2334,7 @@ static int ql_napi_poll_msix(struct napi + } + + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + ql_enable_completion_interrupt(qdev, rx_ring->irq); + } + return work_done; +--- a/drivers/net/ethernet/qualcomm/emac/emac.c ++++ b/drivers/net/ethernet/qualcomm/emac/emac.c +@@ -129,7 +129,7 @@ static int emac_napi_rtx(struct napi_str + emac_mac_rx_process(adpt, rx_q, &work_done, budget); + + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + + irq->mask |= rx_q->intr; + writel(irq->mask, adpt->base + EMAC_INT_MASK); +--- a/drivers/net/ethernet/realtek/r8169.c ++++ 
b/drivers/net/ethernet/realtek/r8169.c +@@ -7579,7 +7579,7 @@ static int rtl8169_poll(struct napi_stru + } + + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + + rtl_irq_enable(tp, enable_mask); + mmiowb(); +--- a/drivers/net/ethernet/rocker/rocker_main.c ++++ b/drivers/net/ethernet/rocker/rocker_main.c +@@ -2480,7 +2480,7 @@ static int rocker_port_poll_rx(struct na + } + + if (credits < budget) +- napi_complete(napi); ++ napi_complete_done(napi, credits); + + rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits); + +--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c ++++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +@@ -1578,7 +1578,7 @@ static int sxgbe_poll(struct napi_struct + + work_done = sxgbe_rx(priv, budget); + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum); + } + +--- a/drivers/net/ethernet/sfc/efx.c ++++ b/drivers/net/ethernet/sfc/efx.c +@@ -332,7 +332,7 @@ static int efx_poll(struct napi_struct * + * since efx_nic_eventq_read_ack() will have no effect if + * interrupts have already been disabled. + */ +- napi_complete(napi); ++ napi_complete_done(napi, spent); + efx_nic_eventq_read_ack(channel); + } + +--- a/drivers/net/ethernet/smsc/smsc9420.c ++++ b/drivers/net/ethernet/smsc/smsc9420.c +@@ -869,7 +869,7 @@ static int smsc9420_rx_poll(struct napi_ + smsc9420_pci_flush_write(pd); + + if (work_done < budget) { +- napi_complete(&pd->napi); ++ napi_complete_done(&pd->napi, work_done); + + /* re-enable RX DMA interrupts */ + dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -2677,7 +2677,7 @@ static int stmmac_poll(struct napi_struc + + work_done = stmmac_rx(priv, budget); + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + stmmac_enable_dma_irq(priv); + } + return work_done; +--- a/drivers/net/ethernet/sun/niu.c ++++ b/drivers/net/ethernet/sun/niu.c +@@ -3786,7 +3786,7 @@ static int niu_poll(struct napi_struct * + work_done = niu_poll_core(np, lp, budget); + + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + niu_ldg_rearm(np, lp, 1); + } + return work_done; +--- a/drivers/net/ethernet/sun/sungem.c ++++ b/drivers/net/ethernet/sun/sungem.c +@@ -922,7 +922,7 @@ static int gem_poll(struct napi_struct * + gp->status = readl(gp->regs + GREG_STAT); + } while (gp->status & GREG_STAT_NAPI); + +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + gem_enable_ints(gp); + + return work_done; +--- a/drivers/net/ethernet/sun/sunvnet_common.c ++++ b/drivers/net/ethernet/sun/sunvnet_common.c +@@ -850,7 +850,7 @@ int sunvnet_poll_common(struct napi_stru + int processed = vnet_event_napi(port, budget); + + if (processed < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, processed); + port->rx_event &= ~LDC_EVENT_DATA_READY; + vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED); + } +--- a/drivers/net/ethernet/tehuti/tehuti.c ++++ b/drivers/net/ethernet/tehuti/tehuti.c +@@ -303,7 +303,7 @@ static int bdx_poll(struct napi_struct * + * device lock and allow waiting tasks (eg rmmod) to advance) */ + priv->napi_stop = 0; + +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + bdx_enable_interrupts(priv); + } + return work_done; +--- a/drivers/net/ethernet/ti/cpsw.c ++++ b/drivers/net/ethernet/ti/cpsw.c +@@ 
-839,7 +839,7 @@ static int cpsw_rx_poll(struct napi_stru + } + + if (num_rx < budget) { +- napi_complete(napi_rx); ++ napi_complete_done(napi_rx, num_rx); + writel(0xff, &cpsw->wr_regs->rx_en); + if (cpsw->quirk_irq && cpsw->rx_irq_disabled) { + cpsw->rx_irq_disabled = false; +--- a/drivers/net/ethernet/ti/davinci_emac.c ++++ b/drivers/net/ethernet/ti/davinci_emac.c +@@ -1295,7 +1295,7 @@ static int emac_poll(struct napi_struct + &emac_rxhost_errcodes[cause][0], ch); + } + } else if (num_rx_pkts < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, num_rx_pkts); + emac_int_enable(priv); + } + +--- a/drivers/net/ethernet/ti/netcp_core.c ++++ b/drivers/net/ethernet/ti/netcp_core.c +@@ -939,7 +939,7 @@ static int netcp_rx_poll(struct napi_str + + netcp_rxpool_refill(netcp); + if (packets < budget) { +- napi_complete(&netcp->rx_napi); ++ napi_complete_done(&netcp->rx_napi, packets); + knav_queue_enable_notify(netcp->rx_queue); + } + +--- a/drivers/net/ethernet/tile/tilegx.c ++++ b/drivers/net/ethernet/tile/tilegx.c +@@ -678,7 +678,7 @@ static int tile_net_poll(struct napi_str + } + + /* There are no packets left. */ +- napi_complete(&info_mpipe->napi); ++ napi_complete_done(&info_mpipe->napi, work); + + md = &mpipe_data[instance]; + /* Re-enable hypervisor interrupts. */ +--- a/drivers/net/ethernet/tile/tilepro.c ++++ b/drivers/net/ethernet/tile/tilepro.c +@@ -842,7 +842,7 @@ static int tile_net_poll(struct napi_str + } + } + +- napi_complete(&info->napi); ++ napi_complete_done(&info->napi, work); + + if (!priv->active) + goto done; +--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c ++++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c +@@ -1109,7 +1109,7 @@ static int gelic_net_poll(struct napi_st + } + + if (packets_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, packets_done); + gelic_card_rx_irq_on(card); + } + return packets_done; +--- a/drivers/net/ethernet/toshiba/spider_net.c ++++ b/drivers/net/ethernet/toshiba/spider_net.c +@@ -1270,7 +1270,7 @@ static int spider_net_poll(struct napi_s + /* if all packets are in the stack, enable interrupts and return 0 */ + /* if not, return 1 */ + if (packets_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, packets_done); + spider_net_rx_irq_on(card); + card->ignore_rx_ramfull = 0; + } +--- a/drivers/net/ethernet/toshiba/tc35815.c ++++ b/drivers/net/ethernet/toshiba/tc35815.c +@@ -1639,7 +1639,7 @@ static int tc35815_poll(struct napi_stru + spin_unlock(&lp->rx_lock); + + if (received < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, received); + /* enable interrupts */ + tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); + } +--- a/drivers/net/ethernet/tundra/tsi108_eth.c ++++ b/drivers/net/ethernet/tundra/tsi108_eth.c +@@ -887,7 +887,7 @@ static int tsi108_poll(struct napi_struc + + if (num_received < budget) { + data->rxpending = 0; +- napi_complete(napi); ++ napi_complete_done(napi, num_received); + + TSI_WRITE(TSI108_EC_INTMASK, + TSI_READ(TSI108_EC_INTMASK) +--- a/drivers/net/ethernet/via/via-rhine.c ++++ b/drivers/net/ethernet/via/via-rhine.c +@@ -861,7 +861,7 @@ static int rhine_napipoll(struct napi_st + } + + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + iowrite16(enable_mask, ioaddr + IntrEnable); + mmiowb(); + } +--- a/drivers/net/ethernet/via/via-velocity.c ++++ b/drivers/net/ethernet/via/via-velocity.c +@@ -2160,7 +2160,7 @@ static int velocity_poll(struct napi_str + velocity_tx_srv(vptr); + /* If budget not fully 
consumed, exit the polling mode */ + if (rx_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, rx_done); + mac_enable_int(vptr->mac_regs); + } + spin_unlock_irqrestore(&vptr->lock, flags); +--- a/drivers/net/ethernet/wiznet/w5100.c ++++ b/drivers/net/ethernet/wiznet/w5100.c +@@ -915,7 +915,7 @@ static int w5100_napi_poll(struct napi_s + } + + if (rx_count < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, rx_count); + w5100_enable_intr(priv); + } + +--- a/drivers/net/ethernet/wiznet/w5300.c ++++ b/drivers/net/ethernet/wiznet/w5300.c +@@ -417,7 +417,7 @@ static int w5300_napi_poll(struct napi_s + } + + if (rx_count < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, rx_count); + w5300_write(priv, W5300_IMR, IR_S0); + mmiowb(); + } +--- a/drivers/net/fjes/fjes_main.c ++++ b/drivers/net/fjes/fjes_main.c +@@ -1122,7 +1122,7 @@ static int fjes_poll(struct napi_struct + } + + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + + if (adapter->unset_rx_last) { + adapter->rx_last_jiffies = jiffies; +--- a/drivers/net/vmxnet3/vmxnet3_drv.c ++++ b/drivers/net/vmxnet3/vmxnet3_drv.c +@@ -1851,7 +1851,7 @@ vmxnet3_poll(struct napi_struct *napi, i + rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget); + + if (rxd_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, rxd_done); + vmxnet3_enable_all_intrs(rx_queue->adapter); + } + return rxd_done; +@@ -1882,7 +1882,7 @@ vmxnet3_poll_rx_only(struct napi_struct + rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget); + + if (rxd_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, rxd_done); + vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx); + } + return rxd_done; +--- a/drivers/net/wan/fsl_ucc_hdlc.c ++++ b/drivers/net/wan/fsl_ucc_hdlc.c +@@ -573,7 +573,7 @@ static int ucc_hdlc_poll(struct napi_str + howmany += hdlc_rx_done(priv, budget - howmany); + + if (howmany < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, howmany); + qe_setbits32(priv->uccf->p_uccm, + (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16); + } +--- a/drivers/net/wan/hd64572.c ++++ b/drivers/net/wan/hd64572.c +@@ -341,7 +341,7 @@ static int sca_poll(struct napi_struct * + received = sca_rx_done(port, budget); + + if (received < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, received); + enable_intr(port); + } + +--- a/drivers/net/wireless/ath/ath10k/pci.c ++++ b/drivers/net/wireless/ath/ath10k/pci.c +@@ -2800,7 +2800,7 @@ static int ath10k_pci_napi_poll(struct n + done = ath10k_htt_txrx_compl_task(ar, budget); + + if (done < budget) { +- napi_complete(ctx); ++ napi_complete_done(ctx, done); + /* In case of MSI, it is possible that interrupts are received + * while NAPI poll is inprogress. 
So pending interrupts that are + * received after processing all copy engine pipes by NAPI poll +--- a/drivers/net/wireless/ath/wil6210/netdev.c ++++ b/drivers/net/wireless/ath/wil6210/netdev.c +@@ -84,7 +84,7 @@ static int wil6210_netdev_poll_rx(struct + done = budget - quota; + + if (done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, done); + wil6210_unmask_irq_rx(wil); + wil_dbg_txrx(wil, "NAPI RX complete\n"); + } +--- a/drivers/net/xen-netback/interface.c ++++ b/drivers/net/xen-netback/interface.c +@@ -104,7 +104,7 @@ static int xenvif_poll(struct napi_struc + work_done = xenvif_tx_action(queue, budget); + + if (work_done < budget) { +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + /* If the queue is rate-limited, it shall be + * rescheduled in the timer callback. + */ +--- a/drivers/net/xen-netfront.c ++++ b/drivers/net/xen-netfront.c +@@ -1059,7 +1059,7 @@ err: + if (work_done < budget) { + int more_to_do = 0; + +- napi_complete(napi); ++ napi_complete_done(napi, work_done); + + RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do); + if (more_to_do) +--- a/drivers/staging/octeon/ethernet-rx.c ++++ b/drivers/staging/octeon/ethernet-rx.c +@@ -429,7 +429,7 @@ static int cvm_oct_napi_poll(struct napi + + if (rx_count < budget) { + /* No more work */ +- napi_complete(napi); ++ napi_complete_done(napi, rx_count); + enable_irq(rx_group->irq); + } + return rx_count; +--- a/drivers/staging/unisys/visornic/visornic_main.c ++++ b/drivers/staging/unisys/visornic/visornic_main.c +@@ -1657,7 +1657,7 @@ static int visornic_poll(struct napi_str + + /* If there aren't any more packets to receive stop the poll */ + if (rx_count < budget) +- napi_complete(napi); ++ napi_complete_done(napi, rx_count); + + return rx_count; + } diff --git a/target/linux/generic/hack-4.9/661-use_fq_codel_by_default.patch b/target/linux/generic/hack-4.9/661-use_fq_codel_by_default.patch index b0af139f8..4e7b4ad1d 100644 --- a/target/linux/generic/hack-4.9/661-use_fq_codel_by_default.patch +++ b/target/linux/generic/hack-4.9/661-use_fq_codel_by_default.patch @@ -83,7 +83,7 @@ Signed-off-by: Felix Fietkau EXPORT_SYMBOL(default_qdisc_ops); /* Main transmission queue. 
*/ -@@ -759,7 +759,7 @@ static void attach_one_default_qdisc(str +@@ -760,7 +760,7 @@ static void attach_one_default_qdisc(str void *_unused) { struct Qdisc *qdisc; diff --git a/target/linux/generic/hack-4.9/902-debloat_proc.patch b/target/linux/generic/hack-4.9/902-debloat_proc.patch index 39cb2693d..6881d5faf 100644 --- a/target/linux/generic/hack-4.9/902-debloat_proc.patch +++ b/target/linux/generic/hack-4.9/902-debloat_proc.patch @@ -327,7 +327,7 @@ Signed-off-by: Felix Fietkau --- a/net/core/sock.c +++ b/net/core/sock.c -@@ -3082,6 +3082,8 @@ static __net_initdata struct pernet_oper +@@ -3084,6 +3084,8 @@ static __net_initdata struct pernet_oper static int __init proto_init(void) { diff --git a/target/linux/generic/pending-4.9/630-packet_socket_type.patch b/target/linux/generic/pending-4.9/630-packet_socket_type.patch index e0c5ced29..dea2e2ce7 100644 --- a/target/linux/generic/pending-4.9/630-packet_socket_type.patch +++ b/target/linux/generic/pending-4.9/630-packet_socket_type.patch @@ -30,7 +30,7 @@ Signed-off-by: Felix Fietkau #define PACKET_FANOUT_LB 1 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c -@@ -1772,6 +1772,7 @@ static int packet_rcv_spkt(struct sk_buf +@@ -1778,6 +1778,7 @@ static int packet_rcv_spkt(struct sk_buf { struct sock *sk; struct sockaddr_pkt *spkt; @@ -38,7 +38,7 @@ Signed-off-by: Felix Fietkau /* * When we registered the protocol we saved the socket in the data -@@ -1779,6 +1780,7 @@ static int packet_rcv_spkt(struct sk_buf +@@ -1785,6 +1786,7 @@ static int packet_rcv_spkt(struct sk_buf */ sk = pt->af_packet_priv; @@ -46,7 +46,7 @@ Signed-off-by: Felix Fietkau /* * Yank back the headers [hope the device set this -@@ -1791,7 +1793,7 @@ static int packet_rcv_spkt(struct sk_buf +@@ -1797,7 +1799,7 @@ static int packet_rcv_spkt(struct sk_buf * so that this procedure is noop. */ @@ -55,7 +55,7 @@ Signed-off-by: Felix Fietkau goto out; if (!net_eq(dev_net(dev), sock_net(sk))) -@@ -2029,12 +2031,12 @@ static int packet_rcv(struct sk_buff *sk +@@ -2035,12 +2037,12 @@ static int packet_rcv(struct sk_buff *sk unsigned int snaplen, res; bool is_drop_n_account = false; @@ -71,7 +71,7 @@ Signed-off-by: Felix Fietkau if (!net_eq(dev_net(dev), sock_net(sk))) goto drop; -@@ -2160,12 +2162,12 @@ static int tpacket_rcv(struct sk_buff *s +@@ -2166,12 +2168,12 @@ static int tpacket_rcv(struct sk_buff *s BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); @@ -87,7 +87,7 @@ Signed-off-by: Felix Fietkau if (!net_eq(dev_net(dev), sock_net(sk))) goto drop; -@@ -3240,6 +3242,7 @@ static int packet_create(struct net *net +@@ -3250,6 +3252,7 @@ static int packet_create(struct net *net mutex_init(&po->pg_vec_lock); po->rollover = NULL; po->prot_hook.func = packet_rcv; @@ -95,7 +95,7 @@ Signed-off-by: Felix Fietkau if (sock->type == SOCK_PACKET) po->prot_hook.func = packet_rcv_spkt; -@@ -3826,6 +3829,16 @@ packet_setsockopt(struct socket *sock, i +@@ -3836,6 +3839,16 @@ packet_setsockopt(struct socket *sock, i po->xmit = val ? 
packet_direct_xmit : dev_queue_xmit; return 0; } @@ -112,7 +112,7 @@ Signed-off-by: Felix Fietkau default: return -ENOPROTOOPT; } -@@ -3878,6 +3891,13 @@ static int packet_getsockopt(struct sock +@@ -3888,6 +3901,13 @@ static int packet_getsockopt(struct sock case PACKET_VNET_HDR: val = po->has_vnet_hdr; break; diff --git a/target/linux/generic/pending-4.9/666-Add-support-for-MAP-E-FMRs-mesh-mode.patch b/target/linux/generic/pending-4.9/666-Add-support-for-MAP-E-FMRs-mesh-mode.patch index cf4881e12..ee58c2c5e 100644 --- a/target/linux/generic/pending-4.9/666-Add-support-for-MAP-E-FMRs-mesh-mode.patch +++ b/target/linux/generic/pending-4.9/666-Add-support-for-MAP-E-FMRs-mesh-mode.patch @@ -300,7 +300,7 @@ Signed-off-by: Steven Barth /** * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own * @t: the outgoing tunnel device -@@ -1285,6 +1425,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, str +@@ -1286,6 +1426,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, str { struct ip6_tnl *t = netdev_priv(dev); struct ipv6hdr *ipv6h = ipv6_hdr(skb); @@ -308,7 +308,7 @@ Signed-off-by: Steven Barth int encap_limit = -1; __u16 offset; struct flowi6 fl6; -@@ -1343,6 +1484,18 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, str +@@ -1344,6 +1485,18 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, str fl6.flowi6_mark = skb->mark; } @@ -327,7 +327,7 @@ Signed-off-by: Steven Barth if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6)) return -1; -@@ -1470,6 +1623,14 @@ ip6_tnl_change(struct ip6_tnl *t, const +@@ -1471,6 +1624,14 @@ ip6_tnl_change(struct ip6_tnl *t, const t->parms.flowinfo = p->flowinfo; t->parms.link = p->link; t->parms.proto = p->proto; @@ -342,7 +342,7 @@ Signed-off-by: Steven Barth dst_cache_reset(&t->dst_cache); ip6_tnl_link_config(t); return 0; -@@ -1508,6 +1669,7 @@ ip6_tnl_parm_from_user(struct __ip6_tnl_ +@@ -1509,6 +1670,7 @@ ip6_tnl_parm_from_user(struct __ip6_tnl_ p->flowinfo = u->flowinfo; p->link = u->link; p->proto = u->proto; @@ -350,7 +350,7 @@ Signed-off-by: Steven Barth memcpy(p->name, u->name, sizeof(u->name)); } -@@ -1885,6 +2047,15 @@ static int ip6_tnl_validate(struct nlatt +@@ -1886,6 +2048,15 @@ static int ip6_tnl_validate(struct nlatt return 0; } @@ -366,7 +366,7 @@ Signed-off-by: Steven Barth static void ip6_tnl_netlink_parms(struct nlattr *data[], struct __ip6_tnl_parm *parms) { -@@ -1919,6 +2090,46 @@ static void ip6_tnl_netlink_parms(struct +@@ -1920,6 +2091,46 @@ static void ip6_tnl_netlink_parms(struct if (data[IFLA_IPTUN_COLLECT_METADATA]) parms->collect_md = true; @@ -413,7 +413,7 @@ Signed-off-by: Steven Barth } static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[], -@@ -2028,6 +2239,12 @@ static void ip6_tnl_dellink(struct net_d +@@ -2029,6 +2240,12 @@ static void ip6_tnl_dellink(struct net_d static size_t ip6_tnl_get_size(const struct net_device *dev) { @@ -426,7 +426,7 @@ Signed-off-by: Steven Barth return /* IFLA_IPTUN_LINK */ nla_total_size(4) + -@@ -2055,6 +2272,24 @@ static size_t ip6_tnl_get_size(const str +@@ -2056,6 +2273,24 @@ static size_t ip6_tnl_get_size(const str nla_total_size(2) + /* IFLA_IPTUN_COLLECT_METADATA */ nla_total_size(0) + @@ -451,7 +451,7 @@ Signed-off-by: Steven Barth 0; } -@@ -2062,6 +2297,9 @@ static int ip6_tnl_fill_info(struct sk_b +@@ -2063,6 +2298,9 @@ static int ip6_tnl_fill_info(struct sk_b { struct ip6_tnl *tunnel = netdev_priv(dev); struct __ip6_tnl_parm *parm = &tunnel->parms; @@ -461,7 +461,7 @@ Signed-off-by: Steven Barth if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) || nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) || 
-@@ -2070,9 +2308,27 @@ static int ip6_tnl_fill_info(struct sk_b +@@ -2071,9 +2309,27 @@ static int ip6_tnl_fill_info(struct sk_b nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) || nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) || nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) || @@ -490,7 +490,7 @@ Signed-off-by: Steven Barth if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) || nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) || nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) || -@@ -2110,6 +2366,7 @@ static const struct nla_policy ip6_tnl_p +@@ -2111,6 +2367,7 @@ static const struct nla_policy ip6_tnl_p [IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 }, [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 }, [IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG }, diff --git a/target/linux/layerscape/32b/config-default b/target/linux/layerscape/32b/config-default deleted file mode 100644 index 7aa0dd5c5..000000000 --- a/target/linux/layerscape/32b/config-default +++ /dev/null @@ -1,152 +0,0 @@ -CONFIG_ALIGNMENT_TRAP=y -# CONFIG_ARCH_AXXIA is not set -CONFIG_ARCH_HAS_RESET_CONTROLLER=y -CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y -CONFIG_ARCH_HIBERNATION_POSSIBLE=y -CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y -CONFIG_ARCH_MULTIPLATFORM=y -# CONFIG_ARCH_MULTI_CPU_AUTO is not set -CONFIG_ARCH_MULTI_V6_V7=y -CONFIG_ARCH_MULTI_V7=y -CONFIG_ARCH_MXC=y -CONFIG_ARCH_NR_GPIO=0 -CONFIG_ARCH_REQUIRE_GPIOLIB=y -# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set -# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set -CONFIG_ARCH_SUPPORTS_UPROBES=y -CONFIG_ARCH_USE_BUILTIN_BSWAP=y -CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y -CONFIG_ARM=y -# CONFIG_ARM_CPU_SUSPEND is not set -CONFIG_ARM_ERRATA_754322=y -CONFIG_ARM_ERRATA_764369=y -CONFIG_ARM_ERRATA_775420=y -CONFIG_ARM_HAS_SG_CHAIN=y -CONFIG_ARM_HEAVY_MB=y -CONFIG_ARM_L1_CACHE_SHIFT=6 -CONFIG_ARM_L1_CACHE_SHIFT_6=y -CONFIG_ARM_LPAE=y -CONFIG_ARM_PATCH_PHYS_VIRT=y -CONFIG_ARM_PMU=y -CONFIG_ARM_THUMB=y -# CONFIG_ARM_THUMBEE is not set -CONFIG_ARM_VIRT_EXT=y -CONFIG_ATAGS=y -CONFIG_AUTO_ZRELADDR=y -CONFIG_CACHE_L2X0=y -CONFIG_CLKSRC_IMX_GPT=y -CONFIG_CPUFREQ_DT=y -CONFIG_CPU_32v6K=y -CONFIG_CPU_32v7=y -CONFIG_CPU_ABRT_EV7=y -# CONFIG_CPU_BPREDICT_DISABLE is not set -CONFIG_CPU_CACHE_V7=y -CONFIG_CPU_CACHE_VIPT=y -CONFIG_CPU_COPY_V6=y -CONFIG_CPU_CP15=y -CONFIG_CPU_CP15_MMU=y -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_COMMON=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_STAT=y -CONFIG_CPU_FREQ_STAT_DETAILS=y -CONFIG_CPU_HAS_ASID=y -# CONFIG_CPU_ICACHE_DISABLE is not set -CONFIG_CPU_PABRT_V7=y -# CONFIG_CPU_THERMAL is not set -CONFIG_CPU_TLB_V7=y -CONFIG_CPU_V7=y -CONFIG_CRYPTO_DEFLATE=y -CONFIG_CRYPTO_LZO=y -CONFIG_DEBUG_IMX_UART_PORT=1 -CONFIG_DEBUG_LL_INCLUDE="mach/debug-macro.S" -# CONFIG_DEBUG_UART_8250 is not set -# CONFIG_DEBUG_USER is not set -CONFIG_EDAC_ATOMIC_SCRUB=y -# CONFIG_ENABLE_DEFAULT_TRACERS is not set -CONFIG_EXTCON=y -CONFIG_FEC=y -# CONFIG_FSL_QMAN_FQ_LOOKUP is not set -# CONFIG_FTRACE_SYSCALLS is not set -CONFIG_GENERIC_IRQ_CHIP=y -CONFIG_GPIO_MXC=y -CONFIG_HAVE_ARM_ARCH_TIMER=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_IDE=y -CONFIG_HAVE_IMX_SRC=y -CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_MOD_ARCH_SPECIFIC=y -CONFIG_HAVE_OPROFILE=y -CONFIG_HAVE_OPTPROBES=y -CONFIG_HAVE_PROC_CPU=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_SMP=y 
-CONFIG_HZ_FIXED=0 -# CONFIG_IMX_WEIM is not set -CONFIG_MICREL_PHY=y -CONFIG_MIGHT_HAVE_CACHE_L2X0=y -CONFIG_MIGHT_HAVE_PCI=y -# CONFIG_MMC_SDHCI_ESDHC_IMX is not set -# CONFIG_MMC_MXC is not set -CONFIG_MODULES_TREE_LOOKUP=y -CONFIG_MODULES_USE_ELF_REL=y -CONFIG_MULTI_IRQ_HANDLER=y -CONFIG_NEON=y -CONFIG_NET_PTP_CLASSIFY=y -CONFIG_OLD_SIGACTION=y -CONFIG_OUTER_CACHE=y -CONFIG_OUTER_CACHE_SYNC=y -CONFIG_PAGE_OFFSET=0x80000000 -# CONFIG_PATA_IMX is not set -CONFIG_PERF_EVENTS=y -CONFIG_PINCTRL=y -# CONFIG_PINCTRL_SINGLE is not set -CONFIG_PL310_ERRATA_588369=y -CONFIG_PL310_ERRATA_727915=y -# CONFIG_PL310_ERRATA_753970 is not set -CONFIG_PL310_ERRATA_769419=y -CONFIG_PM_OPP=y -CONFIG_PPS=y -# CONFIG_PROBE_EVENTS is not set -CONFIG_PTP_1588_CLOCK=y -CONFIG_RTC_CLASS=y -# CONFIG_RTC_DRV_IMXDI is not set -# CONFIG_RTC_DRV_MXC is not set -# CONFIG_SERIAL_IMX is not set -CONFIG_SMP_ON_UP=y -CONFIG_SOC_BUS=y -# CONFIG_SOC_IMX50 is not set -# CONFIG_SOC_IMX51 is not set -# CONFIG_SOC_IMX53 is not set -# CONFIG_SOC_IMX6Q is not set -# CONFIG_SOC_IMX6SL is not set -# CONFIG_SOC_IMX6SX is not set -# CONFIG_SOC_IMX6UL is not set -# CONFIG_SOC_IMX7D is not set -# CONFIG_SOC_LS1021A is not set -# CONFIG_SOC_VF610 is not set -# CONFIG_SPI_IMX is not set -CONFIG_SRAM=y -CONFIG_SWP_EMULATE=y -CONFIG_SYS_SUPPORTS_APM_EMULATION=y -CONFIG_THERMAL_WRITABLE_TRIPS=y -# CONFIG_THUMB2_KERNEL is not set -CONFIG_TRACING_EVENTS_GPIO=y -CONFIG_UNCOMPRESS_INCLUDE="debug/uncompress.h" -# CONFIG_USB_IMX21_HCD is not set -CONFIG_USE_OF=y -CONFIG_VECTORS_BASE=0xffff0000 -CONFIG_VFP=y -CONFIG_VFPv3=y -CONFIG_WATCHDOG_CORE=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_BCJ=y -CONFIG_ZBOOT_ROM_BSS=0x0 -CONFIG_ZBOOT_ROM_TEXT=0x0 -CONFIG_ZLIB_DEFLATE=y -CONFIG_ZONE_DMA_FLAG=0 diff --git a/target/linux/layerscape/32b/profiles/00-default.mk b/target/linux/layerscape/32b/profiles/00-default.mk deleted file mode 120000 index 7b7774985..000000000 --- a/target/linux/layerscape/32b/profiles/00-default.mk +++ /dev/null @@ -1 +0,0 @@ -../../64b/profiles/00-default.mk \ No newline at end of file diff --git a/target/linux/layerscape/64b/config-default b/target/linux/layerscape/64b/config-default deleted file mode 100644 index 960b0770e..000000000 --- a/target/linux/layerscape/64b/config-default +++ /dev/null @@ -1,183 +0,0 @@ -CONFIG_64BIT=y -CONFIG_ACPI=y -CONFIG_ACPI_CCA_REQUIRED=y -# CONFIG_ACPI_CONTAINER is not set -# CONFIG_ACPI_CUSTOM_DSDT is not set -# CONFIG_ACPI_DEBUG is not set -# CONFIG_ACPI_DEBUGGER is not set -# CONFIG_ACPI_DOCK is not set -# CONFIG_ACPI_EC_DEBUGFS is not set -CONFIG_ACPI_FAN=y -CONFIG_ACPI_GENERIC_GSI=y -# CONFIG_ACPI_PCI_SLOT is not set -CONFIG_ACPI_PROCESSOR=y -CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y -CONFIG_ACPI_THERMAL=y -# CONFIG_ARCH_BCM_IPROC is not set -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -# CONFIG_ARCH_EXYNOS7 is not set -CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y -# CONFIG_ARCH_SEATTLE is not set -CONFIG_ARCH_SELECT_MEMORY_MODEL=y -CONFIG_ARCH_SPARSEMEM_DEFAULT=y -CONFIG_ARCH_SPARSEMEM_ENABLE=y -# CONFIG_ARCH_SPRD is not set -# CONFIG_ARCH_STRATIX10 is not set -# CONFIG_ARCH_THUNDER is not set -CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y -CONFIG_ARCH_WANT_FRAME_POINTERS=y -CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y -# CONFIG_ARCH_XGENE is not set -# CONFIG_ARCH_ZYNQMP is not set -CONFIG_ARM64=y -# CONFIG_ARM64_16K_PAGES is not set -CONFIG_ARM64_4K_PAGES=y -# CONFIG_ARM64_64K_PAGES is not set -# CONFIG_ARM64_CRYPTO is not set -CONFIG_ARM64_ERRATUM_819472=y -CONFIG_ARM64_ERRATUM_824069=y 
-CONFIG_ARM64_ERRATUM_826319=y
-CONFIG_ARM64_ERRATUM_827319=y
-CONFIG_ARM64_ERRATUM_832075=y
-CONFIG_ARM64_ERRATUM_843419=y
-CONFIG_ARM64_ERRATUM_845719=y
-CONFIG_ARM64_HW_AFDBM=y
-# CONFIG_ARM64_LSE_ATOMICS is not set
-CONFIG_ARM64_PAN=y
-# CONFIG_ARM64_PTDUMP is not set
-# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set
-CONFIG_ARM64_VA_BITS=39
-CONFIG_ARM64_VA_BITS_39=y
-# CONFIG_ARM64_VA_BITS_48 is not set
-# CONFIG_ARMV8_DEPRECATED is not set
-CONFIG_ARM_AMBA=y
-CONFIG_ARM_GIC_V2M=y
-CONFIG_ARM_GIC_V3=y
-CONFIG_ARM_GIC_V3_ITS=y
-# CONFIG_ARM_PL172_MPMC is not set
-CONFIG_ARM_PSCI_FW=y
-# CONFIG_ARM_SP805_WATCHDOG is not set
-CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y
-CONFIG_BLOCK_COMPAT=y
-CONFIG_BOUNCE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_BUILD_BIN2C=y
-# CONFIG_CAVIUM_ERRATUM_22375 is not set
-# CONFIG_CAVIUM_ERRATUM_23154 is not set
-CONFIG_CLKSRC_ACPI=y
-CONFIG_CLK_SP810=y
-CONFIG_CLK_VEXPRESS_OSC=y
-CONFIG_CMDLINE="console=ttyAMA0"
-CONFIG_COMMON_CLK_VERSATILE=y
-CONFIG_COMMON_CLK_XGENE=y
-CONFIG_COMPAT=y
-CONFIG_COMPAT_BINFMT_ELF=y
-CONFIG_COMPAT_NETLINK_MESSAGES=y
-CONFIG_COMPAT_OLD_SIGACTION=y
-# CONFIG_CPU_BIG_ENDIAN is not set
-CONFIG_CUSE=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEFAULT_IOSCHED="noop"
-CONFIG_DEFAULT_NOOP=y
-CONFIG_DMI=y
-CONFIG_DMIID=y
-# CONFIG_DMI_SYSFS is not set
-CONFIG_EFI=y
-# CONFIG_EFIVAR_FS is not set
-CONFIG_EFI_ARMSTUB=y
-CONFIG_EFI_ESRT=y
-CONFIG_EFI_PARAMS_FROM_FDT=y
-CONFIG_EFI_RUNTIME_WRAPPERS=y
-CONFIG_EFI_STUB=y
-# CONFIG_EFI_VARS is not set
-CONFIG_FSL_MC_BUS=y
-CONFIG_FSL_QMAN_FQ_LOOKUP=y
-CONFIG_FS_MBCACHE=y
-CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
-CONFIG_GENERIC_CPU_AUTOPROBE=y
-CONFIG_GENERIC_CSUM=y
-CONFIG_GENERIC_EARLY_IOREMAP=y
-CONFIG_GENERIC_TIME_VSYSCALL=y
-CONFIG_GPIO_ACPI=y
-# CONFIG_GPIO_AMDPT is not set
-# CONFIG_GPIO_XGENE is not set
-CONFIG_GRACE_PERIOD=y
-CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
-CONFIG_HAVE_ARCH_KASAN=y
-CONFIG_HAVE_CMPXCHG_DOUBLE=y
-CONFIG_HAVE_CMPXCHG_LOCAL=y
-CONFIG_HAVE_DEBUG_BUGVERBOSE=y
-CONFIG_HAVE_MEMORY_PRESENT=y
-CONFIG_HAVE_PATA_PLATFORM=y
-# CONFIG_HPET is not set
-CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_VIRTIO=y
-CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
-# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_IP_PNP_RARP is not set
-CONFIG_JBD2=y
-# CONFIG_LIQUIDIO is not set
-CONFIG_LOCKD=y
-CONFIG_LOGO=y
-CONFIG_LOGO_LINUX_CLUT224=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_MFD_CORE=y
-CONFIG_MFD_VEXPRESS_SYSREG=y
-CONFIG_MODULES_USE_ELF_RELA=y
-CONFIG_NEED_SG_DMA_LENGTH=y
-CONFIG_NFS_FS=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NO_HZ=y
-CONFIG_PCI_BUS_ADDR_T_64BIT=y
-# CONFIG_PCI_HISI is not set
-CONFIG_PCI_LABEL=y
-# CONFIG_PHY_XGENE is not set
-# CONFIG_PMIC_OPREGION is not set
-CONFIG_PNP=y
-CONFIG_PNPACPI=y
-CONFIG_PNP_DEBUG_MESSAGES=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_POSIX_MQUEUE_SYSCTL=y
-# CONFIG_PREEMPT_NONE is not set
-CONFIG_PREEMPT_VOLUNTARY=y
-CONFIG_PROFILING=y
-# CONFIG_POWER_RESET_VEXPRESS is not set
-CONFIG_ROOT_NFS=y
-# CONFIG_SCSI_LOWLEVEL is not set
-# CONFIG_SCSI_PROC_FS is not set
-CONFIG_SERIAL_8250_PNP=y
-# CONFIG_SERIAL_AMBA_PL010 is not set
-CONFIG_SERIAL_AMBA_PL011=y
-CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
-CONFIG_SMC91X=y
-CONFIG_SPARSEMEM=y
-CONFIG_SPARSEMEM_EXTREME=y
-CONFIG_SPARSEMEM_MANUAL=y
-CONFIG_SPARSEMEM_VMEMMAP=y
-CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
-CONFIG_SUNRPC=y
-# CONFIG_SWAP is not set
-CONFIG_SYSCTL_EXCEPTION_TRACE=y
-CONFIG_SYSVIPC_COMPAT=y
-# CONFIG_THUNDER_NIC_BGX is not set
-# CONFIG_THUNDER_NIC_PF is not set
-# CONFIG_THUNDER_NIC_VF is not set
-CONFIG_UCS2_STRING=y
-CONFIG_VEXPRESS_CONFIG=y
-CONFIG_VEXPRESS_SYSCFG=y
-CONFIG_VFAT_FS=y
-CONFIG_VIDEOMODE_HELPERS=y
-CONFIG_VIRTIO=y
-CONFIG_VIRTIO_BLK=y
-# CONFIG_VIRTIO_CONSOLE is not set
-CONFIG_VIRTIO_MMIO=y
-# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set
-CONFIG_VIRTIO_NET=y
diff --git a/target/linux/layerscape/Makefile b/target/linux/layerscape/Makefile
index f511407f9..67da8449a 100644
--- a/target/linux/layerscape/Makefile
+++ b/target/linux/layerscape/Makefile
@@ -9,18 +9,17 @@ include $(TOPDIR)/rules.mk
 BOARD:=layerscape
 BOARDNAME:=NXP Layerscape
 DEVICE_TYPE:=developerboard
-KERNEL_PATCHVER:=4.4
-KERNELNAME:=Image dtbs
+KERNEL_PATCHVER:=4.9
 FEATURES:=squashfs nand usb pcie gpio
-SUBTARGETS:=64b 32b
-MAINTAINER:=Jiang Yutang
-
-include $(INCLUDE_DIR)/target.mk
+SUBTARGETS:=armv8_64b armv8_32b
+MAINTAINER:=Yangbo Lu
 
 define Target/Description
-	Build firmware images for $(BOARDNAME) SoC devices.
+	Build firmware images for NXP Layerscape based boards.
 endef
 
+include $(INCLUDE_DIR)/target.mk
+
 DEFAULT_PACKAGES += kmod-usb3 kmod-usb-dwc3 kmod-usb-storage
 
 $(eval $(call BuildTarget))
diff --git a/target/linux/layerscape/armv8_32b/config-4.9 b/target/linux/layerscape/armv8_32b/config-4.9
new file mode 100644
index 000000000..f0687f380
--- /dev/null
+++ b/target/linux/layerscape/armv8_32b/config-4.9
@@ -0,0 +1,1444 @@
+CONFIG_ABX500_CORE=y
+CONFIG_AD525X_DPOT=y
+CONFIG_AD525X_DPOT_I2C=y
+# CONFIG_AD525X_DPOT_SPI is not set
+CONFIG_ADVISE_SYSCALLS=y
+CONFIG_AHCI_IMX=y
+CONFIG_AHCI_QORIQ=y
+CONFIG_AK8975=y
+CONFIG_ALIGNMENT_TRAP=y
+CONFIG_APDS9802ALS=y
+CONFIG_AQUANTIA_PHY=y
+# CONFIG_ARCH_AXXIA is not set
+CONFIG_ARCH_CLOCKSOURCE_DATA=y
+CONFIG_ARCH_HAS_BANDGAP=y
+CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
+CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
+CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y
+CONFIG_ARCH_HAS_RESET_CONTROLLER=y
+CONFIG_ARCH_HAS_SG_CHAIN=y
+CONFIG_ARCH_HAS_TICK_BROADCAST=y
+CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
+CONFIG_ARCH_MULTIPLATFORM=y
+# CONFIG_ARCH_MULTI_CPU_AUTO is not set
+CONFIG_ARCH_MULTI_V6_V7=y
+CONFIG_ARCH_MULTI_V7=y
+CONFIG_ARCH_MXC=y
+CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED=y
+CONFIG_ARCH_NR_GPIO=512
+CONFIG_ARCH_OMAP=y
+CONFIG_ARCH_OMAP2PLUS=y
+CONFIG_ARCH_OMAP2PLUS_TYPICAL=y
+CONFIG_ARCH_OMAP3=y
+CONFIG_ARCH_OMAP4=y
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
+CONFIG_ARCH_SUPPORTS_BIG_ENDIAN=y
+CONFIG_ARCH_SUPPORTS_UPROBES=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_ARCH_USE_BUILTIN_BSWAP=y
+CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
+CONFIG_ARCH_VIRT=y
+CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
+CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y
+CONFIG_ARM=y
+CONFIG_ARM_AMBA=y
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ARCH_TIMER=y
+CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
+# CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_EXTEND is not set
+CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_ARM_CPU_SUSPEND=y
+CONFIG_ARM_CRYPTO=y
+CONFIG_ARM_ERRATA_430973=y
+CONFIG_ARM_ERRATA_643719=y
+CONFIG_ARM_ERRATA_720789=y
+CONFIG_ARM_ERRATA_754322=y
+CONFIG_ARM_ERRATA_754327=y
+CONFIG_ARM_ERRATA_764369=y
+CONFIG_ARM_ERRATA_775420=y
+CONFIG_ARM_ERRATA_798181=y
+CONFIG_ARM_GIC=y
+CONFIG_ARM_GIC_V2M=y
+CONFIG_ARM_GIC_V3=y
+CONFIG_ARM_HAS_SG_CHAIN=y +CONFIG_ARM_HEAVY_MB=y +# CONFIG_ARM_HIGHBANK_CPUIDLE is not set +CONFIG_ARM_IMX6Q_CPUFREQ=y +CONFIG_ARM_L1_CACHE_SHIFT=6 +CONFIG_ARM_L1_CACHE_SHIFT_6=y +CONFIG_ARM_LPAE=y +CONFIG_ARM_OMAP2PLUS_CPUFREQ=y +CONFIG_ARM_PATCH_IDIV=y +CONFIG_ARM_PATCH_PHYS_VIRT=y +# CONFIG_ARM_PL172_MPMC is not set +CONFIG_ARM_PMU=y +CONFIG_ARM_PSCI=y +CONFIG_ARM_PSCI_FW=y +# CONFIG_ARM_SCPI_PROTOCOL is not set +# CONFIG_ARM_SMMU is not set +CONFIG_ARM_SP805_WATCHDOG=y +CONFIG_ARM_THUMB=y +CONFIG_ARM_THUMBEE=y +CONFIG_ARM_TIMER_SP804=y +CONFIG_ARM_UNWIND=y +CONFIG_ARM_VIRT_EXT=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_AT803X_PHY=y +CONFIG_ATA=y +CONFIG_ATAGS=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_AUTOFS4_FS=y +CONFIG_AUTO_ZRELADDR=y +# CONFIG_AXP20X_POWER is not set +CONFIG_BACKLIGHT_AS3711=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_GENERIC=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_BACKLIGHT_PWM=y +# CONFIG_BACKLIGHT_TPS65217 is not set +CONFIG_BATTERY_ACT8945A=y +CONFIG_BATTERY_SBS=y +CONFIG_BCMA=y +CONFIG_BCMA_BLOCKIO=y +# CONFIG_BCMA_DEBUG is not set +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +CONFIG_BCMA_DRIVER_PCI=y +CONFIG_BCMA_HOST_PCI=y +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_SOC=y +CONFIG_BCMA_SFLASH=y +CONFIG_BCM_NET_PHYLIB=y +# CONFIG_BLK_CGROUP is not set +CONFIG_BLK_CMDLINE_PARSER=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NVME=y +# CONFIG_BLK_DEV_NVME_SCSI is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=262144 +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SR=y +# CONFIG_BLK_DEV_SR_VENDOR is not set +CONFIG_BLK_MQ_PCI=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_BOUNCE=y +# CONFIG_BPF_SYSCALL is not set +CONFIG_BRCMSTB_GISB_ARB=y +CONFIG_BROADCOM_PHY=y +CONFIG_BUILD_BIN2C=y +CONFIG_CACHE_L2X0=y +# CONFIG_CACHE_L2X0_PMU is not set +CONFIG_CAN=y +# CONFIG_CAN_8DEV_USB is not set +CONFIG_CAN_BCM=y +CONFIG_CAN_CALC_BITTIMING=y +# CONFIG_CAN_CC770 is not set +# CONFIG_CAN_C_CAN is not set +CONFIG_CAN_DEV=y +# CONFIG_CAN_EMS_USB is not set +# CONFIG_CAN_ESD_USB2 is not set +CONFIG_CAN_FLEXCAN=y +# CONFIG_CAN_GRCAN is not set +CONFIG_CAN_GW=y +# CONFIG_CAN_KVASER_USB is not set +# CONFIG_CAN_LEDS is not set +CONFIG_CAN_MCP251X=y +# CONFIG_CAN_PEAK_USB is not set +CONFIG_CAN_RAW=y +# CONFIG_CAN_SJA1000 is not set +# CONFIG_CAN_SOFTING is not set +# CONFIG_CAN_TI_HECC is not set +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +# CONFIG_CFS_BANDWIDTH is not set +CONFIG_CGROUPS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_FREEZER=y +# CONFIG_CGROUP_NET_CLASSID is not set +# CONFIG_CGROUP_PERF is not set +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_SCHED=y +# CONFIG_CHARGER_MAX14577 is not set +CONFIG_CHARGER_TPS65090=y +# CONFIG_CHARGER_TPS65217 is not set +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_CHROME_PLATFORMS=y +CONFIG_CHR_DEV_SG=y +CONFIG_CLKDEV_LOOKUP=y +CONFIG_CLKSRC_IMX_GPT=y +CONFIG_CLKSRC_MMIO=y +CONFIG_CLKSRC_OF=y +CONFIG_CLKSRC_PROBE=y +CONFIG_CLKSRC_TI_32K=y +CONFIG_CLKSRC_VERSATILE=y +CONFIG_CLK_QORIQ=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_CMA=y +CONFIG_CMA_ALIGNMENT=8 +CONFIG_CMA_AREAS=7 +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +CONFIG_CMA_SIZE_MBYTES=64 +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_MIN is not set +# 
CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +CONFIG_CMDLINE_PARTITION=y +CONFIG_COMMON_CLK=y +CONFIG_COMMON_CLK_MAX77686=y +# CONFIG_COMMON_CLK_PALMAS is not set +# CONFIG_COMMON_CLK_RK808 is not set +# CONFIG_COMMON_CLK_S2MPS11 is not set +CONFIG_COMMON_CLK_TI_ADPLL=y +CONFIG_COMPACTION=y +CONFIG_COMPAT_BRK=y +CONFIG_CONFIGFS_FS=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_COREDUMP=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +# CONFIG_CORTINA_PHY is not set +CONFIG_CPUFREQ_DT=y +CONFIG_CPUFREQ_DT_PLATDEV=y +CONFIG_CPUSETS=y +CONFIG_CPU_32v6K=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +# CONFIG_CPU_BIG_ENDIAN is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_HAS_ASID=y +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_CPU_ICACHE_DISABLE is not set +CONFIG_CPU_IDLE=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_PM=y +CONFIG_CPU_RMAP=y +CONFIG_CPU_THERMAL=y +CONFIG_CPU_TLB_V7=y +CONFIG_CPU_V7=y +CONFIG_CRC16=y +# CONFIG_CRC32_SARWATE is not set +CONFIG_CRC32_SLICEBY8=y +CONFIG_CROSS_MEMORY_ATTACH=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +# CONFIG_CRYPTO_AES_ARM_CE is not set +# CONFIG_CRYPTO_ARC4 is not set +CONFIG_CRYPTO_AUTHENC=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC is not set +# CONFIG_CRYPTO_DEV_MXC_SCC is not set +# CONFIG_CRYPTO_DEV_OMAP_AES is not set +# CONFIG_CRYPTO_DEV_OMAP_DES is not set +# CONFIG_CRYPTO_DEV_OMAP_SHAM is not set +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_ECHAINIV=y +# CONFIG_CRYPTO_GHASH_ARM_CE is not set +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_LZO=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +# CONFIG_CRYPTO_PCRYPT is not set +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA1_ARM_CE is not set +# CONFIG_CRYPTO_SHA1_ARM_NEON is not set +CONFIG_CRYPTO_SHA256=y +# CONFIG_CRYPTO_SHA256_ARM is not set +# CONFIG_CRYPTO_SHA2_ARM_CE is not set +# CONFIG_CRYPTO_SHA512_ARM is not set +# CONFIG_CRYPTO_TLS is not set +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_DEBUG_ALIGN_RODATA=y +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_IMX_UART_PORT=1 +CONFIG_DEBUG_LL_INCLUDE="mach/debug-macro.S" +CONFIG_DEBUG_RODATA=y +# CONFIG_DEBUG_UART_8250 is not set +# CONFIG_DEBUG_USER is not set +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +CONFIG_DEFAULT_IOSCHED="cfq" +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_DEVFREQ_GOV_PASSIVE is not set +# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set +# 
CONFIG_DEVFREQ_GOV_POWERSAVE is not set +# CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND is not set +# CONFIG_DEVFREQ_GOV_USERSPACE is not set +# CONFIG_DEVFREQ_THERMAL is not set +CONFIG_DEVKMEM=y +CONFIG_DEVMEM=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_DMADEVICES=y +CONFIG_DMA_CMA=y +CONFIG_DMA_ENGINE=y +CONFIG_DMA_OF=y +CONFIG_DMA_OMAP=y +CONFIG_DMA_SHARED_BUFFER=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DNOTIFY=y +CONFIG_DNS_RESOLVER=y +CONFIG_DRM=y +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FSL_DCU=y +CONFIG_DRM_GEM_CMA_HELPER=y +CONFIG_DRM_KMS_CMA_HELPER=y +CONFIG_DRM_KMS_FB_HELPER=y +CONFIG_DRM_KMS_HELPER=y +CONFIG_DRM_PANEL=y +CONFIG_DRM_PANEL_SIMPLE=y +CONFIG_DRM_SII902X=y +# CONFIG_DRM_VIRTIO_GPU is not set +CONFIG_DST_CACHE=y +CONFIG_DTC=y +CONFIG_DT_IDLE_STATES=y +CONFIG_DUMMY_CONSOLE=y +CONFIG_DWMAC_GENERIC=y +CONFIG_DW_DMAC=y +CONFIG_DW_DMAC_CORE=y +CONFIG_DW_WATCHDOG=y +CONFIG_E1000E=y +CONFIG_EDAC=y +CONFIG_EDAC_ATOMIC_SCRUB=y +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_LEGACY_SYSFS=y +# CONFIG_EDAC_MM_EDAC is not set +CONFIG_EDAC_SUPPORT=y +CONFIG_EEPROM_93CX6=y +CONFIG_EEPROM_AT24=y +CONFIG_EFI=y +# CONFIG_EFIVAR_FS is not set +CONFIG_EFI_ARMSTUB=y +# CONFIG_EFI_CAPSULE_LOADER is not set +CONFIG_EFI_ESRT=y +CONFIG_EFI_PARAMS_FROM_FDT=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_STUB=y +# CONFIG_EFI_TEST is not set +# CONFIG_EFI_VARS is not set +CONFIG_ELF_CORE=y +# CONFIG_ENABLE_DEFAULT_TRACERS is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_EXPORTFS=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXTCON=y +# CONFIG_EXTCON_AXP288 is not set +# CONFIG_EXTCON_MAX14577 is not set +# CONFIG_EXTCON_MAX8997 is not set +# CONFIG_EXTCON_PALMAS is not set +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_FAT_FS=y +CONFIG_FB=y +CONFIG_FB_ARMCLCD=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_CMDLINE=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_EFI=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_MX3=y +CONFIG_FB_SIMPLE=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_SYS_IMAGEBLIT=y +CONFIG_FEC=y +CONFIG_FHANDLE=y +CONFIG_FIRMWARE_IN_KERNEL=y +CONFIG_FIXED_PHY=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_FMAN_ARM=y +# CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN is not set +# CONFIG_FMAN_P1023 is not set +# CONFIG_FMAN_P3040_P4080_P5020 is not set +# CONFIG_FMAN_PFC is not set +# CONFIG_FMAN_V3H is not set +# CONFIG_FMAN_V3L is not set +# CONFIG_FONTS is not set +CONFIG_FONT_8x16=y +CONFIG_FONT_8x8=y +CONFIG_FONT_SUPPORT=y +CONFIG_FORCE_MAX_ZONEORDER=12 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_FREEZER=y +CONFIG_FSL_BMAN_CONFIG=y +CONFIG_FSL_BMAN_DEBUGFS=y +# CONFIG_FSL_BMAN_TEST is not set +# CONFIG_FSL_DPAA_1588 is not set +CONFIG_FSL_DPAA_ADVANCED_DRIVERS=y +# CONFIG_FSL_DPAA_CEETM is not set +CONFIG_FSL_DPAA_CS_THRESHOLD_10G=0x10000000 +CONFIG_FSL_DPAA_CS_THRESHOLD_1G=0x06000000 +# CONFIG_FSL_DPAA_DBG_LOOP is not set +# CONFIG_FSL_DPAA_ETH_DEBUG is not set +CONFIG_FSL_DPAA_ETH_DEBUGFS=y +# CONFIG_FSL_DPAA_ETH_JUMBO_FRAME is not set +CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT=128 +CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD=80 +CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE=y +# CONFIG_FSL_DPAA_HOOKS is not set +CONFIG_FSL_DPAA_INGRESS_CS_THRESHOLD=0x10000000 +CONFIG_FSL_DPAA_OFFLINE_PORTS=y +# CONFIG_FSL_DPAA_TS is not set +CONFIG_FSL_DPA_CAN_WAIT=y +CONFIG_FSL_DPA_CAN_WAIT_SYNC=y +# CONFIG_FSL_DPA_CHECKING is not set 
+CONFIG_FSL_DPA_PIRQ_FAST=y +CONFIG_FSL_DPA_PIRQ_SLOW=y +CONFIG_FSL_DPA_PORTAL_SHARE=y +CONFIG_FSL_EDMA=y +CONFIG_FSL_FM_MAX_FRAME_SIZE=1522 +CONFIG_FSL_FM_RX_EXTRA_HEADROOM=64 +CONFIG_FSL_GUTS=y +CONFIG_FSL_PQ_MDIO=y +# CONFIG_FSL_QDMA is not set +CONFIG_FSL_QMAN_CI_SCHED_CFG_BMAN_W=2 +CONFIG_FSL_QMAN_CI_SCHED_CFG_RW_W=2 +CONFIG_FSL_QMAN_CI_SCHED_CFG_SRCCIV=4 +CONFIG_FSL_QMAN_CI_SCHED_CFG_SRQ_W=3 +CONFIG_FSL_QMAN_CONFIG=y +CONFIG_FSL_QMAN_DEBUGFS=y +CONFIG_FSL_QMAN_FQD_SZ=10 +# CONFIG_FSL_QMAN_FQ_LOOKUP is not set +CONFIG_FSL_QMAN_INIT_TIMEOUT=10 +CONFIG_FSL_QMAN_PFDR_SZ=13 +CONFIG_FSL_QMAN_PIRQ_DQRR_ITHRESH=12 +CONFIG_FSL_QMAN_PIRQ_IPERIOD=100 +CONFIG_FSL_QMAN_PIRQ_MR_ITHRESH=4 +CONFIG_FSL_QMAN_POLL_LIMIT=32 +# CONFIG_FSL_QMAN_TEST is not set +CONFIG_FSL_SDK_BMAN=y +CONFIG_FSL_SDK_DPA=y +CONFIG_FSL_SDK_DPAA_ETH=y +CONFIG_FSL_SDK_FMAN=y +# CONFIG_FSL_SDK_FMAN_TEST is not set +CONFIG_FSL_SDK_QMAN=y +CONFIG_FSL_USDPAA=y +CONFIG_FSL_XGMAC_MDIO=y +CONFIG_FS_MBCACHE=y +CONFIG_FS_POSIX_ACL=y +CONFIG_FTRACE=y +# CONFIG_FTRACE_SYSCALLS is not set +CONFIG_FUSE_FS=y +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_EARLY_IOREMAP=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_GENERIC_IO=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_PHY=y +CONFIG_GENERIC_PINCONF=y +CONFIG_GENERIC_SCHED_CLOCK=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GIANFAR=y +CONFIG_GLOB=y +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_GPIO_AXP209 is not set +CONFIG_GPIO_DWAPB=y +CONFIG_GPIO_EM=y +CONFIG_GPIO_GENERIC=y +CONFIG_GPIO_GENERIC_PLATFORM=y +CONFIG_GPIO_MXC=y +CONFIG_GPIO_OMAP=y +CONFIG_GPIO_PALMAS=y +CONFIG_GPIO_PCA953X=y +CONFIG_GPIO_PCA953X_IRQ=y +CONFIG_GPIO_PCF857X=y +CONFIG_GPIO_PL061=y +# CONFIG_GPIO_STMPE is not set +CONFIG_GPIO_SYSCON=y +# CONFIG_GPIO_TPS65218 is not set +CONFIG_GPIO_TPS6586X=y +CONFIG_GPIO_TPS65910=y +CONFIG_GPIO_TWL4030=y +CONFIG_GPIO_XILINX=y +CONFIG_GRACE_PERIOD=y +CONFIG_HANDLE_DOMAIN_IRQ=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_HAS_DMA=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_HAVE_ARCH_PFN_VALID=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARM_ARCH_TIMER=y +CONFIG_HAVE_ARM_SCU=y +CONFIG_HAVE_ARM_SMCCC=y +CONFIG_HAVE_ARM_TWD=y +# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set +CONFIG_HAVE_CBPF_JIT=y +CONFIG_HAVE_CC_STACKPROTECTOR=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_HAVE_DEBUG_KMEMLEAK=y +CONFIG_HAVE_DMA_API_DEBUG=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_HAVE_GENERIC_RCU_GUP=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_IDE=y +CONFIG_HAVE_IMX_SRC=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y 
+CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_HAVE_NET_DSA=y +CONFIG_HAVE_OPROFILE=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_SMP=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_UID16=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HDMI=y +CONFIG_HID=y +CONFIG_HID_GENERIC=y +CONFIG_HIGHMEM=y +CONFIG_HIGHPTE=y +CONFIG_HIX5HD2_GMAC=y +CONFIG_HOTPLUG_CPU=y +# CONFIG_HUGETLBFS is not set +CONFIG_HVC_DRIVER=y +CONFIG_HWMON=y +CONFIG_HW_CONSOLE=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_OMAP=y +CONFIG_HW_RANDOM_OMAP3_ROM=y +CONFIG_HZ_FIXED=0 +CONFIG_I2C=y +CONFIG_I2C_ALGOBIT=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_DEMUX_PINCTRL=y +CONFIG_I2C_DESIGNWARE_CORE=y +CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_IMX=y +CONFIG_I2C_MUX=y +CONFIG_I2C_MUX_PCA954x=y +CONFIG_I2C_MUX_PINCTRL=y +CONFIG_I2C_NOMADIK=y +CONFIG_I2C_OMAP=y +CONFIG_I2C_RK3X=y +CONFIG_I2C_SLAVE=y +CONFIG_I2C_SLAVE_EEPROM=y +CONFIG_I2C_XILINX=y +CONFIG_ICPLUS_PHY=y +CONFIG_ICS932S401=y +CONFIG_IGB=y +CONFIG_IGB_HWMON=y +CONFIG_IIO=y +CONFIG_IIO_BUFFER=y +CONFIG_IIO_CONFIGFS=y +CONFIG_IIO_HRTIMER_TRIGGER=y +CONFIG_IIO_KFIFO_BUF=y +CONFIG_IIO_SW_TRIGGER=y +# CONFIG_IIO_TIGHTLOOP_TRIGGER is not set +CONFIG_IIO_TRIGGER=y +CONFIG_IIO_TRIGGERED_BUFFER=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_IMX2_WDT=y +CONFIG_IMX_DMA=y +CONFIG_IMX_SDMA=y +# CONFIG_IMX_WEIM is not set +CONFIG_INET6_XFRM_MODE_BEET=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET_DIAG=y +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_INET_ESP=y +CONFIG_INET_TCP_DIAG=y +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_BEET=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +CONFIG_INET_XFRM_MODE_TUNNEL=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_INPUT=y +# CONFIG_INPUT_AXP20X_PEK is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_JOYDEV=y +CONFIG_INPUT_KEYBOARD=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_MATRIXKMAP=y +# CONFIG_INPUT_MAX8997_HAPTIC is not set +CONFIG_INPUT_MOUSE=y +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_IOMMU_HELPER=y +# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set +# CONFIG_IOMMU_IO_PGTABLE_LPAE is not set +CONFIG_IOMMU_SUPPORT=y +CONFIG_IOSCHED_CFQ=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_NAT=y +CONFIG_IP6_NF_TARGET_MASQUERADE=y +# CONFIG_IP6_NF_TARGET_NPT is not set +CONFIG_IPC_NS=y +CONFIG_IPV6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_SIT=y +# CONFIG_IPV6_SUBTREES is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +# CONFIG_IP_MROUTE is not set +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_RARP=y +CONFIG_IP_VS=y +# CONFIG_IP_VS_DEBUG is not set +# CONFIG_IP_VS_DH is not set +# CONFIG_IP_VS_FO is not set +# CONFIG_IP_VS_IPV6 is not set +# CONFIG_IP_VS_LBLC is not set +# CONFIG_IP_VS_LBLCR is not set +# CONFIG_IP_VS_LC is not set +# CONFIG_IP_VS_NFCT is not set +# CONFIG_IP_VS_NQ is not set +# CONFIG_IP_VS_OVF is not set 
+# CONFIG_IP_VS_PROTO_AH is not set +# CONFIG_IP_VS_PROTO_AH_ESP is not set +# CONFIG_IP_VS_PROTO_ESP is not set +# CONFIG_IP_VS_PROTO_SCTP is not set +# CONFIG_IP_VS_PROTO_TCP is not set +# CONFIG_IP_VS_PROTO_UDP is not set +# CONFIG_IP_VS_RR is not set +# CONFIG_IP_VS_SED is not set +# CONFIG_IP_VS_SH is not set +CONFIG_IP_VS_SH_TAB_BITS=8 +CONFIG_IP_VS_TAB_BITS=12 +# CONFIG_IP_VS_WLC is not set +# CONFIG_IP_VS_WRR is not set +CONFIG_IRQCHIP=y +CONFIG_IRQ_CROSSBAR=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_DEBUG=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_IRQ_WORK=y +# CONFIG_ISDN is not set +CONFIG_ISL29003=y +CONFIG_JBD2=y +# CONFIG_JFFS2_FS is not set +CONFIG_KALLSYMS=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_XZ is not set +CONFIG_KEXEC=y +CONFIG_KEXEC_CORE=y +CONFIG_KEYBOARD_ATKBD=y +CONFIG_KEYBOARD_BCM=y +CONFIG_KEYBOARD_GPIO=y +# CONFIG_KEYBOARD_IMX is not set +# CONFIG_KEYBOARD_STMPE is not set +CONFIG_KEYS=y +CONFIG_KS8851=y +CONFIG_KVM=y +CONFIG_KVM_ARM_HOST=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_VFIO=y +# CONFIG_LCD_CLASS_DEVICE is not set +CONFIG_LEDS_GPIO=y +# CONFIG_LEDS_MAX8997 is not set +CONFIG_LEDS_PWM=y +CONFIG_LEDS_TRIGGER_BACKLIGHT=y +CONFIG_LEDS_TRIGGER_CAMERA=y +CONFIG_LEDS_TRIGGER_CPU=y +CONFIG_LEDS_TRIGGER_GPIO=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_ONESHOT=y +CONFIG_LEDS_TRIGGER_TRANSIENT=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +CONFIG_LIBFDT=y +CONFIG_LOCALVERSION_AUTO=y +CONFIG_LOCKD=y +CONFIG_LOCKUP_DETECTOR=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_LOGO=y +CONFIG_LOGO_LINUX_CLUT224=y +CONFIG_LOGO_LINUX_MONO=y +CONFIG_LOGO_LINUX_VGA16=y +CONFIG_LZ4_DECOMPRESS=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_MACB=y +CONFIG_MACH_OMAP3517EVM=y +CONFIG_MACH_OMAP3_PANDORA=y +CONFIG_MACH_OMAP_GENERIC=y +CONFIG_MACVLAN=y +CONFIG_MACVTAP=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAILBOX=y +# CONFIG_MAILBOX_TEST is not set +CONFIG_MANDATORY_FILE_LOCKING=y +CONFIG_MARVELL_PHY=y +CONFIG_MCPM=y +CONFIG_MDIO_BITBANG=y +CONFIG_MDIO_BOARDINFO=y +CONFIG_MDIO_BUS_MUX=y +CONFIG_MDIO_BUS_MUX_MMIOREG=y +# CONFIG_MDIO_FSL_BACKPLANE is not set +# CONFIG_MDIO_GPIO is not set +CONFIG_MEMCG=y +# CONFIG_MEMCG_SWAP is not set +CONFIG_MEMORY=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_MFD_ACT8945A=y +CONFIG_MFD_AS3711=y +CONFIG_MFD_AS3722=y +CONFIG_MFD_ATMEL_FLEXCOM=y +CONFIG_MFD_AXP20X=y +CONFIG_MFD_AXP20X_I2C=y +CONFIG_MFD_BCM590XX=y +CONFIG_MFD_CORE=y +CONFIG_MFD_MAX14577=y +CONFIG_MFD_MAX77686=y +CONFIG_MFD_MAX8907=y +CONFIG_MFD_MAX8997=y +CONFIG_MFD_MAX8998=y +CONFIG_MFD_OMAP_USB_HOST=y +CONFIG_MFD_PALMAS=y +CONFIG_MFD_RK808=y +CONFIG_MFD_SEC_CORE=y +CONFIG_MFD_STMPE=y +CONFIG_MFD_SYSCON=y +CONFIG_MFD_TPS65090=y +CONFIG_MFD_TPS65217=y +CONFIG_MFD_TPS65218=y +CONFIG_MFD_TPS6586X=y +CONFIG_MFD_TPS65910=y +# CONFIG_MFD_TWL4030_AUDIO is not set +CONFIG_MFD_VEXPRESS_SYSREG=y +CONFIG_MICREL_PHY=y +CONFIG_MIGHT_HAVE_CACHE_L2X0=y +CONFIG_MIGHT_HAVE_PCI=y +CONFIG_MIGRATION=y +CONFIG_MMC=y +CONFIG_MMC_ARMMMCI=y +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=16 +CONFIG_MMC_DW=y +CONFIG_MMC_DW_EXYNOS=y +# CONFIG_MMC_DW_K3 is not set +# CONFIG_MMC_DW_PCI is not set +CONFIG_MMC_DW_PLTFM=y +# CONFIG_MMC_MXC is not set +CONFIG_MMC_OMAP=y +CONFIG_MMC_OMAP_HS=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_ESDHC_IMX=y +CONFIG_MMC_SDHCI_IO_ACCESSORS=y +CONFIG_MMC_SDHCI_OF_ARASAN=y +CONFIG_MMC_SDHCI_OF_AT91=y +CONFIG_MMC_SDHCI_OF_ESDHC=y +# CONFIG_MMC_SDHCI_PCI is not set +CONFIG_MMC_SDHCI_PLTFM=y +# CONFIG_MMC_TIFM_SD is not set 
+CONFIG_MMU_NOTIFIER=y +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_MODULES_USE_ELF_REL=y +# CONFIG_MOUSE_BCM5974 is not set +# CONFIG_MOUSE_CYAPA is not set +CONFIG_MOUSE_ELAN_I2C=y +CONFIG_MOUSE_ELAN_I2C_I2C=y +# CONFIG_MOUSE_ELAN_I2C_SMBUS is not set +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_TRACKPOINT=y +# CONFIG_MOUSE_SERIAL is not set +# CONFIG_MOUSE_VSXXXAA is not set +CONFIG_MSDOS_FS=y +# CONFIG_MTD_BCM47XXSFLASH is not set +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_GEOMETRY=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_DATAFLASH=y +# CONFIG_MTD_DATAFLASH_OTP is not set +# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set +CONFIG_MTD_M25P80=y +CONFIG_MTD_NAND=y +CONFIG_MTD_NAND_BRCMNAND=y +CONFIG_MTD_NAND_DENALI=y +CONFIG_MTD_NAND_DENALI_DT=y +CONFIG_MTD_NAND_ECC=y +CONFIG_MTD_SPI_NOR=y +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y +CONFIG_MTD_SST25L=y +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_BLOCK is not set +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MULTI_IRQ_HANDLER=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_MVMDIO=y +CONFIG_MX3_IPU=y +CONFIG_MX3_IPU_IRQS=4 +CONFIG_NAMESPACES=y +CONFIG_NATIONAL_PHY=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEON=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_XTABLES=y +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_IPVS=y +CONFIG_NETFILTER_XT_NAT=y +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=y +CONFIG_NETLINK_DIAG=y +# CONFIG_NET_CLS_CGROUP is not set +CONFIG_NET_FLOW_LIMIT=y +CONFIG_NET_INGRESS=y +CONFIG_NET_IP_TUNNEL=y +CONFIG_NET_KEY=y +CONFIG_NET_NS=y +CONFIG_NET_PACKET_ENGINE=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NET_SWITCHDEV=y +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_NFS_ACL_SUPPORT=y +CONFIG_NFS_FS=y +CONFIG_NFS_USE_KERNEL_DNS=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_V2=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_NF_CONNTRACK_MARK=y +# CONFIG_NF_CONNTRACK_RTCACHE is not set +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_DEFRAG_IPV6=y +# CONFIG_NF_LOG_IPV6 is not set +CONFIG_NF_NAT=y +CONFIG_NF_NAT_IPV4=y +CONFIG_NF_NAT_IPV6=y +CONFIG_NF_NAT_MASQUERADE_IPV4=y +CONFIG_NF_NAT_MASQUERADE_IPV6=y +CONFIG_NF_NAT_NEEDED=y +# CONFIG_NF_NAT_REDIRECT is not set +CONFIG_NLS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_UTF8=y +CONFIG_NO_BOOTMEM=y +CONFIG_NO_HZ=y +CONFIG_NO_HZ_COMMON=y +CONFIG_NO_HZ_IDLE=y +CONFIG_NR_CPUS=16 +CONFIG_NTFS_FS=y +CONFIG_NVMEM=y +CONFIG_NVME_CORE=y +CONFIG_OF=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_ADDRESS_PCI=y +CONFIG_OF_DYNAMIC=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_FLATTREE=y +CONFIG_OF_GPIO=y +CONFIG_OF_IRQ=y +CONFIG_OF_MDIO=y +CONFIG_OF_NET=y +CONFIG_OF_PCI=y +CONFIG_OF_PCI_IRQ=y +CONFIG_OF_RESERVED_MEM=y +CONFIG_OID_REGISTRY=y +CONFIG_OLD_SIGACTION=y +CONFIG_OLD_SIGSUSPEND3=y +# CONFIG_OMAP2PLUS_MBOX is not set +# CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE is not set +# CONFIG_OMAP3_SDRC_AC_TIMING is not set +# CONFIG_OMAP5_ERRATA_801819 is not set +CONFIG_OMAP_32K_TIMER=y +CONFIG_OMAP_CONTROL_PHY=y +CONFIG_OMAP_DM_TIMER=y 
+CONFIG_OMAP_GPMC=y +# CONFIG_OMAP_GPMC_DEBUG is not set +CONFIG_OMAP_INTERCONNECT=y +CONFIG_OMAP_INTERCONNECT_BARRIER=y +# CONFIG_OMAP_IOMMU is not set +CONFIG_OMAP_IRQCHIP=y +CONFIG_OMAP_MUX=y +# CONFIG_OMAP_MUX_DEBUG is not set +CONFIG_OMAP_MUX_WARNINGS=y +CONFIG_OMAP_OCP2SCP=y +CONFIG_OMAP_PACKAGE_CBB=y +CONFIG_OMAP_PM_NOOP=y +# CONFIG_OMAP_RESET_CLOCKS is not set +CONFIG_OMAP_USB2=y +# CONFIG_OMAP_WATCHDOG is not set +CONFIG_OUTER_CACHE=y +CONFIG_OUTER_CACHE_SYNC=y +CONFIG_PACKET_DIAG=y +CONFIG_PAGE_COUNTER=y +CONFIG_PAGE_OFFSET=0xC0000000 +# CONFIG_PALMAS_GPADC is not set +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_PARTITION_PERCPU=y +CONFIG_PCI=y +CONFIG_PCIEAER=y +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_PERFORMANCE is not set +# CONFIG_PCIEASPM_POWERSAVE is not set +CONFIG_PCIEPORTBUS=y +CONFIG_PCIE_DW=y +CONFIG_PCIE_PME=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +# CONFIG_PCI_DRA7XX is not set +CONFIG_PCI_ECAM=y +CONFIG_PCI_HOST_COMMON=y +CONFIG_PCI_HOST_GENERIC=y +CONFIG_PCI_LAYERSCAPE=y +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y +CONFIG_PGTABLE_LEVELS=3 +CONFIG_PHYLIB=y +CONFIG_PHYS_ADDR_T_64BIT=y +# CONFIG_PHY_DM816X_USB is not set +CONFIG_PID_NS=y +CONFIG_PINCTRL=y +CONFIG_PINCTRL_AS3722=y +CONFIG_PINCTRL_PALMAS=y +CONFIG_PL310_ERRATA_588369=y +CONFIG_PL310_ERRATA_727915=y +CONFIG_PL310_ERRATA_753970=y +CONFIG_PL310_ERRATA_769419=y +CONFIG_PL320_MBOX=y +CONFIG_PL330_DMA=y +# CONFIG_PLAT_VERSATILE_CLCD is not set +CONFIG_PM=y +CONFIG_PM_CLK=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_DEVFREQ=y +# CONFIG_PM_DEVFREQ_EVENT is not set +CONFIG_PM_OPP=y +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_POWER_AVS=y +# CONFIG_POWER_AVS_OMAP is not set +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_AS3722=y +CONFIG_POWER_RESET_BRCMKONA=y +CONFIG_POWER_RESET_BRCMSTB=y +CONFIG_POWER_RESET_GPIO=y +CONFIG_POWER_RESET_GPIO_RESTART=y +CONFIG_POWER_RESET_SYSCON=y +CONFIG_POWER_RESET_SYSCON_POWEROFF=y +CONFIG_POWER_RESET_VEXPRESS=y +CONFIG_POWER_SUPPLY=y +CONFIG_PPS=y +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PRINTK_TIME=y +# CONFIG_PROBE_EVENTS is not set +CONFIG_PROC_CHILDREN=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_PSTORE=y +CONFIG_PSTORE_CONSOLE=y +# CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_LZO_COMPRESS is not set +CONFIG_PSTORE_PMSG=y +CONFIG_PSTORE_RAM=y +CONFIG_PSTORE_ZLIB_COMPRESS=y +CONFIG_PTP_1588_CLOCK=y +# CONFIG_PTP_1588_CLOCK_DPAA is not set +CONFIG_PTP_1588_CLOCK_GIANFAR=y +CONFIG_PWM=y +# CONFIG_PWM_IMX is not set +# CONFIG_PWM_OMAP_DMTIMER is not set +# CONFIG_PWM_STMPE is not set +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_TIECAP is not set +# CONFIG_PWM_TIEHRPWM is not set +# CONFIG_PWM_TWL is not set +# CONFIG_PWM_TWL_LED is not set +CONFIG_QMAN_CEETM_UPDATE_PERIOD=1000 +CONFIG_QORIQ_CPUFREQ=y +# CONFIG_QUICC_ENGINE is not set +CONFIG_R8169=y +CONFIG_RAS=y +CONFIG_RATIONAL=y +CONFIG_RCU_CPU_STALL_TIMEOUT=21 +# CONFIG_RCU_EXPERT is not set +CONFIG_RCU_STALL_COMMON=y +CONFIG_RD_BZIP2=y +CONFIG_RD_GZIP=y +CONFIG_RD_LZ4=y +CONFIG_RD_LZMA=y +CONFIG_RD_LZO=y +CONFIG_RD_XZ=y +CONFIG_REALTEK_PHY=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_IRQ=y +CONFIG_REGMAP_MMIO=y +CONFIG_REGMAP_SPI=y +CONFIG_REGULATOR=y +CONFIG_REGULATOR_ACT8865=y 
+CONFIG_REGULATOR_ACT8945A=y +CONFIG_REGULATOR_ANATOP=y +CONFIG_REGULATOR_AS3711=y +CONFIG_REGULATOR_AS3722=y +CONFIG_REGULATOR_AXP20X=y +CONFIG_REGULATOR_BCM590XX=y +CONFIG_REGULATOR_DA9210=y +CONFIG_REGULATOR_FAN53555=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_GPIO=y +CONFIG_REGULATOR_LP872X=y +# CONFIG_REGULATOR_MAX14577 is not set +CONFIG_REGULATOR_MAX77686=y +# CONFIG_REGULATOR_MAX77802 is not set +CONFIG_REGULATOR_MAX8907=y +CONFIG_REGULATOR_MAX8973=y +# CONFIG_REGULATOR_MAX8997 is not set +# CONFIG_REGULATOR_MAX8998 is not set +CONFIG_REGULATOR_PALMAS=y +CONFIG_REGULATOR_PBIAS=y +CONFIG_REGULATOR_PWM=y +# CONFIG_REGULATOR_QCOM_SPMI is not set +CONFIG_REGULATOR_RK808=y +# CONFIG_REGULATOR_S2MPA01 is not set +CONFIG_REGULATOR_S2MPS11=y +CONFIG_REGULATOR_S5M8767=y +CONFIG_REGULATOR_TI_ABB=y +CONFIG_REGULATOR_TPS51632=y +CONFIG_REGULATOR_TPS62360=y +CONFIG_REGULATOR_TPS65090=y +CONFIG_REGULATOR_TPS65217=y +CONFIG_REGULATOR_TPS65218=y +CONFIG_REGULATOR_TPS6586X=y +CONFIG_REGULATOR_TPS65910=y +CONFIG_REGULATOR_TWL4030=y +CONFIG_REGULATOR_VEXPRESS=y +CONFIG_RESET_CONTROLLER=y +CONFIG_RFS_ACCEL=y +CONFIG_ROOT_NFS=y +CONFIG_RPS=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_AS3722=y +# CONFIG_RTC_DRV_CMOS is not set +CONFIG_RTC_DRV_DS1307=y +CONFIG_RTC_DRV_DS1307_HWMON=y +CONFIG_RTC_DRV_DS3232=y +# CONFIG_RTC_DRV_EFI is not set +CONFIG_RTC_DRV_EM3027=y +# CONFIG_RTC_DRV_IMXDI is not set +CONFIG_RTC_DRV_MAX77686=y +CONFIG_RTC_DRV_MAX8907=y +# CONFIG_RTC_DRV_MAX8997 is not set +# CONFIG_RTC_DRV_MAX8998 is not set +# CONFIG_RTC_DRV_MXC is not set +CONFIG_RTC_DRV_PALMAS=y +CONFIG_RTC_DRV_PCF2127=y +CONFIG_RTC_DRV_PCF85263=y +CONFIG_RTC_DRV_PL031=y +# CONFIG_RTC_DRV_RK808 is not set +# CONFIG_RTC_DRV_S5M is not set +CONFIG_RTC_DRV_TPS6586X=y +CONFIG_RTC_DRV_TPS65910=y +CONFIG_RTC_DRV_TWL4030=y +CONFIG_RTC_I2C_AND_SPI=y +# CONFIG_RT_GROUP_SCHED is not set +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI_PLATFORM=y +CONFIG_SATA_MV=y +CONFIG_SATA_PMP=y +CONFIG_SATA_SIL24=y +CONFIG_SCHED_DEBUG=y +# CONFIG_SCHED_INFO is not set +CONFIG_SCSI=y +CONFIG_SECCOMP=y +CONFIG_SECCOMP_FILTER=y +CONFIG_SENSORS_IIO_HWMON=y +CONFIG_SENSORS_ISL29018=y +CONFIG_SENSORS_ISL29028=y +CONFIG_SENSORS_LM90=y +CONFIG_SENSORS_LM95245=y +CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_8250_EM=y +CONFIG_SERIAL_8250_FSL=y +CONFIG_SERIAL_8250_NR_UARTS=4 +# CONFIG_SERIAL_8250_OMAP is not set +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_SERIAL_BCM63XX=y +CONFIG_SERIAL_BCM63XX_CONSOLE=y +CONFIG_SERIAL_CONEXANT_DIGICOLOR=y +CONFIG_SERIAL_CONEXANT_DIGICOLOR_CONSOLE=y +CONFIG_SERIAL_FSL_LPUART=y +CONFIG_SERIAL_FSL_LPUART_CONSOLE=y +CONFIG_SERIAL_IMX=y +CONFIG_SERIAL_IMX_CONSOLE=y +CONFIG_SERIAL_MCTRL_GPIO=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SERIAL_OMAP=y +CONFIG_SERIAL_OMAP_CONSOLE=y +CONFIG_SERIAL_ST_ASC=y +CONFIG_SERIAL_ST_ASC_CONSOLE=y +CONFIG_SERIAL_XILINX_PS_UART=y +CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y +CONFIG_SERIO=y +CONFIG_SERIO_AMBAKMI=y +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_SERPORT=y +CONFIG_SG_POOL=y +CONFIG_SG_SPLIT=y +CONFIG_SLUB_DEBUG=y +CONFIG_SMP=y +CONFIG_SMP_ON_UP=y +CONFIG_SMSC911X=y +# CONFIG_SMSC911X_ARCH_HOOKS is not set +CONFIG_SMSC_PHY=y +CONFIG_SOCK_DIAG=y +CONFIG_SOC_AM33XX=y +CONFIG_SOC_AM43XX=y +CONFIG_SOC_BRCMSTB=y +CONFIG_SOC_BUS=y +CONFIG_SOC_DRA7XX=y +CONFIG_SOC_HAS_OMAP2_SDRC=y +CONFIG_SOC_HAS_REALTIME_COUNTER=y +# 
CONFIG_SOC_IMX50 is not set +# CONFIG_SOC_IMX51 is not set +# CONFIG_SOC_IMX53 is not set +# CONFIG_SOC_IMX6Q is not set +# CONFIG_SOC_IMX6SL is not set +# CONFIG_SOC_IMX6SX is not set +# CONFIG_SOC_IMX6UL is not set +# CONFIG_SOC_IMX7D is not set +# CONFIG_SOC_LS1021A is not set +CONFIG_SOC_OMAP3430=y +CONFIG_SOC_OMAP5=y +CONFIG_SOC_TI81XX=y +# CONFIG_SOC_VF610 is not set +CONFIG_SPARSE_IRQ=y +CONFIG_SPI=y +CONFIG_SPI_BITBANG=y +CONFIG_SPI_CADENCE=y +# CONFIG_SPI_CADENCE_QUADSPI is not set +CONFIG_SPI_FSL_QUADSPI=y +# CONFIG_SPI_IMX is not set +CONFIG_SPI_MASTER=y +CONFIG_SPI_OMAP24XX=y +CONFIG_SPI_PL022=y +CONFIG_SPI_SPIDEV=y +CONFIG_SPI_XILINX=y +CONFIG_SPMI=y +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SRAM=y +CONFIG_SRCU=y +CONFIG_STAGING_BOARD=y +CONFIG_STMMAC_ETH=y +CONFIG_STMMAC_PLATFORM=y +CONFIG_STMPE_I2C=y +# CONFIG_STMPE_SPI is not set +CONFIG_STMP_DEVICE=y +# CONFIG_STRIP_ASM_SYMS is not set +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_SWIOTLB=y +CONFIG_SWPHY=y +CONFIG_SWP_EMULATE=y +# CONFIG_SW_SYNC is not set +CONFIG_SYNC_FILE=y +# CONFIG_SYN_COOKIES is not set +CONFIG_SYSFS_SYSCALL=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_THERMAL=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THUMB2_KERNEL is not set +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_TI_CPPI41 is not set +CONFIG_TI_CPSW=y +CONFIG_TI_CPSW_ALE=y +CONFIG_TI_CPSW_PHY_SEL=y +CONFIG_TI_DAVINCI_CPDMA=y +# CONFIG_TI_DAVINCI_EMAC is not set +CONFIG_TI_DAVINCI_MDIO=y +CONFIG_TI_DMA_CROSSBAR=y +CONFIG_TI_EDMA=y +# CONFIG_TI_EMIF is not set +CONFIG_TI_PIPE3=y +# CONFIG_TI_SOC_THERMAL is not set +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TOUCHSCREEN_PROPERTIES=y +CONFIG_TOUCHSCREEN_STMPE=y +CONFIG_TRACING_EVENTS_GPIO=y +CONFIG_TREE_RCU=y +CONFIG_TUN=y +CONFIG_TWL4030_CORE=y +CONFIG_TWL4030_POWER=y +# CONFIG_TWL4030_WATCHDOG is not set +CONFIG_UBIFS_FS=y +# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set +CONFIG_UBIFS_FS_LZO=y +CONFIG_UBIFS_FS_ZLIB=y +CONFIG_UCS2_STRING=y +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_UNCOMPRESS_INCLUDE="debug/uncompress.h" +CONFIG_UNIX_DIAG=y +CONFIG_USB=y +# CONFIG_USB_ALI_M5632 is not set +# CONFIG_USB_AN2720 is not set +CONFIG_USB_CHIPIDEA=y +CONFIG_USB_CHIPIDEA_HOST=y +CONFIG_USB_CHIPIDEA_OF=y +CONFIG_USB_CHIPIDEA_UDC=y +CONFIG_USB_COMMON=y +CONFIG_USB_DWC2=y +CONFIG_USB_DWC2_DUAL_ROLE=y +# CONFIG_USB_DWC2_PCI is not set +# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set +CONFIG_USB_DWC3=y +CONFIG_USB_DWC3_DUAL_ROLE=y +# CONFIG_USB_DWC3_GADGET is not set +# CONFIG_USB_DWC3_HOST is not set +CONFIG_USB_DWC3_OF_SIMPLE=y +CONFIG_USB_DWC3_OMAP=y +CONFIG_USB_DWC3_PCI=y +# CONFIG_USB_EHCI_FSL is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +# CONFIG_USB_EHCI_MXC is not set +CONFIG_USB_EHCI_PCI=y +CONFIG_USB_FSL_USB2=y +CONFIG_USB_GADGET=y +CONFIG_USB_GPIO_VBUS=y +# CONFIG_USB_HCD_BCMA is not set +CONFIG_USB_HID=y +# CONFIG_USB_IMX21_HCD is not set +CONFIG_USB_ISP1301=y +CONFIG_USB_ISP1760=y +CONFIG_USB_ISP1760_DUAL_ROLE=y +# CONFIG_USB_ISP1760_GADGET_ROLE is not set +CONFIG_USB_ISP1760_HCD=y +# CONFIG_USB_ISP1760_HOST_ROLE is not set +CONFIG_USB_ISP1761_UDC=y +CONFIG_USB_MXS_PHY=y +CONFIG_USB_NET_AX88179_178A=y +CONFIG_USB_NET_AX8817X=y 
+CONFIG_USB_NET_CDCETHER=y
+CONFIG_USB_NET_CDC_NCM=y
+CONFIG_USB_NET_CDC_SUBSET=y
+CONFIG_USB_NET_CDC_SUBSET_ENABLE=y
+CONFIG_USB_NET_DRIVERS=y
+CONFIG_USB_NET_NET1080=y
+CONFIG_USB_NET_SMSC75XX=y
+CONFIG_USB_NET_SMSC95XX=y
+CONFIG_USB_NET_ZAURUS=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_OMAP3=y
+CONFIG_USB_OHCI_HCD_PCI=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_PEGASUS=y
+CONFIG_USB_PHY=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_SUPPORT=y
+# CONFIG_USB_UHCI_HCD is not set
+CONFIG_USB_ULPI=y
+CONFIG_USB_ULPI_VIEWPORT=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PCI=y
+CONFIG_USB_XHCI_PLATFORM=y
+# CONFIG_USERIO is not set
+CONFIG_USER_NS=y
+CONFIG_USE_OF=y
+CONFIG_UTS_NS=y
+CONFIG_VDSO=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_VETH=y
+CONFIG_VEXPRESS_CONFIG=y
+CONFIG_VEXPRESS_SYSCFG=y
+CONFIG_VFAT_FS=y
+CONFIG_VFP=y
+CONFIG_VFPv3=y
+CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
+CONFIG_VHOST=y
+CONFIG_VHOST_NET=y
+CONFIG_VIDEOMODE_HELPERS=y
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_VIRTIO_MMIO=y
+# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set
+CONFIG_VIRTIO_NET=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_PCI_LEGACY=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_VITESSE_PHY=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_CONSOLE_SLEEP=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_WATCHDOG_CORE=y
+# CONFIG_WKUP_M3_RPROC is not set
+# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set
+CONFIG_XFRM_ALGO=y
+CONFIG_XFRM_USER=y
+CONFIG_XILINX_WATCHDOG=y
+CONFIG_XPS=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_BCJ=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_ZBOOT_ROM_BSS=0
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_ZLIB_INFLATE=y
diff --git a/target/linux/layerscape/64b/profiles/00-default.mk b/target/linux/layerscape/armv8_32b/profiles/00-default.mk
similarity index 100%
rename from target/linux/layerscape/64b/profiles/00-default.mk
rename to target/linux/layerscape/armv8_32b/profiles/00-default.mk
diff --git a/target/linux/layerscape/32b/target.mk b/target/linux/layerscape/armv8_32b/target.mk
similarity index 56%
rename from target/linux/layerscape/32b/target.mk
rename to target/linux/layerscape/armv8_32b/target.mk
index 13eae2455..fd1be67cf 100644
--- a/target/linux/layerscape/32b/target.mk
+++ b/target/linux/layerscape/armv8_32b/target.mk
@@ -6,9 +6,11 @@
 #
 
 ARCH:=arm
-BOARDNAME:=layerscape 32b boards
-CPU_TYPE:=cortex-a9
+BOARDNAME:=ARMv8 32-bit based boards
+CPU_TYPE:=cortex-a15
+CPU_SUBTYPE:=neon-vfpv4
+KERNELNAME:=zImage dtbs
 
 define Target/Description
-	Build firmware images for $(BOARDNAME) SoC devices.
+	Build firmware images for NXP Layerscape ARMv8 32-bit based boards.
endef diff --git a/target/linux/layerscape/armv8_64b/config-4.9 b/target/linux/layerscape/armv8_64b/config-4.9 new file mode 100644 index 000000000..91a89a25b --- /dev/null +++ b/target/linux/layerscape/armv8_64b/config-4.9 @@ -0,0 +1,1346 @@ +CONFIG_64BIT=y +CONFIG_ACPI=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_CCA_REQUIRED=y +CONFIG_ACPI_CONTAINER=y +# CONFIG_ACPI_CPPC_CPUFREQ is not set +# CONFIG_ACPI_CUSTOM_DSDT is not set +# CONFIG_ACPI_DEBUG is not set +# CONFIG_ACPI_DEBUGGER is not set +# CONFIG_ACPI_DOCK is not set +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_FAN=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_ACPI_IORT=y +CONFIG_ACPI_MCFG=y +CONFIG_ACPI_NUMA=y +# CONFIG_ACPI_PCI_SLOT is not set +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_THERMAL=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_AHCI_CEVA=y +CONFIG_AHCI_QORIQ=y +CONFIG_AHCI_XGENE=y +CONFIG_AMD_XGBE=y +CONFIG_AQUANTIA_PHY=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y +CONFIG_ARCH_HAS_KCOV=y +CONFIG_ARCH_HAS_SG_CHAIN=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_LAYERSCAPE=y +CONFIG_ARCH_MMAP_RND_BITS=18 +CONFIG_ARCH_MMAP_RND_BITS_MAX=33 +CONFIG_ARCH_MMAP_RND_BITS_MIN=18 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 +CONFIG_ARCH_PHYS_ADDR_T_64BIT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARM64=y +# CONFIG_ARM64_16K_PAGES is not set +CONFIG_ARM64_4K_PAGES=y +# CONFIG_ARM64_64K_PAGES is not set +# CONFIG_ARM64_ACPI_PARKING_PROTOCOL is not set +CONFIG_ARM64_CONT_SHIFT=4 +CONFIG_ARM64_CRYPTO=y +CONFIG_ARM64_ERRATUM_819472=y +CONFIG_ARM64_ERRATUM_824069=y +CONFIG_ARM64_ERRATUM_826319=y +CONFIG_ARM64_ERRATUM_827319=y +CONFIG_ARM64_ERRATUM_832075=y +CONFIG_ARM64_ERRATUM_834220=y +CONFIG_ARM64_ERRATUM_843419=y +CONFIG_ARM64_ERRATUM_845719=y +CONFIG_ARM64_HW_AFDBM=y +# CONFIG_ARM64_LSE_ATOMICS is not set +CONFIG_ARM64_MODULE_CMODEL_LARGE=y +CONFIG_ARM64_PAGE_SHIFT=12 +CONFIG_ARM64_PAN=y +# CONFIG_ARM64_PTDUMP is not set +# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set +CONFIG_ARM64_UAO=y +CONFIG_ARM64_VA_BITS=48 +# CONFIG_ARM64_VA_BITS_39 is not set +CONFIG_ARM64_VA_BITS_48=y +CONFIG_ARM64_VHE=y +# CONFIG_ARMV8_DEPRECATED is not set +CONFIG_ARM_AMBA=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_ARM_BIG_LITTLE_CPUFREQ=y +CONFIG_ARM_CPUIDLE=y +# CONFIG_ARM_DT_BL_CPUFREQ is not set +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_V2M=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +# CONFIG_ARM_PL172_MPMC is not set +CONFIG_ARM_PMU=y +CONFIG_ARM_PSCI_FW=y +CONFIG_ARM_SMMU=y +CONFIG_ARM_SMMU_V3=y +CONFIG_ARM_SP805_WATCHDOG=y +CONFIG_ARM_TIMER_SP804=y +CONFIG_ASN1=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=y +CONFIG_ATA=y +CONFIG_ATA_ACPI=y 
+CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_AUDIT=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y +CONFIG_AUDIT_COMPAT_GENERIC=y +CONFIG_AUDIT_GENERIC=y +CONFIG_AUDIT_TREE=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUTOFS4_FS=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +# CONFIG_BACKLIGHT_PWM is not set +CONFIG_BALLOON_COMPACTION=y +CONFIG_BATTERY_BQ27XXX=y +CONFIG_BATTERY_BQ27XXX_I2C=y +CONFIG_BLK_CGROUP=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NVME=y +# CONFIG_BLK_DEV_NVME_SCSI is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=262144 +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_THROTTLING=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLOCK_COMPAT=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_BOUNCE=y +CONFIG_BPF_JIT=y +# CONFIG_BPF_SYSCALL is not set +CONFIG_BRIDGE_EBT_DNAT=y +CONFIG_BRIDGE_EBT_SNAT=y +CONFIG_BRIDGE_EBT_T_NAT=y +CONFIG_BRIDGE_NETFILTER=y +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_BTRFS_FS=y +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_BUILD_BIN2C=y +CONFIG_CAVIUM_ERRATUM_22375=y +CONFIG_CAVIUM_ERRATUM_23144=y +CONFIG_CAVIUM_ERRATUM_23154=y +CONFIG_CAVIUM_ERRATUM_27456=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_CEPH_LIB=y +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set +CONFIG_CFQ_GROUP_IOSCHED=y +# CONFIG_CFS_BANDWIDTH is not set +CONFIG_CGROUPS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_DEVICE=y +# CONFIG_CGROUP_FREEZER is not set +CONFIG_CGROUP_HUGETLB=y +# CONFIG_CGROUP_NET_CLASSID is not set +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_SCHED=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_CHROME_PLATFORMS=y +CONFIG_CLKDEV_LOOKUP=y +CONFIG_CLKSRC_ACPI=y +CONFIG_CLKSRC_MMIO=y +CONFIG_CLKSRC_OF=y +CONFIG_CLKSRC_PROBE=y +CONFIG_CLKSRC_VERSATILE=y +CONFIG_CLK_QORIQ=y +CONFIG_CLK_SP810=y +CONFIG_CLK_VEXPRESS_OSC=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_CLZ_TAB=y +CONFIG_CMA=y +CONFIG_CMA_ALIGNMENT=8 +CONFIG_CMA_AREAS=7 +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +CONFIG_CMA_SIZE_MBYTES=16 +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +CONFIG_COMMON_CLK=y +CONFIG_COMMON_CLK_CS2000_CP=y +# CONFIG_COMMON_CLK_MAX77686 is not set +CONFIG_COMMON_CLK_PWM=y +CONFIG_COMMON_CLK_RK808=y +CONFIG_COMMON_CLK_S2MPS11=y +CONFIG_COMMON_CLK_VERSATILE=y +CONFIG_COMMON_CLK_XGENE=y +CONFIG_COMPACTION=y +CONFIG_COMPAT=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_COMPAT_NETLINK_MESSAGES=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_CONFIGFS_FS=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_COREDUMP=y +# CONFIG_CORTINA_PHY is not set +CONFIG_CPUFREQ_DT=y +CONFIG_CPUFREQ_DT_PLATDEV=y +CONFIG_CPUSETS=y +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_STAT=y +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +CONFIG_CPU_IDLE=y +# CONFIG_CPU_IDLE_GOV_LADDER 
is not set
+CONFIG_CPU_IDLE_GOV_MENU=y
+CONFIG_CPU_PM=y
+CONFIG_CPU_RMAP=y
+CONFIG_CPU_THERMAL=y
+CONFIG_CRC16=y
+# CONFIG_CRC32_SARWATE is not set
+CONFIG_CRC32_SLICEBY8=y
+CONFIG_CRC7=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC_T10DIF=y
+CONFIG_CROSS_MEMORY_ATTACH=y
+# CONFIG_CROS_EC_CHARDEV is not set
+CONFIG_CROS_EC_PROTO=y
+# CONFIG_CROS_KBD_LED_BACKLIGHT is not set
+CONFIG_CRYPTO_ABLK_HELPER=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_AES_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+# CONFIG_CRYPTO_AES_ARM64_NEON_BLK is not set
+CONFIG_CRYPTO_AKCIPHER=y
+CONFIG_CRYPTO_AKCIPHER2=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+# CONFIG_CRYPTO_ARC4 is not set
+CONFIG_CRYPTO_AUTHENC=y
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_CRC32_ARM64 is not set
+CONFIG_CRYPTO_CRCT10DIF=y
+CONFIG_CRYPTO_CRYPTD=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_DEV_FSL_CAAM=y
+CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API=y
+CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON=y
+CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API=y
+CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC=y
+CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI=y
+# CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG is not set
+# CONFIG_CRYPTO_DEV_FSL_CAAM_INTC is not set
+CONFIG_CRYPTO_DEV_FSL_CAAM_JR=y
+CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API=y
+CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE=9
+CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API=y
+# CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM is not set
+CONFIG_CRYPTO_DRBG=y
+CONFIG_CRYPTO_DRBG_HMAC=y
+CONFIG_CRYPTO_DRBG_MENU=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_HW=y
+CONFIG_CRYPTO_JITTERENTROPY=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_NULL2=y
+# CONFIG_CRYPTO_PCRYPT is not set
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_RNG_DEFAULT=y
+CONFIG_CRYPTO_RSA=y
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_SHA512=y
+# CONFIG_CRYPTO_TLS is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+CONFIG_DCACHE_WORD_ACCESS=y
+# CONFIG_DEBUG_ALIGN_RODATA is not set
+# CONFIG_DEBUG_BLK_CGROUP is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_INFO_REDUCED is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_RODATA=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_LZ4=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_LZO=y
+CONFIG_DECOMPRESS_XZ=y
+CONFIG_DEFAULT_CFQ=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEVKMEM=y
+CONFIG_DEVMEM=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_DEV_DAX is not set
+CONFIG_DMADEVICES=y
+CONFIG_DMATEST=y
+CONFIG_DMA_ACPI=y
+CONFIG_DMA_CMA=y
+CONFIG_DMA_ENGINE=y
+CONFIG_DMA_ENGINE_RAID=y
+CONFIG_DMA_OF=y
+CONFIG_DMA_SHARED_BUFFER=y
+CONFIG_DMA_VIRTUAL_CHANNELS=y
+CONFIG_DMI=y
+CONFIG_DMIID=y
+# CONFIG_DMI_SYSFS is not set
+CONFIG_DNOTIFY=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_DST_CACHE=y
+CONFIG_DTC=y
+CONFIG_DT_IDLE_STATES=y
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_EDAC_SUPPORT=y
+CONFIG_EEPROM_AT24=y
+CONFIG_EFI=y
+CONFIG_EFIVAR_FS=y
+CONFIG_EFI_ARMSTUB=y
+# CONFIG_EFI_CAPSULE_LOADER is not set
+CONFIG_EFI_ESRT=y
+CONFIG_EFI_PARAMS_FROM_FDT=y
+CONFIG_EFI_RUNTIME_WRAPPERS=y
+CONFIG_EFI_STUB=y
+# CONFIG_EFI_TEST is not set
+# CONFIG_EFI_VARS is not set
+CONFIG_ELF_CORE=y
+# CONFIG_EMBEDDED is not set
+CONFIG_ENABLE_MUST_CHECK=y
+# CONFIG_EVM is not set
+CONFIG_EXPORTFS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+# CONFIG_EXT2_FS_SECURITY is not set
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXTCON=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_FAT_FS=y
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+CONFIG_FB_CMDLINE=y
+CONFIG_FB_DEFERRED_IO=y
+# CONFIG_FB_EFI is not set
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_SYS_COPYAREA=y
+CONFIG_FB_SYS_FILLRECT=y
+CONFIG_FB_SYS_FOPS=y
+CONFIG_FB_SYS_IMAGEBLIT=y
+CONFIG_FHANDLE=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_FIXED_PHY=y
+CONFIG_FIX_EARLYCON_MEM=y
+CONFIG_FMAN_ARM=y
+# CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN is not set
+# CONFIG_FMAN_P1023 is not set
+# CONFIG_FMAN_P3040_P4080_P5020 is not set
+# CONFIG_FMAN_PFC is not set
+# CONFIG_FMAN_V3H is not set
+# CONFIG_FMAN_V3L is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x16=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_SUPPORT=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+CONFIG_FRAME_POINTER=y
+CONFIG_FRAME_WARN=2048
+CONFIG_FREEZER=y
+CONFIG_FSL_BMAN_CONFIG=y
+CONFIG_FSL_BMAN_DEBUGFS=y
+# CONFIG_FSL_BMAN_TEST is not set
+CONFIG_FSL_DPAA2=y
+CONFIG_FSL_DPAA2_ETH=y
+CONFIG_FSL_DPAA2_ETHSW=y
+# CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE is not set
+CONFIG_FSL_DPAA2_EVB=y
+CONFIG_FSL_DPAA2_MAC=y
+# CONFIG_FSL_DPAA2_MAC_NETDEVS is not set
+# CONFIG_FSL_DPAA2_QDMA is not set
+# CONFIG_FSL_DPAA_1588 is not set
+CONFIG_FSL_DPAA_ADVANCED_DRIVERS=y
+# CONFIG_FSL_DPAA_CEETM is not set
+CONFIG_FSL_DPAA_CS_THRESHOLD_10G=0x10000000
+CONFIG_FSL_DPAA_CS_THRESHOLD_1G=0x06000000
+# CONFIG_FSL_DPAA_DBG_LOOP is not set
+# CONFIG_FSL_DPAA_ETH_DEBUG is not set
+CONFIG_FSL_DPAA_ETH_DEBUGFS=y
+# CONFIG_FSL_DPAA_ETH_JUMBO_FRAME is not set
+CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT=128
+CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD=80
+CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE=y
+# CONFIG_FSL_DPAA_HOOKS is not set
+CONFIG_FSL_DPAA_INGRESS_CS_THRESHOLD=0x10000000
+CONFIG_FSL_DPAA_OFFLINE_PORTS=y
+# CONFIG_FSL_DPAA_TS is not set
+CONFIG_FSL_DPA_CAN_WAIT=y
+CONFIG_FSL_DPA_CAN_WAIT_SYNC=y
+# CONFIG_FSL_DPA_CHECKING is not set
+CONFIG_FSL_DPA_PIRQ_FAST=y
+CONFIG_FSL_DPA_PIRQ_SLOW=y
+CONFIG_FSL_DPA_PORTAL_SHARE=y
+CONFIG_FSL_EDMA=y
+CONFIG_FSL_ERRATUM_A008585=y
+CONFIG_FSL_FM_MAX_FRAME_SIZE=1522
+CONFIG_FSL_FM_RX_EXTRA_HEADROOM=64
+CONFIG_FSL_GUTS=y
+CONFIG_FSL_IFC=y
+CONFIG_FSL_LS2_CONSOLE=y
+CONFIG_FSL_MC_BUS=y
+CONFIG_FSL_MC_DPIO=y
+CONFIG_FSL_MC_RESTOOL=y
+# CONFIG_FSL_QBMAN_DEBUG is not set
+# CONFIG_FSL_QDMA is not set
+CONFIG_FSL_QMAN_CI_SCHED_CFG_BMAN_W=2
+CONFIG_FSL_QMAN_CI_SCHED_CFG_RW_W=2
+CONFIG_FSL_QMAN_CI_SCHED_CFG_SRCCIV=4
+CONFIG_FSL_QMAN_CI_SCHED_CFG_SRQ_W=3
+CONFIG_FSL_QMAN_CONFIG=y
+CONFIG_FSL_QMAN_DEBUGFS=y
+CONFIG_FSL_QMAN_FQD_SZ=10
+CONFIG_FSL_QMAN_FQ_LOOKUP=y
+CONFIG_FSL_QMAN_INIT_TIMEOUT=10
+CONFIG_FSL_QMAN_PFDR_SZ=13
+CONFIG_FSL_QMAN_PIRQ_DQRR_ITHRESH=12
+CONFIG_FSL_QMAN_PIRQ_IPERIOD=100
+CONFIG_FSL_QMAN_PIRQ_MR_ITHRESH=4
+CONFIG_FSL_QMAN_POLL_LIMIT=32
+# CONFIG_FSL_QMAN_TEST is not set
+CONFIG_FSL_SDK_BMAN=y
+CONFIG_FSL_SDK_DPA=y
+CONFIG_FSL_SDK_DPAA_ETH=y
+CONFIG_FSL_SDK_FMAN=y
+# CONFIG_FSL_SDK_FMAN_TEST is not set
+CONFIG_FSL_SDK_QMAN=y
+CONFIG_FSL_USDPAA=y
+CONFIG_FSL_XGMAC_MDIO=y
+CONFIG_FS_IOMAP=y
+CONFIG_FS_MBCACHE=y
+CONFIG_FS_POSIX_ACL=y
+CONFIG_FTM_ALARM=y
+CONFIG_FUSE_FS=y
+# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
+CONFIG_GARP=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_CPU_AUTOPROBE=y
+CONFIG_GENERIC_CSUM=y
+CONFIG_GENERIC_EARLY_IOREMAP=y
+CONFIG_GENERIC_IDLE_POLL_SETUP=y
+CONFIG_GENERIC_IO=y
+CONFIG_GENERIC_IRQ_CHIP=y
+CONFIG_GENERIC_IRQ_MIGRATION=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
+CONFIG_GENERIC_MSI_IRQ=y
+CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_PHY=y
+CONFIG_GENERIC_SCHED_CLOCK=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+# CONFIG_GIANFAR is not set
+CONFIG_GLOB=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIOLIB_IRQCHIP=y
+CONFIG_GPIO_ACPI=y
+# CONFIG_GPIO_AMDPT is not set
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_GENERIC=y
+CONFIG_GPIO_GENERIC_PLATFORM=y
+CONFIG_GPIO_MAX77620=y
+CONFIG_GPIO_MPC8XXX=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_PL061=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_XGENE=y
+CONFIG_HANDLE_DOMAIN_IRQ=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_HAS_DMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT_MAP=y
+# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
+CONFIG_HAVE_ARCH_AUDITSYSCALL=y
+CONFIG_HAVE_ARCH_BITREVERSE=y
+CONFIG_HAVE_ARCH_HUGE_VMAP=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+CONFIG_HAVE_ARCH_KASAN=y
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y
+CONFIG_HAVE_ARCH_PFN_VALID=y
+CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
+CONFIG_HAVE_ARM_SMCCC=y
+# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
+CONFIG_HAVE_CC_STACKPROTECTOR=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_CLK_PREPARE=y
+CONFIG_HAVE_CMPXCHG_DOUBLE=y
+CONFIG_HAVE_CMPXCHG_LOCAL=y
+CONFIG_HAVE_CONTEXT_TRACKING=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_HAVE_DEBUG_BUGVERBOSE=y
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_EBPF_JIT=y
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_HAVE_GENERIC_RCU_GUP=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL=y
+CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y
+CONFIG_HAVE_KVM_EVENTFD=y
+CONFIG_HAVE_KVM_IRQCHIP=y
+CONFIG_HAVE_KVM_IRQFD=y
+CONFIG_HAVE_KVM_IRQ_ROUTING=y
+CONFIG_HAVE_KVM_MSI=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
+CONFIG_HAVE_MEMORY_PRESENT=y
+CONFIG_HAVE_NET_DSA=y
+CONFIG_HAVE_PATA_PLATFORM=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_HAVE_PERF_REGS=y
+CONFIG_HAVE_PERF_USER_STACK_DUMP=y
+CONFIG_HAVE_RCU_TABLE_FREE=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_HAVE_UID16=y
+CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
+CONFIG_HIBERNATE_CALLBACKS=y
+CONFIG_HIBERNATION=y
+CONFIG_HID=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GENERIC=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HNS=y
+CONFIG_HNS_DSAF=y
+CONFIG_HNS_ENET=y
+CONFIG_HNS_MDIO=y
+CONFIG_HOTPLUG_CPU=y
+# CONFIG_HPET is not set
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+CONFIG_HVC_DRIVER=y
+CONFIG_HVC_IRQ=y
+CONFIG_HVC_XEN=y
+CONFIG_HVC_XEN_FRONTEND=y
+CONFIG_HWMON=y
+CONFIG_HW_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_CAVIUM is not set
+CONFIG_HZ=250
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+CONFIG_I2C=y
+CONFIG_I2C_ALGOBIT=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CROS_EC_TUNNEL=y
+CONFIG_I2C_DESIGNWARE_CORE=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_IMX=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_RK3X=y
+CONFIG_I2C_SLAVE=y
+# CONFIG_I2C_SLAVE_EEPROM is not set
+CONFIG_IGB=y
+CONFIG_IGBVF=y
+CONFIG_IGB_HWMON=y
+CONFIG_IIO=y
+# CONFIG_IIO_BUFFER is not set
+# CONFIG_IIO_TRIGGER is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
+# CONFIG_IMA is not set
+CONFIG_IMX2_WDT=y
+CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG_DESTROY is not set
+CONFIG_INET_ESP=y
+CONFIG_INET_TCP_DIAG=y
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_INPUT=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_INPUT_LEDS=y
+CONFIG_INPUT_MOUSE=y
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+CONFIG_INPUT_XEN_KBDDEV_FRONTEND=y
+CONFIG_INTEGRITY=y
+CONFIG_INTEGRITY_AUDIT=y
+# CONFIG_INTEGRITY_SIGNATURE is not set
+CONFIG_IOMMU_API=y
+CONFIG_IOMMU_DMA=y
+CONFIG_IOMMU_HELPER=y
+CONFIG_IOMMU_IOVA=y
+CONFIG_IOMMU_IO_PGTABLE=y
+# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set
+CONFIG_IOMMU_IO_PGTABLE_LPAE=y
+# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set
+CONFIG_IOMMU_SUPPORT=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IPC_NS=y
+CONFIG_IPV6=y
+CONFIG_IPV6_SIT=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+# CONFIG_IP_MROUTE is not set
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_RARP is not set
+CONFIG_IRQCHIP=y
+CONFIG_IRQ_BYPASS_MANAGER=y
+CONFIG_IRQ_DOMAIN=y
+CONFIG_IRQ_DOMAIN_HIERARCHY=y
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_IRQ_WORK=y
+# CONFIG_ISDN is not set
+CONFIG_JBD2=y
+# CONFIG_JFFS2_FS is not set
+CONFIG_JUMP_LABEL=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_KASAN is not set
+CONFIG_KEXEC=y
+CONFIG_KEXEC_CORE=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_CROS_EC is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYS=y
+CONFIG_KSM=y
+CONFIG_KVM=y
+CONFIG_KVM_ARM_HOST=y
+CONFIG_KVM_ARM_PMU=y
+CONFIG_KVM_ARM_VGIC_V3_ITS=y
+CONFIG_KVM_COMPAT=y
+CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y
+CONFIG_KVM_MMIO=y
+CONFIG_KVM_VFIO=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_PWM=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_TIMER is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_LIBCRC32C=y
+CONFIG_LIBFDT=y
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_LOCK_SPIN_ON_OWNER=y
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_CLUT224=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_LS_SCFG_MSI=y
+CONFIG_LS_SOC_DRIVERS=y
+CONFIG_LZ4_DECOMPRESS=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_MACB=y
+CONFIG_MACVLAN=y
+CONFIG_MACVTAP=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MANDATORY_FILE_LOCKING=y
+# CONFIG_MAX77620_THERMAL is not set
+# CONFIG_MAX77620_WATCHDOG is not set
+CONFIG_MDIO_BITBANG=y
+CONFIG_MDIO_BOARDINFO=y
+CONFIG_MDIO_BUS_MUX=y
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
+# CONFIG_MDIO_FSL_BACKPLANE is not set
+# CONFIG_MDIO_GPIO is not set
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_MEMCG_SWAP_ENABLED=y
+CONFIG_MEMORY=y
+CONFIG_MEMORY_BALLOON=y
+CONFIG_MEMORY_ISOLATION=y
+CONFIG_MEMTEST=y
+CONFIG_MFD_CORE=y
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_I2C=y
+# CONFIG_MFD_CROS_EC_SPI is not set
+CONFIG_MFD_MAX77620=y
+CONFIG_MFD_RK808=y
+CONFIG_MFD_SEC_CORE=y
+CONFIG_MFD_SYSCON=y
+CONFIG_MFD_VEXPRESS_SYSREG=y
+CONFIG_MICREL_PHY=y
+CONFIG_MIGRATION=y
+CONFIG_MMC=y
+CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_MMC_DW_K3=y
+# CONFIG_MMC_DW_PCI is not set
+CONFIG_MMC_DW_PLTFM=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_ACPI=y
+CONFIG_MMC_SDHCI_IO_ACCESSORS=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
+# CONFIG_MMC_SDHCI_PCI is not set
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SPI=y
+# CONFIG_MMC_TIFM_SD is not set
+CONFIG_MMU_NOTIFIER=y
+CONFIG_MODULES_TREE_LOOKUP=y
+CONFIG_MODULES_USE_ELF_RELA=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_CYAPA is not set
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_BYD=y
+CONFIG_MOUSE_PS2_CYPRESS=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+CONFIG_MOUSE_PS2_FOCALTECH=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+CONFIG_MPILIB=y
+CONFIG_MRP=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+# CONFIG_MTD_CFI_GEOMETRY is not set
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_DATAFLASH=y
+# CONFIG_MTD_DATAFLASH_OTP is not set
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_DENALI=y
+CONFIG_MTD_NAND_DENALI_DT=y
+CONFIG_MTD_NAND_ECC=y
+CONFIG_MTD_NAND_FSL_IFC=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
+CONFIG_MTD_SST25L=y
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_MV_XOR_V2=y
+CONFIG_NAMESPACES=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_NEED_MULTIPLE_NODES=y
+CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
+CONFIG_NEED_SG_DMA_LENGTH=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_NETFILTER_INGRESS=y
+CONFIG_NETFILTER_NETLINK=y
+CONFIG_NETFILTER_XTABLES=y
+CONFIG_NETFILTER_XT_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_NAT=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+# CONFIG_NETLABEL is not set
+CONFIG_NETLINK_DIAG=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NET_9P=y
+# CONFIG_NET_9P_DEBUG is not set
+CONFIG_NET_9P_VIRTIO=y
+# CONFIG_NET_CLS_CGROUP is not set
+CONFIG_NET_FLOW_LIMIT=y
+CONFIG_NET_INGRESS=y
+CONFIG_NET_IP_TUNNEL=y
+CONFIG_NET_KEY=y
+CONFIG_NET_NS=y
+CONFIG_NET_PACKET_ENGINE=y
+CONFIG_NET_PTP_CLASSIFY=y
+# CONFIG_NET_VENDOR_AURORA is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NFT_CHAIN_NAT_IPV4 is not set
+# CONFIG_NFT_CHAIN_ROUTE_IPV4 is not set
+CONFIG_NFT_COMPAT=y
+# CONFIG_NFT_COUNTER is not set
+CONFIG_NFT_CT=y
+# CONFIG_NFT_EXTHDR is not set
+# CONFIG_NFT_HASH is not set
+# CONFIG_NFT_LIMIT is not set
+# CONFIG_NFT_LOG is not set
+CONFIG_NFT_MASQ=y
+# CONFIG_NFT_MASQ_IPV4 is not set
+# CONFIG_NFT_META is not set
+CONFIG_NFT_NAT=y
+# CONFIG_NFT_NUMGEN is not set
+# CONFIG_NFT_QUOTA is not set
+# CONFIG_NFT_REDIR is not set
+# CONFIG_NFT_REJECT is not set
+# CONFIG_NFT_REJECT_IPV4 is not set
+# CONFIG_NFT_SET_HASH is not set
+# CONFIG_NFT_SET_RBTREE is not set
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_NF_CONNTRACK_MARK=y
+# CONFIG_NF_CONNTRACK_RTCACHE is not set
+CONFIG_NF_DEFRAG_IPV4=y
+CONFIG_NF_DEFRAG_IPV6=y
+# CONFIG_NF_LOG_BRIDGE is not set
+CONFIG_NF_LOG_COMMON=y
+CONFIG_NF_LOG_IPV6=y
+CONFIG_NF_NAT=y
+CONFIG_NF_NAT_IPV4=y
+CONFIG_NF_NAT_IPV6=y
+CONFIG_NF_NAT_MASQUERADE_IPV4=y
+CONFIG_NF_NAT_MASQUERADE_IPV6=y
+CONFIG_NF_NAT_NEEDED=y
+# CONFIG_NF_NAT_REDIRECT is not set
+CONFIG_NF_REJECT_IPV6=y
+CONFIG_NF_TABLES=y
+# CONFIG_NF_TABLES_ARP is not set
+CONFIG_NF_TABLES_BRIDGE=y
+# CONFIG_NF_TABLES_INET is not set
+CONFIG_NF_TABLES_IPV4=y
+# CONFIG_NF_TABLES_IPV6 is not set
+CONFIG_NLS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NODES_SHIFT=2
+CONFIG_NO_BOOTMEM=y
+CONFIG_NO_HZ_COMMON=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NR_CPUS=64
+CONFIG_NUMA=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y
+CONFIG_NVMEM=y
+CONFIG_NVME_CORE=y
+CONFIG_OF=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_ADDRESS_PCI=y
+CONFIG_OF_EARLY_FLATTREE=y
+CONFIG_OF_FLATTREE=y
+CONFIG_OF_GPIO=y
+CONFIG_OF_IOMMU=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_MDIO=y
+CONFIG_OF_NET=y
+CONFIG_OF_NUMA=y
+CONFIG_OF_PCI=y
+CONFIG_OF_PCI_IRQ=y
+CONFIG_OF_RESERVED_MEM=y
+CONFIG_OLD_SIGSUSPEND3=y
+CONFIG_PACKET_DIAG=y
+CONFIG_PAGE_COUNTER=y
+# CONFIG_PANIC_ON_OOPS is not set
+CONFIG_PANIC_ON_OOPS_VALUE=0
+CONFIG_PANIC_TIMEOUT=0
+CONFIG_PARAVIRT=y
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_PARTITION_PERCPU=y
+# CONFIG_PATA_ACPI is not set
+CONFIG_PATA_OF_PLATFORM=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PCI=y
+CONFIG_PCIEAER=y
+CONFIG_PCIEASPM=y
+# CONFIG_PCIEASPM_DEBUG is not set
+CONFIG_PCIEASPM_DEFAULT=y
+# CONFIG_PCIEASPM_PERFORMANCE is not set
+# CONFIG_PCIEASPM_POWERSAVE is not set
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCIE_DW=y
+CONFIG_PCIE_PME=y
+CONFIG_PCI_ATS=y
+CONFIG_PCI_BUS_ADDR_T_64BIT=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_DOMAINS_GENERIC=y
+CONFIG_PCI_ECAM=y
+CONFIG_PCI_HISI=y
+CONFIG_PCI_HOST_COMMON=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI_IOV=y
+CONFIG_PCI_LABEL=y
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_MSI_IRQ_DOMAIN=y
+CONFIG_PERF_EVENTS=y
+CONFIG_PGTABLE_LEVELS=4
+CONFIG_PHYLIB=y
+CONFIG_PHYS_ADDR_T_64BIT=y
+# CONFIG_PHY_EXYNOS4210_USB2 is not set
+# CONFIG_PHY_EXYNOS4X12_USB2 is not set
+# CONFIG_PHY_EXYNOS5250_USB2 is not set
+CONFIG_PHY_SAMSUNG_USB2=y
+CONFIG_PHY_XGENE=y
+CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_PID_NS=y
+CONFIG_PL330_DMA=y
+CONFIG_PM=y
+# CONFIG_PMIC_OPREGION is not set
+CONFIG_PM_CLK=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_OPP=y
+CONFIG_PM_SLEEP=y
+CONFIG_PM_SLEEP_SMP=y
+CONFIG_PM_STD_PARTITION=""
+CONFIG_PNP=y
+CONFIG_PNPACPI=y
+CONFIG_PNP_DEBUG_MESSAGES=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_RESET_VEXPRESS=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_PPS=y
+CONFIG_PREEMPT=y
+CONFIG_PREEMPT_COUNT=y
+# CONFIG_PREEMPT_NONE is not set
+CONFIG_PREEMPT_NOTIFIERS=y
+CONFIG_PREEMPT_RCU=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PRINT_QUOTA_WARNING=y
+CONFIG_PROC_CHILDREN=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_PROC_PID_CPUSET=y
+CONFIG_PROFILING=y
+CONFIG_PTP_1588_CLOCK=y
+# CONFIG_PTP_1588_CLOCK_DPAA is not set
+CONFIG_PTP_1588_CLOCK_DPAA2=y
+CONFIG_PWM=y
+# CONFIG_PWM_CROS_EC is not set
+CONFIG_PWM_SYSFS=y
+CONFIG_QCOM_HIDMA=y
+CONFIG_QCOM_HIDMA_MGMT=y
+CONFIG_QCOM_QDF2400_ERRATUM_0065=y
+# CONFIG_QFMT_V1 is not set
+# CONFIG_QFMT_V2 is not set
+CONFIG_QMAN_CEETM_UPDATE_PERIOD=1000
+CONFIG_QORIQ_CPUFREQ=y
+# CONFIG_QUICC_ENGINE is not set
+CONFIG_QUOTA=y
+CONFIG_QUOTACTL=y
+# CONFIG_QUOTA_NETLINK_INTERFACE is not set
+CONFIG_RADIX_TREE_MULTIORDER=y
+CONFIG_RAID6_PQ=y
+# CONFIG_RANDOMIZE_BASE is not set
+CONFIG_RAS=y
+CONFIG_RATIONAL=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=21
+# CONFIG_RCU_EXPERT is not set
+CONFIG_RCU_STALL_COMMON=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_GZIP=y
+CONFIG_RD_LZ4=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_LZO=y
+CONFIG_RD_XZ=y
+CONFIG_REALTEK_PHY=y
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=y
+CONFIG_REGMAP_IRQ=y
+CONFIG_REGMAP_MMIO=y
+CONFIG_REGMAP_SPI=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_MAX77620=y
+CONFIG_REGULATOR_PWM=y
+CONFIG_REGULATOR_QCOM_SPMI=y
+CONFIG_REGULATOR_RK808=y
+# CONFIG_REGULATOR_S2MPA01 is not set
+CONFIG_REGULATOR_S2MPS11=y
+# CONFIG_REGULATOR_S5M8767 is not set
+# CONFIG_REGULATOR_VEXPRESS is not set
+CONFIG_RESET_CONTROLLER=y
+CONFIG_RFS_ACCEL=y
+CONFIG_RPS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1307=y
+CONFIG_RTC_DRV_DS1307_HWMON=y
+CONFIG_RTC_DRV_DS3232=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_PCF2127=y
+CONFIG_RTC_DRV_PCF85263=y
+CONFIG_RTC_DRV_PL031=y
+# CONFIG_RTC_DRV_RK808 is not set
+CONFIG_RTC_DRV_S5M=y
+CONFIG_RTC_I2C_AND_SPI=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_RWSEM_SPIN_ON_OWNER=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_SATA_PMP=y
+CONFIG_SATA_SIL24=y
+# CONFIG_SATA_ZPODD is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_INFO=y
+CONFIG_SCHED_MC=y
+CONFIG_SCSI=y
+CONFIG_SCSI_HISI_SAS=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_SAS_ATTRS=y
+CONFIG_SCSI_SAS_HOST_SMP=y
+CONFIG_SCSI_SAS_LIBSAS=y
+CONFIG_SECCOMP=y
+CONFIG_SECCOMP_FILTER=y
+CONFIG_SECURITY=y
+# CONFIG_SECURITY_APPARMOR is not set
+# CONFIG_SECURITY_LOADPIN is not set
+# CONFIG_SECURITY_NETWORK is not set
+# CONFIG_SECURITY_PATH is not set
+# CONFIG_SECURITY_SMACK is not set
+# CONFIG_SECURITY_TOMOYO is not set
+# CONFIG_SECURITY_YAMA is not set
+CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_FSL=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_PNP=y
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_FSL_LPUART=y
+CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIO=y
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SG_POOL=y
+CONFIG_SKY2=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+CONFIG_SMC91X=y
+CONFIG_SMP=y
+CONFIG_SMSC911X=y
+# CONFIG_SMSC911X_ARCH_HOOKS is not set
+CONFIG_SND=y
+# CONFIG_SND_COMPRESS_OFFLOAD is not set
+CONFIG_SND_DMAENGINE_PCM=y
+CONFIG_SND_JACK=y
+CONFIG_SND_JACK_INPUT_DEV=y
+CONFIG_SND_PCM=y
+CONFIG_SND_PCM_TIMER=y
+CONFIG_SND_SIMPLE_CARD=y
+CONFIG_SND_SIMPLE_CARD_UTILS=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_AK4613=y
+CONFIG_SND_SOC_FSL_SAI=y
+CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y
+CONFIG_SND_SOC_I2C_AND_SPI=y
+CONFIG_SND_SOC_SGTL5000=y
+CONFIG_SND_SPI=y
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_TIMER=y
+CONFIG_SOCK_DIAG=y
+CONFIG_SOC_BUS=y
+CONFIG_SOUND=y
+CONFIG_SPARSEMEM=y
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM_VMEMMAP=y
+CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_SPI=y
+CONFIG_SPI_FSL_DSPI=y
+CONFIG_SPI_FSL_QUADSPI=y
+CONFIG_SPI_MASTER=y
+CONFIG_SPI_PL022=y
+CONFIG_SPMI=y
+# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set
+CONFIG_SQUASHFS_DECOMP_SINGLE=y
+CONFIG_SQUASHFS_FILE_CACHE=y
+# CONFIG_SQUASHFS_FILE_DIRECT is not set
+# CONFIG_SQUASHFS_XZ is not set
+CONFIG_SQUASHFS_ZLIB=y
+CONFIG_SRAM=y
+CONFIG_SRCU=y
+# CONFIG_STRIP_ASM_SYMS is not set
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_SWIOTLB=y
+CONFIG_SWIOTLB_XEN=y
+CONFIG_SWPHY=y
+# CONFIG_SW_SYNC is not set
+CONFIG_SYNC_FILE=y
+# CONFIG_SYN_COOKIES is not set
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_SYSFS_SYSCALL=y
+CONFIG_SYSVIPC_COMPAT=y
+CONFIG_SYS_HYPERVISOR=y
+CONFIG_SYS_SUPPORTS_HUGETLBFS=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_TASK_XACCT=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_THERMAL=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_HWMON=y
+CONFIG_THERMAL_OF=y
+CONFIG_TICK_CPU_ACCOUNTING=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
+# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set
+CONFIG_TRANSPARENT_HUGE_PAGECACHE=y
+CONFIG_TUN=y
+CONFIG_UCS2_STRING=y
+CONFIG_UIO=y
+CONFIG_UIO_AEC=y
+CONFIG_UIO_CIF=y
+CONFIG_UIO_DMEM_GENIRQ=y
+CONFIG_UIO_MF624=y
+CONFIG_UIO_NETX=y
+CONFIG_UIO_PCI_GENERIC=y
+CONFIG_UIO_PDRV_GENIRQ=y
+# CONFIG_UIO_PRUSS is not set
+CONFIG_UIO_SERCOS3=y
+CONFIG_UNINLINE_SPIN_UNLOCK=y
+CONFIG_UNIX_DIAG=y
+CONFIG_USB=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_OF=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_DWC2_DUAL_ROLE=y
+# CONFIG_USB_DWC2_PCI is not set
+# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_DUAL_ROLE=y
+# CONFIG_USB_DWC3_GADGET is not set
+# CONFIG_USB_DWC3_HOST is not set
+CONFIG_USB_DWC3_OF_SIMPLE=y
+CONFIG_USB_DWC3_PCI=y
+# CONFIG_USB_EHCI_FSL is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_EHCI_PCI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_HID=y
+CONFIG_USB_HSIC_USB3503=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_DUAL_ROLE=y
+# CONFIG_USB_ISP1760_GADGET_ROLE is not set
+CONFIG_USB_ISP1760_HCD=y
+# CONFIG_USB_ISP1760_HOST_ROLE is not set
+CONFIG_USB_ISP1761_UDC=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PCI=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_OTG=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_SUPPORT=y
+# CONFIG_USB_UHCI_HCD is not set
+CONFIG_USB_ULPI=y
+CONFIG_USB_ULPI_VIEWPORT=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PCI=y
+CONFIG_USB_XHCI_PLATFORM=y
+# CONFIG_USERIO is not set
+CONFIG_USER_NS=y
+CONFIG_USE_PERCPU_NUMA_NODE_ID=y
+CONFIG_UTS_NS=y
+CONFIG_VEXPRESS_CONFIG=y
+CONFIG_VEXPRESS_SYSCFG=y
+CONFIG_VFAT_FS=y
+CONFIG_VFIO=y
+CONFIG_VFIO_FSL_MC=y
+CONFIG_VFIO_IOMMU_TYPE1=y
+# CONFIG_VFIO_NOIOMMU is not set
+CONFIG_VFIO_PCI=y
+CONFIG_VFIO_PCI_INTX=y
+CONFIG_VFIO_PCI_MMAP=y
+# CONFIG_VFIO_PLATFORM is not set
+CONFIG_VFIO_VIRQFD=y
+CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
+CONFIG_VHOST=y
+CONFIG_VHOST_NET=y
+CONFIG_VIDEOMODE_HELPERS=y
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_VIRTIO_MMIO=y
+# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set
+CONFIG_VIRTIO_NET=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_PCI_LEGACY=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_VITESSE_PHY=y
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_CONSOLE_SLEEP=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_WATCHDOG_CORE=y
+# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set
+CONFIG_XEN=y
+CONFIG_XENFS=y
+CONFIG_XEN_AUTO_XLATE=y
+CONFIG_XEN_BACKEND=y
+CONFIG_XEN_BALLOON=y
+# CONFIG_XEN_BLKDEV_BACKEND is not set
+CONFIG_XEN_BLKDEV_FRONTEND=y
+CONFIG_XEN_COMPAT_XENFS=y
+CONFIG_XEN_DEV_EVTCHN=y
+CONFIG_XEN_DOM0=y
+CONFIG_XEN_EFI=y
+CONFIG_XEN_FBDEV_FRONTEND=y
+CONFIG_XEN_GNTDEV=y
+CONFIG_XEN_GRANT_DEV_ALLOC=y
+# CONFIG_XEN_NETDEV_BACKEND is not set
+CONFIG_XEN_NETDEV_FRONTEND=y
+CONFIG_XEN_PRIVCMD=y
+CONFIG_XEN_SCRUB_PAGES=y
+# CONFIG_XEN_SCSI_FRONTEND is not set
+CONFIG_XEN_SYS_HYPERVISOR=y
+# CONFIG_XEN_WDT is not set
+CONFIG_XEN_XENBUS_FRONTEND=y
+CONFIG_XFRM_ALGO=y
+CONFIG_XFRM_USER=y
+CONFIG_XFS_FS=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_XFS_RT=y
+CONFIG_XOR_BLOCKS=y
+CONFIG_XPS=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_BCJ=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_ZLIB_INFLATE=y
diff --git a/target/linux/layerscape/armv8_64b/profiles/00-default.mk b/target/linux/layerscape/armv8_64b/profiles/00-default.mk
new file mode 100644
index 000000000..fc1231dc7
--- /dev/null
+++ b/target/linux/layerscape/armv8_64b/profiles/00-default.mk
@@ -0,0 +1,18 @@
+define Profile/Default
+  NAME:=Default Profile
+  PRIORITY:=1
+endef
+
+define Profile/Default/Description
+  Default package set compatible with most boards.
+endef
+
+DEFAULT_PACKAGES+= \
+  rcw-layerscape-ls1043ardb uboot-layerscape-$(SUBTARGET)-ls1043ardb \
+  fman-layerscape-ls1043ardb \
+  rcw-layerscape-ls1046ardb uboot-layerscape-$(SUBTARGET)-ls1046ardb \
+  fman-layerscape-ls1046ardb \
+  rcw-layerscape-ls1012ardb uboot-layerscape-$(SUBTARGET)-ls1012ardb \
+  kmod-ppfe ppfe-ls1012ardb
+
+$(eval $(call Profile,Default))
diff --git a/target/linux/layerscape/64b/target.mk b/target/linux/layerscape/armv8_64b/target.mk
similarity index 63%
rename from target/linux/layerscape/64b/target.mk
rename to target/linux/layerscape/armv8_64b/target.mk
index 98aa8be9c..274a72944 100644
--- a/target/linux/layerscape/64b/target.mk
+++ b/target/linux/layerscape/armv8_64b/target.mk
@@ -6,9 +6,9 @@
 #
 
 ARCH:=aarch64
-BOARDNAME:=layerscape 64b boards
+BOARDNAME:=ARMv8 64-bit based boards
+KERNELNAME:=Image dtbs
 
 define Target/Description
-	Build firmware images for $(BOARDNAME) SoC devices.
+	Build firmware images for NXP Layerscape ARMv8 64-bit based boards.
 endef
-
diff --git a/target/linux/layerscape/config-4.4 b/target/linux/layerscape/config-4.4
deleted file mode 100644
index 6894af0df..000000000
--- a/target/linux/layerscape/config-4.4
+++ /dev/null
@@ -1,310 +0,0 @@
-CONFIG_AQUANTIA_PHY=y
-CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
-CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
-CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
-CONFIG_ARCH_HAS_SG_CHAIN=y
-CONFIG_ARCH_HAS_TICK_BROADCAST=y
-CONFIG_ARCH_LAYERSCAPE=y
-CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
-CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
-CONFIG_ARCH_SUSPEND_POSSIBLE=y
-CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
-CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
-CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
-CONFIG_ARM_ARCH_TIMER=y
-CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y
-CONFIG_ARM_GIC=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_SD=y
-# CONFIG_CAVIUM_ERRATUM_27456 is not set
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_CLKDEV_LOOKUP=y
-CONFIG_CLKSRC_MMIO=y
-CONFIG_CLKSRC_OF=y
-CONFIG_CLKSRC_PROBE=y
-CONFIG_CLK_QORIQ=y
-CONFIG_CLONE_BACKWARDS=y
-CONFIG_COMMON_CLK=y
-CONFIG_CPU_RMAP=y
-CONFIG_CRC16=y
-CONFIG_CRYPTO_CRC32C=y
-CONFIG_CRYPTO_HASH=y
-CONFIG_CRYPTO_HASH2=y
-CONFIG_CRYPTO_RNG2=y
-CONFIG_CRYPTO_WORKQUEUE=y
-CONFIG_DCACHE_WORD_ACCESS=y
-CONFIG_DTC=y
-CONFIG_EDAC_SUPPORT=y
-CONFIG_EXT4_FS=y
-CONFIG_FAT_FS=y
-CONFIG_FIXED_PHY=y
-CONFIG_FIX_EARLYCON_MEM=y
-CONFIG_FMAN_ARM=y
-# CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN is not set
-# CONFIG_FMAN_P1023 is not set
-# CONFIG_FMAN_P3040_P4080_P5020 is not set
-# CONFIG_FMAN_PFC is not set
-# CONFIG_FMAN_V3H is not set
-# CONFIG_FMAN_V3L is not set
-CONFIG_FRAME_POINTER=y
-CONFIG_FSL_BMAN=y
-CONFIG_FSL_BMAN_CONFIG=y
-CONFIG_FSL_BMAN_DEBUGFS=y
-# CONFIG_FSL_BMAN_TEST is not set
-CONFIG_FSL_DPA=y
-# CONFIG_FSL_DPAA_1588 is not set
-CONFIG_FSL_DPAA_ADVANCED_DRIVERS=y
-# CONFIG_FSL_DPAA_CEETM is not set
-CONFIG_FSL_DPAA_CS_THRESHOLD_10G=0x10000000
-CONFIG_FSL_DPAA_CS_THRESHOLD_1G=0x06000000
-# CONFIG_FSL_DPAA_DBG_LOOP is not set
-# CONFIG_FSL_DPAA_ETH_DEBUG is not set
-CONFIG_FSL_DPAA_ETH_DEBUGFS=y
-CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT=128
-CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD=80
-CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE=y
-CONFIG_FSL_DPAA_GENERIC_DRIVER=y
-# CONFIG_FSL_DPAA_HOOKS is not set
-CONFIG_FSL_DPAA_INGRESS_CS_THRESHOLD=0x10000000
-# CONFIG_FSL_DPAA_MACSEC is not set
-CONFIG_FSL_DPAA_OFFLINE_PORTS=y
-# CONFIG_FSL_DPAA_TS is not set
-CONFIG_FSL_DPA_CAN_WAIT=y
-CONFIG_FSL_DPA_CAN_WAIT_SYNC=y
-# CONFIG_FSL_DPA_CHECKING is not set
-CONFIG_FSL_DPA_PIRQ_FAST=y
-CONFIG_FSL_DPA_PIRQ_SLOW=y
-CONFIG_FSL_DPA_PORTAL_SHARE=y
-CONFIG_FSL_FM_MAX_FRAME_SIZE=1522
-CONFIG_FSL_FM_RX_EXTRA_HEADROOM=64
-CONFIG_FSL_IFC=y
-CONFIG_FSL_QMAN=y
-CONFIG_FSL_QMAN_CI_SCHED_CFG_BMAN_W=2
-CONFIG_FSL_QMAN_CI_SCHED_CFG_RW_W=2
-CONFIG_FSL_QMAN_CI_SCHED_CFG_SRCCIV=4
-CONFIG_FSL_QMAN_CI_SCHED_CFG_SRQ_W=3
-CONFIG_FSL_QMAN_CONFIG=y
-CONFIG_FSL_QMAN_DEBUGFS=y
-CONFIG_FSL_QMAN_FQD_SZ=10
-CONFIG_FSL_QMAN_INIT_TIMEOUT=10
-CONFIG_FSL_QMAN_PFDR_SZ=13
-CONFIG_FSL_QMAN_PIRQ_DQRR_ITHRESH=12
-CONFIG_FSL_QMAN_PIRQ_IPERIOD=100
-CONFIG_FSL_QMAN_PIRQ_MR_ITHRESH=4
-CONFIG_FSL_QMAN_POLL_LIMIT=32
-# CONFIG_FSL_QMAN_TEST is not set
-CONFIG_FSL_SDK_DPAA_ETH=y
-CONFIG_FSL_SDK_FMAN=y
-# CONFIG_FSL_SDK_FMAN_TEST is not set
-CONFIG_FSL_USDPAA=y
-CONFIG_FSL_XGMAC_MDIO=y
-# CONFIG_FTL is not set
-CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_GENERIC_BUG=y
-CONFIG_GENERIC_CLOCKEVENTS=y
-CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
-CONFIG_GENERIC_IDLE_POLL_SETUP=y
-CONFIG_GENERIC_IO=y
-CONFIG_GENERIC_IRQ_SHOW=y
-CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
-CONFIG_GENERIC_MSI_IRQ=y
-CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
-CONFIG_GENERIC_PCI_IOMAP=y
-CONFIG_GENERIC_SCHED_CLOCK=y
-CONFIG_GENERIC_SMP_IDLE_THREAD=y
-CONFIG_GENERIC_STRNCPY_FROM_USER=y
-CONFIG_GENERIC_STRNLEN_USER=y
-# CONFIG_GIANFAR is not set
-CONFIG_GPIOLIB=y
-CONFIG_GPIO_DEVRES=y
-CONFIG_GPIO_GENERIC=y
-CONFIG_GPIO_GENERIC_PLATFORM=y
-CONFIG_GPIO_MPC8XXX=y
-CONFIG_GPIO_SYSFS=y
-CONFIG_HANDLE_DOMAIN_IRQ=y
-CONFIG_HARDIRQS_SW_RESEND=y
-CONFIG_HAS_DMA=y
-CONFIG_HAS_FSL_QBMAN=y
-CONFIG_HAS_IOMEM=y
-CONFIG_HAS_IOPORT_MAP=y
-# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
-CONFIG_HAVE_ARCH_AUDITSYSCALL=y
-CONFIG_HAVE_ARCH_BITREVERSE=y
-CONFIG_HAVE_ARCH_JUMP_LABEL=y
-CONFIG_HAVE_ARCH_KGDB=y
-CONFIG_HAVE_ARCH_PFN_VALID=y
-CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
-CONFIG_HAVE_ARCH_TRACEHOOK=y
-CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
-# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
-CONFIG_HAVE_BPF_JIT=y
-CONFIG_HAVE_CC_STACKPROTECTOR=y
-CONFIG_HAVE_CLK=y
-CONFIG_HAVE_CLK_PREPARE=y
-CONFIG_HAVE_CONTEXT_TRACKING=y
-CONFIG_HAVE_C_RECORDMCOUNT=y
-CONFIG_HAVE_DEBUG_KMEMLEAK=y
-CONFIG_HAVE_DMA_API_DEBUG=y
-CONFIG_HAVE_DMA_ATTRS=y
-CONFIG_HAVE_DMA_CONTIGUOUS=y
-CONFIG_HAVE_DYNAMIC_FTRACE=y
-CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
-CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
-CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
-CONFIG_HAVE_FUNCTION_TRACER=y
-CONFIG_HAVE_GENERIC_DMA_COHERENT=y
-CONFIG_HAVE_GENERIC_RCU_GUP=y
-CONFIG_HAVE_MEMBLOCK=y
-CONFIG_HAVE_NET_DSA=y
-CONFIG_HAVE_PERF_EVENTS=y
-CONFIG_HAVE_PERF_REGS=y
-CONFIG_HAVE_PERF_USER_STACK_DUMP=y
-CONFIG_HAVE_RCU_TABLE_FREE=y
-CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
-CONFIG_HAVE_UID16=y
-CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
-# CONFIG_HUGETLBFS is not set
-CONFIG_I2C=y
-# CONFIG_ACPI_I2C_OPREGION is not set
-CONFIG_I2C_BOARDINFO=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_IMX=y
-CONFIG_I2C_MUX=y
-CONFIG_I2C_MUX_PCA954x=y
-# CONFIG_IMX2_WDT is not set
-CONFIG_INITRAMFS_SOURCE=""
-CONFIG_IOMMU_HELPER=y
-CONFIG_IRQCHIP=y
-CONFIG_IRQ_DOMAIN=y
-CONFIG_IRQ_DOMAIN_HIERARCHY=y
-CONFIG_IRQ_FORCED_THREADING=y
-CONFIG_IRQ_WORK=y
-CONFIG_LIBFDT=y
-CONFIG_LOCK_SPIN_ON_OWNER=y
-CONFIG_LS_SCFG_MSI=y
-CONFIG_LZO_COMPRESS=y
-CONFIG_LZO_DECOMPRESS=y
-CONFIG_MDIO_BOARDINFO=y
-CONFIG_MEMORY=y
-CONFIG_MFD_SYSCON=y
-CONFIG_MMC=y
-CONFIG_MMC_BLOCK=y
-CONFIG_MMC_BLOCK_MINORS=8
-CONFIG_MMC_BLOCK_BOUNCE=y
-CONFIG_MMC_SDHCI=y
-CONFIG_MMC_SDHCI_PLTFM=y
-CONFIG_MMC_SDHCI_OF_ESDHC=y
-# CONFIG_MMC_SDHCI_PCI is not set
-# CONFIG_MMC_TIFM_SD is not set
-CONFIG_MTD_CFI_ADV_OPTIONS=y
-CONFIG_MTD_CFI_BE_BYTE_SWAP=y
-# CONFIG_MTD_CFI_GEOMETRY is not set
-# CONFIG_MTD_CFI_NOSWAP is not set
-CONFIG_MTD_CFI_STAA=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ECC=y
-CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MUTEX_SPIN_ON_OWNER=y
-CONFIG_NEED_DMA_MAP_STATE=y
-CONFIG_NET_FLOW_LIMIT=y
-CONFIG_NLS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NO_BOOTMEM=y
-CONFIG_NO_HZ_COMMON=y
-CONFIG_NO_HZ_IDLE=y
-CONFIG_NR_CPUS=4
-CONFIG_OF=y
-CONFIG_OF_ADDRESS=y
-CONFIG_OF_ADDRESS_PCI=y
-CONFIG_OF_EARLY_FLATTREE=y
-CONFIG_OF_FLATTREE=y
-CONFIG_OF_GPIO=y
-CONFIG_OF_IRQ=y
-CONFIG_OF_MDIO=y
-CONFIG_OF_MTD=y
-CONFIG_OF_NET=y
-CONFIG_OF_PCI=y
-CONFIG_OF_PCI_IRQ=y
-CONFIG_OF_RESERVED_MEM=y
-CONFIG_OLD_SIGSUSPEND3=y
-CONFIG_PCI=y
-CONFIG_PCIEAER=y
-CONFIG_PCIEASPM=y
-# CONFIG_PCIEASPM_DEBUG is not set
-CONFIG_PCIEASPM_DEFAULT=y
-# CONFIG_PCIEASPM_PERFORMANCE is not set
-# CONFIG_PCIEASPM_POWERSAVE is not set
-CONFIG_PCIEPORTBUS=y
-CONFIG_PCIE_DW=y
-CONFIG_PCI_DOMAINS=y
-CONFIG_PCI_DOMAINS_GENERIC=y
-CONFIG_PCI_LAYERSCAPE=y
-CONFIG_PCI_MSI=y
-CONFIG_PCI_MSI_IRQ_DOMAIN=y
-CONFIG_PERF_USE_VMALLOC=y
-CONFIG_PGTABLE_LEVELS=3
-CONFIG_PHYLIB=y
-CONFIG_PHYS_ADDR_T_64BIT=y
-CONFIG_POWER_RESET=y
-CONFIG_POWER_RESET_LAYERSCAPE=y
-# CONFIG_POWER_RESET_XGENE is not set
-CONFIG_POWER_SUPPLY=y
-CONFIG_QMAN_CEETM_UPDATE_PERIOD=1000
-CONFIG_RAS=y
-CONFIG_RATIONAL=y
-CONFIG_RCU_STALL_COMMON=y
-CONFIG_REALTEK_PHY=y
-CONFIG_REGMAP=y
-CONFIG_REGMAP_MMIO=y
-CONFIG_RFS_ACCEL=y
-CONFIG_RPS=y
-CONFIG_RWSEM_SPIN_ON_OWNER=y
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-# CONFIG_SCHED_INFO is not set
-CONFIG_SCSI=y
-CONFIG_SERIAL_8250_FSL=y
-CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SMP=y
-CONFIG_SPARSE_IRQ=y
-CONFIG_SQUASHFS=y
-# CONFIG_SQUASHFS_FILE_CACHE is not set
-CONFIG_SQUASHFS_FILE_DIRECT=y
-# CONFIG_SQUASHFS_DECOMP_SINGLE is not set
-# CONFIG_SQUASHFS_DECOMP_MULTI is not set
-CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
-# CONFIG_SQUASHFS_XATTR is not set
-# CONFIG_SQUASHFS_ZLIB is not set
-# CONFIG_SQUASHFS_LZ4 is not set
-# CONFIG_SQUASHFS_LZO is not set
-CONFIG_SQUASHFS_XZ=y
-# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
-# CONFIG_SQUASHFS_EMBEDDED is not set
-CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
-CONFIG_SRCU=y
-CONFIG_SWIOTLB=y
-CONFIG_SYS_SUPPORTS_HUGETLBFS=y
-CONFIG_THERMAL=y
-CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
-CONFIG_THERMAL_GOV_STEP_WISE=y
-CONFIG_THERMAL_OF=y
-CONFIG_TICK_CPU_ACCOUNTING=y
-CONFIG_TREE_RCU=y
-CONFIG_USB_SUPPORT=y
-CONFIG_USB=y
-CONFIG_VITESSE_PHY=y
-CONFIG_XPS=y
-CONFIG_ZLIB_INFLATE=y
-CONFIG_MTD_SPI_NOR=y
-CONFIG_SPI_FSL_QUADSPI=y
-CONFIG_FSL_MC_BUS=y
-CONFIG_FSL_MC_RESTOOL=y
-CONFIG_FSL_MC_DPIO=y
-# CONFIG_FSL_QBMAN_DEBUG is not set
-CONFIG_FSL_DPAA2=y
-CONFIG_FSL_DPAA2_ETH=y
-# CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE is not set
-CONFIG_FSL_DPAA2_MAC=y
-# CONFIG_FSL_DPAA2_MAC_NETDEVS is not set
-CONFIG_FSL_DPAA2_EVB=y
-CONFIG_FSL_DPAA2_ETHSW=y
diff --git a/target/linux/layerscape/image/Makefile b/target/linux/layerscape/image/Makefile
index 107a3e6ee..5577b1623 100644
--- a/target/linux/layerscape/image/Makefile
+++ b/target/linux/layerscape/image/Makefile
@@ -33,15 +33,17 @@ endef
 
 define Device/Default
   PROFILES = Default
   FILESYSTEMS := squashfs
-  KERNEL := kernel-bin | gzip | uImage gzip
   DEVICE_DTS :=
   IMAGES = firmware.bin
-ifeq ($(SUBTARGET),64b)
+ifeq ($(SUBTARGET),armv8_64b)
+  KERNEL := kernel-bin | gzip | uImage gzip
   KERNEL_LOADADDR = 0x80080000
   KERNEL_ENTRY_POINT = 0x80080000
 endif
-ifeq ($(SUBTARGET),32b)
+ifeq ($(SUBTARGET),armv8_32b)
+  KERNEL := kernel-bin | uImage none
+  KERNEL_NAME := zImage
   KERNEL_LOADADDR = 0x80008000
   KERNEL_ENTRY_POINT = 0x80008000
 endif
@@ -50,74 +52,61 @@ endef
 define Device/ls1043ardb
   DEVICE_TITLE := ls1043ardb-$(SUBTARGET)
   DEVICE_PACKAGES += rcw-layerscape-ls1043ardb uboot-layerscape-$(SUBTARGET)-ls1043ardb fman-layerscape-ls1043ardb
-ifeq ($(SUBTARGET),64b)
-  DEVICE_DTS = freescale/fsl-ls1043a-rdb
-endif
-ifeq ($(SUBTARGET),32b)
-  DEVICE_DTS = ../../../arm64/boot/dts/freescale/fsl-ls1043a-rdb
-endif
-  IMAGE/firmware.bin = append-ls-rcw $(1) | pad-to 1M | append-ls-uboot $(1) | pad-to 3M | \
-	append-ls-fman $(1) | pad-to 4M | append-ls-dtb $$(DEVICE_DTS) | pad-to 5M | \
-	append-kernel | pad-to 10M | append-rootfs | pad-rootfs | check-size 67108865
+  DEVICE_DTS = ../../../arm64/boot/dts/freescale/fsl-ls1043a-rdb-sdk
+  IMAGE/firmware.bin = append-ls-rcw $(1) | pad-to 1M | \
+	append-ls-uboot $(1) | pad-to 9M | \
+	append-ls-fman $(1) | pad-to 15M | \
+	append-ls-dtb $$(DEVICE_DTS) | pad-to 16M | \
+	append-kernel | pad-to 32M | \
+	append-rootfs | pad-rootfs | check-size 67108865
 endef
 TARGET_DEVICES += ls1043ardb
 
 define Device/ls1046ardb
   DEVICE_TITLE := ls1046ardb-$(SUBTARGET)
   DEVICE_PACKAGES += rcw-layerscape-ls1046ardb uboot-layerscape-$(SUBTARGET)-ls1046ardb fman-layerscape-ls1046ardb
-ifeq ($(SUBTARGET),64b)
-  DEVICE_DTS = freescale/fsl-ls1046a-rdb
-endif
-ifeq ($(SUBTARGET),32b)
-  DEVICE_DTS = ../../../arm64/boot/dts/freescale/fsl-ls1046a-rdb
-endif
-  IMAGE/firmware.bin = append-ls-rcw $(1) | pad-to 1M | append-ls-uboot $(1) | pad-to 3M | \
-	append-ls-fman $(1) | pad-to 4M | append-ls-dtb $$(DEVICE_DTS) | pad-to 5M | \
-	append-kernel | pad-to 10M | append-ls-rootfs-ext4 $(1) 22M | check-size 33554433
+  DEVICE_DTS = ../../../arm64/boot/dts/freescale/fsl-ls1046a-rdb-sdk
+  IMAGE/firmware.bin = append-ls-rcw $(1) | pad-to 1M | \
+	append-ls-uboot $(1) | pad-to 9M | \
+	append-ls-fman $(1) | pad-to 15M | \
+	append-ls-dtb $$(DEVICE_DTS) | pad-to 16M | \
+	append-kernel | pad-to 32M | \
+	append-ls-rootfs-ext4 $(1) 22M | check-size 67108865
 endef
 TARGET_DEVICES += ls1046ardb
 
 define Device/ls1012ardb
   DEVICE_TITLE := ls1012ardb-$(SUBTARGET)
   DEVICE_PACKAGES += rcw-layerscape-ls1012ardb uboot-layerscape-$(SUBTARGET)-ls1012ardb kmod-ppfe ppfe-ls1012ardb
-ifeq ($(SUBTARGET),64b)
-  DEVICE_DTS = freescale/fsl-ls1012a-rdb
-endif
-ifeq ($(SUBTARGET),32b)
   DEVICE_DTS = ../../../arm64/boot/dts/freescale/fsl-ls1012a-rdb
-endif
-  IMAGE/firmware.bin = append-ls-rcw $(1) | pad-to 1M | append-ls-uboot $(1) | pad-to 3M | \
-	append-ls-dtb $$(DEVICE_DTS) | pad-to 4M | append-kernel | pad-to 9M | \
-	append-ls-rootfs-ext4 $(1) 23M | check-size 33554433
+  IMAGE/firmware.bin = append-ls-rcw $(1) | pad-to 1M | \
+	append-ls-uboot $(1) | pad-to 15M | \
+	append-ls-dtb $$(DEVICE_DTS) | pad-to 16M | \
+	append-kernel | pad-to 32M | \
+	append-ls-rootfs-ext4 $(1) 23M | check-size 67108865
 endef
 TARGET_DEVICES += ls1012ardb
 
+ifeq ($(SUBTARGET),armv8_64b)
 define Device/ls1088ardb
   DEVICE_TITLE := ls1088ardb-$(SUBTARGET)
   DEVICE_PACKAGES += rcw-layerscape-ls1088ardb uboot-layerscape-$(SUBTARGET)-ls1088ardb mc-binary-ls1088ardb
-ifeq ($(SUBTARGET),64b)
-  DEVICE_DTS = freescale/fsl-ls1088a-rdb
-endif
-ifeq ($(SUBTARGET),32b)
   DEVICE_DTS = ../../../arm64/boot/dts/freescale/fsl-ls1088a-rdb
-endif
-  IMAGE/firmware.bin = append-ls-dtb $$(DEVICE_DTS) | pad-to 1M | append-kernel | pad-to 6M | \
-	append-ls-rootfs-ext4 $(1) 17M | check-size 24117249
+  IMAGE/firmware.bin = append-ls-dtb $$(DEVICE_DTS) | pad-to 1M | \
+	append-kernel | pad-to 17M | \
+	append-ls-rootfs-ext4 $(1) 17M | check-size 51380225
 endef
 TARGET_DEVICES += ls1088ardb
 
 define Device/ls2088ardb
   DEVICE_TITLE := ls2088ardb-$(SUBTARGET)
   DEVICE_PACKAGES += rcw-layerscape-ls2088ardb uboot-layerscape-$(SUBTARGET)-ls2088ardb mc-binary-ls2088ardb
-ifeq ($(SUBTARGET),64b)
-  DEVICE_DTS = freescale/fsl-ls2088a-rdb
-endif
-ifeq ($(SUBTARGET),32b)
   DEVICE_DTS = ../../../arm64/boot/dts/freescale/fsl-ls2088a-rdb
-endif
-  IMAGE/firmware.bin = append-ls-dtb $$(DEVICE_DTS) | pad-to 1M | append-kernel | pad-to 6M | \
-	append-rootfs | pad-rootfs | check-size 24117249
+  IMAGE/firmware.bin = append-ls-dtb $$(DEVICE_DTS) | pad-to 1M | \
+	append-kernel | pad-to 17M | \
+	append-rootfs | pad-rootfs | check-size 51380225
 endef
 TARGET_DEVICES += ls2088ardb
+endif
 
 $(eval $(call BuildImage))
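For orientation, the new ls1043ardb recipe above lays firmware.bin out at fixed flash offsets: RCW at 0, U-Boot at 1M, FMan firmware at 9M, DTB at 15M, kernel at 16M, rootfs at 32M, with check-size 67108865 as the limit. That value is 64 MiB + 1, which suggests check-size takes an exclusive upper bound one byte above the real 64 MiB budget — an assumption, not something this diff states. A throwaway C sketch of that arithmetic, with the offsets hard-coded from the recipe rather than generated from it:

	/* layout_check.c -- hypothetical helper, not part of the build system */
	#include <stdio.h>

	#define MiB (1024L * 1024L)

	int main(void)
	{
		const struct { const char *part; long off; } layout[] = {
			{ "rcw",    0 * MiB }, { "u-boot", 1 * MiB },
			{ "fman",   9 * MiB }, { "dtb",   15 * MiB },
			{ "kernel", 16 * MiB }, { "rootfs", 32 * MiB },
		};
		long limit = 67108865;	/* check-size argument, assumed to mean budget + 1 */
		unsigned i;

		for (i = 0; i < sizeof(layout) / sizeof(layout[0]); i++)
			printf("%-8s @ 0x%08lx\n", layout[i].part, layout[i].off);
		printf("budget: %ld bytes = %ld MiB\n", limit - 1, (limit - 1) / MiB);
		return 0;
	}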
diff --git a/target/linux/layerscape/patches-4.4/0051-PCI-designware-Ensure-ATU-is-enabled-before-IO-conf-.patch b/target/linux/layerscape/patches-4.4/0051-PCI-designware-Ensure-ATU-is-enabled-before-IO-conf-.patch
deleted file mode 100644
index c670dac4a..000000000
--- a/target/linux/layerscape/patches-4.4/0051-PCI-designware-Ensure-ATU-is-enabled-before-IO-conf-.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From 7f434723cdb6823443330cd4847d5c3b8dd30bd7 Mon Sep 17 00:00:00 2001
-From: Stanimir Varbanov
-Date: Fri, 18 Dec 2015 14:38:55 +0200
-Subject: [PATCH 51/70] PCI: designware: Ensure ATU is enabled before IO/conf
- space accesses
-
-Read back the ATU CR2 register to ensure ATU programming is effective
-before any subsequent I/O or config space accesses.
-
-Without this, PCI device enumeration is unreliable.
-
-[bhelgaas: changelog, comment]
-Signed-off-by: Stanimir Varbanov
-Signed-off-by: Bjorn Helgaas
-Acked-by: Pratyush Anand
----
- drivers/pci/host/pcie-designware.c | 8 ++++++++
- 1 file changed, 8 insertions(+)
-
---- a/drivers/pci/host/pcie-designware.c
-+++ b/drivers/pci/host/pcie-designware.c
-@@ -154,6 +154,8 @@ static int dw_pcie_wr_own_conf(struct pc
- static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
- 		int type, u64 cpu_addr, u64 pci_addr, u32 size)
- {
-+	u32 val;
-+
- 	dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
- 			  PCIE_ATU_VIEWPORT);
- 	dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr), PCIE_ATU_LOWER_BASE);
-@@ -164,6 +166,12 @@ static void dw_pcie_prog_outbound_atu(st
- 	dw_pcie_writel_rc(pp, upper_32_bits(pci_addr), PCIE_ATU_UPPER_TARGET);
- 	dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
- 	dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
-+
-+	/*
-+	 * Make sure ATU enable takes effect before any subsequent config
-+	 * and I/O accesses.
-+	 */
-+	dw_pcie_readl_rc(pp, PCIE_ATU_CR2, &val);
- }
- 
- static struct irq_chip dw_msi_irq_chip = {
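The read-back in the patch above is the standard fix for posted MMIO writes: the ATU-enable write can still be sitting in a write buffer when the first config access goes out, and a read from the same device forces the write to complete first. A minimal sketch of the idiom, using the accessor names and register constants from the patch but otherwise hypothetical code:

	/* hypothetical illustration, not the driver's actual code */
	static void atu_enable_and_flush(struct pcie_port *pp)
	{
		u32 val;

		dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);	/* posted write */
		dw_pcie_readl_rc(pp, PCIE_ATU_CR2, &val);		/* read back: write has landed */
	}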
diff --git a/target/linux/layerscape/patches-4.4/0052-PCI-designware-Simplify-control-flow.patch b/target/linux/layerscape/patches-4.4/0052-PCI-designware-Simplify-control-flow.patch
deleted file mode 100644
index 1eef2b61f..000000000
--- a/target/linux/layerscape/patches-4.4/0052-PCI-designware-Simplify-control-flow.patch
+++ /dev/null
@@ -1,121 +0,0 @@
-From 610b32220391c9d271290bdf8f2b8fe1cf8da9a0 Mon Sep 17 00:00:00 2001
-From: Bjorn Helgaas
-Date: Tue, 5 Jan 2016 15:48:11 -0600
-Subject: [PATCH 52/70] PCI: designware: Simplify control flow
-
-Return values immediately when possible to simplify the control flow.
-
-No functional change intended. Folded in unused variable removal as
-pointed out by Fabio Estevam, Arnd Bergmann, and Thierry Reding.
-
-Signed-off-by: Bjorn Helgaas
-Acked-by: Pratyush Anand
----
- drivers/pci/host/pcie-designware.c | 54 ++++++++++++------------------------
- 1 file changed, 18 insertions(+), 36 deletions(-)
-
---- a/drivers/pci/host/pcie-designware.c
-+++ b/drivers/pci/host/pcie-designware.c
-@@ -128,27 +128,19 @@ static inline void dw_pcie_writel_rc(str
- static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
- 		u32 *val)
- {
--	int ret;
--
- 	if (pp->ops->rd_own_conf)
--		ret = pp->ops->rd_own_conf(pp, where, size, val);
--	else
--		ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val);
-+		return pp->ops->rd_own_conf(pp, where, size, val);
- 
--	return ret;
-+	return dw_pcie_cfg_read(pp->dbi_base + where, size, val);
- }
- 
- static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
- 		u32 val)
- {
--	int ret;
--
- 	if (pp->ops->wr_own_conf)
--		ret = pp->ops->wr_own_conf(pp, where, size, val);
--	else
--		ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val);
-+		return pp->ops->wr_own_conf(pp, where, size, val);
- 
--	return ret;
-+	return dw_pcie_cfg_write(pp->dbi_base + where, size, val);
- }
- 
- static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
-@@ -392,8 +384,8 @@ int dw_pcie_link_up(struct pcie_port *pp
- {
- 	if (pp->ops->link_up)
- 		return pp->ops->link_up(pp);
--	else
--		return 0;
-+
-+	return 0;
- }
- 
- static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
-@@ -666,46 +658,36 @@ static int dw_pcie_rd_conf(struct pci_bu
- 		int size, u32 *val)
- {
- 	struct pcie_port *pp = bus->sysdata;
--	int ret;
- 
- 	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
- 		*val = 0xffffffff;
- 		return PCIBIOS_DEVICE_NOT_FOUND;
- 	}
- 
--	if (bus->number != pp->root_bus_nr)
--		if (pp->ops->rd_other_conf)
--			ret = pp->ops->rd_other_conf(pp, bus, devfn,
--					where, size, val);
--		else
--			ret = dw_pcie_rd_other_conf(pp, bus, devfn,
--					where, size, val);
--	else
--		ret = dw_pcie_rd_own_conf(pp, where, size, val);
-+	if (bus->number == pp->root_bus_nr)
-+		return dw_pcie_rd_own_conf(pp, where, size, val);
- 
--	return ret;
-+	if (pp->ops->rd_other_conf)
-+		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);
-+
-+	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
- }
- 
- static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
- 		int where, int size, u32 val)
- {
- 	struct pcie_port *pp = bus->sysdata;
--	int ret;
- 
- 	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
- 		return PCIBIOS_DEVICE_NOT_FOUND;
- 
--	if (bus->number != pp->root_bus_nr)
--		if (pp->ops->wr_other_conf)
--			ret = pp->ops->wr_other_conf(pp, bus, devfn,
--					where, size, val);
--		else
--			ret = dw_pcie_wr_other_conf(pp, bus, devfn,
--					where, size, val);
--	else
--		ret = dw_pcie_wr_own_conf(pp, where, size, val);
-+	if (bus->number == pp->root_bus_nr)
-+		return dw_pcie_wr_own_conf(pp, where, size, val);
- 
--	return ret;
-+	if (pp->ops->wr_other_conf)
-+		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);
-+
-+	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
- }
- 
- static struct pci_ops dw_pcie_ops = {
diff --git a/target/linux/layerscape/patches-4.4/0053-PCI-designware-Make-config-accessor-override-checkin.patch b/target/linux/layerscape/patches-4.4/0053-PCI-designware-Make-config-accessor-override-checkin.patch
deleted file mode 100644
index 299e87ebe..000000000
--- a/target/linux/layerscape/patches-4.4/0053-PCI-designware-Make-config-accessor-override-checkin.patch
+++ /dev/null
@@ -1,71 +0,0 @@
-From 6882f9eef932e6f5cc3c57115e3d7d4b5bc19662 Mon Sep 17 00:00:00 2001
-From: Bjorn Helgaas
-Date: Tue, 5 Jan 2016 15:56:30 -0600
-Subject: [PATCH 53/70] PCI: designware: Make config accessor override
- checking symmetric
-
-Drivers based on the DesignWare core can override the config read accessors
-by supplying rd_own_conf() and rd_other_conf() function pointers.
-dw_pcie_rd_conf() calls dw_pcie_rd_own_conf() (for accesses to the root
-bus) or dw_pcie_rd_other_conf():
-
-  dw_pcie_rd_conf
-    dw_pcie_rd_own_conf       # if on root bus
-    dw_pcie_rd_other_conf     # if not on root bus
-
-Previously we checked for rd_other_conf() directly in dw_pcie_rd_conf(),
-but we checked for rd_own_conf() in dw_pcie_rd_own_conf().
-
-Check for rd_other_conf() in dw_pcie_rd_other_conf() to make this symmetric
-with the rd_own_conf() checking, and similarly for the write path.
-
-No functional change intended.
-
-Signed-off-by: Bjorn Helgaas
-Acked-by: Pratyush Anand
----
- drivers/pci/host/pcie-designware.c | 12 ++++++------
- 1 file changed, 6 insertions(+), 6 deletions(-)
-
---- a/drivers/pci/host/pcie-designware.c
-+++ b/drivers/pci/host/pcie-designware.c
-@@ -571,6 +571,9 @@ static int dw_pcie_rd_other_conf(struct
- 	u64 cpu_addr;
- 	void __iomem *va_cfg_base;
- 
-+	if (pp->ops->rd_other_conf)
-+		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);
-+
- 	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
- 		 PCIE_ATU_FUNC(PCI_FUNC(devfn));
- 
-@@ -605,6 +608,9 @@ static int dw_pcie_wr_other_conf(struct
- 	u64 cpu_addr;
- 	void __iomem *va_cfg_base;
- 
-+	if (pp->ops->wr_other_conf)
-+		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);
-+
- 	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
- 		 PCIE_ATU_FUNC(PCI_FUNC(devfn));
- 
-@@ -667,9 +673,6 @@ static int dw_pcie_rd_conf(struct pci_bu
- 	if (bus->number == pp->root_bus_nr)
- 		return dw_pcie_rd_own_conf(pp, where, size, val);
- 
--	if (pp->ops->rd_other_conf)
--		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);
--
- 	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
- }
- 
-@@ -684,9 +687,6 @@ static int dw_pcie_wr_conf(struct pci_bu
- 	if (bus->number == pp->root_bus_nr)
- 		return dw_pcie_wr_own_conf(pp, where, size, val);
- 
--	if (pp->ops->wr_other_conf)
--		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);
--
- 	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
- }
- 
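The symmetry this patch restores is worth spelling out: afterwards, every accessor checks for its own sub-driver override as its first step, so the top-level dispatch reduces to a plain root-bus test. Sketched in hypothetical form (signatures follow the patch, the body is illustrative only):

	/* hypothetical shape of each accessor after the patch above */
	static int rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
	{
		if (pp->ops->rd_other_conf)	/* override checked here, not by the caller */
			return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

		/* ... otherwise fall through to the generic ATU-based access ... */
		return 0;
	}

The same pattern holds for rd_own_conf, wr_own_conf, and wr_other_conf, which is what makes the checking symmetric.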
diff --git a/target/linux/layerscape/patches-4.4/0054-PCI-designware-Explain-why-we-don-t-program-ATU-for-.patch b/target/linux/layerscape/patches-4.4/0054-PCI-designware-Explain-why-we-don-t-program-ATU-for-.patch
deleted file mode 100644
index f48150dab..000000000
--- a/target/linux/layerscape/patches-4.4/0054-PCI-designware-Explain-why-we-don-t-program-ATU-for-.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From 481b1bc4ce0d58107887558342e50d6323a9601d Mon Sep 17 00:00:00 2001
-From: Jisheng Zhang
-Date: Thu, 7 Jan 2016 14:12:38 +0800
-Subject: [PATCH 54/70] PCI: designware: Explain why we don't program ATU for
- some platforms
-
-Some platforms don't support ATU, e.g., pci-keystone.c. These platforms
-use their own address translation component rather than ATU, and they
-provide the rd_other_conf and wr_other_conf methods to program the
-translation component and perform the access.
-
-Add a comment to explain why we don't program the ATU for these platforms.
-
-[bhelgaas: changelog]
-Signed-off-by: Jisheng Zhang
-Signed-off-by: Bjorn Helgaas
----
- drivers/pci/host/pcie-designware.c | 5 +++++
- 1 file changed, 5 insertions(+)
-
---- a/drivers/pci/host/pcie-designware.c
-+++ b/drivers/pci/host/pcie-designware.c
-@@ -517,6 +517,11 @@ int dw_pcie_host_init(struct pcie_port *
- 	if (pp->ops->host_init)
- 		pp->ops->host_init(pp);
- 
-+	/*
-+	 * If the platform provides ->rd_other_conf, it means the platform
-+	 * uses its own address translation component rather than ATU, so
-+	 * we should not program the ATU here.
-+	 */
- 	if (!pp->ops->rd_other_conf)
- 		dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
- 					  PCIE_ATU_TYPE_MEM, pp->mem_base,
diff --git a/target/linux/layerscape/patches-4.4/0055-PCI-designware-Remove-PCI_PROBE_ONLY-handling.patch b/target/linux/layerscape/patches-4.4/0055-PCI-designware-Remove-PCI_PROBE_ONLY-handling.patch
deleted file mode 100644
index 13639ebeb..000000000
--- a/target/linux/layerscape/patches-4.4/0055-PCI-designware-Remove-PCI_PROBE_ONLY-handling.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From ee2a430c1691d0bac3098e8db3c29d8f023b04c2 Mon Sep 17 00:00:00 2001
-From: Lorenzo Pieralisi
-Date: Fri, 29 Jan 2016 11:29:32 +0000
-Subject: [PATCH 55/70] PCI: designware: Remove PCI_PROBE_ONLY handling
-
-The PCIe designware host driver is not used in system configurations
-requiring the PCI_PROBE_ONLY flag to be set to prevent resources
-assignment, therefore the driver code handling the flag can be removed
-from the kernel.
-
-Signed-off-by: Lorenzo Pieralisi
-Signed-off-by: Bjorn Helgaas
-Acked-by: Pratyush Anand
-Acked-by: Jingoo Han
-Cc: Arnd Bergmann
-Cc: Gabriele Paoloni
-Cc: Zhou Wang
----
- drivers/pci/host/pcie-designware.c | 10 ++++------
- 1 file changed, 4 insertions(+), 6 deletions(-)
-
---- a/drivers/pci/host/pcie-designware.c
-+++ b/drivers/pci/host/pcie-designware.c
-@@ -556,13 +556,11 @@ int dw_pcie_host_init(struct pcie_port *
- 	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
- #endif
- 
--	if (!pci_has_flag(PCI_PROBE_ONLY)) {
--		pci_bus_size_bridges(bus);
--		pci_bus_assign_resources(bus);
-+	pci_bus_size_bridges(bus);
-+	pci_bus_assign_resources(bus);
- 
--		list_for_each_entry(child, &bus->children, node)
--			pcie_bus_configure_settings(child);
--	}
-+	list_for_each_entry(child, &bus->children, node)
-+		pcie_bus_configure_settings(child);
- 
- 	pci_bus_add_devices(bus);
- 	return 0;
diff --git a/target/linux/layerscape/patches-4.4/0056-PCI-designware-Add-generic-dw_pcie_wait_for_link.patch b/target/linux/layerscape/patches-4.4/0056-PCI-designware-Add-generic-dw_pcie_wait_for_link.patch
deleted file mode 100644
index b13aba33d..000000000
--- a/target/linux/layerscape/patches-4.4/0056-PCI-designware-Add-generic-dw_pcie_wait_for_link.patch
+++ /dev/null
@@ -1,249 +0,0 @@
-From f0c3f31a8bd81b8e7354a187c49200f3ce52740d Mon Sep 17 00:00:00 2001
-From: Joao Pinto
-Date: Thu, 10 Mar 2016 14:44:35 -0600
-Subject: [PATCH 56/70] PCI: designware: Add generic dw_pcie_wait_for_link()
-
-commit 886bc5ceb5cc3ad4b219502d72b277e3c3255a32 upstream
-[context adjustment]
-[remove drivers/pci/host/pcie-qcom.c related changes]
-
-Several DesignWare-based drivers (dra7xx, exynos, imx6, keystone, qcom, and
-spear13xx) had similar loops waiting for the link to come up.
-
-Add a generic dw_pcie_wait_for_link() for use by all these drivers so the
-waiting is done consistently, e.g., always using usleep_range() rather than
-mdelay() and using similar timeouts and retry counts.
-
-Note that this changes the Keystone link training/wait for link strategy,
-so we initiate link training, then wait longer for the link to come up
-before re-initiating link training.
-
-[bhelgaas: changelog, split into its own patch, update pci-keystone.c, pcie-qcom.c]
-Signed-off-by: Joao Pinto
-Signed-off-by: Bjorn Helgaas
-Acked-by: Pratyush Anand
-Integrated-by: Jiang Yutang
----
- drivers/pci/host/pci-dra7xx.c      | 11 +----------
- drivers/pci/host/pci-exynos.c      | 13 +++----------
- drivers/pci/host/pci-imx6.c        | 13 ++++---------
- drivers/pci/host/pci-keystone.c    | 10 ++++------
- drivers/pci/host/pcie-designware.c | 19 +++++++++++++++++++
- drivers/pci/host/pcie-designware.h |  6 ++++++
- drivers/pci/host/pcie-spear13xx.c  | 14 +-------------
- 7 files changed, 38 insertions(+), 48 deletions(-)
-
---- a/drivers/pci/host/pci-dra7xx.c
-+++ b/drivers/pci/host/pci-dra7xx.c
-@@ -10,7 +10,6 @@
-  * published by the Free Software Foundation.
-  */
- 
--#include
- #include
- #include
- #include
-@@ -108,7 +107,6 @@ static int dra7xx_pcie_establish_link(st
- {
- 	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
- 	u32 reg;
--	unsigned int retries;
- 
- 	if (dw_pcie_link_up(pp)) {
- 		dev_err(pp->dev, "link is already up\n");
-@@ -119,14 +117,7 @@ static int dra7xx_pcie_establish_link(st
- 	reg |= LTSSM_EN;
- 	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
- 
--	for (retries = 0; retries < 1000; retries++) {
--		if (dw_pcie_link_up(pp))
--			return 0;
--		usleep_range(10, 20);
--	}
--
--	dev_err(pp->dev, "link is not up\n");
--	return -EINVAL;
-+	return dw_pcie_wait_for_link(pp);
- }
- 
- static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
---- a/drivers/pci/host/pci-exynos.c
-+++ b/drivers/pci/host/pci-exynos.c
-@@ -318,7 +318,6 @@ static int exynos_pcie_establish_link(st
- {
- 	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
- 	u32 val;
--	unsigned int retries;
- 
- 	if (dw_pcie_link_up(pp)) {
- 		dev_err(pp->dev, "Link already up\n");
-@@ -357,13 +356,8 @@ static int exynos_pcie_establish_link(st
- 			  PCIE_APP_LTSSM_ENABLE);
- 
- 	/* check if the link is up or not */
--	for (retries = 0; retries < 10; retries++) {
--		if (dw_pcie_link_up(pp)) {
--			dev_info(pp->dev, "Link up\n");
--			return 0;
--		}
--		mdelay(100);
--	}
-+	if (!dw_pcie_wait_for_link(pp))
-+		return 0;
- 
- 	while (exynos_phy_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED) == 0) {
- 		val = exynos_blk_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED);
-@@ -372,8 +366,7 @@ static int exynos_pcie_establish_link(st
- 	/* power off phy */
- 	exynos_pcie_power_off_phy(pp);
- 
--	dev_err(pp->dev, "PCIe Link Fail\n");
--	return -EINVAL;
-+	return -ETIMEDOUT;
- }
- 
- static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
---- a/drivers/pci/host/pci-imx6.c
-+++ b/drivers/pci/host/pci-imx6.c
-@@ -330,19 +330,14 @@ static void imx6_pcie_init_phy(struct pc
- 
- static int imx6_pcie_wait_for_link(struct pcie_port *pp)
- {
--	unsigned int retries;
-+	/* check if the link is up or not */
-+	if (!dw_pcie_wait_for_link(pp))
-+		return 0;
- 
--	for (retries = 0; retries < 200; retries++) {
--		if (dw_pcie_link_up(pp))
--			return 0;
--		usleep_range(100, 1000);
--	}
--
--	dev_err(pp->dev, "phy link never came up\n");
- 	dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
- 		readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
- 		readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
--	return -EINVAL;
-+	return -ETIMEDOUT;
- }
- 
- static int imx6_pcie_wait_for_speed_change(struct pcie_port *pp)
---- a/drivers/pci/host/pci-keystone.c
-+++ b/drivers/pci/host/pci-keystone.c
-@@ -97,17 +97,15 @@ static int ks_pcie_establish_link(struct
- 		return 0;
- 	}
- 
--	ks_dw_pcie_initiate_link_train(ks_pcie);
- 	/* check if the link is up or not */
--	for (retries = 0; retries < 200; retries++) {
--		if (dw_pcie_link_up(pp))
--			return 0;
--		usleep_range(100, 1000);
-+	for (retries = 0; retries < 5; retries++) {
-+		ks_dw_pcie_initiate_link_train(ks_pcie);
-+		if (!dw_pcie_wait_for_link(pp))
-+			return 0;
- 	}
- 
- 	dev_err(pp->dev, "phy link never came up\n");
--	return -EINVAL;
-+	return -ETIMEDOUT;
- }
- 
- static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
---- a/drivers/pci/host/pcie-designware.c
-+++ b/drivers/pci/host/pcie-designware.c
-@@ -22,6 +22,7 @@
- #include
- #include
- #include
-+#include
- 
- #include "pcie-designware.h"
- 
-@@ -380,6 +381,24 @@ static struct msi_controller dw_pcie_msi
- 	.teardown_irq = dw_msi_teardown_irq,
- };
- 
-+int dw_pcie_wait_for_link(struct pcie_port *pp)
-+{
-+	int retries;
-+
-+	/* check if the link is up or not */
-+	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
-+		if (dw_pcie_link_up(pp)) {
-+			dev_info(pp->dev, "link up\n");
-+			return 0;
-+		}
-+		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
-+	}
-+
-+	dev_err(pp->dev, "phy link never came up\n");
-+
-+	return -ETIMEDOUT;
-+}
-+
- int dw_pcie_link_up(struct pcie_port *pp)
- {
- 	if (pp->ops->link_up)
---- a/drivers/pci/host/pcie-designware.h
-+++ b/drivers/pci/host/pcie-designware.h
-@@ -22,6 +22,11 @@
- #define MAX_MSI_IRQS			32
- #define MAX_MSI_CTRLS			(MAX_MSI_IRQS / 32)
- 
-+/* Parameters for the waiting for link up routine */
-+#define LINK_WAIT_MAX_RETRIES		10
-+#define LINK_WAIT_USLEEP_MIN		90000
-+#define LINK_WAIT_USLEEP_MAX		100000
-+
- struct pcie_port {
- 	struct device *dev;
- 	u8 root_bus_nr;
-@@ -76,6 +81,7 @@ int dw_pcie_cfg_read(void __iomem *addr,
- int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val);
- irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
- void dw_pcie_msi_init(struct pcie_port *pp);
-+int dw_pcie_wait_for_link(struct pcie_port *pp);
- int dw_pcie_link_up(struct pcie_port *pp);
- void dw_pcie_setup_rc(struct pcie_port *pp);
- int dw_pcie_host_init(struct pcie_port *pp);
---- a/drivers/pci/host/pcie-spear13xx.c
-+++ b/drivers/pci/host/pcie-spear13xx.c
-@@ -13,7 +13,6 @@
-  */
- 
- #include
--#include
- #include
- #include
- #include
-@@ -149,7 +148,6 @@ static int spear13xx_pcie_establish_link
- 	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
- 	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
- 	u32 exp_cap_off = EXP_CAP_ID_OFFSET;
--	unsigned int retries;
- 
- 	if (dw_pcie_link_up(pp)) {
- 		dev_err(pp->dev, "link already up\n");
-@@ -200,17 +198,7 @@ static int spear13xx_pcie_establish_link
- 			| ((u32)1 << REG_TRANSLATION_ENABLE),
- 			&app_reg->app_ctrl_0);
- 
--	/* check if the link is up or not */
--	for (retries = 0; retries < 10; retries++) {
--		if (dw_pcie_link_up(pp)) {
--			dev_info(pp->dev, "link up\n");
--			return 0;
--		}
--		mdelay(100);
--	}
--
--	dev_err(pp->dev, "link Fail\n");
--	return -EINVAL;
-+	return dw_pcie_wait_for_link(pp);
- }
- 
- static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
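With LINK_WAIT_MAX_RETRIES set to 10 and usleep_range(90000, 100000), dw_pcie_wait_for_link() gives up after roughly one second (10 polls, 90-100 ms apart) — useful context for the Keystone change above, which now re-initiates training up to 5 times around that wait. A typical caller, sketched from the converted drivers rather than copied from any of them:

	/* hypothetical sub-driver following the pattern introduced above */
	static int my_pcie_establish_link(struct pcie_port *pp)
	{
		if (dw_pcie_link_up(pp))
			return 0;

		/* ... kick LTSSM / start link training via device registers ... */

		return dw_pcie_wait_for_link(pp);	/* 0, or -ETIMEDOUT after ~1 s */
	}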
a0a4f406c7e90b2be66e88ea8b21699940c0823f Mon Sep 17 00:00:00 2001 -From: Joao Pinto -Date: Thu, 10 Mar 2016 14:44:44 -0600 -Subject: [PATCH 57/70] PCI: designware: Add default link up check if - sub-driver doesn't override - -Add a default DesignWare "link_up" test for use when a sub-driver doesn't -supply its own pcie_host_ops.link_up() method. - -[bhelgaas: changelog, split into its own patch] -Signed-off-by: Joao Pinto -Signed-off-by: Bjorn Helgaas -Acked-by: Pratyush Anand ---- - drivers/pci/host/pcie-designware.c | 10 +++++++++- - 1 file changed, 9 insertions(+), 1 deletion(-) - ---- a/drivers/pci/host/pcie-designware.c -+++ b/drivers/pci/host/pcie-designware.c -@@ -70,6 +70,11 @@ - #define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) - #define PCIE_ATU_UPPER_TARGET 0x91C - -+/* PCIe Port Logic registers */ -+#define PLR_OFFSET 0x700 -+#define PCIE_PHY_DEBUG_R1 (PLR_OFFSET + 0x2c) -+#define PCIE_PHY_DEBUG_R1_LINK_UP 0x00000010 -+ - static struct pci_ops dw_pcie_ops; - - int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val) -@@ -401,10 +406,13 @@ int dw_pcie_wait_for_link(struct pcie_po - - int dw_pcie_link_up(struct pcie_port *pp) - { -+ u32 val; -+ - if (pp->ops->link_up) - return pp->ops->link_up(pp); - -- return 0; -+ val = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1); -+ return val & PCIE_PHY_DEBUG_R1_LINK_UP; - } - - static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq, diff --git a/target/linux/layerscape/patches-4.4/0058-PCI-designware-Move-Root-Complex-setup-code-to-dw_pc.patch b/target/linux/layerscape/patches-4.4/0058-PCI-designware-Move-Root-Complex-setup-code-to-dw_pc.patch deleted file mode 100644 index 5826eb03b..000000000 --- a/target/linux/layerscape/patches-4.4/0058-PCI-designware-Move-Root-Complex-setup-code-to-dw_pc.patch +++ /dev/null @@ -1,109 +0,0 @@ -From 892a427f8a2b25b561298941cf1fc0373a98b269 Mon Sep 17 00:00:00 2001 -From: Jisheng Zhang -Date: Wed, 16 Mar 2016 19:40:33 +0800 -Subject: [PATCH 58/70] PCI: designware: Move Root Complex setup code to - dw_pcie_setup_rc() - -dw_pcie_host_init() looks up host bridge resources, ioremaps them, creates -IRQ domains, and enumerates devices below the bridge. dw_pcie_setup_rc() -programs the Root Complex registers. The Root Complex may lose power -during suspend-to-RAM, and when we resume, we want to redo the latter but -not the former. - -Move some Root Complex programming from dw_pcie_host_init() to -dw_pcie_setup_rc() where it belongs. DesignWare-based drivers can call -dw_pcie_setup_rc() in their resume paths. - -[Niklas Cassel : This change moves outbound ATU -programming, which uses pp->mem_base, to dw_pcie_setup_rc(). Apply the -dra7xx pp->mem_base update before calling dw_pcie_setup_rc().] 
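For illustration, a minimal resume-path sketch of what this split enables (hypothetical driver: the function name and drvdata layout are assumed, not taken from these patches):

	static int foo_pcie_resume_noirq(struct device *dev)
	{
		struct pcie_port *pp = dev_get_drvdata(dev);	/* assumed */

		/* redo only the RC programming lost during suspend-to-RAM */
		dw_pcie_setup_rc(pp);

		/* then wait for the link with the common helper */
		return dw_pcie_wait_for_link(pp);
	}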
- -[bhelgaas: changelog, fold in dra7xx fix from Niklas] -Signed-off-by: Jisheng Zhang -Signed-off-by: Bjorn Helgaas -Acked-by: Pratyush Anand ---- - drivers/pci/host/pci-dra7xx.c | 4 ++-- - drivers/pci/host/pcie-designware.c | 39 ++++++++++++++++++------------------ - 2 files changed, 21 insertions(+), 22 deletions(-) - ---- a/drivers/pci/host/pci-dra7xx.c -+++ b/drivers/pci/host/pci-dra7xx.c -@@ -142,13 +142,13 @@ static void dra7xx_pcie_enable_interrupt - - static void dra7xx_pcie_host_init(struct pcie_port *pp) - { -- dw_pcie_setup_rc(pp); -- - pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR; - pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR; - pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR; - pp->cfg1_base &= DRA7XX_CPU_TO_BUS_ADDR; - -+ dw_pcie_setup_rc(pp); -+ - dra7xx_pcie_establish_link(pp); - if (IS_ENABLED(CONFIG_PCI_MSI)) - dw_pcie_msi_init(pp); ---- a/drivers/pci/host/pcie-designware.c -+++ b/drivers/pci/host/pcie-designware.c -@@ -434,7 +434,6 @@ int dw_pcie_host_init(struct pcie_port * - struct platform_device *pdev = to_platform_device(pp->dev); - struct pci_bus *bus, *child; - struct resource *cfg_res; -- u32 val; - int i, ret; - LIST_HEAD(res); - struct resource_entry *win; -@@ -544,25 +543,6 @@ int dw_pcie_host_init(struct pcie_port * - if (pp->ops->host_init) - pp->ops->host_init(pp); - -- /* -- * If the platform provides ->rd_other_conf, it means the platform -- * uses its own address translation component rather than ATU, so -- * we should not program the ATU here. -- */ -- if (!pp->ops->rd_other_conf) -- dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, -- PCIE_ATU_TYPE_MEM, pp->mem_base, -- pp->mem_bus_addr, pp->mem_size); -- -- dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); -- -- /* program correct class for RC */ -- dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI); -- -- dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val); -- val |= PORT_LOGIC_SPEED_CHANGE; -- dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); -- - pp->root_bus_nr = pp->busn->start; - if (IS_ENABLED(CONFIG_PCI_MSI)) { - bus = pci_scan_root_bus_msi(pp->dev, pp->root_bus_nr, -@@ -800,6 +780,25 @@ void dw_pcie_setup_rc(struct pcie_port * - val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | - PCI_COMMAND_MASTER | PCI_COMMAND_SERR; - dw_pcie_writel_rc(pp, val, PCI_COMMAND); -+ -+ /* -+ * If the platform provides ->rd_other_conf, it means the platform -+ * uses its own address translation component rather than ATU, so -+ * we should not program the ATU here. 
-+ */ -+ if (!pp->ops->rd_other_conf) -+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, -+ PCIE_ATU_TYPE_MEM, pp->mem_base, -+ pp->mem_bus_addr, pp->mem_size); -+ -+ dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); -+ -+ /* program correct class for RC */ -+ dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI); -+ -+ dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val); -+ val |= PORT_LOGIC_SPEED_CHANGE; -+ dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); - } - - MODULE_AUTHOR("Jingoo Han "); diff --git a/target/linux/layerscape/patches-4.4/0059-PCI-designware-Remove-incorrect-RC-memory-base-limit.patch b/target/linux/layerscape/patches-4.4/0059-PCI-designware-Remove-incorrect-RC-memory-base-limit.patch deleted file mode 100644 index 5eb0bb1a6..000000000 --- a/target/linux/layerscape/patches-4.4/0059-PCI-designware-Remove-incorrect-RC-memory-base-limit.patch +++ /dev/null @@ -1,45 +0,0 @@ -From ae717a9744a3e18f2ed0a6aa44e279c89ad5052c Mon Sep 17 00:00:00 2001 -From: Gabriele Paoloni -Date: Sat, 16 Apr 2016 12:03:39 +0100 -Subject: [PATCH 59/70] PCI: designware: Remove incorrect RC memory base/limit - configuration - -Currently dw_pcie_setup_rc() configures memory base and memory limit in the -type1 configuration header for the root complex. In doing so it uses the -CPU address (pp->mem_base) rather than the bus address (pp->mem_bus_addr). -This is wrong and it is useless since the configuration is overwritten -later on when pci_bus_assign_resources() is called. - -Remove this configuration from dw_pcie_setup_rc(). - -Signed-off-by: Gabriele Paoloni -Signed-off-by: Bjorn Helgaas -Acked-by: Pratyush Anand ---- - drivers/pci/host/pcie-designware.c | 8 -------- - 1 file changed, 8 deletions(-) - ---- a/drivers/pci/host/pcie-designware.c -+++ b/drivers/pci/host/pcie-designware.c -@@ -708,8 +708,6 @@ static struct pci_ops dw_pcie_ops = { - void dw_pcie_setup_rc(struct pcie_port *pp) - { - u32 val; -- u32 membase; -- u32 memlimit; - - /* set the number of lanes */ - dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val); -@@ -768,12 +766,6 @@ void dw_pcie_setup_rc(struct pcie_port * - val |= 0x00010100; - dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS); - -- /* setup memory base, memory limit */ -- membase = ((u32)pp->mem_base & 0xfff00000) >> 16; -- memlimit = (pp->mem_size + (u32)pp->mem_base) & 0xfff00000; -- val = memlimit | membase; -- dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE); -- - /* setup command register */ - dw_pcie_readl_rc(pp, PCI_COMMAND, &val); - val &= 0xffff0000; diff --git a/target/linux/layerscape/patches-4.4/0140-config-add-freescale-config-for-amr64.patch b/target/linux/layerscape/patches-4.4/0140-config-add-freescale-config-for-amr64.patch deleted file mode 100644 index 74ff44bda..000000000 --- a/target/linux/layerscape/patches-4.4/0140-config-add-freescale-config-for-amr64.patch +++ /dev/null @@ -1,148 +0,0 @@ -From 880b7aa2e2c62e54245fb77d92db502175232d86 Mon Sep 17 00:00:00 2001 -From: Zhao Qiang -Date: Wed, 12 Oct 2016 11:01:17 +0800 -Subject: [PATCH 140/141] config: add freescale config for amr64 - -Signed-off-by: Zhao Qiang ---- - arch/arm64/configs/freescale.config | 134 +++++++++++++++++++++++++++++++++++ - 1 file changed, 134 insertions(+) - create mode 100644 arch/arm64/configs/freescale.config - ---- /dev/null -+++ b/arch/arm64/configs/freescale.config -@@ -0,0 +1,134 @@ -+# general options -+CONFIG_LOCALVERSION_AUTO=y -+CONFIG_SLAB=y -+CONFIG_MODULE_FORCE_LOAD=y -+CONFIG_MODVERSIONS=y -+CONFIG_ARM64_VA_BITS_48=y 
-+CONFIG_BLK_DEV_RAM=y -+CONFIG_BLK_DEV_RAM_SIZE=262144 -+CONFIG_PRINTK_TIME=y -+CONFIG_PID_IN_CONTEXTIDR=y -+CONFIG_IPV6=y -+# iommu -+CONFIG_IOMMU_SUPPORT=y -+CONFIG_ARM_SMMU=y -+# dpaa2 -+CONFIG_STAGING=y -+CONFIG_FSL_MC_BUS=y -+CONFIG_FSL_MC_RESTOOL=y -+CONFIG_FSL_MC_DPIO=y -+CONFIG_FSL_DPAA2=y -+CONFIG_NET_NS=y -+CONFIG_FSL_DPAA2_DCE=y -+CONFIG_FSL_DCE_FLOW_LIMIT=65536 -+CONFIG_FSL_DCE_API_TIME_TRIAL=m -+CONFIG_LS_SOC_DRIVERS=y -+# mdio -+CONFIG_FSL_XGMAC_MDIO=y -+CONFIG_MDIO_BUS_MUX_MMIOREG=y -+# phy -+CONFIG_AQUANTIA_PHY=y -+CONFIG_VITESSE_PHY=y -+CONFIG_REALTEK_PHY=y -+CONFIG_FIXED_PHY=y -+# reset support -+CONFIG_POWER_RESET_LAYERSCAPE=y -+# pci -+CONFIG_PCI_LAYERSCAPE=y -+CONFIG_PCI_HOST_GENERIC=y -+CONFIG_E1000=y -+CONFIG_E1000E=y -+# clock driver -+CONFIG_CLK_QORIQ=y -+# usb -+CONFIG_USB_XHCI_HCD=y -+CONFIG_USB_DWC3=y -+CONFIG_DMADEVICES=y -+# ahci/sata -+CONFIG_AHCI_QORIQ=y -+# esdhc -+CONFIG_MMC_SDHCI_OF_ESDHC=y -+# virtualization -+CONFIG_VHOST_NET=y -+CONFIG_KVM_ARM_MAX_VCPUS=8 -+# I2C -+CONFIG_I2C=y -+CONFIG_I2C_CHARDEV=y -+CONFIG_I2C_MUX=y -+CONFIG_I2C_MUX_PCA954x=y -+CONFIG_I2C_IMX=y -+# hardware monitor -+CONFIG_SENSORS_LM90=y -+CONFIG_SENSORS_INA2XX=y -+# DPAA 1 -+CONFIG_HAS_FSL_QBMAN=y -+CONFIG_CRYPTO_DEV_FSL_CAAM=y -+# network -+CONFIG_BRIDGE=m -+CONFIG_MACVLAN=y -+CONFIG_FSL_SDK_FMAN=y -+CONFIG_FMAN_ARM=y -+CONFIG_FSL_SDK_DPAA_ETH=y -+CONFIG_INET_ESP=y -+CONFIG_XFRM_USER=y -+CONFIG_NET_KEY=y -+# vfio -+CONFIG_VFIO=y -+CONFIG_VFIO_PCI=y -+CONFIG_VFIO_FSL_MC=y -+# CPU Frequency scaling -+CONFIG_CPU_FREQ=y -+CONFIG_CPU_FREQ_GOV_COMMON=y -+CONFIG_CPU_FREQ_STAT=y -+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y -+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -+CONFIG_CPU_FREQ_GOV_POWERSAVE=y -+CONFIG_CPU_FREQ_GOV_USERSPACE=y -+CONFIG_CPU_FREQ_GOV_ONDEMAND=y -+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -+CONFIG_QORIQ_CPUFREQ=y -+#ifc -+CONFIG_MTD_OF_PARTS=y -+CONFIG_MTD_GEN_PROBE=y -+CONFIG_MTD=y -+CONFIG_MTD_CMDLINE_PARTS=y -+CONFIG_MTD_BLOCK=y -+CONFIG_MTD_CFI=y -+CONFIG_MTD_CFI_ADV_OPTIONS=y -+CONFIG_MTD_CFI_INTELEXT=y -+CONFIG_MTD_CFI_AMDSTD=y -+CONFIG_MTD_CFI_STAA=y -+CONFIG_MTD_PHYSMAP_OF=y -+CONFIG_MTD_NAND=y -+CONFIG_MTD_NAND_FSL_IFC=y -+#spi -+CONFIG_SPI_FSL_DSPI=y -+CONFIG_MTD_SPI_NOR=y -+CONFIG_MTD_DATAFLASH=y -+CONFIG_MTD_M25P80=y -+CONFIG_MTD_SST25L=y -+#RTC -+CONFIG_RTC_DRV_DS3232=y -+#CryptoAPI -+CONFIG_CRYPTO_SHA256=y -+CONFIG_CRYPTO_SHA512=y -+# ls1046a -+CONFIG_MTD_CFI_BE_BYTE_SWAP=y -+CONFIG_SPI_FSL_QUADSPI=y -+CONFIG_RTC_DRV_PCF2127=y -+CONFIG_WATCHDOG=y -+CONFIG_IMX2_WDT=y -+CONFIG_HWMON=y -+CONFIG_SENSORS_LM90=y -+CONFIG_SENSORS_INA2XX=y -+CONFIG_EEPROM_AT24=y -+# lpuart -+CONFIG_SERIAL_FSL_LPUART=y -+CONFIG_SERIAL_FSL_LPUART_CONSOLE=y -+# ftm -+CONFIG_FTM_ALARM=y -+# qDMA -+CONFIG_FSL_QDMA=y -+CONFIG_DMATEST=y -+#NVMe -+CONFIG_BLK_DEV_NVME=y diff --git a/target/linux/layerscape/patches-4.4/0238-arm64-disable-CONFIG_EEPROM_AT24-for-freescale.confi.patch b/target/linux/layerscape/patches-4.4/0238-arm64-disable-CONFIG_EEPROM_AT24-for-freescale.confi.patch deleted file mode 100644 index a16524a67..000000000 --- a/target/linux/layerscape/patches-4.4/0238-arm64-disable-CONFIG_EEPROM_AT24-for-freescale.confi.patch +++ /dev/null @@ -1,24 +0,0 @@ -From fbc31a61b7bcfbc9ae1a8acda547de891f4b8ee4 Mon Sep 17 00:00:00 2001 -From: Yangbo Lu -Date: Mon, 31 Oct 2016 17:50:03 +0800 -Subject: [PATCH 238/238] arm64: disable CONFIG_EEPROM_AT24 for - freescale.config - -Disable CONFIG_EEPROM_AT24 in freescale.config. Otherwise, i2cdump -for EEPROM will get resource busy issue. 
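The busy error comes from the i2c-dev interface: once the in-kernel at24 driver is bound to the EEPROM's address, user-space tools such as i2cdump can no longer claim it. A minimal user-space sketch of the failing step (the bus number and the 0x50 address are assumed for illustration):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/i2c-dev.h>

	int main(void)
	{
		int fd = open("/dev/i2c-0", O_RDWR);	/* bus assumed */

		if (fd < 0)
			return 1;
		/* fails with EBUSY while a kernel driver owns address 0x50 */
		if (ioctl(fd, I2C_SLAVE, 0x50) < 0)
			perror("I2C_SLAVE");
		return 0;
	}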
- -Signed-off-by: Yangbo Lu ---- - arch/arm64/configs/freescale.config | 1 - - 1 file changed, 1 deletion(-) - ---- a/arch/arm64/configs/freescale.config -+++ b/arch/arm64/configs/freescale.config -@@ -121,7 +121,6 @@ CONFIG_IMX2_WDT=y - CONFIG_HWMON=y - CONFIG_SENSORS_LM90=y - CONFIG_SENSORS_INA2XX=y --CONFIG_EEPROM_AT24=y - # lpuart - CONFIG_SERIAL_FSL_LPUART=y - CONFIG_SERIAL_FSL_LPUART_CONSOLE=y diff --git a/target/linux/layerscape/patches-4.4/0239-ARM-dts-ls1021a-add-PCIe-dts-node.patch b/target/linux/layerscape/patches-4.4/0239-ARM-dts-ls1021a-add-PCIe-dts-node.patch deleted file mode 100644 index 1e47060a0..000000000 --- a/target/linux/layerscape/patches-4.4/0239-ARM-dts-ls1021a-add-PCIe-dts-node.patch +++ /dev/null @@ -1,68 +0,0 @@ -From 1f58043afef0dca3d12dc23ac3a35d7074412939 Mon Sep 17 00:00:00 2001 -From: Minghuan Lian -Date: Tue, 2 Feb 2016 16:30:07 +0800 -Subject: [PATCH 01/13] ARM: dts: ls1021a: add PCIe dts node - -Cherry-pick upstream patch. - -LS1021a contains two PCIe controllers. The patch adds their node to -dts file. - -Signed-off-by: Minghuan Lian -Signed-off-by: Yangbo Lu ---- - arch/arm/boot/dts/ls1021a.dtsi | 44 ++++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 44 insertions(+) - ---- a/arch/arm/boot/dts/ls1021a.dtsi -+++ b/arch/arm/boot/dts/ls1021a.dtsi -@@ -539,5 +539,49 @@ - dr_mode = "host"; - snps,quirk-frame-length-adjustment = <0x20>; - }; -+ -+ pcie@3400000 { -+ compatible = "fsl,ls1021a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03400000 0x0 0x00010000 /* controller registers */ -+ 0x40 0x00000000 0x0 0x00002000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = ; /* controller interrupt */ -+ fsl,pcie-scfg = <&scfg 0>; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>, -+ <0000 0 0 2 &gic GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>, -+ <0000 0 0 3 &gic GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>, -+ <0000 0 0 4 &gic GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>; -+ }; -+ -+ pcie@3500000 { -+ compatible = "fsl,ls1021a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03500000 0x0 0x00010000 /* controller registers */ -+ 0x48 0x00000000 0x0 0x00002000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = ; -+ fsl,pcie-scfg = <&scfg 1>; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>, -+ <0000 0 0 2 &gic GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>, -+ <0000 0 0 3 &gic GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>, -+ <0000 0 0 4 &gic GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>; -+ }; - }; - }; diff --git a/target/linux/layerscape/patches-4.4/0240-ARM-dts-ls1021a-add-SCFG-MSI-dts-node.patch b/target/linux/layerscape/patches-4.4/0240-ARM-dts-ls1021a-add-SCFG-MSI-dts-node.patch deleted file mode 100644 index ca2663b86..000000000 --- a/target/linux/layerscape/patches-4.4/0240-ARM-dts-ls1021a-add-SCFG-MSI-dts-node.patch +++ /dev/null 
@@ -1,56 +0,0 @@ -From b57dcab78fdc76a6c56c2df71518fb022429e244 Mon Sep 17 00:00:00 2001 -From: Minghuan Lian -Date: Wed, 6 Apr 2016 19:02:07 +0800 -Subject: [PATCH 02/13] ARM: dts: ls1021a: add SCFG MSI dts node - -Cherry-pick upstream patch. - -Add SCFG MSI dts node and add msi-parent property to PCIe dts node -that points to the corresponding MSI node. - -Signed-off-by: Minghuan Lian -Tested-by: Alexander Stein -Signed-off-by: Yangbo Lu ---- - arch/arm/boot/dts/ls1021a.dtsi | 16 ++++++++++++++++ - 1 file changed, 16 insertions(+) - ---- a/arch/arm/boot/dts/ls1021a.dtsi -+++ b/arch/arm/boot/dts/ls1021a.dtsi -@@ -119,6 +119,20 @@ - - }; - -+ msi1: msi-controller@1570e00 { -+ compatible = "fsl,1s1021a-msi"; -+ reg = <0x0 0x1570e00 0x0 0x8>; -+ msi-controller; -+ interrupts = ; -+ }; -+ -+ msi2: msi-controller@1570e08 { -+ compatible = "fsl,1s1021a-msi"; -+ reg = <0x0 0x1570e08 0x0 0x8>; -+ msi-controller; -+ interrupts = ; -+ }; -+ - ifc: ifc@1530000 { - compatible = "fsl,ifc", "simple-bus"; - reg = <0x0 0x1530000 0x0 0x10000>; -@@ -554,6 +568,7 @@ - bus-range = <0x0 0xff>; - ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ - 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&msi1>; - #interrupt-cells = <1>; - interrupt-map-mask = <0 0 0 7>; - interrupt-map = <0000 0 0 1 &gic GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>, -@@ -576,6 +591,7 @@ - bus-range = <0x0 0xff>; - ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ - 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&msi2>; - #interrupt-cells = <1>; - interrupt-map-mask = <0 0 0 7>; - interrupt-map = <0000 0 0 1 &gic GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>, diff --git a/target/linux/layerscape/patches-4.4/0241-dt-bindings-Add-bindings-for-Layerscape-SCFG-MSI.patch b/target/linux/layerscape/patches-4.4/0241-dt-bindings-Add-bindings-for-Layerscape-SCFG-MSI.patch deleted file mode 100644 index 463df7db1..000000000 --- a/target/linux/layerscape/patches-4.4/0241-dt-bindings-Add-bindings-for-Layerscape-SCFG-MSI.patch +++ /dev/null @@ -1,53 +0,0 @@ -From 066320dd0643e66bc5afe0d0984e77b2e938a6f4 Mon Sep 17 00:00:00 2001 -From: Minghuan Lian -Date: Wed, 23 Mar 2016 19:08:19 +0800 -Subject: [PATCH 03/13] dt/bindings: Add bindings for Layerscape SCFG MSI - -Cherry-pick upstream patch. - -Some Layerscape SoCs use a simple MSI controller implementation. -It contains only two SCFG register to trigger and describe a -group 32 MSI interrupts. The patch adds bindings to describe -the controller. - -Signed-off-by: Minghuan Lian -Acked-by: Rob Herring -Signed-off-by: Yangbo Lu ---- - .../interrupt-controller/fsl,ls-scfg-msi.txt | 30 ++++++++++++++++++++++ - 1 file changed, 30 insertions(+) - create mode 100644 Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt - ---- /dev/null -+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt -@@ -0,0 +1,30 @@ -+* Freescale Layerscape SCFG PCIe MSI controller -+ -+Required properties: -+ -+- compatible: should be "fsl,-msi" to identify -+ Layerscape PCIe MSI controller block such as: -+ "fsl,1s1021a-msi" -+ "fsl,1s1043a-msi" -+- msi-controller: indicates that this is a PCIe MSI controller node -+- reg: physical base address of the controller and length of memory mapped. -+- interrupts: an interrupt to the parent interrupt controller. 
-+ -+Optional properties: -+- interrupt-parent: the phandle to the parent interrupt controller. -+ -+This interrupt controller hardware is a second level interrupt controller that -+is hooked to a parent interrupt controller: e.g: ARM GIC for ARM-based -+platforms. If interrupt-parent is not provided, the default parent interrupt -+controller will be used. -+Each PCIe node needs to have property msi-parent that points to -+MSI controller node -+ -+Examples: -+ -+ msi1: msi-controller@1571000 { -+ compatible = "fsl,1s1043a-msi"; -+ reg = <0x0 0x1571000 0x0 0x8>, -+ msi-controller; -+ interrupts = <0 116 0x4>; -+ }; diff --git a/target/linux/layerscape/patches-4.4/1074-mtd-nand-spi-nor-assign-MTD-of_node.patch b/target/linux/layerscape/patches-4.4/1074-mtd-nand-spi-nor-assign-MTD-of_node.patch deleted file mode 100644 index 358b51245..000000000 --- a/target/linux/layerscape/patches-4.4/1074-mtd-nand-spi-nor-assign-MTD-of_node.patch +++ /dev/null @@ -1,31 +0,0 @@ -From f560fdb9d71aaf3adc54341a1650577c78495df9 Mon Sep 17 00:00:00 2001 -From: Brian Norris -Date: Fri, 30 Oct 2015 20:33:22 -0700 -Subject: [PATCH 074/113] mtd: {nand,spi-nor}: assign MTD of_node - -We should pass along our flash DT node to the MTD layer, so it can set -up ofpart for us. - -cherry-pick{ -remove the code: -drivers/mtd/nand/nand_base.c | 3 + -commit:3e63b26bdd4069c3df2cd7ce7217a21d06801b41 -} - -Signed-off-by: Brian Norris -Reviewed-by: Boris Brezillon -Signed-off-by: Yuan Yao ---- - drivers/mtd/spi-nor/spi-nor.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/mtd/spi-nor/spi-nor.c -+++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -1228,6 +1228,7 @@ int spi_nor_scan(struct spi_nor *nor, co - mtd->flags |= MTD_NO_ERASE; - - mtd->dev.parent = dev; -+ mtd_set_of_node(mtd, np); - nor->page_size = info->page_size; - mtd->writebufsize = nor->page_size; - diff --git a/target/linux/layerscape/patches-4.4/1075-mtd-spi-nor-convert-to-spi_nor_-get-set-_flash_node.patch b/target/linux/layerscape/patches-4.4/1075-mtd-spi-nor-convert-to-spi_nor_-get-set-_flash_node.patch deleted file mode 100644 index bbe29b612..000000000 --- a/target/linux/layerscape/patches-4.4/1075-mtd-spi-nor-convert-to-spi_nor_-get-set-_flash_node.patch +++ /dev/null @@ -1,80 +0,0 @@ -From f906ec330da9aa83de5382653436be36273c63d3 Mon Sep 17 00:00:00 2001 -From: Brian Norris -Date: Fri, 30 Oct 2015 20:33:24 -0700 -Subject: [PATCH 075/113] mtd: spi-nor: convert to spi_nor_{get, - set}_flash_node() - -Used semantic patch with 'make coccicheck MODE=patch COCCI=script.cocci': - ----8<---- -virtual patch - -@@ -struct spi_nor b; -struct spi_nor *c; -expression d; -@@ -( --(b).flash_node = (d) -+spi_nor_set_flash_node(&b, d) -| --(c)->flash_node = (d) -+spi_nor_set_flash_node(c, d) -) ----8<---- - -And a manual conversion for the one use of spi_nor_get_flash_node(). 
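The same conversion applies to any controller driver that still assigns the field directly; a minimal probe sketch using the accessor (driver name assumed, transfer hooks omitted for brevity):

	static int foo_nor_probe(struct spi_device *spi)
	{
		struct spi_nor *nor;

		nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
		if (!nor)
			return -ENOMEM;

		nor->dev = &spi->dev;
		/* was: nor->flash_node = spi->dev.of_node; */
		spi_nor_set_flash_node(nor, spi->dev.of_node);

		/* read/write/read_reg/write_reg hooks would be set here */
		return spi_nor_scan(nor, NULL, SPI_NOR_NORMAL);
	}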
- -Signed-off-by: Brian Norris -Reviewed-by: Boris Brezillon ---- - drivers/mtd/devices/m25p80.c | 2 +- - drivers/mtd/spi-nor/fsl-quadspi.c | 2 +- - drivers/mtd/spi-nor/nxp-spifi.c | 2 +- - drivers/mtd/spi-nor/spi-nor.c | 2 +- - 4 files changed, 4 insertions(+), 4 deletions(-) - ---- a/drivers/mtd/devices/m25p80.c -+++ b/drivers/mtd/devices/m25p80.c -@@ -221,7 +221,7 @@ static int m25p_probe(struct spi_device - nor->read_reg = m25p80_read_reg; - - nor->dev = &spi->dev; -- nor->flash_node = spi->dev.of_node; -+ spi_nor_set_flash_node(nor, spi->dev.of_node); - nor->priv = flash; - - spi_set_drvdata(spi, flash); ---- a/drivers/mtd/spi-nor/fsl-quadspi.c -+++ b/drivers/mtd/spi-nor/fsl-quadspi.c -@@ -1013,7 +1013,7 @@ static int fsl_qspi_probe(struct platfor - mtd = &nor->mtd; - - nor->dev = dev; -- nor->flash_node = np; -+ spi_nor_set_flash_node(nor, np); - nor->priv = q; - - /* fill the hooks */ ---- a/drivers/mtd/spi-nor/nxp-spifi.c -+++ b/drivers/mtd/spi-nor/nxp-spifi.c -@@ -330,7 +330,7 @@ static int nxp_spifi_setup_flash(struct - writel(ctrl, spifi->io_base + SPIFI_CTRL); - - spifi->nor.dev = spifi->dev; -- spifi->nor.flash_node = np; -+ spi_nor_set_flash_node(&spifi->nor, np); - spifi->nor.priv = spifi; - spifi->nor.read = nxp_spifi_read; - spifi->nor.write = nxp_spifi_write; ---- a/drivers/mtd/spi-nor/spi-nor.c -+++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -1120,7 +1120,7 @@ int spi_nor_scan(struct spi_nor *nor, co - const struct flash_info *info = NULL; - struct device *dev = nor->dev; - struct mtd_info *mtd = &nor->mtd; -- struct device_node *np = nor->flash_node; -+ struct device_node *np = spi_nor_get_flash_node(nor); - int ret; - int i; - diff --git a/target/linux/layerscape/patches-4.4/1076-mtd-spi-nor-drop-unnecessary-partition-parser-data.patch b/target/linux/layerscape/patches-4.4/1076-mtd-spi-nor-drop-unnecessary-partition-parser-data.patch deleted file mode 100644 index 0960f5331..000000000 --- a/target/linux/layerscape/patches-4.4/1076-mtd-spi-nor-drop-unnecessary-partition-parser-data.patch +++ /dev/null @@ -1,83 +0,0 @@ -From e36da6d0a0841ea3a75d5189057bd020d737e71a Mon Sep 17 00:00:00 2001 -From: Brian Norris -Date: Fri, 30 Oct 2015 20:33:26 -0700 -Subject: [PATCH 076/113] mtd: spi-nor: drop unnecessary partition parser data - -Now that the SPI-NOR/MTD framework pass the 'flash_node' through to the -partition parsing code, we don't have to do it ourselves. - -Also convert to mtd_device_register(), since we don't need the 2nd and -3rd parameters anymore. - -Signed-off-by: Brian Norris -Reviewed-by: Boris Brezillon ---- - drivers/mtd/devices/m25p80.c | 8 ++------ - drivers/mtd/spi-nor/fsl-quadspi.c | 4 +--- - drivers/mtd/spi-nor/nxp-spifi.c | 4 +--- - 3 files changed, 4 insertions(+), 12 deletions(-) - ---- a/drivers/mtd/devices/m25p80.c -+++ b/drivers/mtd/devices/m25p80.c -@@ -197,7 +197,6 @@ static int m25p80_erase(struct spi_nor * - */ - static int m25p_probe(struct spi_device *spi) - { -- struct mtd_part_parser_data ppdata; - struct flash_platform_data *data; - struct m25p *flash; - struct spi_nor *nor; -@@ -249,11 +248,8 @@ static int m25p_probe(struct spi_device - if (ret) - return ret; - -- ppdata.of_node = spi->dev.of_node; -- -- return mtd_device_parse_register(&nor->mtd, NULL, &ppdata, -- data ? data->parts : NULL, -- data ? data->nr_parts : 0); -+ return mtd_device_register(&nor->mtd, data ? data->parts : NULL, -+ data ? 
data->nr_parts : 0); - } - - ---- a/drivers/mtd/spi-nor/fsl-quadspi.c -+++ b/drivers/mtd/spi-nor/fsl-quadspi.c -@@ -927,7 +927,6 @@ static void fsl_qspi_unprep(struct spi_n - static int fsl_qspi_probe(struct platform_device *pdev) - { - struct device_node *np = pdev->dev.of_node; -- struct mtd_part_parser_data ppdata; - struct device *dev = &pdev->dev; - struct fsl_qspi *q; - struct resource *res; -@@ -1038,8 +1037,7 @@ static int fsl_qspi_probe(struct platfor - if (ret) - goto mutex_failed; - -- ppdata.of_node = np; -- ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0); -+ ret = mtd_device_register(mtd, NULL, 0); - if (ret) - goto mutex_failed; - ---- a/drivers/mtd/spi-nor/nxp-spifi.c -+++ b/drivers/mtd/spi-nor/nxp-spifi.c -@@ -271,7 +271,6 @@ static void nxp_spifi_dummy_id_read(stru - static int nxp_spifi_setup_flash(struct nxp_spifi *spifi, - struct device_node *np) - { -- struct mtd_part_parser_data ppdata; - enum read_mode flash_read; - u32 ctrl, property; - u16 mode = 0; -@@ -361,8 +360,7 @@ static int nxp_spifi_setup_flash(struct - return ret; - } - -- ppdata.of_node = np; -- ret = mtd_device_parse_register(&spifi->nor.mtd, NULL, &ppdata, NULL, 0); -+ ret = mtd_device_register(&spifi->nor.mtd, NULL, 0); - if (ret) { - dev_err(spifi->dev, "mtd device parse failed\n"); - return ret; diff --git a/target/linux/layerscape/patches-4.4/1077-mtd-add-get-set-of_node-flash_node-helpers.patch b/target/linux/layerscape/patches-4.4/1077-mtd-add-get-set-of_node-flash_node-helpers.patch deleted file mode 100644 index eeba1e90d..000000000 --- a/target/linux/layerscape/patches-4.4/1077-mtd-add-get-set-of_node-flash_node-helpers.patch +++ /dev/null @@ -1,62 +0,0 @@ -From a2f87e7df641b482e217f5b0efbaf41f6b8a0cf6 Mon Sep 17 00:00:00 2001 -From: Brian Norris -Date: Fri, 30 Oct 2015 20:33:20 -0700 -Subject: [PATCH 077/113] mtd: add get/set of_node/flash_node helpers - -We are going to begin using the mtd->dev.of_node field for MTD device -nodes, so let's add helpers for it. Also, we'll be making some -conversions on spi_nor (and nand_chip eventually) too, so get that ready -with their own helpers. - -commit:28b8b26b308e656edfa9467867d5f79212da2ec3 -delete the include/linux/mtd/nand.h -just upgrade the code about spi. 
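In short, the helpers decouple the producer and the consumer of the DT node; a two-line sketch (a platform driver is assumed on the producer side):

	/* producer: a flash driver publishes its DT node */
	mtd_set_of_node(mtd, pdev->dev.of_node);

	/* consumer: e.g. a partition parser retrieves it generically */
	struct device_node *np = mtd_get_of_node(mtd);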
- -Signed-off-by: Brian Norris -Reviewed-by: Boris Brezillon -Signed-off-by: Yuan Yao ---- - include/linux/mtd/mtd.h | 11 +++++++++++ - include/linux/mtd/spi-nor.h | 11 +++++++++++ - 2 files changed, 22 insertions(+) - ---- a/include/linux/mtd/mtd.h -+++ b/include/linux/mtd/mtd.h -@@ -258,6 +258,17 @@ struct mtd_info { - int usecount; - }; - -+static inline void mtd_set_of_node(struct mtd_info *mtd, -+ struct device_node *np) -+{ -+ mtd->dev.of_node = np; -+} -+ -+static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd) -+{ -+ return mtd->dev.of_node; -+} -+ - int mtd_erase(struct mtd_info *mtd, struct erase_info *instr); - int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, - void **virt, resource_size_t *phys); ---- a/include/linux/mtd/spi-nor.h -+++ b/include/linux/mtd/spi-nor.h -@@ -184,6 +184,17 @@ struct spi_nor { - void *priv; - }; - -+static inline void spi_nor_set_flash_node(struct spi_nor *nor, -+ struct device_node *np) -+{ -+ nor->flash_node = np; -+} -+ -+static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor) -+{ -+ return nor->flash_node; -+} -+ - /** - * spi_nor_scan() - scan the SPI NOR - * @nor: the spi_nor structure diff --git a/target/linux/layerscape/patches-4.4/1078-mtd-spi-nor-drop-flash_node-field.patch b/target/linux/layerscape/patches-4.4/1078-mtd-spi-nor-drop-flash_node-field.patch deleted file mode 100644 index dc551dfab..000000000 --- a/target/linux/layerscape/patches-4.4/1078-mtd-spi-nor-drop-flash_node-field.patch +++ /dev/null @@ -1,57 +0,0 @@ -From df36b4601bc9f84684249a26eb39b818d6785fb8 Mon Sep 17 00:00:00 2001 -From: Brian Norris -Date: Fri, 30 Oct 2015 20:33:27 -0700 -Subject: [PATCH 078/113] mtd: spi-nor: drop flash_node field - -We can just alias to the MTD of_node. - -Signed-off-by: Brian Norris -Reviewed-by: Boris Brezillon ---- - drivers/mtd/spi-nor/spi-nor.c | 1 - - include/linux/mtd/spi-nor.h | 6 ++---- - 2 files changed, 2 insertions(+), 5 deletions(-) - ---- a/drivers/mtd/spi-nor/spi-nor.c -+++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -1228,7 +1228,6 @@ int spi_nor_scan(struct spi_nor *nor, co - mtd->flags |= MTD_NO_ERASE; - - mtd->dev.parent = dev; -- mtd_set_of_node(mtd, np); - nor->page_size = info->page_size; - mtd->writebufsize = nor->page_size; - ---- a/include/linux/mtd/spi-nor.h -+++ b/include/linux/mtd/spi-nor.h -@@ -123,7 +123,6 @@ enum spi_nor_option_flags { - * @mtd: point to a mtd_info structure - * @lock: the lock for the read/write/erase/lock/unlock operations - * @dev: point to a spi device, or a spi nor controller device. -- * @flash_node: point to a device node describing this flash instance. 
- * @page_size: the page size of the SPI NOR - * @addr_width: number of address bytes - * @erase_opcode: the opcode for erasing a sector -@@ -154,7 +153,6 @@ struct spi_nor { - struct mtd_info mtd; - struct mutex lock; - struct device *dev; -- struct device_node *flash_node; - u32 page_size; - u8 addr_width; - u8 erase_opcode; -@@ -187,12 +185,12 @@ struct spi_nor { - static inline void spi_nor_set_flash_node(struct spi_nor *nor, - struct device_node *np) - { -- nor->flash_node = np; -+ mtd_set_of_node(&nor->mtd, np); - } - - static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor) - { -- return nor->flash_node; -+ return mtd_get_of_node(&nor->mtd); - } - - /** diff --git a/target/linux/layerscape/patches-4.4/1079-mtd-spi-nor-remove-unnecessary-leading-space-from-db.patch b/target/linux/layerscape/patches-4.4/1079-mtd-spi-nor-remove-unnecessary-leading-space-from-db.patch deleted file mode 100644 index 13b30cf7e..000000000 --- a/target/linux/layerscape/patches-4.4/1079-mtd-spi-nor-remove-unnecessary-leading-space-from-db.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 3ea419cf269832f5743d9b5ad75ece5178b02b09 Mon Sep 17 00:00:00 2001 -From: Brian Norris -Date: Fri, 30 Oct 2015 12:56:22 -0700 -Subject: [PATCH 079/113] mtd: spi-nor: remove unnecessary leading space from - dbg print - -As Cyrille noted [1], this line is wrong. - -[1] http://lists.infradead.org/pipermail/linux-mtd/2015-September/061725.html - -Signed-off-by: Brian Norris -Cc: Cyrille Pitchen ---- - drivers/mtd/spi-nor/spi-nor.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/mtd/spi-nor/spi-nor.c -+++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -862,7 +862,7 @@ static const struct flash_info *spi_nor_ - - tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN); - if (tmp < 0) { -- dev_dbg(nor->dev, " error %d reading JEDEC ID\n", tmp); -+ dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp); - return ERR_PTR(tmp); - } - diff --git a/target/linux/layerscape/patches-4.4/1080-mtd-fsl-quadspi-possible-NULL-dereference.patch b/target/linux/layerscape/patches-4.4/1080-mtd-fsl-quadspi-possible-NULL-dereference.patch deleted file mode 100644 index 76898ab58..000000000 --- a/target/linux/layerscape/patches-4.4/1080-mtd-fsl-quadspi-possible-NULL-dereference.patch +++ /dev/null @@ -1,50 +0,0 @@ -From bd02decd1ad7cc883ce388e769a34a3c402b90c4 Mon Sep 17 00:00:00 2001 -From: Brian Norris -Date: Mon, 16 Nov 2015 10:45:30 -0800 -Subject: [PATCH 080/113] mtd: fsl-quadspi: possible NULL dereference - -It is theoretically possible to probe this driver without a matching -device tree, so let's guard against this. - -Also, use the of_device_get_match_data() helper to make this a bit -simpler. - -Coverity complained about this one. 
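Shown in isolation, the pattern being fixed (both halves taken from the hunk below): of_match_device() returns NULL when the device is probed without a matching DT entry, so the unchecked of_id->data dereference could oops, while of_device_get_match_data() folds lookup and data extraction into one easily checked call:

	/* before: NULL of_id is dereferenced if there is no DT match */
	const struct of_device_id *of_id =
		of_match_device(fsl_qspi_dt_ids, &pdev->dev);
	q->devtype_data = (struct fsl_qspi_devtype_data *)of_id->data;

	/* after: one call, one NULL check */
	q->devtype_data = of_device_get_match_data(dev);
	if (!q->devtype_data)
		return -ENODEV;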
- -Signed-off-by: Brian Norris -Acked-by: Han xu ---- - drivers/mtd/spi-nor/fsl-quadspi.c | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - ---- a/drivers/mtd/spi-nor/fsl-quadspi.c -+++ b/drivers/mtd/spi-nor/fsl-quadspi.c -@@ -269,7 +269,7 @@ struct fsl_qspi { - struct clk *clk, *clk_en; - struct device *dev; - struct completion c; -- struct fsl_qspi_devtype_data *devtype_data; -+ const struct fsl_qspi_devtype_data *devtype_data; - u32 nor_size; - u32 nor_num; - u32 clk_rate; -@@ -933,8 +933,6 @@ static int fsl_qspi_probe(struct platfor - struct spi_nor *nor; - struct mtd_info *mtd; - int ret, i = 0; -- const struct of_device_id *of_id = -- of_match_device(fsl_qspi_dt_ids, &pdev->dev); - - q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL); - if (!q) -@@ -945,7 +943,9 @@ static int fsl_qspi_probe(struct platfor - return -ENODEV; - - q->dev = dev; -- q->devtype_data = (struct fsl_qspi_devtype_data *)of_id->data; -+ q->devtype_data = of_device_get_match_data(dev); -+ if (!q->devtype_data) -+ return -ENODEV; - platform_set_drvdata(pdev, q); - - /* find the resources */ diff --git a/target/linux/layerscape/patches-4.4/1081-mtd-spi-nor-provide-default-erase_sector-implementat.patch b/target/linux/layerscape/patches-4.4/1081-mtd-spi-nor-provide-default-erase_sector-implementat.patch deleted file mode 100644 index e966cafce..000000000 --- a/target/linux/layerscape/patches-4.4/1081-mtd-spi-nor-provide-default-erase_sector-implementat.patch +++ /dev/null @@ -1,105 +0,0 @@ -From 56bd0e13d8bc3b4486251b10ac9d2ba7434c21ee Mon Sep 17 00:00:00 2001 -From: Brian Norris -Date: Tue, 10 Nov 2015 12:15:27 -0800 -Subject: [PATCH 081/113] mtd: spi-nor: provide default erase_sector - implementation - -Some spi-nor drivers perform sector erase by duplicating their -write_reg() command. Let's not require that the driver fill this out, -and provide a default instead. - -Tested on m25p80.c and Medatek's MT8173 SPI NOR flash driver. - -Signed-off-by: Brian Norris ---- - drivers/mtd/spi-nor/spi-nor.c | 37 +++++++++++++++++++++++++++++++++---- - include/linux/mtd/spi-nor.h | 3 ++- - 2 files changed, 35 insertions(+), 5 deletions(-) - ---- a/drivers/mtd/spi-nor/spi-nor.c -+++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -38,6 +38,7 @@ - #define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ) - - #define SPI_NOR_MAX_ID_LEN 6 -+#define SPI_NOR_MAX_ADDR_WIDTH 4 - - struct flash_info { - char *name; -@@ -314,6 +315,29 @@ static void spi_nor_unlock_and_unprep(st - } - - /* -+ * Initiate the erasure of a single sector -+ */ -+static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr) -+{ -+ u8 buf[SPI_NOR_MAX_ADDR_WIDTH]; -+ int i; -+ -+ if (nor->erase) -+ return nor->erase(nor, addr); -+ -+ /* -+ * Default implementation, if driver doesn't have a specialized HW -+ * control -+ */ -+ for (i = nor->addr_width - 1; i >= 0; i--) { -+ buf[i] = addr & 0xff; -+ addr >>= 8; -+ } -+ -+ return nor->write_reg(nor, nor->erase_opcode, buf, nor->addr_width); -+} -+ -+/* - * Erase an address range on the nor chip. The address range may extend - * one or more erase sectors. Return an error is there is a problem erasing. 
- */ -@@ -372,10 +396,9 @@ static int spi_nor_erase(struct mtd_info - while (len) { - write_enable(nor); - -- if (nor->erase(nor, addr)) { -- ret = -EIO; -+ ret = spi_nor_erase_sector(nor, addr); -+ if (ret) - goto erase_err; -- } - - addr += mtd->erasesize; - len -= mtd->erasesize; -@@ -1107,7 +1130,7 @@ static int set_quad_mode(struct spi_nor - static int spi_nor_check(struct spi_nor *nor) - { - if (!nor->dev || !nor->read || !nor->write || -- !nor->read_reg || !nor->write_reg || !nor->erase) { -+ !nor->read_reg || !nor->write_reg) { - pr_err("spi-nor: please fill all the necessary fields!\n"); - return -EINVAL; - } -@@ -1310,6 +1333,12 @@ int spi_nor_scan(struct spi_nor *nor, co - nor->addr_width = 3; - } - -+ if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) { -+ dev_err(dev, "address width is too large: %u\n", -+ nor->addr_width); -+ return -EINVAL; -+ } -+ - nor->read_dummy = spi_nor_read_dummy_cycles(nor); - - dev_info(dev, "%s (%lld Kbytes)\n", info->name, ---- a/include/linux/mtd/spi-nor.h -+++ b/include/linux/mtd/spi-nor.h -@@ -142,7 +142,8 @@ enum spi_nor_option_flags { - * @read: [DRIVER-SPECIFIC] read data from the SPI NOR - * @write: [DRIVER-SPECIFIC] write data to the SPI NOR - * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR -- * at the offset @offs -+ * at the offset @offs; if not provided by the driver, -+ * spi-nor will send the erase opcode via write_reg() - * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR - * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR - * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is diff --git a/target/linux/layerscape/patches-4.4/1083-mtd-spi-nor-Fix-error-message-with-unrecognized-JEDE.patch b/target/linux/layerscape/patches-4.4/1083-mtd-spi-nor-Fix-error-message-with-unrecognized-JEDE.patch deleted file mode 100644 index 6ebbdb417..000000000 --- a/target/linux/layerscape/patches-4.4/1083-mtd-spi-nor-Fix-error-message-with-unrecognized-JEDE.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 30e609daed95664824e95344e85c7eaedd1bfcf3 Mon Sep 17 00:00:00 2001 -From: Ricardo Ribalda -Date: Mon, 30 Nov 2015 20:41:17 +0100 -Subject: [PATCH 083/113] mtd: spi-nor: Fix error message with unrecognized - JEDEC - -The error message was: - -m25p80 spi32766.0: unrecognized JEDEC id bytes: 00, 0, 0 - -The new error message: - -m25p80 spi32766.0: unrecognized JEDEC id bytes: 00, 00, 00 - -Signed-off-by: Ricardo Ribalda Delgado -Signed-off-by: Brian Norris ---- - drivers/mtd/spi-nor/spi-nor.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/mtd/spi-nor/spi-nor.c -+++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -896,7 +896,7 @@ static const struct flash_info *spi_nor_ - return &spi_nor_ids[tmp]; - } - } -- dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %2x, %2x\n", -+ dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n", - id[0], id[1], id[2]); - return ERR_PTR(-ENODEV); - } diff --git a/target/linux/layerscape/patches-4.4/1084-mtd-spi-nor-fix-error-handling-in-spi_nor_erase.patch b/target/linux/layerscape/patches-4.4/1084-mtd-spi-nor-fix-error-handling-in-spi_nor_erase.patch deleted file mode 100644 index 53501315e..000000000 --- a/target/linux/layerscape/patches-4.4/1084-mtd-spi-nor-fix-error-handling-in-spi_nor_erase.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 9e473594776da97245049019f1d1e9608ff1214a Mon Sep 17 00:00:00 2001 -From: Heiner Kallweit -Date: Tue, 17 Nov 2015 20:18:54 +0100 -Subject: [PATCH 084/113] mtd: spi-nor: fix error handling in spi_nor_erase - -The 
documenting comment of mtd_erase in mtdcore.c states: -Device drivers are supposed to call instr->callback() whenever -the operation completes, even if it completes with a failure. - -Currently the callback isn't called in case of failure. Fix this. - -Signed-off-by: Heiner Kallweit -Signed-off-by: Brian Norris ---- - drivers/mtd/spi-nor/spi-nor.c | 8 ++------ - 1 file changed, 2 insertions(+), 6 deletions(-) - ---- a/drivers/mtd/spi-nor/spi-nor.c -+++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -411,17 +411,13 @@ static int spi_nor_erase(struct mtd_info - - write_disable(nor); - -+erase_err: - spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE); - -- instr->state = MTD_ERASE_DONE; -+ instr->state = ret ? MTD_ERASE_FAILED : MTD_ERASE_DONE; - mtd_erase_callback(instr); - - return ret; -- --erase_err: -- spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE); -- instr->state = MTD_ERASE_FAILED; -- return ret; - } - - static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs, diff --git a/target/linux/layerscape/patches-4.4/1085-mtd-spi-nor-Check-the-return-value-from-read_sr.patch b/target/linux/layerscape/patches-4.4/1085-mtd-spi-nor-Check-the-return-value-from-read_sr.patch deleted file mode 100644 index cca7ac179..000000000 --- a/target/linux/layerscape/patches-4.4/1085-mtd-spi-nor-Check-the-return-value-from-read_sr.patch +++ /dev/null @@ -1,58 +0,0 @@ -From d05c68e35f42a46b352d2a4bdaef9954c946e20a Mon Sep 17 00:00:00 2001 -From: Fabio Estevam -Date: Fri, 20 Nov 2015 16:26:11 -0200 -Subject: [PATCH 085/113] mtd: spi-nor: Check the return value from read_sr() - -[context adjustment] - -We should better check the return value from read_sr() and -propagate it in the case of error. - -Signed-off-by: Fabio Estevam -Signed-off-by: Brian Norris -Integrated-by: Jiang Yutang ---- - drivers/mtd/spi-nor/spi-nor.c | 10 ++++++++-- - 1 file changed, 8 insertions(+), 2 deletions(-) - ---- a/drivers/mtd/spi-nor/spi-nor.c -+++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -479,11 +479,13 @@ static int stm_is_locked_sr(struct spi_n - static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) - { - struct mtd_info *mtd = &nor->mtd; -- u8 status_old, status_new; -+ int status_old, status_new; - u8 mask = SR_BP2 | SR_BP1 | SR_BP0; - u8 shift = ffs(mask) - 1, pow, val; - - status_old = read_sr(nor); -+ if (status_old < 0) -+ return status_old; - - /* SPI NOR always locks to the end */ - if (ofs + len != mtd->size) { -@@ -529,11 +531,13 @@ static int stm_lock(struct spi_nor *nor, - static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) - { - struct mtd_info *mtd = &nor->mtd; -- uint8_t status_old, status_new; -+ int status_old, status_new; - u8 mask = SR_BP2 | SR_BP1 | SR_BP0; - u8 shift = ffs(mask) - 1, pow, val; - - status_old = read_sr(nor); -+ if (status_old < 0) -+ return status_old; - - /* Cannot unlock; would unlock larger region than requested */ - if (stm_is_locked_sr(nor, ofs - mtd->erasesize, mtd->erasesize, -@@ -1038,6 +1042,8 @@ static int macronix_quad_enable(struct s - int ret, val; - - val = read_sr(nor); -+ if (val < 0) -+ return val; - write_enable(nor); - - write_sr(nor, val | SR_QUAD_EN_MX); diff --git a/target/linux/layerscape/patches-4.4/1086-mtd-spi-nor-wait-until-lock-unlock-operations-are-re.patch b/target/linux/layerscape/patches-4.4/1086-mtd-spi-nor-wait-until-lock-unlock-operations-are-re.patch deleted file mode 100644 index 47f2c8326..000000000 --- a/target/linux/layerscape/patches-4.4/1086-mtd-spi-nor-wait-until-lock-unlock-operations-are-re.patch +++ /dev/null @@ 
-1,66 +0,0 @@ -From 3a06c61b48fbc23046928275e37a693e1055ae74 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Ezequiel=20Garc=C3=ADa?= -Date: Mon, 28 Dec 2015 17:54:51 -0300 -Subject: [PATCH 086/113] mtd: spi-nor: wait until lock/unlock operations are - ready - -On Micron and Numonyx devices, the status register write command -(WRSR), raises a work-in-progress bit (WIP) on the status register. -The datasheets for these devices specify that while the status -register write is in progress, the status register WIP bit can still -be read to check the end of the operation. - -This commit adds a wait_till_ready call on lock/unlock operations, -which is required for Micron and Numonyx but should be harmless for -others. This is needed to prevent applications from issuing erase or -program operations before the unlock operation is completed. - -Reported-by: Stas Sergeev -Signed-off-by: Ezequiel Garcia -Signed-off-by: Brian Norris ---- - drivers/mtd/spi-nor/spi-nor.c | 12 ++++++++++-- - 1 file changed, 10 insertions(+), 2 deletions(-) - ---- a/drivers/mtd/spi-nor/spi-nor.c -+++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -482,6 +482,7 @@ static int stm_lock(struct spi_nor *nor, - int status_old, status_new; - u8 mask = SR_BP2 | SR_BP1 | SR_BP0; - u8 shift = ffs(mask) - 1, pow, val; -+ int ret; - - status_old = read_sr(nor); - if (status_old < 0) -@@ -520,7 +521,10 @@ static int stm_lock(struct spi_nor *nor, - return -EINVAL; - - write_enable(nor); -- return write_sr(nor, status_new); -+ ret = write_sr(nor, status_new); -+ if (ret) -+ return ret; -+ return spi_nor_wait_till_ready(nor); - } - - /* -@@ -534,6 +538,7 @@ static int stm_unlock(struct spi_nor *no - int status_old, status_new; - u8 mask = SR_BP2 | SR_BP1 | SR_BP0; - u8 shift = ffs(mask) - 1, pow, val; -+ int ret; - - status_old = read_sr(nor); - if (status_old < 0) -@@ -570,7 +575,10 @@ static int stm_unlock(struct spi_nor *no - return -EINVAL; - - write_enable(nor); -- return write_sr(nor, status_new); -+ ret = write_sr(nor, status_new); -+ if (ret) -+ return ret; -+ return spi_nor_wait_till_ready(nor); - } - - /* diff --git a/target/linux/layerscape/patches-4.4/1087-mtd-spi-nor-fsl-quadspi-add-big-endian-support.patch b/target/linux/layerscape/patches-4.4/1087-mtd-spi-nor-fsl-quadspi-add-big-endian-support.patch deleted file mode 100644 index 9ceddd6cc..000000000 --- a/target/linux/layerscape/patches-4.4/1087-mtd-spi-nor-fsl-quadspi-add-big-endian-support.patch +++ /dev/null @@ -1,400 +0,0 @@ -From c58b398221d88ac0db29c3bb7522a4f48dfa102c Mon Sep 17 00:00:00 2001 -From: Yuan Yao -Date: Tue, 17 Nov 2015 16:13:47 +0800 -Subject: [PATCH 087/113] mtd: spi-nor: fsl-quadspi: add big-endian support - -Add R/W functions for big- or little-endian registers: -The qSPI controller's endian is independent of the CPU core's endian. -So far, the qSPI have two versions for big-endian and little-endian. - -Signed-off-by: Yuan Yao -Acked-by: Han xu ---- - drivers/mtd/spi-nor/fsl-quadspi.c | 157 +++++++++++++++++++++++-------------- - 1 file changed, 97 insertions(+), 60 deletions(-) - ---- a/drivers/mtd/spi-nor/fsl-quadspi.c -+++ b/drivers/mtd/spi-nor/fsl-quadspi.c -@@ -275,6 +275,7 @@ struct fsl_qspi { - u32 clk_rate; - unsigned int chip_base_addr; /* We may support two chips. 
*/ - bool has_second_chip; -+ bool big_endian; - struct mutex lock; - struct pm_qos_request pm_qos_req; - }; -@@ -300,6 +301,28 @@ static inline int needs_wakeup_wait_mode - } - - /* -+ * R/W functions for big- or little-endian registers: -+ * The qSPI controller's endian is independent of the CPU core's endian. -+ * So far, although the CPU core is little-endian but the qSPI have two -+ * versions for big-endian and little-endian. -+ */ -+static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr) -+{ -+ if (q->big_endian) -+ iowrite32be(val, addr); -+ else -+ iowrite32(val, addr); -+} -+ -+static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr) -+{ -+ if (q->big_endian) -+ return ioread32be(addr); -+ else -+ return ioread32(addr); -+} -+ -+/* - * An IC bug makes us to re-arrange the 32-bit data. - * The following chips, such as IMX6SLX, have fixed this bug. - */ -@@ -310,14 +333,14 @@ static inline u32 fsl_qspi_endian_xchg(s - - static inline void fsl_qspi_unlock_lut(struct fsl_qspi *q) - { -- writel(QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); -- writel(QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR); -+ qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); -+ qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR); - } - - static inline void fsl_qspi_lock_lut(struct fsl_qspi *q) - { -- writel(QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); -- writel(QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR); -+ qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); -+ qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR); - } - - static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id) -@@ -326,8 +349,8 @@ static irqreturn_t fsl_qspi_irq_handler( - u32 reg; - - /* clear interrupt */ -- reg = readl(q->iobase + QUADSPI_FR); -- writel(reg, q->iobase + QUADSPI_FR); -+ reg = qspi_readl(q, q->iobase + QUADSPI_FR); -+ qspi_writel(q, reg, q->iobase + QUADSPI_FR); - - if (reg & QUADSPI_FR_TFF_MASK) - complete(&q->c); -@@ -348,7 +371,7 @@ static void fsl_qspi_init_lut(struct fsl - - /* Clear all the LUT table */ - for (i = 0; i < QUADSPI_LUT_NUM; i++) -- writel(0, base + QUADSPI_LUT_BASE + i * 4); -+ qspi_writel(q, 0, base + QUADSPI_LUT_BASE + i * 4); - - /* Quad Read */ - lut_base = SEQID_QUAD_READ * 4; -@@ -364,14 +387,15 @@ static void fsl_qspi_init_lut(struct fsl - dummy = 8; - } - -- writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen), -+ qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen), - base + QUADSPI_LUT(lut_base)); -- writel(LUT0(DUMMY, PAD1, dummy) | LUT1(FSL_READ, PAD4, rxfifo), -+ qspi_writel(q, LUT0(DUMMY, PAD1, dummy) | LUT1(FSL_READ, PAD4, rxfifo), - base + QUADSPI_LUT(lut_base + 1)); - - /* Write enable */ - lut_base = SEQID_WREN * 4; -- writel(LUT0(CMD, PAD1, SPINOR_OP_WREN), base + QUADSPI_LUT(lut_base)); -+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WREN), -+ base + QUADSPI_LUT(lut_base)); - - /* Page Program */ - lut_base = SEQID_PP * 4; -@@ -385,13 +409,15 @@ static void fsl_qspi_init_lut(struct fsl - addrlen = ADDR32BIT; - } - -- writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen), -+ qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen), - base + QUADSPI_LUT(lut_base)); -- writel(LUT0(FSL_WRITE, PAD1, 0), base + QUADSPI_LUT(lut_base + 1)); -+ qspi_writel(q, LUT0(FSL_WRITE, PAD1, 0), -+ base + QUADSPI_LUT(lut_base + 1)); - - /* Read Status */ - lut_base = SEQID_RDSR * 4; -- writel(LUT0(CMD, PAD1, SPINOR_OP_RDSR) | LUT1(FSL_READ, PAD1, 0x1), -+ qspi_writel(q, LUT0(CMD, PAD1, 
SPINOR_OP_RDSR) | -+ LUT1(FSL_READ, PAD1, 0x1), - base + QUADSPI_LUT(lut_base)); - - /* Erase a sector */ -@@ -400,40 +426,46 @@ static void fsl_qspi_init_lut(struct fsl - cmd = q->nor[0].erase_opcode; - addrlen = q->nor_size <= SZ_16M ? ADDR24BIT : ADDR32BIT; - -- writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen), -+ qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen), - base + QUADSPI_LUT(lut_base)); - - /* Erase the whole chip */ - lut_base = SEQID_CHIP_ERASE * 4; -- writel(LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE), -+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE), - base + QUADSPI_LUT(lut_base)); - - /* READ ID */ - lut_base = SEQID_RDID * 4; -- writel(LUT0(CMD, PAD1, SPINOR_OP_RDID) | LUT1(FSL_READ, PAD1, 0x8), -+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDID) | -+ LUT1(FSL_READ, PAD1, 0x8), - base + QUADSPI_LUT(lut_base)); - - /* Write Register */ - lut_base = SEQID_WRSR * 4; -- writel(LUT0(CMD, PAD1, SPINOR_OP_WRSR) | LUT1(FSL_WRITE, PAD1, 0x2), -+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRSR) | -+ LUT1(FSL_WRITE, PAD1, 0x2), - base + QUADSPI_LUT(lut_base)); - - /* Read Configuration Register */ - lut_base = SEQID_RDCR * 4; -- writel(LUT0(CMD, PAD1, SPINOR_OP_RDCR) | LUT1(FSL_READ, PAD1, 0x1), -+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDCR) | -+ LUT1(FSL_READ, PAD1, 0x1), - base + QUADSPI_LUT(lut_base)); - - /* Write disable */ - lut_base = SEQID_WRDI * 4; -- writel(LUT0(CMD, PAD1, SPINOR_OP_WRDI), base + QUADSPI_LUT(lut_base)); -+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRDI), -+ base + QUADSPI_LUT(lut_base)); - - /* Enter 4 Byte Mode (Micron) */ - lut_base = SEQID_EN4B * 4; -- writel(LUT0(CMD, PAD1, SPINOR_OP_EN4B), base + QUADSPI_LUT(lut_base)); -+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_EN4B), -+ base + QUADSPI_LUT(lut_base)); - - /* Enter 4 Byte Mode (Spansion) */ - lut_base = SEQID_BRWR * 4; -- writel(LUT0(CMD, PAD1, SPINOR_OP_BRWR), base + QUADSPI_LUT(lut_base)); -+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_BRWR), -+ base + QUADSPI_LUT(lut_base)); - - fsl_qspi_lock_lut(q); - } -@@ -488,15 +520,16 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 c - q->chip_base_addr, addr, len, cmd); - - /* save the reg */ -- reg = readl(base + QUADSPI_MCR); -+ reg = qspi_readl(q, base + QUADSPI_MCR); - -- writel(q->memmap_phy + q->chip_base_addr + addr, base + QUADSPI_SFAR); -- writel(QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS, -+ qspi_writel(q, q->memmap_phy + q->chip_base_addr + addr, -+ base + QUADSPI_SFAR); -+ qspi_writel(q, QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS, - base + QUADSPI_RBCT); -- writel(reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR); -+ qspi_writel(q, reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR); - - do { -- reg2 = readl(base + QUADSPI_SR); -+ reg2 = qspi_readl(q, base + QUADSPI_SR); - if (reg2 & (QUADSPI_SR_IP_ACC_MASK | QUADSPI_SR_AHB_ACC_MASK)) { - udelay(1); - dev_dbg(q->dev, "The controller is busy, 0x%x\n", reg2); -@@ -507,21 +540,22 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 c - - /* trigger the LUT now */ - seqid = fsl_qspi_get_seqid(q, cmd); -- writel((seqid << QUADSPI_IPCR_SEQID_SHIFT) | len, base + QUADSPI_IPCR); -+ qspi_writel(q, (seqid << QUADSPI_IPCR_SEQID_SHIFT) | len, -+ base + QUADSPI_IPCR); - - /* Wait for the interrupt. 
*/ - if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000))) { - dev_err(q->dev, - "cmd 0x%.2x timeout, addr@%.8x, FR:0x%.8x, SR:0x%.8x\n", -- cmd, addr, readl(base + QUADSPI_FR), -- readl(base + QUADSPI_SR)); -+ cmd, addr, qspi_readl(q, base + QUADSPI_FR), -+ qspi_readl(q, base + QUADSPI_SR)); - err = -ETIMEDOUT; - } else { - err = 0; - } - - /* restore the MCR */ -- writel(reg, base + QUADSPI_MCR); -+ qspi_writel(q, reg, base + QUADSPI_MCR); - - return err; - } -@@ -533,7 +567,7 @@ static void fsl_qspi_read_data(struct fs - int i = 0; - - while (len > 0) { -- tmp = readl(q->iobase + QUADSPI_RBDR + i * 4); -+ tmp = qspi_readl(q, q->iobase + QUADSPI_RBDR + i * 4); - tmp = fsl_qspi_endian_xchg(q, tmp); - dev_dbg(q->dev, "chip addr:0x%.8x, rcv:0x%.8x\n", - q->chip_base_addr, tmp); -@@ -561,9 +595,9 @@ static inline void fsl_qspi_invalid(stru - { - u32 reg; - -- reg = readl(q->iobase + QUADSPI_MCR); -+ reg = qspi_readl(q, q->iobase + QUADSPI_MCR); - reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK; -- writel(reg, q->iobase + QUADSPI_MCR); -+ qspi_writel(q, reg, q->iobase + QUADSPI_MCR); - - /* - * The minimum delay : 1 AHB + 2 SFCK clocks. -@@ -572,7 +606,7 @@ static inline void fsl_qspi_invalid(stru - udelay(1); - - reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK); -- writel(reg, q->iobase + QUADSPI_MCR); -+ qspi_writel(q, reg, q->iobase + QUADSPI_MCR); - } - - static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor, -@@ -586,20 +620,20 @@ static int fsl_qspi_nor_write(struct fsl - q->chip_base_addr, to, count); - - /* clear the TX FIFO. */ -- tmp = readl(q->iobase + QUADSPI_MCR); -- writel(tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR); -+ tmp = qspi_readl(q, q->iobase + QUADSPI_MCR); -+ qspi_writel(q, tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR); - - /* fill the TX data to the FIFO */ - for (j = 0, i = ((count + 3) / 4); j < i; j++) { - tmp = fsl_qspi_endian_xchg(q, *txbuf); -- writel(tmp, q->iobase + QUADSPI_TBDR); -+ qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR); - txbuf++; - } - - /* fill the TXFIFO upto 16 bytes for i.MX7d */ - if (needs_fill_txfifo(q)) - for (; i < 4; i++) -- writel(tmp, q->iobase + QUADSPI_TBDR); -+ qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR); - - /* Trigger it */ - ret = fsl_qspi_runcmd(q, opcode, to, count); -@@ -615,10 +649,10 @@ static void fsl_qspi_set_map_addr(struct - int nor_size = q->nor_size; - void __iomem *base = q->iobase; - -- writel(nor_size + q->memmap_phy, base + QUADSPI_SFA1AD); -- writel(nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD); -- writel(nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD); -- writel(nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD); -+ qspi_writel(q, nor_size + q->memmap_phy, base + QUADSPI_SFA1AD); -+ qspi_writel(q, nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD); -+ qspi_writel(q, nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD); -+ qspi_writel(q, nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD); - } - - /* -@@ -640,24 +674,26 @@ static void fsl_qspi_init_abh_read(struc - int seqid; - - /* AHB configuration for access buffer 0/1/2 .*/ -- writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR); -- writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR); -- writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR); -+ qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR); -+ qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR); -+ qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + 
QUADSPI_BUF2CR); - /* - * Set ADATSZ with the maximum AHB buffer size to improve the - * read performance. - */ -- writel(QUADSPI_BUF3CR_ALLMST_MASK | ((q->devtype_data->ahb_buf_size / 8) -- << QUADSPI_BUF3CR_ADATSZ_SHIFT), base + QUADSPI_BUF3CR); -+ qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK | -+ ((q->devtype_data->ahb_buf_size / 8) -+ << QUADSPI_BUF3CR_ADATSZ_SHIFT), -+ base + QUADSPI_BUF3CR); - - /* We only use the buffer3 */ -- writel(0, base + QUADSPI_BUF0IND); -- writel(0, base + QUADSPI_BUF1IND); -- writel(0, base + QUADSPI_BUF2IND); -+ qspi_writel(q, 0, base + QUADSPI_BUF0IND); -+ qspi_writel(q, 0, base + QUADSPI_BUF1IND); -+ qspi_writel(q, 0, base + QUADSPI_BUF2IND); - - /* Set the default lut sequence for AHB Read. */ - seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode); -- writel(seqid << QUADSPI_BFGENCR_SEQID_SHIFT, -+ qspi_writel(q, seqid << QUADSPI_BFGENCR_SEQID_SHIFT, - q->iobase + QUADSPI_BFGENCR); - } - -@@ -713,7 +749,7 @@ static int fsl_qspi_nor_setup(struct fsl - return ret; - - /* Reset the module */ -- writel(QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK, -+ qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK, - base + QUADSPI_MCR); - udelay(1); - -@@ -721,24 +757,24 @@ static int fsl_qspi_nor_setup(struct fsl - fsl_qspi_init_lut(q); - - /* Disable the module */ -- writel(QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK, -+ qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK, - base + QUADSPI_MCR); - -- reg = readl(base + QUADSPI_SMPR); -- writel(reg & ~(QUADSPI_SMPR_FSDLY_MASK -+ reg = qspi_readl(q, base + QUADSPI_SMPR); -+ qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK - | QUADSPI_SMPR_FSPHS_MASK - | QUADSPI_SMPR_HSENA_MASK - | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR); - - /* Enable the module */ -- writel(QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK, -+ qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK, - base + QUADSPI_MCR); - - /* clear all interrupt status */ -- writel(0xffffffff, q->iobase + QUADSPI_FR); -+ qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR); - - /* enable the interrupt */ -- writel(QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER); -+ qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER); - - return 0; - } -@@ -954,6 +990,7 @@ static int fsl_qspi_probe(struct platfor - if (IS_ERR(q->iobase)) - return PTR_ERR(q->iobase); - -+ q->big_endian = of_property_read_bool(np, "big-endian"); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "QuadSPI-memory"); - if (!devm_request_mem_region(dev, res->start, resource_size(res), -@@ -1101,8 +1138,8 @@ static int fsl_qspi_remove(struct platfo - } - - /* disable the hardware */ -- writel(QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); -- writel(0x0, q->iobase + QUADSPI_RSER); -+ qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); -+ qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER); - - mutex_destroy(&q->lock); - diff --git a/target/linux/layerscape/patches-4.4/1088-mtd-spi-nor-fsl-quadspi-add-support-for-ls1021a.patch b/target/linux/layerscape/patches-4.4/1088-mtd-spi-nor-fsl-quadspi-add-support-for-ls1021a.patch deleted file mode 100644 index a5782ffc1..000000000 --- a/target/linux/layerscape/patches-4.4/1088-mtd-spi-nor-fsl-quadspi-add-support-for-ls1021a.patch +++ /dev/null @@ -1,63 +0,0 @@ -From da44c1517526822e73642fc71b034de8fc7d2b43 Mon Sep 17 00:00:00 2001 -From: Yuan Yao -Date: Tue, 17 Nov 2015 16:44:45 +0800 -Subject: [PATCH 088/113] mtd: spi-nor: fsl-quadspi: add support for ls1021a - -[context adjustment] - 
-LS1021a also supports the Freescale Quad SPI controller.
-Add fsl-quadspi support for the ls1021a chip and make SPI_FSL_QUADSPI
-selectable for LS1021A SoC hardware.
-
-Signed-off-by: Yuan Yao
-Acked-by: Han xu
-Integrated-by: Jiang Yutang
----
- drivers/mtd/spi-nor/Kconfig | 2 +-
- drivers/mtd/spi-nor/fsl-quadspi.c | 10 ++++++++++
- 2 files changed, 11 insertions(+), 1 deletion(-)
-
---- a/drivers/mtd/spi-nor/Kconfig
-+++ b/drivers/mtd/spi-nor/Kconfig
-@@ -23,7 +23,7 @@ config MTD_SPI_NOR_USE_4K_SECTORS
-
- config SPI_FSL_QUADSPI
- tristate "Freescale Quad SPI controller"
-- depends on ARCH_MXC || COMPILE_TEST
-+ depends on ARCH_MXC || SOC_LS1021A || COMPILE_TEST
- depends on HAS_IOMEM
- help
- This enables support for the Quad SPI controller in master mode.
---- a/drivers/mtd/spi-nor/fsl-quadspi.c
-+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
-@@ -213,6 +213,7 @@ enum fsl_qspi_devtype {
- FSL_QUADSPI_IMX6SX,
- FSL_QUADSPI_IMX7D,
- FSL_QUADSPI_IMX6UL,
-+ FSL_QUADSPI_LS1021A,
- };
-
- struct fsl_qspi_devtype_data {
-@@ -258,6 +259,14 @@ static struct fsl_qspi_devtype_data imx6
- | QUADSPI_QUIRK_4X_INT_CLK,
- };
-
-+static struct fsl_qspi_devtype_data ls1021a_data = {
-+ .devtype = FSL_QUADSPI_LS1021A,
-+ .rxfifo = 128,
-+ .txfifo = 64,
-+ .ahb_buf_size = 1024,
-+ .driver_data = 0,
-+};
-+
- #define FSL_QSPI_MAX_CHIP 4
- struct fsl_qspi {
- struct spi_nor nor[FSL_QSPI_MAX_CHIP];
-@@ -812,6 +821,7 @@ static const struct of_device_id fsl_qsp
- { .compatible = "fsl,imx6sx-qspi", .data = (void *)&imx6sx_data, },
- { .compatible = "fsl,imx7d-qspi", .data = (void *)&imx7d_data, },
- { .compatible = "fsl,imx6ul-qspi", .data = (void *)&imx6ul_data, },
-+ { .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, },
- { /* sentinel */ }
- };
- MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
diff --git a/target/linux/layerscape/patches-4.4/1089-mtd-spi-nor-fsl-quadspi-add-support-for-layerscape.patch b/target/linux/layerscape/patches-4.4/1089-mtd-spi-nor-fsl-quadspi-add-support-for-layerscape.patch
deleted file mode 100644
index bd959df11..000000000
--- a/target/linux/layerscape/patches-4.4/1089-mtd-spi-nor-fsl-quadspi-add-support-for-layerscape.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-From 9c6153130081ef2c109e2a243a598f2bc0dc6413 Mon Sep 17 00:00:00 2001
-From: Yuan Yao
-Date: Tue, 17 Nov 2015 17:06:47 +0800
-Subject: [PATCH 089/113] mtd: spi-nor: fsl-quadspi: add support for
- layerscape
-
-[context adjustment]
-
-LS1043a and LS2080A in the Layerscape family also support Freescale Quad
-SPI; make Quad SPI selectable for this hardware.
-
-Signed-off-by: Yuan Yao
-Integrated-by: Jiang Yutang
----
- drivers/mtd/spi-nor/Kconfig | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/mtd/spi-nor/Kconfig
-+++ b/drivers/mtd/spi-nor/Kconfig
-@@ -23,7 +23,7 @@ config MTD_SPI_NOR_USE_4K_SECTORS
-
- config SPI_FSL_QUADSPI
- tristate "Freescale Quad SPI controller"
-- depends on ARCH_MXC || SOC_LS1021A || COMPILE_TEST
-+ depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
- depends on HAS_IOMEM
- help
- This enables support for the Quad SPI controller in master mode.
diff --git a/target/linux/layerscape/patches-4.4/1090-mtd-spi-nor-Add-SPI-NOR-layer-PM-support.patch b/target/linux/layerscape/patches-4.4/1090-mtd-spi-nor-Add-SPI-NOR-layer-PM-support.patch
deleted file mode 100644
index de3d2d545..000000000
--- a/target/linux/layerscape/patches-4.4/1090-mtd-spi-nor-Add-SPI-NOR-layer-PM-support.patch
+++ /dev/null
@@ -1,138 +0,0 @@
-From 2c5a3db21926e9ebfd7a32e3c36a3256ed84903c Mon Sep 17 00:00:00 2001
-From: Hou Zhiqiang
-Date: Thu, 19 Nov 2015 20:25:24 +0800
-Subject: [PATCH 090/113] mtd: spi-nor: Add SPI NOR layer PM support
-
-[context adjustment]
-
-Add the Power Management API to the SPI NOR framework.
-The Power Management system will turn off the power supply to the SPI
-flash while the system is suspended, so the SPI flash will be in its
-reset state after the system resumes. As a result, the status and
-configuration held by the SPI flash driver will mismatch the current
-hardware state. So reinitialize the SPI flash to make sure it is
-resumed to the correct state.
-The SPI NOR layer just does the common configuration, depending on
-the records in structure spi_nor.
-
-Signed-off-by: Hou Zhiqiang
-Integrated-by: Jiang Yutang
----
- drivers/mtd/spi-nor/spi-nor.c | 74 ++++++++++++++++++++++++++++++++++-------
- include/linux/mtd/spi-nor.h | 9 +++++
- 2 files changed, 71 insertions(+), 12 deletions(-)
-
---- a/drivers/mtd/spi-nor/spi-nor.c
-+++ b/drivers/mtd/spi-nor/spi-nor.c
-@@ -1148,6 +1148,26 @@ static int spi_nor_check(struct spi_nor
- return 0;
- }
-
-+/*
-+ * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up
-+ * with the software protection bits set
-+ */
-+static int spi_nor_unprotect_on_powerup(struct spi_nor *nor)
-+{
-+ const struct flash_info *info = NULL;
-+ int ret = 0;
-+
-+ info = spi_nor_read_id(nor);
-+ if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
-+ JEDEC_MFR(info) == SNOR_MFR_INTEL ||
-+ JEDEC_MFR(info) == SNOR_MFR_SST) {
-+ write_enable(nor);
-+ ret = write_sr(nor, 0);
-+ }
-+
-+ return ret;
-+}
-+
- int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
- {
- const struct flash_info *info = NULL;
-@@ -1195,19 +1215,9 @@ int spi_nor_scan(struct spi_nor *nor, co
-
- mutex_init(&nor->lock);
-
-- /*
-- * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up
-- * with the software protection bits set
-- */
--
-- if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
-- JEDEC_MFR(info) == SNOR_MFR_INTEL ||
-- JEDEC_MFR(info) == SNOR_MFR_MACRONIX ||
-- JEDEC_MFR(info) == SNOR_MFR_SST ||
-- info->flags & SPI_NOR_HAS_LOCK) {
-- write_enable(nor);
-- write_sr(nor, 0);
-- }
-+ ret = spi_nor_unprotect_on_powerup(nor);
-+ if (ret)
-+ return ret;
-
- if (!mtd->name)
- mtd->name = dev_name(dev);
-@@ -1374,6 +1384,45 @@ int spi_nor_scan(struct spi_nor *nor, co
- }
- EXPORT_SYMBOL_GPL(spi_nor_scan);
-
-+static int spi_nor_hw_reinit(struct spi_nor *nor)
-+{
-+ const struct flash_info *info = NULL;
-+ struct device *dev = nor->dev;
-+ int ret;
-+
-+ info = spi_nor_read_id(nor);
-+
-+ ret = spi_nor_unprotect_on_powerup(nor);
-+ if (ret)
-+ return ret;
-+
-+ if (nor->flash_read == SPI_NOR_QUAD) {
-+ ret = set_quad_mode(nor, info);
-+ if (ret) {
-+ dev_err(dev, "quad mode not supported\n");
-+ return ret;
-+ }
-+ }
-+
-+ if (nor->addr_width == 4 &&
-+ JEDEC_MFR(info) != SNOR_MFR_SPANSION)
-+ set_4byte(nor, info, 1);
-+
-+ return 0;
-+}
-+
-+int spi_nor_suspend(struct spi_nor *nor)
-+{
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(spi_nor_suspend);
-+
-+int spi_nor_resume(struct spi_nor *nor)
-+{
-+ return spi_nor_hw_reinit(nor);
-+}
-+EXPORT_SYMBOL_GPL(spi_nor_resume); -+ - static const struct flash_info *spi_nor_match_id(const char *name) - { - const struct flash_info *id = spi_nor_ids; ---- a/include/linux/mtd/spi-nor.h -+++ b/include/linux/mtd/spi-nor.h -@@ -210,4 +210,13 @@ static inline struct device_node *spi_no - */ - int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode); - -+/** -+ * spi_nor_suspend/resume() - the SPI NOR layer PM API -+ * @nor: the spi_nor structure -+ * -+ * Return: 0 for success, others for failure. -+ */ -+int spi_nor_suspend(struct spi_nor *nor); -+int spi_nor_resume(struct spi_nor *nor); -+ - #endif diff --git a/target/linux/layerscape/patches-4.4/1091-mtd-spi-nor-change-return-value-of-read-write.patch b/target/linux/layerscape/patches-4.4/1091-mtd-spi-nor-change-return-value-of-read-write.patch deleted file mode 100644 index faa806e70..000000000 --- a/target/linux/layerscape/patches-4.4/1091-mtd-spi-nor-change-return-value-of-read-write.patch +++ /dev/null @@ -1,82 +0,0 @@ -From 0a8079b232e9188ba267e37e20f192bed6c2b29b Mon Sep 17 00:00:00 2001 -From: Michal Suchanek -Date: Wed, 2 Dec 2015 10:38:19 +0000 -Subject: [PATCH 091/113] mtd: spi-nor: change return value of read/write - -Change the return value of spi-nor device read and write methods to -allow returning amount of data transferred and errors as -read(2)/write(2) does. - -Signed-off-by: Michal Suchanek -Signed-off-by: Hou Zhiqiang ---- - drivers/mtd/devices/m25p80.c | 5 +++-- - drivers/mtd/spi-nor/fsl-quadspi.c | 5 +++-- - include/linux/mtd/spi-nor.h | 4 ++-- - 3 files changed, 8 insertions(+), 6 deletions(-) - ---- a/drivers/mtd/devices/m25p80.c -+++ b/drivers/mtd/devices/m25p80.c -@@ -73,7 +73,7 @@ static int m25p80_write_reg(struct spi_n - return spi_write(spi, flash->command, len + 1); - } - --static void m25p80_write(struct spi_nor *nor, loff_t to, size_t len, -+static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len, - size_t *retlen, const u_char *buf) - { - struct m25p *flash = nor->priv; -@@ -101,6 +101,7 @@ static void m25p80_write(struct spi_nor - spi_sync(spi, &m); - - *retlen += m.actual_length - cmd_sz; -+ return 0; - } - - static inline unsigned int m25p80_rx_nbits(struct spi_nor *nor) -@@ -119,7 +120,7 @@ static inline unsigned int m25p80_rx_nbi - * Read an address range from the nor chip. The address range - * may be any size provided it is within the physical boundaries. - */ --static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len, -+static ssize_t m25p80_read(struct spi_nor *nor, loff_t from, size_t len, - size_t *retlen, u_char *buf) - { - struct m25p *flash = nor->priv; ---- a/drivers/mtd/spi-nor/fsl-quadspi.c -+++ b/drivers/mtd/spi-nor/fsl-quadspi.c -@@ -868,7 +868,7 @@ static int fsl_qspi_write_reg(struct spi - return ret; - } - --static void fsl_qspi_write(struct spi_nor *nor, loff_t to, -+static ssize_t fsl_qspi_write(struct spi_nor *nor, loff_t to, - size_t len, size_t *retlen, const u_char *buf) - { - struct fsl_qspi *q = nor->priv; -@@ -878,9 +878,10 @@ static void fsl_qspi_write(struct spi_no - - /* invalid the data in the AHB buffer. 
*/ - fsl_qspi_invalid(q); -+ return 0; - } - --static int fsl_qspi_read(struct spi_nor *nor, loff_t from, -+static ssize_t fsl_qspi_read(struct spi_nor *nor, loff_t from, - size_t len, size_t *retlen, u_char *buf) - { - struct fsl_qspi *q = nor->priv; ---- a/include/linux/mtd/spi-nor.h -+++ b/include/linux/mtd/spi-nor.h -@@ -170,9 +170,9 @@ struct spi_nor { - int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); - int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); - -- int (*read)(struct spi_nor *nor, loff_t from, -+ ssize_t (*read)(struct spi_nor *nor, loff_t from, - size_t len, size_t *retlen, u_char *read_buf); -- void (*write)(struct spi_nor *nor, loff_t to, -+ ssize_t (*write)(struct spi_nor *nor, loff_t to, - size_t len, size_t *retlen, const u_char *write_buf); - int (*erase)(struct spi_nor *nor, loff_t offs); - diff --git a/target/linux/layerscape/patches-4.4/1092-mtd-fsl-quadspi-return-amount-of-data-read-written-o.patch b/target/linux/layerscape/patches-4.4/1092-mtd-fsl-quadspi-return-amount-of-data-read-written-o.patch deleted file mode 100644 index 443803693..000000000 --- a/target/linux/layerscape/patches-4.4/1092-mtd-fsl-quadspi-return-amount-of-data-read-written-o.patch +++ /dev/null @@ -1,73 +0,0 @@ -From 99768b3062501b05810fb62545279da3a4371ca0 Mon Sep 17 00:00:00 2001 -From: Michal Suchanek -Date: Wed, 2 Dec 2015 10:38:19 +0000 -Subject: [PATCH 092/113] mtd: fsl-quadspi: return amount of data read/written - or error - -Return amount of data read/written or error as read(2)/write(2) does. - -Signed-off-by: Michal Suchanek -Signed-off-by: Hou Zhiqiang ---- - drivers/mtd/spi-nor/fsl-quadspi.c | 18 +++++++++++------- - 1 file changed, 11 insertions(+), 7 deletions(-) - ---- a/drivers/mtd/spi-nor/fsl-quadspi.c -+++ b/drivers/mtd/spi-nor/fsl-quadspi.c -@@ -618,7 +618,7 @@ static inline void fsl_qspi_invalid(stru - qspi_writel(q, reg, q->iobase + QUADSPI_MCR); - } - --static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor, -+static ssize_t fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor, - u8 opcode, unsigned int to, u32 *txbuf, - unsigned count, size_t *retlen) - { -@@ -647,8 +647,11 @@ static int fsl_qspi_nor_write(struct fsl - /* Trigger it */ - ret = fsl_qspi_runcmd(q, opcode, to, count); - -- if (ret == 0 && retlen) -- *retlen += count; -+ if (ret == 0) { -+ if (retlen) -+ *retlen += count; -+ return count; -+ } - - return ret; - } -@@ -860,6 +863,8 @@ static int fsl_qspi_write_reg(struct spi - } else if (len > 0) { - ret = fsl_qspi_nor_write(q, nor, opcode, 0, - (u32 *)buf, len, NULL); -+ if (ret > 0) -+ return 0; - } else { - dev_err(q->dev, "invalid cmd %d\n", opcode); - ret = -EINVAL; -@@ -873,12 +878,12 @@ static ssize_t fsl_qspi_write(struct spi - { - struct fsl_qspi *q = nor->priv; - -- fsl_qspi_nor_write(q, nor, nor->program_opcode, to, -+ ssize_t ret = fsl_qspi_nor_write(q, nor, nor->program_opcode, to, - (u32 *)buf, len, retlen); - - /* invalid the data in the AHB buffer. 
*/ - fsl_qspi_invalid(q); -- return 0; -+ return ret; - } - - static ssize_t fsl_qspi_read(struct spi_nor *nor, loff_t from, -@@ -924,8 +929,7 @@ static ssize_t fsl_qspi_read(struct spi_ - memcpy(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs, - len); - -- *retlen += len; -- return 0; -+ return len; - } - - static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs) diff --git a/target/linux/layerscape/patches-4.4/1093-mtd-spi-nor-check-return-value-from-read-write.patch b/target/linux/layerscape/patches-4.4/1093-mtd-spi-nor-check-return-value-from-read-write.patch deleted file mode 100644 index 85a231515..000000000 --- a/target/linux/layerscape/patches-4.4/1093-mtd-spi-nor-check-return-value-from-read-write.patch +++ /dev/null @@ -1,127 +0,0 @@ -From 8527843351169d999995d331bbdad75560ccafb2 Mon Sep 17 00:00:00 2001 -From: Michal Suchanek -Date: Wed, 2 Dec 2015 10:38:20 +0000 -Subject: [PATCH 093/113] mtd: spi-nor: check return value from read/write - -SPI NOR hardware drivers now return useful value from their read/write -functions so check them. - -Signed-off-by: Michal Suchanek -Signed-off-by: Hou Zhiqiang ---- - drivers/mtd/spi-nor/spi-nor.c | 50 +++++++++++++++++++++++++++++------------ - 1 file changed, 36 insertions(+), 14 deletions(-) - ---- a/drivers/mtd/spi-nor/spi-nor.c -+++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -924,7 +924,10 @@ static int spi_nor_read(struct mtd_info - ret = nor->read(nor, from, len, retlen, buf); - - spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ); -- return ret; -+ if (ret < 0) -+ return ret; -+ -+ return 0; - } - - static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, -@@ -950,10 +953,14 @@ static int sst_write(struct mtd_info *mt - nor->program_opcode = SPINOR_OP_BP; - - /* write one byte. */ -- nor->write(nor, to, 1, retlen, buf); -+ ret = nor->write(nor, to, 1, retlen, buf); -+ if (ret < 0) -+ goto sst_write_err; -+ WARN(ret != 1, "While writing 1 byte written %i bytes\n", -+ (int)ret); - ret = spi_nor_wait_till_ready(nor); - if (ret) -- goto time_out; -+ goto sst_write_err; - } - to += actual; - -@@ -962,10 +969,14 @@ static int sst_write(struct mtd_info *mt - nor->program_opcode = SPINOR_OP_AAI_WP; - - /* write two bytes. */ -- nor->write(nor, to, 2, retlen, buf + actual); -+ ret = nor->write(nor, to, 2, retlen, buf + actual); -+ if (ret < 0) -+ goto sst_write_err; -+ WARN(ret != 2, "While writing 2 bytes written %i bytes\n", -+ (int)ret); - ret = spi_nor_wait_till_ready(nor); - if (ret) -- goto time_out; -+ goto sst_write_err; - to += 2; - nor->sst_write_second = true; - } -@@ -974,21 +985,24 @@ static int sst_write(struct mtd_info *mt - write_disable(nor); - ret = spi_nor_wait_till_ready(nor); - if (ret) -- goto time_out; -+ goto sst_write_err; - - /* Write out trailing byte if it exists. */ - if (actual != len) { - write_enable(nor); - - nor->program_opcode = SPINOR_OP_BP; -- nor->write(nor, to, 1, retlen, buf + actual); -- -+ ret = nor->write(nor, to, 1, retlen, buf + actual); -+ if (ret < 0) -+ goto sst_write_err; -+ WARN(ret != 1, "While writing 1 byte written %i bytes\n", -+ (int)ret); - ret = spi_nor_wait_till_ready(nor); - if (ret) -- goto time_out; -+ goto sst_write_err; - write_disable(nor); - } --time_out: -+sst_write_err: - spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE); - return ret; - } -@@ -1017,14 +1031,18 @@ static int spi_nor_write(struct mtd_info - - /* do all the bytes fit onto one page? 
*/ - if (page_offset + len <= nor->page_size) { -- nor->write(nor, to, len, retlen, buf); -+ ret = nor->write(nor, to, len, retlen, buf); -+ if (ret < 0) -+ goto write_err; - } else { - /* the size of data remaining on the first page */ - page_size = nor->page_size - page_offset; -- nor->write(nor, to, page_size, retlen, buf); -+ ret = nor->write(nor, to, page_size, retlen, buf); -+ if (ret < 0) -+ goto write_err; - - /* write everything in nor->page_size chunks */ -- for (i = page_size; i < len; i += page_size) { -+ for (i = ret; i < len; ) { - page_size = len - i; - if (page_size > nor->page_size) - page_size = nor->page_size; -@@ -1035,7 +1053,11 @@ static int spi_nor_write(struct mtd_info - - write_enable(nor); - -- nor->write(nor, to + i, page_size, retlen, buf + i); -+ ret = nor->write(nor, to + i, page_size, retlen, -+ buf + i); -+ if (ret < 0) -+ goto write_err; -+ i += ret; - } - } - diff --git a/target/linux/layerscape/patches-4.4/1094-mtd-spi-nor-stop-passing-around-retlen.patch b/target/linux/layerscape/patches-4.4/1094-mtd-spi-nor-stop-passing-around-retlen.patch deleted file mode 100644 index f72bc2c82..000000000 --- a/target/linux/layerscape/patches-4.4/1094-mtd-spi-nor-stop-passing-around-retlen.patch +++ /dev/null @@ -1,215 +0,0 @@ -From a99477d72b500b48cb3614aad0ce096fe4e3f437 Mon Sep 17 00:00:00 2001 -From: Michal Suchanek -Date: Wed, 2 Dec 2015 10:38:20 +0000 -Subject: [PATCH 094/113] mtd: spi-nor: stop passing around retlen - -[context adjustment] -not apply changes of drivers/mtd/devices/m25p80.c -################# -@@ -74,7 +74,7 @@ static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) - } - - static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len, -- size_t *retlen, const u_char *buf) -+ const u_char *buf) - { - struct m25p *flash = nor->priv; - struct spi_device *spi = flash->spi; -@@ -106,7 +106,6 @@ static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len, - ret = m.actual_length - cmd_sz; - if (ret < 0) - return -EIO; -- *retlen += ret; - return ret; - } - -@@ -127,7 +126,7 @@ static inline unsigned int m25p80_rx_nbits(struct spi_nor *nor) - * may be any size provided it is within the physical boundaries. - */ - static ssize_t m25p80_read(struct spi_nor *nor, loff_t from, size_t len, -- size_t *retlen, u_char *buf) -+ u_char *buf) - { - struct m25p *flash = nor->priv; - struct spi_device *spi = flash->spi; -@@ -161,7 +160,6 @@ static ssize_t m25p80_read(struct spi_nor *nor, loff_t from, size_t len, - ret = m.actual_length - m25p_cmdsz(nor) - dummy; - if (ret < 0) - return -EIO; -- *retlen += ret; - return ret; - } - -################# - -Do not pass retlen to hardware driver read/write functions. Update it in -spi-nor generic driver instead. 
- -Signed-off-by: Michal Suchanek -Signed-off-by: Hou Zhiqiang -Integrated-by: Jiang Yutang ---- - drivers/mtd/spi-nor/fsl-quadspi.c | 16 ++++++---------- - drivers/mtd/spi-nor/spi-nor.c | 21 +++++++++++++-------- - include/linux/mtd/spi-nor.h | 4 ++-- - 3 files changed, 21 insertions(+), 20 deletions(-) - ---- a/drivers/mtd/spi-nor/fsl-quadspi.c -+++ b/drivers/mtd/spi-nor/fsl-quadspi.c -@@ -620,7 +620,7 @@ static inline void fsl_qspi_invalid(stru - - static ssize_t fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor, - u8 opcode, unsigned int to, u32 *txbuf, -- unsigned count, size_t *retlen) -+ unsigned count) - { - int ret, i, j; - u32 tmp; -@@ -647,11 +647,8 @@ static ssize_t fsl_qspi_nor_write(struct - /* Trigger it */ - ret = fsl_qspi_runcmd(q, opcode, to, count); - -- if (ret == 0) { -- if (retlen) -- *retlen += count; -+ if (ret == 0) - return count; -- } - - return ret; - } -@@ -862,7 +859,7 @@ static int fsl_qspi_write_reg(struct spi - - } else if (len > 0) { - ret = fsl_qspi_nor_write(q, nor, opcode, 0, -- (u32 *)buf, len, NULL); -+ (u32 *)buf, len); - if (ret > 0) - return 0; - } else { -@@ -874,12 +871,11 @@ static int fsl_qspi_write_reg(struct spi - } - - static ssize_t fsl_qspi_write(struct spi_nor *nor, loff_t to, -- size_t len, size_t *retlen, const u_char *buf) -+ size_t len, const u_char *buf) - { - struct fsl_qspi *q = nor->priv; -- - ssize_t ret = fsl_qspi_nor_write(q, nor, nor->program_opcode, to, -- (u32 *)buf, len, retlen); -+ (u32 *)buf, len); - - /* invalid the data in the AHB buffer. */ - fsl_qspi_invalid(q); -@@ -887,7 +883,7 @@ static ssize_t fsl_qspi_write(struct spi - } - - static ssize_t fsl_qspi_read(struct spi_nor *nor, loff_t from, -- size_t len, size_t *retlen, u_char *buf) -+ size_t len, u_char *buf) - { - struct fsl_qspi *q = nor->priv; - u8 cmd = nor->read_opcode; ---- a/drivers/mtd/spi-nor/spi-nor.c -+++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -921,12 +921,13 @@ static int spi_nor_read(struct mtd_info - if (ret) - return ret; - -- ret = nor->read(nor, from, len, retlen, buf); -+ ret = nor->read(nor, from, len, buf); - - spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ); - if (ret < 0) - return ret; - -+ *retlen += ret; - return 0; - } - -@@ -953,7 +954,7 @@ static int sst_write(struct mtd_info *mt - nor->program_opcode = SPINOR_OP_BP; - - /* write one byte. */ -- ret = nor->write(nor, to, 1, retlen, buf); -+ ret = nor->write(nor, to, 1, buf); - if (ret < 0) - goto sst_write_err; - WARN(ret != 1, "While writing 1 byte written %i bytes\n", -@@ -969,7 +970,7 @@ static int sst_write(struct mtd_info *mt - nor->program_opcode = SPINOR_OP_AAI_WP; - - /* write two bytes. */ -- ret = nor->write(nor, to, 2, retlen, buf + actual); -+ ret = nor->write(nor, to, 2, buf + actual); - if (ret < 0) - goto sst_write_err; - WARN(ret != 2, "While writing 2 bytes written %i bytes\n", -@@ -992,7 +993,7 @@ static int sst_write(struct mtd_info *mt - write_enable(nor); - - nor->program_opcode = SPINOR_OP_BP; -- ret = nor->write(nor, to, 1, retlen, buf + actual); -+ ret = nor->write(nor, to, 1, buf + actual); - if (ret < 0) - goto sst_write_err; - WARN(ret != 1, "While writing 1 byte written %i bytes\n", -@@ -1001,8 +1002,10 @@ static int sst_write(struct mtd_info *mt - if (ret) - goto sst_write_err; - write_disable(nor); -+ actual += 1; - } - sst_write_err: -+ *retlen += actual; - spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE); - return ret; - } -@@ -1031,15 +1034,17 @@ static int spi_nor_write(struct mtd_info - - /* do all the bytes fit onto one page? 
*/ - if (page_offset + len <= nor->page_size) { -- ret = nor->write(nor, to, len, retlen, buf); -+ ret = nor->write(nor, to, len, buf); - if (ret < 0) - goto write_err; -+ *retlen += ret; - } else { - /* the size of data remaining on the first page */ - page_size = nor->page_size - page_offset; -- ret = nor->write(nor, to, page_size, retlen, buf); -+ ret = nor->write(nor, to, page_size, buf); - if (ret < 0) - goto write_err; -+ *retlen += ret; - - /* write everything in nor->page_size chunks */ - for (i = ret; i < len; ) { -@@ -1053,10 +1058,10 @@ static int spi_nor_write(struct mtd_info - - write_enable(nor); - -- ret = nor->write(nor, to + i, page_size, retlen, -- buf + i); -+ ret = nor->write(nor, to + i, page_size, buf + i); - if (ret < 0) - goto write_err; -+ *retlen += ret; - i += ret; - } - } ---- a/include/linux/mtd/spi-nor.h -+++ b/include/linux/mtd/spi-nor.h -@@ -171,9 +171,9 @@ struct spi_nor { - int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); - - ssize_t (*read)(struct spi_nor *nor, loff_t from, -- size_t len, size_t *retlen, u_char *read_buf); -+ size_t len, u_char *read_buf); - ssize_t (*write)(struct spi_nor *nor, loff_t to, -- size_t len, size_t *retlen, const u_char *write_buf); -+ size_t len, const u_char *write_buf); - int (*erase)(struct spi_nor *nor, loff_t offs); - - int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); diff --git a/target/linux/layerscape/patches-4.4/1095-mtd-spi-nor-simplify-write-loop.patch b/target/linux/layerscape/patches-4.4/1095-mtd-spi-nor-simplify-write-loop.patch deleted file mode 100644 index f3179cd56..000000000 --- a/target/linux/layerscape/patches-4.4/1095-mtd-spi-nor-simplify-write-loop.patch +++ /dev/null @@ -1,100 +0,0 @@ -From 93b40e12f7e580a41c4aee5597579cc539fd8544 Mon Sep 17 00:00:00 2001 -From: Michal Suchanek -Date: Wed, 2 Dec 2015 10:38:20 +0000 -Subject: [PATCH 095/113] mtd: spi-nor: simplify write loop - -The spi-nor write loop assumes that what is passed to the hardware -driver write() is what gets written. - -When write() writes less than page size at once data is dropped on the -floor. Check the amount of data writen and exit if it does not match -requested amount. - -Signed-off-by: Michal Suchanek -Signed-off-by: Hou Zhiqiang ---- - drivers/mtd/spi-nor/spi-nor.c | 58 ++++++++++++++++++----------------------- - 1 file changed, 25 insertions(+), 33 deletions(-) - ---- a/drivers/mtd/spi-nor/spi-nor.c -+++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -1019,8 +1019,8 @@ static int spi_nor_write(struct mtd_info - size_t *retlen, const u_char *buf) - { - struct spi_nor *nor = mtd_to_spi_nor(mtd); -- u32 page_offset, page_size, i; -- int ret; -+ size_t page_offset, page_remain, i; -+ ssize_t ret; - - dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len); - -@@ -1028,45 +1028,37 @@ static int spi_nor_write(struct mtd_info - if (ret) - return ret; - -- write_enable(nor); -+ for (i = 0; i < len; ) { -+ ssize_t written; - -- page_offset = to & (nor->page_size - 1); -- -- /* do all the bytes fit onto one page? */ -- if (page_offset + len <= nor->page_size) { -- ret = nor->write(nor, to, len, buf); -- if (ret < 0) -- goto write_err; -- *retlen += ret; -- } else { -+ page_offset = to & (nor->page_size - 1); -+ WARN_ONCE(page_offset, -+ "Writing at offset %zu into a NOR page. 
Writing partial pages may decrease reliability and increase wear of NOR flash.", -+ page_offset); - /* the size of data remaining on the first page */ -- page_size = nor->page_size - page_offset; -- ret = nor->write(nor, to, page_size, buf); -+ page_remain = min_t(size_t, -+ nor->page_size - page_offset, len - i); -+ -+ write_enable(nor); -+ ret = nor->write(nor, to + i, page_remain, buf + i); - if (ret < 0) - goto write_err; -- *retlen += ret; -+ written = ret; - -- /* write everything in nor->page_size chunks */ -- for (i = ret; i < len; ) { -- page_size = len - i; -- if (page_size > nor->page_size) -- page_size = nor->page_size; -- -- ret = spi_nor_wait_till_ready(nor); -- if (ret) -- goto write_err; -- -- write_enable(nor); -- -- ret = nor->write(nor, to + i, page_size, buf + i); -- if (ret < 0) -- goto write_err; -- *retlen += ret; -- i += ret; -+ ret = spi_nor_wait_till_ready(nor); -+ if (ret) -+ goto write_err; -+ *retlen += written; -+ i += written; -+ if (written != page_remain) { -+ dev_err(nor->dev, -+ "While writing %zu bytes written %zd bytes\n", -+ page_remain, written); -+ ret = -EIO; -+ goto write_err; - } - } - -- ret = spi_nor_wait_till_ready(nor); - write_err: - spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE); - return ret; diff --git a/target/linux/layerscape/patches-4.4/1096-mtd-spi-nor-add-read-loop.patch b/target/linux/layerscape/patches-4.4/1096-mtd-spi-nor-add-read-loop.patch deleted file mode 100644 index 59d9f3e88..000000000 --- a/target/linux/layerscape/patches-4.4/1096-mtd-spi-nor-add-read-loop.patch +++ /dev/null @@ -1,46 +0,0 @@ -From b5929f91416d64afacf46c649f38cc8f0eea50d2 Mon Sep 17 00:00:00 2001 -From: Michal Suchanek -Date: Wed, 2 Dec 2015 10:38:20 +0000 -Subject: [PATCH 096/113] mtd: spi-nor: add read loop - -mtdblock and ubi do not handle the situation when read returns less data -than requested. Loop in spi-nor until buffer is filled or an error is -returned. - -Signed-off-by: Michal Suchanek -Signed-off-by: Hou Zhiqiang ---- - drivers/mtd/spi-nor/spi-nor.c | 20 ++++++++++++++------ - 1 file changed, 14 insertions(+), 6 deletions(-) - ---- a/drivers/mtd/spi-nor/spi-nor.c -+++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -921,14 +921,22 @@ static int spi_nor_read(struct mtd_info - if (ret) - return ret; - -- ret = nor->read(nor, from, len, buf); -+ while (len) { -+ ret = nor->read(nor, from, len, buf); -+ if (ret <= 0) -+ goto read_err; - -- spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ); -- if (ret < 0) -- return ret; -+ WARN_ON(ret > len); -+ *retlen += ret; -+ buf += ret; -+ from += ret; -+ len -= ret; -+ } -+ ret = 0; - -- *retlen += ret; -- return 0; -+read_err: -+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ); -+ return ret; - } - - static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, diff --git a/target/linux/layerscape/patches-4.4/1097-mtd-fsl-quadspi-use-the-property-fields-of-SPI-NOR.patch b/target/linux/layerscape/patches-4.4/1097-mtd-fsl-quadspi-use-the-property-fields-of-SPI-NOR.patch deleted file mode 100644 index d07264d60..000000000 --- a/target/linux/layerscape/patches-4.4/1097-mtd-fsl-quadspi-use-the-property-fields-of-SPI-NOR.patch +++ /dev/null @@ -1,87 +0,0 @@ -From 5c315652c1b43a6a3abe48c2842cde822ac0ff3c Mon Sep 17 00:00:00 2001 -From: Yunhui Cui -Date: Wed, 20 Jan 2016 18:40:31 +0800 -Subject: [PATCH 097/113] mtd:fsl-quadspi:use the property fields of SPI-NOR - -We can get the read/write/erase opcode from the spi nor framework -directly. 
This patch uses the information stored in the SPI-NOR to
-remove the hardcoded values in fsl_qspi_init_lut().
-
-Signed-off-by: Yunhui Cui
----
- drivers/mtd/spi-nor/fsl-quadspi.c | 40 +++++++++++--------------------------
- 1 file changed, 12 insertions(+), 28 deletions(-)
-
---- a/drivers/mtd/spi-nor/fsl-quadspi.c
-+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
-@@ -373,9 +373,13 @@ static void fsl_qspi_init_lut(struct fsl
- void __iomem *base = q->iobase;
- int rxfifo = q->devtype_data->rxfifo;
- u32 lut_base;
-- u8 cmd, addrlen, dummy;
- int i;
-
-+ struct spi_nor *nor = &q->nor[0];
-+ u8 addrlen = (nor->addr_width == 3) ? ADDR24BIT : ADDR32BIT;
-+ u8 read_op = nor->read_opcode;
-+ u8 read_dm = nor->read_dummy;
-+
- fsl_qspi_unlock_lut(q);
-
- /* Clear all the LUT table */
-@@ -385,20 +389,10 @@ static void fsl_qspi_init_lut(struct fsl
- /* Quad Read */
- lut_base = SEQID_QUAD_READ * 4;
-
-- if (q->nor_size <= SZ_16M) {
-- cmd = SPINOR_OP_READ_1_1_4;
-- addrlen = ADDR24BIT;
-- dummy = 8;
-- } else {
-- /* use the 4-byte address */
-- cmd = SPINOR_OP_READ_1_1_4;
-- addrlen = ADDR32BIT;
-- dummy = 8;
-- }
--
-- qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
-+ qspi_writel(q, LUT0(CMD, PAD1, read_op) | LUT1(ADDR, PAD1, addrlen),
- base + QUADSPI_LUT(lut_base));
-- qspi_writel(q, LUT0(DUMMY, PAD1, dummy) | LUT1(FSL_READ, PAD4, rxfifo),
-+ qspi_writel(q, LUT0(DUMMY, PAD1, read_dm) |
-+ LUT1(FSL_READ, PAD4, rxfifo),
- base + QUADSPI_LUT(lut_base + 1));
-
- /* Write enable */
-@@ -409,16 +403,8 @@ static void fsl_qspi_init_lut(struct fsl
- /* Page Program */
- lut_base = SEQID_PP * 4;
-
-- if (q->nor_size <= SZ_16M) {
-- cmd = SPINOR_OP_PP;
-- addrlen = ADDR24BIT;
-- } else {
-- /* use the 4-byte address */
-- cmd = SPINOR_OP_PP;
-- addrlen = ADDR32BIT;
-- }
--
-- qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
-+ qspi_writel(q, LUT0(CMD, PAD1, nor->program_opcode) |
-+ LUT1(ADDR, PAD1, addrlen),
- base + QUADSPI_LUT(lut_base));
- qspi_writel(q, LUT0(FSL_WRITE, PAD1, 0),
- base + QUADSPI_LUT(lut_base + 1));
-@@ -432,10 +418,8 @@ static void fsl_qspi_init_lut(struct fsl
- /* Erase a sector */
- lut_base = SEQID_SE * 4;
-
-- cmd = q->nor[0].erase_opcode;
-- addrlen = q->nor_size <= SZ_16M ? ADDR24BIT : ADDR32BIT;
--
-- qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
-+ qspi_writel(q, LUT0(CMD, PAD1, nor->erase_opcode) |
-+ LUT1(ADDR, PAD1, addrlen),
- base + QUADSPI_LUT(lut_base));
-
- /* Erase the whole chip */
diff --git a/target/linux/layerscape/patches-4.4/1098-mtd-fsl-quadspi-Rename-SEQID_QUAD_READ-to-SEQID_READ.patch b/target/linux/layerscape/patches-4.4/1098-mtd-fsl-quadspi-Rename-SEQID_QUAD_READ-to-SEQID_READ.patch
deleted file mode 100644
index f27ea21dd..000000000
--- a/target/linux/layerscape/patches-4.4/1098-mtd-fsl-quadspi-Rename-SEQID_QUAD_READ-to-SEQID_READ.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-From c8f9be7df954fce18e96074af3f07aa5f75399e0 Mon Sep 17 00:00:00 2001
-From: Yunhui Cui
-Date: Wed, 20 Jan 2016 15:52:25 +0800
-Subject: [PATCH 098/113] mtd: fsl-quadspi: Rename SEQID_QUAD_READ to
- SEQID_READ
-
-There are some read modes for flash, such as NORMAL, FAST,
-QUAD, and DDR QUAD. These modes will use the identical LUT table base,
-so rename SEQID_QUAD_READ to SEQID_READ.
-
-Signed-off-by: Yunhui Cui
----
- drivers/mtd/spi-nor/fsl-quadspi.c | 8 ++++----
- 1 file changed, 4 insertions(+), 4 deletions(-)
-
---- a/drivers/mtd/spi-nor/fsl-quadspi.c
-+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
-@@ -193,7 +193,7 @@
- #define QUADSPI_LUT_NUM 64
-
- /* SEQID -- we can have 16 seqids at most. */
--#define SEQID_QUAD_READ 0
-+#define SEQID_READ 0
- #define SEQID_WREN 1
- #define SEQID_WRDI 2
- #define SEQID_RDSR 3
-@@ -386,8 +386,8 @@ static void fsl_qspi_init_lut(struct fsl
- for (i = 0; i < QUADSPI_LUT_NUM; i++)
- qspi_writel(q, 0, base + QUADSPI_LUT_BASE + i * 4);
-
-- /* Quad Read */
-- lut_base = SEQID_QUAD_READ * 4;
-+ /* Read */
-+ lut_base = SEQID_READ * 4;
-
- qspi_writel(q, LUT0(CMD, PAD1, read_op) | LUT1(ADDR, PAD1, addrlen),
- base + QUADSPI_LUT(lut_base));
-@@ -468,7 +468,7 @@ static int fsl_qspi_get_seqid(struct fsl
- {
- switch (cmd) {
- case SPINOR_OP_READ_1_1_4:
-- return SEQID_QUAD_READ;
-+ return SEQID_READ;
- case SPINOR_OP_WREN:
- return SEQID_WREN;
- case SPINOR_OP_WRDI:
diff --git a/target/linux/layerscape/patches-4.4/1099-mtd-spi-nor-fsl-quadspi-Add-fast-read-mode-support.patch b/target/linux/layerscape/patches-4.4/1099-mtd-spi-nor-fsl-quadspi-Add-fast-read-mode-support.patch
deleted file mode 100644
index ac99ba950..000000000
--- a/target/linux/layerscape/patches-4.4/1099-mtd-spi-nor-fsl-quadspi-Add-fast-read-mode-support.patch
+++ /dev/null
@@ -1,72 +0,0 @@
-From c501cdf57682265b72a8180c06e4a01dc2978375 Mon Sep 17 00:00:00 2001
-From: Yunhui Cui
-Date: Mon, 1 Feb 2016 18:26:23 +0800
-Subject: [PATCH 099/113] mtd:spi-nor:fsl-quadspi:Add fast-read mode support
-
-The qspi driver adds a generic fast-read mode for different
-flash vendors. Flashes on different boards work in different
-modes, such as fast-read or quad mode.
-So we have to modify the third (read mode) parameter of spi_nor_scan().
-
-Signed-off-by: Yunhui Cui
----
- drivers/mtd/spi-nor/fsl-quadspi.c | 27 +++++++++++++++++++++------
- 1 file changed, 21 insertions(+), 6 deletions(-)
-
---- a/drivers/mtd/spi-nor/fsl-quadspi.c
-+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
-@@ -389,11 +389,21 @@ static void fsl_qspi_init_lut(struct fsl
- /* Read */
- lut_base = SEQID_READ * 4;
-
-- qspi_writel(q, LUT0(CMD, PAD1, read_op) | LUT1(ADDR, PAD1, addrlen),
-- base + QUADSPI_LUT(lut_base));
-- qspi_writel(q, LUT0(DUMMY, PAD1, read_dm) |
-- LUT1(FSL_READ, PAD4, rxfifo),
-- base + QUADSPI_LUT(lut_base + 1));
-+ if (nor->flash_read == SPI_NOR_FAST) {
-+ qspi_writel(q, LUT0(CMD, PAD1, read_op) |
-+ LUT1(ADDR, PAD1, addrlen),
-+ base + QUADSPI_LUT(lut_base));
-+ qspi_writel(q, LUT0(DUMMY, PAD1, read_dm) |
-+ LUT1(FSL_READ, PAD1, rxfifo),
-+ base + QUADSPI_LUT(lut_base + 1));
-+ } else if (nor->flash_read == SPI_NOR_QUAD) {
-+ qspi_writel(q, LUT0(CMD, PAD1, read_op) |
-+ LUT1(ADDR, PAD1, addrlen),
-+ base + QUADSPI_LUT(lut_base));
-+ qspi_writel(q, LUT0(DUMMY, PAD1, read_dm) |
-+ LUT1(FSL_READ, PAD4, rxfifo),
-+ base + QUADSPI_LUT(lut_base + 1));
-+ }
-
- /* Write enable */
- lut_base = SEQID_WREN * 4;
-@@ -468,6 +478,7 @@ static int fsl_qspi_get_seqid(struct fsl
- {
- switch (cmd) {
- case SPINOR_OP_READ_1_1_4:
-+ case SPINOR_OP_READ_FAST:
- return SEQID_READ;
- case SPINOR_OP_WREN:
- return SEQID_WREN;
-@@ -964,6 +975,7 @@ static int fsl_qspi_probe(struct platfor
- struct spi_nor *nor;
- struct mtd_info *mtd;
- int ret, i = 0;
-+ enum read_mode mode = SPI_NOR_QUAD;
-
- q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);
- if (!q)
-@@ -1065,7 +1077,10 @@ static int fsl_qspi_probe(struct platfor
- /* set the chip address for READID */
- fsl_qspi_set_base_addr(q, nor);
-
-- ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
-+ ret = of_property_read_bool(np, "m25p,fast-read");
-+ mode = (ret) ? SPI_NOR_FAST : SPI_NOR_QUAD;
-+
-+ ret = spi_nor_scan(nor, NULL, mode);
- if (ret)
- goto mutex_failed;
-
diff --git a/target/linux/layerscape/patches-4.4/1100-mtd-spi_nor-Disable-Micron-flash-HW-protection.patch b/target/linux/layerscape/patches-4.4/1100-mtd-spi_nor-Disable-Micron-flash-HW-protection.patch
deleted file mode 100644
index 47fef5cd0..000000000
--- a/target/linux/layerscape/patches-4.4/1100-mtd-spi_nor-Disable-Micron-flash-HW-protection.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From e892dea7229d56b75c46a76b9039f9e179584a91 Mon Sep 17 00:00:00 2001
-From: Yunhui Cui
-Date: Mon, 1 Feb 2016 18:48:49 +0800
-Subject: [PATCH 100/113] mtd:spi_nor: Disable Micron flash HW protection
-
-For the Micron family, the status register write enable/disable bit
-provides hardware data protection for the device.
-When the enable/disable bit is set to 1, the status register
-nonvolatile bits become read-only and the WRITE STATUS REGISTER
-operation will not execute.
-
-Signed-off-by: Yunhui Cui
----
- drivers/mtd/spi-nor/spi-nor.c | 9 +++++++++
- 1 file changed, 9 insertions(+)
-
---- a/drivers/mtd/spi-nor/spi-nor.c
-+++ b/drivers/mtd/spi-nor/spi-nor.c
-@@ -39,6 +39,7 @@
-
- #define SPI_NOR_MAX_ID_LEN 6
- #define SPI_NOR_MAX_ADDR_WIDTH 4
-+#define SPI_NOR_MICRON_WRITE_ENABLE 0x7f
-
- struct flash_info {
- char *name;
-@@ -1246,6 +1247,14 @@ int spi_nor_scan(struct spi_nor *nor, co
- if (ret)
- return ret;
-
-+ if (JEDEC_MFR(info) == SNOR_MFR_MICRON) {
-+ ret = read_sr(nor);
-+ ret &= SPI_NOR_MICRON_WRITE_ENABLE;
-+
-+ write_enable(nor);
-+ write_sr(nor, ret);
-+ }
-+
- if (!mtd->name)
- mtd->name = dev_name(dev);
- mtd->priv = nor;
diff --git a/target/linux/layerscape/patches-4.4/1101-mtd-spi-nor-fsl-quadspi-extend-support-for-some-spec.patch b/target/linux/layerscape/patches-4.4/1101-mtd-spi-nor-fsl-quadspi-extend-support-for-some-spec.patch
deleted file mode 100644
index 471ce4225..000000000
--- a/target/linux/layerscape/patches-4.4/1101-mtd-spi-nor-fsl-quadspi-extend-support-for-some-spec.patch
+++ /dev/null
@@ -1,122 +0,0 @@
-From acfc6e9b34b3b3ca0d8bbe366dd08b0fac21c740 Mon Sep 17 00:00:00 2001
-From: Yunhui Cui
-Date: Tue, 2 Feb 2016 12:21:12 +0800
-Subject: [PATCH 101/113] mtd: spi-nor: fsl-quadspi: extend support for some
- special requirements.
-
-Add extra info in the LUT table to support some special requirements.
-Spansion S25FS-S family flashes need some special operations.
-
-Signed-off-by: Yunhui Cui
----
- drivers/mtd/spi-nor/fsl-quadspi.c | 44 +++++++++++++++++++++++++++++++++++--
- include/linux/mtd/spi-nor.h | 4 ++++
- 2 files changed, 46 insertions(+), 2 deletions(-)
-
---- a/drivers/mtd/spi-nor/fsl-quadspi.c
-+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
-@@ -205,6 +205,9 @@
- #define SEQID_RDCR 9
- #define SEQID_EN4B 10
- #define SEQID_BRWR 11
-+#define SEQID_RDAR 12
-+#define SEQID_WRAR 13
-+
-
- #define QUADSPI_MIN_IOMAP SZ_4M
-
-@@ -470,6 +473,28 @@ static void fsl_qspi_init_lut(struct fsl
- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_BRWR),
- base + QUADSPI_LUT(lut_base));
-
-+ /*
-+ * Read any device register.
-+ * Used for Spansion S25FS-S family flash only.
-+ */
-+ lut_base = SEQID_RDAR * 4;
-+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_SPANSION_RDAR) |
-+ LUT1(ADDR, PAD1, ADDR24BIT),
-+ base + QUADSPI_LUT(lut_base));
-+ qspi_writel(q, LUT0(DUMMY, PAD1, 8) | LUT1(FSL_READ, PAD1, 1),
-+ base + QUADSPI_LUT(lut_base + 1));
-+
-+ /*
-+ * Write any device register.
-+ * Used for Spansion S25FS-S family flash only.
-+ */
-+ lut_base = SEQID_WRAR * 4;
-+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_SPANSION_WRAR) |
-+ LUT1(ADDR, PAD1, ADDR24BIT),
-+ base + QUADSPI_LUT(lut_base));
-+ qspi_writel(q, LUT0(FSL_WRITE, PAD1, 1),
-+ base + QUADSPI_LUT(lut_base + 1));
-+
- fsl_qspi_lock_lut(q);
- }
-
-@@ -477,9 +502,15 @@ static void fsl_qspi_init_lut(struct fsl
- static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
- {
- switch (cmd) {
-+ case SPINOR_OP_READ4_1_1_4:
- case SPINOR_OP_READ_1_1_4:
- case SPINOR_OP_READ_FAST:
-+ case SPINOR_OP_READ4_FAST:
- return SEQID_READ;
-+ case SPINOR_OP_SPANSION_RDAR:
-+ return SEQID_RDAR;
-+ case SPINOR_OP_SPANSION_WRAR:
-+ return SEQID_WRAR;
- case SPINOR_OP_WREN:
- return SEQID_WREN;
- case SPINOR_OP_WRDI:
- return SEQID_WRDI;
-@@ -491,6 +522,7 @@ static int fsl_qspi_get_seqid(struct fsl
- case SPINOR_OP_CHIP_ERASE:
- return SEQID_CHIP_ERASE;
- case SPINOR_OP_PP:
-+ case SPINOR_OP_PP_4B:
- return SEQID_PP;
- case SPINOR_OP_RDID:
- return SEQID_RDID;
-@@ -830,8 +862,12 @@ static int fsl_qspi_read_reg(struct spi_
- {
- int ret;
- struct fsl_qspi *q = nor->priv;
-+ u32 to = 0;
-+
-+ if (opcode == SPINOR_OP_SPANSION_RDAR)
-+ memcpy(&to, nor->cmd_buf, 4);
-
-- ret = fsl_qspi_runcmd(q, opcode, 0, len);
-+ ret = fsl_qspi_runcmd(q, opcode, to, len);
- if (ret)
- return ret;
-
-@@ -843,9 +879,13 @@ static int fsl_qspi_write_reg(struct spi
- {
- struct fsl_qspi *q = nor->priv;
- int ret;
-+ u32 to = 0;
-+
-+ if (opcode == SPINOR_OP_SPANSION_WRAR)
-+ memcpy(&to, nor->cmd_buf, 4);
-
- if (!buf) {
-- ret = fsl_qspi_runcmd(q, opcode, 0, 1);
-+ ret = fsl_qspi_runcmd(q, opcode, to, 1);
- if (ret)
- return ret;
-
---- a/include/linux/mtd/spi-nor.h
-+++ b/include/linux/mtd/spi-nor.h
-@@ -74,6 +74,10 @@
- /* Used for Spansion flashes only. */
- #define SPINOR_OP_BRWR 0x17 /* Bank register write */
-
-+/* Used for Spansion S25FS-S family flash only. */
-+#define SPINOR_OP_SPANSION_RDAR 0x65 /* Read any device register */
-+#define SPINOR_OP_SPANSION_WRAR 0x71 /* Write any device register */
-+
- /* Used for Micron flashes only. */
- #define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */
- #define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */
diff --git a/target/linux/layerscape/patches-4.4/1102-mtd-spi-nor-fsl-quadspi-Support-qspi-for-ls2080a.patch b/target/linux/layerscape/patches-4.4/1102-mtd-spi-nor-fsl-quadspi-Support-qspi-for-ls2080a.patch
deleted file mode 100644
index da9dd72a9..000000000
--- a/target/linux/layerscape/patches-4.4/1102-mtd-spi-nor-fsl-quadspi-Support-qspi-for-ls2080a.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From d2d88e3432d68b11b0add84bd15a3aadaf44f1c1 Mon Sep 17 00:00:00 2001
-From: Yunhui Cui
-Date: Mon, 28 Dec 2015 18:25:56 +0800
-Subject: [PATCH 102/113] mtd: spi-nor: fsl-quadspi:Support qspi for ls2080a
-
-On ls2080a there is a hardware feature where qspi_amba_base is added
-internally by the SoC design, so the driver needs to support this
-feature in software.
- -Signed-off-by: Yunhui Cui -Signed-off-by: Yunhui Cui ---- - drivers/mtd/spi-nor/fsl-quadspi.c | 24 ++++++++++++++++++++++-- - 1 file changed, 22 insertions(+), 2 deletions(-) - ---- a/drivers/mtd/spi-nor/fsl-quadspi.c -+++ b/drivers/mtd/spi-nor/fsl-quadspi.c -@@ -41,6 +41,8 @@ - #define QUADSPI_QUIRK_TKT253890 (1 << 2) - /* Controller cannot wake up from wait mode, TKT245618 */ - #define QUADSPI_QUIRK_TKT245618 (1 << 3) -+/* QSPI_AMBA_BASE is internally added by SOC design */ -+#define QUADSPI_AMBA_BASE_INTERNAL (0x10000) - - /* The registers */ - #define QUADSPI_MCR 0x00 -@@ -217,6 +219,7 @@ enum fsl_qspi_devtype { - FSL_QUADSPI_IMX7D, - FSL_QUADSPI_IMX6UL, - FSL_QUADSPI_LS1021A, -+ FSL_QUADSPI_LS2080A, - }; - - struct fsl_qspi_devtype_data { -@@ -270,6 +273,14 @@ static struct fsl_qspi_devtype_data ls10 - .driver_data = 0, - }; - -+static struct fsl_qspi_devtype_data ls2080a_data = { -+ .devtype = FSL_QUADSPI_LS2080A, -+ .rxfifo = 128, -+ .txfifo = 64, -+ .ahb_buf_size = 1024, -+ .driver_data = QUADSPI_AMBA_BASE_INTERNAL, -+}; -+ - #define FSL_QSPI_MAX_CHIP 4 - struct fsl_qspi { - struct spi_nor nor[FSL_QSPI_MAX_CHIP]; -@@ -312,6 +323,11 @@ static inline int needs_wakeup_wait_mode - return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT245618; - } - -+static inline int has_added_amba_base_internal(struct fsl_qspi *q) -+{ -+ return q->devtype_data->driver_data & QUADSPI_AMBA_BASE_INTERNAL; -+} -+ - /* - * R/W functions for big- or little-endian registers: - * The qSPI controller's endian is independent of the CPU core's endian. -@@ -558,8 +574,11 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 c - /* save the reg */ - reg = qspi_readl(q, base + QUADSPI_MCR); - -- qspi_writel(q, q->memmap_phy + q->chip_base_addr + addr, -- base + QUADSPI_SFAR); -+ if (has_added_amba_base_internal(q)) -+ qspi_writel(q, q->chip_base_addr + addr, base + QUADSPI_SFAR); -+ else -+ qspi_writel(q, q->memmap_phy + q->chip_base_addr + addr, -+ base + QUADSPI_SFAR); - qspi_writel(q, QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS, - base + QUADSPI_RBCT); - qspi_writel(q, reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR); -@@ -849,6 +868,7 @@ static const struct of_device_id fsl_qsp - { .compatible = "fsl,imx7d-qspi", .data = (void *)&imx7d_data, }, - { .compatible = "fsl,imx6ul-qspi", .data = (void *)&imx6ul_data, }, - { .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, }, -+ { .compatible = "fsl,ls2080a-qspi", .data = (void *)&ls2080a_data, }, - { /* sentinel */ } - }; - MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids); diff --git a/target/linux/layerscape/patches-4.4/1103-mtd-spi-nor-Support-R-W-for-S25FS-S-family-flash.patch b/target/linux/layerscape/patches-4.4/1103-mtd-spi-nor-Support-R-W-for-S25FS-S-family-flash.patch deleted file mode 100644 index 7b954dd5c..000000000 --- a/target/linux/layerscape/patches-4.4/1103-mtd-spi-nor-Support-R-W-for-S25FS-S-family-flash.patch +++ /dev/null @@ -1,110 +0,0 @@ -From 0878404f549021e7fe0a49ae0454cf53fd452add Mon Sep 17 00:00:00 2001 -From: Yunhui Cui -Date: Tue, 2 Feb 2016 12:00:27 +0800 -Subject: [PATCH 103/113] mtd: spi-nor: Support R/W for S25FS-S family flash - -With the physical sectors combination, S25FS-S family flash -requires some special operations for read/write functions. 
- -Signed-off-by: Yunhui Cui ---- - drivers/mtd/spi-nor/spi-nor.c | 60 +++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 60 insertions(+) - ---- a/drivers/mtd/spi-nor/spi-nor.c -+++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -40,6 +40,10 @@ - #define SPI_NOR_MAX_ID_LEN 6 - #define SPI_NOR_MAX_ADDR_WIDTH 4 - #define SPI_NOR_MICRON_WRITE_ENABLE 0x7f -+/* Added for S25FS-S family flash */ -+#define SPINOR_CONFIG_REG3_OFFSET 0x800004 -+#define CR3V_4KB_ERASE_UNABLE 0x8 -+#define SPINOR_S25FS_FAMILY_ID 0x81 - - struct flash_info { - char *name; -@@ -74,6 +78,8 @@ struct flash_info { - }; - - #define JEDEC_MFR(info) ((info)->id[0]) -+#define EXT_ID(info) ((info)->id[5]) -+ - - static const struct flash_info *spi_nor_match_id(const char *name); - -@@ -786,6 +792,7 @@ static const struct flash_info spi_nor_i - */ - { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, - { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, -+ { "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512, 0)}, - { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) }, - { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, - { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, -@@ -910,6 +917,53 @@ static const struct flash_info *spi_nor_ - return ERR_PTR(-ENODEV); - } - -+/* -+ * The S25FS-S family physical sectors may be configured as a -+ * hybrid combination of eight 4-kB parameter sectors -+ * at the top or bottom of the address space with all -+ * but one of the remaining sectors being uniform size. -+ * The Parameter Sector Erase commands (20h or 21h) must -+ * be used to erase the 4-kB parameter sectors individually. -+ * The Sector (uniform sector) Erase commands (D8h or DCh) -+ * must be used to erase any of the remaining -+ * sectors, including the portion of highest or lowest address -+ * sector that is not overlaid by the parameter sectors. -+ * The uniform sector erase command has no effect on parameter sectors. 
-+ */ -+static int spansion_s25fs_disable_4kb_erase(struct spi_nor *nor) -+{ -+ struct fsl_qspi *q; -+ u32 cr3v_addr = SPINOR_CONFIG_REG3_OFFSET; -+ u8 cr3v = 0x0; -+ int ret = 0x0; -+ -+ q = nor->priv; -+ -+ nor->cmd_buf[2] = cr3v_addr >> 16; -+ nor->cmd_buf[1] = cr3v_addr >> 8; -+ nor->cmd_buf[0] = cr3v_addr >> 0; -+ -+ ret = nor->read_reg(nor, SPINOR_OP_SPANSION_RDAR, &cr3v, 1); -+ if (ret) -+ return ret; -+ if (cr3v & CR3V_4KB_ERASE_UNABLE) -+ return 0; -+ ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0); -+ if (ret) -+ return ret; -+ cr3v = CR3V_4KB_ERASE_UNABLE; -+ nor->program_opcode = SPINOR_OP_SPANSION_WRAR; -+ nor->write(nor, cr3v_addr, 1, &cr3v); -+ -+ ret = nor->read_reg(nor, SPINOR_OP_SPANSION_RDAR, &cr3v, 1); -+ if (ret) -+ return ret; -+ if (!(cr3v & CR3V_4KB_ERASE_UNABLE)) -+ return -EPERM; -+ -+ return 0; -+} -+ - static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, u_char *buf) - { -@@ -1255,6 +1309,12 @@ int spi_nor_scan(struct spi_nor *nor, co - write_sr(nor, ret); - } - -+ if (EXT_ID(info) == SPINOR_S25FS_FAMILY_ID) { -+ ret = spansion_s25fs_disable_4kb_erase(nor); -+ if (ret) -+ return ret; -+ } -+ - if (!mtd->name) - mtd->name = dev_name(dev); - mtd->priv = nor; diff --git a/target/linux/layerscape/patches-4.4/1104-mtd-fsl-quadspi-Add-quad-mode-for-flash-n25q128.patch b/target/linux/layerscape/patches-4.4/1104-mtd-fsl-quadspi-Add-quad-mode-for-flash-n25q128.patch deleted file mode 100644 index daf34e338..000000000 --- a/target/linux/layerscape/patches-4.4/1104-mtd-fsl-quadspi-Add-quad-mode-for-flash-n25q128.patch +++ /dev/null @@ -1,112 +0,0 @@ -From 23cd071c47c064d56921975d196dc22177069dea Mon Sep 17 00:00:00 2001 -From: Yunhui Cui -Date: Wed, 24 Feb 2016 15:14:01 +0800 -Subject: [PATCH 104/113] mtd: fsl-quadspi: Add quad mode for flash n25q128 - -Add some lut_tables to support quad mode for flash n25q128 -on the board ls1021a-twr and solve flash Spansion and Micron -command conflict. -In switch {}, The value of command SPINOR_OP_RD_EVCR and -SPINOR_OP_SPANSION_RDAR is the same. They have to share -the same seq_id: SEQID_RDAR_OR_RD_EVCR. - -Signed-off-by: Yunhui Cui ---- - drivers/mtd/spi-nor/fsl-quadspi.c | 47 ++++++++++++++++++++++++++++--------- - 1 file changed, 36 insertions(+), 11 deletions(-) - ---- a/drivers/mtd/spi-nor/fsl-quadspi.c -+++ b/drivers/mtd/spi-nor/fsl-quadspi.c -@@ -207,9 +207,9 @@ - #define SEQID_RDCR 9 - #define SEQID_EN4B 10 - #define SEQID_BRWR 11 --#define SEQID_RDAR 12 -+#define SEQID_RDAR_OR_RD_EVCR 12 - #define SEQID_WRAR 13 -- -+#define SEQID_WD_EVCR 14 - - #define QUADSPI_MIN_IOMAP SZ_4M - -@@ -393,6 +393,7 @@ static void fsl_qspi_init_lut(struct fsl - int rxfifo = q->devtype_data->rxfifo; - u32 lut_base; - int i; -+ const struct fsl_qspi_devtype_data *devtype_data = q->devtype_data; - - struct spi_nor *nor = &q->nor[0]; - u8 addrlen = (nor->addr_width == 3) ? ADDR24BIT : ADDR32BIT; -@@ -489,16 +490,26 @@ static void fsl_qspi_init_lut(struct fsl - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_BRWR), - base + QUADSPI_LUT(lut_base)); - -+ - /* -- * Read any device register. -- * Used for Spansion S25FS-S family flash only. -+ * Flash Micron and Spansion command confilict -+ * use the same value 0x65. But it indicates different meaning. 
- */ -- lut_base = SEQID_RDAR * 4; -- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_SPANSION_RDAR) | -- LUT1(ADDR, PAD1, ADDR24BIT), -- base + QUADSPI_LUT(lut_base)); -- qspi_writel(q, LUT0(DUMMY, PAD1, 8) | LUT1(FSL_READ, PAD1, 1), -- base + QUADSPI_LUT(lut_base + 1)); -+ lut_base = SEQID_RDAR_OR_RD_EVCR * 4; -+ if (devtype_data->devtype == FSL_QUADSPI_LS2080A) { -+ /* -+ * Read any device register. -+ * Used for Spansion S25FS-S family flash only. -+ */ -+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_SPANSION_RDAR) | -+ LUT1(ADDR, PAD1, ADDR24BIT), -+ base + QUADSPI_LUT(lut_base)); -+ qspi_writel(q, LUT0(DUMMY, PAD1, 8) | LUT1(FSL_READ, PAD1, 1), -+ base + QUADSPI_LUT(lut_base + 1)); -+ } else { -+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RD_EVCR), -+ base + QUADSPI_LUT(lut_base)); -+ } - - /* - * Write any device register. -@@ -511,6 +522,11 @@ static void fsl_qspi_init_lut(struct fsl - qspi_writel(q, LUT0(FSL_WRITE, PAD1, 1), - base + QUADSPI_LUT(lut_base + 1)); - -+ /* Write EVCR register */ -+ lut_base = SEQID_WD_EVCR * 4; -+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WD_EVCR), -+ base + QUADSPI_LUT(lut_base)); -+ - fsl_qspi_lock_lut(q); - } - -@@ -523,8 +539,15 @@ static int fsl_qspi_get_seqid(struct fsl - case SPINOR_OP_READ_FAST: - case SPINOR_OP_READ4_FAST: - return SEQID_READ; -+ /* -+ * Spansion & Micron use the same command value 0x65 -+ * Spansion: SPINOR_OP_SPANSION_RDAR, read any register. -+ * Micron: SPINOR_OP_RD_EVCR, -+ * read enhanced volatile configuration register. -+ * case SPINOR_OP_RD_EVCR: -+ */ - case SPINOR_OP_SPANSION_RDAR: -- return SEQID_RDAR; -+ return SEQID_RDAR_OR_RD_EVCR; - case SPINOR_OP_SPANSION_WRAR: - return SEQID_WRAR; - case SPINOR_OP_WREN: -@@ -550,6 +573,8 @@ static int fsl_qspi_get_seqid(struct fsl - return SEQID_EN4B; - case SPINOR_OP_BRWR: - return SEQID_BRWR; -+ case SPINOR_OP_WD_EVCR: -+ return SEQID_WD_EVCR; - default: - if (cmd == q->nor[0].erase_opcode) - return SEQID_SE; diff --git a/target/linux/layerscape/patches-4.4/1105-mtd-spi-nor-add-DDR-quad-read-support.patch b/target/linux/layerscape/patches-4.4/1105-mtd-spi-nor-add-DDR-quad-read-support.patch deleted file mode 100644 index df7e0d82f..000000000 --- a/target/linux/layerscape/patches-4.4/1105-mtd-spi-nor-add-DDR-quad-read-support.patch +++ /dev/null @@ -1,181 +0,0 @@ -From 924f021c0344554a4b61746e5c4dcfc91d618ce2 Mon Sep 17 00:00:00 2001 -From: Yunhui Cui -Date: Thu, 18 Feb 2016 16:41:53 +0800 -Subject: [PATCH 105/113] mtd: spi-nor: add DDR quad read support - -This patch adds the DDR quad read support by the following: - - [1] add SPI_NOR_DDR_QUAD read mode. - - [2] add DDR Quad read opcodes: - SPINOR_OP_READ_1_4_4_D / SPINOR_OP_READ4_1_4_4_D - - [3] add set_ddr_quad_mode() to initialize for the DDR quad read. - Currently it only works for Spansion NOR. - - [4] set dummy with 6 for Spansion family -Test this patch for Spansion s25fl128s NOR flash. 
- -Signed-off-by: Yunhui Cui ---- - drivers/mtd/spi-nor/spi-nor.c | 53 ++++++++++++++++++++++++++++++++++++----- - include/linux/mtd/spi-nor.h | 8 +++++-- - 2 files changed, 53 insertions(+), 8 deletions(-) - ---- a/drivers/mtd/spi-nor/spi-nor.c -+++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -73,7 +73,8 @@ struct flash_info { - #define SECT_4K_PMC 0x10 /* SPINOR_OP_BE_4K_PMC works uniformly */ - #define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */ - #define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */ --#define USE_FSR 0x80 /* use flag status register */ -+#define SPI_NOR_DDR_QUAD_READ 0x80 /* Flash supports DDR Quad Read */ -+#define USE_FSR 0x100 /* use flag status register */ - #define SPI_NOR_HAS_LOCK 0x100 /* Flash supports lock/unlock via SR */ - }; - -@@ -145,13 +146,17 @@ static int read_cr(struct spi_nor *nor) - * It can be used to support more commands with - * different dummy cycle requirements. - */ --static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor) -+static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor, -+ const struct flash_info *info) - { - switch (nor->flash_read) { - case SPI_NOR_FAST: - case SPI_NOR_DUAL: - case SPI_NOR_QUAD: - return 8; -+ case SPI_NOR_DDR_QUAD: -+ if (JEDEC_MFR(info) == SNOR_MFR_SPANSION) -+ return 6; - case SPI_NOR_NORMAL: - return 0; - } -@@ -799,7 +804,8 @@ static const struct flash_info spi_nor_i - { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) }, - { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) }, - { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) }, -- { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) }, -+ { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ -+ | SPI_NOR_DDR_QUAD_READ) }, - { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, - { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, - { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) }, -@@ -1195,6 +1201,23 @@ static int spansion_quad_enable(struct s - return 0; - } - -+static int set_ddr_quad_mode(struct spi_nor *nor, const struct flash_info *info) -+{ -+ int status; -+ -+ switch (JEDEC_MFR(info)) { -+ case SNOR_MFR_SPANSION: -+ status = spansion_quad_enable(nor); -+ if (status) { -+ dev_err(nor->dev, "Spansion DDR quad-read not enabled\n"); -+ return status; -+ } -+ return status; -+ default: -+ return -EINVAL; -+ } -+} -+ - static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info) - { - int status; -@@ -1385,8 +1408,15 @@ int spi_nor_scan(struct spi_nor *nor, co - if (info->flags & SPI_NOR_NO_FR) - nor->flash_read = SPI_NOR_NORMAL; - -- /* Quad/Dual-read mode takes precedence over fast/normal */ -- if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) { -+ /* DDR Quad/Quad/Dual-read mode takes precedence over fast/normal */ -+ if (mode == SPI_NOR_DDR_QUAD && info->flags & SPI_NOR_DDR_QUAD_READ) { -+ ret = set_ddr_quad_mode(nor, info); -+ if (ret) { -+ dev_err(dev, "DDR quad mode not supported\n"); -+ return ret; -+ } -+ nor->flash_read = SPI_NOR_DDR_QUAD; -+ } else if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) { - ret = set_quad_mode(nor, info); - if (ret) { - dev_err(dev, "quad mode not supported\n"); -@@ -1399,6 +1429,14 @@ int spi_nor_scan(struct spi_nor *nor, co - - /* Default commands */ - switch (nor->flash_read) { -+ case SPI_NOR_DDR_QUAD: -+ if (JEDEC_MFR(info) == SNOR_MFR_SPANSION) { /* Spansion */ -+ 
nor->read_opcode = SPINOR_OP_READ_1_4_4_D; -+ } else { -+ dev_err(dev, "DDR Quad Read is not supported.\n"); -+ return -EINVAL; -+ } -+ break; - case SPI_NOR_QUAD: - nor->read_opcode = SPINOR_OP_READ_1_1_4; - break; -@@ -1426,6 +1464,9 @@ int spi_nor_scan(struct spi_nor *nor, co - if (JEDEC_MFR(info) == SNOR_MFR_SPANSION) { - /* Dedicated 4-byte command set */ - switch (nor->flash_read) { -+ case SPI_NOR_DDR_QUAD: -+ nor->read_opcode = SPINOR_OP_READ4_1_4_4_D; -+ break; - case SPI_NOR_QUAD: - nor->read_opcode = SPINOR_OP_READ4_1_1_4; - break; -@@ -1455,7 +1496,7 @@ int spi_nor_scan(struct spi_nor *nor, co - return -EINVAL; - } - -- nor->read_dummy = spi_nor_read_dummy_cycles(nor); -+ nor->read_dummy = spi_nor_read_dummy_cycles(nor, info); - - dev_info(dev, "%s (%lld Kbytes)\n", info->name, - (long long)mtd->size >> 10); ---- a/include/linux/mtd/spi-nor.h -+++ b/include/linux/mtd/spi-nor.h -@@ -30,10 +30,11 @@ - - /* - * Note on opcode nomenclature: some opcodes have a format like -- * SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number -+ * SPINOR_OP_FUNCTION{4,}_x_y_z{_D}. The numbers x, y,and z stand for the number - * of I/O lines used for the opcode, address, and data (respectively). The - * FUNCTION has an optional suffix of '4', to represent an opcode which -- * requires a 4-byte (32-bit) address. -+ * requires a 4-byte (32-bit) address. The suffix of 'D' stands for the -+ * DDR mode. - */ - - /* Flash opcodes. */ -@@ -44,6 +45,7 @@ - #define SPINOR_OP_READ_FAST 0x0b /* Read data bytes (high frequency) */ - #define SPINOR_OP_READ_1_1_2 0x3b /* Read data bytes (Dual SPI) */ - #define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad SPI) */ -+#define SPINOR_OP_READ_1_4_4_D 0xed /* Read data bytes (DDR Quad SPI) */ - #define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */ - #define SPINOR_OP_BE_4K 0x20 /* Erase 4KiB block */ - #define SPINOR_OP_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */ -@@ -59,6 +61,7 @@ - #define SPINOR_OP_READ4_FAST 0x0c /* Read data bytes (high frequency) */ - #define SPINOR_OP_READ4_1_1_2 0x3c /* Read data bytes (Dual SPI) */ - #define SPINOR_OP_READ4_1_1_4 0x6c /* Read data bytes (Quad SPI) */ -+#define SPINOR_OP_READ4_1_4_4_D 0xee /* Read data bytes (DDR Quad SPI) */ - #define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */ - #define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */ - -@@ -107,6 +110,7 @@ enum read_mode { - SPI_NOR_FAST, - SPI_NOR_DUAL, - SPI_NOR_QUAD, -+ SPI_NOR_DDR_QUAD, - }; - - #define SPI_NOR_MAX_CMD_SIZE 8 diff --git a/target/linux/layerscape/patches-4.4/1106-mtd-fsl-quadspi-add-DDR-quad-read-for-Spansion.patch b/target/linux/layerscape/patches-4.4/1106-mtd-fsl-quadspi-add-DDR-quad-read-for-Spansion.patch deleted file mode 100644 index 3a242608b..000000000 --- a/target/linux/layerscape/patches-4.4/1106-mtd-fsl-quadspi-add-DDR-quad-read-for-Spansion.patch +++ /dev/null @@ -1,122 +0,0 @@ -From 16eb35ceea5b43e6f64c1a869721ea86c0da5260 Mon Sep 17 00:00:00 2001 -From: Yunhui Cui -Date: Thu, 25 Feb 2016 10:19:15 +0800 -Subject: [PATCH 106/113] mtd: fsl-quadspi: add DDR quad read for Spansion - -Add the DDR quad read support for the fsl-quadspi driver. -And, add the Spansion s25fl128s NOR flash ddr quad mode -support. 
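
On the controller side, the enable sequence this pairs with (set_ddr_quad_mode() in the companion fsl-quadspi change below) boils down to three register steps; a stripped-down sketch using the driver's own register names:

/* Sketch of the QuadSPI DDR enable sequence (see set_ddr_quad_mode() below). */
u32 mcr = qspi_readl(q, q->iobase + QUADSPI_MCR);
u32 smpr;

/* 1. Disable the module before touching the sampling register. */
qspi_writel(q, mcr | QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);

/* 2. Program the DDR sampling point (fsl,ddr-sampling-point from the DT). */
smpr = qspi_readl(q, q->iobase + QUADSPI_SMPR) & ~QUADSPI_SMPR_DDRSMP_MASK;
smpr |= (q->ddr_smp << QUADSPI_SMPR_DDRSMP_SHIFT) & QUADSPI_SMPR_DDRSMP_MASK;
qspi_writel(q, smpr, q->iobase + QUADSPI_SMPR);

/* 3. Re-enable the module with DDR mode switched on. */
qspi_writel(q, mcr | QUADSPI_MCR_DDR_EN_MASK, q->iobase + QUADSPI_MCR);
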
- -Signed-off-by: Yunhui Cui ---- - drivers/mtd/spi-nor/fsl-quadspi.c | 57 +++++++++++++++++++++++++++++++++++++ - 1 file changed, 57 insertions(+) - ---- a/drivers/mtd/spi-nor/fsl-quadspi.c -+++ b/drivers/mtd/spi-nor/fsl-quadspi.c -@@ -296,6 +296,7 @@ struct fsl_qspi { - u32 nor_size; - u32 nor_num; - u32 clk_rate; -+ u32 ddr_smp; - unsigned int chip_base_addr; /* We may support two chips. */ - bool has_second_chip; - bool big_endian; -@@ -423,6 +424,19 @@ static void fsl_qspi_init_lut(struct fsl - qspi_writel(q, LUT0(DUMMY, PAD1, read_dm) | - LUT1(FSL_READ, PAD4, rxfifo), - base + QUADSPI_LUT(lut_base + 1)); -+ } else if (nor->flash_read == SPI_NOR_DDR_QUAD) { -+ /* read mode : 1-4-4, such as Spansion s25fl128s. */ -+ qspi_writel(q, LUT0(CMD, PAD1, read_op) -+ | LUT1(ADDR_DDR, PAD4, addrlen), -+ base + QUADSPI_LUT(lut_base)); -+ -+ qspi_writel(q, LUT0(MODE_DDR, PAD4, 0xff) -+ | LUT1(DUMMY, PAD1, read_dm), -+ base + QUADSPI_LUT(lut_base + 1)); -+ -+ qspi_writel(q, LUT0(FSL_READ_DDR, PAD4, rxfifo) -+ | LUT1(JMP_ON_CS, PAD1, 0), -+ base + QUADSPI_LUT(lut_base + 2)); - } - - /* Write enable */ -@@ -534,6 +548,8 @@ static void fsl_qspi_init_lut(struct fsl - static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd) - { - switch (cmd) { -+ case SPINOR_OP_READ_1_4_4_D: -+ case SPINOR_OP_READ4_1_4_4_D: - case SPINOR_OP_READ4_1_1_4: - case SPINOR_OP_READ_1_1_4: - case SPINOR_OP_READ_FAST: -@@ -736,6 +752,32 @@ static void fsl_qspi_set_map_addr(struct - } - - /* -+ * enable controller ddr quad mode to support different -+ * vender flashes ddr quad mode. -+ */ -+static void set_ddr_quad_mode(struct fsl_qspi *q) -+{ -+ u32 reg, reg2; -+ -+ reg = qspi_readl(q, q->iobase + QUADSPI_MCR); -+ -+ /* Firstly, disable the module */ -+ qspi_writel(q, reg | QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); -+ -+ /* Set the Sampling Register for DDR */ -+ reg2 = qspi_readl(q, q->iobase + QUADSPI_SMPR); -+ reg2 &= ~QUADSPI_SMPR_DDRSMP_MASK; -+ reg2 |= (((q->ddr_smp) << QUADSPI_SMPR_DDRSMP_SHIFT) & -+ QUADSPI_SMPR_DDRSMP_MASK); -+ qspi_writel(q, reg2, q->iobase + QUADSPI_SMPR); -+ -+ /* Enable the module again (enable the DDR too) */ -+ reg |= QUADSPI_MCR_DDR_EN_MASK; -+ qspi_writel(q, reg, q->iobase + QUADSPI_MCR); -+ -+} -+ -+/* - * There are two different ways to read out the data from the flash: - * the "IP Command Read" and the "AHB Command Read". - * -@@ -775,6 +817,11 @@ static void fsl_qspi_init_abh_read(struc - seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode); - qspi_writel(q, seqid << QUADSPI_BFGENCR_SEQID_SHIFT, - q->iobase + QUADSPI_BFGENCR); -+ -+ /* enable the DDR quad read */ -+ if (q->nor->flash_read == SPI_NOR_DDR_QUAD) -+ set_ddr_quad_mode(q); -+ - } - - /* This function was used to prepare and enable QSPI clock */ -@@ -1108,6 +1155,12 @@ static int fsl_qspi_probe(struct platfor - goto clk_failed; - } - -+ /* find ddrsmp value */ -+ ret = of_property_read_u32(dev->of_node, "fsl,ddr-sampling-point", -+ &q->ddr_smp); -+ if (ret) -+ q->ddr_smp = 0; -+ - /* find the irq */ - ret = platform_get_irq(pdev, 0); - if (ret < 0) { -@@ -1164,6 +1217,10 @@ static int fsl_qspi_probe(struct platfor - - ret = of_property_read_bool(np, "m25p,fast-read"); - mode = (ret) ? SPI_NOR_FAST : SPI_NOR_QUAD; -+ /* Can we enable the DDR Quad Read? 
*/ -+ ret = of_property_read_bool(np, "ddr-quad-read"); -+ if (ret) -+ mode = SPI_NOR_DDR_QUAD; - - ret = spi_nor_scan(nor, NULL, mode); - if (ret) diff --git a/target/linux/layerscape/patches-4.4/1107-mtd-fsl-quadspi-disable-AHB-buffer-prefetch.patch b/target/linux/layerscape/patches-4.4/1107-mtd-fsl-quadspi-disable-AHB-buffer-prefetch.patch deleted file mode 100644 index 19dff6c92..000000000 --- a/target/linux/layerscape/patches-4.4/1107-mtd-fsl-quadspi-disable-AHB-buffer-prefetch.patch +++ /dev/null @@ -1,67 +0,0 @@ -From 50aac689d5be0a086f076cd4bc8b14ee0b9ab995 Mon Sep 17 00:00:00 2001 -From: Yunhui Cui -Date: Tue, 8 Mar 2016 14:38:52 +0800 -Subject: [PATCH 107/113] mtd: fsl-quadspi: disable AHB buffer prefetch - -A-009282: QuadSPI: QuadSPI data pre-fetch can result in incorrect data -Affects: QuadSPI -Description: With AHB buffer prefetch enabled, the QuadSPI may return -incorrect data on the AHB -interface. The buffer pre-fetch is enabled if the fetch size as -configured either in the LUT or in -the BUFxCR register is greater than 8 bytes. -Impact: Only 64 bit read allowed. -Workaround: Keep the read data size to 64 bits (8 Bytes), which disables -the prefetch on the AHB buffer, -and prevents this issue from occurring. - -Signed-off-by: Yunhui Cui ---- - drivers/mtd/spi-nor/fsl-quadspi.c | 29 +++++++++++++++++++++++------ - 1 file changed, 23 insertions(+), 6 deletions(-) - ---- a/drivers/mtd/spi-nor/fsl-quadspi.c -+++ b/drivers/mtd/spi-nor/fsl-quadspi.c -@@ -794,19 +794,36 @@ static void fsl_qspi_init_abh_read(struc - { - void __iomem *base = q->iobase; - int seqid; -+ const struct fsl_qspi_devtype_data *devtype_data = q->devtype_data; - - /* AHB configuration for access buffer 0/1/2 .*/ - qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR); - qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR); - qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR); -+ - /* -- * Set ADATSZ with the maximum AHB buffer size to improve the -- * read performance. -+ * Errata: A-009282: QuadSPI data prefetch may result in incorrect data -+ * Workaround: Keep the read data size to 64 bits (8 bytes). -+ * This disables the prefetch on the AHB buffer and -+ * prevents this issue from occurring. - */ -- qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK | -- ((q->devtype_data->ahb_buf_size / 8) -- << QUADSPI_BUF3CR_ADATSZ_SHIFT), -- base + QUADSPI_BUF3CR); -+ if (devtype_data->devtype == FSL_QUADSPI_LS2080A || -+ devtype_data->devtype == FSL_QUADSPI_LS1021A) { -+ -+ qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK | -+ (1 << QUADSPI_BUF3CR_ADATSZ_SHIFT), -+ base + QUADSPI_BUF3CR); -+ -+ } else { -+ /* -+ * Set ADATSZ with the maximum AHB buffer size to improve the -+ * read performance. 
-+ */ -+ qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK | -+ ((q->devtype_data->ahb_buf_size / 8) -+ << QUADSPI_BUF3CR_ADATSZ_SHIFT), -+ base + QUADSPI_BUF3CR); -+ } - - /* We only use the buffer3 */ - qspi_writel(q, 0, base + QUADSPI_BUF0IND); diff --git a/target/linux/layerscape/patches-4.4/1108-mtd-fsl-quadspi-add-multi-flash-chip-R-W-on-ls2080a.patch b/target/linux/layerscape/patches-4.4/1108-mtd-fsl-quadspi-add-multi-flash-chip-R-W-on-ls2080a.patch deleted file mode 100644 index 4a7a5cbcd..000000000 --- a/target/linux/layerscape/patches-4.4/1108-mtd-fsl-quadspi-add-multi-flash-chip-R-W-on-ls2080a.patch +++ /dev/null @@ -1,42 +0,0 @@ -From d3a8ee41170ff9e5298ff354c77ff99439dfe2bf Mon Sep 17 00:00:00 2001 -From: Yunhui Cui -Date: Thu, 10 Mar 2016 11:33:40 +0800 -Subject: [PATCH 108/113] mtd: fsl-quadspi: add multi flash chip R/W on - ls2080a - -There is a hardware feature that qspi_amba_base is added -internally by SOC design on ls2080a. so memmap_phy need not -be added in driver. If memmap_phy is added, the flash A1 -addr space is [0, memmap_phy] which far more than flash size. -The AMBA memory will be divided into four parts and assign to -every chipselect. Every channel will has two valid chipselects. - -Signed-off-by: Yunhui Cui ---- - drivers/mtd/spi-nor/fsl-quadspi.c | 14 ++++++++++---- - 1 file changed, 10 insertions(+), 4 deletions(-) - ---- a/drivers/mtd/spi-nor/fsl-quadspi.c -+++ b/drivers/mtd/spi-nor/fsl-quadspi.c -@@ -744,11 +744,17 @@ static void fsl_qspi_set_map_addr(struct - { - int nor_size = q->nor_size; - void __iomem *base = q->iobase; -+ u32 mem_base; - -- qspi_writel(q, nor_size + q->memmap_phy, base + QUADSPI_SFA1AD); -- qspi_writel(q, nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD); -- qspi_writel(q, nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD); -- qspi_writel(q, nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD); -+ if (has_added_amba_base_internal(q)) -+ mem_base = 0x0; -+ else -+ mem_base = q->memmap_phy; -+ -+ qspi_writel(q, nor_size + mem_base, base + QUADSPI_SFA1AD); -+ qspi_writel(q, nor_size * 2 + mem_base, base + QUADSPI_SFA2AD); -+ qspi_writel(q, nor_size * 3 + mem_base, base + QUADSPI_SFB1AD); -+ qspi_writel(q, nor_size * 4 + mem_base, base + QUADSPI_SFB2AD); - } - - /* diff --git a/target/linux/layerscape/patches-4.4/1109-drivers-mtd-spi-nor-Enable-QSPI-Flash-in-Kernel.patch b/target/linux/layerscape/patches-4.4/1109-drivers-mtd-spi-nor-Enable-QSPI-Flash-in-Kernel.patch deleted file mode 100644 index 95321ebf1..000000000 --- a/target/linux/layerscape/patches-4.4/1109-drivers-mtd-spi-nor-Enable-QSPI-Flash-in-Kernel.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 70516f60de441829e7813c0b26567c8bda39c011 Mon Sep 17 00:00:00 2001 -From: Pratiyush Mohan Srivastava -Date: Sun, 24 Apr 2016 23:20:26 +0530 -Subject: [PATCH 109/113] drivers: mtd: spi-nor: Enable QSPI Flash in Kernel - -Enable read from QSPI flash, Write onto QSPI Flash and -erase QSPI Flash in Fast mode in Kernel. 
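
The flash_info entry this patch adds (first hunk below) is easiest to read with INFO6() unpacked; decoded field by field, the new line says:

/* INFO6(jedec_id, ext_id, sector_size, n_sectors, flags) */
{ "s25fs512s", INFO6(0x010220,		/* JEDEC ID: manufacturer 0x01 (Spansion), device 0x0220 */
		     0x4d0081,		/* 3-byte extended ID, distinguishes the FS-S part */
		     128 * 1024,	/* 128 KiB sectors */
		     512,		/* 512 sectors -> 64 MiB total */
		     0) },		/* no SECT_4K/dual/quad flags claimed */
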
- -Signed-off-by: Pratiyush Mohan Srivastava -Signed-off-by: Prabhakar Kushwaha ---- - drivers/mtd/spi-nor/spi-nor.c | 3 +++ - 1 file changed, 3 insertions(+) - ---- a/drivers/mtd/spi-nor/spi-nor.c -+++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -798,6 +798,7 @@ static const struct flash_info spi_nor_i - { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, - { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, - { "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512, 0)}, -+ { "s25fs512s", INFO6(0x010220, 0x4d0081, 128 * 1024, 512, 0)}, - { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) }, - { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, - { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, -@@ -964,9 +965,11 @@ static int spansion_s25fs_disable_4kb_er - ret = nor->read_reg(nor, SPINOR_OP_SPANSION_RDAR, &cr3v, 1); - if (ret) - return ret; -+/* - if (!(cr3v & CR3V_4KB_ERASE_UNABLE)) - return -EPERM; - -+*/ - return 0; - } - diff --git a/target/linux/layerscape/patches-4.4/1110-mtd-spi-nor-fsl-quad-add-flash-S25FS-extra-support.patch b/target/linux/layerscape/patches-4.4/1110-mtd-spi-nor-fsl-quad-add-flash-S25FS-extra-support.patch deleted file mode 100644 index c018a33fb..000000000 --- a/target/linux/layerscape/patches-4.4/1110-mtd-spi-nor-fsl-quad-add-flash-S25FS-extra-support.patch +++ /dev/null @@ -1,157 +0,0 @@ -From 034dd6241b55ab2256eecb845e941fa9b45da38e Mon Sep 17 00:00:00 2001 -From: Yunhui Cui -Date: Thu, 28 Apr 2016 17:03:57 +0800 -Subject: [PATCH 110/113] mtd: spi-nor: fsl-quad: add flash S25FS extra - support - -[context adjustment] -not apply changes of arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts - -There are some boards have the same QSPI controller but have -different vendor flash, So, the controller can use the same -compatible and share the driver, just for a different flash to do -the appropriate adaptation. Based on this, we need add the vendor -field in spi-nor, Because we will use the field to distribute -corresponding LUT for different flash operations. - -Signed-off-by: Yunhui Cui -Signed-off-by: Pratiyush Mohan Srivastava -Signed-off-by: Prabhakar Kushwaha -Integrated-by: Jiang Yutang ---- - drivers/mtd/spi-nor/fsl-quadspi.c | 47 ++++++++++++++++++++++++++++++------- - drivers/mtd/spi-nor/spi-nor.c | 5 ++-- - include/linux/mtd/spi-nor.h | 1 + - 3 files changed, 42 insertions(+), 11 deletions(-) - ---- a/drivers/mtd/spi-nor/fsl-quadspi.c -+++ b/drivers/mtd/spi-nor/fsl-quadspi.c -@@ -213,6 +213,9 @@ - - #define QUADSPI_MIN_IOMAP SZ_4M - -+#define FLASH_VENDOR_SPANSION_FS "s25fs" -+#define SPANSION_S25FS_FAMILY (1 << 1) -+ - enum fsl_qspi_devtype { - FSL_QUADSPI_VYBRID, - FSL_QUADSPI_IMX6SX, -@@ -329,6 +332,18 @@ static inline int has_added_amba_base_in - return q->devtype_data->driver_data & QUADSPI_AMBA_BASE_INTERNAL; - } - -+static u32 fsl_get_nor_vendor(struct spi_nor *nor) -+{ -+ u32 vendor_id; -+ -+ if (nor->vendor) { -+ if (memcmp(nor->vendor, FLASH_VENDOR_SPANSION_FS, -+ sizeof(FLASH_VENDOR_SPANSION_FS) - 1)) -+ vendor_id = SPANSION_S25FS_FAMILY; -+ } -+ return vendor_id; -+} -+ - /* - * R/W functions for big- or little-endian registers: - * The qSPI controller's endian is independent of the CPU core's endian. 
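
One reading note on the fsl_get_nor_vendor() helper quoted above: memcmp() returns 0 on a match, so as written the helper flags SPANSION_S25FS_FAMILY when the "s25fs" prefix does not match, and returns vendor_id uninitialized when nor->vendor is NULL. A sketch of the presumably intended logic (illustrative only; not what the deleted patch actually shipped):

static u32 fsl_get_nor_vendor(struct spi_nor *nor)
{
	/* memcmp() == 0 means nor->vendor starts with "s25fs" */
	if (nor->vendor &&
	    !memcmp(nor->vendor, FLASH_VENDOR_SPANSION_FS,
		    sizeof(FLASH_VENDOR_SPANSION_FS) - 1))
		return SPANSION_S25FS_FAMILY;

	return 0;
}
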
-@@ -394,13 +409,15 @@ static void fsl_qspi_init_lut(struct fsl - int rxfifo = q->devtype_data->rxfifo; - u32 lut_base; - int i; -- const struct fsl_qspi_devtype_data *devtype_data = q->devtype_data; -+ u32 vendor; - - struct spi_nor *nor = &q->nor[0]; - u8 addrlen = (nor->addr_width == 3) ? ADDR24BIT : ADDR32BIT; - u8 read_op = nor->read_opcode; - u8 read_dm = nor->read_dummy; - -+ vendor = fsl_get_nor_vendor(nor); -+ - fsl_qspi_unlock_lut(q); - - /* Clear all the LUT table */ -@@ -418,12 +435,25 @@ static void fsl_qspi_init_lut(struct fsl - LUT1(FSL_READ, PAD1, rxfifo), - base + QUADSPI_LUT(lut_base + 1)); - } else if (nor->flash_read == SPI_NOR_QUAD) { -- qspi_writel(q, LUT0(CMD, PAD1, read_op) | -- LUT1(ADDR, PAD1, addrlen), -- base + QUADSPI_LUT(lut_base)); -- qspi_writel(q, LUT0(DUMMY, PAD1, read_dm) | -- LUT1(FSL_READ, PAD4, rxfifo), -- base + QUADSPI_LUT(lut_base + 1)); -+ if (q->nor_size == 0x4000000) { -+ read_op = 0xEC; -+ qspi_writel(q, -+ LUT0(CMD, PAD1, read_op) | LUT1(ADDR, PAD4, addrlen), -+ base + QUADSPI_LUT(lut_base)); -+ qspi_writel(q, -+ LUT0(MODE, PAD4, 0xff) | LUT1(DUMMY, PAD4, read_dm), -+ base + QUADSPI_LUT(lut_base + 1)); -+ qspi_writel(q, -+ LUT0(FSL_READ, PAD4, rxfifo), -+ base + QUADSPI_LUT(lut_base + 2)); -+ } else { -+ qspi_writel(q, LUT0(CMD, PAD1, read_op) | -+ LUT1(ADDR, PAD1, addrlen), -+ base + QUADSPI_LUT(lut_base)); -+ qspi_writel(q, LUT0(DUMMY, PAD1, read_dm) | -+ LUT1(FSL_READ, PAD4, rxfifo), -+ base + QUADSPI_LUT(lut_base + 1)); -+ } - } else if (nor->flash_read == SPI_NOR_DDR_QUAD) { - /* read mode : 1-4-4, such as Spansion s25fl128s. */ - qspi_writel(q, LUT0(CMD, PAD1, read_op) -@@ -510,7 +540,8 @@ static void fsl_qspi_init_lut(struct fsl - * use the same value 0x65. But it indicates different meaning. - */ - lut_base = SEQID_RDAR_OR_RD_EVCR * 4; -- if (devtype_data->devtype == FSL_QUADSPI_LS2080A) { -+ -+ if (vendor == SPANSION_S25FS_FAMILY) { - /* - * Read any device register. - * Used for Spansion S25FS-S family flash only. 
---- a/drivers/mtd/spi-nor/spi-nor.c -+++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -798,7 +798,6 @@ static const struct flash_info spi_nor_i - { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, - { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, - { "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512, 0)}, -- { "s25fs512s", INFO6(0x010220, 0x4d0081, 128 * 1024, 512, 0)}, - { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) }, - { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, - { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, -@@ -965,11 +964,9 @@ static int spansion_s25fs_disable_4kb_er - ret = nor->read_reg(nor, SPINOR_OP_SPANSION_RDAR, &cr3v, 1); - if (ret) - return ret; --/* - if (!(cr3v & CR3V_4KB_ERASE_UNABLE)) - return -EPERM; - --*/ - return 0; - } - -@@ -1343,6 +1340,8 @@ int spi_nor_scan(struct spi_nor *nor, co - - if (!mtd->name) - mtd->name = dev_name(dev); -+ if (info->name) -+ nor->vendor = info->name; - mtd->priv = nor; - mtd->type = MTD_NORFLASH; - mtd->writesize = 1; ---- a/include/linux/mtd/spi-nor.h -+++ b/include/linux/mtd/spi-nor.h -@@ -172,6 +172,7 @@ struct spi_nor { - bool sst_write_second; - u32 flags; - u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE]; -+ char *vendor; - - int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops); - void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops); diff --git a/target/linux/layerscape/patches-4.4/1111-mtd-spi-nor-disable-4kb-sector-erase-for-s25fl128.patch b/target/linux/layerscape/patches-4.4/1111-mtd-spi-nor-disable-4kb-sector-erase-for-s25fl128.patch deleted file mode 100644 index f9cdb88f1..000000000 --- a/target/linux/layerscape/patches-4.4/1111-mtd-spi-nor-disable-4kb-sector-erase-for-s25fl128.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 30d34abc80b0a602a1327bdfbddd42d250887049 Mon Sep 17 00:00:00 2001 -From: Yutang Jiang -Date: Fri, 9 Sep 2016 22:56:12 +0800 -Subject: [PATCH 111/113] mtd: spi-nor: disable 4kb sector erase for s25fl128 - -As for s25fl128s flash, the sectors are organized either as a hybrid -combination of 4-kB and 64-kB sectors, or as uniform 256-kbyte sectors. -we should use the command 0xd8 to erase all bits, not the Parameter 4-kB -Sector Erase (P4E) command 0x20. 
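
Why dropping SECT_4K is enough: the 4.4-era spi_nor_scan() picks the erase opcode from that flag, roughly as below, so without it the core falls back to the uniform sector-erase command 0xd8 with the full sector size:

#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
	if (info->flags & SECT_4K) {
		nor->erase_opcode = SPINOR_OP_BE_4K;	/* 0x20: 4 KiB (P4E) erase */
		mtd->erasesize = 4096;
	} else
#endif
	{
		nor->erase_opcode = SPINOR_OP_SE;	/* 0xd8: uniform sector erase */
		mtd->erasesize = info->sector_size;	/* 64 KiB for s25fl128s */
	}
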
-
-Signed-off-by: Yunhui Cui
-Integrated-by: Yutang Jiang
----
- drivers/mtd/spi-nor/spi-nor.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/mtd/spi-nor/spi-nor.c
-+++ b/drivers/mtd/spi-nor/spi-nor.c
-@@ -804,7 +804,7 @@ static const struct flash_info spi_nor_i
- 	{ "s70fl01gs",  INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
- 	{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024,  64, 0) },
- 	{ "s25sl12801", INFO(0x012018, 0x0301,  64 * 1024, 256, 0) },
--	{ "s25fl128s",  INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ
-+	{ "s25fl128s",  INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_QUAD_READ
- 			| SPI_NOR_DDR_QUAD_READ) },
- 	{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024,  64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- 	{ "s25fl129p1", INFO(0x012018, 0x4d01,  64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
diff --git a/target/linux/layerscape/patches-4.4/1112-driver-spi-fsl-quad-Hang-memcpy-Unhandled-fault-alig.patch b/target/linux/layerscape/patches-4.4/1112-driver-spi-fsl-quad-Hang-memcpy-Unhandled-fault-alig.patch
deleted file mode 100644
index c6311fc4b..000000000
--- a/target/linux/layerscape/patches-4.4/1112-driver-spi-fsl-quad-Hang-memcpy-Unhandled-fault-alig.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From f1b7824a42505669476f203e126fc26dd1006af2 Mon Sep 17 00:00:00 2001
-From: Yutang Jiang
-Date: Fri, 9 Sep 2016 22:57:55 +0800
-Subject: [PATCH 112/113] driver: spi: fsl-quad: Hang memcpy: Unhandled fault:
- alignment fault
-
-The buffer is vmap()/ioremap()'d depending on whether it lies in the memory
-region or a reserved region. However, both map it as non-cacheable memory.
-For ARMv8 specifically, non-cacheable mapping requests use a memory type
-that has to be accessed aligned to the request size. memcpy() doesn't
-guarantee that; memcpy_toio() can guarantee 4-byte alignment.
-
-Signed-off-by: Yunhui Cui
-Integrated-by: Yutang Jiang
----
- drivers/mtd/spi-nor/fsl-quadspi.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/mtd/spi-nor/fsl-quadspi.c
-+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
-@@ -1103,7 +1103,7 @@ static ssize_t fsl_qspi_read(struct spi_
- 		len);
- 
- 	/* Read out the data directly from the AHB buffer.*/
--	memcpy(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
-+	memcpy_toio(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
- 		len);
- 
- 	return len;
diff --git a/target/linux/layerscape/patches-4.4/1113-mtd-spi-nor-fsl-quad-move-mtd_device_register-to-the.patch b/target/linux/layerscape/patches-4.4/1113-mtd-spi-nor-fsl-quad-move-mtd_device_register-to-the.patch
deleted file mode 100644
index f54f9854e..000000000
--- a/target/linux/layerscape/patches-4.4/1113-mtd-spi-nor-fsl-quad-move-mtd_device_register-to-the.patch
+++ /dev/null
@@ -1,49 +0,0 @@
-From 6f1195d231ab576809fe2f4cff44a6e48cff2457 Mon Sep 17 00:00:00 2001
-From: Yutang Jiang
-Date: Fri, 2 Sep 2016 22:00:16 +0800
-Subject: [PATCH 113/113] mtd: spi-nor: fsl-quad: move mtd_device_register to
- the last of probe
-
-Once mtd_device_register() has been called, the MTD devices are expected to
-be usable immediately. If mtd_device_register() is called before all of the
-init work has finished, the device cannot service requests in time.
-
-For example, OpenWrt/LEDE automatically splits special flash partitions
-while the MTD driver registers. So all of the init work must be complete
-before mtd_device_register() is called.
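
In outline, the reordered probe then looks as follows: everything that sizes the flash and programs the LUTs runs first, and registration, which may immediately trigger partition parsing, runs last (condensed from the hunks below):

/* Shape of fsl_qspi_probe() after this patch (condensed sketch). */
ret = spi_nor_scan(nor, NULL, mode);	/* identify the flash, learn nor_size */
/* ... correct the mapped NOR size, update LUTs, init AHB reads ... */
fsl_qspi_clk_disable_unprep(q);

for (i = 0; i < q->nor_num; i++) {
	if (!q->has_second_chip)
		i *= 2;			/* skip the holes */
	/* Register last: parsers (e.g. OpenWrt's splitter) can now run safely. */
	ret = mtd_device_register(&q->nor[i].mtd, NULL, 0);
	if (ret)
		goto last_init_failed;
}
return 0;
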
-
-Signed-off-by: Yutang Jiang
----
- drivers/mtd/spi-nor/fsl-quadspi.c | 14 ++++++++++----
- 1 file changed, 10 insertions(+), 4 deletions(-)
-
---- a/drivers/mtd/spi-nor/fsl-quadspi.c
-+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
-@@ -1280,10 +1280,6 @@ static int fsl_qspi_probe(struct platfor
- 	if (ret)
- 		goto mutex_failed;
- 
--	ret = mtd_device_register(mtd, NULL, 0);
--	if (ret)
--		goto mutex_failed;
--
- 	/* Set the correct NOR size now. */
- 	if (q->nor_size == 0) {
- 		q->nor_size = mtd->size;
-@@ -1313,6 +1309,16 @@ static int fsl_qspi_probe(struct platfor
- 		goto last_init_failed;
- 
- 	fsl_qspi_clk_disable_unprep(q);
-+
-+	for (i = 0; i < q->nor_num; i++) {
-+		/* skip the holes */
-+		if (!q->has_second_chip)
-+			i *= 2;
-+
-+		ret = mtd_device_register(&q->nor[i].mtd, NULL, 0);
-+		if (ret)
-+			goto last_init_failed;
-+	}
- 	return 0;
- 
- last_init_failed:
diff --git a/target/linux/layerscape/patches-4.4/1239-mtd-extend-physmap_of-to-let-the-device-tree-specify.patch b/target/linux/layerscape/patches-4.4/1239-mtd-extend-physmap_of-to-let-the-device-tree-specify.patch
deleted file mode 100644
index 0fdbb7ea3..000000000
--- a/target/linux/layerscape/patches-4.4/1239-mtd-extend-physmap_of-to-let-the-device-tree-specify.patch
+++ /dev/null
@@ -1,85 +0,0 @@
-From 6b54054c4053215fe4add195c67daca9a466ba92 Mon Sep 17 00:00:00 2001
-From: "ying.zhang"
-Date: Fri, 23 Dec 2016 22:21:22 +0800
-Subject: [PATCH] mtd: extend physmap_of to let the device tree specify the
- partition probe
-
-This is to support custom partitioning schemes for embedded PPC. To use it,
-define your own mtd_part_parser and then add something like:
-	linux,part-probe = "my_probe", "cmdlinepart";
-to the board's dts file.
-
-If linux,part-probe is not specified then this behaves the same as before.
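
The string named in linux,part-probe has to correspond to a parser registered with the MTD core. A minimal sketch of the "my_probe" side (the names are the commit's illustrative ones; the parse_fn signature shown is the 4.4-era one and differs in later kernels):

static int my_probe_parse(struct mtd_info *mtd, struct mtd_partition **pparts,
			  struct mtd_part_parser_data *data)
{
	/* Fill *pparts with a kmalloc'd array and return the partition count. */
	return 0;
}

static struct mtd_part_parser my_probe_parser = {
	.owner		= THIS_MODULE,
	.parse_fn	= my_probe_parse,
	.name		= "my_probe",	/* must match the linux,part-probe entry */
};

register_mtd_parser(&my_probe_parser);
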
- -Signed-off-by: Jason Gunthorpe -Signed-off-by: David Woodhouse ---- - drivers/mtd/maps/physmap_of.c | 46 ++++++++++++++++++++++++++++++++++++++++- - 1 file changed, 45 insertions(+), 1 deletion(-) - ---- a/drivers/mtd/maps/physmap_of.c -+++ b/drivers/mtd/maps/physmap_of.c -@@ -112,9 +112,47 @@ static struct mtd_info *obsolete_probe(s - static const char * const part_probe_types_def[] = { - "cmdlinepart", "RedBoot", "ofpart", "ofoldpart", NULL }; - -+static const char * const *of_get_probes(struct device_node *dp) -+{ -+ const char *cp; -+ int cplen; -+ unsigned int l; -+ unsigned int count; -+ const char **res; -+ -+ cp = of_get_property(dp, "linux,part-probe", &cplen); -+ if (cp == NULL) -+ return part_probe_types_def; -+ -+ count = 0; -+ for (l = 0; l != cplen; l++) -+ if (cp[l] == 0) -+ count++; -+ -+ res = kzalloc((count + 1)*sizeof(*res), GFP_KERNEL); -+ if (!res) -+ return NULL; -+ count = 0; -+ while (cplen > 0) { -+ res[count] = cp; -+ l = strlen(cp) + 1; -+ cp += l; -+ cplen -= l; -+ count++; -+ } -+ return res; -+} -+ -+static void of_free_probes(const char * const *probes) -+{ -+ if (probes != part_probe_types_def) -+ kfree(probes); -+} -+ - static const struct of_device_id of_flash_match[]; - static int of_flash_probe(struct platform_device *dev) - { -+ const char * const *part_probe_types; - const struct of_device_id *match; - struct device_node *dp = dev->dev.of_node; - struct resource res; -@@ -273,8 +311,14 @@ static int of_flash_probe(struct platfor - goto err_out; - - ppdata.of_node = dp; -- mtd_device_parse_register(info->cmtd, part_probe_types_def, &ppdata, -+ part_probe_types = of_get_probes(dp); -+ if (!part_probe_types) { -+ err = -ENOMEM; -+ goto err_out; -+ } -+ mtd_device_parse_register(info->cmtd, part_probe_types, &ppdata, - NULL, 0); -+ of_free_probes(part_probe_types); - - kfree(mtd_list); - diff --git a/target/linux/layerscape/patches-4.4/2006-armv8-aarch32-Add-the-default-config-ls_aarch32_defc.patch b/target/linux/layerscape/patches-4.4/2006-armv8-aarch32-Add-the-default-config-ls_aarch32_defc.patch deleted file mode 100644 index 22ce5a9b9..000000000 --- a/target/linux/layerscape/patches-4.4/2006-armv8-aarch32-Add-the-default-config-ls_aarch32_defc.patch +++ /dev/null @@ -1,209 +0,0 @@ -From 4c4e5c275a0e37570d6267802e66a350b0b93dcd Mon Sep 17 00:00:00 2001 -From: Alison Wang -Date: Tue, 17 May 2016 17:30:19 +0800 -Subject: [PATCH 06/70] armv8: aarch32: Add the default config - ls_aarch32_defconfig - -ls_aarch32_defconfig is used as the default config for running 32-bit -Linux. 
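
As a usage note, the config is selected the standard way before building (the cross-compiler triplet here is an assumption, not something this patch specifies):

make ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- ls_aarch32_defconfig
make ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- zImage dtbs
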
- -Signed-off-by: Ebony Zhu -Signed-off-by: Alison Wang ---- - arch/arm/configs/ls_aarch32_defconfig | 190 +++++++++++++++++++++++++++++++++ - 1 file changed, 190 insertions(+) - create mode 100644 arch/arm/configs/ls_aarch32_defconfig - ---- /dev/null -+++ b/arch/arm/configs/ls_aarch32_defconfig -@@ -0,0 +1,190 @@ -+# CONFIG_LOCALVERSION_AUTO is not set -+CONFIG_SYSVIPC=y -+CONFIG_POSIX_MQUEUE=y -+# CONFIG_CROSS_MEMORY_ATTACH is not set -+CONFIG_IRQ_DOMAIN_DEBUG=y -+CONFIG_NO_HZ_IDLE=y -+CONFIG_HIGH_RES_TIMERS=y -+CONFIG_LOG_BUF_SHIFT=16 -+CONFIG_BLK_DEV_INITRD=y -+CONFIG_CC_OPTIMIZE_FOR_SIZE=y -+CONFIG_SYSCTL_SYSCALL=y -+CONFIG_KALLSYMS_ALL=y -+CONFIG_EMBEDDED=y -+# CONFIG_VM_EVENT_COUNTERS is not set -+# CONFIG_SLUB_DEBUG is not set -+CONFIG_PROFILING=y -+CONFIG_OPROFILE=y -+CONFIG_KPROBES=y -+CONFIG_JUMP_LABEL=y -+CONFIG_MODULES=y -+CONFIG_MODULE_FORCE_LOAD=y -+CONFIG_MODULE_UNLOAD=y -+# CONFIG_BLK_DEV_BSG is not set -+CONFIG_BLK_CMDLINE_PARSER=y -+CONFIG_ARCH_MXC=y -+CONFIG_ARCH_LAYERSCAPE=y -+CONFIG_ARM_LPAE=y -+# CONFIG_CACHE_L2X0 is not set -+CONFIG_PCI=y -+CONFIG_PCI_MSI=y -+CONFIG_PCI_HOST_GENERIC=y -+CONFIG_PCI_LAYERSCAPE=y -+CONFIG_SMP=y -+CONFIG_VMSPLIT_2G=y -+CONFIG_PREEMPT_VOLUNTARY=y -+CONFIG_AEABI=y -+CONFIG_HIGHMEM=y -+CONFIG_CLEANCACHE=y -+CONFIG_FRONTSWAP=y -+CONFIG_CMDLINE="console=ttyS0,115200" -+CONFIG_CPU_FREQ=y -+CONFIG_CPU_IDLE=y -+CONFIG_VFP=y -+CONFIG_NEON=y -+CONFIG_KERNEL_MODE_NEON=y -+CONFIG_BINFMT_MISC=y -+CONFIG_NET=y -+CONFIG_PACKET=y -+CONFIG_UNIX=y -+CONFIG_UNIX_DIAG=y -+CONFIG_XFRM_USER=y -+CONFIG_NET_KEY=y -+CONFIG_INET=y -+CONFIG_IP_MULTICAST=y -+CONFIG_IP_ADVANCED_ROUTER=y -+CONFIG_IP_PNP=y -+CONFIG_IP_PNP_DHCP=y -+CONFIG_IP_MROUTE=y -+CONFIG_INET_AH=y -+CONFIG_INET_ESP=y -+CONFIG_INET_IPCOMP=y -+CONFIG_INET_UDP_DIAG=y -+# CONFIG_IPV6 is not set -+CONFIG_NETFILTER=y -+CONFIG_CAN=y -+# CONFIG_CAN_BCM is not set -+# CONFIG_CAN_GW is not set -+CONFIG_CAN_FLEXCAN=y -+CONFIG_DEVTMPFS=y -+CONFIG_DEVTMPFS_MOUNT=y -+# CONFIG_FW_LOADER is not set -+CONFIG_MTD=y -+CONFIG_MTD_CMDLINE_PARTS=y -+CONFIG_MTD_BLOCK=y -+CONFIG_MTD_CFI=y -+CONFIG_MTD_CFI_ADV_OPTIONS=y -+CONFIG_MTD_CFI_BE_BYTE_SWAP=y -+CONFIG_MTD_CFI_GEOMETRY=y -+CONFIG_MTD_CFI_INTELEXT=y -+CONFIG_MTD_CFI_AMDSTD=y -+CONFIG_MTD_CFI_STAA=y -+CONFIG_MTD_PHYSMAP_OF=y -+CONFIG_MTD_DATAFLASH=y -+CONFIG_MTD_SST25L=y -+CONFIG_MTD_NAND=y -+CONFIG_MTD_NAND_FSL_IFC=y -+CONFIG_MTD_SPI_NOR=y -+CONFIG_SPI_FSL_QUADSPI=y -+CONFIG_BLK_DEV_LOOP=y -+CONFIG_BLK_DEV_RAM=y -+CONFIG_BLK_DEV_RAM_COUNT=8 -+CONFIG_BLK_DEV_RAM_SIZE=262144 -+CONFIG_NETDEVICES=y -+# CONFIG_NET_VENDOR_FREESCALE is not set -+CONFIG_E1000=y -+CONFIG_E1000E=y -+CONFIG_PHYLIB=y -+CONFIG_AT803X_PHY=y -+CONFIG_VITESSE_PHY=y -+CONFIG_BROADCOM_PHY=y -+CONFIG_REALTEK_PHY=y -+CONFIG_NATIONAL_PHY=y -+CONFIG_MICREL_PHY=y -+CONFIG_MDIO_BUS_MUX_MMIOREG=y -+CONFIG_INPUT_EVDEV=y -+# CONFIG_MOUSE_PS2_TRACKPOINT is not set -+CONFIG_SERIO_SERPORT=m -+# CONFIG_CONSOLE_TRANSLATIONS is not set -+CONFIG_SERIAL_8250=y -+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set -+CONFIG_SERIAL_8250_CONSOLE=y -+CONFIG_SERIAL_8250_EXTENDED=y -+CONFIG_SERIAL_8250_SHARE_IRQ=y -+CONFIG_SERIAL_OF_PLATFORM=y -+CONFIG_SERIAL_FSL_LPUART=y -+CONFIG_SERIAL_FSL_LPUART_CONSOLE=y -+CONFIG_HW_RANDOM=y -+CONFIG_I2C=y -+CONFIG_I2C_CHARDEV=y -+CONFIG_I2C_MUX=y -+CONFIG_I2C_MUX_PCA954x=y -+CONFIG_I2C_IMX=y -+CONFIG_SPI=y -+CONFIG_SPI_BITBANG=y -+CONFIG_SPI_FSL_DSPI=y -+CONFIG_PTP_1588_CLOCK=y -+CONFIG_GPIO_SYSFS=y -+CONFIG_GPIO_MPC8XXX=y -+CONFIG_SENSORS_LTC2945=y -+CONFIG_SENSORS_LM90=y 
-+CONFIG_SENSORS_INA2XX=y -+CONFIG_WATCHDOG=y -+CONFIG_IMX2_WDT=y -+CONFIG_MFD_SYSCON=y -+CONFIG_FB=y -+CONFIG_USB=y -+CONFIG_USB_XHCI_HCD=y -+CONFIG_MMC=y -+CONFIG_MMC_SDHCI=y -+CONFIG_MMC_SDHCI_PLTFM=y -+CONFIG_MMC_SDHCI_OF_ESDHC=y -+CONFIG_RTC_CLASS=y -+CONFIG_RTC_DRV_DS3232=y -+CONFIG_DMADEVICES=y -+CONFIG_FSL_EDMA=y -+CONFIG_CLK_QORIQ=y -+# CONFIG_IOMMU_SUPPORT is not set -+CONFIG_MEMORY=y -+CONFIG_PWM=y -+CONFIG_PWM_FSL_FTM=y -+# CONFIG_RESET_CONTROLLER is not set -+CONFIG_EXT2_FS=y -+CONFIG_EXT2_FS_XATTR=y -+CONFIG_EXT3_FS=y -+CONFIG_EXT4_FS=y -+CONFIG_FANOTIFY=y -+CONFIG_ISO9660_FS=m -+CONFIG_JOLIET=y -+CONFIG_ZISOFS=y -+CONFIG_UDF_FS=m -+CONFIG_MSDOS_FS=y -+CONFIG_VFAT_FS=y -+CONFIG_NTFS_FS=m -+CONFIG_TMPFS=y -+CONFIG_TMPFS_POSIX_ACL=y -+CONFIG_CONFIGFS_FS=y -+CONFIG_JFFS2_FS=y -+CONFIG_NFS_FS=y -+CONFIG_NFS_V4=y -+CONFIG_ROOT_NFS=y -+CONFIG_NLS_DEFAULT="cp437" -+CONFIG_NLS_CODEPAGE_437=y -+CONFIG_NLS_ASCII=y -+CONFIG_NLS_ISO8859_1=y -+CONFIG_NLS_ISO8859_2=y -+CONFIG_NLS_ISO8859_15=y -+CONFIG_NLS_UTF8=y -+CONFIG_DEBUG_FS=y -+CONFIG_DEBUG_SECTION_MISMATCH=y -+CONFIG_MAGIC_SYSRQ=y -+# CONFIG_SCHED_DEBUG is not set -+# CONFIG_FTRACE is not set -+CONFIG_PID_IN_CONTEXTIDR=y -+CONFIG_CRYPTO_LZO=y -+# CONFIG_CRYPTO_ANSI_CPRNG is not set -+# CONFIG_CRYPTO_HW is not set -+CONFIG_CRC_CCITT=m -+CONFIG_CRC_T10DIF=y -+CONFIG_CRC7=m -+CONFIG_LIBCRC32C=m diff --git a/target/linux/layerscape/patches-4.4/2027-armv8-aarch32-update-defconfig-for-LayerScape-SoC.patch b/target/linux/layerscape/patches-4.4/2027-armv8-aarch32-update-defconfig-for-LayerScape-SoC.patch deleted file mode 100644 index 97f51137b..000000000 --- a/target/linux/layerscape/patches-4.4/2027-armv8-aarch32-update-defconfig-for-LayerScape-SoC.patch +++ /dev/null @@ -1,101 +0,0 @@ -From 0cc4fd2e52f23f9b35dfdac80021da97ac6c2c52 Mon Sep 17 00:00:00 2001 -From: Pan Jiafei -Date: Tue, 24 May 2016 16:15:49 +0800 -Subject: [PATCH 27/70] armv8: aarch32: update defconfig for LayerScape SoC - -Enable QBMan, FMD, DPAA ethernet, kernel bridge, ATA, -DMA_CMA, USB_STORAGE, PHY etc. 
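
One caveat for the defconfig hunk further below: the replacement line "CONFIG_NET_VENDOR_FREESCALE is not set" has lost its leading '#'. Kconfig only understands the two spellings shown here (first: disabled, as a comment; second: enabled), so the line as committed is not a valid assignment; given that the DPAA ethernet options are enabled alongside it, the '=y' form was presumably intended:

# CONFIG_NET_VENDOR_FREESCALE is not set
CONFIG_NET_VENDOR_FREESCALE=y
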
- -Signed-off-by: Pan Jiafei ---- - arch/arm/configs/ls_aarch32_defconfig | 20 +++++++++++++++++++- - 1 file changed, 19 insertions(+), 1 deletion(-) - ---- a/arch/arm/configs/ls_aarch32_defconfig -+++ b/arch/arm/configs/ls_aarch32_defconfig -@@ -26,6 +26,7 @@ CONFIG_ARCH_MXC=y - CONFIG_ARCH_LAYERSCAPE=y - CONFIG_ARM_LPAE=y - # CONFIG_CACHE_L2X0 is not set -+CONFIG_HAS_FSL_QBMAN=y - CONFIG_PCI=y - CONFIG_PCI_MSI=y - CONFIG_PCI_HOST_GENERIC=y -@@ -36,6 +37,7 @@ CONFIG_PREEMPT_VOLUNTARY=y - CONFIG_AEABI=y - CONFIG_HIGHMEM=y - CONFIG_CLEANCACHE=y -+CONFIG_CMA=y - CONFIG_FRONTSWAP=y - CONFIG_CMDLINE="console=ttyS0,115200" - CONFIG_CPU_FREQ=y -@@ -62,6 +64,7 @@ CONFIG_INET_IPCOMP=y - CONFIG_INET_UDP_DIAG=y - # CONFIG_IPV6 is not set - CONFIG_NETFILTER=y -+CONFIG_BRIDGE=y - CONFIG_CAN=y - # CONFIG_CAN_BCM is not set - # CONFIG_CAN_GW is not set -@@ -69,6 +72,7 @@ CONFIG_CAN_FLEXCAN=y - CONFIG_DEVTMPFS=y - CONFIG_DEVTMPFS_MOUNT=y - # CONFIG_FW_LOADER is not set -+CONFIG_DMA_CMA=y - CONFIG_MTD=y - CONFIG_MTD_CMDLINE_PARTS=y - CONFIG_MTD_BLOCK=y -@@ -81,17 +85,26 @@ CONFIG_MTD_CFI_AMDSTD=y - CONFIG_MTD_CFI_STAA=y - CONFIG_MTD_PHYSMAP_OF=y - CONFIG_MTD_DATAFLASH=y -+CONFIG_MTD_M25P80=y - CONFIG_MTD_SST25L=y - CONFIG_MTD_NAND=y - CONFIG_MTD_NAND_FSL_IFC=y - CONFIG_MTD_SPI_NOR=y -+CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y - CONFIG_SPI_FSL_QUADSPI=y -+CONFIG_BLK_DEV_SD=y -+CONFIG_ATA=y - CONFIG_BLK_DEV_LOOP=y - CONFIG_BLK_DEV_RAM=y - CONFIG_BLK_DEV_RAM_COUNT=8 - CONFIG_BLK_DEV_RAM_SIZE=262144 - CONFIG_NETDEVICES=y --# CONFIG_NET_VENDOR_FREESCALE is not set -+CONFIG_NET_VENDOR_FREESCALE is not set -+CONFIG_FSL_BMAN=y -+CONFIG_FSL_QMAN=y -+CONFIG_FSL_SDK_FMAN=y -+CONFIG_FMAN_ARM=y -+CONFIG_FSL_SDK_DPAA_ETH=y - CONFIG_E1000=y - CONFIG_E1000E=y - CONFIG_PHYLIB=y -@@ -101,6 +114,8 @@ CONFIG_BROADCOM_PHY=y - CONFIG_REALTEK_PHY=y - CONFIG_NATIONAL_PHY=y - CONFIG_MICREL_PHY=y -+CONFIG_FIXED_PHY=y -+CONFIG_FSL_XGMAC_MDIO=y - CONFIG_MDIO_BUS_MUX_MMIOREG=y - CONFIG_INPUT_EVDEV=y - # CONFIG_MOUSE_PS2_TRACKPOINT is not set -@@ -135,6 +150,8 @@ CONFIG_MFD_SYSCON=y - CONFIG_FB=y - CONFIG_USB=y - CONFIG_USB_XHCI_HCD=y -+CONFIG_USB_DWC3=y -+CONFIG_USB_STORAGE=y - CONFIG_MMC=y - CONFIG_MMC_SDHCI=y - CONFIG_MMC_SDHCI_PLTFM=y -@@ -143,6 +160,7 @@ CONFIG_RTC_CLASS=y - CONFIG_RTC_DRV_DS3232=y - CONFIG_DMADEVICES=y - CONFIG_FSL_EDMA=y -+CONFIG_STAGING=y - CONFIG_CLK_QORIQ=y - # CONFIG_IOMMU_SUPPORT is not set - CONFIG_MEMORY=y diff --git a/target/linux/layerscape/patches-4.4/2119-armv8-aarch32-defconfig-Enable-CAAM-support.patch b/target/linux/layerscape/patches-4.4/2119-armv8-aarch32-defconfig-Enable-CAAM-support.patch deleted file mode 100644 index 567195d1f..000000000 --- a/target/linux/layerscape/patches-4.4/2119-armv8-aarch32-defconfig-Enable-CAAM-support.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 035fe1e511e053c6650f37626deb5da76dcc1d92 Mon Sep 17 00:00:00 2001 -From: Ying Zhang -Date: Thu, 29 Sep 2016 11:29:48 +0800 -Subject: [PATCH 119/124] armv8: aarch32: defconfig: Enable CAAM support - -This patch is to enable the driver module for Freescale's Cryptographics -Accelerator and Assurance Module (CAAM) and related options. 
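
Consumers do not reference CAAM directly; once CONFIG_CRYPTO_DEV_FSL_CAAM=y, its algorithm implementations register with the kernel crypto API and are selected by priority. A minimal, illustrative consumer sketch (not part of this patch):

#include <crypto/hash.h>

struct crypto_shash *tfm;

/* "sha256" may now resolve to CAAM's registered implementation. */
tfm = crypto_alloc_shash("sha256", 0, 0);
if (!IS_ERR(tfm))
	crypto_free_shash(tfm);
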
- -Signed-off-by: Alison Wang ---- - arch/arm/configs/ls_aarch32_defconfig | 9 +++++++-- - 1 file changed, 7 insertions(+), 2 deletions(-) - ---- a/arch/arm/configs/ls_aarch32_defconfig -+++ b/arch/arm/configs/ls_aarch32_defconfig -@@ -200,8 +200,13 @@ CONFIG_MAGIC_SYSRQ=y - # CONFIG_FTRACE is not set - CONFIG_PID_IN_CONTEXTIDR=y - CONFIG_CRYPTO_LZO=y --# CONFIG_CRYPTO_ANSI_CPRNG is not set --# CONFIG_CRYPTO_HW is not set -+CONFIG_CRYPTO_ANSI_CPRNG=y -+CONFIG_CRYPTO_DEV_FSL_CAAM=y -+CONFIG_ARM_CRYPTO=y -+CONFIG_CRYPTO_SHA1_ARM_NEON=y -+CONFIG_CRYPTO_SHA256_ARM=y -+CONFIG_CRYPTO_SHA512_ARM_NEON=y -+CONFIG_CRYPTO_AES_ARM_BS=y - CONFIG_CRC_CCITT=m - CONFIG_CRC_T10DIF=y - CONFIG_CRC7=m diff --git a/target/linux/layerscape/patches-4.4/2120-armv8-aarch32-defconfig-Enable-firmware-loading.patch b/target/linux/layerscape/patches-4.4/2120-armv8-aarch32-defconfig-Enable-firmware-loading.patch deleted file mode 100644 index 26a981c9e..000000000 --- a/target/linux/layerscape/patches-4.4/2120-armv8-aarch32-defconfig-Enable-firmware-loading.patch +++ /dev/null @@ -1,23 +0,0 @@ -From 027ca2530ce94dd7d9954e57631aa34987db392e Mon Sep 17 00:00:00 2001 -From: Ying Zhang -Date: Thu, 29 Sep 2016 11:38:36 +0800 -Subject: [PATCH 120/124] armv8: aarch32: defconfig: Enable firmware loading - -As some modules require userspace firmware loading support, such as -PPFE, add this feature in the defconfig for AArch32 on ARMv8. - -Signed-off-by: Alison Wang ---- - arch/arm/configs/ls_aarch32_defconfig | 1 - - 1 file changed, 1 deletion(-) - ---- a/arch/arm/configs/ls_aarch32_defconfig -+++ b/arch/arm/configs/ls_aarch32_defconfig -@@ -71,7 +71,6 @@ CONFIG_CAN=y - CONFIG_CAN_FLEXCAN=y - CONFIG_DEVTMPFS=y - CONFIG_DEVTMPFS_MOUNT=y --# CONFIG_FW_LOADER is not set - CONFIG_DMA_CMA=y - CONFIG_MTD=y - CONFIG_MTD_CMDLINE_PARTS=y diff --git a/target/linux/layerscape/patches-4.4/2121-armv8-aarch32-defconfig-Enable-support-for-AHCI-SATA.patch b/target/linux/layerscape/patches-4.4/2121-armv8-aarch32-defconfig-Enable-support-for-AHCI-SATA.patch deleted file mode 100644 index a3a67c37b..000000000 --- a/target/linux/layerscape/patches-4.4/2121-armv8-aarch32-defconfig-Enable-support-for-AHCI-SATA.patch +++ /dev/null @@ -1,29 +0,0 @@ -From ff37b165bdb100450c7996c9fac0fad2e6ffe31d Mon Sep 17 00:00:00 2001 -From: Ying Zhang -Date: Thu, 29 Sep 2016 11:40:37 +0800 -Subject: [PATCH 121/124] armv8: aarch32: defconfig: Enable support for AHCI - SATA - -This patch is to enable support for the Freescale QorIQ AHCI SoC's -onboard AHCI SATA. 
- -Signed-off-by: Alison Wang ---- - arch/arm/configs/ls_aarch32_defconfig | 5 +++++ - 1 file changed, 5 insertions(+) - ---- a/arch/arm/configs/ls_aarch32_defconfig -+++ b/arch/arm/configs/ls_aarch32_defconfig -@@ -92,7 +92,12 @@ CONFIG_MTD_SPI_NOR=y - CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y - CONFIG_SPI_FSL_QUADSPI=y - CONFIG_BLK_DEV_SD=y -+CONFIG_CHR_DEV_SG=y - CONFIG_ATA=y -+CONFIG_SATA_AHCI=y -+CONFIG_SATA_AHCI_PLATFORM=y -+CONFIG_AHCI_QORIQ=y -+CONFIG_SATA_SIL24=y - CONFIG_BLK_DEV_LOOP=y - CONFIG_BLK_DEV_RAM=y - CONFIG_BLK_DEV_RAM_COUNT=8 diff --git a/target/linux/layerscape/patches-4.4/2122-armv8-aarch32-defconfig-Enable-USB-and-related-confi.patch b/target/linux/layerscape/patches-4.4/2122-armv8-aarch32-defconfig-Enable-USB-and-related-confi.patch deleted file mode 100644 index 83d372ba6..000000000 --- a/target/linux/layerscape/patches-4.4/2122-armv8-aarch32-defconfig-Enable-USB-and-related-confi.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 913db32774fe5c818112232823edfda1a706552f Mon Sep 17 00:00:00 2001 -From: Ying Zhang -Date: Thu, 29 Sep 2016 11:42:40 +0800 -Subject: [PATCH 122/124] armv8: aarch32: defconfig: Enable USB and related - configuration options - -This patch is to enable USB and related configuration options for -AArch32 on ARMv8. - -Signed-off-by: Alison Wang ---- - arch/arm/configs/ls_aarch32_defconfig | 6 ++++++ - 1 file changed, 6 insertions(+) - ---- a/arch/arm/configs/ls_aarch32_defconfig -+++ b/arch/arm/configs/ls_aarch32_defconfig -@@ -154,8 +154,14 @@ CONFIG_MFD_SYSCON=y - CONFIG_FB=y - CONFIG_USB=y - CONFIG_USB_XHCI_HCD=y -+CONFIG_USB_EHCI_HCD=y -+CONFIG_USB_EHCI_HCD_PLATFORM=y -+CONFIG_USB_OHCI_HCD=y -+CONFIG_USB_OHCI_HCD_PLATFORM=y -+CONFIG_USB_STORAGE=y - CONFIG_USB_DWC3=y - CONFIG_USB_STORAGE=y -+CONFIG_USB_ULPI=y - CONFIG_MMC=y - CONFIG_MMC_SDHCI=y - CONFIG_MMC_SDHCI_PLTFM=y diff --git a/target/linux/layerscape/patches-4.4/2123-armv8-aarch32-defconfig-Enable-KVM-related-configura.patch b/target/linux/layerscape/patches-4.4/2123-armv8-aarch32-defconfig-Enable-KVM-related-configura.patch deleted file mode 100644 index 4556ad9e0..000000000 --- a/target/linux/layerscape/patches-4.4/2123-armv8-aarch32-defconfig-Enable-KVM-related-configura.patch +++ /dev/null @@ -1,59 +0,0 @@ -From dbf356cd062d6313f90323a77d4cf7c820dae40f Mon Sep 17 00:00:00 2001 -From: Ying Zhang -Date: Thu, 29 Sep 2016 13:51:32 +0800 -Subject: [PATCH 123/124] armv8: aarch32: defconfig: Enable KVM-related - configuration options - -This patch is to enable KVM-related configuration options for host and -guest. 
- -Signed-off-by: Alison Wang ---- - arch/arm/configs/ls_aarch32_defconfig | 10 ++++++++++ - 1 file changed, 10 insertions(+) - ---- a/arch/arm/configs/ls_aarch32_defconfig -+++ b/arch/arm/configs/ls_aarch32_defconfig -@@ -102,7 +102,11 @@ CONFIG_BLK_DEV_LOOP=y - CONFIG_BLK_DEV_RAM=y - CONFIG_BLK_DEV_RAM_COUNT=8 - CONFIG_BLK_DEV_RAM_SIZE=262144 -+CONFIG_VIRTIO_BLK=y - CONFIG_NETDEVICES=y -+CONFIG_TUN=y -+CONFIG_VIRTIO_NET=y -+CONFIG_VHOST_NET=y - CONFIG_NET_VENDOR_FREESCALE is not set - CONFIG_FSL_BMAN=y - CONFIG_FSL_QMAN=y -@@ -130,6 +134,8 @@ CONFIG_SERIAL_8250=y - CONFIG_SERIAL_8250_CONSOLE=y - CONFIG_SERIAL_8250_EXTENDED=y - CONFIG_SERIAL_8250_SHARE_IRQ=y -+CONFIG_SERIAL_AMBA_PL011=y -+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y - CONFIG_SERIAL_OF_PLATFORM=y - CONFIG_SERIAL_FSL_LPUART=y - CONFIG_SERIAL_FSL_LPUART_CONSOLE=y -@@ -170,6 +176,7 @@ CONFIG_RTC_CLASS=y - CONFIG_RTC_DRV_DS3232=y - CONFIG_DMADEVICES=y - CONFIG_FSL_EDMA=y -+CONFIG_VIRTIO_PCI=y - CONFIG_STAGING=y - CONFIG_CLK_QORIQ=y - # CONFIG_IOMMU_SUPPORT is not set -@@ -191,6 +198,7 @@ CONFIG_VFAT_FS=y - CONFIG_NTFS_FS=m - CONFIG_TMPFS=y - CONFIG_TMPFS_POSIX_ACL=y -+CONFIG_HUGETLBFS=y - CONFIG_CONFIGFS_FS=y - CONFIG_JFFS2_FS=y - CONFIG_NFS_FS=y -@@ -221,3 +229,5 @@ CONFIG_CRC_CCITT=m - CONFIG_CRC_T10DIF=y - CONFIG_CRC7=m - CONFIG_LIBCRC32C=m -+CONFIG_VIRTUALIZATION=y -+CONFIG_KVM=y diff --git a/target/linux/layerscape/patches-4.4/2124-armv8-aarch32-defconfig-Enable-FTM-alarm-support.patch b/target/linux/layerscape/patches-4.4/2124-armv8-aarch32-defconfig-Enable-FTM-alarm-support.patch deleted file mode 100644 index ae05a8867..000000000 --- a/target/linux/layerscape/patches-4.4/2124-armv8-aarch32-defconfig-Enable-FTM-alarm-support.patch +++ /dev/null @@ -1,23 +0,0 @@ -From 6df7fdf7e0c76df7acc2d1a3a287bf094a94c4ff Mon Sep 17 00:00:00 2001 -From: Ying Zhang -Date: Thu, 29 Sep 2016 13:55:44 +0800 -Subject: [PATCH 124/124] armv8: aarch32: defconfig: Enable FTM alarm support - -This patch is to enable FTM alarm support. - -Signed-off-by: Alison Wang ---- - arch/arm/configs/ls_aarch32_defconfig | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/arch/arm/configs/ls_aarch32_defconfig -+++ b/arch/arm/configs/ls_aarch32_defconfig -@@ -181,6 +181,8 @@ CONFIG_STAGING=y - CONFIG_CLK_QORIQ=y - # CONFIG_IOMMU_SUPPORT is not set - CONFIG_MEMORY=y -+CONFIG_LS_SOC_DRIVERS=y -+CONFIG_FTM_ALARM=y - CONFIG_PWM=y - CONFIG_PWM_FSL_FTM=y - # CONFIG_RESET_CONTROLLER is not set diff --git a/target/linux/layerscape/patches-4.4/3001-arm64-ls1043a-add-DTS-for-Freescale-LS1043A-SoC.patch b/target/linux/layerscape/patches-4.4/3001-arm64-ls1043a-add-DTS-for-Freescale-LS1043A-SoC.patch deleted file mode 100644 index 39a33282a..000000000 --- a/target/linux/layerscape/patches-4.4/3001-arm64-ls1043a-add-DTS-for-Freescale-LS1043A-SoC.patch +++ /dev/null @@ -1,552 +0,0 @@ -From 3ce895cbe3469bfcaa84674ec4f1b2d60e8b370b Mon Sep 17 00:00:00 2001 -From: Mingkai Hu -Date: Mon, 21 Jul 2014 14:48:42 +0800 -Subject: [PATCH 01/70] arm64/ls1043a: add DTS for Freescale LS1043A SoC - -LS1043a is an SoC with 4 ARMv8 A53 cores and most other IP blocks -similar to LS1021a which complies to Chassis 2.1 spec. - -Following levels of DTSI/DTS files have been created for the -LS1043A SoC family: - -- fsl-ls1043a.dtsi: - DTS-Include file for FSL LS1043A SoC. 
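
A board-level DTS then layers on top of this include; the LS1043ARDB file added later in this series starts out along these lines (fragment only; the property values follow the usual arm64 freescale convention):

/include/ "fsl-ls1043a.dtsi"

/ {
	model = "LS1043A RDB Board";
	compatible = "fsl,ls1043a-rdb", "fsl,ls1043a";
};
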
- -Signed-off-by: Li Yang -Signed-off-by: Hou Zhiqiang -Signed-off-by: Mingkai Hu -Signed-off-by: Wenbin Song -Signed-off-by: Mingkai Hu ---- - arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 525 ++++++++++++++++++++++++ - 1 file changed, 525 insertions(+) - create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi - ---- /dev/null -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi -@@ -0,0 +1,525 @@ -+/* -+ * Device Tree Include file for Freescale Layerscape-1043A family SoC. -+ * -+ * Copyright 2014-2015, Freescale Semiconductor -+ * -+ * Mingkai Hu -+ * -+ * This file is dual-licensed: you can use it either under the terms -+ * of the GPLv2 or the X11 license, at your option. Note that this dual -+ * licensing only applies to this file, and not this project as a -+ * whole. -+ * -+ * a) This library is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License as -+ * published by the Free Software Foundation; either version 2 of the -+ * License, or (at your option) any later version. -+ * -+ * This library is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * Or, alternatively, -+ * -+ * b) Permission is hereby granted, free of charge, to any person -+ * obtaining a copy of this software and associated documentation -+ * files (the "Software"), to deal in the Software without -+ * restriction, including without limitation the rights to use, -+ * copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following -+ * conditions: -+ * -+ * The above copyright notice and this permission notice shall be -+ * included in all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -+ * OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+/ { -+ compatible = "fsl,ls1043a"; -+ interrupt-parent = <&gic>; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ -+ cpus { -+ #address-cells = <2>; -+ #size-cells = <0>; -+ -+ /* -+ * We expect the enable-method for cpu's to be "psci", but this -+ * is dependent on the SoC FW, which will fill this in. 
-+ * -+ * Currently supported enable-method is psci v0.2 -+ */ -+ cpu0: cpu@0 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a53"; -+ reg = <0x0 0x0>; -+ clocks = <&clockgen 1 0>; -+ }; -+ -+ cpu1: cpu@1 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a53"; -+ reg = <0x0 0x1>; -+ clocks = <&clockgen 1 0>; -+ }; -+ -+ cpu2: cpu@2 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a53"; -+ reg = <0x0 0x2>; -+ clocks = <&clockgen 1 0>; -+ }; -+ -+ cpu3: cpu@3 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a53"; -+ reg = <0x0 0x3>; -+ clocks = <&clockgen 1 0>; -+ }; -+ }; -+ -+ memory@80000000 { -+ device_type = "memory"; -+ reg = <0x0 0x80000000 0 0x80000000>; -+ /* DRAM space 1, size: 2GiB DRAM */ -+ }; -+ -+ sysclk: sysclk { -+ compatible = "fixed-clock"; -+ #clock-cells = <0>; -+ clock-frequency = <100000000>; -+ clock-output-names = "sysclk"; -+ }; -+ -+ timer { -+ compatible = "arm,armv8-timer"; -+ interrupts = <1 13 0x1>, /* Physical Secure PPI */ -+ <1 14 0x1>, /* Physical Non-Secure PPI */ -+ <1 11 0x1>, /* Virtual PPI */ -+ <1 10 0x1>; /* Hypervisor PPI */ -+ }; -+ -+ pmu { -+ compatible = "arm,armv8-pmuv3"; -+ interrupts = <0 106 0x4>, -+ <0 107 0x4>, -+ <0 95 0x4>, -+ <0 97 0x4>; -+ interrupt-affinity = <&cpu0>, -+ <&cpu1>, -+ <&cpu2>, -+ <&cpu3>; -+ }; -+ -+ gic: interrupt-controller@1400000 { -+ compatible = "arm,gic-400"; -+ #interrupt-cells = <3>; -+ interrupt-controller; -+ reg = <0x0 0x1401000 0 0x1000>, /* GICD */ -+ <0x0 0x1402000 0 0x2000>, /* GICC */ -+ <0x0 0x1404000 0 0x2000>, /* GICH */ -+ <0x0 0x1406000 0 0x2000>; /* GICV */ -+ interrupts = <1 9 0xf08>; -+ }; -+ -+ soc { -+ compatible = "simple-bus"; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ ranges; -+ -+ clockgen: clocking@1ee1000 { -+ compatible = "fsl,ls1043a-clockgen"; -+ reg = <0x0 0x1ee1000 0x0 0x1000>; -+ #clock-cells = <2>; -+ clocks = <&sysclk>; -+ }; -+ -+ scfg: scfg@1570000 { -+ compatible = "fsl,ls1043a-scfg", "syscon"; -+ reg = <0x0 0x1570000 0x0 0x10000>; -+ big-endian; -+ }; -+ -+ dcfg: dcfg@1ee0000 { -+ compatible = "fsl,ls1043a-dcfg", "syscon"; -+ reg = <0x0 0x1ee0000 0x0 0x10000>; -+ }; -+ -+ ifc: ifc@1530000 { -+ compatible = "fsl,ifc", "simple-bus"; -+ reg = <0x0 0x1530000 0x0 0x10000>; -+ interrupts = <0 43 0x4>; -+ }; -+ -+ esdhc: esdhc@1560000 { -+ compatible = "fsl,ls1043a-esdhc", "fsl,esdhc"; -+ reg = <0x0 0x1560000 0x0 0x10000>; -+ interrupts = <0 62 0x4>; -+ clock-frequency = <0>; -+ voltage-ranges = <1800 1800 3300 3300>; -+ sdhci,auto-cmd12; -+ big-endian; -+ bus-width = <4>; -+ }; -+ -+ dspi0: dspi@2100000 { -+ compatible = "fsl,ls1043a-dspi", "fsl,ls1021a-v1.0-dspi"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2100000 0x0 0x10000>; -+ interrupts = <0 64 0x4>; -+ clock-names = "dspi"; -+ clocks = <&clockgen 4 0>; -+ spi-num-chipselects = <5>; -+ big-endian; -+ status = "disabled"; -+ }; -+ -+ dspi1: dspi@2110000 { -+ compatible = "fsl,ls1043a-dspi", "fsl,ls1021a-v1.0-dspi"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2110000 0x0 0x10000>; -+ interrupts = <0 65 0x4>; -+ clock-names = "dspi"; -+ clocks = <&clockgen 4 0>; -+ spi-num-chipselects = <5>; -+ big-endian; -+ status = "disabled"; -+ }; -+ -+ i2c0: i2c@2180000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2180000 0x0 0x10000>; -+ interrupts = <0 56 0x4>; -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 0>; -+ dmas = <&edma0 1 39>, -+ <&edma0 1 38>; -+ dma-names = "tx", "rx"; -+ status = "disabled"; -+ }; -+ -+ 
i2c1: i2c@2190000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2190000 0x0 0x10000>; -+ interrupts = <0 57 0x4>; -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 0>; -+ status = "disabled"; -+ }; -+ -+ i2c2: i2c@21a0000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x21a0000 0x0 0x10000>; -+ interrupts = <0 58 0x4>; -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 0>; -+ status = "disabled"; -+ }; -+ -+ i2c3: i2c@21b0000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x21b0000 0x0 0x10000>; -+ interrupts = <0 59 0x4>; -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 0>; -+ status = "disabled"; -+ }; -+ -+ duart0: serial@21c0500 { -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x00 0x21c0500 0x0 0x100>; -+ interrupts = <0 54 0x4>; -+ clocks = <&clockgen 4 0>; -+ }; -+ -+ duart1: serial@21c0600 { -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x00 0x21c0600 0x0 0x100>; -+ interrupts = <0 54 0x4>; -+ clocks = <&clockgen 4 0>; -+ }; -+ -+ duart2: serial@21d0500 { -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x0 0x21d0500 0x0 0x100>; -+ interrupts = <0 55 0x4>; -+ clocks = <&clockgen 4 0>; -+ }; -+ -+ duart3: serial@21d0600 { -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x0 0x21d0600 0x0 0x100>; -+ interrupts = <0 55 0x4>; -+ clocks = <&clockgen 4 0>; -+ }; -+ -+ gpio1: gpio@2300000 { -+ compatible = "fsl,ls1043a-gpio"; -+ reg = <0x0 0x2300000 0x0 0x10000>; -+ interrupts = <0 66 0x4>; -+ gpio-controller; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio2: gpio@2310000 { -+ compatible = "fsl,ls1043a-gpio"; -+ reg = <0x0 0x2310000 0x0 0x10000>; -+ interrupts = <0 67 0x4>; -+ gpio-controller; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio3: gpio@2320000 { -+ compatible = "fsl,ls1043a-gpio"; -+ reg = <0x0 0x2320000 0x0 0x10000>; -+ interrupts = <0 68 0x4>; -+ gpio-controller; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio4: gpio@2330000 { -+ compatible = "fsl,ls1043a-gpio"; -+ reg = <0x0 0x2330000 0x0 0x10000>; -+ interrupts = <0 134 0x4>; -+ gpio-controller; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ lpuart0: serial@2950000 { -+ compatible = "fsl,ls1021a-lpuart"; -+ reg = <0x0 0x2950000 0x0 0x1000>; -+ interrupts = <0 48 0x4>; -+ clocks = <&clockgen 0 0>; -+ clock-names = "ipg"; -+ status = "disabled"; -+ }; -+ -+ lpuart1: serial@2960000 { -+ compatible = "fsl,ls1021a-lpuart"; -+ reg = <0x0 0x2960000 0x0 0x1000>; -+ interrupts = <0 49 0x4>; -+ clocks = <&clockgen 4 0>; -+ clock-names = "ipg"; -+ status = "disabled"; -+ }; -+ -+ lpuart2: serial@2970000 { -+ compatible = "fsl,ls1021a-lpuart"; -+ reg = <0x0 0x2970000 0x0 0x1000>; -+ interrupts = <0 50 0x4>; -+ clocks = <&clockgen 4 0>; -+ clock-names = "ipg"; -+ status = "disabled"; -+ }; -+ -+ lpuart3: serial@2980000 { -+ compatible = "fsl,ls1021a-lpuart"; -+ reg = <0x0 0x2980000 0x0 0x1000>; -+ interrupts = <0 51 0x4>; -+ clocks = <&clockgen 4 0>; -+ clock-names = "ipg"; -+ status = "disabled"; -+ }; -+ -+ lpuart4: serial@2990000 { -+ compatible = "fsl,ls1021a-lpuart"; -+ reg = <0x0 0x2990000 0x0 0x1000>; -+ interrupts = <0 52 0x4>; -+ clocks = <&clockgen 4 0>; -+ clock-names = "ipg"; -+ status = "disabled"; -+ }; -+ -+ lpuart5: serial@29a0000 { -+ compatible = "fsl,ls1021a-lpuart"; -+ reg = <0x0 
0x29a0000 0x0 0x1000>; -+ interrupts = <0 53 0x4>; -+ clocks = <&clockgen 4 0>; -+ clock-names = "ipg"; -+ status = "disabled"; -+ }; -+ -+ wdog0: wdog@2ad0000 { -+ compatible = "fsl,ls1043a-wdt", "fsl,imx21-wdt"; -+ reg = <0x0 0x2ad0000 0x0 0x10000>; -+ interrupts = <0 83 0x4>; -+ clocks = <&clockgen 4 0>; -+ clock-names = "wdog"; -+ big-endian; -+ }; -+ -+ edma0: edma@2c00000 { -+ #dma-cells = <2>; -+ compatible = "fsl,vf610-edma"; -+ reg = <0x0 0x2c00000 0x0 0x10000>, -+ <0x0 0x2c10000 0x0 0x10000>, -+ <0x0 0x2c20000 0x0 0x10000>; -+ interrupts = <0 103 0x4>, -+ <0 103 0x4>; -+ interrupt-names = "edma-tx", "edma-err"; -+ dma-channels = <32>; -+ big-endian; -+ clock-names = "dmamux0", "dmamux1"; -+ clocks = <&clockgen 4 0>, -+ <&clockgen 4 0>; -+ }; -+ -+ usb0: usb3@2f00000 { -+ compatible = "snps,dwc3"; -+ reg = <0x0 0x2f00000 0x0 0x10000>; -+ interrupts = <0 60 0x4>; -+ dr_mode = "host"; -+ }; -+ -+ usb1: usb3@3000000 { -+ compatible = "snps,dwc3"; -+ reg = <0x0 0x3000000 0x0 0x10000>; -+ interrupts = <0 61 0x4>; -+ dr_mode = "host"; -+ }; -+ -+ usb2: usb3@3100000 { -+ compatible = "snps,dwc3"; -+ reg = <0x0 0x3100000 0x0 0x10000>; -+ interrupts = <0 63 0x4>; -+ dr_mode = "host"; -+ }; -+ -+ sata: sata@3200000 { -+ compatible = "fsl,ls1043a-ahci"; -+ reg = <0x0 0x3200000 0x0 0x10000>; -+ interrupts = <0 69 0x4>; -+ clocks = <&clockgen 4 0>; -+ }; -+ -+ msi1: msi-controller1@1571000 { -+ compatible = "fsl,1s1043a-msi"; -+ reg = <0x0 0x1571000 0x0 0x4>, -+ <0x0 0x1571004 0x0 0x4>; -+ reg-names = "msiir", "msir"; -+ msi-controller; -+ interrupts = <0 116 0x4>; -+ }; -+ -+ msi2: msi-controller2@1572000 { -+ compatible = "fsl,1s1043a-msi"; -+ reg = <0x0 0x1572000 0x0 0x4>, -+ <0x0 0x1572004 0x0 0x4>; -+ reg-names = "msiir", "msir"; -+ msi-controller; -+ interrupts = <0 126 0x4>; -+ }; -+ -+ msi3: msi-controller3@1573000 { -+ compatible = "fsl,1s1043a-msi"; -+ reg = <0x0 0x1573000 0x0 0x4>, -+ <0x0 0x1573004 0x0 0x4>; -+ reg-names = "msiir", "msir"; -+ msi-controller; -+ interrupts = <0 160 0x4>; -+ }; -+ -+ pcie@3400000 { -+ compatible = "fsl,ls1043a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */ -+ 0x40 0x00000000 0x0 0x00002000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 118 0x4>, /* controller interrupt */ -+ <0 117 0x4>; /* PME interrupt */ -+ interrupt-names = "intr", "pme"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&msi1>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 110 0x4>, -+ <0000 0 0 2 &gic 0 111 0x4>, -+ <0000 0 0 3 &gic 0 112 0x4>, -+ <0000 0 0 4 &gic 0 113 0x4>; -+ }; -+ -+ pcie@3500000 { -+ compatible = "fsl,ls1043a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */ -+ 0x48 0x00000000 0x0 0x00002000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 128 0x4>, -+ <0 127 0x4>; -+ interrupt-names = "intr", "pme"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ num-lanes = <2>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory 
*/ -+ msi-parent = <&msi2>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 120 0x4>, -+ <0000 0 0 2 &gic 0 121 0x4>, -+ <0000 0 0 3 &gic 0 122 0x4>, -+ <0000 0 0 4 &gic 0 123 0x4>; -+ }; -+ -+ pcie@3600000 { -+ compatible = "fsl,ls1043a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */ -+ 0x50 0x00000000 0x0 0x00002000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 162 0x4>, -+ <0 161 0x4>; -+ interrupt-names = "intr", "pme"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ num-lanes = <2>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&msi3>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 154 0x4>, -+ <0000 0 0 2 &gic 0 155 0x4>, -+ <0000 0 0 3 &gic 0 156 0x4>, -+ <0000 0 0 4 &gic 0 157 0x4>; -+ }; -+ }; -+ -+}; diff --git a/target/linux/layerscape/patches-4.4/3002-dts-ls1043a-add-LS1043ARDB-board-support.patch b/target/linux/layerscape/patches-4.4/3002-dts-ls1043a-add-LS1043ARDB-board-support.patch deleted file mode 100644 index f36509d95..000000000 --- a/target/linux/layerscape/patches-4.4/3002-dts-ls1043a-add-LS1043ARDB-board-support.patch +++ /dev/null @@ -1,150 +0,0 @@ -From 57d949256241fb79b669bbca0426c2d74a3dfc6e Mon Sep 17 00:00:00 2001 -From: Shaohui Xie -Date: Fri, 8 Jul 2016 10:27:39 +0800 -Subject: [PATCH 02/70] dts/ls1043a: add LS1043ARDB board support - -commit 9a6fce16a82d3412c9350b9f08eacebaa81c0a3d -[context adjustment] - -Signed-off-by: Shaohui Xie -Signed-off-by: Mingkai Hu -Signed-off-by: Wenbin Song -Signed-off-by: Hou Zhiqiang -Signed-off-by: Mingkai Hu -Integrated-by: Zhao Qiang ---- - arch/arm64/boot/dts/freescale/Makefile | 1 + - arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts | 117 +++++++++++++++++++++ - 2 files changed, 118 insertions(+) - create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts - ---- a/arch/arm64/boot/dts/freescale/Makefile -+++ b/arch/arm64/boot/dts/freescale/Makefile -@@ -1,6 +1,7 @@ - dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-qds.dtb - dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-rdb.dtb - dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-simu.dtb -+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-rdb.dtb - - always := $(dtb-y) - subdir-y := $(dts-dirs) ---- /dev/null -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts -@@ -0,0 +1,117 @@ -+/* -+ * Device Tree Include file for Freescale Layerscape-1043A family SoC. -+ * -+ * Copyright 2014-2015, Freescale Semiconductor -+ * -+ * Mingkai Hu -+ * -+ * This file is dual-licensed: you can use it either under the terms -+ * of the GPLv2 or the X11 license, at your option. Note that this dual -+ * licensing only applies to this file, and not this project as a -+ * whole. -+ * -+ * a) This library is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License as -+ * published by the Free Software Foundation; either version 2 of the -+ * License, or (at your option) any later version. -+ * -+ * This library is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * Or, alternatively, -+ * -+ * b) Permission is hereby granted, free of charge, to any person -+ * obtaining a copy of this software and associated documentation -+ * files (the "Software"), to deal in the Software without -+ * restriction, including without limitation the rights to use, -+ * copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following -+ * conditions: -+ * -+ * The above copyright notice and this permission notice shall be -+ * included in all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -+ * OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+/dts-v1/; -+#include "fsl-ls1043a.dtsi" -+ -+/ { -+ model = "LS1043A RDB Board"; -+ compatible = "fsl,ls1043a-rdb", "fsl,ls1043a"; -+}; -+ -+&i2c0 { -+ status = "okay"; -+ ina220@40 { -+ compatible = "ti,ina220"; -+ reg = <0x40>; -+ shunt-resistor = <1000>; -+ }; -+ adt7461a@4c { -+ compatible = "adi,adt7461"; -+ reg = <0x4c>; -+ }; -+ eeprom@56 { -+ compatible = "at24,24c512"; -+ reg = <0x52>; -+ }; -+ eeprom@57 { -+ compatible = "at24,24c512"; -+ reg = <0x53>; -+ }; -+ rtc@68 { -+ compatible = "pericom,pt7c4338"; -+ reg = <0x68>; -+ }; -+}; -+ -+&ifc { -+ status = "okay"; -+ #address-cells = <2>; -+ #size-cells = <1>; -+ /* NOR, NAND Flashes and FPGA on board */ -+ ranges = <0x0 0x0 0x0 0x60000000 0x08000000 -+ 0x1 0x0 0x0 0x7e800000 0x00010000 -+ 0x2 0x0 0x0 0x7fb00000 0x00000100>; -+ -+ nor@0,0 { -+ compatible = "cfi-flash"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ reg = <0x0 0x0 0x8000000>; -+ bank-width = <2>; -+ device-width = <1>; -+ }; -+ -+ nand@1,0 { -+ compatible = "fsl,ifc-nand"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ reg = <0x1 0x0 0x10000>; -+ }; -+ -+ cpld: board-control@2,0 { -+ compatible = "fsl,ls1043ardb-cpld"; -+ reg = <0x2 0x0 0x0000100>; -+ }; -+}; -+ -+&duart0 { -+ status = "okay"; -+}; -+ -+&duart1 { -+ status = "okay"; -+}; diff --git a/target/linux/layerscape/patches-4.4/3003-arm64-dts-Update-address-cells-and-reg-properties-of.patch b/target/linux/layerscape/patches-4.4/3003-arm64-dts-Update-address-cells-and-reg-properties-of.patch deleted file mode 100644 index 20cf8522a..000000000 --- a/target/linux/layerscape/patches-4.4/3003-arm64-dts-Update-address-cells-and-reg-properties-of.patch +++ /dev/null @@ -1,141 +0,0 @@ -From 3970a709eb4c25e298e11cfe0ea7412bb2139197 Mon Sep 17 00:00:00 2001 -From: Alison Wang -Date: Fri, 8 Jul 2016 10:50:46 +0800 -Subject: [PATCH 03/70] arm64: dts: Update address-cells and reg properties of - cpu nodes - -commit 67161e229a59faf81732892b45a9ab3bae62ea18 -[context adjustment] - -MPIDR_EL1[63:32] value is equal to 0 for the CPUs of the LS1043A and -LS2080A SoCs. The ARM CPU binding allows #address-cells to be set to 1, -since MPIDR_EL1[63:32] bits are not used for CPUs identification. Update -the #address-cells and reg properties accordingly. 
- -Signed-off-by: Alison Wang -Integrated-by: Zhao Qiang ---- - arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 10 +++++----- - arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi | 18 +++++++++--------- - 2 files changed, 14 insertions(+), 14 deletions(-) - ---- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi -@@ -51,7 +51,7 @@ - #size-cells = <2>; - - cpus { -- #address-cells = <2>; -+ #address-cells = <1>; - #size-cells = <0>; - - /* -@@ -63,28 +63,28 @@ - cpu0: cpu@0 { - device_type = "cpu"; - compatible = "arm,cortex-a53"; -- reg = <0x0 0x0>; -+ reg = <0x0>; - clocks = <&clockgen 1 0>; - }; - - cpu1: cpu@1 { - device_type = "cpu"; - compatible = "arm,cortex-a53"; -- reg = <0x0 0x1>; -+ reg = <0x1>; - clocks = <&clockgen 1 0>; - }; - - cpu2: cpu@2 { - device_type = "cpu"; - compatible = "arm,cortex-a53"; -- reg = <0x0 0x2>; -+ reg = <0x2>; - clocks = <&clockgen 1 0>; - }; - - cpu3: cpu@3 { - device_type = "cpu"; - compatible = "arm,cortex-a53"; -- reg = <0x0 0x3>; -+ reg = <0x3>; - clocks = <&clockgen 1 0>; - }; - }; ---- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi -+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi -@@ -51,7 +51,7 @@ - #size-cells = <2>; - - cpus { -- #address-cells = <2>; -+ #address-cells = <1>; - #size-cells = <0>; - - /* -@@ -65,56 +65,56 @@ - cpu@0 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; -- reg = <0x0 0x0>; -+ reg = <0x0>; - clocks = <&clockgen 1 0>; - }; - - cpu@1 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; -- reg = <0x0 0x1>; -+ reg = <0x1>; - clocks = <&clockgen 1 0>; - }; - - cpu@100 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; -- reg = <0x0 0x100>; -+ reg = <0x100>; - clocks = <&clockgen 1 1>; - }; - - cpu@101 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; -- reg = <0x0 0x101>; -+ reg = <0x101>; - clocks = <&clockgen 1 1>; - }; - - cpu@200 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; -- reg = <0x0 0x200>; -+ reg = <0x200>; - clocks = <&clockgen 1 2>; - }; - - cpu@201 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; -- reg = <0x0 0x201>; -+ reg = <0x201>; - clocks = <&clockgen 1 2>; - }; - - cpu@300 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; -- reg = <0x0 0x300>; -+ reg = <0x300>; - clocks = <&clockgen 1 3>; - }; - - cpu@301 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; -- reg = <0x0 0x301>; -+ reg = <0x301>; - clocks = <&clockgen 1 3>; - }; - }; diff --git a/target/linux/layerscape/patches-4.4/3004-armv8-aarch32-Add-ITS-file-for-AArch32-Linux-on-LS10.patch b/target/linux/layerscape/patches-4.4/3004-armv8-aarch32-Add-ITS-file-for-AArch32-Linux-on-LS10.patch deleted file mode 100644 index 940d1970a..000000000 --- a/target/linux/layerscape/patches-4.4/3004-armv8-aarch32-Add-ITS-file-for-AArch32-Linux-on-LS10.patch +++ /dev/null @@ -1,71 +0,0 @@ -From 503f48a0a43ddf20098b2a5ec2c3d9d91775e441 Mon Sep 17 00:00:00 2001 -From: Alison Wang -Date: Wed, 9 Dec 2015 10:53:04 +0800 -Subject: [PATCH 04/70] armv8: aarch32: Add ITS file for AArch32 Linux on - LS1043ARDB - -kernel-ls1043a-rdb-aarch32.its is added to load kernel, DTB -and root filesystrem together. 
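(For context: an .its source like the one added below is normally compiled into a single bootable FIT image with U-Boot's mkimage, e.g. "mkimage -f kernel-ls1043a-rdb-aarch32.its kernel.itb", and the resulting .itb is then started from U-Boot with "bootm". The image file name here is only illustrative; the incbin'd artifact paths are whatever the build tree provides.)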
- -Signed-off-by: Alison Wang ---- - kernel-ls1043a-rdb-aarch32.its | 53 ++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 53 insertions(+) - create mode 100644 kernel-ls1043a-rdb-aarch32.its - ---- /dev/null -+++ b/kernel-ls1043a-rdb-aarch32.its -@@ -0,0 +1,53 @@ -+/* -+ * Copyright (C) 2015, Freescale Semiconductor -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+ -+/dts-v1/; -+ -+/ { -+ description = "RDB Image file for the LS1043A Linux Kernel"; -+ #address-cells = <1>; -+ -+ images { -+ kernel@1 { -+ description = "ARM32 Linux kernel"; -+ data = /incbin/("./arch/arm/boot/zImage"); -+ type = "kernel"; -+ arch = "arm"; -+ os = "linux"; -+ compression = "none"; -+ load = <0x80008000>; -+ entry = <0x80008000>; -+ }; -+ fdt@1 { -+ description = "Flattened Device Tree blob"; -+ data = /incbin/("./fsl-ls1043a-rdb.dtb"); -+ type = "flat_dt"; -+ arch = "arm"; -+ compression = "none"; -+ load = <0x90000000>; -+ }; -+ ramdisk@1 { -+ description = "LS1 Ramdisk"; -+ data = /incbin/("./fsl-image-core-ls1021atwr-wifi.rootfs.ext2.gz"); -+ type = "ramdisk"; -+ arch = "arm"; -+ os = "linux"; -+ compression = "none"; -+ }; -+ }; -+ -+ configurations { -+ default = "config@1"; -+ config@1 { -+ description = "Boot Linux kernel"; -+ kernel = "kernel@1"; -+ fdt = "fdt@1"; -+ ramdisk = "ramdisk@1"; -+ }; -+ }; -+}; diff --git a/target/linux/layerscape/patches-4.4/3005-armv8-aarch32-change-FS-file-name-in-ITS.patch b/target/linux/layerscape/patches-4.4/3005-armv8-aarch32-change-FS-file-name-in-ITS.patch deleted file mode 100644 index 2836f1ae8..000000000 --- a/target/linux/layerscape/patches-4.4/3005-armv8-aarch32-change-FS-file-name-in-ITS.patch +++ /dev/null @@ -1,21 +0,0 @@ -From 8f9b9d829ea0e67760b2e67c9339f6c417084fdc Mon Sep 17 00:00:00 2001 -From: Pan Jiafei -Date: Thu, 28 Jan 2016 12:10:24 +0800 -Subject: [PATCH 05/70] armv8: aarch32: change FS file name in ITS - -Signed-off-by: Pan Jiafei ---- - kernel-ls1043a-rdb-aarch32.its | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/kernel-ls1043a-rdb-aarch32.its -+++ b/kernel-ls1043a-rdb-aarch32.its -@@ -33,7 +33,7 @@ - }; - ramdisk@1 { - description = "LS1 Ramdisk"; -- data = /incbin/("./fsl-image-core-ls1021atwr-wifi.rootfs.ext2.gz"); -+ data = /incbin/("./fsl-image-core-ls1043ardb-32b.ext2.gz"); - type = "ramdisk"; - arch = "arm"; - os = "linux"; diff --git a/target/linux/layerscape/patches-4.4/3007-armv8-aarch32-Run-32-bit-Linux-in-AArch32-execution-.patch b/target/linux/layerscape/patches-4.4/3007-armv8-aarch32-Run-32-bit-Linux-in-AArch32-execution-.patch deleted file mode 100644 index c2f521564..000000000 --- a/target/linux/layerscape/patches-4.4/3007-armv8-aarch32-Run-32-bit-Linux-in-AArch32-execution-.patch +++ /dev/null @@ -1,79 +0,0 @@ -From 3a827762e11670ca815bd4ee305f5faf5f02acb9 Mon Sep 17 00:00:00 2001 -From: Alison Wang -Date: Tue, 17 May 2016 17:23:51 +0800 -Subject: [PATCH 07/70] armv8: aarch32: Run 32-bit Linux in AArch32 execution - state - -This patch adds AArch32 execution state support for LS1043A. Verified -32-bit Linux kernel can run on LS1043ARDB board. 
- -Signed-off-by: Ebony Zhu -Signed-off-by: Alison Wang ---- - arch/arm/mach-imx/Kconfig | 10 ++++++++++ - arch/arm/mach-imx/Makefile | 4 +++- - arch/arm/mach-imx/mach-ls1043a.c | 21 +++++++++++++++++++++ - 3 files changed, 34 insertions(+), 1 deletion(-) - create mode 100644 arch/arm/mach-imx/mach-ls1043a.c - ---- a/arch/arm/mach-imx/Kconfig -+++ b/arch/arm/mach-imx/Kconfig -@@ -612,6 +612,16 @@ endchoice - - endif - -+config ARCH_LAYERSCAPE -+ bool "Freescale Layerscape SoC support" -+ select ARM_GIC -+ select HAVE_ARM_ARCH_TIMER -+ select PCI_LAYERSCAPE if PCI -+ select LS1_MSI if PCI_MSI -+ -+ help -+ This enables support for Freescale Layerscape SoC family. -+ - source "arch/arm/mach-imx/devices/Kconfig" - - endif ---- a/arch/arm/mach-imx/Makefile -+++ b/arch/arm/mach-imx/Makefile -@@ -75,7 +75,7 @@ obj-$(CONFIG_HAVE_IMX_ANATOP) += anatop. - obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o - obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o - obj-$(CONFIG_HAVE_IMX_SRC) += src.o --ifneq ($(CONFIG_SOC_IMX6)$(CONFIG_SOC_LS1021A),) -+ifneq ($(CONFIG_SOC_IMX6)$(CONFIG_SOC_LS1021A)$(CONFIG_ARCH_LAYERSCAPE),) - AFLAGS_headsmp.o :=-Wa,-march=armv7-a - obj-$(CONFIG_SMP) += headsmp.o platsmp.o - obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o -@@ -101,4 +101,6 @@ obj-$(CONFIG_SOC_VF610) += mach-vf610.o - - obj-$(CONFIG_SOC_LS1021A) += mach-ls1021a.o - -+obj-$(CONFIG_ARCH_LAYERSCAPE) += mach-ls1043a.o -+ - obj-y += devices/ ---- /dev/null -+++ b/arch/arm/mach-imx/mach-ls1043a.c -@@ -0,0 +1,21 @@ -+/* -+ * Copyright 2015-2016 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ */ -+ -+#include -+ -+#include "common.h" -+ -+static const char * const ls1043a_dt_compat[] __initconst = { -+ "fsl,ls1043a", -+ NULL, -+}; -+ -+DT_MACHINE_START(LS1043A, "Freescale LS1043A") -+ .dt_compat = ls1043a_dt_compat, -+MACHINE_END diff --git a/target/linux/layerscape/patches-4.4/3008-armv8-aarch32-Add-SMP-support-for-32-bit-Linux.patch b/target/linux/layerscape/patches-4.4/3008-armv8-aarch32-Add-SMP-support-for-32-bit-Linux.patch deleted file mode 100644 index 31f1d98ae..000000000 --- a/target/linux/layerscape/patches-4.4/3008-armv8-aarch32-Add-SMP-support-for-32-bit-Linux.patch +++ /dev/null @@ -1,103 +0,0 @@ -From 5d06e90bd0e3bdd104b7b25173e05617f02dc44d Mon Sep 17 00:00:00 2001 -From: Alison Wang -Date: Fri, 13 May 2016 15:09:47 +0800 -Subject: [PATCH 08/70] armv8: aarch32: Add SMP support for 32-bit Linux - -The patch adds SMP support for running 32-bit Linux kernel. Spin-table -method is used for SMP support. 
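(For reference: the spin-table method relied on below assumes firmware publishes a per-cpu release address through the device tree, which the code in this patch reads from the cpu-release-addr property. A minimal sketch of such a cpu node — enable method per the standard binding, address purely illustrative and owned by the bootloader, not taken from this patch set:

    cpu1: cpu@1 {
            device_type = "cpu";
            compatible = "arm,cortex-a53";
            reg = <0x1>;
            enable-method = "spin-table";
            cpu-release-addr = <0x0 0x8000fff8>;
    };

Each secondary CPU spins at that 64-bit address until the boot CPU writes the secondary entry point there and issues sev(), which is exactly what layerscape_smp_boot_secondary() below implements.)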
- -Signed-off-by: Alison Wang -Signed-off-by: Chenhui Zhao ---- - arch/arm/mach-imx/common.h | 1 + - arch/arm/mach-imx/mach-ls1043a.c | 1 + - arch/arm/mach-imx/platsmp.c | 49 ++++++++++++++++++++++++++++++++++++++ - 3 files changed, 51 insertions(+) - ---- a/arch/arm/mach-imx/common.h -+++ b/arch/arm/mach-imx/common.h -@@ -155,5 +155,6 @@ static inline void imx_init_l2cache(void - - extern struct smp_operations imx_smp_ops; - extern struct smp_operations ls1021a_smp_ops; -+extern const struct smp_operations layerscape_smp_ops; - - #endif ---- a/arch/arm/mach-imx/mach-ls1043a.c -+++ b/arch/arm/mach-imx/mach-ls1043a.c -@@ -17,5 +17,6 @@ static const char * const ls1043a_dt_com - }; - - DT_MACHINE_START(LS1043A, "Freescale LS1043A") -+ .smp = smp_ops(layerscape_smp_ops), - .dt_compat = ls1043a_dt_compat, - MACHINE_END ---- a/arch/arm/mach-imx/platsmp.c -+++ b/arch/arm/mach-imx/platsmp.c -@@ -14,6 +14,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -26,6 +27,8 @@ - u32 g_diag_reg; - static void __iomem *scu_base; - -+static u64 cpu_release_addr[NR_CPUS]; -+ - static struct map_desc scu_io_desc __initdata = { - /* .virtual and .pfn are run-time assigned */ - .length = SZ_4K, -@@ -127,3 +130,49 @@ struct smp_operations ls1021a_smp_ops _ - .smp_prepare_cpus = ls1021a_smp_prepare_cpus, - .smp_boot_secondary = ls1021a_boot_secondary, - }; -+ -+static int layerscape_smp_boot_secondary(unsigned int cpu, -+ struct task_struct *idle) -+{ -+ u32 secondary_startup_phys; -+ __le32 __iomem *release_addr; -+ -+ secondary_startup_phys = virt_to_phys(secondary_startup); -+ -+ release_addr = ioremap_cache((u32)cpu_release_addr[cpu], -+ sizeof(u64)); -+ if (!release_addr) -+ return -ENOMEM; -+ -+ writel_relaxed(secondary_startup_phys, release_addr); -+ writel_relaxed(0, release_addr + 1); -+ __cpuc_flush_dcache_area((__force void *)release_addr, -+ sizeof(u64)); -+ -+ sev(); -+ -+ iounmap(release_addr); -+ -+ return 0; -+} -+ -+static void layerscape_smp_init_cpus(void) -+{ -+ struct device_node *dnt = NULL; -+ unsigned int cpu = 0; -+ -+ while ((dnt = of_find_node_by_type(dnt, "cpu"))) { -+ if (of_property_read_u64(dnt, "cpu-release-addr", -+ &cpu_release_addr[cpu])) { -+ pr_err("CPU %d: missing or invalid cpu-release-addr property\n", -+ cpu); -+ } -+ -+ cpu++; -+ } -+} -+ -+const struct smp_operations layerscape_smp_ops __initconst = { -+ .smp_init_cpus = layerscape_smp_init_cpus, -+ .smp_boot_secondary = layerscape_smp_boot_secondary, -+}; diff --git a/target/linux/layerscape/patches-4.4/3009-armv8-aarch32-Allow-RAM-to-be-mapped-for-LayerScape-.patch b/target/linux/layerscape/patches-4.4/3009-armv8-aarch32-Allow-RAM-to-be-mapped-for-LayerScape-.patch deleted file mode 100644 index 240204a16..000000000 --- a/target/linux/layerscape/patches-4.4/3009-armv8-aarch32-Allow-RAM-to-be-mapped-for-LayerScape-.patch +++ /dev/null @@ -1,31 +0,0 @@ -From ef25bf644b7de83849a2f804c84bb54cd2f1255f Mon Sep 17 00:00:00 2001 -From: Alison Wang -Date: Mon, 11 Apr 2016 17:25:40 +0800 -Subject: [PATCH 09/70] armv8: aarch32: Allow RAM to be mapped for LayerScape - SoC - -This patch is based on Kernel v4.1.8. As in v4.1.8, memremap() is not -introduced and the WARN() check is not relaxed to allow MT_MEMORY_RW -mappings of pfn_valid() pages, this patch is needed as a workaround for -spin-table address which locates in RAM to be mapped. - -For the latest kernel in upstream, this patch is not needed anymore. 
- -Signed-off-by: Alison Wang ---- - arch/arm/mm/ioremap.c | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/arch/arm/mm/ioremap.c -+++ b/arch/arm/mm/ioremap.c -@@ -298,8 +298,10 @@ static void __iomem * __arm_ioremap_pfn_ - /* - * Don't allow RAM to be mapped - this causes problems with ARMv6+ - */ -+#ifndef CONFIG_ARCH_LAYERSCAPE - if (WARN_ON(pfn_valid(pfn))) - return NULL; -+#endif - - area = get_vm_area_caller(size, VM_IOREMAP, caller); - if (!area) diff --git a/target/linux/layerscape/patches-4.4/3010-arm-add-pgprot_cached-and-pgprot_cached_ns-support.patch b/target/linux/layerscape/patches-4.4/3010-arm-add-pgprot_cached-and-pgprot_cached_ns-support.patch deleted file mode 100644 index 43ab4d924..000000000 --- a/target/linux/layerscape/patches-4.4/3010-arm-add-pgprot_cached-and-pgprot_cached_ns-support.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 31a5b5189bdd33bb87f88320964a47c0da983af2 Mon Sep 17 00:00:00 2001 -From: Jianhua Xie -Date: Fri, 29 Jan 2016 16:40:46 +0800 -Subject: [PATCH 10/70] arm: add pgprot_cached and pgprot_cached_ns support - -Signed-off-by: Jianhua Xie ---- - arch/arm/include/asm/pgtable.h | 7 +++++++ - 1 file changed, 7 insertions(+) - ---- a/arch/arm/include/asm/pgtable.h -+++ b/arch/arm/include/asm/pgtable.h -@@ -116,6 +116,13 @@ extern pgprot_t pgprot_s2_device; - #define pgprot_noncached(prot) \ - __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED) - -+#define pgprot_cached(prot) \ -+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_CACHED) -+ -+#define pgprot_cached_ns(prot) \ -+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_CACHED | \ -+ L_PTE_MT_DEV_NONSHARED) -+ - #define pgprot_writecombine(prot) \ - __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE) - diff --git a/target/linux/layerscape/patches-4.4/3011-arm-add-new-non-shareable-ioremap.patch b/target/linux/layerscape/patches-4.4/3011-arm-add-new-non-shareable-ioremap.patch deleted file mode 100644 index f3ef2b3ff..000000000 --- a/target/linux/layerscape/patches-4.4/3011-arm-add-new-non-shareable-ioremap.patch +++ /dev/null @@ -1,99 +0,0 @@ -From 707627a28924320a7a36bdb8b02c05651c0c384d Mon Sep 17 00:00:00 2001 -From: Pan Jiafei -Date: Fri, 8 Jul 2016 11:16:13 +0800 -Subject: [PATCH 11/70] arm: add new non-shareable ioremap - -commit 17d7448eef0fa57a0899e6a864d875e7a9082561 -[modify ioremap_cache_ns according to Linux v4.4.7] - -Signed-off-by: Pan Jiafei -Integrated-by: Zhao Qiang ---- - arch/arm/include/asm/io.h | 4 ++++ - arch/arm/include/asm/mach/map.h | 4 ++-- - arch/arm/mm/ioremap.c | 7 +++++++ - arch/arm/mm/mmu.c | 9 +++++++++ - 4 files changed, 22 insertions(+), 2 deletions(-) - ---- a/arch/arm/include/asm/io.h -+++ b/arch/arm/include/asm/io.h -@@ -129,6 +129,7 @@ static inline u32 __raw_readl(const vola - #define MT_DEVICE_NONSHARED 1 - #define MT_DEVICE_CACHED 2 - #define MT_DEVICE_WC 3 -+#define MT_MEMORY_RW_NS 4 - /* - * types 4 onwards can be found in asm/mach/map.h and are undefined - * for ioremap -@@ -399,6 +400,9 @@ void __iomem *ioremap_wc(resource_size_t - #define ioremap_wc ioremap_wc - #define ioremap_wt ioremap_wc - -+void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size); -+#define ioremap_cache_ns ioremap_cache_ns -+ - void iounmap(volatile void __iomem *iomem_cookie); - #define iounmap iounmap - ---- a/arch/arm/include/asm/mach/map.h -+++ b/arch/arm/include/asm/mach/map.h -@@ -21,9 +21,9 @@ struct map_desc { - unsigned int type; - }; - --/* types 0-3 are defined in asm/io.h */ -+/* types 0-4 are defined in asm/io.h */ - enum { -- MT_UNCACHED = 4, 
-+ MT_UNCACHED = 5, - MT_CACHECLEAN, - MT_MINICLEAN, - MT_LOW_VECTORS, ---- a/arch/arm/mm/ioremap.c -+++ b/arch/arm/mm/ioremap.c -@@ -394,6 +394,13 @@ void __iomem *ioremap_wc(resource_size_t - } - EXPORT_SYMBOL(ioremap_wc); - -+void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size) -+{ -+ return arch_ioremap_caller(res_cookie, size, MT_MEMORY_RW_NS, -+ __builtin_return_address(0)); -+} -+EXPORT_SYMBOL(ioremap_cache_ns); -+ - /* - * Remap an arbitrary physical address space into the kernel virtual - * address space as memory. Needed when the kernel wants to execute ---- a/arch/arm/mm/mmu.c -+++ b/arch/arm/mm/mmu.c -@@ -313,6 +313,13 @@ static struct mem_type mem_types[] = { - .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, - .domain = DOMAIN_KERNEL, - }, -+ [MT_MEMORY_RW_NS] = { -+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | -+ L_PTE_XN, -+ .prot_l1 = PMD_TYPE_TABLE, -+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN, -+ .domain = DOMAIN_KERNEL, -+ }, - [MT_ROM] = { - .prot_sect = PMD_TYPE_SECT, - .domain = DOMAIN_KERNEL, -@@ -644,6 +651,7 @@ static void __init build_mem_type_table( - } - kern_pgprot |= PTE_EXT_AF; - vecs_pgprot |= PTE_EXT_AF; -+ mem_types[MT_MEMORY_RW_NS].prot_pte |= PTE_EXT_AF | cp->pte; - - /* - * Set PXN for user mappings -@@ -672,6 +680,7 @@ static void __init build_mem_type_table( - mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot; - mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd; - mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot; -+ mem_types[MT_MEMORY_RW_NS].prot_sect |= ecc_mask | cp->pmd; - mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot; - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask; - mem_types[MT_ROM].prot_sect |= cp->pmd; diff --git a/target/linux/layerscape/patches-4.4/3012-dts-ls1043a-add-fman-bman-qman-ethernet-nodes.patch b/target/linux/layerscape/patches-4.4/3012-dts-ls1043a-add-fman-bman-qman-ethernet-nodes.patch deleted file mode 100644 index cbb67c47d..000000000 --- a/target/linux/layerscape/patches-4.4/3012-dts-ls1043a-add-fman-bman-qman-ethernet-nodes.patch +++ /dev/null @@ -1,747 +0,0 @@ -From 10b0a19d62d932a6eb01ceb8749190aaf0ff063e Mon Sep 17 00:00:00 2001 -From: Shaohui Xie -Date: Mon, 11 Jul 2016 10:47:20 +0800 -Subject: [PATCH 12/70] dts: ls1043a: add fman/bman/qman/ethernet nodes - -commit ecb0901ba0a6558a05054d21ad9e70999a6f7ca1 -[context adjustment] - -Signed-off-by: Shaohui Xie -Signed-off-by: Zhao Qiang ---- - arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 461 ++++++++++++++++++++ - .../boot/dts/freescale/qoriq-bman1-portals.dtsi | 104 +++++ - .../boot/dts/freescale/qoriq-qman1-portals.dtsi | 136 ++++++ - 3 files changed, 701 insertions(+) - create mode 100644 arch/arm64/boot/dts/freescale/qoriq-bman1-portals.dtsi - create mode 100644 arch/arm64/boot/dts/freescale/qoriq-qman1-portals.dtsi - ---- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi -@@ -50,6 +50,16 @@ - #address-cells = <2>; - #size-cells = <2>; - -+ aliases { -+ ethernet0 = &fm1mac1; -+ ethernet1 = &fm1mac2; -+ ethernet2 = &fm1mac3; -+ ethernet3 = &fm1mac4; -+ ethernet4 = &fm1mac5; -+ ethernet5 = &fm1mac6; -+ ethernet6 = &fm1mac9; -+ }; -+ - cpus { - #address-cells = <1>; - #size-cells = <0>; -@@ -174,6 +184,323 @@ - bus-width = <4>; - }; - -+ qman: qman@1880000 { -+ compatible = "fsl,qman"; -+ reg = <0x00 0x1880000 0x0 0x10000>; -+ interrupts = <0 45 0x4>; -+ }; -+ -+ bman: bman@1890000 { -+ compatible = "fsl,bman"; -+ reg = <0x00 0x1890000 0x0 
0x10000>; -+ interrupts = <0 45 0x4>; -+ }; -+ -+ fman0: fman@1a00000 { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ cell-index = <0>; -+ compatible = "fsl,fman", "simple-bus"; -+ ranges = <0x0 0x00 0x1a00000 0x100000>; -+ reg = <0x00 0x1a00000 0x0 0x100000>; -+ clock-frequency = <0>; -+ interrupts = <0 44 0x4>, -+ <0 45 0x4>; -+ -+ cc { -+ compatible = "fsl,fman-cc"; -+ }; -+ -+ muram@0 { -+ compatible = "fsl,fman-muram"; -+ reg = <0x0 0x60000>; -+ }; -+ -+ bmi@80000 { -+ compatible = "fsl,fman-bmi"; -+ reg = <0x80000 0x400>; -+ }; -+ -+ qmi@80400 { -+ compatible = "fsl,fman-qmi"; -+ reg = <0x80400 0x400>; -+ }; -+ -+ fman0_oh1: port@82000 { -+ cell-index = <0>; -+ compatible = "fsl,fman-port-oh"; -+ reg = <0x82000 0x1000>; -+ }; -+ -+ fman0_oh2: port@83000 { -+ cell-index = <1>; -+ compatible = "fsl,fman-port-oh"; -+ reg = <0x83000 0x1000>; -+ }; -+ -+ fman0_oh3: port@84000 { -+ cell-index = <2>; -+ compatible = "fsl,fman-port-oh"; -+ reg = <0x84000 0x1000>; -+ }; -+ -+ fman0_oh4: port@85000 { -+ cell-index = <3>; -+ compatible = "fsl,fman-port-oh"; -+ reg = <0x85000 0x1000>; -+ }; -+ -+ fman0_oh5: port@86000 { -+ cell-index = <4>; -+ compatible = "fsl,fman-port-oh"; -+ reg = <0x86000 0x1000>; -+ }; -+ -+ fman0_oh6: port@87000 { -+ cell-index = <5>; -+ compatible = "fsl,fman-port-oh"; -+ reg = <0x87000 0x1000>; -+ }; -+ -+ policer@c0000 { -+ compatible = "fsl,fman-policer"; -+ reg = <0xc0000 0x1000>; -+ }; -+ -+ keygen@c1000 { -+ compatible = "fsl,fman-keygen"; -+ reg = <0xc1000 0x1000>; -+ }; -+ -+ dma@c2000 { -+ compatible = "fsl,fman-dma"; -+ reg = <0xc2000 0x1000>; -+ }; -+ -+ fpm@c3000 { -+ compatible = "fsl,fman-fpm"; -+ reg = <0xc3000 0x1000>; -+ }; -+ -+ parser@c7000 { -+ compatible = "fsl,fman-parser"; -+ reg = <0xc7000 0x1000>; -+ }; -+ -+ vsps@dc000 { -+ compatible = "fsl,fman-vsps"; -+ reg = <0xdc000 0x1000>; -+ }; -+ -+ mdio0: mdio@fc000 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0xfc000 0x1000>; -+ }; -+ -+ xmdio0: mdio@fd000 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0xfd000 0x1000>; -+ }; -+ -+ fman0_rx0: port@88000 { -+ cell-index = <0>; -+ compatible = "fsl,fman-port-1g-rx"; -+ reg = <0x88000 0x1000>; -+ }; -+ -+ fman0_tx0: port@a8000 { -+ cell-index = <0>; -+ compatible = "fsl,fman-port-1g-tx"; -+ reg = <0xa8000 0x1000>; -+ }; -+ -+ fm1mac1: ethernet@e0000 { -+ cell-index = <0>; -+ compatible = "fsl,fman-memac"; -+ reg = <0xe0000 0x1000>; -+ fsl,port-handles = <&fman0_rx0 &fman0_tx0>; -+ ptimer-handle = <&ptp_timer0>; -+ }; -+ -+ mdio@e1000 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0xe1000 0x1000>; -+ }; -+ -+ fman0_rx1: port@89000 { -+ cell-index = <1>; -+ compatible = "fsl,fman-port-1g-rx"; -+ reg = <0x89000 0x1000>; -+ }; -+ -+ fman0_tx1: port@a9000 { -+ cell-index = <1>; -+ compatible = "fsl,fman-port-1g-tx"; -+ reg = <0xa9000 0x1000>; -+ }; -+ -+ fm1mac2: ethernet@e2000 { -+ cell-index = <1>; -+ compatible = "fsl,fman-memac"; -+ reg = <0xe2000 0x1000>; -+ fsl,port-handles = <&fman0_rx1 &fman0_tx1>; -+ ptimer-handle = <&ptp_timer0>; -+ }; -+ -+ mdio@e3000 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0xe3000 0x1000>; -+ }; -+ -+ fman0_rx2: port@8a000 { -+ cell-index = <2>; -+ compatible = "fsl,fman-port-1g-rx"; -+ reg = <0x8a000 0x1000>; -+ }; -+ -+ fman0_tx2: port@aa000 { -+ cell-index = <2>; -+ compatible = "fsl,fman-port-1g-tx"; -+ reg = 
<0xaa000 0x1000>; -+ }; -+ -+ fm1mac3: ethernet@e4000 { -+ cell-index = <2>; -+ compatible = "fsl,fman-memac"; -+ reg = <0xe4000 0x1000>; -+ fsl,port-handles = <&fman0_rx2 &fman0_tx2>; -+ ptimer-handle = <&ptp_timer0>; -+ }; -+ -+ mdio@e5000 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0xe5000 0x1000>; -+ }; -+ -+ fman0_rx3: port@8b000 { -+ cell-index = <3>; -+ compatible = "fsl,fman-port-1g-rx"; -+ reg = <0x8b000 0x1000>; -+ }; -+ -+ fman0_tx3: port@ab000 { -+ cell-index = <3>; -+ compatible = "fsl,fman-port-1g-tx"; -+ reg = <0xab000 0x1000>; -+ }; -+ -+ fm1mac4: ethernet@e6000 { -+ cell-index = <3>; -+ compatible = "fsl,fman-memac"; -+ reg = <0xe6000 0x1000>; -+ fsl,port-handles = <&fman0_rx3 &fman0_tx3>; -+ ptimer-handle = <&ptp_timer0>; -+ }; -+ -+ mdio@e7000 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0xe7000 0x1000>; -+ }; -+ -+ fman0_rx4: port@8c000 { -+ cell-index = <4>; -+ compatible = "fsl,fman-port-1g-rx"; -+ reg = <0x8c000 0x1000>; -+ }; -+ -+ fman0_tx4: port@ac000 { -+ cell-index = <4>; -+ compatible = "fsl,fman-port-1g-tx"; -+ reg = <0xac000 0x1000>; -+ }; -+ -+ fm1mac5: ethernet@e8000 { -+ cell-index = <4>; -+ compatible = "fsl,fman-memac"; -+ reg = <0xe8000 0x1000>; -+ fsl,port-handles = <&fman0_rx4 &fman0_tx4>; -+ ptimer-handle = <&ptp_timer0>; -+ }; -+ -+ mdio@e9000 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0xe9000 0x1000>; -+ }; -+ -+ fman0_rx5: port@8d000 { -+ cell-index = <5>; -+ compatible = "fsl,fman-port-1g-rx"; -+ reg = <0x8d000 0x1000>; -+ }; -+ -+ fman0_tx5: port@ad000 { -+ cell-index = <5>; -+ compatible = "fsl,fman-port-1g-tx"; -+ reg = <0xad000 0x1000>; -+ }; -+ -+ fm1mac6: ethernet@ea000 { -+ cell-index = <5>; -+ compatible = "fsl,fman-memac"; -+ reg = <0xea000 0x1000>; -+ fsl,port-handles = <&fman0_rx5 &fman0_tx5>; -+ ptimer-handle = <&ptp_timer0>; -+ }; -+ -+ mdio@eb000 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0xeb000 0x1000>; -+ }; -+ -+ fman0_10g_rx0: port@90000 { -+ cell-index = <0>; -+ compatible = "fsl,fman-port-10g-rx"; -+ reg = <0x90000 0x1000>; -+ }; -+ -+ fman0_10g_tx0: port@b0000 { -+ cell-index = <0>; -+ compatible = "fsl,fman-port-10g-tx"; -+ reg = <0xb0000 0x1000>; -+ fsl,qman-channel-id = <0x800>; -+ }; -+ -+ fm1mac9: ethernet@f0000 { -+ cell-index = <0>; -+ compatible = "fsl,fman-memac"; -+ reg = <0xf0000 0x1000>; -+ fsl,port-handles = <&fman0_10g_rx0 &fman0_10g_tx0>; -+ }; -+ -+ mdio@f1000 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0xf1000 0x1000>; -+ }; -+ -+ ptp_timer0: rtc@fe000 { -+ compatible = "fsl,fman-rtc"; -+ reg = <0xfe000 0x1000>; -+ }; -+ }; -+ - dspi0: dspi@2100000 { - compatible = "fsl,ls1043a-dspi", "fsl,ls1021a-v1.0-dspi"; - #address-cells = <1>; -@@ -522,4 +849,138 @@ - }; - }; - -+ fsl,dpaa { -+ compatible = "fsl,ls1043a-dpaa", "simple-bus", "fsl,dpaa"; -+ ethernet@0 { -+ compatible = "fsl,dpa-ethernet"; -+ fsl,fman-mac = <&fm1mac1>; -+ }; -+ ethernet@1 { -+ compatible = "fsl,dpa-ethernet"; -+ fsl,fman-mac = <&fm1mac2>; -+ }; -+ ethernet@2 { -+ compatible = "fsl,dpa-ethernet"; -+ fsl,fman-mac = <&fm1mac3>; -+ }; -+ ethernet@3 { -+ compatible = "fsl,dpa-ethernet"; -+ fsl,fman-mac = <&fm1mac4>; -+ }; -+ ethernet@4 { -+ compatible = "fsl,dpa-ethernet"; -+ fsl,fman-mac = <&fm1mac5>; -+ }; -+ ethernet@5 { -+ compatible = "fsl,dpa-ethernet"; -+ fsl,fman-mac = 
<&fm1mac6>; -+ }; -+ ethernet@8 { -+ compatible = "fsl,dpa-ethernet"; -+ fsl,fman-mac = <&fm1mac9>; -+ }; -+ }; -+ -+ qportals: qman-portals@500000000 { -+ ranges = <0x0 0x5 0x00000000 0x8000000>; -+ }; -+ bportals: bman-portals@508000000 { -+ ranges = <0x0 0x5 0x08000000 0x8000000>; -+ }; -+ reserved-memory { -+ #address-cells = <2>; -+ #size-cells = <2>; -+ ranges; -+ -+ bman_fbpr: bman-fbpr { -+ size = <0 0x1000000>; -+ alignment = <0 0x1000000>; -+ }; -+ qman_fqd: qman-fqd { -+ size = <0 0x400000>; -+ alignment = <0 0x400000>; -+ }; -+ qman_pfdr: qman-pfdr { -+ size = <0 0x2000000>; -+ alignment = <0 0x2000000>; -+ }; -+ }; -+}; -+ -+&fman0 { -+ /* offline - 1 */ -+ port@82000 { -+ fsl,qman-channel-id = <0x809>; -+ }; -+ -+ /* tx - 10g - 2 */ -+ port@a8000 { -+ fsl,qman-channel-id = <0x802>; -+ }; -+ /* tx - 10g - 3 */ -+ port@a9000 { -+ fsl,qman-channel-id = <0x803>; -+ }; -+ /* tx - 1g - 2 */ -+ port@aa000 { -+ fsl,qman-channel-id = <0x804>; -+ }; -+ /* tx - 1g - 3 */ -+ port@ab000 { -+ fsl,qman-channel-id = <0x805>; -+ }; -+ /* tx - 1g - 4 */ -+ port@ac000 { -+ fsl,qman-channel-id = <0x806>; -+ }; -+ /* tx - 1g - 5 */ -+ port@ad000 { -+ fsl,qman-channel-id = <0x807>; -+ }; -+ /* tx - 10g - 0 */ -+ port@b0000 { -+ fsl,qman-channel-id = <0x800>; -+ }; -+ /* tx - 10g - 1 */ -+ port@b1000 { -+ fsl,qman-channel-id = <0x801>; -+ }; -+ /* offline - 2 */ -+ port@83000 { -+ fsl,qman-channel-id = <0x80a>; -+ }; -+ /* offline - 3 */ -+ port@84000 { -+ fsl,qman-channel-id = <0x80b>; -+ }; -+ /* offline - 4 */ -+ port@85000 { -+ fsl,qman-channel-id = <0x80c>; -+ }; -+ /* offline - 5 */ -+ port@86000 { -+ fsl,qman-channel-id = <0x80d>; -+ }; -+ /* offline - 6 */ -+ port@87000 { -+ fsl,qman-channel-id = <0x80e>; -+ }; -+}; -+ -+&bman_fbpr { -+ compatible = "fsl,bman-fbpr"; -+ alloc-ranges = <0 0 0x10000 0>; - }; -+ -+&qman_fqd { -+ compatible = "fsl,qman-fqd"; -+ alloc-ranges = <0 0 0x10000 0>; -+}; -+ -+&qman_pfdr { -+ compatible = "fsl,qman-pfdr"; -+ alloc-ranges = <0 0 0x10000 0>; -+}; -+ -+/include/ "qoriq-qman1-portals.dtsi" -+/include/ "qoriq-bman1-portals.dtsi" ---- /dev/null -+++ b/arch/arm64/boot/dts/freescale/qoriq-bman1-portals.dtsi -@@ -0,0 +1,104 @@ -+/* -+ * QorIQ BMan Portal device tree stub for 10 portals -+ * -+ * Copyright 2011-2016 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+&bportals { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "simple-bus"; -+ bportal0: bman-portal@0 { -+ cell-index = <0>; -+ compatible = "fsl,bman-portal"; -+ reg = <0x0 0x4000 0x4000000 0x4000>; -+ interrupts = <0 173 0x4>; -+ }; -+ bportal1: bman-portal@10000 { -+ cell-index = <1>; -+ compatible = "fsl,bman-portal"; -+ reg = <0x10000 0x4000 0x4010000 0x4000>; -+ interrupts = <0 175 0x4>; -+ }; -+ bportal2: bman-portal@20000 { -+ cell-index = <2>; -+ compatible = "fsl,bman-portal"; -+ reg = <0x20000 0x4000 0x4020000 0x4000>; -+ interrupts = <0 177 0x4>; -+ }; -+ bportal3: bman-portal@30000 { -+ cell-index = <3>; -+ compatible = "fsl,bman-portal"; -+ reg = <0x30000 0x4000 0x4030000 0x4000>; -+ interrupts = <0 179 0x4>; -+ }; -+ bportal4: bman-portal@40000 { -+ cell-index = <4>; -+ compatible = "fsl,bman-portal"; -+ reg = <0x40000 0x4000 0x4040000 0x4000>; -+ interrupts = <0 181 0x4>; -+ }; -+ bportal5: bman-portal@50000 { -+ cell-index = <5>; -+ compatible = "fsl,bman-portal"; -+ reg = <0x50000 0x4000 0x4050000 0x4000>; -+ interrupts = <0 183 0x4>; -+ }; -+ bportal6: bman-portal@60000 { -+ cell-index = <6>; -+ compatible = "fsl,bman-portal"; -+ reg = <0x60000 0x4000 0x4060000 0x4000>; -+ interrupts = <0 185 0x4>; -+ }; -+ bportal7: bman-portal@70000 { -+ cell-index = <7>; -+ compatible = "fsl,bman-portal"; -+ reg = <0x70000 0x4000 0x4070000 0x4000>; -+ interrupts = <0 187 0x4>; -+ }; -+ bportal8: bman-portal@80000 { -+ cell-index = <8>; -+ compatible = "fsl,bman-portal"; -+ reg = <0x80000 0x4000 0x4080000 0x4000>; -+ interrupts = <0 189 0x4>; -+ }; -+/* bportal9: bman-portal@90000 { -+ cell-index = <9>; -+ compatible = "fsl,bman-portal"; -+ reg = <0x90000 0x4000 0x4090000 0x4000>; -+ interrupts = <0 191 0x4>; -+ }; */ -+ bman-bpids@0 { -+ compatible = "fsl,bpid-range"; -+ fsl,bpid-range = <32 32>; -+ }; -+ -+}; ---- /dev/null -+++ b/arch/arm64/boot/dts/freescale/qoriq-qman1-portals.dtsi -@@ -0,0 +1,136 @@ -+/* -+ * QorIQ QMan Portal device tree stub for 10 portals & 15 pool channels -+ * -+ * Copyright 2011-2016 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+&qportals { -+ -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "simple-bus"; -+ qportal0: qman-portal@0 { -+ cell-index = <0>; -+ compatible = "fsl,qman-portal"; -+ reg = <0x0 0x4000 0x4000000 0x4000>; -+ interrupts = <0 172 0x4>; -+ fsl,qman-channel-id = <0x0>; -+ }; -+ -+ qportal1: qman-portal@10000 { -+ cell-index = <1>; -+ compatible = "fsl,qman-portal"; -+ reg = <0x10000 0x4000 0x4010000 0x4000>; -+ interrupts = <0 174 0x4>; -+ fsl,qman-channel-id = <1>; -+ }; -+ -+ qportal2: qman-portal@20000 { -+ cell-index = <2>; -+ compatible = "fsl,qman-portal"; -+ reg = <0x20000 0x4000 0x4020000 0x4000>; -+ interrupts = <0 176 0x4>; -+ fsl,qman-channel-id = <2>; -+ }; -+ -+ qportal3: qman-portal@30000 { -+ cell-index = <3>; -+ compatible = "fsl,qman-portal"; -+ reg = <0x30000 0x4000 0x4030000 0x4000>; -+ interrupts = <0 178 0x4>; -+ fsl,qman-channel-id = <3>; -+ }; -+ -+ qportal4: qman-portal@40000 { -+ cell-index = <4>; -+ compatible = "fsl,qman-portal"; -+ reg = <0x40000 0x4000 0x4040000 0x4000>; -+ interrupts = <0 180 0x4>; -+ fsl,qman-channel-id = <4>; -+ }; -+ -+ qportal5: qman-portal@50000 { -+ cell-index = <5>; -+ compatible = "fsl,qman-portal"; -+ reg = <0x50000 0x4000 0x4050000 0x4000>; -+ interrupts = <0 182 0x4>; -+ fsl,qman-channel-id = <5>; -+ }; -+ -+ qportal6: qman-portal@60000 { -+ cell-index = <6>; -+ compatible = "fsl,qman-portal"; -+ reg = <0x60000 0x4000 0x4060000 0x4000>; -+ interrupts = <0 184 0x4>; -+ fsl,qman-channel-id = <6>; -+ }; -+ -+ qportal7: qman-portal@70000 { -+ cell-index = <7>; -+ compatible = "fsl,qman-portal"; -+ reg = <0x70000 0x4000 0x4070000 0x4000>; -+ interrupts = <0 186 0x4>; -+ fsl,qman-channel-id = <7>; -+ }; -+ -+ qportal8: qman-portal@80000 { -+ cell-index = <8>; -+ compatible = "fsl,qman-portal"; -+ reg = <0x80000 0x4000 0x4080000 0x4000>; -+ interrupts = <0 188 0x4>; -+ fsl,qman-channel-id = <8>; -+ }; -+ -+/* qportal9: qman-portal@90000 { -+ cell-index = <9>; -+ compatible = "fsl,qman-portal"; -+ reg = <0x90000 0x4000 0x4090000 0x4000>; -+ interrupts = <0 190 0x4>; -+ fsl,qman-channel-id = <9>; -+ }; */ -+ -+ qman-fqids@0 { -+ compatible = "fsl,fqid-range"; -+ fsl,fqid-range = <256 256>; -+ }; -+ qman-fqids@1 { -+ compatible = "fsl,fqid-range"; -+ fsl,fqid-range = <32768 32768>; -+ }; -+ qman-pools@0 { -+ compatible = "fsl,pool-channel-range"; -+ fsl,pool-channel-range = <0x401 0xf>; -+ }; -+ qman-cgrids@0 { -+ compatible = "fsl,cgrid-range"; -+ fsl,cgrid-range = <0 256>; -+ }; -+ -+}; -\ No newline at 
end of file diff --git a/target/linux/layerscape/patches-4.4/3013-dts-ls1043ardb-add-mdio-phy-nodes.patch b/target/linux/layerscape/patches-4.4/3013-dts-ls1043ardb-add-mdio-phy-nodes.patch deleted file mode 100644 index 8347b4f84..000000000 --- a/target/linux/layerscape/patches-4.4/3013-dts-ls1043ardb-add-mdio-phy-nodes.patch +++ /dev/null @@ -1,81 +0,0 @@ -From e2b301610e6201df40deb62942b18c772365eb1c Mon Sep 17 00:00:00 2001 -From: Shaohui Xie -Date: Thu, 21 Jan 2016 11:29:22 +0800 -Subject: [PATCH 13/70] dts: ls1043ardb: add mdio & phy nodes - -Signed-off-by: Shaohui Xie ---- - arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts | 65 +++++++++++++++++++++ - 1 file changed, 65 insertions(+) - ---- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts -@@ -115,3 +115,68 @@ - &duart1 { - status = "okay"; - }; -+ -+&fman0 { -+ ethernet@e0000 { -+ phy-handle = <&qsgmii_phy1>; -+ phy-connection-type = "qsgmii"; -+ }; -+ -+ ethernet@e2000 { -+ phy-handle = <&qsgmii_phy2>; -+ phy-connection-type = "qsgmii"; -+ }; -+ -+ ethernet@e4000 { -+ phy-handle = <&rgmii_phy1>; -+ phy-connection-type = "rgmii"; -+ }; -+ -+ ethernet@e6000 { -+ phy-handle = <&rgmii_phy2>; -+ phy-connection-type = "rgmii"; -+ }; -+ -+ ethernet@e8000 { -+ phy-handle = <&qsgmii_phy3>; -+ phy-connection-type = "qsgmii"; -+ }; -+ -+ ethernet@ea000 { -+ phy-handle = <&qsgmii_phy4>; -+ phy-connection-type = "qsgmii"; -+ }; -+ -+ ethernet@f0000 { /* 10GEC1 */ -+ phy-handle = <&aqr105_phy>; -+ phy-connection-type = "xgmii"; -+ }; -+ -+ mdio@fc000 { -+ rgmii_phy1: ethernet-phy@1 { -+ reg = <0x1>; -+ }; -+ rgmii_phy2: ethernet-phy@2 { -+ reg = <0x2>; -+ }; -+ qsgmii_phy1: ethernet-phy@3 { -+ reg = <0x4>; -+ }; -+ qsgmii_phy2: ethernet-phy@4 { -+ reg = <0x5>; -+ }; -+ qsgmii_phy3: ethernet-phy@5 { -+ reg = <0x6>; -+ }; -+ qsgmii_phy4: ethernet-phy@6 { -+ reg = <0x7>; -+ }; -+ }; -+ -+ mdio@fd000 { -+ aqr105_phy: ethernet-phy@c { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ reg = <0x1>; -+ }; -+ }; -+}; diff --git a/target/linux/layerscape/patches-4.4/3022-dt-move-guts-devicetree-doc-out-of-powerpc-directory.patch b/target/linux/layerscape/patches-4.4/3022-dt-move-guts-devicetree-doc-out-of-powerpc-directory.patch deleted file mode 100644 index a3dded373..000000000 --- a/target/linux/layerscape/patches-4.4/3022-dt-move-guts-devicetree-doc-out-of-powerpc-directory.patch +++ /dev/null @@ -1,108 +0,0 @@ -From 0e9d79db770196e94869650d7c4d13ea23937138 Mon Sep 17 00:00:00 2001 -From: Yangbo Lu -Date: Mon, 25 Jan 2016 14:27:27 +0800 -Subject: [PATCH 22/70] dt: move guts devicetree doc out of powerpc directory - -Move guts devicetree doc to Documentation/devicetree/bindings/soc/fsl/ -since it's used by not only PowerPC but also ARM. And add a specification -for 'little-endian' property. 
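(For illustration: the newly documented little-endian property simply extends the existing binding examples quoted below; a guts node on a little-endian SoC might look like the binding's own second example with the property added:

    guts: global-utilities@e0000 {
            compatible = "fsl,qoriq-device-config-2.0";
            reg = <0xe0000 0xe00>;
            fsl,has-rstcr;
            little-endian;
    };

Omitting the property keeps the default big-endian register access.)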
- -Signed-off-by: Yangbo Lu ---- - .../devicetree/bindings/powerpc/fsl/guts.txt | 41 ------------------ - Documentation/devicetree/bindings/soc/fsl/guts.txt | 44 ++++++++++++++++++++ - 2 files changed, 44 insertions(+), 41 deletions(-) - delete mode 100644 Documentation/devicetree/bindings/powerpc/fsl/guts.txt - create mode 100644 Documentation/devicetree/bindings/soc/fsl/guts.txt - ---- a/Documentation/devicetree/bindings/powerpc/fsl/guts.txt -+++ /dev/null -@@ -1,41 +0,0 @@ --* Global Utilities Block -- --The global utilities block controls power management, I/O device --enabling, power-on-reset configuration monitoring, general-purpose --I/O signal configuration, alternate function selection for multiplexed --signals, and clock control. -- --Required properties: -- -- - compatible : Should define the compatible device type for -- global-utilities. -- Possible compatibles: -- "fsl,qoriq-device-config-1.0" -- "fsl,qoriq-device-config-2.0" -- "fsl,-device-config" -- "fsl,-guts" -- - reg : Offset and length of the register set for the device. -- --Recommended properties: -- -- - fsl,has-rstcr : Indicates that the global utilities register set -- contains a functioning "reset control register" (i.e. the board -- is wired to reset upon setting the HRESET_REQ bit in this register). -- -- - fsl,liodn-bits : Indicates the number of defined bits in the LIODN -- registers, for those SOCs that have a PAMU device. -- --Examples: -- global-utilities@e0000 { /* global utilities block */ -- compatible = "fsl,mpc8548-guts"; -- reg = ; -- fsl,has-rstcr; -- }; -- -- guts: global-utilities@e0000 { -- compatible = "fsl,qoriq-device-config-1.0"; -- reg = <0xe0000 0xe00>; -- fsl,has-rstcr; -- #sleep-cells = <1>; -- fsl,liodn-bits = <12>; -- }; ---- /dev/null -+++ b/Documentation/devicetree/bindings/soc/fsl/guts.txt -@@ -0,0 +1,44 @@ -+* Global Utilities Block -+ -+The global utilities block controls power management, I/O device -+enabling, power-on-reset configuration monitoring, general-purpose -+I/O signal configuration, alternate function selection for multiplexed -+signals, and clock control. -+ -+Required properties: -+ -+ - compatible : Should define the compatible device type for -+ global-utilities. -+ Possible compatibles: -+ "fsl,qoriq-device-config-1.0" -+ "fsl,qoriq-device-config-2.0" -+ "fsl,-device-config" -+ "fsl,-guts" -+ - reg : Offset and length of the register set for the device. -+ -+Recommended properties: -+ -+ - fsl,has-rstcr : Indicates that the global utilities register set -+ contains a functioning "reset control register" (i.e. the board -+ is wired to reset upon setting the HRESET_REQ bit in this register). -+ -+ - fsl,liodn-bits : Indicates the number of defined bits in the LIODN -+ registers, for those SOCs that have a PAMU device. -+ -+ - little-endian : Indicates that the global utilities block is little -+ endian. The default is big endian. 
-+ -+Examples: -+ global-utilities@e0000 { /* global utilities block */ -+ compatible = "fsl,mpc8548-guts"; -+ reg = ; -+ fsl,has-rstcr; -+ }; -+ -+ guts: global-utilities@e0000 { -+ compatible = "fsl,qoriq-device-config-1.0"; -+ reg = <0xe0000 0xe00>; -+ fsl,has-rstcr; -+ #sleep-cells = <1>; -+ fsl,liodn-bits = <12>; -+ }; diff --git a/target/linux/layerscape/patches-4.4/3023-powerpc-fsl-move-mpc85xx.h-to-include-linux-fsl.patch b/target/linux/layerscape/patches-4.4/3023-powerpc-fsl-move-mpc85xx.h-to-include-linux-fsl.patch deleted file mode 100644 index 168209f2d..000000000 --- a/target/linux/layerscape/patches-4.4/3023-powerpc-fsl-move-mpc85xx.h-to-include-linux-fsl.patch +++ /dev/null @@ -1,283 +0,0 @@ -From 2d8816af7c19882f62c4a25edb9fcc9040312f96 Mon Sep 17 00:00:00 2001 -From: Yangbo Lu -Date: Tue, 12 Apr 2016 14:21:19 +0800 -Subject: [PATCH 23/70] powerpc/fsl: move mpc85xx.h to include/linux/fsl - -commit 2a76fbe35c14717b4f4a145e0ab83517b1f4ab4a -[context adjustment] -[doesn't apply arch/powerpc/kernel/cpu_setup_fsl_booke.S] -[doesn't apply arch/powerpc/sysdev/mpic_timer.c] - -Move mpc85xx.h to include/linux/fsl and rename it to svr.h as -a common header file. It has been used for mpc85xx and it will -be used for ARM-based SoC as well. - -Signed-off-by: Yangbo Lu -Integrated-by: Zhao Qiang ---- - arch/powerpc/include/asm/mpc85xx.h | 95 ------------------------------ - drivers/clk/clk-qoriq.c | 2 +- - drivers/i2c/busses/i2c-mpc.c | 2 +- - drivers/iommu/fsl_pamu.c | 2 +- - drivers/net/ethernet/freescale/gianfar.c | 2 +- - include/linux/fsl/svr.h | 95 ++++++++++++++++++++++++++++++ - 6 files changed, 99 insertions(+), 99 deletions(-) - delete mode 100644 arch/powerpc/include/asm/mpc85xx.h - create mode 100644 include/linux/fsl/svr.h - ---- a/arch/powerpc/include/asm/mpc85xx.h -+++ /dev/null -@@ -1,95 +0,0 @@ --/* -- * MPC85xx cpu type detection -- * -- * Copyright 2011-2012 Freescale Semiconductor, Inc. -- * -- * This is free software; you can redistribute it and/or modify -- * it under the terms of the GNU General Public License as published by -- * the Free Software Foundation; either version 2 of the License, or -- * (at your option) any later version. 
-- */ -- --#ifndef __ASM_PPC_MPC85XX_H --#define __ASM_PPC_MPC85XX_H -- --#define SVR_REV(svr) ((svr) & 0xFF) /* SOC design resision */ --#define SVR_MAJ(svr) (((svr) >> 4) & 0xF) /* Major revision field*/ --#define SVR_MIN(svr) (((svr) >> 0) & 0xF) /* Minor revision field*/ -- --/* Some parts define SVR[0:23] as the SOC version */ --#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */ -- --#define SVR_8533 0x803400 --#define SVR_8535 0x803701 --#define SVR_8536 0x803700 --#define SVR_8540 0x803000 --#define SVR_8541 0x807200 --#define SVR_8543 0x803200 --#define SVR_8544 0x803401 --#define SVR_8545 0x803102 --#define SVR_8547 0x803101 --#define SVR_8548 0x803100 --#define SVR_8555 0x807100 --#define SVR_8560 0x807000 --#define SVR_8567 0x807501 --#define SVR_8568 0x807500 --#define SVR_8569 0x808000 --#define SVR_8572 0x80E000 --#define SVR_P1010 0x80F100 --#define SVR_P1011 0x80E500 --#define SVR_P1012 0x80E501 --#define SVR_P1013 0x80E700 --#define SVR_P1014 0x80F101 --#define SVR_P1017 0x80F700 --#define SVR_P1020 0x80E400 --#define SVR_P1021 0x80E401 --#define SVR_P1022 0x80E600 --#define SVR_P1023 0x80F600 --#define SVR_P1024 0x80E402 --#define SVR_P1025 0x80E403 --#define SVR_P2010 0x80E300 --#define SVR_P2020 0x80E200 --#define SVR_P2040 0x821000 --#define SVR_P2041 0x821001 --#define SVR_P3041 0x821103 --#define SVR_P4040 0x820100 --#define SVR_P4080 0x820000 --#define SVR_P5010 0x822100 --#define SVR_P5020 0x822000 --#define SVR_P5021 0X820500 --#define SVR_P5040 0x820400 --#define SVR_T4240 0x824000 --#define SVR_T4120 0x824001 --#define SVR_T4160 0x824100 --#define SVR_T4080 0x824102 --#define SVR_C291 0x850000 --#define SVR_C292 0x850020 --#define SVR_C293 0x850030 --#define SVR_B4860 0X868000 --#define SVR_G4860 0x868001 --#define SVR_G4060 0x868003 --#define SVR_B4440 0x868100 --#define SVR_G4440 0x868101 --#define SVR_B4420 0x868102 --#define SVR_B4220 0x868103 --#define SVR_T1040 0x852000 --#define SVR_T1041 0x852001 --#define SVR_T1042 0x852002 --#define SVR_T1020 0x852100 --#define SVR_T1021 0x852101 --#define SVR_T1022 0x852102 --#define SVR_T2080 0x853000 --#define SVR_T2081 0x853100 -- --#define SVR_8610 0x80A000 --#define SVR_8641 0x809000 --#define SVR_8641D 0x809001 -- --#define SVR_9130 0x860001 --#define SVR_9131 0x860000 --#define SVR_9132 0x861000 --#define SVR_9232 0x861400 -- --#define SVR_Unknown 0xFFFFFF -- --#endif ---- a/drivers/clk/clk-qoriq.c -+++ b/drivers/clk/clk-qoriq.c -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -1155,7 +1156,6 @@ bad_args: - } - - #ifdef CONFIG_PPC --#include - - static const u32 a4510_svrs[] __initconst = { - (SVR_P2040 << 8) | 0x10, /* P2040 1.0 */ ---- a/drivers/i2c/busses/i2c-mpc.c -+++ b/drivers/i2c/busses/i2c-mpc.c -@@ -27,9 +27,9 @@ - #include - #include - #include -+#include - - #include --#include - #include - - #define DRV_NAME "mpc-i2c" ---- a/drivers/iommu/fsl_pamu.c -+++ b/drivers/iommu/fsl_pamu.c -@@ -21,10 +21,10 @@ - #include "fsl_pamu.h" - - #include -+#include - #include - #include - --#include - - /* define indexes for each operation mapping scenario */ - #define OMI_QMAN 0x00 ---- a/drivers/net/ethernet/freescale/gianfar.c -+++ b/drivers/net/ethernet/freescale/gianfar.c -@@ -86,11 +86,11 @@ - #include - #include - #include -+#include - - #include - #ifdef CONFIG_PPC - #include --#include - #endif - #include - #include ---- /dev/null -+++ b/include/linux/fsl/svr.h -@@ -0,0 +1,95 @@ -+/* -+ * MPC85xx cpu type detection -+ * -+ * 
Copyright 2011-2012 Freescale Semiconductor, Inc. -+ * -+ * This is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ */ -+ -+#ifndef FSL_SVR_H -+#define FSL_SVR_H -+ -+#define SVR_REV(svr) ((svr) & 0xFF) /* SOC design resision */ -+#define SVR_MAJ(svr) (((svr) >> 4) & 0xF) /* Major revision field*/ -+#define SVR_MIN(svr) (((svr) >> 0) & 0xF) /* Minor revision field*/ -+ -+/* Some parts define SVR[0:23] as the SOC version */ -+#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */ -+ -+#define SVR_8533 0x803400 -+#define SVR_8535 0x803701 -+#define SVR_8536 0x803700 -+#define SVR_8540 0x803000 -+#define SVR_8541 0x807200 -+#define SVR_8543 0x803200 -+#define SVR_8544 0x803401 -+#define SVR_8545 0x803102 -+#define SVR_8547 0x803101 -+#define SVR_8548 0x803100 -+#define SVR_8555 0x807100 -+#define SVR_8560 0x807000 -+#define SVR_8567 0x807501 -+#define SVR_8568 0x807500 -+#define SVR_8569 0x808000 -+#define SVR_8572 0x80E000 -+#define SVR_P1010 0x80F100 -+#define SVR_P1011 0x80E500 -+#define SVR_P1012 0x80E501 -+#define SVR_P1013 0x80E700 -+#define SVR_P1014 0x80F101 -+#define SVR_P1017 0x80F700 -+#define SVR_P1020 0x80E400 -+#define SVR_P1021 0x80E401 -+#define SVR_P1022 0x80E600 -+#define SVR_P1023 0x80F600 -+#define SVR_P1024 0x80E402 -+#define SVR_P1025 0x80E403 -+#define SVR_P2010 0x80E300 -+#define SVR_P2020 0x80E200 -+#define SVR_P2040 0x821000 -+#define SVR_P2041 0x821001 -+#define SVR_P3041 0x821103 -+#define SVR_P4040 0x820100 -+#define SVR_P4080 0x820000 -+#define SVR_P5010 0x822100 -+#define SVR_P5020 0x822000 -+#define SVR_P5021 0X820500 -+#define SVR_P5040 0x820400 -+#define SVR_T4240 0x824000 -+#define SVR_T4120 0x824001 -+#define SVR_T4160 0x824100 -+#define SVR_T4080 0x824102 -+#define SVR_C291 0x850000 -+#define SVR_C292 0x850020 -+#define SVR_C293 0x850030 -+#define SVR_B4860 0X868000 -+#define SVR_G4860 0x868001 -+#define SVR_G4060 0x868003 -+#define SVR_B4440 0x868100 -+#define SVR_G4440 0x868101 -+#define SVR_B4420 0x868102 -+#define SVR_B4220 0x868103 -+#define SVR_T1040 0x852000 -+#define SVR_T1041 0x852001 -+#define SVR_T1042 0x852002 -+#define SVR_T1020 0x852100 -+#define SVR_T1021 0x852101 -+#define SVR_T1022 0x852102 -+#define SVR_T2080 0x853000 -+#define SVR_T2081 0x853100 -+ -+#define SVR_8610 0x80A000 -+#define SVR_8641 0x809000 -+#define SVR_8641D 0x809001 -+ -+#define SVR_9130 0x860001 -+#define SVR_9131 0x860000 -+#define SVR_9132 0x861000 -+#define SVR_9232 0x861400 -+ -+#define SVR_Unknown 0xFFFFFF -+ -+#endif diff --git a/target/linux/layerscape/patches-4.4/3025-arm64-dts-align-to-the-new-clocking-model.patch b/target/linux/layerscape/patches-4.4/3025-arm64-dts-align-to-the-new-clocking-model.patch deleted file mode 100644 index 51d1b6485..000000000 --- a/target/linux/layerscape/patches-4.4/3025-arm64-dts-align-to-the-new-clocking-model.patch +++ /dev/null @@ -1,25 +0,0 @@ -From caddf479c2deacf3d681a84db56ff164d8a5c9f7 Mon Sep 17 00:00:00 2001 -From: Madalin Bucur -Date: Wed, 23 Mar 2016 21:25:30 +0200 -Subject: [PATCH 25/70] arm64/dts: align to the new clocking model - -Signed-off-by: Madalin Bucur ---- - arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - ---- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi -@@ -203,9 +203,9 @@ - 
compatible = "fsl,fman", "simple-bus"; - ranges = <0x0 0x00 0x1a00000 0x100000>; - reg = <0x00 0x1a00000 0x0 0x100000>; -- clock-frequency = <0>; -- interrupts = <0 44 0x4>, -- <0 45 0x4>; -+ interrupts = <0 44 0x4>, <0 45 0x4>; -+ clocks = <&clockgen 3 0>; -+ clock-names = "fmanclk"; - - cc { - compatible = "fsl,fman-cc"; diff --git a/target/linux/layerscape/patches-4.4/3028-dts-ls1043-update-dts-for-ls1043.patch b/target/linux/layerscape/patches-4.4/3028-dts-ls1043-update-dts-for-ls1043.patch deleted file mode 100644 index 0517557eb..000000000 --- a/target/linux/layerscape/patches-4.4/3028-dts-ls1043-update-dts-for-ls1043.patch +++ /dev/null @@ -1,523 +0,0 @@ -From ad6176d72132d020317db1496be1485056ac88d7 Mon Sep 17 00:00:00 2001 -From: Liu Gang -Date: Mon, 6 Jun 2016 15:46:00 +0800 -Subject: [PATCH 28/70] dts/ls1043: update dts for ls1043 - -Signed-off-by: Zhao Qiang ---- - arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts | 59 +++++ - arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 264 +++++++++++++++++++- - .../boot/dts/freescale/qoriq-qman1-portals.dtsi | 10 +- - 3 files changed, 321 insertions(+), 12 deletions(-) - ---- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts -@@ -50,6 +50,10 @@ - / { - model = "LS1043A RDB Board"; - compatible = "fsl,ls1043a-rdb", "fsl,ls1043a"; -+ -+ aliases { -+ crypto = &crypto; -+ }; - }; - - &i2c0 { -@@ -108,6 +112,35 @@ - }; - }; - -+&dspi0 { -+ bus-num = <0>; -+ status = "okay"; -+ -+ flash@0 { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "n25q128a13", "jedec,spi-nor"; /* 16MB */ -+ reg = <0>; -+ spi-max-frequency = <1000000>; /* input clock */ -+ }; -+ -+ slic@2 { -+ compatible = "maxim,ds26522"; -+ reg = <2>; -+ spi-max-frequency = <2000000>; -+ fsl,spi-cs-sck-delay = <100>; -+ fsl,spi-sck-cs-delay = <50>; -+ }; -+ -+ slic@3 { -+ compatible = "maxim,ds26522"; -+ reg = <3>; -+ spi-max-frequency = <2000000>; -+ fsl,spi-cs-sck-delay = <100>; -+ fsl,spi-sck-cs-delay = <50>; -+ }; -+}; -+ - &duart0 { - status = "okay"; - }; -@@ -176,7 +209,33 @@ - mdio@fd000 { - aqr105_phy: ethernet-phy@c { - compatible = "ethernet-phy-ieee802.3-c45"; -+ interrupts = <0 132 4>; - reg = <0x1>; - }; - }; - }; -+ -+&uqe { -+ ucc_hdlc: ucc@2000 { -+ compatible = "fsl,ucc_hdlc"; -+ rx-clock-name = "clk8"; -+ tx-clock-name = "clk9"; -+ fsl,rx-sync-clock = "rsync_pin"; -+ fsl,tx-sync-clock = "tsync_pin"; -+ fsl,tx-timeslot = <0xfffffffe>; -+ fsl,rx-timeslot = <0xfffffffe>; -+ fsl,tdm-framer-type = "e1"; -+ fsl,tdm-mode = "normal"; -+ fsl,tdm-id = <0>; -+ fsl,siram-entry-id = <0>; -+ fsl,tdm-interface; -+ }; -+ -+ ucc_serial: ucc@2200 { -+ device_type = "serial"; -+ compatible = "ucc_uart"; -+ port-number = <0>; -+ rx-clock-name = "brg2"; -+ tx-clock-name = "brg2"; -+ }; -+}; ---- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi -@@ -44,6 +44,8 @@ - * OTHER DEALINGS IN THE SOFTWARE. 
- */ - -+#include -+ - / { - compatible = "fsl,ls1043a"; - interrupt-parent = <&gic>; -@@ -75,6 +77,7 @@ - compatible = "arm,cortex-a53"; - reg = <0x0>; - clocks = <&clockgen 1 0>; -+ #cooling-cells = <2>; - }; - - cpu1: cpu@1 { -@@ -118,6 +121,8 @@ - <1 14 0x1>, /* Physical Non-Secure PPI */ - <1 11 0x1>, /* Virtual PPI */ - <1 10 0x1>; /* Hypervisor PPI */ -+ arm,reread-timer; -+ fsl,erratum-a008585; - }; - - pmu { -@@ -162,11 +167,64 @@ - big-endian; - }; - -+ crypto: crypto@1700000 { -+ compatible = "fsl,sec-v5.4", "fsl,sec-v5.0", -+ "fsl,sec-v4.0"; -+ fsl,sec-era = <3>; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ ranges = <0x0 0x00 0x1700000 0x100000>; -+ reg = <0x00 0x1700000 0x0 0x100000>; -+ interrupts = <0 75 0x4>; -+ -+ sec_jr0: jr@10000 { -+ compatible = "fsl,sec-v5.4-job-ring", -+ "fsl,sec-v5.0-job-ring", -+ "fsl,sec-v4.0-job-ring"; -+ reg = <0x10000 0x10000>; -+ interrupts = <0 71 0x4>; -+ }; -+ -+ sec_jr1: jr@20000 { -+ compatible = "fsl,sec-v5.4-job-ring", -+ "fsl,sec-v5.0-job-ring", -+ "fsl,sec-v4.0-job-ring"; -+ reg = <0x20000 0x10000>; -+ interrupts = <0 72 0x4>; -+ }; -+ -+ sec_jr2: jr@30000 { -+ compatible = "fsl,sec-v5.4-job-ring", -+ "fsl,sec-v5.0-job-ring", -+ "fsl,sec-v4.0-job-ring"; -+ interrupts = <0 73 0x4>; -+ }; -+ -+ sec_jr3: jr@40000 { -+ compatible = "fsl,sec-v5.4-job-ring", -+ "fsl,sec-v5.0-job-ring", -+ "fsl,sec-v4.0-job-ring"; -+ reg = <0x40000 0x10000>; -+ interrupts = <0 74 0x4>; -+ }; -+ }; -+ - dcfg: dcfg@1ee0000 { - compatible = "fsl,ls1043a-dcfg", "syscon"; - reg = <0x0 0x1ee0000 0x0 0x10000>; - }; - -+ reset: reset@1EE00B0 { -+ compatible = "fsl,ls-reset"; -+ reg = <0x0 0x1EE00B0 0x0 0x4>; -+ big-endian; -+ }; -+ -+ rcpm: rcpm@1ee2000 { -+ compatible = "fsl,ls1043a-rcpm", "fsl,qoriq-rcpm-2.1"; -+ reg = <0x0 0x1ee2000 0x0 0x10000>; -+ }; -+ - ifc: ifc@1530000 { - compatible = "fsl,ifc", "simple-bus"; - reg = <0x0 0x1530000 0x0 0x10000>; -@@ -501,6 +559,82 @@ - }; - }; - -+ tmu: tmu@1f00000 { -+ compatible = "fsl,qoriq-tmu", "fsl,ls1043a-tmu"; -+ reg = <0x0 0x1f00000 0x0 0x10000>; -+ interrupts = <0 33 0x4>; -+ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>; -+ fsl,tmu-calibration = <0x00000000 0x00000026 -+ 0x00000001 0x0000002d -+ 0x00000002 0x00000032 -+ 0x00000003 0x00000039 -+ 0x00000004 0x0000003f -+ 0x00000005 0x00000046 -+ 0x00000006 0x0000004d -+ 0x00000007 0x00000054 -+ 0x00000008 0x0000005a -+ 0x00000009 0x00000061 -+ 0x0000000a 0x0000006a -+ 0x0000000b 0x00000071 -+ -+ 0x00010000 0x00000025 -+ 0x00010001 0x0000002c -+ 0x00010002 0x00000035 -+ 0x00010003 0x0000003d -+ 0x00010004 0x00000045 -+ 0x00010005 0x0000004e -+ 0x00010006 0x00000057 -+ 0x00010007 0x00000061 -+ 0x00010008 0x0000006b -+ 0x00010009 0x00000076 -+ -+ 0x00020000 0x00000029 -+ 0x00020001 0x00000033 -+ 0x00020002 0x0000003d -+ 0x00020003 0x00000049 -+ 0x00020004 0x00000056 -+ 0x00020005 0x00000061 -+ 0x00020006 0x0000006d -+ -+ 0x00030000 0x00000021 -+ 0x00030001 0x0000002a -+ 0x00030002 0x0000003c -+ 0x00030003 0x0000004e>; -+ big-endian; -+ #thermal-sensor-cells = <1>; -+ }; -+ -+ thermal-zones { -+ cpu_thermal: cpu-thermal { -+ polling-delay-passive = <1000>; -+ polling-delay = <5000>; -+ -+ thermal-sensors = <&tmu 3>; -+ -+ trips { -+ cpu_alert: cpu-alert { -+ temperature = <85000>; -+ hysteresis = <2000>; -+ type = "passive"; -+ }; -+ cpu_crit: cpu-crit { -+ temperature = <95000>; -+ hysteresis = <2000>; -+ type = "critical"; -+ }; -+ }; -+ -+ cooling-maps { -+ map0 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu0 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; 
-+ }; -+ }; -+ }; -+ }; -+ - dspi0: dspi@2100000 { - compatible = "fsl,ls1043a-dspi", "fsl,ls1021a-v1.0-dspi"; - #address-cells = <1>; -@@ -527,6 +661,20 @@ - status = "disabled"; - }; - -+ qspi: quadspi@1550000 { -+ compatible = "fsl,ls1043a-qspi", "fsl,ls1021a-qspi"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x1550000 0x0 0x10000>, -+ <0x0 0x40000000 0x0 0x4000000>; -+ reg-names = "QuadSPI", "QuadSPI-memory"; -+ interrupts = <0 99 0x4>; -+ clock-names = "qspi_en", "qspi"; -+ clocks = <&clockgen 4 0>, <&clockgen 4 0>; -+ big-endian; -+ status = "disabled"; -+ }; -+ - i2c0: i2c@2180000 { - compatible = "fsl,vf610-i2c"; - #address-cells = <1>; -@@ -602,8 +750,8 @@ - clocks = <&clockgen 4 0>; - }; - -- gpio1: gpio@2300000 { -- compatible = "fsl,ls1043a-gpio"; -+ gpio0: gpio@2300000 { -+ compatible = "fsl,qoriq-gpio"; - reg = <0x0 0x2300000 0x0 0x10000>; - interrupts = <0 66 0x4>; - gpio-controller; -@@ -612,8 +760,8 @@ - #interrupt-cells = <2>; - }; - -- gpio2: gpio@2310000 { -- compatible = "fsl,ls1043a-gpio"; -+ gpio1: gpio@2310000 { -+ compatible = "fsl,qoriq-gpio"; - reg = <0x0 0x2310000 0x0 0x10000>; - interrupts = <0 67 0x4>; - gpio-controller; -@@ -622,8 +770,8 @@ - #interrupt-cells = <2>; - }; - -- gpio3: gpio@2320000 { -- compatible = "fsl,ls1043a-gpio"; -+ gpio2: gpio@2320000 { -+ compatible = "fsl,qoriq-gpio"; - reg = <0x0 0x2320000 0x0 0x10000>; - interrupts = <0 68 0x4>; - gpio-controller; -@@ -632,8 +780,8 @@ - #interrupt-cells = <2>; - }; - -- gpio4: gpio@2330000 { -- compatible = "fsl,ls1043a-gpio"; -+ gpio3: gpio@2330000 { -+ compatible = "fsl,qoriq-gpio"; - reg = <0x0 0x2330000 0x0 0x10000>; - interrupts = <0 134 0x4>; - gpio-controller; -@@ -642,6 +790,70 @@ - #interrupt-cells = <2>; - }; - -+ uqe: uqe@2400000 { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ device_type = "qe"; -+ compatible = "fsl,qe", "simple-bus"; -+ ranges = <0x0 0x0 0x2400000 0x40000>; -+ reg = <0x0 0x2400000 0x0 0x480>; -+ brg-frequency = <100000000>; -+ bus-frequency = <200000000>; -+ -+ fsl,qe-num-riscs = <1>; -+ fsl,qe-num-snums = <28>; -+ -+ qeic: qeic@80 { -+ compatible = "fsl,qe-ic"; -+ reg = <0x80 0x80>; -+ #address-cells = <0>; -+ interrupt-controller; -+ #interrupt-cells = <1>; -+ interrupts = <0 77 0x04 0 77 0x04>; -+ }; -+ -+ si1: si@700 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,qe-si"; -+ reg = <0x700 0x80>; -+ }; -+ -+ siram1: siram@1000 { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "fsl,qe-siram"; -+ reg = <0x1000 0x800>; -+ }; -+ -+ ucc@2000 { -+ cell-index = <1>; -+ reg = <0x2000 0x200>; -+ interrupts = <32>; -+ interrupt-parent = <&qeic>; -+ }; -+ -+ ucc@2200 { -+ cell-index = <3>; -+ reg = <0x2200 0x200>; -+ interrupts = <34>; -+ interrupt-parent = <&qeic>; -+ }; -+ -+ muram@10000 { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "fsl,qe-muram", "fsl,cpm-muram"; -+ ranges = <0x0 0x10000 0x6000>; -+ -+ data-only@0 { -+ compatible = "fsl,qe-muram-data", -+ "fsl,cpm-muram-data"; -+ reg = <0x0 0x6000>; -+ }; -+ }; -+ }; -+ - lpuart0: serial@2950000 { - compatible = "fsl,ls1021a-lpuart"; - reg = <0x0 0x2950000 0x0 0x1000>; -@@ -696,6 +908,15 @@ - status = "disabled"; - }; - -+ ftm0: ftm0@29d0000 { -+ compatible = "fsl,ftm-alarm"; -+ reg = <0x0 0x29d0000 0x0 0x10000>; -+ interrupts = <0 86 0x4>; -+ big-endian; -+ rcpm-wakeup = <&rcpm 0x0 0x20000000>; -+ status = "okay"; -+ }; -+ - wdog0: wdog@2ad0000 { - compatible = "fsl,ls1043a-wdt", "fsl,imx21-wdt"; - reg = <0x0 0x2ad0000 0x0 0x10000>; -@@ -726,6 +947,8 
@@ - reg = <0x0 0x2f00000 0x0 0x10000>; - interrupts = <0 60 0x4>; - dr_mode = "host"; -+ configure-gfladj; -+ snps,dis_rxdet_inp3_quirk; - }; - - usb1: usb3@3000000 { -@@ -733,6 +956,8 @@ - reg = <0x0 0x3000000 0x0 0x10000>; - interrupts = <0 61 0x4>; - dr_mode = "host"; -+ configure-gfladj; -+ snps,dis_rxdet_inp3_quirk; - }; - - usb2: usb3@3100000 { -@@ -740,6 +965,8 @@ - reg = <0x0 0x3100000 0x0 0x10000>; - interrupts = <0 63 0x4>; - dr_mode = "host"; -+ configure-gfladj; -+ snps,dis_rxdet_inp3_quirk; - }; - - sata: sata@3200000 { -@@ -749,6 +976,20 @@ - clocks = <&clockgen 4 0>; - }; - -+ qdma: qdma@8380000 { -+ compatible = "fsl,ls1021a-qdma", "fsl,ls1043a-qdma"; -+ reg = <0x0 0x838f000 0x0 0x11000 /* Controller regs */ -+ 0x0 0x83a0000 0x0 0x40000>; /* Block regs */ -+ interrupts = <0 152 0x4>, -+ <0 39 0x4>; -+ interrupt-names = "qdma-error", "qdma-queue"; -+ channels = <8>; -+ queues = <2>; -+ status-sizes = <64>; -+ queue-sizes = <64 64>; -+ big-endian; -+ }; -+ - msi1: msi-controller1@1571000 { - compatible = "fsl,1s1043a-msi"; - reg = <0x0 0x1571000 0x0 0x4>, -@@ -787,6 +1028,7 @@ - #address-cells = <3>; - #size-cells = <2>; - device_type = "pci"; -+ dma-coherent; - num-lanes = <4>; - bus-range = <0x0 0xff>; - ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ -@@ -811,6 +1053,7 @@ - #address-cells = <3>; - #size-cells = <2>; - device_type = "pci"; -+ dma-coherent; - num-lanes = <2>; - bus-range = <0x0 0xff>; - ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ -@@ -835,6 +1078,7 @@ - #address-cells = <3>; - #size-cells = <2>; - device_type = "pci"; -+ dma-coherent; - num-lanes = <2>; - bus-range = <0x0 0xff>; - ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */ -@@ -897,8 +1141,8 @@ - alignment = <0 0x1000000>; - }; - qman_fqd: qman-fqd { -- size = <0 0x400000>; -- alignment = <0 0x400000>; -+ size = <0 0x800000>; -+ alignment = <0 0x800000>; - }; - qman_pfdr: qman-pfdr { - size = <0 0x2000000>; ---- a/arch/arm64/boot/dts/freescale/qoriq-qman1-portals.dtsi -+++ b/arch/arm64/boot/dts/freescale/qoriq-qman1-portals.dtsi -@@ -132,5 +132,11 @@ - compatible = "fsl,cgrid-range"; - fsl,cgrid-range = <0 256>; - }; -- --}; -\ No newline at end of file -+ qman-ceetm@0 { -+ compatible = "fsl,qman-ceetm"; -+ fsl,ceetm-lfqid-range = <0xf00000 0x1000>; -+ fsl,ceetm-sp-range = <0 12>; -+ fsl,ceetm-lni-range = <0 8>; -+ fsl,ceetm-channel-range = <0 32>; -+ }; -+}; diff --git a/target/linux/layerscape/patches-4.4/3032-arm64-Add-pdev_archdata-for-dmamask.patch b/target/linux/layerscape/patches-4.4/3032-arm64-Add-pdev_archdata-for-dmamask.patch deleted file mode 100644 index 524687254..000000000 --- a/target/linux/layerscape/patches-4.4/3032-arm64-Add-pdev_archdata-for-dmamask.patch +++ /dev/null @@ -1,51 +0,0 @@ -From f7e48669bb75f3b52e9f3ce1a5d885c49b7c4712 Mon Sep 17 00:00:00 2001 -From: Cristian Sovaiala -Date: Thu, 4 Jun 2015 18:27:20 +0300 -Subject: [PATCH 32/70] arm64: Add pdev_archdata for dmamask - -The dma_mask for a device structure is a pointer. This pointer -needs to be set up before the dma mask can actually be set. Most -frameworks in the kernel take care of setting this up properly but -platform devices that don't follow a regular bus structure may not -ever have this set. As a result, checks such as dma_capable will -always return false on a raw platform device and dma_set_mask will -always return -EIO. 
Fix this by adding a dma_mask in the -platform_device archdata and setting it to be the dma_mask. Devices -used in other frameworks can change this as needed. - -Signed-off-by: Laura Abbott ---- - arch/arm64/include/asm/device.h | 1 + - arch/arm64/kernel/setup.c | 7 +++++++ - 2 files changed, 8 insertions(+) - ---- a/arch/arm64/include/asm/device.h -+++ b/arch/arm64/include/asm/device.h -@@ -25,6 +25,7 @@ struct dev_archdata { - }; - - struct pdev_archdata { -+ u64 dma_mask; - }; - - #endif ---- a/arch/arm64/kernel/setup.c -+++ b/arch/arm64/kernel/setup.c -@@ -44,6 +44,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -381,3 +382,9 @@ static int __init topology_init(void) - return 0; - } - subsys_initcall(topology_init); -+ -+void arch_setup_pdev_archdata(struct platform_device *pdev) -+{ -+ pdev->archdata.dma_mask = DMA_BIT_MASK(32); -+ pdev->dev.dma_mask = &pdev->archdata.dma_mask; -+} diff --git a/target/linux/layerscape/patches-4.4/3033-arm64-add-ioremap-for-normal-cacheable-non-shareable.patch b/target/linux/layerscape/patches-4.4/3033-arm64-add-ioremap-for-normal-cacheable-non-shareable.patch deleted file mode 100644 index 3dd0548b8..000000000 --- a/target/linux/layerscape/patches-4.4/3033-arm64-add-ioremap-for-normal-cacheable-non-shareable.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 6ef5cf7b8f6b86fb3856f3449f1ad431118e5c9d Mon Sep 17 00:00:00 2001 -From: Haiying Wang -Date: Wed, 22 Apr 2015 13:07:25 -0400 -Subject: [PATCH 33/70] arm64: add ioremap for normal cacheable non-shareable - memory - -[context adjustment] - -Signed-off-by: Haiying Wang -Change-Id: Iab7413f182a64bd6ad4707dd1d6254d04f51a3b1 -Reviewed-on: http://git.am.freescale.net:8181/35486 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Roy Pledge -Reviewed-by: Stuart Yoder -Integrated-by: Jiang Yutang ---- - arch/arm64/include/asm/io.h | 2 ++ - arch/arm64/include/asm/pgtable.h | 2 ++ - 2 files changed, 4 insertions(+) - ---- a/arch/arm64/include/asm/io.h -+++ b/arch/arm64/include/asm/io.h -@@ -171,6 +171,8 @@ extern void __iomem *ioremap_cache(phys_ - #define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) - #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) - #define ioremap_wt(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) -+#define ioremap_cache_ns(addr, size) __ioremap((addr), (size), \ -+ __pgprot(PROT_NORMAL_NS)) - #define iounmap __iounmap - - /* ---- a/arch/arm64/include/asm/pgtable.h -+++ b/arch/arm64/include/asm/pgtable.h -@@ -74,6 +74,8 @@ extern void __pgd_error(const char *file - #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC)) - #define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT)) - #define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL)) -+#define PROT_NORMAL_NS (PTE_TYPE_PAGE | PTE_AF | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | \ -+ PTE_ATTRINDX(MT_NORMAL)) - - #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE)) - #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) diff --git a/target/linux/layerscape/patches-4.4/3034-arm64-add-support-to-remap-kernel-cacheable-memory-t.patch b/target/linux/layerscape/patches-4.4/3034-arm64-add-support-to-remap-kernel-cacheable-memory-t.patch deleted file mode 100644 index 
33479dcf6..000000000 --- a/target/linux/layerscape/patches-4.4/3034-arm64-add-support-to-remap-kernel-cacheable-memory-t.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 52bffb91d40a3090ecf9138fadca97f77c99afa4 Mon Sep 17 00:00:00 2001 -From: Haiying Wang -Date: Wed, 22 Apr 2015 13:09:47 -0400 -Subject: [PATCH 34/70] arm64: add support to remap kernel cacheable memory to - userspace - -Signed-off-by: Haiying Wang -Change-Id: I50ee4798a2929932fa9ff7c9cdb42cd1a215f77a -Reviewed-on: http://git.am.freescale.net:8181/35488 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Roy Pledge -Reviewed-by: Stuart Yoder ---- - arch/arm64/include/asm/pgtable.h | 3 +++ - 1 file changed, 3 insertions(+) - ---- a/arch/arm64/include/asm/pgtable.h -+++ b/arch/arm64/include/asm/pgtable.h -@@ -389,6 +389,9 @@ static inline int has_transparent_hugepa - __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN) - #define pgprot_writecombine(prot) \ - __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) -+#define pgprot_cached(prot) \ -+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL) | \ -+ PTE_PXN | PTE_UXN) - #define pgprot_device(prot) \ - __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN) - #define __HAVE_PHYS_MEM_ACCESS_PROT diff --git a/target/linux/layerscape/patches-4.4/3035-arm64-pgtable-add-support-to-map-cacheable-and-non-s.patch b/target/linux/layerscape/patches-4.4/3035-arm64-pgtable-add-support-to-map-cacheable-and-non-s.patch deleted file mode 100644 index 841d5efd0..000000000 --- a/target/linux/layerscape/patches-4.4/3035-arm64-pgtable-add-support-to-map-cacheable-and-non-s.patch +++ /dev/null @@ -1,22 +0,0 @@ -From 1d6d5b6d2363cf5a0d175f086209fb208692ec00 Mon Sep 17 00:00:00 2001 -From: Haiying Wang -Date: Sat, 8 Aug 2015 07:25:02 -0400 -Subject: [PATCH 35/70] arm64/pgtable: add support to map cacheable and non - shareable memory - -Signed-off-by: Haiying Wang ---- - arch/arm64/include/asm/pgtable.h | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/arch/arm64/include/asm/pgtable.h -+++ b/arch/arm64/include/asm/pgtable.h -@@ -392,6 +392,8 @@ static inline int has_transparent_hugepa - #define pgprot_cached(prot) \ - __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL) | \ - PTE_PXN | PTE_UXN) -+#define pgprot_cached_ns(prot) \ -+ __pgprot(pgprot_val(pgprot_cached(prot)) ^ PTE_SHARED) - #define pgprot_device(prot) \ - __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN) - #define __HAVE_PHYS_MEM_ACCESS_PROT diff --git a/target/linux/layerscape/patches-4.4/3039-arch-arm-add-ARM-specific-fucntions-required-for-ehc.patch b/target/linux/layerscape/patches-4.4/3039-arch-arm-add-ARM-specific-fucntions-required-for-ehc.patch deleted file mode 100644 index 0eac882c3..000000000 --- a/target/linux/layerscape/patches-4.4/3039-arch-arm-add-ARM-specific-fucntions-required-for-ehc.patch +++ /dev/null @@ -1,79 +0,0 @@ -From 03eea243622d85d59653ee076ce43ac0653dc51d Mon Sep 17 00:00:00 2001 -From: Zhao Qiang -Date: Fri, 10 Oct 2014 10:38:48 +0800 -Subject: [PATCH 39/70] arch: arm: add ARM specific fucntions required for - ehci fsl driver - -Add below functions for ARM platform which are used by ehci fsl driver: -1. spin_event_timeout function -2. 
set/clear bits functions - -Signed-off-by: Zhao Qiang -Signed-off-by: Rajesh Bhagat ---- - arch/arm/include/asm/delay.h | 16 ++++++++++++++++ - arch/arm/include/asm/io.h | 28 ++++++++++++++++++++++++++++ - 2 files changed, 44 insertions(+) - ---- a/arch/arm/include/asm/delay.h -+++ b/arch/arm/include/asm/delay.h -@@ -57,6 +57,22 @@ extern void __bad_udelay(void); - __const_udelay((n) * UDELAY_MULT)) : \ - __udelay(n)) - -+#define spin_event_timeout(condition, timeout, delay) \ -+({ \ -+ typeof(condition) __ret; \ -+ int i = 0; \ -+ while (!(__ret = (condition)) && (i++ < timeout)) { \ -+ if (delay) \ -+ udelay(delay); \ -+ else \ -+ cpu_relax(); \ -+ udelay(1); \ -+ } \ -+ if (!__ret) \ -+ __ret = (condition); \ -+ __ret; \ -+}) -+ - /* Loop-based definitions for assembly code. */ - extern void __loop_delay(unsigned long loops); - extern void __loop_udelay(unsigned long usecs); ---- a/arch/arm/include/asm/io.h -+++ b/arch/arm/include/asm/io.h -@@ -221,6 +221,34 @@ extern int pci_ioremap_io(unsigned int o - #endif - #endif - -+/* access ports */ -+#define setbits32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr)) -+#define clrbits32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr)) -+ -+#define setbits16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr)) -+#define clrbits16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr)) -+ -+#define setbits8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr)) -+#define clrbits8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr)) -+ -+/* Clear and set bits in one shot. These macros can be used to clear and -+ * set multiple bits in a register using a single read-modify-write. These -+ * macros can also be used to set a multiple-bit bit pattern using a mask, -+ * by specifying the mask in the 'clear' parameter and the new bit pattern -+ * in the 'set' parameter. 
-+ */ -+ -+#define clrsetbits_be32(addr, clear, set) \ -+ iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr)) -+#define clrsetbits_le32(addr, clear, set) \ -+ iowrite32le((ioread32le(addr) & ~(clear)) | (set), (addr)) -+#define clrsetbits_be16(addr, clear, set) \ -+ iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr)) -+#define clrsetbits_le16(addr, clear, set) \ -+ iowrite16le((ioread16le(addr) & ~(clear)) | (set), (addr)) -+#define clrsetbits_8(addr, clear, set) \ -+ iowrite8((ioread8(addr) & ~(clear)) | (set), (addr)) -+ - /* - * IO port access primitives - * ------------------------- diff --git a/target/linux/layerscape/patches-4.4/3063-arm64-add-NO_IRQ-macro.patch b/target/linux/layerscape/patches-4.4/3063-arm64-add-NO_IRQ-macro.patch deleted file mode 100644 index 9f34fe701..000000000 --- a/target/linux/layerscape/patches-4.4/3063-arm64-add-NO_IRQ-macro.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 9417a4b5978a7c38ae1dee217c3b22cda8e94731 Mon Sep 17 00:00:00 2001 -From: Mingkai Hu -Date: Thu, 19 Nov 2015 15:25:37 +0800 -Subject: [PATCH 63/70] arm64: add NO_IRQ macro - -Signed-off-by: Mingkai Hu ---- - arch/arm64/include/asm/irq.h | 8 ++++++++ - 1 file changed, 8 insertions(+) - ---- a/arch/arm64/include/asm/irq.h -+++ b/arch/arm64/include/asm/irq.h -@@ -3,6 +3,14 @@ - - #include <asm-generic/irq.h> - -+/* -+ * Use this value to indicate lack of interrupt -+ * capability -+ */ -+#ifndef NO_IRQ -+#define NO_IRQ ((unsigned int)(-1)) -+#endif -+ - struct pt_regs; - - extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); diff --git a/target/linux/layerscape/patches-4.4/3071-arm64-dts-add-device-tree-for-ls1012a-SoC-and-boards.patch b/target/linux/layerscape/patches-4.4/3071-arm64-dts-add-device-tree-for-ls1012a-SoC-and-boards.patch deleted file mode 100644 index 10c11eb07..000000000 --- a/target/linux/layerscape/patches-4.4/3071-arm64-dts-add-device-tree-for-ls1012a-SoC-and-boards.patch +++ /dev/null @@ -1,880 +0,0 @@ -From 70e0080366e76dabf90b713f57fb9fc47aa35557 Mon Sep 17 00:00:00 2001 -From: Yangbo Lu -Date: Thu, 11 Aug 2016 10:36:05 +0800 -Subject: [PATCH 071/113] arm64: dts: add device tree for ls1012a SoC and - boards -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -This patch is to add device tree for ls1012a SoC and RDB/FREEDOM boards.
- -Signed-off-by: Pratiyush Mohan Srivastava -Signed-off-by: Prabhakar Kushwaha -Signed-off-by: Yunhui Cui -Signed-off-by: Rajesh Bhagat -Signed-off-by: Alison Wang -Signed-off-by: Horia Geantă -Signed-off-by: Bhaskar Upadhaya -Signed-off-by: Tang Yuantian -Signed-off-by: Chenhui Zhao -Signed-off-by: Jia Hongtao -Signed-off-by: Calvin Johnson -[yangbolu: integrate] -Signed-off-by: Yangbo Lu ---- - arch/arm64/boot/dts/freescale/Makefile | 2 + - arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts | 186 +++++++ - arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts | 114 +++++ - arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi | 526 ++++++++++++++++++++ - 4 files changed, 828 insertions(+) - create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts - create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts - create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi - ---- a/arch/arm64/boot/dts/freescale/Makefile -+++ b/arch/arm64/boot/dts/freescale/Makefile -@@ -2,6 +2,8 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2 - dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-rdb.dtb - dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-simu.dtb - dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-rdb.dtb -+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-rdb.dtb -+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-frdm.dtb - - always := $(dtb-y) - subdir-y := $(dts-dirs) ---- /dev/null -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts -@@ -0,0 +1,186 @@ -+/* -+ * Device Tree Include file for Freescale Layerscape-1012A family SoC. -+ * -+ * Copyright 2016, Freescale Semiconductor Inc. -+ -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+/dts-v1/; -+ -+#include "fsl-ls1012a.dtsi" -+ -+/ { -+ model = "LS1012A FREEDOM Board"; -+ compatible = "fsl,ls1012a-frdm", "fsl,ls1012a"; -+ -+ aliases { -+ crypto = &crypto; -+ }; -+ -+ sys_mclk: clock-mclk { -+ compatible = "fixed-clock"; -+ #clock-cells = <0>; -+ clock-frequency = <25000000>; -+ }; -+ -+ regulators { -+ compatible = "simple-bus"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ reg_1p8v: regulator@0 { -+ compatible = "regulator-fixed"; -+ reg = <0>; -+ regulator-name = "1P8V"; -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; -+ regulator-always-on; -+ }; -+ }; -+ -+ sound { -+ compatible = "simple-audio-card"; -+ simple-audio-card,format = "i2s"; -+ simple-audio-card,widgets = -+ "Microphone", "Microphone Jack", -+ "Headphone", "Headphone Jack", -+ "Speaker", "Speaker Ext", -+ "Line", "Line In Jack"; -+ simple-audio-card,routing = -+ "MIC_IN", "Microphone Jack", -+ "Microphone Jack", "Mic Bias", -+ "LINE_IN", "Line In Jack", -+ "Headphone Jack", "HP_OUT", -+ "Speaker Ext", "LINE_OUT"; -+ -+ simple-audio-card,cpu { -+ sound-dai = <&sai2>; -+ frame-master; -+ bitclock-master; -+ }; -+ -+ simple-audio-card,codec { -+ sound-dai = <&codec>; -+ frame-master; -+ bitclock-master; -+ system-clock-frequency = <25000000>; -+ }; -+ }; -+}; -+ -+&qspi { -+ num-cs = <2>; -+ bus-num = <0>; -+ status = "okay"; -+ fsl,ddr-sampling-point = <4>; -+ -+ qflash0: s25fs512s@0 { -+ compatible = "spansion,m25p80"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ spi-max-frequency = <20000000>; -+ m25p,fast-read; -+ reg = <0>; -+ }; -+}; -+&ftm0 { -+ status = "okay"; -+}; -+ -+&i2c0 { -+ status = "okay"; -+ -+ codec: sgtl5000@a { -+ #sound-dai-cells = <0>; -+ compatible = "fsl,sgtl5000"; -+ reg = <0xa>; -+ VDDA-supply = <®_1p8v>; -+ VDDIO-supply = <®_1p8v>; -+ clocks = <&sys_mclk 1>; -+ }; -+}; -+ -+&duart0 { -+ status = "okay"; -+}; -+&pfe { -+ status = "okay"; -+ ethernet@0 { -+ compatible = "fsl,pfe-gemac-port"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = < 0x0 >; /* GEM_ID */ -+ fsl,gemac-bus-id = <0x0>; /* BUS_ID */ -+ fsl,gemac-phy-id = <0x2>; /* PHY_ID */ -+ fsl,mdio-mux-val = <0x0>; -+ local-mac-address = [ 00 1A 2B 3C 4D 5E ]; -+ phy-mode = "sgmii"; -+ fsl,pfe-gemac-if-name = "eth0"; -+ fsl,pfe-phy-if-flags = <0x0>; -+ fsl,pfe-gemac-mode = <0x1B00>; /* GEMAC_SW_CONF | GEMAC_SW_FULL_DUPLEX | GEMAC_SW_SPEED_1G */ -+ -+ mdio@0 { -+ reg = <0x1>; /* enabled/disabled */ -+ fsl,mdio-phy-mask = <0xFFFFFFF9>; -+ }; -+ }; -+ ethernet@1 { -+ compatible = "fsl,pfe-gemac-port"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = < 0x1 >; /* GEM_ID */ -+ fsl,gemac-bus-id = < 0x1 >; /* BUS_ID */ -+ fsl,gemac-phy-id = < 0x1 >; /* PHY_ID */ -+ fsl,mdio-mux-val = <0x0>; -+ local-mac-address = [ 00 AA BB CC DD EE ]; -+ phy-mode = "sgmii"; -+ fsl,pfe-gemac-if-name = "eth1"; -+ fsl,pfe-phy-if-flags = <0x0>; -+ fsl,pfe-gemac-mode = <0x1B00>; /* GEMAC_SW_CONF | GEMAC_SW_FULL_DUPLEX | GEMAC_SW_SPEED_1G */ -+ mdio@0 { -+ reg = <0x0>; /* enabled/disabled */ -+ fsl,mdio-phy-mask = <0xFFFFFFF9>; -+ }; -+ -+ }; -+ -+}; -+ -+ -+&esdhc0 { -+ status = "disabled"; -+}; -+ -+&esdhc1 { -+ status = "disabled"; -+}; -+ -+&sai2 { -+ status = "okay"; -+}; ---- /dev/null -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts -@@ -0,0 +1,114 @@ -+/* -+ * Device Tree Include file for Freescale Layerscape-1012A family SoC. -+ * -+ * Copyright 2016, Freescale Semiconductor Inc. 
-+ -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+/dts-v1/; -+ -+#include "fsl-ls1012a.dtsi" -+ -+/ { -+ model = "LS1012A RDB Board"; -+ compatible = "fsl,ls1012a-rdb", "fsl,ls1012a"; -+ -+ aliases { -+ crypto = &crypto; -+ }; -+}; -+ -+&qspi { -+ num-cs = <2>; -+ bus-num = <0>; -+ status = "okay"; -+ fsl,ddr-sampling-point = <4>; -+ -+ qflash0: s25fs512s@0 { -+ compatible = "spansion,m25p80"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ spi-max-frequency = <20000000>; -+ m25p,fast-read; -+ reg = <0>; -+ }; -+}; -+&ftm0 { -+ status = "okay"; -+}; -+ -+&i2c0 { -+ status = "okay"; -+}; -+ -+&duart0 { -+ status = "okay"; -+}; -+&pfe { -+ status = "okay"; -+ ethernet@0 { -+ compatible = "fsl,pfe-gemac-port"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = < 0x0 >; /* GEM_ID */ -+ fsl,gemac-bus-id = <0x0>; /* BUS_ID */ -+ fsl,gemac-phy-id = <0x2>; /* PHY_ID */ -+ fsl,mdio-mux-val = <0x0>; -+ local-mac-address = [ 00 1A 2B 3C 4D 5E ]; -+ phy-mode = "sgmii"; -+ fsl,pfe-gemac-if-name = "eth0"; -+ fsl,pfe-phy-if-flags = <0x0>; -+ fsl,pfe-gemac-mode = <0x1B00>; /* GEMAC_SW_CONF | GEMAC_SW_FULL_DUPLEX | GEMAC_SW_SPEED_1G */ -+ -+ mdio@0 { -+ reg = <0x1>; /* enabled/disabled */ -+ fsl,mdio-phy-mask = <0xFFFFFFF9>; -+ }; -+ }; -+ ethernet@1 { -+ compatible = "fsl,pfe-gemac-port"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = < 0x1 >; /* GEM_ID */ -+ fsl,gemac-bus-id = < 0x1 >; /* BUS_ID */ -+ fsl,gemac-phy-id = < 0x1 >; /* PHY_ID */ -+ fsl,mdio-mux-val = <0x0>; -+ local-mac-address = [ 00 AA BB CC DD EE ]; -+ phy-mode = "rgmii"; -+ fsl,pfe-gemac-if-name = "eth2"; -+ fsl,pfe-phy-if-flags = <0x0>; -+ fsl,pfe-gemac-mode = <0x1B00>; /* GEMAC_SW_CONF | GEMAC_SW_FULL_DUPLEX | GEMAC_SW_SPEED_1G */ -+ mdio@0 { -+ reg = <0x0>; /* enabled/disabled */ -+ fsl,mdio-phy-mask = <0xFFFFFFF9>; -+ }; -+ -+ }; -+ 
-+}; ---- /dev/null -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi -@@ -0,0 +1,526 @@ -+/* -+ * Device Tree Include file for Freescale Layerscape-1043A family SoC. -+ * -+ * Copyright 2016, Freescale Semiconductor -+ * -+ * This file is dual-licensed: you can use it either under the terms -+ * of the GPLv2 or the X11 license, at your option. Note that this dual -+ * licensing only applies to this file, and not this project as a -+ * whole. -+ * -+ * a) This library is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License as -+ * published by the Free Software Foundation; either version 2 of the -+ * License, or (at your option) any later version. -+ * -+ * This library is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * Or, alternatively, -+ * -+ * b) Permission is hereby granted, free of charge, to any person -+ * obtaining a copy of this software and associated documentation -+ * files (the "Software"), to deal in the Software without -+ * restriction, including without limitation the rights to use, -+ * copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following -+ * conditions: -+ * -+ * The above copyright notice and this permission notice shall be -+ * included in all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -+ * OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#include -+ -+/ { -+ compatible = "fsl,ls1012a"; -+ interrupt-parent = <&gic>; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ -+ cpus { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ /* -+ * We expect the enable-method for cpu's to be "psci", but this -+ * is dependent on the SoC FW, which will fill this in. 
-+ * -+ * Currently supported enable-method is psci v0.2 -+ */ -+ cpu0: cpu@0 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a53"; -+ reg = <0x0>; -+ clocks = <&clockgen 1 0>; -+ #cooling-cells = <2>; -+ }; -+ -+ }; -+ -+ -+ sysclk: sysclk { -+ compatible = "fixed-clock"; -+ #clock-cells = <0>; -+ clock-frequency = <100000000>; -+ clock-output-names = "sysclk"; -+ }; -+ -+ timer { -+ compatible = "arm,armv8-timer"; -+ interrupts = <1 13 0x1>, /* Physical Secure PPI */ -+ <1 14 0x1>, /* Physical Non-Secure PPI */ -+ <1 11 0x1>, /* Virtual PPI */ -+ <1 10 0x1>; /* Hypervisor PPI */ -+ arm,reread-timer; -+ }; -+ -+ pmu { -+ compatible = "arm,armv8-pmuv3"; -+ interrupts = <0 106 0x4>; -+ }; -+ -+ gic: interrupt-controller@1400000 { -+ compatible = "arm,gic-400"; -+ #interrupt-cells = <3>; -+ interrupt-controller; -+ reg = <0x0 0x1401000 0 0x1000>, /* GICD */ -+ <0x0 0x1402000 0 0x2000>, /* GICC */ -+ <0x0 0x1404000 0 0x2000>, /* GICH */ -+ <0x0 0x1406000 0 0x2000>; /* GICV */ -+ interrupts = <1 9 0xf08>; -+ }; -+ -+ soc { -+ compatible = "simple-bus"; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ ranges; -+ -+ clockgen: clocking@1ee1000 { -+ compatible = "fsl,ls1012a-clockgen"; -+ reg = <0x0 0x1ee1000 0x0 0x1000>; -+ #clock-cells = <2>; -+ clocks = <&sysclk>; -+ }; -+ -+ scfg: scfg@1570000 { -+ compatible = "fsl,ls1012a-scfg", -+ "fsl,ls1043a-scfg", -+ "syscon"; -+ reg = <0x0 0x1570000 0x0 0x10000>; -+ big-endian; -+ }; -+ -+ crypto: crypto@1700000 { -+ compatible = "fsl,sec-v5.4", "fsl,sec-v5.0", -+ "fsl,sec-v4.0"; -+ fsl,sec-era = <8>; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ ranges = <0x0 0x00 0x1700000 0x100000>; -+ reg = <0x00 0x1700000 0x0 0x100000>; -+ interrupts = <0 75 0x4>; -+ -+ sec_jr0: jr@10000 { -+ compatible = "fsl,sec-v5.4-job-ring", -+ "fsl,sec-v5.0-job-ring", -+ "fsl,sec-v4.0-job-ring"; -+ reg = <0x10000 0x10000>; -+ interrupts = <0 71 0x4>; -+ }; -+ -+ sec_jr1: jr@20000 { -+ compatible = "fsl,sec-v5.4-job-ring", -+ "fsl,sec-v5.0-job-ring", -+ "fsl,sec-v4.0-job-ring"; -+ reg = <0x20000 0x10000>; -+ interrupts = <0 72 0x4>; -+ }; -+ -+ sec_jr2: jr@30000 { -+ compatible = "fsl,sec-v5.4-job-ring", -+ "fsl,sec-v5.0-job-ring", -+ "fsl,sec-v4.0-job-ring"; -+ reg = <0x30000 0x10000>; -+ interrupts = <0 73 0x4>; -+ }; -+ -+ sec_jr3: jr@40000 { -+ compatible = "fsl,sec-v5.4-job-ring", -+ "fsl,sec-v5.0-job-ring", -+ "fsl,sec-v4.0-job-ring"; -+ reg = <0x40000 0x10000>; -+ interrupts = <0 74 0x4>; -+ }; -+ }; -+ -+ -+ dcfg: dcfg@1ee0000 { -+ compatible = "fsl,ls1012a-dcfg", -+ "fsl,ls1043a-dcfg", -+ "syscon"; -+ reg = <0x0 0x1ee0000 0x0 0x10000>; -+ }; -+ -+ reset: reset@1EE00B0 { -+ compatible = "fsl,ls-reset"; -+ reg = <0x0 0x1EE00B0 0x0 0x4>; -+ big-endian; -+ }; -+ -+ rcpm: rcpm@1ee2000 { -+ compatible = "fsl,ls1012a-rcpm", -+ "fsl,ls1043a-rcpm", -+ "fsl,qoriq-rcpm-2.1"; -+ reg = <0x0 0x1ee2000 0x0 0x10000>; -+ }; -+ -+ ftm0: ftm0@29d0000 { -+ compatible = "fsl,ftm-alarm"; -+ reg = <0x0 0x29d0000 0x0 0x10000>; -+ interrupts = <0 86 0x4>; -+ big-endian; -+ rcpm-wakeup = <&rcpm 0x00020000 0x0>; -+ status = "okay"; -+ }; -+ -+ esdhc0: esdhc@1560000 { -+ compatible = "fsl,ls1012a-esdhc0", "fsl,esdhc"; -+ reg = <0x0 0x1560000 0x0 0x10000>; -+ interrupts = <0 62 0x4>; -+ clock-frequency = <0>; -+ voltage-ranges = <1800 1800 3300 3300>; -+ sdhci,auto-cmd12; -+ big-endian; -+ bus-width = <4>; -+ }; -+ -+ esdhc1: esdhc@1580000 { -+ compatible = "fsl,ls1012a-esdhc1", "fsl,esdhc"; -+ reg = <0x0 0x1580000 0x0 0x10000>; -+ interrupts = <0 65 0x4>; -+ clock-frequency = <0>; -+ 
voltage-ranges = <1800 1800 3300 3300>; -+ sdhci,auto-cmd12; -+ big-endian; -+ bus-width = <4>; -+ }; -+ -+ dspi0: dspi@2100000 { -+ compatible = "fsl,ls1012a-dspi", -+ "fsl,ls1043a-dspi", -+ "fsl,ls1021a-v1.0-dspi"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2100000 0x0 0x10000>; -+ interrupts = <0 64 0x4>; -+ clock-names = "dspi"; -+ clocks = <&clockgen 4 0>; -+ spi-num-chipselects = <5>; -+ big-endian; -+ status = "enabled"; -+ }; -+ -+ qspi: quadspi@1550000 { -+ compatible = "fsl,ls1012a-qspi", -+ "fsl,ls1043a-qspi", -+ "fsl,ls1021a-qspi"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x1550000 0x0 0x10000>, -+ <0x0 0x40000000 0x0 0x4000000>; -+ reg-names = "QuadSPI", "QuadSPI-memory"; -+ interrupts = <0 99 0x4>; -+ clock-names = "qspi_en", "qspi"; -+ clocks = <&clockgen 4 0>, <&clockgen 4 0>; -+ big-endian; -+ amba-base = <0x42000000>; -+ }; -+ -+ tmu: tmu@1f00000 { -+ compatible = "fsl,qoriq-tmu", "fsl,ls1012a-tmu"; -+ reg = <0x0 0x1f00000 0x0 0x10000>; -+ interrupts = <0 33 0x4>; -+ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>; -+ fsl,tmu-calibration = <0x00000000 0x00000026 -+ 0x00000001 0x0000002d -+ 0x00000002 0x00000032 -+ 0x00000003 0x00000039 -+ 0x00000004 0x0000003f -+ 0x00000005 0x00000046 -+ 0x00000006 0x0000004d -+ 0x00000007 0x00000054 -+ 0x00000008 0x0000005a -+ 0x00000009 0x00000061 -+ 0x0000000a 0x0000006a -+ 0x0000000b 0x00000071 -+ -+ 0x00010000 0x00000025 -+ 0x00010001 0x0000002c -+ 0x00010002 0x00000035 -+ 0x00010003 0x0000003d -+ 0x00010004 0x00000045 -+ 0x00010005 0x0000004e -+ 0x00010006 0x00000057 -+ 0x00010007 0x00000061 -+ 0x00010008 0x0000006b -+ 0x00010009 0x00000076 -+ -+ 0x00020000 0x00000029 -+ 0x00020001 0x00000033 -+ 0x00020002 0x0000003d -+ 0x00020003 0x00000049 -+ 0x00020004 0x00000056 -+ 0x00020005 0x00000061 -+ 0x00020006 0x0000006d -+ -+ 0x00030000 0x00000021 -+ 0x00030001 0x0000002a -+ 0x00030002 0x0000003c -+ 0x00030003 0x0000004e>; -+ big-endian; -+ #thermal-sensor-cells = <1>; -+ }; -+ -+ thermal-zones { -+ cpu_thermal: cpu-thermal { -+ polling-delay-passive = <1000>; -+ polling-delay = <5000>; -+ -+ thermal-sensors = <&tmu 0>; -+ -+ trips { -+ cpu_alert: cpu-alert { -+ temperature = <85000>; -+ hysteresis = <2000>; -+ type = "passive"; -+ }; -+ cpu_crit: cpu-crit { -+ temperature = <95000>; -+ hysteresis = <2000>; -+ type = "critical"; -+ }; -+ }; -+ -+ cooling-maps { -+ map0 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu0 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ }; -+ }; -+ }; -+ -+ i2c0: i2c@2180000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2180000 0x0 0x10000>; -+ interrupts = <0 56 0x4>; -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 0>; -+ status = "disabled"; -+ }; -+ -+ i2c1: i2c@2190000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2190000 0x0 0x10000>; -+ interrupts = <0 57 0x4>; -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 0>; -+ status = "disabled"; -+ }; -+ -+ -+ duart0: serial@21c0500 { -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x00 0x21c0500 0x0 0x100>; -+ interrupts = <0 54 0x4>; -+ clocks = <&clockgen 4 0>; -+ }; -+ -+ duart1: serial@21c0600 { -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x00 0x21c0600 0x0 0x100>; -+ interrupts = <0 54 0x4>; -+ clocks = <&clockgen 4 0>; -+ }; -+ -+ gpio0: gpio@2300000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2300000 0x0 0x10000>; -+ interrupts = <0 66 0x4>; -+ gpio-controller; -+ #gpio-cells = <2>; 
-+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio1: gpio@2310000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2310000 0x0 0x10000>; -+ interrupts = <0 67 0x4>; -+ gpio-controller; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ wdog0: wdog@2ad0000 { -+ compatible = "fsl,ls1012a-wdt", -+ "fsl,ls1043a-wdt", -+ "fsl,imx21-wdt"; -+ reg = <0x0 0x2ad0000 0x0 0x10000>; -+ interrupts = <0 83 0x4>; -+ clocks = <&clockgen 4 0>; -+ clock-names = "wdog"; -+ big-endian; -+ }; -+ -+ sai1: sai@2b50000 { -+ #sound-dai-cells = <0>; -+ compatible = "fsl,vf610-sai"; -+ reg = <0x0 0x2b50000 0x0 0x10000>; -+ interrupts = <0 148 0x4>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>, -+ <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "bus", "mclk1", "mclk2", "mclk3"; -+ dma-names = "tx", "rx"; -+ dmas = <&edma0 1 47>, -+ <&edma0 1 46>; -+ status = "disabled"; -+ }; -+ -+ sai2: sai@2b60000 { -+ #sound-dai-cells = <0>; -+ compatible = "fsl,vf610-sai"; -+ reg = <0x0 0x2b60000 0x0 0x10000>; -+ interrupts = <0 149 0x4>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>, -+ <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "bus", "mclk1", "mclk2", "mclk3"; -+ dma-names = "tx", "rx"; -+ dmas = <&edma0 1 45>, -+ <&edma0 1 44>; -+ status = "disabled"; -+ }; -+ -+ edma0: edma@2c00000 { -+ #dma-cells = <2>; -+ compatible = "fsl,vf610-edma"; -+ reg = <0x0 0x2c00000 0x0 0x10000>, -+ <0x0 0x2c10000 0x0 0x10000>, -+ <0x0 0x2c20000 0x0 0x10000>; -+ interrupts = <0 103 0x4>, -+ <0 103 0x4>; -+ interrupt-names = "edma-tx", "edma-err"; -+ dma-channels = <32>; -+ big-endian; -+ clock-names = "dmamux0", "dmamux1"; -+ clocks = <&clockgen 4 3>, -+ <&clockgen 4 3>; -+ }; -+ -+ sata: sata@3200000 { -+ compatible = "fsl,ls1012a-ahci"; -+ reg = <0x0 0x3200000 0x0 0x10000>; -+ interrupts = <0 69 0x4>; -+ clocks = <&clockgen 4 0>; -+ }; -+ -+ msi2: msi-controller2@1572000 { -+ compatible ="fsl,1s1012a-msi", "fsl,1s1021a-msi"; -+ reg = <0x0 0x1572000 0x0 0x4>, -+ <0x0 0x1572004 0x0 0x4>; -+ reg-names = "msiir", "msir"; -+ msi-controller; -+ interrupts = <0 126 0x4>; -+ }; -+ -+ usb@8600000 { -+ compatible = "fsl-usb2-dr-v2.5", "fsl-usb2-dr"; -+ reg = <0x0 0x8600000 0x0 0x1000>; -+ interrupts = <0 139 0x4>; -+ dr_mode = "host"; -+ phy_type = "ulpi"; -+ fsl,usb-erratum-a005697; -+ }; -+ -+ usb0: usb3@2f00000 { -+ compatible = "snps,dwc3"; -+ reg = <0x0 0x2f00000 0x0 0x10000>; -+ interrupts = <0 60 0x4>; -+ dr_mode = "host"; -+ configure-gfladj; -+ snps,dis_rxdet_inp3_quirk; -+ }; -+ -+ pcie@3400000 { -+ compatible = "fsl,ls1012a-pcie", -+ "fsl,ls1043a-pcie", -+ "snps,dw-pcie"; -+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */ -+ 0x40 0x00000000 0x0 0x00002000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 118 0x4>, /* controller interrupt */ -+ <0 117 0x4>; /* PME interrupt */ -+ interrupt-names = "intr", "pme"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&msi2>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 110 0x4>, -+ <0000 0 0 2 &gic 0 111 0x4>, -+ <0000 0 0 3 &gic 0 112 0x4>, -+ <0000 0 0 4 &gic 0 113 0x4>; -+ }; -+ }; -+ reserved-memory { -+ #address-cells = <2>; -+ #size-cells = <2>; -+ ranges; -+ -+ 
pfe_reserved: packetbuffer@83400000 { -+ reg = <0 0x83400000 0 0xc00000>; -+ }; -+ }; -+ -+ pfe: pfe@04000000 { -+ compatible = "fsl,pfe"; -+ ranges = <0x0 0x00 0x04000000 0xc00000 -+ 0x1 0x00 0x83400000 0xc00000>; -+ reg = <0x0 0x90500000 0x0 0x10000>, /* APB 64K */ -+ <0x0 0x04000000 0x0 0xc00000>, /* AXI 16M */ -+ <0x0 0x83400000 0x0 0xc00000>, /* PFE DDR 12M */ -+ <0x0 0x10000000 0x0 0x2000>; /* OCRAM 8K */ -+ fsl,pfe-num-interfaces = < 0x2 >; -+ interrupts = <0 172 0x4>; -+ #interrupt-names = "hifirq"; -+ memory-region = <&pfe_reserved>; -+ fsl,pfe-scfg = <&scfg 0>; -+ }; -+ -+}; diff --git a/target/linux/layerscape/patches-4.4/3117-armv8-aarch32-Run-32-bit-Linux-for-LayerScape-SoCs.patch b/target/linux/layerscape/patches-4.4/3117-armv8-aarch32-Run-32-bit-Linux-for-LayerScape-SoCs.patch deleted file mode 100644 index 9b580b473..000000000 --- a/target/linux/layerscape/patches-4.4/3117-armv8-aarch32-Run-32-bit-Linux-for-LayerScape-SoCs.patch +++ /dev/null @@ -1,49 +0,0 @@ -From f6dcf8936845ea95eba432ee725cec761032fe2a Mon Sep 17 00:00:00 2001 -From: Ying Zhang -Date: Thu, 29 Sep 2016 10:12:29 +0800 -Subject: [PATCH 117/124] armv8: aarch32: Run 32-bit Linux for LayerScape SoCs - -This patch adds AArch32 execution state support for LayerScape SoCs. - -Signed-off-by: Ebony Zhu -Signed-off-by: Alison Wang ---- - arch/arm/mach-imx/Makefile | 2 ++ - arch/arm/mach-imx/mach-layerscape.c | 22 ++++++++++++++++++++++ - 2 files changed, 24 insertions(+) - create mode 100644 arch/arm/mach-imx/mach-layerscape.c - ---- a/arch/arm/mach-imx/Makefile -+++ b/arch/arm/mach-imx/Makefile -@@ -103,4 +103,6 @@ obj-$(CONFIG_SOC_LS1021A) += mach-ls1021 - - obj-$(CONFIG_ARCH_LAYERSCAPE) += mach-ls1043a.o - -+obj-$(CONFIG_ARCH_LAYERSCAPE) += mach-layerscape.o -+ - obj-y += devices/ ---- /dev/null -+++ b/arch/arm/mach-imx/mach-layerscape.c -@@ -0,0 +1,22 @@ -+/* -+ * Copyright 2015-2016 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ */ -+ -+#include <asm/mach/arch.h> -+ -+#include "common.h" -+ -+static const char * const layerscape_dt_compat[] __initconst = { -+ "fsl,ls1043a", -+ "fsl,ls1012a", -+ NULL, -+}; -+ -+DT_MACHINE_START(LAYERSCAPE, "Freescale LAYERSCAPE") -+ .dt_compat = layerscape_dt_compat, -+MACHINE_END diff --git a/target/linux/layerscape/patches-4.4/3118-armv8-aarch32-Add-KVM-support-for-AArch32-on-ARMv8.patch b/target/linux/layerscape/patches-4.4/3118-armv8-aarch32-Add-KVM-support-for-AArch32-on-ARMv8.patch deleted file mode 100644 index 32ebc36b1..000000000 --- a/target/linux/layerscape/patches-4.4/3118-armv8-aarch32-Add-KVM-support-for-AArch32-on-ARMv8.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 44914d3b7baff0f96c3584e4d46aa992e402ef4f Mon Sep 17 00:00:00 2001 -From: Ying Zhang -Date: Thu, 29 Sep 2016 11:16:16 +0800 -Subject: [PATCH 118/124] armv8: aarch32: Add KVM support for AArch32 on ARMv8 - -This patch is to add KVM support for AArch32 execution state on ARMv8.
- -Signed-off-by: Alison Wang ---- - arch/arm/include/asm/cputype.h | 2 ++ - arch/arm/kvm/guest.c | 1 + - 2 files changed, 3 insertions(+) - ---- a/arch/arm/include/asm/cputype.h -+++ b/arch/arm/include/asm/cputype.h -@@ -76,6 +76,8 @@ - #define ARM_CPU_PART_CORTEX_A15 0x4100c0f0 - #define ARM_CPU_PART_MASK 0xff00fff0 - -+#define ARM_CPU_PART_CORTEX_AARCH32_A53 0x4100d030 -+ - #define ARM_CPU_XSCALE_ARCH_MASK 0xe000 - #define ARM_CPU_XSCALE_ARCH_V1 0x2000 - #define ARM_CPU_XSCALE_ARCH_V2 0x4000 ---- a/arch/arm/kvm/guest.c -+++ b/arch/arm/kvm/guest.c -@@ -247,6 +247,7 @@ int __attribute_const__ kvm_target_cpu(v - { - switch (read_cpuid_part()) { - case ARM_CPU_PART_CORTEX_A7: -+ case ARM_CPU_PART_CORTEX_AARCH32_A53: - return KVM_ARM_TARGET_CORTEX_A7; - case ARM_CPU_PART_CORTEX_A15: - return KVM_ARM_TARGET_CORTEX_A15; diff --git a/target/linux/layerscape/patches-4.4/3131-arm64-ls1046a-add-DTS-for-Freescale-LS1046A-SoC.patch b/target/linux/layerscape/patches-4.4/3131-arm64-ls1046a-add-DTS-for-Freescale-LS1046A-SoC.patch deleted file mode 100644 index ca11f9fb8..000000000 --- a/target/linux/layerscape/patches-4.4/3131-arm64-ls1046a-add-DTS-for-Freescale-LS1046A-SoC.patch +++ /dev/null @@ -1,1056 +0,0 @@ -From 80ca93f1a5590529e39560099a71edb03897050e Mon Sep 17 00:00:00 2001 -From: Mingkai Hu -Date: Wed, 11 May 2016 11:29:51 +0800 -Subject: [PATCH 131/141] arm64/ls1046a: add DTS for Freescale LS1046A SoC - -LS1046a is an SoC with 4 ARMv8 A72 cores and most other IP blocks -similar to LS1043a which complies to Chassis 2.1 spec. - -Following levels of DTSI/DTS files have been created for the -LS1046A SoC family: - -- fsl-ls1046a.dtsi: - DTS-Include file for FSL LS1046A SoC. - -Signed-off-by: Gong Qianyu -Signed-off-by: Minghuan Lian -Signed-off-by: Hou Zhiqiang -Signed-off-by: Mihai Bantea -Signed-off-by: Mingkai Hu ---- - arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi | 1029 ++++++++++++++++++++++++ - 1 file changed, 1029 insertions(+) - create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi - ---- /dev/null -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi -@@ -0,0 +1,1029 @@ -+/* -+ * Device Tree Include file for Freescale Layerscape-1046A family SoC. -+ * -+ * Copyright 2016, Freescale Semiconductor -+ * -+ * Mingkai Hu -+ * -+ * This file is dual-licensed: you can use it either under the terms -+ * of the GPLv2 or the X11 license, at your option. Note that this dual -+ * licensing only applies to this file, and not this project as a -+ * whole. -+ * -+ * a) This library is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License as -+ * published by the Free Software Foundation; either version 2 of the -+ * License, or (at your option) any later version. -+ * -+ * This library is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * Or, alternatively, -+ * -+ * b) Permission is hereby granted, free of charge, to any person -+ * obtaining a copy of this software and associated documentation -+ * files (the "Software"), to deal in the Software without -+ * restriction, including without limitation the rights to use, -+ * copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following -+ * conditions: -+ * -+ * The above copyright notice and this permission notice shall be -+ * included in all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -+ * OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+/ { -+ compatible = "fsl,ls1046a"; -+ interrupt-parent = <&gic>; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ -+ aliases { -+ ethernet0 = &fm1mac1; -+ ethernet1 = &fm1mac2; -+ ethernet2 = &fm1mac3; -+ ethernet3 = &fm1mac4; -+ ethernet4 = &fm1mac5; -+ ethernet5 = &fm1mac6; -+ ethernet6 = &fm1mac9; -+ }; -+ -+ cpus { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ cpu0: cpu@0 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0>; -+ clocks = <&clockgen 1 0>; -+ next-level-cache = <&l2>; -+ }; -+ -+ cpu1: cpu@1 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x1>; -+ clocks = <&clockgen 1 0>; -+ next-level-cache = <&l2>; -+ }; -+ -+ cpu2: cpu@2 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x2>; -+ clocks = <&clockgen 1 0>; -+ next-level-cache = <&l2>; -+ }; -+ -+ cpu3: cpu@3 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x3>; -+ clocks = <&clockgen 1 0>; -+ next-level-cache = <&l2>; -+ }; -+ -+ l2: l2-cache { -+ compatible = "cache"; -+ }; -+ }; -+ -+ memory@80000000 { -+ device_type = "memory"; -+ reg = <0x0 0x80000000 0 0x80000000>; -+ /* DRAM space 1, size: 2GiB DRAM */ -+ }; -+ -+ sysclk: sysclk { -+ compatible = "fixed-clock"; -+ #clock-cells = <0>; -+ clock-frequency = <100000000>; -+ clock-output-names = "sysclk"; -+ }; -+ -+ timer { -+ compatible = "arm,armv8-timer"; -+ interrupts = <1 13 0x1>, /* Physical Secure PPI */ -+ <1 14 0x1>, /* Physical Non-Secure PPI */ -+ <1 11 0x1>, /* Virtual PPI */ -+ <1 10 0x1>; /* Hypervisor PPI */ -+ arm,reread-timer; -+ }; -+ -+ pmu { -+ compatible = "arm,armv8-pmuv3"; -+ interrupts = <0 106 0x4>, -+ <0 107 0x4>, -+ <0 95 0x4>, -+ <0 97 0x4>; -+ interrupt-affinity = <&cpu0>, -+ <&cpu1>, -+ <&cpu2>, -+ <&cpu3>; -+ }; -+ -+ gic: interrupt-controller@1400000 { -+ compatible = "arm,gic-400"; -+ #interrupt-cells = <3>; -+ interrupt-controller; -+ reg = <0x0 0x1410000 0 0x10000>, /* GICD */ -+ <0x0 0x1420000 0 0x20000>, /* GICC */ -+ <0x0 0x1440000 0 0x20000>, /* GICH */ -+ <0x0 0x1460000 0 0x20000>; /* GICV */ -+ interrupts = <1 9 0xf08>; -+ }; -+ -+ soc { -+ compatible = "simple-bus"; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ ranges; -+ -+ clockgen: clocking@1ee1000 { -+ compatible = "fsl,ls1046a-clockgen"; -+ reg = <0x0 0x1ee1000 0x0 0x1000>; -+ #clock-cells = <2>; -+ clocks = <&sysclk>; -+ }; -+ 
-+ scfg: scfg@1570000 { -+ compatible = "fsl,ls1046a-scfg", "syscon"; -+ reg = <0x0 0x1570000 0x0 0x10000>; -+ big-endian; -+ }; -+ -+ reset: reset@1ee00b0 { -+ compatible = "fsl,ls-reset"; -+ reg = <0x0 0x1ee00b0 0x0 0x4>; -+ big-endian; -+ }; -+ -+ rcpm: rcpm@1ee2000 { -+ compatible = "fsl,ls1046a-rcpm", "fsl,qoriq-rcpm-2.1"; -+ reg = <0x0 0x1ee2000 0x0 0x10000>; -+ }; -+ -+ ifc: ifc@1530000 { -+ compatible = "fsl,ifc", "simple-bus"; -+ reg = <0x0 0x1530000 0x0 0x10000>; -+ interrupts = <0 43 0x4>; -+ }; -+ -+ esdhc: esdhc@1560000 { -+ compatible = "fsl,ls1046a-esdhc", "fsl,esdhc"; -+ reg = <0x0 0x1560000 0x0 0x10000>; -+ interrupts = <0 62 0x4>; -+ clock-frequency = <0>; -+ voltage-ranges = <1800 1800 3300 3300>; -+ sdhci,auto-cmd12; -+ big-endian; -+ bus-width = <4>; -+ }; -+ -+ qman: qman@1880000 { -+ compatible = "fsl,qman"; -+ reg = <0x00 0x1880000 0x0 0x10000>; -+ interrupts = <0 45 0x4>; -+ }; -+ -+ bman: bman@1890000 { -+ compatible = "fsl,bman"; -+ reg = <0x00 0x1890000 0x0 0x10000>; -+ interrupts = <0 45 0x4>; -+ }; -+ -+ fman0: fman@1a00000 { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ cell-index = <0>; -+ compatible = "fsl,fman", "simple-bus"; -+ ranges = <0x0 0x00 0x1a00000 0x100000>; -+ reg = <0x00 0x1a00000 0x0 0x100000>; -+ interrupts = <0 44 0x4>, <0 45 0x4>; -+ clocks = <&clockgen 3 0>; -+ clock-names = "fmanclk"; -+ -+ cc { -+ compatible = "fsl,fman-cc"; -+ }; -+ -+ muram@0 { -+ compatible = "fsl,fman-muram"; -+ reg = <0x0 0x60000>; -+ }; -+ -+ bmi@80000 { -+ compatible = "fsl,fman-bmi"; -+ reg = <0x80000 0x400>; -+ }; -+ -+ qmi@80400 { -+ compatible = "fsl,fman-qmi"; -+ reg = <0x80400 0x400>; -+ }; -+ -+ fman0_oh1: port@82000 { -+ cell-index = <0>; -+ compatible = "fsl,fman-port-oh"; -+ reg = <0x82000 0x1000>; -+ }; -+ -+ fman0_oh2: port@83000 { -+ cell-index = <1>; -+ compatible = "fsl,fman-port-oh"; -+ reg = <0x83000 0x1000>; -+ }; -+ -+ fman0_oh3: port@84000 { -+ cell-index = <2>; -+ compatible = "fsl,fman-port-oh"; -+ reg = <0x84000 0x1000>; -+ }; -+ -+ fman0_oh4: port@85000 { -+ cell-index = <3>; -+ compatible = "fsl,fman-port-oh"; -+ reg = <0x85000 0x1000>; -+ }; -+ -+ fman0_oh5: port@86000 { -+ cell-index = <4>; -+ compatible = "fsl,fman-port-oh"; -+ reg = <0x86000 0x1000>; -+ }; -+ -+ fman0_oh6: port@87000 { -+ cell-index = <5>; -+ compatible = "fsl,fman-port-oh"; -+ reg = <0x87000 0x1000>; -+ }; -+ -+ policer@c0000 { -+ compatible = "fsl,fman-policer"; -+ reg = <0xc0000 0x1000>; -+ }; -+ -+ keygen@c1000 { -+ compatible = "fsl,fman-keygen"; -+ reg = <0xc1000 0x1000>; -+ }; -+ -+ dma@c2000 { -+ compatible = "fsl,fman-dma"; -+ reg = <0xc2000 0x1000>; -+ }; -+ -+ fpm@c3000 { -+ compatible = "fsl,fman-fpm"; -+ reg = <0xc3000 0x1000>; -+ }; -+ -+ parser@c7000 { -+ compatible = "fsl,fman-parser"; -+ reg = <0xc7000 0x1000>; -+ }; -+ -+ vsps@dc000 { -+ compatible = "fsl,fman-vsps"; -+ reg = <0xdc000 0x1000>; -+ }; -+ -+ mdio0: mdio@fc000 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0xfc000 0x1000>; -+ }; -+ -+ xmdio0: mdio@fd000 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0xfd000 0x1000>; -+ }; -+ -+ fman0_rx0: port@88000 { -+ cell-index = <0>; -+ compatible = "fsl,fman-port-1g-rx"; -+ reg = <0x88000 0x1000>; -+ }; -+ -+ fman0_tx0: port@a8000 { -+ cell-index = <0>; -+ compatible = "fsl,fman-port-1g-tx"; -+ reg = <0xa8000 0x1000>; -+ }; -+ -+ fm1mac1: ethernet@e0000 { -+ cell-index = <0>; -+ compatible = "fsl,fman-memac"; -+ reg = <0xe0000 0x1000>; -+ 
fsl,port-handles = <&fman0_rx0 &fman0_tx0>; -+ ptimer-handle = <&ptp_timer0>; -+ }; -+ -+ mdio@e1000 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0xe1000 0x1000>; -+ }; -+ -+ fman0_rx1: port@89000 { -+ cell-index = <1>; -+ compatible = "fsl,fman-port-1g-rx"; -+ reg = <0x89000 0x1000>; -+ }; -+ -+ fman0_tx1: port@a9000 { -+ cell-index = <1>; -+ compatible = "fsl,fman-port-1g-tx"; -+ reg = <0xa9000 0x1000>; -+ }; -+ -+ fm1mac2: ethernet@e2000 { -+ cell-index = <1>; -+ compatible = "fsl,fman-memac"; -+ reg = <0xe2000 0x1000>; -+ fsl,port-handles = <&fman0_rx1 &fman0_tx1>; -+ ptimer-handle = <&ptp_timer0>; -+ }; -+ -+ mdio@e3000 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0xe3000 0x1000>; -+ }; -+ -+ fman0_rx2: port@8a000 { -+ cell-index = <2>; -+ compatible = "fsl,fman-port-1g-rx"; -+ reg = <0x8a000 0x1000>; -+ }; -+ -+ fman0_tx2: port@aa000 { -+ cell-index = <2>; -+ compatible = "fsl,fman-port-1g-tx"; -+ reg = <0xaa000 0x1000>; -+ }; -+ -+ fm1mac3: ethernet@e4000 { -+ cell-index = <2>; -+ compatible = "fsl,fman-memac"; -+ reg = <0xe4000 0x1000>; -+ fsl,port-handles = <&fman0_rx2 &fman0_tx2>; -+ ptimer-handle = <&ptp_timer0>; -+ }; -+ -+ mdio@e5000 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0xe5000 0x1000>; -+ }; -+ -+ fman0_rx3: port@8b000 { -+ cell-index = <3>; -+ compatible = "fsl,fman-port-1g-rx"; -+ reg = <0x8b000 0x1000>; -+ }; -+ -+ fman0_tx3: port@ab000 { -+ cell-index = <3>; -+ compatible = "fsl,fman-port-1g-tx"; -+ reg = <0xab000 0x1000>; -+ }; -+ -+ fm1mac4: ethernet@e6000 { -+ cell-index = <3>; -+ compatible = "fsl,fman-memac"; -+ reg = <0xe6000 0x1000>; -+ fsl,port-handles = <&fman0_rx3 &fman0_tx3>; -+ ptimer-handle = <&ptp_timer0>; -+ }; -+ -+ mdio@e7000 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0xe7000 0x1000>; -+ }; -+ -+ fman0_rx4: port@8c000 { -+ cell-index = <4>; -+ compatible = "fsl,fman-port-1g-rx"; -+ reg = <0x8c000 0x1000>; -+ }; -+ -+ fman0_tx4: port@ac000 { -+ cell-index = <4>; -+ compatible = "fsl,fman-port-1g-tx"; -+ reg = <0xac000 0x1000>; -+ }; -+ -+ fm1mac5: ethernet@e8000 { -+ cell-index = <4>; -+ compatible = "fsl,fman-memac"; -+ reg = <0xe8000 0x1000>; -+ fsl,port-handles = <&fman0_rx4 &fman0_tx4>; -+ ptimer-handle = <&ptp_timer0>; -+ }; -+ -+ mdio@e9000 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0xe9000 0x1000>; -+ }; -+ -+ fman0_rx5: port@8d000 { -+ cell-index = <5>; -+ compatible = "fsl,fman-port-1g-rx"; -+ reg = <0x8d000 0x1000>; -+ }; -+ -+ fman0_tx5: port@ad000 { -+ cell-index = <5>; -+ compatible = "fsl,fman-port-1g-tx"; -+ reg = <0xad000 0x1000>; -+ }; -+ -+ fm1mac6: ethernet@ea000 { -+ cell-index = <5>; -+ compatible = "fsl,fman-memac"; -+ reg = <0xea000 0x1000>; -+ fsl,port-handles = <&fman0_rx5 &fman0_tx5>; -+ ptimer-handle = <&ptp_timer0>; -+ }; -+ -+ mdio@eb000 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0xeb000 0x1000>; -+ }; -+ -+ fman0_10g_rx0: port@90000 { -+ cell-index = <0>; -+ compatible = "fsl,fman-port-10g-rx"; -+ reg = <0x90000 0x1000>; -+ }; -+ -+ fman0_10g_tx0: port@b0000 { -+ cell-index = <0>; -+ compatible = "fsl,fman-port-10g-tx"; -+ reg = <0xb0000 0x1000>; -+ fsl,qman-channel-id = <0x800>; -+ }; -+ -+ fm1mac9: ethernet@f0000 { -+ cell-index = <0>; -+ compatible = "fsl,fman-memac"; -+ reg = <0xf0000 0x1000>; -+ 
fsl,port-handles = <&fman0_10g_rx0 &fman0_10g_tx0>; -+ }; -+ -+ mdio@f1000 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0xf1000 0x1000>; -+ }; -+ -+ ptp_timer0: rtc@fe000 { -+ compatible = "fsl,fman-rtc"; -+ reg = <0xfe000 0x1000>; -+ }; -+ }; -+ -+ dspi: dspi@2100000 { -+ compatible = "fsl,ls1046a-dspi", "fsl,ls1021a-v1.0-dspi"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2100000 0x0 0x10000>; -+ interrupts = <0 64 0x4>; -+ clock-names = "dspi"; -+ clocks = <&clockgen 4 1>; -+ spi-num-chipselects = <5>; -+ big-endian; -+ status = "disabled"; -+ }; -+ -+ qspi: quadspi@1550000 { -+ compatible = "fsl,ls1046a-qspi", "fsl,ls1021a-qspi"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x1550000 0x0 0x10000>, -+ <0x0 0x40000000 0x0 0x10000000>; -+ reg-names = "QuadSPI", "QuadSPI-memory"; -+ interrupts = <0 99 0x4>; -+ clock-names = "qspi_en", "qspi"; -+ clocks = <&clockgen 4 1>, <&clockgen 4 1>; -+ big-endian; -+ fsl,qspi-has-second-chip; -+ status = "disabled"; -+ }; -+ -+ i2c0: i2c@2180000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2180000 0x0 0x10000>; -+ interrupts = <0 56 0x4>; -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 1>; -+ dmas = <&edma0 1 39>, -+ <&edma0 1 38>; -+ dma-names = "tx", "rx"; -+ status = "disabled"; -+ }; -+ -+ i2c1: i2c@2190000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2190000 0x0 0x10000>; -+ interrupts = <0 57 0x4>; -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 1>; -+ status = "disabled"; -+ }; -+ -+ i2c2: i2c@21a0000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x21a0000 0x0 0x10000>; -+ interrupts = <0 58 0x4>; -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 1>; -+ status = "disabled"; -+ }; -+ -+ i2c3: i2c@21b0000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x21b0000 0x0 0x10000>; -+ interrupts = <0 59 0x4>; -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 1>; -+ status = "disabled"; -+ }; -+ -+ duart0: serial@21c0500 { -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x00 0x21c0500 0x0 0x100>; -+ interrupts = <0 54 0x4>; -+ clocks = <&clockgen 4 1>; -+ }; -+ -+ duart1: serial@21c0600 { -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x00 0x21c0600 0x0 0x100>; -+ interrupts = <0 54 0x4>; -+ clocks = <&clockgen 4 1>; -+ }; -+ -+ duart2: serial@21d0500 { -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x0 0x21d0500 0x0 0x100>; -+ interrupts = <0 55 0x4>; -+ clocks = <&clockgen 4 1>; -+ }; -+ -+ duart3: serial@21d0600 { -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x0 0x21d0600 0x0 0x100>; -+ interrupts = <0 55 0x4>; -+ clocks = <&clockgen 4 1>; -+ }; -+ -+ gpio0: gpio@2300000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2300000 0x0 0x10000>; -+ interrupts = <0 66 0x4>; -+ gpio-controller; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio1: gpio@2310000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2310000 0x0 0x10000>; -+ interrupts = <0 67 0x4>; -+ gpio-controller; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio2: gpio@2320000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2320000 0x0 0x10000>; -+ interrupts = <0 68 0x4>; -+ gpio-controller; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio3: 
gpio@2330000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2330000 0x0 0x10000>; -+ interrupts = <0 134 0x4>; -+ gpio-controller; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ lpuart0: serial@2950000 { -+ compatible = "fsl,ls1021a-lpuart"; -+ reg = <0x0 0x2950000 0x0 0x1000>; -+ interrupts = <0 48 0x4>; -+ clocks = <&clockgen 0 0>; -+ clock-names = "ipg"; -+ status = "disabled"; -+ }; -+ -+ lpuart1: serial@2960000 { -+ compatible = "fsl,ls1021a-lpuart"; -+ reg = <0x0 0x2960000 0x0 0x1000>; -+ interrupts = <0 49 0x4>; -+ clocks = <&clockgen 4 1>; -+ clock-names = "ipg"; -+ status = "disabled"; -+ }; -+ -+ lpuart2: serial@2970000 { -+ compatible = "fsl,ls1021a-lpuart"; -+ reg = <0x0 0x2970000 0x0 0x1000>; -+ interrupts = <0 50 0x4>; -+ clocks = <&clockgen 4 1>; -+ clock-names = "ipg"; -+ status = "disabled"; -+ }; -+ -+ lpuart3: serial@2980000 { -+ compatible = "fsl,ls1021a-lpuart"; -+ reg = <0x0 0x2980000 0x0 0x1000>; -+ interrupts = <0 51 0x4>; -+ clocks = <&clockgen 4 1>; -+ clock-names = "ipg"; -+ status = "disabled"; -+ }; -+ -+ lpuart4: serial@2990000 { -+ compatible = "fsl,ls1021a-lpuart"; -+ reg = <0x0 0x2990000 0x0 0x1000>; -+ interrupts = <0 52 0x4>; -+ clocks = <&clockgen 4 1>; -+ clock-names = "ipg"; -+ status = "disabled"; -+ }; -+ -+ lpuart5: serial@29a0000 { -+ compatible = "fsl,ls1021a-lpuart"; -+ reg = <0x0 0x29a0000 0x0 0x1000>; -+ interrupts = <0 53 0x4>; -+ clocks = <&clockgen 4 1>; -+ clock-names = "ipg"; -+ status = "disabled"; -+ }; -+ -+ ftm0: ftm0@29d0000 { -+ compatible = "fsl,ftm-alarm"; -+ reg = <0x0 0x29d0000 0x0 0x10000>; -+ interrupts = <0 86 0x4>; -+ big-endian; -+ rcpm-wakeup = <&rcpm 0x0 0x20000000>; -+ status = "okay"; -+ }; -+ -+ wdog0: wdog@2ad0000 { -+ compatible = "fsl,ls1046a-wdt", "fsl,imx21-wdt"; -+ reg = <0x0 0x2ad0000 0x0 0x10000>; -+ interrupts = <0 83 0x4>; -+ clocks = <&clockgen 4 1>; -+ clock-names = "wdog"; -+ big-endian; -+ }; -+ -+ edma0: edma@2c00000 { -+ #dma-cells = <2>; -+ compatible = "fsl,vf610-edma"; -+ reg = <0x0 0x2c00000 0x0 0x10000>, -+ <0x0 0x2c10000 0x0 0x10000>, -+ <0x0 0x2c20000 0x0 0x10000>; -+ interrupts = <0 103 0x4>, -+ <0 103 0x4>; -+ interrupt-names = "edma-tx", "edma-err"; -+ dma-channels = <32>; -+ big-endian; -+ clock-names = "dmamux0", "dmamux1"; -+ clocks = <&clockgen 4 1>, -+ <&clockgen 4 1>; -+ }; -+ -+ usb0: usb3@2f00000 { -+ compatible = "snps,dwc3"; -+ reg = <0x0 0x2f00000 0x0 0x10000>; -+ interrupts = <0 60 0x4>; -+ dr_mode = "host"; -+ configure-gfladj; -+ snps,dis_rxdet_inp3_quirk; -+ }; -+ -+ usb1: usb3@3000000 { -+ compatible = "snps,dwc3"; -+ reg = <0x0 0x3000000 0x0 0x10000>; -+ interrupts = <0 61 0x4>; -+ dr_mode = "host"; -+ configure-gfladj; -+ snps,dis_rxdet_inp3_quirk; -+ }; -+ -+ usb2: usb3@3100000 { -+ compatible = "snps,dwc3"; -+ reg = <0x0 0x3100000 0x0 0x10000>; -+ interrupts = <0 63 0x4>; -+ dr_mode = "host"; -+ configure-gfladj; -+ snps,dis_rxdet_inp3_quirk; -+ }; -+ -+ sata: sata@3200000 { -+ compatible = "fsl,ls1046a-ahci"; -+ reg = <0x0 0x3200000 0x0 0x10000>; -+ interrupts = <0 69 0x4>; -+ clocks = <&clockgen 4 1>; -+ }; -+ -+ qdma: qdma@8380000 { -+ compatible = "fsl,ls1046a-qdma", "fsl,ls1021a-qdma"; -+ reg = <0x0 0x838f000 0x0 0x11000 /* Controller regs */ -+ 0x0 0x83a0000 0x0 0x40000>; /* Block regs */ -+ interrupts = <0 152 0x4>, -+ <0 39 0x4>; -+ interrupt-names = "qdma-error", "qdma-queue"; -+ channels = <8>; -+ queues = <2>; -+ status-sizes = <64>; -+ queue-sizes = <64 64>; -+ big-endian; -+ }; -+ -+ msi1: msi-controller@1580000 { -+ 
compatible = "fsl,1s1046a-msi"; -+ reg = <0x0 0x1580000 0x0 0x10000>; -+ msi-controller; -+ interrupts = <0 116 0x4>, -+ <0 111 0x4>, -+ <0 112 0x4>, -+ <0 113 0x4>; -+ }; -+ -+ msi2: msi-controller@1590000 { -+ compatible = "fsl,1s1046a-msi"; -+ reg = <0x0 0x1590000 0x0 0x10000>; -+ msi-controller; -+ interrupts = <0 126 0x4>, -+ <0 121 0x4>, -+ <0 122 0x4>, -+ <0 123 0x4>; -+ }; -+ -+ msi3: msi-controller@15a0000 { -+ compatible = "fsl,1s1046a-msi"; -+ reg = <0x0 0x15a0000 0x0 0x10000>; -+ msi-controller; -+ interrupts = <0 160 0x4>, -+ <0 155 0x4>, -+ <0 156 0x4>, -+ <0 157 0x4>; -+ }; -+ -+ pcie@3400000 { -+ compatible = "fsl,ls1046a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */ -+ 0x40 0x00000000 0x0 0x00002000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 118 0x4>, /* controller interrupt */ -+ <0 117 0x4>; /* PME interrupt */ -+ interrupt-names = "intr", "pme"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&msi1>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 110 0x4>, -+ <0000 0 0 2 &gic 0 110 0x4>, -+ <0000 0 0 3 &gic 0 110 0x4>, -+ <0000 0 0 4 &gic 0 110 0x4>; -+ }; -+ -+ pcie@3500000 { -+ compatible = "fsl,ls1046a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */ -+ 0x48 0x00000000 0x0 0x00002000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 128 0x4>, -+ <0 127 0x4>; -+ interrupt-names = "intr", "pme"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ num-lanes = <2>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&msi2>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 120 0x4>, -+ <0000 0 0 2 &gic 0 120 0x4>, -+ <0000 0 0 3 &gic 0 120 0x4>, -+ <0000 0 0 4 &gic 0 120 0x4>; -+ }; -+ -+ pcie@3600000 { -+ compatible = "fsl,ls1046a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */ -+ 0x50 0x00000000 0x0 0x00002000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 162 0x4>, -+ <0 161 0x4>; -+ interrupt-names = "intr", "pme"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ num-lanes = <2>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&msi3>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 154 0x4>, -+ <0000 0 0 2 &gic 0 154 0x4>, -+ <0000 0 0 3 &gic 0 154 0x4>, -+ <0000 0 0 4 &gic 0 154 0x4>; -+ }; -+ }; -+ -+ fsl,dpaa { -+ compatible = "fsl,ls1046a-dpaa", "simple-bus", "fsl,dpaa"; -+ ethernet@0 { -+ compatible = "fsl,dpa-ethernet"; -+ fsl,fman-mac = <&fm1mac1>; -+ }; -+ ethernet@1 { -+ compatible = "fsl,dpa-ethernet"; -+ fsl,fman-mac = <&fm1mac2>; -+ }; -+ ethernet@2 { -+ compatible = "fsl,dpa-ethernet"; -+ fsl,fman-mac = <&fm1mac3>; -+ 
}; -+ ethernet@3 { -+ compatible = "fsl,dpa-ethernet"; -+ fsl,fman-mac = <&fm1mac4>; -+ }; -+ ethernet@4 { -+ compatible = "fsl,dpa-ethernet"; -+ fsl,fman-mac = <&fm1mac5>; -+ }; -+ ethernet@5 { -+ compatible = "fsl,dpa-ethernet"; -+ fsl,fman-mac = <&fm1mac6>; -+ }; -+ ethernet@8 { -+ compatible = "fsl,dpa-ethernet"; -+ fsl,fman-mac = <&fm1mac9>; -+ }; -+ }; -+ -+ qportals: qman-portals@500000000 { -+ ranges = <0x0 0x5 0x00000000 0x8000000>; -+ }; -+ bportals: bman-portals@508000000 { -+ ranges = <0x0 0x5 0x08000000 0x8000000>; -+ }; -+ reserved-memory { -+ #address-cells = <2>; -+ #size-cells = <2>; -+ ranges; -+ -+ bman_fbpr: bman-fbpr { -+ size = <0 0x1000000>; -+ alignment = <0 0x1000000>; -+ }; -+ qman_fqd: qman-fqd { -+ size = <0 0x800000>; -+ alignment = <0 0x800000>; -+ }; -+ qman_pfdr: qman-pfdr { -+ size = <0 0x2000000>; -+ alignment = <0 0x2000000>; -+ }; -+ }; -+}; -+ -+&fman0 { -+ /* offline - 1 */ -+ port@82000 { -+ fsl,qman-channel-id = <0x809>; -+ }; -+ -+ /* tx - 10g - 2 */ -+ port@a8000 { -+ fsl,qman-channel-id = <0x802>; -+ }; -+ /* tx - 10g - 3 */ -+ port@a9000 { -+ fsl,qman-channel-id = <0x803>; -+ }; -+ /* tx - 1g - 2 */ -+ port@aa000 { -+ fsl,qman-channel-id = <0x804>; -+ }; -+ /* tx - 1g - 3 */ -+ port@ab000 { -+ fsl,qman-channel-id = <0x805>; -+ }; -+ /* tx - 1g - 4 */ -+ port@ac000 { -+ fsl,qman-channel-id = <0x806>; -+ }; -+ /* tx - 1g - 5 */ -+ port@ad000 { -+ fsl,qman-channel-id = <0x807>; -+ }; -+ /* tx - 10g - 0 */ -+ port@b0000 { -+ fsl,qman-channel-id = <0x800>; -+ }; -+ /* tx - 10g - 1 */ -+ port@b1000 { -+ fsl,qman-channel-id = <0x801>; -+ }; -+ /* offline - 2 */ -+ port@83000 { -+ fsl,qman-channel-id = <0x80a>; -+ }; -+ /* offline - 3 */ -+ port@84000 { -+ fsl,qman-channel-id = <0x80b>; -+ }; -+ /* offline - 4 */ -+ port@85000 { -+ fsl,qman-channel-id = <0x80c>; -+ }; -+ /* offline - 5 */ -+ port@86000 { -+ fsl,qman-channel-id = <0x80d>; -+ }; -+ /* offline - 6 */ -+ port@87000 { -+ fsl,qman-channel-id = <0x80e>; -+ }; -+}; -+ -+&bman_fbpr { -+ compatible = "fsl,bman-fbpr"; -+ alloc-ranges = <0 0 0x10000 0>; -+}; -+ -+&qman_fqd { -+ compatible = "fsl,qman-fqd"; -+ alloc-ranges = <0 0 0x10000 0>; -+}; -+ -+&qman_pfdr { -+ compatible = "fsl,qman-pfdr"; -+ alloc-ranges = <0 0 0x10000 0>; -+}; -+ -+/include/ "qoriq-qman1-portals.dtsi" -+/include/ "qoriq-bman1-portals.dtsi" diff --git a/target/linux/layerscape/patches-4.4/3132-dts-ls1046a-add-LS1046ARDB-board-support.patch b/target/linux/layerscape/patches-4.4/3132-dts-ls1046a-add-LS1046ARDB-board-support.patch deleted file mode 100644 index 77b53d76b..000000000 --- a/target/linux/layerscape/patches-4.4/3132-dts-ls1046a-add-LS1046ARDB-board-support.patch +++ /dev/null @@ -1,557 +0,0 @@ -From feb12cb699adbac2d4619401c7ff4fcc2fc97b6c Mon Sep 17 00:00:00 2001 -From: Mingkai Hu -Date: Mon, 26 Sep 2016 12:33:42 +0800 -Subject: [PATCH 132/141] dts/ls1046a: add LS1046ARDB board support - -commit e95a28cfd9a392fe5dc189a9ae097bbaaccd1228 -[context adjustment] - -Signed-off-by: Mingkai Hu -Integrated-by: Zhao Qiang ---- - arch/arm64/boot/dts/freescale/Makefile | 1 + - arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts | 198 +++++++++++++++++++++ - arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi | 178 +++++++++++++----- - 3 files changed, 328 insertions(+), 49 deletions(-) - create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts - ---- a/arch/arm64/boot/dts/freescale/Makefile -+++ b/arch/arm64/boot/dts/freescale/Makefile -@@ -4,6 +4,7 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2 - 
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-rdb.dtb - dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-rdb.dtb - dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-frdm.dtb -+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-rdb.dtb - - always := $(dtb-y) - subdir-y := $(dts-dirs) ---- /dev/null -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts -@@ -0,0 +1,198 @@ -+/* -+ * Device Tree Include file for Freescale Layerscape-1046A family SoC. -+ * -+ * Copyright 2016, Freescale Semiconductor -+ * -+ * Mingkai Hu -+ * -+ * This file is dual-licensed: you can use it either under the terms -+ * of the GPLv2 or the X11 license, at your option. Note that this dual -+ * licensing only applies to this file, and not this project as a -+ * whole. -+ * -+ * a) This library is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License as -+ * published by the Free Software Foundation; either version 2 of the -+ * License, or (at your option) any later version. -+ * -+ * This library is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * Or, alternatively, -+ * -+ * b) Permission is hereby granted, free of charge, to any person -+ * obtaining a copy of this software and associated documentation -+ * files (the "Software"), to deal in the Software without -+ * restriction, including without limitation the rights to use, -+ * copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following -+ * conditions: -+ * -+ * The above copyright notice and this permission notice shall be -+ * included in all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -+ * OTHER DEALINGS IN THE SOFTWARE. 
-+ */ -+ -+/dts-v1/; -+#include "fsl-ls1046a.dtsi" -+ -+/ { -+ model = "LS1046A RDB Board"; -+ compatible = "fsl,ls1046a-rdb", "fsl,ls1046a"; -+ -+ aliases { -+ ethernet0 = &fm1mac3; -+ ethernet1 = &fm1mac4; -+ ethernet2 = &fm1mac5; -+ ethernet3 = &fm1mac6; -+ ethernet4 = &fm1mac9; -+ ethernet5 = &fm1mac10; -+ }; -+}; -+ -+&i2c0 { -+ status = "okay"; -+ ina220@40 { -+ compatible = "ti,ina220"; -+ reg = <0x40>; -+ shunt-resistor = <1000>; -+ }; -+ adt7461a@4c { -+ compatible = "adi,adt7461"; -+ reg = <0x4c>; -+ }; -+ eeprom@56 { -+ compatible = "at24,24c512"; -+ reg = <0x52>; -+ }; -+ eeprom@57 { -+ compatible = "at24,24c512"; -+ reg = <0x53>; -+ }; -+}; -+ -+&i2c3 { -+ status = "okay"; -+ rtc@51 { -+ compatible = "nxp,pcf2129"; -+ reg = <0x51>; -+ }; -+}; -+ -+&ifc { -+ status = "okay"; -+ #address-cells = <2>; -+ #size-cells = <1>; -+ /* NAND Flashe and CPLD on board */ -+ ranges = <0x0 0x0 0x0 0x7e800000 0x00010000 -+ 0x2 0x0 0x0 0x7fb00000 0x00000100>; -+ -+ nand@0,0 { -+ compatible = "fsl,ifc-nand"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ reg = <0x0 0x0 0x10000>; -+ }; -+ -+ cpld: board-control@2,0 { -+ compatible = "fsl,ls1046ardb-cpld"; -+ reg = <0x2 0x0 0x0000100>; -+ }; -+}; -+ -+&qspi { -+ num-cs = <2>; -+ bus-num = <0>; -+ status = "okay"; -+ -+ qflash0: s25fs128s@0 { -+ compatible = "spansion,m25p80"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ spi-max-frequency = <20000000>; -+ reg = <0>; -+ }; -+ -+ qflash1: s25fs128s@1 { -+ compatible = "spansion,m25p80"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ spi-max-frequency = <20000000>; -+ reg = <1>; -+ }; -+ -+}; -+ -+&duart0 { -+ status = "okay"; -+}; -+ -+&duart1 { -+ status = "okay"; -+}; -+ -+&fman0 { -+ ethernet@e4000 { -+ phy-handle = <&rgmii_phy1>; -+ phy-connection-type = "rgmii"; -+ }; -+ -+ ethernet@e6000 { -+ phy-handle = <&rgmii_phy2>; -+ phy-connection-type = "rgmii"; -+ }; -+ -+ ethernet@e8000 { -+ phy-handle = <&sgmii_phy1>; -+ phy-connection-type = "sgmii"; -+ }; -+ -+ ethernet@ea000 { -+ phy-handle = <&sgmii_phy2>; -+ phy-connection-type = "sgmii"; -+ }; -+ -+ ethernet@f0000 { /* 10GEC1 */ -+ phy-handle = <&aqr106_phy>; -+ phy-connection-type = "xgmii"; -+ }; -+ -+ ethernet@f2000 { /* 10GEC2 */ -+ fixed-link = <0 1 10000 0 0>; -+ phy-connection-type = "xgmii"; -+ }; -+ -+ mdio@fc000 { -+ rgmii_phy1: ethernet-phy@1 { -+ reg = <0x1>; -+ }; -+ rgmii_phy2: ethernet-phy@2 { -+ reg = <0x2>; -+ }; -+ sgmii_phy1: ethernet-phy@3 { -+ reg = <0x3>; -+ }; -+ sgmii_phy2: ethernet-phy@4 { -+ reg = <0x4>; -+ }; -+ }; -+ -+ mdio@fd000 { -+ aqr106_phy: ethernet-phy@1 { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ interrupts = <0 131 4>; -+ reg = <0x0>; -+ }; -+ }; -+}; ---- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi -@@ -51,13 +51,7 @@ - #size-cells = <2>; - - aliases { -- ethernet0 = &fm1mac1; -- ethernet1 = &fm1mac2; -- ethernet2 = &fm1mac3; -- ethernet3 = &fm1mac4; -- ethernet4 = &fm1mac5; -- ethernet5 = &fm1mac6; -- ethernet6 = &fm1mac9; -+ crypto = &crypto; - }; - - cpus { -@@ -70,6 +64,7 @@ - reg = <0x0>; - clocks = <&clockgen 1 0>; - next-level-cache = <&l2>; -+ cpu-idle-states = <&CPU_PH20>; - }; - - cpu1: cpu@1 { -@@ -78,6 +73,7 @@ - reg = <0x1>; - clocks = <&clockgen 1 0>; - next-level-cache = <&l2>; -+ cpu-idle-states = <&CPU_PH20>; - }; - - cpu2: cpu@2 { -@@ -86,6 +82,7 @@ - reg = <0x2>; - clocks = <&clockgen 1 0>; - next-level-cache = <&l2>; -+ cpu-idle-states = <&CPU_PH20>; - }; - - cpu3: cpu@3 { -@@ -94,6 +91,7 @@ - reg = <0x3>; - 
clocks = <&clockgen 1 0>; - next-level-cache = <&l2>; -+ cpu-idle-states = <&CPU_PH20>; - }; - - l2: l2-cache { -@@ -101,6 +99,19 @@ - }; - }; - -+ idle-states { -+ entry-method = "arm,psci"; -+ -+ CPU_PH20: cpu-ph20 { -+ compatible = "arm,idle-state"; -+ idle-state-name = "PH20"; -+ arm,psci-suspend-param = <0x00010000>; -+ entry-latency-us = <1000>; -+ exit-latency-us = <1000>; -+ min-residency-us = <3000>; -+ }; -+ }; -+ - memory@80000000 { - device_type = "memory"; - reg = <0x0 0x80000000 0 0x80000000>; -@@ -193,6 +204,49 @@ - bus-width = <4>; - }; - -+ crypto: crypto@1700000 { -+ compatible = "fsl,sec-v5.4", "fsl,sec-v5.0", -+ "fsl,sec-v4.0"; -+ fsl,sec-era = <8>; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ ranges = <0x0 0x00 0x1700000 0x100000>; -+ reg = <0x00 0x1700000 0x0 0x100000>; -+ interrupts = <0 75 0x4>; -+ -+ sec_jr0: jr@10000 { -+ compatible = "fsl,sec-v5.4-job-ring", -+ "fsl,sec-v5.0-job-ring", -+ "fsl,sec-v4.0-job-ring"; -+ reg = <0x10000 0x10000>; -+ interrupts = <0 71 0x4>; -+ }; -+ -+ sec_jr1: jr@20000 { -+ compatible = "fsl,sec-v5.4-job-ring", -+ "fsl,sec-v5.0-job-ring", -+ "fsl,sec-v4.0-job-ring"; -+ reg = <0x20000 0x10000>; -+ interrupts = <0 72 0x4>; -+ }; -+ -+ sec_jr2: jr@30000 { -+ compatible = "fsl,sec-v5.4-job-ring", -+ "fsl,sec-v5.0-job-ring", -+ "fsl,sec-v4.0-job-ring"; -+ reg = <0x30000 0x10000>; -+ interrupts = <0 73 0x4>; -+ }; -+ -+ sec_jr3: jr@40000 { -+ compatible = "fsl,sec-v5.4-job-ring", -+ "fsl,sec-v5.0-job-ring", -+ "fsl,sec-v4.0-job-ring"; -+ reg = <0x40000 0x10000>; -+ interrupts = <0 74 0x4>; -+ }; -+ }; -+ - qman: qman@1880000 { - compatible = "fsl,qman"; - reg = <0x00 0x1880000 0x0 0x10000>; -@@ -490,6 +544,19 @@ - fsl,qman-channel-id = <0x800>; - }; - -+ fman0_10g_rx1: port@91000 { -+ cell-index = <1>; -+ compatible = "fsl,fman-port-10g-rx"; -+ reg = <0x91000 0x1000>; -+ }; -+ -+ fman0_10g_tx1: port@b1000 { -+ cell-index = <1>; -+ compatible = "fsl,fman-port-10g-tx"; -+ reg = <0xb1000 0x1000>; -+ fsl,qman-channel-id = <0x801>; -+ }; -+ - fm1mac9: ethernet@f0000 { - cell-index = <0>; - compatible = "fsl,fman-memac"; -@@ -497,6 +564,13 @@ - fsl,port-handles = <&fman0_10g_rx0 &fman0_10g_tx0>; - }; - -+ fm1mac10: ethernet@f2000 { -+ cell-index = <1>; -+ compatible = "fsl,fman-memac"; -+ reg = <0xf2000 0x1000>; -+ fsl,port-handles = <&fman0_10g_rx1 &fman0_10g_tx1>; -+ }; -+ - mdio@f1000 { - #address-cells = <1>; - #size-cells = <0>; -@@ -504,6 +578,13 @@ - reg = <0xf1000 0x1000>; - }; - -+ mdio@f3000 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0xf3000 0x1000>; -+ }; -+ - ptp_timer0: rtc@fe000 { - compatible = "fsl,fman-rtc"; - reg = <0xfe000 0x1000>; -@@ -657,7 +738,7 @@ - compatible = "fsl,ls1021a-lpuart"; - reg = <0x0 0x2950000 0x0 0x1000>; - interrupts = <0 48 0x4>; -- clocks = <&clockgen 0 0>; -+ clocks = <&clockgen 4 0>; - clock-names = "ipg"; - status = "disabled"; - }; -@@ -712,7 +793,7 @@ - reg = <0x0 0x29d0000 0x0 0x10000>; - interrupts = <0 86 0x4>; - big-endian; -- rcpm-wakeup = <&rcpm 0x0 0x20000000>; -+ rcpm-wakeup = <&rcpm 0x00020000 0x0>; - status = "okay"; - }; - -@@ -789,34 +870,34 @@ - big-endian; - }; - -- msi1: msi-controller@1580000 { -- compatible = "fsl,1s1046a-msi"; -- reg = <0x0 0x1580000 0x0 0x10000>; -+ msi: msi-controller@1580000 { -+ compatible = "fsl,ls1046a-msi"; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ ranges; - msi-controller; -- interrupts = <0 116 0x4>, -- <0 111 0x4>, -- <0 112 0x4>, -- <0 113 0x4>; -- }; - -- msi2: msi-controller@1590000 { -- 
compatible = "fsl,1s1046a-msi"; -- reg = <0x0 0x1590000 0x0 0x10000>; -- msi-controller; -- interrupts = <0 126 0x4>, -- <0 121 0x4>, -- <0 122 0x4>, -- <0 123 0x4>; -- }; -- -- msi3: msi-controller@15a0000 { -- compatible = "fsl,1s1046a-msi"; -- reg = <0x0 0x15a0000 0x0 0x10000>; -- msi-controller; -- interrupts = <0 160 0x4>, -- <0 155 0x4>, -- <0 156 0x4>, -- <0 157 0x4>; -+ msi-bank@1580000 { -+ reg = <0x0 0x1580000 0x0 0x10000>; -+ interrupts = <0 116 0x4>, -+ <0 111 0x4>, -+ <0 112 0x4>, -+ <0 113 0x4>; -+ }; -+ msi-bank@1590000 { -+ reg = <0x0 0x1590000 0x0 0x10000>; -+ interrupts = <0 126 0x4>, -+ <0 121 0x4>, -+ <0 122 0x4>, -+ <0 123 0x4>; -+ }; -+ msi-bank@15a0000 { -+ reg = <0x0 0x15a0000 0x0 0x10000>; -+ interrupts = <0 160 0x4>, -+ <0 155 0x4>, -+ <0 156 0x4>, -+ <0 157 0x4>; -+ }; - }; - - pcie@3400000 { -@@ -826,15 +907,16 @@ - reg-names = "regs", "config"; - interrupts = <0 118 0x4>, /* controller interrupt */ - <0 117 0x4>; /* PME interrupt */ -- interrupt-names = "intr", "pme"; -+ interrupt-names = "aer"; - #address-cells = <3>; - #size-cells = <2>; - device_type = "pci"; -+ dma-coherent; - num-lanes = <4>; - bus-range = <0x0 0xff>; - ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ - 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -- msi-parent = <&msi1>; -+ msi-parent = <&msi>; - #interrupt-cells = <1>; - interrupt-map-mask = <0 0 0 7>; - interrupt-map = <0000 0 0 1 &gic 0 110 0x4>, -@@ -850,15 +932,16 @@ - reg-names = "regs", "config"; - interrupts = <0 128 0x4>, - <0 127 0x4>; -- interrupt-names = "intr", "pme"; -+ interrupt-names = "aer"; - #address-cells = <3>; - #size-cells = <2>; - device_type = "pci"; -+ dma-coherent; - num-lanes = <2>; - bus-range = <0x0 0xff>; - ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ - 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -- msi-parent = <&msi2>; -+ msi-parent = <&msi>; - #interrupt-cells = <1>; - interrupt-map-mask = <0 0 0 7>; - interrupt-map = <0000 0 0 1 &gic 0 120 0x4>, -@@ -874,15 +957,16 @@ - reg-names = "regs", "config"; - interrupts = <0 162 0x4>, - <0 161 0x4>; -- interrupt-names = "intr", "pme"; -+ interrupt-names = "aer"; - #address-cells = <3>; - #size-cells = <2>; - device_type = "pci"; -+ dma-coherent; - num-lanes = <2>; - bus-range = <0x0 0xff>; - ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */ - 0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -- msi-parent = <&msi3>; -+ msi-parent = <&msi>; - #interrupt-cells = <1>; - interrupt-map-mask = <0 0 0 7>; - interrupt-map = <0000 0 0 1 &gic 0 154 0x4>, -@@ -894,14 +978,6 @@ - - fsl,dpaa { - compatible = "fsl,ls1046a-dpaa", "simple-bus", "fsl,dpaa"; -- ethernet@0 { -- compatible = "fsl,dpa-ethernet"; -- fsl,fman-mac = <&fm1mac1>; -- }; -- ethernet@1 { -- compatible = "fsl,dpa-ethernet"; -- fsl,fman-mac = <&fm1mac2>; -- }; - ethernet@2 { - compatible = "fsl,dpa-ethernet"; - fsl,fman-mac = <&fm1mac3>; -@@ -922,6 +998,10 @@ - compatible = "fsl,dpa-ethernet"; - fsl,fman-mac = <&fm1mac9>; - }; -+ ethernet@9 { -+ compatible = "fsl,dpa-ethernet"; -+ fsl,fman-mac = <&fm1mac10>; -+ }; - }; - - qportals: qman-portals@500000000 { diff --git a/target/linux/layerscape/patches-4.4/3133-ls1046ardb-add-ITS-file.patch b/target/linux/layerscape/patches-4.4/3133-ls1046ardb-add-ITS-file.patch deleted file mode 100644 index 7461b928d..000000000 --- 
a/target/linux/layerscape/patches-4.4/3133-ls1046ardb-add-ITS-file.patch +++ /dev/null @@ -1,69 +0,0 @@ -From 1a831d848c88f1bb9e599abc641a201be6b03ef7 Mon Sep 17 00:00:00 2001 -From: Mingkai Hu -Date: Mon, 18 Apr 2016 15:38:24 +0800 -Subject: [PATCH 133/141] ls1046ardb: add ITS file - -Signed-off-by: Mingkai Hu ---- - kernel-ls1046a-rdb.its | 55 ++++++++++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 55 insertions(+) - create mode 100644 kernel-ls1046a-rdb.its - ---- /dev/null -+++ b/kernel-ls1046a-rdb.its -@@ -0,0 +1,55 @@ -+/* -+ * Copyright (C) 2016, Freescale Semiconductor -+ * -+ * Mingkai Hu -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+ -+/dts-v1/; -+ -+/ { -+ description = "Image file for the LS1046A Linux Kernel"; -+ #address-cells = <1>; -+ -+ images { -+ kernel@1 { -+ description = "ARM64 Linux kernel"; -+ data = /incbin/("./arch/arm64/boot/Image.gz"); -+ type = "kernel"; -+ arch = "arm64"; -+ os = "linux"; -+ compression = "gzip"; -+ load = <0x80080000>; -+ entry = <0x80080000>; -+ }; -+ fdt@1 { -+ description = "Flattened Device Tree blob"; -+ data = /incbin/("./arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dtb"); -+ type = "flat_dt"; -+ arch = "arm64"; -+ compression = "none"; -+ load = <0x90000000>; -+ }; -+ ramdisk@1 { -+ description = "LS1046 Ramdisk"; -+ data = /incbin/("./fsl-image-core-ls1046ardb-be.ext2.gz"); -+ type = "ramdisk"; -+ arch = "arm64"; -+ os = "linux"; -+ compression = "gzip"; -+ }; -+ }; -+ -+ configurations { -+ default = "config@1"; -+ config@1 { -+ description = "Boot Linux kernel"; -+ kernel = "kernel@1"; -+ fdt = "fdt@1"; -+ ramdisk = "ramdisk@1"; -+ }; -+ }; -+}; diff --git a/target/linux/layerscape/patches-4.4/3135-arm64-Add-DTS-support-for-FSL-s-LS1088ARDB.patch b/target/linux/layerscape/patches-4.4/3135-arm64-Add-DTS-support-for-FSL-s-LS1088ARDB.patch deleted file mode 100644 index c8aaeee2c..000000000 --- a/target/linux/layerscape/patches-4.4/3135-arm64-Add-DTS-support-for-FSL-s-LS1088ARDB.patch +++ /dev/null @@ -1,790 +0,0 @@ -From cbacf87fa6fb262c98033405f15697798c3a9c5d Mon Sep 17 00:00:00 2001 -From: Zhao Qiang -Date: Sun, 9 Oct 2016 14:31:50 +0800 -Subject: [PATCH 135/141] arm64: Add DTS support for FSL's LS1088ARDB - -Signed-off-by: Zhao Qiang ---- - arch/arm64/boot/dts/freescale/Makefile | 1 + - arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts | 203 ++++++++ - arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi | 557 +++++++++++++++++++++ - 3 files changed, 761 insertions(+) - create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts - create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi - ---- a/arch/arm64/boot/dts/freescale/Makefile -+++ b/arch/arm64/boot/dts/freescale/Makefile -@@ -5,6 +5,7 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1 - dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-rdb.dtb - dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-frdm.dtb - dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-rdb.dtb -+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1088a-rdb.dtb - - always := $(dtb-y) - subdir-y := $(dts-dirs) ---- /dev/null -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts -@@ -0,0 +1,203 @@ -+/* -+ * Device Tree file for Freescale LS1088a RDB board -+ * -+ * Copyright (C) 2015, Freescale Semiconductor -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. 
This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+ -+/dts-v1/; -+ -+#include "fsl-ls1088a.dtsi" -+ -+/ { -+ model = "Freescale Layerscape 1088a RDB Board"; -+ compatible = "fsl,ls1088a-rdb", "fsl,ls1088a"; -+}; -+ -+&esdhc { -+ status = "okay"; -+}; -+ -+&ifc { -+ status = "disabled"; -+}; -+ -+&ftm0 { -+ status = "okay"; -+}; -+ -+&i2c0 { -+ status = "okay"; -+ pca9547@77 { -+ compatible = "philips,pca9547"; -+ reg = <0x77>; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ i2c@2 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x2>; -+ -+ ina220@40 { -+ compatible = "ti,ina220"; -+ reg = <0x40>; -+ shunt-resistor = <1000>; -+ }; -+ }; -+ -+ i2c@3 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x3>; -+ -+ rtc@51 { -+ compatible = "nxp,pcf2129"; -+ reg = <0x51>; -+ /* IRQ10_B */ -+ interrupts = <0 150 0x4>; -+ }; -+ -+ adt7461a@4c { -+ compatible = "adt7461a"; -+ reg = <0x4c>; -+ }; -+ }; -+ }; -+}; -+ -+&i2c1 { -+ status = "disabled"; -+}; -+ -+&i2c2 { -+ status = "disabled"; -+}; -+ -+&i2c3 { -+ status = "disabled"; -+}; -+ -+&dspi { -+ status = "disabled"; -+}; -+ -+&qspi { -+ status = "okay"; -+ qflash0: s25fs512s@0 { -+ compatible = "spansion,m25p80"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ spi-max-frequency = <20000000>; -+ reg = <0>; -+ }; -+ -+ qflash1: s25fs512s@1 { -+ compatible = "spansion,m25p80"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ spi-max-frequency = <20000000>; -+ reg = <1>; -+ }; -+}; -+ -+&sata0 { -+ status = "okay"; -+}; -+ -+&usb0 { -+ status = "okay"; -+}; -+ -+&usb1 { -+ status = "okay"; -+}; -+ -+&serial0 { -+ status = "okay"; -+}; -+ -+&serial1 { -+ status = "okay"; -+}; -+ -+&emdio1 { -+ /* Freescale F104 PHY1 */ -+ mdio1_phy1: emdio1_phy@1 { -+ reg = <0x1c>; -+ phy-connection-type = "qsgmii"; -+ }; -+ mdio1_phy2: emdio1_phy@2 { -+ reg = <0x1d>; -+ phy-connection-type = "qsgmii"; -+ }; -+ mdio1_phy3: emdio1_phy@3 { -+ reg = <0x1e>; -+ phy-connection-type = "qsgmii"; -+ }; -+ mdio1_phy4: emdio1_phy@4 { -+ reg = <0x1f>; -+ phy-connection-type = "qsgmii"; -+ }; -+ /* F104 PHY2 */ -+ mdio1_phy5: emdio1_phy@5 { -+ reg = <0x0c>; -+ phy-connection-type = "qsgmii"; -+ }; -+ mdio1_phy6: emdio1_phy@6 { -+ reg = <0x0d>; -+ phy-connection-type = "qsgmii"; -+ }; -+ mdio1_phy7: emdio1_phy@7 { -+ reg = <0x0e>; -+ phy-connection-type = "qsgmii"; -+ }; -+ mdio1_phy8: emdio1_phy@8 { -+ reg = <0x0f>; -+ phy-connection-type = "qsgmii"; -+ }; -+}; -+ -+&emdio2 { -+ /* Aquantia AQR105 10G PHY */ -+ mdio2_phy1: emdio2_phy@1 { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ reg = <0x0>; -+ phy-connection-type = "xfi"; -+ }; -+}; -+ -+/* DPMAC connections to external PHYs -+ * based on LS1088A RM RevC - $24.1.2 SerDes Options -+ */ -+/* DPMAC1 is 10G SFP+, fixed link */ -+&dpmac2 { -+ phy-handle = <&mdio2_phy1>; -+}; -+&dpmac3 { -+ phy-handle = <&mdio1_phy5>; -+}; -+&dpmac4 { -+ phy-handle = <&mdio1_phy6>; -+}; -+&dpmac5 { -+ phy-handle = <&mdio1_phy7>; -+}; -+&dpmac6 { -+ phy-handle = <&mdio1_phy8>; -+}; -+&dpmac7 { -+ phy-handle = <&mdio1_phy1>; -+}; -+&dpmac8 { -+ phy-handle = <&mdio1_phy2>; -+}; -+&dpmac9 { -+ phy-handle = <&mdio1_phy3>; -+}; -+&dpmac10 { -+ phy-handle = <&mdio1_phy4>; -+}; ---- /dev/null -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi -@@ -0,0 +1,557 @@ -+/* -+ * Device Tree Include file for Freescale Layerscape-1088A family SoC. 
-+ * -+ * Copyright (C) 2015, Freescale Semiconductor -+ * -+ */ -+ -+/memreserve/ 0x80000000 0x00010000; -+ -+/ { -+ compatible = "fsl,ls1088a"; -+ interrupt-parent = <&gic>; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ -+ cpus { -+ #address-cells = <2>; -+ #size-cells = <0>; -+ -+ /* We have 2 clusters having 4 Cortex-A57 cores each */ -+ cpu0: cpu@0 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a53"; -+ reg = <0x0 0x0>; -+ clocks = <&clockgen 1 0>; -+ }; -+ -+ cpu1: cpu@1 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a53"; -+ reg = <0x0 0x1>; -+ clocks = <&clockgen 1 0>; -+ }; -+ -+ cpu2: cpu@2 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a53"; -+ reg = <0x0 0x2>; -+ clocks = <&clockgen 1 0>; -+ }; -+ -+ cpu3: cpu@3 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a53"; -+ reg = <0x0 0x3>; -+ clocks = <&clockgen 1 0>; -+ }; -+ -+ cpu4: cpu@100 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a53"; -+ reg = <0x0 0x100>; -+ clocks = <&clockgen 1 1>; -+ }; -+ -+ cpu5: cpu@101 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a53"; -+ reg = <0x0 0x101>; -+ clocks = <&clockgen 1 1>; -+ }; -+ -+ cpu6: cpu@102 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a53"; -+ reg = <0x0 0x102>; -+ clocks = <&clockgen 1 1>; -+ }; -+ -+ cpu7: cpu@103 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a53"; -+ reg = <0x0 0x103>; -+ clocks = <&clockgen 1 1>; -+ }; -+ }; -+ -+ pmu { -+ compatible = "arm,armv8-pmuv3"; -+ interrupts = <1 7 0x8>; /* PMU PPI, Level low type */ -+ }; -+ -+ gic: interrupt-controller@6000000 { -+ compatible = "arm,gic-v3"; -+ reg = <0x0 0x06000000 0 0x10000>, /* GIC Dist */ -+ <0x0 0x06100000 0 0x100000>, /* GICR(RD_base+SGI_base)*/ -+ <0x0 0x0c0c0000 0 0x2000>, /* GICC */ -+ <0x0 0x0c0d0000 0 0x1000>, /* GICH */ -+ <0x0 0x0c0e0000 0 0x20000>; /* GICV */ -+ #interrupt-cells = <3>; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ ranges; -+ interrupt-controller; -+ interrupts = <1 9 0x4>; -+ -+ its: gic-its@6020000 { -+ compatible = "arm,gic-v3-its"; -+ msi-controller; -+ reg = <0x0 0x6020000 0 0x20000>; -+ }; -+ }; -+ -+ sysclk: sysclk { -+ compatible = "fixed-clock"; -+ #clock-cells = <0>; -+ clock-frequency = <100000000>; -+ clock-output-names = "sysclk"; -+ }; -+ -+ clockgen: clocking@1300000 { -+ compatible = "fsl,ls2080a-clockgen", "fsl,ls1088a-clockgen"; -+ reg = <0 0x1300000 0 0xa0000>; -+ #clock-cells = <2>; -+ clocks = <&sysclk>; -+ }; -+ -+ serial0: serial@21c0500 { -+ device_type = "serial"; -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x0 0x21c0500 0x0 0x100>; -+ clocks = <&clockgen 4 3>; -+ interrupts = <0 32 0x4>; /* Level high type */ -+ }; -+ -+ serial1: serial@21c0600 { -+ device_type = "serial"; -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x0 0x21c0600 0x0 0x100>; -+ clocks = <&clockgen 4 3>; -+ interrupts = <0 32 0x4>; /* Level high type */ -+ }; -+ -+ gpio0: gpio@2300000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2300000 0x0 0x10000>; -+ interrupts = <0 36 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio1: gpio@2310000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2310000 0x0 0x10000>; -+ interrupts = <0 36 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio2: gpio@2320000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2320000 0x0 0x10000>; -+ interrupts = <0 37 
0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio3: gpio@2330000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2330000 0x0 0x10000>; -+ interrupts = <0 37 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ /* TODO: WRIOP (CCSR?) */ -+ emdio1: mdio@0x8B96000 { /* WRIOP0: 0x8B8_0000, E-MDIO1: 0x1_6000 */ -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0x0 0x8B96000 0x0 0x1000>; -+ device_type = "mdio"; -+ little-endian; /* force the driver in LE mode */ -+ -+ /* Not necessary on the QDS, but needed on the RDB */ -+ #address-cells = <1>; -+ #size-cells = <0>; -+ }; -+ -+ emdio2: mdio@0x8B97000 { /* WRIOP0: 0x8B8_0000, E-MDIO2: 0x1_7000 */ -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0x0 0x8B97000 0x0 0x1000>; -+ device_type = "mdio"; -+ little-endian; /* force the driver in LE mode */ -+ -+ #address-cells = <1>; -+ #size-cells = <0>; -+ }; -+ -+ ifc: ifc@2240000 { -+ compatible = "fsl,ifc", "simple-bus"; -+ reg = <0x0 0x2240000 0x0 0x20000>; -+ interrupts = <0 21 0x4>; /* Level high type */ -+ little-endian; -+ #address-cells = <2>; -+ #size-cells = <1>; -+ -+ ranges = <0 0 0x5 0x80000000 0x08000000 -+ 2 0 0x5 0x30000000 0x00010000 -+ 3 0 0x5 0x20000000 0x00010000>; -+ }; -+ -+ esdhc: esdhc@2140000 { -+ compatible = "fsl,ls2080a-esdhc", "fsl,ls1088a-esdhc", "fsl,esdhc"; -+ reg = <0x0 0x2140000 0x0 0x10000>; -+ interrupts = <0 28 0x4>; /* Level high type */ -+ clock-frequency = <0>; -+ voltage-ranges = <1800 1800 3300 3300>; -+ sdhci,auto-cmd12; -+ little-endian; -+ bus-width = <4>; -+ }; -+ -+ ftm0: ftm0@2800000 { -+ compatible = "fsl,ftm-alarm"; -+ reg = <0x0 0x2800000 0x0 0x10000>; -+ interrupts = <0 44 4>; -+ }; -+ -+ reset: reset@1E60000 { -+ compatible = "fsl,ls-reset"; -+ reg = <0x0 0x1E60000 0x0 0x10000>; -+ }; -+ -+ dspi: dspi@2100000 { -+ compatible = "fsl,ls2085a-dspi", "fsl,ls1088a-dspi"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2100000 0x0 0x10000>; -+ interrupts = <0 26 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>; -+ clock-names = "dspi"; -+ spi-num-chipselects = <5>; -+ bus-num = <0>; -+ }; -+ -+ i2c0: i2c@2000000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2000000 0x0 0x10000>; -+ interrupts = <0 34 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ i2c1: i2c@2010000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2010000 0x0 0x10000>; -+ interrupts = <0 34 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ i2c2: i2c@2020000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2020000 0x0 0x10000>; -+ interrupts = <0 35 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ i2c3: i2c@2030000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2030000 0x0 0x10000>; -+ interrupts = <0 35 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ qspi: quadspi@20c0000 { -+ compatible = "fsl,ls2080a-qspi", "fsl,ls1088a-qspi"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x20c0000 0x0 0x10000>, -+ <0x0 0x20000000 0x0 0x10000000>; -+ reg-names = "QuadSPI", "QuadSPI-memory"; 
-+ interrupts = <0 25 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "qspi_en", "qspi"; -+ }; -+ -+ pcie@3400000 { -+ compatible = "fsl,ls1088a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */ -+ 0x20 0x00000000 0x0 0x00002000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 108 0x4>; /* aer interrupt */ -+ interrupt-names = "aer"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ dma-coherent; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x20 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 109 4>, -+ <0000 0 0 2 &gic 0 0 0 110 4>, -+ <0000 0 0 3 &gic 0 0 0 111 4>, -+ <0000 0 0 4 &gic 0 0 0 112 4>; -+ }; -+ pcie@3500000 { -+ compatible = "fsl,ls1088a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */ -+ 0x28 0x00000000 0x0 0x00002000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 113 0x4>; /* aer interrupt */ -+ interrupt-names = "aer"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ dma-coherent; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x28 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x28 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 114 4>, -+ <0000 0 0 2 &gic 0 0 0 115 4>, -+ <0000 0 0 3 &gic 0 0 0 116 4>, -+ <0000 0 0 4 &gic 0 0 0 117 4>; -+ }; -+ -+ pcie@3600000 { -+ compatible = "fsl,ls1088a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */ -+ 0x30 0x00000000 0x0 0x00002000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 118 0x4>; /* aer interrupt */ -+ interrupt-names = "aer"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ dma-coherent; -+ num-lanes = <8>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x30 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 119 4>, -+ <0000 0 0 2 &gic 0 0 0 120 4>, -+ <0000 0 0 3 &gic 0 0 0 121 4>, -+ <0000 0 0 4 &gic 0 0 0 122 4>; -+ }; -+ -+ sata0: sata@3200000 { -+ compatible = "fsl,ls1088a-ahci", "fsl,ls1043a-ahci"; -+ reg = <0x0 0x3200000 0x0 0x10000>; -+ interrupts = <0 133 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ usb0: usb3@3100000 { -+ compatible = "snps,dwc3"; -+ reg = <0x0 0x3100000 0x0 0x10000>; -+ interrupts = <0 80 0x4>; /* Level high type */ -+ dr_mode = "host"; -+ configure-gfladj; -+ snps,dis_rxdet_inp3_quirk; -+ }; -+ -+ usb1: usb3@3110000 { -+ compatible = "snps,dwc3"; -+ reg = <0x0 0x3110000 0x0 0x10000>; -+ interrupts = <0 81 0x4>; /* Level high type */ -+ dr_mode = "host"; -+ configure-gfladj; -+ snps,dis_rxdet_inp3_quirk; -+ }; -+ -+ smmu: iommu@5000000 { -+ compatible = "arm,mmu-500"; -+ reg = <0 0x5000000 0 0x800000>; -+ #global-interrupts = <12>; -+ interrupts = <0 
13 4>, /* global secure fault */ -+ <0 14 4>, /* combined secure interrupt */ -+ <0 15 4>, /* global non-secure fault */ -+ <0 16 4>, /* combined non-secure interrupt */ -+ /* performance counter interrupts 0-7 */ -+ <0 211 4>, -+ <0 212 4>, -+ <0 213 4>, -+ <0 214 4>, -+ <0 215 4>, -+ <0 216 4>, -+ <0 217 4>, -+ <0 218 4>, -+ /* per context interrupt, 64 interrupts */ -+ <0 146 4>, -+ <0 147 4>, -+ <0 148 4>, -+ <0 149 4>, -+ <0 150 4>, -+ <0 151 4>, -+ <0 152 4>, -+ <0 153 4>, -+ <0 154 4>, -+ <0 155 4>, -+ <0 156 4>, -+ <0 157 4>, -+ <0 158 4>, -+ <0 159 4>, -+ <0 160 4>, -+ <0 161 4>, -+ <0 162 4>, -+ <0 163 4>, -+ <0 164 4>, -+ <0 165 4>, -+ <0 166 4>, -+ <0 167 4>, -+ <0 168 4>, -+ <0 169 4>, -+ <0 170 4>, -+ <0 171 4>, -+ <0 172 4>, -+ <0 173 4>, -+ <0 174 4>, -+ <0 175 4>, -+ <0 176 4>, -+ <0 177 4>, -+ <0 178 4>, -+ <0 179 4>, -+ <0 180 4>, -+ <0 181 4>, -+ <0 182 4>, -+ <0 183 4>, -+ <0 184 4>, -+ <0 185 4>, -+ <0 186 4>, -+ <0 187 4>, -+ <0 188 4>, -+ <0 189 4>, -+ <0 190 4>, -+ <0 191 4>, -+ <0 192 4>, -+ <0 193 4>, -+ <0 194 4>, -+ <0 195 4>, -+ <0 196 4>, -+ <0 197 4>, -+ <0 198 4>, -+ <0 199 4>, -+ <0 200 4>, -+ <0 201 4>, -+ <0 202 4>, -+ <0 203 4>, -+ <0 204 4>, -+ <0 205 4>, -+ <0 206 4>, -+ <0 207 4>, -+ <0 208 4>, -+ <0 209 4>; -+ mmu-masters = <&fsl_mc 0x300 0>; -+ }; -+ -+ timer { -+ compatible = "arm,armv8-timer"; -+ interrupts = <1 13 0x1>,/*Phy Secure PPI, edge triggered*/ -+ <1 14 0x1>, /*Phy Non-Secure PPI, edge triggered*/ -+ <1 11 0x1>, /*Virtual PPI, edge triggered */ -+ <1 10 0x1>; /*Hypervisor PPI, edge triggered */ -+ }; -+ -+ fsl_mc: fsl-mc@80c000000 { -+ compatible = "fsl,qoriq-mc"; -+ #stream-id-cells = <2>; -+ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */ -+ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */ -+ msi-parent = <&its>; -+ #address-cells = <3>; -+ #size-cells = <1>; -+ -+ /* -+ * Region type 0x0 - MC portals -+ * Region type 0x1 - QBMAN portals -+ */ -+ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000 -+ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>; -+ -+ dpmacs { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ dpmac1: dpmac@1 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <1>; -+ }; -+ dpmac2: dpmac@2 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <2>; -+ }; -+ dpmac3: dpmac@3 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <3>; -+ }; -+ dpmac4: dpmac@4 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <4>; -+ }; -+ dpmac5: dpmac@5 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <5>; -+ }; -+ dpmac6: dpmac@6 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <6>; -+ }; -+ dpmac7: dpmac@7 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <7>; -+ }; -+ dpmac8: dpmac@8 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <8>; -+ }; -+ dpmac9: dpmac@9 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <9>; -+ }; -+ dpmac10: dpmac@10 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xa>; -+ }; -+ }; -+ }; -+ -+ -+ memory@80000000 { -+ device_type = "memory"; -+ reg = <0x00000000 0x80000000 0 0x80000000>; -+ /* DRAM space 1 - 2 GB DRAM */ -+ }; -+}; diff --git a/target/linux/layerscape/patches-4.4/3139-ls1088ardb-add-ITS-file.patch b/target/linux/layerscape/patches-4.4/3139-ls1088ardb-add-ITS-file.patch deleted file mode 100644 index 8b66c3e57..000000000 --- a/target/linux/layerscape/patches-4.4/3139-ls1088ardb-add-ITS-file.patch +++ /dev/null @@ -1,69 +0,0 @@ -From caaab508dc2ba749d8394b5934353b1c47f37d75 Mon Sep 17 00:00:00 2001 -From: Zhao Qiang -Date: Sun, 9 Oct 2016 15:14:16 +0800 -Subject: [PATCH 139/141] ls1088ardb: add ITS file 
- -Signed-off-by: Zhao Qiang ---- - kernel-ls1088a-rdb.its | 55 ++++++++++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 55 insertions(+) - create mode 100644 kernel-ls1088a-rdb.its - ---- /dev/null -+++ b/kernel-ls1088a-rdb.its -@@ -0,0 +1,55 @@ -+/* -+ * Copyright (C) 2015, Freescale Semiconductor -+ * -+ * Raghav Dogra -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+ -+/dts-v1/; -+ -+/ { -+ description = "Simulator Image file for the LS1088A Linux Kernel"; -+ #address-cells = <1>; -+ -+ images { -+ kernel@1 { -+ description = "ARM64 Linux kernel"; -+ data = /incbin/("./arch/arm64/boot/Image.gz"); -+ type = "kernel"; -+ arch = "arm64"; -+ os = "linux"; -+ compression = "gzip"; -+ load = <0x80080000>; -+ entry = <0x80080000>; -+ }; -+ fdt@1 { -+ description = "Flattened Device Tree blob"; -+ data = /incbin/("./arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dtb"); -+ type = "flat_dt"; -+ arch = "arm64"; -+ compression = "none"; -+ load = <0x90000000>; -+ }; -+ ramdisk@1 { -+ description = "LS2 Ramdisk"; -+ data = /incbin/("./fsl-image-core-ls1088ardb-be.ext2.gz"); -+ type = "ramdisk"; -+ arch = "arm64"; -+ os = "linux"; -+ compression = "none"; -+ }; -+ }; -+ -+ configurations { -+ default = "config@1"; -+ config@1 { -+ description = "Boot Linux kernel"; -+ kernel = "kernel@1"; -+ fdt = "fdt@1"; -+ ramdisk = "ramdisk@1"; -+ }; -+ }; -+}; diff --git a/target/linux/layerscape/patches-4.4/3141-caam-add-caam-node-for-ls1088a.patch b/target/linux/layerscape/patches-4.4/3141-caam-add-caam-node-for-ls1088a.patch deleted file mode 100644 index 386a32102..000000000 --- a/target/linux/layerscape/patches-4.4/3141-caam-add-caam-node-for-ls1088a.patch +++ /dev/null @@ -1,62 +0,0 @@ -From 89b3b66aa955fed15585a4ba7120cf63f9e92aba Mon Sep 17 00:00:00 2001 -From: Zhao Qiang -Date: Thu, 13 Oct 2016 10:19:08 +0800 -Subject: [PATCH 141/141] caam: add caam node for ls1088a - -Signed-off-by: Zhao Qiang ---- - arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi | 43 ++++++++++++++++++++++++ - 1 file changed, 43 insertions(+) - ---- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi -@@ -485,6 +485,49 @@ - <1 10 0x1>; /*Hypervisor PPI, edge triggered */ - }; - -+ crypto: crypto@8000000 { -+ compatible = "fsl,sec-v5.4", "fsl,sec-v5.0", -+ "fsl,sec-v4.0"; -+ fsl,sec-era = <8>; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ ranges = <0x0 0x00 0x8000000 0x100000>; -+ reg = <0x00 0x8000000 0x0 0x100000>; -+ interrupts = <0 139 0x4>; -+ -+ sec_jr0: jr@10000 { -+ compatible = "fsl,sec-v5.4-job-ring", -+ "fsl,sec-v5.0-job-ring", -+ "fsl,sec-v4.0-job-ring"; -+ reg = <0x10000 0x10000>; -+ interrupts = <0 140 0x4>; -+ }; -+ -+ sec_jr1: jr@20000 { -+ compatible = "fsl,sec-v5.4-job-ring", -+ "fsl,sec-v5.0-job-ring", -+ "fsl,sec-v4.0-job-ring"; -+ reg = <0x20000 0x10000>; -+ interrupts = <0 141 0x4>; -+ }; -+ -+ sec_jr2: jr@30000 { -+ compatible = "fsl,sec-v5.4-job-ring", -+ "fsl,sec-v5.0-job-ring", -+ "fsl,sec-v4.0-job-ring"; -+ reg = <0x30000 0x10000>; -+ interrupts = <0 142 0x4>; -+ }; -+ -+ sec_jr3: jr@40000 { -+ compatible = "fsl,sec-v5.4-job-ring", -+ "fsl,sec-v5.0-job-ring", -+ "fsl,sec-v4.0-job-ring"; -+ reg = <0x40000 0x10000>; -+ interrupts = <0 143 0x4>; -+ }; -+ }; -+ - fsl_mc: fsl-mc@80c000000 { - compatible = "fsl,qoriq-mc"; - #stream-id-cells = <2>; diff --git 
a/target/linux/layerscape/patches-4.4/3143-armv8-aarch32-Execute-32-bit-Linux-for-ls1046a.patch b/target/linux/layerscape/patches-4.4/3143-armv8-aarch32-Execute-32-bit-Linux-for-ls1046a.patch deleted file mode 100644 index 8754ea4fd..000000000 --- a/target/linux/layerscape/patches-4.4/3143-armv8-aarch32-Execute-32-bit-Linux-for-ls1046a.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 887fd872791ce6e60045f4c6a97926365c6c817c Mon Sep 17 00:00:00 2001 -From: Yutang Jiang -Date: Wed, 7 Dec 2016 00:47:35 +0800 -Subject: [PATCH] armv8: aarch32: Execute 32-bit Linux for ls1046a - -Signed-off-by: Yutang Jiang ---- - arch/arm/mach-imx/mach-layerscape.c | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - ---- a/arch/arm/mach-imx/mach-layerscape.c -+++ b/arch/arm/mach-imx/mach-layerscape.c -@@ -12,11 +12,13 @@ - #include "common.h" - - static const char * const layerscape_dt_compat[] __initconst = { -- "fsl,ls1043a", - "fsl,ls1012a", -+ "fsl,ls1043a", -+ "fsl,ls1046a", - NULL, - }; - - DT_MACHINE_START(LAYERSCAPE, "Freescale LAYERSCAPE") -+ .smp = smp_ops(layerscape_smp_ops), - .dt_compat = layerscape_dt_compat, - MACHINE_END diff --git a/target/linux/layerscape/patches-4.4/3226-mtd-spi-nor-fsl-quadspi-Enable-fast-read-for-LS1088A.patch b/target/linux/layerscape/patches-4.4/3226-mtd-spi-nor-fsl-quadspi-Enable-fast-read-for-LS1088A.patch deleted file mode 100644 index fa2a1d308..000000000 --- a/target/linux/layerscape/patches-4.4/3226-mtd-spi-nor-fsl-quadspi-Enable-fast-read-for-LS1088A.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 72b250c04f543d4eeda06b32e699444b15cac5cc Mon Sep 17 00:00:00 2001 -From: "ying.zhang" -Date: Sat, 17 Dec 2016 00:39:28 +0800 -Subject: [PATCH 226/226] mtd:spi-nor:fsl-quadspi:Enable fast-read for - LS1088ARDB - -Add fast-read mode for LS1088ARDB board. 
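For context: the m25p,fast-read property added above is consumed by the generic spi-nor probe path, not by board code. A minimal sketch of that check, modelled loosely on the 4.x-era spi_nor_scan() flow (the helper name pick_read_mode() is illustrative, not the in-tree function):

#include <linux/of.h>
#include <linux/mtd/spi-nor.h>

/* Sketch: with "m25p,fast-read" present, the core moves from the
 * plain READ opcode (0x03, no dummy cycles) to FAST READ (0x0b,
 * with dummy cycles), which is what lets the part run at the
 * advertised spi-max-frequency. */
static void pick_read_mode(struct spi_nor *nor, struct device_node *np)
{
	if (of_property_read_bool(np, "m25p,fast-read"))
		nor->flash_read = SPI_NOR_FAST;
	else
		nor->flash_read = SPI_NOR_NORMAL;
}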
- -Signed-off-by: Yuan Yao -Integrated-by: Jiang Yutang ---- - arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts | 2 ++ - arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi | 1 + - 2 files changed, 3 insertions(+) - ---- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts -@@ -91,6 +91,7 @@ - compatible = "spansion,m25p80"; - #address-cells = <1>; - #size-cells = <1>; -+ m25p,fast-read; - spi-max-frequency = <20000000>; - reg = <0>; - }; -@@ -99,6 +100,7 @@ - compatible = "spansion,m25p80"; - #address-cells = <1>; - #size-cells = <1>; -+ m25p,fast-read; - spi-max-frequency = <20000000>; - reg = <1>; - }; ---- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi -@@ -294,6 +294,7 @@ - interrupts = <0 25 0x4>; /* Level high type */ - clocks = <&clockgen 4 3>, <&clockgen 4 3>; - clock-names = "qspi_en", "qspi"; -+ fsl,qspi-has-second-chip; - }; - - pcie@3400000 { diff --git a/target/linux/layerscape/patches-4.4/3227-ls2088a-dts-add-ls2088a-dts.patch b/target/linux/layerscape/patches-4.4/3227-ls2088a-dts-add-ls2088a-dts.patch deleted file mode 100644 index b3b6991dd..000000000 --- a/target/linux/layerscape/patches-4.4/3227-ls2088a-dts-add-ls2088a-dts.patch +++ /dev/null @@ -1,1338 +0,0 @@ -From 45ba5bb2bdc9462fe5998aeb75e2c7e33b56c9fb Mon Sep 17 00:00:00 2001 -From: Zhao Qiang -Date: Mon, 7 Nov 2016 10:23:52 +0800 -Subject: [PATCH 227/238] ls2088a/dts: add ls2088a dts - -Signed-off-by: Zhao Qiang ---- - arch/arm64/boot/dts/freescale/Makefile | 2 + - arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts | 241 ++++++ - arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts | 207 +++++ - arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi | 854 +++++++++++++++++++++ - 4 files changed, 1304 insertions(+) - create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts - create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts - create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi - ---- a/arch/arm64/boot/dts/freescale/Makefile -+++ b/arch/arm64/boot/dts/freescale/Makefile -@@ -6,6 +6,8 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1 - dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-frdm.dtb - dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-rdb.dtb - dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1088a-rdb.dtb -+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-rdb.dtb -+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-qds.dtb - - always := $(dtb-y) - subdir-y := $(dts-dirs) ---- /dev/null -+++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts -@@ -0,0 +1,241 @@ -+/* -+ * Device Tree file for Freescale LS2080a QDS Board -+ * -+ * Copyright (C) 2016, Freescale Semiconductor -+ * -+ * Abhimanyu Saini -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. 
-+ */ -+ -+/dts-v1/; -+ -+#include "fsl-ls2088a.dtsi" -+ -+/ { -+ model = "Freescale Layerscape 2088a QDS Board"; -+ compatible = "fsl,ls2088a-qds", "fsl,ls2088a"; -+}; -+ -+&esdhc { -+ status = "okay"; -+}; -+ -+&ifc { -+ status = "okay"; -+ #address-cells = <2>; -+ #size-cells = <1>; -+ ranges = <0x0 0x0 0x5 0x80000000 0x08000000 -+ 0x2 0x0 0x5 0x30000000 0x00010000 -+ 0x3 0x0 0x5 0x20000000 0x00010000>; -+ -+ nor@0,0 { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "cfi-flash"; -+ reg = <0x0 0x0 0x8000000>; -+ bank-width = <2>; -+ device-width = <1>; -+ }; -+ -+ nand@2,0 { -+ compatible = "fsl,ifc-nand"; -+ reg = <0x2 0x0 0x10000>; -+ }; -+ -+ cpld@3,0 { -+ reg = <0x3 0x0 0x10000>; -+ compatible = "fsl,ls2088a-qds-qixis", "fsl,ls2080a-qds-qixis", -+ "fsl,fpga-qixis"; -+ }; -+}; -+ -+&ftm0 { -+ status = "okay"; -+}; -+ -+&i2c0 { -+ status = "okay"; -+ pca9547@77 { -+ compatible = "nxp,pca9547"; -+ reg = <0x77>; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ i2c@0 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x00>; -+ rtc@68 { -+ compatible = "dallas,ds3232"; -+ reg = <0x68>; -+ }; -+ }; -+ -+ i2c@2 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x02>; -+ -+ ina220@40 { -+ compatible = "ti,ina220"; -+ reg = <0x40>; -+ shunt-resistor = <500>; -+ }; -+ ina220@41 { -+ compatible = "ti,ina220"; -+ reg = <0x41>; -+ shunt-resistor = <1000>; -+ }; -+ }; -+ -+ i2c@3 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x3>; -+ -+ adt7481@4c { -+ compatible = "adi,adt7461"; -+ reg = <0x4c>; -+ }; -+ }; -+ }; -+}; -+ -+&i2c1 { -+ status = "disabled"; -+}; -+ -+&i2c2 { -+ status = "disabled"; -+}; -+ -+&i2c3 { -+ status = "disabled"; -+}; -+ -+&dspi { -+ status = "okay"; -+ dflash0: n25q128a { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "st,m25p80"; -+ spi-max-frequency = <3000000>; -+ reg = <0>; -+ }; -+ dflash1: sst25wf040b { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "st,m25p80"; -+ spi-max-frequency = <3000000>; -+ reg = <1>; -+ }; -+ dflash2: en25s64 { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "st,m25p80"; -+ spi-max-frequency = <3000000>; -+ reg = <2>; -+ }; -+}; -+ -+&qspi { -+ status = "okay"; -+ qflash0: s25fs256s1@0 { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "st,m25p80"; -+ spi-max-frequency = <20000000>; -+ m25p,fast-read; -+ reg = <0>; -+ }; -+ -+ qflash2: s25fs256s1@2 { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "st,m25p80"; -+ spi-max-frequency = <20000000>; -+ m25p,fast-read; -+ reg = <2>; -+ }; -+}; -+ -+&sata0 { -+ status = "okay"; -+}; -+ -+&sata1 { -+ status = "okay"; -+}; -+ -+&usb0 { -+ status = "okay"; -+}; -+ -+&usb1 { -+ status = "okay"; -+}; -+ -+&ifc { -+ boardctrl: board-control@3,0 { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "fsl,tetra-fpga", "fsl,fpga-qixis", "simple-bus"; -+ reg = <3 0 0x300>; /* TODO check address */ -+ ranges = <0 3 0 0x300>; -+ -+ mdio_mux_emi1 { -+ compatible = "mdio-mux-mmioreg", "mdio-mux"; -+ mdio-parent-bus = <&emdio1>; -+ reg = <0x54 1>; /* BRDCFG4 */ -+ mux-mask = <0xe0>; /* EMI1_MDIO */ -+ -+ #address-cells=<1>; -+ #size-cells = <0>; -+ -+ /* Child MDIO buses, one for each riser card: -+ reg = 0x0, 0x20, 0x40, 0x60, 0x80, 0xa0. -+ -+ VSC8234 PHYs on the riser cards. 
-+ */ -+ -+ mdio_mux3: mdio@60 { -+ reg = <0x60>; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ mdio0_phy12: mdio_phy0@1c { -+ reg = <0x1c>; -+ phy-connection-type = "sgmii"; -+ }; -+ mdio0_phy13: mdio_phy1@1d { -+ reg = <0x1d>; -+ phy-connection-type = "sgmii"; -+ }; -+ mdio0_phy14: mdio_phy2@1e { -+ reg = <0x1e>; -+ phy-connection-type = "sgmii"; -+ }; -+ mdio0_phy15: mdio_phy3@1f { -+ reg = <0x1f>; -+ phy-connection-type = "sgmii"; -+ }; -+ }; -+ }; -+ }; -+}; -+ -+/* Update DPMAC connections to external PHYs, under SerDes 0x2a_0x49. */ -+&dpmac9 { -+ phy-handle = <&mdio0_phy12>; -+}; -+&dpmac10 { -+ phy-handle = <&mdio0_phy13>; -+}; -+&dpmac11 { -+ phy-handle = <&mdio0_phy14>; -+}; -+&dpmac12 { -+ phy-handle = <&mdio0_phy15>; -+}; ---- /dev/null -+++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts -@@ -0,0 +1,207 @@ -+/* -+ * Device Tree file for Freescale LS2080a RDB board -+ * -+ * Copyright (C) 2015, Freescale Semiconductor -+ * -+ * Abhimanyu Saini -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+ -+/dts-v1/; -+ -+#include "fsl-ls2088a.dtsi" -+ -+/ { -+ model = "Freescale Layerscape 2088a RDB Board"; -+ compatible = "fsl,ls2088a-rdb", "fsl,ls2088a"; -+}; -+ -+&esdhc { -+ status = "okay"; -+}; -+ -+&ifc { -+ status = "okay"; -+ #address-cells = <2>; -+ #size-cells = <1>; -+ ranges = <0x0 0x0 0x5 0x80000000 0x08000000 -+ 0x2 0x0 0x5 0x30000000 0x00010000 -+ 0x3 0x0 0x5 0x20000000 0x00010000>; -+ -+ nor@0,0 { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "cfi-flash"; -+ reg = <0x0 0x0 0x8000000>; -+ bank-width = <2>; -+ device-width = <1>; -+ }; -+ -+ nand@2,0 { -+ compatible = "fsl,ifc-nand"; -+ reg = <0x2 0x0 0x10000>; -+ }; -+ -+ cpld@3,0 { -+ reg = <0x3 0x0 0x10000>; -+ compatible = "fsl,ls2088a-qds-qixis", "fsl,ls2080a-qds-qixis", -+ "fsl,fpga-qixis"; -+ }; -+}; -+ -+&ftm0 { -+ status = "okay"; -+}; -+ -+&i2c0 { -+ status = "okay"; -+ pca9547@75 { -+ compatible = "nxp,pca9547"; -+ reg = <0x75>; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ i2c-mux-never-disable; -+ i2c@1 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x01>; -+ rtc@68 { -+ compatible = "dallas,ds3232"; -+ reg = <0x68>; -+ }; -+ }; -+ -+ i2c@3 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x3>; -+ -+ adt7481@4c { -+ compatible = "adi,adt7461"; -+ reg = <0x4c>; -+ }; -+ }; -+ }; -+}; -+ -+&i2c1 { -+ status = "disabled"; -+}; -+ -+&i2c2 { -+ status = "disabled"; -+}; -+ -+&i2c3 { -+ status = "disabled"; -+}; -+ -+&dspi { -+ status = "okay"; -+ dflash0: n25q512a { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "st,m25p80"; -+ spi-max-frequency = <3000000>; -+ reg = <0>; -+ }; -+}; -+ -+&qspi { -+ status = "disabled"; -+}; -+ -+&sata0 { -+ status = "okay"; -+}; -+ -+&sata1 { -+ status = "okay"; -+}; -+ -+&usb0 { -+ status = "okay"; -+}; -+ -+&usb1 { -+ status = "okay"; -+}; -+ -+&emdio1 { -+ /* CS4340 PHYs */ -+ mdio1_phy1: emdio1_phy@1 { -+ reg = <0x10>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio1_phy2: emdio1_phy@2 { -+ reg = <0x11>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio1_phy3: emdio1_phy@3 { -+ reg = <0x12>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio1_phy4: emdio1_phy@4 { -+ reg = <0x13>; -+ phy-connection-type = "xfi"; -+ }; -+}; -+ -+&emdio2 { -+ /* AQR405 PHYs */ -+ mdio2_phy1: emdio2_phy@1 { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ interrupts = <0 1 0x4>; /* Level 
high type */ -+ reg = <0x0>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio2_phy2: emdio2_phy@2 { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ interrupts = <0 2 0x4>; /* Level high type */ -+ reg = <0x1>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio2_phy3: emdio2_phy@3 { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ interrupts = <0 4 0x4>; /* Level high type */ -+ reg = <0x2>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio2_phy4: emdio2_phy@4 { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ interrupts = <0 5 0x4>; /* Level high type */ -+ reg = <0x3>; -+ phy-connection-type = "xfi"; -+ }; -+}; -+ -+/* Update DPMAC connections to external PHYs, under the assumption of -+ * SerDes 0x2a_0x41. This is currently the only SerDes supported on the board. -+ */ -+&dpmac1 { -+ phy-handle = <&mdio1_phy1>; -+}; -+&dpmac2 { -+ phy-handle = <&mdio1_phy2>; -+}; -+&dpmac3 { -+ phy-handle = <&mdio1_phy3>; -+}; -+&dpmac4 { -+ phy-handle = <&mdio1_phy4>; -+}; -+&dpmac5 { -+ phy-handle = <&mdio2_phy1>; -+}; -+&dpmac6 { -+ phy-handle = <&mdio2_phy2>; -+}; -+&dpmac7 { -+ phy-handle = <&mdio2_phy3>; -+}; -+&dpmac8 { -+ phy-handle = <&mdio2_phy4>; -+}; ---- /dev/null -+++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi -@@ -0,0 +1,854 @@ -+/* -+ * Device Tree Include file for Freescale Layerscape-2088A family SoC. -+ * -+ * Copyright (C) 2016, Freescale Semiconductor -+ * -+ * Abhimanyu Saini -+ * -+ * This file is dual-licensed: you can use it either under the terms -+ * of the GPLv2 or the X11 license, at your option. Note that this dual -+ * licensing only applies to this file, and not this project as a -+ * whole. -+ * -+ * a) This library is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License as -+ * published by the Free Software Foundation; either version 2 of the -+ * License, or (at your option) any later version. -+ * -+ * This library is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * Or, alternatively, -+ * -+ * b) Permission is hereby granted, free of charge, to any person -+ * obtaining a copy of this software and associated documentation -+ * files (the "Software"), to deal in the Software without -+ * restriction, including without limitation the rights to use, -+ * copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following -+ * conditions: -+ * -+ * The above copyright notice and this permission notice shall be -+ * included in all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -+ * OTHER DEALINGS IN THE SOFTWARE. 
-+ */ -+ -+#include -+ -+/memreserve/ 0x80000000 0x00010000; -+ -+/ { -+ compatible = "fsl,ls2088a"; -+ interrupt-parent = <&gic>; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ -+ cpus { -+ #address-cells = <2>; -+ #size-cells = <0>; -+ -+ cpu0: cpu@0 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x0>; -+ clocks = <&clockgen 1 0>; -+ #cooling-cells = <2>; -+ cpu-idle-states = <&CPU_PW20>; -+ }; -+ -+ cpu1: cpu@1 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x1>; -+ clocks = <&clockgen 1 0>; -+ cpu-idle-states = <&CPU_PW20>; -+ }; -+ -+ cpu2: cpu@100 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x100>; -+ clocks = <&clockgen 1 1>; -+ #cooling-cells = <2>; -+ cpu-idle-states = <&CPU_PW20>; -+ }; -+ -+ cpu3: cpu@101 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x101>; -+ clocks = <&clockgen 1 1>; -+ cpu-idle-states = <&CPU_PW20>; -+ }; -+ -+ cpu4: cpu@200 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x200>; -+ clocks = <&clockgen 1 2>; -+ #cooling-cells = <2>; -+ cpu-idle-states = <&CPU_PW20>; -+ }; -+ -+ cpu5: cpu@201 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x201>; -+ clocks = <&clockgen 1 2>; -+ cpu-idle-states = <&CPU_PW20>; -+ }; -+ -+ cpu6: cpu@300 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x300>; -+ clocks = <&clockgen 1 3>; -+ #cooling-cells = <2>; -+ cpu-idle-states = <&CPU_PW20>; -+ }; -+ -+ cpu7: cpu@301 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x301>; -+ clocks = <&clockgen 1 3>; -+ cpu-idle-states = <&CPU_PW20>; -+ }; -+ }; -+ -+ pmu { -+ compatible = "arm,armv8-pmuv3"; -+ interrupts = <1 7 0x8>; /* PMU PPI, Level low type */ -+ }; -+ -+ idle-states { -+ entry-method = "arm,psci"; -+ -+ CPU_PW20: cpu-pw20 { -+ compatible = "arm,idle-state"; -+ idle-state-name = "PW20"; -+ arm,psci-suspend-param = <0x00010000>; -+ entry-latency-us = <2000>; -+ exit-latency-us = <2000>; -+ min-residency-us = <6000>; -+ }; -+ }; -+ -+ gic: interrupt-controller@6000000 { -+ compatible = "arm,gic-v3"; -+ reg = <0x0 0x06000000 0 0x10000>, /* GIC Dist */ -+ <0x0 0x06100000 0 0x100000>, /* GICR (RD_base + SGI_base) */ -+ <0x0 0x0c0c0000 0 0x2000>, /* GICC */ -+ <0x0 0x0c0d0000 0 0x1000>, /* GICH */ -+ <0x0 0x0c0e0000 0 0x20000>; /* GICV */ -+ #interrupt-cells = <3>; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ ranges; -+ interrupt-controller; -+ interrupts = <1 9 0x4>; -+ -+ its: gic-its@6020000 { -+ compatible = "arm,gic-v3-its"; -+ msi-controller; -+ reg = <0x0 0x6020000 0 0x20000>; -+ }; -+ }; -+ -+ sysclk: sysclk { -+ compatible = "fixed-clock"; -+ #clock-cells = <0>; -+ clock-frequency = <100000000>; -+ clock-output-names = "sysclk"; -+ }; -+ -+ clockgen: clocking@1300000 { -+ compatible = "fsl,ls2088a-clockgen"; -+ reg = <0 0x1300000 0 0xa0000>; -+ #clock-cells = <2>; -+ clocks = <&sysclk>; -+ }; -+ -+ tmu: tmu@1f80000 { -+ compatible = "fsl,qoriq-tmu", "fsl,ls2080a-tmu", "fsl,ls2088a-tmu"; -+ reg = <0x0 0x1f80000 0x0 0x10000>; -+ interrupts = <0 23 0x4>; -+ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>; -+ fsl,tmu-calibration = <0x00000000 0x00000026 -+ 0x00000001 0x0000002d -+ 0x00000002 0x00000032 -+ 0x00000003 0x00000039 -+ 0x00000004 0x0000003f -+ 0x00000005 0x00000046 -+ 0x00000006 0x0000004d -+ 0x00000007 0x00000054 -+ 0x00000008 0x0000005a -+ 0x00000009 0x00000061 -+ 0x0000000a 0x0000006a -+ 0x0000000b 0x00000071 -+ -+ 0x00010000 0x00000025 
-+ 0x00010001 0x0000002c -+ 0x00010002 0x00000035 -+ 0x00010003 0x0000003d -+ 0x00010004 0x00000045 -+ 0x00010005 0x0000004e -+ 0x00010006 0x00000057 -+ 0x00010007 0x00000061 -+ 0x00010008 0x0000006b -+ 0x00010009 0x00000076 -+ -+ 0x00020000 0x00000029 -+ 0x00020001 0x00000033 -+ 0x00020002 0x0000003d -+ 0x00020003 0x00000049 -+ 0x00020004 0x00000056 -+ 0x00020005 0x00000061 -+ 0x00020006 0x0000006d -+ -+ 0x00030000 0x00000021 -+ 0x00030001 0x0000002a -+ 0x00030002 0x0000003c -+ 0x00030003 0x0000004e>; -+ little-endian; -+ #thermal-sensor-cells = <1>; -+ }; -+ -+ thermal-zones { -+ cpu_thermal: cpu-thermal { -+ polling-delay-passive = <1000>; -+ polling-delay = <5000>; -+ -+ thermal-sensors = <&tmu 4>; -+ -+ trips { -+ cpu_alert: cpu-alert { -+ temperature = <75000>; -+ hysteresis = <2000>; -+ type = "passive"; -+ }; -+ cpu_crit: cpu-crit { -+ temperature = <85000>; -+ hysteresis = <2000>; -+ type = "critical"; -+ }; -+ }; -+ -+ cooling-maps { -+ map0 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu0 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ map1 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu2 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ map2 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu4 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ map3 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu6 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ }; -+ }; -+ }; -+ -+ serial0: serial@21c0500 { -+ device_type = "serial"; -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x0 0x21c0500 0x0 0x100>; -+ clocks = <&clockgen 4 3>; -+ interrupts = <0 32 0x4>; /* Level high type */ -+ }; -+ -+ serial1: serial@21c0600 { -+ device_type = "serial"; -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x0 0x21c0600 0x0 0x100>; -+ clocks = <&clockgen 4 3>; -+ interrupts = <0 32 0x4>; /* Level high type */ -+ }; -+ cluster1_core0_watchdog: wdt@c000000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc000000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ cluster1_core1_watchdog: wdt@c010000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc010000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ cluster2_core0_watchdog: wdt@c100000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc100000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ cluster2_core1_watchdog: wdt@c110000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc110000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ cluster3_core0_watchdog: wdt@c200000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc200000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ cluster3_core1_watchdog: wdt@c210000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc210000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ cluster4_core0_watchdog: wdt@c300000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc300000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ cluster4_core1_watchdog: wdt@c310000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc310000 0x0 
0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ gpio0: gpio@2300000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2300000 0x0 0x10000>; -+ interrupts = <0 36 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio1: gpio@2310000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2310000 0x0 0x10000>; -+ interrupts = <0 36 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio2: gpio@2320000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2320000 0x0 0x10000>; -+ interrupts = <0 37 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio3: gpio@2330000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2330000 0x0 0x10000>; -+ interrupts = <0 37 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ /* TODO: WRIOP (CCSR?) */ -+ emdio1: mdio@0x8B96000 { /* WRIOP0: 0x8B8_0000, E-MDIO1: 0x1_6000 */ -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0x0 0x8B96000 0x0 0x1000>; -+ device_type = "mdio"; /* TODO: is this necessary? */ -+ little-endian; /* force the driver in LE mode */ -+ -+ /* Not necessary on the QDS, but needed on the RDB */ -+ #address-cells = <1>; -+ #size-cells = <0>; -+ }; -+ -+ emdio2: mdio@0x8B97000 { /* WRIOP0: 0x8B8_0000, E-MDIO2: 0x1_7000 */ -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0x0 0x8B97000 0x0 0x1000>; -+ device_type = "mdio"; /* TODO: is this necessary? 
*/ -+ little-endian; /* force the driver in LE mode */ -+ -+ #address-cells = <1>; -+ #size-cells = <0>; -+ }; -+ -+ ifc: ifc@2240000 { -+ compatible = "fsl,ifc", "simple-bus"; -+ reg = <0x0 0x2240000 0x0 0x20000>; -+ interrupts = <0 21 0x4>; /* Level high type */ -+ little-endian; -+ #address-cells = <2>; -+ #size-cells = <1>; -+ -+ ranges = <0 0 0x5 0x80000000 0x08000000 -+ 2 0 0x5 0x30000000 0x00010000 -+ 3 0 0x5 0x20000000 0x00010000>; -+ }; -+ -+ esdhc: esdhc@2140000 { -+ compatible = "fsl,ls2088a-esdhc", "fsl,ls2080a-esdhc", -+ "fsl,esdhc"; -+ reg = <0x0 0x2140000 0x0 0x10000>; -+ interrupts = <0 28 0x4>; /* Level high type */ -+ clock-frequency = <0>; -+ voltage-ranges = <1800 1800 3300 3300>; -+ sdhci,auto-cmd12; -+ little-endian; -+ bus-width = <4>; -+ }; -+ -+ ftm0: ftm0@2800000 { -+ compatible = "fsl,ftm-alarm"; -+ reg = <0x0 0x2800000 0x0 0x10000>; -+ interrupts = <0 44 4>; -+ }; -+ -+ reset: reset@1E60000 { -+ compatible = "fsl,ls-reset"; -+ reg = <0x0 0x1E60000 0x0 0x10000>; -+ }; -+ -+ dspi: dspi@2100000 { -+ compatible = "fsl,ls2088a-dspi", "fsl,ls2085a-dspi", -+ "fsl,ls2080a-dspi"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2100000 0x0 0x10000>; -+ interrupts = <0 26 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>; -+ clock-names = "dspi"; -+ spi-num-chipselects = <5>; -+ bus-num = <0>; -+ }; -+ -+ i2c0: i2c@2000000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2000000 0x0 0x10000>; -+ interrupts = <0 34 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ i2c1: i2c@2010000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2010000 0x0 0x10000>; -+ interrupts = <0 34 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ i2c2: i2c@2020000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2020000 0x0 0x10000>; -+ interrupts = <0 35 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ i2c3: i2c@2030000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2030000 0x0 0x10000>; -+ interrupts = <0 35 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ qspi: quadspi@20c0000 { -+ compatible = "fsl,ls2088a-qspi", "fsl,ls2080a-qspi"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x20c0000 0x0 0x10000>, -+ <0x0 0x20000000 0x0 0x10000000>; -+ reg-names = "QuadSPI", "QuadSPI-memory"; -+ interrupts = <0 25 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "qspi_en", "qspi"; -+ }; -+ -+ pcie1: pcie@3400000 { -+ compatible = "fsl,ls2088a-pcie", "fsl,ls2080a-pcie", -+ "fsl,ls2085a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */ -+ 0x20 0x00000000 0x0 0x00002000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 108 0x4>; /* Level high type */ -+ interrupt-names = "aer"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ dma-coherent; -+ fsl,lut_diff; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x20 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ 
interrupt-map = <0000 0 0 1 &gic 0 0 0 109 4>, -+ <0000 0 0 2 &gic 0 0 0 110 4>, -+ <0000 0 0 3 &gic 0 0 0 111 4>, -+ <0000 0 0 4 &gic 0 0 0 112 4>; -+ }; -+ -+ pcie2: pcie@3500000 { -+ compatible = "fsl,ls2080a-pcie", "fsl,ls2080a-pcie", -+ "fsl,ls2085a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */ -+ 0x28 0x00000000 0x0 0x00002000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 113 0x4>; /* Level high type */ -+ interrupt-names = "aer"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ dma-coherent; -+ fsl,lut_diff; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x28 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x28 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 114 4>, -+ <0000 0 0 2 &gic 0 0 0 115 4>, -+ <0000 0 0 3 &gic 0 0 0 116 4>, -+ <0000 0 0 4 &gic 0 0 0 117 4>; -+ }; -+ -+ pcie3: pcie@3600000 { -+ compatible = "fsl,ls2088a-pcie", "fsl,ls2080a-pcie", -+ "fsl,ls2085a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */ -+ 0x30 0x00000000 0x0 0x00002000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 118 0x4>; /* Level high type */ -+ interrupt-names = "aer"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ dma-coherent; -+ fsl,lut_diff; -+ num-lanes = <8>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x30 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 119 4>, -+ <0000 0 0 2 &gic 0 0 0 120 4>, -+ <0000 0 0 3 &gic 0 0 0 121 4>, -+ <0000 0 0 4 &gic 0 0 0 122 4>; -+ }; -+ -+ pcie4: pcie@3700000 { -+ compatible = "fsl,ls2080a-pcie", "fsl,ls2080a-pcie", -+ "fsl,ls2085a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */ -+ 0x38 0x00000000 0x0 0x00002000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 123 0x4>; /* Level high type */ -+ interrupt-names = "aer"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ dma-coherent; -+ fsl,lut_diff; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x38 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x38 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 124 4>, -+ <0000 0 0 2 &gic 0 0 0 125 4>, -+ <0000 0 0 3 &gic 0 0 0 126 4>, -+ <0000 0 0 4 &gic 0 0 0 127 4>; -+ }; -+ -+ sata0: sata@3200000 { -+ status = "disabled"; -+ compatible = "fsl,ls2088a-ahci", "fsl,ls2080a-ahci"; -+ reg = <0x0 0x3200000 0x0 0x10000>; -+ interrupts = <0 133 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ sata1: sata@3210000 { -+ status = "disabled"; -+ compatible = "fsl,ls2088a-ahci", "fsl,ls2080a-ahci"; -+ reg = <0x0 0x3210000 0x0 0x10000>; -+ interrupts = <0 136 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ usb0: usb3@3100000 { -+ status = "disabled"; -+ compatible = "snps,dwc3"; -+ reg = <0x0 0x3100000 
0x0 0x10000>; -+ interrupts = <0 80 0x4>; /* Level high type */ -+ dr_mode = "host"; -+ configure-gfladj; -+ snps,dis_rxdet_inp3_quirk; -+ }; -+ -+ usb1: usb3@3110000 { -+ status = "disabled"; -+ compatible = "snps,dwc3"; -+ reg = <0x0 0x3110000 0x0 0x10000>; -+ interrupts = <0 81 0x4>; /* Level high type */ -+ dr_mode = "host"; -+ configure-gfladj; -+ snps,dis_rxdet_inp3_quirk; -+ }; -+ -+ smmu: iommu@5000000 { -+ compatible = "arm,mmu-500"; -+ reg = <0 0x5000000 0 0x800000>; -+ #global-interrupts = <12>; -+ interrupts = <0 13 4>, /* global secure fault */ -+ <0 14 4>, /* combined secure interrupt */ -+ <0 15 4>, /* global non-secure fault */ -+ <0 16 4>, /* combined non-secure interrupt */ -+ /* performance counter interrupts 0-7 */ -+ <0 211 4>, -+ <0 212 4>, -+ <0 213 4>, -+ <0 214 4>, -+ <0 215 4>, -+ <0 216 4>, -+ <0 217 4>, -+ <0 218 4>, -+ /* per context interrupt, 64 interrupts */ -+ <0 146 4>, -+ <0 147 4>, -+ <0 148 4>, -+ <0 149 4>, -+ <0 150 4>, -+ <0 151 4>, -+ <0 152 4>, -+ <0 153 4>, -+ <0 154 4>, -+ <0 155 4>, -+ <0 156 4>, -+ <0 157 4>, -+ <0 158 4>, -+ <0 159 4>, -+ <0 160 4>, -+ <0 161 4>, -+ <0 162 4>, -+ <0 163 4>, -+ <0 164 4>, -+ <0 165 4>, -+ <0 166 4>, -+ <0 167 4>, -+ <0 168 4>, -+ <0 169 4>, -+ <0 170 4>, -+ <0 171 4>, -+ <0 172 4>, -+ <0 173 4>, -+ <0 174 4>, -+ <0 175 4>, -+ <0 176 4>, -+ <0 177 4>, -+ <0 178 4>, -+ <0 179 4>, -+ <0 180 4>, -+ <0 181 4>, -+ <0 182 4>, -+ <0 183 4>, -+ <0 184 4>, -+ <0 185 4>, -+ <0 186 4>, -+ <0 187 4>, -+ <0 188 4>, -+ <0 189 4>, -+ <0 190 4>, -+ <0 191 4>, -+ <0 192 4>, -+ <0 193 4>, -+ <0 194 4>, -+ <0 195 4>, -+ <0 196 4>, -+ <0 197 4>, -+ <0 198 4>, -+ <0 199 4>, -+ <0 200 4>, -+ <0 201 4>, -+ <0 202 4>, -+ <0 203 4>, -+ <0 204 4>, -+ <0 205 4>, -+ <0 206 4>, -+ <0 207 4>, -+ <0 208 4>, -+ <0 209 4>; -+ mmu-masters = <&fsl_mc 0x300 0>; -+ }; -+ -+ timer { -+ compatible = "arm,armv8-timer"; -+ interrupts = <1 13 0x1>, /* Physical Secure PPI, edge triggered */ -+ <1 14 0x1>, /* Physical Non-Secure PPI, edge triggered */ -+ <1 11 0x1>, /* Virtual PPI, edge triggered */ -+ <1 10 0x1>; /* Hypervisor PPI, edge triggered */ -+ arm,reread-timer; -+ fsl,erratum-a008585; -+ }; -+ -+ fsl_mc: fsl-mc@80c000000 { -+ compatible = "fsl,qoriq-mc"; -+ #stream-id-cells = <2>; -+ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */ -+ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */ -+ msi-parent = <&its>; -+ #address-cells = <3>; -+ #size-cells = <1>; -+ -+ /* -+ * Region type 0x0 - MC portals -+ * Region type 0x1 - QBMAN portals -+ */ -+ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000 -+ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>; -+ -+ /* -+ * Define the maximum number of MACs present on the SoC. -+ * They won't necessarily be all probed, since the -+ * Data Path Layout file and the MC firmware can put fewer -+ * actual DPMAC objects on the MC bus. 
-+ */ -+ dpmacs { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ dpmac1: dpmac@1 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <1>; -+ }; -+ dpmac2: dpmac@2 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <2>; -+ }; -+ dpmac3: dpmac@3 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <3>; -+ }; -+ dpmac4: dpmac@4 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <4>; -+ }; -+ dpmac5: dpmac@5 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <5>; -+ }; -+ dpmac6: dpmac@6 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <6>; -+ }; -+ dpmac7: dpmac@7 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <7>; -+ }; -+ dpmac8: dpmac@8 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <8>; -+ }; -+ dpmac9: dpmac@9 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <9>; -+ }; -+ dpmac10: dpmac@10 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xa>; -+ }; -+ dpmac11: dpmac@11 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xb>; -+ }; -+ dpmac12: dpmac@12 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xc>; -+ }; -+ dpmac13: dpmac@13 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xd>; -+ }; -+ dpmac14: dpmac@14 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xe>; -+ }; -+ dpmac15: dpmac@15 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xf>; -+ }; -+ dpmac16: dpmac@16 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0x10>; -+ }; -+ }; -+ }; -+ -+ ccn@4000000 { -+ compatible = "arm,ccn-504"; -+ reg = <0x0 0x04000000 0x0 0x01000000>; -+ interrupts = <0 12 4>; -+ }; -+ -+ memory@80000000 { -+ device_type = "memory"; -+ reg = <0x00000000 0x80000000 0 0x80000000>; -+ /* DRAM space 1 - 2 GB DRAM */ -+ }; -+}; diff --git a/target/linux/layerscape/patches-4.4/3228-ls2088a-add-ls2088a-its.patch b/target/linux/layerscape/patches-4.4/3228-ls2088a-add-ls2088a-its.patch deleted file mode 100644 index 7aa638257..000000000 --- a/target/linux/layerscape/patches-4.4/3228-ls2088a-add-ls2088a-its.patch +++ /dev/null @@ -1,129 +0,0 @@ -From e0f9ccd657893d1a10dfbae291900b3045c471fc Mon Sep 17 00:00:00 2001 -From: Zhao Qiang -Date: Mon, 7 Nov 2016 10:38:51 +0800 -Subject: [PATCH 228/238] ls2088a: add ls2088a its - -Signed-off-by: Zhao Qiang ---- - kernel2088a-qds.its | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++ - kernel2088a-rdb.its | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++ - 2 files changed, 110 insertions(+) - create mode 100644 kernel2088a-qds.its - create mode 100644 kernel2088a-rdb.its - ---- /dev/null -+++ b/kernel2088a-qds.its -@@ -0,0 +1,55 @@ -+/* -+ * Copyright (C) 2016, Freescale Semiconductor -+ * -+ * Abhimanyu Saini -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. 
-+ */ -+ -+/dts-v1/; -+ -+/ { -+ description = "QDS Image file for the LS2080A Linux Kernel"; -+ #address-cells = <1>; -+ -+ images { -+ kernel@1 { -+ description = "ARM64 Linux kernel"; -+ data = /incbin/("./arch/arm64/boot/Image.gz"); -+ type = "kernel"; -+ arch = "arm64"; -+ os = "linux"; -+ compression = "gzip"; -+ load = <0x80080000>; -+ entry = <0x80080000>; -+ }; -+ fdt@1 { -+ description = "Flattened Device Tree blob"; -+ data = /incbin/("./arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dtb"); -+ type = "flat_dt"; -+ arch = "arm64"; -+ compression = "none"; -+ load = <0x90000000>; -+ }; -+ ramdisk@1 { -+ description = "LS2 Ramdisk"; -+ data = /incbin/("./fsl-image-core-ls2088aqds.ext2.gz"); -+ type = "ramdisk"; -+ arch = "arm64"; -+ os = "linux"; -+ compression = "none"; -+ }; -+ }; -+ -+ configurations { -+ default = "config@1"; -+ config@1 { -+ description = "Boot Linux kernel"; -+ kernel = "kernel@1"; -+ fdt = "fdt@1"; -+ ramdisk = "ramdisk@1"; -+ }; -+ }; -+}; ---- /dev/null -+++ b/kernel2088a-rdb.its -@@ -0,0 +1,55 @@ -+/* -+ * Copyright (C) 2016, Freescale Semiconductor -+ * -+ * Abhimanyu Saini -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+ -+/dts-v1/; -+ -+/ { -+ description = "RDB Image file for the LS2080A Linux Kernel"; -+ #address-cells = <1>; -+ -+ images { -+ kernel@1 { -+ description = "ARM64 Linux kernel"; -+ data = /incbin/("./arch/arm64/boot/Image.gz"); -+ type = "kernel"; -+ arch = "arm64"; -+ os = "linux"; -+ compression = "gzip"; -+ load = <0x80080000>; -+ entry = <0x80080000>; -+ }; -+ fdt@1 { -+ description = "Flattened Device Tree blob"; -+ data = /incbin/("./arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dtb"); -+ type = "flat_dt"; -+ arch = "arm64"; -+ compression = "none"; -+ load = <0x90000000>; -+ }; -+ ramdisk@1 { -+ description = "LS2 Ramdisk"; -+ data = /incbin/("./fsl-image-core-ls2088ardb.ext2.gz"); -+ type = "ramdisk"; -+ arch = "arm64"; -+ os = "linux"; -+ compression = "none"; -+ }; -+ }; -+ -+ configurations { -+ default = "config@1"; -+ config@1 { -+ description = "Boot Linux kernel"; -+ kernel = "kernel@1"; -+ fdt = "fdt@1"; -+ ramdisk = "ramdisk@1"; -+ }; -+ }; -+}; diff --git a/target/linux/layerscape/patches-4.4/3229-arm-dts-ls1021a-fix-typo-of-MSI-compatible-string.patch b/target/linux/layerscape/patches-4.4/3229-arm-dts-ls1021a-fix-typo-of-MSI-compatible-string.patch deleted file mode 100644 index 926efff7a..000000000 --- a/target/linux/layerscape/patches-4.4/3229-arm-dts-ls1021a-fix-typo-of-MSI-compatible-string.patch +++ /dev/null @@ -1,35 +0,0 @@ -From 2c39dc6c100854ed9d617bab9a25557adb6f531a Mon Sep 17 00:00:00 2001 -From: Minghuan Lian -Date: Tue, 17 Jan 2017 17:32:36 +0800 -Subject: [PATCH 05/13] arm: dts: ls1021a: fix typo of MSI compatible string - -Cherry-pick patchwork patch. - -"1" should be replaced by "l". This is a typo. -The patch is to fix it. 
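Why a one-character typo is fatal here: OF device matching is an exact string compare, so a node that says "fsl,1s1021a-msi" (digit one) never binds a driver that advertises "fsl,ls1021a-msi" (letter ell), and no MSI irqdomain is ever created. A skeletal platform driver showing the matching shape (illustrative only, not the in-tree MSI driver):

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* The match table is consulted with exact string comparisons;
 * one wrong character means probe() is simply never called. */
static const struct of_device_id ls_scfg_msi_match[] = {
	{ .compatible = "fsl,ls1021a-msi" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, ls_scfg_msi_match);

static int ls_scfg_msi_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "bound via exact compatible match\n");
	return 0;
}

static struct platform_driver ls_scfg_msi_driver = {
	.driver = {
		.name = "ls-scfg-msi",
		.of_match_table = ls_scfg_msi_match,
	},
	.probe = ls_scfg_msi_probe,
};
module_platform_driver(ls_scfg_msi_driver);

MODULE_LICENSE("GPL");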
- -Signed-off-by: Minghuan Lian -Signed-off-by: Yangbo Lu --- - arch/arm/boot/dts/ls1021a.dtsi | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/arch/arm/boot/dts/ls1021a.dtsi -+++ b/arch/arm/boot/dts/ls1021a.dtsi -@@ -120,14 +120,14 @@ - }; - - msi1: msi-controller@1570e00 { -- compatible = "fsl,1s1021a-msi"; -+ compatible = "fsl,ls1021a-msi"; - reg = <0x0 0x1570e00 0x0 0x8>; - msi-controller; - interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>; - }; - - msi2: msi-controller@1570e08 { -- compatible = "fsl,1s1021a-msi"; -+ compatible = "fsl,ls1021a-msi"; - reg = <0x0 0x1570e08 0x0 0x8>; - msi-controller; - interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>; diff --git a/target/linux/layerscape/patches-4.4/3230-arm64-dts-ls1043a-fix-typo-of-MSI-compatible-string.patch b/target/linux/layerscape/patches-4.4/3230-arm64-dts-ls1043a-fix-typo-of-MSI-compatible-string.patch deleted file mode 100644 index 4fcd40db7..000000000 --- a/target/linux/layerscape/patches-4.4/3230-arm64-dts-ls1043a-fix-typo-of-MSI-compatible-string.patch +++ /dev/null @@ -1,45 +0,0 @@ -From 56ff33e848d1a9b77f81e0d3b5ee0b79edcd4938 Mon Sep 17 00:00:00 2001 -From: Minghuan Lian -Date: Thu, 16 Feb 2017 17:52:08 +0800 -Subject: [PATCH 06/13] arm64: dts: ls1043a: fix typo of MSI compatible string - -Cherry-pick patchwork patch with context adjustment. - -"1" should be replaced by "l". This is a typo. -The patch is to fix it. - -Signed-off-by: Minghuan Lian -Signed-off-by: Yangbo Lu ---- - arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - ---- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi -@@ -991,7 +991,7 @@ - }; - - msi1: msi-controller1@1571000 { -- compatible = "fsl,1s1043a-msi"; -+ compatible = "fsl,ls1043a-msi"; - reg = <0x0 0x1571000 0x0 0x4>, - <0x0 0x1571004 0x0 0x4>; - reg-names = "msiir", "msir"; -@@ -1000,7 +1000,7 @@ - }; - - msi2: msi-controller2@1572000 { -- compatible = "fsl,1s1043a-msi"; -+ compatible = "fsl,ls1043a-msi"; - reg = <0x0 0x1572000 0x0 0x4>, - <0x0 0x1572004 0x0 0x4>; - reg-names = "msiir", "msir"; -@@ -1009,7 +1009,7 @@ - }; - - msi3: msi-controller3@1573000 { -- compatible = "fsl,1s1043a-msi"; -+ compatible = "fsl,ls1043a-msi"; - reg = <0x0 0x1573000 0x0 0x4>, - <0x0 0x1573004 0x0 0x4>; - reg-names = "msiir", "msir"; diff --git a/target/linux/layerscape/patches-4.4/3231-arm-dts-ls1021a-share-all-MSIs.patch b/target/linux/layerscape/patches-4.4/3231-arm-dts-ls1021a-share-all-MSIs.patch deleted file mode 100644 index 09a61dd05..000000000 --- a/target/linux/layerscape/patches-4.4/3231-arm-dts-ls1021a-share-all-MSIs.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 190ae222ef6ded27021620afdc3f5a36861d3625 Mon Sep 17 00:00:00 2001 -From: Minghuan Lian -Date: Tue, 17 Jan 2017 17:32:38 +0800 -Subject: [PATCH 07/13] arm: dts: ls1021a: share all MSIs - -Cherry-pick patchwork patch. - -In order to maximize the use of MSI, a PCIe controller will share -all MSI controllers. The patch changes msi-parent to refer to all -MSI controller dts nodes.
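The effect of the multi-entry msi-parent is that every listed controller is reachable by index, so a host bridge can spread its MSI vectors across all banks instead of exhausting one. The helper below is hypothetical, a sketch assuming bare phandles (no #msi-cells), not the in-tree lookup code:

#include <linux/of.h>
#include <linux/printk.h>

/* Hypothetical walk: of_parse_phandle() fetches the i-th entry of
 * msi-parent = <&msi1>, <&msi2>; and returns NULL past the end. */
static void walk_msi_parents(struct device_node *bridge)
{
	struct device_node *parent;
	int i = 0;

	while ((parent = of_parse_phandle(bridge, "msi-parent", i))) {
		pr_info("msi-parent[%d]: %s\n", i, parent->full_name);
		of_node_put(parent);
		i++;
	}
}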
- -Signed-off-by: Minghuan Lian -Signed-off-by: Yangbo Lu ---- - arch/arm/boot/dts/ls1021a.dtsi | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/arch/arm/boot/dts/ls1021a.dtsi -+++ b/arch/arm/boot/dts/ls1021a.dtsi -@@ -568,7 +568,7 @@ - bus-range = <0x0 0xff>; - ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ - 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -- msi-parent = <&msi1>; -+ msi-parent = <&msi1>, <&msi2>; - #interrupt-cells = <1>; - interrupt-map-mask = <0 0 0 7>; - interrupt-map = <0000 0 0 1 &gic GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>, -@@ -591,7 +591,7 @@ - bus-range = <0x0 0xff>; - ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ - 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -- msi-parent = <&msi2>; -+ msi-parent = <&msi1>, <&msi2>; - #interrupt-cells = <1>; - interrupt-map-mask = <0 0 0 7>; - interrupt-map = <0000 0 0 1 &gic GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>, diff --git a/target/linux/layerscape/patches-4.4/3232-arm64-dts-ls1043a-share-all-MSIs.patch b/target/linux/layerscape/patches-4.4/3232-arm64-dts-ls1043a-share-all-MSIs.patch deleted file mode 100644 index 4380b5a21..000000000 --- a/target/linux/layerscape/patches-4.4/3232-arm64-dts-ls1043a-share-all-MSIs.patch +++ /dev/null @@ -1,46 +0,0 @@ -From 60eedf7a9512683e2a8a998863cc5942e9dbdae5 Mon Sep 17 00:00:00 2001 -From: Minghuan Lian -Date: Tue, 17 Jan 2017 17:32:39 +0800 -Subject: [PATCH 08/13] arm64: dts: ls1043a: share all MSIs - -Cherry-pick patchwork patch. - -In order to maximize the use of MSI, a PCIe controller will share -all MSI controllers. The patch changes "msi-parent" to refer to all -MSI controller dts nodes. 
- -Signed-off-by: Minghuan Lian -Signed-off-by: Yangbo Lu ---- - arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - ---- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi -@@ -1033,7 +1033,7 @@ - bus-range = <0x0 0xff>; - ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ - 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -- msi-parent = <&msi1>; -+ msi-parent = <&msi1>, <&msi2>, <&msi3>; - #interrupt-cells = <1>; - interrupt-map-mask = <0 0 0 7>; - interrupt-map = <0000 0 0 1 &gic 0 110 0x4>, -@@ -1058,7 +1058,7 @@ - bus-range = <0x0 0xff>; - ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ - 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -- msi-parent = <&msi2>; -+ msi-parent = <&msi1>, <&msi2>, <&msi3>; - #interrupt-cells = <1>; - interrupt-map-mask = <0 0 0 7>; - interrupt-map = <0000 0 0 1 &gic 0 120 0x4>, -@@ -1083,7 +1083,7 @@ - bus-range = <0x0 0xff>; - ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */ - 0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -- msi-parent = <&msi3>; -+ msi-parent = <&msi1>, <&msi2>, <&msi3>; - #interrupt-cells = <1>; - interrupt-map-mask = <0 0 0 7>; - interrupt-map = <0000 0 0 1 &gic 0 154 0x4>, diff --git a/target/linux/layerscape/patches-4.4/3233-arm64-dts-ls1046a-update-MSI-dts-node.patch b/target/linux/layerscape/patches-4.4/3233-arm64-dts-ls1046a-update-MSI-dts-node.patch deleted file mode 100644 index 02f2e724e..000000000 --- a/target/linux/layerscape/patches-4.4/3233-arm64-dts-ls1046a-update-MSI-dts-node.patch +++ /dev/null @@ -1,113 +0,0 @@ -From 1569c166572f9576c6726472b5a726a1a56900bd Mon Sep 17 00:00:00 2001 -From: Yangbo Lu -Date: Thu, 16 Feb 2017 18:00:14 +0800 -Subject: [PATCH] arm64: dts: ls1046a: update MSI dts node - -Update MSI dts node according to below patchwork patch. - -arm64: dts: ls1046a: add MSI dts node -https://patchwork.kernel.org/patch/9520299 - -Signed-off-by: Yangbo Lu ---- - arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi | 58 +++++++++++++------------- - 1 file changed, 30 insertions(+), 28 deletions(-) - ---- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi -@@ -44,6 +44,8 @@ - * OTHER DEALINGS IN THE SOFTWARE. 
- */ - -+#include <dt-bindings/interrupt-controller/arm-gic.h> -+ - / { - compatible = "fsl,ls1046a"; - interrupt-parent = <&gic>; -@@ -870,34 +872,34 @@ - big-endian; - }; - -- msi: msi-controller@1580000 { -+ msi1: msi-controller@1580000 { - compatible = "fsl,ls1046a-msi"; -- #address-cells = <2>; -- #size-cells = <2>; -- ranges; - msi-controller; -+ reg = <0x0 0x1580000 0x0 0x10000>; -+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>, -+ <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>, -+ <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>, -+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>; -+ }; - -- msi-bank@1580000 { -- reg = <0x0 0x1580000 0x0 0x10000>; -- interrupts = <0 116 0x4>, -- <0 111 0x4>, -- <0 112 0x4>, -- <0 113 0x4>; -- }; -- msi-bank@1590000 { -- reg = <0x0 0x1590000 0x0 0x10000>; -- interrupts = <0 126 0x4>, -- <0 121 0x4>, -- <0 122 0x4>, -- <0 123 0x4>; -- }; -- msi-bank@15a0000 { -- reg = <0x0 0x15a0000 0x0 0x10000>; -- interrupts = <0 160 0x4>, -- <0 155 0x4>, -- <0 156 0x4>, -- <0 157 0x4>; -- }; -+ msi2: msi-controller@1590000 { -+ compatible = "fsl,ls1046a-msi"; -+ msi-controller; -+ reg = <0x0 0x1590000 0x0 0x10000>; -+ interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>, -+ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>, -+ <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>, -+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>; -+ }; -+ -+ msi3: msi-controller@15a0000 { -+ compatible = "fsl,ls1046a-msi"; -+ msi-controller; -+ reg = <0x0 0x15a0000 0x0 0x10000>; -+ interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>, -+ <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>, -+ <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>, -+ <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>; - }; - - pcie@3400000 { -@@ -916,7 +918,7 @@ - bus-range = <0x0 0xff>; - ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ - 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -- msi-parent = <&msi>; -+ msi-parent = <&msi1>, <&msi2>, <&msi3>; - #interrupt-cells = <1>; - interrupt-map-mask = <0 0 0 7>; - interrupt-map = <0000 0 0 1 &gic 0 110 0x4>, -@@ -941,7 +943,7 @@ - bus-range = <0x0 0xff>; - ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ - 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -- msi-parent = <&msi>; -+ msi-parent = <&msi1>, <&msi2>, <&msi3>; - #interrupt-cells = <1>; - interrupt-map-mask = <0 0 0 7>; - interrupt-map = <0000 0 0 1 &gic 0 120 0x4>, -@@ -966,7 +968,7 @@ - bus-range = <0x0 0xff>; - ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */ - 0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -- msi-parent = <&msi>; -+ msi-parent = <&msi1>, <&msi2>, <&msi3>; - #interrupt-cells = <1>; - interrupt-map-mask = <0 0 0 7>; - interrupt-map = <0000 0 0 1 &gic 0 154 0x4>, diff --git a/target/linux/layerscape/patches-4.4/3234-dts-ls1043a-change-GIC-register-for-rev1.1.patch b/target/linux/layerscape/patches-4.4/3234-dts-ls1043a-change-GIC-register-for-rev1.1.patch deleted file mode 100644 index c693b28a7..000000000 --- a/target/linux/layerscape/patches-4.4/3234-dts-ls1043a-change-GIC-register-for-rev1.1.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 118d9d4b959d30a582d4bd26d1a1918a43ea0ddf Mon Sep 17 00:00:00 2001 -From: Gong Qianyu -Date: Wed, 7 Sep 2016 18:33:09 +0800 -Subject: [PATCH 13/13] dts: ls1043a: change GIC register for rev1.1 - -Cherry-pick sdk patch.
- -Signed-off-by: Gong Qianyu -Signed-off-by: Minghuan Lian -Signed-off-by: Mingkai Hu -Signed-off-by: Yangbo Lu --- - arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - ---- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi -@@ -141,10 +141,10 @@ - compatible = "arm,gic-400"; - #interrupt-cells = <3>; - interrupt-controller; -- reg = <0x0 0x1401000 0 0x1000>, /* GICD */ -- <0x0 0x1402000 0 0x2000>, /* GICC */ -- <0x0 0x1404000 0 0x2000>, /* GICH */ -- <0x0 0x1406000 0 0x2000>; /* GICV */ -+ reg = <0x0 0x1410000 0 0x10000>, /* GICD */ -+ <0x0 0x1420000 0 0x20000>, /* GICC */ -+ <0x0 0x1440000 0 0x20000>, /* GICH */ -+ <0x0 0x1460000 0 0x20000>; /* GICV */ - interrupts = <1 9 0xf08>; - }; - diff --git a/target/linux/layerscape/patches-4.4/4043-driver-memory-Removal-of-deprecated-NO_IRQ.patch b/target/linux/layerscape/patches-4.4/4043-driver-memory-Removal-of-deprecated-NO_IRQ.patch deleted file mode 100644 index a2ff5ab19..000000000 --- a/target/linux/layerscape/patches-4.4/4043-driver-memory-Removal-of-deprecated-NO_IRQ.patch +++ /dev/null @@ -1,24 +0,0 @@ -From ae1f7ae8bbacbdbf9df3977449f97d1432ff1957 Mon Sep 17 00:00:00 2001 -From: Raghav Dogra -Date: Wed, 16 Dec 2015 16:11:31 +0530 -Subject: [PATCH 43/70] driver/memory: Removal of deprecated NO_IRQ - -Replacing the NO_IRQ macro with 0. If there is no interrupt, the -returned value will be 0 regardless of how NO_IRQ is defined. - -Signed-off-by: Raghav Dogra --- - drivers/memory/fsl_ifc.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/memory/fsl_ifc.c -+++ b/drivers/memory/fsl_ifc.c -@@ -260,7 +260,7 @@ static int fsl_ifc_ctrl_probe(struct pla - - /* get the Controller level irq */ - fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0); -- if (fsl_ifc_ctrl_dev->irq == NO_IRQ) { -+ if (fsl_ifc_ctrl_dev->irq == 0) { - dev_err(&dev->dev, "failed to get irq resource " - "for IFC\n"); - ret = -ENODEV; diff --git a/target/linux/layerscape/patches-4.4/4044-drivers-memory-Add-deep-sleep-support-for-IFC.patch b/target/linux/layerscape/patches-4.4/4044-drivers-memory-Add-deep-sleep-support-for-IFC.patch deleted file mode 100644 index dc6508f1e..000000000 --- a/target/linux/layerscape/patches-4.4/4044-drivers-memory-Add-deep-sleep-support-for-IFC.patch +++ /dev/null @@ -1,233 +0,0 @@ -From bb35d670afd2f3501de36c158e9842817ce013b8 Mon Sep 17 00:00:00 2001 -From: Raghav Dogra -Date: Fri, 15 Jan 2016 17:10:09 +0530 -Subject: [PATCH 44/70] drivers/memory: Add deep sleep support for IFC - -Add suspend and resume functions to support deep sleep. -Also make sure the SRAM is initialized during resume.
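For reference, the NO_IRQ removal above relies on irq_of_parse_and_map() returning 0 when no mapping exists; arm64 never defines NO_IRQ at all, which is why the macro had to go. A minimal sketch of the convention (the helper name is illustrative):

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/* 0 is the documented failure value of irq_of_parse_and_map() on
 * every architecture, so it is the portable test; NO_IRQ was 0 on
 * some platforms and simply undefined on arm64. */
static int get_ctrl_irq(struct device_node *np)
{
	unsigned int irq = irq_of_parse_and_map(np, 0);

	if (!irq)
		return -ENODEV;
	return irq;
}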
- -Signed-off-by: Raghav Dogra ---- - drivers/memory/fsl_ifc.c | 163 ++++++++++++++++++++++++++++++++++++++++++++++ - include/linux/fsl_ifc.h | 6 ++ - 2 files changed, 169 insertions(+) - ---- a/drivers/memory/fsl_ifc.c -+++ b/drivers/memory/fsl_ifc.c -@@ -24,6 +24,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -35,6 +36,8 @@ - - struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; - EXPORT_SYMBOL(fsl_ifc_ctrl_dev); -+#define FSL_IFC_V1_3_0 0x01030000 -+#define IFC_TIMEOUT_MSECS 100000 /* 100ms */ - - /* - * convert_ifc_address - convert the base address -@@ -309,6 +312,161 @@ err: - return ret; - } - -+#ifdef CONFIG_PM_SLEEP -+/* save ifc registers */ -+static int fsl_ifc_suspend(struct device *dev) -+{ -+ struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(dev); -+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ __be32 nand_evter_intr_en, cm_evter_intr_en, nor_evter_intr_en, -+ gpcm_evter_intr_en; -+ -+ ctrl->saved_regs = kzalloc(sizeof(struct fsl_ifc_regs), GFP_KERNEL); -+ if (!ctrl->saved_regs) -+ return -ENOMEM; -+ -+ cm_evter_intr_en = ifc_in32(&ifc->cm_evter_intr_en); -+ nand_evter_intr_en = ifc_in32(&ifc->ifc_nand.nand_evter_intr_en); -+ nor_evter_intr_en = ifc_in32(&ifc->ifc_nor.nor_evter_intr_en); -+ gpcm_evter_intr_en = ifc_in32(&ifc->ifc_gpcm.gpcm_evter_intr_en); -+ -+/* IFC interrupts disabled */ -+ -+ ifc_out32(0x0, &ifc->cm_evter_intr_en); -+ ifc_out32(0x0, &ifc->ifc_nand.nand_evter_intr_en); -+ ifc_out32(0x0, &ifc->ifc_nor.nor_evter_intr_en); -+ ifc_out32(0x0, &ifc->ifc_gpcm.gpcm_evter_intr_en); -+ -+ memcpy_fromio(ctrl->saved_regs, ifc, sizeof(struct fsl_ifc_regs)); -+ -+/* save the interrupt values */ -+ ctrl->saved_regs->cm_evter_intr_en = cm_evter_intr_en; -+ ctrl->saved_regs->ifc_nand.nand_evter_intr_en = nand_evter_intr_en; -+ ctrl->saved_regs->ifc_nor.nor_evter_intr_en = nor_evter_intr_en; -+ ctrl->saved_regs->ifc_gpcm.gpcm_evter_intr_en = gpcm_evter_intr_en; -+ -+ return 0; -+} -+ -+/* restore ifc registers */ -+static int fsl_ifc_resume(struct device *dev) -+{ -+ struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(dev); -+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_regs *savd_regs = ctrl->saved_regs; -+ uint32_t ver = 0, ncfgr, status, ifc_bank, i; -+ -+/* -+ * IFC interrupts disabled -+ */ -+ ifc_out32(0x0, &ifc->cm_evter_intr_en); -+ ifc_out32(0x0, &ifc->ifc_nand.nand_evter_intr_en); -+ ifc_out32(0x0, &ifc->ifc_nor.nor_evter_intr_en); -+ ifc_out32(0x0, &ifc->ifc_gpcm.gpcm_evter_intr_en); -+ -+ -+ if (ctrl->saved_regs) { -+ for (ifc_bank = 0; ifc_bank < FSL_IFC_BANK_COUNT; ifc_bank++) { -+ ifc_out32(savd_regs->cspr_cs[ifc_bank].cspr_ext, -+ &ifc->cspr_cs[ifc_bank].cspr_ext); -+ ifc_out32(savd_regs->cspr_cs[ifc_bank].cspr, -+ &ifc->cspr_cs[ifc_bank].cspr); -+ ifc_out32(savd_regs->amask_cs[ifc_bank].amask, -+ &ifc->amask_cs[ifc_bank].amask); -+ ifc_out32(savd_regs->csor_cs[ifc_bank].csor_ext, -+ &ifc->csor_cs[ifc_bank].csor_ext); -+ ifc_out32(savd_regs->csor_cs[ifc_bank].csor, -+ &ifc->csor_cs[ifc_bank].csor); -+ for (i = 0; i < 4; i++) { -+ ifc_out32(savd_regs->ftim_cs[ifc_bank].ftim[i], -+ &ifc->ftim_cs[ifc_bank].ftim[i]); -+ } -+ } -+ ifc_out32(savd_regs->ifc_gcr, &ifc->ifc_gcr); -+ ifc_out32(savd_regs->cm_evter_en, &ifc->cm_evter_en); -+ -+/* -+* IFC controller NAND machine registers -+*/ -+ ifc_out32(savd_regs->ifc_nand.ncfgr, &ifc->ifc_nand.ncfgr); -+ ifc_out32(savd_regs->ifc_nand.nand_fcr0, -+ &ifc->ifc_nand.nand_fcr0); -+ ifc_out32(savd_regs->ifc_nand.nand_fcr1, -+ &ifc->ifc_nand.nand_fcr1); -+ 
ifc_out32(savd_regs->ifc_nand.row0, &ifc->ifc_nand.row0); -+ ifc_out32(savd_regs->ifc_nand.row1, &ifc->ifc_nand.row1); -+ ifc_out32(savd_regs->ifc_nand.col0, &ifc->ifc_nand.col0); -+ ifc_out32(savd_regs->ifc_nand.col1, &ifc->ifc_nand.col1); -+ ifc_out32(savd_regs->ifc_nand.row2, &ifc->ifc_nand.row2); -+ ifc_out32(savd_regs->ifc_nand.col2, &ifc->ifc_nand.col2); -+ ifc_out32(savd_regs->ifc_nand.row3, &ifc->ifc_nand.row3); -+ ifc_out32(savd_regs->ifc_nand.col3, &ifc->ifc_nand.col3); -+ ifc_out32(savd_regs->ifc_nand.nand_fbcr, -+ &ifc->ifc_nand.nand_fbcr); -+ ifc_out32(savd_regs->ifc_nand.nand_fir0, -+ &ifc->ifc_nand.nand_fir0); -+ ifc_out32(savd_regs->ifc_nand.nand_fir1, -+ &ifc->ifc_nand.nand_fir1); -+ ifc_out32(savd_regs->ifc_nand.nand_fir2, -+ &ifc->ifc_nand.nand_fir2); -+ ifc_out32(savd_regs->ifc_nand.nand_csel, -+ &ifc->ifc_nand.nand_csel); -+ ifc_out32(savd_regs->ifc_nand.nandseq_strt, -+ &ifc->ifc_nand.nandseq_strt); -+ ifc_out32(savd_regs->ifc_nand.nand_evter_en, -+ &ifc->ifc_nand.nand_evter_en); -+ ifc_out32(savd_regs->ifc_nand.nanndcr, &ifc->ifc_nand.nanndcr); -+ -+/* -+* IFC controller NOR machine registers -+*/ -+ ifc_out32(savd_regs->ifc_nor.nor_evter_en, -+ &ifc->ifc_nor.nor_evter_en); -+ ifc_out32(savd_regs->ifc_nor.norcr, &ifc->ifc_nor.norcr); -+ -+/* -+ * IFC controller GPCM Machine registers -+ */ -+ ifc_out32(savd_regs->ifc_gpcm.gpcm_evter_en, -+ &ifc->ifc_gpcm.gpcm_evter_en); -+ -+ -+ -+/* -+ * IFC interrupts enabled -+ */ -+ ifc_out32(ctrl->saved_regs->cm_evter_intr_en, &ifc->cm_evter_intr_en); -+ ifc_out32(ctrl->saved_regs->ifc_nand.nand_evter_intr_en, -+ &ifc->ifc_nand.nand_evter_intr_en); -+ ifc_out32(ctrl->saved_regs->ifc_nor.nor_evter_intr_en, -+ &ifc->ifc_nor.nor_evter_intr_en); -+ ifc_out32(ctrl->saved_regs->ifc_gpcm.gpcm_evter_intr_en, -+ &ifc->ifc_gpcm.gpcm_evter_intr_en); -+ -+ kfree(ctrl->saved_regs); -+ ctrl->saved_regs = NULL; -+ } -+ -+ ver = ifc_in32(&ctrl->regs->ifc_rev); -+ ncfgr = ifc_in32(&ifc->ifc_nand.ncfgr); -+ if (ver >= FSL_IFC_V1_3_0) { -+ -+ ifc_out32(ncfgr | IFC_NAND_SRAM_INIT_EN, -+ &ifc->ifc_nand.ncfgr); -+ /* wait for SRAM_INIT bit to be clear or timeout */ -+ status = spin_event_timeout( -+ !(ifc_in32(&ifc->ifc_nand.ncfgr) -+ & IFC_NAND_SRAM_INIT_EN), -+ IFC_TIMEOUT_MSECS, 0); -+ -+ if (!status) -+ dev_err(ctrl->dev, "Timeout waiting for IFC SRAM INIT"); -+ } -+ -+ return 0; -+} -+#endif /* CONFIG_PM_SLEEP */ -+ - static const struct of_device_id fsl_ifc_match[] = { - { - .compatible = "fsl,ifc", -@@ -316,10 +474,15 @@ static const struct of_device_id fsl_ifc - {}, - }; - -+static const struct dev_pm_ops ifc_pm_ops = { -+ SET_SYSTEM_SLEEP_PM_OPS(fsl_ifc_suspend, fsl_ifc_resume) -+}; -+ - static struct platform_driver fsl_ifc_ctrl_driver = { - .driver = { - .name = "fsl-ifc", - .of_match_table = fsl_ifc_match, -+ .pm = &ifc_pm_ops, - }, - .probe = fsl_ifc_ctrl_probe, - .remove = fsl_ifc_ctrl_remove, ---- a/include/linux/fsl_ifc.h -+++ b/include/linux/fsl_ifc.h -@@ -270,6 +270,8 @@ - */ - /* Auto Boot Mode */ - #define IFC_NAND_NCFGR_BOOT 0x80000000 -+/* SRAM INIT EN */ -+#define IFC_NAND_SRAM_INIT_EN 0x20000000 - /* Addressing Mode-ROW0+n/COL0 */ - #define IFC_NAND_NCFGR_ADDR_MODE_RC0 0x00000000 - /* Addressing Mode-ROW0+n/COL0+n */ -@@ -842,6 +844,10 @@ struct fsl_ifc_ctrl { - u32 nand_stat; - wait_queue_head_t nand_wait; - bool little_endian; -+#ifdef CONFIG_PM_SLEEP -+ /*save regs when system goes to deep sleep*/ -+ struct fsl_ifc_regs *saved_regs; -+#endif - }; - - extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; diff --git 
a/target/linux/layerscape/patches-4.4/4045-driver-memory-Update-dependency-of-IFC-for-Layerscap.patch b/target/linux/layerscape/patches-4.4/4045-driver-memory-Update-dependency-of-IFC-for-Layerscap.patch deleted file mode 100644 index a40eef1c2..000000000 --- a/target/linux/layerscape/patches-4.4/4045-driver-memory-Update-dependency-of-IFC-for-Layerscap.patch +++ /dev/null @@ -1,51 +0,0 @@ -From bf489e54ef3de7de2d9caf896141fd08490a89c9 Mon Sep 17 00:00:00 2001 -From: Raghav Dogra -Date: Wed, 16 Dec 2015 16:11:50 +0530 -Subject: [PATCH 45/70] driver/memory: Update dependency of IFC for Layerscape - -IFC NAND does not work on the ARM Layerscape platform because the -FSL_SOC macro is undefined there. -This patch fixes the dependency to enable NAND. - -Signed-off-by: Raghav Dogra ---- - drivers/memory/Kconfig | 2 +- - drivers/memory/fsl_ifc.c | 4 +++- - drivers/mtd/nand/Kconfig | 2 +- - 3 files changed, 5 insertions(+), 3 deletions(-) - ---- a/drivers/memory/Kconfig -+++ b/drivers/memory/Kconfig -@@ -103,7 +103,7 @@ config FSL_CORENET_CF - - config FSL_IFC - bool -- depends on FSL_SOC -+ depends on FSL_SOC || ARCH_LAYERSCAPE - - config JZ4780_NEMC - bool "Ingenic JZ4780 SoC NEMC driver" ---- a/drivers/memory/fsl_ifc.c -+++ b/drivers/memory/fsl_ifc.c -@@ -32,7 +32,9 @@ - #include - #include - #include --#include -+#include -+#include -+#include - - struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; - EXPORT_SYMBOL(fsl_ifc_ctrl_dev); ---- a/drivers/mtd/nand/Kconfig -+++ b/drivers/mtd/nand/Kconfig -@@ -436,7 +436,7 @@ config MTD_NAND_FSL_ELBC - - config MTD_NAND_FSL_IFC - tristate "NAND support for Freescale IFC controller" -- depends on MTD_NAND && FSL_SOC -+ depends on MTD_NAND && (FSL_SOC || ARCH_LAYERSCAPE) - select FSL_IFC - select MEMORY - help diff --git a/target/linux/layerscape/patches-4.4/4046-mtd-ifc-Segregate-IFC-fcm-and-runtime-registers.patch b/target/linux/layerscape/patches-4.4/4046-mtd-ifc-Segregate-IFC-fcm-and-runtime-registers.patch deleted file mode 100644 index d64e00298..000000000 --- a/target/linux/layerscape/patches-4.4/4046-mtd-ifc-Segregate-IFC-fcm-and-runtime-registers.patch +++ /dev/null @@ -1,705 +0,0 @@ -From 1c62b9982b7f6cb560d1237d2658945c070c91d4 Mon Sep 17 00:00:00 2001 -From: Raghav Dogra -Date: Wed, 20 Jan 2016 13:06:32 +0530 -Subject: [PATCH 46/70] mtd/ifc: Segregate IFC fcm and runtime registers - -IFC has two sets of registers, viz. the FCM (Flash Control Machine), -aka global, registers and the run-time registers. These sets are defined in two -memory map PAGES. Up to IFC 1.4 the PAGE size is 4 KB; from IFC 2.0 -the PAGE size is 64 KB.
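Concretely, the segregation below keeps a single ioremap of the controller and derives the run-time register base from the global (FCM) base at a version-dependent page offset. A minimal sketch of that lookup, with demo_* names standing in for the real driver's types and constants:

#include <linux/io.h>
#include <linux/types.h>

#define DEMO_PGOFFSET_4K	(4 * 1024)	/* runtime page offset, IFC < 2.0 */
#define DEMO_PGOFFSET_64K	(64 * 1024)	/* runtime page offset, IFC >= 2.0 */
#define DEMO_IFC_VERSION_2_0_0	0x02000000

/* Locate the run-time register page relative to the mapped global page. */
static void __iomem *demo_runtime_base(void __iomem *gregs, u32 version)
{
	if (version >= DEMO_IFC_VERSION_2_0_0)
		return gregs + DEMO_PGOFFSET_64K;
	return gregs + DEMO_PGOFFSET_4K;
}

This mirrors the rregs assignment the patch adds to fsl_ifc_ctrl_probe(), so the driver reaches both pages through one mapping on every controller revision.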
- -Signed-off-by: Jaiprakash Singh -Signed-off-by: Raghav Dogra ---- - drivers/memory/fsl_ifc.c | 251 ++++++++++++++++++++------------- - drivers/mtd/nand/fsl_ifc_nand.c | 72 ++++++----- - include/linux/fsl_ifc.h | 48 +++++--- - 3 files changed, 203 insertions(+), 168 deletions(-) - ---- a/drivers/memory/fsl_ifc.c -+++ b/drivers/memory/fsl_ifc.c -@@ -64,11 +64,11 @@ int fsl_ifc_find(phys_addr_t addr_base) - { - int i = 0; - -- if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs) -+ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->gregs) - return -ENODEV; - - for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) { -- u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr); -+ u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->gregs->cspr_cs[i].cspr); - if (cspr & CSPR_V && (cspr & CSPR_BA) == - convert_ifc_address(addr_base)) - return i; -@@ -80,7 +80,7 @@ EXPORT_SYMBOL(fsl_ifc_find); - - static int fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl) - { -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_fcm __iomem *ifc = ctrl->gregs; - - /* - * Clear all the common status and event registers -@@ -109,7 +109,7 @@ static int fsl_ifc_ctrl_remove(struct pl - irq_dispose_mapping(ctrl->nand_irq); - irq_dispose_mapping(ctrl->irq); - -- iounmap(ctrl->regs); -+ iounmap(ctrl->gregs); - - dev_set_drvdata(&dev->dev, NULL); - kfree(ctrl); -@@ -127,7 +127,7 @@ static DEFINE_SPINLOCK(nand_irq_lock); - - static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl) - { -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; - unsigned long flags; - u32 stat; - -@@ -162,7 +162,7 @@ static irqreturn_t fsl_ifc_nand_irq(int - static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data) - { - struct fsl_ifc_ctrl *ctrl = data; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_fcm __iomem *ifc = ctrl->gregs; - u32 err_axiid, err_srcid, status, cs_err, err_addr; - irqreturn_t ret = IRQ_NONE; - -@@ -220,6 +220,7 @@ static int fsl_ifc_ctrl_probe(struct pla - { - int ret = 0; - int version, banks; -+ void __iomem *addr; - - dev_info(&dev->dev, "Freescale Integrated Flash Controller\n"); - -@@ -230,22 +231,13 @@ static int fsl_ifc_ctrl_probe(struct pla - dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev); - - /* IOMAP the entire IFC region */ -- fsl_ifc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0); -- if (!fsl_ifc_ctrl_dev->regs) { -+ fsl_ifc_ctrl_dev->gregs = of_iomap(dev->dev.of_node, 0); -+ if (!fsl_ifc_ctrl_dev->gregs) { - dev_err(&dev->dev, "failed to get memory region\n"); - ret = -ENODEV; - goto err; - } - -- version = ifc_in32(&fsl_ifc_ctrl_dev->regs->ifc_rev) & -- FSL_IFC_VERSION_MASK; -- banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8; -- dev_info(&dev->dev, "IFC version %d.%d, %d banks\n", -- version >> 24, (version >> 16) & 0xf, banks); -- -- fsl_ifc_ctrl_dev->version = version; -- fsl_ifc_ctrl_dev->banks = banks; -- - if (of_property_read_bool(dev->dev.of_node, "little-endian")) { - fsl_ifc_ctrl_dev->little_endian = true; - dev_dbg(&dev->dev, "IFC REGISTERS are LITTLE endian\n"); -@@ -254,8 +246,9 @@ static int fsl_ifc_ctrl_probe(struct pla - dev_dbg(&dev->dev, "IFC REGISTERS are BIG endian\n"); - } - -- version = ioread32be(&fsl_ifc_ctrl_dev->regs->ifc_rev) & -+ version = ifc_in32(&fsl_ifc_ctrl_dev->gregs->ifc_rev) & - FSL_IFC_VERSION_MASK; -+ - banks = (version == FSL_IFC_VERSION_1_0_0) ?
4 : 8; - dev_info(&dev->dev, "IFC version %d.%d, %d banks\n", - version >> 24, (version >> 16) & 0xf, banks); -@@ -263,6 +256,14 @@ static int fsl_ifc_ctrl_probe(struct pla - fsl_ifc_ctrl_dev->version = version; - fsl_ifc_ctrl_dev->banks = banks; - -+ addr = fsl_ifc_ctrl_dev->gregs; -+ if (version >= FSL_IFC_VERSION_2_0_0) -+ fsl_ifc_ctrl_dev->rregs = -+ (struct fsl_ifc_runtime *)(addr + PGOFFSET_64K); -+ else -+ fsl_ifc_ctrl_dev->rregs = -+ (struct fsl_ifc_runtime *)(addr + PGOFFSET_4K); -+ - /* get the Controller level irq */ - fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0); - if (fsl_ifc_ctrl_dev->irq == 0) { -@@ -319,33 +320,39 @@ err: - static int fsl_ifc_suspend(struct device *dev) - { - struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(dev); -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_fcm __iomem *fcm = ctrl->gregs; -+ struct fsl_ifc_runtime __iomem *runtime = ctrl->rregs; - __be32 nand_evter_intr_en, cm_evter_intr_en, nor_evter_intr_en, - gpcm_evter_intr_en; - -- ctrl->saved_regs = kzalloc(sizeof(struct fsl_ifc_regs), GFP_KERNEL); -- if (!ctrl->saved_regs) -+ ctrl->saved_gregs = kzalloc(sizeof(struct fsl_ifc_fcm), GFP_KERNEL); -+ if (!ctrl->saved_gregs) -+ return -ENOMEM; -+ ctrl->saved_rregs = kzalloc(sizeof(struct fsl_ifc_runtime), GFP_KERNEL); -+ if (!ctrl->saved_rregs) - return -ENOMEM; - -- cm_evter_intr_en = ifc_in32(&ifc->cm_evter_intr_en); -- nand_evter_intr_en = ifc_in32(&ifc->ifc_nand.nand_evter_intr_en); -- nor_evter_intr_en = ifc_in32(&ifc->ifc_nor.nor_evter_intr_en); -- gpcm_evter_intr_en = ifc_in32(&ifc->ifc_gpcm.gpcm_evter_intr_en); -+ cm_evter_intr_en = ifc_in32(&fcm->cm_evter_intr_en); -+ nand_evter_intr_en = ifc_in32(&runtime->ifc_nand.nand_evter_intr_en); -+ nor_evter_intr_en = ifc_in32(&runtime->ifc_nor.nor_evter_intr_en); -+ gpcm_evter_intr_en = ifc_in32(&runtime->ifc_gpcm.gpcm_evter_intr_en); - - /* IFC interrupts disabled */ - -- ifc_out32(0x0, &ifc->cm_evter_intr_en); -- ifc_out32(0x0, &ifc->ifc_nand.nand_evter_intr_en); -- ifc_out32(0x0, &ifc->ifc_nor.nor_evter_intr_en); -- ifc_out32(0x0, &ifc->ifc_gpcm.gpcm_evter_intr_en); -- -- memcpy_fromio(ctrl->saved_regs, ifc, sizeof(struct fsl_ifc_regs)); -+ ifc_out32(0x0, &fcm->cm_evter_intr_en); -+ ifc_out32(0x0, &runtime->ifc_nand.nand_evter_intr_en); -+ ifc_out32(0x0, &runtime->ifc_nor.nor_evter_intr_en); -+ ifc_out32(0x0, &runtime->ifc_gpcm.gpcm_evter_intr_en); -+ -+ memcpy_fromio(ctrl->saved_gregs, fcm, sizeof(struct fsl_ifc_fcm)); -+ memcpy_fromio(ctrl->saved_rregs, runtime, -+ sizeof(struct fsl_ifc_runtime)); - - /* save the interrupt values */ -- ctrl->saved_regs->cm_evter_intr_en = cm_evter_intr_en; -- ctrl->saved_regs->ifc_nand.nand_evter_intr_en = nand_evter_intr_en; -- ctrl->saved_regs->ifc_nor.nor_evter_intr_en = nor_evter_intr_en; -- ctrl->saved_regs->ifc_gpcm.gpcm_evter_intr_en = gpcm_evter_intr_en; -+ ctrl->saved_gregs->cm_evter_intr_en = cm_evter_intr_en; -+ ctrl->saved_rregs->ifc_nand.nand_evter_intr_en = nand_evter_intr_en; -+ ctrl->saved_rregs->ifc_nor.nor_evter_intr_en = nor_evter_intr_en; -+ ctrl->saved_rregs->ifc_gpcm.gpcm_evter_intr_en = gpcm_evter_intr_en; - - return 0; - } -@@ -354,110 +361,116 @@ static int fsl_ifc_suspend(struct device - static int fsl_ifc_resume(struct device *dev) - { - struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(dev); -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -- struct fsl_ifc_regs *savd_regs = ctrl->saved_regs; -+ struct fsl_ifc_fcm __iomem *fcm = ctrl->gregs; -+ struct fsl_ifc_runtime __iomem *runtime = ctrl->rregs; -+ 
struct fsl_ifc_fcm *savd_gregs = ctrl->saved_gregs; -+ struct fsl_ifc_runtime *savd_rregs = ctrl->saved_rregs; - uint32_t ver = 0, ncfgr, status, ifc_bank, i; - - /* - * IFC interrupts disabled - */ -- ifc_out32(0x0, &ifc->cm_evter_intr_en); -- ifc_out32(0x0, &ifc->ifc_nand.nand_evter_intr_en); -- ifc_out32(0x0, &ifc->ifc_nor.nor_evter_intr_en); -- ifc_out32(0x0, &ifc->ifc_gpcm.gpcm_evter_intr_en); -+ ifc_out32(0x0, &fcm->cm_evter_intr_en); -+ ifc_out32(0x0, &runtime->ifc_nand.nand_evter_intr_en); -+ ifc_out32(0x0, &runtime->ifc_nor.nor_evter_intr_en); -+ ifc_out32(0x0, &runtime->ifc_gpcm.gpcm_evter_intr_en); - - -- if (ctrl->saved_regs) { -+ if (ctrl->saved_gregs) { - for (ifc_bank = 0; ifc_bank < FSL_IFC_BANK_COUNT; ifc_bank++) { -- ifc_out32(savd_regs->cspr_cs[ifc_bank].cspr_ext, -- &ifc->cspr_cs[ifc_bank].cspr_ext); -- ifc_out32(savd_regs->cspr_cs[ifc_bank].cspr, -- &ifc->cspr_cs[ifc_bank].cspr); -- ifc_out32(savd_regs->amask_cs[ifc_bank].amask, -- &ifc->amask_cs[ifc_bank].amask); -- ifc_out32(savd_regs->csor_cs[ifc_bank].csor_ext, -- &ifc->csor_cs[ifc_bank].csor_ext); -- ifc_out32(savd_regs->csor_cs[ifc_bank].csor, -- &ifc->csor_cs[ifc_bank].csor); -+ ifc_out32(savd_gregs->cspr_cs[ifc_bank].cspr_ext, -+ &fcm->cspr_cs[ifc_bank].cspr_ext); -+ ifc_out32(savd_gregs->cspr_cs[ifc_bank].cspr, -+ &fcm->cspr_cs[ifc_bank].cspr); -+ ifc_out32(savd_gregs->amask_cs[ifc_bank].amask, -+ &fcm->amask_cs[ifc_bank].amask); -+ ifc_out32(savd_gregs->csor_cs[ifc_bank].csor_ext, -+ &fcm->csor_cs[ifc_bank].csor_ext); -+ ifc_out32(savd_gregs->csor_cs[ifc_bank].csor, -+ &fcm->csor_cs[ifc_bank].csor); - for (i = 0; i < 4; i++) { -- ifc_out32(savd_regs->ftim_cs[ifc_bank].ftim[i], -- &ifc->ftim_cs[ifc_bank].ftim[i]); -+ ifc_out32(savd_gregs->ftim_cs[ifc_bank].ftim[i], -+ &fcm->ftim_cs[ifc_bank].ftim[i]); - } - } -- ifc_out32(savd_regs->ifc_gcr, &ifc->ifc_gcr); -- ifc_out32(savd_regs->cm_evter_en, &ifc->cm_evter_en); -- --/* --* IFC controller NAND machine registers --*/ -- ifc_out32(savd_regs->ifc_nand.ncfgr, &ifc->ifc_nand.ncfgr); -- ifc_out32(savd_regs->ifc_nand.nand_fcr0, -- &ifc->ifc_nand.nand_fcr0); -- ifc_out32(savd_regs->ifc_nand.nand_fcr1, -- &ifc->ifc_nand.nand_fcr1); -- ifc_out32(savd_regs->ifc_nand.row0, &ifc->ifc_nand.row0); -- ifc_out32(savd_regs->ifc_nand.row1, &ifc->ifc_nand.row1); -- ifc_out32(savd_regs->ifc_nand.col0, &ifc->ifc_nand.col0); -- ifc_out32(savd_regs->ifc_nand.col1, &ifc->ifc_nand.col1); -- ifc_out32(savd_regs->ifc_nand.row2, &ifc->ifc_nand.row2); -- ifc_out32(savd_regs->ifc_nand.col2, &ifc->ifc_nand.col2); -- ifc_out32(savd_regs->ifc_nand.row3, &ifc->ifc_nand.row3); -- ifc_out32(savd_regs->ifc_nand.col3, &ifc->ifc_nand.col3); -- ifc_out32(savd_regs->ifc_nand.nand_fbcr, -- &ifc->ifc_nand.nand_fbcr); -- ifc_out32(savd_regs->ifc_nand.nand_fir0, -- &ifc->ifc_nand.nand_fir0); -- ifc_out32(savd_regs->ifc_nand.nand_fir1, -- &ifc->ifc_nand.nand_fir1); -- ifc_out32(savd_regs->ifc_nand.nand_fir2, -- &ifc->ifc_nand.nand_fir2); -- ifc_out32(savd_regs->ifc_nand.nand_csel, -- &ifc->ifc_nand.nand_csel); -- ifc_out32(savd_regs->ifc_nand.nandseq_strt, -- &ifc->ifc_nand.nandseq_strt); -- ifc_out32(savd_regs->ifc_nand.nand_evter_en, -- &ifc->ifc_nand.nand_evter_en); -- ifc_out32(savd_regs->ifc_nand.nanndcr, &ifc->ifc_nand.nanndcr); -- --/* --* IFC controller NOR machine registers --*/ -- ifc_out32(savd_regs->ifc_nor.nor_evter_en, -- &ifc->ifc_nor.nor_evter_en); -- ifc_out32(savd_regs->ifc_nor.norcr, &ifc->ifc_nor.norcr); -- --/* -- * IFC controller GPCM Machine registers -- */ -- 
ifc_out32(savd_regs->ifc_gpcm.gpcm_evter_en, -- &ifc->ifc_gpcm.gpcm_evter_en); -- -- -- --/* -- * IFC interrupts enabled -- */ -- ifc_out32(ctrl->saved_regs->cm_evter_intr_en, &ifc->cm_evter_intr_en); -- ifc_out32(ctrl->saved_regs->ifc_nand.nand_evter_intr_en, -- &ifc->ifc_nand.nand_evter_intr_en); -- ifc_out32(ctrl->saved_regs->ifc_nor.nor_evter_intr_en, -- &ifc->ifc_nor.nor_evter_intr_en); -- ifc_out32(ctrl->saved_regs->ifc_gpcm.gpcm_evter_intr_en, -- &ifc->ifc_gpcm.gpcm_evter_intr_en); -+ ifc_out32(savd_gregs->rb_map, &fcm->rb_map); -+ ifc_out32(savd_gregs->wb_map, &fcm->wb_map); -+ ifc_out32(savd_gregs->ifc_gcr, &fcm->ifc_gcr); -+ ifc_out32(savd_gregs->ddr_ccr_low, &fcm->ddr_ccr_low); -+ ifc_out32(savd_gregs->cm_evter_en, &fcm->cm_evter_en); -+ } - -- kfree(ctrl->saved_regs); -- ctrl->saved_regs = NULL; -+ if (ctrl->saved_rregs) { -+ /* IFC controller NAND machine registers */ -+ ifc_out32(savd_rregs->ifc_nand.ncfgr, -+ &runtime->ifc_nand.ncfgr); -+ ifc_out32(savd_rregs->ifc_nand.nand_fcr0, -+ &runtime->ifc_nand.nand_fcr0); -+ ifc_out32(savd_rregs->ifc_nand.nand_fcr1, -+ &runtime->ifc_nand.nand_fcr1); -+ ifc_out32(savd_rregs->ifc_nand.row0, &runtime->ifc_nand.row0); -+ ifc_out32(savd_rregs->ifc_nand.row1, &runtime->ifc_nand.row1); -+ ifc_out32(savd_rregs->ifc_nand.col0, &runtime->ifc_nand.col0); -+ ifc_out32(savd_rregs->ifc_nand.col1, &runtime->ifc_nand.col1); -+ ifc_out32(savd_rregs->ifc_nand.row2, &runtime->ifc_nand.row2); -+ ifc_out32(savd_rregs->ifc_nand.col2, &runtime->ifc_nand.col2); -+ ifc_out32(savd_rregs->ifc_nand.row3, &runtime->ifc_nand.row3); -+ ifc_out32(savd_rregs->ifc_nand.col3, &runtime->ifc_nand.col3); -+ ifc_out32(savd_rregs->ifc_nand.nand_fbcr, -+ &runtime->ifc_nand.nand_fbcr); -+ ifc_out32(savd_rregs->ifc_nand.nand_fir0, -+ &runtime->ifc_nand.nand_fir0); -+ ifc_out32(savd_rregs->ifc_nand.nand_fir1, -+ &runtime->ifc_nand.nand_fir1); -+ ifc_out32(savd_rregs->ifc_nand.nand_fir2, -+ &runtime->ifc_nand.nand_fir2); -+ ifc_out32(savd_rregs->ifc_nand.nand_csel, -+ &runtime->ifc_nand.nand_csel); -+ ifc_out32(savd_rregs->ifc_nand.nandseq_strt, -+ &runtime->ifc_nand.nandseq_strt); -+ ifc_out32(savd_rregs->ifc_nand.nand_evter_en, -+ &runtime->ifc_nand.nand_evter_en); -+ ifc_out32(savd_rregs->ifc_nand.nanndcr, -+ &runtime->ifc_nand.nanndcr); -+ ifc_out32(savd_rregs->ifc_nand.nand_dll_lowcfg0, -+ &runtime->ifc_nand.nand_dll_lowcfg0); -+ ifc_out32(savd_rregs->ifc_nand.nand_dll_lowcfg1, -+ &runtime->ifc_nand.nand_dll_lowcfg1); -+ -+ /* IFC controller NOR machine registers */ -+ ifc_out32(savd_rregs->ifc_nor.nor_evter_en, -+ &runtime->ifc_nor.nor_evter_en); -+ ifc_out32(savd_rregs->ifc_nor.norcr, &runtime->ifc_nor.norcr); -+ -+ /* IFC controller GPCM Machine registers */ -+ ifc_out32(savd_rregs->ifc_gpcm.gpcm_evter_en, -+ &runtime->ifc_gpcm.gpcm_evter_en); -+ -+ /* IFC interrupts enabled */ -+ ifc_out32(ctrl->saved_gregs->cm_evter_intr_en, -+ &fcm->cm_evter_intr_en); -+ ifc_out32(ctrl->saved_rregs->ifc_nand.nand_evter_intr_en, -+ &runtime->ifc_nand.nand_evter_intr_en); -+ ifc_out32(ctrl->saved_rregs->ifc_nor.nor_evter_intr_en, -+ &runtime->ifc_nor.nor_evter_intr_en); -+ ifc_out32(ctrl->saved_rregs->ifc_gpcm.gpcm_evter_intr_en, -+ &runtime->ifc_gpcm.gpcm_evter_intr_en); -+ -+ kfree(ctrl->saved_gregs); -+ kfree(ctrl->saved_rregs); -+ ctrl->saved_gregs = NULL; -+ ctrl->saved_rregs = NULL; - } - -- ver = ifc_in32(&ctrl->regs->ifc_rev); -- ncfgr = ifc_in32(&ifc->ifc_nand.ncfgr); -+ ver = ifc_in32(&fcm->ifc_rev); -+ ncfgr = ifc_in32(&runtime->ifc_nand.ncfgr); - if (ver >= FSL_IFC_V1_3_0) 
{ - - ifc_out32(ncfgr | IFC_NAND_SRAM_INIT_EN, -- &ifc->ifc_nand.ncfgr); -+ &runtime->ifc_nand.ncfgr); - /* wait for SRAM_INIT bit to be clear or timeout */ - status = spin_event_timeout( -- !(ifc_in32(&ifc->ifc_nand.ncfgr) -+ !(ifc_in32(&runtime->ifc_nand.ncfgr) - & IFC_NAND_SRAM_INIT_EN), - IFC_TIMEOUT_MSECS, 0); - ---- a/drivers/mtd/nand/fsl_ifc_nand.c -+++ b/drivers/mtd/nand/fsl_ifc_nand.c -@@ -233,7 +233,7 @@ static void set_addr(struct mtd_info *mt - struct nand_chip *chip = mtd->priv; - struct fsl_ifc_mtd *priv = chip->priv; - struct fsl_ifc_ctrl *ctrl = priv->ctrl; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; - int buf_num; - - ifc_nand_ctrl->page = page_addr; -@@ -296,7 +296,7 @@ static void fsl_ifc_run_command(struct m - struct fsl_ifc_mtd *priv = chip->priv; - struct fsl_ifc_ctrl *ctrl = priv->ctrl; - struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; - u32 eccstat[4]; - int i; - -@@ -372,7 +372,7 @@ static void fsl_ifc_do_read(struct nand_ - { - struct fsl_ifc_mtd *priv = chip->priv; - struct fsl_ifc_ctrl *ctrl = priv->ctrl; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; - - /* Program FIR/IFC_NAND_FCR0 for Small/Large page */ - if (mtd->writesize > 512) { -@@ -412,7 +412,7 @@ static void fsl_ifc_cmdfunc(struct mtd_i - struct nand_chip *chip = mtd->priv; - struct fsl_ifc_mtd *priv = chip->priv; - struct fsl_ifc_ctrl *ctrl = priv->ctrl; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; - - /* clear the read buffer */ - ifc_nand_ctrl->read_bytes = 0; -@@ -724,7 +724,7 @@ static int fsl_ifc_wait(struct mtd_info - { - struct fsl_ifc_mtd *priv = chip->priv; - struct fsl_ifc_ctrl *ctrl = priv->ctrl; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; - u32 nand_fsr; - - /* Use READ_STATUS command, but wait for the device to be ready */ -@@ -826,39 +826,42 @@ static int fsl_ifc_chip_init_tail(struct - static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv) - { - struct fsl_ifc_ctrl *ctrl = priv->ctrl; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs; -+ struct fsl_ifc_fcm __iomem *ifc_global = ctrl->gregs; - uint32_t csor = 0, csor_8k = 0, csor_ext = 0; - uint32_t cs = priv->bank; - - /* Save CSOR and CSOR_ext */ -- csor = ifc_in32(&ifc->csor_cs[cs].csor); -- csor_ext = ifc_in32(&ifc->csor_cs[cs].csor_ext); -+ csor = ifc_in32(&ifc_global->csor_cs[cs].csor); -+ csor_ext = ifc_in32(&ifc_global->csor_cs[cs].csor_ext); - - /* chage PageSize 8K and SpareSize 1K*/ - csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000; -- ifc_out32(csor_8k, &ifc->csor_cs[cs].csor); -- ifc_out32(0x0000400, &ifc->csor_cs[cs].csor_ext); -+ ifc_out32(csor_8k, &ifc_global->csor_cs[cs].csor); -+ ifc_out32(0x0000400, &ifc_global->csor_cs[cs].csor_ext); - - /* READID */ - ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -- (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | -- (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT), -- &ifc->ifc_nand.nand_fir0); -+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | -+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT), -+ &ifc_runtime->ifc_nand.nand_fir0); - ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT, -- &ifc->ifc_nand.nand_fcr0); -- ifc_out32(0x0, &ifc->ifc_nand.row3); -+ 
&ifc_runtime->ifc_nand.nand_fcr0); -+ ifc_out32(0x0, &ifc_runtime->ifc_nand.row3); - -- ifc_out32(0x0, &ifc->ifc_nand.nand_fbcr); -+ ifc_out32(0x0, &ifc_runtime->ifc_nand.nand_fbcr); - - /* Program ROW0/COL0 */ -- ifc_out32(0x0, &ifc->ifc_nand.row0); -- ifc_out32(0x0, &ifc->ifc_nand.col0); -+ ifc_out32(0x0, &ifc_runtime->ifc_nand.row0); -+ ifc_out32(0x0, &ifc_runtime->ifc_nand.col0); - - /* set the chip select for NAND Transaction */ -- ifc_out32(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel); -+ ifc_out32(cs << IFC_NAND_CSEL_SHIFT, -+ &ifc_runtime->ifc_nand.nand_csel); - - /* start read seq */ -- ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt); -+ ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, -+ &ifc_runtime->ifc_nand.nandseq_strt); - - /* wait for command complete flag or timeout */ - wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat, -@@ -868,14 +871,15 @@ static void fsl_ifc_sram_init(struct fsl - printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n"); - - /* Restore CSOR and CSOR_ext */ -- ifc_out32(csor, &ifc->csor_cs[cs].csor); -- ifc_out32(csor_ext, &ifc->csor_cs[cs].csor_ext); -+ ifc_out32(csor, &ifc_global->csor_cs[cs].csor); -+ ifc_out32(csor_ext, &ifc_global->csor_cs[cs].csor_ext); - } - - static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) - { - struct fsl_ifc_ctrl *ctrl = priv->ctrl; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_fcm __iomem *ifc_global = ctrl->gregs; -+ struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs; - struct nand_chip *chip = &priv->chip; - struct nand_ecclayout *layout; - u32 csor; -@@ -886,7 +890,8 @@ static int fsl_ifc_chip_init(struct fsl_ - - /* fill in nand_chip structure */ - /* set up function call table */ -- if ((ifc_in32(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16) -+ if ((ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr)) -+ & CSPR_PORT_SIZE_16) - chip->read_byte = fsl_ifc_read_byte16; - else - chip->read_byte = fsl_ifc_read_byte; -@@ -900,13 +905,14 @@ static int fsl_ifc_chip_init(struct fsl_ - chip->bbt_td = &bbt_main_descr; - chip->bbt_md = &bbt_mirror_descr; - -- ifc_out32(0x0, &ifc->ifc_nand.ncfgr); -+ ifc_out32(0x0, &ifc_runtime->ifc_nand.ncfgr); - - /* set up nand options */ - chip->bbt_options = NAND_BBT_USE_FLASH; - chip->options = NAND_NO_SUBPAGE_WRITE; - -- if (ifc_in32(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) { -+ if (ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr) -+ & CSPR_PORT_SIZE_16) { - chip->read_byte = fsl_ifc_read_byte16; - chip->options |= NAND_BUSWIDTH_16; - } else { -@@ -919,7 +925,7 @@ static int fsl_ifc_chip_init(struct fsl_ - chip->ecc.read_page = fsl_ifc_read_page; - chip->ecc.write_page = fsl_ifc_write_page; - -- csor = ifc_in32(&ifc->csor_cs[priv->bank].csor); -+ csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor); - - /* Hardware generates ECC per 512 Bytes */ - chip->ecc.size = 512; -@@ -1005,10 +1011,10 @@ static int fsl_ifc_chip_remove(struct fs - return 0; - } - --static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank, -+static int match_bank(struct fsl_ifc_fcm __iomem *ifc_global, int bank, - phys_addr_t addr) - { -- u32 cspr = ifc_in32(&ifc->cspr_cs[bank].cspr); -+ u32 cspr = ifc_in32(&ifc_global->cspr_cs[bank].cspr); - - if (!(cspr & CSPR_V)) - return 0; -@@ -1022,7 +1028,7 @@ static DEFINE_MUTEX(fsl_ifc_nand_mutex); - - static int fsl_ifc_nand_probe(struct platform_device *dev) - { -- struct fsl_ifc_regs __iomem *ifc; -+ struct fsl_ifc_runtime __iomem *ifc; - struct fsl_ifc_mtd *priv; - struct resource res; - static const 
char *part_probe_types[] -@@ -1033,9 +1039,9 @@ static int fsl_ifc_nand_probe(struct pla - struct mtd_part_parser_data ppdata; - - ppdata.of_node = dev->dev.of_node; -- if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs) -+ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->rregs) - return -ENODEV; -- ifc = fsl_ifc_ctrl_dev->regs; -+ ifc = fsl_ifc_ctrl_dev->rregs; - - /* get, allocate and map the memory resource */ - ret = of_address_to_resource(node, 0, &res); -@@ -1046,7 +1052,7 @@ static int fsl_ifc_nand_probe(struct pla - - /* find which chip select it is connected to */ - for (bank = 0; bank < fsl_ifc_ctrl_dev->banks; bank++) { -- if (match_bank(ifc, bank, res.start)) -+ if (match_bank(fsl_ifc_ctrl_dev->gregs, bank, res.start)) - break; - } - ---- a/include/linux/fsl_ifc.h -+++ b/include/linux/fsl_ifc.h -@@ -39,6 +39,10 @@ - #define FSL_IFC_VERSION_MASK 0x0F0F0000 - #define FSL_IFC_VERSION_1_0_0 0x01000000 - #define FSL_IFC_VERSION_1_1_0 0x01010000 -+#define FSL_IFC_VERSION_2_0_0 0x02000000 -+ -+#define PGOFFSET_64K (64*1024) -+#define PGOFFSET_4K (4*1024) - - /* - * CSPR - Chip Select Property Register -@@ -725,20 +729,26 @@ struct fsl_ifc_nand { - __be32 nand_evter_en; - u32 res17[0x2]; - __be32 nand_evter_intr_en; -- u32 res18[0x2]; -+ __be32 nand_vol_addr_stat; -+ u32 res18; - __be32 nand_erattr0; - __be32 nand_erattr1; - u32 res19[0x10]; - __be32 nand_fsr; -- u32 res20; -- __be32 nand_eccstat[4]; -- u32 res21[0x20]; -+ u32 res20[0x3]; -+ __be32 nand_eccstat[6]; -+ u32 res21[0x1c]; - __be32 nanndcr; - u32 res22[0x2]; - __be32 nand_autoboot_trgr; - u32 res23; - __be32 nand_mdr; -- u32 res24[0x5C]; -+ u32 res24[0x1C]; -+ __be32 nand_dll_lowcfg0; -+ __be32 nand_dll_lowcfg1; -+ u32 res25; -+ __be32 nand_dll_lowstat; -+ u32 res26[0x3c]; - }; - - /* -@@ -773,13 +783,12 @@ struct fsl_ifc_gpcm { - __be32 gpcm_erattr1; - __be32 gpcm_erattr2; - __be32 gpcm_stat; -- u32 res4[0x1F3]; - }; - - /* - * IFC Controller Registers - */ --struct fsl_ifc_regs { -+struct fsl_ifc_fcm { - __be32 ifc_rev; - u32 res1[0x2]; - struct { -@@ -805,21 +814,26 @@ struct fsl_ifc_regs { - } ftim_cs[FSL_IFC_BANK_COUNT]; - u32 res9[0x30]; - __be32 rb_stat; -- u32 res10[0x2]; -+ __be32 rb_map; -+ __be32 wb_map; - __be32 ifc_gcr; -- u32 res11[0x2]; -+ u32 res10[0x2]; - __be32 cm_evter_stat; -- u32 res12[0x2]; -+ u32 res11[0x2]; - __be32 cm_evter_en; -- u32 res13[0x2]; -+ u32 res12[0x2]; - __be32 cm_evter_intr_en; -- u32 res14[0x2]; -+ u32 res13[0x2]; - __be32 cm_erattr0; - __be32 cm_erattr1; -- u32 res15[0x2]; -+ u32 res14[0x2]; - __be32 ifc_ccr; - __be32 ifc_csr; -- u32 res16[0x2EB]; -+ __be32 ddr_ccr_low; -+}; -+ -+ -+struct fsl_ifc_runtime { - struct fsl_ifc_nand ifc_nand; - struct fsl_ifc_nor ifc_nor; - struct fsl_ifc_gpcm ifc_gpcm; -@@ -833,7 +847,8 @@ extern int fsl_ifc_find(phys_addr_t addr - struct fsl_ifc_ctrl { - /* device info */ - struct device *dev; -- struct fsl_ifc_regs __iomem *regs; -+ struct fsl_ifc_fcm __iomem *gregs; -+ struct fsl_ifc_runtime __iomem *rregs; - int irq; - int nand_irq; - spinlock_t lock; -@@ -846,7 +861,8 @@ struct fsl_ifc_ctrl { - bool little_endian; - #ifdef CONFIG_PM_SLEEP - /*save regs when system goes to deep sleep*/ -- struct fsl_ifc_regs *saved_regs; -+ struct fsl_ifc_fcm *saved_gregs; -+ struct fsl_ifc_runtime *saved_rregs; - #endif - }; - diff --git a/target/linux/layerscape/patches-4.4/4047-drivers-memory-Fix-build-error-for-arm64.patch b/target/linux/layerscape/patches-4.4/4047-drivers-memory-Fix-build-error-for-arm64.patch deleted file mode 100644 index da7f18f1f..000000000 --- 
a/target/linux/layerscape/patches-4.4/4047-drivers-memory-Fix-build-error-for-arm64.patch +++ /dev/null @@ -1,53 +0,0 @@ -From 0ce5d6bd62a9f1dbaa2d39c3535a8bdb31cf7951 Mon Sep 17 00:00:00 2001 -From: Raghav Dogra -Date: Wed, 24 Feb 2016 23:12:58 +0530 -Subject: [PATCH 47/70] drivers/memory: Fix build error for arm64 - -Replace spin_event_timeout() with an arch-independent polling loop - -Signed-off-by: Raghav Dogra -Signed-off-by: Prabhakar Kushwaha ---- - drivers/memory/fsl_ifc.c | 16 +++++++++------- - 1 file changed, 9 insertions(+), 7 deletions(-) - ---- a/drivers/memory/fsl_ifc.c -+++ b/drivers/memory/fsl_ifc.c -@@ -39,7 +39,7 @@ - struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; - EXPORT_SYMBOL(fsl_ifc_ctrl_dev); - #define FSL_IFC_V1_3_0 0x01030000 --#define IFC_TIMEOUT_MSECS 100000 /* 100ms */ -+#define IFC_TIMEOUT_MSECS 1000 /* 1000ms */ - - /* - * convert_ifc_address - convert the base address -@@ -365,7 +365,7 @@ static int fsl_ifc_resume(struct device - struct fsl_ifc_runtime __iomem *runtime = ctrl->rregs; - struct fsl_ifc_fcm *savd_gregs = ctrl->saved_gregs; - struct fsl_ifc_runtime *savd_rregs = ctrl->saved_rregs; -- uint32_t ver = 0, ncfgr, status, ifc_bank, i; -+ uint32_t ver = 0, ncfgr, timeout, ifc_bank, i; - - /* - * IFC interrupts disabled -@@ -469,12 +469,14 @@ static int fsl_ifc_resume(struct device - ifc_out32(ncfgr | IFC_NAND_SRAM_INIT_EN, - &runtime->ifc_nand.ncfgr); - /* wait for SRAM_INIT bit to be clear or timeout */ -- status = spin_event_timeout( -- !(ifc_in32(&runtime->ifc_nand.ncfgr) -- & IFC_NAND_SRAM_INIT_EN), -- IFC_TIMEOUT_MSECS, 0); -+ timeout = 10; -+ while ((ifc_in32(&runtime->ifc_nand.ncfgr) & -+ IFC_NAND_SRAM_INIT_EN) && timeout) { -+ mdelay(IFC_TIMEOUT_MSECS); -+ timeout--; -+ } - -- if (!status) -+ if (!timeout) - dev_err(ctrl->dev, "Timeout waiting for IFC SRAM INIT"); - } - diff --git a/target/linux/layerscape/patches-4.4/4234-fsl-ifc-fix-compilation-error-when-COMPAT-not-enable.patch b/target/linux/layerscape/patches-4.4/4234-fsl-ifc-fix-compilation-error-when-COMPAT-not-enable.patch deleted file mode 100644 index 2097be0c7..000000000 --- a/target/linux/layerscape/patches-4.4/4234-fsl-ifc-fix-compilation-error-when-COMPAT-not-enable.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 6183d512e7539033ccfd177d5f5819302d1fda99 Mon Sep 17 00:00:00 2001 -From: Lijun Pan -Date: Wed, 23 Sep 2015 17:06:01 -0500 -Subject: [PATCH 234/238] fsl-ifc: fix compilation error when COMPAT not - enabled - -When CONFIG_COMPAT is not enabled in cases where 64K pages -are enabled, a series of include dependencies results in some -definitions from sched.h being missed (e.g. - TASK_NORMAL). Explicitly include sched.h to resolve this.
-(This seems to be what other drivers do as well) - -Signed-off-by: Lijun Pan -[Stuart: updated subject and commit message] -Signed-off-by: Stuart Yoder ---- - drivers/memory/fsl_ifc.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/memory/fsl_ifc.c -+++ b/drivers/memory/fsl_ifc.c -@@ -35,6 +35,7 @@ - #include - #include - #include -+#include - - struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; - EXPORT_SYMBOL(fsl_ifc_ctrl_dev); diff --git a/target/linux/layerscape/patches-4.4/7014-temp-QE-headers-are-needed-by-FMD.patch b/target/linux/layerscape/patches-4.4/7014-temp-QE-headers-are-needed-by-FMD.patch deleted file mode 100644 index e6962c266..000000000 --- a/target/linux/layerscape/patches-4.4/7014-temp-QE-headers-are-needed-by-FMD.patch +++ /dev/null @@ -1,1317 +0,0 @@ -From 03c463111e16f9bae8a659408e5f02333af13239 Mon Sep 17 00:00:00 2001 -From: Madalin Bucur -Date: Tue, 5 Jan 2016 15:41:28 +0200 -Subject: [PATCH 14/70] temp: QE headers are needed by FMD - -Signed-off-by: Madalin Bucur ---- - include/linux/fsl/immap_qe.h | 488 +++++++++++++++++++++++++ - include/linux/fsl/qe.h | 810 ++++++++++++++++++++++++++++++++++++++++++ - 2 files changed, 1298 insertions(+) - create mode 100644 include/linux/fsl/immap_qe.h - create mode 100644 include/linux/fsl/qe.h - ---- /dev/null -+++ b/include/linux/fsl/immap_qe.h -@@ -0,0 +1,488 @@ -+/* -+ * QUICC Engine (QE) Internal Memory Map. -+ * The Internal Memory Map for devices with QE on them. This -+ * is the superset of all QE devices (8360, etc.). -+ * Copyright (C) 2006. Freescale Semiconductor, Inc. All rights reserved. -+ * -+ * Authors: -+ * Shlomi Gridish -+ * Li Yang -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2 of the License, or (at your -+ * option) any later version. 
-+ */ -+#ifndef _ASM_POWERPC_IMMAP_QE_H -+#define _ASM_POWERPC_IMMAP_QE_H -+#ifdef __KERNEL__ -+ -+#include -+#include -+ -+#define QE_IMMAP_SIZE (1024 * 1024) /* 1MB from 1MB+IMMR */ -+ -+/* QE I-RAM */ -+struct qe_iram { -+ __be32 iadd; /* I-RAM Address Register */ -+ __be32 idata; /* I-RAM Data Register */ -+ u8 res0[0x04]; -+ __be32 iready; /* I-RAM Ready Register */ -+ u8 res1[0x70]; -+} __packed; -+ -+/* QE Interrupt Controller */ -+struct qe_ic_regs { -+ __be32 qicr; -+ __be32 qivec; -+ __be32 qripnr; -+ __be32 qipnr; -+ __be32 qipxcc; -+ __be32 qipycc; -+ __be32 qipwcc; -+ __be32 qipzcc; -+ __be32 qimr; -+ __be32 qrimr; -+ __be32 qicnr; -+ u8 res0[0x4]; -+ __be32 qiprta; -+ __be32 qiprtb; -+ u8 res1[0x4]; -+ __be32 qricr; -+ u8 res2[0x20]; -+ __be32 qhivec; -+ u8 res3[0x1C]; -+} __packed; -+ -+/* Communications Processor */ -+struct cp_qe { -+ __be32 cecr; /* QE command register */ -+ __be32 ceccr; /* QE controller configuration register */ -+ __be32 cecdr; /* QE command data register */ -+ u8 res0[0xA]; -+ __be16 ceter; /* QE timer event register */ -+ u8 res1[0x2]; -+ __be16 cetmr; /* QE timers mask register */ -+ __be32 cetscr; /* QE time-stamp timer control register */ -+ __be32 cetsr1; /* QE time-stamp register 1 */ -+ __be32 cetsr2; /* QE time-stamp register 2 */ -+ u8 res2[0x8]; -+ __be32 cevter; /* QE virtual tasks event register */ -+ __be32 cevtmr; /* QE virtual tasks mask register */ -+ __be16 cercr; /* QE RAM control register */ -+ u8 res3[0x2]; -+ u8 res4[0x24]; -+ __be16 ceexe1; /* QE external request 1 event register */ -+ u8 res5[0x2]; -+ __be16 ceexm1; /* QE external request 1 mask register */ -+ u8 res6[0x2]; -+ __be16 ceexe2; /* QE external request 2 event register */ -+ u8 res7[0x2]; -+ __be16 ceexm2; /* QE external request 2 mask register */ -+ u8 res8[0x2]; -+ __be16 ceexe3; /* QE external request 3 event register */ -+ u8 res9[0x2]; -+ __be16 ceexm3; /* QE external request 3 mask register */ -+ u8 res10[0x2]; -+ __be16 ceexe4; /* QE external request 4 event register */ -+ u8 res11[0x2]; -+ __be16 ceexm4; /* QE external request 4 mask register */ -+ u8 res12[0x3A]; -+ __be32 ceurnr; /* QE microcode revision number register */ -+ u8 res13[0x244]; -+} __packed; -+ -+/* QE Multiplexer */ -+struct qe_mux { -+ __be32 cmxgcr; /* CMX general clock route register */ -+ __be32 cmxsi1cr_l; /* CMX SI1 clock route low register */ -+ __be32 cmxsi1cr_h; /* CMX SI1 clock route high register */ -+ __be32 cmxsi1syr; /* CMX SI1 SYNC route register */ -+ __be32 cmxucr[4]; /* CMX UCCx clock route registers */ -+ __be32 cmxupcr; /* CMX UPC clock route register */ -+ u8 res0[0x1C]; -+} __packed; -+ -+/* QE Timers */ -+struct qe_timers { -+ u8 gtcfr1; /* Timer 1 and Timer 2 global config register*/ -+ u8 res0[0x3]; -+ u8 gtcfr2; /* Timer 3 and timer 4 global config register*/ -+ u8 res1[0xB]; -+ __be16 gtmdr1; /* Timer 1 mode register */ -+ __be16 gtmdr2; /* Timer 2 mode register */ -+ __be16 gtrfr1; /* Timer 1 reference register */ -+ __be16 gtrfr2; /* Timer 2 reference register */ -+ __be16 gtcpr1; /* Timer 1 capture register */ -+ __be16 gtcpr2; /* Timer 2 capture register */ -+ __be16 gtcnr1; /* Timer 1 counter */ -+ __be16 gtcnr2; /* Timer 2 counter */ -+ __be16 gtmdr3; /* Timer 3 mode register */ -+ __be16 gtmdr4; /* Timer 4 mode register */ -+ __be16 gtrfr3; /* Timer 3 reference register */ -+ __be16 gtrfr4; /* Timer 4 reference register */ -+ __be16 gtcpr3; /* Timer 3 capture register */ -+ __be16 gtcpr4; /* Timer 4 capture register */ -+ __be16 gtcnr3; /* Timer 3 counter */ 
-+ __be16 gtcnr4; /* Timer 4 counter */ -+ __be16 gtevr1; /* Timer 1 event register */ -+ __be16 gtevr2; /* Timer 2 event register */ -+ __be16 gtevr3; /* Timer 3 event register */ -+ __be16 gtevr4; /* Timer 4 event register */ -+ __be16 gtps; /* Timer 1 prescale register */ -+ u8 res2[0x46]; -+} __packed; -+ -+/* BRG */ -+struct qe_brg { -+ __be32 brgc[16]; /* BRG configuration registers */ -+ u8 res0[0x40]; -+} __packed; -+ -+/* SPI */ -+struct spi { -+ u8 res0[0x20]; -+ __be32 spmode; /* SPI mode register */ -+ u8 res1[0x2]; -+ u8 spie; /* SPI event register */ -+ u8 res2[0x1]; -+ u8 res3[0x2]; -+ u8 spim; /* SPI mask register */ -+ u8 res4[0x1]; -+ u8 res5[0x1]; -+ u8 spcom; /* SPI command register */ -+ u8 res6[0x2]; -+ __be32 spitd; /* SPI transmit data register (cpu mode) */ -+ __be32 spird; /* SPI receive data register (cpu mode) */ -+ u8 res7[0x8]; -+} __packed; -+ -+/* SI */ -+struct si1 { -+ __be16 sixmr1[4]; /* SI1 TDMx (x = A B C D) mode register */ -+ u8 siglmr1_h; /* SI1 global mode register high */ -+ u8 res0[0x1]; -+ u8 sicmdr1_h; /* SI1 command register high */ -+ u8 res2[0x1]; -+ u8 sistr1_h; /* SI1 status register high */ -+ u8 res3[0x1]; -+ __be16 sirsr1_h; /* SI1 RAM shadow address register high */ -+ u8 sitarc1; /* SI1 RAM counter Tx TDMA */ -+ u8 sitbrc1; /* SI1 RAM counter Tx TDMB */ -+ u8 sitcrc1; /* SI1 RAM counter Tx TDMC */ -+ u8 sitdrc1; /* SI1 RAM counter Tx TDMD */ -+ u8 sirarc1; /* SI1 RAM counter Rx TDMA */ -+ u8 sirbrc1; /* SI1 RAM counter Rx TDMB */ -+ u8 sircrc1; /* SI1 RAM counter Rx TDMC */ -+ u8 sirdrc1; /* SI1 RAM counter Rx TDMD */ -+ u8 res4[0x8]; -+ __be16 siemr1; /* SI1 TDME mode register 16 bits */ -+ __be16 sifmr1; /* SI1 TDMF mode register 16 bits */ -+ __be16 sigmr1; /* SI1 TDMG mode register 16 bits */ -+ __be16 sihmr1; /* SI1 TDMH mode register 16 bits */ -+ u8 siglmg1_l; /* SI1 global mode register low 8 bits */ -+ u8 res5[0x1]; -+ u8 sicmdr1_l; /* SI1 command register low 8 bits */ -+ u8 res6[0x1]; -+ u8 sistr1_l; /* SI1 status register low 8 bits */ -+ u8 res7[0x1]; -+ __be16 sirsr1_l; /* SI1 RAM shadow address register low 16 bits*/ -+ u8 siterc1; /* SI1 RAM counter Tx TDME 8 bits */ -+ u8 sitfrc1; /* SI1 RAM counter Tx TDMF 8 bits */ -+ u8 sitgrc1; /* SI1 RAM counter Tx TDMG 8 bits */ -+ u8 sithrc1; /* SI1 RAM counter Tx TDMH 8 bits */ -+ u8 sirerc1; /* SI1 RAM counter Rx TDME 8 bits */ -+ u8 sirfrc1; /* SI1 RAM counter Rx TDMF 8 bits */ -+ u8 sirgrc1; /* SI1 RAM counter Rx TDMG 8 bits */ -+ u8 sirhrc1; /* SI1 RAM counter Rx TDMH 8 bits */ -+ u8 res8[0x8]; -+ __be32 siml1; /* SI1 multiframe limit register */ -+ u8 siedm1; /* SI1 extended diagnostic mode register */ -+ u8 res9[0xBB]; -+} __packed; -+ -+/* SI Routing Tables */ -+struct sir { -+ u8 tx[0x400]; -+ u8 rx[0x400]; -+ u8 res0[0x800]; -+} __packed; -+ -+/* USB Controller */ -+struct qe_usb_ctlr { -+ u8 usb_usmod; -+ u8 usb_usadr; -+ u8 usb_uscom; -+ u8 res1[1]; -+ __be16 usb_usep[4]; -+ u8 res2[4]; -+ __be16 usb_usber; -+ u8 res3[2]; -+ __be16 usb_usbmr; -+ u8 res4[1]; -+ u8 usb_usbs; -+ __be16 usb_ussft; -+ u8 res5[2]; -+ __be16 usb_usfrn; -+ u8 res6[0x22]; -+} __packed; -+ -+/* MCC */ -+struct qe_mcc { -+ __be32 mcce; /* MCC event register */ -+ __be32 mccm; /* MCC mask register */ -+ __be32 mccf; /* MCC configuration register */ -+ __be32 merl; /* MCC emergency request level register */ -+ u8 res0[0xF0]; -+} __packed; -+ -+/* QE UCC Slow */ -+struct ucc_slow { -+ __be32 gumr_l; /* UCCx general mode register (low) */ -+ __be32 gumr_h; /* UCCx general mode register (high) */ 
-+ __be16 upsmr; /* UCCx protocol-specific mode register */ -+ u8 res0[0x2]; -+ __be16 utodr; /* UCCx transmit on demand register */ -+ __be16 udsr; /* UCCx data synchronization register */ -+ __be16 ucce; /* UCCx event register */ -+ u8 res1[0x2]; -+ __be16 uccm; /* UCCx mask register */ -+ u8 res2[0x1]; -+ u8 uccs; /* UCCx status register */ -+ u8 res3[0x24]; -+ __be16 utpt; -+ u8 res4[0x52]; -+ u8 guemr; /* UCC general extended mode register */ -+} __packed; -+ -+/* QE UCC Fast */ -+struct ucc_fast { -+ __be32 gumr; /* UCCx general mode register */ -+ __be32 upsmr; /* UCCx protocol-specific mode register */ -+ __be16 utodr; /* UCCx transmit on demand register */ -+ u8 res0[0x2]; -+ __be16 udsr; /* UCCx data synchronization register */ -+ u8 res1[0x2]; -+ __be32 ucce; /* UCCx event register */ -+ __be32 uccm; /* UCCx mask register */ -+ u8 uccs; /* UCCx status register */ -+ u8 res2[0x7]; -+ __be32 urfb; /* UCC receive FIFO base */ -+ __be16 urfs; /* UCC receive FIFO size */ -+ u8 res3[0x2]; -+ __be16 urfet; /* UCC receive FIFO emergency threshold */ -+ __be16 urfset; /* UCC receive FIFO special emergency -+ threshold */ -+ __be32 utfb; /* UCC transmit FIFO base */ -+ __be16 utfs; /* UCC transmit FIFO size */ -+ u8 res4[0x2]; -+ __be16 utfet; /* UCC transmit FIFO emergency threshold */ -+ u8 res5[0x2]; -+ __be16 utftt; /* UCC transmit FIFO transmit threshold */ -+ u8 res6[0x2]; -+ __be16 utpt; /* UCC transmit polling timer */ -+ u8 res7[0x2]; -+ __be32 urtry; /* UCC retry counter register */ -+ u8 res8[0x4C]; -+ u8 guemr; /* UCC general extended mode register */ -+} __packed; -+ -+struct ucc { -+ union { -+ struct ucc_slow slow; -+ struct ucc_fast fast; -+ u8 res[0x200]; /* UCC blocks are 512 bytes each */ -+ }; -+} __packed; -+ -+/* MultiPHY UTOPIA POS Controllers (UPC) */ -+struct upc { -+ __be32 upgcr; /* UTOPIA/POS general configuration register */ -+ __be32 uplpa; /* UTOPIA/POS last PHY address */ -+ __be32 uphec; /* ATM HEC register */ -+ __be32 upuc; /* UTOPIA/POS UCC configuration */ -+ __be32 updc1; /* UTOPIA/POS device 1 configuration */ -+ __be32 updc2; /* UTOPIA/POS device 2 configuration */ -+ __be32 updc3; /* UTOPIA/POS device 3 configuration */ -+ __be32 updc4; /* UTOPIA/POS device 4 configuration */ -+ __be32 upstpa; /* UTOPIA/POS STPA threshold */ -+ u8 res0[0xC]; -+ __be32 updrs1_h; /* UTOPIA/POS device 1 rate select */ -+ __be32 updrs1_l; /* UTOPIA/POS device 1 rate select */ -+ __be32 updrs2_h; /* UTOPIA/POS device 2 rate select */ -+ __be32 updrs2_l; /* UTOPIA/POS device 2 rate select */ -+ __be32 updrs3_h; /* UTOPIA/POS device 3 rate select */ -+ __be32 updrs3_l; /* UTOPIA/POS device 3 rate select */ -+ __be32 updrs4_h; /* UTOPIA/POS device 4 rate select */ -+ __be32 updrs4_l; /* UTOPIA/POS device 4 rate select */ -+ __be32 updrp1; /* UTOPIA/POS device 1 receive priority low */ -+ __be32 updrp2; /* UTOPIA/POS device 2 receive priority low */ -+ __be32 updrp3; /* UTOPIA/POS device 3 receive priority low */ -+ __be32 updrp4; /* UTOPIA/POS device 4 receive priority low */ -+ __be32 upde1; /* UTOPIA/POS device 1 event */ -+ __be32 upde2; /* UTOPIA/POS device 2 event */ -+ __be32 upde3; /* UTOPIA/POS device 3 event */ -+ __be32 upde4; /* UTOPIA/POS device 4 event */ -+ __be16 uprp1; -+ __be16 uprp2; -+ __be16 uprp3; -+ __be16 uprp4; -+ u8 res1[0x8]; -+ __be16 uptirr1_0; /* Device 1 transmit internal rate 0 */ -+ __be16 uptirr1_1; /* Device 1 transmit internal rate 1 */ -+ __be16 uptirr1_2; /* Device 1 transmit internal rate 2 */ -+ __be16 uptirr1_3; /* Device 1 transmit 
internal rate 3 */ -+ __be16 uptirr2_0; /* Device 2 transmit internal rate 0 */ -+ __be16 uptirr2_1; /* Device 2 transmit internal rate 1 */ -+ __be16 uptirr2_2; /* Device 2 transmit internal rate 2 */ -+ __be16 uptirr2_3; /* Device 2 transmit internal rate 3 */ -+ __be16 uptirr3_0; /* Device 3 transmit internal rate 0 */ -+ __be16 uptirr3_1; /* Device 3 transmit internal rate 1 */ -+ __be16 uptirr3_2; /* Device 3 transmit internal rate 2 */ -+ __be16 uptirr3_3; /* Device 3 transmit internal rate 3 */ -+ __be16 uptirr4_0; /* Device 4 transmit internal rate 0 */ -+ __be16 uptirr4_1; /* Device 4 transmit internal rate 1 */ -+ __be16 uptirr4_2; /* Device 4 transmit internal rate 2 */ -+ __be16 uptirr4_3; /* Device 4 transmit internal rate 3 */ -+ __be32 uper1; /* Device 1 port enable register */ -+ __be32 uper2; /* Device 2 port enable register */ -+ __be32 uper3; /* Device 3 port enable register */ -+ __be32 uper4; /* Device 4 port enable register */ -+ u8 res2[0x150]; -+} __packed; -+ -+/* SDMA */ -+struct sdma { -+ __be32 sdsr; /* Serial DMA status register */ -+ __be32 sdmr; /* Serial DMA mode register */ -+ __be32 sdtr1; /* SDMA system bus threshold register */ -+ __be32 sdtr2; /* SDMA secondary bus threshold register */ -+ __be32 sdhy1; /* SDMA system bus hysteresis register */ -+ __be32 sdhy2; /* SDMA secondary bus hysteresis register */ -+ __be32 sdta1; /* SDMA system bus address register */ -+ __be32 sdta2; /* SDMA secondary bus address register */ -+ __be32 sdtm1; /* SDMA system bus MSNUM register */ -+ __be32 sdtm2; /* SDMA secondary bus MSNUM register */ -+ u8 res0[0x10]; -+ __be32 sdaqr; /* SDMA address bus qualify register */ -+ __be32 sdaqmr; /* SDMA address bus qualify mask register */ -+ u8 res1[0x4]; -+ __be32 sdebcr; /* SDMA CAM entries base register */ -+ u8 res2[0x38]; -+} __packed; -+ -+/* Debug Space */ -+struct dbg { -+ __be32 bpdcr; /* Breakpoint debug command register */ -+ __be32 bpdsr; /* Breakpoint debug status register */ -+ __be32 bpdmr; /* Breakpoint debug mask register */ -+ __be32 bprmrr0; /* Breakpoint request mode risc register 0 */ -+ __be32 bprmrr1; /* Breakpoint request mode risc register 1 */ -+ u8 res0[0x8]; -+ __be32 bprmtr0; /* Breakpoint request mode trb register 0 */ -+ __be32 bprmtr1; /* Breakpoint request mode trb register 1 */ -+ u8 res1[0x8]; -+ __be32 bprmir; /* Breakpoint request mode immediate register */ -+ __be32 bprmsr; /* Breakpoint request mode serial register */ -+ __be32 bpemr; /* Breakpoint exit mode register */ -+ u8 res2[0x48]; -+} __packed; -+ -+/* -+ * RISC Special Registers (Trap and Breakpoint). These are described in -+ * the QE Developer's Handbook. 
-+ */ -+struct rsp { -+ __be32 tibcr[16]; /* Trap/instruction breakpoint control regs */ -+ u8 res0[64]; -+ __be32 ibcr0; -+ __be32 ibs0; -+ __be32 ibcnr0; -+ u8 res1[4]; -+ __be32 ibcr1; -+ __be32 ibs1; -+ __be32 ibcnr1; -+ __be32 npcr; -+ __be32 dbcr; -+ __be32 dbar; -+ __be32 dbamr; -+ __be32 dbsr; -+ __be32 dbcnr; -+ u8 res2[12]; -+ __be32 dbdr_h; -+ __be32 dbdr_l; -+ __be32 dbdmr_h; -+ __be32 dbdmr_l; -+ __be32 bsr; -+ __be32 bor; -+ __be32 bior; -+ u8 res3[4]; -+ __be32 iatr[4]; -+ __be32 eccr; /* Exception control configuration register */ -+ __be32 eicr; -+ u8 res4[0x100-0xf8]; -+} __packed; -+ -+struct qe_immap { -+ struct qe_iram iram; /* I-RAM */ -+ struct qe_ic_regs ic; /* Interrupt Controller */ -+ struct cp_qe cp; /* Communications Processor */ -+ struct qe_mux qmx; /* QE Multiplexer */ -+ struct qe_timers qet; /* QE Timers */ -+ struct spi spi[0x2]; /* spi */ -+ struct qe_mcc mcc; /* mcc */ -+ struct qe_brg brg; /* brg */ -+ struct qe_usb_ctlr usb; /* USB */ -+ struct si1 si1; /* SI */ -+ u8 res11[0x800]; -+ struct sir sir; /* SI Routing Tables */ -+ struct ucc ucc1; /* ucc1 */ -+ struct ucc ucc3; /* ucc3 */ -+ struct ucc ucc5; /* ucc5 */ -+ struct ucc ucc7; /* ucc7 */ -+ u8 res12[0x600]; -+ struct upc upc1; /* MultiPHY UTOPIA POS Ctrlr 1*/ -+ struct ucc ucc2; /* ucc2 */ -+ struct ucc ucc4; /* ucc4 */ -+ struct ucc ucc6; /* ucc6 */ -+ struct ucc ucc8; /* ucc8 */ -+ u8 res13[0x600]; -+ struct upc upc2; /* MultiPHY UTOPIA POS Ctrlr 2*/ -+ struct sdma sdma; /* SDMA */ -+ struct dbg dbg; /* 0x104080 - 0x1040FF -+ Debug Space */ -+ struct rsp rsp[0x2]; /* 0x104100 - 0x1042FF -+ RISC Special Registers -+ (Trap and Breakpoint) */ -+ u8 res14[0x300]; /* 0x104300 - 0x1045FF */ -+ u8 res15[0x3A00]; /* 0x104600 - 0x107FFF */ -+ u8 res16[0x8000]; /* 0x108000 - 0x110000 */ -+ u8 muram[0xC000]; /* 0x110000 - 0x11C000 -+ Multi-user RAM */ -+ u8 res17[0x24000]; /* 0x11C000 - 0x140000 */ -+ u8 res18[0xC0000]; /* 0x140000 - 0x200000 */ -+} __packed; -+ -+extern struct qe_immap __iomem *qe_immr; -+extern phys_addr_t get_qe_base(void); -+ -+/* -+ * Returns the offset within the QE address space of the given pointer. -+ * -+ * Note that the QE does not support 36-bit physical addresses, so if -+ * get_qe_base() returns a number above 4GB, the caller will probably fail. -+ */ -+static inline phys_addr_t immrbar_virt_to_phys(void *address) -+{ -+ void *q = (void *)qe_immr; -+ -+ /* Is it a MURAM address? */ -+ if ((address >= q) && (address < (q + QE_IMMAP_SIZE))) -+ return get_qe_base() + (address - q); -+ -+ /* It's an address returned by kmalloc */ -+ return virt_to_phys(address); -+} -+ -+#endif /* __KERNEL__ */ -+#endif /* _ASM_POWERPC_IMMAP_QE_H */ ---- /dev/null -+++ b/include/linux/fsl/qe.h -@@ -0,0 +1,810 @@ -+/* -+ * Copyright (C) 2006, 2012 Freescale Semiconductor, Inc. All rights reserved. -+ * -+ * Authors: Shlomi Gridish -+ * Li Yang -+ * -+ * Description: -+ * QUICC Engine (QE) external definitions and structure. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2 of the License, or (at your -+ * option) any later version. 
-+ */ -+#ifndef _ASM_POWERPC_QE_H -+#define _ASM_POWERPC_QE_H -+#ifdef __KERNEL__ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define QE_NUM_OF_SNUM 256 /* There are 256 serial number in QE */ -+#define QE_NUM_OF_BRGS 16 -+#define QE_NUM_OF_PORTS 1024 -+ -+/* Memory partitions -+*/ -+#define MEM_PART_SYSTEM 0 -+#define MEM_PART_SECONDARY 1 -+#define MEM_PART_MURAM 2 -+ -+extern int siram_init_flag; -+ -+/* Clocks and BRGs */ -+enum qe_clock { -+ QE_CLK_NONE = 0, -+ QE_BRG1, /* Baud Rate Generator 1 */ -+ QE_BRG2, /* Baud Rate Generator 2 */ -+ QE_BRG3, /* Baud Rate Generator 3 */ -+ QE_BRG4, /* Baud Rate Generator 4 */ -+ QE_BRG5, /* Baud Rate Generator 5 */ -+ QE_BRG6, /* Baud Rate Generator 6 */ -+ QE_BRG7, /* Baud Rate Generator 7 */ -+ QE_BRG8, /* Baud Rate Generator 8 */ -+ QE_BRG9, /* Baud Rate Generator 9 */ -+ QE_BRG10, /* Baud Rate Generator 10 */ -+ QE_BRG11, /* Baud Rate Generator 11 */ -+ QE_BRG12, /* Baud Rate Generator 12 */ -+ QE_BRG13, /* Baud Rate Generator 13 */ -+ QE_BRG14, /* Baud Rate Generator 14 */ -+ QE_BRG15, /* Baud Rate Generator 15 */ -+ QE_BRG16, /* Baud Rate Generator 16 */ -+ QE_CLK1, /* Clock 1 */ -+ QE_CLK2, /* Clock 2 */ -+ QE_CLK3, /* Clock 3 */ -+ QE_CLK4, /* Clock 4 */ -+ QE_CLK5, /* Clock 5 */ -+ QE_CLK6, /* Clock 6 */ -+ QE_CLK7, /* Clock 7 */ -+ QE_CLK8, /* Clock 8 */ -+ QE_CLK9, /* Clock 9 */ -+ QE_CLK10, /* Clock 10 */ -+ QE_CLK11, /* Clock 11 */ -+ QE_CLK12, /* Clock 12 */ -+ QE_CLK13, /* Clock 13 */ -+ QE_CLK14, /* Clock 14 */ -+ QE_CLK15, /* Clock 15 */ -+ QE_CLK16, /* Clock 16 */ -+ QE_CLK17, /* Clock 17 */ -+ QE_CLK18, /* Clock 18 */ -+ QE_CLK19, /* Clock 19 */ -+ QE_CLK20, /* Clock 20 */ -+ QE_CLK21, /* Clock 21 */ -+ QE_CLK22, /* Clock 22 */ -+ QE_CLK23, /* Clock 23 */ -+ QE_CLK24, /* Clock 24 */ -+ QE_RSYNC_PIN, /* RSYNC from pin */ -+ QE_TSYNC_PIN, /* TSYNC from pin */ -+ QE_CLK_DUMMY -+}; -+ -+static inline bool qe_clock_is_brg(enum qe_clock clk) -+{ -+ return clk >= QE_BRG1 && clk <= QE_BRG16; -+} -+ -+extern spinlock_t cmxgcr_lock; -+ -+/* Export QE common operations */ -+#ifdef CONFIG_QUICC_ENGINE -+extern void qe_reset(void); -+#else -+static inline void qe_reset(void) {} -+#endif -+ -+/* QE PIO */ -+#define QE_PIO_PINS 32 -+ -+struct qe_pio_regs { -+ __be32 cpodr; /* Open drain register */ -+ __be32 cpdata; /* Data register */ -+ __be32 cpdir1; /* Direction register */ -+ __be32 cpdir2; /* Direction register */ -+ __be32 cppar1; /* Pin assignment register */ -+ __be32 cppar2; /* Pin assignment register */ -+#ifdef CONFIG_PPC_85xx -+ u8 pad[8]; -+#endif -+}; -+ -+#define QE_PIO_DIR_IN 2 -+#define QE_PIO_DIR_OUT 1 -+extern void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, -+ int dir, int open_drain, int assignment, -+ int has_irq); -+#ifdef CONFIG_QUICC_ENGINE -+extern int par_io_init(struct device_node *np); -+extern int par_io_of_config(struct device_node *np); -+extern int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain, -+ int assignment, int has_irq); -+extern int par_io_data_set(u8 port, u8 pin, u8 val); -+#else -+static inline int par_io_init(struct device_node *np) { return -ENOSYS; } -+static inline int par_io_of_config(struct device_node *np) { return -ENOSYS; } -+static inline int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain, -+ int assignment, int has_irq) { return -ENOSYS; } -+static inline int par_io_data_set(u8 port, u8 pin, u8 val) { return -ENOSYS; } -+#endif /* CONFIG_QUICC_ENGINE */ -+ -+/* -+ * Pin multiplexing functions. 
-+ */ -+struct qe_pin; -+#ifdef CONFIG_QE_GPIO -+extern struct qe_pin *qe_pin_request(struct device_node *np, int index); -+extern void qe_pin_free(struct qe_pin *qe_pin); -+extern void qe_pin_set_gpio(struct qe_pin *qe_pin); -+extern void qe_pin_set_dedicated(struct qe_pin *pin); -+#else -+static inline struct qe_pin *qe_pin_request(struct device_node *np, int index) -+{ -+ return ERR_PTR(-ENOSYS); -+} -+static inline void qe_pin_free(struct qe_pin *qe_pin) {} -+static inline void qe_pin_set_gpio(struct qe_pin *qe_pin) {} -+static inline void qe_pin_set_dedicated(struct qe_pin *pin) {} -+#endif /* CONFIG_QE_GPIO */ -+ -+#ifdef CONFIG_QUICC_ENGINE -+int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input); -+#else -+static inline int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, -+ u32 cmd_input) -+{ -+ return -ENOSYS; -+} -+#endif /* CONFIG_QUICC_ENGINE */ -+ -+/* QE internal API */ -+enum qe_clock qe_clock_source(const char *source); -+unsigned int qe_get_brg_clk(void); -+int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier); -+int qe_get_snum(void); -+void qe_put_snum(u8 snum); -+unsigned int qe_get_num_of_risc(void); -+unsigned int qe_get_num_of_snums(void); -+ -+static inline int qe_alive_during_sleep(void) -+{ -+ /* -+ * MPC8568E reference manual says: -+ * -+ * "...power down sequence waits for all I/O interfaces to become idle. -+ * In some applications this may happen eventually without actively -+ * shutting down interfaces, but most likely, software will have to -+ * take steps to shut down the eTSEC, QUICC Engine Block, and PCI -+ * interfaces before issuing the command (either the write to the core -+ * MSR[WE] as described above or writing to POWMGTCSR) to put the -+ * device into sleep state." -+ * -+ * MPC8569E reference manual has a similar paragraph. -+ */ -+#ifdef CONFIG_PPC_85xx -+ return 0; -+#else -+ return 1; -+#endif -+} -+ -+int qe_muram_init(void); -+ -+#if defined(CONFIG_QUICC_ENGINE) -+unsigned long qe_muram_alloc(unsigned long size, unsigned long align); -+int qe_muram_free(unsigned long offset); -+unsigned long qe_muram_alloc_fixed(unsigned long offset, unsigned long size); -+void __iomem *qe_muram_addr(unsigned long offset); -+unsigned long qe_muram_offset(void __iomem *addr); -+dma_addr_t qe_muram_dma(void __iomem *addr); -+#else -+static inline unsigned long qe_muram_alloc(unsigned long size, -+ unsigned long align) -+{ -+ return -ENOSYS; -+} -+ -+static inline int qe_muram_free(unsigned long offset) -+{ -+ return -ENOSYS; -+} -+ -+static inline unsigned long qe_muram_alloc_fixed(unsigned long offset, -+ unsigned long size) -+{ -+ return -ENOSYS; -+} -+ -+static inline void __iomem *qe_muram_addr(unsigned long offset) -+{ -+ return NULL; -+} -+ -+static inline unsigned long qe_muram_offset(void __iomem *addr) -+{ -+ return -ENOSYS; -+} -+ -+static inline dma_addr_t qe_muram_dma(void __iomem *addr) -+{ -+ return 0; -+} -+#endif /* defined(CONFIG_QUICC_ENGINE) */ -+ -+/* Structure that defines QE firmware binary files. -+ * -+ * See Documentation/powerpc/qe_firmware.txt for a description of these -+ * fields. -+ */ -+struct qe_firmware { -+ struct qe_header { -+ __be32 length; /* Length of the entire structure, in bytes */ -+ u8 magic[3]; /* Set to { 'Q', 'E', 'F' } */ -+ u8 version; /* Version of this layout. 
First ver is '1' */ -+ } header; -+ u8 id[62]; /* Null-terminated identifier string */ -+ u8 split; /* 0 = shared I-RAM, 1 = split I-RAM */ -+ u8 count; /* Number of microcode[] structures */ -+ struct { -+ __be16 model; /* The SOC model */ -+ u8 major; /* The SOC revision major */ -+ u8 minor; /* The SOC revision minor */ -+ } __packed soc; -+ u8 padding[4]; /* Reserved, for alignment */ -+ __be64 extended_modes; /* Extended modes */ -+ __be32 vtraps[8]; /* Virtual trap addresses */ -+ u8 reserved[4]; /* Reserved, for future expansion */ -+ struct qe_microcode { -+ u8 id[32]; /* Null-terminated identifier */ -+ __be32 traps[16]; /* Trap addresses, 0 == ignore */ -+ __be32 eccr; /* The value for the ECCR register */ -+ __be32 iram_offset; /* Offset into I-RAM for the code */ -+ __be32 count; /* Number of 32-bit words of the code */ -+ __be32 code_offset; /* Offset of the actual microcode */ -+ u8 major; /* The microcode version major */ -+ u8 minor; /* The microcode version minor */ -+ u8 revision; /* The microcode version revision */ -+ u8 padding; /* Reserved, for alignment */ -+ u8 reserved[4]; /* Reserved, for future expansion */ -+ } __packed microcode[1]; -+ /* All microcode binaries should be located here */ -+ /* CRC32 should be located here, after the microcode binaries */ -+} __packed; -+ -+struct qe_firmware_info { -+ char id[64]; /* Firmware name */ -+ u32 vtraps[8]; /* Virtual trap addresses */ -+ u64 extended_modes; /* Extended modes */ -+}; -+ -+#ifdef CONFIG_QUICC_ENGINE -+/* Upload a firmware to the QE */ -+int qe_upload_firmware(const struct qe_firmware *firmware); -+#else -+static inline int qe_upload_firmware(const struct qe_firmware *firmware) -+{ -+ return -ENOSYS; -+} -+#endif /* CONFIG_QUICC_ENGINE */ -+ -+/* Obtain information on the uploaded firmware */ -+struct qe_firmware_info *qe_get_firmware_info(void); -+ -+/* QE USB */ -+int qe_usb_clock_set(enum qe_clock clk, int rate); -+ -+/* Buffer descriptors */ -+struct qe_bd { -+ __be16 status; -+ __be16 length; -+ __be32 buf; -+} __packed; -+ -+#define BD_STATUS_MASK 0xffff0000 -+#define BD_LENGTH_MASK 0x0000ffff -+ -+/* Buffer descriptor control/status used by serial -+ */ -+ -+#define BD_SC_EMPTY (0x8000) /* Receive is empty */ -+#define BD_SC_READY (0x8000) /* Transmit is ready */ -+#define BD_SC_WRAP (0x2000) /* Last buffer descriptor */ -+#define BD_SC_INTRPT (0x1000) /* Interrupt on change */ -+#define BD_SC_LAST (0x0800) /* Last buffer in frame */ -+#define BD_SC_TC (0x0400) /* Transmit CRC */ -+#define BD_SC_CM (0x0200) /* Continuous mode */ -+#define BD_SC_ID (0x0100) /* Rec'd too many idles */ -+#define BD_SC_P (0x0100) /* xmt preamble */ -+#define BD_SC_BR (0x0020) /* Break received */ -+#define BD_SC_FR (0x0010) /* Framing error */ -+#define BD_SC_PR (0x0008) /* Parity error */ -+#define BD_SC_NAK (0x0004) /* NAK - did not respond */ -+#define BD_SC_OV (0x0002) /* Overrun */ -+#define BD_SC_UN (0x0002) /* Underrun */ -+#define BD_SC_CD (0x0001) /* */ -+#define BD_SC_CL (0x0001) /* Collision */ -+ -+/* Alignment */ -+#define QE_INTR_TABLE_ALIGN 16 /* ??? 
*/ -+#define QE_ALIGNMENT_OF_BD 8 -+#define QE_ALIGNMENT_OF_PRAM 64 -+ -+/* RISC allocation */ -+#define QE_RISC_ALLOCATION_RISC1 0x1 /* RISC 1 */ -+#define QE_RISC_ALLOCATION_RISC2 0x2 /* RISC 2 */ -+#define QE_RISC_ALLOCATION_RISC3 0x4 /* RISC 3 */ -+#define QE_RISC_ALLOCATION_RISC4 0x8 /* RISC 4 */ -+#define QE_RISC_ALLOCATION_RISC1_AND_RISC2 (QE_RISC_ALLOCATION_RISC1 | \ -+ QE_RISC_ALLOCATION_RISC2) -+#define QE_RISC_ALLOCATION_FOUR_RISCS (QE_RISC_ALLOCATION_RISC1 | \ -+ QE_RISC_ALLOCATION_RISC2 | \ -+ QE_RISC_ALLOCATION_RISC3 | \ -+ QE_RISC_ALLOCATION_RISC4) -+ -+/* QE extended filtering Table Lookup Key Size */ -+enum qe_fltr_tbl_lookup_key_size { -+ QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES -+ = 0x3f, /* LookupKey parsed by the Generate LookupKey -+ CMD is truncated to 8 bytes */ -+ QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES -+ = 0x5f, /* LookupKey parsed by the Generate LookupKey -+ CMD is truncated to 16 bytes */ -+}; -+ -+/* QE FLTR extended filtering Largest External Table Lookup Key Size */ -+enum qe_fltr_largest_external_tbl_lookup_key_size { -+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE -+ = 0x0,/* not used */ -+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES -+ = QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES, /* 8 bytes */ -+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES -+ = QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES, /* 16 bytes */ -+}; -+ -+/* structure representing QE parameter RAM */ -+struct qe_timer_tables { -+ u16 tm_base; /* QE timer table base adr */ -+ u16 tm_ptr; /* QE timer table pointer */ -+ u16 r_tmr; /* QE timer mode register */ -+ u16 r_tmv; /* QE timer valid register */ -+ u32 tm_cmd; /* QE timer cmd register */ -+ u32 tm_cnt; /* QE timer internal cnt */ -+} __packed; -+ -+#define QE_FLTR_TAD_SIZE 8 -+ -+/* QE extended filtering Termination Action Descriptor (TAD) */ -+struct qe_fltr_tad { -+ u8 serialized[QE_FLTR_TAD_SIZE]; -+} __packed; -+ -+/* Communication Direction */ -+enum comm_dir { -+ COMM_DIR_NONE = 0, -+ COMM_DIR_RX = 1, -+ COMM_DIR_TX = 2, -+ COMM_DIR_RX_AND_TX = 3 -+}; -+ -+/* QE CMXUCR Registers. -+ * There are two UCCs represented in each of the four CMXUCR registers. -+ * These values are for the UCC in the LSBs -+ */ -+#define QE_CMXUCR_MII_ENET_MNG 0x00007000 -+#define QE_CMXUCR_MII_ENET_MNG_SHIFT 12 -+#define QE_CMXUCR_GRANT 0x00008000 -+#define QE_CMXUCR_TSA 0x00004000 -+#define QE_CMXUCR_BKPT 0x00000100 -+#define QE_CMXUCR_TX_CLK_SRC_MASK 0x0000000F -+ -+/* QE CMXGCR Registers. -+*/ -+#define QE_CMXGCR_MII_ENET_MNG 0x00007000 -+#define QE_CMXGCR_MII_ENET_MNG_SHIFT 12 -+#define QE_CMXGCR_USBCS 0x0000000f -+#define QE_CMXGCR_USBCS_CLK3 0x1 -+#define QE_CMXGCR_USBCS_CLK5 0x2 -+#define QE_CMXGCR_USBCS_CLK7 0x3 -+#define QE_CMXGCR_USBCS_CLK9 0x4 -+#define QE_CMXGCR_USBCS_CLK13 0x5 -+#define QE_CMXGCR_USBCS_CLK17 0x6 -+#define QE_CMXGCR_USBCS_CLK19 0x7 -+#define QE_CMXGCR_USBCS_CLK21 0x8 -+#define QE_CMXGCR_USBCS_BRG9 0x9 -+#define QE_CMXGCR_USBCS_BRG10 0xa -+ -+/* QE CECR Commands. 
-+*/ -+#define QE_CR_FLG 0x00010000 -+#define QE_RESET 0x80000000 -+#define QE_INIT_TX_RX 0x00000000 -+#define QE_INIT_RX 0x00000001 -+#define QE_INIT_TX 0x00000002 -+#define QE_ENTER_HUNT_MODE 0x00000003 -+#define QE_STOP_TX 0x00000004 -+#define QE_GRACEFUL_STOP_TX 0x00000005 -+#define QE_RESTART_TX 0x00000006 -+#define QE_CLOSE_RX_BD 0x00000007 -+#define QE_SWITCH_COMMAND 0x00000007 -+#define QE_SET_GROUP_ADDRESS 0x00000008 -+#define QE_START_IDMA 0x00000009 -+#define QE_MCC_STOP_RX 0x00000009 -+#define QE_ATM_TRANSMIT 0x0000000a -+#define QE_HPAC_CLEAR_ALL 0x0000000b -+#define QE_GRACEFUL_STOP_RX 0x0000001a -+#define QE_RESTART_RX 0x0000001b -+#define QE_HPAC_SET_PRIORITY 0x0000010b -+#define QE_HPAC_STOP_TX 0x0000020b -+#define QE_HPAC_STOP_RX 0x0000030b -+#define QE_HPAC_GRACEFUL_STOP_TX 0x0000040b -+#define QE_HPAC_GRACEFUL_STOP_RX 0x0000050b -+#define QE_HPAC_START_TX 0x0000060b -+#define QE_HPAC_START_RX 0x0000070b -+#define QE_USB_STOP_TX 0x0000000a -+#define QE_USB_RESTART_TX 0x0000000c -+#define QE_QMC_STOP_TX 0x0000000c -+#define QE_QMC_STOP_RX 0x0000000d -+#define QE_SS7_SU_FIL_RESET 0x0000000e -+/* jonathbr added from here down for 83xx */ -+#define QE_RESET_BCS 0x0000000a -+#define QE_MCC_INIT_TX_RX_16 0x00000003 -+#define QE_MCC_STOP_TX 0x00000004 -+#define QE_MCC_INIT_TX_1 0x00000005 -+#define QE_MCC_INIT_RX_1 0x00000006 -+#define QE_MCC_RESET 0x00000007 -+#define QE_SET_TIMER 0x00000008 -+#define QE_RANDOM_NUMBER 0x0000000c -+#define QE_ATM_MULTI_THREAD_INIT 0x00000011 -+#define QE_ASSIGN_PAGE 0x00000012 -+#define QE_ADD_REMOVE_HASH_ENTRY 0x00000013 -+#define QE_START_FLOW_CONTROL 0x00000014 -+#define QE_STOP_FLOW_CONTROL 0x00000015 -+#define QE_ASSIGN_PAGE_TO_DEVICE 0x00000016 -+ -+#define QE_ASSIGN_RISC 0x00000010 -+#define QE_CR_MCN_NORMAL_SHIFT 6 -+#define QE_CR_MCN_USB_SHIFT 4 -+#define QE_CR_MCN_RISC_ASSIGN_SHIFT 8 -+#define QE_CR_SNUM_SHIFT 17 -+ -+/* QE CECR Sub Block - sub block of QE command. 
-+*/ -+#define QE_CR_SUBBLOCK_INVALID 0x00000000 -+#define QE_CR_SUBBLOCK_USB 0x03200000 -+#define QE_CR_SUBBLOCK_UCCFAST1 0x02000000 -+#define QE_CR_SUBBLOCK_UCCFAST2 0x02200000 -+#define QE_CR_SUBBLOCK_UCCFAST3 0x02400000 -+#define QE_CR_SUBBLOCK_UCCFAST4 0x02600000 -+#define QE_CR_SUBBLOCK_UCCFAST5 0x02800000 -+#define QE_CR_SUBBLOCK_UCCFAST6 0x02a00000 -+#define QE_CR_SUBBLOCK_UCCFAST7 0x02c00000 -+#define QE_CR_SUBBLOCK_UCCFAST8 0x02e00000 -+#define QE_CR_SUBBLOCK_UCCSLOW1 0x00000000 -+#define QE_CR_SUBBLOCK_UCCSLOW2 0x00200000 -+#define QE_CR_SUBBLOCK_UCCSLOW3 0x00400000 -+#define QE_CR_SUBBLOCK_UCCSLOW4 0x00600000 -+#define QE_CR_SUBBLOCK_UCCSLOW5 0x00800000 -+#define QE_CR_SUBBLOCK_UCCSLOW6 0x00a00000 -+#define QE_CR_SUBBLOCK_UCCSLOW7 0x00c00000 -+#define QE_CR_SUBBLOCK_UCCSLOW8 0x00e00000 -+#define QE_CR_SUBBLOCK_MCC1 0x03800000 -+#define QE_CR_SUBBLOCK_MCC2 0x03a00000 -+#define QE_CR_SUBBLOCK_MCC3 0x03000000 -+#define QE_CR_SUBBLOCK_IDMA1 0x02800000 -+#define QE_CR_SUBBLOCK_IDMA2 0x02a00000 -+#define QE_CR_SUBBLOCK_IDMA3 0x02c00000 -+#define QE_CR_SUBBLOCK_IDMA4 0x02e00000 -+#define QE_CR_SUBBLOCK_HPAC 0x01e00000 -+#define QE_CR_SUBBLOCK_SPI1 0x01400000 -+#define QE_CR_SUBBLOCK_SPI2 0x01600000 -+#define QE_CR_SUBBLOCK_RAND 0x01c00000 -+#define QE_CR_SUBBLOCK_TIMER 0x01e00000 -+#define QE_CR_SUBBLOCK_GENERAL 0x03c00000 -+ -+/* QE CECR Protocol - For non-MCC, specifies mode for QE CECR command */ -+#define QE_CR_PROTOCOL_UNSPECIFIED 0x00 /* For all other protocols */ -+#define QE_CR_PROTOCOL_HDLC_TRANSPARENT 0x00 -+#define QE_CR_PROTOCOL_QMC 0x02 -+#define QE_CR_PROTOCOL_UART 0x04 -+#define QE_CR_PROTOCOL_ATM_POS 0x0A -+#define QE_CR_PROTOCOL_ETHERNET 0x0C -+#define QE_CR_PROTOCOL_L2_SWITCH 0x0D -+ -+/* BRG configuration register */ -+#define QE_BRGC_ENABLE 0x00010000 -+#define QE_BRGC_DIVISOR_SHIFT 1 -+#define QE_BRGC_DIVISOR_MAX 0xFFF -+#define QE_BRGC_DIV16 1 -+ -+/* QE Timers registers */ -+#define QE_GTCFR1_PCAS 0x80 -+#define QE_GTCFR1_STP2 0x20 -+#define QE_GTCFR1_RST2 0x10 -+#define QE_GTCFR1_GM2 0x08 -+#define QE_GTCFR1_GM1 0x04 -+#define QE_GTCFR1_STP1 0x02 -+#define QE_GTCFR1_RST1 0x01 -+ -+/* SDMA registers */ -+#define QE_SDSR_BER1 0x02000000 -+#define QE_SDSR_BER2 0x01000000 -+ -+#define QE_SDMR_GLB_1_MSK 0x80000000 -+#define QE_SDMR_ADR_SEL 0x20000000 -+#define QE_SDMR_BER1_MSK 0x02000000 -+#define QE_SDMR_BER2_MSK 0x01000000 -+#define QE_SDMR_EB1_MSK 0x00800000 -+#define QE_SDMR_ER1_MSK 0x00080000 -+#define QE_SDMR_ER2_MSK 0x00040000 -+#define QE_SDMR_CEN_MASK 0x0000E000 -+#define QE_SDMR_SBER_1 0x00000200 -+#define QE_SDMR_SBER_2 0x00000200 -+#define QE_SDMR_EB1_PR_MASK 0x000000C0 -+#define QE_SDMR_ER1_PR 0x00000008 -+ -+#define QE_SDMR_CEN_SHIFT 13 -+#define QE_SDMR_EB1_PR_SHIFT 6 -+ -+#define QE_SDTM_MSNUM_SHIFT 24 -+ -+#define QE_SDEBCR_BA_MASK 0x01FFFFFF -+ -+/* Communication Processor */ -+#define QE_CP_CERCR_MEE 0x8000 /* Multi-user RAM ECC enable */ -+#define QE_CP_CERCR_IEE 0x4000 /* Instruction RAM ECC enable */ -+#define QE_CP_CERCR_CIR 0x0800 /* Common instruction RAM */ -+ -+/* I-RAM */ -+#define QE_IRAM_IADD_AIE 0x80000000 /* Auto Increment Enable */ -+#define QE_IRAM_IADD_BADDR 0x00080000 /* Base Address */ -+#define QE_IRAM_READY 0x80000000 /* Ready */ -+ -+/* UPC */ -+#define UPGCR_PROTOCOL 0x80000000 /* protocol ul2 or pl2 */ -+#define UPGCR_TMS 0x40000000 /* Transmit master/slave mode */ -+#define UPGCR_RMS 0x20000000 /* Receive master/slave mode */ -+#define UPGCR_ADDR 0x10000000 /* Master MPHY Addr multiplexing */ -+#define UPGCR_DIAG 0x01000000 
/* Diagnostic mode */ -+ -+/* UCC GUEMR register */ -+#define UCC_GUEMR_MODE_MASK_RX 0x02 -+#define UCC_GUEMR_MODE_FAST_RX 0x02 -+#define UCC_GUEMR_MODE_SLOW_RX 0x00 -+#define UCC_GUEMR_MODE_MASK_TX 0x01 -+#define UCC_GUEMR_MODE_FAST_TX 0x01 -+#define UCC_GUEMR_MODE_SLOW_TX 0x00 -+#define UCC_GUEMR_MODE_MASK (UCC_GUEMR_MODE_MASK_RX | UCC_GUEMR_MODE_MASK_TX) -+#define UCC_GUEMR_SET_RESERVED3 0x10 /* Bit 3 in the guemr is reserved but -+ must be set 1 */ -+ -+/* structure representing UCC SLOW parameter RAM */ -+struct ucc_slow_pram { -+ __be16 rbase; /* RX BD base address */ -+ __be16 tbase; /* TX BD base address */ -+ u8 rbmr; /* RX bus mode register (same as CPM's RFCR) */ -+ u8 tbmr; /* TX bus mode register (same as CPM's TFCR) */ -+ __be16 mrblr; /* Rx buffer length */ -+ __be32 rstate; /* Rx internal state */ -+ __be32 rptr; /* Rx internal data pointer */ -+ __be16 rbptr; /* rb BD Pointer */ -+ __be16 rcount; /* Rx internal byte count */ -+ __be32 rtemp; /* Rx temp */ -+ __be32 tstate; /* Tx internal state */ -+ __be32 tptr; /* Tx internal data pointer */ -+ __be16 tbptr; /* Tx BD pointer */ -+ __be16 tcount; /* Tx byte count */ -+ __be32 ttemp; /* Tx temp */ -+ __be32 rcrc; /* temp receive CRC */ -+ __be32 tcrc; /* temp transmit CRC */ -+} __packed; -+ -+/* General UCC SLOW Mode Register (GUMRH & GUMRL) */ -+#define UCC_SLOW_GUMR_H_SAM_QMC 0x00000000 -+#define UCC_SLOW_GUMR_H_SAM_SATM 0x00008000 -+#define UCC_SLOW_GUMR_H_REVD 0x00002000 -+#define UCC_SLOW_GUMR_H_TRX 0x00001000 -+#define UCC_SLOW_GUMR_H_TTX 0x00000800 -+#define UCC_SLOW_GUMR_H_CDP 0x00000400 -+#define UCC_SLOW_GUMR_H_CTSP 0x00000200 -+#define UCC_SLOW_GUMR_H_CDS 0x00000100 -+#define UCC_SLOW_GUMR_H_CTSS 0x00000080 -+#define UCC_SLOW_GUMR_H_TFL 0x00000040 -+#define UCC_SLOW_GUMR_H_RFW 0x00000020 -+#define UCC_SLOW_GUMR_H_TXSY 0x00000010 -+#define UCC_SLOW_GUMR_H_4SYNC 0x00000004 -+#define UCC_SLOW_GUMR_H_8SYNC 0x00000008 -+#define UCC_SLOW_GUMR_H_16SYNC 0x0000000c -+#define UCC_SLOW_GUMR_H_RTSM 0x00000002 -+#define UCC_SLOW_GUMR_H_RSYN 0x00000001 -+ -+#define UCC_SLOW_GUMR_L_TCI 0x10000000 -+#define UCC_SLOW_GUMR_L_RINV 0x02000000 -+#define UCC_SLOW_GUMR_L_TINV 0x01000000 -+#define UCC_SLOW_GUMR_L_TEND 0x00040000 -+#define UCC_SLOW_GUMR_L_TDCR_MASK 0x00030000 -+#define UCC_SLOW_GUMR_L_TDCR_32 0x00030000 -+#define UCC_SLOW_GUMR_L_TDCR_16 0x00020000 -+#define UCC_SLOW_GUMR_L_TDCR_8 0x00010000 -+#define UCC_SLOW_GUMR_L_TDCR_1 0x00000000 -+#define UCC_SLOW_GUMR_L_RDCR_MASK 0x0000c000 -+#define UCC_SLOW_GUMR_L_RDCR_32 0x0000c000 -+#define UCC_SLOW_GUMR_L_RDCR_16 0x00008000 -+#define UCC_SLOW_GUMR_L_RDCR_8 0x00004000 -+#define UCC_SLOW_GUMR_L_RDCR_1 0x00000000 -+#define UCC_SLOW_GUMR_L_RENC_NRZI 0x00000800 -+#define UCC_SLOW_GUMR_L_RENC_NRZ 0x00000000 -+#define UCC_SLOW_GUMR_L_TENC_NRZI 0x00000100 -+#define UCC_SLOW_GUMR_L_TENC_NRZ 0x00000000 -+#define UCC_SLOW_GUMR_L_DIAG_MASK 0x000000c0 -+#define UCC_SLOW_GUMR_L_DIAG_LE 0x000000c0 -+#define UCC_SLOW_GUMR_L_DIAG_ECHO 0x00000080 -+#define UCC_SLOW_GUMR_L_DIAG_LOOP 0x00000040 -+#define UCC_SLOW_GUMR_L_DIAG_NORM 0x00000000 -+#define UCC_SLOW_GUMR_L_ENR 0x00000020 -+#define UCC_SLOW_GUMR_L_ENT 0x00000010 -+#define UCC_SLOW_GUMR_L_MODE_MASK 0x0000000F -+#define UCC_SLOW_GUMR_L_MODE_BISYNC 0x00000008 -+#define UCC_SLOW_GUMR_L_MODE_AHDLC 0x00000006 -+#define UCC_SLOW_GUMR_L_MODE_UART 0x00000004 -+#define UCC_SLOW_GUMR_L_MODE_QMC 0x00000002 -+ -+/* General UCC FAST Mode Register */ -+#define UCC_FAST_GUMR_TCI 0x20000000 -+#define UCC_FAST_GUMR_TRX 0x10000000 -+#define 
UCC_FAST_GUMR_TTX 0x08000000 -+#define UCC_FAST_GUMR_CDP 0x04000000 -+#define UCC_FAST_GUMR_CTSP 0x02000000 -+#define UCC_FAST_GUMR_CDS 0x01000000 -+#define UCC_FAST_GUMR_CTSS 0x00800000 -+#define UCC_FAST_GUMR_TXSY 0x00020000 -+#define UCC_FAST_GUMR_RSYN 0x00010000 -+#define UCC_FAST_GUMR_RTSM 0x00002000 -+#define UCC_FAST_GUMR_REVD 0x00000400 -+#define UCC_FAST_GUMR_ENR 0x00000020 -+#define UCC_FAST_GUMR_ENT 0x00000010 -+ -+/* UART Slow UCC Event Register (UCCE) */ -+#define UCC_UART_UCCE_AB 0x0200 -+#define UCC_UART_UCCE_IDLE 0x0100 -+#define UCC_UART_UCCE_GRA 0x0080 -+#define UCC_UART_UCCE_BRKE 0x0040 -+#define UCC_UART_UCCE_BRKS 0x0020 -+#define UCC_UART_UCCE_CCR 0x0008 -+#define UCC_UART_UCCE_BSY 0x0004 -+#define UCC_UART_UCCE_TX 0x0002 -+#define UCC_UART_UCCE_RX 0x0001 -+ -+/* HDLC Slow UCC Event Register (UCCE) */ -+#define UCC_HDLC_UCCE_GLR 0x1000 -+#define UCC_HDLC_UCCE_GLT 0x0800 -+#define UCC_HDLC_UCCE_IDLE 0x0100 -+#define UCC_HDLC_UCCE_BRKE 0x0040 -+#define UCC_HDLC_UCCE_BRKS 0x0020 -+#define UCC_HDLC_UCCE_TXE 0x0010 -+#define UCC_HDLC_UCCE_RXF 0x0008 -+#define UCC_HDLC_UCCE_BSY 0x0004 -+#define UCC_HDLC_UCCE_TXB 0x0002 -+#define UCC_HDLC_UCCE_RXB 0x0001 -+ -+/* BISYNC Slow UCC Event Register (UCCE) */ -+#define UCC_BISYNC_UCCE_GRA 0x0080 -+#define UCC_BISYNC_UCCE_TXE 0x0010 -+#define UCC_BISYNC_UCCE_RCH 0x0008 -+#define UCC_BISYNC_UCCE_BSY 0x0004 -+#define UCC_BISYNC_UCCE_TXB 0x0002 -+#define UCC_BISYNC_UCCE_RXB 0x0001 -+ -+/* Transparent UCC Event Register (UCCE) */ -+#define UCC_TRANS_UCCE_GRA 0x0080 -+#define UCC_TRANS_UCCE_TXE 0x0010 -+#define UCC_TRANS_UCCE_RXF 0x0008 -+#define UCC_TRANS_UCCE_BSY 0x0004 -+#define UCC_TRANS_UCCE_TXB 0x0002 -+#define UCC_TRANS_UCCE_RXB 0x0001 -+ -+ -+/* Gigabit Ethernet Fast UCC Event Register (UCCE) */ -+#define UCC_GETH_UCCE_MPD 0x80000000 -+#define UCC_GETH_UCCE_SCAR 0x40000000 -+#define UCC_GETH_UCCE_GRA 0x20000000 -+#define UCC_GETH_UCCE_CBPR 0x10000000 -+#define UCC_GETH_UCCE_BSY 0x08000000 -+#define UCC_GETH_UCCE_RXC 0x04000000 -+#define UCC_GETH_UCCE_TXC 0x02000000 -+#define UCC_GETH_UCCE_TXE 0x01000000 -+#define UCC_GETH_UCCE_TXB7 0x00800000 -+#define UCC_GETH_UCCE_TXB6 0x00400000 -+#define UCC_GETH_UCCE_TXB5 0x00200000 -+#define UCC_GETH_UCCE_TXB4 0x00100000 -+#define UCC_GETH_UCCE_TXB3 0x00080000 -+#define UCC_GETH_UCCE_TXB2 0x00040000 -+#define UCC_GETH_UCCE_TXB1 0x00020000 -+#define UCC_GETH_UCCE_TXB0 0x00010000 -+#define UCC_GETH_UCCE_RXB7 0x00008000 -+#define UCC_GETH_UCCE_RXB6 0x00004000 -+#define UCC_GETH_UCCE_RXB5 0x00002000 -+#define UCC_GETH_UCCE_RXB4 0x00001000 -+#define UCC_GETH_UCCE_RXB3 0x00000800 -+#define UCC_GETH_UCCE_RXB2 0x00000400 -+#define UCC_GETH_UCCE_RXB1 0x00000200 -+#define UCC_GETH_UCCE_RXB0 0x00000100 -+#define UCC_GETH_UCCE_RXF7 0x00000080 -+#define UCC_GETH_UCCE_RXF6 0x00000040 -+#define UCC_GETH_UCCE_RXF5 0x00000020 -+#define UCC_GETH_UCCE_RXF4 0x00000010 -+#define UCC_GETH_UCCE_RXF3 0x00000008 -+#define UCC_GETH_UCCE_RXF2 0x00000004 -+#define UCC_GETH_UCCE_RXF1 0x00000002 -+#define UCC_GETH_UCCE_RXF0 0x00000001 -+ -+/* UCC Protocol Specific Mode Register (UPSMR), when used for UART */ -+#define UCC_UART_UPSMR_FLC 0x8000 -+#define UCC_UART_UPSMR_SL 0x4000 -+#define UCC_UART_UPSMR_CL_MASK 0x3000 -+#define UCC_UART_UPSMR_CL_8 0x3000 -+#define UCC_UART_UPSMR_CL_7 0x2000 -+#define UCC_UART_UPSMR_CL_6 0x1000 -+#define UCC_UART_UPSMR_CL_5 0x0000 -+#define UCC_UART_UPSMR_UM_MASK 0x0c00 -+#define UCC_UART_UPSMR_UM_NORMAL 0x0000 -+#define UCC_UART_UPSMR_UM_MAN_MULTI 0x0400 -+#define 
UCC_UART_UPSMR_UM_AUTO_MULTI 0x0c00
-+#define UCC_UART_UPSMR_FRZ 0x0200
-+#define UCC_UART_UPSMR_RZS 0x0100
-+#define UCC_UART_UPSMR_SYN 0x0080
-+#define UCC_UART_UPSMR_DRT 0x0040
-+#define UCC_UART_UPSMR_PEN 0x0010
-+#define UCC_UART_UPSMR_RPM_MASK 0x000c
-+#define UCC_UART_UPSMR_RPM_ODD 0x0000
-+#define UCC_UART_UPSMR_RPM_LOW 0x0004
-+#define UCC_UART_UPSMR_RPM_EVEN 0x0008
-+#define UCC_UART_UPSMR_RPM_HIGH 0x000C
-+#define UCC_UART_UPSMR_TPM_MASK 0x0003
-+#define UCC_UART_UPSMR_TPM_ODD 0x0000
-+#define UCC_UART_UPSMR_TPM_LOW 0x0001
-+#define UCC_UART_UPSMR_TPM_EVEN 0x0002
-+#define UCC_UART_UPSMR_TPM_HIGH 0x0003
-+
-+/* UCC Protocol Specific Mode Register (UPSMR), when used for Ethernet */
-+#define UCC_GETH_UPSMR_FTFE 0x80000000
-+#define UCC_GETH_UPSMR_PTPE 0x40000000
-+#define UCC_GETH_UPSMR_ECM 0x04000000
-+#define UCC_GETH_UPSMR_HSE 0x02000000
-+#define UCC_GETH_UPSMR_PRO 0x00400000
-+#define UCC_GETH_UPSMR_CAP 0x00200000
-+#define UCC_GETH_UPSMR_RSH 0x00100000
-+#define UCC_GETH_UPSMR_RPM 0x00080000
-+#define UCC_GETH_UPSMR_R10M 0x00040000
-+#define UCC_GETH_UPSMR_RLPB 0x00020000
-+#define UCC_GETH_UPSMR_TBIM 0x00010000
-+#define UCC_GETH_UPSMR_RES1 0x00002000
-+#define UCC_GETH_UPSMR_RMM 0x00001000
-+#define UCC_GETH_UPSMR_CAM 0x00000400
-+#define UCC_GETH_UPSMR_BRO 0x00000200
-+#define UCC_GETH_UPSMR_SMM 0x00000080
-+#define UCC_GETH_UPSMR_SGMM 0x00000020
-+
-+/* UCC Transmit On Demand Register (UTODR) */
-+#define UCC_SLOW_TOD 0x8000
-+#define UCC_FAST_TOD 0x8000
-+
-+/* UCC Bus Mode Register masks */
-+/* Not to be confused with the Bundle Mode Register */
-+#define UCC_BMR_GBL 0x20
-+#define UCC_BMR_BO_BE 0x10
-+#define UCC_BMR_CETM 0x04
-+#define UCC_BMR_DTB 0x02
-+#define UCC_BMR_BDB 0x01
-+
-+/* Function code masks */
-+#define FC_GBL 0x20
-+#define FC_DTB_LCL 0x02
-+#define UCC_FAST_FUNCTION_CODE_GBL 0x20
-+#define UCC_FAST_FUNCTION_CODE_DTB_LCL 0x02
-+#define UCC_FAST_FUNCTION_CODE_BDB_LCL 0x01
-+
-+#endif /* __KERNEL__ */
-+#endif /* _ASM_POWERPC_QE_H */
diff --git a/target/linux/layerscape/patches-4.4/7016-dpa-add-dpaa_eth-driver.patch b/target/linux/layerscape/patches-4.4/7016-dpa-add-dpaa_eth-driver.patch
deleted file mode 100644
index 1c48adade..000000000
--- a/target/linux/layerscape/patches-4.4/7016-dpa-add-dpaa_eth-driver.patch
+++ /dev/null
@@ -1,19160 +0,0 @@
-From 2af9b49c7e6bad2dee75960ddf61fd52a4d3748f Mon Sep 17 00:00:00 2001
-From: Zhao Qiang
-Date: Wed, 16 Dec 2015 22:00:36 +0200
-Subject: [PATCH 16/70] dpa: add dpaa_eth driver
-
-DPAA is the Datapath Acceleration Architecture; this architecture provides
-the infrastructure to support simplified sharing of networking
-interfaces and accelerators by multiple CPUs.
-
-Signed-off-by: Madalin Bucur
-Signed-off-by: Camelia Groza
-Signed-off-by: Alex Porosanu
-Signed-off-by: Pan Jiafei
-Signed-off-by: Shaohui Xie
-Signed-off-by: Zhao Qiang
----
- drivers/net/ethernet/freescale/Kconfig | 2 +
- drivers/net/ethernet/freescale/Makefile | 1 +
- drivers/net/ethernet/freescale/sdk_dpaa/Kconfig | 187 ++
- drivers/net/ethernet/freescale/sdk_dpaa/Makefile | 59 +
- .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.c | 580 ++++++
- .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.h | 138 ++
- .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c | 180 ++
- .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h | 43 +
- drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c | 1183 +++++++++++
- drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h | 695 +++++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.c | 263 +++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.h | 50 +
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c | 1719 ++++++++++++++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h | 230 +++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.c | 1787 ++++++++++++++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.h | 227 +++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_generic.c | 1735 ++++++++++++++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_generic.h | 90 +
- .../freescale/sdk_dpaa/dpaa_eth_generic_sysfs.c | 201 ++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_macless.c | 499 +++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.c | 2156 ++++++++++++++++++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.h | 294 +++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c | 381 ++++
- .../net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c | 1128 ++++++++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_shared.c | 914 +++++++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c | 278 +++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h | 144 ++
- .../net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c | 544 +++++
- .../freescale/sdk_dpaa/dpaa_generic_ethtool.c | 286 +++
- .../freescale/sdk_dpaa/dpaa_macsec_ethtool.c | 250 +++
- drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c | 287 +++
- drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c | 915 +++++++++
- drivers/net/ethernet/freescale/sdk_dpaa/mac.c | 470 +++++
- drivers/net/ethernet/freescale/sdk_dpaa/mac.h | 134 ++
- .../net/ethernet/freescale/sdk_dpaa/offline_port.c | 848 ++++++++
- .../net/ethernet/freescale/sdk_dpaa/offline_port.h | 59 +
- 36 files changed, 18957 insertions(+)
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Makefile
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic_sysfs.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macless.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_shared.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_generic_ethtool.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_macsec_ethtool.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h
-
---- a/drivers/net/ethernet/freescale/Kconfig
-+++ b/drivers/net/ethernet/freescale/Kconfig
-@@ -93,4 +93,6 @@ config GIANFAR
- on the 8540.
-
- source "drivers/net/ethernet/freescale/sdk_fman/Kconfig"
-+source "drivers/net/ethernet/freescale/sdk_dpaa/Kconfig"
-+
- endif # NET_VENDOR_FREESCALE
---- a/drivers/net/ethernet/freescale/Makefile
-+++ b/drivers/net/ethernet/freescale/Makefile
-@@ -18,3 +18,4 @@ gianfar_driver-objs := gianfar.o \
- obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
- ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
- obj-$(if $(CONFIG_FSL_SDK_FMAN),y) += sdk_fman/
-+obj-$(if $(CONFIG_FSL_SDK_DPAA_ETH),y) += sdk_dpaa/
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
-@@ -0,0 +1,187 @@
-+menuconfig FSL_SDK_DPAA_ETH
-+ tristate "DPAA Ethernet"
-+ depends on (FSL_SOC || ARM64 || ARM) && FSL_BMAN && FSL_QMAN && FSL_SDK_FMAN
-+ select PHYLIB
-+ ---help---
-+ Data Path Acceleration Architecture Ethernet driver,
-+ supporting the Freescale QorIQ chips.
-+ Depends on Freescale Buffer Manager and Queue Manager
-+ driver and Frame Manager Driver.
-+
-+if FSL_SDK_DPAA_ETH
-+
-+config FSL_DPAA_HOOKS
-+ bool "DPAA Ethernet driver hooks"
-+
-+config FSL_DPAA_MACSEC
-+ tristate "DPAA MACSEC"
-+ select FSL_DPAA_HOOKS
-+ ---help---
-+ Enable MACSEC support in DPAA.
-+
-+config FSL_DPAA_CEETM
-+ bool "DPAA CEETM QoS"
-+ select NET_SCHED
-+ default n
-+ ---help---
-+ Enable QoS offloading support through the CEETM hardware block.
-+
-+config FSL_DPAA_OFFLINE_PORTS
-+ bool "Offline Ports support"
-+ depends on FSL_SDK_DPAA_ETH
-+ default y
-+ ---help---
-+ The Offline Parsing / Host Command ports (short: OH ports, or Offline ports) provide
-+ most of the functionality of the regular, online ports, except they receive their
-+ frames from a core or an accelerator on the SoC, via QMan frame queues,
-+ rather than directly from the network.
-+ Offline ports are configured via PCD (Parse-Classify-Distribute) schemes, just like
-+ any online FMan port. They deliver the processed frames to frame queues, according
-+ to the applied PCD configurations.
-+
-+ Choosing this feature will not impact the functionality and/or performance of the system,
-+ so it is safe to have it.
-+
-+config FSL_DPAA_ADVANCED_DRIVERS
-+ bool "Advanced DPAA Ethernet drivers"
-+ depends on FSL_SDK_DPAA_ETH
-+ default y
-+ ---help---
-+ Besides the standard DPAA Ethernet driver, other flavours of DPAA
-+ drivers are available to support advanced scenarios:
-+ - DPAA Shared MAC driver
-+ - DPAA MAC-less driver
-+ - DPAA Proxy initialization driver (for USDPAA)
-+ Select this to also build the advanced drivers.
-+
-+config FSL_DPAA_GENERIC_DRIVER
-+ bool "Generic DPAA Ethernet driver"
-+ depends on FSL_SDK_DPAA_ETH
-+ default y
-+ ---help---
-+ This enables the DPAA Generic driver (oNIC).
-+
-+config FSL_DPAA_ETH_JUMBO_FRAME
-+ bool "Optimize for jumbo frames"
-+ depends on !ARM64 && !ARM
-+ default n
-+ ---help---
-+ Optimize the DPAA Ethernet driver throughput for large frames
-+ termination traffic (e.g. 4K and above).
-+ NOTE: This option can only be used if FSL_FM_MAX_FRAME_SIZE
-+ is set to 9600 bytes.
-+ Using this option in combination with small frames significantly
-+ increases the driver's memory footprint and may even deplete
-+ the system memory.
-+ This option is not available on LS1043.
-+
-+config FSL_DPAA_TS
-+ bool "Linux compliant timestamping"
-+ depends on FSL_SDK_DPAA_ETH
-+ default n
-+ ---help---
-+ Enable Linux API compliant timestamping support.
-+
-+config FSL_DPAA_1588
-+ bool "IEEE 1588-compliant timestamping"
-+ depends on FSL_SDK_DPAA_ETH
-+ select FSL_DPAA_TS
-+ default n
-+ ---help---
-+ Enable IEEE1588 support code.
-+
-+config FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
-+ bool "Use driver's Tx queue selection mechanism"
-+ default y
-+ depends on FSL_SDK_DPAA_ETH
-+ ---help---
-+ The DPAA-Ethernet driver defines an ndo_select_queue() callback for optimal selection
-+ of the egress FQ. That will override the XPS support for this netdevice.
-+ If for whatever reason you want to be in control of the egress FQ-to-CPU selection and mapping,
-+ or simply don't want to use the driver's ndo_select_queue() callback, then unselect this
-+ and use the standard XPS support instead.
-+
-+config FSL_DPAA_ETH_MAX_BUF_COUNT
-+ int "Maximum number of buffers in private bpool"
-+ depends on FSL_SDK_DPAA_ETH
-+ range 64 2048
-+ default "128"
-+ ---help---
-+ The maximum number of buffers to be allocated by default in the DPAA-Ethernet private port's
-+ buffer pool. One needn't normally modify this, as it has probably been tuned for performance
-+ already. This cannot be lower than DPAA_ETH_REFILL_THRESHOLD.
-+
-+config FSL_DPAA_ETH_REFILL_THRESHOLD
-+ int "Private bpool refill threshold"
-+ depends on FSL_SDK_DPAA_ETH
-+ range 32 FSL_DPAA_ETH_MAX_BUF_COUNT
-+ default "80"
-+ ---help---
-+ The DPAA-Ethernet driver will start replenishing buffer pools whose count
-+ falls below this threshold. This must be related to DPAA_ETH_MAX_BUF_COUNT. One needn't normally
-+ modify this value unless one has very specific performance reasons.
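
To make the interplay of the two bpool options above concrete, the refill policy
they describe amounts to the check below. This is a minimal sketch only;
dpa_bp_count() and dpa_bp_add_bufs() are hypothetical stand-ins, not the
driver's actual symbols:

    /* Refill policy implied by FSL_DPAA_ETH_MAX_BUF_COUNT and
     * FSL_DPAA_ETH_REFILL_THRESHOLD; all helper names are illustrative. */
    #define MAX_BUF_COUNT 128    /* CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT */
    #define REFILL_THRESHOLD 80  /* CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD */

    struct dpa_bp;                                    /* buffer pool, opaque here */
    int dpa_bp_count(struct dpa_bp *bp);              /* buffers currently in pool */
    void dpa_bp_add_bufs(struct dpa_bp *bp, int num); /* seed new buffers */

    static void dpa_bp_maybe_replenish(struct dpa_bp *bp)
    {
        int count = dpa_bp_count(bp);

        /* Replenish only once the pool drains below the threshold,
         * then top it back up to the configured maximum. */
        if (count < REFILL_THRESHOLD)
            dpa_bp_add_bufs(bp, MAX_BUF_COUNT - count);
    }

This also shows why MAX_BUF_COUNT cannot be lower than REFILL_THRESHOLD: the
top-up amount would otherwise be negative.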
-+config FSL_DPAA_CS_THRESHOLD_1G
-+ hex "Egress congestion threshold on 1G ports"
-+ depends on FSL_SDK_DPAA_ETH
-+ range 0x1000 0x10000000
-+ default "0x06000000"
-+ ---help---
-+ The size in bytes of the egress Congestion State notification threshold on 1G ports.
-+ The 1G dTSECs can quite easily be flooded by cores doing Tx in a tight loop
-+ (e.g. by sending UDP datagrams at "while(1) speed"),
-+ and the larger the frame size, the more acute the problem.
-+ So we have to find a balance between these factors:
-+ - avoiding the device staying congested for a prolonged time (risking
-+ the netdev watchdog firing - see also the tx_timeout module param);
-+ - affecting performance of protocols such as TCP, which otherwise
-+ behave well under the congestion notification mechanism;
-+ - preventing the Tx cores from tightly-looping (as if the congestion
-+ threshold was too low to be effective);
-+ - running out of memory if the CS threshold is set too high.
-+
-+config FSL_DPAA_CS_THRESHOLD_10G
-+ hex "Egress congestion threshold on 10G ports"
-+ depends on FSL_SDK_DPAA_ETH
-+ range 0x1000 0x20000000
-+ default "0x10000000"
-+
-+config FSL_DPAA_INGRESS_CS_THRESHOLD
-+ hex "Ingress congestion threshold on FMan ports"
-+ depends on FSL_SDK_DPAA_ETH
-+ default "0x10000000"
-+ ---help---
-+ The size in bytes of the ingress tail-drop threshold on FMan ports.
-+ Traffic piling up above this value will be rejected by QMan and discarded by FMan.
-+
-+config FSL_DPAA_ETH_DEBUGFS
-+ bool "DPAA Ethernet debugfs interface"
-+ depends on DEBUG_FS && FSL_SDK_DPAA_ETH
-+ default y
-+ ---help---
-+ This option compiles debugfs code for the DPAA Ethernet driver.
-+
-+config FSL_DPAA_ETH_DEBUG
-+ bool "DPAA Ethernet Debug Support"
-+ depends on FSL_SDK_DPAA_ETH
-+ default n
-+ ---help---
-+ This option compiles debug code for the DPAA Ethernet driver.
-+
-+config FSL_DPAA_DBG_LOOP
-+ bool "DPAA Ethernet Debug loopback"
-+ depends on FSL_DPAA_ETH_DEBUGFS && FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
-+ default n
-+ ---help---
-+ This option allows you to divert all received traffic on a certain interface A towards a
-+ selected interface B. This option is used to benchmark the HW + Ethernet driver in
-+ isolation from the Linux networking stack. The loops are controlled by debugfs entries,
-+ one for each interface. By default all loops are disabled (target value is -1). I.e.
to -+ change the loop setting for interface 4 and divert all received traffic to interface 5 -+ write Tx interface number in the receive interface debugfs file: -+ # cat /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop -+ 4->-1 -+ # echo 5 > /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop -+ # cat /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop -+ 4->5 -+endif # FSL_SDK_DPAA_ETH ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/Makefile -@@ -0,0 +1,59 @@ -+# -+# Makefile for the Freescale Ethernet controllers -+# -+ccflags-y += -DVERSION=\"\" -+# -+# Include netcomm SW specific definitions -+include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk -+ -+ccflags-y += -I$(NET_DPA) -+ -+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_mac.o fsl_dpa.o -+obj-$(CONFIG_PTP_1588_CLOCK_DPAA) += dpaa_ptp.o -+ -+fsl_dpa-objs += dpaa_ethtool.o dpaa_eth_sysfs.o dpaa_eth.o dpaa_eth_sg.o dpaa_eth_common.o -+ifeq ($(CONFIG_FSL_DPAA_DBG_LOOP),y) -+fsl_dpa-objs += dpaa_debugfs.o -+endif -+ifeq ($(CONFIG_FSL_DPAA_1588),y) -+fsl_dpa-objs += dpaa_1588.o -+endif -+ifeq ($(CONFIG_FSL_DPAA_CEETM),y) -+ccflags-y += -Idrivers/net/ethernet/freescale/sdk_fman/src/wrapper -+fsl_dpa-objs += dpaa_eth_ceetm.o -+endif -+ -+fsl_mac-objs += mac.o mac-api.o -+ -+# Advanced drivers -+ifeq ($(CONFIG_FSL_DPAA_ADVANCED_DRIVERS),y) -+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_advanced.o -+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_proxy.o -+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_dpa_shared.o -+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_dpa_macless.o -+obj-$(CONFIG_FSL_DPAA_MACSEC) += fsl_dpa_macsec.o -+ -+fsl_advanced-objs += dpaa_eth_base.o -+# suport for multiple drivers per kernel module comes in kernel 3.14 -+# so we are forced to generate several modules for the advanced drivers -+fsl_proxy-objs += dpaa_eth_proxy.o -+fsl_dpa_shared-objs += dpaa_eth_shared.o -+fsl_dpa_macless-objs += dpaa_eth_macless.o -+fsl_dpa_macsec-objs += dpaa_eth_macsec.o dpaa_macsec_ethtool.o -+ -+ifeq ($(CONFIG_FSL_DPAA_OFFLINE_PORTS),y) -+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_oh.o -+ -+fsl_oh-objs += offline_port.o -+endif -+endif -+ -+# Generic driver -+ifeq ($(CONFIG_FSL_DPAA_GENERIC_DRIVER),y) -+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_generic.o -+ -+fsl_generic-objs += dpaa_eth_generic.o dpaa_eth_generic_sysfs.o dpaa_generic_ethtool.o -+endif -+ -+# Needed by the tracing framework -+CFLAGS_dpaa_eth.o := -I$(src) ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c -@@ -0,0 +1,580 @@ -+/* Copyright (C) 2011 Freescale Semiconductor, Inc. -+ * Copyright (C) 2009 IXXAT Automation, GmbH -+ * -+ * DPAA Ethernet Driver -- IEEE 1588 interface functionality -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-+ * -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "dpaa_eth.h" -+#include "dpaa_eth_common.h" -+#include "dpaa_1588.h" -+#include "mac.h" -+ -+static int dpa_ptp_init_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size) -+{ -+ struct circ_buf *circ_buf = &ptp_buf->circ_buf; -+ -+ circ_buf->buf = vmalloc(sizeof(struct dpa_ptp_data) * size); -+ if (!circ_buf->buf) -+ return 1; -+ -+ circ_buf->head = 0; -+ circ_buf->tail = 0; -+ ptp_buf->size = size; -+ spin_lock_init(&ptp_buf->ptp_lock); -+ -+ return 0; -+} -+ -+static void dpa_ptp_reset_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size) -+{ -+ struct circ_buf *circ_buf = &ptp_buf->circ_buf; -+ -+ circ_buf->head = 0; -+ circ_buf->tail = 0; -+ ptp_buf->size = size; -+} -+ -+static int dpa_ptp_insert(struct dpa_ptp_circ_buf *ptp_buf, -+ struct dpa_ptp_data *data) -+{ -+ struct circ_buf *circ_buf = &ptp_buf->circ_buf; -+ int size = ptp_buf->size; -+ struct dpa_ptp_data *tmp; -+ unsigned long flags; -+ int head, tail; -+ -+ spin_lock_irqsave(&ptp_buf->ptp_lock, flags); -+ -+ head = circ_buf->head; -+ tail = circ_buf->tail; -+ -+ if (CIRC_SPACE(head, tail, size) <= 0) -+ circ_buf->tail = (tail + 1) & (size - 1); -+ -+ tmp = (struct dpa_ptp_data *)(circ_buf->buf) + head; -+ memcpy(tmp, data, sizeof(struct dpa_ptp_data)); -+ -+ circ_buf->head = (head + 1) & (size - 1); -+ -+ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags); -+ -+ return 0; -+} -+ -+static int dpa_ptp_is_ident_match(struct dpa_ptp_ident *dst, -+ struct dpa_ptp_ident *src) -+{ -+ int ret; -+ -+ if ((dst->version != src->version) || (dst->msg_type != src->msg_type)) -+ return 0; -+ -+ if ((dst->netw_prot == src->netw_prot) -+ || src->netw_prot == DPA_PTP_PROT_DONTCARE) { -+ if (dst->seq_id != src->seq_id) -+ return 0; -+ -+ ret = memcmp(dst->snd_port_id, src->snd_port_id, -+ DPA_PTP_SOURCE_PORT_LENGTH); -+ if (ret) -+ return 0; -+ else -+ return 1; -+ } -+ -+ return 0; -+} -+ -+static int dpa_ptp_find_and_remove(struct dpa_ptp_circ_buf *ptp_buf, -+ struct dpa_ptp_ident *ident, -+ struct dpa_ptp_time *ts) -+{ -+ struct circ_buf *circ_buf = &ptp_buf->circ_buf; -+ int size = ptp_buf->size; -+ int head, tail, idx; -+ unsigned long flags; -+ struct dpa_ptp_data *tmp, *tmp2; -+ struct dpa_ptp_ident *tmp_ident; -+ -+ spin_lock_irqsave(&ptp_buf->ptp_lock, flags); -+ -+ head = circ_buf->head; -+ tail = idx = circ_buf->tail; -+ -+ if (CIRC_CNT(head, tail, size) == 0) { -+ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags); -+ return 1; -+ } -+ -+ while (idx != head) { -+ tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx; -+ tmp_ident = &tmp->ident; -+ if (dpa_ptp_is_ident_match(tmp_ident, ident)) -+ break; -+ idx = (idx + 1) & (size - 1); -+ } -+ -+ if (idx == head) { -+ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags); -+ return 1; -+ } -+ -+ ts->sec = tmp->ts.sec; -+ ts->nsec = tmp->ts.nsec; -+ -+ if (idx != tail) { -+ if (CIRC_CNT(idx, tail, size) > TS_ACCUMULATION_THRESHOLD) { -+ tail = circ_buf->tail = -+ (idx - TS_ACCUMULATION_THRESHOLD) & (size - 1); -+ } -+ -+ while (CIRC_CNT(idx, tail, size) > 0) { -+ tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx; -+ idx = (idx - 1) & (size - 1); -+ tmp2 = (struct dpa_ptp_data *)(circ_buf->buf) + idx; -+ *tmp = *tmp2; -+ } -+ } -+ circ_buf->tail = (tail + 1) & (size - 1); -+ -+ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags); -+ -+ return 0; -+} -+ -+/* Parse the PTP packets -+ * -+ * The PTP header can be found in an IPv4 packet, IPv6 patcket or in -+ * an IEEE802.3 ethernet 
frame. This function returns the position of -+ * the PTP packet or NULL if no PTP found -+ */ -+static u8 *dpa_ptp_parse_packet(struct sk_buff *skb, u16 *eth_type) -+{ -+ u8 *pos = skb->data + ETH_ALEN + ETH_ALEN; -+ u8 *ptp_loc = NULL; -+ u8 msg_type; -+ u32 access_len = ETH_ALEN + ETH_ALEN + DPA_ETYPE_LEN; -+ struct iphdr *iph; -+ struct udphdr *udph; -+ struct ipv6hdr *ipv6h; -+ -+ /* when we can receive S/G frames we need to check the data we want to -+ * access is in the linear skb buffer -+ */ -+ if (!pskb_may_pull(skb, access_len)) -+ return NULL; -+ -+ *eth_type = *((u16 *)pos); -+ -+ /* Check if inner tag is here */ -+ if (*eth_type == ETH_P_8021Q) { -+ access_len += DPA_VLAN_TAG_LEN; -+ -+ if (!pskb_may_pull(skb, access_len)) -+ return NULL; -+ -+ pos += DPA_VLAN_TAG_LEN; -+ *eth_type = *((u16 *)pos); -+ } -+ -+ pos += DPA_ETYPE_LEN; -+ -+ switch (*eth_type) { -+ /* Transport of PTP over Ethernet */ -+ case ETH_P_1588: -+ ptp_loc = pos; -+ -+ if (!pskb_may_pull(skb, access_len + PTP_OFFS_MSG_TYPE + 1)) -+ return NULL; -+ -+ msg_type = *((u8 *)(ptp_loc + PTP_OFFS_MSG_TYPE)) & 0xf; -+ if ((msg_type == PTP_MSGTYPE_SYNC) -+ || (msg_type == PTP_MSGTYPE_DELREQ) -+ || (msg_type == PTP_MSGTYPE_PDELREQ) -+ || (msg_type == PTP_MSGTYPE_PDELRESP)) -+ return ptp_loc; -+ break; -+ /* Transport of PTP over IPv4 */ -+ case ETH_P_IP: -+ iph = (struct iphdr *)pos; -+ access_len += sizeof(struct iphdr); -+ -+ if (!pskb_may_pull(skb, access_len)) -+ return NULL; -+ -+ if (ntohs(iph->protocol) != IPPROTO_UDP) -+ return NULL; -+ -+ access_len += iph->ihl * 4 - sizeof(struct iphdr) + -+ sizeof(struct udphdr); -+ -+ if (!pskb_may_pull(skb, access_len)) -+ return NULL; -+ -+ pos += iph->ihl * 4; -+ udph = (struct udphdr *)pos; -+ if (ntohs(udph->dest) != 319) -+ return NULL; -+ ptp_loc = pos + sizeof(struct udphdr); -+ break; -+ /* Transport of PTP over IPv6 */ -+ case ETH_P_IPV6: -+ ipv6h = (struct ipv6hdr *)pos; -+ -+ access_len += sizeof(struct ipv6hdr) + sizeof(struct udphdr); -+ -+ if (ntohs(ipv6h->nexthdr) != IPPROTO_UDP) -+ return NULL; -+ -+ pos += sizeof(struct ipv6hdr); -+ udph = (struct udphdr *)pos; -+ if (ntohs(udph->dest) != 319) -+ return NULL; -+ ptp_loc = pos + sizeof(struct udphdr); -+ break; -+ default: -+ break; -+ } -+ -+ return ptp_loc; -+} -+ -+static int dpa_ptp_store_stamp(const struct dpa_priv_s *priv, -+ struct sk_buff *skb, void *data, enum port_type rx_tx, -+ struct dpa_ptp_data *ptp_data) -+{ -+ u64 nsec; -+ u32 mod; -+ u8 *ptp_loc; -+ u16 eth_type; -+ -+ ptp_loc = dpa_ptp_parse_packet(skb, ð_type); -+ if (!ptp_loc) -+ return -EINVAL; -+ -+ switch (eth_type) { -+ case ETH_P_IP: -+ ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV4; -+ break; -+ case ETH_P_IPV6: -+ ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV6; -+ break; -+ case ETH_P_1588: -+ ptp_data->ident.netw_prot = DPA_PTP_PROT_802_3; -+ break; -+ default: -+ return -EINVAL; -+ } -+ -+ if (!pskb_may_pull(skb, ptp_loc - skb->data + PTP_OFFS_SEQ_ID + 2)) -+ return -EINVAL; -+ -+ ptp_data->ident.version = *(ptp_loc + PTP_OFFS_VER_PTP) & 0xf; -+ ptp_data->ident.msg_type = *(ptp_loc + PTP_OFFS_MSG_TYPE) & 0xf; -+ ptp_data->ident.seq_id = *((u16 *)(ptp_loc + PTP_OFFS_SEQ_ID)); -+ memcpy(ptp_data->ident.snd_port_id, ptp_loc + PTP_OFFS_SRCPRTID, -+ DPA_PTP_SOURCE_PORT_LENGTH); -+ -+ nsec = dpa_get_timestamp_ns(priv, rx_tx, data); -+ mod = do_div(nsec, NANOSEC_PER_SECOND); -+ ptp_data->ts.sec = nsec; -+ ptp_data->ts.nsec = mod; -+ -+ return 0; -+} -+ -+void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv, -+ struct sk_buff *skb, 
void *data) -+{ -+ struct dpa_ptp_tsu *tsu = priv->tsu; -+ struct dpa_ptp_data ptp_tx_data; -+ -+ if (dpa_ptp_store_stamp(priv, skb, data, TX, &ptp_tx_data)) -+ return; -+ -+ dpa_ptp_insert(&tsu->tx_timestamps, &ptp_tx_data); -+} -+ -+void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv, -+ struct sk_buff *skb, void *data) -+{ -+ struct dpa_ptp_tsu *tsu = priv->tsu; -+ struct dpa_ptp_data ptp_rx_data; -+ -+ if (dpa_ptp_store_stamp(priv, skb, data, RX, &ptp_rx_data)) -+ return; -+ -+ dpa_ptp_insert(&tsu->rx_timestamps, &ptp_rx_data); -+} -+ -+static uint8_t dpa_get_tx_timestamp(struct dpa_ptp_tsu *ptp_tsu, -+ struct dpa_ptp_ident *ident, -+ struct dpa_ptp_time *ts) -+{ -+ struct dpa_ptp_tsu *tsu = ptp_tsu; -+ struct dpa_ptp_time tmp; -+ int flag; -+ -+ flag = dpa_ptp_find_and_remove(&tsu->tx_timestamps, ident, &tmp); -+ if (!flag) { -+ ts->sec = tmp.sec; -+ ts->nsec = tmp.nsec; -+ return 0; -+ } -+ -+ return -1; -+} -+ -+static uint8_t dpa_get_rx_timestamp(struct dpa_ptp_tsu *ptp_tsu, -+ struct dpa_ptp_ident *ident, -+ struct dpa_ptp_time *ts) -+{ -+ struct dpa_ptp_tsu *tsu = ptp_tsu; -+ struct dpa_ptp_time tmp; -+ int flag; -+ -+ flag = dpa_ptp_find_and_remove(&tsu->rx_timestamps, ident, &tmp); -+ if (!flag) { -+ ts->sec = tmp.sec; -+ ts->nsec = tmp.nsec; -+ return 0; -+ } -+ -+ return -1; -+} -+ -+static void dpa_set_fiper_alarm(struct dpa_ptp_tsu *tsu, -+ struct dpa_ptp_time *cnt_time) -+{ -+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev; -+ u64 tmp, fiper; -+ -+ if (mac_dev->fm_rtc_disable) -+ mac_dev->fm_rtc_disable(get_fm_handle(tsu->dpa_priv->net_dev)); -+ -+ /* TMR_FIPER1 will pulse every second after ALARM1 expired */ -+ tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec; -+ fiper = NANOSEC_PER_SECOND - DPA_PTP_NOMINAL_FREQ_PERIOD_NS; -+ if (mac_dev->fm_rtc_set_alarm) -+ mac_dev->fm_rtc_set_alarm(get_fm_handle(tsu->dpa_priv->net_dev), -+ 0, tmp); -+ if (mac_dev->fm_rtc_set_fiper) -+ mac_dev->fm_rtc_set_fiper(get_fm_handle(tsu->dpa_priv->net_dev), -+ 0, fiper); -+ -+ if (mac_dev->fm_rtc_enable) -+ mac_dev->fm_rtc_enable(get_fm_handle(tsu->dpa_priv->net_dev)); -+} -+ -+static void dpa_get_curr_cnt(struct dpa_ptp_tsu *tsu, -+ struct dpa_ptp_time *curr_time) -+{ -+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev; -+ u64 tmp; -+ u32 mod; -+ -+ if (mac_dev->fm_rtc_get_cnt) -+ mac_dev->fm_rtc_get_cnt(get_fm_handle(tsu->dpa_priv->net_dev), -+ &tmp); -+ -+ mod = do_div(tmp, NANOSEC_PER_SECOND); -+ curr_time->sec = (u32)tmp; -+ curr_time->nsec = mod; -+} -+ -+static void dpa_set_1588cnt(struct dpa_ptp_tsu *tsu, -+ struct dpa_ptp_time *cnt_time) -+{ -+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev; -+ u64 tmp; -+ -+ tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec; -+ -+ if (mac_dev->fm_rtc_set_cnt) -+ mac_dev->fm_rtc_set_cnt(get_fm_handle(tsu->dpa_priv->net_dev), -+ tmp); -+ -+ /* Restart fiper two seconds later */ -+ cnt_time->sec += 2; -+ cnt_time->nsec = 0; -+ dpa_set_fiper_alarm(tsu, cnt_time); -+} -+ -+static void dpa_get_drift(struct dpa_ptp_tsu *tsu, u32 *addend) -+{ -+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev; -+ u32 drift; -+ -+ if (mac_dev->fm_rtc_get_drift) -+ mac_dev->fm_rtc_get_drift(get_fm_handle(tsu->dpa_priv->net_dev), -+ &drift); -+ -+ *addend = drift; -+} -+ -+static void dpa_set_drift(struct dpa_ptp_tsu *tsu, u32 addend) -+{ -+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev; -+ -+ if (mac_dev->fm_rtc_set_drift) -+ mac_dev->fm_rtc_set_drift(get_fm_handle(tsu->dpa_priv->net_dev), -+ addend); -+} -+ 
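
/*
 * A minimal sketch of how the drift accessors above are typically used:
 * a caller (e.g. a PTP servo) reads the current compensation value with
 * dpa_get_drift(), scales it by a correction expressed in parts per
 * billion, and writes the result back with dpa_set_drift(). The helper
 * below is illustrative only; it assumes the usual fixed-point "addend"
 * convention for the FMan RTC and is not a symbol of this driver.
 */
static u32 dpa_scale_addend(u32 addend, s32 ppb)
{
	u64 adj = (u64)addend * abs(ppb);

	/* adj = addend * |ppb| / 1e9, the magnitude of the correction */
	do_div(adj, NANOSEC_PER_SECOND);

	return ppb < 0 ? addend - (u32)adj : addend + (u32)adj;
}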
-+static void dpa_flush_timestamp(struct dpa_ptp_tsu *tsu) -+{ -+ dpa_ptp_reset_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ); -+ dpa_ptp_reset_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ); -+} -+ -+int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd) -+{ -+ struct dpa_priv_s *priv = netdev_priv(dev); -+ struct dpa_ptp_tsu *tsu = priv->tsu; -+ struct mac_device *mac_dev = priv->mac_dev; -+ struct dpa_ptp_data ptp_data; -+ struct dpa_ptp_data *ptp_data_user; -+ struct dpa_ptp_time act_time; -+ u32 addend; -+ int retval = 0; -+ -+ if (!tsu || !tsu->valid) -+ return -ENODEV; -+ -+ switch (cmd) { -+ case PTP_ENBL_TXTS_IOCTL: -+ tsu->hwts_tx_en_ioctl = 1; -+ if (mac_dev->fm_rtc_enable) -+ mac_dev->fm_rtc_enable(get_fm_handle(dev)); -+ if (mac_dev->ptp_enable) -+ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev)); -+ break; -+ case PTP_DSBL_TXTS_IOCTL: -+ tsu->hwts_tx_en_ioctl = 0; -+ if (mac_dev->fm_rtc_disable) -+ mac_dev->fm_rtc_disable(get_fm_handle(dev)); -+ if (mac_dev->ptp_disable) -+ mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev)); -+ break; -+ case PTP_ENBL_RXTS_IOCTL: -+ tsu->hwts_rx_en_ioctl = 1; -+ break; -+ case PTP_DSBL_RXTS_IOCTL: -+ tsu->hwts_rx_en_ioctl = 0; -+ break; -+ case PTP_GET_RX_TIMESTAMP: -+ ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data; -+ if (copy_from_user(&ptp_data.ident, -+ &ptp_data_user->ident, sizeof(ptp_data.ident))) -+ return -EINVAL; -+ -+ if (dpa_get_rx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts)) -+ return -EAGAIN; -+ -+ if (copy_to_user((void __user *)&ptp_data_user->ts, -+ &ptp_data.ts, sizeof(ptp_data.ts))) -+ return -EFAULT; -+ break; -+ case PTP_GET_TX_TIMESTAMP: -+ ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data; -+ if (copy_from_user(&ptp_data.ident, -+ &ptp_data_user->ident, sizeof(ptp_data.ident))) -+ return -EINVAL; -+ -+ if (dpa_get_tx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts)) -+ return -EAGAIN; -+ -+ if (copy_to_user((void __user *)&ptp_data_user->ts, -+ &ptp_data.ts, sizeof(ptp_data.ts))) -+ return -EFAULT; -+ break; -+ case PTP_GET_TIME: -+ dpa_get_curr_cnt(tsu, &act_time); -+ if (copy_to_user(ifr->ifr_data, &act_time, sizeof(act_time))) -+ return -EFAULT; -+ break; -+ case PTP_SET_TIME: -+ if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time))) -+ return -EINVAL; -+ dpa_set_1588cnt(tsu, &act_time); -+ break; -+ case PTP_GET_ADJ: -+ dpa_get_drift(tsu, &addend); -+ if (copy_to_user(ifr->ifr_data, &addend, sizeof(addend))) -+ return -EFAULT; -+ break; -+ case PTP_SET_ADJ: -+ if (copy_from_user(&addend, ifr->ifr_data, sizeof(addend))) -+ return -EINVAL; -+ dpa_set_drift(tsu, addend); -+ break; -+ case PTP_SET_FIPER_ALARM: -+ if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time))) -+ return -EINVAL; -+ dpa_set_fiper_alarm(tsu, &act_time); -+ break; -+ case PTP_CLEANUP_TS: -+ dpa_flush_timestamp(tsu); -+ break; -+ default: -+ return -EINVAL; -+ } -+ -+ return retval; -+} -+ -+int dpa_ptp_init(struct dpa_priv_s *priv) -+{ -+ struct dpa_ptp_tsu *tsu; -+ -+ /* Allocate memory for PTP structure */ -+ tsu = kzalloc(sizeof(struct dpa_ptp_tsu), GFP_KERNEL); -+ if (!tsu) -+ return -ENOMEM; -+ -+ tsu->valid = TRUE; -+ tsu->dpa_priv = priv; -+ -+ dpa_ptp_init_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ); -+ dpa_ptp_init_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ); -+ -+ priv->tsu = tsu; -+ -+ return 0; -+} -+EXPORT_SYMBOL(dpa_ptp_init); -+ -+void dpa_ptp_cleanup(struct dpa_priv_s *priv) -+{ -+ struct dpa_ptp_tsu *tsu = priv->tsu; -+ -+ tsu->valid = FALSE; -+ 
vfree(tsu->rx_timestamps.circ_buf.buf); -+ vfree(tsu->tx_timestamps.circ_buf.buf); -+ -+ kfree(tsu); -+} -+EXPORT_SYMBOL(dpa_ptp_cleanup); ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h -@@ -0,0 +1,138 @@ -+/* Copyright (C) 2011 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ */ -+#ifndef __DPAA_1588_H__ -+#define __DPAA_1588_H__ -+ -+#include -+#include -+#include -+#include -+ -+#define DEFAULT_PTP_RX_BUF_SZ 256 -+#define DEFAULT_PTP_TX_BUF_SZ 256 -+ -+/* 1588 private ioctl calls */ -+#define PTP_ENBL_TXTS_IOCTL SIOCDEVPRIVATE -+#define PTP_DSBL_TXTS_IOCTL (SIOCDEVPRIVATE + 1) -+#define PTP_ENBL_RXTS_IOCTL (SIOCDEVPRIVATE + 2) -+#define PTP_DSBL_RXTS_IOCTL (SIOCDEVPRIVATE + 3) -+#define PTP_GET_TX_TIMESTAMP (SIOCDEVPRIVATE + 4) -+#define PTP_GET_RX_TIMESTAMP (SIOCDEVPRIVATE + 5) -+#define PTP_SET_TIME (SIOCDEVPRIVATE + 6) -+#define PTP_GET_TIME (SIOCDEVPRIVATE + 7) -+#define PTP_SET_FIPER_ALARM (SIOCDEVPRIVATE + 8) -+#define PTP_SET_ADJ (SIOCDEVPRIVATE + 9) -+#define PTP_GET_ADJ (SIOCDEVPRIVATE + 10) -+#define PTP_CLEANUP_TS (SIOCDEVPRIVATE + 11) -+ -+/* PTP V2 message type */ -+enum { -+ PTP_MSGTYPE_SYNC = 0x0, -+ PTP_MSGTYPE_DELREQ = 0x1, -+ PTP_MSGTYPE_PDELREQ = 0x2, -+ PTP_MSGTYPE_PDELRESP = 0x3, -+ PTP_MSGTYPE_FLWUP = 0x8, -+ PTP_MSGTYPE_DELRESP = 0x9, -+ PTP_MSGTYPE_PDELRES_FLWUP = 0xA, -+ PTP_MSGTYPE_ANNOUNCE = 0xB, -+ PTP_MSGTYPE_SGNLNG = 0xC, -+ PTP_MSGTYPE_MNGMNT = 0xD, -+}; -+ -+/* Byte offset of data in the PTP V2 headers */ -+#define PTP_OFFS_MSG_TYPE 0 -+#define PTP_OFFS_VER_PTP 1 -+#define PTP_OFFS_MSG_LEN 2 -+#define PTP_OFFS_DOM_NMB 4 -+#define PTP_OFFS_FLAGS 6 -+#define PTP_OFFS_CORFIELD 8 -+#define PTP_OFFS_SRCPRTID 20 -+#define PTP_OFFS_SEQ_ID 30 -+#define PTP_OFFS_CTRL 32 -+#define PTP_OFFS_LOGMEAN 33 -+ -+#define PTP_IP_OFFS 14 -+#define PTP_UDP_OFFS 34 -+#define PTP_HEADER_OFFS 42 -+#define PTP_MSG_TYPE_OFFS (PTP_HEADER_OFFS + PTP_OFFS_MSG_TYPE) -+#define PTP_SPORT_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SRCPRTID) -+#define PTP_SEQ_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SEQ_ID) -+#define PTP_CTRL_OFFS (PTP_HEADER_OFFS + PTP_OFFS_CTRL) -+ -+/* 1588-2008 network protocol enumeration values */ -+#define DPA_PTP_PROT_IPV4 1 -+#define DPA_PTP_PROT_IPV6 2 -+#define DPA_PTP_PROT_802_3 3 -+#define DPA_PTP_PROT_DONTCARE 0xFFFF -+ -+#define DPA_PTP_SOURCE_PORT_LENGTH 10 -+#define DPA_PTP_HEADER_SZE 34 -+#define DPA_ETYPE_LEN 2 -+#define DPA_VLAN_TAG_LEN 4 -+#define NANOSEC_PER_SECOND 1000000000 -+ -+/* The threshold between the current found one and the oldest one */ -+#define TS_ACCUMULATION_THRESHOLD 50 -+ -+/* Struct needed to identify a timestamp */ -+struct dpa_ptp_ident { -+ u8 version; -+ u8 msg_type; -+ u16 netw_prot; -+ u16 seq_id; -+ u8 snd_port_id[DPA_PTP_SOURCE_PORT_LENGTH]; -+}; -+ -+/* Timestamp format in 1588-2008 */ 
-+struct dpa_ptp_time { -+ u64 sec; /* just 48 bit used */ -+ u32 nsec; -+}; -+ -+/* needed for timestamp data over ioctl */ -+struct dpa_ptp_data { -+ struct dpa_ptp_ident ident; -+ struct dpa_ptp_time ts; -+}; -+ -+struct dpa_ptp_circ_buf { -+ struct circ_buf circ_buf; -+ u32 size; -+ spinlock_t ptp_lock; -+}; -+ -+/* PTP TSU control structure */ -+struct dpa_ptp_tsu { -+ struct dpa_priv_s *dpa_priv; -+ bool valid; -+ struct dpa_ptp_circ_buf rx_timestamps; -+ struct dpa_ptp_circ_buf tx_timestamps; -+ -+ /* HW timestamping over ioctl enabled flag */ -+ int hwts_tx_en_ioctl; -+ int hwts_rx_en_ioctl; -+}; -+ -+extern int dpa_ptp_init(struct dpa_priv_s *priv); -+extern void dpa_ptp_cleanup(struct dpa_priv_s *priv); -+extern void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv, -+ struct sk_buff *skb, void *data); -+extern void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv, -+ struct sk_buff *skb, void *data); -+extern int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd); -+#endif ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c -@@ -0,0 +1,180 @@ -+/* Copyright 2008-2013 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/fsl_qman.h>	/* struct qm_mcr_querycgr */
-+#include <linux/debugfs.h>
-+#include "dpaa_debugfs.h"
-+#include "dpaa_eth.h" /* struct dpa_priv_s, dpa_percpu_priv_s, dpa_bp */
-+
-+#define DPA_DEBUGFS_DESCRIPTION "FSL DPAA Ethernet debugfs entries"
-+#define DPA_ETH_DEBUGFS_ROOT "fsl_dpa"
-+
-+static struct dentry *dpa_debugfs_root;
-+
-+static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file);
-+static ssize_t dpa_loop_write(struct file *f,
-+	const char __user *buf, size_t count, loff_t *off);
-+
-+static const struct file_operations dpa_debugfs_lp_fops = {
-+	.open = dpa_debugfs_loop_open,
-+	.write = dpa_loop_write,
-+	.read = seq_read,
-+	.llseek = seq_lseek,
-+	.release = single_release,
-+};
-+
-+static int dpa_debugfs_loop_show(struct seq_file *file, void *offset)
-+{
-+	struct dpa_priv_s *priv;
-+
-+	BUG_ON(offset == NULL);
-+
-+	priv = netdev_priv((struct net_device *)file->private);
-+	seq_printf(file, "%d->%d\n", priv->loop_id, priv->loop_to);
-+
-+	return 0;
-+}
-+
-+static int user_input_convert(const char __user *user_buf, size_t count,
-+			      long *val)
-+{
-+	char buf[12];
-+
-+	if (count > sizeof(buf) - 1)
-+		return -EINVAL;
-+	if (copy_from_user(buf, user_buf, count))
-+		return -EFAULT;
-+	buf[count] = '\0';
-+	if (kstrtol(buf, 0, val))
-+		return -EINVAL;
-+	return 0;
-+}
-+
-+static ssize_t dpa_loop_write(struct file *f,
-+	const char __user *buf, size_t count, loff_t *off)
-+{
-+	struct dpa_priv_s *priv;
-+	struct net_device *netdev;
-+	struct seq_file *sf;
-+	int ret;
-+	long val;
-+
-+	ret = user_input_convert(buf, count, &val);
-+	if (ret)
-+		return ret;
-+
-+	sf = (struct seq_file *)f->private_data;
-+	netdev = (struct net_device *)sf->private;
-+	priv = netdev_priv(netdev);
-+
-+	priv->loop_to = ((val < 0) || (val > 20)) ?
-1 : val; -+ -+ return count; -+} -+ -+static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file) -+{ -+ int _errno; -+ const struct net_device *net_dev; -+ -+ _errno = single_open(file, dpa_debugfs_loop_show, inode->i_private); -+ if (unlikely(_errno < 0)) { -+ net_dev = (struct net_device *)inode->i_private; -+ -+ if (netif_msg_drv((struct dpa_priv_s *)netdev_priv(net_dev))) -+ netdev_err(net_dev, "single_open() = %d\n", -+ _errno); -+ } -+ -+ return _errno; -+} -+ -+ -+int dpa_netdev_debugfs_create(struct net_device *net_dev) -+{ -+ struct dpa_priv_s *priv = netdev_priv(net_dev); -+ static int cnt; -+ char loop_file_name[100]; -+ -+ if (unlikely(dpa_debugfs_root == NULL)) { -+ pr_err(KBUILD_MODNAME ": %s:%hu:%s(): \t%s\n", -+ KBUILD_BASENAME".c", __LINE__, __func__, -+ "root debugfs missing, possible module ordering issue"); -+ return -ENOMEM; -+ } -+ -+ sprintf(loop_file_name, "eth%d_loop", ++cnt); -+ priv->debugfs_loop_file = debugfs_create_file(loop_file_name, -+ S_IRUGO, -+ dpa_debugfs_root, -+ net_dev, -+ &dpa_debugfs_lp_fops); -+ if (unlikely(priv->debugfs_loop_file == NULL)) { -+ netdev_err(net_dev, "debugfs_create_file(%s/%s)", -+ dpa_debugfs_root->d_iname, -+ loop_file_name); -+ -+ return -ENOMEM; -+ } -+ return 0; -+} -+ -+void dpa_netdev_debugfs_remove(struct net_device *net_dev) -+{ -+ struct dpa_priv_s *priv = netdev_priv(net_dev); -+ -+ debugfs_remove(priv->debugfs_loop_file); -+} -+ -+int __init dpa_debugfs_module_init(void) -+{ -+ int _errno = 0; -+ -+ pr_info(KBUILD_MODNAME ": " DPA_DEBUGFS_DESCRIPTION "\n"); -+ -+ dpa_debugfs_root = debugfs_create_dir(DPA_ETH_DEBUGFS_ROOT, NULL); -+ -+ if (unlikely(dpa_debugfs_root == NULL)) { -+ _errno = -ENOMEM; -+ pr_err(KBUILD_MODNAME ": %s:%hu:%s():\n", -+ KBUILD_BASENAME".c", __LINE__, __func__); -+ pr_err("\tdebugfs_create_dir(%s/"KBUILD_MODNAME") = %d\n", -+ DPA_ETH_DEBUGFS_ROOT, _errno); -+ } -+ -+ return _errno; -+} -+ -+void __exit dpa_debugfs_module_exit(void) -+{ -+ debugfs_remove(dpa_debugfs_root); -+} ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h -@@ -0,0 +1,43 @@ -+/* Copyright 2008-2013 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef DPAA_DEBUGFS_H_
-+#define DPAA_DEBUGFS_H_
-+
-+#include <linux/netdevice.h>
-+#include <linux/dcache.h>	/* struct dentry needed in dpaa_eth.h */
-+
-+int dpa_netdev_debugfs_create(struct net_device *net_dev);
-+void dpa_netdev_debugfs_remove(struct net_device *net_dev);
-+int __init dpa_debugfs_module_init(void);
-+void __exit dpa_debugfs_module_exit(void);
-+
-+#endif /* DPAA_DEBUGFS_H_ */
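Built together, dpaa_debugfs.c and this header expose one loopback knob per interface. A sketch of driving it from user space, assuming debugfs is mounted at /sys/kernel/debug and the interface received the name eth1_loop from the shared counter in dpa_netdev_debugfs_create(); the file is created S_IRUGO, so the write path effectively requires root, and any value outside 0..20 disables looping by storing -1.

/* Illustrative only: drive the ethN_loop control created above. Path
 * and file name are assumptions based on DPA_ETH_DEBUGFS_ROOT
 * ("fsl_dpa") and the "eth%d_loop" template. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/fsl_dpa/eth1_loop";
	char line[32];
	FILE *f = fopen(path, "r+");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("3", f);		/* dpa_loop_write(): loop Rx to loop_id 3 */
	fflush(f);
	rewind(f);
	if (fgets(line, sizeof(line), f))
		printf("%s\n", line);	/* dpa_debugfs_loop_show(): "1->3" */
	fclose(f);
	return 0;
}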
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
-@@ -0,0 +1,1183 @@
-+/* Copyright 2008-2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
-+#define pr_fmt(fmt) \
-+	KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
-+	KBUILD_BASENAME".c", __LINE__, __func__
-+#else
-+#define pr_fmt(fmt) \
-+	KBUILD_MODNAME ": " fmt
-+#endif
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/of_mdio.h>
-+#include <linux/of_net.h>
-+#include <linux/kthread.h>
-+#include <linux/io.h>
-+#include <linux/if_arp.h>	/* arp_hdr_len() */
-+#include <linux/if_vlan.h>	/* VLAN_HLEN */
-+#include <linux/icmp.h>	/* struct icmphdr */
-+#include <linux/ip.h>	/* struct iphdr */
-+#include <linux/ipv6.h>	/* struct ipv6hdr */
-+#include <linux/udp.h>	/* struct udphdr */
-+#include <linux/tcp.h>	/* struct tcphdr */
-+#include <linux/net.h>	/* net_ratelimit() */
-+#include <linux/if_ether.h>	/* ETH_P_IP and ETH_P_IPV6 */
-+#include <linux/highmem.h>
-+#include <linux/percpu.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/fsl_bman.h>
-+
-+#include "fsl_fman.h"
-+#include "fm_ext.h"
-+#include "fm_port_ext.h"
-+
-+#include "mac.h"
-+#include "dpaa_eth.h"
-+#include "dpaa_eth_common.h"
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+#include "dpaa_debugfs.h"
-+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
-+
-+/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
-+ * using trace events only need to #include <dpaa_eth_trace.h>
-+ */
-+#define CREATE_TRACE_POINTS
-+#include "dpaa_eth_trace.h"
-+
-+#define DPA_NAPI_WEIGHT 64
-+
-+/* Valid checksum indication */
-+#define DPA_CSUM_VALID 0xFFFF
-+
-+#define DPA_DESCRIPTION "FSL DPAA Ethernet driver"
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+
-+MODULE_AUTHOR("Andy Fleming <afleming@freescale.com>");
-+
-+MODULE_DESCRIPTION(DPA_DESCRIPTION);
-+
-+static uint8_t debug = -1;
-+module_param(debug, byte, S_IRUGO);
-+MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
-+
-+/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
-+static uint16_t tx_timeout = 1000;
-+module_param(tx_timeout, ushort, S_IRUGO);
-+MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
-+
-+static const char rtx[][3] = {
-+	[RX] = "RX",
-+	[TX] = "TX"
-+};
-+
-+/* BM */
-+
-+#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8)
-+
-+static uint8_t dpa_priv_common_bpid;
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+struct net_device *dpa_loop_netdevs[20];
-+#endif
-+
-+#ifdef CONFIG_PM
-+
-+static int dpaa_suspend(struct device *dev)
-+{
-+	struct net_device *net_dev;
-+	struct dpa_priv_s *priv;
-+	struct mac_device *mac_dev;
-+	int err = 0;
-+
-+	net_dev = dev_get_drvdata(dev);
-+
-+	if (net_dev->flags & IFF_UP) {
-+		priv = netdev_priv(net_dev);
-+		mac_dev = priv->mac_dev;
-+
-+		if (priv->wol & DPAA_WOL_MAGIC) {
-+			err = priv->mac_dev->set_wol(mac_dev->port_dev[RX],
-+				priv->mac_dev->get_mac_handle(mac_dev), true);
-+			if (err) {
-+				netdev_err(net_dev, "set_wol() = %d\n", err);
-+				goto set_wol_failed;
-+			}
-+		}
-+
-+		err = fm_port_suspend(mac_dev->port_dev[RX]);
-+		if (err) {
-+			netdev_err(net_dev, "fm_port_suspend(RX) = %d\n", err);
-+			goto rx_port_suspend_failed;
-+		}
-+
-+		err = fm_port_suspend(mac_dev->port_dev[TX]);
-+		if (err) {
-+			netdev_err(net_dev, "fm_port_suspend(TX) = %d\n", err);
-+			goto tx_port_suspend_failed;
-+		}
-+	}
-+
-+	return 0;
-+
-+tx_port_suspend_failed:
-+	fm_port_resume(mac_dev->port_dev[RX]);
-+rx_port_suspend_failed:
-+	if (priv->wol & DPAA_WOL_MAGIC) {
-+		priv->mac_dev->set_wol(mac_dev->port_dev[RX],
-+			priv->mac_dev->get_mac_handle(mac_dev), false);
-+	}
-+set_wol_failed:
-+	return err;
-+}
-+
-+static int dpaa_resume(struct device *dev)
-+{
-+	struct net_device *net_dev;
-+	struct dpa_priv_s *priv;
-+	struct mac_device *mac_dev;
-+	int err = 0;
-+
-+	net_dev = dev_get_drvdata(dev);
-+
-+	if (net_dev->flags & IFF_UP) {
-+		priv = netdev_priv(net_dev);
-+		mac_dev = priv->mac_dev;
-+
-+		err = fm_port_resume(mac_dev->port_dev[TX]);
-+		if (err) {
-+			netdev_err(net_dev, "fm_port_resume(TX) = %d\n", err);
-+			goto
resume_failed; -+ } -+ -+ err = fm_port_resume(mac_dev->port_dev[RX]); -+ if (err) { -+ netdev_err(net_dev, "fm_port_resume(RX) = %d\n", err); -+ goto resume_failed; -+ } -+ -+ if (priv->wol & DPAA_WOL_MAGIC) { -+ err = priv->mac_dev->set_wol(mac_dev->port_dev[RX], -+ priv->mac_dev->get_mac_handle(mac_dev), false); -+ if (err) { -+ netdev_err(net_dev, "set_wol() = %d\n", err); -+ goto resume_failed; -+ } -+ } -+ } -+ -+ return 0; -+ -+resume_failed: -+ return err; -+} -+ -+static const struct dev_pm_ops dpaa_pm_ops = { -+ .suspend = dpaa_suspend, -+ .resume = dpaa_resume, -+}; -+ -+#define DPAA_PM_OPS (&dpaa_pm_ops) -+ -+#else /* CONFIG_PM */ -+ -+#define DPAA_PM_OPS NULL -+ -+#endif /* CONFIG_PM */ -+ -+/* Checks whether the checksum field in Parse Results array is valid -+ * (equals 0xFFFF) and increments the .cse counter otherwise -+ */ -+static inline void -+dpa_csum_validation(const struct dpa_priv_s *priv, -+ struct dpa_percpu_priv_s *percpu_priv, -+ const struct qm_fd *fd) -+{ -+ dma_addr_t addr = qm_fd_addr(fd); -+ struct dpa_bp *dpa_bp = priv->dpa_bp; -+ void *frm = phys_to_virt(addr); -+ fm_prs_result_t *parse_result; -+ -+ if (unlikely(!frm)) -+ return; -+ -+ dma_sync_single_for_cpu(dpa_bp->dev, addr, DPA_RX_PRIV_DATA_SIZE + -+ DPA_PARSE_RESULTS_SIZE, DMA_BIDIRECTIONAL); -+ -+ parse_result = (fm_prs_result_t *)(frm + DPA_RX_PRIV_DATA_SIZE); -+ -+ if (parse_result->cksum != DPA_CSUM_VALID) -+ percpu_priv->rx_errors.cse++; -+} -+ -+static void _dpa_rx_error(struct net_device *net_dev, -+ const struct dpa_priv_s *priv, -+ struct dpa_percpu_priv_s *percpu_priv, -+ const struct qm_fd *fd, -+ u32 fqid) -+{ -+ /* limit common, possibly innocuous Rx FIFO Overflow errors' -+ * interference with zero-loss convergence benchmark results. -+ */ -+ if (likely(fd->status & FM_FD_STAT_ERR_PHYSICAL)) -+ pr_warn_once("fsl-dpa: non-zero error counters in fman statistics (sysfs)\n"); -+ else -+ if (netif_msg_hw(priv) && net_ratelimit()) -+ netdev_dbg(net_dev, "Err FD status = 0x%08x\n", -+ fd->status & FM_FD_STAT_RX_ERRORS); -+#ifdef CONFIG_FSL_DPAA_HOOKS -+ if (dpaa_eth_hooks.rx_error && -+ dpaa_eth_hooks.rx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN) -+ /* it's up to the hook to perform resource cleanup */ -+ return; -+#endif -+ percpu_priv->stats.rx_errors++; -+ -+ if (fd->status & FM_PORT_FRM_ERR_DMA) -+ percpu_priv->rx_errors.dme++; -+ if (fd->status & FM_PORT_FRM_ERR_PHYSICAL) -+ percpu_priv->rx_errors.fpe++; -+ if (fd->status & FM_PORT_FRM_ERR_SIZE) -+ percpu_priv->rx_errors.fse++; -+ if (fd->status & FM_PORT_FRM_ERR_PRS_HDR_ERR) -+ percpu_priv->rx_errors.phe++; -+ if (fd->status & FM_FD_STAT_L4CV) -+ dpa_csum_validation(priv, percpu_priv, fd); -+ -+ dpa_fd_release(net_dev, fd); -+} -+ -+static void _dpa_tx_error(struct net_device *net_dev, -+ const struct dpa_priv_s *priv, -+ struct dpa_percpu_priv_s *percpu_priv, -+ const struct qm_fd *fd, -+ u32 fqid) -+{ -+ struct sk_buff *skb; -+ -+ if (netif_msg_hw(priv) && net_ratelimit()) -+ netdev_warn(net_dev, "FD status = 0x%08x\n", -+ fd->status & FM_FD_STAT_TX_ERRORS); -+#ifdef CONFIG_FSL_DPAA_HOOKS -+ if (dpaa_eth_hooks.tx_error && -+ dpaa_eth_hooks.tx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN) -+ /* now the hook must ensure proper cleanup */ -+ return; -+#endif -+ percpu_priv->stats.tx_errors++; -+ -+ /* If we intended the buffers from this frame to go into the bpools -+ * when the FMan transmit was done, we need to put it in manually. 
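The counter bookkeeping in _dpa_rx_error() above is a straight bit-to-counter mapping. A stand-alone rendition follows, with made-up bit values standing in for the FM_PORT_FRM_ERR_* flags that live in fm_port_ext.h, outside this hunk:

/* Stand-in illustration of _dpa_rx_error()'s counter bookkeeping;
 * the bit values below are invented for the demo. */
#include <stdio.h>

#define ERR_DMA      0x1	/* assumed stand-in */
#define ERR_PHYSICAL 0x2	/* assumed stand-in */
#define ERR_SIZE     0x4	/* assumed stand-in */
#define ERR_PRS_HDR  0x8	/* assumed stand-in */

struct rx_errors { unsigned long dme, fpe, fse, phe; };

static void count_rx_error(struct rx_errors *e, unsigned status)
{
	if (status & ERR_DMA)
		e->dme++;	/* DMA error */
	if (status & ERR_PHYSICAL)
		e->fpe++;	/* frame physical error */
	if (status & ERR_SIZE)
		e->fse++;	/* frame size error */
	if (status & ERR_PRS_HDR)
		e->phe++;	/* parser header error */
}

int main(void)
{
	struct rx_errors e = { 0 };

	count_rx_error(&e, ERR_DMA | ERR_SIZE);
	printf("dme=%lu fpe=%lu fse=%lu phe=%lu\n", e.dme, e.fpe, e.fse, e.phe);
	return 0;
}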
-+ */ -+ if (fd->bpid != 0xff) { -+ dpa_fd_release(net_dev, fd); -+ return; -+ } -+ -+ skb = _dpa_cleanup_tx_fd(priv, fd); -+ dev_kfree_skb(skb); -+} -+ -+/* Helper function to factor out frame validation logic on all Rx paths. Its -+ * purpose is to extract from the Parse Results structure information about -+ * the integrity of the frame, its checksum, the length of the parsed headers -+ * and whether the frame is suitable for GRO. -+ * -+ * Assumes no parser errors, since any error frame is dropped before this -+ * function is called. -+ * -+ * @skb will have its ip_summed field overwritten; -+ * @use_gro will only be written with 0, if the frame is definitely not -+ * GRO-able; otherwise, it will be left unchanged; -+ * @hdr_size will be written with a safe value, at least the size of the -+ * headers' length. -+ */ -+void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results, -+ const struct qm_fd *fd, -+ struct sk_buff *skb, int *use_gro) -+{ -+ if (fd->status & FM_FD_STAT_L4CV) { -+ /* The parser has run and performed L4 checksum validation. -+ * We know there were no parser errors (and implicitly no -+ * L4 csum error), otherwise we wouldn't be here. -+ */ -+ skb->ip_summed = CHECKSUM_UNNECESSARY; -+ -+ /* Don't go through GRO for certain types of traffic that -+ * we know are not GRO-able, such as dgram-based protocols. -+ * In the worst-case scenarios, such as small-pkt terminating -+ * UDP, the extra GRO processing would be overkill. -+ * -+ * The only protocol the Parser supports that is also GRO-able -+ * is currently TCP. -+ */ -+ if (!fm_l4_frame_is_tcp(parse_results)) -+ *use_gro = 0; -+ -+ return; -+ } -+ -+ /* We're here because either the parser didn't run or the L4 checksum -+ * was not verified. This may include the case of a UDP frame with -+ * checksum zero or an L4 proto other than TCP/UDP -+ */ -+ skb->ip_summed = CHECKSUM_NONE; -+ -+ /* Bypass GRO for unknown traffic or if no PCDs are applied */ -+ *use_gro = 0; -+} -+ -+int dpaa_eth_poll(struct napi_struct *napi, int budget) -+{ -+ struct dpa_napi_portal *np = -+ container_of(napi, struct dpa_napi_portal, napi); -+ -+ int cleaned = qman_p_poll_dqrr(np->p, budget); -+ -+ if (cleaned < budget) { -+ int tmp; -+ napi_complete(napi); -+ tmp = qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); -+ DPA_BUG_ON(tmp); -+ } -+ -+ return cleaned; -+} -+EXPORT_SYMBOL(dpaa_eth_poll); -+ -+static void __hot _dpa_tx_conf(struct net_device *net_dev, -+ const struct dpa_priv_s *priv, -+ struct dpa_percpu_priv_s *percpu_priv, -+ const struct qm_fd *fd, -+ u32 fqid) -+{ -+ struct sk_buff *skb; -+ -+ /* do we need the timestamp for the error frames? */ -+ -+ if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS) != 0) { -+ if (netif_msg_hw(priv) && net_ratelimit()) -+ netdev_warn(net_dev, "FD status = 0x%08x\n", -+ fd->status & FM_FD_STAT_TX_ERRORS); -+ -+ percpu_priv->stats.tx_errors++; -+ } -+ -+ /* hopefully we need not get the timestamp before the hook */ -+#ifdef CONFIG_FSL_DPAA_HOOKS -+ if (dpaa_eth_hooks.tx_confirm && dpaa_eth_hooks.tx_confirm(net_dev, -+ fd, fqid) == DPAA_ETH_STOLEN) -+ /* it's the hook that must now perform cleanup */ -+ return; -+#endif -+ /* This might not perfectly reflect the reality, if the core dequeuing -+ * the Tx confirmation is different from the one that did the enqueue, -+ * but at least it'll show up in the total count. 
-+ */ -+ percpu_priv->tx_confirm++; -+ -+ skb = _dpa_cleanup_tx_fd(priv, fd); -+ -+ dev_kfree_skb(skb); -+} -+ -+enum qman_cb_dqrr_result -+priv_rx_error_dqrr(struct qman_portal *portal, -+ struct qman_fq *fq, -+ const struct qm_dqrr_entry *dq) -+{ -+ struct net_device *net_dev; -+ struct dpa_priv_s *priv; -+ struct dpa_percpu_priv_s *percpu_priv; -+ int *count_ptr; -+ -+ net_dev = ((struct dpa_fq *)fq)->net_dev; -+ priv = netdev_priv(net_dev); -+ -+ percpu_priv = raw_cpu_ptr(priv->percpu_priv); -+ count_ptr = raw_cpu_ptr(priv->dpa_bp->percpu_count); -+ -+ if (dpaa_eth_napi_schedule(percpu_priv, portal)) -+ return qman_cb_dqrr_stop; -+ -+ if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp, count_ptr))) -+ /* Unable to refill the buffer pool due to insufficient -+ * system memory. Just release the frame back into the pool, -+ * otherwise we'll soon end up with an empty buffer pool. -+ */ -+ dpa_fd_release(net_dev, &dq->fd); -+ else -+ _dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); -+ -+ return qman_cb_dqrr_consume; -+} -+ -+ -+enum qman_cb_dqrr_result __hot -+priv_rx_default_dqrr(struct qman_portal *portal, -+ struct qman_fq *fq, -+ const struct qm_dqrr_entry *dq) -+{ -+ struct net_device *net_dev; -+ struct dpa_priv_s *priv; -+ struct dpa_percpu_priv_s *percpu_priv; -+ int *count_ptr; -+ struct dpa_bp *dpa_bp; -+ -+ net_dev = ((struct dpa_fq *)fq)->net_dev; -+ priv = netdev_priv(net_dev); -+ dpa_bp = priv->dpa_bp; -+ -+ /* Trace the Rx fd */ -+ trace_dpa_rx_fd(net_dev, fq, &dq->fd); -+ -+ /* IRQ handler, non-migratable; safe to use raw_cpu_ptr here */ -+ percpu_priv = raw_cpu_ptr(priv->percpu_priv); -+ count_ptr = raw_cpu_ptr(dpa_bp->percpu_count); -+ -+ if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal))) -+ return qman_cb_dqrr_stop; -+ -+ /* Vale of plenty: make sure we didn't run out of buffers */ -+ -+ if (unlikely(dpaa_eth_refill_bpools(dpa_bp, count_ptr))) -+ /* Unable to refill the buffer pool due to insufficient -+ * system memory. Just release the frame back into the pool, -+ * otherwise we'll soon end up with an empty buffer pool. 
-+ */ -+ dpa_fd_release(net_dev, &dq->fd); -+ else -+ _dpa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid, -+ count_ptr); -+ -+ return qman_cb_dqrr_consume; -+} -+ -+enum qman_cb_dqrr_result -+priv_tx_conf_error_dqrr(struct qman_portal *portal, -+ struct qman_fq *fq, -+ const struct qm_dqrr_entry *dq) -+{ -+ struct net_device *net_dev; -+ struct dpa_priv_s *priv; -+ struct dpa_percpu_priv_s *percpu_priv; -+ -+ net_dev = ((struct dpa_fq *)fq)->net_dev; -+ priv = netdev_priv(net_dev); -+ -+ percpu_priv = raw_cpu_ptr(priv->percpu_priv); -+ -+ if (dpaa_eth_napi_schedule(percpu_priv, portal)) -+ return qman_cb_dqrr_stop; -+ -+ _dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); -+ -+ return qman_cb_dqrr_consume; -+} -+ -+enum qman_cb_dqrr_result __hot -+priv_tx_conf_default_dqrr(struct qman_portal *portal, -+ struct qman_fq *fq, -+ const struct qm_dqrr_entry *dq) -+{ -+ struct net_device *net_dev; -+ struct dpa_priv_s *priv; -+ struct dpa_percpu_priv_s *percpu_priv; -+ -+ net_dev = ((struct dpa_fq *)fq)->net_dev; -+ priv = netdev_priv(net_dev); -+ -+ /* Trace the fd */ -+ trace_dpa_tx_conf_fd(net_dev, fq, &dq->fd); -+ -+ /* Non-migratable context, safe to use raw_cpu_ptr */ -+ percpu_priv = raw_cpu_ptr(priv->percpu_priv); -+ -+ if (dpaa_eth_napi_schedule(percpu_priv, portal)) -+ return qman_cb_dqrr_stop; -+ -+ _dpa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); -+ -+ return qman_cb_dqrr_consume; -+} -+ -+void priv_ern(struct qman_portal *portal, -+ struct qman_fq *fq, -+ const struct qm_mr_entry *msg) -+{ -+ struct net_device *net_dev; -+ const struct dpa_priv_s *priv; -+ struct sk_buff *skb; -+ struct dpa_percpu_priv_s *percpu_priv; -+ struct qm_fd fd = msg->ern.fd; -+ -+ net_dev = ((struct dpa_fq *)fq)->net_dev; -+ priv = netdev_priv(net_dev); -+ /* Non-migratable context, safe to use raw_cpu_ptr */ -+ percpu_priv = raw_cpu_ptr(priv->percpu_priv); -+ -+ percpu_priv->stats.tx_dropped++; -+ percpu_priv->stats.tx_fifo_errors++; -+ count_ern(percpu_priv, msg); -+ -+ /* If we intended this buffer to go into the pool -+ * when the FM was done, we need to put it in -+ * manually. 
-+ */ -+ if (msg->ern.fd.bpid != 0xff) { -+ dpa_fd_release(net_dev, &fd); -+ return; -+ } -+ -+ skb = _dpa_cleanup_tx_fd(priv, &fd); -+ dev_kfree_skb_any(skb); -+} -+ -+const struct dpa_fq_cbs_t private_fq_cbs = { -+ .rx_defq = { .cb = { .dqrr = priv_rx_default_dqrr } }, -+ .tx_defq = { .cb = { .dqrr = priv_tx_conf_default_dqrr } }, -+ .rx_errq = { .cb = { .dqrr = priv_rx_error_dqrr } }, -+ .tx_errq = { .cb = { .dqrr = priv_tx_conf_error_dqrr } }, -+ .egress_ern = { .cb = { .ern = priv_ern } } -+}; -+EXPORT_SYMBOL(private_fq_cbs); -+ -+static void dpaa_eth_napi_enable(struct dpa_priv_s *priv) -+{ -+ struct dpa_percpu_priv_s *percpu_priv; -+ int i, j; -+ -+ for_each_possible_cpu(i) { -+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i); -+ -+ for (j = 0; j < qman_portal_max; j++) -+ napi_enable(&percpu_priv->np[j].napi); -+ } -+} -+ -+static void dpaa_eth_napi_disable(struct dpa_priv_s *priv) -+{ -+ struct dpa_percpu_priv_s *percpu_priv; -+ int i, j; -+ -+ for_each_possible_cpu(i) { -+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i); -+ -+ for (j = 0; j < qman_portal_max; j++) -+ napi_disable(&percpu_priv->np[j].napi); -+ } -+} -+ -+static int __cold dpa_eth_priv_start(struct net_device *net_dev) -+{ -+ int err; -+ struct dpa_priv_s *priv; -+ -+ priv = netdev_priv(net_dev); -+ -+ dpaa_eth_napi_enable(priv); -+ -+ err = dpa_start(net_dev); -+ if (err < 0) -+ dpaa_eth_napi_disable(priv); -+ -+ return err; -+} -+ -+ -+ -+static int __cold dpa_eth_priv_stop(struct net_device *net_dev) -+{ -+ int _errno; -+ struct dpa_priv_s *priv; -+ -+ _errno = dpa_stop(net_dev); -+ /* Allow NAPI to consume any frame still in the Rx/TxConfirm -+ * ingress queues. This is to avoid a race between the current -+ * context and ksoftirqd which could leave NAPI disabled while -+ * in fact there's still Rx traffic to be processed. 
-+ */ -+ usleep_range(5000, 10000); -+ -+ priv = netdev_priv(net_dev); -+ dpaa_eth_napi_disable(priv); -+ -+ return _errno; -+} -+ -+#ifdef CONFIG_NET_POLL_CONTROLLER -+static void dpaa_eth_poll_controller(struct net_device *net_dev) -+{ -+ struct dpa_priv_s *priv = netdev_priv(net_dev); -+ struct dpa_percpu_priv_s *percpu_priv = -+ raw_cpu_ptr(priv->percpu_priv); -+ struct qman_portal *p; -+ const struct qman_portal_config *pc; -+ struct dpa_napi_portal *np; -+ -+ p = (struct qman_portal *)qman_get_affine_portal(smp_processor_id()); -+ pc = qman_p_get_portal_config(p); -+ np = &percpu_priv->np[pc->index]; -+ -+ qman_p_irqsource_remove(np->p, QM_PIRQ_DQRI); -+ qman_p_poll_dqrr(np->p, np->napi.weight); -+ qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); -+} -+#endif -+ -+static const struct net_device_ops dpa_private_ops = { -+ .ndo_open = dpa_eth_priv_start, -+ .ndo_start_xmit = dpa_tx, -+ .ndo_stop = dpa_eth_priv_stop, -+ .ndo_tx_timeout = dpa_timeout, -+ .ndo_get_stats64 = dpa_get_stats64, -+ .ndo_set_mac_address = dpa_set_mac_address, -+ .ndo_validate_addr = eth_validate_addr, -+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE -+ .ndo_select_queue = dpa_select_queue, -+#endif -+ .ndo_change_mtu = dpa_change_mtu, -+ .ndo_set_rx_mode = dpa_set_rx_mode, -+ .ndo_init = dpa_ndo_init, -+ .ndo_set_features = dpa_set_features, -+ .ndo_fix_features = dpa_fix_features, -+ .ndo_do_ioctl = dpa_ioctl, -+#ifdef CONFIG_NET_POLL_CONTROLLER -+ .ndo_poll_controller = dpaa_eth_poll_controller, -+#endif -+}; -+ -+static int dpa_private_napi_add(struct net_device *net_dev) -+{ -+ struct dpa_priv_s *priv = netdev_priv(net_dev); -+ struct dpa_percpu_priv_s *percpu_priv; -+ int i, cpu; -+ -+ for_each_possible_cpu(cpu) { -+ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); -+ -+ percpu_priv->np = devm_kzalloc(net_dev->dev.parent, -+ qman_portal_max * sizeof(struct dpa_napi_portal), -+ GFP_KERNEL); -+ -+ if (unlikely(percpu_priv->np == NULL)) { -+ dev_err(net_dev->dev.parent, "devm_kzalloc() failed\n"); -+ return -ENOMEM; -+ } -+ -+ for (i = 0; i < qman_portal_max; i++) -+ netif_napi_add(net_dev, &percpu_priv->np[i].napi, -+ dpaa_eth_poll, DPA_NAPI_WEIGHT); -+ } -+ -+ return 0; -+} -+ -+void dpa_private_napi_del(struct net_device *net_dev) -+{ -+ struct dpa_priv_s *priv = netdev_priv(net_dev); -+ struct dpa_percpu_priv_s *percpu_priv; -+ int i, cpu; -+ -+ for_each_possible_cpu(cpu) { -+ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); -+ -+ if (percpu_priv->np) { -+ for (i = 0; i < qman_portal_max; i++) -+ netif_napi_del(&percpu_priv->np[i].napi); -+ -+ devm_kfree(net_dev->dev.parent, percpu_priv->np); -+ } -+ } -+} -+EXPORT_SYMBOL(dpa_private_napi_del); -+ -+static int dpa_private_netdev_init(struct net_device *net_dev) -+{ -+ int i; -+ struct dpa_priv_s *priv = netdev_priv(net_dev); -+ struct dpa_percpu_priv_s *percpu_priv; -+ const uint8_t *mac_addr; -+ -+ /* Although we access another CPU's private data here -+ * we do it at initialization so it is safe -+ */ -+ for_each_possible_cpu(i) { -+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i); -+ percpu_priv->net_dev = net_dev; -+ } -+ -+ net_dev->netdev_ops = &dpa_private_ops; -+ mac_addr = priv->mac_dev->addr; -+ -+ net_dev->mem_start = priv->mac_dev->res->start; -+ net_dev->mem_end = priv->mac_dev->res->end; -+ -+ net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | -+ NETIF_F_LLTX); -+ -+ /* Advertise S/G and HIGHDMA support for private interfaces */ -+ net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA; -+ /* Recent kernels enable GSO automatically, 
if -+ * we declare NETIF_F_SG. For conformity, we'll -+ * still declare GSO explicitly. -+ */ -+ net_dev->features |= NETIF_F_GSO; -+ -+ /* Advertise GRO support */ -+ net_dev->features |= NETIF_F_GRO; -+ -+ return dpa_netdev_init(net_dev, mac_addr, tx_timeout); -+} -+ -+static struct dpa_bp * __cold -+dpa_priv_bp_probe(struct device *dev) -+{ -+ struct dpa_bp *dpa_bp; -+ -+ dpa_bp = devm_kzalloc(dev, sizeof(*dpa_bp), GFP_KERNEL); -+ if (unlikely(dpa_bp == NULL)) { -+ dev_err(dev, "devm_kzalloc() failed\n"); -+ return ERR_PTR(-ENOMEM); -+ } -+ -+ dpa_bp->percpu_count = devm_alloc_percpu(dev, *dpa_bp->percpu_count); -+ dpa_bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT; -+ -+ dpa_bp->seed_cb = dpa_bp_priv_seed; -+ dpa_bp->free_buf_cb = _dpa_bp_free_pf; -+ -+ return dpa_bp; -+} -+ -+/* Place all ingress FQs (Rx Default, Rx Error, PCD FQs) in a dedicated CGR. -+ * We won't be sending congestion notifications to FMan; for now, we just use -+ * this CGR to generate enqueue rejections to FMan in order to drop the frames -+ * before they reach our ingress queues and eat up memory. -+ */ -+static int dpaa_eth_priv_ingress_cgr_init(struct dpa_priv_s *priv) -+{ -+ struct qm_mcc_initcgr initcgr; -+ u32 cs_th; -+ int err; -+ -+ err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid); -+ if (err < 0) { -+ pr_err("Error %d allocating CGR ID\n", err); -+ goto out_error; -+ } -+ -+ /* Enable CS TD, but disable Congestion State Change Notifications. */ -+ initcgr.we_mask = QM_CGR_WE_CS_THRES; -+ initcgr.cgr.cscn_en = QM_CGR_EN; -+ cs_th = CONFIG_FSL_DPAA_INGRESS_CS_THRESHOLD; -+ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1); -+ -+ initcgr.we_mask |= QM_CGR_WE_CSTD_EN; -+ initcgr.cgr.cstd_en = QM_CGR_EN; -+ -+ /* This is actually a hack, because this CGR will be associated with -+ * our affine SWP. However, we'll place our ingress FQs in it. -+ */ -+ err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT, -+ &initcgr); -+ if (err < 0) { -+ pr_err("Error %d creating ingress CGR with ID %d\n", err, -+ priv->ingress_cgr.cgrid); -+ qman_release_cgrid(priv->ingress_cgr.cgrid); -+ goto out_error; -+ } -+ pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n", -+ priv->ingress_cgr.cgrid, priv->mac_dev->addr); -+ -+ /* struct qman_cgr allows special cgrid values (i.e. outside the 0..255 -+ * range), but we have no common initialization path between the -+ * different variants of the DPAA Eth driver, so we do it here rather -+ * than modifying every other variant than "private Eth". 
-+ */ -+ priv->use_ingress_cgr = true; -+ -+out_error: -+ return err; -+} -+ -+static int dpa_priv_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp, -+ size_t count) -+{ -+ struct dpa_priv_s *priv = netdev_priv(net_dev); -+ int i; -+ -+ if (netif_msg_probe(priv)) -+ dev_dbg(net_dev->dev.parent, -+ "Using private BM buffer pools\n"); -+ -+ priv->bp_count = count; -+ -+ for (i = 0; i < count; i++) { -+ int err; -+ err = dpa_bp_alloc(&dpa_bp[i]); -+ if (err < 0) { -+ dpa_bp_free(priv); -+ priv->dpa_bp = NULL; -+ return err; -+ } -+ -+ priv->dpa_bp = &dpa_bp[i]; -+ } -+ -+ dpa_priv_common_bpid = priv->dpa_bp->bpid; -+ return 0; -+} -+ -+static const struct of_device_id dpa_match[]; -+ -+#ifdef CONFIG_FSL_DPAA_DBG_LOOP -+static int dpa_new_loop_id(void) -+{ -+ static int if_id; -+ -+ return if_id++; -+} -+#endif -+ -+static int -+dpaa_eth_priv_probe(struct platform_device *_of_dev) -+{ -+ int err = 0, i, channel; -+ struct device *dev; -+ struct device_node *dpa_node; -+ struct dpa_bp *dpa_bp; -+ struct dpa_fq *dpa_fq, *tmp; -+ size_t count = 1; -+ struct net_device *net_dev = NULL; -+ struct dpa_priv_s *priv = NULL; -+ struct dpa_percpu_priv_s *percpu_priv; -+ struct fm_port_fqs port_fqs; -+ struct dpa_buffer_layout_s *buf_layout = NULL; -+ struct mac_device *mac_dev; -+ struct task_struct *kth; -+ -+ dev = &_of_dev->dev; -+ -+ dpa_node = dev->of_node; -+ -+ if (!of_device_is_available(dpa_node)) -+ return -ENODEV; -+ -+ /* Get the buffer pools assigned to this interface; -+ * run only once the default pool probing code -+ */ -+ dpa_bp = (dpa_bpid2pool(dpa_priv_common_bpid)) ? : -+ dpa_priv_bp_probe(dev); -+ if (IS_ERR(dpa_bp)) -+ return PTR_ERR(dpa_bp); -+ -+ /* Allocate this early, so we can store relevant information in -+ * the private area (needed by 1588 code in dpa_mac_probe) -+ */ -+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES); -+ if (!net_dev) { -+ dev_err(dev, "alloc_etherdev_mq() failed\n"); -+ goto alloc_etherdev_mq_failed; -+ } -+ -+ /* Do this here, so we can be verbose early */ -+ SET_NETDEV_DEV(net_dev, dev); -+ dev_set_drvdata(dev, net_dev); -+ -+ priv = netdev_priv(net_dev); -+ priv->net_dev = net_dev; -+ strcpy(priv->if_type, "private"); -+ -+ priv->msg_enable = netif_msg_init(debug, -1); -+ -+#ifdef CONFIG_FSL_DPAA_DBG_LOOP -+ priv->loop_id = dpa_new_loop_id(); -+ priv->loop_to = -1; /* disabled by default */ -+ dpa_loop_netdevs[priv->loop_id] = net_dev; -+#endif -+ -+ mac_dev = dpa_mac_probe(_of_dev); -+ if (IS_ERR(mac_dev) || !mac_dev) { -+ err = PTR_ERR(mac_dev); -+ goto mac_probe_failed; -+ } -+ -+ /* We have physical ports, so we need to establish -+ * the buffer layout. -+ */ -+ buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout), -+ GFP_KERNEL); -+ if (!buf_layout) { -+ dev_err(dev, "devm_kzalloc() failed\n"); -+ goto alloc_failed; -+ } -+ dpa_set_buffers_layout(mac_dev, buf_layout); -+ -+ /* For private ports, need to compute the size of the default -+ * buffer pool, based on FMan port buffer layout;also update -+ * the maximum buffer size for private ports if necessary -+ */ -+ dpa_bp->size = dpa_bp_size(&buf_layout[RX]); -+ -+#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME -+ /* We only want to use jumbo frame optimization if we actually have -+ * L2 MAX FRM set for jumbo frames as well. 
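One construct in the probe above is easy to misread: dpa_bp = (dpa_bpid2pool(dpa_priv_common_bpid)) ? : dpa_priv_bp_probe(dev); relies on GCC's binary ?: extension, which evaluates the first operand once and reuses it when non-NULL, falling back to the second operand otherwise. A minimal stand-alone illustration (GCC/clang extension, not ISO C):

/* Minimal illustration of GCC's binary "?:" (a ?: b): the first
 * operand is evaluated once and reused when it is nonzero/non-NULL. */
#include <stdio.h>

static int calls;

static int lookup(void)	/* stand-in for dpa_bpid2pool() */
{
	calls++;
	return 0;	/* nothing cached yet */
}

static int probe(void)	/* stand-in for dpa_priv_bp_probe() */
{
	return 42;
}

int main(void)
{
	int v = lookup() ?: probe();

	printf("v=%d lookup_calls=%d\n", v, calls);	/* v=42 lookup_calls=1 */
	return 0;
}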
-+ */ -+ if (fm_get_max_frm() < 9600) -+ dev_warn(dev, -+ "Invalid configuration: if jumbo frames support is on, FSL_FM_MAX_FRAME_SIZE should be set to 9600\n"); -+#endif -+ -+ INIT_LIST_HEAD(&priv->dpa_fq_list); -+ -+ memset(&port_fqs, 0, sizeof(port_fqs)); -+ -+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs, true, RX); -+ if (!err) -+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, -+ &port_fqs, true, TX); -+ -+ if (err < 0) -+ goto fq_probe_failed; -+ -+ /* bp init */ -+ -+ err = dpa_priv_bp_create(net_dev, dpa_bp, count); -+ -+ if (err < 0) -+ goto bp_create_failed; -+ -+ priv->mac_dev = mac_dev; -+ -+ channel = dpa_get_channel(); -+ -+ if (channel < 0) { -+ err = channel; -+ goto get_channel_failed; -+ } -+ -+ priv->channel = (uint16_t)channel; -+ -+ /* Start a thread that will walk the cpus with affine portals -+ * and add this pool channel to each's dequeue mask. -+ */ -+ kth = kthread_run(dpaa_eth_add_channel, -+ (void *)(unsigned long)priv->channel, -+ "dpaa_%p:%d", net_dev, priv->channel); -+ if (!kth) { -+ err = -ENOMEM; -+ goto add_channel_failed; -+ } -+ -+ dpa_fq_setup(priv, &private_fq_cbs, priv->mac_dev->port_dev[TX]); -+ -+ /* Create a congestion group for this netdev, with -+ * dynamically-allocated CGR ID. -+ * Must be executed after probing the MAC, but before -+ * assigning the egress FQs to the CGRs. -+ */ -+ err = dpaa_eth_cgr_init(priv); -+ if (err < 0) { -+ dev_err(dev, "Error initializing CGR\n"); -+ goto tx_cgr_init_failed; -+ } -+ err = dpaa_eth_priv_ingress_cgr_init(priv); -+ if (err < 0) { -+ dev_err(dev, "Error initializing ingress CGR\n"); -+ goto rx_cgr_init_failed; -+ } -+ -+ /* Add the FQs to the interface, and make them active */ -+ list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) { -+ err = dpa_fq_init(dpa_fq, false); -+ if (err < 0) -+ goto fq_alloc_failed; -+ } -+ -+ priv->buf_layout = buf_layout; -+ priv->tx_headroom = dpa_get_headroom(&priv->buf_layout[TX]); -+ priv->rx_headroom = dpa_get_headroom(&priv->buf_layout[RX]); -+ -+ /* All real interfaces need their ports initialized */ -+ dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs, -+ buf_layout, dev); -+ -+#ifdef CONFIG_FMAN_PFC -+ for (i = 0; i < CONFIG_FMAN_PFC_COS_COUNT; i++) { -+ err = fm_port_set_pfc_priorities_mapping_to_qman_wq( -+ mac_dev->port_dev[TX], i, i); -+ if (unlikely(err != 0)) { -+ dev_err(dev, "Error maping PFC %u to WQ %u\n", i, i); -+ goto pfc_mapping_failed; -+ } -+ } -+#endif -+ -+ priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv); -+ -+ if (priv->percpu_priv == NULL) { -+ dev_err(dev, "devm_alloc_percpu() failed\n"); -+ err = -ENOMEM; -+ goto alloc_percpu_failed; -+ } -+ for_each_possible_cpu(i) { -+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i); -+ memset(percpu_priv, 0, sizeof(*percpu_priv)); -+ } -+ -+ /* Initialize NAPI */ -+ err = dpa_private_napi_add(net_dev); -+ -+ if (err < 0) -+ goto napi_add_failed; -+ -+ err = dpa_private_netdev_init(net_dev); -+ -+ if (err < 0) -+ goto netdev_init_failed; -+ -+ dpaa_eth_sysfs_init(&net_dev->dev); -+ -+#ifdef CONFIG_PM -+ device_set_wakeup_capable(dev, true); -+#endif -+ -+ pr_info("fsl_dpa: Probed interface %s\n", net_dev->name); -+ -+ return 0; -+ -+netdev_init_failed: -+napi_add_failed: -+ dpa_private_napi_del(net_dev); -+alloc_percpu_failed: -+#ifdef CONFIG_FMAN_PFC -+pfc_mapping_failed: -+#endif -+ dpa_fq_free(dev, &priv->dpa_fq_list); -+fq_alloc_failed: -+ qman_delete_cgr_safe(&priv->ingress_cgr); -+ qman_release_cgrid(priv->ingress_cgr.cgrid); -+rx_cgr_init_failed: -+ 
qman_delete_cgr_safe(&priv->cgr_data.cgr); -+ qman_release_cgrid(priv->cgr_data.cgr.cgrid); -+tx_cgr_init_failed: -+add_channel_failed: -+get_channel_failed: -+ dpa_bp_free(priv); -+bp_create_failed: -+fq_probe_failed: -+alloc_failed: -+mac_probe_failed: -+ dev_set_drvdata(dev, NULL); -+ free_netdev(net_dev); -+alloc_etherdev_mq_failed: -+ if (atomic_read(&dpa_bp->refs) == 0) -+ devm_kfree(dev, dpa_bp); -+ -+ return err; -+} -+ -+static const struct of_device_id dpa_match[] = { -+ { -+ .compatible = "fsl,dpa-ethernet" -+ }, -+ {} -+}; -+MODULE_DEVICE_TABLE(of, dpa_match); -+ -+static struct platform_driver dpa_driver = { -+ .driver = { -+ .name = KBUILD_MODNAME, -+ .of_match_table = dpa_match, -+ .owner = THIS_MODULE, -+ .pm = DPAA_PM_OPS, -+ }, -+ .probe = dpaa_eth_priv_probe, -+ .remove = dpa_remove -+}; -+ -+static int __init __cold dpa_load(void) -+{ -+ int _errno; -+ -+ pr_info(DPA_DESCRIPTION "\n"); -+ -+#ifdef CONFIG_FSL_DPAA_DBG_LOOP -+ dpa_debugfs_module_init(); -+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */ -+ -+ /* initialise dpaa_eth mirror values */ -+ dpa_rx_extra_headroom = fm_get_rx_extra_headroom(); -+ dpa_max_frm = fm_get_max_frm(); -+ dpa_num_cpus = num_possible_cpus(); -+ -+#ifdef CONFIG_FSL_DPAA_DBG_LOOP -+ memset(dpa_loop_netdevs, 0, sizeof(dpa_loop_netdevs)); -+#endif -+ -+ _errno = platform_driver_register(&dpa_driver); -+ if (unlikely(_errno < 0)) { -+ pr_err(KBUILD_MODNAME -+ ": %s:%hu:%s(): platform_driver_register() = %d\n", -+ KBUILD_BASENAME".c", __LINE__, __func__, _errno); -+ } -+ -+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", -+ KBUILD_BASENAME".c", __func__); -+ -+ return _errno; -+} -+module_init(dpa_load); -+ -+static void __exit __cold dpa_unload(void) -+{ -+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n", -+ KBUILD_BASENAME".c", __func__); -+ -+ platform_driver_unregister(&dpa_driver); -+ -+#ifdef CONFIG_FSL_DPAA_DBG_LOOP -+ dpa_debugfs_module_exit(); -+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */ -+ -+ /* Only one channel is used and needs to be relased after all -+ * interfaces are removed -+ */ -+ dpa_release_channel(); -+ -+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", -+ KBUILD_BASENAME".c", __func__); -+} -+module_exit(dpa_unload); ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h -@@ -0,0 +1,695 @@ -+/* Copyright 2008-2012 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __DPA_H
-+#define __DPA_H
-+
-+#include <linux/netdevice.h>
-+#include <linux/fsl_qman.h>	/* struct qman_fq */
-+
-+#include "fm_ext.h"
-+#include "dpaa_eth_trace.h"
-+
-+extern int dpa_rx_extra_headroom;
-+extern int dpa_max_frm;
-+extern int dpa_num_cpus;
-+
-+#define dpa_get_rx_extra_headroom() dpa_rx_extra_headroom
-+#define dpa_get_max_frm() dpa_max_frm
-+
-+#define dpa_get_max_mtu() \
-+	(dpa_get_max_frm() - (VLAN_ETH_HLEN + ETH_FCS_LEN))
-+
-+#define __hot
-+
-+/* Simple enum of FQ types - used for array indexing */
-+enum port_type {RX, TX};
-+
-+/* TODO: This structure should be renamed & moved to the FMD wrapper */
-+struct dpa_buffer_layout_s {
-+	uint16_t priv_data_size;
-+	bool parse_results;
-+	bool time_stamp;
-+	bool hash_results;
-+	uint8_t manip_extra_space;
-+	uint16_t data_align;
-+};
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
-+#define DPA_BUG_ON(cond) BUG_ON(cond)
-+#else
-+#define DPA_BUG_ON(cond)
-+#endif
-+
-+#define DPA_TX_PRIV_DATA_SIZE 16
-+#define DPA_PARSE_RESULTS_SIZE sizeof(fm_prs_result_t)
-+#define DPA_TIME_STAMP_SIZE 8
-+#define DPA_HASH_RESULTS_SIZE 8
-+#define DPA_RX_PRIV_DATA_SIZE (DPA_TX_PRIV_DATA_SIZE + \
-+	dpa_get_rx_extra_headroom())
-+
-+#define FM_FD_STAT_RX_ERRORS \
-+	(FM_PORT_FRM_ERR_DMA | FM_PORT_FRM_ERR_PHYSICAL | \
-+	 FM_PORT_FRM_ERR_SIZE | FM_PORT_FRM_ERR_CLS_DISCARD | \
-+	 FM_PORT_FRM_ERR_EXTRACTION | FM_PORT_FRM_ERR_NO_SCHEME | \
-+	 FM_PORT_FRM_ERR_ILL_PLCR | FM_PORT_FRM_ERR_PRS_TIMEOUT | \
-+	 FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | FM_PORT_FRM_ERR_PRS_HDR_ERR)
-+
-+#define FM_FD_STAT_TX_ERRORS \
-+	(FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT | \
-+	 FM_PORT_FRM_ERR_LENGTH | FM_PORT_FRM_ERR_DMA)
-+
-+#ifndef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
-+/* The raw buffer size must be cacheline aligned.
-+ * Normally we use 2K buffers.
-+ */
-+#define DPA_BP_RAW_SIZE 2048
-+#else
-+/* For jumbo frame optimizations, use buffers large enough to accommodate
-+ * 9.6K frames, FD maximum offset, skb sh_info overhead and some extra
-+ * space to account for further alignments.
-+ */
-+#define DPA_MAX_FRM_SIZE 9600
-+#define DPA_BP_RAW_SIZE \
-+	((DPA_MAX_FRM_SIZE + DPA_MAX_FD_OFFSET + \
-+	  sizeof(struct skb_shared_info) + 128) & ~(SMP_CACHE_BYTES - 1))
-+#endif
-+
-+/* This is what FMan is ever allowed to use.
-+ * FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
-+ * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
-+ * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
-+ * half-page-aligned buffers (can we?), so we reserve some more space
-+ * for start-of-buffer alignment.
-+ */
-+#define dpa_bp_size(buffer_layout) (SKB_WITH_OVERHEAD(DPA_BP_RAW_SIZE) - \
-+	SMP_CACHE_BYTES)
-+/* We must ensure that skb_shinfo is always cacheline-aligned.
*/ -+#define DPA_SKB_SIZE(size) ((size) & ~(SMP_CACHE_BYTES - 1)) -+ -+/* Maximum size of a buffer for which recycling is allowed. -+ * We need an upper limit such that forwarded skbs that get reallocated on Tx -+ * aren't allowed to grow unboundedly. On the other hand, we need to make sure -+ * that skbs allocated by us will not fail to be recycled due to their size. -+ * -+ * For a requested size, the kernel allocator provides the next power of two -+ * sized block, which the stack will use as is, regardless of the actual size -+ * it required; since we must accommodate at most 9.6K buffers (L2 maximum -+ * supported frame size), set the recycling upper limit to 16K. -+ */ -+#define DPA_RECYCLE_MAX_SIZE 16384 -+ -+#if defined(CONFIG_FSL_SDK_FMAN_TEST) -+/*TODO: temporary for fman pcd testing */ -+#define FMAN_PCD_TESTS_MAX_NUM_RANGES 20 -+#endif -+ -+#define DPAA_ETH_FQ_DELTA 0x10000 -+ -+#define DPAA_ETH_PCD_FQ_BASE(device_addr) \ -+ (((device_addr) & 0x1fffff) >> 6) -+ -+#define DPAA_ETH_PCD_FQ_HI_PRIO_BASE(device_addr) \ -+ (DPAA_ETH_FQ_DELTA + DPAA_ETH_PCD_FQ_BASE(device_addr)) -+ -+/* Largest value that the FQD's OAL field can hold. -+ * This is DPAA-1.x specific. -+ * TODO: This rather belongs in fsl_qman.h -+ */ -+#define FSL_QMAN_MAX_OAL 127 -+ -+/* Maximum offset value for a contig or sg FD (represented on 9 bits) */ -+#define DPA_MAX_FD_OFFSET ((1 << 9) - 1) -+ -+/* Default alignment for start of data in an Rx FD */ -+#define DPA_FD_DATA_ALIGNMENT 16 -+ -+/* Values for the L3R field of the FM Parse Results -+ */ -+/* L3 Type field: First IP Present IPv4 */ -+#define FM_L3_PARSE_RESULT_IPV4 0x8000 -+/* L3 Type field: First IP Present IPv6 */ -+#define FM_L3_PARSE_RESULT_IPV6 0x4000 -+ -+/* Values for the L4R field of the FM Parse Results -+ * See $8.8.4.7.20 - L4 HXS - L4 Results from DPAA-Rev2 Reference Manual. -+ */ -+/* L4 Type field: UDP */ -+#define FM_L4_PARSE_RESULT_UDP 0x40 -+/* L4 Type field: TCP */ -+#define FM_L4_PARSE_RESULT_TCP 0x20 -+/* FD status field indicating whether the FM Parser has attempted to validate -+ * the L4 csum of the frame. -+ * Note that having this bit set doesn't necessarily imply that the checksum -+ * is valid. One would have to check the parse results to find that out. -+ */ -+#define FM_FD_STAT_L4CV 0x00000004 -+ -+ -+#define FM_FD_STAT_ERR_PHYSICAL FM_PORT_FRM_ERR_PHYSICAL -+ -+/* Check if the parsed frame was found to be a TCP segment. -+ * -+ * @parse_result_ptr must be of type (fm_prs_result_t *). -+ */ -+#define fm_l4_frame_is_tcp(parse_result_ptr) \ -+ ((parse_result_ptr)->l4r & FM_L4_PARSE_RESULT_TCP) -+ -+/* number of Tx queues to FMan */ -+#ifdef CONFIG_FMAN_PFC -+#define DPAA_ETH_TX_QUEUES (NR_CPUS * CONFIG_FMAN_PFC_COS_COUNT) -+#else -+#define DPAA_ETH_TX_QUEUES NR_CPUS -+#endif -+ -+#define DPAA_ETH_RX_QUEUES 128 -+ -+/* Convenience macros for storing/retrieving the skb back-pointers. They must -+ * accommodate both recycling and confirmation paths - i.e. cases when the buf -+ * was allocated by ourselves, respectively by the stack. In the former case, -+ * we could store the skb at negative offset; in the latter case, we can't, -+ * so we'll use 0 as offset. -+ * -+ * NB: @off is an offset from a (struct sk_buff **) pointer! 
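Two size computations above reward a concrete walk-through: the jumbo-frame DPA_BP_RAW_SIZE expression, and the 16K recycling cap that follows from the allocator's power-of-two rounding. A small self-contained sketch, assuming SMP_CACHE_BYTES = 64 and sizeof(struct skb_shared_info) = 320; both values vary with kernel configuration and architecture:

/* Illustrative arithmetic only; the assumed constants below stand in
 * for kernel values that vary by configuration. */
#include <stdio.h>

#define SMP_CACHE_BYTES   64			/* assumed */
#define SHINFO_SIZE       320			/* assumed shared_info size */
#define DPA_MAX_FRM_SIZE  9600
#define DPA_MAX_FD_OFFSET ((1 << 9) - 1)	/* 511 */

static unsigned long next_pow2(unsigned long x)
{
	unsigned long p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	/* DPA_BP_RAW_SIZE, jumbo variant: worst case rounded down to a
	 * cacheline multiple. */
	unsigned long raw = (DPA_MAX_FRM_SIZE + DPA_MAX_FD_OFFSET +
			     SHINFO_SIZE + 128) & ~(SMP_CACHE_BYTES - 1);

	printf("DPA_BP_RAW_SIZE = %lu\n", raw);	/* 10496 under these assumptions */

	/* Recycling cap: a 9.6K request is served from the next
	 * power-of-two slab, hence DPA_RECYCLE_MAX_SIZE = 16384. */
	printf("recycle cap = %lu\n", next_pow2(DPA_MAX_FRM_SIZE));	/* 16384 */
	return 0;
}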
-+ */ -+#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off) \ -+{ \ -+ skbh = (struct sk_buff **)addr; \ -+ *(skbh + (off)) = skb; \ -+} -+#define DPA_READ_SKB_PTR(skb, skbh, addr, off) \ -+{ \ -+ skbh = (struct sk_buff **)addr; \ -+ skb = *(skbh + (off)); \ -+} -+ -+#ifdef CONFIG_PM -+/* Magic Packet wakeup */ -+#define DPAA_WOL_MAGIC 0x00000001 -+#endif -+ -+#if defined(CONFIG_FSL_SDK_FMAN_TEST) -+struct pcd_range { -+ uint32_t base; -+ uint32_t count; -+}; -+#endif -+ -+/* More detailed FQ types - used for fine-grained WQ assignments */ -+enum dpa_fq_type { -+ FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */ -+ FQ_TYPE_RX_ERROR, /* Rx Error FQs */ -+ FQ_TYPE_RX_PCD, /* User-defined PCDs */ -+ FQ_TYPE_TX, /* "Real" Tx FQs */ -+ FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */ -+ FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */ -+ FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */ -+ FQ_TYPE_RX_PCD_HI_PRIO, /* User-defined high-priority PCDs */ -+}; -+ -+struct dpa_fq { -+ struct qman_fq fq_base; -+ struct list_head list; -+ struct net_device *net_dev; -+ bool init; -+ uint32_t fqid; -+ uint32_t flags; -+ uint16_t channel; -+ uint8_t wq; -+ enum dpa_fq_type fq_type; -+}; -+ -+struct dpa_fq_cbs_t { -+ struct qman_fq rx_defq; -+ struct qman_fq tx_defq; -+ struct qman_fq rx_errq; -+ struct qman_fq tx_errq; -+ struct qman_fq egress_ern; -+}; -+ -+struct fqid_cell { -+ uint32_t start; -+ uint32_t count; -+}; -+ -+struct dpa_bp { -+ struct bman_pool *pool; -+ uint8_t bpid; -+ struct device *dev; -+ union { -+ /* The buffer pools used for the private ports are initialized -+ * with target_count buffers for each CPU; at runtime the -+ * number of buffers per CPU is constantly brought back to this -+ * level -+ */ -+ int target_count; -+ /* The configured value for the number of buffers in the pool, -+ * used for shared port buffer pools -+ */ -+ int config_count; -+ }; -+ size_t size; -+ bool seed_pool; -+ /* physical address of the contiguous memory used by the pool to store -+ * the buffers -+ */ -+ dma_addr_t paddr; -+ /* virtual address of the contiguous memory used by the pool to store -+ * the buffers -+ */ -+ void __iomem *vaddr; -+ /* current number of buffers in the bpool alloted to this CPU */ -+ int __percpu *percpu_count; -+ atomic_t refs; -+ /* some bpools need to be seeded before use by this cb */ -+ int (*seed_cb)(struct dpa_bp *); -+ /* some bpools need to be emptied before freeing; this cb is used -+ * for freeing of individual buffers taken from the pool -+ */ -+ void (*free_buf_cb)(void *addr); -+}; -+ -+struct dpa_rx_errors { -+ u64 dme; /* DMA Error */ -+ u64 fpe; /* Frame Physical Error */ -+ u64 fse; /* Frame Size Error */ -+ u64 phe; /* Header Error */ -+ u64 cse; /* Checksum Validation Error */ -+}; -+ -+/* Counters for QMan ERN frames - one counter per rejection code */ -+struct dpa_ern_cnt { -+ u64 cg_tdrop; /* Congestion group taildrop */ -+ u64 wred; /* WRED congestion */ -+ u64 err_cond; /* Error condition */ -+ u64 early_window; /* Order restoration, frame too early */ -+ u64 late_window; /* Order restoration, frame too late */ -+ u64 fq_tdrop; /* FQ taildrop */ -+ u64 fq_retired; /* FQ is retired */ -+ u64 orp_zero; /* ORP disabled */ -+}; -+ -+struct dpa_napi_portal { -+ struct napi_struct napi; -+ struct qman_portal *p; -+}; -+ -+struct dpa_percpu_priv_s { -+ struct net_device *net_dev; -+ struct dpa_napi_portal *np; -+ u64 in_interrupt; -+ u64 tx_returned; -+ u64 tx_confirm; -+ /* fragmented (non-linear) skbuffs received from the 
stack */ -+ u64 tx_frag_skbuffs; -+ /* number of S/G frames received */ -+ u64 rx_sg; -+ -+ struct rtnl_link_stats64 stats; -+ struct dpa_rx_errors rx_errors; -+ struct dpa_ern_cnt ern_cnt; -+}; -+ -+struct dpa_priv_s { -+ struct dpa_percpu_priv_s __percpu *percpu_priv; -+ struct dpa_bp *dpa_bp; -+ /* Store here the needed Tx headroom for convenience and speed -+ * (even though it can be computed based on the fields of buf_layout) -+ */ -+ uint16_t tx_headroom; -+ struct net_device *net_dev; -+ struct mac_device *mac_dev; -+ struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES]; -+ struct qman_fq *conf_fqs[DPAA_ETH_TX_QUEUES]; -+ -+ size_t bp_count; -+ -+ uint16_t channel; /* "fsl,qman-channel-id" */ -+ struct list_head dpa_fq_list; -+ -+#ifdef CONFIG_FSL_DPAA_DBG_LOOP -+ struct dentry *debugfs_loop_file; -+#endif -+ -+ uint32_t msg_enable; /* net_device message level */ -+#ifdef CONFIG_FSL_DPAA_1588 -+ struct dpa_ptp_tsu *tsu; -+#endif -+ -+#if defined(CONFIG_FSL_SDK_FMAN_TEST) -+/* TODO: this is temporary until pcd support is implemented in dpaa */ -+ int priv_pcd_num_ranges; -+ struct pcd_range priv_pcd_ranges[FMAN_PCD_TESTS_MAX_NUM_RANGES]; -+#endif -+ -+ struct { -+ /** -+ * All egress queues to a given net device belong to one -+ * (and the same) congestion group. -+ */ -+ struct qman_cgr cgr; -+ /* If congested, when it began. Used for performance stats. */ -+ u32 congestion_start_jiffies; -+ /* Number of jiffies the Tx port was congested. */ -+ u32 congested_jiffies; -+ /** -+ * Counter for the number of times the CGR -+ * entered congestion state -+ */ -+ u32 cgr_congested_count; -+ } cgr_data; -+ /* Use a per-port CGR for ingress traffic. */ -+ bool use_ingress_cgr; -+ struct qman_cgr ingress_cgr; -+ -+#ifdef CONFIG_FSL_DPAA_TS -+ bool ts_tx_en; /* Tx timestamping enabled */ -+ bool ts_rx_en; /* Rx timestamping enabled */ -+#endif /* CONFIG_FSL_DPAA_TS */ -+ -+ struct dpa_buffer_layout_s *buf_layout; -+ uint16_t rx_headroom; -+ char if_type[30]; -+ -+ void *peer; -+#ifdef CONFIG_PM -+ u32 wol; -+#endif -+#ifdef CONFIG_FSL_DPAA_DBG_LOOP -+ int loop_id; -+ int loop_to; -+#endif -+#ifdef CONFIG_FSL_DPAA_CEETM -+ bool ceetm_en; /* CEETM QoS enabled */ -+#endif -+}; -+ -+struct fm_port_fqs { -+ struct dpa_fq *tx_defq; -+ struct dpa_fq *tx_errq; -+ struct dpa_fq *rx_defq; -+ struct dpa_fq *rx_errq; -+}; -+ -+ -+#ifdef CONFIG_FSL_DPAA_DBG_LOOP -+extern struct net_device *dpa_loop_netdevs[20]; -+#endif -+ -+/* functions with different implementation for SG and non-SG: */ -+int dpa_bp_priv_seed(struct dpa_bp *dpa_bp); -+int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *count_ptr); -+void __hot _dpa_rx(struct net_device *net_dev, -+ struct qman_portal *portal, -+ const struct dpa_priv_s *priv, -+ struct dpa_percpu_priv_s *percpu_priv, -+ const struct qm_fd *fd, -+ u32 fqid, -+ int *count_ptr); -+int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev); -+int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev, -+ struct qman_fq *egress_fq, struct qman_fq *conf_fq); -+struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv, -+ const struct qm_fd *fd); -+void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results, -+ const struct qm_fd *fd, -+ struct sk_buff *skb, -+ int *use_gro); -+#ifndef CONFIG_FSL_DPAA_TS -+bool dpa_skb_is_recyclable(struct sk_buff *skb); -+bool dpa_buf_is_recyclable(struct sk_buff *skb, -+ uint32_t min_size, -+ uint16_t min_offset, -+ unsigned char **new_buf_start); -+#endif -+int __hot skb_to_contig_fd(struct dpa_priv_s 
*priv, -+ struct sk_buff *skb, struct qm_fd *fd, -+ int *count_ptr, int *offset); -+int __hot skb_to_sg_fd(struct dpa_priv_s *priv, -+ struct sk_buff *skb, struct qm_fd *fd); -+int __cold __attribute__((nonnull)) -+ _dpa_fq_free(struct device *dev, struct qman_fq *fq); -+ -+/* Turn on HW checksum computation for this outgoing frame. -+ * If the current protocol is not something we support in this regard -+ * (or if the stack has already computed the SW checksum), we do nothing. -+ * -+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value -+ * otherwise. -+ * -+ * Note that this function may modify the fd->cmd field and the skb data buffer -+ * (the Parse Results area). -+ */ -+int dpa_enable_tx_csum(struct dpa_priv_s *priv, -+ struct sk_buff *skb, struct qm_fd *fd, char *parse_results); -+ -+static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv, -+ struct qman_portal *portal) -+{ -+ /* In case of threaded ISR for RT enable kernel, -+ * in_irq() does not return appropriate value, so use -+ * in_serving_softirq to distinguish softirq or irq context. -+ */ -+ if (unlikely(in_irq() || !in_serving_softirq())) { -+ /* Disable QMan IRQ and invoke NAPI */ -+ int ret = qman_p_irqsource_remove(portal, QM_PIRQ_DQRI); -+ if (likely(!ret)) { -+ const struct qman_portal_config *pc = -+ qman_p_get_portal_config(portal); -+ struct dpa_napi_portal *np = -+ &percpu_priv->np[pc->index]; -+ -+ np->p = portal; -+ napi_schedule(&np->napi); -+ percpu_priv->in_interrupt++; -+ return 1; -+ } -+ } -+ return 0; -+} -+ -+static inline ssize_t __const __must_check __attribute__((nonnull)) -+dpa_fd_length(const struct qm_fd *fd) -+{ -+ return fd->length20; -+} -+ -+static inline ssize_t __const __must_check __attribute__((nonnull)) -+dpa_fd_offset(const struct qm_fd *fd) -+{ -+ return fd->offset; -+} -+ -+/* Verifies if the skb length is below the interface MTU */ -+static inline int dpa_check_rx_mtu(struct sk_buff *skb, int mtu) -+{ -+ if (unlikely(skb->len > mtu)) -+ if ((skb->protocol != htons(ETH_P_8021Q)) -+ || (skb->len > mtu + 4)) -+ return -1; -+ -+ return 0; -+} -+ -+static inline uint16_t dpa_get_headroom(struct dpa_buffer_layout_s *bl) -+{ -+ uint16_t headroom; -+ /* The frame headroom must accommodate: -+ * - the driver private data area -+ * - parse results, hash results, timestamp if selected -+ * - manip extra space -+ * If either hash results or time stamp are selected, both will -+ * be copied to/from the frame headroom, as TS is located between PR and -+ * HR in the IC and IC copy size has a granularity of 16bytes -+ * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM) -+ * -+ * Also make sure the headroom is a multiple of data_align bytes -+ */ -+ headroom = (uint16_t)(bl->priv_data_size + -+ (bl->parse_results ? DPA_PARSE_RESULTS_SIZE : 0) + -+ (bl->hash_results || bl->time_stamp ? -+ DPA_TIME_STAMP_SIZE + DPA_HASH_RESULTS_SIZE : 0) + -+ bl->manip_extra_space); -+ -+ return bl->data_align ? 
ALIGN(headroom, bl->data_align) : headroom; -+} -+ -+int fm_mac_dump_regs(struct mac_device *h_dev, char *buf, int n); -+int fm_mac_dump_rx_stats(struct mac_device *h_dev, char *buf, int n); -+int fm_mac_dump_tx_stats(struct mac_device *h_dev, char *buf, int n); -+ -+void dpaa_eth_sysfs_remove(struct device *dev); -+void dpaa_eth_sysfs_init(struct device *dev); -+int dpaa_eth_poll(struct napi_struct *napi, int budget); -+ -+void dpa_private_napi_del(struct net_device *net_dev); -+ -+/* Equivalent to a memset(0), but works faster */ -+static inline void clear_fd(struct qm_fd *fd) -+{ -+ fd->opaque_addr = 0; -+ fd->opaque = 0; -+ fd->cmd = 0; -+} -+ -+static inline int _dpa_tx_fq_to_id(const struct dpa_priv_s *priv, -+ struct qman_fq *tx_fq) -+{ -+ int i; -+ -+ for (i = 0; i < DPAA_ETH_TX_QUEUES; i++) -+ if (priv->egress_fqs[i] == tx_fq) -+ return i; -+ -+ return -EINVAL; -+} -+ -+static inline int __hot dpa_xmit(struct dpa_priv_s *priv, -+ struct rtnl_link_stats64 *percpu_stats, -+ struct qm_fd *fd, struct qman_fq *egress_fq, -+ struct qman_fq *conf_fq) -+{ -+ int err, i; -+ -+ if (fd->bpid == 0xff) -+ fd->cmd |= qman_fq_fqid(conf_fq); -+ -+ /* Trace this Tx fd */ -+ trace_dpa_tx_fd(priv->net_dev, egress_fq, fd); -+ -+ for (i = 0; i < 100000; i++) { -+ err = qman_enqueue(egress_fq, fd, 0); -+ if (err != -EBUSY) -+ break; -+ } -+ -+ if (unlikely(err < 0)) { -+ /* TODO differentiate b/w -EBUSY (EQCR full) and other codes? */ -+ percpu_stats->tx_errors++; -+ percpu_stats->tx_fifo_errors++; -+ return err; -+ } -+ -+ percpu_stats->tx_packets++; -+ percpu_stats->tx_bytes += dpa_fd_length(fd); -+ -+ return 0; -+} -+ -+/* Use multiple WQs for FQ assignment: -+ * - Tx Confirmation queues go to WQ1. -+ * - Rx Default, Tx and PCD queues go to WQ3 (no differentiation between -+ * Rx and Tx traffic, or between Rx Default and Rx PCD frames). -+ * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance -+ * to be scheduled, in case there are many more FQs in WQ3). -+ * This ensures that Tx-confirmed buffers are timely released. In particular, -+ * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they -+ * are greatly outnumbered by other FQs in the system (usually PCDs), while -+ * dequeue scheduling is round-robin. -+ */ -+static inline void _dpa_assign_wq(struct dpa_fq *fq) -+{ -+ switch (fq->fq_type) { -+ case FQ_TYPE_TX_CONFIRM: -+ case FQ_TYPE_TX_CONF_MQ: -+ fq->wq = 1; -+ break; -+ case FQ_TYPE_RX_DEFAULT: -+ case FQ_TYPE_TX: -+ fq->wq = 3; -+ break; -+ case FQ_TYPE_RX_ERROR: -+ case FQ_TYPE_TX_ERROR: -+ case FQ_TYPE_RX_PCD_HI_PRIO: -+ fq->wq = 2; -+ break; -+ case FQ_TYPE_RX_PCD: -+ fq->wq = 5; -+ break; -+ default: -+ WARN(1, "Invalid FQ type %d for FQID %d!\n", -+ fq->fq_type, fq->fqid); -+ } -+} -+ -+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE -+/* Use in lieu of skb_get_queue_mapping() */ -+#ifdef CONFIG_FMAN_PFC -+#define dpa_get_queue_mapping(skb) \ -+ (((skb)->priority < CONFIG_FMAN_PFC_COS_COUNT) ? 
\ -+ ((skb)->priority * dpa_num_cpus + smp_processor_id()) : \ -+ ((CONFIG_FMAN_PFC_COS_COUNT - 1) * \ -+ dpa_num_cpus + smp_processor_id())); -+ -+#else -+#define dpa_get_queue_mapping(skb) \ -+ raw_smp_processor_id() -+#endif -+#else -+/* Use the queue selected by XPS */ -+#define dpa_get_queue_mapping(skb) \ -+ skb_get_queue_mapping(skb) -+#endif -+ -+#ifdef CONFIG_PTP_1588_CLOCK_DPAA -+struct ptp_priv_s { -+ struct device_node *node; -+ struct platform_device *of_dev; -+ struct mac_device *mac_dev; -+}; -+extern struct ptp_priv_s ptp_priv; -+#endif -+ -+static inline void _dpa_bp_free_pf(void *addr) -+{ -+ put_page(virt_to_head_page(addr)); -+} -+ -+/* TODO: the LS1043A SoC has a HW issue regarding FMan DMA transactions. The issue -+ * manifests itself at high traffic rates when frames exceed 4K memory -+ * boundaries. For the moment, we use a SW workaround to avoid frames larger -+ * than 4K or that cross 4K-aligned boundaries. -+ */ -+ -+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) -+#define DPAA_LS1043A_DMA_4K_ISSUE 1 -+#endif -+ -+#ifdef DPAA_LS1043A_DMA_4K_ISSUE -+#define HAS_DMA_ISSUE(start, size) \ -+ (((unsigned long)(start) ^ ((unsigned long)(start) + \ -+ (unsigned long)(size))) & ~0xFFF) -+ -+#define BOUNDARY_4K(start, size) (((unsigned long)(start) + \ -+ (unsigned long)(size)) & ~0xFFF) -+#endif /* DPAA_LS1043A_DMA_4K_ISSUE */ -+ -+#endif /* __DPA_H */ ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c -@@ -0,0 +1,263 @@ -+/* Copyright 2008-2013 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
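A note on the HAS_DMA_ISSUE() macro defined in dpa.h above: XOR-ing a buffer's start address with start + size and masking off the low 12 bits is non-zero exactly when the two addresses fall in different 4 KiB pages, i.e. when the frame would cross a 4K boundary that the LS1043A workaround must avoid. A minimal user-space sketch of the same arithmetic (the addresses and sizes below are invented for illustration):

#include <stdio.h>

/* Same arithmetic as HAS_DMA_ISSUE(): non-zero iff start and start + size
 * fall in different 4 KiB pages. */
static unsigned long crosses_4k(unsigned long start, unsigned long size)
{
	return (start ^ (start + size)) & ~0xFFFUL;
}

int main(void)
{
	struct { unsigned long start, size; } t[] = {
		{ 0x1000, 0x0800 }, /* fits within one page */
		{ 0x1c00, 0x0800 }, /* straddles the 0x2000 boundary */
		{ 0x3000, 0x1000 }, /* flagged: start + size is 0x4000 */
	};
	unsigned i;

	for (i = 0; i < sizeof(t) / sizeof(t[0]); i++)
		printf("start=%#lx size=%#lx -> %s\n", t[i].start, t[i].size,
		       crosses_4k(t[i].start, t[i].size) ? "crosses 4K" : "ok");
	return 0;
}

Note that the check is deliberately conservative: a buffer ending exactly on a page boundary is flagged as well, since start + size lands in the next page.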
-+ */ -+ -+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG -+#define pr_fmt(fmt) \ -+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \ -+ KBUILD_BASENAME".c", __LINE__, __func__ -+#else -+#define pr_fmt(fmt) \ -+ KBUILD_MODNAME ": " fmt -+#endif -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "dpaa_eth.h" -+#include "dpaa_eth_common.h" -+#include "dpaa_eth_base.h" -+ -+#define DPA_DESCRIPTION "FSL DPAA Advanced drivers:" -+ -+MODULE_LICENSE("Dual BSD/GPL"); -+ -+uint8_t advanced_debug = -1; -+module_param(advanced_debug, byte, S_IRUGO); -+MODULE_PARM_DESC(advanced_debug, "Module/Driver verbosity level"); -+EXPORT_SYMBOL(advanced_debug); -+ -+static int dpa_bp_cmp(const void *dpa_bp0, const void *dpa_bp1) -+{ -+ return ((struct dpa_bp *)dpa_bp0)->size - -+ ((struct dpa_bp *)dpa_bp1)->size; -+} -+ -+struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */ -+dpa_bp_probe(struct platform_device *_of_dev, size_t *count) -+{ -+ int i, lenp, na, ns, err; -+ struct device *dev; -+ struct device_node *dev_node; -+ const __be32 *bpool_cfg; -+ struct dpa_bp *dpa_bp; -+ u32 bpid; -+ -+ dev = &_of_dev->dev; -+ -+ *count = of_count_phandle_with_args(dev->of_node, -+ "fsl,bman-buffer-pools", NULL); -+ if (*count < 1) { -+ dev_err(dev, "missing fsl,bman-buffer-pools device tree entry\n"); -+ return ERR_PTR(-EINVAL); -+ } -+ -+ dpa_bp = devm_kzalloc(dev, *count * sizeof(*dpa_bp), GFP_KERNEL); -+ if (dpa_bp == NULL) { -+ dev_err(dev, "devm_kzalloc() failed\n"); -+ return ERR_PTR(-ENOMEM); -+ } -+ -+ dev_node = of_find_node_by_path("/"); -+ if (unlikely(dev_node == NULL)) { -+ dev_err(dev, "of_find_node_by_path(/) failed\n"); -+ return ERR_PTR(-EINVAL); -+ } -+ -+ na = of_n_addr_cells(dev_node); -+ ns = of_n_size_cells(dev_node); -+ -+ for (i = 0; i < *count; i++) { -+ of_node_put(dev_node); -+ -+ dev_node = of_parse_phandle(dev->of_node, -+ "fsl,bman-buffer-pools", i); -+ if (dev_node == NULL) { -+ dev_err(dev, "of_find_node_by_phandle() failed\n"); -+ return ERR_PTR(-EFAULT); -+ } -+ -+ if (unlikely(!of_device_is_compatible(dev_node, "fsl,bpool"))) { -+ dev_err(dev, -+ "!of_device_is_compatible(%s, fsl,bpool)\n", -+ dev_node->full_name); -+ dpa_bp = ERR_PTR(-EINVAL); -+ goto _return_of_node_put; -+ } -+ -+ err = of_property_read_u32(dev_node, "fsl,bpid", &bpid); -+ if (err) { -+ dev_err(dev, "Cannot find buffer pool ID in the device tree\n"); -+ dpa_bp = ERR_PTR(-EINVAL); -+ goto _return_of_node_put; -+ } -+ dpa_bp[i].bpid = (uint8_t)bpid; -+ -+ bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg", -+ &lenp); -+ if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) { -+ const uint32_t *seed_pool; -+ -+ dpa_bp[i].config_count = -+ (int)of_read_number(bpool_cfg, ns); -+ dpa_bp[i].size = -+ (size_t)of_read_number(bpool_cfg + ns, ns); -+ dpa_bp[i].paddr = -+ of_read_number(bpool_cfg + 2 * ns, na); -+ -+ seed_pool = of_get_property(dev_node, -+ "fsl,bpool-ethernet-seeds", &lenp); -+ dpa_bp[i].seed_pool = !!seed_pool; -+ -+ } else { -+ dev_err(dev, -+ "Missing/invalid fsl,bpool-ethernet-cfg device tree entry for node %s\n", -+ dev_node->full_name); -+ dpa_bp = ERR_PTR(-EINVAL); -+ goto _return_of_node_put; -+ } -+ } -+ -+ sort(dpa_bp, *count, sizeof(*dpa_bp), dpa_bp_cmp, NULL); -+ -+ return dpa_bp; -+ -+_return_of_node_put: -+ if (dev_node) -+ of_node_put(dev_node); -+ -+ return dpa_bp; -+} -+EXPORT_SYMBOL(dpa_bp_probe); -+ -+int dpa_bp_shared_port_seed(struct dpa_bp *bp) -+{ -+ void __iomem **ptr; -+ -+ /* In MAC-less and 
Shared-MAC scenarios the physical -+ * address of the buffer pool in device tree is set -+ * to 0 to specify that another entity (USDPAA) will -+ * allocate and seed the buffers -+ */ -+ if (!bp->paddr) -+ return 0; -+ -+ /* allocate memory region for buffers */ -+ devm_request_mem_region(bp->dev, bp->paddr, -+ bp->size * bp->config_count, KBUILD_MODNAME); -+ /* managed ioremap unmapping */ -+ ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL); -+ if (!ptr) -+ return -EIO; -+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) -+ bp->vaddr = ioremap_cache_ns(bp->paddr, bp->size * bp->config_count); -+#else -+ bp->vaddr = ioremap_prot(bp->paddr, bp->size * bp->config_count, 0); -+#endif -+ if (bp->vaddr == NULL) { -+ pr_err("Could not map memory for pool %d\n", bp->bpid); -+ devres_free(ptr); -+ return -EIO; -+ } -+ *ptr = bp->vaddr; -+ devres_add(bp->dev, ptr); -+ -+ /* seed pool with buffers from that memory region */ -+ if (bp->seed_pool) { -+ int count = bp->target_count; -+ dma_addr_t addr = bp->paddr; -+ -+ while (count) { -+ struct bm_buffer bufs[8]; -+ uint8_t num_bufs = 0; -+ -+ do { -+ BUG_ON(addr > 0xffffffffffffull); -+ bufs[num_bufs].bpid = bp->bpid; -+ bm_buffer_set64(&bufs[num_bufs++], addr); -+ addr += bp->size; -+ -+ } while (--count && (num_bufs < 8)); -+ -+ while (bman_release(bp->pool, bufs, num_bufs, 0)) -+ cpu_relax(); -+ } -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL(dpa_bp_shared_port_seed); -+ -+int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp, -+ size_t count) -+{ -+ struct dpa_priv_s *priv = netdev_priv(net_dev); -+ int i; -+ -+ priv->dpa_bp = dpa_bp; -+ priv->bp_count = count; -+ -+ for (i = 0; i < count; i++) { -+ int err; -+ err = dpa_bp_alloc(&dpa_bp[i]); -+ if (err < 0) { -+ dpa_bp_free(priv); -+ priv->dpa_bp = NULL; -+ return err; -+ } -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL(dpa_bp_create); -+ -+static int __init __cold dpa_advanced_load(void) -+{ -+ pr_info(DPA_DESCRIPTION "\n"); -+ -+ return 0; -+} -+module_init(dpa_advanced_load); -+ -+static void __exit __cold dpa_advanced_unload(void) -+{ -+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n", -+ KBUILD_BASENAME".c", __func__); -+ -+} -+module_exit(dpa_advanced_unload); ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h -@@ -0,0 +1,50 @@ -+/* Copyright 2008-2013 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
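dpa_bp_shared_port_seed() above releases the seeded buffers to BMan in bursts of at most eight bm_buffer entries, the most a single BMan release command can carry. The burst arithmetic is easy to get wrong at the tail of the pool, so here is a rough user-space model of just that loop (bman_release() and bm_buffer_set64() are kernel APIs; the pool geometry below is invented):

#include <stdio.h>

#define BURST_MAX 8

int main(void)
{
	/* invented pool geometry: 61 buffers of 2 KiB from 0x80000000 */
	unsigned long long addr = 0x80000000ULL;
	unsigned long long bp_size = 0x800;
	int count = 61; /* deliberately not a multiple of 8 */

	while (count) {
		unsigned long long batch[BURST_MAX];
		int num_bufs = 0;

		/* mirrors the inner do/while in dpa_bp_shared_port_seed() */
		do {
			batch[num_bufs++] = addr;
			addr += bp_size;
		} while (--count && num_bufs < BURST_MAX);

		printf("release %d buffers, first at %#llx\n",
		       num_bufs, batch[0]);
	}
	return 0;
}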
-+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#ifndef __DPAA_ETH_BASE_H -+#define __DPAA_ETH_BASE_H -+ -+#include /* struct net_device */ -+#include /* struct bm_buffer */ -+#include /* struct platform_device */ -+#include /* struct hwtstamp_config */ -+ -+extern uint8_t advanced_debug; -+extern const struct dpa_fq_cbs_t shared_fq_cbs; -+extern int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev); -+ -+struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */ -+dpa_bp_probe(struct platform_device *_of_dev, size_t *count); -+int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp, -+ size_t count); -+int dpa_bp_shared_port_seed(struct dpa_bp *bp); -+ -+#endif /* __DPAA_ETH_BASE_H */ ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c -@@ -0,0 +1,1719 @@ -+/* Copyright 2008-2016 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
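The fsl,bpool-ethernet-cfg parsing in dpa_bp_probe() above reads 2 * ns + na big-endian 32-bit cells: the buffer count (ns cells), the buffer size (ns cells) and the pool's base address (na cells), where na/ns come from the root node's #address-cells/#size-cells. of_read_number() simply folds consecutive cells into a 64-bit value. A rough user-space model of that folding, with an invented property payload (of_read_number() and the of_n_*_cells() helpers are kernel OF APIs):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h> /* ntohl(); device-tree cells are big-endian */

/* Model of of_read_number(): fold 'count' 32-bit BE cells into a u64 */
static uint64_t read_number(const uint32_t *cells, int count)
{
	uint64_t r = 0;
	while (count--)
		r = (r << 32) | ntohl(*cells++);
	return r;
}

int main(void)
{
	/* invented fsl,bpool-ethernet-cfg payload for #address-cells = 2,
	 * #size-cells = 2: <count (ns cells), size (ns cells), paddr (na cells)> */
	uint32_t prop[] = {
		htonl(0), htonl(128),        /* config_count = 128 */
		htonl(0), htonl(0x800),      /* size = 2048 bytes  */
		htonl(0), htonl(0x80000000), /* paddr = 0x80000000 */
	};
	int ns = 2, na = 2;

	printf("count=%llu size=%llu paddr=%#llx\n",
	       (unsigned long long)read_number(prop, ns),
	       (unsigned long long)read_number(prop + ns, ns),
	       (unsigned long long)read_number(prop + 2 * ns, na));
	return 0;
}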
-+ */ -+ -+#include -+#include "dpaa_eth_ceetm.h" -+ -+#define DPA_CEETM_DESCRIPTION "FSL DPAA CEETM qdisc" -+ -+const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1] = { -+ [TCA_CEETM_COPT] = { .len = sizeof(struct tc_ceetm_copt) }, -+ [TCA_CEETM_QOPS] = { .len = sizeof(struct tc_ceetm_qopt) }, -+}; -+ -+struct Qdisc_ops ceetm_qdisc_ops; -+ -+/* Obtain the DCP and the SP ids from the FMan port */ -+static void get_dcp_and_sp(struct net_device *dev, enum qm_dc_portal *dcp_id, -+ unsigned int *sp_id) -+{ -+ uint32_t channel; -+ t_LnxWrpFmPortDev *port_dev; -+ struct dpa_priv_s *dpa_priv = netdev_priv(dev); -+ struct mac_device *mac_dev = dpa_priv->mac_dev; -+ -+ port_dev = (t_LnxWrpFmPortDev *)mac_dev->port_dev[TX]; -+ channel = port_dev->txCh; -+ -+ *sp_id = channel & CHANNEL_SP_MASK; -+ pr_debug(KBUILD_BASENAME " : FM sub-portal ID %d\n", *sp_id); -+ -+ if (channel < DCP0_MAX_CHANNEL) { -+ *dcp_id = qm_dc_portal_fman0; -+ pr_debug(KBUILD_BASENAME " : DCP ID 0\n"); -+ } else { -+ *dcp_id = qm_dc_portal_fman1; -+ pr_debug(KBUILD_BASENAME " : DCP ID 1\n"); -+ } -+} -+ -+/* Enqueue Rejection Notification callback */ -+static void ceetm_ern(struct qman_portal *portal, struct qman_fq *fq, -+ const struct qm_mr_entry *msg) -+{ -+ struct net_device *net_dev; -+ struct ceetm_class *cls; -+ struct ceetm_class_stats *cstats = NULL; -+ const struct dpa_priv_s *dpa_priv; -+ struct dpa_percpu_priv_s *dpa_percpu_priv; -+ struct sk_buff *skb; -+ struct qm_fd fd = msg->ern.fd; -+ -+ net_dev = ((struct ceetm_fq *)fq)->net_dev; -+ dpa_priv = netdev_priv(net_dev); -+ dpa_percpu_priv = raw_cpu_ptr(dpa_priv->percpu_priv); -+ -+ /* Increment DPA counters */ -+ dpa_percpu_priv->stats.tx_dropped++; -+ dpa_percpu_priv->stats.tx_fifo_errors++; -+ -+ /* Increment CEETM counters */ -+ cls = ((struct ceetm_fq *)fq)->ceetm_cls; -+ switch (cls->type) { -+ case CEETM_PRIO: -+ cstats = this_cpu_ptr(cls->prio.cstats); -+ break; -+ case CEETM_WBFS: -+ cstats = this_cpu_ptr(cls->wbfs.cstats); -+ break; -+ } -+ -+ if (cstats) -+ cstats->ern_drop_count++; -+ -+ if (fd.bpid != 0xff) { -+ dpa_fd_release(net_dev, &fd); -+ return; -+ } -+ -+ skb = _dpa_cleanup_tx_fd(dpa_priv, &fd); -+ dev_kfree_skb_any(skb); -+} -+ -+/* Congestion State Change Notification callback */ -+static void ceetm_cscn(struct qm_ceetm_ccg *ccg, void *cb_ctx, int congested) -+{ -+ struct ceetm_fq *ceetm_fq = (struct ceetm_fq *)cb_ctx; -+ struct dpa_priv_s *dpa_priv = netdev_priv(ceetm_fq->net_dev); -+ struct ceetm_class *cls = ceetm_fq->ceetm_cls; -+ struct ceetm_class_stats *cstats = NULL; -+ -+ switch (cls->type) { -+ case CEETM_PRIO: -+ cstats = this_cpu_ptr(cls->prio.cstats); -+ break; -+ case CEETM_WBFS: -+ cstats = this_cpu_ptr(cls->wbfs.cstats); -+ break; -+ } -+ -+ if (congested) { -+ dpa_priv->cgr_data.congestion_start_jiffies = jiffies; -+ netif_tx_stop_all_queues(dpa_priv->net_dev); -+ dpa_priv->cgr_data.cgr_congested_count++; -+ if (cstats) -+ cstats->cgr_congested_count++; -+ } else { -+ dpa_priv->cgr_data.congested_jiffies += -+ (jiffies - dpa_priv->cgr_data.congestion_start_jiffies); -+ netif_tx_wake_all_queues(dpa_priv->net_dev); -+ } -+} -+ -+/* Allocate a ceetm fq */ -+static int ceetm_alloc_fq(struct ceetm_fq **fq, -+ struct net_device *dev, -+ struct ceetm_class *cls) -+{ -+ *fq = kzalloc(sizeof(**fq), GFP_KERNEL); -+ if (!*fq) -+ return -ENOMEM; -+ -+ (*fq)->net_dev = dev; -+ (*fq)->ceetm_cls = cls; -+ return 0; -+} -+ -+/* Configure a ceetm Class Congestion Group */ -+static int ceetm_config_ccg(struct qm_ceetm_ccg **ccg, -+ struct 
qm_ceetm_channel *channel, -+ unsigned int id, -+ struct ceetm_fq *fq, -+ u32 if_support) -+{ -+ int err; -+ u32 cs_th; -+ u16 ccg_mask; -+ struct qm_ceetm_ccg_params ccg_params; -+ -+ err = qman_ceetm_ccg_claim(ccg, channel, id, ceetm_cscn, fq); -+ if (err) -+ return err; -+ -+ /* Configure the count mode (frames/bytes), enable -+ * notifications, enable tail-drop, and configure the tail-drop -+ * mode and threshold */ -+ ccg_mask = QM_CCGR_WE_MODE | QM_CCGR_WE_CSCN_EN | -+ QM_CCGR_WE_TD_EN | QM_CCGR_WE_TD_MODE | -+ QM_CCGR_WE_TD_THRES; -+ -+ ccg_params.mode = 0; /* count bytes */ -+ ccg_params.cscn_en = 1; /* generate notifications */ -+ ccg_params.td_en = 1; /* enable tail-drop */ -+ ccg_params.td_mode = 1; /* tail-drop on threshold */ -+ -+ /* Configure the tail-drop threshold according to the link -+ * speed */ -+ if (if_support & SUPPORTED_10000baseT_Full) -+ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G; -+ else -+ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G; -+ qm_cgr_cs_thres_set64(&ccg_params.td_thres, cs_th, 1); -+ -+ err = qman_ceetm_ccg_set(*ccg, ccg_mask, &ccg_params); -+ if (err) -+ return err; -+ -+ return 0; -+} -+ -+/* Configure a ceetm Logical Frame Queue */ -+static int ceetm_config_lfq(struct qm_ceetm_cq *cq, struct ceetm_fq *fq, -+ struct qm_ceetm_lfq **lfq) -+{ -+ int err; -+ u64 context_a; -+ u32 context_b; -+ -+ err = qman_ceetm_lfq_claim(lfq, cq); -+ if (err) -+ return err; -+ -+ /* Get the former contexts in order to preserve context B */ -+ err = qman_ceetm_lfq_get_context(*lfq, &context_a, &context_b); -+ if (err) -+ return err; -+ -+ context_a = CEETM_CONTEXT_A; -+ err = qman_ceetm_lfq_set_context(*lfq, context_a, context_b); -+ if (err) -+ return err; -+ -+ (*lfq)->ern = ceetm_ern; -+ -+ err = qman_ceetm_create_fq(*lfq, &fq->fq); -+ if (err) -+ return err; -+ -+ return 0; -+} -+ -+/* Configure a prio ceetm class */ -+static int ceetm_config_prio_cls(struct ceetm_class *cls, struct net_device *dev, -+ struct qm_ceetm_channel *channel, unsigned int id) -+{ -+ int err; -+ struct dpa_priv_s *dpa_priv = netdev_priv(dev); -+ -+ err = ceetm_alloc_fq(&cls->prio.fq, dev, cls); -+ if (err) -+ return err; -+ -+ /* Claim and configure the CCG */ -+ err = ceetm_config_ccg(&cls->prio.ccg, channel, id, cls->prio.fq, -+ dpa_priv->mac_dev->if_support); -+ if (err) -+ return err; -+ -+ /* Claim and configure the CQ */ -+ err = qman_ceetm_cq_claim(&cls->prio.cq, channel, id, cls->prio.ccg); -+ if (err) -+ return err; -+ -+ if (cls->shaped) { -+ err = qman_ceetm_channel_set_cq_cr_eligibility(channel, id, 1); -+ if (err) -+ return err; -+ -+ err = qman_ceetm_channel_set_cq_er_eligibility(channel, id, 1); -+ if (err) -+ return err; -+ } -+ -+ /* Claim and configure a LFQ */ -+ err = ceetm_config_lfq(cls->prio.cq, cls->prio.fq, &cls->prio.lfq); -+ if (err) -+ return err; -+ -+ return 0; -+} -+ -+/* Configure a wbfs ceetm class */ -+static int ceetm_config_wbfs_cls(struct ceetm_class *cls, struct net_device *dev, -+ struct qm_ceetm_channel *channel, unsigned int id, int type) -+{ -+ int err; -+ struct dpa_priv_s *dpa_priv = netdev_priv(dev); -+ -+ err = ceetm_alloc_fq(&cls->wbfs.fq, dev, cls); -+ if (err) -+ return err; -+ -+ /* Claim and configure the CCG */ -+ err = ceetm_config_ccg(&cls->wbfs.ccg, channel, id, cls->wbfs.fq, -+ dpa_priv->mac_dev->if_support); -+ if (err) -+ return err; -+ -+ /* Claim and configure the CQ */ -+ if (type == WBFS_GRP_B) -+ err = qman_ceetm_cq_claim_B(&cls->wbfs.cq, channel, id, -+ cls->wbfs.ccg); -+ else -+ err = qman_ceetm_cq_claim_A(&cls->wbfs.cq, channel, 
id, -+ cls->wbfs.ccg); -+ if (err) -+ return err; -+ -+ /* Configure the CQ weight: real number multiplied by 100 to get rid -+ * of the fraction */ -+ err = qman_ceetm_set_queue_weight_in_ratio(cls->wbfs.cq, -+ cls->wbfs.weight * 100); -+ if (err) -+ return err; -+ -+ /* Claim and configure a LFQ */ -+ err = ceetm_config_lfq(cls->wbfs.cq, cls->wbfs.fq, &cls->wbfs.lfq); -+ if (err) -+ return err; -+ -+ return 0; -+} -+ -+/* Find class in qdisc hash table using a given handle */ -+static inline struct ceetm_class *ceetm_find(u32 handle, struct Qdisc *sch) -+{ -+ struct ceetm_qdisc *priv = qdisc_priv(sch); -+ struct Qdisc_class_common *clc; -+ -+ pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n", -+ __func__, handle, sch->handle); -+ -+ clc = qdisc_class_find(&priv->clhash, handle); -+ return clc ? container_of(clc, struct ceetm_class, common) : NULL; -+} -+ -+/* Insert a class in the qdisc's class hash */ -+static void ceetm_link_class(struct Qdisc *sch, -+ struct Qdisc_class_hash *clhash, -+ struct Qdisc_class_common *common) -+{ -+ sch_tree_lock(sch); -+ qdisc_class_hash_insert(clhash, common); -+ sch_tree_unlock(sch); -+ qdisc_class_hash_grow(sch, clhash); -+} -+ -+/* Destroy a ceetm class */ -+static void ceetm_cls_destroy(struct Qdisc *sch, struct ceetm_class *cl) -+{ -+ if (!cl) -+ return; -+ -+ pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n", -+ __func__, cl->common.classid, sch->handle); -+ -+ switch (cl->type) { -+ case CEETM_ROOT: -+ if (cl->root.child) { -+ qdisc_destroy(cl->root.child); -+ cl->root.child = NULL; -+ } -+ -+ if (cl->root.ch && qman_ceetm_channel_release(cl->root.ch)) -+ pr_err(KBUILD_BASENAME -+ " : %s : error releasing the channel %d\n", -+ __func__, cl->root.ch->idx); -+ -+ break; -+ -+ case CEETM_PRIO: -+ if (cl->prio.child) { -+ qdisc_destroy(cl->prio.child); -+ cl->prio.child = NULL; -+ } -+ -+ if (cl->prio.lfq && qman_ceetm_lfq_release(cl->prio.lfq)) -+ pr_err(KBUILD_BASENAME -+ " : %s : error releasing the LFQ %d\n", -+ __func__, cl->prio.lfq->idx); -+ -+ if (cl->prio.cq && qman_ceetm_cq_release(cl->prio.cq)) -+ pr_err(KBUILD_BASENAME -+ " : %s : error releasing the CQ %d\n", -+ __func__, cl->prio.cq->idx); -+ -+ if (cl->prio.ccg && qman_ceetm_ccg_release(cl->prio.ccg)) -+ pr_err(KBUILD_BASENAME -+ " : %s : error releasing the CCG %d\n", -+ __func__, cl->prio.ccg->idx); -+ -+ if (cl->prio.fq) -+ kfree(cl->prio.fq); -+ -+ if (cl->prio.cstats) -+ free_percpu(cl->prio.cstats); -+ -+ break; -+ -+ case CEETM_WBFS: -+ if (cl->wbfs.lfq && qman_ceetm_lfq_release(cl->wbfs.lfq)) -+ pr_err(KBUILD_BASENAME -+ " : %s : error releasing the LFQ %d\n", -+ __func__, cl->wbfs.lfq->idx); -+ -+ if (cl->wbfs.cq && qman_ceetm_cq_release(cl->wbfs.cq)) -+ pr_err(KBUILD_BASENAME -+ " : %s : error releasing the CQ %d\n", -+ __func__, cl->wbfs.cq->idx); -+ -+ if (cl->wbfs.ccg && qman_ceetm_ccg_release(cl->wbfs.ccg)) -+ pr_err(KBUILD_BASENAME -+ " : %s : error releasing the CCG %d\n", -+ __func__, cl->wbfs.ccg->idx); -+ -+ if (cl->wbfs.fq) -+ kfree(cl->wbfs.fq); -+ -+ if (cl->wbfs.cstats) -+ free_percpu(cl->wbfs.cstats); -+ } -+ -+ tcf_destroy_chain(&cl->filter_list); -+ kfree(cl); -+} -+ -+/* Destroy a ceetm qdisc */ -+static void ceetm_destroy(struct Qdisc *sch) -+{ -+ unsigned int ntx, i; -+ struct hlist_node *next; -+ struct ceetm_class *cl; -+ struct ceetm_qdisc *priv = qdisc_priv(sch); -+ struct net_device *dev = qdisc_dev(sch); -+ -+ pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n", -+ __func__, sch->handle); -+ -+ /* All filters need to be
removed before destroying the classes */ -+ tcf_destroy_chain(&priv->filter_list); -+ -+ for (i = 0; i < priv->clhash.hashsize; i++) { -+ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) -+ tcf_destroy_chain(&cl->filter_list); -+ } -+ -+ for (i = 0; i < priv->clhash.hashsize; i++) { -+ hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i], -+ common.hnode) -+ ceetm_cls_destroy(sch, cl); -+ } -+ -+ qdisc_class_hash_destroy(&priv->clhash); -+ -+ switch (priv->type) { -+ case CEETM_ROOT: -+ dpa_disable_ceetm(dev); -+ -+ if (priv->root.lni && qman_ceetm_lni_release(priv->root.lni)) -+ pr_err(KBUILD_BASENAME -+ " : %s : error releasing the LNI %d\n", -+ __func__, priv->root.lni->idx); -+ -+ if (priv->root.sp && qman_ceetm_sp_release(priv->root.sp)) -+ pr_err(KBUILD_BASENAME -+ " : %s : error releasing the SP %d\n", -+ __func__, priv->root.sp->idx); -+ -+ if (priv->root.qstats) -+ free_percpu(priv->root.qstats); -+ -+ if (!priv->root.qdiscs) -+ break; -+ -+ /* Remove the pfifo qdiscs */ -+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) -+ if (priv->root.qdiscs[ntx]) -+ qdisc_destroy(priv->root.qdiscs[ntx]); -+ -+ kfree(priv->root.qdiscs); -+ break; -+ -+ case CEETM_PRIO: -+ if (priv->prio.parent) -+ priv->prio.parent->root.child = NULL; -+ break; -+ -+ case CEETM_WBFS: -+ if (priv->wbfs.parent) -+ priv->wbfs.parent->prio.child = NULL; -+ break; -+ } -+} -+ -+static int ceetm_dump(struct Qdisc *sch, struct sk_buff *skb) -+{ -+ struct Qdisc *qdisc; -+ unsigned int ntx, i; -+ struct nlattr *nest; -+ struct tc_ceetm_qopt qopt; -+ struct ceetm_qdisc_stats *qstats; -+ struct net_device *dev = qdisc_dev(sch); -+ struct ceetm_qdisc *priv = qdisc_priv(sch); -+ -+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle); -+ -+ sch_tree_lock(sch); -+ memset(&qopt, 0, sizeof(qopt)); -+ qopt.type = priv->type; -+ qopt.shaped = priv->shaped; -+ -+ switch (priv->type) { -+ case CEETM_ROOT: -+ /* Gather statistics from the underlying pfifo qdiscs */ -+ sch->q.qlen = 0; -+ memset(&sch->bstats, 0, sizeof(sch->bstats)); -+ memset(&sch->qstats, 0, sizeof(sch->qstats)); -+ -+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { -+ qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping; -+ sch->q.qlen += qdisc->q.qlen; -+ sch->bstats.bytes += qdisc->bstats.bytes; -+ sch->bstats.packets += qdisc->bstats.packets; -+ sch->qstats.qlen += qdisc->qstats.qlen; -+ sch->qstats.backlog += qdisc->qstats.backlog; -+ sch->qstats.drops += qdisc->qstats.drops; -+ sch->qstats.requeues += qdisc->qstats.requeues; -+ sch->qstats.overlimits += qdisc->qstats.overlimits; -+ } -+ -+ for_each_online_cpu(i) { -+ qstats = per_cpu_ptr(priv->root.qstats, i); -+ sch->qstats.drops += qstats->drops; -+ } -+ -+ qopt.rate = priv->root.rate; -+ qopt.ceil = priv->root.ceil; -+ qopt.overhead = priv->root.overhead; -+ break; -+ -+ case CEETM_PRIO: -+ qopt.qcount = priv->prio.qcount; -+ break; -+ -+ case CEETM_WBFS: -+ qopt.qcount = priv->wbfs.qcount; -+ qopt.cr = priv->wbfs.cr; -+ qopt.er = priv->wbfs.er; -+ break; -+ -+ default: -+ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__); -+ sch_tree_unlock(sch); -+ return -EINVAL; -+ } -+ -+ nest = nla_nest_start(skb, TCA_OPTIONS); -+ if (nest == NULL) -+ goto nla_put_failure; -+ if (nla_put(skb, TCA_CEETM_QOPS, sizeof(qopt), &qopt)) -+ goto nla_put_failure; -+ nla_nest_end(skb, nest); -+ -+ sch_tree_unlock(sch); -+ return skb->len; -+ -+nla_put_failure: -+ sch_tree_unlock(sch); -+ nla_nest_cancel(skb, nest); -+ return -EMSGSIZE; -+} -+ -+/* Configure a root ceetm qdisc */ 
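Before the root-qdisc init below, it may help to unpack the TC handle arithmetic this file relies on: a handle is a 32-bit value whose upper 16 bits are the major (qdisc) id and whose lower 16 bits are the minor (class) id. Per the comment in dpaa_eth_ceetm.h, the underlying pfifo qdiscs get minors from PFIFO_MIN_OFFSET (0x21) upward, leaving minors 0x01-0x20 for CEETM channel classes. A small sketch using the standard TC_H_* helpers from <linux/pkt_sched.h> (the root handle 1:0 is an invented example):

#include <stdio.h>
#include <stdint.h>

/* Handle helpers as defined in <linux/pkt_sched.h> */
#define TC_H_MAJ(h)  ((h) & 0xFFFF0000U)
#define TC_H_MIN(h)  ((h) & 0x0000FFFFU)
#define TC_H_MAKE(maj, min)  (((maj) & 0xFFFF0000U) | ((min) & 0x0000FFFFU))

#define PFIFO_MIN_OFFSET 0x21

int main(void)
{
	uint32_t sch = 0x00010000; /* root qdisc "1:" */
	unsigned i;

	/* ceetm_init_root() style: pfifo i gets handle 1:(0x21 + i) */
	for (i = 0; i < 3; i++)
		printf("pfifo %u -> %x:%x\n", i, TC_H_MAJ(sch) >> 16,
		       TC_H_MIN(TC_H_MAKE(TC_H_MAJ(sch), i + PFIFO_MIN_OFFSET)));

	/* ceetm_init_prio() style: child class i gets handle <major>:(i + 1) */
	printf("first prio class -> %x:%x\n", TC_H_MAJ(sch) >> 16,
	       TC_H_MIN(TC_H_MAKE(sch, 1)));
	return 0;
}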
-+static int ceetm_init_root(struct Qdisc *sch, struct ceetm_qdisc *priv, -+ struct tc_ceetm_qopt *qopt) -+{ -+ struct netdev_queue *dev_queue; -+ struct Qdisc *qdisc; -+ enum qm_dc_portal dcp_id; -+ unsigned int i, sp_id; -+ int err; -+ u64 bps; -+ struct qm_ceetm_sp *sp; -+ struct qm_ceetm_lni *lni; -+ struct net_device *dev = qdisc_dev(sch); -+ struct dpa_priv_s *dpa_priv = netdev_priv(dev); -+ struct mac_device *mac_dev = dpa_priv->mac_dev; -+ -+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle); -+ -+ /* Validate inputs */ -+ if (sch->parent != TC_H_ROOT) { -+ pr_err("CEETM: a root ceetm qdisc can not be attached to a class\n"); -+ tcf_destroy_chain(&priv->filter_list); -+ qdisc_class_hash_destroy(&priv->clhash); -+ return -EINVAL; -+ } -+ -+ if (!mac_dev) { -+ pr_err("CEETM: the interface is lacking a mac\n"); -+ err = -EINVAL; -+ goto err_init_root; -+ } -+ -+ /* pre-allocate underlying pfifo qdiscs */ -+ priv->root.qdiscs = kcalloc(dev->num_tx_queues, -+ sizeof(priv->root.qdiscs[0]), -+ GFP_KERNEL); -+ if (priv->root.qdiscs == NULL) { -+ err = -ENOMEM; -+ goto err_init_root; -+ } -+ -+ for (i = 0; i < dev->num_tx_queues; i++) { -+ dev_queue = netdev_get_tx_queue(dev, i); -+ qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops, -+ TC_H_MAKE(TC_H_MAJ(sch->handle), -+ TC_H_MIN(i + PFIFO_MIN_OFFSET))); -+ if (qdisc == NULL) { -+ err = -ENOMEM; -+ goto err_init_root; -+ } -+ -+ priv->root.qdiscs[i] = qdisc; -+ qdisc->flags |= TCQ_F_ONETXQUEUE; -+ } -+ -+ sch->flags |= TCQ_F_MQROOT; -+ -+ priv->root.qstats = alloc_percpu(struct ceetm_qdisc_stats); -+ if (!priv->root.qstats) { -+ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n", -+ __func__); -+ err = -ENOMEM; -+ goto err_init_root; -+ } -+ -+ priv->shaped = qopt->shaped; -+ priv->root.rate = qopt->rate; -+ priv->root.ceil = qopt->ceil; -+ priv->root.overhead = qopt->overhead; -+ -+ /* Claim the SP */ -+ get_dcp_and_sp(dev, &dcp_id, &sp_id); -+ err = qman_ceetm_sp_claim(&sp, dcp_id, sp_id); -+ if (err) { -+ pr_err(KBUILD_BASENAME " : %s : failed to claim the SP\n", -+ __func__); -+ goto err_init_root; -+ } -+ -+ priv->root.sp = sp; -+ -+ /* Claim the LNI - will use the same id as the SP id since SPs 0-7 -+ * are connected to the TX FMan ports */ -+ err = qman_ceetm_lni_claim(&lni, dcp_id, sp_id); -+ if (err) { -+ pr_err(KBUILD_BASENAME " : %s : failed to claim the LNI\n", -+ __func__); -+ goto err_init_root; -+ } -+ -+ priv->root.lni = lni; -+ -+ err = qman_ceetm_sp_set_lni(sp, lni); -+ if (err) { -+ pr_err(KBUILD_BASENAME " : %s : failed to link the SP and " -+ "LNI\n", __func__); -+ goto err_init_root; -+ } -+ -+ lni->sp = sp; -+ -+ /* Configure the LNI shaper */ -+ if (priv->shaped) { -+ err = qman_ceetm_lni_enable_shaper(lni, 1, priv->root.overhead); -+ if (err) { -+ pr_err(KBUILD_BASENAME " : %s : failed to configure " -+ "the LNI shaper\n", __func__); -+ goto err_init_root; -+ } -+ -+ bps = priv->root.rate << 3; /* Bps -> bps */ -+ err = qman_ceetm_lni_set_commit_rate_bps(lni, bps, dev->mtu); -+ if (err) { -+ pr_err(KBUILD_BASENAME " : %s : failed to configure " -+ "the LNI shaper\n", __func__); -+ goto err_init_root; -+ } -+ -+ bps = priv->root.ceil << 3; /* Bps -> bps */ -+ err = qman_ceetm_lni_set_excess_rate_bps(lni, bps, dev->mtu); -+ if (err) { -+ pr_err(KBUILD_BASENAME " : %s : failed to configure " -+ "the LNI shaper\n", __func__); -+ goto err_init_root; -+ } -+ } -+ -+ /* TODO default configuration */ -+ -+ dpa_enable_ceetm(dev); -+ return 0; -+ -+err_init_root: -+ ceetm_destroy(sch); -+ return 
err; -+} -+ -+/* Configure a prio ceetm qdisc */ -+static int ceetm_init_prio(struct Qdisc *sch, struct ceetm_qdisc *priv, -+ struct tc_ceetm_qopt *qopt) -+{ -+ int err; -+ unsigned int i; -+ struct ceetm_class *parent_cl, *child_cl; -+ struct Qdisc *parent_qdisc; -+ struct net_device *dev = qdisc_dev(sch); -+ -+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle); -+ -+ if (sch->parent == TC_H_ROOT) { -+ pr_err("CEETM: a prio ceetm qdisc can not be root\n"); -+ err = -EINVAL; -+ goto err_init_prio; -+ } -+ -+ parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent)); -+ if (strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) { -+ pr_err("CEETM: a ceetm qdisc can not be attached to other " -+ "qdisc/class types\n"); -+ err = -EINVAL; -+ goto err_init_prio; -+ } -+ -+ /* Obtain the parent root ceetm_class */ -+ parent_cl = ceetm_find(sch->parent, parent_qdisc); -+ -+ if (!parent_cl || parent_cl->type != CEETM_ROOT) { -+ pr_err("CEETM: a prio ceetm qdiscs can be added only under a " -+ "root ceetm class\n"); -+ err = -EINVAL; -+ goto err_init_prio; -+ } -+ -+ priv->prio.parent = parent_cl; -+ parent_cl->root.child = sch; -+ -+ priv->shaped = parent_cl->shaped; -+ priv->prio.qcount = qopt->qcount; -+ -+ /* Create and configure qcount child classes */ -+ for (i = 0; i < priv->prio.qcount; i++) { -+ child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL); -+ if (!child_cl) { -+ pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n", -+ __func__); -+ err = -ENOMEM; -+ goto err_init_prio; -+ } -+ -+ child_cl->prio.cstats = alloc_percpu(struct ceetm_class_stats); -+ if (!child_cl->prio.cstats) { -+ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n", -+ __func__); -+ err = -ENOMEM; -+ goto err_init_prio_cls; -+ } -+ -+ child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1)); -+ child_cl->refcnt = 1; -+ child_cl->parent = sch; -+ child_cl->type = CEETM_PRIO; -+ child_cl->shaped = priv->shaped; -+ child_cl->prio.child = NULL; -+ -+ /* All shaped CQs have CR and ER enabled by default */ -+ child_cl->prio.cr = child_cl->shaped; -+ child_cl->prio.er = child_cl->shaped; -+ child_cl->prio.fq = NULL; -+ child_cl->prio.cq = NULL; -+ -+ /* Configure the corresponding hardware CQ */ -+ err = ceetm_config_prio_cls(child_cl, dev, -+ parent_cl->root.ch, i); -+ if (err) { -+ pr_err(KBUILD_BASENAME " : %s : failed to configure " -+ "the ceetm prio class %X\n", -+ __func__, -+ child_cl->common.classid); -+ goto err_init_prio_cls; -+ } -+ -+ /* Add class handle in Qdisc */ -+ ceetm_link_class(sch, &priv->clhash, &child_cl->common); -+ pr_debug(KBUILD_BASENAME " : %s : added ceetm prio class %X " -+ "associated with CQ %d and CCG %d\n", -+ __func__, -+ child_cl->common.classid, -+ child_cl->prio.cq->idx, -+ child_cl->prio.ccg->idx); -+ } -+ -+ return 0; -+ -+err_init_prio_cls: -+ ceetm_cls_destroy(sch, child_cl); -+err_init_prio: -+ ceetm_destroy(sch); -+ return err; -+} -+ -+/* Configure a wbfs ceetm qdisc */ -+static int ceetm_init_wbfs(struct Qdisc *sch, struct ceetm_qdisc *priv, -+ struct tc_ceetm_qopt *qopt) -+{ -+ int err, group_b, small_group; -+ unsigned int i, id, prio_a, prio_b; -+ struct ceetm_class *parent_cl, *child_cl, *root_cl; -+ struct Qdisc *parent_qdisc; -+ struct ceetm_qdisc *parent_priv; -+ struct qm_ceetm_channel *channel; -+ struct net_device *dev = qdisc_dev(sch); -+ -+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle); -+ -+ /* Validate inputs */ -+ if (sch->parent == TC_H_ROOT) { -+ pr_err("CEETM: a wbfs ceetm qdiscs can not be root\n"); -+ err = -EINVAL; 
-+ goto err_init_wbfs; -+ } -+ -+ /* Obtain the parent prio ceetm qdisc */ -+ parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent)); -+ if (strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) { -+ pr_err("CEETM: a ceetm qdisc can not be attached to other " -+ "qdisc/class types\n"); -+ err = -EINVAL; -+ goto err_init_wbfs; -+ } -+ -+ /* Obtain the parent prio ceetm class */ -+ parent_cl = ceetm_find(sch->parent, parent_qdisc); -+ parent_priv = qdisc_priv(parent_qdisc); -+ -+ if (!parent_cl || parent_cl->type != CEETM_PRIO) { -+ pr_err("CEETM: a wbfs ceetm qdiscs can be added only under a " -+ "prio ceetm class\n"); -+ err = -EINVAL; -+ goto err_init_wbfs; -+ } -+ -+ priv->shaped = parent_cl->shaped; -+ -+ if (!priv->shaped && (qopt->cr || qopt->er)) { -+ pr_err("CEETM: CR/ER can be enabled only for shaped wbfs " -+ "ceetm qdiscs\n"); -+ err = -EINVAL; -+ goto err_init_wbfs; -+ } -+ -+ if (priv->shaped && !(qopt->cr || qopt->er)) { -+ pr_err("CEETM: either CR or ER must be enabled for shaped " -+ "wbfs ceetm qdiscs\n"); -+ err = -EINVAL; -+ goto err_init_wbfs; -+ } -+ -+ /* Obtain the parent root ceetm class */ -+ root_cl = parent_priv->prio.parent; -+ if ((root_cl->root.wbfs_grp_a && root_cl->root.wbfs_grp_b) -+ || root_cl->root.wbfs_grp_large) { -+ pr_err("CEETM: no more wbfs classes are available\n"); -+ err = -EINVAL; -+ goto err_init_wbfs; -+ } -+ -+ if ((root_cl->root.wbfs_grp_a || root_cl->root.wbfs_grp_b) -+ && qopt->qcount == CEETM_MAX_WBFS_QCOUNT) { -+ pr_err("CEETM: only %d wbfs classes are available\n", -+ CEETM_MIN_WBFS_QCOUNT); -+ err = -EINVAL; -+ goto err_init_wbfs; -+ } -+ -+ priv->wbfs.parent = parent_cl; -+ parent_cl->prio.child = sch; -+ -+ priv->wbfs.qcount = qopt->qcount; -+ priv->wbfs.cr = qopt->cr; -+ priv->wbfs.er = qopt->er; -+ -+ channel = root_cl->root.ch; -+ -+ /* Configure the hardware wbfs channel groups */ -+ if (priv->wbfs.qcount == CEETM_MAX_WBFS_QCOUNT) { -+ /* Configure the large group A */ -+ priv->wbfs.group_type = WBFS_GRP_LARGE; -+ small_group = false; -+ group_b = false; -+ prio_a = TC_H_MIN(parent_cl->common.classid) - 1; -+ prio_b = prio_a; -+ -+ } else if (root_cl->root.wbfs_grp_a) { -+ /* Configure the group B */ -+ priv->wbfs.group_type = WBFS_GRP_B; -+ -+ err = qman_ceetm_channel_get_group(channel, &small_group, -+ &prio_a, &prio_b); -+ if (err) { -+ pr_err(KBUILD_BASENAME " : %s : failed to get group " -+ "details\n", __func__); -+ goto err_init_wbfs; -+ } -+ -+ small_group = true; -+ group_b = true; -+ prio_b = TC_H_MIN(parent_cl->common.classid) - 1; -+ /* If group A isn't configured, configure it as group B */ -+ prio_a = prio_a ? : prio_b; -+ -+ } else { -+ /* Configure the small group A */ -+ priv->wbfs.group_type = WBFS_GRP_A; -+ -+ err = qman_ceetm_channel_get_group(channel, &small_group, -+ &prio_a, &prio_b); -+ if (err) { -+ pr_err(KBUILD_BASENAME " : %s : failed to get group " -+ "details\n", __func__); -+ goto err_init_wbfs; -+ } -+ -+ small_group = true; -+ group_b = false; -+ prio_a = TC_H_MIN(parent_cl->common.classid) - 1; -+ /* If group B isn't configured, configure it as group A */ -+ prio_b = prio_b ? 
: prio_a; -+ } -+ -+ err = qman_ceetm_channel_set_group(channel, small_group, prio_a, prio_b); -+ if (err) -+ goto err_init_wbfs; -+ -+ if (priv->shaped) { -+ err = qman_ceetm_channel_set_group_cr_eligibility(channel, -+ group_b, -+ priv->wbfs.cr); -+ if (err) { -+ pr_err(KBUILD_BASENAME " : %s : failed to set group " -+ "CR eligibility\n", __func__); -+ goto err_init_wbfs; -+ } -+ -+ err = qman_ceetm_channel_set_group_er_eligibility(channel, -+ group_b, -+ priv->wbfs.er); -+ if (err) { -+ pr_err(KBUILD_BASENAME " : %s : failed to set group " -+ "ER eligibility\n", __func__); -+ goto err_init_wbfs; -+ } -+ } -+ -+ /* Create qcount child classes */ -+ for (i = 0; i < priv->wbfs.qcount; i++) { -+ child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL); -+ if (!child_cl) { -+ pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n", -+ __func__); -+ err = -ENOMEM; -+ goto err_init_wbfs; -+ } -+ -+ child_cl->wbfs.cstats = alloc_percpu(struct ceetm_class_stats); -+ if (!child_cl->wbfs.cstats) { -+ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n", -+ __func__); -+ err = -ENOMEM; -+ goto err_init_wbfs_cls; -+ } -+ -+ child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1)); -+ child_cl->refcnt = 1; -+ child_cl->parent = sch; -+ child_cl->type = CEETM_WBFS; -+ child_cl->shaped = priv->shaped; -+ child_cl->wbfs.fq = NULL; -+ child_cl->wbfs.cq = NULL; -+ child_cl->wbfs.weight = qopt->qweight[i]; -+ -+ if (priv->wbfs.group_type == WBFS_GRP_B) -+ id = WBFS_GRP_B_OFFSET + i; -+ else -+ id = WBFS_GRP_A_OFFSET + i; -+ -+ err = ceetm_config_wbfs_cls(child_cl, dev, channel, id, -+ priv->wbfs.group_type); -+ if (err) { -+ pr_err(KBUILD_BASENAME " : %s : failed to configure " -+ "the ceetm wbfs class %X\n", -+ __func__, -+ child_cl->common.classid); -+ goto err_init_wbfs_cls; -+ } -+ -+ /* Add class handle in Qdisc */ -+ ceetm_link_class(sch, &priv->clhash, &child_cl->common); -+ pr_debug(KBUILD_BASENAME " : %s : added ceetm wbfs class %X " -+ "associated with CQ %d and CCG %d\n", -+ __func__, -+ child_cl->common.classid, -+ child_cl->wbfs.cq->idx, -+ child_cl->wbfs.ccg->idx); -+ } -+ -+ /* Signal the root class that a group has been configured */ -+ switch (priv->wbfs.group_type) { -+ case WBFS_GRP_LARGE: -+ root_cl->root.wbfs_grp_large = true; -+ break; -+ case WBFS_GRP_A: -+ root_cl->root.wbfs_grp_a = true; -+ break; -+ case WBFS_GRP_B: -+ root_cl->root.wbfs_grp_b = true; -+ break; -+ } -+ -+ return 0; -+ -+err_init_wbfs_cls: -+ ceetm_cls_destroy(sch, child_cl); -+err_init_wbfs: -+ ceetm_destroy(sch); -+ return err; -+} -+ -+/* Configure a generic ceetm qdisc */ -+static int ceetm_init(struct Qdisc *sch, struct nlattr *opt) -+{ -+ struct tc_ceetm_qopt *qopt; -+ struct nlattr *tb[TCA_CEETM_QOPS + 1]; -+ int ret; -+ struct ceetm_qdisc *priv = qdisc_priv(sch); -+ struct net_device *dev = qdisc_dev(sch); -+ -+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle); -+ -+ if (!netif_is_multiqueue(dev)) -+ return -EOPNOTSUPP; -+ -+ ret = nla_parse_nested(tb, TCA_CEETM_QOPS, opt, ceetm_policy); -+ if (ret < 0) { -+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__); -+ return ret; -+ } -+ -+ if (tb[TCA_CEETM_QOPS] == NULL) { -+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__); -+ return -EINVAL; -+ } -+ -+ if (TC_H_MIN(sch->handle)) { -+ pr_err("CEETM: a qdisc should not have a minor\n"); -+ return -EINVAL; -+ } -+ -+ qopt = nla_data(tb[TCA_CEETM_QOPS]); -+ -+ /* Initialize the class hash list. 
Each qdisc has its own class hash */ -+ ret = qdisc_class_hash_init(&priv->clhash); -+ if (ret < 0) { -+ pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init " -+ "failed\n", __func__); -+ return ret; -+ } -+ -+ priv->type = qopt->type; -+ -+ switch (priv->type) { -+ case CEETM_ROOT: -+ ret = ceetm_init_root(sch, priv, qopt); -+ break; -+ case CEETM_PRIO: -+ ret = ceetm_init_prio(sch, priv, qopt); -+ break; -+ case CEETM_WBFS: -+ ret = ceetm_init_wbfs(sch, priv, qopt); -+ break; -+ default: -+ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__); -+ ceetm_destroy(sch); -+ ret = -EINVAL; -+ } -+ -+ return ret; -+} -+ -+/* Attach the underlying pfifo qdiscs */ -+static void ceetm_attach(struct Qdisc *sch) -+{ -+ struct net_device *dev = qdisc_dev(sch); -+ struct ceetm_qdisc *priv = qdisc_priv(sch); -+ struct Qdisc *qdisc, *old_qdisc; -+ unsigned int i; -+ -+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle); -+ -+ for (i = 0; i < dev->num_tx_queues; i++) { -+ qdisc = priv->root.qdiscs[i]; -+ old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc); -+ if (old_qdisc) -+ qdisc_destroy(old_qdisc); -+ } -+} -+ -+static unsigned long ceetm_cls_get(struct Qdisc *sch, u32 classid) -+{ -+ struct ceetm_class *cl; -+ pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n", -+ __func__, classid, sch->handle); -+ cl = ceetm_find(classid, sch); -+ -+ if (cl) -+ cl->refcnt++; /* Will decrement in put() */ -+ return (unsigned long)cl; -+} -+ -+static void ceetm_cls_put(struct Qdisc *sch, unsigned long arg) -+{ -+ struct ceetm_class *cl = (struct ceetm_class *)arg; -+ pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n", -+ __func__, cl->common.classid, sch->handle); -+ cl->refcnt--; -+ -+ if (cl->refcnt == 0) -+ ceetm_cls_destroy(sch, cl); -+} -+ -+/* Add a ceetm root class or configure a ceetm prio class */ -+static int ceetm_cls_change(struct Qdisc *sch, u32 classid, -+ u32 parentid, struct nlattr **tca, -+ unsigned long *arg) -+{ -+ int err; -+ u64 bps; -+ struct ceetm_qdisc *priv; -+ struct ceetm_class *cl = (struct ceetm_class *)*arg; -+ struct nlattr *opt = tca[TCA_OPTIONS]; -+ struct nlattr *tb[__TCA_CEETM_MAX]; -+ struct tc_ceetm_copt *copt; -+ struct qm_ceetm_channel *channel; -+ struct net_device *dev = qdisc_dev(sch); -+ -+ pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n", -+ __func__, classid, sch->handle); -+ -+ if (strcmp(sch->ops->id, ceetm_qdisc_ops.id)) { -+ pr_err("CEETM: a ceetm class can not be attached to other " -+ "qdisc/class types\n"); -+ return -EINVAL; -+ } -+ -+ priv = qdisc_priv(sch); -+ -+ if (!opt) { -+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__); -+ return -EINVAL; -+ } -+ -+ if (!cl && sch->handle != parentid) { -+ pr_err("CEETM: classes can be attached to the root ceetm " -+ "qdisc only\n"); -+ return -EINVAL; -+ } -+ -+ if (!cl && priv->type != CEETM_ROOT) { -+ pr_err("CEETM: only root ceetm classes can be attached to the " -+ "root ceetm qdisc\n"); -+ return -EINVAL; -+ } -+ -+ err = nla_parse_nested(tb, TCA_CEETM_COPT, opt, ceetm_policy); -+ if (err < 0) { -+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__); -+ return -EINVAL; -+ } -+ -+ if (tb[TCA_CEETM_COPT] == NULL) { -+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__); -+ return -EINVAL; -+ } -+ -+ if (TC_H_MIN(classid) >= PFIFO_MIN_OFFSET) { -+ pr_err("CEETM: only minors 0x01 to 0x20 can be used for ceetm " -+ "root classes\n"); -+ return -EINVAL; -+ } -+ -+ copt = nla_data(tb[TCA_CEETM_COPT]); -+ -+ /* Configure an existing ceetm prio 
class */ -+ if (cl) { -+ if (copt->type != CEETM_PRIO) { -+ pr_err("CEETM: only prio ceetm classes can be changed\n"); -+ return -EINVAL; -+ } -+ -+ if (!cl->shaped && (copt->cr || copt->er)) { -+ pr_err("CEETM: only shaped classes can have CR and " -+ "ER enabled\n"); -+ return -EINVAL; -+ } -+ -+ if (cl->prio.cr != (bool)copt->cr) -+ err = qman_ceetm_channel_set_cq_cr_eligibility( -+ cl->prio.cq->parent, -+ cl->prio.cq->idx, -+ copt->cr); -+ -+ if (!err && cl->prio.er != (bool)copt->er) -+ err = qman_ceetm_channel_set_cq_er_eligibility( -+ cl->prio.cq->parent, -+ cl->prio.cq->idx, -+ copt->er); -+ -+ if (err) { -+ pr_err(KBUILD_BASENAME " : %s : failed to configure " -+ "the ceetm prio class %X\n", -+ __func__, -+ cl->common.classid); -+ return err; -+ } -+ -+ cl->prio.cr = copt->cr; -+ cl->prio.er = copt->er; -+ return 0; -+ } -+ -+ /* Add a new root ceetm class */ -+ if (copt->type != CEETM_ROOT) { -+ pr_err("CEETM: only root ceetm classes can be attached to the " -+ "root ceetm qdisc\n"); -+ return -EINVAL; -+ } -+ -+ if (copt->shaped && !priv->shaped) { -+ pr_err("CEETM: can not add a shaped ceetm root class under an " -+ "unshaped ceetm root qdisc\n"); -+ return -EINVAL; -+ } -+ -+ cl = kzalloc(sizeof(*cl), GFP_KERNEL); -+ if (!cl) { -+ pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n", __func__); -+ return -ENOMEM; -+ } -+ -+ cl->type = copt->type; -+ cl->shaped = copt->shaped; -+ cl->root.rate = copt->rate; -+ cl->root.ceil = copt->ceil; -+ cl->root.tbl = copt->tbl; -+ -+ cl->common.classid = classid; -+ cl->refcnt = 1; -+ cl->parent = sch; -+ cl->root.child = NULL; -+ cl->root.wbfs_grp_a = false; -+ cl->root.wbfs_grp_b = false; -+ cl->root.wbfs_grp_large = false; -+ -+ /* Claim a CEETM channel */ -+ err = qman_ceetm_channel_claim(&channel, priv->root.lni); -+ if (err) { -+ pr_err(KBUILD_BASENAME " : %s : failed to claim a channel\n", -+ __func__); -+ goto claim_err; -+ } -+ -+ cl->root.ch = channel; -+ -+ if (cl->shaped) { -+ /* Configure the channel shaper */ -+ err = qman_ceetm_channel_enable_shaper(channel, 1); -+ if (err) -+ goto channel_err; -+ -+ bps = cl->root.rate << 3; /* Bps -> bps */ -+ err = qman_ceetm_channel_set_commit_rate_bps(channel, bps, -+ dev->mtu); -+ if (err) -+ goto channel_err; -+ -+ bps = cl->root.ceil << 3; /* Bps -> bps */ -+ err = qman_ceetm_channel_set_excess_rate_bps(channel, bps, -+ dev->mtu); -+ if (err) -+ goto channel_err; -+ -+ } else { -+ /* Configure the uFQ algorithm */ -+ err = qman_ceetm_channel_set_weight(channel, cl->root.tbl); -+ if (err) -+ goto channel_err; -+ } -+ -+ /* Add class handle in Qdisc */ -+ ceetm_link_class(sch, &priv->clhash, &cl->common); -+ -+ pr_debug(KBUILD_BASENAME " : %s : configured class %X associated with " -+ "channel %d\n", __func__, classid, channel->idx); -+ *arg = (unsigned long)cl; -+ return 0; -+ -+channel_err: -+ pr_err(KBUILD_BASENAME " : %s : failed to configure the channel %d\n", -+ __func__, channel->idx); -+ if (qman_ceetm_channel_release(channel)) -+ pr_err(KBUILD_BASENAME " : %s : failed to release the channel " -+ "%d\n", __func__, channel->idx); -+claim_err: -+ if (cl) { -+ kfree(cl); -+ } -+ return err; -+} -+ -+static void ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg) -+{ -+ struct ceetm_qdisc *priv = qdisc_priv(sch); -+ struct ceetm_class *cl; -+ unsigned int i; -+ -+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle); -+ -+ if (arg->stop) -+ return; -+ -+ for (i = 0; i < priv->clhash.hashsize; i++) { -+ hlist_for_each_entry(cl, &priv->clhash.hash[i], 
common.hnode) { -+ if (arg->count < arg->skip) { -+ arg->count++; -+ continue; -+ } -+ if (arg->fn(sch, (unsigned long)cl, arg) < 0) { -+ arg->stop = 1; -+ return; -+ } -+ arg->count++; -+ } -+ } -+} -+ -+static int ceetm_cls_dump(struct Qdisc *sch, unsigned long arg, -+ struct sk_buff *skb, struct tcmsg *tcm) -+{ -+ struct ceetm_class *cl = (struct ceetm_class *)arg; -+ struct nlattr *nest; -+ struct tc_ceetm_copt copt; -+ -+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", -+ __func__, cl->common.classid, sch->handle); -+ -+ sch_tree_lock(sch); -+ -+ tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle; -+ tcm->tcm_handle = cl->common.classid; -+ -+ memset(&copt, 0, sizeof(copt)); -+ -+ copt.shaped = cl->shaped; -+ copt.type = cl->type; -+ -+ switch (cl->type) { -+ case CEETM_ROOT: -+ if (cl->root.child) -+ tcm->tcm_info = cl->root.child->handle; -+ -+ copt.rate = cl->root.rate; -+ copt.ceil = cl->root.ceil; -+ copt.tbl = cl->root.tbl; -+ break; -+ -+ case CEETM_PRIO: -+ if (cl->prio.child) -+ tcm->tcm_info = cl->prio.child->handle; -+ -+ copt.cr = cl->prio.cr; -+ copt.er = cl->prio.er; -+ break; -+ -+ case CEETM_WBFS: -+ copt.weight = cl->wbfs.weight; -+ break; -+ } -+ -+ nest = nla_nest_start(skb, TCA_OPTIONS); -+ if (nest == NULL) -+ goto nla_put_failure; -+ if (nla_put(skb, TCA_CEETM_COPT, sizeof(copt), &copt)) -+ goto nla_put_failure; -+ nla_nest_end(skb, nest); -+ sch_tree_unlock(sch); -+ return skb->len; -+ -+nla_put_failure: -+ sch_tree_unlock(sch); -+ nla_nest_cancel(skb, nest); -+ return -EMSGSIZE; -+} -+ -+static int ceetm_cls_delete(struct Qdisc *sch, unsigned long arg) -+{ -+ struct ceetm_qdisc *priv = qdisc_priv(sch); -+ struct ceetm_class *cl = (struct ceetm_class *)arg; -+ -+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", -+ __func__, cl->common.classid, sch->handle); -+ -+ sch_tree_lock(sch); -+ qdisc_class_hash_remove(&priv->clhash, &cl->common); -+ cl->refcnt--; -+ -+ /* The refcnt should be at least 1 since we have incremented it in -+ get(). Will decrement again in put() where we will call destroy() -+ to actually free the memory if it reaches 0. 
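The get()/put()/delete() refcounting contract described in the comment above deserves a standalone illustration: ceetm_cls_get() takes a reference, ceetm_cls_delete() unlinks the class from the hash and drops that path's reference, and the final ceetm_cls_put() triggers destruction. A toy user-space model of the same discipline (the type and values are invented; the assert mirrors the BUG_ON() in ceetm_cls_delete()):

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

struct toy_class {
	int refcnt;
	int classid;
};

static void toy_get(struct toy_class *cl) { cl->refcnt++; }

static void toy_put(struct toy_class *cl)
{
	if (--cl->refcnt == 0) {
		printf("destroying class %x\n", cl->classid);
		free(cl);
	}
}

static void toy_delete(struct toy_class *cl)
{
	/* unlink from the hash, then drop the hash's reference; the caller's
	 * get() reference keeps the object alive until its matching put() */
	cl->refcnt--;
	assert(cl->refcnt > 0); /* mirrors the BUG_ON() in ceetm_cls_delete() */
}

int main(void)
{
	struct toy_class *cl = malloc(sizeof(*cl));

	if (!cl)
		return 1;
	cl->refcnt = 1; /* reference held by the qdisc's class hash */
	cl->classid = 0x10001;

	toy_get(cl);    /* a tc operation takes a reference */
	toy_delete(cl); /* class removed from the qdisc     */
	toy_put(cl);    /* last put frees the object        */
	return 0;
}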
*/ -+ BUG_ON(cl->refcnt == 0); -+ -+ sch_tree_unlock(sch); -+ return 0; -+} -+ -+/* Get the class' child qdisc, if any */ -+static struct Qdisc *ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg) -+{ -+ struct ceetm_class *cl = (struct ceetm_class *)arg; -+ -+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", -+ __func__, cl->common.classid, sch->handle); -+ -+ switch (cl->type) { -+ case CEETM_ROOT: -+ return cl->root.child; -+ break; -+ -+ case CEETM_PRIO: -+ return cl->prio.child; -+ break; -+ } -+ -+ return NULL; -+} -+ -+static int ceetm_cls_graft(struct Qdisc *sch, unsigned long arg, -+ struct Qdisc *new, struct Qdisc **old) -+{ -+ if (new && strcmp(new->ops->id, ceetm_qdisc_ops.id)) { -+ pr_err("CEETM: only ceetm qdiscs can be attached to ceetm " -+ "classes\n"); -+ return -EOPNOTSUPP; -+ } -+ -+ return 0; -+} -+ -+static int ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg, -+ struct gnet_dump *d) -+{ -+ unsigned int i; -+ struct ceetm_class *cl = (struct ceetm_class *)arg; -+ struct gnet_stats_basic_packed tmp_bstats; -+ struct ceetm_class_stats *cstats = NULL; -+ struct qm_ceetm_cq *cq = NULL; -+ struct tc_ceetm_xstats xstats; -+ -+ memset(&xstats, 0, sizeof(xstats)); -+ memset(&tmp_bstats, 0, sizeof(tmp_bstats)); -+ -+ switch (cl->type) { -+ case CEETM_ROOT: -+ return 0; -+ case CEETM_PRIO: -+ cq = cl->prio.cq; -+ break; -+ case CEETM_WBFS: -+ cq = cl->wbfs.cq; -+ break; -+ } -+ -+ for_each_online_cpu(i) { -+ switch (cl->type) { -+ case CEETM_PRIO: -+ cstats = per_cpu_ptr(cl->prio.cstats, i); -+ break; -+ case CEETM_WBFS: -+ cstats = per_cpu_ptr(cl->wbfs.cstats, i); -+ break; -+ } -+ -+ if (cstats) { -+ xstats.ern_drop_count += cstats->ern_drop_count; -+ xstats.cgr_congested_count += cstats->cgr_congested_count; -+ tmp_bstats.bytes += cstats->bstats.bytes; -+ tmp_bstats.packets += cstats->bstats.packets; -+ } -+ } -+ -+ if (gnet_stats_copy_basic(d, NULL, &tmp_bstats) < 0) -+ return -1; -+ -+ if (cq && qman_ceetm_cq_get_dequeue_statistics(cq, 0, -+ &xstats.frame_count, &xstats.byte_count)) -+ return -1; -+ -+ return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); -+} -+ -+static struct tcf_proto **ceetm_tcf_chain(struct Qdisc *sch, unsigned long arg) -+{ -+ struct ceetm_qdisc *priv = qdisc_priv(sch); -+ struct ceetm_class *cl = (struct ceetm_class *)arg; -+ struct tcf_proto **fl = cl ? &cl->filter_list : &priv->filter_list; -+ -+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__, -+ cl ? cl->common.classid : 0, sch->handle); -+ return fl; -+} -+ -+static unsigned long ceetm_tcf_bind(struct Qdisc *sch, unsigned long parent, -+ u32 classid) -+{ -+ struct ceetm_class *cl = ceetm_find(classid, sch); -+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__, -+ cl ? cl->common.classid : 0, sch->handle); -+ return (unsigned long)cl; -+} -+ -+static void ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg) -+{ -+ struct ceetm_class *cl = (struct ceetm_class *)arg; -+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__, -+ cl ? 
cl->common.classid : 0, sch->handle); -+} -+ -+const struct Qdisc_class_ops ceetm_cls_ops = { -+ .graft = ceetm_cls_graft, -+ .leaf = ceetm_cls_leaf, -+ .get = ceetm_cls_get, -+ .put = ceetm_cls_put, -+ .change = ceetm_cls_change, -+ .delete = ceetm_cls_delete, -+ .walk = ceetm_cls_walk, -+ .tcf_chain = ceetm_tcf_chain, -+ .bind_tcf = ceetm_tcf_bind, -+ .unbind_tcf = ceetm_tcf_unbind, -+ .dump = ceetm_cls_dump, -+ .dump_stats = ceetm_cls_dump_stats, -+}; -+ -+struct Qdisc_ops ceetm_qdisc_ops __read_mostly = { -+ .id = "ceetm", -+ .priv_size = sizeof(struct ceetm_qdisc), -+ .cl_ops = &ceetm_cls_ops, -+ .init = ceetm_init, -+ .destroy = ceetm_destroy, -+ .dump = ceetm_dump, -+ .attach = ceetm_attach, -+ .owner = THIS_MODULE, -+}; -+ -+/* Run the filters and classifiers attached to the qdisc on the provided skb */ -+static struct ceetm_class *ceetm_classify(struct sk_buff *skb, struct Qdisc *sch, -+ int *qerr, bool *act_drop) -+{ -+ struct ceetm_qdisc *priv = qdisc_priv(sch); -+ struct ceetm_class *cl = NULL, *wbfs_cl; -+ struct tcf_result res; -+ struct tcf_proto *tcf; -+ int result; -+ -+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; -+ tcf = priv->filter_list; -+ while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { -+#ifdef CONFIG_NET_CLS_ACT -+ switch (result) { -+ case TC_ACT_QUEUED: -+ case TC_ACT_STOLEN: -+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; -+ case TC_ACT_SHOT: -+ /* No valid class found due to action */ -+ *act_drop = true; -+ return NULL; -+ } -+#endif -+ cl = (void *)res.class; -+ if (!cl) { -+ if (res.classid == sch->handle) { -+ /* The filter leads to the qdisc */ -+ /* TODO default qdisc */ -+ return NULL; -+ } -+ -+ cl = ceetm_find(res.classid, sch); -+ if (!cl) -+ /* The filter leads to an invalid class */ -+ break; -+ } -+ -+ /* The class might have its own filters attached */ -+ tcf = cl->filter_list; -+ } -+ -+ if (!cl) { -+ /* No valid class found */ -+ /* TODO default qdisc */ -+ return NULL; -+ } -+ -+ switch (cl->type) { -+ case CEETM_ROOT: -+ if (cl->root.child) { -+ /* Run the prio qdisc classifiers */ -+ return ceetm_classify(skb, cl->root.child, qerr, -+ act_drop); -+ } else { -+ /* The root class does not have a child prio qdisc */ -+ /* TODO default qdisc */ -+ return NULL; -+ } -+ case CEETM_PRIO: -+ if (cl->prio.child) { -+ /* If filters lead to a wbfs class, return it. -+ * Otherwise, return the prio class */ -+ wbfs_cl = ceetm_classify(skb, cl->prio.child, qerr, -+ act_drop); -+ /* A NULL result might indicate either an erroneous -+ * filter, or no filters at all. We will assume the -+ * latter */ -+ return wbfs_cl ? 
: cl; -+ } -+ } -+ -+ /* For wbfs and childless prio classes, return the class directly */ -+ return cl; -+} -+ -+int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev) -+{ -+ int ret; -+ bool act_drop = false; -+ struct Qdisc *sch = net_dev->qdisc; -+ struct ceetm_class *cl; -+ struct dpa_priv_s *priv_dpa; -+ struct qman_fq *egress_fq, *conf_fq; -+ struct ceetm_qdisc *priv = qdisc_priv(sch); -+ struct ceetm_qdisc_stats *qstats = this_cpu_ptr(priv->root.qstats); -+ struct ceetm_class_stats *cstats; -+ const int queue_mapping = dpa_get_queue_mapping(skb); -+ spinlock_t *root_lock = qdisc_lock(sch); -+ -+ spin_lock(root_lock); -+ cl = ceetm_classify(skb, sch, &ret, &act_drop); -+ spin_unlock(root_lock); -+ -+#ifdef CONFIG_NET_CLS_ACT -+ if (act_drop) { -+ if (ret & __NET_XMIT_BYPASS) -+ qstats->drops++; -+ goto drop; -+ } -+#endif -+ /* TODO default class */ -+ if (unlikely(!cl)) { -+ qstats->drops++; -+ goto drop; -+ } -+ -+ priv_dpa = netdev_priv(net_dev); -+ conf_fq = priv_dpa->conf_fqs[queue_mapping]; -+ -+ /* Choose the proper tx fq and update the basic stats (bytes and -+ * packets sent by the class) */ -+ switch (cl->type) { -+ case CEETM_PRIO: -+ egress_fq = &(cl->prio.fq->fq); -+ cstats = this_cpu_ptr(cl->prio.cstats); -+ break; -+ case CEETM_WBFS: -+ egress_fq = &(cl->wbfs.fq->fq); -+ cstats = this_cpu_ptr(cl->wbfs.cstats); -+ break; -+ default: -+ qstats->drops++; -+ goto drop; -+ } -+ -+ bstats_update(&cstats->bstats, skb); -+ return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq); -+ -+drop: -+ dev_kfree_skb_any(skb); -+ return NET_XMIT_SUCCESS; -+} -+ -+static int __init ceetm_register(void) -+{ -+ int _errno = 0; -+ -+ pr_info(KBUILD_MODNAME ": " DPA_CEETM_DESCRIPTION "\n"); -+ -+ _errno = register_qdisc(&ceetm_qdisc_ops); -+ if (unlikely(_errno)) -+ pr_err(KBUILD_MODNAME -+ ": %s:%hu:%s(): register_qdisc() = %d\n", -+ KBUILD_BASENAME".c", __LINE__, __func__, _errno); -+ -+ return _errno; -+} -+ -+static void __exit ceetm_unregister(void) -+{ -+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", -+ KBUILD_BASENAME".c", __func__); -+ -+ unregister_qdisc(&ceetm_qdisc_ops); -+} -+ -+module_init(ceetm_register); -+module_exit(ceetm_unregister); ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h -@@ -0,0 +1,230 @@ -+/* Copyright 2008-2016 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#ifndef __DPAA_ETH_CEETM_H -+#define __DPAA_ETH_CEETM_H -+ -+#include -+#include -+#include -+ -+#include "mac.h" -+#include "dpaa_eth_common.h" -+ -+/* Mask to determine the sub-portal id from a channel number */ -+#define CHANNEL_SP_MASK 0x1f -+/* The number of the last channel that services DCP0, connected to FMan 0. -+ * Value validated for B4 and T series platforms. -+ */ -+#define DCP0_MAX_CHANNEL 0x80f -+/* A2V=1 - field A2 is valid -+ * A0V=1 - field A0 is valid - enables frame confirmation -+ * OVOM=1 - override operation mode bits with values from A2 -+ * EBD=1 - external buffers are deallocated at the end of the FMan flow -+ * NL=0 - the BMI releases all the internal buffers -+ */ -+#define CEETM_CONTEXT_A 0x1a00000080000000 -+ -+/* For functional purposes, there are num_tx_queues pfifo qdiscs through which -+ * frames reach the driver. Their handles start from 1:21. Handles 1:1 to 1:20 -+ * are reserved for the maximum 32 CEETM channels (majors and minors are in -+ * hex). -+ */ -+#define PFIFO_MIN_OFFSET 0x21 -+ -+/* A maximum of 8 CQs can be linked to a CQ channel or to a WBFS scheduler. */ -+#define CEETM_MAX_PRIO_QCOUNT 8 -+#define CEETM_MAX_WBFS_QCOUNT 8 -+#define CEETM_MIN_WBFS_QCOUNT 4 -+ -+/* The id offsets of the CQs belonging to WBFS groups (ids 8-11/15 for group A -+ * and/or 12-15 for group B). 
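-+ * That is, a minimal group A covers ids 8-11, a single large group covers
-+ * ids 8-15, and group B, when configured, covers ids 12-15.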
-+ */ -+#define WBFS_GRP_A_OFFSET 8 -+#define WBFS_GRP_B_OFFSET 12 -+ -+#define WBFS_GRP_A 1 -+#define WBFS_GRP_B 2 -+#define WBFS_GRP_LARGE 3 -+ -+enum { -+ TCA_CEETM_UNSPEC, -+ TCA_CEETM_COPT, -+ TCA_CEETM_QOPS, -+ __TCA_CEETM_MAX, -+}; -+ -+/* CEETM configuration types */ -+enum { -+ CEETM_ROOT = 1, -+ CEETM_PRIO, -+ CEETM_WBFS -+}; -+ -+#define TCA_CEETM_MAX (__TCA_CEETM_MAX - 1) -+extern const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1]; -+ -+struct ceetm_class; -+struct ceetm_qdisc_stats; -+struct ceetm_class_stats; -+ -+struct ceetm_fq { -+ struct qman_fq fq; -+ struct net_device *net_dev; -+ struct ceetm_class *ceetm_cls; -+}; -+ -+struct root_q { -+ struct Qdisc **qdiscs; -+ __u16 overhead; -+ __u32 rate; -+ __u32 ceil; -+ struct qm_ceetm_sp *sp; -+ struct qm_ceetm_lni *lni; -+ struct ceetm_qdisc_stats __percpu *qstats; -+}; -+ -+struct prio_q { -+ __u16 qcount; -+ struct ceetm_class *parent; -+}; -+ -+struct wbfs_q { -+ __u16 qcount; -+ int group_type; -+ struct ceetm_class *parent; -+ __u16 cr; -+ __u16 er; -+}; -+ -+struct ceetm_qdisc { -+ int type; /* LNI/CHNL/WBFS */ -+ bool shaped; -+ union { -+ struct root_q root; -+ struct prio_q prio; -+ struct wbfs_q wbfs; -+ }; -+ struct Qdisc_class_hash clhash; -+ struct tcf_proto *filter_list; /* qdisc attached filters */ -+}; -+ -+/* CEETM Qdisc configuration parameters */ -+struct tc_ceetm_qopt { -+ __u32 type; -+ __u16 shaped; -+ __u16 qcount; -+ __u16 overhead; -+ __u32 rate; -+ __u32 ceil; -+ __u16 cr; -+ __u16 er; -+ __u8 qweight[CEETM_MAX_WBFS_QCOUNT]; -+}; -+ -+struct root_c { -+ unsigned int rate; -+ unsigned int ceil; -+ unsigned int tbl; -+ bool wbfs_grp_a; -+ bool wbfs_grp_b; -+ bool wbfs_grp_large; -+ struct Qdisc *child; -+ struct qm_ceetm_channel *ch; -+}; -+ -+struct prio_c { -+ bool cr; -+ bool er; -+ struct ceetm_fq *fq; /* Hardware FQ instance Handle */ -+ struct qm_ceetm_lfq *lfq; -+ struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */ -+ struct qm_ceetm_ccg *ccg; -+ /* only one wbfs can be linked to one priority CQ */ -+ struct Qdisc *child; -+ struct ceetm_class_stats __percpu *cstats; -+}; -+ -+struct wbfs_c { -+ __u8 weight; /* The weight of the class between 1 and 248 */ -+ struct ceetm_fq *fq; /* Hardware FQ instance Handle */ -+ struct qm_ceetm_lfq *lfq; -+ struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */ -+ struct qm_ceetm_ccg *ccg; -+ struct ceetm_class_stats __percpu *cstats; -+}; -+ -+struct ceetm_class { -+ struct Qdisc_class_common common; -+ int refcnt; /* usage count of this class */ -+ struct tcf_proto *filter_list; /* class attached filters */ -+ struct Qdisc *parent; -+ bool shaped; -+ int type; /* ROOT/PRIO/WBFS */ -+ union { -+ struct root_c root; -+ struct prio_c prio; -+ struct wbfs_c wbfs; -+ }; -+}; -+ -+/* CEETM Class configuration parameters */ -+struct tc_ceetm_copt { -+ __u32 type; -+ __u16 shaped; -+ __u32 rate; -+ __u32 ceil; -+ __u16 tbl; -+ __u16 cr; -+ __u16 er; -+ __u8 weight; -+}; -+ -+/* CEETM stats */ -+struct ceetm_qdisc_stats { -+ __u32 drops; -+}; -+ -+struct ceetm_class_stats { -+ struct gnet_stats_basic_packed bstats; -+ __u32 ern_drop_count; -+ __u32 cgr_congested_count; -+}; -+ -+struct tc_ceetm_xstats { -+ __u32 ern_drop_count; -+ __u32 cgr_congested_count; -+ __u64 frame_count; -+ __u64 byte_count; -+}; -+ -+int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev); -+#endif ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c -@@ -0,0 +1,1787 @@ -+/* Copyright 2008-2013 Freescale Semiconductor, 
Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include /* vlan_eth_hdr */ -+#include "dpaa_eth.h" -+#include "dpaa_eth_common.h" -+#ifdef CONFIG_FSL_DPAA_1588 -+#include "dpaa_1588.h" -+#endif -+#ifdef CONFIG_FSL_DPAA_DBG_LOOP -+#include "dpaa_debugfs.h" -+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */ -+#include "mac.h" -+ -+/* DPAA platforms benefit from hardware-assisted queue management */ -+#define DPA_NETIF_FEATURES NETIF_F_HW_ACCEL_MQ -+ -+/* Size in bytes of the FQ taildrop threshold */ -+#define DPA_FQ_TD 0x200000 -+ -+#ifdef CONFIG_PTP_1588_CLOCK_DPAA -+struct ptp_priv_s ptp_priv; -+#endif -+ -+static struct dpa_bp *dpa_bp_array[64]; -+ -+int dpa_max_frm; -+EXPORT_SYMBOL(dpa_max_frm); -+ -+int dpa_rx_extra_headroom; -+EXPORT_SYMBOL(dpa_rx_extra_headroom); -+ -+int dpa_num_cpus = NR_CPUS; -+ -+static const struct fqid_cell tx_confirm_fqids[] = { -+ {0, DPAA_ETH_TX_QUEUES} -+}; -+ -+static struct fqid_cell default_fqids[][3] = { -+ [RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} }, -+ [TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} } -+}; -+ -+static const char fsl_qman_frame_queues[][25] = { -+ [RX] = "fsl,qman-frame-queues-rx", -+ [TX] = "fsl,qman-frame-queues-tx" -+}; -+#ifdef CONFIG_FSL_DPAA_HOOKS -+/* A set of callbacks for hooking into the fastpath at different points. */ -+struct dpaa_eth_hooks_s dpaa_eth_hooks; -+EXPORT_SYMBOL(dpaa_eth_hooks); -+/* This function should only be called on the probe paths, since it makes no -+ * effort to guarantee consistency of the destination hooks structure. 
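-+ * A minimal usage sketch (hypothetical consumer code; only
-+ * fsl_dpaa_eth_set_hooks() and the hook types and return codes declared
-+ * in dpaa_eth_common.h are real driver interfaces):
-+ *
-+ *	static enum dpaa_eth_hook_result my_rx_hook(struct sk_buff *skb,
-+ *			struct net_device *net_dev, u32 fqid)
-+ *	{
-+ *		return DPAA_ETH_CONTINUE;  (let the driver process the skb)
-+ *	}
-+ *
-+ *	static struct dpaa_eth_hooks_s my_hooks = {
-+ *		.rx_default = my_rx_hook,
-+ *	};
-+ *
-+ *	fsl_dpaa_eth_set_hooks(&my_hooks);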
-+ */ -+void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks) -+{ -+ if (hooks) -+ dpaa_eth_hooks = *hooks; -+ else -+ pr_err("NULL pointer to hooks!\n"); -+} -+EXPORT_SYMBOL(fsl_dpaa_eth_set_hooks); -+#endif -+ -+int dpa_netdev_init(struct net_device *net_dev, -+ const uint8_t *mac_addr, -+ uint16_t tx_timeout) -+{ -+ int err; -+ struct dpa_priv_s *priv = netdev_priv(net_dev); -+ struct device *dev = net_dev->dev.parent; -+ -+ net_dev->hw_features |= DPA_NETIF_FEATURES; -+ -+ net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; -+ -+ net_dev->features |= net_dev->hw_features; -+ net_dev->vlan_features = net_dev->features; -+ -+ memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len); -+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); -+ -+ net_dev->ethtool_ops = &dpa_ethtool_ops; -+ -+ net_dev->needed_headroom = priv->tx_headroom; -+ net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout); -+ -+ err = register_netdev(net_dev); -+ if (err < 0) { -+ dev_err(dev, "register_netdev() = %d\n", err); -+ return err; -+ } -+ -+#ifdef CONFIG_FSL_DPAA_DBG_LOOP -+ /* create debugfs entry for this net_device */ -+ err = dpa_netdev_debugfs_create(net_dev); -+ if (err) { -+ unregister_netdev(net_dev); -+ return err; -+ } -+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */ -+ -+ return 0; -+} -+EXPORT_SYMBOL(dpa_netdev_init); -+ -+int __cold dpa_start(struct net_device *net_dev) -+{ -+ int err, i; -+ struct dpa_priv_s *priv; -+ struct mac_device *mac_dev; -+ -+ priv = netdev_priv(net_dev); -+ mac_dev = priv->mac_dev; -+ -+ err = mac_dev->init_phy(net_dev, priv->mac_dev); -+ if (err < 0) { -+ if (netif_msg_ifup(priv)) -+ netdev_err(net_dev, "init_phy() = %d\n", err); -+ return err; -+ } -+ -+ for_each_port_device(i, mac_dev->port_dev) { -+ err = fm_port_enable(mac_dev->port_dev[i]); -+ if (err) -+ goto mac_start_failed; -+ } -+ -+ err = priv->mac_dev->start(mac_dev); -+ if (err < 0) { -+ if (netif_msg_ifup(priv)) -+ netdev_err(net_dev, "mac_dev->start() = %d\n", err); -+ goto mac_start_failed; -+ } -+ -+ netif_tx_start_all_queues(net_dev); -+ -+ return 0; -+ -+mac_start_failed: -+ for_each_port_device(i, mac_dev->port_dev) -+ fm_port_disable(mac_dev->port_dev[i]); -+ -+ return err; -+} -+EXPORT_SYMBOL(dpa_start); -+ -+int __cold dpa_stop(struct net_device *net_dev) -+{ -+ int _errno, i, err; -+ struct dpa_priv_s *priv; -+ struct mac_device *mac_dev; -+ -+ priv = netdev_priv(net_dev); -+ mac_dev = priv->mac_dev; -+ -+ netif_tx_stop_all_queues(net_dev); -+ /* Allow the Fman (Tx) port to process in-flight frames before we -+ * try switching it off. -+ */ -+ usleep_range(5000, 10000); -+ -+ _errno = mac_dev->stop(mac_dev); -+ if (unlikely(_errno < 0)) -+ if (netif_msg_ifdown(priv)) -+ netdev_err(net_dev, "mac_dev->stop() = %d\n", -+ _errno); -+ -+ for_each_port_device(i, mac_dev->port_dev) { -+ err = fm_port_disable(mac_dev->port_dev[i]); -+ _errno = err ? 
err : _errno; -+ } -+ -+ if (mac_dev->phy_dev) -+ phy_disconnect(mac_dev->phy_dev); -+ mac_dev->phy_dev = NULL; -+ -+ return _errno; -+} -+EXPORT_SYMBOL(dpa_stop); -+ -+void __cold dpa_timeout(struct net_device *net_dev) -+{ -+ const struct dpa_priv_s *priv; -+ struct dpa_percpu_priv_s *percpu_priv; -+ -+ priv = netdev_priv(net_dev); -+ percpu_priv = raw_cpu_ptr(priv->percpu_priv); -+ -+ if (netif_msg_timer(priv)) -+ netdev_crit(net_dev, "Transmit timeout latency: %u ms\n", -+ jiffies_to_msecs(jiffies - net_dev->trans_start)); -+ -+ percpu_priv->stats.tx_errors++; -+} -+EXPORT_SYMBOL(dpa_timeout); -+ -+/* net_device */ -+ -+/** -+ * @param net_dev the device for which statistics are calculated -+ * @param stats the function fills this structure with the device's statistics -+ * @return the address of the structure containing the statistics -+ * -+ * Calculates the statistics for the given device by adding the statistics -+ * collected by each CPU. -+ */ -+struct rtnl_link_stats64 * __cold -+dpa_get_stats64(struct net_device *net_dev, -+ struct rtnl_link_stats64 *stats) -+{ -+ struct dpa_priv_s *priv = netdev_priv(net_dev); -+ u64 *cpustats; -+ u64 *netstats = (u64 *)stats; -+ int i, j; -+ struct dpa_percpu_priv_s *percpu_priv; -+ int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64); -+ -+ for_each_possible_cpu(i) { -+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i); -+ -+ cpustats = (u64 *)&percpu_priv->stats; -+ -+ for (j = 0; j < numstats; j++) -+ netstats[j] += cpustats[j]; -+ } -+ -+ return stats; -+} -+EXPORT_SYMBOL(dpa_get_stats64); -+ -+int dpa_change_mtu(struct net_device *net_dev, int new_mtu) -+{ -+ const int max_mtu = dpa_get_max_mtu(); -+ -+ /* Make sure we don't exceed the Ethernet controller's MAXFRM */ -+ if (new_mtu < 68 || new_mtu > max_mtu) { -+ netdev_err(net_dev, "Invalid L3 mtu %d (must be between %d and %d).\n", -+ new_mtu, 68, max_mtu); -+ return -EINVAL; -+ } -+ net_dev->mtu = new_mtu; -+ -+ return 0; -+} -+EXPORT_SYMBOL(dpa_change_mtu); -+ -+/* .ndo_init callback */ -+int dpa_ndo_init(struct net_device *net_dev) -+{ -+ /* If fsl_fm_max_frm is set to a higher value than the all-common 1500, -+ * we choose conservatively and let the user explicitly set a higher -+ * MTU via ifconfig. Otherwise, the user may end up with different MTUs -+ * in the same LAN. -+ * If on the other hand fsl_fm_max_frm has been chosen below 1500, -+ * start with the maximum allowed. -+ */ -+ int init_mtu = min(dpa_get_max_mtu(), ETH_DATA_LEN); -+ -+ pr_debug("Setting initial MTU on net device: %d\n", init_mtu); -+ net_dev->mtu = init_mtu; -+ -+ return 0; -+} -+EXPORT_SYMBOL(dpa_ndo_init); -+ -+int dpa_set_features(struct net_device *dev, netdev_features_t features) -+{ -+ /* Not much to do here for now */ -+ dev->features = features; -+ return 0; -+} -+EXPORT_SYMBOL(dpa_set_features); -+ -+netdev_features_t dpa_fix_features(struct net_device *dev, -+ netdev_features_t features) -+{ -+ netdev_features_t unsupported_features = 0; -+ -+ /* In theory we should never be requested to enable features that -+ * we didn't set in netdev->features and netdev->hw_features at probe -+ * time, but double check just to be on the safe side. 
-+ * We don't support enabling Rx csum through ethtool yet -+ */ -+ unsupported_features |= NETIF_F_RXCSUM; -+ -+ features &= ~unsupported_features; -+ -+ return features; -+} -+EXPORT_SYMBOL(dpa_fix_features); -+ -+#ifdef CONFIG_FSL_DPAA_TS -+u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv, enum port_type rx_tx, -+ const void *data) -+{ -+ u64 *ts, ns; -+ -+ ts = fm_port_get_buffer_time_stamp(priv->mac_dev->port_dev[rx_tx], -+ data); -+ -+ if (!ts || *ts == 0) -+ return 0; -+ -+ be64_to_cpus(ts); -+ -+ /* the nominal frequency period is a power of 2, so a shift is enough; a non-power-of-2 period would need a multiplication by DPA_PTP_NOMINAL_FREQ_PERIOD_NS instead */ -+ ns = *ts << DPA_PTP_NOMINAL_FREQ_PERIOD_SHIFT; -+ -+ return ns; -+} -+ -+int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx, -+ struct skb_shared_hwtstamps *shhwtstamps, const void *data) -+{ -+ u64 ns; -+ -+ ns = dpa_get_timestamp_ns(priv, rx_tx, data); -+ -+ if (ns == 0) -+ return -EINVAL; -+ -+ memset(shhwtstamps, 0, sizeof(*shhwtstamps)); -+ shhwtstamps->hwtstamp = ns_to_ktime(ns); -+ -+ return 0; -+} -+ -+static void dpa_ts_tx_enable(struct net_device *dev) -+{ -+ struct dpa_priv_s *priv = netdev_priv(dev); -+ struct mac_device *mac_dev = priv->mac_dev; -+ -+ if (mac_dev->fm_rtc_enable) -+ mac_dev->fm_rtc_enable(get_fm_handle(dev)); -+ if (mac_dev->ptp_enable) -+ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev)); -+ -+ priv->ts_tx_en = true; -+} -+ -+static void dpa_ts_tx_disable(struct net_device *dev) -+{ -+ struct dpa_priv_s *priv = netdev_priv(dev); -+ -+#if 0 -+/* the RTC might be needed by the Rx Ts, cannot disable here -+ * no separate ptp_disable API for Rx/Tx, cannot disable here -+ */ -+ struct mac_device *mac_dev = priv->mac_dev; -+ -+ if (mac_dev->fm_rtc_disable) -+ mac_dev->fm_rtc_disable(get_fm_handle(dev)); -+ -+ if (mac_dev->ptp_disable) -+ mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev)); -+#endif -+ -+ priv->ts_tx_en = false; -+} -+ -+static void dpa_ts_rx_enable(struct net_device *dev) -+{ -+ struct dpa_priv_s *priv = netdev_priv(dev); -+ struct mac_device *mac_dev = priv->mac_dev; -+ -+ if (mac_dev->fm_rtc_enable) -+ mac_dev->fm_rtc_enable(get_fm_handle(dev)); -+ if (mac_dev->ptp_enable) -+ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev)); -+ -+ priv->ts_rx_en = true; -+} -+ -+static void dpa_ts_rx_disable(struct net_device *dev) -+{ -+ struct dpa_priv_s *priv = netdev_priv(dev); -+ -+#if 0 -+/* the RTC might be needed by the Tx Ts, cannot disable here -+ * no separate ptp_disable API for Rx/Tx, cannot disable here -+ */ -+ struct mac_device *mac_dev = priv->mac_dev; -+ -+ if (mac_dev->fm_rtc_disable) -+ mac_dev->fm_rtc_disable(get_fm_handle(dev)); -+ -+ if (mac_dev->ptp_disable) -+ mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev)); -+#endif -+ -+ priv->ts_rx_en = false; -+} -+ -+static int dpa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -+{ -+ struct hwtstamp_config config; -+ -+ if (copy_from_user(&config, rq->ifr_data, sizeof(config))) -+ return -EFAULT; -+ -+ switch (config.tx_type) { -+ case HWTSTAMP_TX_OFF: -+ dpa_ts_tx_disable(dev); -+ break; -+ case HWTSTAMP_TX_ON: -+ dpa_ts_tx_enable(dev); -+ break; -+ default: -+ return -ERANGE; -+ } -+ -+ if (config.rx_filter == HWTSTAMP_FILTER_NONE) -+ dpa_ts_rx_disable(dev); -+ else { -+ dpa_ts_rx_enable(dev); -+ /* TS is set for all frame types, not only those requested */ -+ config.rx_filter = HWTSTAMP_FILTER_ALL; -+ } -+ -+ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? 
-+ -EFAULT : 0; -+} -+#endif /* CONFIG_FSL_DPAA_TS */ -+ -+int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -+{ -+#ifdef CONFIG_FSL_DPAA_1588 -+ struct dpa_priv_s *priv = netdev_priv(dev); -+#endif -+ int ret = 0; -+ -+ /* at least one timestamping feature must be enabled */ -+#ifdef CONFIG_FSL_DPAA_TS -+ if (!netif_running(dev)) -+#endif -+ return -EINVAL; -+ -+#ifdef CONFIG_FSL_DPAA_TS -+ if (cmd == SIOCSHWTSTAMP) -+ return dpa_ts_ioctl(dev, rq, cmd); -+#endif /* CONFIG_FSL_DPAA_TS */ -+ -+#ifdef CONFIG_FSL_DPAA_1588 -+ if ((cmd >= PTP_ENBL_TXTS_IOCTL) && (cmd <= PTP_CLEANUP_TS)) { -+ if (priv->tsu && priv->tsu->valid) -+ ret = dpa_ioctl_1588(dev, rq, cmd); -+ else -+ ret = -ENODEV; -+ } -+#endif -+ -+ return ret; -+} -+EXPORT_SYMBOL(dpa_ioctl); -+ -+int __cold dpa_remove(struct platform_device *of_dev) -+{ -+ int err; -+ struct device *dev; -+ struct net_device *net_dev; -+ struct dpa_priv_s *priv; -+ -+ dev = &of_dev->dev; -+ net_dev = dev_get_drvdata(dev); -+ -+ priv = netdev_priv(net_dev); -+ -+ dpaa_eth_sysfs_remove(dev); -+ -+ dev_set_drvdata(dev, NULL); -+ unregister_netdev(net_dev); -+ -+ err = dpa_fq_free(dev, &priv->dpa_fq_list); -+ -+ qman_delete_cgr_safe(&priv->ingress_cgr); -+ qman_release_cgrid(priv->ingress_cgr.cgrid); -+ qman_delete_cgr_safe(&priv->cgr_data.cgr); -+ qman_release_cgrid(priv->cgr_data.cgr.cgrid); -+ -+ dpa_private_napi_del(net_dev); -+ -+ dpa_bp_free(priv); -+ -+ if (priv->buf_layout) -+ devm_kfree(dev, priv->buf_layout); -+ -+#ifdef CONFIG_FSL_DPAA_DBG_LOOP -+ /* remove debugfs entry for this net_device */ -+ dpa_netdev_debugfs_remove(net_dev); -+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */ -+ -+#ifdef CONFIG_FSL_DPAA_1588 -+ if (priv->tsu && priv->tsu->valid) -+ dpa_ptp_cleanup(priv); -+#endif -+ -+ free_netdev(net_dev); -+ -+ return err; -+} -+EXPORT_SYMBOL(dpa_remove); -+ -+struct mac_device * __cold __must_check -+__attribute__((nonnull)) -+dpa_mac_probe(struct platform_device *_of_dev) -+{ -+ struct device *dpa_dev, *dev; -+ struct device_node *mac_node; -+ struct platform_device *of_dev; -+ struct mac_device *mac_dev; -+#ifdef CONFIG_FSL_DPAA_1588 -+ int lenp; -+ const phandle *phandle_prop; -+ struct net_device *net_dev = NULL; -+ struct dpa_priv_s *priv = NULL; -+ struct device_node *timer_node; -+#endif -+ dpa_dev = &_of_dev->dev; -+ -+ mac_node = of_parse_phandle(_of_dev->dev.of_node, "fsl,fman-mac", 0); -+ if (unlikely(mac_node == NULL)) { -+ dev_err(dpa_dev, "Cannot find MAC device device tree node\n"); -+ return ERR_PTR(-EFAULT); -+ } -+ -+ of_dev = of_find_device_by_node(mac_node); -+ if (unlikely(of_dev == NULL)) { -+ dev_err(dpa_dev, "of_find_device_by_node(%s) failed\n", -+ mac_node->full_name); -+ of_node_put(mac_node); -+ return ERR_PTR(-EINVAL); -+ } -+ of_node_put(mac_node); -+ -+ dev = &of_dev->dev; -+ -+ mac_dev = dev_get_drvdata(dev); -+ if (unlikely(mac_dev == NULL)) { -+ dev_err(dpa_dev, "dev_get_drvdata(%s) failed\n", -+ dev_name(dev)); -+ return ERR_PTR(-EINVAL); -+ } -+ -+#ifdef CONFIG_FSL_DPAA_1588 -+ phandle_prop = of_get_property(mac_node, "ptimer-handle", &lenp); -+ if (phandle_prop && ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) || -+ ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) && -+ (mac_dev->speed == SPEED_1000)))) { -+ timer_node = of_find_node_by_phandle(*phandle_prop); -+ if (timer_node) -+ net_dev = dev_get_drvdata(dpa_dev); -+ if (timer_node && net_dev) { -+ priv = netdev_priv(net_dev); -+ if (!dpa_ptp_init(priv)) -+ dev_info(dev, "%s: ptp 1588 is initialized.\n", -+ mac_node->full_name); -+ } -+ } 
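-+ /* Note: with a ptimer-handle present, 1588 init is attempted for any
-+ * non-SGMII MAC, but for SGMII only at 1Gbps.
-+ */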
-+#endif -+ -+#ifdef CONFIG_PTP_1588_CLOCK_DPAA -+ if ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) || -+ ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) && -+ (mac_dev->speed == SPEED_1000))) { -+ ptp_priv.node = of_parse_phandle(mac_node, "ptimer-handle", 0); -+ if (ptp_priv.node) { -+ ptp_priv.of_dev = of_find_device_by_node(ptp_priv.node); -+ if (unlikely(ptp_priv.of_dev == NULL)) { -+ dev_err(dpa_dev, -+ "Cannot find device represented by timer_node\n"); -+ of_node_put(ptp_priv.node); -+ return ERR_PTR(-EINVAL); -+ } -+ ptp_priv.mac_dev = mac_dev; -+ } -+ } -+#endif -+ return mac_dev; -+} -+EXPORT_SYMBOL(dpa_mac_probe); -+ -+int dpa_set_mac_address(struct net_device *net_dev, void *addr) -+{ -+ const struct dpa_priv_s *priv; -+ int _errno; -+ struct mac_device *mac_dev; -+ -+ priv = netdev_priv(net_dev); -+ -+ _errno = eth_mac_addr(net_dev, addr); -+ if (_errno < 0) { -+ if (netif_msg_drv(priv)) -+ netdev_err(net_dev, -+ "eth_mac_addr() = %d\n", -+ _errno); -+ return _errno; -+ } -+ -+ mac_dev = priv->mac_dev; -+ -+ _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev), -+ net_dev->dev_addr); -+ if (_errno < 0) { -+ if (netif_msg_drv(priv)) -+ netdev_err(net_dev, -+ "mac_dev->change_addr() = %d\n", -+ _errno); -+ return _errno; -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL(dpa_set_mac_address); -+ -+void dpa_set_rx_mode(struct net_device *net_dev) -+{ -+ int _errno; -+ const struct dpa_priv_s *priv; -+ -+ priv = netdev_priv(net_dev); -+ -+ if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) { -+ priv->mac_dev->promisc = !priv->mac_dev->promisc; -+ _errno = priv->mac_dev->set_promisc( -+ priv->mac_dev->get_mac_handle(priv->mac_dev), -+ priv->mac_dev->promisc); -+ if (unlikely(_errno < 0) && netif_msg_drv(priv)) -+ netdev_err(net_dev, -+ "mac_dev->set_promisc() = %d\n", -+ _errno); -+ } -+ -+ _errno = priv->mac_dev->set_multi(net_dev, priv->mac_dev); -+ if (unlikely(_errno < 0) && netif_msg_drv(priv)) -+ netdev_err(net_dev, "mac_dev->set_multi() = %d\n", _errno); -+} -+EXPORT_SYMBOL(dpa_set_rx_mode); -+ -+void dpa_set_buffers_layout(struct mac_device *mac_dev, -+ struct dpa_buffer_layout_s *layout) -+{ -+ struct fm_port_params params; -+ -+ /* Rx */ -+ layout[RX].priv_data_size = (uint16_t)DPA_RX_PRIV_DATA_SIZE; -+ layout[RX].parse_results = true; -+ layout[RX].hash_results = true; -+#ifdef CONFIG_FSL_DPAA_TS -+ layout[RX].time_stamp = true; -+#endif -+ fm_port_get_buff_layout_ext_params(mac_dev->port_dev[RX], ¶ms); -+ layout[RX].manip_extra_space = params.manip_extra_space; -+ /* a value of zero for data alignment means "don't care", so align to -+ * a non-zero value to prevent FMD from using its own default -+ */ -+ layout[RX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT; -+ -+ /* Tx */ -+ layout[TX].priv_data_size = DPA_TX_PRIV_DATA_SIZE; -+ layout[TX].parse_results = true; -+ layout[TX].hash_results = true; -+#ifdef CONFIG_FSL_DPAA_TS -+ layout[TX].time_stamp = true; -+#endif -+ fm_port_get_buff_layout_ext_params(mac_dev->port_dev[TX], ¶ms); -+ layout[TX].manip_extra_space = params.manip_extra_space; -+ layout[TX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT; -+} -+EXPORT_SYMBOL(dpa_set_buffers_layout); -+ -+int __attribute__((nonnull)) -+dpa_bp_alloc(struct dpa_bp *dpa_bp) -+{ -+ int err; -+ struct bman_pool_params bp_params; -+ struct platform_device *pdev; -+ -+ if (dpa_bp->size == 0 || dpa_bp->config_count == 0) { -+ pr_err("Buffer pool is not properly initialized! 
Missing size or initial number of buffers"); -+ return -EINVAL; -+ } -+ -+ memset(&bp_params, 0, sizeof(struct bman_pool_params)); -+#ifdef CONFIG_FMAN_PFC -+ bp_params.flags = BMAN_POOL_FLAG_THRESH; -+ bp_params.thresholds[0] = bp_params.thresholds[2] = -+ CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD; -+ bp_params.thresholds[1] = bp_params.thresholds[3] = -+ CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT; -+#endif -+ -+ /* If the pool is already specified, we only create one per bpid */ -+ if (dpa_bpid2pool_use(dpa_bp->bpid)) -+ return 0; -+ -+ if (dpa_bp->bpid == 0) -+ bp_params.flags |= BMAN_POOL_FLAG_DYNAMIC_BPID; -+ else -+ bp_params.bpid = dpa_bp->bpid; -+ -+ dpa_bp->pool = bman_new_pool(&bp_params); -+ if (unlikely(dpa_bp->pool == NULL)) { -+ pr_err("bman_new_pool() failed\n"); -+ return -ENODEV; -+ } -+ -+ dpa_bp->bpid = (uint8_t)bman_get_params(dpa_bp->pool)->bpid; -+ -+ pdev = platform_device_register_simple("dpaa_eth_bpool", -+ dpa_bp->bpid, NULL, 0); -+ if (IS_ERR(pdev)) { -+ err = PTR_ERR(pdev); -+ goto pdev_register_failed; -+ } -+ -+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(40)); -+ if (err) -+ goto pdev_mask_failed; -+ if (!pdev->dev.dma_mask) -+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; -+ else { -+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); -+ if (err) -+ goto pdev_mask_failed; -+ } -+ -+#ifdef CONFIG_FMAN_ARM -+ /* force coherency */ -+ pdev->dev.archdata.dma_coherent = true; -+ arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, true); -+#endif -+ -+ dpa_bp->dev = &pdev->dev; -+ -+ if (dpa_bp->seed_cb) { -+ err = dpa_bp->seed_cb(dpa_bp); -+ if (err) -+ goto pool_seed_failed; -+ } -+ -+ dpa_bpid2pool_map(dpa_bp->bpid, dpa_bp); -+ -+ return 0; -+ -+pool_seed_failed: -+pdev_mask_failed: -+ platform_device_unregister(pdev); -+pdev_register_failed: -+ bman_free_pool(dpa_bp->pool); -+ -+ return err; -+} -+EXPORT_SYMBOL(dpa_bp_alloc); -+ -+void dpa_bp_drain(struct dpa_bp *bp) -+{ -+ int ret, num = 8; -+ -+ do { -+ struct bm_buffer bmb[8]; -+ int i; -+ -+ ret = bman_acquire(bp->pool, bmb, num, 0); -+ if (ret < 0) { -+ if (num == 8) { -+ /* we have less than 8 buffers left; -+ * drain them one by one -+ */ -+ num = 1; -+ ret = 1; -+ continue; -+ } else { -+ /* Pool is fully drained */ -+ break; -+ } -+ } -+ -+ for (i = 0; i < num; i++) { -+ dma_addr_t addr = bm_buf_addr(&bmb[i]); -+ -+ dma_unmap_single(bp->dev, addr, bp->size, -+ DMA_BIDIRECTIONAL); -+ -+ bp->free_buf_cb(phys_to_virt(addr)); -+ } -+ } while (ret > 0); -+} -+EXPORT_SYMBOL(dpa_bp_drain); -+ -+static void __cold __attribute__((nonnull)) -+_dpa_bp_free(struct dpa_bp *dpa_bp) -+{ -+ struct dpa_bp *bp = dpa_bpid2pool(dpa_bp->bpid); -+ -+ /* the mapping between bpid and dpa_bp is done very late in the -+ * allocation procedure; if something failed before the mapping, the bp -+ * was not configured, therefore we don't need the below instructions -+ */ -+ if (!bp) -+ return; -+ -+ if (!atomic_dec_and_test(&bp->refs)) -+ return; -+ -+ if (bp->free_buf_cb) -+ dpa_bp_drain(bp); -+ -+ dpa_bp_array[bp->bpid] = NULL; -+ bman_free_pool(bp->pool); -+ -+ if (bp->dev) -+ platform_device_unregister(to_platform_device(bp->dev)); -+} -+ -+void __cold __attribute__((nonnull)) -+dpa_bp_free(struct dpa_priv_s *priv) -+{ -+ int i; -+ -+ for (i = 0; i < priv->bp_count; i++) -+ _dpa_bp_free(&priv->dpa_bp[i]); -+} -+EXPORT_SYMBOL(dpa_bp_free); -+ -+struct dpa_bp *dpa_bpid2pool(int bpid) -+{ -+ return dpa_bp_array[bpid]; -+} -+EXPORT_SYMBOL(dpa_bpid2pool); -+ -+void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp) -+{ -+ dpa_bp_array[bpid] = 
dpa_bp; -+ atomic_set(&dpa_bp->refs, 1); -+} -+ -+bool dpa_bpid2pool_use(int bpid) -+{ -+ if (dpa_bpid2pool(bpid)) { -+ atomic_inc(&dpa_bp_array[bpid]->refs); -+ return true; -+ } -+ -+ return false; -+} -+ -+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE -+u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb, -+ void *accel_priv, select_queue_fallback_t fallback) -+{ -+ return dpa_get_queue_mapping(skb); -+} -+EXPORT_SYMBOL(dpa_select_queue); -+#endif -+ -+struct dpa_fq *dpa_fq_alloc(struct device *dev, -+ u32 fq_start, -+ u32 fq_count, -+ struct list_head *list, -+ enum dpa_fq_type fq_type) -+{ -+ int i; -+ struct dpa_fq *dpa_fq; -+ -+ dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fq_count, GFP_KERNEL); -+ if (dpa_fq == NULL) -+ return NULL; -+ -+ for (i = 0; i < fq_count; i++) { -+ dpa_fq[i].fq_type = fq_type; -+ if (fq_type == FQ_TYPE_RX_PCD_HI_PRIO) -+ dpa_fq[i].fqid = fq_start ? -+ DPAA_ETH_FQ_DELTA + fq_start + i : 0; -+ else -+ dpa_fq[i].fqid = fq_start ? fq_start + i : 0; -+ -+ list_add_tail(&dpa_fq[i].list, list); -+ } -+ -+#ifdef CONFIG_FMAN_PFC -+ if (fq_type == FQ_TYPE_TX) -+ for (i = 0; i < fq_count; i++) -+ dpa_fq[i].wq = i / dpa_num_cpus; -+ else -+#endif -+ for (i = 0; i < fq_count; i++) -+ _dpa_assign_wq(dpa_fq + i); -+ -+ return dpa_fq; -+} -+EXPORT_SYMBOL(dpa_fq_alloc); -+ -+/* Probing of FQs for MACful ports */ -+int dpa_fq_probe_mac(struct device *dev, struct list_head *list, -+ struct fm_port_fqs *port_fqs, -+ bool alloc_tx_conf_fqs, -+ enum port_type ptype) -+{ -+ struct fqid_cell *fqids = NULL; -+ const void *fqids_off = NULL; -+ struct dpa_fq *dpa_fq = NULL; -+ struct device_node *np = dev->of_node; -+ int num_ranges; -+ int i, lenp; -+ -+ if (ptype == TX && alloc_tx_conf_fqs) { -+ if (!dpa_fq_alloc(dev, tx_confirm_fqids->start, -+ tx_confirm_fqids->count, list, -+ FQ_TYPE_TX_CONF_MQ)) -+ goto fq_alloc_failed; -+ } -+ -+ fqids_off = of_get_property(np, fsl_qman_frame_queues[ptype], &lenp); -+ if (fqids_off == NULL) { -+ /* No dts definition, so use the defaults. */ -+ fqids = default_fqids[ptype]; -+ num_ranges = 3; -+ } else { -+ num_ranges = lenp / sizeof(*fqids); -+ -+ fqids = devm_kzalloc(dev, sizeof(*fqids) * num_ranges, -+ GFP_KERNEL); -+ if (fqids == NULL) -+ goto fqids_alloc_failed; -+ -+ /* convert to CPU endianness */ -+ for (i = 0; i < num_ranges; i++) { -+ fqids[i].start = be32_to_cpup(fqids_off + -+ i * sizeof(*fqids)); -+ fqids[i].count = be32_to_cpup(fqids_off + -+ i * sizeof(*fqids) + sizeof(__be32)); -+ } -+ } -+ -+ for (i = 0; i < num_ranges; i++) { -+ switch (i) { -+ case 0: -+ /* The first queue is the error queue */ -+ if (fqids[i].count != 1) -+ goto invalid_error_queue; -+ -+ dpa_fq = dpa_fq_alloc(dev, fqids[i].start, -+ fqids[i].count, list, -+ ptype == RX ? -+ FQ_TYPE_RX_ERROR : -+ FQ_TYPE_TX_ERROR); -+ if (dpa_fq == NULL) -+ goto fq_alloc_failed; -+ -+ if (ptype == RX) -+ port_fqs->rx_errq = &dpa_fq[0]; -+ else -+ port_fqs->tx_errq = &dpa_fq[0]; -+ break; -+ case 1: -+ /* The second queue is the default queue */ -+ if (fqids[i].count != 1) -+ goto invalid_default_queue; -+ -+ dpa_fq = dpa_fq_alloc(dev, fqids[i].start, -+ fqids[i].count, list, -+ ptype == RX ? 
-+ FQ_TYPE_RX_DEFAULT : -+ FQ_TYPE_TX_CONFIRM); -+ if (dpa_fq == NULL) -+ goto fq_alloc_failed; -+ -+ if (ptype == RX) -+ port_fqs->rx_defq = &dpa_fq[0]; -+ else -+ port_fqs->tx_defq = &dpa_fq[0]; -+ break; -+ default: -+ /* all subsequent queues are either RX* PCD or Tx */ -+ if (ptype == RX) { -+ if (!dpa_fq_alloc(dev, fqids[i].start, -+ fqids[i].count, list, -+ FQ_TYPE_RX_PCD) || -+ !dpa_fq_alloc(dev, fqids[i].start, -+ fqids[i].count, list, -+ FQ_TYPE_RX_PCD_HI_PRIO)) -+ goto fq_alloc_failed; -+ } else { -+ if (!dpa_fq_alloc(dev, fqids[i].start, -+ fqids[i].count, list, -+ FQ_TYPE_TX)) -+ goto fq_alloc_failed; -+ } -+ break; -+ } -+ } -+ -+ return 0; -+ -+fq_alloc_failed: -+fqids_alloc_failed: -+ dev_err(dev, "Cannot allocate memory for frame queues\n"); -+ return -ENOMEM; -+ -+invalid_default_queue: -+invalid_error_queue: -+ dev_err(dev, "Too many default or error queues\n"); -+ return -EINVAL; -+} -+EXPORT_SYMBOL(dpa_fq_probe_mac); -+ -+static u32 rx_pool_channel; -+static DEFINE_SPINLOCK(rx_pool_channel_init); -+ -+int dpa_get_channel(void) -+{ -+ spin_lock(&rx_pool_channel_init); -+ if (!rx_pool_channel) { -+ u32 pool; -+ int ret = qman_alloc_pool(&pool); -+ if (!ret) -+ rx_pool_channel = pool; -+ } -+ spin_unlock(&rx_pool_channel_init); -+ if (!rx_pool_channel) -+ return -ENOMEM; -+ return rx_pool_channel; -+} -+EXPORT_SYMBOL(dpa_get_channel); -+ -+void dpa_release_channel(void) -+{ -+ qman_release_pool(rx_pool_channel); -+} -+EXPORT_SYMBOL(dpa_release_channel); -+ -+int dpaa_eth_add_channel(void *__arg) -+{ -+ const cpumask_t *cpus = qman_affine_cpus(); -+ u32 pool = QM_SDQCR_CHANNELS_POOL_CONV((u16)(unsigned long)__arg); -+ int cpu; -+ struct qman_portal *portal; -+ -+ for_each_cpu(cpu, cpus) { -+ portal = (struct qman_portal *)qman_get_affine_portal(cpu); -+ qman_p_static_dequeue_add(portal, pool); -+ } -+ return 0; -+} -+EXPORT_SYMBOL(dpaa_eth_add_channel); -+ -+/** -+ * Congestion group state change notification callback. -+ * Stops the device's egress queues while they are congested and -+ * wakes them upon exiting congested state. -+ * Also updates some CGR-related stats. -+ */ -+static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr, -+ int congested) -+{ -+ struct dpa_priv_s *priv = (struct dpa_priv_s *)container_of(cgr, -+ struct dpa_priv_s, cgr_data.cgr); -+ -+ if (congested) { -+ priv->cgr_data.congestion_start_jiffies = jiffies; -+ netif_tx_stop_all_queues(priv->net_dev); -+ priv->cgr_data.cgr_congested_count++; -+ } else { -+ priv->cgr_data.congested_jiffies += -+ (jiffies - priv->cgr_data.congestion_start_jiffies); -+ netif_tx_wake_all_queues(priv->net_dev); -+ } -+} -+ -+int dpaa_eth_cgr_init(struct dpa_priv_s *priv) -+{ -+ struct qm_mcc_initcgr initcgr; -+ u32 cs_th; -+ int err; -+ -+ err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid); -+ if (err < 0) { -+ pr_err("Error %d allocating CGR ID\n", err); -+ goto out_error; -+ } -+ priv->cgr_data.cgr.cb = dpaa_eth_cgscn; -+ -+ /* Enable Congestion State Change Notifications and CS taildrop */ -+ initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES; -+ initcgr.cgr.cscn_en = QM_CGR_EN; -+ -+ /* Set different thresholds based on the MAC speed. -+ * TODO: this may turn out to be suboptimal if the MAC is reconfigured at a speed -+ * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link. -+ * In such cases, we ought to reconfigure the threshold, too. 
-+ */ -+ if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full) -+ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G; -+ else -+ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G; -+ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1); -+ -+ initcgr.we_mask |= QM_CGR_WE_CSTD_EN; -+ initcgr.cgr.cstd_en = QM_CGR_EN; -+ -+ err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT, -+ &initcgr); -+ if (err < 0) { -+ pr_err("Error %d creating CGR with ID %d\n", err, -+ priv->cgr_data.cgr.cgrid); -+ qman_release_cgrid(priv->cgr_data.cgr.cgrid); -+ goto out_error; -+ } -+ pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n", -+ priv->cgr_data.cgr.cgrid, priv->mac_dev->addr, -+ priv->cgr_data.cgr.chan); -+ -+out_error: -+ return err; -+} -+EXPORT_SYMBOL(dpaa_eth_cgr_init); -+ -+static inline void dpa_setup_ingress(const struct dpa_priv_s *priv, -+ struct dpa_fq *fq, -+ const struct qman_fq *template) -+{ -+ fq->fq_base = *template; -+ fq->net_dev = priv->net_dev; -+ -+ fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE; -+ fq->channel = priv->channel; -+} -+ -+static inline void dpa_setup_egress(const struct dpa_priv_s *priv, -+ struct dpa_fq *fq, -+ struct fm_port *port, -+ const struct qman_fq *template) -+{ -+ fq->fq_base = *template; -+ fq->net_dev = priv->net_dev; -+ -+ if (port) { -+ fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL; -+ fq->channel = (uint16_t)fm_get_tx_port_channel(port); -+ } else { -+ fq->flags = QMAN_FQ_FLAG_NO_MODIFY; -+ } -+} -+ -+void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs, -+ struct fm_port *tx_port) -+{ -+ struct dpa_fq *fq; -+ uint16_t portals[NR_CPUS]; -+ int cpu, portal_cnt = 0, num_portals = 0; -+ uint32_t pcd_fqid, pcd_fqid_hi_prio; -+ const cpumask_t *affine_cpus = qman_affine_cpus(); -+ int egress_cnt = 0, conf_cnt = 0; -+ -+ /* Prepare for PCD FQs init */ -+ for_each_cpu(cpu, affine_cpus) -+ portals[num_portals++] = qman_affine_channel(cpu); -+ if (num_portals == 0) -+ dev_err(priv->net_dev->dev.parent, -+ "No Qman software (affine) channels found"); -+ -+ pcd_fqid = (priv->mac_dev) ? -+ DPAA_ETH_PCD_FQ_BASE(priv->mac_dev->res->start) : 0; -+ pcd_fqid_hi_prio = (priv->mac_dev) ? -+ DPAA_ETH_PCD_FQ_HI_PRIO_BASE(priv->mac_dev->res->start) : 0; -+ -+ /* Initialize each FQ in the list */ -+ list_for_each_entry(fq, &priv->dpa_fq_list, list) { -+ switch (fq->fq_type) { -+ case FQ_TYPE_RX_DEFAULT: -+ BUG_ON(!priv->mac_dev); -+ dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq); -+ break; -+ case FQ_TYPE_RX_ERROR: -+ BUG_ON(!priv->mac_dev); -+ dpa_setup_ingress(priv, fq, &fq_cbs->rx_errq); -+ break; -+ case FQ_TYPE_RX_PCD: -+ /* For MACless we can't have dynamic Rx queues */ -+ BUG_ON(!priv->mac_dev && !fq->fqid); -+ dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq); -+ if (!fq->fqid) -+ fq->fqid = pcd_fqid++; -+ fq->channel = portals[portal_cnt]; -+ portal_cnt = (portal_cnt + 1) % num_portals; -+ break; -+ case FQ_TYPE_RX_PCD_HI_PRIO: -+ /* For MACless we can't have dynamic Hi Pri Rx queues */ -+ BUG_ON(!priv->mac_dev && !fq->fqid); -+ dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq); -+ if (!fq->fqid) -+ fq->fqid = pcd_fqid_hi_prio++; -+ fq->channel = portals[portal_cnt]; -+ portal_cnt = (portal_cnt + 1) % num_portals; -+ break; -+ case FQ_TYPE_TX: -+ dpa_setup_egress(priv, fq, tx_port, -+ &fq_cbs->egress_ern); -+ /* If we have more Tx queues than the number of cores, -+ * just ignore the extra ones. 
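-+ * Only the first DPAA_ETH_TX_QUEUES egress FQs are recorded in
-+ * priv->egress_fqs; the surplus FQs are still created and
-+ * initialized, they are just not used for transmission.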
-+ */ -+ if (egress_cnt < DPAA_ETH_TX_QUEUES) -+ priv->egress_fqs[egress_cnt++] = &fq->fq_base; -+ break; -+ case FQ_TYPE_TX_CONFIRM: -+ BUG_ON(!priv->mac_dev); -+ dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq); -+ break; -+ case FQ_TYPE_TX_CONF_MQ: -+ BUG_ON(!priv->mac_dev); -+ dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq); -+ priv->conf_fqs[conf_cnt++] = &fq->fq_base; -+ break; -+ case FQ_TYPE_TX_ERROR: -+ BUG_ON(!priv->mac_dev); -+ dpa_setup_ingress(priv, fq, &fq_cbs->tx_errq); -+ break; -+ default: -+ dev_warn(priv->net_dev->dev.parent, -+ "Unknown FQ type detected!\n"); -+ break; -+ } -+ } -+ -+ /* The number of Tx queues may be smaller than the number of cores, if -+ * the Tx queue range is specified in the device tree instead of being -+ * dynamically allocated. -+ * Make sure all CPUs receive a corresponding Tx queue. -+ */ -+ while (egress_cnt < DPAA_ETH_TX_QUEUES) { -+ list_for_each_entry(fq, &priv->dpa_fq_list, list) { -+ if (fq->fq_type != FQ_TYPE_TX) -+ continue; -+ priv->egress_fqs[egress_cnt++] = &fq->fq_base; -+ if (egress_cnt == DPAA_ETH_TX_QUEUES) -+ break; -+ } -+ } -+} -+EXPORT_SYMBOL(dpa_fq_setup); -+ -+int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable) -+{ -+ int _errno; -+ const struct dpa_priv_s *priv; -+ struct device *dev; -+ struct qman_fq *fq; -+ struct qm_mcc_initfq initfq; -+ struct qman_fq *confq; -+ int queue_id; -+ -+ priv = netdev_priv(dpa_fq->net_dev); -+ dev = dpa_fq->net_dev->dev.parent; -+ -+ if (dpa_fq->fqid == 0) -+ dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID; -+ -+ dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY); -+ -+ _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base); -+ if (_errno) { -+ dev_err(dev, "qman_create_fq() failed\n"); -+ return _errno; -+ } -+ fq = &dpa_fq->fq_base; -+ -+ if (dpa_fq->init) { -+ memset(&initfq, 0, sizeof(initfq)); -+ -+ initfq.we_mask = QM_INITFQ_WE_FQCTRL; -+ /* FIXME: why would we want to keep an empty FQ in cache? */ -+ initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE; -+ -+ /* Try to reduce the number of portal interrupts for -+ * Tx Confirmation FQs. -+ */ -+ if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM) -+ initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE; -+ -+ /* FQ placement */ -+ initfq.we_mask |= QM_INITFQ_WE_DESTWQ; -+ -+ initfq.fqd.dest.channel = dpa_fq->channel; -+ initfq.fqd.dest.wq = dpa_fq->wq; -+ -+ /* Put all egress queues in a congestion group of their own. -+ * Sensu stricto, the Tx confirmation queues are Rx FQs, -+ * rather than Tx - but they nonetheless account for the -+ * memory footprint on behalf of egress traffic. We therefore -+ * place them in the netdev's CGR, along with the Tx FQs. -+ */ -+ if (dpa_fq->fq_type == FQ_TYPE_TX || -+ dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM || -+ dpa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) { -+ initfq.we_mask |= QM_INITFQ_WE_CGID; -+ initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE; -+ initfq.fqd.cgid = (uint8_t)priv->cgr_data.cgr.cgrid; -+ /* Set a fixed overhead accounting, in an attempt to -+ * reduce the impact of fixed-size skb shells and the -+ * driver's needed headroom on system memory. This is -+ * especially the case when the egress traffic is -+ * composed of small datagrams. -+ * Unfortunately, QMan's OAL value is capped to an -+ * insufficient value, but even that is better than -+ * no overhead accounting at all. 
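-+ * (Worked example, assuming FSL_QMAN_MAX_OAL is 127 so that it fits
-+ * the signed char programmed below: a 232-byte struct sk_buff plus
-+ * 96 bytes of tx_headroom would call for 328 bytes of accounting,
-+ * yet only 127 get applied.)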
-+ */ -+ initfq.we_mask |= QM_INITFQ_WE_OAC; -+ initfq.fqd.oac_init.oac = QM_OAC_CG; -+ initfq.fqd.oac_init.oal = -+ (signed char)(min(sizeof(struct sk_buff) + -+ priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL)); -+ } -+ -+ if (td_enable) { -+ initfq.we_mask |= QM_INITFQ_WE_TDTHRESH; -+ qm_fqd_taildrop_set(&initfq.fqd.td, -+ DPA_FQ_TD, 1); -+ initfq.fqd.fq_ctrl = QM_FQCTRL_TDE; -+ } -+ -+ /* Configure the Tx confirmation queue, now that we know -+ * which Tx queue it pairs with. -+ */ -+ if (dpa_fq->fq_type == FQ_TYPE_TX) { -+ queue_id = _dpa_tx_fq_to_id(priv, &dpa_fq->fq_base); -+ if (queue_id >= 0) { -+ confq = priv->conf_fqs[queue_id]; -+ if (confq) { -+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; -+ /* ContextA: OVOM=1 (use contextA2 bits instead of ICAD) -+ * A2V=1 (contextA A2 field is valid) -+ * A0V=1 (contextA A0 field is valid) -+ * B0V=1 (contextB field is valid) -+ * ContextA A2: EBD=1 (deallocate buffers inside FMan) -+ * ContextB B0(ASPID): 0 (absolute Virtual Storage ID) -+ */ -+ initfq.fqd.context_a.hi = 0x1e000000; -+ initfq.fqd.context_a.lo = 0x80000000; -+ } -+ } -+ } -+ -+ /* Put all *private* ingress queues in our "ingress CGR". */ -+ if (priv->use_ingress_cgr && -+ (dpa_fq->fq_type == FQ_TYPE_RX_DEFAULT || -+ dpa_fq->fq_type == FQ_TYPE_RX_ERROR || -+ dpa_fq->fq_type == FQ_TYPE_RX_PCD || -+ dpa_fq->fq_type == FQ_TYPE_RX_PCD_HI_PRIO)) { -+ initfq.we_mask |= QM_INITFQ_WE_CGID; -+ initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE; -+ initfq.fqd.cgid = (uint8_t)priv->ingress_cgr.cgrid; -+ /* Set a fixed overhead accounting, just like for the -+ * egress CGR. -+ */ -+ initfq.we_mask |= QM_INITFQ_WE_OAC; -+ initfq.fqd.oac_init.oac = QM_OAC_CG; -+ initfq.fqd.oac_init.oal = -+ (signed char)(min(sizeof(struct sk_buff) + -+ priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL)); -+ } -+ -+ /* Initialization common to all ingress queues */ -+ if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) { -+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; -+ initfq.fqd.fq_ctrl |= -+ QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK; -+ initfq.fqd.context_a.stashing.exclusive = -+ QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX | -+ QM_STASHING_EXCL_ANNOTATION; -+ initfq.fqd.context_a.stashing.data_cl = 2; -+ initfq.fqd.context_a.stashing.annotation_cl = 1; -+ initfq.fqd.context_a.stashing.context_cl = -+ DIV_ROUND_UP(sizeof(struct qman_fq), 64); -+ } -+ -+ _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq); -+ if (_errno < 0) { -+ dev_err(dev, "qman_init_fq(%u) = %d\n", -+ qman_fq_fqid(fq), _errno); -+ qman_destroy_fq(fq, 0); -+ return _errno; -+ } -+ } -+ -+ dpa_fq->fqid = qman_fq_fqid(fq); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dpa_fq_init); -+ -+int __cold __attribute__((nonnull)) -+_dpa_fq_free(struct device *dev, struct qman_fq *fq) -+{ -+ int _errno, __errno; -+ struct dpa_fq *dpa_fq; -+ const struct dpa_priv_s *priv; -+ -+ _errno = 0; -+ -+ dpa_fq = container_of(fq, struct dpa_fq, fq_base); -+ priv = netdev_priv(dpa_fq->net_dev); -+ -+ if (dpa_fq->init) { -+ _errno = qman_retire_fq(fq, NULL); -+ if (unlikely(_errno < 0) && netif_msg_drv(priv)) -+ dev_err(dev, "qman_retire_fq(%u) = %d\n", -+ qman_fq_fqid(fq), _errno); -+ -+ __errno = qman_oos_fq(fq); -+ if (unlikely(__errno < 0) && netif_msg_drv(priv)) { -+ dev_err(dev, "qman_oos_fq(%u) = %d\n", -+ qman_fq_fqid(fq), __errno); -+ if (_errno >= 0) -+ _errno = __errno; -+ } -+ } -+ -+ qman_destroy_fq(fq, 0); -+ list_del(&dpa_fq->list); -+ -+ return _errno; -+} -+EXPORT_SYMBOL(_dpa_fq_free); -+ -+int __cold __attribute__((nonnull)) -+dpa_fq_free(struct device *dev, struct list_head 
*list) -+{ -+ int _errno, __errno; -+ struct dpa_fq *dpa_fq, *tmp; -+ -+ _errno = 0; -+ list_for_each_entry_safe(dpa_fq, tmp, list, list) { -+ __errno = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq); -+ if (unlikely(__errno < 0) && _errno >= 0) -+ _errno = __errno; -+ } -+ -+ return _errno; -+} -+EXPORT_SYMBOL(dpa_fq_free); -+ -+static void -+dpaa_eth_init_tx_port(struct fm_port *port, struct dpa_fq *errq, -+ struct dpa_fq *defq, struct dpa_buffer_layout_s *buf_layout) -+{ -+ struct fm_port_params tx_port_param; -+ bool frag_enabled = false; -+ -+ memset(&tx_port_param, 0, sizeof(tx_port_param)); -+ dpaa_eth_init_port(tx, port, tx_port_param, errq->fqid, defq->fqid, -+ buf_layout, frag_enabled); -+} -+ -+static void -+dpaa_eth_init_rx_port(struct fm_port *port, struct dpa_bp *bp, size_t count, -+ struct dpa_fq *errq, struct dpa_fq *defq, -+ struct dpa_buffer_layout_s *buf_layout) -+{ -+ struct fm_port_params rx_port_param; -+ int i; -+ bool frag_enabled = false; -+ -+ memset(&rx_port_param, 0, sizeof(rx_port_param)); -+ count = min(ARRAY_SIZE(rx_port_param.pool_param), count); -+ rx_port_param.num_pools = (uint8_t)count; -+ for (i = 0; i < count; i++) { -+ if (i >= rx_port_param.num_pools) -+ break; -+ rx_port_param.pool_param[i].id = bp[i].bpid; -+ rx_port_param.pool_param[i].size = (uint16_t)bp[i].size; -+ } -+ -+ dpaa_eth_init_port(rx, port, rx_port_param, errq->fqid, defq->fqid, -+ buf_layout, frag_enabled); -+} -+ -+#if defined(CONFIG_FSL_SDK_FMAN_TEST) -+/* Defined as weak, to be implemented by fman pcd tester. */ -+int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *) -+__attribute__((weak)); -+ -+int dpa_free_pcd_fqids(struct device *, uint32_t) __attribute__((weak)); -+#else -+int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *); -+ -+int dpa_free_pcd_fqids(struct device *, uint32_t); -+ -+#endif /* CONFIG_FSL_SDK_FMAN_TEST */ -+ -+ -+int dpa_alloc_pcd_fqids(struct device *dev, uint32_t num, -+ uint8_t alignment, uint32_t *base_fqid) -+{ -+ dev_crit(dev, "callback not implemented!\n"); -+ -+ return 0; -+} -+ -+int dpa_free_pcd_fqids(struct device *dev, uint32_t base_fqid) -+{ -+ -+ dev_crit(dev, "callback not implemented!\n"); -+ -+ return 0; -+} -+ -+void dpaa_eth_init_ports(struct mac_device *mac_dev, -+ struct dpa_bp *bp, size_t count, -+ struct fm_port_fqs *port_fqs, -+ struct dpa_buffer_layout_s *buf_layout, -+ struct device *dev) -+{ -+ struct fm_port_pcd_param rx_port_pcd_param; -+ struct fm_port *rxport = mac_dev->port_dev[RX]; -+ struct fm_port *txport = mac_dev->port_dev[TX]; -+ -+ dpaa_eth_init_tx_port(txport, port_fqs->tx_errq, -+ port_fqs->tx_defq, &buf_layout[TX]); -+ dpaa_eth_init_rx_port(rxport, bp, count, port_fqs->rx_errq, -+ port_fqs->rx_defq, &buf_layout[RX]); -+ -+ rx_port_pcd_param.cba = dpa_alloc_pcd_fqids; -+ rx_port_pcd_param.cbf = dpa_free_pcd_fqids; -+ rx_port_pcd_param.dev = dev; -+ fm_port_pcd_bind(rxport, &rx_port_pcd_param); -+} -+EXPORT_SYMBOL(dpaa_eth_init_ports); -+ -+void dpa_release_sgt(struct qm_sg_entry *sgt) -+{ -+ struct dpa_bp *dpa_bp; -+ struct bm_buffer bmb[DPA_BUFF_RELEASE_MAX]; -+ uint8_t i = 0, j; -+ -+ memset(bmb, 0, DPA_BUFF_RELEASE_MAX * sizeof(struct bm_buffer)); -+ -+ do { -+ dpa_bp = dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i])); -+ DPA_BUG_ON(!dpa_bp); -+ -+ j = 0; -+ do { -+ DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i])); -+ bm_buffer_set64(&bmb[j], qm_sg_addr(&sgt[i])); -+ -+ j++; i++; -+ } while (j < ARRAY_SIZE(bmb) && -+ !qm_sg_entry_get_final(&sgt[i-1]) && -+ qm_sg_entry_get_bpid(&sgt[i-1]) == -+ 
qm_sg_entry_get_bpid(&sgt[i])); -+ -+ while (bman_release(dpa_bp->pool, bmb, j, 0)) -+ cpu_relax(); -+ } while (!qm_sg_entry_get_final(&sgt[i-1])); -+} -+EXPORT_SYMBOL(dpa_release_sgt); -+ -+void __attribute__((nonnull)) -+dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd) -+{ -+ struct qm_sg_entry *sgt; -+ struct dpa_bp *dpa_bp; -+ struct bm_buffer bmb; -+ dma_addr_t addr; -+ void *vaddr; -+ -+ memset(&bmb, 0, sizeof(struct bm_buffer)); -+ bm_buffer_set64(&bmb, fd->addr); -+ -+ dpa_bp = dpa_bpid2pool(fd->bpid); -+ DPA_BUG_ON(!dpa_bp); -+ -+ if (fd->format == qm_fd_sg) { -+ vaddr = phys_to_virt(fd->addr); -+ sgt = vaddr + dpa_fd_offset(fd); -+ -+ dma_unmap_single(dpa_bp->dev, qm_fd_addr(fd), dpa_bp->size, -+ DMA_BIDIRECTIONAL); -+ -+ dpa_release_sgt(sgt); -+ addr = dma_map_single(dpa_bp->dev, vaddr, dpa_bp->size, -+ DMA_BIDIRECTIONAL); -+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) { -+ dev_err(dpa_bp->dev, "DMA mapping failed"); -+ return; -+ } -+ bm_buffer_set64(&bmb, addr); -+ } -+ -+ while (bman_release(dpa_bp->pool, &bmb, 1, 0)) -+ cpu_relax(); -+} -+EXPORT_SYMBOL(dpa_fd_release); -+ -+void count_ern(struct dpa_percpu_priv_s *percpu_priv, -+ const struct qm_mr_entry *msg) -+{ -+ switch (msg->ern.rc & QM_MR_RC_MASK) { -+ case QM_MR_RC_CGR_TAILDROP: -+ percpu_priv->ern_cnt.cg_tdrop++; -+ break; -+ case QM_MR_RC_WRED: -+ percpu_priv->ern_cnt.wred++; -+ break; -+ case QM_MR_RC_ERROR: -+ percpu_priv->ern_cnt.err_cond++; -+ break; -+ case QM_MR_RC_ORPWINDOW_EARLY: -+ percpu_priv->ern_cnt.early_window++; -+ break; -+ case QM_MR_RC_ORPWINDOW_LATE: -+ percpu_priv->ern_cnt.late_window++; -+ break; -+ case QM_MR_RC_FQ_TAILDROP: -+ percpu_priv->ern_cnt.fq_tdrop++; -+ break; -+ case QM_MR_RC_ORPWINDOW_RETIRED: -+ percpu_priv->ern_cnt.fq_retired++; -+ break; -+ case QM_MR_RC_ORP_ZERO: -+ percpu_priv->ern_cnt.orp_zero++; -+ break; -+ } -+} -+EXPORT_SYMBOL(count_ern); -+ -+/** -+ * Turn on HW checksum computation for this outgoing frame. -+ * If the current protocol is not something we support in this regard -+ * (or if the stack has already computed the SW checksum), we do nothing. -+ * -+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value -+ * otherwise. -+ * -+ * Note that this function may modify the fd->cmd field and the skb data buffer -+ * (the Parse Results area). -+ */ -+int dpa_enable_tx_csum(struct dpa_priv_s *priv, -+ struct sk_buff *skb, struct qm_fd *fd, char *parse_results) -+{ -+ fm_prs_result_t *parse_result; -+ struct iphdr *iph; -+ struct ipv6hdr *ipv6h = NULL; -+ u8 l4_proto; -+ u16 ethertype = ntohs(skb->protocol); -+ int retval = 0; -+ -+ if (skb->ip_summed != CHECKSUM_PARTIAL) -+ return 0; -+ -+ /* Note: L3 csum seems to be already computed in sw, but we can't choose -+ * L4 alone from the FM configuration anyway. -+ */ -+ -+ /* Fill in some fields of the Parse Results array, so the FMan -+ * can find them as if they came from the FMan Parser. 
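-+ * Only the fields the checksum generation needs are filled in below:
-+ * the L3/L4 protocol results (l3r/l4r) and the L3/L4 header offsets;
-+ * the rest of the area is left untouched.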
-+ */ -+ parse_result = (fm_prs_result_t *)parse_results; -+ -+ /* If we're dealing with VLAN, get the real Ethernet type */ -+ if (ethertype == ETH_P_8021Q) { -+ /* We can't always assume the MAC header is set correctly -+ * by the stack, so reset to beginning of skb->data -+ */ -+ skb_reset_mac_header(skb); -+ ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto); -+ } -+ -+ /* Fill in the relevant L3 parse result fields -+ * and read the L4 protocol type -+ */ -+ switch (ethertype) { -+ case ETH_P_IP: -+ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4); -+ iph = ip_hdr(skb); -+ DPA_BUG_ON(iph == NULL); -+ l4_proto = iph->protocol; -+ break; -+ case ETH_P_IPV6: -+ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6); -+ ipv6h = ipv6_hdr(skb); -+ DPA_BUG_ON(ipv6h == NULL); -+ l4_proto = ipv6h->nexthdr; -+ break; -+ default: -+ /* We shouldn't even be here */ -+ if (netif_msg_tx_err(priv) && net_ratelimit()) -+ netdev_alert(priv->net_dev, -+ "Can't compute HW csum for L3 proto 0x%x\n", -+ ntohs(skb->protocol)); -+ retval = -EIO; -+ goto return_error; -+ } -+ -+ /* Fill in the relevant L4 parse result fields */ -+ switch (l4_proto) { -+ case IPPROTO_UDP: -+ parse_result->l4r = FM_L4_PARSE_RESULT_UDP; -+ break; -+ case IPPROTO_TCP: -+ parse_result->l4r = FM_L4_PARSE_RESULT_TCP; -+ break; -+ default: -+ /* This can as well be a BUG() */ -+ if (netif_msg_tx_err(priv) && net_ratelimit()) -+ netdev_alert(priv->net_dev, -+ "Can't compute HW csum for L4 proto 0x%x\n", -+ l4_proto); -+ retval = -EIO; -+ goto return_error; -+ } -+ -+ /* At index 0 is IPOffset_1 as defined in the Parse Results */ -+ parse_result->ip_off[0] = (uint8_t)skb_network_offset(skb); -+ parse_result->l4_off = (uint8_t)skb_transport_offset(skb); -+ -+ /* Enable L3 (and L4, if TCP or UDP) HW checksum. */ -+ fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC; -+ -+ /* On P1023 and similar platforms fd->cmd interpretation could -+ * be disabled by setting CONTEXT_A bit ICMD; currently this bit -+ * is not set so we do not need to check; in the future, if/when -+ * using context_a we need to check this bit -+ */ -+ -+return_error: -+ return retval; -+} -+EXPORT_SYMBOL(dpa_enable_tx_csum); -+ -+#ifdef CONFIG_FSL_DPAA_CEETM -+void dpa_enable_ceetm(struct net_device *dev) -+{ -+ struct dpa_priv_s *priv = netdev_priv(dev); -+ priv->ceetm_en = true; -+} -+EXPORT_SYMBOL(dpa_enable_ceetm); -+ -+void dpa_disable_ceetm(struct net_device *dev) -+{ -+ struct dpa_priv_s *priv = netdev_priv(dev); -+ priv->ceetm_en = false; -+} -+EXPORT_SYMBOL(dpa_disable_ceetm); -+#endif ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h -@@ -0,0 +1,227 @@ -+/* Copyright 2008-2013 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __DPAA_ETH_COMMON_H
-+#define __DPAA_ETH_COMMON_H
-+
-+#include <linux/netdevice.h> /* struct net_device */
-+#include <linux/fsl_bman.h> /* struct bm_buffer */
-+#include <linux/platform_device.h> /* struct platform_device */
-+#include <linux/net_tstamp.h> /* struct hwtstamp_config */
-+
-+#include "dpaa_eth.h"
-+#include "lnxwrp_fsl_fman.h"
-+
-+#define dpaa_eth_init_port(type, port, param, errq_id, defq_id, buf_layout,\
-+ frag_enabled) \
-+{ \
-+ param.errq = errq_id; \
-+ param.defq = defq_id; \
-+ param.priv_data_size = buf_layout->priv_data_size; \
-+ param.parse_results = buf_layout->parse_results; \
-+ param.hash_results = buf_layout->hash_results; \
-+ param.frag_enable = frag_enabled; \
-+ param.time_stamp = buf_layout->time_stamp; \
-+ param.manip_extra_space = buf_layout->manip_extra_space; \
-+ param.data_align = buf_layout->data_align; \
-+ fm_set_##type##_port_params(port, &param); \
-+}
-+
-+#define DPA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
-+
-+#ifdef DPAA_LS1043A_DMA_4K_ISSUE
-+/* each S/G entry can be divided into two S/G entries */
-+#define DPA_SGT_ENTRIES_THRESHOLD 7
-+#else
-+#define DPA_SGT_ENTRIES_THRESHOLD DPA_SGT_MAX_ENTRIES
-+#endif /* DPAA_LS1043A_DMA_4K_ISSUE */
-+
-+
-+#define DPA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
-+
-+/* return codes for the dpaa-eth hooks */
-+enum dpaa_eth_hook_result {
-+ /* fd/skb was retained by the hook.
-+ *
-+ * On the Rx path, this means the Ethernet driver will _not_
-+ * deliver the skb to the stack. Instead, the hook implementation
-+ * is expected to properly dispose of the skb.
-+ *
-+ * On the Tx path, the Ethernet driver's dpa_tx() function will
-+ * immediately return NETDEV_TX_OK. The hook implementation is expected
-+ * to free the skb. *DO*NOT* release it to BMan, or enqueue it to FMan,
-+ * unless you know exactly what you're doing!
-+ *
-+ * On the confirmation/error paths, the Ethernet driver will _not_
-+ * perform any fd cleanup, nor update the interface statistics.
-+ */
-+ DPAA_ETH_STOLEN,
-+ /* fd/skb was returned to the Ethernet driver for regular processing.
-+ * The hook is not allowed to, for instance, reallocate the skb (as if
-+ * by linearizing, copying, cloning or reallocating the headroom).
-+ */ -+ DPAA_ETH_CONTINUE -+}; -+ -+typedef enum dpaa_eth_hook_result (*dpaa_eth_ingress_hook_t)( -+ struct sk_buff *skb, struct net_device *net_dev, u32 fqid); -+typedef enum dpaa_eth_hook_result (*dpaa_eth_egress_hook_t)( -+ struct sk_buff *skb, struct net_device *net_dev); -+typedef enum dpaa_eth_hook_result (*dpaa_eth_confirm_hook_t)( -+ struct net_device *net_dev, const struct qm_fd *fd, u32 fqid); -+ -+/* used in napi related functions */ -+extern u16 qman_portal_max; -+ -+/* from dpa_ethtool.c */ -+extern const struct ethtool_ops dpa_ethtool_ops; -+ -+#ifdef CONFIG_FSL_DPAA_HOOKS -+/* Various hooks used for unit-testing and/or fastpath optimizations. -+ * Currently only one set of such hooks is supported. -+ */ -+struct dpaa_eth_hooks_s { -+ /* Invoked on the Tx private path, immediately after receiving the skb -+ * from the stack. -+ */ -+ dpaa_eth_egress_hook_t tx; -+ -+ /* Invoked on the Rx private path, right before passing the skb -+ * up the stack. At that point, the packet's protocol id has already -+ * been set. The skb's data pointer is now at the L3 header, and -+ * skb->mac_header points to the L2 header. skb->len has been adjusted -+ * to be the length of L3+payload (i.e., the length of the -+ * original frame minus the L2 header len). -+ * For more details on what the skb looks like, see eth_type_trans(). -+ */ -+ dpaa_eth_ingress_hook_t rx_default; -+ -+ /* Driver hook for the Rx error private path. */ -+ dpaa_eth_confirm_hook_t rx_error; -+ /* Driver hook for the Tx confirmation private path. */ -+ dpaa_eth_confirm_hook_t tx_confirm; -+ /* Driver hook for the Tx error private path. */ -+ dpaa_eth_confirm_hook_t tx_error; -+}; -+ -+void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks); -+ -+extern struct dpaa_eth_hooks_s dpaa_eth_hooks; -+#endif -+ -+int dpa_netdev_init(struct net_device *net_dev, -+ const uint8_t *mac_addr, -+ uint16_t tx_timeout); -+int __cold dpa_start(struct net_device *net_dev); -+int __cold dpa_stop(struct net_device *net_dev); -+void __cold dpa_timeout(struct net_device *net_dev); -+struct rtnl_link_stats64 * __cold -+dpa_get_stats64(struct net_device *net_dev, -+ struct rtnl_link_stats64 *stats); -+int dpa_change_mtu(struct net_device *net_dev, int new_mtu); -+int dpa_ndo_init(struct net_device *net_dev); -+int dpa_set_features(struct net_device *dev, netdev_features_t features); -+netdev_features_t dpa_fix_features(struct net_device *dev, -+ netdev_features_t features); -+#ifdef CONFIG_FSL_DPAA_TS -+u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv, -+ enum port_type rx_tx, const void *data); -+/* Updates the skb shared hw timestamp from the hardware timestamp */ -+int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx, -+ struct skb_shared_hwtstamps *shhwtstamps, const void *data); -+#endif /* CONFIG_FSL_DPAA_TS */ -+int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -+int __cold dpa_remove(struct platform_device *of_dev); -+struct mac_device * __cold __must_check -+__attribute__((nonnull)) dpa_mac_probe(struct platform_device *_of_dev); -+int dpa_set_mac_address(struct net_device *net_dev, void *addr); -+void dpa_set_rx_mode(struct net_device *net_dev); -+void dpa_set_buffers_layout(struct mac_device *mac_dev, -+ struct dpa_buffer_layout_s *layout); -+int __attribute__((nonnull)) -+dpa_bp_alloc(struct dpa_bp *dpa_bp); -+void __cold __attribute__((nonnull)) -+dpa_bp_free(struct dpa_priv_s *priv); -+struct dpa_bp *dpa_bpid2pool(int bpid); -+void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp); -+bool 
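
To illustrate how the hook API declared above is meant to be consumed: a hypothetical module fragment (buildable only against this driver's headers, with CONFIG_FSL_DPAA_HOOKS=y and <linux/module.h> included) that registers an Rx hook. Returning DPAA_ETH_STOLEN makes the hook responsible for the skb; DPAA_ETH_CONTINUE hands the frame back to the regular Rx path untouched.

#include "dpaa_eth_common.h"

static enum dpaa_eth_hook_result my_rx_hook(struct sk_buff *skb,
					    struct net_device *net_dev,
					    u32 fqid)
{
	if (skb->len < 64) {             /* example policy: steal runt frames */
		dev_kfree_skb(skb);      /* a stolen skb is ours to free */
		return DPAA_ETH_STOLEN;
	}
	return DPAA_ETH_CONTINUE;        /* untouched, back to the stack */
}

static struct dpaa_eth_hooks_s my_hooks = {
	.rx_default = my_rx_hook,        /* unused hooks stay NULL */
};

static int __init my_hooks_init(void)
{
	fsl_dpaa_eth_set_hooks(&my_hooks);
	return 0;
}
module_init(my_hooks_init);
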
dpa_bpid2pool_use(int bpid); -+void dpa_bp_drain(struct dpa_bp *bp); -+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE -+u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb, -+ void *accel_priv, select_queue_fallback_t fallback); -+#endif -+struct dpa_fq *dpa_fq_alloc(struct device *dev, -+ u32 fq_start, -+ u32 fq_count, -+ struct list_head *list, -+ enum dpa_fq_type fq_type); -+int dpa_fq_probe_mac(struct device *dev, struct list_head *list, -+ struct fm_port_fqs *port_fqs, -+ bool tx_conf_fqs_per_core, -+ enum port_type ptype); -+int dpa_get_channel(void); -+void dpa_release_channel(void); -+int dpaa_eth_add_channel(void *__arg); -+int dpaa_eth_cgr_init(struct dpa_priv_s *priv); -+void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs, -+ struct fm_port *tx_port); -+int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable); -+int __cold __attribute__((nonnull)) -+dpa_fq_free(struct device *dev, struct list_head *list); -+void dpaa_eth_init_ports(struct mac_device *mac_dev, -+ struct dpa_bp *bp, size_t count, -+ struct fm_port_fqs *port_fqs, -+ struct dpa_buffer_layout_s *buf_layout, -+ struct device *dev); -+void dpa_release_sgt(struct qm_sg_entry *sgt); -+void __attribute__((nonnull)) -+dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd); -+void count_ern(struct dpa_percpu_priv_s *percpu_priv, -+ const struct qm_mr_entry *msg); -+int dpa_enable_tx_csum(struct dpa_priv_s *priv, -+ struct sk_buff *skb, struct qm_fd *fd, char *parse_results); -+#ifdef CONFIG_FSL_DPAA_CEETM -+void dpa_enable_ceetm(struct net_device *dev); -+void dpa_disable_ceetm(struct net_device *dev); -+#endif -+struct proxy_device { -+ struct mac_device *mac_dev; -+}; -+ -+/* mac device control functions exposed by proxy interface*/ -+int dpa_proxy_start(struct net_device *net_dev); -+int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev); -+int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev, -+ struct net_device *net_dev); -+int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev, -+ struct net_device *net_dev); -+ -+#endif /* __DPAA_ETH_COMMON_H */ ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic.c -@@ -0,0 +1,1735 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "dpaa_eth.h" -+#include "dpaa_eth_common.h" -+#include "dpaa_eth_base.h" -+#include "dpaa_eth_generic.h" -+ -+#define DPA_DEFAULT_TX_HEADROOM 64 -+#define DPA_GENERIC_SKB_COPY_MAX_SIZE 256 -+#define DPA_GENERIC_NAPI_WEIGHT 64 -+#define DPA_GENERIC_DESCRIPTION "FSL DPAA Generic Ethernet driver" -+#define DPA_GENERIC_BUFFER_QUOTA 4 -+ -+MODULE_LICENSE("Dual BSD/GPL"); -+MODULE_DESCRIPTION(DPA_GENERIC_DESCRIPTION); -+ -+static uint8_t generic_debug = -1; -+module_param(generic_debug, byte, S_IRUGO); -+MODULE_PARM_DESC(generic_debug, "Module/Driver verbosity level"); -+ -+/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */ -+static uint16_t tx_timeout = 1000; -+module_param(tx_timeout, ushort, S_IRUGO); -+MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms"); -+ -+struct rtnl_link_stats64 *__cold -+dpa_generic_get_stats64(struct net_device *netdev, -+ struct rtnl_link_stats64 *stats); -+static int dpa_generic_set_mac_address(struct net_device *net_dev, -+ void *addr); -+static int __cold dpa_generic_start(struct net_device *netdev); -+static int __cold dpa_generic_stop(struct net_device *netdev); -+static int dpa_generic_eth_probe(struct platform_device *_of_dev); -+static int dpa_generic_remove(struct platform_device *of_dev); -+static void dpa_generic_ern(struct qman_portal *portal, -+ struct qman_fq *fq, -+ const struct qm_mr_entry *msg); -+static int __hot dpa_generic_tx(struct sk_buff *skb, -+ struct net_device *netdev); -+static void dpa_generic_drain_bp(struct dpa_bp *bp, u8 nbuf); -+static void dpa_generic_drain_sg_bp(struct dpa_bp *sg_bp, u8 nbuf); -+ -+static const struct net_device_ops dpa_generic_ops = { -+ .ndo_open = dpa_generic_start, -+ .ndo_start_xmit = dpa_generic_tx, -+ .ndo_stop = dpa_generic_stop, -+ .ndo_set_mac_address = dpa_generic_set_mac_address, -+ .ndo_tx_timeout = dpa_timeout, -+ .ndo_get_stats64 = dpa_generic_get_stats64, -+ .ndo_init = dpa_ndo_init, -+ .ndo_set_features = dpa_set_features, -+ .ndo_fix_features = dpa_fix_features, -+ .ndo_change_mtu = dpa_change_mtu, -+}; -+ -+static void dpa_generic_draining_timer(unsigned long arg) -+{ -+ struct dpa_generic_priv_s *priv = (struct dpa_generic_priv_s *)arg; -+ -+ dpa_generic_drain_bp(priv->draining_tx_bp, DPA_GENERIC_BUFFER_QUOTA); -+ dpa_generic_drain_sg_bp(priv->draining_tx_sg_bp, -+ DPA_GENERIC_BUFFER_QUOTA); -+ -+ if (priv->net_dev->flags & IFF_UP) -+ mod_timer(&(priv->timer), jiffies + 1); -+} -+ -+struct rtnl_link_stats64 *__cold -+dpa_generic_get_stats64(struct net_device *netdev, -+ struct rtnl_link_stats64 *stats) -+{ -+ struct dpa_generic_priv_s *priv = netdev_priv(netdev); -+ u64 *cpustats; -+ u64 *netstats = (u64 *)stats; -+ int i, j; -+ struct dpa_percpu_priv_s *percpu_priv; -+ int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64); -+ -+ for_each_online_cpu(i) { -+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i); -+ -+ cpustats = (u64 
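
Worth noting about dpa_generic_draining_timer() above: it re-arms itself one jiffy out for as long as the interface is up, so Tx buffers keep trickling out of the draining pools even when no traffic triggers a drain. The same self-rearming shape reduced to a sketch (pre-4.15 timer API, as this 4.9 driver uses; my_priv and do_bounded_work are hypothetical):

/* kernel-context sketch, not a standalone program */
struct my_priv {
	struct net_device *net_dev;
	struct timer_list timer;
};

static void my_drain_cb(unsigned long arg)
{
	struct my_priv *priv = (struct my_priv *)arg;

	do_bounded_work(priv);                  /* a fixed quota per tick */

	if (priv->net_dev->flags & IFF_UP)      /* stop once the netdev goes down */
		mod_timer(&priv->timer, jiffies + 1);
}

/* at probe:  init_timer(&priv->timer);
 *            priv->timer.data = (unsigned long)priv;
 *            priv->timer.function = my_drain_cb;
 * ndo_open does the first mod_timer(); nothing re-arms after ndo_stop clears
 * IFF_UP, which is how the timer winds down without an explicit del_timer().
 */
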
*)&percpu_priv->stats; -+ -+ for (j = 0; j < numstats; j++) -+ netstats[j] += cpustats[j]; -+ } -+ -+ return stats; -+} -+ -+static int dpa_generic_set_mac_address(struct net_device *net_dev, -+ void *addr) -+{ -+ const struct dpa_generic_priv_s *priv = netdev_priv(net_dev); -+ int _errno; -+ -+ _errno = eth_mac_addr(net_dev, addr); -+ if (_errno < 0) { -+ if (netif_msg_drv(priv)) -+ netdev_err(net_dev, "eth_mac_addr() = %d\n", _errno); -+ return _errno; -+ } -+ -+ return 0; -+} -+ -+static const struct of_device_id dpa_generic_match[] = { -+ { -+ .compatible = "fsl,dpa-ethernet-generic" -+ }, -+ {} -+}; -+ -+MODULE_DEVICE_TABLE(of, dpa_generic_match); -+ -+static struct platform_driver dpa_generic_driver = { -+ .driver = { -+ .name = KBUILD_MODNAME, -+ .of_match_table = dpa_generic_match, -+ .owner = THIS_MODULE, -+ }, -+ .probe = dpa_generic_eth_probe, -+ .remove = dpa_generic_remove -+}; -+ -+static int get_port_ref(struct device_node *dev_node, -+ struct fm_port **port) -+{ -+ struct platform_device *port_of_dev = NULL; -+ struct device *op_dev = NULL; -+ struct device_node *port_node = NULL; -+ -+ port_node = of_parse_phandle(dev_node, "fsl,fman-oh-port", 0); -+ if (port_node == NULL) -+ return -EINVAL; -+ -+ port_of_dev = of_find_device_by_node(port_node); -+ of_node_put(port_node); -+ -+ if (port_of_dev == NULL) -+ return -EINVAL; -+ -+ /* get the reference to oh port from FMD */ -+ op_dev = &port_of_dev->dev; -+ *port = fm_port_bind(op_dev); -+ -+ if (*port == NULL) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+static void dpaa_generic_napi_enable(struct dpa_generic_priv_s *priv) -+{ -+ struct dpa_percpu_priv_s *percpu_priv; -+ int i, j; -+ -+ for_each_possible_cpu(i) { -+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i); -+ -+ for (j = 0; j < qman_portal_max; j++) -+ napi_enable(&percpu_priv->np[j].napi); -+ } -+} -+ -+static void dpaa_generic_napi_disable(struct dpa_generic_priv_s *priv) -+{ -+ struct dpa_percpu_priv_s *percpu_priv; -+ int i, j; -+ -+ for_each_possible_cpu(i) { -+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i); -+ -+ for (j = 0; j < qman_portal_max; j++) -+ napi_disable(&percpu_priv->np[j].napi); -+ } -+} -+ -+static struct device_node *get_rx_op_port_node(struct platform_device *_of_dev) -+{ -+ struct device *dev = &_of_dev->dev; -+ struct device_node *port_node = NULL; -+ struct device_node *onic_node = NULL; -+ int num_ports = 0; -+ -+ onic_node = dev->of_node; -+ -+ num_ports = of_count_phandle_with_args(onic_node, "fsl,oh-ports", NULL); -+ if (num_ports != 2) { -+ dev_err(dev, "There should be two O/H port handles in the device tree\n"); -+ return ERR_PTR(-EINVAL); -+ } -+ -+ port_node = of_parse_phandle(onic_node, "fsl,oh-ports", 0); -+ if (port_node == NULL) { -+ dev_err(dev, "Cannot find O/H port node in the device tree\n"); -+ return ERR_PTR(-EFAULT); -+ } -+ -+ return port_node; -+} -+ -+static int __cold dpa_generic_start(struct net_device *netdev) -+{ -+ struct dpa_generic_priv_s *priv = netdev_priv(netdev); -+ -+ /* seed default buffer pool */ -+ dpa_bp_priv_seed(priv->rx_bp); -+ -+ dpaa_generic_napi_enable(priv); -+ netif_tx_start_all_queues(netdev); -+ -+ mod_timer(&priv->timer, jiffies + 100); -+ -+ return 0; -+} -+ -+static int __cold dpa_generic_stop(struct net_device *netdev) -+{ -+ struct dpa_generic_priv_s *priv = netdev_priv(netdev); -+ -+ netif_tx_stop_all_queues(netdev); -+ dpaa_generic_napi_disable(priv); -+ -+ return 0; -+} -+ -+static enum qman_cb_dqrr_result __hot -+dpa_generic_rx_err_dqrr(struct qman_portal *portal, -+ struct qman_fq 
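
dpa_generic_get_stats64() above leans on the fact that struct rtnl_link_stats64 is nothing but u64 counters, so the per-CPU copies can be summed as flat u64 arrays instead of field by field. The same trick in a runnable miniature (a hypothetical four-counter struct stands in for the real one):

#include <stdint.h>
#include <stdio.h>

struct stats { uint64_t rx_packets, tx_packets, rx_bytes, tx_bytes; };

int main(void)
{
	struct stats percpu[2] = { { 1, 2, 100, 200 }, { 3, 4, 300, 400 } };
	struct stats total = { 0 };
	uint64_t *out = (uint64_t *)&total;
	int numstats = sizeof(struct stats) / sizeof(uint64_t);
	int cpu, j;

	for (cpu = 0; cpu < 2; cpu++) {
		uint64_t *in = (uint64_t *)&percpu[cpu];

		for (j = 0; j < numstats; j++)
			out[j] += in[j];    /* field-agnostic summation */
	}
	printf("rx_packets=%llu rx_bytes=%llu\n",
	       (unsigned long long)total.rx_packets,
	       (unsigned long long)total.rx_bytes);
	return 0;
}
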
*fq, -+ const struct qm_dqrr_entry *dq) -+{ -+ struct net_device *netdev; -+ struct dpa_generic_priv_s *priv; -+ struct dpa_percpu_priv_s *percpu_priv; -+ const struct qm_fd *fd; -+ int *countptr; -+ -+ netdev = ((struct dpa_fq *)fq)->net_dev; -+ priv = netdev_priv(netdev); -+ percpu_priv = raw_cpu_ptr(priv->percpu_priv); -+ countptr = raw_cpu_ptr(priv->rx_bp->percpu_count); -+ fd = &dq->fd; -+ -+ /* TODO: extract bpid from the fd; when multiple bps are supported -+ * there won't be a default bp -+ */ -+ -+ if (dpaa_eth_napi_schedule(percpu_priv, portal)) -+ return qman_cb_dqrr_stop; -+ -+ if (unlikely(dpaa_eth_refill_bpools(priv->rx_bp, countptr))) { -+ /* Unable to refill the buffer pool due to insufficient -+ * system memory. Just release the frame back into the pool, -+ * otherwise we'll soon end up with an empty buffer pool. -+ */ -+ dpa_fd_release(netdev, fd); -+ goto qman_consume; -+ } -+ -+ /* limit common, possibly innocuous Rx FIFO Overflow errors' -+ * interference with zero-loss convergence benchmark results. -+ */ -+ if (likely(fd->status & FM_FD_STAT_ERR_PHYSICAL)) -+ pr_warn_once("fsl-dpa: non-zero error counters in fman statistics (sysfs)\n"); -+ else -+ if (netif_msg_hw(priv) && net_ratelimit()) -+ netdev_err(netdev, "Err FD status 2 = 0x%08x\n", -+ fd->status & FM_FD_STAT_RX_ERRORS); -+ -+ -+ percpu_priv->stats.rx_errors++; -+ -+ if (fd->status & FM_PORT_FRM_ERR_DMA) -+ percpu_priv->rx_errors.dme++; -+ if (fd->status & FM_PORT_FRM_ERR_PHYSICAL) -+ percpu_priv->rx_errors.fpe++; -+ if (fd->status & FM_PORT_FRM_ERR_SIZE) -+ percpu_priv->rx_errors.fse++; -+ if (fd->status & FM_PORT_FRM_ERR_PRS_HDR_ERR) -+ percpu_priv->rx_errors.phe++; -+ -+ /* TODO dpa_csum_validation */ -+ -+ dpa_fd_release(netdev, fd); -+ -+qman_consume: -+ return qman_cb_dqrr_consume; -+} -+ -+ -+static enum qman_cb_dqrr_result __hot -+dpa_generic_rx_dqrr(struct qman_portal *portal, -+ struct qman_fq *fq, -+ const struct qm_dqrr_entry *dq) -+{ -+ struct net_device *netdev; -+ struct dpa_generic_priv_s *priv; -+ struct dpa_bp *bp; -+ struct dpa_percpu_priv_s *percpu_priv; -+ struct sk_buff **skbh; -+ struct sk_buff *skb; -+ const struct qm_fd *fd = &dq->fd; -+ unsigned int skb_len; -+ u32 fd_status = fd->status; -+ u64 pad; -+ dma_addr_t addr = qm_fd_addr(fd); -+ unsigned int data_start; -+ unsigned long skb_addr; -+ int *countptr; -+ -+ netdev = ((struct dpa_fq *)fq)->net_dev; -+ priv = netdev_priv(netdev); -+ percpu_priv = raw_cpu_ptr(priv->percpu_priv); -+ countptr = raw_cpu_ptr(priv->rx_bp->percpu_count); -+ -+ /* This is needed for TCP traffic as draining only on TX is not -+ * enough -+ */ -+ dpa_generic_drain_bp(priv->draining_tx_bp, 1); -+ dpa_generic_drain_sg_bp(priv->draining_tx_sg_bp, 1); -+ -+ if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal))) -+ return qman_cb_dqrr_stop; -+ -+ if (unlikely(dpaa_eth_refill_bpools(priv->rx_bp, countptr))) { -+ /* Unable to refill the buffer pool due to insufficient -+ * system memory. Just release the frame back into the pool, -+ * otherwise we'll soon end up with an empty buffer pool. 
-+ */ -+ dpa_fd_release(netdev, fd); -+ goto qman_consume; -+ } -+ -+ DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), -1); -+ -+ if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) { -+ if (netif_msg_hw(priv) && net_ratelimit()) -+ netdev_warn(netdev, "FD status = 0x%08x\n", -+ fd->status & FM_FD_STAT_RX_ERRORS); -+ -+ percpu_priv->stats.rx_errors++; -+ dpa_fd_release(netdev, fd); -+ goto qman_consume; -+ } -+ if (unlikely(fd->format != qm_fd_contig)) { -+ percpu_priv->stats.rx_dropped++; -+ if (netif_msg_rx_status(priv) && net_ratelimit()) -+ netdev_warn(netdev, "Dropping a SG frame\n"); -+ dpa_fd_release(netdev, fd); -+ goto qman_consume; -+ } -+ -+ bp = dpa_bpid2pool(fd->bpid); -+ -+ /* find out the pad */ -+ skb_addr = virt_to_phys(skb->head); -+ pad = addr - skb_addr; -+ -+ dma_unmap_single(bp->dev, addr, bp->size, DMA_BIDIRECTIONAL); -+ -+ countptr = raw_cpu_ptr(bp->percpu_count); -+ (*countptr)--; -+ -+ /* The skb is currently pointed at head + headroom. The packet -+ * starts at skb->head + pad + fd offset. -+ */ -+ data_start = (unsigned int)(pad + dpa_fd_offset(fd) - -+ skb_headroom(skb)); -+ skb_put(skb, dpa_fd_length(fd) + data_start); -+ skb_pull(skb, data_start); -+ skb->protocol = eth_type_trans(skb, netdev); -+ if (unlikely(dpa_check_rx_mtu(skb, netdev->mtu))) { -+ percpu_priv->stats.rx_dropped++; -+ dev_kfree_skb(skb); -+ goto qman_consume; -+ } -+ -+ skb_len = skb->len; -+ -+ if (fd->status & FM_FD_STAT_L4CV) -+ skb->ip_summed = CHECKSUM_UNNECESSARY; -+ else -+ skb->ip_summed = CHECKSUM_NONE; -+ -+ if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) -+ goto qman_consume; -+ -+ percpu_priv->stats.rx_packets++; -+ percpu_priv->stats.rx_bytes += skb_len; -+ -+qman_consume: -+ return qman_cb_dqrr_consume; -+} -+ -+static void dpa_generic_drain_sg_bp(struct dpa_bp *sgbp, u8 nbuf) -+{ -+ int ret; -+ struct bm_buffer bmb[8]; -+ -+ do { -+ ret = bman_acquire(sgbp->pool, bmb, nbuf, 0); -+ } while (ret >= 0); -+} -+ -+inline void dpa_release_sg(struct sk_buff *skb, dma_addr_t addr, -+ struct dpa_bp *bp) -+{ -+ struct qm_sg_entry *sgt = phys_to_virt(addr + DPA_DEFAULT_TX_HEADROOM); -+ int nr_frags = skb_shinfo(skb)->nr_frags; -+ dma_addr_t sg_addr; -+ int j; -+ -+ dma_unmap_single(bp->dev, addr, DPA_DEFAULT_TX_HEADROOM + -+ sizeof(struct qm_sg_entry) * (1 + nr_frags), -+ DMA_BIDIRECTIONAL); -+ -+ for (j = 0; j <= nr_frags; j++) { -+ DPA_BUG_ON(sgt[j].extension); -+ sg_addr = qm_sg_addr(&sgt[j]); -+ dma_unmap_page(bp->dev, sg_addr, -+ sgt[j].length, DMA_BIDIRECTIONAL); -+ } -+ -+ dev_kfree_skb_any(skb); -+} -+ -+inline void dpa_release_contig(struct sk_buff *skb, dma_addr_t addr, -+ struct dpa_bp *bp) -+{ -+ dma_unmap_single(bp->dev, addr, bp->size, DMA_BIDIRECTIONAL); -+ dev_kfree_skb_any(skb); -+} -+ -+static void dpa_generic_drain_bp(struct dpa_bp *bp, u8 nbuf) -+{ -+ int ret, i; -+ struct bm_buffer bmb[8]; -+ dma_addr_t addr; -+ int *countptr = raw_cpu_ptr(bp->percpu_count); -+ int count = *countptr; -+ struct sk_buff **skbh; -+ -+ do { -+ /* bman_acquire will fail if nbuf > 8 */ -+ ret = bman_acquire(bp->pool, bmb, nbuf, 0); -+ if (ret > 0) { -+ for (i = 0; i < nbuf; i++) { -+ addr = bm_buf_addr(&bmb[i]); -+ skbh = (struct sk_buff **)phys_to_virt(addr); -+ dma_unmap_single(bp->dev, addr, bp->size, -+ DMA_TO_DEVICE); -+ -+ if (skb_is_nonlinear(*skbh)) -+ dpa_release_sg(*skbh, addr, bp); -+ else -+ dpa_release_contig(*skbh, addr, bp); -+ } -+ count -= i; -+ } -+ } while (ret > 0); -+ -+ *countptr = count; -+} -+ -+/** -+ * Turn on HW checksum computation for this outgoing frame. 
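
A worked example of the offset arithmetic in dpa_generic_rx_dqrr() above: the frame descriptor's buffer address may sit pad bytes past skb->head, and the payload begins another dpa_fd_offset(fd) bytes in, so the driver grows the skb over the whole region and then pulls the non-payload prefix off. The numbers below are made up:

#include <stdio.h>

int main(void)
{
	unsigned long skb_head  = 0x10000; /* hypothetical skb->head */
	unsigned long fd_addr   = 0x10040; /* buffer address from the FD */
	unsigned int  headroom  = 0x40;    /* skb_headroom(skb) at alloc time */
	unsigned int  fd_offset = 0x80;    /* dpa_fd_offset(fd) */
	unsigned int  fd_length = 1500;    /* dpa_fd_length(fd) */

	unsigned long pad = fd_addr - skb_head;
	unsigned int data_start = (unsigned int)(pad + fd_offset - headroom);

	/* skb_put(skb, fd_length + data_start), then skb_pull(skb, data_start) */
	printf("put %u bytes, pull %u -> skb->len = %u\n",
	       fd_length + data_start, data_start, fd_length);
	return 0;
}
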
-+ * If the current protocol is not something we support in this regard -+ * (or if the stack has already computed the SW checksum), we do nothing. -+ * -+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value -+ * otherwise. -+ * -+ * Note that this function may modify the fd->cmd field and the skb data buffer -+ * (the Parse Results area). -+ */ -+static int dpa_generic_tx_csum(struct dpa_generic_priv_s *priv, -+ struct sk_buff *skb, -+ struct qm_fd *fd, -+ char *parse_results) -+{ -+ fm_prs_result_t *parse_result; -+ struct iphdr *iph; -+ struct ipv6hdr *ipv6h = NULL; -+ int l4_proto; -+ int ethertype = ntohs(skb->protocol); -+ int retval = 0; -+ -+ if (skb->ip_summed != CHECKSUM_PARTIAL) -+ return 0; -+ -+ /* Note: L3 csum seems to be already computed in sw, but we can't choose -+ * L4 alone from the FM configuration anyway. -+ */ -+ -+ /* Fill in some fields of the Parse Results array, so the FMan -+ * can find them as if they came from the FMan Parser. -+ */ -+ parse_result = (fm_prs_result_t *)parse_results; -+ -+ /* If we're dealing with VLAN, get the real Ethernet type */ -+ if (ethertype == ETH_P_8021Q) { -+ /* We can't always assume the MAC header is set correctly -+ * by the stack, so reset to beginning of skb->data -+ */ -+ skb_reset_mac_header(skb); -+ ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto); -+ } -+ -+ /* Fill in the relevant L3 parse result fields -+ * and read the L4 protocol type -+ */ -+ switch (ethertype) { -+ case ETH_P_IP: -+ parse_result->l3r = FM_L3_PARSE_RESULT_IPV4; -+ iph = ip_hdr(skb); -+ BUG_ON(iph == NULL); -+ l4_proto = iph->protocol; -+ break; -+ case ETH_P_IPV6: -+ parse_result->l3r = FM_L3_PARSE_RESULT_IPV6; -+ ipv6h = ipv6_hdr(skb); -+ BUG_ON(ipv6h == NULL); -+ l4_proto = ipv6h->nexthdr; -+ break; -+ default: -+ /* We shouldn't even be here */ -+ if (netif_msg_tx_err(priv) && net_ratelimit()) -+ netdev_alert(priv->net_dev, -+ "Can't compute HW csum for L3 proto 0x%x\n", -+ ntohs(skb->protocol)); -+ retval = -EIO; -+ goto return_error; -+ } -+ -+ /* Fill in the relevant L4 parse result fields */ -+ switch (l4_proto) { -+ case IPPROTO_UDP: -+ parse_result->l4r = FM_L4_PARSE_RESULT_UDP; -+ break; -+ case IPPROTO_TCP: -+ parse_result->l4r = FM_L4_PARSE_RESULT_TCP; -+ break; -+ default: -+ /* This can as well be a BUG() */ -+ if (netif_msg_tx_err(priv) && net_ratelimit()) -+ netdev_alert(priv->net_dev, -+ "Can't compute HW csum for L4 proto 0x%x\n", -+ l4_proto); -+ retval = -EIO; -+ goto return_error; -+ } -+ -+ /* At index 0 is IPOffset_1 as defined in the Parse Results */ -+ parse_result->ip_off[0] = (uint8_t)skb_network_offset(skb); -+ parse_result->l4_off = (uint8_t)skb_transport_offset(skb); -+ -+ /* Enable L3 (and L4, if TCP or UDP) HW checksum. 
*/ -+ fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC; -+ -+ /* On P1023 and similar platforms fd->cmd interpretation could -+ * be disabled by setting CONTEXT_A bit ICMD; currently this bit -+ * is not set so we do not need to check; in the future, if/when -+ * using context_a we need to check this bit -+ */ -+ -+return_error: -+ return retval; -+} -+ -+static inline int generic_skb_to_sg_fd(struct dpa_generic_priv_s *priv, -+ struct sk_buff *skb, struct qm_fd *fd) -+{ -+ struct dpa_bp *dpa_bp = priv->draining_tx_bp; -+ struct dpa_bp *dpa_sg_bp = priv->draining_tx_sg_bp; -+ dma_addr_t addr; -+ struct sk_buff **skbh; -+ struct net_device *net_dev = priv->net_dev; -+ int err; -+ -+ struct qm_sg_entry *sgt; -+ void *sgt_buf; -+ void *buffer_start; -+ skb_frag_t *frag; -+ int i, j; -+ const enum dma_data_direction dma_dir = DMA_BIDIRECTIONAL; -+ const int nr_frags = skb_shinfo(skb)->nr_frags; -+ -+ memset(fd, 0, sizeof(*fd)); -+ fd->format = qm_fd_sg; -+ -+ /* get a page frag to store the SGTable */ -+ sgt_buf = netdev_alloc_frag(priv->tx_headroom + -+ sizeof(struct qm_sg_entry) * (1 + nr_frags)); -+ if (unlikely(!sgt_buf)) { -+ dev_err(dpa_bp->dev, "netdev_alloc_frag() failed\n"); -+ return -ENOMEM; -+ } -+ -+ memset(sgt_buf, 0, priv->tx_headroom + -+ sizeof(struct qm_sg_entry) * (1 + nr_frags)); -+ -+ /* do this before dma_map_single(DMA_TO_DEVICE), because we may need to -+ * write into the skb. -+ */ -+ err = dpa_generic_tx_csum(priv, skb, fd, -+ sgt_buf + DPA_TX_PRIV_DATA_SIZE); -+ if (unlikely(err < 0)) { -+ if (netif_msg_tx_err(priv) && net_ratelimit()) -+ netdev_err(net_dev, "HW csum error: %d\n", err); -+ goto csum_failed; -+ } -+ -+ sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom); -+ sgt[0].bpid = dpa_sg_bp->bpid; -+ sgt[0].offset = 0; -+ sgt[0].length = skb_headlen(skb); -+ sgt[0].extension = 0; -+ sgt[0].final = 0; -+ -+ addr = dma_map_single(dpa_sg_bp->dev, skb->data, sgt[0].length, -+ dma_dir); -+ if (unlikely(dma_mapping_error(dpa_sg_bp->dev, addr))) { -+ dev_err(dpa_sg_bp->dev, "DMA mapping failed"); -+ err = -EINVAL; -+ goto sg0_map_failed; -+ } -+ -+ sgt[0].addr_hi = (uint8_t)upper_32_bits(addr); -+ sgt[0].addr_lo = cpu_to_be32(lower_32_bits(addr)); -+ -+ /* populate the rest of SGT entries */ -+ for (i = 1; i <= nr_frags; i++) { -+ frag = &skb_shinfo(skb)->frags[i - 1]; -+ sgt[i].bpid = dpa_sg_bp->bpid; -+ sgt[i].offset = 0; -+ sgt[i].length = frag->size; -+ sgt[i].extension = 0; -+ sgt[i].final = 0; -+ -+ DPA_BUG_ON(!skb_frag_page(frag)); -+ addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, sgt[i].length, -+ dma_dir); -+ if (unlikely(dma_mapping_error(dpa_sg_bp->dev, addr))) { -+ dev_err(dpa_sg_bp->dev, "DMA mapping failed"); -+ err = -EINVAL; -+ goto sg_map_failed; -+ } -+ -+ /* keep the offset in the address */ -+ sgt[i].addr_hi = (uint8_t)upper_32_bits(addr); -+ sgt[i].addr_lo = cpu_to_be32(lower_32_bits(addr)); -+ } -+ sgt[i - 1].final = 1; -+ -+ fd->length20 = skb->len; -+ fd->offset = priv->tx_headroom; -+ -+ /* DMA map the SGT page */ -+ buffer_start = (void *)sgt - dpa_fd_offset(fd); -+ /* Can't write at "negative" offset in buffer_start, because this skb -+ * may not have been allocated by us. 
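
The sgt[i].addr_hi/addr_lo and fd->addr_hi/addr_lo stores above split a DPAA bus address (wider than 32 bits) into an 8-bit high part and a 32-bit low part. A minimal demonstration of the split; the driver additionally byte-swaps the low word with cpu_to_be32() for SG entries:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma_addr = 0x09ABCD1234ULL;            /* example 40-bit address */
	uint8_t  addr_hi  = (uint8_t)(dma_addr >> 32);  /* upper_32_bits(), truncated */
	uint32_t addr_lo  = (uint32_t)dma_addr;         /* lower_32_bits() */

	printf("hi=0x%02x lo=0x%08x\n", addr_hi, addr_lo); /* hi=0x09 lo=0xabcd1234 */
	return 0;
}
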
-+ */ -+ DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0); -+ -+ addr = dma_map_single(dpa_bp->dev, buffer_start, -+ priv->tx_headroom + sizeof(struct qm_sg_entry) * (1 + nr_frags), -+ dma_dir); -+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) { -+ dev_err(dpa_bp->dev, "DMA mapping failed"); -+ err = -EINVAL; -+ goto sgt_map_failed; -+ } -+ -+ fd->bpid = dpa_bp->bpid; -+ fd->addr_hi = (uint8_t)upper_32_bits(addr); -+ fd->addr_lo = lower_32_bits(addr); -+ -+ return 0; -+ -+sgt_map_failed: -+sg_map_failed: -+ for (j = 0; j < i; j++) -+ dma_unmap_page(dpa_sg_bp->dev, qm_sg_addr(&sgt[j]), -+ be32_to_cpu(sgt[j].length), dma_dir); -+sg0_map_failed: -+csum_failed: -+ put_page(virt_to_head_page(sgt_buf)); -+ -+ return err; -+} -+ -+static int __hot dpa_generic_tx(struct sk_buff *skb, struct net_device *netdev) -+{ -+ struct dpa_generic_priv_s *priv = netdev_priv(netdev); -+ struct dpa_percpu_priv_s *percpu_priv = -+ raw_cpu_ptr(priv->percpu_priv); -+ struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats; -+ struct dpa_bp *bp = priv->draining_tx_bp; -+ struct dpa_bp *sg_bp = priv->draining_tx_sg_bp; -+ struct sk_buff **skbh = NULL; -+ dma_addr_t addr; -+ struct qm_fd fd; -+ int queue_mapping; -+ struct qman_fq *egress_fq; -+ const bool nonlinear = skb_is_nonlinear(skb); -+ int i = 0, err = 0; -+ int *countptr; -+ -+ if (nonlinear && skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES) { -+ err = generic_skb_to_sg_fd(priv, skb, &fd); -+ if (unlikely(err < 0)) -+ goto sg_failed; -+ percpu_priv->tx_frag_skbuffs++; -+ addr = qm_fd_addr(&fd); -+ } else { -+ if (unlikely(skb_headroom(skb) < priv->tx_headroom)) { -+ struct sk_buff *skb_new; -+ -+ skb_new = skb_realloc_headroom(skb, priv->tx_headroom); -+ if (unlikely(!skb_new)) { -+ percpu_stats->tx_errors++; -+ kfree_skb(skb); -+ goto done; -+ } -+ -+ kfree_skb(skb); -+ skb = skb_new; -+ } -+ -+ clear_fd(&fd); -+ -+ /* store skb backpointer to release the skb later */ -+ skbh = (struct sk_buff **)(skb->data - priv->tx_headroom); -+ *skbh = skb; -+ -+ /* do this before dma_map_single(), because we may need to write -+ * into the skb. 
-+ */ -+ err = dpa_generic_tx_csum(priv, skb, &fd, -+ ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE); -+ if (unlikely(err < 0)) { -+ if (netif_msg_tx_err(priv) && net_ratelimit()) -+ netdev_err(netdev, "HW csum error: %d\n", err); -+ return err; -+ } -+ -+ addr = dma_map_single(bp->dev, skbh, -+ skb->len + priv->tx_headroom, DMA_TO_DEVICE); -+ if (unlikely(dma_mapping_error(bp->dev, addr))) { -+ if (netif_msg_tx_err(priv) && net_ratelimit()) -+ netdev_err(netdev, "dma_map_single() failed\n"); -+ goto dma_mapping_failed; -+ } -+ -+ fd.format = qm_fd_contig; -+ fd.length20 = skb->len; -+ fd.offset = priv->tx_headroom; -+ fd.addr_hi = (uint8_t)upper_32_bits(addr); -+ fd.addr_lo = lower_32_bits(addr); -+ /* fd.cmd |= FM_FD_CMD_FCO; */ -+ fd.bpid = bp->bpid; -+ } -+ -+ dpa_generic_drain_bp(bp, 1); -+ dpa_generic_drain_sg_bp(sg_bp, 1); -+ -+ queue_mapping = dpa_get_queue_mapping(skb); -+ egress_fq = priv->egress_fqs[queue_mapping]; -+ -+ for (i = 0; i < 100000; i++) { -+ err = qman_enqueue(egress_fq, &fd, 0); -+ if (err != -EBUSY) -+ break; -+ } -+ -+ if (unlikely(err < 0)) { -+ percpu_stats->tx_fifo_errors++; -+ goto xmit_failed; -+ } -+ -+ countptr = raw_cpu_ptr(bp->percpu_count); -+ (*countptr)++; -+ -+ percpu_stats->tx_packets++; -+ percpu_stats->tx_bytes += fd.length20; -+ netdev->trans_start = jiffies; -+ -+ goto done; -+ -+xmit_failed: -+ dma_unmap_single(bp->dev, addr, fd.offset + fd.length20, DMA_TO_DEVICE); -+sg_failed: -+dma_mapping_failed: -+ percpu_stats->tx_errors++; -+ dev_kfree_skb(skb); -+done: -+ return NETDEV_TX_OK; -+} -+ -+static int dpa_generic_napi_add(struct net_device *net_dev) -+{ -+ struct dpa_generic_priv_s *priv = netdev_priv(net_dev); -+ struct dpa_percpu_priv_s *percpu_priv; -+ int i, cpu; -+ -+ for_each_possible_cpu(cpu) { -+ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); -+ -+ percpu_priv->np = devm_kzalloc(net_dev->dev.parent, -+ qman_portal_max * sizeof(struct dpa_napi_portal), -+ GFP_KERNEL); -+ -+ if (unlikely(percpu_priv->np == NULL)) { -+ dev_err(net_dev->dev.parent, "devm_kzalloc() failed\n"); -+ return -ENOMEM; -+ } -+ -+ for (i = 0; i < qman_portal_max; i++) -+ netif_napi_add(net_dev, &percpu_priv->np[i].napi, -+ dpaa_eth_poll, DPA_GENERIC_NAPI_WEIGHT); -+ } -+ -+ return 0; -+} -+ -+static void dpa_generic_napi_del(struct net_device *net_dev) -+{ -+ struct dpa_generic_priv_s *priv = netdev_priv(net_dev); -+ struct dpa_percpu_priv_s *percpu_priv; -+ int i, cpu; -+ -+ for_each_possible_cpu(cpu) { -+ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); -+ -+ if (percpu_priv->np) { -+ for (i = 0; i < qman_portal_max; i++) -+ netif_napi_del(&percpu_priv->np[i].napi); -+ -+ devm_kfree(net_dev->dev.parent, percpu_priv->np); -+ } -+ } -+} -+ -+ -+static int dpa_generic_netdev_init(struct device_node *dpa_node, -+ struct net_device *netdev) -+{ -+ struct dpa_generic_priv_s *priv = netdev_priv(netdev); -+ struct device *dev = netdev->dev.parent; -+ const uint8_t *mac_addr; -+ int err; -+ -+ netdev->netdev_ops = &dpa_generic_ops; -+ -+ mac_addr = of_get_mac_address(dpa_node); -+ if (mac_addr == NULL) { -+ if (netif_msg_probe(priv)) -+ dev_err(dev, "No virtual MAC address found!\n"); -+ return -EINVAL; -+ } -+ -+ netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG; -+ netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE; -+ netdev->features |= netdev->hw_features; -+ netdev->vlan_features = netdev->features; -+ -+ memcpy(netdev->perm_addr, mac_addr, netdev->addr_len); -+ memcpy(netdev->dev_addr, mac_addr, netdev->addr_len); -+ -+ netdev->ethtool_ops = 
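
dpa_generic_tx() above retries qman_enqueue() up to 100000 times while the portal reports -EBUSY (enqueue ring full), then gives up and counts a tx_fifo_error rather than blocking the stack indefinitely. The same bounded-retry shape as a runnable toy, with fake_qman_enqueue() standing in for the real API:

#include <errno.h>
#include <stdio.h>

static int busy_left = 5;           /* pretend the enqueue ring is full 5 times */

static int fake_qman_enqueue(void)  /* stand-in, not the real qman_enqueue() */
{
	return busy_left-- > 0 ? -EBUSY : 0;
}

int main(void)
{
	int i, err = -EBUSY;

	for (i = 0; i < 100000; i++) {  /* same bound as the hunk above */
		err = fake_qman_enqueue();
		if (err != -EBUSY)
			break;
	}
	printf("err=%d after %d attempt(s)\n", err, i + 1);
	return 0;
}
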
&dpa_generic_ethtool_ops;
-+
-+ netdev->needed_headroom = priv->tx_headroom;
-+ netdev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
-+
-+ err = register_netdev(netdev);
-+ if (err < 0) {
-+ dev_err(dev, "register_netdev() = %d\n", err);
-+ return err;
-+ }
-+
-+ return 0;
-+}
-+
-+static struct dpa_fq_cbs_t generic_fq_cbs = {
-+ .rx_defq = { .cb = { .dqrr = dpa_generic_rx_dqrr } },
-+ .rx_errq = { .cb = { .dqrr = dpa_generic_rx_err_dqrr } },
-+ .egress_ern = { .cb = { .ern = dpa_generic_ern } }
-+};
-+
-+static struct fqid_cell *__fq_alloc(struct device *dev,
-+ int num_ranges,
-+ const void *fqids_off)
-+{
-+ struct fqid_cell *fqids;
-+ int i;
-+
-+ fqids = kzalloc(sizeof(*fqids) * num_ranges, GFP_KERNEL);
-+ if (fqids == NULL)
-+ return NULL;
-+
-+ /* convert to CPU endianness */
-+ for (i = 0; i < num_ranges; i++) {
-+ fqids[i].start = be32_to_cpup(fqids_off +
-+ i * sizeof(*fqids));
-+ fqids[i].count = be32_to_cpup(fqids_off +
-+ i * sizeof(*fqids) + sizeof(__be32));
-+ }
-+
-+ return fqids;
-+}
-+
-+static struct list_head *dpa_generic_fq_probe(struct platform_device *_of_dev,
-+ struct fm_port *tx_port)
-+{
-+ struct device *dev = &_of_dev->dev;
-+ struct device_node *oh_node = NULL;
-+ struct device_node *onic_node = NULL;
-+ struct fqid_cell *fqids;
-+ const void *fqids_off;
-+ struct dpa_fq *fq, *tmp;
-+ struct list_head *list;
-+ int num_ranges;
-+ int i, lenp;
-+
-+ onic_node = dev->of_node;
-+
-+ list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
-+ if (!list) {
-+ dev_err(dev, "Cannot allocate space for frame queues list\n");
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ INIT_LIST_HEAD(list);
-+
-+ /* RX queues (RX error, RX default) are specified in Rx O/H port node */
-+ oh_node = get_rx_op_port_node(_of_dev);
-+ fqids_off = of_get_property(oh_node, "fsl,qman-frame-queues-oh", &lenp);
-+ if (fqids_off == NULL) {
-+ dev_err(dev, "Need Rx FQ definition in dts for generic devices\n");
-+ return ERR_PTR(-EINVAL);
-+ }
-+ of_node_put(oh_node);
-+
-+ num_ranges = lenp / sizeof(*fqids);
-+ if (num_ranges != 2) {
-+ dev_err(dev, "Need 2 Rx FQ definitions in dts for generic devices\n");
-+ return ERR_PTR(-EINVAL);
-+ }
-+
-+ fqids = __fq_alloc(dev, num_ranges, fqids_off);
-+ if (!dpa_fq_alloc(dev, fqids[0].start, fqids[0].count, list,
-+ FQ_TYPE_RX_ERROR) ||
-+ !dpa_fq_alloc(dev, fqids[1].start, fqids[1].count,
-+ list, FQ_TYPE_RX_DEFAULT)) {
-+ dev_err(dev, "Cannot allocate space for default frame queues\n");
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ kfree(fqids);
-+
-+ /* TX queues */
-+ fqids_off = of_get_property(onic_node, "fsl,qman-frame-queues-tx",
-+ &lenp);
-+ if (fqids_off == NULL) {
-+ dev_err(dev, "Need Tx FQ definition in dts for generic devices\n");
-+ return ERR_PTR(-EINVAL);
-+ }
-+
-+ num_ranges = lenp / sizeof(*fqids);
-+ fqids = __fq_alloc(dev, num_ranges, fqids_off);
-+ for (i = 0; i < num_ranges; i++) {
-+ if (!dpa_fq_alloc(dev, fqids[i].start, fqids[i].count, list,
-+ FQ_TYPE_TX)) {
-+ dev_err(dev, "dpa_fq_alloc() failed\n");
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ }
-+ kfree(fqids);
-+
-+ /* optional RX PCD queues */
-+ lenp = 0;
-+ fqids_off = of_get_property(onic_node,
-+ "fsl,qman-frame-queues-rx", &lenp);
-+ num_ranges = lenp / sizeof(*fqids);
-+ fqids = __fq_alloc(dev, num_ranges, fqids_off);
-+ for (i = 0; i < num_ranges; i++) {
-+ if (!dpa_fq_alloc(dev, fqids[i].start, fqids[i].count, list,
-+ FQ_TYPE_RX_PCD)) {
-+ dev_err(dev, "dpa_fq_alloc() failed\n");
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ }
-+ kfree(fqids);
-+
-+ list_for_each_entry_safe(fq, tmp, list, list) {
-+
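
__fq_alloc() above decodes the "fsl,qman-frame-queues-*" properties, which the flattened device tree stores as big-endian (start, count) cell pairs. A self-contained model of the conversion, with be32_to_cpup() done by hand so it runs anywhere; the property bytes are invented:

#include <stdint.h>
#include <stdio.h>

struct fqid_cell_ { uint32_t start, count; };   /* mirrors struct fqid_cell */

static uint32_t be32_to_host(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	/* raw property bytes: two ranges, {0x100, 1} and {0x108, 8} */
	const uint8_t prop[] = { 0x00, 0x00, 0x01, 0x00,  0x00, 0x00, 0x00, 0x01,
				 0x00, 0x00, 0x01, 0x08,  0x00, 0x00, 0x00, 0x08 };
	int num_ranges = (int)(sizeof(prop) / sizeof(struct fqid_cell_));
	struct fqid_cell_ fqids[2];
	int i;

	for (i = 0; i < num_ranges; i++) {
		fqids[i].start = be32_to_host(prop + i * sizeof(struct fqid_cell_));
		fqids[i].count = be32_to_host(prop + i * sizeof(struct fqid_cell_) + 4);
		printf("range %d: start=0x%x count=%u\n",
		       i, fqids[i].start, fqids[i].count);
	}
	return 0;
}
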
if (fq->fq_type == FQ_TYPE_TX) -+ fq->channel = fm_get_tx_port_channel(tx_port); -+ } -+ -+ return list; -+} -+ -+static void dpa_generic_ern(struct qman_portal *portal, -+ struct qman_fq *fq, -+ const struct qm_mr_entry *msg) -+{ -+ struct net_device *netdev; -+ const struct dpa_generic_priv_s *priv; -+ struct dpa_percpu_priv_s *percpu_priv; -+ struct qm_fd fd = msg->ern.fd; -+ -+ netdev = ((struct dpa_fq *)fq)->net_dev; -+ priv = netdev_priv(netdev); -+ /* Non-migratable context, safe to use raw_cpu_ptr */ -+ percpu_priv = raw_cpu_ptr(priv->percpu_priv); -+ percpu_priv->stats.tx_dropped++; -+ percpu_priv->stats.tx_fifo_errors++; -+ count_ern(percpu_priv, msg); -+ -+ /* release this buffer into the draining buffer pool */ -+ dpa_fd_release(netdev, &fd); -+} -+ -+static int dpa_generic_rx_bp_probe(struct platform_device *_of_dev, -+ struct fm_port *rx_port, -+ int *rx_bp_count, -+ struct dpa_bp **rx_bp, -+ struct dpa_buffer_layout_s **rx_buf_layout) -+{ -+ struct device *dev = &_of_dev->dev; -+ struct fm_port_params params; -+ struct dpa_bp *bp = NULL; -+ int bp_count = 0; -+ int bpid; -+ const __be32 *bpool_cfg = NULL; -+ struct device_node *dev_node = NULL; -+ struct device_node *oh_node = NULL; -+ struct dpa_buffer_layout_s *buf_layout = NULL; -+ int lenp = 0; -+ int na = 0, ns = 0; -+ int err = 0, i = 0; -+ -+ oh_node = get_rx_op_port_node(_of_dev); -+ -+ bp_count = of_count_phandle_with_args(oh_node, -+ "fsl,bman-buffer-pools", NULL); -+ if (bp_count <= 0) { -+ dev_err(dev, "Missing buffer pool handles from onic node from device tree\n"); -+ return -EINVAL; -+ } -+ -+ bp = devm_kzalloc(dev, bp_count * sizeof(*bp), GFP_KERNEL); -+ if (unlikely(bp == NULL)) { -+ dev_err(dev, "devm_kzalloc() failed\n"); -+ err = -ENOMEM; -+ goto _return_of_node_put; -+ } -+ -+ dev_node = of_find_node_by_path("/"); -+ if (unlikely(dev_node == NULL)) { -+ dev_err(dev, "of_find_node_by_path(/) failed\n"); -+ err = -EINVAL; -+ goto _return_of_node_put; -+ } -+ -+ na = of_n_addr_cells(dev_node); -+ ns = of_n_size_cells(dev_node); -+ -+ of_node_put(dev_node); -+ -+ for (i = 0; i < bp_count; i++) { -+ dev_node = of_parse_phandle(oh_node, -+ "fsl,bman-buffer-pools", i); -+ if (dev_node == NULL) { -+ dev_err(dev, "Cannot find buffer pool node in the device tree\n"); -+ err = -EINVAL; -+ goto _return_of_node_put; -+ } -+ -+ err = of_property_read_u32(dev_node, "fsl,bpid", &bpid); -+ if (err) { -+ dev_err(dev, "Cannot find buffer pool ID in the buffer pool node in the device tree\n"); -+ goto _return_of_node_put; -+ } -+ -+ bp[i].bpid = (uint8_t)bpid; -+ -+ bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg", -+ &lenp); -+ if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) { -+ bp[i].config_count = (int)of_read_number(bpool_cfg, ns); -+ bp[i].size = of_read_number(bpool_cfg + ns, ns); -+ bp[i].paddr = 0; -+ bp[i].seed_pool = false; -+ } else { -+ dev_err(dev, "Missing/invalid fsl,bpool-ethernet-cfg device tree entry for node %s\n", -+ dev_node->full_name); -+ err = -EINVAL; -+ goto _return_of_node_put; -+ } -+ -+ bp[i].percpu_count = devm_alloc_percpu(dev, -+ *bp[i].percpu_count); -+ } -+ -+ of_node_put(oh_node); -+ -+ buf_layout = devm_kzalloc(dev, sizeof(*buf_layout), GFP_KERNEL); -+ if (!buf_layout) { -+ dev_err(dev, "devm_kzalloc() failed\n"); -+ err = -ENOMEM; -+ goto _return_of_node_put; -+ } -+ -+ buf_layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE; -+ buf_layout->parse_results = false; -+ buf_layout->hash_results = false; -+ buf_layout->time_stamp = false; -+ 
fm_port_get_buff_layout_ext_params(rx_port, &params);
-+ buf_layout->manip_extra_space = params.manip_extra_space;
-+ /* a value of zero for data alignment means "don't care", so align to
-+ * a non-zero value to prevent FMD from using its own default
-+ */
-+ buf_layout->data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
-+
-+ *rx_buf_layout = buf_layout;
-+ *rx_bp = bp;
-+ *rx_bp_count = bp_count;
-+
-+ return 0;
-+
-+_return_of_node_put:
-+ if (dev_node)
-+ of_node_put(dev_node);
-+
-+ return err;
-+}
-+
-+static int dpa_generic_tx_bp_probe(struct platform_device *_of_dev,
-+ struct fm_port *tx_port,
-+ struct dpa_bp **draining_tx_bp,
-+ struct dpa_bp **draining_tx_sg_bp,
-+ struct dpa_buffer_layout_s **tx_buf_layout)
-+{
-+ struct device *dev = &_of_dev->dev;
-+ struct fm_port_params params;
-+ struct dpa_bp *bp = NULL;
-+ struct dpa_bp *bp_sg = NULL;
-+ struct dpa_buffer_layout_s *buf_layout = NULL;
-+
-+ buf_layout = devm_kzalloc(dev, sizeof(*buf_layout), GFP_KERNEL);
-+ if (!buf_layout) {
-+ dev_err(dev, "devm_kzalloc() failed\n");
-+ return -ENOMEM;
-+ }
-+
-+ buf_layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE;
-+ buf_layout->parse_results = true;
-+ buf_layout->hash_results = true;
-+ buf_layout->time_stamp = false;
-+
-+ fm_port_get_buff_layout_ext_params(tx_port, &params);
-+ buf_layout->manip_extra_space = params.manip_extra_space;
-+ buf_layout->data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
-+
-+ bp = devm_kzalloc(dev, sizeof(*bp), GFP_KERNEL);
-+ if (unlikely(bp == NULL)) {
-+ dev_err(dev, "devm_kzalloc() failed\n");
-+ return -ENOMEM;
-+ }
-+
-+ bp->size = dpa_bp_size(buf_layout);
-+ bp->percpu_count = devm_alloc_percpu(dev, *bp->percpu_count);
-+ bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
-+
-+ *draining_tx_bp = bp;
-+
-+ bp_sg = devm_kzalloc(dev, sizeof(*bp_sg), GFP_KERNEL);
-+ if (unlikely(bp_sg == NULL)) {
-+ dev_err(dev, "devm_kzalloc() failed\n");
-+ return -ENOMEM;
-+ }
-+
-+ bp_sg->size = dpa_bp_size(buf_layout);
-+ bp_sg->percpu_count = alloc_percpu(*bp_sg->percpu_count);
-+ bp_sg->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
-+
-+ *draining_tx_sg_bp = bp_sg;
-+
-+ *tx_buf_layout = buf_layout;
-+
-+ return 0;
-+}
-+
-+static int dpa_generic_buff_dealloc_probe(struct platform_device *_of_dev,
-+ int *disable_buff_dealloc)
-+{
-+ struct device *dev = &_of_dev->dev;
-+ const phandle *disable_handle = NULL;
-+ int lenp = 0;
-+ int err = 0;
-+
-+ disable_handle = of_get_property(dev->of_node,
-+ "fsl,disable_buff_dealloc", &lenp);
-+ if (disable_handle != NULL)
-+ *disable_buff_dealloc = 1;
-+
-+ return err;
-+}
-+
-+static int dpa_generic_port_probe(struct platform_device *_of_dev,
-+ struct fm_port **rx_port,
-+ struct fm_port **tx_port)
-+{
-+ struct device *dev = &_of_dev->dev;
-+ struct device_node *dev_node = NULL;
-+ struct device_node *onic_node = NULL;
-+ int num_ports = 0;
-+ int err = 0;
-+
-+ onic_node = dev->of_node;
-+
-+ num_ports = of_count_phandle_with_args(onic_node, "fsl,oh-ports", NULL);
-+ if (num_ports != 2) {
-+ dev_err(dev, "There should be two OH ports in device tree (one for RX, one for TX)\n");
-+ return -EINVAL;
-+ }
-+
-+ dev_node = of_parse_phandle(onic_node, "fsl,oh-ports", RX);
-+ if (dev_node == NULL) {
-+ dev_err(dev, "Cannot find Rx OH port node in device tree\n");
-+ return -EFAULT;
-+ }
-+
-+ err = get_port_ref(dev_node, rx_port);
-+ if (err) {
-+ dev_err(dev, "Cannot read Rx OH port node in device tree\n");
-+ return err;
-+ }
-+
-+ dev_node = of_parse_phandle(onic_node, "fsl,oh-ports", TX);
-+ if (dev_node ==
NULL) { -+ dev_err(dev, "Cannot find Tx OH port node in device tree\n"); -+ return -EFAULT; -+ } -+ -+ err = get_port_ref(dev_node, tx_port); -+ if (err) { -+ dev_err(dev, "Cannot read Tx OH port node in device tree\n"); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static inline void dpa_generic_setup_ingress( -+ const struct dpa_generic_priv_s *priv, -+ struct dpa_fq *fq, -+ const struct qman_fq *template) -+{ -+ fq->fq_base = *template; -+ fq->net_dev = priv->net_dev; -+ -+ fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE; -+ fq->channel = priv->channel; -+} -+ -+static inline void dpa_generic_setup_egress( -+ const struct dpa_generic_priv_s *priv, -+ struct dpa_fq *fq, -+ struct fm_port *port, -+ const struct qman_fq *template) -+{ -+ fq->fq_base = *template; -+ fq->net_dev = priv->net_dev; -+ -+ fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL; -+ fq->channel = fm_get_tx_port_channel(port); -+} -+ -+static void dpa_generic_fq_setup(struct dpa_generic_priv_s *priv, -+ const struct dpa_fq_cbs_t *fq_cbs, -+ struct fm_port *tx_port) -+{ -+ struct dpa_fq *fq; -+ int egress_cnt = 0; -+ -+ /* Initialize each FQ in the list */ -+ list_for_each_entry(fq, &priv->dpa_fq_list, list) { -+ switch (fq->fq_type) { -+ case FQ_TYPE_RX_DEFAULT: -+ dpa_generic_setup_ingress(priv, fq, &fq_cbs->rx_defq); -+ break; -+ case FQ_TYPE_RX_ERROR: -+ dpa_generic_setup_ingress(priv, fq, &fq_cbs->rx_errq); -+ break; -+ case FQ_TYPE_RX_PCD: -+ dpa_generic_setup_ingress(priv, fq, &fq_cbs->rx_defq); -+ break; -+ case FQ_TYPE_TX: -+ dpa_generic_setup_egress(priv, fq, -+ tx_port, &fq_cbs->egress_ern); -+ /* If we have more Tx queues than the number of cores, -+ * just ignore the extra ones. -+ */ -+ if (egress_cnt < DPAA_ETH_TX_QUEUES) -+ priv->egress_fqs[egress_cnt++] = &fq->fq_base; -+ break; -+ default: -+ dev_warn(priv->net_dev->dev.parent, -+ "Unknown FQ type detected!\n"); -+ break; -+ } -+ } -+ -+ /* The number of Tx queues may be smaller than the number of cores, if -+ * the Tx queue range is specified in the device tree instead of being -+ * dynamically allocated. -+ * Make sure all CPUs receive a corresponding Tx queue. -+ */ -+ while (egress_cnt < DPAA_ETH_TX_QUEUES) { -+ list_for_each_entry(fq, &priv->dpa_fq_list, list) { -+ if (fq->fq_type != FQ_TYPE_TX) -+ continue; -+ priv->egress_fqs[egress_cnt++] = &fq->fq_base; -+ if (egress_cnt == DPAA_ETH_TX_QUEUES) -+ break; -+ } -+ } -+} -+ -+static int dpa_generic_fq_init(struct dpa_fq *dpa_fq, int disable_buff_dealloc) -+{ -+ int _errno; -+ struct device *dev; -+ struct qman_fq *fq; -+ struct qm_mcc_initfq initfq; -+ -+ dev = dpa_fq->net_dev->dev.parent; -+ -+ if (dpa_fq->fqid == 0) -+ dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID; -+ -+ _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base); -+ if (_errno) { -+ dev_err(dev, "qman_create_fq() failed\n"); -+ return _errno; -+ } -+ fq = &dpa_fq->fq_base; -+ -+ initfq.we_mask = QM_INITFQ_WE_FQCTRL; -+ /* FIXME: why would we want to keep an empty FQ in cache? 
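
dpa_generic_fq_setup() above first hands one egress slot to each Tx FQ and, when the device tree provides fewer Tx FQs than DPAA_ETH_TX_QUEUES, walks the FQ list again so every queue slot still points at some FQ. The resulting distribution, modeled standalone with invented counts:

#include <stdio.h>

#define TX_QUEUES 8                     /* stands in for DPAA_ETH_TX_QUEUES */

int main(void)
{
	int available_fqs = 3;          /* e.g. only three Tx FQs in the dts */
	int egress[TX_QUEUES];
	int egress_cnt = 0, fq, q;

	/* first pass: one slot per FQ, as the switch() in the hunk does */
	for (fq = 0; fq < available_fqs && egress_cnt < TX_QUEUES; fq++)
		egress[egress_cnt++] = fq;

	/* wrap-around passes until every slot is filled */
	while (egress_cnt < TX_QUEUES) {
		for (fq = 0; fq < available_fqs; fq++) {
			egress[egress_cnt++] = fq;
			if (egress_cnt == TX_QUEUES)
				break;
		}
	}

	for (q = 0; q < TX_QUEUES; q++)
		printf("tx queue %d -> fq %d\n", q, egress[q]);
	return 0;
}
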
*/ -+ initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE; -+ -+ /* FQ placement */ -+ initfq.we_mask |= QM_INITFQ_WE_DESTWQ; -+ -+ initfq.fqd.dest.channel = dpa_fq->channel; -+ initfq.fqd.dest.wq = dpa_fq->wq; -+ -+ if (dpa_fq->fq_type == FQ_TYPE_TX && !disable_buff_dealloc) { -+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; -+ /* ContextA: A2V=1 (contextA A2 field is valid) -+ * ContextA A2: EBD=1 (deallocate buffers inside FMan) -+ */ -+ initfq.fqd.context_a.hi = 0x10000000; -+ initfq.fqd.context_a.lo = 0x80000000; -+ } -+ -+ /* Initialization common to all ingress queues */ -+ if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) { -+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; -+ initfq.fqd.fq_ctrl |= -+ QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK; -+ initfq.fqd.context_a.stashing.exclusive = -+ QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX | -+ QM_STASHING_EXCL_ANNOTATION; -+ initfq.fqd.context_a.stashing.data_cl = 2; -+ initfq.fqd.context_a.stashing.annotation_cl = 1; -+ initfq.fqd.context_a.stashing.context_cl = -+ DIV_ROUND_UP(sizeof(struct qman_fq), 64); -+ } -+ -+ _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq); -+ if (_errno < 0) { -+ dev_err(dev, "qman_init_fq(%u) = %d\n", -+ qman_fq_fqid(fq), _errno); -+ qman_destroy_fq(fq, 0); -+ return _errno; -+ } -+ -+ dpa_fq->fqid = qman_fq_fqid(fq); -+ -+ return 0; -+} -+ -+static int dpa_generic_fq_create(struct net_device *netdev, -+ struct list_head *dpa_fq_list, -+ struct fm_port *tx_port) -+{ -+ struct dpa_generic_priv_s *priv = netdev_priv(netdev); -+ struct dpa_fq *fqs = NULL, *tmp = NULL; -+ struct task_struct *kth; -+ int err = 0; -+ int channel; -+ -+ INIT_LIST_HEAD(&priv->dpa_fq_list); -+ -+ list_replace_init(dpa_fq_list, &priv->dpa_fq_list); -+ -+ channel = dpa_get_channel(); -+ if (channel < 0) -+ return channel; -+ priv->channel = (uint16_t)channel; -+ -+ /* Start a thread that will walk the cpus with affine portals -+ * and add this pool channel to each's dequeue mask. 
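
About the magic constants in dpa_generic_fq_init() above: per the hunk's own comment, 0x10000000 in context_a.hi sets A2V (the contextA A2 field is valid) and 0x80000000 in context_a.lo sets EBD (FMan deallocates the buffer after transmit). A sketch with the bits named for readability; the names are descriptive, not SDK identifiers:

#include <stdint.h>
#include <stdio.h>

#define FQD_CTX_A_HI_A2V 0x10000000u  /* contextA A2 field is valid */
#define FQD_CTX_A_LO_EBD 0x80000000u  /* external buffer deallocation in FMan */

int main(void)
{
	int disable_buff_dealloc = 0;   /* "fsl,disable_buff_dealloc" absent */
	uint32_t hi = 0, lo = 0;

	if (!disable_buff_dealloc) {    /* mirrors the FQ_TYPE_TX branch above */
		hi |= FQD_CTX_A_HI_A2V;
		lo |= FQD_CTX_A_LO_EBD;
	}
	printf("context_a.hi=0x%08x context_a.lo=0x%08x\n", hi, lo);
	return 0;
}
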
-+ */ -+ kth = kthread_run(dpaa_eth_add_channel, -+ (void *)(unsigned long)priv->channel, -+ "dpaa_%p:%d", netdev, priv->channel); -+ if (!kth) -+ return -ENOMEM; -+ -+ dpa_generic_fq_setup(priv, &generic_fq_cbs, tx_port); -+ -+ /* Add the FQs to the interface, and make them active */ -+ list_for_each_entry_safe(fqs, tmp, &priv->dpa_fq_list, list) { -+ err = dpa_generic_fq_init(fqs, priv->disable_buff_dealloc); -+ if (err) -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int dpa_generic_bp_create(struct net_device *net_dev, -+ int rx_bp_count, -+ struct dpa_bp *rx_bp, -+ struct dpa_buffer_layout_s *rx_buf_layout, -+ struct dpa_bp *draining_tx_bp, -+ struct dpa_bp *draining_tx_sg_bp, -+ struct dpa_buffer_layout_s *tx_buf_layout) -+{ -+ struct dpa_generic_priv_s *priv = netdev_priv(net_dev); -+ int err = 0; -+ -+ /* TODO: multiple Rx bps */ -+ priv->rx_bp_count = rx_bp_count; -+ priv->rx_bp = rx_bp; -+ priv->rx_buf_layout = rx_buf_layout; -+ priv->draining_tx_bp = draining_tx_bp; -+ priv->draining_tx_sg_bp = draining_tx_sg_bp; -+ priv->tx_buf_layout = tx_buf_layout; -+ -+ err = dpa_bp_alloc(priv->rx_bp); -+ if (err < 0) { -+ priv->rx_bp = NULL; -+ return err; -+ } -+ -+ err = dpa_bp_alloc(priv->draining_tx_bp); -+ if (err < 0) { -+ priv->draining_tx_bp = NULL; -+ return err; -+ } -+ -+ err = dpa_bp_alloc(priv->draining_tx_sg_bp); -+ if (err < 0) { -+ priv->draining_tx_sg_bp = NULL; -+ return err; -+ } -+ -+ return 0; -+} -+ -+static void dpa_generic_relase_bp(struct dpa_bp *bp) -+{ -+ if (!bp) -+ return; -+ -+ if (!atomic_dec_and_test(&bp->refs)) -+ return; -+ -+ if (bp->free_buf_cb) -+ dpa_bp_drain(bp); -+ -+ bman_free_pool(bp->pool); -+ -+ if (bp->dev) -+ platform_device_unregister(to_platform_device(bp->dev)); -+} -+ -+static void dpa_generic_bp_free(struct dpa_generic_priv_s *priv) -+{ -+ int i = 0; -+ -+ /* release the rx bpools */ -+ for (i = 0; i < priv->rx_bp_count; i++) -+ dpa_generic_relase_bp(&priv->rx_bp[i]); -+ -+ /* release the tx draining bpools */ -+ dpa_generic_relase_bp(priv->draining_tx_bp); -+ dpa_generic_relase_bp(priv->draining_tx_sg_bp); -+} -+ -+static int dpa_generic_remove(struct platform_device *of_dev) -+{ -+ int err; -+ struct device *dev; -+ struct net_device *net_dev; -+ struct dpa_generic_priv_s *priv; -+ -+ dev = &of_dev->dev; -+ net_dev = dev_get_drvdata(dev); -+ priv = netdev_priv(net_dev); -+ -+ dpaa_eth_generic_sysfs_remove(dev); -+ -+ dev_set_drvdata(dev, NULL); -+ unregister_netdev(net_dev); -+ -+ err = dpa_fq_free(dev, &priv->dpa_fq_list); -+ -+ dpa_generic_napi_del(net_dev); -+ -+ dpa_generic_bp_free(priv); -+ -+ free_netdev(net_dev); -+ -+ return err; -+} -+ -+static int dpa_generic_eth_probe(struct platform_device *_of_dev) -+{ -+ struct device *dev = &_of_dev->dev; -+ struct device_node *dpa_node = dev->of_node; -+ struct net_device *netdev = NULL; -+ struct dpa_generic_priv_s *priv; -+ struct fm_port *rx_port = NULL; -+ struct fm_port *tx_port = NULL; -+ struct dpa_percpu_priv_s *percpu_priv; -+ int rx_bp_count = 0; -+ int disable_buff_dealloc = 0; -+ struct dpa_bp *rx_bp = NULL, *draining_tx_bp = NULL; -+ struct dpa_bp *draining_tx_sg_bp = NULL; -+ struct dpa_buffer_layout_s *rx_buf_layout = NULL, *tx_buf_layout = NULL; -+ struct list_head *dpa_fq_list; -+ static u8 generic_idx; -+ int err = 0; -+ int i = 0; -+ -+ if (!of_device_is_available(dpa_node)) -+ return -ENODEV; -+ -+ err = dpa_generic_port_probe(_of_dev, &tx_port, &rx_port); -+ if (err < 0) -+ return err; -+ -+ err = dpa_generic_rx_bp_probe(_of_dev, rx_port, &rx_bp_count, -+ &rx_bp, 
&rx_buf_layout); -+ if (err < 0) -+ return err; -+ -+ err = dpa_generic_tx_bp_probe(_of_dev, tx_port, &draining_tx_bp, -+ &draining_tx_sg_bp, &tx_buf_layout); -+ if (err < 0) -+ return err; -+ -+ dpa_fq_list = dpa_generic_fq_probe(_of_dev, tx_port); -+ if (IS_ERR(dpa_fq_list)) -+ return PTR_ERR(dpa_fq_list); -+ -+ err = dpa_generic_buff_dealloc_probe(_of_dev, &disable_buff_dealloc); -+ if (err < 0) -+ return err; -+ -+ /* just one queue for now */ -+ netdev = alloc_etherdev_mq(sizeof(*priv), 1); -+ if (!netdev) { -+ dev_err(dev, "alloc_etherdev_mq() failed\n"); -+ return -ENOMEM; -+ } -+ -+ SET_NETDEV_DEV(netdev, dev); -+ dev_set_drvdata(dev, netdev); -+ priv = netdev_priv(netdev); -+ priv->net_dev = netdev; -+ sprintf(priv->if_type, "generic%d", generic_idx++); -+ priv->msg_enable = netif_msg_init(generic_debug, -1); -+ priv->tx_headroom = DPA_DEFAULT_TX_HEADROOM; -+ -+ init_timer(&priv->timer); -+ priv->timer.data = (unsigned long)priv; -+ priv->timer.function = dpa_generic_draining_timer; -+ -+ err = dpa_generic_bp_create(netdev, rx_bp_count, rx_bp, rx_buf_layout, -+ draining_tx_bp, draining_tx_sg_bp, tx_buf_layout); -+ if (err < 0) -+ goto bp_create_failed; -+ -+ priv->disable_buff_dealloc = disable_buff_dealloc; -+ -+ err = dpa_generic_fq_create(netdev, dpa_fq_list, rx_port); -+ if (err < 0) -+ goto fq_create_failed; -+ -+ priv->tx_headroom = dpa_get_headroom(tx_buf_layout); -+ priv->rx_headroom = dpa_get_headroom(rx_buf_layout); -+ priv->rx_port = rx_port; -+ priv->tx_port = tx_port; -+ priv->mac_dev = NULL; -+ -+ -+ priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv); -+ if (priv->percpu_priv == NULL) { -+ dev_err(dev, "devm_alloc_percpu() failed\n"); -+ err = -ENOMEM; -+ goto alloc_percpu_failed; -+ } -+ for_each_online_cpu(i) { -+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i); -+ memset(percpu_priv, 0, sizeof(*percpu_priv)); -+ } -+ -+ /* Initialize NAPI */ -+ err = dpa_generic_napi_add(netdev); -+ if (err < 0) -+ goto napi_add_failed; -+ -+ err = dpa_generic_netdev_init(dpa_node, netdev); -+ if (err < 0) -+ goto netdev_init_failed; -+ -+ dpaa_eth_generic_sysfs_init(&netdev->dev); -+ -+ pr_info("fsl_dpa_generic: Probed %s interface as %s\n", -+ priv->if_type, netdev->name); -+ -+ return 0; -+ -+netdev_init_failed: -+napi_add_failed: -+ dpa_generic_napi_del(netdev); -+alloc_percpu_failed: -+ if (netdev) -+ dpa_fq_free(dev, &priv->dpa_fq_list); -+fq_create_failed: -+bp_create_failed: -+ if (netdev) -+ dpa_generic_bp_free(priv); -+ dev_set_drvdata(dev, NULL); -+ if (netdev) -+ free_netdev(netdev); -+ -+ return err; -+} -+ -+static int __init __cold dpa_generic_load(void) -+{ -+ int _errno; -+ -+ pr_info(KBUILD_MODNAME ": " DPA_GENERIC_DESCRIPTION "\n"); -+ -+ /* initialise dpaa_eth mirror values */ -+ dpa_rx_extra_headroom = fm_get_rx_extra_headroom(); -+ dpa_max_frm = fm_get_max_frm(); -+ -+ _errno = platform_driver_register(&dpa_generic_driver); -+ if (unlikely(_errno < 0)) { -+ pr_err(KBUILD_MODNAME -+ ": %s:%hu:%s(): platform_driver_register() = %d\n", -+ KBUILD_BASENAME".c", __LINE__, __func__, _errno); -+ } -+ -+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", -+ KBUILD_BASENAME".c", __func__); -+ -+ return _errno; -+} -+ -+/* waiting for all referenced ports to be initialized -+ * by other kernel modules (proxy ethernet, offline_port) -+ */ -+late_initcall(dpa_generic_load); -+ -+static void __exit __cold dpa_generic_unload(void) -+{ -+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n", -+ KBUILD_BASENAME".c", __func__); -+ -+ platform_driver_unregister(&dpa_generic_driver); -+ 
-+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", -+ KBUILD_BASENAME".c", __func__); -+} -+module_exit(dpa_generic_unload); ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic.h -@@ -0,0 +1,90 @@ -+/* Copyright 2013 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
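dpa_generic_load() is registered with late_initcall() rather than module_init() for the reason its comment gives: for built-in code, late initcalls (level 7) run only after every device_initcall (level 6) has finished, so the proxy and offline-port drivers are guaranteed to have registered first. A minimal sketch of that ordering (both functions built in; "device-level" always prints before "late"):

static int __init example_device_level(void)
{
	pr_info("device-level initcall\n");
	return 0;
}
/* module_init() aliases device_initcall() for built-in code */
device_initcall(example_device_level);

static int __init example_late_level(void)
{
	pr_info("late initcall\n");
	return 0;
}
late_initcall(example_late_level);	/* runs after all level-6 initcalls */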
-+ */ -+ -+#ifndef __DPA_ETH_GENERIC_H -+#define __DPA_ETH_GENERIC_H -+ -+#include "lnxwrp_fsl_fman.h" -+#include "dpaa_eth.h" -+ -+struct dpa_generic_priv_s { -+ struct net_device *net_dev; -+ /* use the same percpu_priv as other DPAA Ethernet drivers */ -+ struct dpa_percpu_priv_s __percpu *percpu_priv; -+ -+ /* up to 4 bps supported for RX */ -+ int rx_bp_count; -+ struct dpa_bp *rx_bp; -+ struct dpa_buffer_layout_s *rx_buf_layout; -+ -+ struct dpa_bp *draining_tx_bp; -+ struct dpa_bp *draining_tx_sg_bp; -+ struct dpa_buffer_layout_s *tx_buf_layout; -+ -+ /* Store here the needed Tx headroom for convenience and speed -+ * (even though it can be computed based on the fields of buf_layout) -+ */ -+ uint16_t tx_headroom; -+ uint16_t rx_headroom; -+ -+ /* In some scenarios, when VSP are not enabled on the Tx O/H port, -+ * the buffers will be released by other hardware modules -+ */ -+ int disable_buff_dealloc; -+ -+ struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES]; -+ -+ struct fm_port *rx_port; -+ struct fm_port *tx_port; -+ -+ /* oNIC can have limited control capabilities over a MAC device */ -+ struct mac_device *mac_dev; -+ -+ uint16_t channel; /* "fsl,qman-channel-id" */ -+ struct list_head dpa_fq_list; -+ -+ uint32_t msg_enable; /* net_device message level */ -+ -+ struct dpa_buffer_layout_s *buf_layout; -+ char if_type[30]; -+ -+ /* periodic drain */ -+ struct timer_list timer; -+}; -+ -+extern const struct ethtool_ops dpa_generic_ethtool_ops; -+ -+void dpaa_eth_generic_sysfs_init(struct device *dev); -+void dpaa_eth_generic_sysfs_remove(struct device *dev); -+int __init dpa_generic_debugfs_module_init(void); -+void __exit dpa_generic_debugfs_module_exit(void); -+ -+#endif /* __DPA_ETH_GENERIC_H */ ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic_sysfs.c -@@ -0,0 +1,201 @@ -+/* Copyright 2014 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
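The __percpu percpu_priv field above follows the standard lock-free statistics pattern: one copy of the counters per CPU, updated only locally on the hot path and folded together when someone reads them. Per-CPU allocations also come back zeroed, which makes the memset loop in the probe defensive rather than required. A sketch with an illustrative two-counter struct (the field names are assumptions, not the driver's):

struct example_stats {
	u64 rx_packets;
	u64 tx_packets;
};

/* hot path: bump this CPU's copy; no locks or atomics needed */
static void example_count_rx(struct example_stats __percpu *stats)
{
	this_cpu_inc(stats->rx_packets);
}

/* slow path (e.g. ndo_get_stats64): fold every CPU's copy together */
static u64 example_total_rx(struct example_stats __percpu *stats)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		total += per_cpu_ptr(stats, cpu)->rx_packets;
	return total;
}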
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include "dpaa_eth_generic.h" -+#include "mac.h" /* struct mac_device */ -+ -+static ssize_t dpaa_eth_generic_show_addr(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct dpa_generic_priv_s *priv = netdev_priv(to_net_dev(dev)); -+ struct mac_device *mac_dev = priv->mac_dev; -+ -+ if (mac_dev) -+ return sprintf(buf, "%llx\n", -+ (unsigned long long)mac_dev->res->start); -+ else -+ return sprintf(buf, "none\n"); -+} -+ -+static ssize_t dpaa_eth_generic_show_type(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ return sprintf(buf, "generic\n"); -+} -+ -+static ssize_t dpaa_eth_generic_show_fqids(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct dpa_generic_priv_s *priv = netdev_priv(to_net_dev(dev)); -+ ssize_t bytes = 0; -+ int i = 0; -+ char *str; -+ struct dpa_fq *fq; -+ struct dpa_fq *tmp; -+ struct dpa_fq *prev = NULL; -+ u32 first_fqid = 0; -+ u32 last_fqid = 0; -+ char *prevstr = NULL; -+ -+ list_for_each_entry_safe(fq, tmp, &priv->dpa_fq_list, list) { -+ switch (fq->fq_type) { -+ case FQ_TYPE_RX_DEFAULT: -+ str = "Rx default"; -+ break; -+ case FQ_TYPE_RX_ERROR: -+ str = "Rx error"; -+ break; -+ case FQ_TYPE_RX_PCD: -+ str = "Rx PCD"; -+ break; -+ case FQ_TYPE_TX_CONFIRM: -+ str = "Tx default confirmation"; -+ break; -+ case FQ_TYPE_TX_CONF_MQ: -+ str = "Tx confirmation (mq)"; -+ break; -+ case FQ_TYPE_TX_ERROR: -+ str = "Tx error"; -+ break; -+ case FQ_TYPE_TX: -+ str = "Tx"; -+ break; -+ default: -+ str = "Unknown"; -+ } -+ -+ if (prev && (abs(fq->fqid - prev->fqid) != 1 || -+ str != prevstr)) { -+ if (last_fqid == first_fqid) -+ bytes += sprintf(buf + bytes, -+ "%s: %d\n", prevstr, prev->fqid); -+ else -+ bytes += sprintf(buf + bytes, -+ "%s: %d - %d\n", prevstr, -+ first_fqid, last_fqid); -+ } -+ -+ if (prev && abs(fq->fqid - prev->fqid) == 1 && str == prevstr) -+ last_fqid = fq->fqid; -+ else -+ first_fqid = last_fqid = fq->fqid; -+ -+ prev = fq; -+ prevstr = str; -+ i++; -+ } -+ -+ if (prev) { -+ if (last_fqid == first_fqid) -+ bytes += sprintf(buf + bytes, "%s: %d\n", prevstr, -+ prev->fqid); -+ else -+ bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr, -+ first_fqid, last_fqid); -+ } -+ -+ return bytes; -+} -+ -+static ssize_t dpaa_eth_generic_show_bpids(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ ssize_t bytes = 0; -+ struct dpa_generic_priv_s *priv = netdev_priv(to_net_dev(dev)); -+ struct dpa_bp *rx_bp = priv->rx_bp; -+ struct dpa_bp *draining_tx_bp = priv->draining_tx_bp; -+ int i = 0; -+ -+ /* bound each print by the space left in the page, not by PAGE_SIZE */ -+ bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, "Rx buffer pools:\n"); -+ for (i = 0; i < priv->rx_bp_count; i++) -+ bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, "%u ", -+ rx_bp[i].bpid); -+ -+ bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, "\n"); -+ bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
"%u\n", draining_tx_bp->bpid); -+ -+ return bytes; -+} -+ -+static ssize_t dpaa_eth_generic_show_mac_regs(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct dpa_generic_priv_s *priv = netdev_priv(to_net_dev(dev)); -+ struct mac_device *mac_dev = priv->mac_dev; -+ int n = 0; -+ -+ if (mac_dev) -+ n = fm_mac_dump_regs(mac_dev, buf, n); -+ else -+ return sprintf(buf, "no mac control\n"); -+ -+ return n; -+} -+ -+static struct device_attribute dpaa_eth_generic_attrs[] = { -+ __ATTR(device_addr, S_IRUGO, dpaa_eth_generic_show_addr, NULL), -+ __ATTR(device_type, S_IRUGO, dpaa_eth_generic_show_type, NULL), -+ __ATTR(fqids, S_IRUGO, dpaa_eth_generic_show_fqids, NULL), -+ __ATTR(bpids, S_IRUGO, dpaa_eth_generic_show_bpids, NULL), -+ __ATTR(mac_regs, S_IRUGO, dpaa_eth_generic_show_mac_regs, NULL), -+}; -+ -+void dpaa_eth_generic_sysfs_init(struct device *dev) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(dpaa_eth_generic_attrs); i++) -+ if (device_create_file(dev, &dpaa_eth_generic_attrs[i])) { -+ dev_err(dev, "Error creating sysfs file\n"); -+ while (i > 0) -+ device_remove_file(dev, -+ &dpaa_eth_generic_attrs[--i]); -+ return; -+ } -+} -+ -+void dpaa_eth_generic_sysfs_remove(struct device *dev) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(dpaa_eth_generic_attrs); i++) -+ device_remove_file(dev, &dpaa_eth_generic_attrs[i]); -+} ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macless.c -@@ -0,0 +1,499 @@ -+/* Copyright 2008-2013 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG -+#define pr_fmt(fmt) \ -+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \ -+ KBUILD_BASENAME".c", __LINE__, __func__ -+#else -+#define pr_fmt(fmt) \ -+ KBUILD_MODNAME ": " fmt -+#endif -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "dpaa_eth.h" -+#include "dpaa_eth_common.h" -+#include "dpaa_eth_base.h" -+#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */ -+#include "mac.h" -+ -+/* For MAC-based interfaces, we compute the tx needed headroom from the -+ * associated Tx port's buffer layout settings. -+ * For MACless interfaces just use a default value. -+ */ -+#define DPA_DEFAULT_TX_HEADROOM 64 -+ -+#define DPA_DESCRIPTION "FSL DPAA MACless Ethernet driver" -+ -+MODULE_LICENSE("Dual BSD/GPL"); -+ -+MODULE_DESCRIPTION(DPA_DESCRIPTION); -+ -+/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */ -+static uint16_t macless_tx_timeout = 1000; -+module_param(macless_tx_timeout, ushort, S_IRUGO); -+MODULE_PARM_DESC(macless_tx_timeout, "The MACless Tx timeout in ms"); -+ -+/* forward declarations */ -+static int __cold dpa_macless_start(struct net_device *net_dev); -+static int __cold dpa_macless_stop(struct net_device *net_dev); -+static int __cold dpa_macless_set_address(struct net_device *net_dev, -+ void *addr); -+static void __cold dpa_macless_set_rx_mode(struct net_device *net_dev); -+ -+static int dpaa_eth_macless_probe(struct platform_device *_of_dev); -+static netdev_features_t -+dpa_macless_fix_features(struct net_device *dev, netdev_features_t features); -+ -+static const struct net_device_ops dpa_macless_ops = { -+ .ndo_open = dpa_macless_start, -+ .ndo_start_xmit = dpa_shared_tx, -+ .ndo_stop = dpa_macless_stop, -+ .ndo_tx_timeout = dpa_timeout, -+ .ndo_get_stats64 = dpa_get_stats64, -+ .ndo_set_mac_address = dpa_macless_set_address, -+ .ndo_set_rx_mode = dpa_macless_set_rx_mode, -+ .ndo_validate_addr = eth_validate_addr, -+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE -+ .ndo_select_queue = dpa_select_queue, -+#endif -+ .ndo_change_mtu = dpa_change_mtu, -+ .ndo_init = dpa_ndo_init, -+ .ndo_set_features = dpa_set_features, -+ .ndo_fix_features = dpa_macless_fix_features, -+}; -+ -+static const struct of_device_id dpa_macless_match[] = { -+ { -+ .compatible = "fsl,dpa-ethernet-macless" -+ }, -+ {} -+}; -+MODULE_DEVICE_TABLE(of, dpa_macless_match); -+ -+static struct platform_driver dpa_macless_driver = { -+ .driver = { -+ .name = KBUILD_MODNAME "-macless", -+ .of_match_table = dpa_macless_match, -+ .owner = THIS_MODULE, -+ }, -+ .probe = dpaa_eth_macless_probe, -+ .remove = dpa_remove -+}; -+ -+static const char macless_frame_queues[][25] = { -+ [RX] = "fsl,qman-frame-queues-rx", -+ [TX] = "fsl,qman-frame-queues-tx" -+}; -+ -+static int __cold dpa_macless_start(struct net_device *net_dev) -+{ -+ const struct dpa_priv_s *priv = netdev_priv(net_dev); -+ struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer; -+ -+ netif_tx_start_all_queues(net_dev); -+ -+ if (proxy_dev) -+ dpa_proxy_start(net_dev); -+ -+ -+ return 0; -+} -+ -+static int __cold dpa_macless_stop(struct net_device *net_dev) -+{ -+ const struct dpa_priv_s *priv = netdev_priv(net_dev); -+ struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer; -+ -+ netif_tx_stop_all_queues(net_dev); -+ -+ if (proxy_dev) -+ dpa_proxy_stop(proxy_dev, net_dev); -+ -+ return 0; -+} -+ -+static int dpa_macless_set_address(struct net_device *net_dev, void *addr) -+{ -+ const 
struct dpa_priv_s *priv = netdev_priv(net_dev); -+ struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer; -+ int _errno; -+ -+ _errno = eth_mac_addr(net_dev, addr); -+ if (_errno < 0) { -+ if (netif_msg_drv(priv)) -+ netdev_err(net_dev, "eth_mac_addr() = %d\n", _errno); -+ return _errno; -+ } -+ -+ if (proxy_dev) { -+ _errno = dpa_proxy_set_mac_address(proxy_dev, net_dev); -+ if (_errno < 0) { -+ if (netif_msg_drv(priv)) -+ netdev_err(net_dev, "proxy_set_mac_address() = %d\n", -+ _errno); -+ return _errno; -+ } -+ } -+ -+ return 0; -+} -+ -+static void __cold dpa_macless_set_rx_mode(struct net_device *net_dev) -+{ -+ const struct dpa_priv_s *priv = netdev_priv(net_dev); -+ struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer; -+ -+ if (proxy_dev) -+ dpa_proxy_set_rx_mode(proxy_dev, net_dev); -+} -+ -+static netdev_features_t -+dpa_macless_fix_features(struct net_device *dev, netdev_features_t features) -+{ -+ netdev_features_t unsupported_features = 0; -+ -+ /* In theory we should never be requested to enable features that -+ * we didn't set in netdev->features and netdev->hw_features at probe -+ * time, but double check just to be on the safe side. -+ */ -+ unsupported_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; -+ /* We don't support enabling Rx csum through ethtool yet */ -+ unsupported_features |= NETIF_F_RXCSUM; -+ -+ features &= ~unsupported_features; -+ -+ return features; -+} -+ -+static int dpa_macless_netdev_init(struct device_node *dpa_node, -+ struct net_device *net_dev) -+{ -+ struct dpa_priv_s *priv = netdev_priv(net_dev); -+ struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer; -+ struct device *dev = net_dev->dev.parent; -+ const uint8_t *mac_addr; -+ -+ net_dev->netdev_ops = &dpa_macless_ops; -+ -+ if (proxy_dev) { -+ struct mac_device *mac_dev = proxy_dev->mac_dev; -+ net_dev->mem_start = mac_dev->res->start; -+ net_dev->mem_end = mac_dev->res->end; -+ -+ return dpa_netdev_init(net_dev, mac_dev->addr, -+ macless_tx_timeout); -+ } else { -+ /* Get the MAC address from device tree */ -+ mac_addr = of_get_mac_address(dpa_node); -+ -+ if (mac_addr == NULL) { -+ if (netif_msg_probe(priv)) -+ dev_err(dev, "No MAC address found!\n"); -+ return -EINVAL; -+ } -+ -+ return dpa_netdev_init(net_dev, mac_addr, -+ macless_tx_timeout); -+ } -+} -+ -+/* Probing of FQs for MACless ports */ -+static int dpa_fq_probe_macless(struct device *dev, struct list_head *list, -+ enum port_type ptype) -+{ -+ struct device_node *np = dev->of_node; -+ const struct fqid_cell *fqids; -+ int num_ranges; -+ int i, lenp; -+ -+ fqids = of_get_property(np, macless_frame_queues[ptype], &lenp); -+ if (fqids == NULL) { -+ dev_err(dev, "Need FQ definition in dts for MACless devices\n"); -+ return -EINVAL; -+ } -+ -+ num_ranges = lenp / sizeof(*fqids); -+ -+ /* All ranges defined in the device tree are used as Rx/Tx queues */ -+ for (i = 0; i < num_ranges; i++) { -+ if (!dpa_fq_alloc(dev, be32_to_cpu(fqids[i].start), -+ be32_to_cpu(fqids[i].count), list, -+ ptype == RX ? 
FQ_TYPE_RX_PCD : FQ_TYPE_TX)) { -+ dev_err(dev, "dpa_fq_alloc() failed\n"); -+ return -ENOMEM; -+ } -+ } -+ -+ return 0; -+} -+ -+static struct proxy_device * -+dpa_macless_proxy_probe(struct platform_device *_of_dev) -+{ -+ struct device *dev; -+ const phandle *proxy_prop; -+ struct proxy_device *proxy_dev; -+ struct device_node *proxy_node; -+ struct platform_device *proxy_pdev; -+ int lenp; -+ -+ dev = &_of_dev->dev; -+ -+ proxy_prop = of_get_property(dev->of_node, "proxy", &lenp); -+ if (!proxy_prop) -+ return NULL; -+ -+ proxy_node = of_find_node_by_phandle(*proxy_prop); -+ if (!proxy_node) { -+ dev_err(dev, "Cannot find proxy node\n"); -+ return NULL; -+ } -+ -+ proxy_pdev = of_find_device_by_node(proxy_node); -+ if (!proxy_pdev) { -+ of_node_put(proxy_node); -+ dev_err(dev, "Cannot find device represented by proxy node\n"); -+ return NULL; -+ } -+ -+ proxy_dev = dev_get_drvdata(&proxy_pdev->dev); -+ -+ of_node_put(proxy_node); -+ -+ return proxy_dev; -+} -+ -+static int dpaa_eth_macless_probe(struct platform_device *_of_dev) -+{ -+ int err = 0, i, channel; -+ struct device *dev; -+ struct device_node *dpa_node; -+ struct dpa_bp *dpa_bp; -+ struct dpa_fq *dpa_fq, *tmp; -+ size_t count; -+ struct net_device *net_dev = NULL; -+ struct dpa_priv_s *priv = NULL; -+ struct dpa_percpu_priv_s *percpu_priv; -+ static struct proxy_device *proxy_dev; -+ struct task_struct *kth; -+ static u8 macless_idx; -+ -+ dev = &_of_dev->dev; -+ -+ dpa_node = dev->of_node; -+ -+ if (!of_device_is_available(dpa_node)) -+ return -ENODEV; -+ -+ /* Get the buffer pools assigned to this interface */ -+ dpa_bp = dpa_bp_probe(_of_dev, &count); -+ if (IS_ERR(dpa_bp)) -+ return PTR_ERR(dpa_bp); -+ -+ for (i = 0; i < count; i++) -+ dpa_bp[i].seed_cb = dpa_bp_shared_port_seed; -+ -+ proxy_dev = dpa_macless_proxy_probe(_of_dev); -+ -+ -+ /* Allocate this early, so we can store relevant information in -+ * the private area (needed by 1588 code in dpa_mac_probe) -+ */ -+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES); -+ if (!net_dev) { -+ dev_err(dev, "alloc_etherdev_mq() failed\n"); -+ return -ENOMEM; -+ } -+ -+ /* Do this here, so we can be verbose early */ -+ SET_NETDEV_DEV(net_dev, dev); -+ dev_set_drvdata(dev, net_dev); -+ -+ priv = netdev_priv(net_dev); -+ priv->net_dev = net_dev; -+ sprintf(priv->if_type, "macless%d", macless_idx++); -+ -+ priv->msg_enable = netif_msg_init(advanced_debug, -1); -+ -+ priv->peer = NULL; -+ priv->mac_dev = NULL; -+ if (proxy_dev) { -+ /* This is a temporary solution, needed to keep -+ * the main driver upstreamable: adjust_link -+ * is a general function that should work for both -+ * the private driver and the macless driver with -+ * MAC device control capabilities, even if the -+ * latter will not be upstreamable. -+ * TODO: find a convenient solution (wrapper over -+ * main priv structure, etc.)
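dpa_macless_proxy_probe() above resolves the "proxy" phandle by hand via of_get_property() and of_find_node_by_phandle(). The same lookup can lean on of_parse_phandle(), and, strictly speaking, the reference that of_find_device_by_node() takes on the platform device also wants a put_device() once the drvdata has been read. A sketch under those assumptions:

static void *example_resolve_proxy(struct device *dev)
{
	struct device_node *np;
	struct platform_device *pdev;
	void *drvdata;

	np = of_parse_phandle(dev->of_node, "proxy", 0);
	if (!np)
		return NULL;

	pdev = of_find_device_by_node(np);
	of_node_put(np);		/* node reference no longer needed */
	if (!pdev)
		return NULL;

	drvdata = dev_get_drvdata(&pdev->dev);
	put_device(&pdev->dev);		/* drop the ref of_find_device_by_node() took */
	return drvdata;
}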
-+ */ -+ priv->mac_dev = proxy_dev->mac_dev; -+ -+ /* control over proxy's mac device */ -+ priv->peer = (void *)proxy_dev; -+ } -+ -+ INIT_LIST_HEAD(&priv->dpa_fq_list); -+ -+ err = dpa_fq_probe_macless(dev, &priv->dpa_fq_list, RX); -+ if (!err) -+ err = dpa_fq_probe_macless(dev, &priv->dpa_fq_list, -+ TX); -+ if (err < 0) -+ goto fq_probe_failed; -+ -+ /* bp init */ -+ priv->bp_count = count; -+ err = dpa_bp_create(net_dev, dpa_bp, count); -+ if (err < 0) -+ goto bp_create_failed; -+ -+ channel = dpa_get_channel(); -+ -+ if (channel < 0) { -+ err = channel; -+ goto get_channel_failed; -+ } -+ -+ priv->channel = (uint16_t)channel; -+ -+ /* Start a thread that will walk the cpus with affine portals -+ * and add this pool channel to each's dequeue mask. -+ */ -+ kth = kthread_run(dpaa_eth_add_channel, -+ (void *)(unsigned long)priv->channel, -+ "dpaa_%p:%d", net_dev, priv->channel); -+ if (!kth) { -+ err = -ENOMEM; -+ goto add_channel_failed; -+ } -+ -+ dpa_fq_setup(priv, &shared_fq_cbs, NULL); -+ -+ /* Add the FQs to the interface, and make them active */ -+ list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) { -+ /* For MAC-less devices we only get here for RX frame queues -+ * initialization, which are the TX queues of the other -+ * partition. -+ * It is safe to rely on one partition to set the FQ taildrop -+ * threshold for the TX queues of the other partition -+ * because the ERN notifications will be received by the -+ * partition doing qman_enqueue. -+ */ -+ err = dpa_fq_init(dpa_fq, true); -+ if (err < 0) -+ goto fq_alloc_failed; -+ } -+ -+ priv->tx_headroom = DPA_DEFAULT_TX_HEADROOM; -+ -+ priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv); -+ -+ if (priv->percpu_priv == NULL) { -+ dev_err(dev, "devm_alloc_percpu() failed\n"); -+ err = -ENOMEM; -+ goto alloc_percpu_failed; -+ } -+ for_each_possible_cpu(i) { -+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i); -+ memset(percpu_priv, 0, sizeof(*percpu_priv)); -+ } -+ -+ err = dpa_macless_netdev_init(dpa_node, net_dev); -+ if (err < 0) -+ goto netdev_init_failed; -+ -+ dpaa_eth_sysfs_init(&net_dev->dev); -+ -+ pr_info("fsl_dpa_macless: Probed %s interface as %s\n", -+ priv->if_type, net_dev->name); -+ -+ return 0; -+ -+netdev_init_failed: -+alloc_percpu_failed: -+fq_alloc_failed: -+ if (net_dev) -+ dpa_fq_free(dev, &priv->dpa_fq_list); -+add_channel_failed: -+get_channel_failed: -+ if (net_dev) -+ dpa_bp_free(priv); -+bp_create_failed: -+fq_probe_failed: -+ dev_set_drvdata(dev, NULL); -+ if (net_dev) -+ free_netdev(net_dev); -+ -+ return err; -+} -+ -+static int __init __cold dpa_macless_load(void) -+{ -+ int _errno; -+ -+ pr_info(DPA_DESCRIPTION "\n"); -+ -+ /* Initialize dpaa_eth mirror values */ -+ dpa_rx_extra_headroom = fm_get_rx_extra_headroom(); -+ dpa_max_frm = fm_get_max_frm(); -+ -+ _errno = platform_driver_register(&dpa_macless_driver); -+ if (unlikely(_errno < 0)) { -+ pr_err(KBUILD_MODNAME -+ ": %s:%hu:%s(): platform_driver_register() = %d\n", -+ KBUILD_BASENAME".c", __LINE__, __func__, _errno); -+ } -+ -+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", -+ KBUILD_BASENAME".c", __func__); -+ -+ return _errno; -+} -+module_init(dpa_macless_load); -+ -+static void __exit __cold dpa_macless_unload(void) -+{ -+ platform_driver_unregister(&dpa_macless_driver); -+ -+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", -+ KBUILD_BASENAME".c", __func__); -+} -+module_exit(dpa_macless_unload); ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.c -@@ -0,0 +1,2156 @@ -+/* Copyright 2015 Freescale 
Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
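One detail in the macless probe above is worth flagging: kthread_run() reports failure as ERR_PTR(-ENOMEM), never as NULL, so the "if (!kth)" test can never trigger on an allocation failure. The check would have to use IS_ERR(), roughly:

	kth = kthread_run(dpaa_eth_add_channel,
			  (void *)(unsigned long)priv->channel,
			  "dpaa_%p:%d", net_dev, priv->channel);
	if (IS_ERR(kth)) {	/* kthread_run() never returns NULL */
		err = PTR_ERR(kth);
		goto add_channel_failed;
	}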
-+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include "dpaa_eth_macsec.h" -+#include "dpaa_eth_common.h" -+ -+#ifdef CONFIG_FSL_DPAA_1588 -+#include "dpaa_1588.h" -+#endif -+ -+static struct sock *nl_sk; -+static struct macsec_priv_s *macsec_priv[FM_MAX_NUM_OF_MACS]; -+static char *macsec_ifs[FM_MAX_NUM_OF_MACS]; -+static int macsec_ifs_cnt; -+ -+static char ifs[MAX_LEN]; -+const struct ethtool_ops *dpa_ethtool_ops_prev; -+static struct ethtool_ops dpa_macsec_ethtool_ops; -+ -+module_param_string(ifs, ifs, MAX_LEN, 0000); -+MODULE_PARM_DESC(ifs, "Comma separated interface list"); -+ -+struct macsec_priv_s *dpa_macsec_get_priv(struct net_device *net_dev) -+{ -+ return macsec_priv[net_dev->ifindex - 1]; -+} -+ -+static void macsec_setup_ethtool_ops(struct net_device *net_dev) -+{ -+ /* remember private driver's ethtool ops just once */ -+ if (!dpa_ethtool_ops_prev) { -+ dpa_ethtool_ops_prev = net_dev->ethtool_ops; -+ -+ memcpy(&dpa_macsec_ethtool_ops, net_dev->ethtool_ops, -+ sizeof(struct ethtool_ops)); -+ dpa_macsec_ethtool_ops.get_sset_count = -+ dpa_macsec_get_sset_count; -+ dpa_macsec_ethtool_ops.get_ethtool_stats = -+ dpa_macsec_get_ethtool_stats; -+ dpa_macsec_ethtool_ops.get_strings = -+ dpa_macsec_get_strings; -+ } -+ -+ net_dev->ethtool_ops = &dpa_macsec_ethtool_ops; -+} -+ -+static void macsec_restore_ethtool_ops(struct net_device *net_dev) -+{ -+ net_dev->ethtool_ops = dpa_ethtool_ops_prev; -+} -+ -+ -+static int ifname_to_id(char *ifname) -+{ -+ int i; -+ -+ for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) { -+ if (macsec_priv[i]->net_dev && -+ (strcmp(ifname, macsec_priv[i]->net_dev->name) == 0)) { -+ return i; -+ } -+ } -+ -+ return -1; -+} -+ -+static void deinit_macsec(int macsec_id) -+{ -+ struct macsec_priv_s *selected_macsec_priv; -+ int i; -+ -+ selected_macsec_priv = macsec_priv[macsec_id]; -+ -+ if (selected_macsec_priv->en_state == SECY_ENABLED) { -+ for (i = 0; i < NUM_OF_RX_SC; i++) { -+ if (!selected_macsec_priv->rx_sc_dev[i]) -+ continue; -+ fm_macsec_secy_rxsa_disable_receive( -+ selected_macsec_priv->fm_ms_secy, -+ selected_macsec_priv->rx_sc_dev[i], -+ selected_macsec_priv->an); -+ pr_debug("disable rx_sa done\n"); -+ -+ fm_macsec_secy_delete_rx_sa( -+ selected_macsec_priv->fm_ms_secy, -+ selected_macsec_priv->rx_sc_dev[i], -+ selected_macsec_priv->an); -+ pr_debug("delete rx_sa done\n"); -+ -+ fm_macsec_secy_delete_rxsc( -+ selected_macsec_priv->fm_ms_secy, -+ selected_macsec_priv->rx_sc_dev[i]); -+ pr_debug("delete rx_sc done\n"); -+ } -+ -+ fm_macsec_secy_delete_tx_sa(selected_macsec_priv->fm_ms_secy, -+ selected_macsec_priv->an); -+ pr_debug("delete tx_sa done\n"); -+ -+ fm_macsec_secy_free(selected_macsec_priv->fm_ms_secy); -+ selected_macsec_priv->fm_ms_secy = NULL; -+ pr_debug("secy free done\n"); -+ } -+ -+ if (selected_macsec_priv->en_state != MACSEC_DISABLED) { -+ fm_macsec_disable(selected_macsec_priv->fm_macsec); -+ fm_macsec_free(selected_macsec_priv->fm_macsec); -+ selected_macsec_priv->fm_macsec = NULL; -+ pr_debug("macsec disable and free done\n"); -+ } -+} -+ -+static void parse_ifs(void) -+{ -+ char *token, *strpos = ifs; -+ -+ while ((token = strsep(&strpos, ","))) { -+ if (strlen(token) == 0) -+ return; -+ else -+ macsec_ifs[macsec_ifs_cnt] = token; -+ macsec_ifs_cnt++; -+ } -+} -+ -+static void macsec_exception(handle_t _macsec_priv_s, -+ fm_macsec_exception exception) -+{ -+ struct macsec_priv_s *priv; -+ priv = (struct macsec_priv_s *)_macsec_priv_s; -+ -+ switch (exception) { -+ case 
(SINGLE_BIT_ECC): -+ dev_warn(priv->mac_dev->dev, "%s:%s SINGLE_BIT_ECC exception\n", -+ KBUILD_BASENAME".c", __func__); -+ break; -+ case (MULTI_BIT_ECC): -+ dev_warn(priv->mac_dev->dev, "%s:%s MULTI_BIT_ECC exception\n", -+ KBUILD_BASENAME".c", __func__); -+ break; -+ default: -+ dev_warn(priv->mac_dev->dev, "%s:%s exception %d\n", -+ KBUILD_BASENAME".c", __func__, exception); -+ break; -+ } -+} -+ -+ -+static void macsec_secy_exception(handle_t _macsec_priv_s, -+ fm_macsec_secy_exception exception) -+{ -+ struct macsec_priv_s *priv; -+ priv = (struct macsec_priv_s *)_macsec_priv_s; -+ -+ switch (exception) { -+ case (SECY_EX_FRAME_DISCARDED): -+ dev_warn(priv->mac_dev->dev, -+ "%s:%s SECY_EX_FRAME_DISCARDED exception\n", -+ KBUILD_BASENAME".c", __func__); -+ break; -+ default: -+ dev_warn(priv->mac_dev->dev, "%s:%s exception %d\n", -+ KBUILD_BASENAME".c", __func__, exception); -+ break; -+ } -+} -+ -+static void macsec_secy_events(handle_t _macsec_priv_s, -+ fm_macsec_secy_event event) -+{ -+ struct macsec_priv_s *priv; -+ priv = (struct macsec_priv_s *)_macsec_priv_s; -+ -+ switch (event) { -+ case (SECY_EV_NEXT_PN): -+ dev_dbg(priv->mac_dev->dev, "%s:%s SECY_EV_NEXT_PN event\n", -+ KBUILD_BASENAME".c", __func__); -+ break; -+ default: -+ dev_dbg(priv->mac_dev->dev, "%s:%s event %d\n", -+ KBUILD_BASENAME".c", __func__, event); -+ break; -+ } -+} -+ -+static struct qman_fq *macsec_get_tx_conf_queue( -+ const struct macsec_priv_s *macsec_priv, -+ struct qman_fq *tx_fq) -+{ -+ int i; -+ -+ for (i = 0; i < MACSEC_ETH_TX_QUEUES; i++) -+ if (macsec_priv->egress_fqs[i] == tx_fq) -+ return macsec_priv->conf_fqs[i]; -+ return NULL; -+} -+ -+/* Initialize qman fqs. Still need to set context_a, specifically the bits -+ * that identify the secure channel. -+ */ -+static int macsec_fq_init(struct dpa_fq *dpa_fq) -+{ -+ struct qman_fq *fq; -+ struct device *dev; -+ struct qm_mcc_initfq initfq; -+ uint32_t sc_phys_id; -+ int _errno, macsec_id; -+ -+ dev = dpa_fq->net_dev->dev.parent; -+ macsec_id = dpa_fq->net_dev->ifindex - 1; -+ -+ if (dpa_fq->fqid == 0) -+ dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID; -+ -+ dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY); -+ _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base); -+ -+ if (_errno) { -+ dev_err(dev, "qman_create_fq() failed\n"); -+ return _errno; -+ } -+ -+ fq = &dpa_fq->fq_base; -+ -+ if (dpa_fq->init) { -+ initfq.we_mask = QM_INITFQ_WE_FQCTRL; -+ initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE; -+ -+ if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM) -+ initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE; -+ -+ initfq.we_mask |= QM_INITFQ_WE_DESTWQ; -+ -+ initfq.fqd.dest.channel = dpa_fq->channel; -+ initfq.fqd.dest.wq = dpa_fq->wq; -+ -+ if (dpa_fq->fq_type == FQ_TYPE_TX) { -+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; -+ -+ /* Obtain the TX scId from fman */ -+ _errno = fm_macsec_secy_get_txsc_phys_id( -+ macsec_priv[macsec_id]->fm_ms_secy, -+ &sc_phys_id); -+ if (unlikely(_errno < 0)) { -+ dev_err(dev, "fm_macsec_secy_get_txsc_phys_id = %d\n", -+ _errno); -+ return _errno; -+ } -+ -+ /* Write the TX SC-ID in the context of the FQ. 
-+ * A2V=1 (use the A2 field) -+ * A0V=1 (use the A0 field) -+ * OVOM=1 -+ * MCV=1 (MACsec controlled frames) -+ * MACCMD=the TX scId -+ */ -+ initfq.fqd.context_a.hi = 0x1a100000 | -+ sc_phys_id << 16; -+ initfq.fqd.context_a.lo = 0x80000000; -+ } -+ -+ _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq); -+ if (_errno < 0) { -+ dev_err(dev, "qman_init_fq(%u) = %d\n", -+ qman_fq_fqid(fq), _errno); -+ qman_destroy_fq(fq, 0); -+ return _errno; -+ } -+ } -+ -+ dpa_fq->fqid = qman_fq_fqid(fq); -+ -+ return 0; -+} -+ -+/* Configure and enable secy. */ -+static int enable_secy(struct generic_msg *gen, int *macsec_id) -+{ -+ struct enable_secy *sec; -+ int _errno; -+ struct fm_macsec_secy_params secy_params; -+ struct dpa_fq *dpa_fq, *tmp; -+ struct macsec_priv_s *selected_macsec_priv; -+ -+ sec = &gen->payload.secy; -+ -+ if (sec->macsec_id < 0 || sec->macsec_id >= FM_MAX_NUM_OF_MACS) { -+ _errno = -EINVAL; -+ goto _return; -+ } -+ *macsec_id = sec->macsec_id; -+ selected_macsec_priv = macsec_priv[sec->macsec_id]; -+ -+ if (selected_macsec_priv->fm_ms_secy) { -+ pr_err("Secy has already been enabled\n"); -+ return -EINVAL; -+ } -+ -+ memset(&secy_params, 0, sizeof(secy_params)); -+ secy_params.fm_macsec_h = selected_macsec_priv->fm_macsec; -+ secy_params.num_receive_channels = NUM_OF_RX_SC; -+ secy_params.tx_sc_params.sci = sec->sci; -+ -+ /* Set encryption method */ -+ secy_params.tx_sc_params.cipher_suite = SECY_GCM_AES_128; -+#if (DPAA_VERSION >= 11) -+ secy_params.tx_sc_params.cipher_suite = SECY_GCM_AES_256; -+#endif /* (DPAA_VERSION >= 11) */ -+ secy_params.exception_f = macsec_secy_exception; -+ secy_params.event_f = macsec_secy_events; -+ secy_params.app_h = selected_macsec_priv; -+ -+ selected_macsec_priv->fm_ms_secy = -+ fm_macsec_secy_config(&secy_params); -+ -+ if (unlikely(selected_macsec_priv->fm_ms_secy == NULL)) { -+ _errno = -EINVAL; -+ goto _return; -+ } -+ -+ /* Configure the insertion mode */ -+ if (sec->config_insertion_mode) { -+ _errno = fm_macsec_secy_config_sci_insertion_mode( -+ selected_macsec_priv->fm_ms_secy, -+ sec->sci_insertion_mode); -+ if (unlikely(_errno < 0)) -+ goto _return; -+ } -+ -+ /* Configure the frame protection */ -+ if (sec->config_protect_frames) { -+ _errno = fm_macsec_secy_config_protect_frames( -+ selected_macsec_priv->fm_ms_secy, -+ sec->protect_frames); -+ if (unlikely(_errno < 0)) -+ goto _return; -+ } -+ -+ /* Configure the replay window */ -+ if (sec->config_replay_window) { -+ _errno = fm_macsec_secy_config_replay_window( -+ selected_macsec_priv->fm_ms_secy, -+ sec->replay_protect, -+ sec->replay_window); -+ if (unlikely(_errno < 0)) -+ goto _return; -+ } -+ -+ /* Configure the validation mode */ -+ if (sec->config_validation_mode) { -+ _errno = fm_macsec_secy_config_validation_mode( -+ selected_macsec_priv->fm_ms_secy, -+ sec->validate_frames); -+ if (unlikely(_errno < 0)) -+ goto _return; -+ } -+ -+ /* Select the exceptions that will be signaled */ -+ if (sec->config_exception) { -+ _errno = fm_macsec_secy_config_exception( -+ selected_macsec_priv->fm_ms_secy, -+ sec->exception, -+ sec->enable_exception); -+ if (unlikely(_errno < 0)) -+ goto _return; -+ } -+ -+ /* Select the events that will be signaled */ -+ if (sec->config_event) { -+ _errno = fm_macsec_secy_config_event( -+ selected_macsec_priv->fm_ms_secy, -+ sec->event, -+ sec->enable_event); -+ if (unlikely(_errno < 0)) -+ goto _return; -+ } -+ -+ /* Configure a point-to-point connection */ -+ if (sec->config_point_to_point) { -+ _errno = 
fm_macsec_secy_config_point_to_point( -+ selected_macsec_priv->fm_ms_secy); -+ if (unlikely(_errno < 0)) -+ goto _return; -+ } -+ -+ /* Configure the connection's confidentiality state */ -+ if (sec->config_confidentiality) { -+ _errno = fm_macsec_secy_config_confidentiality( -+ selected_macsec_priv->fm_ms_secy, -+ sec->confidentiality_enable, -+ sec->confidentiality_offset); -+ if (unlikely(_errno < 0)) -+ goto _return; -+ } -+ -+ _errno = fm_macsec_secy_init(selected_macsec_priv->fm_ms_secy); -+ if (unlikely(_errno < 0)) -+ goto _return_fm_macsec_secy_free; -+ -+ list_for_each_entry_safe(dpa_fq, -+ tmp, -+ &selected_macsec_priv->dpa_fq_list, -+ list) { -+ _errno = macsec_fq_init(dpa_fq); -+ if (_errno < 0) -+ goto _return; -+ } -+ -+ return 0; -+ -+_return_fm_macsec_secy_free: -+ fm_macsec_secy_free(selected_macsec_priv->fm_ms_secy); -+ selected_macsec_priv->fm_ms_secy = NULL; -+_return: -+ return _errno; -+} -+ -+static int set_macsec_exception(struct generic_msg *gen) -+{ -+ struct set_exception *set_ex; -+ struct macsec_priv_s *selected_macsec_priv; -+ int rv; -+ -+ set_ex = &(gen->payload.set_ex); -+ -+ selected_macsec_priv = macsec_priv[set_ex->macsec_id]; -+ -+ rv = fm_macsec_set_exception(selected_macsec_priv->fm_macsec, -+ set_ex->exception, -+ set_ex->enable_exception); -+ if (unlikely(rv < 0)) -+ pr_err("error when setting the macsec exception mask\n"); -+ -+ return rv; -+} -+ -+static int create_tx_sa(struct generic_msg *gen) -+{ -+ struct create_tx_sa *c_tx_sa; -+ macsec_sa_key_t sa_key; -+ int rv; -+ struct macsec_priv_s *selected_macsec_priv; -+ -+ c_tx_sa = &(gen->payload.c_tx_sa); -+ -+ if (c_tx_sa->macsec_id < 0 || -+ c_tx_sa->macsec_id >= FM_MAX_NUM_OF_MACS) { -+ kfree(c_tx_sa); -+ return -EINVAL; -+ } -+ selected_macsec_priv = macsec_priv[c_tx_sa->macsec_id]; -+ -+ /* set macsec_priv field */ -+ selected_macsec_priv->an = c_tx_sa->an; -+ -+ /* because of the algorithms used */ -+ if (unlikely(c_tx_sa->sak_len > 32)) { -+ pr_warn("size of secure key is greater than 32 bytes!\n"); -+ kfree(c_tx_sa); -+ return -EINVAL; -+ } -+ -+ rv = copy_from_user(&sa_key, -+ c_tx_sa->sak, -+ c_tx_sa->sak_len); -+ if (unlikely(rv != 0)) { -+ pr_err("copy_from_user could not copy %i bytes\n", rv); -+ return -EFAULT; -+ } -+ -+ rv = fm_macsec_secy_create_tx_sa(selected_macsec_priv->fm_ms_secy, -+ c_tx_sa->an, -+ sa_key); -+ if (unlikely(rv < 0)) -+ pr_err("error when creating tx sa\n"); -+ -+ return rv; -+} -+ -+static int modify_tx_sa_key(struct generic_msg *gen) -+{ -+ struct modify_tx_sa_key *tx_sa_key; -+ struct macsec_priv_s *selected_macsec_priv; -+ macsec_sa_key_t sa_key; -+ int rv; -+ -+ tx_sa_key = &(gen->payload.modify_tx_sa_key); -+ -+ if (tx_sa_key->macsec_id < 0 || -+ tx_sa_key->macsec_id >= FM_MAX_NUM_OF_MACS) -+ return -EINVAL; -+ selected_macsec_priv = macsec_priv[tx_sa_key->macsec_id]; -+ -+ /* set macsec_priv field */ -+ selected_macsec_priv->an = tx_sa_key->an; -+ -+ if (unlikely(tx_sa_key->sak_len > 32)) { -+ pr_warn("size of secure key is greater than 32 bytes!\n"); -+ kfree(tx_sa_key); -+ return -EINVAL; -+ } -+ -+ rv = copy_from_user(&sa_key, -+ tx_sa_key->sak, -+ tx_sa_key->sak_len); -+ if (unlikely(rv != 0)) { -+ pr_err("copy_from_user could not copy %i bytes\n", rv); -+ return -EFAULT; -+ } -+ -+ rv = fm_macsec_secy_txsa_modify_key(selected_macsec_priv->fm_ms_secy, -+ tx_sa_key->an, -+ sa_key); -+ if (unlikely(rv < 0)) -+ pr_err("error while modifying the tx sa key\n"); -+ -+ return rv; -+} -+ -+static int activate_tx_sa(struct generic_msg *gen) -+{ -+ struct 
activate_tx_sa *a_tx_sa; -+ struct macsec_priv_s *selected_macsec_priv; -+ int rv; -+ -+ a_tx_sa = &(gen->payload.a_tx_sa); -+ -+ if (a_tx_sa->macsec_id < 0 || -+ a_tx_sa->macsec_id >= FM_MAX_NUM_OF_MACS) { -+ kfree(a_tx_sa); -+ return -EINVAL; -+ } -+ selected_macsec_priv = macsec_priv[a_tx_sa->macsec_id]; -+ -+ rv = fm_macsec_secy_txsa_set_active(selected_macsec_priv->fm_ms_secy, -+ a_tx_sa->an); -+ if (unlikely(rv < 0)) -+ pr_err("error when creating tx sa\n"); -+ -+ return rv; -+} -+ -+static int get_tx_sa_an(struct generic_msg *gen, macsec_an_t *an) -+{ -+ struct macsec_priv_s *selected_macsec_priv; -+ -+ if (gen->payload.macsec_id < 0 || -+ gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) -+ return -EINVAL; -+ -+ selected_macsec_priv = macsec_priv[gen->payload.macsec_id]; -+ -+ fm_macsec_secy_txsa_get_active(selected_macsec_priv->fm_ms_secy, an); -+ -+ return 0; -+} -+ -+static int create_rx_sc(struct generic_msg *gen) -+{ -+ struct fm_macsec_secy_sc_params params; -+ struct macsec_priv_s *selected_macsec_priv; -+ struct rx_sc_dev *rx_sc_dev; -+ uint32_t sc_phys_id; -+ int i; -+ -+ if (gen->payload.c_rx_sc.macsec_id < 0 || -+ gen->payload.c_rx_sc.macsec_id >= FM_MAX_NUM_OF_MACS) -+ return -EINVAL; -+ selected_macsec_priv = macsec_priv[gen->payload.c_rx_sc.macsec_id]; -+ -+ for (i = 0; i < NUM_OF_RX_SC; i++) -+ if (!selected_macsec_priv->rx_sc_dev[i]) -+ break; -+ if (i == NUM_OF_RX_SC) { -+ pr_err("number of maximum RX_SC's has been reached\n"); -+ return -EINVAL; -+ } -+ -+ params.sci = gen->payload.c_rx_sc.sci; -+ params.cipher_suite = SECY_GCM_AES_128; -+#if (DPAA_VERSION >= 11) -+ params.cipher_suite = SECY_GCM_AES_256; -+#endif /* (DPAA_VERSION >= 11) */ -+ -+ rx_sc_dev = fm_macsec_secy_create_rxsc(selected_macsec_priv->fm_ms_secy, -+ ¶ms); -+ -+ fm_macsec_secy_get_rxsc_phys_id(selected_macsec_priv->fm_ms_secy, -+ rx_sc_dev, -+ &sc_phys_id); -+ -+ selected_macsec_priv->rx_sc_dev[sc_phys_id] = rx_sc_dev; -+ -+ return sc_phys_id; -+} -+ -+static int create_rx_sa(struct generic_msg *gen) -+{ -+ struct create_rx_sa *c_rx_sa; -+ struct macsec_priv_s *selected_macsec_priv; -+ struct rx_sc_dev *selected_rx_sc_dev; -+ macsec_sa_key_t sak; -+ int rv; -+ -+ c_rx_sa = &(gen->payload.c_rx_sa); -+ -+ if (unlikely(c_rx_sa->sak_len > 32)) { -+ pr_warn("size of secure key is greater than 32 bytes!\n"); -+ return -EINVAL; -+ } -+ rv = copy_from_user(&sak, -+ c_rx_sa->sak, -+ c_rx_sa->sak_len); -+ if (unlikely(rv != 0)) { -+ pr_err("copy_from_user could not copy %i bytes\n", rv); -+ return -EFAULT; -+ } -+ -+ if (c_rx_sa->macsec_id < 0 || -+ c_rx_sa->macsec_id >= FM_MAX_NUM_OF_MACS) -+ return -EINVAL; -+ -+ selected_macsec_priv = macsec_priv[c_rx_sa->macsec_id]; -+ -+ if (c_rx_sa->rx_sc_id < 0 || c_rx_sa->rx_sc_id >= NUM_OF_RX_SC) -+ return -EINVAL; -+ -+ selected_rx_sc_dev = selected_macsec_priv->rx_sc_dev[c_rx_sa->rx_sc_id]; -+ -+ rv = fm_macsec_secy_create_rx_sa(selected_macsec_priv->fm_ms_secy, -+ selected_rx_sc_dev, -+ c_rx_sa->an, -+ c_rx_sa->lpn, -+ sak); -+ if (unlikely(rv < 0)) { -+ pr_err("fm_macsec_secy_create_rx_sa failed\n"); -+ return -EBUSY; -+ } -+ -+ return 0; -+} -+ -+static int modify_rx_sa_key(struct generic_msg *gen) -+{ -+ struct modify_rx_sa_key *rx_sa_key; -+ struct macsec_priv_s *selected_macsec_priv; -+ struct rx_sc_dev *selected_rx_sc; -+ macsec_sa_key_t sa_key; -+ int rv; -+ -+ rx_sa_key = &(gen->payload.modify_rx_sa_key); -+ -+ if (rx_sa_key->macsec_id < 0 || -+ rx_sa_key->macsec_id >= FM_MAX_NUM_OF_MACS) -+ return -EINVAL; -+ selected_macsec_priv = 
macsec_priv[rx_sa_key->macsec_id]; -+ -+ if (rx_sa_key->rx_sc_id < 0 || rx_sa_key->rx_sc_id >= NUM_OF_RX_SC) -+ return -EINVAL; -+ selected_rx_sc = selected_macsec_priv->rx_sc_dev[rx_sa_key->rx_sc_id]; -+ -+ /* set macsec_priv field */ -+ selected_macsec_priv->an = rx_sa_key->an; -+ -+ if (unlikely(rx_sa_key->sak_len > 32)) { -+ pr_warn("size of secure key is greater than 32 bytes!\n"); -+ kfree(rx_sa_key); -+ return -EINVAL; -+ } -+ -+ rv = copy_from_user(&sa_key, -+ rx_sa_key->sak, -+ rx_sa_key->sak_len); -+ if (unlikely(rv != 0)) { -+ pr_err("copy_from_user could not copy %i bytes\n", rv); -+ return -EFAULT; -+ } -+ -+ rv = fm_macsec_secy_rxsa_modify_key(selected_macsec_priv->fm_ms_secy, -+ selected_rx_sc, -+ rx_sa_key->an, -+ sa_key); -+ if (unlikely(rv < 0)) -+ pr_err("error while modifying the rx sa key\n"); -+ -+ return rv; -+} -+ -+static int update_npn(struct generic_msg *gen) -+{ -+ struct update_npn *update_npn; -+ struct macsec_priv_s *selected_macsec_priv; -+ struct rx_sc_dev *selected_rx_sc_dev; -+ int err; -+ -+ update_npn = &(gen->payload.update_npn); -+ -+ if (update_npn->macsec_id < 0 || -+ update_npn->macsec_id >= FM_MAX_NUM_OF_MACS) -+ return -EINVAL; -+ selected_macsec_priv = macsec_priv[update_npn->macsec_id]; -+ -+ if (update_npn->rx_sc_id < 0 || update_npn->rx_sc_id >= NUM_OF_RX_SC) -+ return -EINVAL; -+ -+ selected_rx_sc_dev = -+ selected_macsec_priv->rx_sc_dev[update_npn->rx_sc_id]; -+ -+ err = fm_macsec_secy_rxsa_update_next_pn( -+ selected_macsec_priv->fm_ms_secy, -+ selected_rx_sc_dev, -+ update_npn->an, -+ update_npn->pn); -+ if (unlikely(err < 0)) { -+ pr_err("fm_macsec_secy_rxsa_update_next_pn failed\n"); -+ return -EBUSY; -+ } -+ -+ return 0; -+} -+ -+static int update_lpn(struct generic_msg *gen) -+{ -+ struct update_lpn *update_lpn; -+ struct macsec_priv_s *selected_macsec_priv; -+ struct rx_sc_dev *selected_rx_sc_dev; -+ int err; -+ -+ update_lpn = &(gen->payload.update_lpn); -+ -+ if (update_lpn->macsec_id < 0 || -+ update_lpn->macsec_id >= FM_MAX_NUM_OF_MACS) -+ return -EINVAL; -+ selected_macsec_priv = macsec_priv[update_lpn->macsec_id]; -+ -+ if (update_lpn->rx_sc_id < 0 || update_lpn->rx_sc_id >= NUM_OF_RX_SC) -+ return -EINVAL; -+ selected_rx_sc_dev = -+ selected_macsec_priv->rx_sc_dev[update_lpn->rx_sc_id]; -+ -+ err = fm_macsec_secy_rxsa_update_lowest_pn( -+ selected_macsec_priv->fm_ms_secy, -+ selected_rx_sc_dev, -+ update_lpn->an, -+ update_lpn->pn); -+ if (unlikely(err < 0)) { -+ pr_err("fm_macsec_secy_rxsa_update_lowest_pn failed\n"); -+ return -EBUSY; -+ } -+ -+ return 0; -+} -+ -+static int activate_rx_sa(struct generic_msg *gen) -+{ -+ struct activate_rx_sa *a_rx_sa; -+ struct macsec_priv_s *selected_macsec_priv; -+ struct rx_sc_dev *selected_rx_sc_dev; -+ int err; -+ -+ a_rx_sa = &(gen->payload.a_rx_sa); -+ -+ if (a_rx_sa->macsec_id < 0 || -+ a_rx_sa->macsec_id >= FM_MAX_NUM_OF_MACS) { -+ return -EINVAL; -+ } -+ selected_macsec_priv = macsec_priv[a_rx_sa->macsec_id]; -+ -+ if (a_rx_sa->rx_sc_id < 0 || a_rx_sa->rx_sc_id >= NUM_OF_RX_SC) -+ return -EINVAL; -+ selected_rx_sc_dev = selected_macsec_priv->rx_sc_dev[a_rx_sa->rx_sc_id]; -+ -+ err = fm_macsec_secy_rxsa_enable_receive( -+ selected_macsec_priv->fm_ms_secy, -+ selected_rx_sc_dev, -+ a_rx_sa->an); -+ if (unlikely(err < 0)) { -+ pr_err("fm_macsec_secy_rxsa_enable_receive failed\n"); -+ return -EBUSY; -+ } -+ -+ return 0; -+} -+ -+static int get_tx_sc_phys_id(struct generic_msg *gen, uint32_t *sc_id) -+{ -+ struct macsec_priv_s *selected_macsec_priv; -+ int err; -+ -+ if 
(gen->payload.macsec_id < 0 || -+ gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) { -+ return -EINVAL; -+ } -+ selected_macsec_priv = macsec_priv[gen->payload.macsec_id]; -+ -+ err = fm_macsec_secy_get_txsc_phys_id(selected_macsec_priv->fm_ms_secy, -+ sc_id); -+ -+ if (unlikely(err < 0)) { -+ pr_err("fm_macsec_secy_get_txsc_phys_id failed\n"); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int get_rx_sc_phys_id(struct generic_msg *gen, uint32_t *sc_id) -+{ -+ struct get_rx_sc_id *get_rx_sc_id; -+ struct macsec_priv_s *selected_macsec_priv; -+ struct rx_sc_dev *selected_rx_sc_dev; -+ int err; -+ -+ get_rx_sc_id = &(gen->payload.get_rx_sc_id); -+ -+ if (get_rx_sc_id->macsec_id < 0 || -+ get_rx_sc_id->macsec_id >= FM_MAX_NUM_OF_MACS) { -+ return -EINVAL; -+ } -+ selected_macsec_priv = macsec_priv[get_rx_sc_id->macsec_id]; -+ -+ if (get_rx_sc_id->rx_sc_id < 0 || -+ get_rx_sc_id->rx_sc_id >= NUM_OF_RX_SC) -+ return -EINVAL; -+ selected_rx_sc_dev = -+ selected_macsec_priv->rx_sc_dev[get_rx_sc_id->rx_sc_id]; -+ -+ err = fm_macsec_secy_get_rxsc_phys_id(selected_macsec_priv->fm_ms_secy, -+ selected_rx_sc_dev, -+ sc_id); -+ if (unlikely(err < 0)) { -+ pr_err("fm_macsec_secy_get_rxsc_phys_id failed\n"); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int get_macsec_revision(struct generic_msg *gen, int *macsec_revision) -+{ -+ struct macsec_priv_s *selected_macsec_priv; -+ int err; -+ -+ if (gen->payload.macsec_id < 0 || -+ gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) { -+ return -EINVAL; -+ } -+ selected_macsec_priv = macsec_priv[gen->payload.macsec_id]; -+ -+ err = fm_macsec_get_revision(selected_macsec_priv->fm_macsec, -+ macsec_revision); -+ if (unlikely(err < 0)) { -+ pr_err("fm_macsec_get_revision failed\n"); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int rx_sa_disable(struct generic_msg *gen) -+{ -+ struct disable_rx_sa *disable_rx_sa; -+ struct macsec_priv_s *selected_macsec_priv; -+ struct rx_sc_dev *selected_rx_sc_dev; -+ int err; -+ -+ disable_rx_sa = &(gen->payload.d_rx_sa); -+ -+ if (disable_rx_sa->macsec_id < 0 || -+ disable_rx_sa->macsec_id >= FM_MAX_NUM_OF_MACS) { -+ return -EINVAL; -+ } -+ selected_macsec_priv = macsec_priv[disable_rx_sa->macsec_id]; -+ -+ if (disable_rx_sa->rx_sc_id < 0 || -+ disable_rx_sa->rx_sc_id >= NUM_OF_RX_SC) -+ return -EINVAL; -+ selected_rx_sc_dev = -+ selected_macsec_priv->rx_sc_dev[disable_rx_sa->rx_sc_id]; -+ -+ err = fm_macsec_secy_rxsa_disable_receive( -+ selected_macsec_priv->fm_ms_secy, -+ selected_rx_sc_dev, -+ selected_macsec_priv->an); -+ -+ if (unlikely(err < 0)) { -+ pr_err("fm_macsec_secy_rxsa_disable_receive failed\n"); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int rx_sa_delete(struct generic_msg *gen) -+{ -+ struct delete_rx_sa *delete_rx_sa; -+ struct macsec_priv_s *selected_macsec_priv; -+ struct rx_sc_dev *selected_rx_sc_dev; -+ int err; -+ -+ delete_rx_sa = &(gen->payload.del_rx_sa); -+ -+ if (delete_rx_sa->macsec_id < 0 || -+ delete_rx_sa->macsec_id >= FM_MAX_NUM_OF_MACS) { -+ return -EINVAL; -+ } -+ selected_macsec_priv = macsec_priv[delete_rx_sa->macsec_id]; -+ -+ if (delete_rx_sa->rx_sc_id < 0 || -+ delete_rx_sa->rx_sc_id >= NUM_OF_RX_SC) -+ return -EINVAL; -+ selected_rx_sc_dev = -+ selected_macsec_priv->rx_sc_dev[delete_rx_sa->rx_sc_id]; -+ -+ err = fm_macsec_secy_delete_rx_sa(selected_macsec_priv->fm_ms_secy, -+ selected_rx_sc_dev, -+ selected_macsec_priv->an); -+ -+ if (unlikely(err < 0)) { -+ pr_err("fm_macsec_secy_delete_rx_sa failed\n"); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int 
rx_sc_delete(struct generic_msg *gen) -+{ -+ struct delete_rx_sc *delete_rx_sc; -+ struct macsec_priv_s *selected_macsec_priv; -+ struct rx_sc_dev *selected_rx_sc_dev; -+ int err; -+ -+ delete_rx_sc = &(gen->payload.del_rx_sc); -+ -+ if (delete_rx_sc->macsec_id < 0 || -+ delete_rx_sc->macsec_id >= FM_MAX_NUM_OF_MACS) { -+ return -EINVAL; -+ } -+ selected_macsec_priv = macsec_priv[delete_rx_sc->macsec_id]; -+ -+ if (delete_rx_sc->rx_sc_id < 0 || -+ delete_rx_sc->rx_sc_id >= NUM_OF_RX_SC) -+ return -EINVAL; -+ selected_rx_sc_dev = -+ selected_macsec_priv->rx_sc_dev[delete_rx_sc->rx_sc_id]; -+ -+ err = fm_macsec_secy_delete_rxsc(selected_macsec_priv->fm_ms_secy, -+ selected_rx_sc_dev); -+ -+ if (unlikely(err < 0)) { -+ pr_err("fm_macsec_secy_delete_rxsc failed\n"); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int tx_sa_delete(struct generic_msg *gen) -+{ -+ struct macsec_priv_s *selected_macsec_priv; -+ int err; -+ -+ if (gen->payload.del_tx_sa.macsec_id < 0 || -+ gen->payload.del_tx_sa.macsec_id >= FM_MAX_NUM_OF_MACS) { -+ return -EINVAL; -+ } -+ selected_macsec_priv = macsec_priv[gen->payload.del_tx_sa.macsec_id]; -+ -+ err = fm_macsec_secy_delete_tx_sa(selected_macsec_priv->fm_ms_secy, -+ selected_macsec_priv->an); -+ -+ if (unlikely(err < 0)) { -+ pr_err("fm_macsec_secy_delete_tx_sa failed\n"); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int disable_secy(struct generic_msg *gen, int *macsec_id) -+{ -+ struct macsec_priv_s *selected_macsec_priv; -+ int err; -+ -+ if (gen->payload.macsec_id < 0 || -+ gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) { -+ return -EINVAL; -+ } -+ selected_macsec_priv = macsec_priv[gen->payload.macsec_id]; -+ *macsec_id = gen->payload.macsec_id; -+ -+ err = fm_macsec_secy_free(selected_macsec_priv->fm_ms_secy); -+ selected_macsec_priv->fm_ms_secy = NULL; -+ -+ if (unlikely(err < 0)) { -+ pr_err("fm_macsec_secy_free failed\n"); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int disable_macsec(struct generic_msg *gen, int *macsec_id) -+{ -+ struct macsec_priv_s *selected_macsec_priv; -+ int err; -+ -+ if (gen->payload.macsec_id < 0 || -+ gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) { -+ return -EINVAL; -+ } -+ -+ selected_macsec_priv = -+ macsec_priv[gen->payload.macsec_id]; -+ *macsec_id = gen->payload.macsec_id; -+ -+ err = fm_macsec_disable(selected_macsec_priv->fm_macsec); -+ err += fm_macsec_free(selected_macsec_priv->fm_macsec); -+ selected_macsec_priv->fm_macsec = NULL; -+ -+ if (unlikely(err < 0)) { -+ pr_err("macsec disable failed\n"); -+ return err; -+ } -+ -+ return 0; -+ -+} -+ -+static int disable_all(struct generic_msg *gen, int *macsec_id) -+{ -+ struct macsec_priv_s *selected_macsec_priv; -+ struct rx_sc_dev *selected_rx_sc_dev; -+ int err = 0, i; -+ -+ if (gen->payload.macsec_id < 0 || -+ gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) { -+ return -EINVAL; -+ } -+ -+ selected_macsec_priv = macsec_priv[gen->payload.macsec_id]; -+ *macsec_id = gen->payload.macsec_id; -+ -+ for (i = 0; i < NUM_OF_RX_SC; i++) { -+ selected_rx_sc_dev = selected_macsec_priv->rx_sc_dev[i]; -+ -+ if (!selected_rx_sc_dev) -+ continue; -+ -+ err += fm_macsec_secy_rxsa_disable_receive( -+ selected_macsec_priv->fm_ms_secy, -+ selected_rx_sc_dev, -+ selected_macsec_priv->an); -+ -+ err += fm_macsec_secy_delete_rx_sa( -+ selected_macsec_priv->fm_ms_secy, -+ selected_rx_sc_dev, -+ selected_macsec_priv->an); -+ -+ err += fm_macsec_secy_delete_rxsc( -+ selected_macsec_priv->fm_ms_secy, -+ selected_rx_sc_dev); -+ } -+ -+ err += fm_macsec_secy_delete_tx_sa( -+ 
selected_macsec_priv->fm_ms_secy, -+ selected_macsec_priv->an); -+ -+ err += fm_macsec_secy_free(selected_macsec_priv->fm_ms_secy); -+ selected_macsec_priv->fm_ms_secy = NULL; -+ -+ err += fm_macsec_disable(selected_macsec_priv->fm_macsec); -+ -+ err += fm_macsec_free(selected_macsec_priv->fm_macsec); -+ selected_macsec_priv->fm_macsec = NULL; -+ -+ if (unlikely(err < 0)) { -+ pr_err("macsec disable failed\n"); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static inline void macsec_setup_ingress(struct macsec_priv_s *macsec_priv, -+ struct dpa_fq *fq, -+ const struct qman_fq *template) -+{ -+ fq->fq_base = *template; -+ fq->net_dev = macsec_priv->net_dev; -+ -+ fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE; -+ fq->channel = macsec_priv->channel; -+} -+ -+static inline void macsec_setup_egress(struct macsec_priv_s *macsec_priv, -+ struct dpa_fq *fq, -+ struct fm_port *port, -+ const struct qman_fq *template) -+{ -+ fq->fq_base = *template; -+ fq->net_dev = macsec_priv->net_dev; -+ -+ if (port) { -+ fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL; -+ fq->channel = (uint16_t)fm_get_tx_port_channel(port); -+ } else { -+ fq->flags = QMAN_FQ_FLAG_NO_MODIFY; -+ } -+} -+ -+/* At the moment, we don't create recycle queues. */ -+static void macsec_fq_setup(struct macsec_priv_s *macsec_priv, -+ const struct dpa_fq_cbs_t *fq_cbs, -+ struct fm_port *tx_port) -+{ -+ struct dpa_fq *fq; -+ int egress_cnt = 0, conf_cnt = 0; -+ -+ /* Initialize each FQ in the list */ -+ list_for_each_entry(fq, &macsec_priv->dpa_fq_list, list) { -+ switch (fq->fq_type) { -+ /* Normal TX queues */ -+ case FQ_TYPE_TX: -+ macsec_setup_egress(macsec_priv, fq, tx_port, -+ &fq_cbs->egress_ern); -+ /* If we have more Tx queues than the number of cores, -+ * just ignore the extra ones. -+ */ -+ if (egress_cnt < MACSEC_ETH_TX_QUEUES) -+ macsec_priv->egress_fqs[egress_cnt++] = -+ &fq->fq_base; -+ break; -+ case FQ_TYPE_TX_CONFIRM: -+ BUG_ON(!macsec_priv->mac_dev); -+ macsec_setup_ingress(macsec_priv, fq, &fq_cbs->tx_defq); -+ break; -+ /* TX confirm multiple queues */ -+ case FQ_TYPE_TX_CONF_MQ: -+ BUG_ON(!macsec_priv->mac_dev); -+ macsec_setup_ingress(macsec_priv, fq, &fq_cbs->tx_defq); -+ macsec_priv->conf_fqs[conf_cnt++] = &fq->fq_base; -+ break; -+ case FQ_TYPE_TX_ERROR: -+ BUG_ON(!macsec_priv->mac_dev); -+ macsec_setup_ingress(macsec_priv, fq, &fq_cbs->tx_errq); -+ break; -+ default: -+ dev_warn(macsec_priv->net_dev->dev.parent, -+ "Unknown FQ type detected!\n"); -+ break; -+ } -+ } -+ -+ /* The number of Tx queues may be smaller than the number of cores, if -+ * the Tx queue range is specified in the device tree instead of being -+ * dynamically allocated. -+ * Make sure all CPUs receive a corresponding Tx queue. -+ */ -+ while (egress_cnt < MACSEC_ETH_TX_QUEUES) { -+ list_for_each_entry(fq, &macsec_priv->dpa_fq_list, list) { -+ if (fq->fq_type != FQ_TYPE_TX) -+ continue; -+ macsec_priv->egress_fqs[egress_cnt++] = &fq->fq_base; -+ if (egress_cnt == MACSEC_ETH_TX_QUEUES) -+ break; -+ } -+ } -+ -+} -+ -+static const struct fqid_cell tx_fqids[] = { -+ {0, MACSEC_ETH_TX_QUEUES} -+}; -+ -+static const struct fqid_cell tx_confirm_fqids[] = { -+ {0, MACSEC_ETH_TX_QUEUES} -+}; -+ -+/* Allocate percpu priv. This is used to keep track of rx and tx packets on -+ * each cpu (take into consideration that the number of queues is equal to the -+ * number of cpus, so there is one queue/cpu). 
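macsec_fq_setup() above first hands each Tx FQ it finds to an egress slot, then walks the list again so that all MACSEC_ETH_TX_QUEUES slots (one per core) end up pointing at some queue even when the device tree supplied fewer FQs than cores. Reduced to its core, that second pass is a wrap-around fill, sketched here with hypothetical names:

/* Fill nr_slots entries from a smaller pool of nr_fqs queues by wrapping
 * around, so slot i effectively gets queue i % nr_fqs.
 */
static void example_fill_slots(struct qman_fq **slots, int nr_slots,
			       struct qman_fq **fqs, int nr_fqs)
{
	int i;

	for (i = 0; i < nr_slots; i++)
		slots[i] = fqs[i % nr_fqs];
}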
-+ */
-+static void alloc_priv(struct macsec_percpu_priv_s *percpu_priv,
-+		struct macsec_priv_s *macsec_priv, struct device *dev)
-+{
-+	int i;
-+
-+	macsec_priv->percpu_priv = alloc_percpu(*macsec_priv->percpu_priv);
-+
-+	if (unlikely(macsec_priv->percpu_priv == NULL)) {
-+		dev_err(dev, "alloc_percpu() failed\n");
-+		dpa_fq_free(dev, &macsec_priv->dpa_fq_list);
-+		return;
-+	}
-+
-+	for_each_possible_cpu(i) {
-+		percpu_priv = per_cpu_ptr(macsec_priv->percpu_priv, i);
-+		memset(percpu_priv, 0, sizeof(*percpu_priv));
-+	}
-+}
-+
-+/* On RX we only need to record whether each frame was encrypted or not.
-+ * Statistics regarding this will be printed in a log file.
-+ */
-+static int macsec_rx_hook(void *ptr, struct net_device *net_dev, u32 fqid)
-+{
-+	struct qm_fd *rx_fd = (struct qm_fd *)ptr;
-+	struct macsec_percpu_priv_s *percpu_priv_m;
-+	struct macsec_priv_s *selected_macsec_priv;
-+
-+	selected_macsec_priv = macsec_priv[net_dev->ifindex - 1];
-+
-+	percpu_priv_m = raw_cpu_ptr(selected_macsec_priv->percpu_priv);
-+
-+	if ((rx_fd->status & FM_FD_STAT_RX_MACSEC) != 0) {
-+		if (netif_msg_hw(selected_macsec_priv) && net_ratelimit())
-+			netdev_warn(net_dev, "FD status = 0x%08x\n",
-+				rx_fd->status & FM_FD_STAT_RX_MACSEC);
-+		percpu_priv_m->rx_macsec++;
-+	}
-+
-+	return DPAA_ETH_CONTINUE;
-+}
-+
-+/* Split TX traffic: if encryption is enabled, send packets on the dedicated
-+ * QMan frame queues; otherwise, let them be handled by the dpa_eth driver.
-+ * Also, keep track of the number of packets that leave through the "macsec"
-+ * queues.
-+ */
-+static enum dpaa_eth_hook_result macsec_tx_hook(struct sk_buff *skb,
-+		struct net_device *net_dev)
-+{
-+	struct dpa_priv_s *dpa_priv;
-+	struct qm_fd fd;
-+	struct macsec_percpu_priv_s *macsec_percpu_priv;
-+	struct dpa_percpu_priv_s *dpa_percpu_priv;
-+	int i, err = 0;
-+	int *countptr, offset = 0;
-+	const bool nonlinear = skb_is_nonlinear(skb);
-+	struct qman_fq *egress_fq;
-+	struct macsec_priv_s *selected_macsec_priv;
-+
-+	selected_macsec_priv = macsec_priv[net_dev->ifindex - 1];
-+
-+	if (!selected_macsec_priv->net_dev ||
-+		(selected_macsec_priv->en_state != SECY_ENABLED) ||
-+		(ntohs(skb->protocol) == ETH_P_PAE))
-+		return DPAA_ETH_CONTINUE;
-+
-+	dpa_priv = netdev_priv(net_dev);
-+	/* Non-migratable context, safe to use raw_cpu_ptr */
-+	macsec_percpu_priv = raw_cpu_ptr(selected_macsec_priv->percpu_priv);
-+	dpa_percpu_priv = raw_cpu_ptr(dpa_priv->percpu_priv);
-+
-+	countptr = raw_cpu_ptr(dpa_priv->dpa_bp->percpu_count);
-+
-+	clear_fd(&fd);
-+
-+#ifdef CONFIG_FSL_DPAA_1588
-+	if (dpa_priv->tsu && dpa_priv->tsu->valid &&
-+		dpa_priv->tsu->hwts_tx_en_ioctl)
-+		fd.cmd |= FM_FD_CMD_UPD;
-+#endif
-+#ifdef CONFIG_FSL_DPAA_TS
-+	if (unlikely(dpa_priv->ts_tx_en &&
-+		skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
-+		fd.cmd |= FM_FD_CMD_UPD;
-+	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-+#endif /* CONFIG_FSL_DPAA_TS */
-+
-+	/* MAX_SKB_FRAGS is larger than our DPA_SGT_MAX_ENTRIES; make sure
-+	 * we don't feed FMan with more fragments than it supports.
-+	 * Btw, we're using the first sgt entry to store the linear part of
-+	 * the skb, so we're one extra frag short.
-+	 */
-+	if (nonlinear &&
-+		likely(skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES)) {
-+		/* Just create a S/G fd based on the skb */
-+		err = skb_to_sg_fd(dpa_priv, skb, &fd);
-+		dpa_percpu_priv->tx_frag_skbuffs++;
-+	} else {
-+		/* Make sure we have enough headroom to accommodate private
-+		 * data, parse results, etc. Normally this shouldn't happen
-+		 * if we're here via the standard kernel stack.
-+		 */
-+		if (unlikely(skb_headroom(skb) < dpa_priv->tx_headroom)) {
-+			struct sk_buff *skb_new;
-+
-+			skb_new = skb_realloc_headroom(skb,
-+					dpa_priv->tx_headroom);
-+			if (unlikely(!skb_new)) {
-+				dev_kfree_skb(skb);
-+				dpa_percpu_priv->stats.tx_errors++;
-+				return DPAA_ETH_STOLEN;
-+			}
-+			dev_kfree_skb(skb);
-+			skb = skb_new;
-+		}
-+
-+		/* We're going to store the skb backpointer at the beginning
-+		 * of the data buffer, so we need a privately owned skb
-+		 */
-+
-+		/* Code borrowed from skb_unshare(). */
-+		if (skb_cloned(skb)) {
-+			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
-+			kfree_skb(skb);
-+			skb = nskb;
-+			/* skb_copy() has now linearized the skbuff. */
-+		} else if (unlikely(nonlinear)) {
-+			/* We are here because the egress skb contains
-+			 * more fragments than we support. In this case,
-+			 * we have no choice but to linearize it ourselves.
-+			 */
-+			err = __skb_linearize(skb);
-+		}
-+		if (unlikely(!skb || err < 0)) {
-+			/* Common out-of-memory error path */
-+			goto enomem;
-+		}
-+
-+		/* Finally, create a contig FD from this skb */
-+		err = skb_to_contig_fd(dpa_priv, skb, &fd, countptr, &offset);
-+	}
-+	if (unlikely(err < 0))
-+		goto skb_to_fd_failed;
-+
-+	if (fd.bpid != 0xff) {
-+		skb_recycle(skb);
-+		/* skb_recycle() reserves NET_SKB_PAD as skb headroom,
-+		 * but we need the skb to look as if returned by build_skb().
-+		 * We need to manually adjust the tailptr as well.
-+		 */
-+		skb->data = skb->head + offset;
-+		skb_reset_tail_pointer(skb);
-+
-+		(*countptr)++;
-+		dpa_percpu_priv->tx_returned++;
-+	}
-+
-+	egress_fq = selected_macsec_priv->egress_fqs[smp_processor_id()];
-+	if (fd.bpid == 0xff)
-+		fd.cmd |= qman_fq_fqid(macsec_get_tx_conf_queue(
-+					selected_macsec_priv,
-+					egress_fq));
-+
-+	for (i = 0; i < 100000; i++) {
-+		err = qman_enqueue(egress_fq, &fd, 0);
-+		if (err != -EBUSY)
-+			break;
-+	}
-+
-+	if (unlikely(err < 0)) {
-+		dpa_percpu_priv->stats.tx_errors++;
-+		dpa_percpu_priv->stats.tx_fifo_errors++;
-+		goto xmit_failed;
-+	}
-+
-+	macsec_percpu_priv->tx_macsec++;
-+	dpa_percpu_priv->stats.tx_packets++;
-+	dpa_percpu_priv->stats.tx_bytes += dpa_fd_length(&fd);
-+
-+	net_dev->trans_start = jiffies;
-+	return DPAA_ETH_STOLEN;
-+
-+xmit_failed:
-+	if (fd.bpid != 0xff) {
-+		(*countptr)--;
-+		dpa_percpu_priv->tx_returned--;
-+		dpa_fd_release(net_dev, &fd);
-+		dpa_percpu_priv->stats.tx_errors++;
-+		return DPAA_ETH_STOLEN;
-+	}
-+	_dpa_cleanup_tx_fd(dpa_priv, &fd);
-+skb_to_fd_failed:
-+enomem:
-+	dpa_percpu_priv->stats.tx_errors++;
-+	dev_kfree_skb(skb);
-+	return DPAA_ETH_STOLEN;
-+}
-+
-+/* Allocate and initialize macsec priv and fqs. Also, create a debugfs entry
-+ * for a specific interface. Iterate through the existing devices in order to
-+ * find the ones we want to have macsec on.
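-+ * Note: the lookup below assumes a DPAA interface's macsec id equals
-+ * its ifindex - 1; the RX/TX hooks above rely on the same mapping.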
-+ */
-+static int macsec_setup(void)
-+{
-+	struct net_device *net_dev;
-+	struct macsec_percpu_priv_s *percpu_priv = NULL;
-+	struct dpa_priv_s *dpa_priv = NULL;
-+	struct dpa_fq *dpa_fq;
-+	struct device *dev = NULL;
-+	int err, i, j, macsec_id;
-+
-+	pr_debug("Entering: %s\n", __func__);
-+
-+	for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
-+		macsec_priv[i] = kzalloc(sizeof(*(macsec_priv[i])), GFP_KERNEL);
-+
-+		if (unlikely(macsec_priv[i] == NULL)) {
-+			for (j = 0; j < i; j++)
-+				kfree(macsec_priv[j]);
-+			pr_err("could not allocate\n");
-+			return -ENOMEM;
-+		}
-+	}
-+
-+	for (i = 0; i < macsec_ifs_cnt; i++) {
-+		net_dev = first_net_device(&init_net);
-+		macsec_id = net_dev->ifindex - 1;
-+		while (net_dev) {
-+			macsec_id = net_dev->ifindex - 1;
-+
-+			/* to maintain code readability and less than
-+			 * 80 characters per line
-+			 */
-+			if (strcmp(net_dev->name, macsec_ifs[i]) != 0) {
-+				net_dev = next_net_device(net_dev);
-+				continue;
-+			}
-+
-+			/* strcmp(net_dev->name, macsec_ifs[i]) == 0 */
-+			macsec_priv[macsec_id]->en_state = MACSEC_DISABLED;
-+			macsec_priv[macsec_id]->net_dev = net_dev;
-+			dpa_priv = netdev_priv(net_dev);
-+			macsec_priv[macsec_id]->mac_dev = dpa_priv->mac_dev;
-+			macsec_priv[macsec_id]->channel = dpa_priv->channel;
-+			dev = net_dev->dev.parent;
-+
-+			INIT_LIST_HEAD(&macsec_priv[macsec_id]->dpa_fq_list);
-+
-+			dpa_fq = dpa_fq_alloc(dev,
-+				tx_fqids->start, tx_fqids->count,
-+				&macsec_priv[macsec_id]->dpa_fq_list,
-+				FQ_TYPE_TX);
-+			if (unlikely(dpa_fq == NULL)) {
-+				dev_err(dev, "dpa_fq_alloc() failed\n");
-+				err = -ENOMEM;
-+				goto _error;
-+			}
-+
-+			dpa_fq = dpa_fq_alloc(dev,
-+				tx_confirm_fqids->start,
-+				tx_confirm_fqids->count,
-+				&macsec_priv[macsec_id]->dpa_fq_list,
-+				FQ_TYPE_TX_CONF_MQ);
-+			if (unlikely(dpa_fq == NULL)) {
-+				dev_err(dev, "dpa_fq_alloc() failed\n");
-+				err = -ENOMEM;
-+				goto _error;
-+			}
-+
-+			macsec_fq_setup(macsec_priv[macsec_id], &private_fq_cbs,
-+				macsec_priv[macsec_id]->mac_dev->port_dev[TX]);
-+
-+			alloc_priv(percpu_priv, macsec_priv[macsec_id], dev);
-+
-+			break;
-+		}
-+		if (macsec_priv[macsec_id]->net_dev == NULL) {
-+			pr_err("Interface unknown\n");
-+			err = -EINVAL;
-+			goto _error;
-+		}
-+
-+		/* setup specific ethtool ops for macsec */
-+		macsec_setup_ethtool_ops(net_dev);
-+	}
-+	return 0;
-+
-+_error:
-+	for (j = 0; j < i; j++) {
-+		net_dev = first_net_device(&init_net);
-+		while (net_dev) {
-+			macsec_id = net_dev->ifindex - 1;
-+			if (strcmp(net_dev->name, macsec_ifs[j]) != 0) {
-+				net_dev = next_net_device(net_dev);
-+				continue;
-+			}
-+			dpa_fq_free(net_dev->dev.parent,
-+				&macsec_priv[macsec_id]->dpa_fq_list);
-+			break;
-+		}
-+		macsec_restore_ethtool_ops(macsec_priv[j]->net_dev);
-+		kfree(macsec_priv[j]);
-+	}
-+	for (j = i; j < FM_MAX_NUM_OF_MACS; j++)
-+		kfree(macsec_priv[j]);
-+	return err;
-+}
-+
-+static int enable_macsec(struct generic_msg *gen)
-+{
-+	struct fm_macsec_params macsec_params;
-+	int rv, macsec_id;
-+	void __iomem *mac_dev_base_addr;
-+	uintptr_t macsec_reg_addr;
-+	struct macsec_data *mdata;
-+	char if_name[IFNAMSIZ];
-+	struct macsec_priv_s *selected_macsec_priv;
-+
-+	mdata = &gen->payload.en_macsec;
-+
-+	if (unlikely(mdata->if_name_length > IFNAMSIZ)) {
-+		pr_err("interface name too long\n");
-+		return -EINVAL;
-+	}
-+
-+	rv = copy_from_user(if_name, mdata->if_name, mdata->if_name_length);
-+	if (unlikely(rv != 0)) {
-+		pr_err("copy_from_user could not copy %i bytes\n", rv);
-+		return -EFAULT;
-+	}
-+
-+	macsec_id = ifname_to_id(if_name);
-+	if (macsec_id < 0 || macsec_id >= FM_MAX_NUM_OF_MACS) {
-+		pr_err("error on converting to
macsec_id\n"); -+ return -ENXIO; -+ } -+ -+ selected_macsec_priv = macsec_priv[macsec_id]; -+ -+ if (selected_macsec_priv->fm_macsec) { -+ pr_err("macsec has already been configured\n"); -+ return -EINVAL; -+ } -+ -+ mac_dev_base_addr = selected_macsec_priv->mac_dev->vaddr; -+ -+ macsec_reg_addr = (uintptr_t)(mac_dev_base_addr + MACSEC_REG_OFFSET); -+ -+ memset(&macsec_params, 0, sizeof(macsec_params)); -+ macsec_params.fm_h = (handle_t)selected_macsec_priv->mac_dev->fm; -+ macsec_params.guest_mode = FALSE; -+ /* The MACsec offset relative to the memory mapped MAC device */ -+ macsec_params.non_guest_params.base_addr = macsec_reg_addr; -+ macsec_params.non_guest_params.fm_mac_h = -+ (handle_t)selected_macsec_priv->mac_dev->get_mac_handle( -+ selected_macsec_priv->mac_dev); -+ macsec_params.non_guest_params.exception_f = macsec_exception; -+ macsec_params.non_guest_params.app_h = selected_macsec_priv->mac_dev; -+ -+ selected_macsec_priv->fm_macsec = fm_macsec_config(&macsec_params); -+ if (unlikely(selected_macsec_priv->fm_macsec == NULL)) -+ return -EINVAL; -+ -+ if (mdata->config_unknown_sci_treatment) { -+ rv = fm_macsec_config_unknown_sci_frame_treatment( -+ selected_macsec_priv->fm_macsec, -+ mdata->unknown_sci_treatment); -+ if (unlikely(rv < 0)) -+ goto _return_fm_macsec_free; -+ } -+ -+ if (mdata->config_invalid_tag_treatment) { -+ rv = fm_macsec_config_invalid_tags_frame_treatment( -+ selected_macsec_priv->fm_macsec, -+ mdata->deliver_uncontrolled); -+ if (unlikely(rv < 0)) -+ goto _return_fm_macsec_free; -+ } -+ -+ if (mdata->config_kay_frame_treatment) { -+ rv = fm_macsec_config_kay_frame_treatment( -+ selected_macsec_priv->fm_macsec, -+ mdata->discard_uncontrolled); -+ if (unlikely(rv < 0)) -+ goto _return_fm_macsec_free; -+ } -+ -+ if (mdata->config_untag_treatment) { -+ rv = fm_macsec_config_untag_frame_treatment( -+ selected_macsec_priv->fm_macsec, -+ mdata->untag_treatment); -+ if (unlikely(rv < 0)) -+ goto _return_fm_macsec_free; -+ } -+ -+ if (mdata->config_pn_exhaustion_threshold) { -+ rv = fm_macsec_config_pn_exhaustion_threshold( -+ selected_macsec_priv->fm_macsec, -+ mdata->pn_threshold); -+ if (unlikely(rv < 0)) -+ goto _return_fm_macsec_free; -+ } -+ -+ if (mdata->config_keys_unreadable) { -+ rv = fm_macsec_config_keys_unreadable( -+ selected_macsec_priv->fm_macsec); -+ if (unlikely(rv < 0)) -+ goto _return_fm_macsec_free; -+ } -+ -+ if (mdata->config_sectag_without_sci) { -+ rv = fm_macsec_config_sectag_without_sci( -+ selected_macsec_priv->fm_macsec); -+ if (unlikely(rv < 0)) -+ goto _return_fm_macsec_free; -+ } -+ -+ if (mdata->config_exception) { -+ rv = fm_macsec_config_exception(selected_macsec_priv->fm_macsec, -+ mdata->exception, -+ mdata->enable_exception); -+ if (unlikely(rv < 0)) -+ goto _return_fm_macsec_free; -+ } -+ -+ rv = fm_macsec_init(selected_macsec_priv->fm_macsec); -+ if (unlikely(rv < 0)) -+ goto _return_fm_macsec_free; -+ -+ rv = fm_macsec_enable(selected_macsec_priv->fm_macsec); -+ if (unlikely(rv < 0)) -+ goto _return_fm_macsec_free; -+ -+ return macsec_id; -+ -+_return_fm_macsec_free: -+ fm_macsec_free(selected_macsec_priv->fm_macsec); -+ selected_macsec_priv->fm_macsec = NULL; -+ return rv; -+} -+ -+static int send_result(struct nlmsghdr *nlh, int pid, int result) -+{ -+ int res; -+ struct sk_buff *skb_out; -+ size_t msg_size = sizeof(result); -+ -+ skb_out = nlmsg_new(msg_size, 0); -+ if (unlikely(!skb_out)) { -+ pr_err("Failed to allocate new skb\n"); -+ goto _ret_err; -+ } -+ -+ nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, msg_size, 0); -+ 
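-+	/* nlmsg_put() returns NULL when the skb cannot hold the netlink
-+	 * header plus msg_size bytes of payload.
-+	 */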
-+	if (unlikely(!nlh)) {
-+		pr_err("nlmsg_put() failed\n");
-+		nlmsg_free(skb_out);
-+		goto _ret_err;
-+	}
-+
-+	NETLINK_CB(skb_out).dst_group = 0; /* not in mcast group */
-+	memcpy(nlmsg_data(nlh), &result, msg_size);
-+
-+	res = nlmsg_unicast(nl_sk, skb_out, pid);
-+	if (unlikely(res < 0)) {
-+		pr_err("Error while sending back to user\n");
-+		goto _ret_err;
-+	}
-+
-+	return 0;
-+
-+_ret_err:
-+	return -1;
-+}
-+
-+/* The kernel communicates with user space through netlink sockets. This
-+ * function implements the kernel's responses. The generic struct is used to
-+ * avoid duplicating the handling code for every message type.
-+ */
-+static void switch_messages(struct sk_buff *skb)
-+{
-+	struct nlmsghdr *nlh;
-+	int pid, rv;
-+	enum msg_type cmd;
-+
-+	struct dpa_fq *dpa_fq, *tmp;
-+	struct device *dev;
-+
-+	struct dpaa_eth_hooks_s macsec_dpaa_eth_hooks;
-+
-+	struct generic_msg *check;
-+	int macsec_id = 0;
-+	uint32_t sc_id, macsec_revision;
-+	macsec_an_t ret_an;
-+	int i;
-+
-+	pr_debug("Entering: %s\n", __func__);
-+
-+	if (unlikely(!skb)) {
-+		pr_err("skb null\n");
-+		return;
-+	}
-+
-+	nlh = (struct nlmsghdr *)skb->data;
-+	check = kmalloc(sizeof(*check), GFP_KERNEL);
-+	if (unlikely(!check))
-+		return;
-+	memcpy(check, nlmsg_data(nlh), sizeof(*check));
-+	pid = nlh->nlmsg_pid; /* pid of sending process */
-+	cmd = check->chf;
-+
-+	switch (cmd) {
-+	case ENABLE_MACSEC:
-+		pr_debug("ENABLE_MACSEC\n");
-+
-+		macsec_id = enable_macsec(check);
-+
-+		if (macsec_id >= 0)
-+			macsec_priv[macsec_id]->en_state = MACSEC_ENABLED;
-+
-+		rv = send_result(nlh, pid, (macsec_id < 0) ? NACK : macsec_id);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case SET_EXCEPTION:
-+		pr_debug("SET_EXCEPTION\n");
-+
-+		rv = set_macsec_exception(check);
-+
-+		rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case ENABLE_SECY:
-+		pr_debug("ENABLE_SECY\n");
-+
-+		rv = enable_secy(check, &macsec_id);
-+
-+		if (rv == 0)
-+			macsec_priv[macsec_id]->en_state = SECY_ENABLED;
-+
-+		rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case GET_REVISION:
-+		pr_debug("GET_REVISION\n");
-+
-+		rv = get_macsec_revision(check, &macsec_revision);
-+
-+		rv = send_result(nlh, pid,
-+				(rv < 0) ? NACK : (int)macsec_revision);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case GET_TXSC_PHYS_ID:
-+		pr_debug("GET_TXSC_PHYS_ID\n");
-+
-+		rv = get_tx_sc_phys_id(check, &sc_id);
-+
-+		rv = send_result(nlh, pid, (rv < 0) ? NACK : (int)sc_id);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case TX_SA_CREATE:
-+		pr_debug("TX_SA_CREATE\n");
-+
-+		rv = create_tx_sa(check);
-+
-+		rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case MODIFY_TXSA_KEY:
-+		pr_debug("MODIFY_TXSA_KEY\n");
-+
-+		rv = modify_tx_sa_key(check);
-+
-+		rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case TX_SA_ACTIVATE:
-+		pr_debug("TX_SA_ACTIVATE\n");
-+
-+		rv = activate_tx_sa(check);
-+
-+		rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case GET_TXSA_AN:
-+		pr_debug("GET_TXSA_AN\n");
-+
-+		rv = get_tx_sa_an(check, &ret_an);
-+
-+		rv = send_result(nlh, pid, (rv < 0) ? NACK : (int)ret_an);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case RX_SC_CREATE:
-+		pr_debug("RX_SC_CREATE\n");
-+
-+		/* create_rx_sc() returns a negative errno or the new sc id;
-+		 * keep it in the signed rv, since a "< 0" test on the
-+		 * unsigned sc_id could never catch a failure.
-+		 */
-+		rv = create_rx_sc(check);
-+
-+		rv = send_result(nlh, pid, (rv < 0) ? NACK : rv);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case GET_RXSC_PHYS_ID:
-+		pr_debug("GET_RXSC_PHYS_ID\n");
-+
-+		rv = get_rx_sc_phys_id(check, &sc_id);
-+
-+		rv = send_result(nlh, pid, (rv < 0) ? NACK : (int)sc_id);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case RX_SA_CREATE:
-+		pr_debug("RX_SA_CREATE\n");
-+
-+		rv = create_rx_sa(check);
-+
-+		rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case MODIFY_RXSA_KEY:
-+		pr_debug("MODIFY_RXSA_KEY\n");
-+
-+		rv = modify_rx_sa_key(check);
-+
-+		rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case UPDATE_NPN:
-+		pr_debug("UPDATE_NPN\n");
-+
-+		rv = update_npn(check);
-+
-+		rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case UPDATE_LPN:
-+		pr_debug("UPDATE_LPN\n");
-+
-+		rv = update_lpn(check);
-+
-+		rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case RX_SA_ACTIVATE:
-+		pr_debug("RX_SA_ACTIVATE\n");
-+
-+		rv = activate_rx_sa(check);
-+
-+		rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case RX_SA_DISABLE:
-+		pr_debug("RX_SA_DISABLE\n");
-+
-+		rv = rx_sa_disable(check);
-+
-+		rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case RX_SA_DELETE:
-+		pr_debug("RX_SA_DELETE\n");
-+
-+		rv = rx_sa_delete(check);
-+
-+		rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case RX_SC_DELETE:
-+		pr_debug("RX_SC_DELETE\n");
-+
-+		rv = rx_sc_delete(check);
-+
-+		rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case TX_SA_DELETE:
-+		pr_debug("TX_SA_DELETE\n");
-+
-+		rv = tx_sa_delete(check);
-+
-+		rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case DISABLE_SECY:
-+		pr_debug("DISABLE_SECY\n");
-+
-+		rv = disable_secy(check, &macsec_id);
-+
-+		if (unlikely(rv < 0))
-+			macsec_priv[macsec_id]->en_state = SECY_ENABLED;
-+		else
-+			macsec_priv[macsec_id]->en_state = MACSEC_ENABLED;
-+
-+		rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case DISABLE_MACSEC:
-+		pr_debug("DISABLE_MACSEC\n");
-+
-+		rv = disable_macsec(check, &macsec_id);
-+
-+		macsec_priv[macsec_id]->en_state = MACSEC_DISABLED;
-+
-+		rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+
-+		break;
-+
-+	case DISABLE_ALL:
-+		pr_debug("DISABLE_ALL\n");
-+
-+		rv = disable_all(check, &macsec_id);
-+
-+		macsec_priv[macsec_id]->en_state = MACSEC_DISABLED;
-+
-+		rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
-+		if (unlikely(rv < 0))
-+			goto _release;
-+		break;
-+
-+	default:
-+		/* should never get here */
-+		pr_err("unknown message type\n");
-+		break;
-+	}
-+
-+	kfree(check);
-+	return;
-+
-+_release:
-+	for (i = 0; i < FM_MAX_NUM_OF_MACS; i++)
-+		deinit_macsec(i);
-+
-+	/* Reset the TX hooks */
-+	memset(&macsec_dpaa_eth_hooks, 0, sizeof(macsec_dpaa_eth_hooks));
-+	fsl_dpaa_eth_set_hooks(&macsec_dpaa_eth_hooks);
-+
-+	for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
-+
-+		if (!macsec_priv[i]->net_dev)
-+			continue;
-+
-+		free_percpu(macsec_priv[i]->percpu_priv);
-+
-+		/* Delete the fman queues */
-+		list_for_each_entry_safe(dpa_fq,
-+				tmp,
-+				&macsec_priv[i]->dpa_fq_list,
-+				list) {
-+			dev = dpa_fq->net_dev->dev.parent;
-+			rv = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
-+			if (unlikely(rv < 0))
-+				pr_err("_dpa_fq_free = %d\n", rv);
-+		}
-+
-+		macsec_restore_ethtool_ops(macsec_priv[i]->net_dev);
-+		kfree(macsec_priv[i]);
-+		macsec_priv[i] = NULL;
-+	}
-+
-+	kfree(check);
-+
-+	netlink_kernel_release(nl_sk);
-+}
-+
-+struct netlink_kernel_cfg ms_cfg = {
-+	.groups = 1,
-+	.input = switch_messages,
-+};
-+
-+static int __init macsec_init(void)
-+{
-+	struct dpaa_eth_hooks_s macsec_dpaa_eth_hooks;
-+	int ret, i;
-+
-+	pr_debug("Entering: %s\n", __func__);
-+
-+	/* If there is no interface we want macsec on, just exit. */
-+	parse_ifs();
-+	for (i = 0; i < macsec_ifs_cnt; i++) {
-+		if (!macsec_ifs[i]) {
-+			pr_err("Interface unknown\n");
-+			return -EINVAL;
-+		}
-+	}
-+
-+	/* Actually send the info to the user through a given socket. */
-+	nl_sk = netlink_kernel_create(&init_net, NETLINK_USER, &ms_cfg);
-+	if (unlikely(!nl_sk)) {
-+		pr_err("Error creating socket.\n");
-+		ret = -ENOMEM;
-+		goto _release;
-+	}
-+
-+	ret = macsec_setup();
-+	if (unlikely(ret != 0)) {
-+		pr_err("Setup of macsec failed\n");
-+		goto _release;
-+	}
-+
-+	/* set dpaa hooks for default queues */
-+	memset(&macsec_dpaa_eth_hooks, 0, sizeof(macsec_dpaa_eth_hooks));
-+	macsec_dpaa_eth_hooks.tx = (dpaa_eth_egress_hook_t)(macsec_tx_hook);
-+	macsec_dpaa_eth_hooks.rx_default =
-+		(dpaa_eth_ingress_hook_t)(macsec_rx_hook);
-+
-+	fsl_dpaa_eth_set_hooks(&macsec_dpaa_eth_hooks);
-+
-+	return 0;
-+
-+_release:
-+	memset(&macsec_dpaa_eth_hooks, 0, sizeof(macsec_dpaa_eth_hooks));
-+	fsl_dpaa_eth_set_hooks(&macsec_dpaa_eth_hooks);
-+	netlink_kernel_release(nl_sk);
-+	return ret;
-+}
-+
-+static void __exit macsec_exit(void)
-+{
-+	int _errno;
-+	struct dpa_fq *dpa_fq, *tmp;
-+	struct device *dev;
-+	struct dpaa_eth_hooks_s macsec_dpaa_eth_hooks;
-+	int i;
-+
-+	pr_debug("exiting macsec module\n");
-+
-+	for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
-+		/* release may already have been done, due to errors, in
-+		 * switch_messages; in that case just exit the module properly
-+		 */
-+		if (!macsec_priv[i] || !macsec_priv[i]->net_dev) {
-+			pr_debug("no release needed\n");
-+			continue;
-+		}
-+		deinit_macsec(i);
-+	}
-+
-+	/* Reset the TX hooks before exiting */
-+	memset(&macsec_dpaa_eth_hooks, 0, sizeof(macsec_dpaa_eth_hooks));
-+	fsl_dpaa_eth_set_hooks(&macsec_dpaa_eth_hooks);
-+
-+	for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
-+
-+		if (!macsec_priv[i] || !macsec_priv[i]->net_dev) {
-+			pr_debug("no release needed\n");
-+			continue;
-+		}
-+
-+		free_percpu(macsec_priv[i]->percpu_priv);
-+
-+		/* Delete the fman queues */
-+		list_for_each_entry_safe(dpa_fq, tmp,
-+				&macsec_priv[i]->dpa_fq_list, list) {
-+			if (dpa_fq) {
-+				dev = dpa_fq->net_dev->dev.parent;
-+				_errno = _dpa_fq_free(dev,
-+					(struct qman_fq *)dpa_fq);
-+				if (unlikely(_errno < 0))
-+					pr_err("_dpa_fq_free = %d\n", _errno);
-+			}
-+		}
-+
-+		/* restore ethtool
ops to the previous private ones */ -+ macsec_restore_ethtool_ops(macsec_priv[i]->net_dev); -+ -+ kfree(macsec_priv[i]); -+ } -+ -+ netlink_kernel_release(nl_sk); -+ -+ pr_debug("exited macsec module\n"); -+} -+ -+module_init(macsec_init); -+module_exit(macsec_exit); -+ -+MODULE_LICENSE("Dual BSD/GPL"); ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.h -@@ -0,0 +1,294 @@ -+/* Copyright 2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#ifndef __DPAA_ETH_MACSEC_H -+#define __DPAA_ETH_MACSEC_H -+ -+#include "mac.h" -+ -+#define NETLINK_USER 31 -+#define MAX_NUM_OF_SECY 1 -+#define MAX_LEN 100 -+#define FM_FD_STAT_RX_MACSEC 0x00800000 -+#define MACSEC_ETH_TX_QUEUES NR_CPUS -+#define MACSEC_REG_OFFSET 0x800 -+#define ACK 0 -+#define NACK -1 -+ -+extern const struct dpa_fq_cbs_t private_fq_cbs; -+ -+extern int dpa_macsec_get_sset_count(struct net_device *net_dev, int type); -+extern void -+dpa_macsec_get_ethtool_stats(struct net_device *net_dev, -+ struct ethtool_stats *stats, u64 *data); -+extern void -+dpa_macsec_get_strings(struct net_device *net_dev, -+ u32 stringset, u8 *data); -+ -+enum msg_type {ENABLE_MACSEC, -+ SET_EXCEPTION, -+ ENABLE_SECY, -+ TX_SA_CREATE, -+ TX_SA_ACTIVATE, -+ RX_SC_CREATE, -+ RX_SA_CREATE, -+ RX_SA_ACTIVATE, -+ RX_SA_DISABLE, -+ RX_SA_DELETE, -+ RX_SC_DELETE, -+ TX_SA_DELETE, -+ DISABLE_MACSEC, -+ DISABLE_SECY, -+ DISABLE_ALL, -+ GET_REVISION, -+ UPDATE_NPN, -+ UPDATE_LPN, -+ GET_TXSC_PHYS_ID, -+ GET_RXSC_PHYS_ID, -+ GET_TXSA_AN, -+ MODIFY_TXSA_KEY, -+ MODIFY_RXSA_KEY, -+}; -+ -+enum macsec_enablement {MACSEC_DISABLED, MACSEC_ENABLED, SECY_ENABLED}; -+ -+struct enable_secy { -+ int macsec_id; -+ -+ u64 sci; /* MAC address(48b) + port_id(16b) */ -+ -+ bool config_insertion_mode; -+ fm_macsec_sci_insertion_mode sci_insertion_mode; -+ -+ bool config_protect_frames; -+ bool protect_frames; -+ -+ bool config_replay_window; -+ bool replay_protect; -+ uint32_t replay_window; -+ -+ bool config_validation_mode; -+ fm_macsec_valid_frame_behavior validate_frames; -+ -+ bool config_confidentiality; -+ bool confidentiality_enable; -+ uint32_t confidentiality_offset; -+ -+ bool config_point_to_point; -+ -+ bool config_exception; -+ bool enable_exception; -+ fm_macsec_secy_exception exception; -+ -+ bool config_event; -+ bool enable_event; -+ fm_macsec_secy_event event; -+}; -+ -+struct macsec_data { -+ char *if_name; -+ size_t if_name_length; /* including string terminator */ -+ -+ bool config_unknown_sci_treatment; -+ fm_macsec_unknown_sci_frame_treatment unknown_sci_treatment; -+ -+ bool config_invalid_tag_treatment; -+ bool deliver_uncontrolled; -+ -+ bool config_kay_frame_treatment; -+ bool discard_uncontrolled; -+ -+ bool config_untag_treatment; -+ fm_macsec_untag_frame_treatment untag_treatment; -+ -+ bool config_pn_exhaustion_threshold; -+ uint32_t pn_threshold; -+ -+ bool config_keys_unreadable; -+ -+ bool config_sectag_without_sci; -+ -+ bool config_exception; -+ bool enable_exception; -+ fm_macsec_exception exception; -+}; -+ -+struct set_exception { -+ int macsec_id; -+ bool enable_exception; -+ fm_macsec_exception exception; -+}; -+ -+struct create_tx_sa { -+ int macsec_id; -+ u8 an; /* association number */ -+ u8 *sak; /* secure assoc key */ -+ u32 sak_len; /* assoc key length */ -+}; -+ -+struct modify_tx_sa_key { -+ int macsec_id; -+ u8 an; /* association number */ -+ u8 *sak; /* secure assoc key */ -+ u32 sak_len; /* assoc key length */ -+}; -+ -+struct activate_tx_sa { -+ int macsec_id; -+ u8 an; /* association number */ -+}; -+ -+struct create_rx_sc { -+ int macsec_id; -+ u64 sci; -+}; -+ -+struct delete_rx_sc { -+ int macsec_id; -+ u32 rx_sc_id; -+}; -+ -+struct get_rx_sc_id { -+ int macsec_id; -+ u32 rx_sc_id; -+}; -+ -+struct create_rx_sa { -+ int macsec_id; -+ u32 rx_sc_id; -+ u8 an; -+ u32 lpn; -+ u8 *sak; -+ u32 sak_len; -+}; -+ -+struct activate_rx_sa { -+ int macsec_id; -+ u32 rx_sc_id; -+ u8 an; -+}; -+ -+struct disable_rx_sa { -+ int macsec_id; -+ u32 rx_sc_id; -+ u8 an; 
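-+	/* an = association number of the SA to disable */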
-+}; -+ -+struct delete_rx_sa { -+ int macsec_id; -+ u32 rx_sc_id; -+ u8 an; -+}; -+ -+struct delete_tx_sa { -+ int macsec_id; -+ u32 rx_sc_id; -+ u8 an; -+}; -+ -+struct update_npn { -+ int macsec_id; -+ u32 rx_sc_id; -+ u8 an; -+ u32 pn; -+}; -+ -+struct update_lpn { -+ int macsec_id; -+ u32 rx_sc_id; -+ u8 an; -+ u32 pn; -+}; -+ -+struct modify_rx_sa_key { -+ int macsec_id; -+ u32 rx_sc_id; -+ u8 an; -+ u8 *sak; -+ u32 sak_len; -+}; -+ -+struct generic_msg { -+ enum msg_type chf; -+ union { -+ int macsec_id; -+ struct macsec_data en_macsec; -+ struct enable_secy secy; -+ struct create_tx_sa c_tx_sa; -+ struct activate_tx_sa a_tx_sa; -+ struct create_rx_sc c_rx_sc; -+ struct get_rx_sc_id get_rx_sc_id; -+ struct create_rx_sa c_rx_sa; -+ struct activate_rx_sa a_rx_sa; -+ struct disable_rx_sa d_rx_sa; -+ struct delete_rx_sa del_rx_sa; -+ struct delete_rx_sc del_rx_sc; -+ struct delete_tx_sa del_tx_sa; -+ struct update_npn update_npn; -+ struct update_lpn update_lpn; -+ struct modify_tx_sa_key modify_tx_sa_key; -+ struct modify_rx_sa_key modify_rx_sa_key; -+ struct set_exception set_ex; -+ } payload; -+}; -+ -+struct macsec_percpu_priv_s { -+ u64 rx_macsec; -+ u64 tx_macsec; -+}; -+ -+struct macsec_priv_s { -+ struct macsec_percpu_priv_s __percpu *percpu_priv; -+ -+ struct net_device *net_dev; -+ struct mac_device *mac_dev; -+ -+ struct qman_fq *egress_fqs[MACSEC_ETH_TX_QUEUES]; -+ struct qman_fq *conf_fqs[MACSEC_ETH_TX_QUEUES]; -+ struct list_head dpa_fq_list; -+ uint32_t msg_enable; /* net_device message level */ -+ uint16_t channel; -+ struct fm_macsec_dev *fm_macsec; -+ -+ struct fm_macsec_secy_dev *fm_ms_secy; -+ uint8_t an; -+ -+ struct rx_sc_dev *rx_sc_dev[NUM_OF_RX_SC]; -+ uint8_t *sa_key; -+ enum macsec_enablement en_state; -+ -+ uintptr_t vaddr; -+ struct resource *fman_resource; -+}; -+ -+struct macsec_priv_s *dpa_macsec_get_priv(struct net_device *net_dev); -+ -+#endif /* __DPAA_ETH_MACSEC_H */ ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c -@@ -0,0 +1,381 @@ -+/* Copyright 2008-2013 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG -+#define pr_fmt(fmt) \ -+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \ -+ KBUILD_BASENAME".c", __LINE__, __func__ -+#else -+#define pr_fmt(fmt) \ -+ KBUILD_MODNAME ": " fmt -+#endif -+ -+#include -+#include -+#include -+#include "dpaa_eth.h" -+#include "dpaa_eth_common.h" -+#include "dpaa_eth_base.h" -+#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */ -+#include "mac.h" -+ -+#define DPA_DESCRIPTION "FSL DPAA Proxy initialization driver" -+ -+MODULE_LICENSE("Dual BSD/GPL"); -+ -+MODULE_DESCRIPTION(DPA_DESCRIPTION); -+ -+static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev); -+#ifdef CONFIG_PM -+ -+static int proxy_suspend(struct device *dev) -+{ -+ struct proxy_device *proxy_dev = dev_get_drvdata(dev); -+ struct mac_device *mac_dev = proxy_dev->mac_dev; -+ int err = 0; -+ -+ err = fm_port_suspend(mac_dev->port_dev[RX]); -+ if (err) -+ goto port_suspend_failed; -+ -+ err = fm_port_suspend(mac_dev->port_dev[TX]); -+ if (err) -+ err = fm_port_resume(mac_dev->port_dev[RX]); -+ -+port_suspend_failed: -+ return err; -+} -+ -+static int proxy_resume(struct device *dev) -+{ -+ struct proxy_device *proxy_dev = dev_get_drvdata(dev); -+ struct mac_device *mac_dev = proxy_dev->mac_dev; -+ int err = 0; -+ -+ err = fm_port_resume(mac_dev->port_dev[TX]); -+ if (err) -+ goto port_resume_failed; -+ -+ err = fm_port_resume(mac_dev->port_dev[RX]); -+ if (err) -+ err = fm_port_suspend(mac_dev->port_dev[TX]); -+ -+port_resume_failed: -+ return err; -+} -+ -+static const struct dev_pm_ops proxy_pm_ops = { -+ .suspend = proxy_suspend, -+ .resume = proxy_resume, -+}; -+ -+#define PROXY_PM_OPS (&proxy_pm_ops) -+ -+#else /* CONFIG_PM */ -+ -+#define PROXY_PM_OPS NULL -+ -+#endif /* CONFIG_PM */ -+ -+static int dpaa_eth_proxy_probe(struct platform_device *_of_dev) -+{ -+ int err = 0, i; -+ struct device *dev; -+ struct device_node *dpa_node; -+ struct dpa_bp *dpa_bp; -+ struct list_head proxy_fq_list; -+ size_t count; -+ struct fm_port_fqs port_fqs; -+ struct dpa_buffer_layout_s *buf_layout = NULL; -+ struct mac_device *mac_dev; -+ struct proxy_device *proxy_dev; -+ -+ dev = &_of_dev->dev; -+ -+ dpa_node = dev->of_node; -+ -+ if (!of_device_is_available(dpa_node)) -+ return -ENODEV; -+ -+ /* Get the buffer pools assigned to this interface */ -+ dpa_bp = dpa_bp_probe(_of_dev, &count); -+ if (IS_ERR(dpa_bp)) -+ return PTR_ERR(dpa_bp); -+ -+ mac_dev = dpa_mac_probe(_of_dev); -+ if (IS_ERR(mac_dev)) -+ return PTR_ERR(mac_dev); -+ -+ proxy_dev = devm_kzalloc(dev, sizeof(*proxy_dev), GFP_KERNEL); -+ if (!proxy_dev) { -+ dev_err(dev, "devm_kzalloc() failed\n"); -+ return -ENOMEM; -+ } -+ -+ proxy_dev->mac_dev = mac_dev; -+ dev_set_drvdata(dev, proxy_dev); -+ -+ /* We have physical ports, so we need to establish -+ * the buffer layout. 
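-+ * Two layout entries are allocated below: one for the RX port and one
-+ * for the TX port.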
-+ */ -+ buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout), -+ GFP_KERNEL); -+ if (!buf_layout) { -+ dev_err(dev, "devm_kzalloc() failed\n"); -+ return -ENOMEM; -+ } -+ dpa_set_buffers_layout(mac_dev, buf_layout); -+ -+ INIT_LIST_HEAD(&proxy_fq_list); -+ -+ memset(&port_fqs, 0, sizeof(port_fqs)); -+ -+ err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true, RX); -+ if (!err) -+ err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true, -+ TX); -+ if (err < 0) { -+ devm_kfree(dev, buf_layout); -+ return err; -+ } -+ -+ /* Proxy initializer - Just configures the MAC on behalf of -+ * another partition. -+ */ -+ dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs, -+ buf_layout, dev); -+ -+ /* Proxy interfaces need to be started, and the allocated -+ * memory freed -+ */ -+ devm_kfree(dev, buf_layout); -+ devm_kfree(dev, dpa_bp); -+ -+ /* Free FQ structures */ -+ devm_kfree(dev, port_fqs.rx_defq); -+ devm_kfree(dev, port_fqs.rx_errq); -+ devm_kfree(dev, port_fqs.tx_defq); -+ devm_kfree(dev, port_fqs.tx_errq); -+ -+ for_each_port_device(i, mac_dev->port_dev) { -+ err = fm_port_enable(mac_dev->port_dev[i]); -+ if (err) -+ goto port_enable_fail; -+ } -+ -+ dev_info(dev, "probed MAC device with MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n", -+ mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2], -+ mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]); -+ -+ return 0; /* Proxy interface initialization ended */ -+ -+port_enable_fail: -+ for_each_port_device(i, mac_dev->port_dev) -+ fm_port_disable(mac_dev->port_dev[i]); -+ dpa_eth_proxy_remove(_of_dev); -+ -+ return err; -+} -+ -+int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev, -+ struct net_device *net_dev) -+{ -+ struct mac_device *mac_dev; -+ int _errno; -+ -+ mac_dev = proxy_dev->mac_dev; -+ -+ _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev), -+ net_dev->dev_addr); -+ if (_errno < 0) -+ return _errno; -+ -+ return 0; -+} -+EXPORT_SYMBOL(dpa_proxy_set_mac_address); -+ -+int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev, -+ struct net_device *net_dev) -+{ -+ struct mac_device *mac_dev = proxy_dev->mac_dev; -+ int _errno; -+ -+ if (!!(net_dev->flags & IFF_PROMISC) != mac_dev->promisc) { -+ mac_dev->promisc = !mac_dev->promisc; -+ _errno = mac_dev->set_promisc(mac_dev->get_mac_handle(mac_dev), -+ mac_dev->promisc); -+ if (unlikely(_errno < 0)) -+ netdev_err(net_dev, "mac_dev->set_promisc() = %d\n", -+ _errno); -+ } -+ -+ _errno = mac_dev->set_multi(net_dev, mac_dev); -+ if (unlikely(_errno < 0)) -+ return _errno; -+ -+ return 0; -+} -+EXPORT_SYMBOL(dpa_proxy_set_rx_mode); -+ -+int dpa_proxy_start(struct net_device *net_dev) -+{ -+ struct mac_device *mac_dev; -+ const struct dpa_priv_s *priv; -+ struct proxy_device *proxy_dev; -+ int _errno; -+ int i; -+ -+ priv = netdev_priv(net_dev); -+ proxy_dev = (struct proxy_device *)priv->peer; -+ mac_dev = proxy_dev->mac_dev; -+ -+ _errno = mac_dev->init_phy(net_dev, mac_dev); -+ if (_errno < 0) { -+ if (netif_msg_drv(priv)) -+ netdev_err(net_dev, "init_phy() = %d\n", -+ _errno); -+ return _errno; -+ } -+ -+ for_each_port_device(i, mac_dev->port_dev) { -+ _errno = fm_port_enable(mac_dev->port_dev[i]); -+ if (_errno) -+ goto port_enable_fail; -+ } -+ -+ _errno = mac_dev->start(mac_dev); -+ if (_errno < 0) { -+ if (netif_msg_drv(priv)) -+ netdev_err(net_dev, "mac_dev->start() = %d\n", -+ _errno); -+ goto port_enable_fail; -+ } -+ -+ return _errno; -+ -+port_enable_fail: -+ for_each_port_device(i, mac_dev->port_dev) -+ 
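-+		/* Roll back: disable every port, including any that were
-+		 * already enabled before the failure.
-+		 */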
-+		fm_port_disable(mac_dev->port_dev[i]);
-+
-+	return _errno;
-+}
-+EXPORT_SYMBOL(dpa_proxy_start);
-+
-+int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev)
-+{
-+	struct mac_device *mac_dev = proxy_dev->mac_dev;
-+	const struct dpa_priv_s *priv = netdev_priv(net_dev);
-+	int _errno, i, err;
-+
-+	_errno = mac_dev->stop(mac_dev);
-+	if (_errno < 0) {
-+		if (netif_msg_drv(priv))
-+			netdev_err(net_dev, "mac_dev->stop() = %d\n",
-+				_errno);
-+		return _errno;
-+	}
-+
-+	for_each_port_device(i, mac_dev->port_dev) {
-+		err = fm_port_disable(mac_dev->port_dev[i]);
-+		_errno = err ? err : _errno;
-+	}
-+
-+	if (mac_dev->phy_dev)
-+		phy_disconnect(mac_dev->phy_dev);
-+	mac_dev->phy_dev = NULL;
-+
-+	return _errno;
-+}
-+EXPORT_SYMBOL(dpa_proxy_stop);
-+
-+static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev)
-+{
-+	struct device *dev = &of_dev->dev;
-+
-+	/* proxy_dev was allocated with devm_kzalloc(), so the devres core
-+	 * releases it; a manual kfree() here would cause a double free.
-+	 */
-+	dev_set_drvdata(dev, NULL);
-+
-+	return 0;
-+}
-+
-+static const struct of_device_id dpa_proxy_match[] = {
-+	{
-+		.compatible = "fsl,dpa-ethernet-init"
-+	},
-+	{}
-+};
-+MODULE_DEVICE_TABLE(of, dpa_proxy_match);
-+
-+static struct platform_driver dpa_proxy_driver = {
-+	.driver = {
-+		.name = KBUILD_MODNAME "-proxy",
-+		.of_match_table = dpa_proxy_match,
-+		.owner = THIS_MODULE,
-+		.pm = PROXY_PM_OPS,
-+	},
-+	.probe = dpaa_eth_proxy_probe,
-+	.remove = dpa_eth_proxy_remove
-+};
-+
-+static int __init __cold dpa_proxy_load(void)
-+{
-+	int _errno;
-+
-+	pr_info(DPA_DESCRIPTION "\n");
-+
-+	/* Initialize dpaa_eth mirror values */
-+	dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
-+	dpa_max_frm = fm_get_max_frm();
-+
-+	_errno = platform_driver_register(&dpa_proxy_driver);
-+	if (unlikely(_errno < 0)) {
-+		pr_err(KBUILD_MODNAME
-+			": %s:%hu:%s(): platform_driver_register() = %d\n",
-+			KBUILD_BASENAME".c", __LINE__, __func__, _errno);
-+	}
-+
-+	pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
-+		KBUILD_BASENAME".c", __func__);
-+
-+	return _errno;
-+}
-+module_init(dpa_proxy_load);
-+
-+static void __exit __cold dpa_proxy_unload(void)
-+{
-+	platform_driver_unregister(&dpa_proxy_driver);
-+
-+	pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
-+		KBUILD_BASENAME".c", __func__);
-+}
-+module_exit(dpa_proxy_unload);
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
-@@ -0,0 +1,1128 @@
-+/* Copyright 2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ *     * Redistributions of source code must retain the above copyright
-+ *       notice, this list of conditions and the following disclaimer.
-+ *     * Redistributions in binary form must reproduce the above copyright
-+ *       notice, this list of conditions and the following disclaimer in the
-+ *       documentation and/or other materials provided with the distribution.
-+ *     * Neither the name of Freescale Semiconductor nor the
-+ *       names of its contributors may be used to endorse or promote products
-+ *       derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG -+#define pr_fmt(fmt) \ -+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \ -+ KBUILD_BASENAME".c", __LINE__, __func__ -+#else -+#define pr_fmt(fmt) \ -+ KBUILD_MODNAME ": " fmt -+#endif -+ -+#include -+#include -+#include -+#include -+ -+#include "dpaa_eth.h" -+#include "dpaa_eth_common.h" -+#ifdef CONFIG_FSL_DPAA_1588 -+#include "dpaa_1588.h" -+#endif -+#ifdef CONFIG_FSL_DPAA_CEETM -+#include "dpaa_eth_ceetm.h" -+#endif -+ -+/* DMA map and add a page frag back into the bpool. -+ * @vaddr fragment must have been allocated with netdev_alloc_frag(), -+ * specifically for fitting into @dpa_bp. -+ */ -+static void dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, unsigned long vaddr, -+ int *count_ptr) -+{ -+ struct bm_buffer bmb; -+ dma_addr_t addr; -+ -+ memset(&bmb, 0, sizeof(struct bm_buffer)); -+ -+ addr = dma_map_single(dpa_bp->dev, (void *)vaddr, dpa_bp->size, -+ DMA_BIDIRECTIONAL); -+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) { -+ dev_err(dpa_bp->dev, "DMA mapping failed"); -+ return; -+ } -+ -+ bm_buffer_set64(&bmb, addr); -+ -+ while (bman_release(dpa_bp->pool, &bmb, 1, 0)) -+ cpu_relax(); -+ -+ (*count_ptr)++; -+} -+ -+static int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp) -+{ -+ struct bm_buffer bmb[8]; -+ void *new_buf; -+ dma_addr_t addr; -+ uint8_t i; -+ struct device *dev = dpa_bp->dev; -+ struct sk_buff *skb, **skbh; -+ -+ memset(bmb, 0, sizeof(struct bm_buffer) * 8); -+ -+ for (i = 0; i < 8; i++) { -+ /* We'll prepend the skb back-pointer; can't use the DPA -+ * priv space, because FMan will overwrite it (from offset 0) -+ * if it ends up being the second, third, etc. fragment -+ * in a S/G frame. -+ * -+ * We only need enough space to store a pointer, but allocate -+ * an entire cacheline for performance reasons. -+ */ -+#ifdef DPAA_LS1043A_DMA_4K_ISSUE -+ new_buf = page_address(alloc_page(GFP_ATOMIC)); -+#else -+ new_buf = netdev_alloc_frag(SMP_CACHE_BYTES + DPA_BP_RAW_SIZE); -+#endif -+ if (unlikely(!new_buf)) -+ goto netdev_alloc_failed; -+ new_buf = PTR_ALIGN(new_buf + SMP_CACHE_BYTES, SMP_CACHE_BYTES); -+ -+ skb = build_skb(new_buf, DPA_SKB_SIZE(dpa_bp->size) + -+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); -+ if (unlikely(!skb)) { -+ put_page(virt_to_head_page(new_buf)); -+ goto build_skb_failed; -+ } -+ DPA_WRITE_SKB_PTR(skb, skbh, new_buf, -1); -+ -+ addr = dma_map_single(dev, new_buf, -+ dpa_bp->size, DMA_BIDIRECTIONAL); -+ if (unlikely(dma_mapping_error(dev, addr))) -+ goto dma_map_failed; -+ -+ bm_buffer_set64(&bmb[i], addr); -+ } -+ -+release_bufs: -+ /* Release the buffers. In case bman is busy, keep trying -+ * until successful. 
bman_release() is guaranteed to succeed -+ * in a reasonable amount of time -+ */ -+ while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0))) -+ cpu_relax(); -+ return i; -+ -+dma_map_failed: -+ kfree_skb(skb); -+ -+build_skb_failed: -+netdev_alloc_failed: -+ net_err_ratelimited("dpa_bp_add_8_bufs() failed\n"); -+ WARN_ONCE(1, "Memory allocation failure on Rx\n"); -+ -+ bm_buffer_set64(&bmb[i], 0); -+ /* Avoid releasing a completely null buffer; bman_release() requires -+ * at least one buffer. -+ */ -+ if (likely(i)) -+ goto release_bufs; -+ -+ return 0; -+} -+ -+/* Cold path wrapper over _dpa_bp_add_8_bufs(). */ -+static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu) -+{ -+ int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu); -+ *count_ptr += _dpa_bp_add_8_bufs(dpa_bp); -+} -+ -+int dpa_bp_priv_seed(struct dpa_bp *dpa_bp) -+{ -+ int i; -+ -+ /* Give each CPU an allotment of "config_count" buffers */ -+ for_each_possible_cpu(i) { -+ int j; -+ -+ /* Although we access another CPU's counters here -+ * we do it at boot time so it is safe -+ */ -+ for (j = 0; j < dpa_bp->config_count; j += 8) -+ dpa_bp_add_8_bufs(dpa_bp, i); -+ } -+ return 0; -+} -+EXPORT_SYMBOL(dpa_bp_priv_seed); -+ -+/* Add buffers/(pages) for Rx processing whenever bpool count falls below -+ * REFILL_THRESHOLD. -+ */ -+int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *countptr) -+{ -+ int count = *countptr; -+ int new_bufs; -+ -+ if (unlikely(count < CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD)) { -+ do { -+ new_bufs = _dpa_bp_add_8_bufs(dpa_bp); -+ if (unlikely(!new_bufs)) { -+ /* Avoid looping forever if we've temporarily -+ * run out of memory. We'll try again at the -+ * next NAPI cycle. -+ */ -+ break; -+ } -+ count += new_bufs; -+ } while (count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT); -+ -+ *countptr = count; -+ if (unlikely(count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT)) -+ return -ENOMEM; -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL(dpaa_eth_refill_bpools); -+ -+/* Cleanup function for outgoing frame descriptors that were built on Tx path, -+ * either contiguous frames or scatter/gather ones. -+ * Skb freeing is not handled here. -+ * -+ * This function may be called on error paths in the Tx function, so guard -+ * against cases when not all fd relevant fields were filled in. -+ * -+ * Return the skb backpointer, since for S/G frames the buffer containing it -+ * gets freed here. -+ */ -+struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv, -+ const struct qm_fd *fd) -+{ -+ const struct qm_sg_entry *sgt; -+ int i; -+ struct dpa_bp *dpa_bp = priv->dpa_bp; -+ dma_addr_t addr = qm_fd_addr(fd); -+ dma_addr_t sg_addr; -+ struct sk_buff **skbh; -+ struct sk_buff *skb = NULL; -+ const enum dma_data_direction dma_dir = DMA_TO_DEVICE; -+ int nr_frags; -+ int sg_len; -+ -+ /* retrieve skb back pointer */ -+ DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), 0); -+ -+ if (unlikely(fd->format == qm_fd_sg)) { -+ nr_frags = skb_shinfo(skb)->nr_frags; -+#ifdef DPAA_LS1043A_DMA_4K_ISSUE -+/* addressing the 4k DMA issue can yield a larger number of fragments than -+ * the skb had -+ */ -+ dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) + -+ sizeof(struct qm_sg_entry) * DPA_SGT_MAX_ENTRIES, -+ dma_dir); -+#else -+ dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) + -+ sizeof(struct qm_sg_entry) * (1 + nr_frags), -+ dma_dir); -+#endif -+ /* The sgt buffer has been allocated with netdev_alloc_frag(), -+ * it's from lowmem. 
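-+ * Being in lowmem guarantees that the phys_to_virt() translation below
-+ * is valid without any kmap().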
-+ */
-+		sgt = phys_to_virt(addr + dpa_fd_offset(fd));
-+#ifdef CONFIG_FSL_DPAA_1588
-+		if (priv->tsu && priv->tsu->valid &&
-+				priv->tsu->hwts_tx_en_ioctl)
-+			dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
-+#endif
-+#ifdef CONFIG_FSL_DPAA_TS
-+		if (unlikely(priv->ts_tx_en &&
-+				skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
-+			struct skb_shared_hwtstamps shhwtstamps;
-+
-+			dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh);
-+			skb_tstamp_tx(skb, &shhwtstamps);
-+		}
-+#endif /* CONFIG_FSL_DPAA_TS */
-+
-+		/* sgt[0] is from lowmem, was dma_map_single()-ed */
-+		sg_addr = qm_sg_addr(&sgt[0]);
-+		sg_len = qm_sg_entry_get_len(&sgt[0]);
-+		dma_unmap_single(dpa_bp->dev, sg_addr, sg_len, dma_dir);
-+#ifdef DPAA_LS1043A_DMA_4K_ISSUE
-+		i = 1;
-+		do {
-+			DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
-+			sg_addr = qm_sg_addr(&sgt[i]);
-+			sg_len = qm_sg_entry_get_len(&sgt[i]);
-+			dma_unmap_page(dpa_bp->dev, sg_addr, sg_len, dma_dir);
-+		} while (!qm_sg_entry_get_final(&sgt[i++]));
-+#else
-+		/* remaining pages were mapped with dma_map_page() */
-+		for (i = 1; i <= nr_frags; i++) {
-+			DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
-+			sg_addr = qm_sg_addr(&sgt[i]);
-+			sg_len = qm_sg_entry_get_len(&sgt[i]);
-+			dma_unmap_page(dpa_bp->dev, sg_addr, sg_len, dma_dir);
-+		}
-+#endif
-+
-+		/* Free the page frag that we allocated on Tx */
-+		put_page(virt_to_head_page(sgt));
-+	} else {
-+		dma_unmap_single(dpa_bp->dev, addr,
-+				skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
-+#ifdef CONFIG_FSL_DPAA_TS
-+		/* get the timestamp for non-SG frames */
-+#ifdef CONFIG_FSL_DPAA_1588
-+		if (priv->tsu && priv->tsu->valid &&
-+				priv->tsu->hwts_tx_en_ioctl)
-+			dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
-+#endif
-+		if (unlikely(priv->ts_tx_en &&
-+				skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
-+			struct skb_shared_hwtstamps shhwtstamps;
-+
-+			dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh);
-+			skb_tstamp_tx(skb, &shhwtstamps);
-+		}
-+#endif
-+	}
-+
-+	return skb;
-+}
-+EXPORT_SYMBOL(_dpa_cleanup_tx_fd);
-+
-+#ifndef CONFIG_FSL_DPAA_TS
-+bool dpa_skb_is_recyclable(struct sk_buff *skb)
-+{
-+	/* No recycling possible if skb buffer is kmalloc'ed */
-+	if (skb->head_frag == 0)
-+		return false;
-+
-+	/* or if it's a userspace buffer */
-+	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
-+		return false;
-+
-+	/* or if it's cloned or shared */
-+	if (skb_shared(skb) || skb_cloned(skb) ||
-+	    skb->fclone != SKB_FCLONE_UNAVAILABLE)
-+		return false;
-+
-+	return true;
-+}
-+EXPORT_SYMBOL(dpa_skb_is_recyclable);
-+
-+bool dpa_buf_is_recyclable(struct sk_buff *skb,
-+		uint32_t min_size,
-+		uint16_t min_offset,
-+		unsigned char **new_buf_start)
-+{
-+	unsigned char *new;
-+
-+	/* In order to recycle a buffer, the following conditions must be met:
-+	 * - buffer size no less than the buffer pool size
-+	 * - buffer size no higher than an upper limit (to avoid moving too much
-+	 *   system memory to the buffer pools)
-+	 * - buffer address aligned to cacheline bytes
-+	 * - offset of data from start of buffer no lower than a minimum value
-+	 * - offset of data from start of buffer no higher than a maximum value
-+	 */
-+	new = min(skb_end_pointer(skb) - min_size, skb->data - min_offset);
-+
-+	/* left align to the nearest cacheline */
-+	new = (unsigned char *)((unsigned long)new & ~(SMP_CACHE_BYTES - 1));
-+
-+	if (likely(new >= skb->head &&
-+		   new >= (skb->data - DPA_MAX_FD_OFFSET) &&
-+		   skb_end_pointer(skb) - new <= DPA_RECYCLE_MAX_SIZE)) {
-+		*new_buf_start = new;
-+		return true;
-+	}
-+
-+	return false;
-+}
-+EXPORT_SYMBOL(dpa_buf_is_recyclable); -+#endif -+ -+/* Build a linear skb around the received buffer. -+ * We are guaranteed there is enough room at the end of the data buffer to -+ * accommodate the shared info area of the skb. -+ */ -+static struct sk_buff *__hot contig_fd_to_skb(const struct dpa_priv_s *priv, -+ const struct qm_fd *fd, int *use_gro) -+{ -+ dma_addr_t addr = qm_fd_addr(fd); -+ ssize_t fd_off = dpa_fd_offset(fd); -+ void *vaddr; -+ const fm_prs_result_t *parse_results; -+ struct sk_buff *skb = NULL, **skbh; -+ -+ vaddr = phys_to_virt(addr); -+ DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); -+ -+ /* Retrieve the skb and adjust data and tail pointers, to make sure -+ * forwarded skbs will have enough space on Tx if extra headers -+ * are added. -+ */ -+ DPA_READ_SKB_PTR(skb, skbh, vaddr, -1); -+ -+#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME -+ /* When using jumbo Rx buffers, we risk having frames dropped due to -+ * the socket backlog reaching its maximum allowed size. -+ * Use the frame length for the skb truesize instead of the buffer -+ * size, as this is the size of the data that actually gets copied to -+ * userspace. -+ */ -+ skb->truesize = SKB_TRUESIZE(dpa_fd_length(fd)); -+#endif -+ -+ DPA_BUG_ON(fd_off != priv->rx_headroom); -+ skb_reserve(skb, fd_off); -+ skb_put(skb, dpa_fd_length(fd)); -+ -+ /* Peek at the parse results for csum validation */ -+ parse_results = (const fm_prs_result_t *)(vaddr + -+ DPA_RX_PRIV_DATA_SIZE); -+ _dpa_process_parse_results(parse_results, fd, skb, use_gro); -+ -+#ifdef CONFIG_FSL_DPAA_1588 -+ if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_rx_en_ioctl) -+ dpa_ptp_store_rxstamp(priv, skb, vaddr); -+#endif -+#ifdef CONFIG_FSL_DPAA_TS -+ if (priv->ts_rx_en) -+ dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr); -+#endif /* CONFIG_FSL_DPAA_TS */ -+ -+ return skb; -+} -+ -+ -+/* Build an skb with the data of the first S/G entry in the linear portion and -+ * the rest of the frame as skb fragments. -+ * -+ * The page fragment holding the S/G Table is recycled here. 
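-+ * Recycling means the fragment is DMA-mapped again and released back
-+ * to the Bman pool via dpa_bp_recycle_frag().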
-+ */
-+static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
-+			const struct qm_fd *fd, int *use_gro,
-+			int *count_ptr)
-+{
-+	const struct qm_sg_entry *sgt;
-+	dma_addr_t addr = qm_fd_addr(fd);
-+	ssize_t fd_off = dpa_fd_offset(fd);
-+	dma_addr_t sg_addr;
-+	void *vaddr, *sg_vaddr;
-+	struct dpa_bp *dpa_bp;
-+	struct page *page, *head_page;
-+	int frag_offset, frag_len;
-+	int page_offset;
-+	int i;
-+	const fm_prs_result_t *parse_results;
-+	struct sk_buff *skb = NULL, *skb_tmp, **skbh;
-+
-+	vaddr = phys_to_virt(addr);
-+	DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
-+
-+	dpa_bp = priv->dpa_bp;
-+	/* Iterate through the SGT entries and add data buffers to the skb */
-+	sgt = vaddr + fd_off;
-+	for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
-+		/* Extension bit is not supported */
-+		DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
-+
-+		/* We use a single global Rx pool */
-+		DPA_BUG_ON(dpa_bp !=
-+			dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i])));
-+
-+		sg_addr = qm_sg_addr(&sgt[i]);
-+		sg_vaddr = phys_to_virt(sg_addr);
-+		DPA_BUG_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
-+				SMP_CACHE_BYTES));
-+
-+		dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size,
-+				DMA_BIDIRECTIONAL);
-+		if (i == 0) {
-+			DPA_READ_SKB_PTR(skb, skbh, sg_vaddr, -1);
-+			DPA_BUG_ON(skb->head != sg_vaddr);
-+#ifdef CONFIG_FSL_DPAA_1588
-+			if (priv->tsu && priv->tsu->valid &&
-+					priv->tsu->hwts_rx_en_ioctl)
-+				dpa_ptp_store_rxstamp(priv, skb, vaddr);
-+#endif
-+#ifdef CONFIG_FSL_DPAA_TS
-+			if (priv->ts_rx_en)
-+				dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr);
-+#endif /* CONFIG_FSL_DPAA_TS */
-+
-+			/* In the case of a SG frame, FMan stores the Internal
-+			 * Context in the buffer containing the sgt.
-+			 * Inspect the parse results before anything else.
-+			 */
-+			parse_results = (const fm_prs_result_t *)(vaddr +
-+						DPA_RX_PRIV_DATA_SIZE);
-+			_dpa_process_parse_results(parse_results, fd, skb,
-+					use_gro);
-+
-+			/* Make sure forwarded skbs will have enough space
-+			 * on Tx, if extra headers are added.
-+			 */
-+			DPA_BUG_ON(fd_off != priv->rx_headroom);
-+			skb_reserve(skb, fd_off);
-+			skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
-+		} else {
-+			/* Not the first S/G entry; all data from buffer will
-+			 * be added in an skb fragment; fragment index is offset
-+			 * by one since first S/G entry was incorporated in the
-+			 * linear part of the skb.
-+			 *
-+			 * Caution: 'page' may be a tail page.
-+			 */
-+			DPA_READ_SKB_PTR(skb_tmp, skbh, sg_vaddr, -1);
-+			page = virt_to_page(sg_vaddr);
-+			head_page = virt_to_head_page(sg_vaddr);
-+
-+			/* Free (only) the skbuff shell because its data buffer
-+			 * is already a frag in the main skb.
-+			 */
-+			get_page(head_page);
-+			dev_kfree_skb(skb_tmp);
-+
-+			/* Compute offset in (possibly tail) page */
-+			page_offset = ((unsigned long)sg_vaddr &
-+					(PAGE_SIZE - 1)) +
-+				(page_address(page) - page_address(head_page));
-+			/* page_offset only refers to the beginning of sgt[i];
-+			 * but the buffer itself may have an internal offset.
-+			 */
-+			frag_offset = qm_sg_entry_get_offset(&sgt[i]) +
-+					page_offset;
-+			frag_len = qm_sg_entry_get_len(&sgt[i]);
-+			/* skb_add_rx_frag() does no checking on the page; if
-+			 * we pass it a tail page, we'll end up with
-+			 * bad page accounting and eventually with segfaults.
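-+			 * That is why head_page is passed instead, with
-+			 * frag_offset adjusted to account for the buffer's
-+			 * position inside the compound page.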
-+ */ -+ skb_add_rx_frag(skb, i - 1, head_page, frag_offset, -+ frag_len, dpa_bp->size); -+ } -+ /* Update the pool count for the current {cpu x bpool} */ -+ (*count_ptr)--; -+ -+ if (qm_sg_entry_get_final(&sgt[i])) -+ break; -+ } -+ WARN_ONCE(i == DPA_SGT_MAX_ENTRIES, "No final bit on SGT\n"); -+ -+ /* recycle the SGT fragment */ -+ DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid)); -+ dpa_bp_recycle_frag(dpa_bp, (unsigned long)vaddr, count_ptr); -+ return skb; -+} -+ -+#ifdef CONFIG_FSL_DPAA_DBG_LOOP -+static inline int dpa_skb_loop(const struct dpa_priv_s *priv, -+ struct sk_buff *skb) -+{ -+ if (unlikely(priv->loop_to < 0)) -+ return 0; /* loop disabled by default */ -+ -+ skb_push(skb, ETH_HLEN); /* compensate for eth_type_trans */ -+ dpa_tx(skb, dpa_loop_netdevs[priv->loop_to]); -+ -+ return 1; /* Frame Tx on the selected interface */ -+} -+#endif -+ -+void __hot _dpa_rx(struct net_device *net_dev, -+ struct qman_portal *portal, -+ const struct dpa_priv_s *priv, -+ struct dpa_percpu_priv_s *percpu_priv, -+ const struct qm_fd *fd, -+ u32 fqid, -+ int *count_ptr) -+{ -+ struct dpa_bp *dpa_bp; -+ struct sk_buff *skb; -+ dma_addr_t addr = qm_fd_addr(fd); -+ u32 fd_status = fd->status; -+ unsigned int skb_len; -+ struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats; -+ int use_gro = net_dev->features & NETIF_F_GRO; -+ -+ if (unlikely((fd_status & FM_FD_STAT_RX_ERRORS) != 0)) { -+ if (netif_msg_hw(priv) && net_ratelimit()) -+ netdev_warn(net_dev, "FD status = 0x%08x\n", -+ fd_status & FM_FD_STAT_RX_ERRORS); -+ -+ percpu_stats->rx_errors++; -+ goto _release_frame; -+ } -+ -+ dpa_bp = priv->dpa_bp; -+ DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid)); -+ -+ /* prefetch the first 64 bytes of the frame or the SGT start */ -+ dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL); -+ prefetch(phys_to_virt(addr) + dpa_fd_offset(fd)); -+ -+ /* The only FD types that we may receive are contig and S/G */ -+ DPA_BUG_ON((fd->format != qm_fd_contig) && (fd->format != qm_fd_sg)); -+ -+ if (likely(fd->format == qm_fd_contig)) { -+#ifdef CONFIG_FSL_DPAA_HOOKS -+ /* Execute the Rx processing hook, if it exists. */ -+ if (dpaa_eth_hooks.rx_default && -+ dpaa_eth_hooks.rx_default((void *)fd, net_dev, -+ fqid) == DPAA_ETH_STOLEN) { -+ /* won't count the rx bytes in */ -+ return; -+ } -+#endif -+ skb = contig_fd_to_skb(priv, fd, &use_gro); -+ } else { -+ skb = sg_fd_to_skb(priv, fd, &use_gro, count_ptr); -+ percpu_priv->rx_sg++; -+ } -+ -+ /* Account for either the contig buffer or the SGT buffer (depending on -+ * which case we were in) having been removed from the pool.
-+ */ -+ (*count_ptr)--; -+ skb->protocol = eth_type_trans(skb, net_dev); -+ -+ /* IP Reassembled frames are allowed to be larger than MTU */ -+ if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) && -+ !(fd_status & FM_FD_IPR))) { -+ percpu_stats->rx_dropped++; -+ goto drop_bad_frame; -+ } -+ -+ skb_len = skb->len; -+ -+#ifdef CONFIG_FSL_DPAA_DBG_LOOP -+ if (dpa_skb_loop(priv, skb)) { -+ percpu_stats->rx_packets++; -+ percpu_stats->rx_bytes += skb_len; -+ return; -+ } -+#endif -+ -+ if (use_gro) { -+ gro_result_t gro_result; -+ const struct qman_portal_config *pc = -+ qman_p_get_portal_config(portal); -+ struct dpa_napi_portal *np = &percpu_priv->np[pc->index]; -+ -+ np->p = portal; -+ gro_result = napi_gro_receive(&np->napi, skb); -+ /* If the frame is dropped by the stack, the rx_dropped counter -+ * is incremented automatically, so no need for us to update it -+ */ -+ if (unlikely(gro_result == GRO_DROP)) -+ goto packet_dropped; -+ } else if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) -+ goto packet_dropped; -+ -+ percpu_stats->rx_packets++; -+ percpu_stats->rx_bytes += skb_len; -+ -+packet_dropped: -+ return; -+ -+drop_bad_frame: -+ dev_kfree_skb(skb); -+ return; -+ -+_release_frame: -+ dpa_fd_release(net_dev, fd); -+} -+ -+int __hot skb_to_contig_fd(struct dpa_priv_s *priv, -+ struct sk_buff *skb, struct qm_fd *fd, -+ int *count_ptr, int *offset) -+{ -+ struct sk_buff **skbh; -+ dma_addr_t addr; -+ struct dpa_bp *dpa_bp = priv->dpa_bp; -+ struct net_device *net_dev = priv->net_dev; -+ int err; -+ enum dma_data_direction dma_dir; -+ unsigned char *buffer_start; -+ -+#ifndef CONFIG_FSL_DPAA_TS -+ /* Check the recycling conditions, but only if timestamp support is not -+ * enabled; otherwise we need the fd back on tx confirmation -+ */ -+ -+ /* We can recycle the buffer if: -+ * - the pool is not full -+ * - the buffer meets the skb recycling conditions -+ * - the buffer meets our own (size, offset, align) conditions -+ */ -+ if (likely((*count_ptr < dpa_bp->target_count) && -+ dpa_skb_is_recyclable(skb) && -+ dpa_buf_is_recyclable(skb, dpa_bp->size, -+ priv->tx_headroom, &buffer_start))) { -+ /* Buffer is recyclable; use the new start address -+ * and set fd parameters and DMA mapping direction -+ */ -+ fd->bpid = dpa_bp->bpid; -+ DPA_BUG_ON(skb->data - buffer_start > DPA_MAX_FD_OFFSET); -+ fd->offset = (uint16_t)(skb->data - buffer_start); -+ dma_dir = DMA_BIDIRECTIONAL; -+ -+ DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, -1); -+ *offset = skb_headroom(skb) - fd->offset; -+ } else -+#endif -+ { -+ /* Not recyclable. -+ * We are guaranteed to have at least tx_headroom bytes -+ * available, so just use that for offset. -+ */ -+ fd->bpid = 0xff; -+ buffer_start = skb->data - priv->tx_headroom; -+ fd->offset = priv->tx_headroom; -+ dma_dir = DMA_TO_DEVICE; -+ -+ /* The buffer will be Tx-confirmed, and the TxConf cb must be -+ * able to retrieve the skbuff from our Tx private data, so -+ * store the skb back-pointer at the very start of the buffer -+ * (offset 0, unlike the recyclable case above). -+ */ -+ DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0); -+ } -+ -+ /* Enable L3/L4 hardware checksum computation. -+ * -+ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may -+ * need to write into the skb.
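-+ * (dpa_enable_tx_csum() writes the parse results at skbh + -+ * DPA_TX_PRIV_DATA_SIZE, inside the region mapped below.)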
-+ */ -+ err = dpa_enable_tx_csum(priv, skb, fd, -+ ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE); -+ if (unlikely(err < 0)) { -+ if (netif_msg_tx_err(priv) && net_ratelimit()) -+ netdev_err(net_dev, "HW csum error: %d\n", err); -+ return err; -+ } -+ -+ /* Fill in the rest of the FD fields */ -+ fd->format = qm_fd_contig; -+ fd->length20 = skb->len; -+ fd->cmd |= FM_FD_CMD_FCO; -+ -+ /* Map the entire buffer size that may be seen by FMan, but no more */ -+ addr = dma_map_single(dpa_bp->dev, skbh, -+ skb_tail_pointer(skb) - buffer_start, dma_dir); -+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) { -+ if (netif_msg_tx_err(priv) && net_ratelimit()) -+ netdev_err(net_dev, "dma_map_single() failed\n"); -+ return -EINVAL; -+ } -+ fd->addr = addr; -+ -+ return 0; -+} -+EXPORT_SYMBOL(skb_to_contig_fd); -+ -+int __hot skb_to_sg_fd(struct dpa_priv_s *priv, -+ struct sk_buff *skb, struct qm_fd *fd) -+{ -+ struct dpa_bp *dpa_bp = priv->dpa_bp; -+ dma_addr_t addr; -+ dma_addr_t sg_addr; -+ struct sk_buff **skbh; -+ struct net_device *net_dev = priv->net_dev; -+ int sg_len; -+ int err; -+#ifdef DPAA_LS1043A_DMA_4K_ISSUE -+ unsigned long boundary; -+ int k; -+#endif -+ -+ struct qm_sg_entry *sgt; -+ void *sgt_buf; -+ void *buffer_start; -+ skb_frag_t *frag; -+ int i, j; -+ const enum dma_data_direction dma_dir = DMA_TO_DEVICE; -+ const int nr_frags = skb_shinfo(skb)->nr_frags; -+ -+ fd->format = qm_fd_sg; -+#ifdef DPAA_LS1043A_DMA_4K_ISSUE -+ /* get a page frag to store the SGTable */ -+ sgt_buf = netdev_alloc_frag(priv->tx_headroom + -+ sizeof(struct qm_sg_entry) * DPA_SGT_MAX_ENTRIES); -+ if (unlikely(!sgt_buf)) { -+ dev_err(dpa_bp->dev, "netdev_alloc_frag() failed\n"); -+ return -ENOMEM; -+ } -+ -+ /* netdev_alloc_frag() does not zero the allocated memory */ -+ memset(sgt_buf, 0, priv->tx_headroom + -+ sizeof(struct qm_sg_entry) * DPA_SGT_MAX_ENTRIES); -+#else -+ /* get a page frag to store the SGTable */ -+ sgt_buf = netdev_alloc_frag(priv->tx_headroom + -+ sizeof(struct qm_sg_entry) * (1 + nr_frags)); -+ if (unlikely(!sgt_buf)) { -+ dev_err(dpa_bp->dev, "netdev_alloc_frag() failed\n"); -+ return -ENOMEM; -+ } -+ -+ memset(sgt_buf, 0, priv->tx_headroom + -+ sizeof(struct qm_sg_entry) * (1 + nr_frags)); -+#endif -+ -+ /* Enable L3/L4 hardware checksum computation. -+ * -+ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may -+ * need to write into the skb.
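-+ * (Here the parse results are written into the SGT buffer itself, at -+ * DPA_TX_PRIV_DATA_SIZE bytes in, hence the sgt_buf + -+ * DPA_TX_PRIV_DATA_SIZE destination below.)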
-+ */ -+ err = dpa_enable_tx_csum(priv, skb, fd, -+ sgt_buf + DPA_TX_PRIV_DATA_SIZE); -+ if (unlikely(err < 0)) { -+ if (netif_msg_tx_err(priv) && net_ratelimit()) -+ netdev_err(net_dev, "HW csum error: %d\n", err); -+ goto csum_failed; -+ } -+ -+ sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom); -+ sg_len = skb_headlen(skb); -+ qm_sg_entry_set_bpid(&sgt[0], 0xff); -+ qm_sg_entry_set_offset(&sgt[0], 0); -+ qm_sg_entry_set_len(&sgt[0], sg_len); -+ qm_sg_entry_set_ext(&sgt[0], 0); -+ qm_sg_entry_set_final(&sgt[0], 0); -+ -+ addr = dma_map_single(dpa_bp->dev, skb->data, sg_len, dma_dir); -+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) { -+ dev_err(dpa_bp->dev, "DMA mapping failed"); -+ err = -EINVAL; -+ goto sg0_map_failed; -+ } -+ -+ qm_sg_entry_set64(&sgt[0], addr); -+ -+#ifdef DPAA_LS1043A_DMA_4K_ISSUE -+ j = 0; -+ if (unlikely(HAS_DMA_ISSUE(skb->data, sg_len))) { -+ boundary = BOUNDARY_4K(skb->data, sg_len); -+ qm_sg_entry_set_len(&sgt[j], boundary - -+ (unsigned long)skb->data); -+ -+ j++; -+ qm_sg_entry_set_bpid(&sgt[j], 0xff); -+ qm_sg_entry_set_offset(&sgt[j], 0); -+ qm_sg_entry_set_len(&sgt[j], -+ ((unsigned long)skb->data + (unsigned long)sg_len) - -+ boundary); -+ qm_sg_entry_set_ext(&sgt[j], 0); -+ qm_sg_entry_set_final(&sgt[j], 0); -+ -+ /* keep the offset in the address */ -+ qm_sg_entry_set64(&sgt[j], addr + -+ (boundary - -+ (unsigned long)skb->data)); -+ } -+ j++; -+ -+ /* populate the rest of SGT entries */ -+ for (i = 1; i <= nr_frags; i++, j++) { -+ frag = &skb_shinfo(skb)->frags[i - 1]; -+ qm_sg_entry_set_bpid(&sgt[j], 0xff); -+ qm_sg_entry_set_offset(&sgt[j], 0); -+ qm_sg_entry_set_len(&sgt[j], frag->size); -+ qm_sg_entry_set_ext(&sgt[j], 0); -+ -+ DPA_BUG_ON(!skb_frag_page(frag)); -+ addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, frag->size, -+ dma_dir); -+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) { -+ dev_err(dpa_bp->dev, "DMA mapping failed"); -+ err = -EINVAL; -+ goto sg_map_failed; -+ } -+ -+ /* keep the offset in the address */ -+ qm_sg_entry_set64(&sgt[j], addr); -+ -+ /* Check the mapped DMA address (not the frag descriptor -+ * pointer) against the 4K boundary. -+ */ -+ if (unlikely(HAS_DMA_ISSUE(addr, frag->size))) { -+ boundary = BOUNDARY_4K(addr, frag->size); -+ qm_sg_entry_set_len(&sgt[j], boundary - -+ (unsigned long)addr); -+ -+ j++; -+ qm_sg_entry_set_bpid(&sgt[j], 0xff); -+ qm_sg_entry_set_offset(&sgt[j], 0); -+ qm_sg_entry_set_len(&sgt[j], -+ ((unsigned long)frag->size - -+ (boundary - (unsigned long)addr))); -+ qm_sg_entry_set_ext(&sgt[j], 0); -+ -+ /* keep the offset in the address */ -+ qm_sg_entry_set64(&sgt[j], addr + -+ (boundary - (unsigned long)addr)); -+ } -+ -+ if (i == nr_frags) -+ qm_sg_entry_set_final(&sgt[j], 1); -+ else -+ qm_sg_entry_set_final(&sgt[j], 0); -+#else -+ -+ /* populate the rest of SGT entries */ -+ for (i = 1; i <= nr_frags; i++) { -+ frag = &skb_shinfo(skb)->frags[i - 1]; -+ qm_sg_entry_set_bpid(&sgt[i], 0xff); -+ qm_sg_entry_set_offset(&sgt[i], 0); -+ qm_sg_entry_set_len(&sgt[i], frag->size); -+ qm_sg_entry_set_ext(&sgt[i], 0); -+ -+ if (i == nr_frags) -+ qm_sg_entry_set_final(&sgt[i], 1); -+ else -+ qm_sg_entry_set_final(&sgt[i], 0); -+ -+ DPA_BUG_ON(!skb_frag_page(frag)); -+ addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, frag->size, -+ dma_dir); -+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) { -+ dev_err(dpa_bp->dev, "DMA mapping failed"); -+ err = -EINVAL; -+ goto sg_map_failed; -+ } -+ -+ /* keep the offset in the address */ -+ qm_sg_entry_set64(&sgt[i], addr); -+#endif -+ } -+ -+ fd->length20 = skb->len; -+ fd->offset = priv->tx_headroom; -+ -+ /* DMA map the SGT page */ -+ 
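/* The mapping must cover the Tx headroom (private data plus parse -+ * results) as well as the SGT entries themselves. -+ */ -+ 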
buffer_start = (void *)sgt - priv->tx_headroom; -+ DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0); -+#ifdef DPAA_LS1043A_DMA_4K_ISSUE -+ addr = dma_map_single(dpa_bp->dev, buffer_start, priv->tx_headroom + -+ sizeof(struct qm_sg_entry) * DPA_SGT_MAX_ENTRIES, -+ dma_dir); -+#else -+ addr = dma_map_single(dpa_bp->dev, buffer_start, priv->tx_headroom + -+ sizeof(struct qm_sg_entry) * (1 + nr_frags), -+ dma_dir); -+#endif -+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) { -+ dev_err(dpa_bp->dev, "DMA mapping failed"); -+ err = -EINVAL; -+ goto sgt_map_failed; -+ } -+ -+ fd->bpid = 0xff; -+ fd->cmd |= FM_FD_CMD_FCO; -+ fd->addr = addr; -+ -+ return 0; -+ -+sgt_map_failed: -+sg_map_failed: -+#ifdef DPAA_LS1043A_DMA_4K_ISSUE -+ for (k = 0; k < j; k++) { -+ sg_addr = qm_sg_addr(&sgt[k]); -+ dma_unmap_page(dpa_bp->dev, sg_addr, -+ qm_sg_entry_get_len(&sgt[k]), dma_dir); -+ } -+#else -+ for (j = 0; j < i; j++) { -+ sg_addr = qm_sg_addr(&sgt[j]); -+ dma_unmap_page(dpa_bp->dev, sg_addr, -+ qm_sg_entry_get_len(&sgt[j]), dma_dir); -+ } -+#endif -+sg0_map_failed: -+csum_failed: -+ put_page(virt_to_head_page(sgt_buf)); -+ -+ return err; -+} -+EXPORT_SYMBOL(skb_to_sg_fd); -+ -+int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev) -+{ -+ struct dpa_priv_s *priv; -+ const int queue_mapping = dpa_get_queue_mapping(skb); -+ struct qman_fq *egress_fq, *conf_fq; -+ -+#ifdef CONFIG_FSL_DPAA_HOOKS -+ /* If there is a Tx hook, run it. */ -+ if (dpaa_eth_hooks.tx && -+ dpaa_eth_hooks.tx(skb, net_dev) == DPAA_ETH_STOLEN) -+ /* won't update any Tx stats */ -+ return NETDEV_TX_OK; -+#endif -+ -+ priv = netdev_priv(net_dev); -+ -+#ifdef CONFIG_FSL_DPAA_CEETM -+ if (priv->ceetm_en) -+ return ceetm_tx(skb, net_dev); -+#endif -+ -+ egress_fq = priv->egress_fqs[queue_mapping]; -+ conf_fq = priv->conf_fqs[queue_mapping]; -+ -+ return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq); -+} -+ -+int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev, -+ struct qman_fq *egress_fq, struct qman_fq *conf_fq) -+{ -+ struct dpa_priv_s *priv; -+ struct qm_fd fd; -+ struct dpa_percpu_priv_s *percpu_priv; -+ struct rtnl_link_stats64 *percpu_stats; -+ int err = 0; -+ const bool nonlinear = skb_is_nonlinear(skb); -+ int *countptr, offset = 0; -+ -+ priv = netdev_priv(net_dev); -+ /* Non-migratable context, safe to use raw_cpu_ptr */ -+ percpu_priv = raw_cpu_ptr(priv->percpu_priv); -+ percpu_stats = &percpu_priv->stats; -+ countptr = raw_cpu_ptr(priv->dpa_bp->percpu_count); -+ -+ clear_fd(&fd); -+ -+#ifdef CONFIG_FSL_DPAA_1588 -+ if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_tx_en_ioctl) -+ fd.cmd |= FM_FD_CMD_UPD; -+#endif -+#ifdef CONFIG_FSL_DPAA_TS -+ if (unlikely(priv->ts_tx_en && -+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) -+ fd.cmd |= FM_FD_CMD_UPD; -+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; -+#endif /* CONFIG_FSL_DPAA_TS */ -+ -+ /* MAX_SKB_FRAGS is larger than our DPA_SGT_MAX_ENTRIES; make sure -+ * we don't feed FMan with more fragments than it supports. -+ * Btw, we're using the first sgt entry to store the linear part of -+ * the skb, so we're one extra frag short. -+ */ -+ if (nonlinear && -+ likely(skb_shinfo(skb)->nr_frags < DPA_SGT_ENTRIES_THRESHOLD)) { -+ /* Just create a S/G fd based on the skb */ -+ err = skb_to_sg_fd(priv, skb, &fd); -+ percpu_priv->tx_frag_skbuffs++; -+ } else { -+ /* Make sure we have enough headroom to accommodate private -+ * data, parse results, etc. Normally this shouldn't happen if -+ * we're here via the standard kernel stack. 
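-+ * (If it does happen, skb_realloc_headroom() below fixes it up at the -+ * cost of a copy.)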
-+ */ -+ if (unlikely(skb_headroom(skb) < priv->tx_headroom)) { -+ struct sk_buff *skb_new; -+ -+ skb_new = skb_realloc_headroom(skb, priv->tx_headroom); -+ if (unlikely(!skb_new)) { -+ dev_kfree_skb(skb); -+ percpu_stats->tx_errors++; -+ return NETDEV_TX_OK; -+ } -+ dev_kfree_skb(skb); -+ skb = skb_new; -+ } -+ -+ /* We're going to store the skb backpointer at the beginning -+ * of the data buffer, so we need a privately owned skb -+ */ -+ -+ /* Code borrowed from skb_unshare(). */ -+ if (skb_cloned(skb)) { -+ struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); -+ kfree_skb(skb); -+ skb = nskb; -+ /* skb_copy() has now linearized the skbuff. */ -+ } else if (unlikely(nonlinear)) { -+ /* We are here because the egress skb contains -+ * more fragments than we support. In this case, -+ * we have no choice but to linearize it ourselves. -+ */ -+ err = __skb_linearize(skb); -+ } -+ if (unlikely(!skb || err < 0)) -+ /* Common out-of-memory error path */ -+ goto enomem; -+ -+#ifdef DPAA_LS1043A_DMA_4K_ISSUE -+ if (unlikely(HAS_DMA_ISSUE(skb->data, skb->len))) { -+ err = skb_to_sg_fd(priv, skb, &fd); -+ percpu_priv->tx_frag_skbuffs++; -+ } else { -+ err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset); -+ } -+#else -+ /* Finally, create a contig FD from this skb */ -+ err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset); -+#endif -+ } -+ if (unlikely(err < 0)) -+ goto skb_to_fd_failed; -+ -+ if (fd.bpid != 0xff) { -+ skb_recycle(skb); -+ /* skb_recycle() reserves NET_SKB_PAD as skb headroom, -+ * but we need the skb to look as if returned by build_skb(). -+ * We need to manually adjust the tailptr as well. -+ */ -+ skb->data = skb->head + offset; -+ skb_reset_tail_pointer(skb); -+ -+ (*countptr)++; -+ percpu_priv->tx_returned++; -+ } -+ -+ if (unlikely(dpa_xmit(priv, percpu_stats, &fd, egress_fq, conf_fq) < 0)) -+ goto xmit_failed; -+ -+ netif_trans_update(net_dev); -+ return NETDEV_TX_OK; -+ -+xmit_failed: -+ if (fd.bpid != 0xff) { -+ (*countptr)--; -+ percpu_priv->tx_returned--; -+ dpa_fd_release(net_dev, &fd); -+ percpu_stats->tx_errors++; -+ return NETDEV_TX_OK; -+ } -+ _dpa_cleanup_tx_fd(priv, &fd); -+skb_to_fd_failed: -+enomem: -+ percpu_stats->tx_errors++; -+ dev_kfree_skb(skb); -+ return NETDEV_TX_OK; -+} -+EXPORT_SYMBOL(dpa_tx_extended); ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_shared.c -@@ -0,0 +1,914 @@ -+/* Copyright 2008-2013 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version.
-+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG -+#define pr_fmt(fmt) \ -+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \ -+ KBUILD_BASENAME".c", __LINE__, __func__ -+#else -+#define pr_fmt(fmt) \ -+ KBUILD_MODNAME ": " fmt -+#endif -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "dpaa_eth.h" -+#include "dpaa_eth_common.h" -+#include "dpaa_eth_base.h" -+#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */ -+#include "mac.h" -+ -+/* forward declarations */ -+static enum qman_cb_dqrr_result __hot -+shared_rx_dqrr(struct qman_portal *portal, struct qman_fq *fq, -+ const struct qm_dqrr_entry *dq); -+static enum qman_cb_dqrr_result __hot -+shared_tx_default_dqrr(struct qman_portal *portal, -+ struct qman_fq *fq, -+ const struct qm_dqrr_entry *dq); -+static enum qman_cb_dqrr_result -+shared_tx_error_dqrr(struct qman_portal *portal, -+ struct qman_fq *fq, -+ const struct qm_dqrr_entry *dq); -+static void shared_ern(struct qman_portal *portal, -+ struct qman_fq *fq, -+ const struct qm_mr_entry *msg); -+ -+#define DPA_DESCRIPTION "FSL DPAA Shared Ethernet driver" -+ -+MODULE_LICENSE("Dual BSD/GPL"); -+ -+MODULE_DESCRIPTION(DPA_DESCRIPTION); -+ -+/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. 
*/ -+static uint16_t shared_tx_timeout = 1000; -+module_param(shared_tx_timeout, ushort, S_IRUGO); -+MODULE_PARM_DESC(shared_tx_timeout, "The Tx timeout in ms"); -+ -+static const struct of_device_id dpa_shared_match[]; -+ -+static const struct net_device_ops dpa_shared_ops = { -+ .ndo_open = dpa_start, -+ .ndo_start_xmit = dpa_shared_tx, -+ .ndo_stop = dpa_stop, -+ .ndo_tx_timeout = dpa_timeout, -+ .ndo_get_stats64 = dpa_get_stats64, -+ .ndo_set_mac_address = dpa_set_mac_address, -+ .ndo_validate_addr = eth_validate_addr, -+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE -+ .ndo_select_queue = dpa_select_queue, -+#endif -+ .ndo_change_mtu = dpa_change_mtu, -+ .ndo_set_rx_mode = dpa_set_rx_mode, -+ .ndo_init = dpa_ndo_init, -+ .ndo_set_features = dpa_set_features, -+ .ndo_fix_features = dpa_fix_features, -+ .ndo_do_ioctl = dpa_ioctl, -+}; -+ -+const struct dpa_fq_cbs_t shared_fq_cbs = { -+ .rx_defq = { .cb = { .dqrr = shared_rx_dqrr } }, -+ .tx_defq = { .cb = { .dqrr = shared_tx_default_dqrr } }, -+ .rx_errq = { .cb = { .dqrr = shared_rx_dqrr } }, -+ .tx_errq = { .cb = { .dqrr = shared_tx_error_dqrr } }, -+ .egress_ern = { .cb = { .ern = shared_ern } } -+}; -+EXPORT_SYMBOL(shared_fq_cbs); -+ -+static inline void * __must_check __attribute__((nonnull)) -+dpa_phys2virt(const struct dpa_bp *dpa_bp, dma_addr_t addr) -+{ -+ return dpa_bp->vaddr + (addr - dpa_bp->paddr); -+} -+ -+static struct dpa_bp *dpa_size2pool(struct dpa_priv_s *priv, size_t size) -+{ -+ int i; -+ -+ for (i = 0; i < priv->bp_count; i++) -+ if ((size + priv->tx_headroom) <= priv->dpa_bp[i].size) -+ return dpa_bpid2pool(priv->dpa_bp[i].bpid); -+ return ERR_PTR(-ENODEV); -+} -+ -+/* Copy to a memory region that requires kmapping from a linear buffer, -+ * taking into account page boundaries in the destination -+ */ -+static void -+copy_to_unmapped_area(dma_addr_t phys_start, void *src, size_t buf_size) -+{ -+ struct page *page; -+ size_t size, offset; -+ void *page_vaddr; -+ -+ while (buf_size > 0) { -+ offset = offset_in_page(phys_start); -+ size = (offset + buf_size > PAGE_SIZE) ? -+ PAGE_SIZE - offset : buf_size; -+ -+ page = pfn_to_page(phys_start >> PAGE_SHIFT); -+ page_vaddr = kmap_atomic(page); -+ -+ memcpy(page_vaddr + offset, src, size); -+ -+ kunmap_atomic(page_vaddr); -+ -+ phys_start += size; -+ src += size; -+ buf_size -= size; -+ } -+} -+ -+/* Copy from a memory region that requires kmapping to a linear buffer, -+ * taking into account page boundaries in the source -+ */ -+static void -+copy_from_unmapped_area(void *dest, dma_addr_t phys_start, size_t buf_size) -+{ -+ struct page *page; -+ size_t size, offset; -+ void *page_vaddr; -+ -+ while (buf_size > 0) { -+ offset = offset_in_page(phys_start); -+ size = (offset + buf_size > PAGE_SIZE) ? 
-+ PAGE_SIZE - offset : buf_size; -+ -+ page = pfn_to_page(phys_start >> PAGE_SHIFT); -+ page_vaddr = kmap_atomic(page); -+ -+ memcpy(dest, page_vaddr + offset, size); -+ -+ kunmap_atomic(page_vaddr); -+ -+ phys_start += size; -+ dest += size; -+ buf_size -= size; -+ } -+} -+ -+static void -+dpa_fd_release_sg(const struct net_device *net_dev, -+ const struct qm_fd *fd) -+{ -+ const struct dpa_priv_s *priv; -+ struct qm_sg_entry *sgt; -+ struct dpa_bp *_dpa_bp; -+ struct bm_buffer _bmb; -+ -+ priv = netdev_priv(net_dev); -+ -+ _bmb.hi = fd->addr_hi; -+ _bmb.lo = fd->addr_lo; -+ -+ _dpa_bp = dpa_bpid2pool(fd->bpid); -+ BUG_ON(!_dpa_bp); -+ -+ if (_dpa_bp->vaddr) { -+ sgt = dpa_phys2virt(_dpa_bp, bm_buf_addr(&_bmb)) + -+ dpa_fd_offset(fd); -+ dpa_release_sgt(sgt); -+ } else { -+ sgt = kmalloc(DPA_SGT_MAX_ENTRIES * sizeof(*sgt), GFP_ATOMIC); -+ if (sgt == NULL) { -+ if (netif_msg_tx_err(priv) && net_ratelimit()) -+ netdev_err(net_dev, -+ "Memory allocation failed\n"); -+ return; -+ } -+ -+ copy_from_unmapped_area(sgt, bm_buf_addr(&_bmb) + -+ dpa_fd_offset(fd), -+ min(DPA_SGT_MAX_ENTRIES * sizeof(*sgt), -+ _dpa_bp->size)); -+ dpa_release_sgt(sgt); -+ kfree(sgt); -+ } -+ -+ while (bman_release(_dpa_bp->pool, &_bmb, 1, 0)) -+ cpu_relax(); -+} -+ -+static enum qman_cb_dqrr_result __hot -+shared_rx_dqrr(struct qman_portal *portal, struct qman_fq *fq, -+ const struct qm_dqrr_entry *dq) -+{ -+ struct net_device *net_dev; -+ struct dpa_priv_s *priv; -+ struct dpa_percpu_priv_s *percpu_priv; -+ const struct qm_fd *fd = &dq->fd; -+ struct dpa_bp *dpa_bp; -+ struct sk_buff *skb; -+ struct qm_sg_entry *sgt; -+ int i; -+ void *frag_addr; -+ u32 frag_length; -+ u32 offset; -+ -+ net_dev = ((struct dpa_fq *)fq)->net_dev; -+ priv = netdev_priv(net_dev); -+ -+ percpu_priv = raw_cpu_ptr(priv->percpu_priv); -+ -+ dpa_bp = dpa_bpid2pool(fd->bpid); -+ BUG_ON(!dpa_bp); -+ -+ if (unlikely((fd->status & FM_FD_STAT_RX_ERRORS) != 0)) { -+ if (netif_msg_hw(priv) && net_ratelimit()) -+ netdev_warn(net_dev, "FD status = 0x%08x\n", -+ fd->status & FM_FD_STAT_RX_ERRORS); -+ -+ percpu_priv->stats.rx_errors++; -+ -+ goto out; -+ } -+ -+ skb = __netdev_alloc_skb(net_dev, -+ priv->tx_headroom + dpa_fd_length(fd), -+ GFP_ATOMIC); -+ if (unlikely(skb == NULL)) { -+ if (netif_msg_rx_err(priv) && net_ratelimit()) -+ netdev_err(net_dev, "Could not alloc skb\n"); -+ -+ percpu_priv->stats.rx_dropped++; -+ -+ goto out; -+ } -+ -+ skb_reserve(skb, priv->tx_headroom); -+ -+ if (fd->format == qm_fd_sg) { -+ if (dpa_bp->vaddr) { -+ sgt = dpa_phys2virt(dpa_bp, -+ qm_fd_addr(fd)) + dpa_fd_offset(fd); -+ -+ for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) { -+ offset = qm_sg_entry_get_offset(&sgt[i]); -+ frag_addr = dpa_phys2virt(dpa_bp, -+ qm_sg_addr(&sgt[i]) + -+ offset); -+ DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i])); -+ frag_length = qm_sg_entry_get_len(&sgt[i]); -+ -+ /* copy from sgt[i] */ -+ memcpy(skb_put(skb, frag_length), frag_addr, -+ frag_length); -+ if (qm_sg_entry_get_final(&sgt[i])) -+ break; -+ } -+ } else { -+ sgt = kmalloc(DPA_SGT_MAX_ENTRIES * sizeof(*sgt), -+ GFP_ATOMIC); -+ if (unlikely(sgt == NULL)) { -+ if (netif_msg_rx_err(priv) && net_ratelimit()) -+ netdev_err(net_dev, -+ "Memory allocation failed\n"); -+ /* Drop the frame; the dqrr callback cannot -+ * return an errno to QMan. -+ */ -+ percpu_priv->stats.rx_dropped++; -+ dev_kfree_skb_any(skb); -+ goto out; -+ } -+ -+ copy_from_unmapped_area(sgt, -+ qm_fd_addr(fd) + dpa_fd_offset(fd), -+ min(DPA_SGT_MAX_ENTRIES * sizeof(*sgt), -+ dpa_bp->size)); -+ -+ for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) { -+ DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i])); -+ frag_length = qm_sg_entry_get_len(&sgt[i]); -+ copy_from_unmapped_area( -+ 
skb_put(skb, frag_length), -+ qm_sg_addr(&sgt[i]) + -+ qm_sg_entry_get_offset(&sgt[i]), -+ frag_length); -+ -+ if (qm_sg_entry_get_final(&sgt[i])) -+ break; -+ } -+ -+ kfree(sgt); -+ } -+ goto skb_copied; -+ } -+ -+ /* otherwise fd->format == qm_fd_contig */ -+ if (dpa_bp->vaddr) { -+ /* Fill the SKB */ -+ memcpy(skb_put(skb, dpa_fd_length(fd)), -+ dpa_phys2virt(dpa_bp, qm_fd_addr(fd)) + -+ dpa_fd_offset(fd), dpa_fd_length(fd)); -+ } else { -+ copy_from_unmapped_area(skb_put(skb, dpa_fd_length(fd)), -+ qm_fd_addr(fd) + dpa_fd_offset(fd), -+ dpa_fd_length(fd)); -+ } -+ -+skb_copied: -+ skb->protocol = eth_type_trans(skb, net_dev); -+ -+ /* IP Reassembled frames are allowed to be larger than MTU */ -+ if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) && -+ !(fd->status & FM_FD_IPR))) { -+ percpu_priv->stats.rx_dropped++; -+ dev_kfree_skb_any(skb); -+ goto out; -+ } -+ -+ if (likely(netif_rx(skb) == NET_RX_SUCCESS)) { -+ percpu_priv->stats.rx_packets++; -+ percpu_priv->stats.rx_bytes += dpa_fd_length(fd); -+ } -+ -+out: -+ if (fd->format == qm_fd_sg) -+ dpa_fd_release_sg(net_dev, fd); -+ else -+ dpa_fd_release(net_dev, fd); -+ -+ return qman_cb_dqrr_consume; -+} -+ -+static enum qman_cb_dqrr_result -+shared_tx_error_dqrr(struct qman_portal *portal, -+ struct qman_fq *fq, -+ const struct qm_dqrr_entry *dq) -+{ -+ struct net_device *net_dev; -+ struct dpa_priv_s *priv; -+ struct dpa_percpu_priv_s *percpu_priv; -+ struct dpa_bp *dpa_bp; -+ const struct qm_fd *fd = &dq->fd; -+ -+ net_dev = ((struct dpa_fq *)fq)->net_dev; -+ priv = netdev_priv(net_dev); -+ -+ dpa_bp = dpa_bpid2pool(fd->bpid); -+ BUG_ON(!dpa_bp); -+ -+ percpu_priv = raw_cpu_ptr(priv->percpu_priv); -+ -+ if (netif_msg_hw(priv) && net_ratelimit()) -+ netdev_warn(net_dev, "FD status = 0x%08x\n", -+ fd->status & FM_FD_STAT_TX_ERRORS); -+ -+ if ((fd->format == qm_fd_sg) && (!dpa_bp->vaddr)) -+ dpa_fd_release_sg(net_dev, fd); -+ else -+ dpa_fd_release(net_dev, fd); -+ -+ percpu_priv->stats.tx_errors++; -+ -+ return qman_cb_dqrr_consume; -+} -+ -+static enum qman_cb_dqrr_result __hot -+shared_tx_default_dqrr(struct qman_portal *portal, -+ struct qman_fq *fq, -+ const struct qm_dqrr_entry *dq) -+{ -+ struct net_device *net_dev; -+ struct dpa_priv_s *priv; -+ struct dpa_percpu_priv_s *percpu_priv; -+ struct dpa_bp *dpa_bp; -+ const struct qm_fd *fd = &dq->fd; -+ -+ net_dev = ((struct dpa_fq *)fq)->net_dev; -+ priv = netdev_priv(net_dev); -+ -+ dpa_bp = dpa_bpid2pool(fd->bpid); -+ BUG_ON(!dpa_bp); -+ -+ percpu_priv = raw_cpu_ptr(priv->percpu_priv); -+ -+ if (unlikely((fd->status & FM_FD_STAT_TX_ERRORS) != 0)) { -+ if (netif_msg_hw(priv) && net_ratelimit()) -+ netdev_warn(net_dev, "FD status = 0x%08x\n", -+ fd->status & FM_FD_STAT_TX_ERRORS); -+ -+ percpu_priv->stats.tx_errors++; -+ } -+ -+ if ((fd->format == qm_fd_sg) && (!dpa_bp->vaddr)) -+ dpa_fd_release_sg(net_dev, fd); -+ else -+ dpa_fd_release(net_dev, fd); -+ -+ percpu_priv->tx_confirm++; -+ -+ return qman_cb_dqrr_consume; -+} -+ -+static void shared_ern(struct qman_portal *portal, -+ struct qman_fq *fq, -+ const struct qm_mr_entry *msg) -+{ -+ struct net_device *net_dev; -+ const struct dpa_priv_s *priv; -+ struct dpa_percpu_priv_s *percpu_priv; -+ struct dpa_fq *dpa_fq = (struct dpa_fq *)fq; -+ -+ net_dev = dpa_fq->net_dev; -+ priv = netdev_priv(net_dev); -+ percpu_priv = raw_cpu_ptr(priv->percpu_priv); -+ -+ dpa_fd_release(net_dev, &msg->ern.fd); -+ -+ percpu_priv->stats.tx_dropped++; -+ percpu_priv->stats.tx_fifo_errors++; -+ count_ern(percpu_priv, msg); -+} -+ 
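The two copy helpers above walk the destination or source one page at a time, so that each kmap_atomic() window stays within a single page. The stand-alone sketch below is illustrative only and not part of the patch: DEMO_PAGE_SIZE and demo_offset_in_page() are local stand-ins for the kernel's PAGE_SIZE and offset_in_page(), and printf() stands in for the kmap/memcpy/kunmap step. It shows the chunking arithmetic for a copy that straddles a page boundary:

/* Sketch of the page-walking copy chunking (user-space, self-contained) */
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL
#define demo_offset_in_page(addr) ((unsigned long)(addr) & (DEMO_PAGE_SIZE - 1))

int main(void)
{
	unsigned long phys_start = 3000; /* copy starts 3000 B into page 0 */
	unsigned long buf_size = 5000;   /* total bytes to copy */

	while (buf_size > 0) {
		unsigned long offset = demo_offset_in_page(phys_start);
		unsigned long size = (offset + buf_size > DEMO_PAGE_SIZE) ?
				     DEMO_PAGE_SIZE - offset : buf_size;

		/* in the driver: kmap_atomic() the page, memcpy() 'size'
		 * bytes at 'offset', then kunmap_atomic()
		 */
		printf("page %lu: %lu bytes at offset %lu\n",
		       phys_start / DEMO_PAGE_SIZE, size, offset);

		phys_start += size;
		buf_size -= size;
	}
	return 0;
}

For a 5000-byte copy starting at offset 3000, this yields a 1096-byte chunk in the first page followed by a 3904-byte chunk in the next, mirroring how shared_rx_dqrr() above and dpa_shared_tx() below move frame data through unmapped buffer pools.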
-+int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev) -+{ -+ struct dpa_bp *dpa_bp; -+ struct bm_buffer bmb; -+ struct dpa_percpu_priv_s *percpu_priv; -+ struct dpa_priv_s *priv; -+ struct qm_fd fd; -+ int queue_mapping; -+ int err; -+ void *dpa_bp_vaddr; -+ fm_prs_result_t parse_results; -+ fm_prs_result_t *parse_results_ref; -+ struct qman_fq *egress_fq, *conf_fq; -+ -+ priv = netdev_priv(net_dev); -+ percpu_priv = raw_cpu_ptr(priv->percpu_priv); -+ -+ memset(&fd, 0, sizeof(fd)); -+ fd.format = qm_fd_contig; -+ -+ queue_mapping = smp_processor_id(); -+ -+ dpa_bp = dpa_size2pool(priv, skb_headlen(skb)); -+ if (unlikely(IS_ERR_OR_NULL(dpa_bp))) { -+ percpu_priv->stats.tx_errors++; -+ err = dpa_bp ? PTR_ERR(dpa_bp) : -ENODEV; -+ goto bpools_too_small_error; -+ } -+ -+ err = bman_acquire(dpa_bp->pool, &bmb, 1, 0); -+ if (unlikely(err <= 0)) { -+ percpu_priv->stats.tx_errors++; -+ if (err == 0) -+ err = -ENOMEM; -+ goto buf_acquire_failed; -+ } -+ fd.bpid = dpa_bp->bpid; -+ -+ fd.length20 = skb_headlen(skb); -+ fd.addr_hi = (uint8_t)bmb.hi; -+ fd.addr_lo = bmb.lo; -+ fd.offset = priv->tx_headroom; -+ -+ /* The virtual address of the buffer pool is expected to be NULL -+ * in scenarios like MAC-less or Shared-MAC between Linux and -+ * USDPAA. In this case the buffers are dynamically mapped/unmapped. -+ */ -+ if (dpa_bp->vaddr) { -+ dpa_bp_vaddr = dpa_phys2virt(dpa_bp, bm_buf_addr(&bmb)); -+ -+ /* Copy the packet payload */ -+ skb_copy_from_linear_data(skb, -+ dpa_bp_vaddr + dpa_fd_offset(&fd), -+ dpa_fd_length(&fd)); -+ -+ /* If there is no MAC device, or a peer is set, the -+ * interface is MAC-less. -+ */ -+ if (!priv->mac_dev || priv->peer) { -+ parse_results_ref = (fm_prs_result_t *) (dpa_bp_vaddr + -+ DPA_TX_PRIV_DATA_SIZE); -+ /* Default values; FMan will not generate/validate -+ * CSUM; -+ */ -+ parse_results_ref->l3r = 0; -+ parse_results_ref->l4r = 0; -+ parse_results_ref->ip_off[0] = 0xff; -+ parse_results_ref->ip_off[1] = 0xff; -+ parse_results_ref->l4_off = 0xff; -+ -+ fd.cmd |= FM_FD_CMD_DTC | FM_FD_CMD_RPD; -+ } else { -+ /* Enable L3/L4 hardware checksum computation, -+ * if applicable -+ */ -+ err = dpa_enable_tx_csum(priv, skb, &fd, -+ dpa_bp_vaddr + DPA_TX_PRIV_DATA_SIZE); -+ -+ if (unlikely(err < 0)) { -+ if (netif_msg_tx_err(priv) && net_ratelimit()) -+ netdev_err(net_dev, -+ "Tx HW csum error: %d\n", err); -+ percpu_priv->stats.tx_errors++; -+ goto l3_l4_csum_failed; -+ } -+ } -+ -+ } else { -+ if (!priv->mac_dev || priv->peer) { -+ /* Default values; FMan will not generate/validate -+ * CSUM; -+ */ -+ parse_results.l3r = 0; -+ parse_results.l4r = 0; -+ parse_results.ip_off[0] = 0xff; -+ parse_results.ip_off[1] = 0xff; -+ parse_results.l4_off = 0xff; -+ -+ fd.cmd |= FM_FD_CMD_DTC | FM_FD_CMD_RPD; -+ } else { -+ /* Enable L3/L4 hardware checksum computation, -+ * if applicable -+ */ -+ err = dpa_enable_tx_csum(priv, skb, &fd, -+ (char *)&parse_results); -+ -+ if (unlikely(err < 0)) { -+ if (netif_msg_tx_err(priv) && net_ratelimit()) -+ netdev_err(net_dev, -+ "Tx HW csum error: %d\n", err); -+ percpu_priv->stats.tx_errors++; -+ goto l3_l4_csum_failed; -+ } -+ -+ } -+ -+ copy_to_unmapped_area(bm_buf_addr(&bmb) + DPA_TX_PRIV_DATA_SIZE, -+ &parse_results, -+ DPA_PARSE_RESULTS_SIZE); -+ -+ copy_to_unmapped_area(bm_buf_addr(&bmb) + dpa_fd_offset(&fd), -+ skb->data, -+ dpa_fd_length(&fd)); -+ } -+ -+ egress_fq = priv->egress_fqs[queue_mapping]; -+ conf_fq = priv->conf_fqs[queue_mapping]; -+ -+ err = dpa_xmit(priv, &percpu_priv->stats, &fd, egress_fq, conf_fq); -+ -+l3_l4_csum_failed: -+bpools_too_small_error: 
-+buf_acquire_failed: -+ /* We're done with the skb */ -+ dev_kfree_skb(skb); -+ -+ /* err remains unused, NETDEV_TX_OK must be returned here */ -+ return NETDEV_TX_OK; -+} -+EXPORT_SYMBOL(dpa_shared_tx); -+ -+static int dpa_shared_netdev_init(struct device_node *dpa_node, -+ struct net_device *net_dev) -+{ -+ struct dpa_priv_s *priv = netdev_priv(net_dev); -+ const uint8_t *mac_addr; -+ -+ net_dev->netdev_ops = &dpa_shared_ops; -+ -+ net_dev->mem_start = priv->mac_dev->res->start; -+ net_dev->mem_end = priv->mac_dev->res->end; -+ -+ mac_addr = priv->mac_dev->addr; -+ -+ net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | -+ NETIF_F_LLTX); -+ -+ return dpa_netdev_init(net_dev, mac_addr, shared_tx_timeout); -+} -+ -+#ifdef CONFIG_PM -+ -+static int dpa_shared_suspend(struct device *dev) -+{ -+ struct net_device *net_dev; -+ struct dpa_priv_s *priv; -+ struct mac_device *mac_dev; -+ int err = 0; -+ -+ net_dev = dev_get_drvdata(dev); -+ if (net_dev->flags & IFF_UP) { -+ priv = netdev_priv(net_dev); -+ mac_dev = priv->mac_dev; -+ -+ err = fm_port_suspend(mac_dev->port_dev[RX]); -+ if (err) -+ goto port_suspend_failed; -+ -+ err = fm_port_suspend(mac_dev->port_dev[TX]); -+ if (err) -+ /* Roll back the Rx port, but still report the -+ * Tx suspend failure to the caller. -+ */ -+ fm_port_resume(mac_dev->port_dev[RX]); -+ } -+ -+port_suspend_failed: -+ return err; -+} -+ -+static int dpa_shared_resume(struct device *dev) -+{ -+ struct net_device *net_dev; -+ struct dpa_priv_s *priv; -+ struct mac_device *mac_dev; -+ int err = 0; -+ -+ net_dev = dev_get_drvdata(dev); -+ if (net_dev->flags & IFF_UP) { -+ priv = netdev_priv(net_dev); -+ mac_dev = priv->mac_dev; -+ -+ err = fm_port_resume(mac_dev->port_dev[TX]); -+ if (err) -+ goto port_resume_failed; -+ -+ err = fm_port_resume(mac_dev->port_dev[RX]); -+ if (err) -+ /* Roll back the Tx port, but still report the -+ * Rx resume failure to the caller. -+ */ -+ fm_port_suspend(mac_dev->port_dev[TX]); -+ } -+ -+port_resume_failed: -+ return err; -+} -+ -+static const struct dev_pm_ops shared_pm_ops = { -+ .suspend = dpa_shared_suspend, -+ .resume = dpa_shared_resume, -+}; -+ -+#define SHARED_PM_OPS (&shared_pm_ops) -+ -+#else /* CONFIG_PM */ -+ -+#define SHARED_PM_OPS NULL -+ -+#endif /* CONFIG_PM */ -+ -+static int -+dpaa_eth_shared_probe(struct platform_device *_of_dev) -+{ -+ int err = 0, i, channel; -+ struct device *dev; -+ struct device_node *dpa_node; -+ struct dpa_bp *dpa_bp; -+ struct dpa_fq *dpa_fq, *tmp; -+ size_t count; -+ struct net_device *net_dev = NULL; -+ struct dpa_priv_s *priv = NULL; -+ struct dpa_percpu_priv_s *percpu_priv; -+ struct fm_port_fqs port_fqs; -+ struct dpa_buffer_layout_s *buf_layout = NULL; -+ struct mac_device *mac_dev; -+ struct task_struct *kth; -+ -+ dev = &_of_dev->dev; -+ -+ dpa_node = dev->of_node; -+ -+ if (!of_device_is_available(dpa_node)) -+ return -ENODEV; -+ -+ /* Get the buffer pools assigned to this interface */ -+ dpa_bp = dpa_bp_probe(_of_dev, &count); -+ if (IS_ERR(dpa_bp)) -+ return PTR_ERR(dpa_bp); -+ -+ for (i = 0; i < count; i++) -+ dpa_bp[i].seed_cb = dpa_bp_shared_port_seed; -+ -+ /* Allocate this early, so we can store relevant information in -+ * the private area (needed by 1588 code in dpa_mac_probe) -+ */ -+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES); -+ if (!net_dev) { -+ dev_err(dev, "alloc_etherdev_mq() failed\n"); -+ return -ENOMEM; -+ } -+ -+ /* Do this here, so we can be verbose early */ -+ SET_NETDEV_DEV(net_dev, dev); -+ dev_set_drvdata(dev, net_dev); -+ -+ priv = netdev_priv(net_dev); -+ priv->net_dev = net_dev; -+ strcpy(priv->if_type, "shared"); -+ -+ priv->msg_enable = netif_msg_init(advanced_debug, -1); -+ -+ mac_dev = 
dpa_mac_probe(_of_dev); -+ if (IS_ERR_OR_NULL(mac_dev)) { -+ err = mac_dev ? PTR_ERR(mac_dev) : -ENODEV; -+ goto mac_probe_failed; -+ } -+ -+ /* We have physical ports, so we need to establish -+ * the buffer layout. -+ */ -+ buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout), -+ GFP_KERNEL); -+ if (!buf_layout) { -+ dev_err(dev, "devm_kzalloc() failed\n"); -+ err = -ENOMEM; -+ goto alloc_failed; -+ } -+ dpa_set_buffers_layout(mac_dev, buf_layout); -+ -+ INIT_LIST_HEAD(&priv->dpa_fq_list); -+ -+ memset(&port_fqs, 0, sizeof(port_fqs)); -+ -+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs, -+ false, RX); -+ if (!err) -+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, -+ &port_fqs, false, TX); -+ if (err < 0) -+ goto fq_probe_failed; -+ -+ /* bp init */ -+ priv->bp_count = count; -+ err = dpa_bp_create(net_dev, dpa_bp, count); -+ if (err < 0) -+ goto bp_create_failed; -+ -+ priv->mac_dev = mac_dev; -+ -+ channel = dpa_get_channel(); -+ -+ if (channel < 0) { -+ err = channel; -+ goto get_channel_failed; -+ } -+ -+ priv->channel = (uint16_t)channel; -+ -+ /* Start a thread that will walk the cpus with affine portals -+ * and add this pool channel to each CPU's dequeue mask. -+ */ -+ kth = kthread_run(dpaa_eth_add_channel, -+ (void *)(unsigned long)priv->channel, -+ "dpaa_%p:%d", net_dev, priv->channel); -+ if (IS_ERR(kth)) { -+ err = PTR_ERR(kth); -+ goto add_channel_failed; -+ } -+ -+ dpa_fq_setup(priv, &shared_fq_cbs, priv->mac_dev->port_dev[TX]); -+ -+ /* Create a congestion group for this netdev, with -+ * dynamically-allocated CGR ID. -+ * Must be executed after probing the MAC, but before -+ * assigning the egress FQs to the CGRs. -+ */ -+ err = dpaa_eth_cgr_init(priv); -+ if (err < 0) { -+ dev_err(dev, "Error initializing CGR\n"); -+ goto cgr_init_failed; -+ } -+ -+ /* Add the FQs to the interface, and make them active */ -+ list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) { -+ err = dpa_fq_init(dpa_fq, false); -+ if (err < 0) -+ goto fq_alloc_failed; -+ } -+ -+ priv->buf_layout = buf_layout; -+ priv->tx_headroom = -+ dpa_get_headroom(&priv->buf_layout[TX]); -+ -+ /* All real interfaces need their ports initialized */ -+ dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs, -+ buf_layout, dev); -+ -+ /* Now we need to initialize either a private or shared interface */ -+ priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv); -+ -+ if (priv->percpu_priv == NULL) { -+ dev_err(dev, "devm_alloc_percpu() failed\n"); -+ err = -ENOMEM; -+ goto alloc_percpu_failed; -+ } -+ for_each_possible_cpu(i) { -+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i); -+ memset(percpu_priv, 0, sizeof(*percpu_priv)); -+ } -+ -+ err = dpa_shared_netdev_init(dpa_node, net_dev); -+ -+ if (err < 0) -+ goto netdev_init_failed; -+ -+ dpaa_eth_sysfs_init(&net_dev->dev); -+ -+ pr_info("fsl_dpa_shared: Probed shared interface %s\n", -+ net_dev->name); -+ -+ return 0; -+ -+netdev_init_failed: -+alloc_percpu_failed: -+fq_alloc_failed: -+ if (net_dev) { -+ dpa_fq_free(dev, &priv->dpa_fq_list); -+ qman_release_cgrid(priv->cgr_data.cgr.cgrid); -+ qman_delete_cgr(&priv->cgr_data.cgr); -+ } -+cgr_init_failed: -+add_channel_failed: -+get_channel_failed: -+ if (net_dev) -+ dpa_bp_free(priv); -+bp_create_failed: -+fq_probe_failed: -+ devm_kfree(dev, buf_layout); -+alloc_failed: -+mac_probe_failed: -+ dev_set_drvdata(dev, NULL); -+ if (net_dev) -+ free_netdev(net_dev); -+ -+ return err; -+} -+ -+static const struct of_device_id dpa_shared_match[] = { -+ { -+ .compatible = "fsl,dpa-ethernet-shared" -+ }, -+ {} -+}; -+MODULE_DEVICE_TABLE(of, 
dpa_shared_match); -+ -+static struct platform_driver dpa_shared_driver = { -+ .driver = { -+ .name = KBUILD_MODNAME "-shared", -+ .of_match_table = dpa_shared_match, -+ .owner = THIS_MODULE, -+ .pm = SHARED_PM_OPS, -+ }, -+ .probe = dpaa_eth_shared_probe, -+ .remove = dpa_remove -+}; -+ -+static int __init __cold dpa_shared_load(void) -+{ -+ int _errno; -+ -+ pr_info(DPA_DESCRIPTION "\n"); -+ -+ /* Initialize dpaa_eth mirror values */ -+ dpa_rx_extra_headroom = fm_get_rx_extra_headroom(); -+ dpa_max_frm = fm_get_max_frm(); -+ -+ _errno = platform_driver_register(&dpa_shared_driver); -+ if (unlikely(_errno < 0)) { -+ pr_err(KBUILD_MODNAME -+ ": %s:%hu:%s(): platform_driver_register() = %d\n", -+ KBUILD_BASENAME".c", __LINE__, __func__, _errno); -+ } -+ -+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", -+ KBUILD_BASENAME".c", __func__); -+ -+ return _errno; -+} -+module_init(dpa_shared_load); -+ -+static void __exit __cold dpa_shared_unload(void) -+{ -+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n", -+ KBUILD_BASENAME".c", __func__); -+ -+ platform_driver_unregister(&dpa_shared_driver); -+} -+module_exit(dpa_shared_unload); ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c -@@ -0,0 +1,278 @@ -+/* Copyright 2008-2012 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include "dpaa_eth.h" -+#include "mac.h" /* struct mac_device */ -+#ifdef CONFIG_FSL_DPAA_1588 -+#include "dpaa_1588.h" -+#endif -+ -+static ssize_t dpaa_eth_show_addr(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev)); -+ struct mac_device *mac_dev = priv->mac_dev; -+ -+ if (mac_dev) -+ return sprintf(buf, "%llx", -+ (unsigned long long)mac_dev->res->start); -+ else -+ return sprintf(buf, "none"); -+} -+ -+static ssize_t dpaa_eth_show_type(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev)); -+ ssize_t res = 0; -+ -+ if (priv) -+ res = sprintf(buf, "%s", priv->if_type); -+ -+ return res; -+} -+ -+static ssize_t dpaa_eth_show_fqids(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev)); -+ ssize_t bytes = 0; -+ int i = 0; -+ char *str; -+ struct dpa_fq *fq; -+ struct dpa_fq *tmp; -+ struct dpa_fq *prev = NULL; -+ u32 first_fqid = 0; -+ u32 last_fqid = 0; -+ char *prevstr = NULL; -+ -+ list_for_each_entry_safe(fq, tmp, &priv->dpa_fq_list, list) { -+ switch (fq->fq_type) { -+ case FQ_TYPE_RX_DEFAULT: -+ str = "Rx default"; -+ break; -+ case FQ_TYPE_RX_ERROR: -+ str = "Rx error"; -+ break; -+ case FQ_TYPE_RX_PCD: -+ str = "Rx PCD"; -+ break; -+ case FQ_TYPE_TX_CONFIRM: -+ str = "Tx default confirmation"; -+ break; -+ case FQ_TYPE_TX_CONF_MQ: -+ str = "Tx confirmation (mq)"; -+ break; -+ case FQ_TYPE_TX_ERROR: -+ str = "Tx error"; -+ break; -+ case FQ_TYPE_TX: -+ str = "Tx"; -+ break; -+ case FQ_TYPE_RX_PCD_HI_PRIO: -+ str = "Rx PCD High Priority"; -+ break; -+ default: -+ str = "Unknown"; -+ } -+ -+ if (prev && (abs(fq->fqid - prev->fqid) != 1 || -+ str != prevstr)) { -+ if (last_fqid == first_fqid) -+ bytes += sprintf(buf + bytes, -+ "%s: %d\n", prevstr, prev->fqid); -+ else -+ bytes += sprintf(buf + bytes, -+ "%s: %d - %d\n", prevstr, -+ first_fqid, last_fqid); -+ } -+ -+ if (prev && abs(fq->fqid - prev->fqid) == 1 && str == prevstr) -+ last_fqid = fq->fqid; -+ else -+ first_fqid = last_fqid = fq->fqid; -+ -+ prev = fq; -+ prevstr = str; -+ i++; -+ } -+ -+ if (prev) { -+ if (last_fqid == first_fqid) -+ bytes += sprintf(buf + bytes, "%s: %d\n", prevstr, -+ prev->fqid); -+ else -+ bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr, -+ first_fqid, last_fqid); -+ } -+ -+ return bytes; -+} -+ -+static ssize_t dpaa_eth_show_bpids(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ ssize_t bytes = 0; -+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev)); -+ struct dpa_bp *dpa_bp = priv->dpa_bp; -+ int i = 0; -+ -+ for (i = 0; i < priv->bp_count; i++) -+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n", -+ dpa_bp[i].bpid); -+ -+ return bytes; -+} -+ -+static ssize_t dpaa_eth_show_mac_regs(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev)); -+ struct mac_device *mac_dev = priv->mac_dev; -+ int n = 0; -+ -+ if (mac_dev) -+ n = fm_mac_dump_regs(mac_dev, buf, n); -+ else -+ return sprintf(buf, "no mac registers\n"); -+ -+ return n; -+} -+ -+static ssize_t dpaa_eth_show_mac_rx_stats(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev)); -+ struct mac_device *mac_dev = priv->mac_dev; -+ int n = 0; -+ -+ if (mac_dev) -+ n = 
fm_mac_dump_rx_stats(mac_dev, buf, n); -+ else -+ return sprintf(buf, "no mac rx stats\n"); -+ -+ return n; -+} -+ -+static ssize_t dpaa_eth_show_mac_tx_stats(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev)); -+ struct mac_device *mac_dev = priv->mac_dev; -+ int n = 0; -+ -+ if (mac_dev) -+ n = fm_mac_dump_tx_stats(mac_dev, buf, n); -+ else -+ return sprintf(buf, "no mac tx stats\n"); -+ -+ return n; -+} -+ -+#ifdef CONFIG_FSL_DPAA_1588 -+static ssize_t dpaa_eth_show_ptp_1588(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev)); -+ -+ if (priv->tsu && priv->tsu->valid) -+ return sprintf(buf, "1\n"); -+ else -+ return sprintf(buf, "0\n"); -+} -+ -+static ssize_t dpaa_eth_set_ptp_1588(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev)); -+ unsigned int num; -+ unsigned long flags; -+ -+ if (kstrtouint(buf, 0, &num) < 0) -+ return -EINVAL; -+ -+ local_irq_save(flags); -+ -+ if (num) { -+ if (priv->tsu) -+ priv->tsu->valid = TRUE; -+ } else { -+ if (priv->tsu) -+ priv->tsu->valid = FALSE; -+ } -+ -+ local_irq_restore(flags); -+ -+ return count; -+} -+#endif -+ -+static struct device_attribute dpaa_eth_attrs[] = { -+ __ATTR(device_addr, S_IRUGO, dpaa_eth_show_addr, NULL), -+ __ATTR(device_type, S_IRUGO, dpaa_eth_show_type, NULL), -+ __ATTR(fqids, S_IRUGO, dpaa_eth_show_fqids, NULL), -+ __ATTR(bpids, S_IRUGO, dpaa_eth_show_bpids, NULL), -+ __ATTR(mac_regs, S_IRUGO, dpaa_eth_show_mac_regs, NULL), -+ __ATTR(mac_rx_stats, S_IRUGO, dpaa_eth_show_mac_rx_stats, NULL), -+ __ATTR(mac_tx_stats, S_IRUGO, dpaa_eth_show_mac_tx_stats, NULL), -+#ifdef CONFIG_FSL_DPAA_1588 -+ __ATTR(ptp_1588, S_IRUGO | S_IWUSR, dpaa_eth_show_ptp_1588, -+ dpaa_eth_set_ptp_1588), -+#endif -+}; -+ -+void dpaa_eth_sysfs_init(struct device *dev) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++) -+ if (device_create_file(dev, &dpaa_eth_attrs[i])) { -+ dev_err(dev, "Error creating sysfs file\n"); -+ while (i > 0) -+ device_remove_file(dev, &dpaa_eth_attrs[--i]); -+ return; -+ } -+} -+EXPORT_SYMBOL(dpaa_eth_sysfs_init); -+ -+void dpaa_eth_sysfs_remove(struct device *dev) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++) -+ device_remove_file(dev, &dpaa_eth_attrs[i]); -+} ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h -@@ -0,0 +1,144 @@ -+/* Copyright 2013 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#undef TRACE_SYSTEM -+#define TRACE_SYSTEM dpaa_eth -+ -+#if !defined(_DPAA_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) -+#define _DPAA_ETH_TRACE_H -+ -+#include <linux/skbuff.h> -+#include <linux/netdevice.h> -+#include "dpaa_eth.h" -+#include <linux/tracepoint.h> -+ -+#define fd_format_name(format) { qm_fd_##format, #format } -+#define fd_format_list \ -+ fd_format_name(contig), \ -+ fd_format_name(sg) -+#define TR_FMT "[%s] fqid=%d, fd: addr=0x%llx, format=%s, off=%u, len=%u," \ -+ " status=0x%08x" -+ -+/* This is used to declare a class of events. -+ * Individual events of this type will be defined below. -+ */ -+ -+/* Store details about a frame descriptor and the FQ on which it was -+ * transmitted/received. -+ */ -+DECLARE_EVENT_CLASS(dpaa_eth_fd, -+ /* Trace function prototype */ -+ TP_PROTO(struct net_device *netdev, -+ struct qman_fq *fq, -+ const struct qm_fd *fd), -+ -+ /* Repeat argument list here */ -+ TP_ARGS(netdev, fq, fd), -+ -+ /* A structure containing the relevant information we want to record. -+ * Declare name and type for each normal element, name, type and size -+ * for arrays. Use __string for variable length strings. -+ */ -+ TP_STRUCT__entry( -+ __field(u32, fqid) -+ __field(u64, fd_addr) -+ __field(u8, fd_format) -+ __field(u16, fd_offset) -+ __field(u32, fd_length) -+ __field(u32, fd_status) -+ __string(name, netdev->name) -+ ), -+ -+ /* The function that assigns values to the above declared fields */ -+ TP_fast_assign( -+ __entry->fqid = fq->fqid; -+ __entry->fd_addr = qm_fd_addr_get64(fd); -+ __entry->fd_format = fd->format; -+ __entry->fd_offset = dpa_fd_offset(fd); -+ __entry->fd_length = dpa_fd_length(fd); -+ __entry->fd_status = fd->status; -+ __assign_str(name, netdev->name); -+ ), -+ -+ /* This is what gets printed when the trace event is triggered */ -+ /* TODO: print the status using __print_flags() */ -+ TP_printk(TR_FMT, -+ __get_str(name), __entry->fqid, __entry->fd_addr, -+ __print_symbolic(__entry->fd_format, fd_format_list), -+ __entry->fd_offset, __entry->fd_length, __entry->fd_status) -+); -+ -+/* Now declare events of the above type. 
Format is: -+ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class -+ */ -+ -+/* Tx (egress) fd */ -+DEFINE_EVENT(dpaa_eth_fd, dpa_tx_fd, -+ -+ TP_PROTO(struct net_device *netdev, -+ struct qman_fq *fq, -+ const struct qm_fd *fd), -+ -+ TP_ARGS(netdev, fq, fd) -+); -+ -+/* Rx fd */ -+DEFINE_EVENT(dpaa_eth_fd, dpa_rx_fd, -+ -+ TP_PROTO(struct net_device *netdev, -+ struct qman_fq *fq, -+ const struct qm_fd *fd), -+ -+ TP_ARGS(netdev, fq, fd) -+); -+ -+/* Tx confirmation fd */ -+DEFINE_EVENT(dpaa_eth_fd, dpa_tx_conf_fd, -+ -+ TP_PROTO(struct net_device *netdev, -+ struct qman_fq *fq, -+ const struct qm_fd *fd), -+ -+ TP_ARGS(netdev, fq, fd) -+); -+ -+/* If only one event of a certain type needs to be declared, use TRACE_EVENT(). -+ * The syntax is the same as for DECLARE_EVENT_CLASS(). -+ */ -+ -+#endif /* _DPAA_ETH_TRACE_H */ -+ -+/* This must be outside ifdef _DPAA_ETH_TRACE_H */ -+#undef TRACE_INCLUDE_PATH -+#define TRACE_INCLUDE_PATH . -+#undef TRACE_INCLUDE_FILE -+#define TRACE_INCLUDE_FILE dpaa_eth_trace -+#include <trace/define_trace.h> ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c -@@ -0,0 +1,544 @@ -+/* Copyright 2008-2012 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG -+#define pr_fmt(fmt) \ -+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \ -+ KBUILD_BASENAME".c", __LINE__, __func__ -+#else -+#define pr_fmt(fmt) \ -+ KBUILD_MODNAME ": " fmt -+#endif -+ -+#include -+ -+#include "dpaa_eth.h" -+#include "mac.h" /* struct mac_device */ -+#include "dpaa_eth_common.h" -+ -+static const char dpa_stats_percpu[][ETH_GSTRING_LEN] = { -+ "interrupts", -+ "rx packets", -+ "tx packets", -+ "tx recycled", -+ "tx confirm", -+ "tx S/G", -+ "rx S/G", -+ "tx error", -+ "rx error", -+ "bp count" -+}; -+ -+static char dpa_stats_global[][ETH_GSTRING_LEN] = { -+ /* dpa rx errors */ -+ "rx dma error", -+ "rx frame physical error", -+ "rx frame size error", -+ "rx header error", -+ "rx csum error", -+ -+ /* demultiplexing errors */ -+ "qman cg_tdrop", -+ "qman wred", -+ "qman error cond", -+ "qman early window", -+ "qman late window", -+ "qman fq tdrop", -+ "qman fq retired", -+ "qman orp disabled", -+ -+ /* congestion related stats */ -+ "congestion time (ms)", -+ "entered congestion", -+ "congested (0/1)" -+}; -+ -+#define DPA_STATS_PERCPU_LEN ARRAY_SIZE(dpa_stats_percpu) -+#define DPA_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_stats_global) -+ -+static int __cold dpa_get_settings(struct net_device *net_dev, -+ struct ethtool_cmd *et_cmd) -+{ -+ int _errno; -+ struct dpa_priv_s *priv; -+ -+ priv = netdev_priv(net_dev); -+ -+ if (priv->mac_dev == NULL) { -+ netdev_info(net_dev, "This is a MAC-less interface\n"); -+ return -ENODEV; -+ } -+ if (unlikely(priv->mac_dev->phy_dev == NULL)) { -+ netdev_dbg(net_dev, "phy device not initialized\n"); -+ return 0; -+ } -+ -+ _errno = phy_ethtool_gset(priv->mac_dev->phy_dev, et_cmd); -+ if (unlikely(_errno < 0)) -+ netdev_err(net_dev, "phy_ethtool_gset() = %d\n", _errno); -+ -+ return _errno; -+} -+ -+static int __cold dpa_set_settings(struct net_device *net_dev, -+ struct ethtool_cmd *et_cmd) -+{ -+ int _errno; -+ struct dpa_priv_s *priv; -+ -+ priv = netdev_priv(net_dev); -+ -+ if (priv->mac_dev == NULL) { -+ netdev_info(net_dev, "This is a MAC-less interface\n"); -+ return -ENODEV; -+ } -+ if (unlikely(priv->mac_dev->phy_dev == NULL)) { -+ netdev_err(net_dev, "phy device not initialized\n"); -+ return -ENODEV; -+ } -+ -+ _errno = phy_ethtool_sset(priv->mac_dev->phy_dev, et_cmd); -+ if (unlikely(_errno < 0)) -+ netdev_err(net_dev, "phy_ethtool_sset() = %d\n", _errno); -+ -+ return _errno; -+} -+ -+static void __cold dpa_get_drvinfo(struct net_device *net_dev, -+ struct ethtool_drvinfo *drvinfo) -+{ -+ int _errno; -+ -+ strncpy(drvinfo->driver, KBUILD_MODNAME, -+ sizeof(drvinfo->driver) - 1)[sizeof(drvinfo->driver)-1] = 0; -+ _errno = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), -+ "%X", 0); -+ -+ if (unlikely(_errno >= sizeof(drvinfo->fw_version))) { -+ /* Truncated output */ -+ netdev_notice(net_dev, "snprintf() = %d\n", _errno); -+ } else if (unlikely(_errno < 0)) { -+ netdev_warn(net_dev, "snprintf() = %d\n", _errno); -+ memset(drvinfo->fw_version, 0, sizeof(drvinfo->fw_version)); -+ } -+ strncpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), -+ sizeof(drvinfo->bus_info)-1)[sizeof(drvinfo->bus_info)-1] = 0; -+} -+ -+static uint32_t __cold dpa_get_msglevel(struct net_device *net_dev) -+{ -+ return ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable; -+} -+ -+static void __cold dpa_set_msglevel(struct net_device *net_dev, -+ uint32_t msg_enable) -+{ -+ ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable = msg_enable; -+} -+ -+static int __cold dpa_nway_reset(struct net_device 
*net_dev) -+{ -+ int _errno; -+ struct dpa_priv_s *priv; -+ -+ priv = netdev_priv(net_dev); -+ -+ if (priv->mac_dev == NULL) { -+ netdev_info(net_dev, "This is a MAC-less interface\n"); -+ return -ENODEV; -+ } -+ if (unlikely(priv->mac_dev->phy_dev == NULL)) { -+ netdev_err(net_dev, "phy device not initialized\n"); -+ return -ENODEV; -+ } -+ -+ _errno = 0; -+ if (priv->mac_dev->phy_dev->autoneg) { -+ _errno = phy_start_aneg(priv->mac_dev->phy_dev); -+ if (unlikely(_errno < 0)) -+ netdev_err(net_dev, "phy_start_aneg() = %d\n", -+ _errno); -+ } -+ -+ return _errno; -+} -+ -+static void __cold dpa_get_pauseparam(struct net_device *net_dev, -+ struct ethtool_pauseparam *epause) -+{ -+ struct dpa_priv_s *priv; -+ struct mac_device *mac_dev; -+ struct phy_device *phy_dev; -+ -+ priv = netdev_priv(net_dev); -+ mac_dev = priv->mac_dev; -+ -+ if (mac_dev == NULL) { -+ netdev_info(net_dev, "This is a MAC-less interface\n"); -+ return; -+ } -+ -+ phy_dev = mac_dev->phy_dev; -+ if (unlikely(phy_dev == NULL)) { -+ netdev_err(net_dev, "phy device not initialized\n"); -+ return; -+ } -+ -+ epause->autoneg = mac_dev->autoneg_pause; -+ epause->rx_pause = mac_dev->rx_pause_active; -+ epause->tx_pause = mac_dev->tx_pause_active; -+} -+ -+static int __cold dpa_set_pauseparam(struct net_device *net_dev, -+ struct ethtool_pauseparam *epause) -+{ -+ struct dpa_priv_s *priv; -+ struct mac_device *mac_dev; -+ struct phy_device *phy_dev; -+ int _errno; -+ u32 newadv, oldadv; -+ bool rx_pause, tx_pause; -+ -+ priv = netdev_priv(net_dev); -+ mac_dev = priv->mac_dev; -+ -+ if (mac_dev == NULL) { -+ netdev_info(net_dev, "This is a MAC-less interface\n"); -+ return -ENODEV; -+ } -+ -+ phy_dev = mac_dev->phy_dev; -+ if (unlikely(phy_dev == NULL)) { -+ netdev_err(net_dev, "phy device not initialized\n"); -+ return -ENODEV; -+ } -+ -+ if (!(phy_dev->supported & SUPPORTED_Pause) || -+ (!(phy_dev->supported & SUPPORTED_Asym_Pause) && -+ (epause->rx_pause != epause->tx_pause))) -+ return -EINVAL; -+ -+ /* The MAC should know how to handle PAUSE frame autonegotiation before -+ * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE -+ * settings. -+ */ -+ mac_dev->autoneg_pause = !!epause->autoneg; -+ mac_dev->rx_pause_req = !!epause->rx_pause; -+ mac_dev->tx_pause_req = !!epause->tx_pause; -+ -+ /* Determine the sym/asym advertised PAUSE capabilities from the desired -+ * rx/tx pause settings. -+ */ -+ newadv = 0; -+ if (epause->rx_pause) -+ newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause; -+ if (epause->tx_pause) -+ newadv |= ADVERTISED_Asym_Pause; -+ -+ oldadv = phy_dev->advertising & -+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause); -+ -+ /* If there are differences between the old and the new advertised -+ * values, restart PHY autonegotiation and advertise the new values. 
-+ */ -+ if (oldadv != newadv) { -+ phy_dev->advertising &= ~(ADVERTISED_Pause -+ | ADVERTISED_Asym_Pause); -+ phy_dev->advertising |= newadv; -+ if (phy_dev->autoneg) { -+ _errno = phy_start_aneg(phy_dev); -+ if (unlikely(_errno < 0)) -+ netdev_err(net_dev, "phy_start_aneg() = %d\n", -+ _errno); -+ } -+ } -+ -+ get_pause_cfg(mac_dev, &rx_pause, &tx_pause); -+ _errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause); -+ if (unlikely(_errno < 0)) -+ netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno); -+ -+ return _errno; -+} -+ -+#ifdef CONFIG_PM -+static void dpa_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) -+{ -+ struct dpa_priv_s *priv = netdev_priv(net_dev); -+ -+ wol->supported = 0; -+ wol->wolopts = 0; -+ -+ if (!priv->wol || !device_can_wakeup(net_dev->dev.parent)) -+ return; -+ -+ if (priv->wol & DPAA_WOL_MAGIC) { -+ wol->supported = WAKE_MAGIC; -+ wol->wolopts = WAKE_MAGIC; -+ } -+} -+ -+static int dpa_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) -+{ -+ struct dpa_priv_s *priv = netdev_priv(net_dev); -+ -+ if (priv->mac_dev == NULL) { -+ netdev_info(net_dev, "This is a MAC-less interface\n"); -+ return -ENODEV; -+ } -+ -+ if (unlikely(priv->mac_dev->phy_dev == NULL)) { -+ netdev_dbg(net_dev, "phy device not initialized\n"); -+ return -ENODEV; -+ } -+ -+ if (!device_can_wakeup(net_dev->dev.parent) || -+ (wol->wolopts & ~WAKE_MAGIC)) -+ return -EOPNOTSUPP; -+ -+ priv->wol = 0; -+ -+ if (wol->wolopts & WAKE_MAGIC) { -+ priv->wol = DPAA_WOL_MAGIC; -+ device_set_wakeup_enable(net_dev->dev.parent, 1); -+ } else { -+ device_set_wakeup_enable(net_dev->dev.parent, 0); -+ } -+ -+ return 0; -+} -+#endif -+ -+static int dpa_get_eee(struct net_device *net_dev, struct ethtool_eee *et_eee) -+{ -+ struct dpa_priv_s *priv; -+ -+ priv = netdev_priv(net_dev); -+ if (priv->mac_dev == NULL) { -+ netdev_info(net_dev, "This is a MAC-less interface\n"); -+ return -ENODEV; -+ } -+ -+ if (unlikely(priv->mac_dev->phy_dev == NULL)) { -+ netdev_err(net_dev, "phy device not initialized\n"); -+ return -ENODEV; -+ } -+ -+ return phy_ethtool_get_eee(priv->mac_dev->phy_dev, et_eee); -+} -+ -+static int dpa_set_eee(struct net_device *net_dev, struct ethtool_eee *et_eee) -+{ -+ struct dpa_priv_s *priv; -+ -+ priv = netdev_priv(net_dev); -+ if (priv->mac_dev == NULL) { -+ netdev_info(net_dev, "This is a MAC-less interface\n"); -+ return -ENODEV; -+ } -+ -+ if (unlikely(priv->mac_dev->phy_dev == NULL)) { -+ netdev_err(net_dev, "phy device not initialized\n"); -+ return -ENODEV; -+ } -+ -+ return phy_ethtool_set_eee(priv->mac_dev->phy_dev, et_eee); -+} -+ -+static int dpa_get_sset_count(struct net_device *net_dev, int type) -+{ -+ unsigned int total_stats, num_stats; -+ -+ num_stats = num_online_cpus() + 1; -+ total_stats = num_stats * DPA_STATS_PERCPU_LEN + DPA_STATS_GLOBAL_LEN; -+ -+ switch (type) { -+ case ETH_SS_STATS: -+ return total_stats; -+ default: -+ return -EOPNOTSUPP; -+ } -+} -+ -+static void copy_stats(struct dpa_percpu_priv_s *percpu_priv, int num_cpus, -+ int crr_cpu, u64 bp_count, u64 *data) -+{ -+ int num_stat_values = num_cpus + 1; -+ int crr_stat = 0; -+ -+ /* update current CPU's stats and also add them to the total values */ -+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->in_interrupt; -+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->in_interrupt; -+ -+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_packets; -+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_packets; -+ -+ 
data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_packets;
+	data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_packets;
+
+	data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_returned;
+	data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_returned;
+
+	data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_confirm;
+	data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_confirm;
+
+	data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
+	data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
+
+	data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->rx_sg;
+	data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->rx_sg;
+
+	data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_errors;
+	data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_errors;
+
+	data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_errors;
+	data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_errors;
+
+	data[crr_stat * num_stat_values + crr_cpu] = bp_count;
+	data[crr_stat++ * num_stat_values + num_cpus] += bp_count;
+}
+
+static void dpa_get_ethtool_stats(struct net_device *net_dev,
+				  struct ethtool_stats *stats, u64 *data)
+{
+	u64 bp_count, cg_time, cg_num, cg_status;
+	struct dpa_percpu_priv_s *percpu_priv;
+	struct qm_mcr_querycgr query_cgr;
+	struct dpa_rx_errors rx_errors;
+	struct dpa_ern_cnt ern_cnt;
+	struct dpa_priv_s *priv;
+	unsigned int num_cpus, offset;
+	struct dpa_bp *dpa_bp;
+	int total_stats, i;
+
+	total_stats = dpa_get_sset_count(net_dev, ETH_SS_STATS);
+	priv = netdev_priv(net_dev);
+	dpa_bp = priv->dpa_bp;
+	num_cpus = num_online_cpus();
+	bp_count = 0;
+
+	memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
+	memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
+	memset(data, 0, total_stats * sizeof(u64));
+
+	for_each_online_cpu(i) {
+		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+		if (dpa_bp->percpu_count)
+			bp_count = *(per_cpu_ptr(dpa_bp->percpu_count, i));
+
+		rx_errors.dme += percpu_priv->rx_errors.dme;
+		rx_errors.fpe += percpu_priv->rx_errors.fpe;
+		rx_errors.fse += percpu_priv->rx_errors.fse;
+		rx_errors.phe += percpu_priv->rx_errors.phe;
+		rx_errors.cse += percpu_priv->rx_errors.cse;
+
+		ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
+		ern_cnt.wred += percpu_priv->ern_cnt.wred;
+		ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
+		ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
+		ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
+		ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
+		ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
+		ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
+
+		copy_stats(percpu_priv, num_cpus, i, bp_count, data);
+	}
+
+	offset = (num_cpus + 1) * DPA_STATS_PERCPU_LEN;
+	memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));
+
+	offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
+	memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));
+
+	/* gather congestion related counters */
+	cg_num = 0;
+	cg_status = 0;
+	cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
+	if (qman_query_cgr(&priv->cgr_data.cgr, &query_cgr) == 0) {
+		cg_num = priv->cgr_data.cgr_congested_count;
+		cg_status = query_cgr.cgr.cs;
+
+		/* reset congestion stats (like QMan API does) */
+		priv->cgr_data.congested_jiffies = 0;
priv->cgr_data.cgr_congested_count = 0; -+ } -+ -+ offset += sizeof(struct dpa_ern_cnt) / sizeof(u64); -+ data[offset++] = cg_time; -+ data[offset++] = cg_num; -+ data[offset++] = cg_status; -+} -+ -+static void dpa_get_strings(struct net_device *net_dev, u32 stringset, u8 *data) -+{ -+ unsigned int i, j, num_cpus, size; -+ char stat_string_cpu[ETH_GSTRING_LEN]; -+ u8 *strings; -+ -+ strings = data; -+ num_cpus = num_online_cpus(); -+ size = DPA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN; -+ -+ for (i = 0; i < DPA_STATS_PERCPU_LEN; i++) { -+ for (j = 0; j < num_cpus; j++) { -+ snprintf(stat_string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]", dpa_stats_percpu[i], j); -+ memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN); -+ strings += ETH_GSTRING_LEN; -+ } -+ snprintf(stat_string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]", dpa_stats_percpu[i]); -+ memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN); -+ strings += ETH_GSTRING_LEN; -+ } -+ memcpy(strings, dpa_stats_global, size); -+} -+ -+const struct ethtool_ops dpa_ethtool_ops = { -+ .get_settings = dpa_get_settings, -+ .set_settings = dpa_set_settings, -+ .get_drvinfo = dpa_get_drvinfo, -+ .get_msglevel = dpa_get_msglevel, -+ .set_msglevel = dpa_set_msglevel, -+ .nway_reset = dpa_nway_reset, -+ .get_pauseparam = dpa_get_pauseparam, -+ .set_pauseparam = dpa_set_pauseparam, -+ .self_test = NULL, /* TODO invoke the cold-boot unit-test? */ -+ .get_link = ethtool_op_get_link, -+ .get_eee = dpa_get_eee, -+ .set_eee = dpa_set_eee, -+ .get_sset_count = dpa_get_sset_count, -+ .get_ethtool_stats = dpa_get_ethtool_stats, -+ .get_strings = dpa_get_strings, -+#ifdef CONFIG_PM -+ .get_wol = dpa_get_wol, -+ .set_wol = dpa_set_wol, -+#endif -+}; ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_generic_ethtool.c -@@ -0,0 +1,286 @@ -+/* Copyright 2008-2012 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG -+#define pr_fmt(fmt) \ -+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \ -+ KBUILD_BASENAME".c", __LINE__, __func__ -+#else -+#define pr_fmt(fmt) \ -+ KBUILD_MODNAME ": " fmt -+#endif -+ -+#include -+ -+#include "dpaa_eth.h" -+#include "dpaa_eth_common.h" -+#include "dpaa_eth_generic.h" -+ -+static const char dpa_stats_percpu[][ETH_GSTRING_LEN] = { -+ "interrupts", -+ "rx packets", -+ "tx packets", -+ "tx recycled", -+ "tx confirm", -+ "tx S/G", -+ "rx S/G (N/A)", -+ "tx error", -+ "rx error", -+ "bp count", -+ "bp draining count" -+}; -+ -+static char dpa_stats_global[][ETH_GSTRING_LEN] = { -+ /* dpa rx errors */ -+ "rx dma error", -+ "rx frame physical error", -+ "rx frame size error", -+ "rx header error", -+ "rx csum error", -+ -+ /* demultiplexing errors */ -+ "qman cg_tdrop", -+ "qman wred", -+ "qman error cond", -+ "qman early window", -+ "qman late window", -+ "qman fq tdrop", -+ "qman fq retired", -+ "qman orp disabled", -+}; -+ -+#define DPA_STATS_PERCPU_LEN ARRAY_SIZE(dpa_stats_percpu) -+#define DPA_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_stats_global) -+ -+static int __cold dpa_generic_get_settings(struct net_device *net_dev, -+ struct ethtool_cmd *et_cmd) -+{ -+ netdev_info(net_dev, "This interface does not have a MAC device in its control\n"); -+ return -ENODEV; -+} -+ -+static int __cold dpa_generic_set_settings(struct net_device *net_dev, -+ struct ethtool_cmd *et_cmd) -+{ -+ netdev_info(net_dev, "This interface does not have a MAC device in its control\n"); -+ return -ENODEV; -+} -+ -+static void __cold dpa_generic_get_drvinfo(struct net_device *net_dev, -+ struct ethtool_drvinfo *drvinfo) -+{ -+ int _errno; -+ -+ strncpy(drvinfo->driver, KBUILD_MODNAME, -+ sizeof(drvinfo->driver) - 1)[sizeof(drvinfo->driver)-1] = 0; -+ _errno = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), -+ "%X", 0); -+ -+ if (unlikely(_errno >= sizeof(drvinfo->fw_version))) { -+ /* Truncated output */ -+ netdev_notice(net_dev, "snprintf() = %d\n", _errno); -+ } else if (unlikely(_errno < 0)) { -+ netdev_warn(net_dev, "snprintf() = %d\n", _errno); -+ memset(drvinfo->fw_version, 0, sizeof(drvinfo->fw_version)); -+ } -+ strncpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), -+ sizeof(drvinfo->bus_info)-1)[sizeof(drvinfo->bus_info)-1] = 0; -+} -+ -+static uint32_t __cold dpa_generic_get_msglevel(struct net_device *net_dev) -+{ -+ return ((struct dpa_generic_priv_s *)netdev_priv(net_dev))->msg_enable; -+} -+ -+static void __cold dpa_generic_set_msglevel(struct net_device *net_dev, -+ uint32_t msg_enable) -+{ -+ ((struct dpa_generic_priv_s *)netdev_priv(net_dev))->msg_enable = -+ msg_enable; -+} -+ -+static int __cold dpa_generic_nway_reset(struct net_device *net_dev) -+{ -+ netdev_info(net_dev, "This interface does not have a MAC device in its control\n"); -+ return -ENODEV; -+} -+ -+static int dpa_generic_get_sset_count(struct net_device *net_dev, int type) -+{ -+ unsigned int total_stats, num_stats; -+ -+ num_stats = 
num_online_cpus() + 1; -+ total_stats = num_stats * DPA_STATS_PERCPU_LEN + DPA_STATS_GLOBAL_LEN; -+ -+ switch (type) { -+ case ETH_SS_STATS: -+ return total_stats; -+ default: -+ return -EOPNOTSUPP; -+ } -+} -+ -+static void copy_stats(struct dpa_percpu_priv_s *percpu_priv, -+ int num_cpus, int crr_cpu, u64 bp_count, -+ u64 bp_drain_count, u64 *data) -+{ -+ int num_values = num_cpus + 1; -+ int crr = 0; -+ -+ /* update current CPU's stats and also add them to the total values */ -+ data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt; -+ data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt; -+ -+ data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets; -+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets; -+ -+ data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets; -+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets; -+ -+ data[crr * num_values + crr_cpu] = percpu_priv->tx_returned; -+ data[crr++ * num_values + num_cpus] += percpu_priv->tx_returned; -+ -+ data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm; -+ data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm; -+ -+ data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs; -+ data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs; -+ -+ data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors; -+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors; -+ -+ data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors; -+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors; -+ -+ data[crr * num_values + crr_cpu] = bp_count; -+ data[crr++ * num_values + num_cpus] += bp_count; -+ -+ data[crr * num_values + crr_cpu] = bp_drain_count; -+ data[crr++ * num_values + num_cpus] += bp_drain_count; -+} -+ -+static void dpa_generic_get_ethtool_stats(struct net_device *net_dev, -+ struct ethtool_stats *stats, -+ u64 *data) -+{ -+ struct dpa_percpu_priv_s *percpu_priv; -+ struct dpa_bp *dpa_bp, *drain_bp; -+ struct dpa_generic_priv_s *priv; -+ struct dpa_rx_errors rx_errors; -+ struct dpa_ern_cnt ern_cnt; -+ unsigned int num_cpus, offset; -+ u64 bp_cnt, drain_cnt; -+ int total_stats, i; -+ -+ total_stats = dpa_generic_get_sset_count(net_dev, ETH_SS_STATS); -+ priv = netdev_priv(net_dev); -+ drain_bp = priv->draining_tx_bp; -+ dpa_bp = priv->rx_bp; -+ num_cpus = num_online_cpus(); -+ drain_cnt = 0; -+ bp_cnt = 0; -+ -+ memset(&rx_errors, 0, sizeof(struct dpa_rx_errors)); -+ memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt)); -+ memset(data, 0, total_stats * sizeof(u64)); -+ -+ for_each_online_cpu(i) { -+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i); -+ -+ if (dpa_bp->percpu_count) -+ bp_cnt = *(per_cpu_ptr(dpa_bp->percpu_count, i)); -+ -+ if (drain_bp->percpu_count) -+ drain_cnt = *(per_cpu_ptr(drain_bp->percpu_count, i)); -+ -+ rx_errors.dme += percpu_priv->rx_errors.dme; -+ rx_errors.fpe += percpu_priv->rx_errors.fpe; -+ rx_errors.fse += percpu_priv->rx_errors.fse; -+ rx_errors.phe += percpu_priv->rx_errors.phe; -+ rx_errors.cse += percpu_priv->rx_errors.cse; -+ -+ ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop; -+ ern_cnt.wred += percpu_priv->ern_cnt.wred; -+ ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond; -+ ern_cnt.early_window += percpu_priv->ern_cnt.early_window; -+ ern_cnt.late_window += percpu_priv->ern_cnt.late_window; -+ ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop; -+ ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired; -+ ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero; -+ 
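+		/* fold this CPU's counters into its own column and into the
+		 * trailing [TOTAL] column of data[]: one row per stat string,
+		 * num_online_cpus() + 1 columns per row
+		 */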
-+ copy_stats(percpu_priv, num_cpus, i, bp_cnt, drain_cnt, data); -+ } -+ -+ offset = (num_cpus + 1) * DPA_STATS_PERCPU_LEN; -+ memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors)); -+ -+ offset += sizeof(struct dpa_rx_errors) / sizeof(u64); -+ memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt)); -+} -+ -+static void dpa_generic_get_strings(struct net_device *net_dev, -+ u32 stringset, u8 *data) -+{ -+ unsigned int i, j, num_cpus, size; -+ char string_cpu[ETH_GSTRING_LEN]; -+ u8 *strings; -+ -+ strings = data; -+ num_cpus = num_online_cpus(); -+ size = DPA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN; -+ -+ for (i = 0; i < DPA_STATS_PERCPU_LEN; i++) { -+ for (j = 0; j < num_cpus; j++) { -+ snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]", -+ dpa_stats_percpu[i], j); -+ memcpy(strings, string_cpu, ETH_GSTRING_LEN); -+ strings += ETH_GSTRING_LEN; -+ } -+ snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]", -+ dpa_stats_percpu[i]); -+ memcpy(strings, string_cpu, ETH_GSTRING_LEN); -+ strings += ETH_GSTRING_LEN; -+ } -+ memcpy(strings, dpa_stats_global, size); -+} -+ -+const struct ethtool_ops dpa_generic_ethtool_ops = { -+ .get_settings = dpa_generic_get_settings, -+ .set_settings = dpa_generic_set_settings, -+ .get_drvinfo = dpa_generic_get_drvinfo, -+ .get_msglevel = dpa_generic_get_msglevel, -+ .set_msglevel = dpa_generic_set_msglevel, -+ .nway_reset = dpa_generic_nway_reset, -+ .get_link = ethtool_op_get_link, -+ .get_sset_count = dpa_generic_get_sset_count, -+ .get_ethtool_stats = dpa_generic_get_ethtool_stats, -+ .get_strings = dpa_generic_get_strings, -+}; ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_macsec_ethtool.c -@@ -0,0 +1,250 @@ -+/* Copyright 2015 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include -+ -+#include "dpaa_eth.h" -+#include "dpaa_eth_macsec.h" -+ -+static const char dpa_macsec_stats_percpu[][ETH_GSTRING_LEN] = { -+ "interrupts", -+ "rx packets", -+ "tx packets", -+ "tx recycled", -+ "tx confirm", -+ "tx S/G", -+ "rx S/G", -+ "tx error", -+ "rx error", -+ "bp count", -+ "tx macsec", -+ "rx macsec" -+}; -+ -+static char dpa_macsec_stats_global[][ETH_GSTRING_LEN] = { -+ /* dpa rx errors */ -+ "rx dma error", -+ "rx frame physical error", -+ "rx frame size error", -+ "rx header error", -+ "rx csum error", -+ -+ /* demultiplexing errors */ -+ "qman cg_tdrop", -+ "qman wred", -+ "qman error cond", -+ "qman early window", -+ "qman late window", -+ "qman fq tdrop", -+ "qman fq retired", -+ "qman orp disabled", -+ -+ /* congestion related stats */ -+ "congestion time (ms)", -+ "entered congestion", -+ "congested (0/1)" -+}; -+ -+#define DPA_MACSEC_STATS_PERCPU_LEN ARRAY_SIZE(dpa_macsec_stats_percpu) -+#define DPA_MACSEC_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_macsec_stats_global) -+ -+static void copy_stats(struct dpa_percpu_priv_s *percpu_priv, int num_cpus, -+ int crr_cpu, u64 bp_count, u64 tx_macsec, -+ u64 rx_macsec, u64 *data) -+{ -+ int num_values = num_cpus + 1; -+ int crr = 0; -+ -+ /* update current CPU's stats and also add them to the total values */ -+ data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt; -+ data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt; -+ -+ data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets; -+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets; -+ -+ data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets; -+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets; -+ -+ data[crr * num_values + crr_cpu] = percpu_priv->tx_returned; -+ data[crr++ * num_values + num_cpus] += percpu_priv->tx_returned; -+ -+ data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm; -+ data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm; -+ -+ data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs; -+ data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs; -+ -+ data[crr * num_values + crr_cpu] = percpu_priv->rx_sg; -+ data[crr++ * num_values + num_cpus] += percpu_priv->rx_sg; -+ -+ data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors; -+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors; -+ -+ data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors; -+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors; -+ -+ data[crr * num_values + crr_cpu] = bp_count; -+ data[crr++ * num_values + num_cpus] += bp_count; -+ -+ data[crr * num_values + crr_cpu] = tx_macsec; -+ data[crr++ * num_values + num_cpus] += tx_macsec; -+ -+ data[crr * num_values + crr_cpu] = rx_macsec; -+ data[crr++ * num_values + num_cpus] += rx_macsec; -+} -+ -+int dpa_macsec_get_sset_count(struct net_device *net_dev, int type) -+{ -+ unsigned int total_stats, num_stats; -+ -+ num_stats = num_online_cpus() + 1; -+ total_stats = num_stats * DPA_MACSEC_STATS_PERCPU_LEN + -+ DPA_MACSEC_STATS_GLOBAL_LEN; -+ -+ switch (type) { -+ case ETH_SS_STATS: -+ return total_stats; -+ default: -+ return -EOPNOTSUPP; -+ } -+} -+ -+void dpa_macsec_get_ethtool_stats(struct net_device *net_dev, -+ struct ethtool_stats *stats, u64 *data) -+{ -+ u64 bp_count, bp_total, cg_time, cg_num, cg_status; -+ struct macsec_percpu_priv_s *percpu_priv_macsec; -+ struct dpa_percpu_priv_s *percpu_priv; -+ struct macsec_priv_s *macsec_priv; -+ struct qm_mcr_querycgr 
query_cgr;
+	struct dpa_rx_errors rx_errors;
+	struct dpa_ern_cnt ern_cnt;
+	struct dpa_priv_s *priv;
+	unsigned int num_cpus, offset;
+	struct dpa_bp *dpa_bp;
+	int total_stats, i;
+
+	macsec_priv = dpa_macsec_get_priv(net_dev);
+	if (unlikely(!macsec_priv)) {
+		pr_err("selected macsec_priv is NULL\n");
+		return;
+	}
+
+	total_stats = dpa_macsec_get_sset_count(net_dev, ETH_SS_STATS);
+	priv = netdev_priv(net_dev);
+	dpa_bp = priv->dpa_bp;
+	num_cpus = num_online_cpus();
+	bp_count = 0;
+	bp_total = 0;
+
+	memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
+	memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
+	memset(data, 0, total_stats * sizeof(u64));
+
+	for_each_online_cpu(i) {
+		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+		percpu_priv_macsec = per_cpu_ptr(macsec_priv->percpu_priv, i);
+
+		if (dpa_bp->percpu_count)
+			bp_count = *(per_cpu_ptr(dpa_bp->percpu_count, i));
+
+		rx_errors.dme += percpu_priv->rx_errors.dme;
+		rx_errors.fpe += percpu_priv->rx_errors.fpe;
+		rx_errors.fse += percpu_priv->rx_errors.fse;
+		rx_errors.phe += percpu_priv->rx_errors.phe;
+		rx_errors.cse += percpu_priv->rx_errors.cse;
+
+		ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
+		ern_cnt.wred += percpu_priv->ern_cnt.wred;
+		ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
+		ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
+		ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
+		ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
+		ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
+		ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
+
+		copy_stats(percpu_priv, num_cpus, i, bp_count,
+			   percpu_priv_macsec->tx_macsec,
+			   percpu_priv_macsec->rx_macsec,
+			   data);
+	}
+
+	offset = (num_cpus + 1) * DPA_MACSEC_STATS_PERCPU_LEN;
+	memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));
+
+	offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
+	memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));
+
+	/* gather congestion related counters */
+	cg_num = 0;
+	cg_status = 0;
+	cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
+	if (qman_query_cgr(&priv->cgr_data.cgr, &query_cgr) == 0) {
+		cg_num = priv->cgr_data.cgr_congested_count;
+		cg_status = query_cgr.cgr.cs;
+
+		/* reset congestion stats (like QMan API does) */
+		priv->cgr_data.congested_jiffies = 0;
+		priv->cgr_data.cgr_congested_count = 0;
+	}
+
+	offset += sizeof(struct dpa_ern_cnt) / sizeof(u64);
+	data[offset++] = cg_time;
+	data[offset++] = cg_num;
+	data[offset++] = cg_status;
+}
+
+void dpa_macsec_get_strings(struct net_device *net_dev,
+			    u32 stringset, u8 *data)
+{
+	unsigned int i, j, num_cpus, size;
+	char string_cpu[ETH_GSTRING_LEN];
+	u8 *strings;
+
+	strings = data;
+	num_cpus = num_online_cpus();
+	size = DPA_MACSEC_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
+
+	for (i = 0; i < DPA_MACSEC_STATS_PERCPU_LEN; i++) {
+		for (j = 0; j < num_cpus; j++) {
+			snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
+				 dpa_macsec_stats_percpu[i], j);
+			memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+			strings += ETH_GSTRING_LEN;
+		}
+		snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
+			 dpa_macsec_stats_percpu[i]);
+		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+		strings += ETH_GSTRING_LEN;
+	}
+	memcpy(strings, dpa_macsec_stats_global, size);
+}
+
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c
@@ -0,0 +1,287 @@
+/*
+ * DPAA Ethernet Driver -- PTP 1588 clock using the dTSEC
+ *
+ * Author: Yangbo Lu
+ *
+ * Copyright 2014 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+*/
+
+#include <linux/device.h>
+#include <linux/hrtimer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/timex.h>
+#include <linux/io.h>
+
+#include <linux/ptp_clock_kernel.h>
+
+#include "dpaa_eth.h"
+#include "mac.h"
+
+struct ptp_clock *clock;
+
+static struct mac_device *mac_dev;
+static u32 freqCompensation;
+
+/* Bit definitions for the TMR_CTRL register */
+#define ALM1P (1<<31) /* Alarm1 output polarity */
+#define ALM2P (1<<30) /* Alarm2 output polarity */
+#define FS (1<<28) /* FIPER start indication */
+#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
+#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
+#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
+#define TCLK_PERIOD_MASK (0x3ff)
+#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
+#define FRD (1<<14) /* FIPER Realignment Disable */
+#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
+#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
+#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
+#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
+#define COPH (1<<7) /* Generated clock output phase. */
+#define CIPH (1<<6) /* External oscillator input clock phase */
+#define TMSR (1<<5) /* Timer soft reset. */
+#define BYP (1<<3) /* Bypass drift compensated clock */
+#define TE (1<<2) /* 1588 timer enable. */
+#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
+#define CKSEL_MASK (0x3)
+
+/* Bit definitions for the TMR_TEVENT register */
+#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
+#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
+#define ALM2 (1<<17) /* Current time = alarm time register 2 */
+#define ALM1 (1<<16) /* Current time = alarm time register 1 */
+#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
+#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
+#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
+
+/* Bit definitions for the TMR_TEMASK register */
+#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
+#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
+#define ALM2EN (1<<17) /* Timer ALM2 event enable */
+#define ALM1EN (1<<16) /* Timer ALM1 event enable */
+#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
+#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
+
+/* Bit definitions for the TMR_PEVENT register */
+#define TXP2 (1<<9) /* PTP transmitted timestamp in TXTS2 */
+#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
+#define RXP (1<<0) /* PTP frame has been received */
+
+/* Bit definitions for the TMR_PEMASK register */
+#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
+#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
+#define RXPEN (1<<0) /* Receive PTP packet event enable */
+
+/* Bit definitions for the TMR_STAT register */
+#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
+#define STAT_VEC_MASK (0x3f)
+
+/* Bit definitions for the TMR_PRSC register */
+#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor.
*/ -+#define PRSC_OCK_MASK (0xffff) -+ -+ -+#define N_EXT_TS 2 -+ -+static void set_alarm(void) -+{ -+ u64 ns; -+ -+ if (mac_dev->fm_rtc_get_cnt) -+ mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns); -+ ns += 1500000000ULL; -+ ns = div_u64(ns, 1000000000UL) * 1000000000ULL; -+ ns -= DPA_PTP_NOMINAL_FREQ_PERIOD_NS; -+ if (mac_dev->fm_rtc_set_alarm) -+ mac_dev->fm_rtc_set_alarm(mac_dev->fm_dev, 0, ns); -+} -+ -+static void set_fipers(void) -+{ -+ u64 fiper; -+ -+ if (mac_dev->fm_rtc_disable) -+ mac_dev->fm_rtc_disable(mac_dev->fm_dev); -+ -+ set_alarm(); -+ fiper = 1000000000ULL - DPA_PTP_NOMINAL_FREQ_PERIOD_NS; -+ if (mac_dev->fm_rtc_set_fiper) -+ mac_dev->fm_rtc_set_fiper(mac_dev->fm_dev, 0, fiper); -+ -+ if (mac_dev->fm_rtc_enable) -+ mac_dev->fm_rtc_enable(mac_dev->fm_dev); -+} -+ -+/* PTP clock operations */ -+ -+static int ptp_dpa_adjfreq(struct ptp_clock_info *ptp, s32 ppb) -+{ -+ u64 adj; -+ u32 diff, tmr_add; -+ int neg_adj = 0; -+ -+ if (ppb < 0) { -+ neg_adj = 1; -+ ppb = -ppb; -+ } -+ -+ tmr_add = freqCompensation; -+ adj = tmr_add; -+ adj *= ppb; -+ diff = div_u64(adj, 1000000000ULL); -+ -+ tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff; -+ -+ if (mac_dev->fm_rtc_set_drift) -+ mac_dev->fm_rtc_set_drift(mac_dev->fm_dev, tmr_add); -+ -+ return 0; -+} -+ -+static int ptp_dpa_adjtime(struct ptp_clock_info *ptp, s64 delta) -+{ -+ s64 now; -+ -+ if (mac_dev->fm_rtc_get_cnt) -+ mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &now); -+ -+ now += delta; -+ -+ if (mac_dev->fm_rtc_set_cnt) -+ mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, now); -+ set_fipers(); -+ -+ return 0; -+} -+ -+static int ptp_dpa_gettime(struct ptp_clock_info *ptp, struct timespec *ts) -+{ -+ u64 ns; -+ u32 remainder; -+ -+ if (mac_dev->fm_rtc_get_cnt) -+ mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns); -+ -+ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); -+ ts->tv_nsec = remainder; -+ return 0; -+} -+ -+static int ptp_dpa_settime(struct ptp_clock_info *ptp, -+ const struct timespec *ts) -+{ -+ u64 ns; -+ -+ ns = ts->tv_sec * 1000000000ULL; -+ ns += ts->tv_nsec; -+ -+ if (mac_dev->fm_rtc_set_cnt) -+ mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, ns); -+ set_fipers(); -+ return 0; -+} -+ -+static int ptp_dpa_enable(struct ptp_clock_info *ptp, -+ struct ptp_clock_request *rq, int on) -+{ -+ u32 bit; -+ -+ switch (rq->type) { -+ case PTP_CLK_REQ_EXTTS: -+ switch (rq->extts.index) { -+ case 0: -+ bit = ETS1EN; -+ break; -+ case 1: -+ bit = ETS2EN; -+ break; -+ default: -+ return -EINVAL; -+ } -+ if (on) { -+ if (mac_dev->fm_rtc_enable_interrupt) -+ mac_dev->fm_rtc_enable_interrupt( -+ mac_dev->fm_dev, bit); -+ } else { -+ if (mac_dev->fm_rtc_disable_interrupt) -+ mac_dev->fm_rtc_disable_interrupt( -+ mac_dev->fm_dev, bit); -+ } -+ return 0; -+ -+ case PTP_CLK_REQ_PPS: -+ if (on) { -+ if (mac_dev->fm_rtc_enable_interrupt) -+ mac_dev->fm_rtc_enable_interrupt( -+ mac_dev->fm_dev, PP1EN); -+ } else { -+ if (mac_dev->fm_rtc_disable_interrupt) -+ mac_dev->fm_rtc_disable_interrupt( -+ mac_dev->fm_dev, PP1EN); -+ } -+ return 0; -+ -+ default: -+ break; -+ } -+ -+ return -EOPNOTSUPP; -+} -+ -+static struct ptp_clock_info ptp_dpa_caps = { -+ .owner = THIS_MODULE, -+ .name = "dpaa clock", -+ .max_adj = 512000, -+ .n_alarm = 0, -+ .n_ext_ts = N_EXT_TS, -+ .n_per_out = 0, -+ .pps = 1, -+ .adjfreq = ptp_dpa_adjfreq, -+ .adjtime = ptp_dpa_adjtime, -+ .gettime = ptp_dpa_gettime, -+ .settime = ptp_dpa_settime, -+ .enable = ptp_dpa_enable, -+}; -+ -+static int __init __cold dpa_ptp_load(void) -+{ -+ struct device *ptp_dev; -+ struct timespec now; -+ 
int dpa_phc_index; -+ int err; -+ -+ ptp_dev = &ptp_priv.of_dev->dev; -+ mac_dev = ptp_priv.mac_dev; -+ -+ if (mac_dev->fm_rtc_get_drift) -+ mac_dev->fm_rtc_get_drift(mac_dev->fm_dev, &freqCompensation); -+ -+ getnstimeofday(&now); -+ ptp_dpa_settime(&ptp_dpa_caps, &now); -+ -+ clock = ptp_clock_register(&ptp_dpa_caps, ptp_dev); -+ if (IS_ERR(clock)) { -+ err = PTR_ERR(clock); -+ return err; -+ } -+ dpa_phc_index = ptp_clock_index(clock); -+ return 0; -+} -+module_init(dpa_ptp_load); -+ -+static void __exit __cold dpa_ptp_unload(void) -+{ -+ if (mac_dev->fm_rtc_disable_interrupt) -+ mac_dev->fm_rtc_disable_interrupt(mac_dev->fm_dev, 0xffffffff); -+ ptp_clock_unregister(clock); -+} -+module_exit(dpa_ptp_unload); ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c -@@ -0,0 +1,915 @@ -+/* Copyright 2008-2012 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG -+#define pr_fmt(fmt) \ -+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \ -+ KBUILD_BASENAME".c", __LINE__, __func__ -+#else -+#define pr_fmt(fmt) \ -+ KBUILD_MODNAME ": " fmt -+#endif -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "dpaa_eth.h" -+#include "mac.h" -+#include "lnxwrp_fsl_fman.h" -+ -+#include "error_ext.h" /* GET_ERROR_TYPE, E_OK */ -+ -+#include "fsl_fman_dtsec.h" -+#include "fsl_fman_tgec.h" -+#include "fsl_fman_memac.h" -+#include "../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h" -+ -+#define MAC_DESCRIPTION "FSL FMan MAC API based driver" -+ -+MODULE_LICENSE("Dual BSD/GPL"); -+ -+MODULE_AUTHOR("Emil Medve "); -+ -+MODULE_DESCRIPTION(MAC_DESCRIPTION); -+ -+struct mac_priv_s { -+ struct fm_mac_dev *fm_mac; -+}; -+ -+const char *mac_driver_description __initconst = MAC_DESCRIPTION; -+const size_t mac_sizeof_priv[] = { -+ [DTSEC] = sizeof(struct mac_priv_s), -+ [XGMAC] = sizeof(struct mac_priv_s), -+ [MEMAC] = sizeof(struct mac_priv_s) -+}; -+ -+static const enet_mode_t _100[] = { -+ [PHY_INTERFACE_MODE_MII] = e_ENET_MODE_MII_100, -+ [PHY_INTERFACE_MODE_RMII] = e_ENET_MODE_RMII_100 -+}; -+ -+static const enet_mode_t _1000[] = { -+ [PHY_INTERFACE_MODE_GMII] = e_ENET_MODE_GMII_1000, -+ [PHY_INTERFACE_MODE_SGMII] = e_ENET_MODE_SGMII_1000, -+ [PHY_INTERFACE_MODE_QSGMII] = e_ENET_MODE_QSGMII_1000, -+ [PHY_INTERFACE_MODE_TBI] = e_ENET_MODE_TBI_1000, -+ [PHY_INTERFACE_MODE_RGMII] = e_ENET_MODE_RGMII_1000, -+ [PHY_INTERFACE_MODE_RGMII_ID] = e_ENET_MODE_RGMII_1000, -+ [PHY_INTERFACE_MODE_RGMII_RXID] = e_ENET_MODE_RGMII_1000, -+ [PHY_INTERFACE_MODE_RGMII_TXID] = e_ENET_MODE_RGMII_1000, -+ [PHY_INTERFACE_MODE_RTBI] = e_ENET_MODE_RTBI_1000 -+}; -+ -+static enet_mode_t __cold __attribute__((nonnull)) -+macdev2enetinterface(const struct mac_device *mac_dev) -+{ -+ switch (mac_dev->max_speed) { -+ case SPEED_100: -+ return _100[mac_dev->phy_if]; -+ case SPEED_1000: -+ return _1000[mac_dev->phy_if]; -+ case SPEED_2500: -+ return e_ENET_MODE_SGMII_2500; -+ case SPEED_10000: -+ return e_ENET_MODE_XGMII_10000; -+ default: -+ return e_ENET_MODE_MII_100; -+ } -+} -+ -+static void mac_exception(handle_t _mac_dev, e_FmMacExceptions exception) -+{ -+ struct mac_device *mac_dev; -+ -+ mac_dev = (struct mac_device *)_mac_dev; -+ -+ if (e_FM_MAC_EX_10G_RX_FIFO_OVFL == exception) { -+ /* don't flag RX FIFO after the first */ -+ fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev), -+ e_FM_MAC_EX_10G_RX_FIFO_OVFL, false); -+ dev_err(mac_dev->dev, "10G MAC got RX FIFO Error = %x\n", -+ exception); -+ } -+ -+ dev_dbg(mac_dev->dev, "%s:%s() -> %d\n", KBUILD_BASENAME".c", __func__, -+ exception); -+} -+ -+static int __cold init(struct mac_device *mac_dev) -+{ -+ int _errno; -+ struct mac_priv_s *priv; -+ t_FmMacParams param; -+ uint32_t version; -+ -+ priv = macdev_priv(mac_dev); -+ -+ param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap( -+ mac_dev->dev, mac_dev->res->start, 0x2000); -+ param.enetMode = macdev2enetinterface(mac_dev); -+ memcpy(¶m.addr, mac_dev->addr, min(sizeof(param.addr), -+ sizeof(mac_dev->addr))); -+ param.macId = mac_dev->cell_index; -+ param.h_Fm = (handle_t)mac_dev->fm; -+ param.mdioIrq = NO_IRQ; -+ param.f_Exception = mac_exception; -+ param.f_Event = mac_exception; -+ param.h_App = mac_dev; -+ -+ priv->fm_mac = fm_mac_config(¶m); -+ if (unlikely(priv->fm_mac == NULL)) { -+ _errno = -EINVAL; -+ goto _return; -+ } -+ -+ fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac, -+ 
(macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ? -+ param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS); -+ -+ _errno = fm_mac_config_max_frame_length(priv->fm_mac, -+ fm_get_max_frm()); -+ if (unlikely(_errno < 0)) -+ goto _return_fm_mac_free; -+ -+ if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) { -+ /* 10G always works with pad and CRC */ -+ _errno = fm_mac_config_pad_and_crc(priv->fm_mac, true); -+ if (unlikely(_errno < 0)) -+ goto _return_fm_mac_free; -+ -+ _errno = fm_mac_config_half_duplex(priv->fm_mac, -+ mac_dev->half_duplex); -+ if (unlikely(_errno < 0)) -+ goto _return_fm_mac_free; -+ } else { -+ _errno = fm_mac_config_reset_on_init(priv->fm_mac, true); -+ if (unlikely(_errno < 0)) -+ goto _return_fm_mac_free; -+ } -+ -+ _errno = fm_mac_init(priv->fm_mac); -+ if (unlikely(_errno < 0)) -+ goto _return_fm_mac_free; -+ -+#ifndef CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN -+ /* For 1G MAC, disable by default the MIB counters overflow interrupt */ -+ if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) { -+ _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev), -+ e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL, FALSE); -+ if (unlikely(_errno < 0)) -+ goto _return_fm_mac_free; -+ } -+#endif /* !CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN */ -+ -+ /* For 10G MAC, disable Tx ECC exception */ -+ if (macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) { -+ _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev), -+ e_FM_MAC_EX_10G_1TX_ECC_ER, FALSE); -+ if (unlikely(_errno < 0)) -+ goto _return_fm_mac_free; -+ } -+ -+ _errno = fm_mac_get_version(priv->fm_mac, &version); -+ if (unlikely(_errno < 0)) -+ goto _return_fm_mac_free; -+ -+ dev_info(mac_dev->dev, "FMan %s version: 0x%08x\n", -+ ((macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ? -+ "dTSEC" : "XGEC"), version); -+ -+ goto _return; -+ -+ -+_return_fm_mac_free: -+ fm_mac_free(mac_dev->get_mac_handle(mac_dev)); -+ -+_return: -+ return _errno; -+} -+ -+static int __cold memac_init(struct mac_device *mac_dev) -+{ -+ int _errno; -+ struct mac_priv_s *priv; -+ t_FmMacParams param; -+ -+ priv = macdev_priv(mac_dev); -+ -+ param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap( -+ mac_dev->dev, mac_dev->res->start, 0x2000); -+ param.enetMode = macdev2enetinterface(mac_dev); -+ memcpy(¶m.addr, mac_dev->addr, sizeof(mac_dev->addr)); -+ param.macId = mac_dev->cell_index; -+ param.h_Fm = (handle_t)mac_dev->fm; -+ param.mdioIrq = NO_IRQ; -+ param.f_Exception = mac_exception; -+ param.f_Event = mac_exception; -+ param.h_App = mac_dev; -+ -+ priv->fm_mac = fm_mac_config(¶m); -+ if (unlikely(priv->fm_mac == NULL)) { -+ _errno = -EINVAL; -+ goto _return; -+ } -+ -+ fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac, -+ (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ? 
+		param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS);
+
+	_errno = fm_mac_config_max_frame_length(priv->fm_mac, fm_get_max_frm());
+	if (unlikely(_errno < 0))
+		goto _return_fm_mac_free;
+
+	_errno = fm_mac_config_reset_on_init(priv->fm_mac, true);
+	if (unlikely(_errno < 0))
+		goto _return_fm_mac_free;
+
+	_errno = fm_mac_init(priv->fm_mac);
+	if (unlikely(_errno < 0))
+		goto _return_fm_mac_free;
+
+	dev_info(mac_dev->dev, "FMan MEMAC\n");
+
+	goto _return;
+
+_return_fm_mac_free:
+	fm_mac_free(priv->fm_mac);
+
+_return:
+	return _errno;
+}
+
+static int __cold start(struct mac_device *mac_dev)
+{
+	int _errno;
+	struct phy_device *phy_dev = mac_dev->phy_dev;
+
+	_errno = fm_mac_enable(mac_dev->get_mac_handle(mac_dev));
+
+	if (!_errno && phy_dev) {
+		if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000)
+			phy_start(phy_dev);
+		else if (phy_dev->drv->read_status)
+			phy_dev->drv->read_status(phy_dev);
+	}
+
+	return _errno;
+}
+
+static int __cold stop(struct mac_device *mac_dev)
+{
+	if (mac_dev->phy_dev &&
+	    (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000))
+		phy_stop(mac_dev->phy_dev);
+
+	return fm_mac_disable(mac_dev->get_mac_handle(mac_dev));
+}
+
+static int __cold set_multi(struct net_device *net_dev,
+			    struct mac_device *mac_dev)
+{
+	struct mac_priv_s *mac_priv;
+	struct mac_address *old_addr, *tmp;
+	struct netdev_hw_addr *ha;
+	int _errno;
+
+	mac_priv = macdev_priv(mac_dev);
+
+	/* Clear previous address list */
+	list_for_each_entry_safe(old_addr, tmp, &mac_dev->mc_addr_list, list) {
+		_errno = fm_mac_remove_hash_mac_addr(mac_priv->fm_mac,
+						     (t_EnetAddr *)old_addr->addr);
+		if (_errno < 0)
+			return _errno;
+
+		list_del(&old_addr->list);
+		kfree(old_addr);
+	}
+
+	/* Add all the addresses from the new list */
+	netdev_for_each_mc_addr(ha, net_dev) {
+		_errno = fm_mac_add_hash_mac_addr(mac_priv->fm_mac,
+						  (t_EnetAddr *)ha->addr);
+		if (_errno < 0)
+			return _errno;
+
+		tmp = kmalloc(sizeof(struct mac_address), GFP_ATOMIC);
+		if (!tmp) {
+			dev_err(mac_dev->dev, "Out of memory\n");
+			return -ENOMEM;
+		}
+		memcpy(tmp->addr, ha->addr, ETH_ALEN);
+		list_add(&tmp->list, &mac_dev->mc_addr_list);
+	}
+	return 0;
+}
+
+/* Avoid redundant calls to FMD, if the MAC driver already contains the desired
+ * active PAUSE settings. Otherwise, the new active settings should be reflected
+ * in FMan.
+ */
+int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
+{
+	struct fm_mac_dev *fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
+	int _errno = 0;
+
+	if (unlikely(rx != mac_dev->rx_pause_active)) {
+		_errno = fm_mac_set_rx_pause_frames(fm_mac_dev, rx);
+		if (likely(_errno == 0))
+			mac_dev->rx_pause_active = rx;
+	}
+
+	if (unlikely(tx != mac_dev->tx_pause_active)) {
+		_errno = fm_mac_set_tx_pause_frames(fm_mac_dev, tx);
+		if (likely(_errno == 0))
+			mac_dev->tx_pause_active = tx;
+	}
+
+	return _errno;
+}
+EXPORT_SYMBOL(set_mac_active_pause);
+
+/* Determine the MAC RX/TX PAUSE frames settings based on PHY
+ * autonegotiation or values set by ethtool.
+ */
+void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause)
+{
+	struct phy_device *phy_dev = mac_dev->phy_dev;
+	u16 lcl_adv, rmt_adv;
+	u8 flowctrl;
+
+	*rx_pause = *tx_pause = false;
+
+	if (!phy_dev->duplex)
+		return;
+
+	/* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
+	 * are those set by ethtool.
-+ */ -+ if (!mac_dev->autoneg_pause) { -+ *rx_pause = mac_dev->rx_pause_req; -+ *tx_pause = mac_dev->tx_pause_req; -+ return; -+ } -+ -+ /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE -+ * settings depend on the result of the link negotiation. -+ */ -+ -+ /* get local capabilities */ -+ lcl_adv = 0; -+ if (phy_dev->advertising & ADVERTISED_Pause) -+ lcl_adv |= ADVERTISE_PAUSE_CAP; -+ if (phy_dev->advertising & ADVERTISED_Asym_Pause) -+ lcl_adv |= ADVERTISE_PAUSE_ASYM; -+ -+ /* get link partner capabilities */ -+ rmt_adv = 0; -+ if (phy_dev->pause) -+ rmt_adv |= LPA_PAUSE_CAP; -+ if (phy_dev->asym_pause) -+ rmt_adv |= LPA_PAUSE_ASYM; -+ -+ /* Calculate TX/RX settings based on local and peer advertised -+ * symmetric/asymmetric PAUSE capabilities. -+ */ -+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); -+ if (flowctrl & FLOW_CTRL_RX) -+ *rx_pause = true; -+ if (flowctrl & FLOW_CTRL_TX) -+ *tx_pause = true; -+} -+EXPORT_SYMBOL(get_pause_cfg); -+ -+static void adjust_link(struct net_device *net_dev) -+{ -+ struct dpa_priv_s *priv = netdev_priv(net_dev); -+ struct mac_device *mac_dev = priv->mac_dev; -+ struct phy_device *phy_dev = mac_dev->phy_dev; -+ struct fm_mac_dev *fm_mac_dev; -+ bool rx_pause, tx_pause; -+ int _errno; -+ -+ fm_mac_dev = mac_dev->get_mac_handle(mac_dev); -+ fm_mac_adjust_link(fm_mac_dev, phy_dev->link, phy_dev->speed, -+ phy_dev->duplex); -+ -+ get_pause_cfg(mac_dev, &rx_pause, &tx_pause); -+ _errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause); -+ if (unlikely(_errno < 0)) -+ netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno); -+} -+ -+/* Initializes driver's PHY state, and attaches to the PHY. -+ * Returns 0 on success. -+ */ -+static int dtsec_init_phy(struct net_device *net_dev, -+ struct mac_device *mac_dev) -+{ -+ struct phy_device *phy_dev; -+ -+ if (!mac_dev->phy_node) -+ phy_dev = phy_connect(net_dev, mac_dev->fixed_bus_id, -+ &adjust_link, mac_dev->phy_if); -+ else -+ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node, -+ &adjust_link, 0, mac_dev->phy_if); -+ if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) { -+ netdev_err(net_dev, "Could not connect to PHY %s\n", -+ mac_dev->phy_node ? -+ mac_dev->phy_node->full_name : -+ mac_dev->fixed_bus_id); -+ return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev); -+ } -+ -+ /* Remove any features not supported by the controller */ -+ phy_dev->supported &= mac_dev->if_support; -+ /* Enable the symmetric and asymmetric PAUSE frame advertisements, -+ * as most of the PHY drivers do not enable them by default. -+ */ -+ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); -+ phy_dev->advertising = phy_dev->supported; -+ -+ mac_dev->phy_dev = phy_dev; -+ -+ return 0; -+} -+ -+static int xgmac_init_phy(struct net_device *net_dev, -+ struct mac_device *mac_dev) -+{ -+ struct phy_device *phy_dev; -+ -+ if (!mac_dev->phy_node) -+ phy_dev = phy_attach(net_dev, mac_dev->fixed_bus_id, -+ mac_dev->phy_if); -+ else -+ phy_dev = of_phy_attach(net_dev, mac_dev->phy_node, 0, -+ mac_dev->phy_if); -+ if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) { -+ netdev_err(net_dev, "Could not attach to PHY %s\n", -+ mac_dev->phy_node ? -+ mac_dev->phy_node->full_name : -+ mac_dev->fixed_bus_id); -+ return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev); -+ } -+ -+ phy_dev->supported &= mac_dev->if_support; -+ /* Enable the symmetric and asymmetric PAUSE frame advertisements, -+ * as most of the PHY drivers do not enable them by default. 
-+ */ -+ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); -+ phy_dev->advertising = phy_dev->supported; -+ -+ mac_dev->phy_dev = phy_dev; -+ -+ return 0; -+} -+ -+static int memac_init_phy(struct net_device *net_dev, -+ struct mac_device *mac_dev) -+{ -+ struct phy_device *phy_dev; -+ -+ if ((macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) || -+ (macdev2enetinterface(mac_dev) == e_ENET_MODE_SGMII_2500)){ -+ if (!mac_dev->phy_node) { -+ mac_dev->phy_dev = NULL; -+ return 0; -+ } else -+ phy_dev = of_phy_attach(net_dev, mac_dev->phy_node, 0, -+ mac_dev->phy_if); -+ } else { -+ if (!mac_dev->phy_node) -+ phy_dev = phy_connect(net_dev, mac_dev->fixed_bus_id, -+ &adjust_link, mac_dev->phy_if); -+ else -+ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node, -+ &adjust_link, 0, -+ mac_dev->phy_if); -+ } -+ -+ if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) { -+ netdev_err(net_dev, "Could not connect to PHY %s\n", -+ mac_dev->phy_node ? -+ mac_dev->phy_node->full_name : -+ mac_dev->fixed_bus_id); -+ return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev); -+ } -+ -+ /* Remove any features not supported by the controller */ -+ phy_dev->supported &= mac_dev->if_support; -+ /* Enable the symmetric and asymmetric PAUSE frame advertisements, -+ * as most of the PHY drivers do not enable them by default. -+ */ -+ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); -+ phy_dev->advertising = phy_dev->supported; -+ -+ mac_dev->phy_dev = phy_dev; -+ -+ return 0; -+} -+ -+static int __cold uninit(struct fm_mac_dev *fm_mac_dev) -+{ -+ int _errno, __errno; -+ -+ _errno = fm_mac_disable(fm_mac_dev); -+ __errno = fm_mac_free(fm_mac_dev); -+ -+ if (unlikely(__errno < 0)) -+ _errno = __errno; -+ -+ return _errno; -+} -+ -+static struct fm_mac_dev *get_mac_handle(struct mac_device *mac_dev) -+{ -+ const struct mac_priv_s *priv; -+ priv = macdev_priv(mac_dev); -+ return priv->fm_mac; -+} -+ -+static int dtsec_dump_regs(struct mac_device *h_mac, char *buf, int nn) -+{ -+ struct dtsec_regs *p_mm = (struct dtsec_regs *) h_mac->vaddr; -+ int i = 0, n = nn; -+ -+ FM_DMP_SUBTITLE(buf, n, "\n"); -+ -+ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - DTSEC-%d", h_mac->cell_index); -+ -+ FM_DMP_V32(buf, n, p_mm, tsec_id); -+ FM_DMP_V32(buf, n, p_mm, tsec_id2); -+ FM_DMP_V32(buf, n, p_mm, ievent); -+ FM_DMP_V32(buf, n, p_mm, imask); -+ FM_DMP_V32(buf, n, p_mm, ecntrl); -+ FM_DMP_V32(buf, n, p_mm, ptv); -+ FM_DMP_V32(buf, n, p_mm, tmr_ctrl); -+ FM_DMP_V32(buf, n, p_mm, tmr_pevent); -+ FM_DMP_V32(buf, n, p_mm, tmr_pemask); -+ FM_DMP_V32(buf, n, p_mm, tctrl); -+ FM_DMP_V32(buf, n, p_mm, rctrl); -+ FM_DMP_V32(buf, n, p_mm, maccfg1); -+ FM_DMP_V32(buf, n, p_mm, maccfg2); -+ FM_DMP_V32(buf, n, p_mm, ipgifg); -+ FM_DMP_V32(buf, n, p_mm, hafdup); -+ FM_DMP_V32(buf, n, p_mm, maxfrm); -+ -+ FM_DMP_V32(buf, n, p_mm, macstnaddr1); -+ FM_DMP_V32(buf, n, p_mm, macstnaddr2); -+ -+ for (i = 0; i < 7; ++i) { -+ FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match1); -+ FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match2); -+ } -+ -+ FM_DMP_V32(buf, n, p_mm, car1); -+ FM_DMP_V32(buf, n, p_mm, car2); -+ -+ return n; -+} -+ -+static int xgmac_dump_regs(struct mac_device *h_mac, char *buf, int nn) -+{ -+ struct tgec_regs *p_mm = (struct tgec_regs *) h_mac->vaddr; -+ int n = nn; -+ -+ FM_DMP_SUBTITLE(buf, n, "\n"); -+ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - TGEC -%d", h_mac->cell_index); -+ -+ FM_DMP_V32(buf, n, p_mm, tgec_id); -+ FM_DMP_V32(buf, n, p_mm, command_config); -+ FM_DMP_V32(buf, n, p_mm, mac_addr_0); -+ FM_DMP_V32(buf, n, 
p_mm, mac_addr_1); -+ FM_DMP_V32(buf, n, p_mm, maxfrm); -+ FM_DMP_V32(buf, n, p_mm, pause_quant); -+ FM_DMP_V32(buf, n, p_mm, rx_fifo_sections); -+ FM_DMP_V32(buf, n, p_mm, tx_fifo_sections); -+ FM_DMP_V32(buf, n, p_mm, rx_fifo_almost_f_e); -+ FM_DMP_V32(buf, n, p_mm, tx_fifo_almost_f_e); -+ FM_DMP_V32(buf, n, p_mm, hashtable_ctrl); -+ FM_DMP_V32(buf, n, p_mm, mdio_cfg_status); -+ FM_DMP_V32(buf, n, p_mm, mdio_command); -+ FM_DMP_V32(buf, n, p_mm, mdio_data); -+ FM_DMP_V32(buf, n, p_mm, mdio_regaddr); -+ FM_DMP_V32(buf, n, p_mm, status); -+ FM_DMP_V32(buf, n, p_mm, tx_ipg_len); -+ FM_DMP_V32(buf, n, p_mm, mac_addr_2); -+ FM_DMP_V32(buf, n, p_mm, mac_addr_3); -+ FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_rd); -+ FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_wr); -+ FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_rd); -+ FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_wr); -+ FM_DMP_V32(buf, n, p_mm, imask); -+ FM_DMP_V32(buf, n, p_mm, ievent); -+ -+ return n; -+} -+ -+static int memac_dump_regs(struct mac_device *h_mac, char *buf, int nn) -+{ -+ struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr; -+ int i = 0, n = nn; -+ -+ FM_DMP_SUBTITLE(buf, n, "\n"); -+ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d", h_mac->cell_index); -+ -+ FM_DMP_V32(buf, n, p_mm, command_config); -+ FM_DMP_V32(buf, n, p_mm, mac_addr0.mac_addr_l); -+ FM_DMP_V32(buf, n, p_mm, mac_addr0.mac_addr_u); -+ FM_DMP_V32(buf, n, p_mm, maxfrm); -+ FM_DMP_V32(buf, n, p_mm, hashtable_ctrl); -+ FM_DMP_V32(buf, n, p_mm, ievent); -+ FM_DMP_V32(buf, n, p_mm, tx_ipg_length); -+ FM_DMP_V32(buf, n, p_mm, imask); -+ -+ for (i = 0; i < 4; ++i) -+ FM_DMP_V32(buf, n, p_mm, pause_quanta[i]); -+ -+ for (i = 0; i < 4; ++i) -+ FM_DMP_V32(buf, n, p_mm, pause_thresh[i]); -+ -+ FM_DMP_V32(buf, n, p_mm, rx_pause_status); -+ -+ for (i = 0; i < MEMAC_NUM_OF_PADDRS; ++i) { -+ FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_l); -+ FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_u); -+ } -+ -+ FM_DMP_V32(buf, n, p_mm, lpwake_timer); -+ FM_DMP_V32(buf, n, p_mm, sleep_timer); -+ FM_DMP_V32(buf, n, p_mm, statn_config); -+ FM_DMP_V32(buf, n, p_mm, if_mode); -+ FM_DMP_V32(buf, n, p_mm, if_status); -+ FM_DMP_V32(buf, n, p_mm, hg_config); -+ FM_DMP_V32(buf, n, p_mm, hg_pause_quanta); -+ FM_DMP_V32(buf, n, p_mm, hg_pause_thresh); -+ FM_DMP_V32(buf, n, p_mm, hgrx_pause_status); -+ FM_DMP_V32(buf, n, p_mm, hg_fifos_status); -+ FM_DMP_V32(buf, n, p_mm, rhm); -+ FM_DMP_V32(buf, n, p_mm, thm); -+ -+ return n; -+} -+ -+static int memac_dump_regs_rx(struct mac_device *h_mac, char *buf, int nn) -+{ -+ struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr; -+ int n = nn; -+ -+ FM_DMP_SUBTITLE(buf, n, "\n"); -+ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d Rx stats", h_mac->cell_index); -+ -+ /* Rx Statistics Counter */ -+ FM_DMP_V32(buf, n, p_mm, reoct_l); -+ FM_DMP_V32(buf, n, p_mm, reoct_u); -+ FM_DMP_V32(buf, n, p_mm, roct_l); -+ FM_DMP_V32(buf, n, p_mm, roct_u); -+ FM_DMP_V32(buf, n, p_mm, raln_l); -+ FM_DMP_V32(buf, n, p_mm, raln_u); -+ FM_DMP_V32(buf, n, p_mm, rxpf_l); -+ FM_DMP_V32(buf, n, p_mm, rxpf_u); -+ FM_DMP_V32(buf, n, p_mm, rfrm_l); -+ FM_DMP_V32(buf, n, p_mm, rfrm_u); -+ FM_DMP_V32(buf, n, p_mm, rfcs_l); -+ FM_DMP_V32(buf, n, p_mm, rfcs_u); -+ FM_DMP_V32(buf, n, p_mm, rvlan_l); -+ FM_DMP_V32(buf, n, p_mm, rvlan_u); -+ FM_DMP_V32(buf, n, p_mm, rerr_l); -+ FM_DMP_V32(buf, n, p_mm, rerr_u); -+ FM_DMP_V32(buf, n, p_mm, ruca_l); -+ FM_DMP_V32(buf, n, p_mm, ruca_u); -+ FM_DMP_V32(buf, n, p_mm, rmca_l); -+ FM_DMP_V32(buf, n, p_mm, rmca_u); -+ FM_DMP_V32(buf, n, p_mm, rbca_l); -+ 
FM_DMP_V32(buf, n, p_mm, rbca_u); -+ FM_DMP_V32(buf, n, p_mm, rdrp_l); -+ FM_DMP_V32(buf, n, p_mm, rdrp_u); -+ FM_DMP_V32(buf, n, p_mm, rpkt_l); -+ FM_DMP_V32(buf, n, p_mm, rpkt_u); -+ FM_DMP_V32(buf, n, p_mm, rund_l); -+ FM_DMP_V32(buf, n, p_mm, rund_u); -+ FM_DMP_V32(buf, n, p_mm, r64_l); -+ FM_DMP_V32(buf, n, p_mm, r64_u); -+ FM_DMP_V32(buf, n, p_mm, r127_l); -+ FM_DMP_V32(buf, n, p_mm, r127_u); -+ FM_DMP_V32(buf, n, p_mm, r255_l); -+ FM_DMP_V32(buf, n, p_mm, r255_u); -+ FM_DMP_V32(buf, n, p_mm, r511_l); -+ FM_DMP_V32(buf, n, p_mm, r511_u); -+ FM_DMP_V32(buf, n, p_mm, r1023_l); -+ FM_DMP_V32(buf, n, p_mm, r1023_u); -+ FM_DMP_V32(buf, n, p_mm, r1518_l); -+ FM_DMP_V32(buf, n, p_mm, r1518_u); -+ FM_DMP_V32(buf, n, p_mm, r1519x_l); -+ FM_DMP_V32(buf, n, p_mm, r1519x_u); -+ FM_DMP_V32(buf, n, p_mm, rovr_l); -+ FM_DMP_V32(buf, n, p_mm, rovr_u); -+ FM_DMP_V32(buf, n, p_mm, rjbr_l); -+ FM_DMP_V32(buf, n, p_mm, rjbr_u); -+ FM_DMP_V32(buf, n, p_mm, rfrg_l); -+ FM_DMP_V32(buf, n, p_mm, rfrg_u); -+ FM_DMP_V32(buf, n, p_mm, rcnp_l); -+ FM_DMP_V32(buf, n, p_mm, rcnp_u); -+ FM_DMP_V32(buf, n, p_mm, rdrntp_l); -+ FM_DMP_V32(buf, n, p_mm, rdrntp_u); -+ -+ return n; -+} -+ -+static int memac_dump_regs_tx(struct mac_device *h_mac, char *buf, int nn) -+{ -+ struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr; -+ int n = nn; -+ -+ FM_DMP_SUBTITLE(buf, n, "\n"); -+ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d Tx stats", h_mac->cell_index); -+ -+ -+ /* Tx Statistics Counter */ -+ FM_DMP_V32(buf, n, p_mm, teoct_l); -+ FM_DMP_V32(buf, n, p_mm, teoct_u); -+ FM_DMP_V32(buf, n, p_mm, toct_l); -+ FM_DMP_V32(buf, n, p_mm, toct_u); -+ FM_DMP_V32(buf, n, p_mm, txpf_l); -+ FM_DMP_V32(buf, n, p_mm, txpf_u); -+ FM_DMP_V32(buf, n, p_mm, tfrm_l); -+ FM_DMP_V32(buf, n, p_mm, tfrm_u); -+ FM_DMP_V32(buf, n, p_mm, tfcs_l); -+ FM_DMP_V32(buf, n, p_mm, tfcs_u); -+ FM_DMP_V32(buf, n, p_mm, tvlan_l); -+ FM_DMP_V32(buf, n, p_mm, tvlan_u); -+ FM_DMP_V32(buf, n, p_mm, terr_l); -+ FM_DMP_V32(buf, n, p_mm, terr_u); -+ FM_DMP_V32(buf, n, p_mm, tuca_l); -+ FM_DMP_V32(buf, n, p_mm, tuca_u); -+ FM_DMP_V32(buf, n, p_mm, tmca_l); -+ FM_DMP_V32(buf, n, p_mm, tmca_u); -+ FM_DMP_V32(buf, n, p_mm, tbca_l); -+ FM_DMP_V32(buf, n, p_mm, tbca_u); -+ FM_DMP_V32(buf, n, p_mm, tpkt_l); -+ FM_DMP_V32(buf, n, p_mm, tpkt_u); -+ FM_DMP_V32(buf, n, p_mm, tund_l); -+ FM_DMP_V32(buf, n, p_mm, tund_u); -+ FM_DMP_V32(buf, n, p_mm, t64_l); -+ FM_DMP_V32(buf, n, p_mm, t64_u); -+ FM_DMP_V32(buf, n, p_mm, t127_l); -+ FM_DMP_V32(buf, n, p_mm, t127_u); -+ FM_DMP_V32(buf, n, p_mm, t255_l); -+ FM_DMP_V32(buf, n, p_mm, t255_u); -+ FM_DMP_V32(buf, n, p_mm, t511_l); -+ FM_DMP_V32(buf, n, p_mm, t511_u); -+ FM_DMP_V32(buf, n, p_mm, t1023_l); -+ FM_DMP_V32(buf, n, p_mm, t1023_u); -+ FM_DMP_V32(buf, n, p_mm, t1518_l); -+ FM_DMP_V32(buf, n, p_mm, t1518_u); -+ FM_DMP_V32(buf, n, p_mm, t1519x_l); -+ FM_DMP_V32(buf, n, p_mm, t1519x_u); -+ FM_DMP_V32(buf, n, p_mm, tcnp_l); -+ FM_DMP_V32(buf, n, p_mm, tcnp_u); -+ -+ return n; -+} -+ -+int fm_mac_dump_regs(struct mac_device *h_mac, char *buf, int nn) -+{ -+ int n = nn; -+ -+ n = h_mac->dump_mac_regs(h_mac, buf, n); -+ -+ return n; -+} -+EXPORT_SYMBOL(fm_mac_dump_regs); -+ -+int fm_mac_dump_rx_stats(struct mac_device *h_mac, char *buf, int nn) -+{ -+ int n = nn; -+ -+ if(h_mac->dump_mac_rx_stats) -+ n = h_mac->dump_mac_rx_stats(h_mac, buf, n); -+ -+ return n; -+} -+EXPORT_SYMBOL(fm_mac_dump_rx_stats); -+ -+int fm_mac_dump_tx_stats(struct mac_device *h_mac, char *buf, int nn) -+{ -+ int n = nn; -+ -+ 
if(h_mac->dump_mac_tx_stats) -+ n = h_mac->dump_mac_tx_stats(h_mac, buf, n); -+ -+ return n; -+} -+EXPORT_SYMBOL(fm_mac_dump_tx_stats); -+ -+static void __cold setup_dtsec(struct mac_device *mac_dev) -+{ -+ mac_dev->init_phy = dtsec_init_phy; -+ mac_dev->init = init; -+ mac_dev->start = start; -+ mac_dev->stop = stop; -+ mac_dev->set_promisc = fm_mac_set_promiscuous; -+ mac_dev->change_addr = fm_mac_modify_mac_addr; -+ mac_dev->set_multi = set_multi; -+ mac_dev->uninit = uninit; -+ mac_dev->ptp_enable = fm_mac_enable_1588_time_stamp; -+ mac_dev->ptp_disable = fm_mac_disable_1588_time_stamp; -+ mac_dev->get_mac_handle = get_mac_handle; -+ mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames; -+ mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames; -+ mac_dev->fm_rtc_enable = fm_rtc_enable; -+ mac_dev->fm_rtc_disable = fm_rtc_disable; -+ mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt; -+ mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt; -+ mac_dev->fm_rtc_get_drift = fm_rtc_get_drift; -+ mac_dev->fm_rtc_set_drift = fm_rtc_set_drift; -+ mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm; -+ mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper; -+ mac_dev->set_wol = fm_mac_set_wol; -+ mac_dev->dump_mac_regs = dtsec_dump_regs; -+} -+ -+static void __cold setup_xgmac(struct mac_device *mac_dev) -+{ -+ mac_dev->init_phy = xgmac_init_phy; -+ mac_dev->init = init; -+ mac_dev->start = start; -+ mac_dev->stop = stop; -+ mac_dev->set_promisc = fm_mac_set_promiscuous; -+ mac_dev->change_addr = fm_mac_modify_mac_addr; -+ mac_dev->set_multi = set_multi; -+ mac_dev->uninit = uninit; -+ mac_dev->get_mac_handle = get_mac_handle; -+ mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames; -+ mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames; -+ mac_dev->set_wol = fm_mac_set_wol; -+ mac_dev->dump_mac_regs = xgmac_dump_regs; -+} -+ -+static void __cold setup_memac(struct mac_device *mac_dev) -+{ -+ mac_dev->init_phy = memac_init_phy; -+ mac_dev->init = memac_init; -+ mac_dev->start = start; -+ mac_dev->stop = stop; -+ mac_dev->set_promisc = fm_mac_set_promiscuous; -+ mac_dev->change_addr = fm_mac_modify_mac_addr; -+ mac_dev->set_multi = set_multi; -+ mac_dev->uninit = uninit; -+ mac_dev->get_mac_handle = get_mac_handle; -+ mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames; -+ mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames; -+ mac_dev->fm_rtc_enable = fm_rtc_enable; -+ mac_dev->fm_rtc_disable = fm_rtc_disable; -+ mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt; -+ mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt; -+ mac_dev->fm_rtc_get_drift = fm_rtc_get_drift; -+ mac_dev->fm_rtc_set_drift = fm_rtc_set_drift; -+ mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm; -+ mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper; -+ mac_dev->set_wol = fm_mac_set_wol; -+ mac_dev->dump_mac_regs = memac_dump_regs; -+ mac_dev->dump_mac_rx_stats = memac_dump_regs_rx; -+ mac_dev->dump_mac_tx_stats = memac_dump_regs_tx; -+} -+ -+void (*const mac_setup[])(struct mac_device *mac_dev) = { -+ [DTSEC] = setup_dtsec, -+ [XGMAC] = setup_xgmac, -+ [MEMAC] = setup_memac -+}; ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.c -@@ -0,0 +1,470 @@ -+/* Copyright 2008-2012 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
-+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG -+#define pr_fmt(fmt) \ -+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \ -+ KBUILD_BASENAME".c", __LINE__, __func__ -+#else -+#define pr_fmt(fmt) \ -+ KBUILD_MODNAME ": " fmt -+#endif -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "lnxwrp_fm_ext.h" -+ -+#include "mac.h" -+ -+#define DTSEC_SUPPORTED \ -+ (SUPPORTED_10baseT_Half \ -+ | SUPPORTED_10baseT_Full \ -+ | SUPPORTED_100baseT_Half \ -+ | SUPPORTED_100baseT_Full \ -+ | SUPPORTED_Autoneg \ -+ | SUPPORTED_Pause \ -+ | SUPPORTED_Asym_Pause \ -+ | SUPPORTED_MII) -+ -+static const char phy_str[][11] = { -+ [PHY_INTERFACE_MODE_MII] = "mii", -+ [PHY_INTERFACE_MODE_GMII] = "gmii", -+ [PHY_INTERFACE_MODE_SGMII] = "sgmii", -+ [PHY_INTERFACE_MODE_QSGMII] = "qsgmii", -+ [PHY_INTERFACE_MODE_TBI] = "tbi", -+ [PHY_INTERFACE_MODE_RMII] = "rmii", -+ [PHY_INTERFACE_MODE_RGMII] = "rgmii", -+ [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id", -+ [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid", -+ [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid", -+ [PHY_INTERFACE_MODE_RTBI] = "rtbi", -+ [PHY_INTERFACE_MODE_XGMII] = "xgmii", -+ [PHY_INTERFACE_MODE_QSGMII] = "sgmii-2500" -+}; -+ -+static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(phy_str); i++) -+ if (strcmp(str, phy_str[i]) == 0) -+ return (phy_interface_t)i; -+ -+ return PHY_INTERFACE_MODE_MII; -+} -+ -+static const uint16_t phy2speed[] = { -+ [PHY_INTERFACE_MODE_MII] = SPEED_100, -+ [PHY_INTERFACE_MODE_GMII] = SPEED_1000, -+ [PHY_INTERFACE_MODE_SGMII] = SPEED_1000, -+ [PHY_INTERFACE_MODE_QSGMII] = SPEED_1000, -+ [PHY_INTERFACE_MODE_TBI] = SPEED_1000, -+ [PHY_INTERFACE_MODE_RMII] = SPEED_100, -+ [PHY_INTERFACE_MODE_RGMII] = SPEED_1000, -+ [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000, -+ [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000, -+ [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000, -+ [PHY_INTERFACE_MODE_RTBI] = SPEED_1000, -+ [PHY_INTERFACE_MODE_XGMII] = SPEED_10000, -+ [PHY_INTERFACE_MODE_QSGMII] = SPEED_2500 
-+}; -+ -+static struct mac_device * __cold -+alloc_macdev(struct device *dev, size_t sizeof_priv, -+ void (*setup)(struct mac_device *mac_dev)) -+{ -+ struct mac_device *mac_dev; -+ -+ mac_dev = devm_kzalloc(dev, sizeof(*mac_dev) + sizeof_priv, GFP_KERNEL); -+ if (unlikely(mac_dev == NULL)) -+ mac_dev = ERR_PTR(-ENOMEM); -+ else { -+ mac_dev->dev = dev; -+ dev_set_drvdata(dev, mac_dev); -+ setup(mac_dev); -+ } -+ -+ return mac_dev; -+} -+ -+static int __cold free_macdev(struct mac_device *mac_dev) -+{ -+ dev_set_drvdata(mac_dev->dev, NULL); -+ -+ return mac_dev->uninit(mac_dev->get_mac_handle(mac_dev)); -+} -+ -+static const struct of_device_id mac_match[] = { -+ [DTSEC] = { -+ .compatible = "fsl,fman-1g-mac" -+ }, -+ [XGMAC] = { -+ .compatible = "fsl,fman-10g-mac" -+ }, -+ [MEMAC] = { -+ .compatible = "fsl,fman-memac" -+ }, -+ {} -+}; -+MODULE_DEVICE_TABLE(of, mac_match); -+ -+static int __cold mac_probe(struct platform_device *_of_dev) -+{ -+ int _errno, i; -+ struct device *dev; -+ struct device_node *mac_node, *dev_node; -+ struct mac_device *mac_dev; -+ struct platform_device *of_dev; -+ struct resource res; -+ const uint8_t *mac_addr; -+ const char *char_prop; -+ int nph; -+ u32 cell_index; -+ const struct of_device_id *match; -+ -+ dev = &_of_dev->dev; -+ mac_node = dev->of_node; -+ -+ match = of_match_device(mac_match, dev); -+ if (!match) -+ return -EINVAL; -+ -+ for (i = 0; i < ARRAY_SIZE(mac_match) - 1 && match != mac_match + i; -+ i++) -+ ; -+ BUG_ON(i >= ARRAY_SIZE(mac_match) - 1); -+ -+ mac_dev = alloc_macdev(dev, mac_sizeof_priv[i], mac_setup[i]); -+ if (IS_ERR(mac_dev)) { -+ _errno = PTR_ERR(mac_dev); -+ dev_err(dev, "alloc_macdev() = %d\n", _errno); -+ goto _return; -+ } -+ -+ INIT_LIST_HEAD(&mac_dev->mc_addr_list); -+ -+ /* Get the FM node */ -+ dev_node = of_get_parent(mac_node); -+ if (unlikely(dev_node == NULL)) { -+ dev_err(dev, "of_get_parent(%s) failed\n", -+ mac_node->full_name); -+ _errno = -EINVAL; -+ goto _return_dev_set_drvdata; -+ } -+ -+ of_dev = of_find_device_by_node(dev_node); -+ if (unlikely(of_dev == NULL)) { -+ dev_err(dev, "of_find_device_by_node(%s) failed\n", -+ dev_node->full_name); -+ _errno = -EINVAL; -+ goto _return_of_node_put; -+ } -+ -+ mac_dev->fm_dev = fm_bind(&of_dev->dev); -+ if (unlikely(mac_dev->fm_dev == NULL)) { -+ dev_err(dev, "fm_bind(%s) failed\n", dev_node->full_name); -+ _errno = -ENODEV; -+ goto _return_of_node_put; -+ } -+ -+ mac_dev->fm = (void *)fm_get_handle(mac_dev->fm_dev); -+ of_node_put(dev_node); -+ -+ /* Get the address of the memory mapped registers */ -+ _errno = of_address_to_resource(mac_node, 0, &res); -+ if (unlikely(_errno < 0)) { -+ dev_err(dev, "of_address_to_resource(%s) = %d\n", -+ mac_node->full_name, _errno); -+ goto _return_dev_set_drvdata; -+ } -+ -+ mac_dev->res = __devm_request_region( -+ dev, -+ fm_get_mem_region(mac_dev->fm_dev), -+ res.start, res.end + 1 - res.start, "mac"); -+ if (unlikely(mac_dev->res == NULL)) { -+ dev_err(dev, "__devm_request_mem_region(mac) failed\n"); -+ _errno = -EBUSY; -+ goto _return_dev_set_drvdata; -+ } -+ -+ mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start, -+ mac_dev->res->end + 1 -+ - mac_dev->res->start); -+ if (unlikely(mac_dev->vaddr == NULL)) { -+ dev_err(dev, "devm_ioremap() failed\n"); -+ _errno = -EIO; -+ goto _return_dev_set_drvdata; -+ } -+ -+#define TBIPA_OFFSET 0x1c -+#define TBIPA_DEFAULT_ADDR 5 /* override if used as external PHY addr. 
*/ -+ mac_dev->tbi_node = of_parse_phandle(mac_node, "tbi-handle", 0); -+ if (mac_dev->tbi_node) { -+ u32 tbiaddr = TBIPA_DEFAULT_ADDR; -+ const __be32 *tbi_reg; -+ void __iomem *addr; -+ -+ tbi_reg = of_get_property(mac_dev->tbi_node, "reg", NULL); -+ if (tbi_reg) -+ tbiaddr = be32_to_cpup(tbi_reg); -+ addr = mac_dev->vaddr + TBIPA_OFFSET; -+ /* TODO: out_be32 does not exist on ARM */ -+ out_be32(addr, tbiaddr); -+ } -+ -+ if (!of_device_is_available(mac_node)) { -+ devm_iounmap(dev, mac_dev->vaddr); -+ __devm_release_region(dev, fm_get_mem_region(mac_dev->fm_dev), -+ res.start, res.end + 1 - res.start); -+ fm_unbind(mac_dev->fm_dev); -+ devm_kfree(dev, mac_dev); -+ dev_set_drvdata(dev, NULL); -+ return -ENODEV; -+ } -+ -+ /* Get the cell-index */ -+ _errno = of_property_read_u32(mac_node, "cell-index", &cell_index); -+ if (unlikely(_errno)) { -+ dev_err(dev, "Cannot read cell-index of mac node %s from device tree\n", -+ mac_node->full_name); -+ goto _return_dev_set_drvdata; -+ } -+ mac_dev->cell_index = (uint8_t)cell_index; -+ -+ /* Get the MAC address */ -+ mac_addr = of_get_mac_address(mac_node); -+ if (unlikely(mac_addr == NULL)) { -+ dev_err(dev, "of_get_mac_address(%s) failed\n", -+ mac_node->full_name); -+ _errno = -EINVAL; -+ goto _return_dev_set_drvdata; -+ } -+ memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr)); -+ -+ /* Verify the number of port handles */ -+ nph = of_count_phandle_with_args(mac_node, "fsl,port-handles", NULL); -+ if (unlikely(nph < 0)) { -+ dev_err(dev, "Cannot read port handles of mac node %s from device tree\n", -+ mac_node->full_name); -+ _errno = nph; -+ goto _return_dev_set_drvdata; -+ } -+ -+ if (nph != ARRAY_SIZE(mac_dev->port_dev)) { -+ dev_err(dev, "Not supported number of port handles of mac node %s from device tree\n", -+ mac_node->full_name); -+ _errno = -EINVAL; -+ goto _return_dev_set_drvdata; -+ } -+ -+ for_each_port_device(i, mac_dev->port_dev) { -+ dev_node = of_parse_phandle(mac_node, "fsl,port-handles", i); -+ if (unlikely(dev_node == NULL)) { -+ dev_err(dev, "Cannot find port node referenced by mac node %s from device tree\n", -+ mac_node->full_name); -+ _errno = -EINVAL; -+ goto _return_of_node_put; -+ } -+ -+ of_dev = of_find_device_by_node(dev_node); -+ if (unlikely(of_dev == NULL)) { -+ dev_err(dev, "of_find_device_by_node(%s) failed\n", -+ dev_node->full_name); -+ _errno = -EINVAL; -+ goto _return_of_node_put; -+ } -+ -+ mac_dev->port_dev[i] = fm_port_bind(&of_dev->dev); -+ if (unlikely(mac_dev->port_dev[i] == NULL)) { -+ dev_err(dev, "dev_get_drvdata(%s) failed\n", -+ dev_node->full_name); -+ _errno = -EINVAL; -+ goto _return_of_node_put; -+ } -+ of_node_put(dev_node); -+ } -+ -+ /* Get the PHY connection type */ -+ _errno = of_property_read_string(mac_node, "phy-connection-type", -+ &char_prop); -+ if (unlikely(_errno)) { -+ dev_warn(dev, -+ "Cannot read PHY connection type of mac node %s from device tree. 
Defaulting to MII\n", -+ mac_node->full_name); -+ mac_dev->phy_if = PHY_INTERFACE_MODE_MII; -+ } else -+ mac_dev->phy_if = str2phy(char_prop); -+ -+ mac_dev->link = false; -+ mac_dev->half_duplex = false; -+ mac_dev->speed = phy2speed[mac_dev->phy_if]; -+ mac_dev->max_speed = mac_dev->speed; -+ mac_dev->if_support = DTSEC_SUPPORTED; -+ /* We don't support half-duplex in SGMII mode */ -+ if (strstr(char_prop, "sgmii") || strstr(char_prop, "qsgmii")) -+ mac_dev->if_support &= ~(SUPPORTED_10baseT_Half | -+ SUPPORTED_100baseT_Half); -+ -+ if (strstr(char_prop, "sgmii-2500")) -+ mac_dev->if_support &= ~(SUPPORTED_10baseT_Half | -+ SUPPORTED_100baseT_Half); -+ -+ /* Gigabit support (no half-duplex) */ -+ if (mac_dev->max_speed == 1000) -+ mac_dev->if_support |= SUPPORTED_1000baseT_Full; -+ -+ /* The 10G interface only supports one mode */ -+ if (strstr(char_prop, "xgmii")) -+ mac_dev->if_support = SUPPORTED_10000baseT_Full; -+ -+ /* Get the rest of the PHY information */ -+ mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0); -+ if (mac_dev->phy_node == NULL) { -+ u32 phy_id; -+ -+ _errno = of_property_read_u32(mac_node, "fixed-link", &phy_id); -+ if (_errno) { -+ dev_err(dev, "No PHY (or fixed link) found\n"); -+ _errno = -EINVAL; -+ goto _return_dev_set_drvdata; -+ } -+ -+ sprintf(mac_dev->fixed_bus_id, PHY_ID_FMT, "fixed-0", -+ phy_id); -+ } -+ -+ _errno = mac_dev->init(mac_dev); -+ if (unlikely(_errno < 0)) { -+ dev_err(dev, "mac_dev->init() = %d\n", _errno); -+ goto _return_dev_set_drvdata; -+ } -+ -+ /* pause frame autonegotiation enabled */ -+ mac_dev->autoneg_pause = true; -+ -+ /* by initializing the values to false, force FMD to enable PAUSE frames -+ * on RX and TX -+ */ -+ mac_dev->rx_pause_req = mac_dev->tx_pause_req = true; -+ mac_dev->rx_pause_active = mac_dev->tx_pause_active = false; -+ _errno = set_mac_active_pause(mac_dev, true, true); -+ if (unlikely(_errno < 0)) -+ dev_err(dev, "set_mac_active_pause() = %d\n", _errno); -+ -+ dev_info(dev, -+ "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n", -+ mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2], -+ mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]); -+ -+ goto _return; -+ -+_return_of_node_put: -+ of_node_put(dev_node); -+_return_dev_set_drvdata: -+ dev_set_drvdata(dev, NULL); -+_return: -+ return _errno; -+} -+ -+static int __cold mac_remove(struct platform_device *of_dev) -+{ -+ int i, _errno; -+ struct device *dev; -+ struct mac_device *mac_dev; -+ -+ dev = &of_dev->dev; -+ mac_dev = (struct mac_device *)dev_get_drvdata(dev); -+ -+ for_each_port_device(i, mac_dev->port_dev) -+ fm_port_unbind(mac_dev->port_dev[i]); -+ -+ fm_unbind(mac_dev->fm_dev); -+ -+ _errno = free_macdev(mac_dev); -+ -+ return _errno; -+} -+ -+static struct platform_driver mac_driver = { -+ .driver = { -+ .name = KBUILD_MODNAME, -+ .of_match_table = mac_match, -+ .owner = THIS_MODULE, -+ }, -+ .probe = mac_probe, -+ .remove = mac_remove -+}; -+ -+static int __init __cold mac_load(void) -+{ -+ int _errno; -+ -+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n", -+ KBUILD_BASENAME".c", __func__); -+ -+ pr_info(KBUILD_MODNAME ": %s\n", mac_driver_description); -+ -+ _errno = platform_driver_register(&mac_driver); -+ if (unlikely(_errno < 0)) { -+ pr_err(KBUILD_MODNAME ": %s:%hu:%s(): platform_driver_register() = %d\n", -+ KBUILD_BASENAME".c", __LINE__, __func__, _errno); -+ goto _return; -+ } -+ -+ goto _return; -+ -+_return: -+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", -+ KBUILD_BASENAME".c", __func__); -+ -+ return _errno; -+}
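-+ -+/* Illustration only, a sketch rather than code from the original driver: how -+ * an ethtool .set_pauseparam handler could drive the two exported helpers -+ * above. The function name and the ethtool hookup are assumptions; the -+ * dpa_priv_s->mac_dev access mirrors adjust_link() earlier in this file. -+ */ -+static int dpa_set_pauseparam_sketch(struct net_device *net_dev, -+ struct ethtool_pauseparam *epause) -+{ -+ struct dpa_priv_s *priv = netdev_priv(net_dev); -+ struct mac_device *mac_dev = priv->mac_dev; -+ bool rx_pause, tx_pause; -+ -+ /* record the requested configuration */ -+ mac_dev->autoneg_pause = !!epause->autoneg; -+ mac_dev->rx_pause_req = !!epause->rx_pause; -+ mac_dev->tx_pause_req = !!epause->tx_pause; -+ -+ /* resolve the active settings (autoneg result or the requests above) -+ * and push them to FMan only if they changed -+ */ -+ get_pause_cfg(mac_dev, &rx_pause, &tx_pause); -+ return set_mac_active_pause(mac_dev, rx_pause, tx_pause); -+}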
-+module_init(mac_load); -+ -+static void __exit __cold mac_unload(void) -+{ -+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n", -+ KBUILD_BASENAME".c", __func__); -+ -+ platform_driver_unregister(&mac_driver); -+ -+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", -+ KBUILD_BASENAME".c", __func__); -+} -+module_exit(mac_unload); ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.h -@@ -0,0 +1,134 @@ -+/* Copyright 2008-2011 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#ifndef __MAC_H -+#define __MAC_H -+ -+#include <linux/device.h> /* struct device, BUS_ID_SIZE */ -+#include <linux/if_ether.h> /* ETH_ALEN */ -+#include <linux/phy.h> /* phy_interface_t, struct phy_device */ -+#include <linux/list.h> -+ -+#include "lnxwrp_fsl_fman.h" /* struct port_device */ -+ -+enum {DTSEC, XGMAC, MEMAC}; -+ -+struct mac_device { -+ struct device *dev; -+ void *priv; -+ uint8_t cell_index; -+ struct resource *res; -+ void __iomem *vaddr; -+ uint8_t addr[ETH_ALEN]; -+ bool promisc; -+ -+ struct fm *fm_dev; -+ struct fm_port *port_dev[2]; -+ -+ phy_interface_t phy_if; -+ u32 if_support; -+ bool link; -+ bool half_duplex; -+ uint16_t speed; -+ uint16_t max_speed; -+ struct device_node *phy_node; -+ char fixed_bus_id[MII_BUS_ID_SIZE + 3]; -+ struct device_node *tbi_node; -+ struct phy_device *phy_dev; -+ void *fm; -+ /* List of multicast addresses */ -+ struct list_head mc_addr_list; -+ -+ bool autoneg_pause; -+ bool rx_pause_req; -+ bool tx_pause_req; -+ bool rx_pause_active; -+ bool tx_pause_active; -+ -+ struct fm_mac_dev *(*get_mac_handle)(struct mac_device *mac_dev); -+ int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev); -+ int (*init)(struct mac_device *mac_dev); -+ int (*start)(struct mac_device *mac_dev); -+ int (*stop)(struct mac_device *mac_dev); -+ int (*set_promisc)(struct fm_mac_dev *fm_mac_dev, bool enable); -+ int (*change_addr)(struct fm_mac_dev *fm_mac_dev, uint8_t *addr); -+ int (*set_multi)(struct net_device *net_dev, -+ struct mac_device *mac_dev); -+ int (*uninit)(struct fm_mac_dev *fm_mac_dev); -+ int (*ptp_enable)(struct fm_mac_dev *fm_mac_dev); -+ int (*ptp_disable)(struct fm_mac_dev *fm_mac_dev); -+ int (*set_rx_pause)(struct fm_mac_dev *fm_mac_dev, bool en); -+ int (*set_tx_pause)(struct fm_mac_dev *fm_mac_dev, bool en); -+ int (*fm_rtc_enable)(struct fm *fm_dev); -+ int (*fm_rtc_disable)(struct fm *fm_dev); -+ int (*fm_rtc_get_cnt)(struct fm *fm_dev, uint64_t *ts); -+ int (*fm_rtc_set_cnt)(struct fm *fm_dev, uint64_t ts); -+ int (*fm_rtc_get_drift)(struct fm *fm_dev, uint32_t *drift); -+ int (*fm_rtc_set_drift)(struct fm *fm_dev, uint32_t drift); -+ int (*fm_rtc_set_alarm)(struct fm *fm_dev, uint32_t id, uint64_t time); -+ int (*fm_rtc_set_fiper)(struct fm *fm_dev, uint32_t id, -+ uint64_t fiper); -+#ifdef CONFIG_PTP_1588_CLOCK_DPAA -+ int (*fm_rtc_enable_interrupt)(struct fm *fm_dev, uint32_t events); -+ int (*fm_rtc_disable_interrupt)(struct fm *fm_dev, uint32_t events); -+#endif -+ int (*set_wol)(struct fm_port *port, struct fm_mac_dev *fm_mac_dev, -+ bool en); -+ int (*dump_mac_regs)(struct mac_device *h_mac, char *buf, int nn); -+ int (*dump_mac_rx_stats)(struct mac_device *h_mac, char *buf, int nn); -+ int (*dump_mac_tx_stats)(struct mac_device *h_mac, char *buf, int nn); -+}; -+ -+struct mac_address { -+ uint8_t addr[ETH_ALEN]; -+ struct list_head list; -+}; -+ -+#define get_fm_handle(net_dev) \ -+ (((struct dpa_priv_s *)netdev_priv(net_dev))->mac_dev->fm_dev) -+ -+#define for_each_port_device(i, port_dev) \ -+ for (i = 0; i < ARRAY_SIZE(port_dev); i++) -+ -+static inline __attribute((nonnull)) void *macdev_priv( -+ const struct mac_device *mac_dev) -+{ -+ return (void *)mac_dev + sizeof(*mac_dev); -+} -+ -+extern const char *mac_driver_description; -+extern const size_t mac_sizeof_priv[]; -+extern void (*const mac_setup[])(struct mac_device *mac_dev); -+ -+int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx); -+void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause); -+ -+#endif /* __MAC_H */ ---- /dev/null -+++
b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c -@@ -0,0 +1,848 @@ -+/* Copyright 2011-2012 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+/* Offline Parsing / Host Command port driver for FSL QorIQ FMan. -+ * Validates device-tree configuration and sets up the offline ports. 
-+ */ -+ -+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG -+#define pr_fmt(fmt) \ -+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \ -+ KBUILD_BASENAME".c", __LINE__, __func__ -+#else -+#define pr_fmt(fmt) \ -+ KBUILD_MODNAME ": " fmt -+#endif -+ -+ -+#include -+#include -+#include -+#include -+ -+#include "offline_port.h" -+#include "dpaa_eth.h" -+#include "dpaa_eth_common.h" -+ -+#define OH_MOD_DESCRIPTION "FSL FMan Offline Parsing port driver" -+/* Manip extra space and data alignment for fragmentation */ -+#define FRAG_MANIP_SPACE 128 -+#define FRAG_DATA_ALIGN 64 -+ -+ -+MODULE_LICENSE("Dual BSD/GPL"); -+MODULE_AUTHOR("Bogdan Hamciuc "); -+MODULE_DESCRIPTION(OH_MOD_DESCRIPTION); -+ -+ -+static const struct of_device_id oh_port_match_table[] = { -+ { -+ .compatible = "fsl,dpa-oh" -+ }, -+ { -+ .compatible = "fsl,dpa-oh-shared" -+ }, -+ {} -+}; -+MODULE_DEVICE_TABLE(of, oh_port_match_table); -+ -+#ifdef CONFIG_PM -+ -+static int oh_suspend(struct device *dev) -+{ -+ struct dpa_oh_config_s *oh_config; -+ -+ oh_config = dev_get_drvdata(dev); -+ return fm_port_suspend(oh_config->oh_port); -+} -+ -+static int oh_resume(struct device *dev) -+{ -+ struct dpa_oh_config_s *oh_config; -+ -+ oh_config = dev_get_drvdata(dev); -+ return fm_port_resume(oh_config->oh_port); -+} -+ -+static const struct dev_pm_ops oh_pm_ops = { -+ .suspend = oh_suspend, -+ .resume = oh_resume, -+}; -+ -+#define OH_PM_OPS (&oh_pm_ops) -+ -+#else /* CONFIG_PM */ -+ -+#define OH_PM_OPS NULL -+ -+#endif /* CONFIG_PM */ -+ -+/* Creates Frame Queues */ -+static uint32_t oh_fq_create(struct qman_fq *fq, -+ uint32_t fq_id, uint16_t channel, -+ uint16_t wq_id) -+{ -+ struct qm_mcc_initfq fq_opts; -+ uint32_t create_flags, init_flags; -+ uint32_t ret = 0; -+ -+ if (fq == NULL) -+ return 1; -+ -+ /* Set flags for FQ create */ -+ create_flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_TO_DCPORTAL; -+ -+ /* Create frame queue */ -+ ret = qman_create_fq(fq_id, create_flags, fq); -+ if (ret != 0) -+ return 1; -+ -+ /* Set flags for FQ init */ -+ init_flags = QMAN_INITFQ_FLAG_SCHED; -+ -+ /* Set FQ init options. 
Specify destination WQ ID and channel */ -+ fq_opts.we_mask = QM_INITFQ_WE_DESTWQ; -+ fq_opts.fqd.dest.wq = wq_id; -+ fq_opts.fqd.dest.channel = channel; -+ -+ /* Initialize frame queue */ -+ ret = qman_init_fq(fq, init_flags, &fq_opts); -+ if (ret != 0) { -+ qman_destroy_fq(fq, 0); -+ return 1; -+ } -+ -+ return 0; -+} -+ -+static void dump_fq(struct device *dev, int fqid, uint16_t channel) -+{ -+ if (channel) { -+ /* display fqs with a valid (!= 0) destination channel */ -+ dev_info(dev, "FQ ID:%d Channel ID:%d\n", fqid, channel); -+ } -+} -+ -+static void dump_fq_duple(struct device *dev, struct qman_fq *fqs, -+ int fqs_count, uint16_t channel_id) -+{ -+ int i; -+ for (i = 0; i < fqs_count; i++) -+ dump_fq(dev, (fqs + i)->fqid, channel_id); -+} -+ -+static void dump_oh_config(struct device *dev, struct dpa_oh_config_s *conf) -+{ -+ struct list_head *fq_list; -+ struct fq_duple *fqd; -+ int i; -+ -+ dev_info(dev, "Default egress frame queue: %d\n", conf->default_fqid); -+ dev_info(dev, "Default error frame queue: %d\n", conf->error_fqid); -+ -+ /* TX queues (old initialization) */ -+ dev_info(dev, "Initialized queues:"); -+ for (i = 0; i < conf->egress_cnt; i++) -+ dump_fq_duple(dev, conf->egress_fqs, conf->egress_cnt, -+ conf->channel); -+ -+ /* initialized ingress queues */ -+ list_for_each(fq_list, &conf->fqs_ingress_list) { -+ fqd = list_entry(fq_list, struct fq_duple, fq_list); -+ dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id); -+ } -+ -+ /* initialized egress queues */ -+ list_for_each(fq_list, &conf->fqs_egress_list) { -+ fqd = list_entry(fq_list, struct fq_duple, fq_list); -+ dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id); -+ } -+} -+ -+/* Destroys Frame Queues */ -+static void oh_fq_destroy(struct qman_fq *fq) -+{ -+ int _errno = 0; -+ -+ _errno = qman_retire_fq(fq, NULL); -+ if (unlikely(_errno < 0)) -+ pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_retire_fq(%u)=%d\n", -+ KBUILD_BASENAME".c", __LINE__, __func__, -+ qman_fq_fqid(fq), _errno); -+ -+ _errno = qman_oos_fq(fq); -+ if (unlikely(_errno < 0)) { -+ pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_oos_fq(%u)=%d\n", -+ KBUILD_BASENAME".c", __LINE__, __func__, -+ qman_fq_fqid(fq), _errno); -+ } -+ -+ qman_destroy_fq(fq, 0); -+} -+ -+/* Allocation code for the OH port's PCD frame queues */ -+static int __cold oh_alloc_pcd_fqids(struct device *dev, -+ uint32_t num, -+ uint8_t alignment, -+ uint32_t *base_fqid) -+{ -+ dev_crit(dev, "callback not implemented!\n"); -+ BUG(); -+ -+ return 0; -+} -+ -+static int __cold oh_free_pcd_fqids(struct device *dev, uint32_t base_fqid) -+{ -+ dev_crit(dev, "callback not implemented!\n"); -+ BUG(); -+ -+ return 0; -+} -+ -+static void oh_set_buffer_layout(struct fm_port *port, -+ struct dpa_buffer_layout_s *layout) -+{ -+ struct fm_port_params params; -+ -+ layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE; -+ layout->parse_results = true; -+ layout->hash_results = true; -+ layout->time_stamp = false; -+ -+ fm_port_get_buff_layout_ext_params(port, ¶ms); -+ layout->manip_extra_space = params.manip_extra_space; -+ layout->data_align = params.data_align; -+} -+ -+static int -+oh_port_probe(struct platform_device *_of_dev) -+{ -+ struct device *dpa_oh_dev; -+ struct device_node *dpa_oh_node; -+ int lenp, _errno = 0, fq_idx, duple_idx; -+ int n_size, i, j, ret, duples_count; -+ struct platform_device *oh_of_dev; -+ struct device_node *oh_node, *bpool_node = NULL, *root_node; -+ struct device *oh_dev; -+ struct dpa_oh_config_s *oh_config = NULL; -+ const __be32 *oh_all_queues; -+ 
const __be32 *channel_ids; -+ const __be32 *oh_tx_queues; -+ uint32_t queues_count; -+ uint32_t crt_fqid_base; -+ uint32_t crt_fq_count; -+ bool frag_enabled = false; -+ struct fm_port_params oh_port_tx_params; -+ struct fm_port_pcd_param oh_port_pcd_params; -+ struct dpa_buffer_layout_s buf_layout; -+ -+ /* True if the current partition owns the OH port. */ -+ bool init_oh_port; -+ -+ const struct of_device_id *match; -+ int crt_ext_pools_count; -+ u32 ext_pool_size; -+ u32 port_id; -+ u32 channel_id; -+ -+ int channel_ids_count; -+ int channel_idx; -+ struct fq_duple *fqd; -+ struct list_head *fq_list, *fq_list_tmp; -+ -+ const __be32 *bpool_cfg; -+ uint32_t bpid; -+ -+ memset(&oh_port_tx_params, 0, sizeof(oh_port_tx_params)); -+ dpa_oh_dev = &_of_dev->dev; -+ dpa_oh_node = dpa_oh_dev->of_node; -+ BUG_ON(dpa_oh_node == NULL); -+ -+ match = of_match_device(oh_port_match_table, dpa_oh_dev); -+ if (!match) -+ return -EINVAL; -+ -+ dev_dbg(dpa_oh_dev, "Probing OH port...\n"); -+ -+ /* Find the referenced OH node */ -+ oh_node = of_parse_phandle(dpa_oh_node, "fsl,fman-oh-port", 0); -+ if (oh_node == NULL) { -+ dev_err(dpa_oh_dev, -+ "Can't find OH node referenced from node %s\n", -+ dpa_oh_node->full_name); -+ return -EINVAL; -+ } -+ dev_info(dpa_oh_dev, "Found OH node handle compatible with %s\n", -+ match->compatible); -+ -+ _errno = of_property_read_u32(oh_node, "cell-index", &port_id); -+ if (_errno) { -+ dev_err(dpa_oh_dev, "No port id found in node %s\n", -+ dpa_oh_node->full_name); -+ goto return_kfree; -+ } -+ -+ _errno = of_property_read_u32(oh_node, "fsl,qman-channel-id", -+ &channel_id); -+ if (_errno) { -+ dev_err(dpa_oh_dev, "No channel id found in node %s\n", -+ dpa_oh_node->full_name); -+ goto return_kfree; -+ } -+ -+ oh_of_dev = of_find_device_by_node(oh_node); -+ BUG_ON(oh_of_dev == NULL); -+ oh_dev = &oh_of_dev->dev; -+ -+ /* The OH port must be initialized exactly once. -+ * The following scenarios are of interest: -+ * - the node is Linux-private (will always initialize it); -+ * - the node is shared between two Linux partitions -+ * (only one of them will initialize it); -+ * - the node is shared between a Linux and a LWE partition -+ * (Linux will initialize it) - "fsl,dpa-oh-shared" -+ */ -+ -+ /* Check if the current partition owns the OH port -+ * and ought to initialize it. It may be the case that we leave this -+ * to another (also Linux) partition. -+ */ -+ init_oh_port = strcmp(match->compatible, "fsl,dpa-oh-shared"); -+ -+ /* If we aren't the "owner" of the OH node, we're done here. 
*/ -+ if (!init_oh_port) { -+ dev_dbg(dpa_oh_dev, -+ "Not owning the shared OH port %s, will not initialize it.\n", -+ oh_node->full_name); -+ of_node_put(oh_node); -+ return 0; -+ } -+ -+ /* Allocate OH dev private data */ -+ oh_config = devm_kzalloc(dpa_oh_dev, sizeof(*oh_config), GFP_KERNEL); -+ if (oh_config == NULL) { -+ dev_err(dpa_oh_dev, -+ "Can't allocate private data for OH node %s referenced from node %s!\n", -+ oh_node->full_name, dpa_oh_node->full_name); -+ _errno = -ENOMEM; -+ goto return_kfree; -+ } -+ -+ INIT_LIST_HEAD(&oh_config->fqs_ingress_list); -+ INIT_LIST_HEAD(&oh_config->fqs_egress_list); -+ -+ /* FQs that enter OH port */ -+ lenp = 0; -+ oh_all_queues = of_get_property(dpa_oh_node, -+ "fsl,qman-frame-queues-ingress", &lenp); -+ if (lenp % (2 * sizeof(*oh_all_queues))) { -+ dev_warn(dpa_oh_dev, -+ "Wrong ingress queues format for OH node %s referenced from node %s!\n", -+ oh_node->full_name, dpa_oh_node->full_name); -+ /* just ignore the last unpaired value */ -+ } -+ -+ duples_count = lenp / (2 * sizeof(*oh_all_queues)); -+ dev_dbg(dpa_oh_dev, "Allocating %d ingress frame queues duples\n", -+ duples_count); -+ for (duple_idx = 0; duple_idx < duples_count; duple_idx++) { -+ crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]); -+ crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]); -+ -+ fqd = devm_kzalloc(dpa_oh_dev, -+ sizeof(struct fq_duple), GFP_KERNEL); -+ if (!fqd) { -+ dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n", -+ oh_node->full_name, -+ dpa_oh_node->full_name); -+ _errno = -ENOMEM; -+ goto return_kfree; -+ } -+ -+ fqd->fqs = devm_kzalloc(dpa_oh_dev, -+ crt_fq_count * sizeof(struct qman_fq), -+ GFP_KERNEL); -+ if (!fqd->fqs) { -+ dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n", -+ oh_node->full_name, -+ dpa_oh_node->full_name); -+ _errno = -ENOMEM; -+ goto return_kfree; -+ } -+ -+ for (j = 0; j < crt_fq_count; j++) -+ (fqd->fqs + j)->fqid = crt_fqid_base + j; -+ fqd->fqs_count = crt_fq_count; -+ fqd->channel_id = (uint16_t)channel_id; -+ list_add(&fqd->fq_list, &oh_config->fqs_ingress_list); -+ } -+ -+ /* create the ingress queues */ -+ list_for_each(fq_list, &oh_config->fqs_ingress_list) { -+ fqd = list_entry(fq_list, struct fq_duple, fq_list); -+ -+ for (j = 0; j < fqd->fqs_count; j++) { -+ ret = oh_fq_create(fqd->fqs + j, -+ (fqd->fqs + j)->fqid, -+ fqd->channel_id, 3); -+ if (ret != 0) { -+ dev_err(dpa_oh_dev, "Unable to create ingress frame queue %d for OH node %s referenced from node %s!\n", -+ (fqd->fqs + j)->fqid, -+ oh_node->full_name, -+ dpa_oh_node->full_name); -+ _errno = -EINVAL; -+ goto return_kfree; -+ } -+ } -+ } -+ -+ /* FQs that exit OH port */ -+ lenp = 0; -+ oh_all_queues = of_get_property(dpa_oh_node, -+ "fsl,qman-frame-queues-egress", &lenp); -+ if (lenp % (2 * sizeof(*oh_all_queues))) { -+ dev_warn(dpa_oh_dev, -+ "Wrong egress queues format for OH node %s referenced from node %s!\n", -+ oh_node->full_name, dpa_oh_node->full_name); -+ /* just ignore the last unpaired value */ -+ } -+ -+ duples_count = lenp / (2 * sizeof(*oh_all_queues)); -+ dev_dbg(dpa_oh_dev, "Allocating %d egress frame queues duples\n", -+ duples_count); -+ for (duple_idx = 0; duple_idx < duples_count; duple_idx++) { -+ crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]); -+ crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]); -+ -+ fqd = devm_kzalloc(dpa_oh_dev, -+ sizeof(struct fq_duple), GFP_KERNEL);
-+ if (!fqd) { -+ dev_err(dpa_oh_dev, "Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n", -+ oh_node->full_name, -+ dpa_oh_node->full_name); -+ _errno = -ENOMEM; -+ goto return_kfree; -+ } -+ -+ fqd->fqs = devm_kzalloc(dpa_oh_dev, -+ crt_fq_count * sizeof(struct qman_fq), -+ GFP_KERNEL); -+ if (!fqd->fqs) { -+ dev_err(dpa_oh_dev, -+ "Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n", -+ oh_node->full_name, -+ dpa_oh_node->full_name); -+ _errno = -ENOMEM; -+ goto return_kfree; -+ } -+ -+ for (j = 0; j < crt_fq_count; j++) -+ (fqd->fqs + j)->fqid = crt_fqid_base + j; -+ fqd->fqs_count = crt_fq_count; -+ /* channel ID is specified in another attribute */ -+ fqd->channel_id = 0; -+ list_add_tail(&fqd->fq_list, &oh_config->fqs_egress_list); -+ -+ /* allocate the queue */ -+ -+ } -+ -+ /* channel_ids for FQs that exit OH port */ -+ lenp = 0; -+ channel_ids = of_get_property(dpa_oh_node, -+ "fsl,qman-channel-ids-egress", &lenp); -+ -+ channel_ids_count = lenp / (sizeof(*channel_ids)); -+ if (channel_ids_count != duples_count) { -+ dev_warn(dpa_oh_dev, -+ "Not all egress queues have a channel id for OH node %s referenced from node %s!\n", -+ oh_node->full_name, dpa_oh_node->full_name); -+ /* just ignore the queues that do not have a Channel ID */ -+ } -+ -+ channel_idx = 0; -+ list_for_each(fq_list, &oh_config->fqs_egress_list) { -+ if (channel_idx + 1 > channel_ids_count) -+ break; -+ fqd = list_entry(fq_list, struct fq_duple, fq_list); -+ fqd->channel_id = -+ (uint16_t)be32_to_cpu(channel_ids[channel_idx++]); -+ } -+ -+ /* create egress queues */ -+ list_for_each(fq_list, &oh_config->fqs_egress_list) { -+ fqd = list_entry(fq_list, struct fq_duple, fq_list); -+ -+ if (fqd->channel_id == 0) { -+ /* missing channel id in dts */ -+ continue; -+ } -+ -+ for (j = 0; j < fqd->fqs_count; j++) { -+ ret = oh_fq_create(fqd->fqs + j, -+ (fqd->fqs + j)->fqid, -+ fqd->channel_id, 3); -+ if (ret != 0) { -+ dev_err(dpa_oh_dev, "Unable to create egress frame queue %d for OH node %s referenced from node %s!\n", -+ (fqd->fqs + j)->fqid, -+ oh_node->full_name, -+ dpa_oh_node->full_name); -+ _errno = -EINVAL; -+ goto return_kfree; -+ } -+ } -+ } -+ -+ /* Read FQ ids/nums for the DPA OH node */ -+ oh_all_queues = of_get_property(dpa_oh_node, -+ "fsl,qman-frame-queues-oh", &lenp); -+ if (oh_all_queues == NULL) { -+ dev_err(dpa_oh_dev, -+ "No frame queues have been defined for OH node %s referenced from node %s\n", -+ oh_node->full_name, dpa_oh_node->full_name); -+ _errno = -EINVAL; -+ goto return_kfree; -+ } -+ -+ /* Check that the OH error and default FQs are there */ -+ BUG_ON(lenp % (2 * sizeof(*oh_all_queues))); -+ queues_count = lenp / (2 * sizeof(*oh_all_queues)); -+ if (queues_count != 2) { -+ dev_err(dpa_oh_dev, -+ "Error and Default queues must be defined for OH node %s referenced from node %s\n", -+ oh_node->full_name, dpa_oh_node->full_name); -+ _errno = -EINVAL; -+ goto return_kfree; -+ } -+ -+ /* Read the FQIDs defined for this OH port */ -+ dev_dbg(dpa_oh_dev, "Reading %d queues...\n", queues_count); -+ fq_idx = 0; -+ -+ /* Error FQID - must be present */ -+ crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]); -+ crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]); -+ if (crt_fq_count != 1) { -+ dev_err(dpa_oh_dev, -+ "Only 1 Error FQ allowed in OH node %s referenced from node %s (read: %d FQIDs).\n", -+ oh_node->full_name, dpa_oh_node->full_name, -+ crt_fq_count); -+ _errno = -EINVAL; -+ goto return_kfree; -+ } -+ 
oh_config->error_fqid = crt_fqid_base; -+ dev_dbg(dpa_oh_dev, "Read Error FQID 0x%x for OH port %s.\n", -+ oh_config->error_fqid, oh_node->full_name); -+ -+ /* Default FQID - must be present */ -+ crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]); -+ crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]); -+ if (crt_fq_count != 1) { -+ dev_err(dpa_oh_dev, -+ "Only 1 Default FQ allowed in OH node %s referenced from %s (read: %d FQIDs).\n", -+ oh_node->full_name, dpa_oh_node->full_name, -+ crt_fq_count); -+ _errno = -EINVAL; -+ goto return_kfree; -+ } -+ oh_config->default_fqid = crt_fqid_base; -+ dev_dbg(dpa_oh_dev, "Read Default FQID 0x%x for OH port %s.\n", -+ oh_config->default_fqid, oh_node->full_name); -+ -+ /* TX FQID - presence is optional */ -+ oh_tx_queues = of_get_property(dpa_oh_node, "fsl,qman-frame-queues-tx", -+ &lenp); -+ if (oh_tx_queues == NULL) { -+ dev_dbg(dpa_oh_dev, -+ "No tx queues have been defined for OH node %s referenced from node %s\n", -+ oh_node->full_name, dpa_oh_node->full_name); -+ goto config_port; -+ } -+ -+ /* Check that queues-tx has only a base and a count defined */ -+ BUG_ON(lenp % (2 * sizeof(*oh_tx_queues))); -+ queues_count = lenp / (2 * sizeof(*oh_tx_queues)); -+ if (queues_count != 1) { -+ dev_err(dpa_oh_dev, -+ "TX queues must be defined in only one tuple for OH node %s referenced from node %s\n", -+ oh_node->full_name, dpa_oh_node->full_name); -+ _errno = -EINVAL; -+ goto return_kfree; -+ } -+ -+ fq_idx = 0; -+ crt_fqid_base = be32_to_cpu(oh_tx_queues[fq_idx++]); -+ crt_fq_count = be32_to_cpu(oh_tx_queues[fq_idx++]); -+ oh_config->egress_cnt = crt_fq_count; -+ -+ /* Allocate TX queues */ -+ dev_dbg(dpa_oh_dev, "Allocating %d queues for TX...\n", crt_fq_count); -+ oh_config->egress_fqs = devm_kzalloc(dpa_oh_dev, -+ crt_fq_count * sizeof(struct qman_fq), GFP_KERNEL); -+ if (oh_config->egress_fqs == NULL) { -+ dev_err(dpa_oh_dev, -+ "Can't allocate private data for TX queues for OH node %s referenced from node %s!\n", -+ oh_node->full_name, dpa_oh_node->full_name); -+ _errno = -ENOMEM; -+ goto return_kfree; -+ } -+ -+ /* Create TX queues */ -+ for (i = 0; i < crt_fq_count; i++) { -+ ret = oh_fq_create(oh_config->egress_fqs + i, -+ crt_fqid_base + i, (uint16_t)channel_id, 3); -+ if (ret != 0) { -+ dev_err(dpa_oh_dev, -+ "Unable to create TX frame queue %d for OH node %s referenced from node %s!\n", -+ crt_fqid_base + i, oh_node->full_name, -+ dpa_oh_node->full_name); -+ _errno = -EINVAL; -+ goto return_kfree; -+ } -+ } -+ -+config_port: -+ /* Get a handle to the fm_port so we can set -+ * its configuration params -+ */ -+ oh_config->oh_port = fm_port_bind(oh_dev); -+ if (oh_config->oh_port == NULL) { -+ dev_err(dpa_oh_dev, "NULL drvdata from fm port dev %s!\n", -+ oh_node->full_name); -+ _errno = -EINVAL; -+ goto return_kfree; -+ } -+ -+ oh_set_buffer_layout(oh_config->oh_port, &buf_layout); -+ -+ /* read the pool handlers */ -+ crt_ext_pools_count = of_count_phandle_with_args(dpa_oh_node, -+ "fsl,bman-buffer-pools", NULL); -+ if (crt_ext_pools_count <= 0) { -+ dev_info(dpa_oh_dev, -+ "OH port %s has no buffer pool. 
Fragmentation will not be enabled\n", -+ oh_node->full_name); -+ goto init_port; -+ } -+ -+ /* used for reading ext_pool_size */ -+ root_node = of_find_node_by_path("/"); -+ if (root_node == NULL) { -+ dev_err(dpa_oh_dev, "of_find_node_by_path(/) failed\n"); -+ _errno = -EINVAL; -+ goto return_kfree; -+ } -+ -+ n_size = of_n_size_cells(root_node); -+ of_node_put(root_node); -+ -+ dev_dbg(dpa_oh_dev, "OH port number of pools = %d\n", -+ crt_ext_pools_count); -+ -+ oh_port_tx_params.num_pools = (uint8_t)crt_ext_pools_count; -+ -+ for (i = 0; i < crt_ext_pools_count; i++) { -+ bpool_node = of_parse_phandle(dpa_oh_node, -+ "fsl,bman-buffer-pools", i); -+ if (bpool_node == NULL) { -+ dev_err(dpa_oh_dev, "Invalid Buffer pool node\n"); -+ _errno = -EINVAL; -+ goto return_kfree; -+ } -+ -+ _errno = of_property_read_u32(bpool_node, "fsl,bpid", &bpid); -+ if (_errno) { -+ dev_err(dpa_oh_dev, "Invalid Buffer Pool ID\n"); -+ _errno = -EINVAL; -+ goto return_kfree; -+ } -+ -+ oh_port_tx_params.pool_param[i].id = (uint8_t)bpid; -+ dev_dbg(dpa_oh_dev, "OH port bpool id = %u\n", bpid); -+ -+ bpool_cfg = of_get_property(bpool_node, -+ "fsl,bpool-ethernet-cfg", &lenp); -+ if (bpool_cfg == NULL) { -+ dev_err(dpa_oh_dev, "Invalid Buffer pool config params\n"); -+ _errno = -EINVAL; -+ goto return_kfree; -+ } -+ -+ ext_pool_size = of_read_number(bpool_cfg + n_size, n_size); -+ oh_port_tx_params.pool_param[i].size = (uint16_t)ext_pool_size; -+ dev_dbg(dpa_oh_dev, "OH port bpool size = %u\n", -+ ext_pool_size); -+ of_node_put(bpool_node); -+ -+ } -+ -+ if (buf_layout.data_align != FRAG_DATA_ALIGN || -+ buf_layout.manip_extra_space != FRAG_MANIP_SPACE) -+ goto init_port; -+ -+ frag_enabled = true; -+ dev_info(dpa_oh_dev, "IP Fragmentation enabled for OH port %d\n", -+ port_id); -+ -+init_port: -+ of_node_put(oh_node); -+ /* Set Tx params */ -+ dpaa_eth_init_port(tx, oh_config->oh_port, oh_port_tx_params, -+ oh_config->error_fqid, oh_config->default_fqid, (&buf_layout), -+ frag_enabled); -+ /* Set PCD params */ -+ oh_port_pcd_params.cba = oh_alloc_pcd_fqids; -+ oh_port_pcd_params.cbf = oh_free_pcd_fqids; -+ oh_port_pcd_params.dev = dpa_oh_dev; -+ fm_port_pcd_bind(oh_config->oh_port, &oh_port_pcd_params); -+ -+ dev_set_drvdata(dpa_oh_dev, oh_config); -+ -+ /* Enable the OH port */ -+ _errno = fm_port_enable(oh_config->oh_port); -+ if (_errno) -+ goto return_kfree; -+ -+ dev_info(dpa_oh_dev, "OH port %s enabled.\n", oh_node->full_name); -+ -+ /* print of all referenced & created queues */ -+ dump_oh_config(dpa_oh_dev, oh_config); -+ -+ return 0; -+ -+return_kfree: -+ if (bpool_node) -+ of_node_put(bpool_node); -+ if (oh_node) -+ of_node_put(oh_node); -+ /* oh_config is still NULL if we failed before allocating it; don't -+ * dereference it in that case -+ */ -+ if (oh_config == NULL) -+ return _errno; -+ if (oh_config->egress_fqs) -+ devm_kfree(dpa_oh_dev, oh_config->egress_fqs); -+ -+ list_for_each_safe(fq_list, fq_list_tmp, &oh_config->fqs_ingress_list) { -+ fqd = list_entry(fq_list, struct fq_duple, fq_list); -+ list_del(fq_list); -+ devm_kfree(dpa_oh_dev, fqd->fqs); -+ devm_kfree(dpa_oh_dev, fqd); -+ } -+ -+ list_for_each_safe(fq_list, fq_list_tmp, &oh_config->fqs_egress_list) { -+ fqd = list_entry(fq_list, struct fq_duple, fq_list); -+ list_del(fq_list); -+ devm_kfree(dpa_oh_dev, fqd->fqs); -+ devm_kfree(dpa_oh_dev, fqd); -+ } -+ -+ devm_kfree(dpa_oh_dev, oh_config); -+ return _errno; -+} -+ -+static int __cold oh_port_remove(struct platform_device *_of_dev) -+{ -+ int _errno = 0, i; -+ struct dpa_oh_config_s *oh_config; -+ -+ pr_info("Removing OH port...\n"); -+ -+ oh_config = dev_get_drvdata(&_of_dev->dev); -+ if (oh_config == NULL) { -+
pr_err(KBUILD_MODNAME -+ ": %s:%hu:%s(): No OH config in device private data!\n", -+ KBUILD_BASENAME".c", __LINE__, __func__); -+ _errno = -ENODEV; -+ goto return_error; -+ } -+ -+ if (oh_config->egress_fqs) -+ for (i = 0; i < oh_config->egress_cnt; i++) -+ oh_fq_destroy(oh_config->egress_fqs + i); -+ -+ if (oh_config->oh_port == NULL) { -+ pr_err(KBUILD_MODNAME -+ ": %s:%hu:%s(): No fm port in device private data!\n", -+ KBUILD_BASENAME".c", __LINE__, __func__); -+ _errno = -EINVAL; -+ goto free_egress_fqs; -+ } -+ -+ _errno = fm_port_disable(oh_config->oh_port); -+ -+free_egress_fqs: -+ if (oh_config->egress_fqs) -+ devm_kfree(&_of_dev->dev, oh_config->egress_fqs); -+ devm_kfree(&_of_dev->dev, oh_config); -+ dev_set_drvdata(&_of_dev->dev, NULL); -+ -+return_error: -+ return _errno; -+} -+ -+static struct platform_driver oh_port_driver = { -+ .driver = { -+ .name = KBUILD_MODNAME, -+ .of_match_table = oh_port_match_table, -+ .owner = THIS_MODULE, -+ .pm = OH_PM_OPS, -+ }, -+ .probe = oh_port_probe, -+ .remove = oh_port_remove -+}; -+ -+static int __init __cold oh_port_load(void) -+{ -+ int _errno; -+ -+ pr_info(OH_MOD_DESCRIPTION "\n"); -+ -+ _errno = platform_driver_register(&oh_port_driver); -+ if (_errno < 0) { -+ pr_err(KBUILD_MODNAME -+ ": %s:%hu:%s(): platform_driver_register() = %d\n", -+ KBUILD_BASENAME".c", __LINE__, __func__, _errno); -+ } -+ -+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", -+ KBUILD_BASENAME".c", __func__); -+ return _errno; -+} -+module_init(oh_port_load); -+ -+static void __exit __cold oh_port_unload(void) -+{ -+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n", -+ KBUILD_BASENAME".c", __func__); -+ -+ platform_driver_unregister(&oh_port_driver); -+ -+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", -+ KBUILD_BASENAME".c", __func__); -+} -+module_exit(oh_port_unload); ---- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h -@@ -0,0 +1,59 @@ -+/* Copyright 2011 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#ifndef __OFFLINE_PORT_H -+#define __OFFLINE_PORT_H -+ -+struct fm_port; -+struct qman_fq; -+ -+/* fqs are defined in duples (base_fq, fq_count) */ -+struct fq_duple { -+ struct qman_fq *fqs; -+ int fqs_count; -+ uint16_t channel_id; -+ struct list_head fq_list; -+}; -+ -+/* OH port configuration */ -+struct dpa_oh_config_s { -+ uint32_t error_fqid; -+ uint32_t default_fqid; -+ struct fm_port *oh_port; -+ uint32_t egress_cnt; -+ struct qman_fq *egress_fqs; -+ uint16_t channel; -+ -+ struct list_head fqs_ingress_list; -+ struct list_head fqs_egress_list; -+}; -+ -+#endif /* __OFFLINE_PORT_H */ diff --git a/target/linux/layerscape/patches-4.4/7017-fsl_qbman-add-qbman-driver.patch b/target/linux/layerscape/patches-4.4/7017-fsl_qbman-add-qbman-driver.patch deleted file mode 100644 index e7e5d311c..000000000 --- a/target/linux/layerscape/patches-4.4/7017-fsl_qbman-add-qbman-driver.patch +++ /dev/null @@ -1,24828 +0,0 @@ -From f6f8ed4784936724154832ff9e4c5afe8caa63e4 Mon Sep 17 00:00:00 2001 -From: Zhao Qiang -Date: Mon, 11 Jul 2016 14:39:18 +0800 -Subject: [PATCH 17/70] fsl_qbman: add qbman driver - -The QMan and BMan are infrastructure components of dpaa, which are used -by both software and hardware for queuing and memory allocation/deallocation. 
-
-Signed-off-by: Roy Pledge
-Signed-off-by: Camelia Groza
-Signed-off-by: Geoff Thorpe
-Signed-off-by: Ahmed Mansour
-Signed-off-by: Alex Porosanu
-Signed-off-by: Pan Jiafei
-Signed-off-by: Haiying Wang
-
-Signed-off-by: Xie Jianhua-B29408
-Signed-off-by: Zhao Qiang
----
- arch/arm/Kconfig | 5 +
- arch/powerpc/Kconfig | 9 +-
- drivers/misc/Kconfig | 17 +
- drivers/staging/Kconfig | 2 +
- drivers/staging/Makefile | 1 +
- drivers/staging/fsl_qbman/Kconfig | 211 +
- drivers/staging/fsl_qbman/Makefile | 28 +
- drivers/staging/fsl_qbman/bman_config.c | 705 +++
- drivers/staging/fsl_qbman/bman_debugfs.c | 119 +
- drivers/staging/fsl_qbman/bman_driver.c | 574 +++
- drivers/staging/fsl_qbman/bman_high.c | 1141 +++++
- drivers/staging/fsl_qbman/bman_low.h | 559 +++
- drivers/staging/fsl_qbman/bman_private.h | 166 +
- drivers/staging/fsl_qbman/bman_test.c | 56 +
- drivers/staging/fsl_qbman/bman_test.h | 44 +
- drivers/staging/fsl_qbman/bman_test_high.c | 183 +
- drivers/staging/fsl_qbman/bman_test_thresh.c | 196 +
- drivers/staging/fsl_qbman/dpa_alloc.c | 706 +++
- drivers/staging/fsl_qbman/dpa_sys.h | 259 ++
- drivers/staging/fsl_qbman/dpa_sys_arm.h | 95 +
- drivers/staging/fsl_qbman/dpa_sys_arm64.h | 102 +
- drivers/staging/fsl_qbman/dpa_sys_ppc32.h | 70 +
- drivers/staging/fsl_qbman/dpa_sys_ppc64.h | 79 +
- drivers/staging/fsl_qbman/fsl_usdpaa.c | 1982 ++++
- drivers/staging/fsl_qbman/fsl_usdpaa_irq.c | 289 ++
- drivers/staging/fsl_qbman/qbman_driver.c | 88 +
- drivers/staging/fsl_qbman/qman_config.c | 1199 +++++
- drivers/staging/fsl_qbman/qman_debugfs.c | 1594 +++++++
- drivers/staging/fsl_qbman/qman_driver.c | 980 ++++
- drivers/staging/fsl_qbman/qman_high.c | 5568 +++++++++++++++++++++++
- drivers/staging/fsl_qbman/qman_low.h | 1407 ++++++
- drivers/staging/fsl_qbman/qman_private.h | 398 ++
- drivers/staging/fsl_qbman/qman_test.c | 57 +
- drivers/staging/fsl_qbman/qman_test.h | 45 +
- drivers/staging/fsl_qbman/qman_test_high.c | 216 +
- drivers/staging/fsl_qbman/qman_test_hotpotato.c | 499 ++
- drivers/staging/fsl_qbman/qman_utility.c | 129 +
- include/linux/fsl_bman.h | 532 +++
- include/linux/fsl_qman.h | 3889 ++++++++++++++++
- include/linux/fsl_usdpaa.h | 372 ++
- 40 files changed, 24569 insertions(+), 2 deletions(-)
- create mode 100644 drivers/staging/fsl_qbman/Kconfig
- create mode 100644 drivers/staging/fsl_qbman/Makefile
- create mode 100644 drivers/staging/fsl_qbman/bman_config.c
- create mode 100644 drivers/staging/fsl_qbman/bman_debugfs.c
- create mode 100644 drivers/staging/fsl_qbman/bman_driver.c
- create mode 100644 drivers/staging/fsl_qbman/bman_high.c
- create mode 100644 drivers/staging/fsl_qbman/bman_low.h
- create mode 100644 drivers/staging/fsl_qbman/bman_private.h
- create mode 100644 drivers/staging/fsl_qbman/bman_test.c
- create mode 100644 drivers/staging/fsl_qbman/bman_test.h
- create mode 100644 drivers/staging/fsl_qbman/bman_test_high.c
- create mode 100644 drivers/staging/fsl_qbman/bman_test_thresh.c
- create mode 100644 drivers/staging/fsl_qbman/dpa_alloc.c
- create mode 100644 drivers/staging/fsl_qbman/dpa_sys.h
- create mode 100644 drivers/staging/fsl_qbman/dpa_sys_arm.h
- create mode 100644 drivers/staging/fsl_qbman/dpa_sys_arm64.h
- create mode 100644 drivers/staging/fsl_qbman/dpa_sys_ppc32.h
- create mode 100644 drivers/staging/fsl_qbman/dpa_sys_ppc64.h
- create mode 100644 drivers/staging/fsl_qbman/fsl_usdpaa.c
- create mode 100644 drivers/staging/fsl_qbman/fsl_usdpaa_irq.c
- create mode 100644 drivers/staging/fsl_qbman/qbman_driver.c
- create mode
100644 drivers/staging/fsl_qbman/qman_config.c - create mode 100644 drivers/staging/fsl_qbman/qman_debugfs.c - create mode 100644 drivers/staging/fsl_qbman/qman_driver.c - create mode 100644 drivers/staging/fsl_qbman/qman_high.c - create mode 100644 drivers/staging/fsl_qbman/qman_low.h - create mode 100644 drivers/staging/fsl_qbman/qman_private.h - create mode 100644 drivers/staging/fsl_qbman/qman_test.c - create mode 100644 drivers/staging/fsl_qbman/qman_test.h - create mode 100644 drivers/staging/fsl_qbman/qman_test_high.c - create mode 100644 drivers/staging/fsl_qbman/qman_test_hotpotato.c - create mode 100644 drivers/staging/fsl_qbman/qman_utility.c - create mode 100644 include/linux/fsl_bman.h - create mode 100644 include/linux/fsl_qman.h - create mode 100644 include/linux/fsl_usdpaa.h - ---- a/arch/arm/Kconfig -+++ b/arch/arm/Kconfig -@@ -1250,6 +1250,11 @@ source "arch/arm/common/Kconfig" - - menu "Bus support" - -+config HAS_FSL_QBMAN -+ bool "Datapath Acceleration Queue and Buffer management" -+ help -+ Datapath Acceleration Queue and Buffer management -+ - config ISA - bool - help ---- a/arch/powerpc/Kconfig -+++ b/arch/powerpc/Kconfig -@@ -786,6 +786,11 @@ config FSL_GTM - help - Freescale General-purpose Timers support - -+config HAS_FSL_QBMAN -+ bool "Datapath Acceleration Queue and Buffer management" -+ help -+ Datapath Acceleration Queue and Buffer management -+ - # Yes MCA RS/6000s exist but Linux-PPC does not currently support any - config MCA - bool -@@ -918,14 +923,14 @@ config DYNAMIC_MEMSTART - select NONSTATIC_KERNEL - help - This option enables the kernel to be loaded at any page aligned -- physical address. The kernel creates a mapping from KERNELBASE to -+ physical address. The kernel creates a mapping from KERNELBASE to - the address where the kernel is loaded. The page size here implies - the TLB page size of the mapping for kernel on the particular platform. - Please refer to the init code for finding the TLB page size. - - DYNAMIC_MEMSTART is an easy way of implementing pseudo-RELOCATABLE - kernel image, where the only restriction is the page aligned kernel -- load address. When this option is enabled, the compile time physical -+ load address. When this option is enabled, the compile time physical - address CONFIG_PHYSICAL_START is ignored. - - This option is overridden by CONFIG_RELOCATABLE ---- a/drivers/misc/Kconfig -+++ b/drivers/misc/Kconfig -@@ -236,6 +236,23 @@ config SGI_XP - this feature will allow for direct communication between SSIs - based on a network adapter and DMA messaging. - -+config FSL_USDPAA -+ bool "Freescale USDPAA process driver" -+ depends on FSL_DPA -+ default y -+ help -+ This driver provides user-space access to kernel-managed -+ resource interfaces for USDPAA applications, on the assumption -+ that each process will open this device once. Specifically, this -+ device exposes functionality that would be awkward if exposed -+ via the portal devices - ie. this device exposes functionality -+ that is inherently process-wide rather than portal-specific. -+ This device is necessary for obtaining access to DMA memory and -+ for allocation of Qman and Bman resources. In short, if you wish -+ to use USDPAA applications, you need this. -+ -+ If unsure, say Y. 
-+ - config CS5535_MFGPT - tristate "CS5535/CS5536 Geode Multi-Function General Purpose Timer (MFGPT) support" - depends on MFD_CS5535 ---- a/drivers/staging/Kconfig -+++ b/drivers/staging/Kconfig -@@ -106,6 +106,8 @@ source "drivers/staging/fbtft/Kconfig" - - source "drivers/staging/fsl-mc/Kconfig" - -+source "drivers/staging/fsl_qbman/Kconfig" -+ - source "drivers/staging/wilc1000/Kconfig" - - source "drivers/staging/most/Kconfig" ---- a/drivers/staging/Makefile -+++ b/drivers/staging/Makefile -@@ -45,5 +45,6 @@ obj-$(CONFIG_UNISYSSPAR) += unisys/ - obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/ - obj-$(CONFIG_FB_TFT) += fbtft/ - obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/ -+obj-$(CONFIG_FSL_DPA) += fsl_qbman/ - obj-$(CONFIG_WILC1000) += wilc1000/ - obj-$(CONFIG_MOST) += most/ ---- /dev/null -+++ b/drivers/staging/fsl_qbman/Kconfig -@@ -0,0 +1,211 @@ -+config FSL_DPA -+ bool "Freescale Datapath Queue and Buffer management" -+ depends on HAS_FSL_QBMAN -+ default y -+ select FSL_QMAN_FQ_LOOKUP if PPC64 -+ select FSL_QMAN_FQ_LOOKUP if ARM64 -+ -+ -+menu "Freescale Datapath QMan/BMan options" -+ depends on FSL_DPA -+ -+config FSL_DPA_CHECKING -+ bool "additional driver checking" -+ default n -+ ---help--- -+ Compiles in additional checks to sanity-check the drivers and any -+ use of it by other code. Not recommended for performance. -+ -+config FSL_DPA_CAN_WAIT -+ bool -+ default y -+ -+config FSL_DPA_CAN_WAIT_SYNC -+ bool -+ default y -+ -+config FSL_DPA_PIRQ_FAST -+ bool -+ default y -+ -+config FSL_DPA_PIRQ_SLOW -+ bool -+ default y -+ -+config FSL_DPA_PORTAL_SHARE -+ bool -+ default y -+ -+config FSL_BMAN -+ bool "Freescale Buffer Manager (BMan) support" -+ default y -+ -+if FSL_BMAN -+ -+config FSL_BMAN_CONFIG -+ bool "BMan device management" -+ default y -+ ---help--- -+ If this linux image is running natively, you need this option. If this -+ linux image is running as a guest OS under the hypervisor, only one -+ guest OS ("the control plane") needs this option. -+ -+config FSL_BMAN_TEST -+ tristate "BMan self-tests" -+ default n -+ ---help--- -+ This option compiles self-test code for BMan. -+ -+config FSL_BMAN_TEST_HIGH -+ bool "BMan high-level self-test" -+ depends on FSL_BMAN_TEST -+ default y -+ ---help--- -+ This requires the presence of cpu-affine portals, and performs -+ high-level API testing with them (whichever portal(s) are affine to -+ the cpu(s) the test executes on). -+ -+config FSL_BMAN_TEST_THRESH -+ bool "BMan threshold test" -+ depends on FSL_BMAN_TEST -+ default y -+ ---help--- -+ Multi-threaded (SMP) test of BMan pool depletion. A pool is seeded -+ before multiple threads (one per cpu) create pool objects to track -+ depletion state changes. The pool is then drained to empty by a -+ "drainer" thread, and the other threads that they observe exactly -+ the depletion state changes that are expected. -+ -+config FSL_BMAN_DEBUGFS -+ tristate "BMan debugfs interface" -+ depends on DEBUG_FS -+ default y -+ ---help--- -+ This option compiles debugfs code for BMan. -+ -+endif # FSL_BMAN -+ -+config FSL_QMAN -+ bool "Freescale Queue Manager (QMan) support" -+ default y -+ -+if FSL_QMAN -+ -+config FSL_QMAN_POLL_LIMIT -+ int -+ default 32 -+ -+config FSL_QMAN_CONFIG -+ bool "QMan device management" -+ default y -+ ---help--- -+ If this linux image is running natively, you need this option. If this -+ linux image is running as a guest OS under the hypervisor, only one -+ guest OS ("the control plane") needs this option. 
-+ -+config FSL_QMAN_TEST -+ tristate "QMan self-tests" -+ default n -+ ---help--- -+ This option compiles self-test code for QMan. -+ -+config FSL_QMAN_TEST_STASH_POTATO -+ bool "QMan 'hot potato' data-stashing self-test" -+ depends on FSL_QMAN_TEST -+ default y -+ ---help--- -+ This performs a "hot potato" style test enqueuing/dequeuing a frame -+ across a series of FQs scheduled to different portals (and cpus), with -+ DQRR, data and context stashing always on. -+ -+config FSL_QMAN_TEST_HIGH -+ bool "QMan high-level self-test" -+ depends on FSL_QMAN_TEST -+ default y -+ ---help--- -+ This requires the presence of cpu-affine portals, and performs -+ high-level API testing with them (whichever portal(s) are affine to -+ the cpu(s) the test executes on). -+ -+config FSL_QMAN_DEBUGFS -+ tristate "QMan debugfs interface" -+ depends on DEBUG_FS -+ default y -+ ---help--- -+ This option compiles debugfs code for QMan. -+ -+# H/w settings that can be hard-coded for now. -+config FSL_QMAN_FQD_SZ -+ int "size of Frame Queue Descriptor region" -+ default 10 -+ ---help--- -+ This is the size of the FQD region defined as: PAGE_SIZE * (2^value) -+ ex: 10 => PAGE_SIZE * (2^10) -+ Note: Default device-trees now require minimum Kconfig setting of 10. -+ -+config FSL_QMAN_PFDR_SZ -+ int "size of the PFDR pool" -+ default 13 -+ ---help--- -+ This is the size of the PFDR pool defined as: PAGE_SIZE * (2^value) -+ ex: 13 => PAGE_SIZE * (2^13) -+ -+# Corenet initiator settings. Stash request queues are 4-deep to match cores' -+# ability to snarf. Stash priority is 3, other priorities are 2. -+config FSL_QMAN_CI_SCHED_CFG_SRCCIV -+ int -+ depends on FSL_QMAN_CONFIG -+ default 4 -+config FSL_QMAN_CI_SCHED_CFG_SRQ_W -+ int -+ depends on FSL_QMAN_CONFIG -+ default 3 -+config FSL_QMAN_CI_SCHED_CFG_RW_W -+ int -+ depends on FSL_QMAN_CONFIG -+ default 2 -+config FSL_QMAN_CI_SCHED_CFG_BMAN_W -+ int -+ depends on FSL_QMAN_CONFIG -+ default 2 -+ -+# portal interrupt settings -+config FSL_QMAN_PIRQ_DQRR_ITHRESH -+ int -+ default 12 -+config FSL_QMAN_PIRQ_MR_ITHRESH -+ int -+ default 4 -+config FSL_QMAN_PIRQ_IPERIOD -+ int -+ default 100 -+ -+# 64 bit kernel support -+config FSL_QMAN_FQ_LOOKUP -+ bool -+ default n -+ -+config QMAN_CEETM_UPDATE_PERIOD -+ int "Token update period for shaping, in nanoseconds" -+ default 1000 -+ ---help--- -+ Traffic shaping works by performing token calculations (using -+ credits) on shaper instances periodically. This update period -+ sets the granularity for how often those token rate credit -+ updates are performed, and thus determines the accuracy and -+ range of traffic rates that can be configured by users. The -+ reference manual recommends a 1 microsecond period as providing -+ a good balance between granularity and range. -+ -+ Unless you know what you are doing, leave this value at its default. -+ -+config FSL_QMAN_INIT_TIMEOUT -+ int "timeout for qman init stage, in seconds" -+ default 10 -+ ---help--- -+ The timeout setting to quit the initialization loop for non-control -+ partition in case the control partition fails to boot-up.
-+ -+endif # FSL_QMAN -+ -+endmenu ---- /dev/null -+++ b/drivers/staging/fsl_qbman/Makefile -@@ -0,0 +1,28 @@ -+subdir-ccflags-y := -Werror -+ -+# Common -+obj-$(CONFIG_FSL_DPA) += dpa_alloc.o -+obj-$(CONFIG_HAS_FSL_QBMAN) += qbman_driver.o -+ -+# Bman -+obj-$(CONFIG_FSL_BMAN) += bman_high.o -+obj-$(CONFIG_FSL_BMAN_CONFIG) += bman_config.o bman_driver.o -+obj-$(CONFIG_FSL_BMAN_TEST) += bman_tester.o -+obj-$(CONFIG_FSL_BMAN_DEBUGFS) += bman_debugfs_interface.o -+bman_tester-y = bman_test.o -+bman_tester-$(CONFIG_FSL_BMAN_TEST_HIGH) += bman_test_high.o -+bman_tester-$(CONFIG_FSL_BMAN_TEST_THRESH) += bman_test_thresh.o -+bman_debugfs_interface-y = bman_debugfs.o -+ -+# Qman -+obj-$(CONFIG_FSL_QMAN) += qman_high.o qman_utility.o -+obj-$(CONFIG_FSL_QMAN_CONFIG) += qman_config.o qman_driver.o -+obj-$(CONFIG_FSL_QMAN_TEST) += qman_tester.o -+qman_tester-y = qman_test.o -+qman_tester-$(CONFIG_FSL_QMAN_TEST_STASH_POTATO) += qman_test_hotpotato.o -+qman_tester-$(CONFIG_FSL_QMAN_TEST_HIGH) += qman_test_high.o -+obj-$(CONFIG_FSL_QMAN_DEBUGFS) += qman_debugfs_interface.o -+qman_debugfs_interface-y = qman_debugfs.o -+ -+# USDPAA -+obj-$(CONFIG_FSL_USDPAA) += fsl_usdpaa.o fsl_usdpaa_irq.o ---- /dev/null -+++ b/drivers/staging/fsl_qbman/bman_config.c -@@ -0,0 +1,705 @@ -+/* Copyright (c) 2009-2012 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include -+#include "bman_private.h" -+#include -+ -+/* Last updated for v00.79 of the BG */ -+ -+struct bman; -+ -+/* Register offsets */ -+#define REG_POOL_SWDET(n) (0x0000 + ((n) * 0x04)) -+#define REG_POOL_HWDET(n) (0x0100 + ((n) * 0x04)) -+#define REG_POOL_SWDXT(n) (0x0200 + ((n) * 0x04)) -+#define REG_POOL_HWDXT(n) (0x0300 + ((n) * 0x04)) -+#define REG_POOL_CONTENT(n) (0x0600 + ((n) * 0x04)) -+#define REG_FBPR_FPC 0x0800 -+#define REG_STATE_IDLE 0x960 -+#define REG_STATE_STOP 0x964 -+#define REG_ECSR 0x0a00 -+#define REG_ECIR 0x0a04 -+#define REG_EADR 0x0a08 -+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04)) -+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04)) -+#define REG_IP_REV_1 0x0bf8 -+#define REG_IP_REV_2 0x0bfc -+#define REG_FBPR_BARE 0x0c00 -+#define REG_FBPR_BAR 0x0c04 -+#define REG_FBPR_AR 0x0c10 -+#define REG_SRCIDR 0x0d04 -+#define REG_LIODNR 0x0d08 -+#define REG_ERR_ISR 0x0e00 /* + "enum bm_isr_reg" */ -+ -+/* Used by all error interrupt registers except 'inhibit' */ -+#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */ -+#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */ -+#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */ -+#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */ -+#define BM_EIRQ_BSCN 0x00000001 /* pool State Change Notification */ -+ -+/* BMAN_ECIR valid error bit */ -+#define PORTAL_ECSR_ERR (BM_EIRQ_IVCI) -+ -+union bman_ecir { -+ u32 ecir_raw; -+ struct { -+ u32 __reserved1:4; -+ u32 portal_num:4; -+ u32 __reserved2:12; -+ u32 numb:4; -+ u32 __reserved3:2; -+ u32 pid:6; -+ } __packed info; -+}; -+ -+union bman_eadr { -+ u32 eadr_raw; -+ struct { -+ u32 __reserved1:5; -+ u32 memid:3; -+ u32 __reserved2:14; -+ u32 eadr:10; -+ } __packed info; -+}; -+ -+struct bman_hwerr_txt { -+ u32 mask; -+ const char *txt; -+}; -+ -+#define BMAN_HWE_TXT(a, b) { .mask = BM_EIRQ_##a, .txt = b } -+ -+static const struct bman_hwerr_txt bman_hwerr_txts[] = { -+ BMAN_HWE_TXT(IVCI, "Invalid Command Verb"), -+ BMAN_HWE_TXT(FLWI, "FBPR Low Watermark"), -+ BMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"), -+ BMAN_HWE_TXT(SBEI, "Single-bit ECC Error"), -+ BMAN_HWE_TXT(BSCN, "Pool State Change Notification"), -+}; -+#define BMAN_HWE_COUNT (sizeof(bman_hwerr_txts)/sizeof(struct bman_hwerr_txt)) -+ -+struct bman_error_info_mdata { -+ u16 addr_mask; -+ u16 bits; -+ const char *txt; -+}; -+ -+#define BMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c} -+static const struct bman_error_info_mdata error_mdata[] = { -+ BMAN_ERR_MDATA(0x03FF, 192, "Stockpile memory"), -+ BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 1"), -+ BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 2"), -+}; -+#define BMAN_ERR_MDATA_COUNT \ -+ (sizeof(error_mdata)/sizeof(struct bman_error_info_mdata)) -+ -+/* Add this in Kconfig */ -+#define BMAN_ERRS_TO_UNENABLE (BM_EIRQ_FLWI) -+ -+/** -+ * bm_err_isr__ - Manipulate global interrupt registers -+ * @v: for accessors that write values, this is the 32-bit value -+ * -+ * Manipulates BMAN_ERR_ISR, BMAN_ERR_IER, BMAN_ERR_ISDR, BMAN_ERR_IIR. All -+ * manipulations except bm_err_isr_[un]inhibit() use 32-bit masks composed of -+ * the BM_EIRQ_*** definitions. Note that "bm_err_isr_enable_write" means -+ * "write the enable register" rather than "enable the write register"! 
-+ */ -+#define bm_err_isr_status_read(bm) \ -+ __bm_err_isr_read(bm, bm_isr_status) -+#define bm_err_isr_status_clear(bm, m) \ -+ __bm_err_isr_write(bm, bm_isr_status, m) -+#define bm_err_isr_enable_read(bm) \ -+ __bm_err_isr_read(bm, bm_isr_enable) -+#define bm_err_isr_enable_write(bm, v) \ -+ __bm_err_isr_write(bm, bm_isr_enable, v) -+#define bm_err_isr_disable_read(bm) \ -+ __bm_err_isr_read(bm, bm_isr_disable) -+#define bm_err_isr_disable_write(bm, v) \ -+ __bm_err_isr_write(bm, bm_isr_disable, v) -+#define bm_err_isr_inhibit(bm) \ -+ __bm_err_isr_write(bm, bm_isr_inhibit, 1) -+#define bm_err_isr_uninhibit(bm) \ -+ __bm_err_isr_write(bm, bm_isr_inhibit, 0) -+ -+/* -+ * TODO: unimplemented registers -+ * -+ * BMAN_POOLk_SDCNT, BMAN_POOLk_HDCNT, BMAN_FULT, -+ * BMAN_VLDPL, BMAN_EECC, BMAN_SBET, BMAN_EINJ -+ */ -+ -+/* Encapsulate "struct bman *" as a cast of the register space address. */ -+ -+static struct bman *bm_create(void *regs) -+{ -+ return (struct bman *)regs; -+} -+ -+static inline u32 __bm_in(struct bman *bm, u32 offset) -+{ -+ return in_be32((void *)bm + offset); -+} -+static inline void __bm_out(struct bman *bm, u32 offset, u32 val) -+{ -+ out_be32((void *)bm + offset, val); -+} -+#define bm_in(reg) __bm_in(bm, REG_##reg) -+#define bm_out(reg, val) __bm_out(bm, REG_##reg, val) -+ -+static u32 __bm_err_isr_read(struct bman *bm, enum bm_isr_reg n) -+{ -+ return __bm_in(bm, REG_ERR_ISR + (n << 2)); -+} -+ -+static void __bm_err_isr_write(struct bman *bm, enum bm_isr_reg n, u32 val) -+{ -+ __bm_out(bm, REG_ERR_ISR + (n << 2), val); -+} -+ -+static void bm_get_version(struct bman *bm, u16 *id, u8 *major, u8 *minor) -+{ -+ u32 v = bm_in(IP_REV_1); -+ *id = (v >> 16); -+ *major = (v >> 8) & 0xff; -+ *minor = v & 0xff; -+} -+ -+static u32 __generate_thresh(u32 val, int roundup) -+{ -+ u32 e = 0; /* co-efficient, exponent */ -+ int oddbit = 0; -+ while (val > 0xff) { -+ oddbit = val & 1; -+ val >>= 1; -+ e++; -+ if (roundup && oddbit) -+ val++; -+ } -+ DPA_ASSERT(e < 0x10); -+ return val | (e << 8); -+} -+ -+static void bm_set_pool(struct bman *bm, u8 pool, u32 swdet, u32 swdxt, -+ u32 hwdet, u32 hwdxt) -+{ -+ DPA_ASSERT(pool < bman_pool_max); -+ bm_out(POOL_SWDET(pool), __generate_thresh(swdet, 0)); -+ bm_out(POOL_SWDXT(pool), __generate_thresh(swdxt, 1)); -+ bm_out(POOL_HWDET(pool), __generate_thresh(hwdet, 0)); -+ bm_out(POOL_HWDXT(pool), __generate_thresh(hwdxt, 1)); -+} -+ -+static void bm_set_memory(struct bman *bm, u64 ba, int prio, u32 size) -+{ -+ u32 exp = ilog2(size); -+ /* choke if size isn't within range */ -+ DPA_ASSERT((size >= 4096) && (size <= 1073741824) && -+ is_power_of_2(size)); -+ /* choke if '[e]ba' has lower-alignment than 'size' */ -+ DPA_ASSERT(!(ba & (size - 1))); -+ bm_out(FBPR_BARE, upper_32_bits(ba)); -+ bm_out(FBPR_BAR, lower_32_bits(ba)); -+ bm_out(FBPR_AR, (prio ? 0x40000000 : 0) | (exp - 1)); -+} -+ -+/*****************/ -+/* Config driver */ -+/*****************/ -+ -+/* TODO: Kconfig these? */ -+#define DEFAULT_FBPR_SZ (PAGE_SIZE << 12) -+ -+/* We support only one of these. */ -+static struct bman *bm; -+static struct device_node *bm_node; -+ -+/* And this state belongs to 'bm'. It is set during fsl_bman_init(), but used -+ * during bman_init_ccsr(). 
*/ -+static dma_addr_t fbpr_a; -+static size_t fbpr_sz = DEFAULT_FBPR_SZ; -+ -+static int bman_fbpr(struct reserved_mem *rmem) -+{ -+ fbpr_a = rmem->base; -+ fbpr_sz = rmem->size; -+ -+ WARN_ON(!(fbpr_a && fbpr_sz)); -+ -+ return 0; -+} -+RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr); -+ -+static int __init fsl_bman_init(struct device_node *node) -+{ -+ struct resource res; -+ u32 __iomem *regs; -+ const char *s; -+ int ret, standby = 0; -+ u16 id; -+ u8 major, minor; -+ -+ ret = of_address_to_resource(node, 0, &res); -+ if (ret) { -+ pr_err("Can't get %s property 'reg'\n", -+ node->full_name); -+ return ret; -+ } -+ s = of_get_property(node, "fsl,hv-claimable", &ret); -+ if (s && !strcmp(s, "standby")) -+ standby = 1; -+ /* Global configuration */ -+ regs = ioremap(res.start, res.end - res.start + 1); -+ bm = bm_create(regs); -+ BUG_ON(!bm); -+ bm_node = node; -+ bm_get_version(bm, &id, &major, &minor); -+ pr_info("Bman ver:%04x,%02x,%02x\n", id, major, minor); -+ if ((major == 1) && (minor == 0)) { -+ bman_ip_rev = BMAN_REV10; -+ bman_pool_max = 64; -+ } else if ((major == 2) && (minor == 0)) { -+ bman_ip_rev = BMAN_REV20; -+ bman_pool_max = 8; -+ } else if ((major == 2) && (minor == 1)) { -+ bman_ip_rev = BMAN_REV21; -+ bman_pool_max = 64; -+ } else { -+ pr_warn("unknown Bman version, default to rev1.0\n"); -+ } -+ -+ if (standby) { -+ pr_info(" -> in standby mode\n"); -+ return 0; -+ } -+ return 0; -+} -+ -+int bman_have_ccsr(void) -+{ -+ return bm ? 1 : 0; -+} -+ -+int bm_pool_set(u32 bpid, const u32 *thresholds) -+{ -+ if (!bm) -+ return -ENODEV; -+ bm_set_pool(bm, bpid, thresholds[0], -+ thresholds[1], thresholds[2], -+ thresholds[3]); -+ return 0; -+} -+EXPORT_SYMBOL(bm_pool_set); -+ -+__init int bman_init_early(void) -+{ -+ struct device_node *dn; -+ int ret; -+ -+ for_each_compatible_node(dn, NULL, "fsl,bman") { -+ if (bm) -+ pr_err("%s: only one 'fsl,bman' allowed\n", -+ dn->full_name); -+ else { -+ if (!of_device_is_available(dn)) -+ continue; -+ -+ ret = fsl_bman_init(dn); -+ BUG_ON(ret); -+ } -+ } -+ return 0; -+} -+postcore_initcall_sync(bman_init_early); -+ -+ -+static void log_edata_bits(u32 bit_count) -+{ -+ u32 i, j, mask = 0xffffffff; -+ -+ pr_warn("Bman ErrInt, EDATA:\n"); -+ i = bit_count/32; -+ if (bit_count%32) { -+ i++; -+ mask = ~(mask << bit_count%32); -+ } -+ j = 16-i; -+ pr_warn(" 0x%08x\n", bm_in(EDATA(j)) & mask); -+ j++; -+ for (; j < 16; j++) -+ pr_warn(" 0x%08x\n", bm_in(EDATA(j))); -+} -+ -+static void log_additional_error_info(u32 isr_val, u32 ecsr_val) -+{ -+ union bman_ecir ecir_val; -+ union bman_eadr eadr_val; -+ -+ ecir_val.ecir_raw = bm_in(ECIR); -+ /* Is portal info valid */ -+ if (ecsr_val & PORTAL_ECSR_ERR) { -+ pr_warn("Bman ErrInt: SWP id %d, numb %d, pid %d\n", -+ ecir_val.info.portal_num, ecir_val.info.numb, -+ ecir_val.info.pid); -+ } -+ if (ecsr_val & (BM_EIRQ_SBEI|BM_EIRQ_MBEI)) { -+ eadr_val.eadr_raw = bm_in(EADR); -+ pr_warn("Bman ErrInt: EADR Memory: %s, 0x%x\n", -+ error_mdata[eadr_val.info.memid].txt, -+ error_mdata[eadr_val.info.memid].addr_mask -+ & eadr_val.info.eadr); -+ log_edata_bits(error_mdata[eadr_val.info.memid].bits); -+ } -+} -+ -+/* Bman interrupt handler */ -+static irqreturn_t bman_isr(int irq, void *ptr) -+{ -+ u32 isr_val, ier_val, ecsr_val, isr_mask, i; -+ -+ ier_val = bm_err_isr_enable_read(bm); -+ isr_val = bm_err_isr_status_read(bm); -+ ecsr_val = bm_in(ECSR); -+ isr_mask = isr_val & ier_val; -+ -+ if (!isr_mask) -+ return IRQ_NONE; -+ for (i = 0; i < BMAN_HWE_COUNT; i++) { -+ if 
(bman_hwerr_txts[i].mask & isr_mask) { -+ pr_warn("Bman ErrInt: %s\n", bman_hwerr_txts[i].txt); -+ if (bman_hwerr_txts[i].mask & ecsr_val) { -+ log_additional_error_info(isr_mask, ecsr_val); -+ /* Re-arm error capture registers */ -+ bm_out(ECSR, ecsr_val); -+ } -+ if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_UNENABLE) { -+ pr_devel("Bman un-enabling error 0x%x\n", -+ bman_hwerr_txts[i].mask); -+ ier_val &= ~bman_hwerr_txts[i].mask; -+ bm_err_isr_enable_write(bm, ier_val); -+ } -+ } -+ } -+ bm_err_isr_status_clear(bm, isr_val); -+ return IRQ_HANDLED; -+} -+ -+static int __bind_irq(void) -+{ -+ int ret, err_irq; -+ -+ err_irq = of_irq_to_resource(bm_node, 0, NULL); -+ if (err_irq == 0) { -+ pr_info("Can't get %s property '%s'\n", bm_node->full_name, -+ "interrupts"); -+ return -ENODEV; -+ } -+ ret = request_irq(err_irq, bman_isr, IRQF_SHARED, "bman-err", bm_node); -+ if (ret) { -+ pr_err("request_irq() failed %d for '%s'\n", ret, -+ bm_node->full_name); -+ return -ENODEV; -+ } -+ /* Disable Buffer Pool State Change */ -+ bm_err_isr_disable_write(bm, BM_EIRQ_BSCN); -+ /* Write-to-clear any stale bits, (eg. starvation being asserted prior -+ * to resource allocation during driver init). */ -+ bm_err_isr_status_clear(bm, 0xffffffff); -+ /* Enable Error Interrupts */ -+ bm_err_isr_enable_write(bm, 0xffffffff); -+ return 0; -+} -+ -+int bman_init_ccsr(struct device_node *node) -+{ -+ int ret; -+ if (!bman_have_ccsr()) -+ return 0; -+ if (node != bm_node) -+ return -EINVAL; -+ /* FBPR memory */ -+ bm_set_memory(bm, fbpr_a, 0, fbpr_sz); -+ pr_info("bman-fbpr addr 0x%llx size 0x%zx\n", -+ (unsigned long long)fbpr_a, fbpr_sz); -+ -+ ret = __bind_irq(); -+ if (ret) -+ return ret; -+ return 0; -+} -+ -+u32 bm_pool_free_buffers(u32 bpid) -+{ -+ return bm_in(POOL_CONTENT(bpid)); -+} -+ -+#ifdef CONFIG_SYSFS -+ -+#define DRV_NAME "fsl-bman" -+#define SBEC_MAX_ID 1 -+#define SBEC_MIN_ID 0 -+ -+static ssize_t show_fbpr_fpc(struct device *dev, -+ struct device_attribute *dev_attr, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(FBPR_FPC)); -+}; -+ -+static ssize_t show_pool_count(struct device *dev, -+ struct device_attribute *dev_attr, char *buf) -+{ -+ u32 data; -+ int i; -+ -+ if (!sscanf(dev_attr->attr.name, "%d", &i) || (i >= bman_pool_max)) -+ return -EINVAL; -+ data = bm_in(POOL_CONTENT(i)); -+ return snprintf(buf, PAGE_SIZE, "%d\n", data); -+}; -+ -+static ssize_t show_err_isr(struct device *dev, -+ struct device_attribute *dev_attr, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", bm_in(ERR_ISR)); -+}; -+ -+static ssize_t show_sbec(struct device *dev, -+ struct device_attribute *dev_attr, char *buf) -+{ -+ int i; -+ -+ if (!sscanf(dev_attr->attr.name, "sbec_%d", &i)) -+ return -EINVAL; -+ if (i < SBEC_MIN_ID || i > SBEC_MAX_ID) -+ return -EINVAL; -+ return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(SBEC(i))); -+}; -+ -+static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL); -+static DEVICE_ATTR(fbpr_fpc, S_IRUSR, show_fbpr_fpc, NULL); -+ -+/* Didn't use DEVICE_ATTR as 64 of this would be required. -+ * Initialize them when needed. 
*/ -+static char *name_attrs_pool_count; /* "xx" + null-terminator */ -+static struct device_attribute *dev_attr_buffer_pool_count; -+ -+static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL); -+static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL); -+ -+static struct attribute *bman_dev_attributes[] = { -+ &dev_attr_fbpr_fpc.attr, -+ &dev_attr_err_isr.attr, -+ NULL -+}; -+ -+static struct attribute *bman_dev_ecr_attributes[] = { -+ &dev_attr_sbec_0.attr, -+ &dev_attr_sbec_1.attr, -+ NULL -+}; -+ -+static struct attribute **bman_dev_pool_count_attributes; -+ -+ -+/* root level */ -+static const struct attribute_group bman_dev_attr_grp = { -+ .name = NULL, -+ .attrs = bman_dev_attributes -+}; -+static const struct attribute_group bman_dev_ecr_grp = { -+ .name = "error_capture", -+ .attrs = bman_dev_ecr_attributes -+}; -+static struct attribute_group bman_dev_pool_countent_grp = { -+ .name = "pool_count", -+}; -+ -+static int of_fsl_bman_remove(struct platform_device *ofdev) -+{ -+ sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp); -+ return 0; -+}; -+ -+static int of_fsl_bman_probe(struct platform_device *ofdev) -+{ -+ int ret, i; -+ -+ ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_attr_grp); -+ if (ret) -+ goto done; -+ ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_ecr_grp); -+ if (ret) -+ goto del_group_0; -+ -+ name_attrs_pool_count = kmalloc(sizeof(char) * bman_pool_max * 3, -+ GFP_KERNEL); -+ if (!name_attrs_pool_count) { -+ pr_err("Can't alloc name_attrs_pool_count\n"); -+ goto del_group_1; -+ } -+ -+ dev_attr_buffer_pool_count = kmalloc(sizeof(struct device_attribute) * -+ bman_pool_max, GFP_KERNEL); -+ if (!dev_attr_buffer_pool_count) { -+ pr_err("Can't alloc dev_attr-buffer_pool_count\n"); -+ goto del_group_2; -+ } -+ -+ bman_dev_pool_count_attributes = kmalloc(sizeof(struct attribute *) * -+ (bman_pool_max + 1), GFP_KERNEL); -+ if (!bman_dev_pool_count_attributes) { -+ pr_err("can't alloc bman_dev_pool_count_attributes\n"); -+ goto del_group_3; -+ } -+ -+ for (i = 0; i < bman_pool_max; i++) { -+ ret = scnprintf((name_attrs_pool_count + i * 3), 3, "%d", i); -+ if (!ret) -+ goto del_group_4; -+ dev_attr_buffer_pool_count[i].attr.name = -+ (name_attrs_pool_count + i * 3); -+ dev_attr_buffer_pool_count[i].attr.mode = S_IRUSR; -+ dev_attr_buffer_pool_count[i].show = show_pool_count; -+ bman_dev_pool_count_attributes[i] = -+ &dev_attr_buffer_pool_count[i].attr; -+ sysfs_attr_init(bman_dev_pool_count_attributes[i]); -+ } -+ bman_dev_pool_count_attributes[bman_pool_max] = NULL; -+ -+ bman_dev_pool_countent_grp.attrs = bman_dev_pool_count_attributes; -+ -+ ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_pool_countent_grp); -+ if (ret) -+ goto del_group_4; -+ -+ goto done; -+ -+del_group_4: -+ kfree(bman_dev_pool_count_attributes); -+del_group_3: -+ kfree(dev_attr_buffer_pool_count); -+del_group_2: -+ kfree(name_attrs_pool_count); -+del_group_1: -+ sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_ecr_grp); -+del_group_0: -+ sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp); -+done: -+ if (ret) -+ dev_err(&ofdev->dev, -+ "Cannot create dev attributes ret=%d\n", ret); -+ return ret; -+}; -+ -+static struct of_device_id of_fsl_bman_ids[] = { -+ { -+ .compatible = "fsl,bman", -+ }, -+ {} -+}; -+MODULE_DEVICE_TABLE(of, of_fsl_bman_ids); -+ -+#ifdef CONFIG_SUSPEND -+static u32 saved_isdr; -+ -+static int bman_pm_suspend_noirq(struct device *dev) -+{ -+ uint32_t idle_state; -+ -+ suspend_unused_bportal(); -+ /* save isdr, disable all, clear isr */ -+ saved_isdr = 
bm_err_isr_disable_read(bm); -+ bm_err_isr_disable_write(bm, 0xffffffff); -+ bm_err_isr_status_clear(bm, 0xffffffff); -+ -+ if (bman_ip_rev < BMAN_REV21) { -+#ifdef CONFIG_PM_DEBUG -+ pr_info("Bman version doesn't have STATE_IDLE\n"); -+#endif -+ return 0; -+ } -+ idle_state = bm_in(STATE_IDLE); -+ if (!(idle_state & 0x1)) { -+ pr_err("Bman not idle 0x%x aborting\n", idle_state); -+ bm_err_isr_disable_write(bm, saved_isdr); -+ resume_unused_bportal(); -+ return -EBUSY; -+ } -+#ifdef CONFIG_PM_DEBUG -+ pr_info("Bman suspend code, IDLE_STAT = 0x%x\n", idle_state); -+#endif -+ return 0; -+} -+ -+static int bman_pm_resume_noirq(struct device *dev) -+{ -+ /* restore isdr */ -+ bm_err_isr_disable_write(bm, saved_isdr); -+ resume_unused_bportal(); -+ return 0; -+} -+#else -+#define bman_pm_suspend_noirq NULL -+#define bman_pm_resume_noirq NULL -+#endif -+ -+static const struct dev_pm_ops bman_pm_ops = { -+ .suspend_noirq = bman_pm_suspend_noirq, -+ .resume_noirq = bman_pm_resume_noirq, -+}; -+ -+static struct platform_driver of_fsl_bman_driver = { -+ .driver = { -+ .owner = THIS_MODULE, -+ .name = DRV_NAME, -+ .of_match_table = of_fsl_bman_ids, -+ .pm = &bman_pm_ops, -+ }, -+ .probe = of_fsl_bman_probe, -+ .remove = of_fsl_bman_remove, -+}; -+ -+static int bman_ctrl_init(void) -+{ -+ return platform_driver_register(&of_fsl_bman_driver); -+} -+ -+static void bman_ctrl_exit(void) -+{ -+ platform_driver_unregister(&of_fsl_bman_driver); -+} -+ -+module_init(bman_ctrl_init); -+module_exit(bman_ctrl_exit); -+ -+#endif /* CONFIG_SYSFS */ ---- /dev/null -+++ b/drivers/staging/fsl_qbman/bman_debugfs.c -@@ -0,0 +1,119 @@ -+/* Copyright 2010-2011 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#include -+#include -+#include -+#include -+#include -+ -+static struct dentry *dfs_root; /* debugfs root directory */ -+ -+/******************************************************************************* -+ * Query Buffer Pool State -+ ******************************************************************************/ -+static int query_bp_state_show(struct seq_file *file, void *offset) -+{ -+ int ret; -+ struct bm_pool_state state; -+ int i, j; -+ u32 mask; -+ -+ memset(&state, 0, sizeof(struct bm_pool_state)); -+ ret = bman_query_pools(&state); -+ if (ret) { -+ seq_printf(file, "Error %d\n", ret); -+ return 0; -+ } -+ seq_puts(file, "bp_id free_buffers_avail bp_depleted\n"); -+ for (i = 0; i < 2; i++) { -+ mask = 0x80000000; -+ for (j = 0; j < 32; j++) { -+ seq_printf(file, -+ " %-2u %-3s %-3s\n", -+ (i*32)+j, -+ (state.as.state.__state[i] & mask) ? "no" : "yes", -+ (state.ds.state.__state[i] & mask) ? "yes" : "no"); -+ mask >>= 1; -+ } -+ } -+ return 0; -+} -+ -+static int query_bp_state_open(struct inode *inode, struct file *file) -+{ -+ return single_open(file, query_bp_state_show, NULL); -+} -+ -+static const struct file_operations query_bp_state_fops = { -+ .owner = THIS_MODULE, -+ .open = query_bp_state_open, -+ .read = seq_read, -+ .release = single_release, -+}; -+ -+static int __init bman_debugfs_module_init(void) -+{ -+ int ret = 0; -+ struct dentry *d; -+ -+ dfs_root = debugfs_create_dir("bman", NULL); -+ -+ if (dfs_root == NULL) { -+ ret = -ENOMEM; -+ pr_err("Cannot create bman debugfs dir\n"); -+ goto _return; -+ } -+ d = debugfs_create_file("query_bp_state", -+ S_IRUGO, -+ dfs_root, -+ NULL, -+ &query_bp_state_fops); -+ if (d == NULL) { -+ ret = -ENOMEM; -+ pr_err("Cannot create query_bp_state\n"); -+ goto _return; -+ } -+ return 0; -+ -+_return: -+ debugfs_remove_recursive(dfs_root); -+ return ret; -+} -+ -+static void __exit bman_debugfs_module_exit(void) -+{ -+ debugfs_remove_recursive(dfs_root); -+} -+ -+ -+module_init(bman_debugfs_module_init); -+module_exit(bman_debugfs_module_exit); -+MODULE_LICENSE("Dual BSD/GPL"); ---- /dev/null -+++ b/drivers/staging/fsl_qbman/bman_driver.c -@@ -0,0 +1,574 @@ -+/* Copyright 2008-2012 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include "bman_low.h" -+#ifdef CONFIG_HOTPLUG_CPU -+#include -+#endif -+/* -+ * Global variables of the max portal/pool number this bman version supported -+ */ -+u16 bman_ip_rev; -+EXPORT_SYMBOL(bman_ip_rev); -+u16 bman_pool_max; -+EXPORT_SYMBOL(bman_pool_max); -+static u16 bman_portal_max; -+ -+/* After initialising cpus that own shared portal configs, we cache the -+ * resulting portals (ie. not just the configs) in this array. Then we -+ * initialise slave cpus that don't have their own portals, redirecting them to -+ * portals from this cache in a round-robin assignment. */ -+static struct bman_portal *shared_portals[NR_CPUS]; -+static int num_shared_portals; -+static int shared_portals_idx; -+static LIST_HEAD(unused_pcfgs); -+static DEFINE_SPINLOCK(unused_pcfgs_lock); -+static void *affine_bportals[NR_CPUS]; -+ -+static int __init fsl_bpool_init(struct device_node *node) -+{ -+ int ret; -+ u32 *thresh, *bpid = (u32 *)of_get_property(node, "fsl,bpid", &ret); -+ if (!bpid || (ret != 4)) { -+ pr_err("Can't get %s property 'fsl,bpid'\n", node->full_name); -+ return -ENODEV; -+ } -+ thresh = (u32 *)of_get_property(node, "fsl,bpool-thresholds", &ret); -+ if (thresh) { -+ if (ret != 16) { -+ pr_err("Invalid %s property '%s'\n", -+ node->full_name, "fsl,bpool-thresholds"); -+ return -ENODEV; -+ } -+ } -+ if (thresh) { -+#ifdef CONFIG_FSL_BMAN_CONFIG -+ ret = bm_pool_set(be32_to_cpu(*bpid), thresh); -+ if (ret) -+ pr_err("No CCSR node for %s property '%s'\n", -+ node->full_name, "fsl,bpool-thresholds"); -+ return ret; -+#else -+ pr_err("Ignoring %s property '%s', no CCSR support\n", -+ node->full_name, "fsl,bpool-thresholds"); -+#endif -+ } -+ return 0; -+} -+ -+static int __init fsl_bpid_range_init(struct device_node *node) -+{ -+ int ret; -+ u32 *range = (u32 *)of_get_property(node, "fsl,bpid-range", &ret); -+ if (!range) { -+ pr_err("No 'fsl,bpid-range' property in node %s\n", -+ node->full_name); -+ return -EINVAL; -+ } -+ if (ret != 8) { -+ pr_err("'fsl,bpid-range' is not a 2-cell range in node %s\n", -+ node->full_name); -+ return -EINVAL; -+ } -+ bman_seed_bpid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1])); -+ pr_info("Bman: BPID allocator includes range %d:%d\n", -+ be32_to_cpu(range[0]), be32_to_cpu(range[1])); -+ return 0; -+} -+ -+static struct bm_portal_config * __init parse_pcfg(struct device_node *node) -+{ -+ struct bm_portal_config *pcfg; -+ const u32 *index; -+ int irq, ret; -+ resource_size_t len; -+ -+ pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL); -+ if (!pcfg) { -+ pr_err("can't allocate portal config"); -+ return NULL; -+ } -+ -+ if (of_device_is_compatible(node, "fsl,bman-portal-1.0") || -+ of_device_is_compatible(node, "fsl,bman-portal-1.0.0")) { -+ bman_ip_rev = BMAN_REV10; -+ bman_pool_max = 64; -+ bman_portal_max = 10; -+ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.0") || -+ of_device_is_compatible(node, "fsl,bman-portal-2.0.8")) { -+ bman_ip_rev = BMAN_REV20; -+ bman_pool_max = 8; -+ bman_portal_max = 3; -+ } else if 
(of_device_is_compatible(node, "fsl,bman-portal-2.1.0")) { -+ bman_ip_rev = BMAN_REV21; -+ bman_pool_max = 64; -+ bman_portal_max = 50; -+ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.1")) { -+ bman_ip_rev = BMAN_REV21; -+ bman_pool_max = 64; -+ bman_portal_max = 25; -+ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.2")) { -+ bman_ip_rev = BMAN_REV21; -+ bman_pool_max = 64; -+ bman_portal_max = 18; -+ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.3")) { -+ bman_ip_rev = BMAN_REV21; -+ bman_pool_max = 64; -+ bman_portal_max = 10; -+ } else { -+ pr_warn("unknown BMan version in portal node," -+ "default to rev1.0\n"); -+ bman_ip_rev = BMAN_REV10; -+ bman_pool_max = 64; -+ bman_portal_max = 10; -+ } -+ -+ ret = of_address_to_resource(node, DPA_PORTAL_CE, -+ &pcfg->addr_phys[DPA_PORTAL_CE]); -+ if (ret) { -+ pr_err("Can't get %s property 'reg::CE'\n", node->full_name); -+ goto err; -+ } -+ ret = of_address_to_resource(node, DPA_PORTAL_CI, -+ &pcfg->addr_phys[DPA_PORTAL_CI]); -+ if (ret) { -+ pr_err("Can't get %s property 'reg::CI'\n", node->full_name); -+ goto err; -+ } -+ -+ index = of_get_property(node, "cell-index", &ret); -+ if (!index || (ret != 4)) { -+ pr_err("Can't get %s property '%s'\n", node->full_name, -+ "cell-index"); -+ goto err; -+ } -+ if (be32_to_cpu(*index) >= bman_portal_max) { -+ pr_err("BMan portal cell index %d out of range, max %d\n", -+ be32_to_cpu(*index), bman_portal_max); -+ goto err; -+ } -+ -+ pcfg->public_cfg.cpu = -1; -+ -+ irq = irq_of_parse_and_map(node, 0); -+ if (irq == 0) { -+ pr_err("Can't get %s property 'interrupts'\n", node->full_name); -+ goto err; -+ } -+ pcfg->public_cfg.irq = irq; -+ pcfg->public_cfg.index = be32_to_cpu(*index); -+ bman_depletion_fill(&pcfg->public_cfg.mask); -+ -+ len = resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]); -+ if (len != (unsigned long)len) -+ goto err; -+ -+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) -+ pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_cache_ns( -+ pcfg->addr_phys[DPA_PORTAL_CE].start, -+ resource_size(&pcfg->addr_phys[DPA_PORTAL_CE])); -+ pcfg->addr_virt[DPA_PORTAL_CI] = ioremap( -+ pcfg->addr_phys[DPA_PORTAL_CI].start, -+ resource_size(&pcfg->addr_phys[DPA_PORTAL_CI])); -+ -+#else -+ pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot( -+ pcfg->addr_phys[DPA_PORTAL_CE].start, -+ (unsigned long)len, -+ 0); -+ pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot( -+ pcfg->addr_phys[DPA_PORTAL_CI].start, -+ resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]), -+ _PAGE_GUARDED | _PAGE_NO_CACHE); -+#endif -+ /* disable bp depletion */ -+ __raw_writel(0x0, pcfg->addr_virt[DPA_PORTAL_CI] + BM_REG_SCN(0)); -+ __raw_writel(0x0, pcfg->addr_virt[DPA_PORTAL_CI] + BM_REG_SCN(1)); -+ return pcfg; -+err: -+ kfree(pcfg); -+ return NULL; -+} -+ -+static struct bm_portal_config *get_pcfg(struct list_head *list) -+{ -+ struct bm_portal_config *pcfg; -+ if (list_empty(list)) -+ return NULL; -+ pcfg = list_entry(list->prev, struct bm_portal_config, list); -+ list_del(&pcfg->list); -+ return pcfg; -+} -+ -+static struct bm_portal_config *get_pcfg_idx(struct list_head *list, -+ uint32_t idx) -+{ -+ struct bm_portal_config *pcfg; -+ if (list_empty(list)) -+ return NULL; -+ list_for_each_entry(pcfg, list, list) { -+ if (pcfg->public_cfg.index == idx) { -+ list_del(&pcfg->list); -+ return pcfg; -+ } -+ } -+ return NULL; -+} -+ -+struct bm_portal_config *bm_get_unused_portal(void) -+{ -+ return bm_get_unused_portal_idx(QBMAN_ANY_PORTAL_IDX); -+} -+ -+struct bm_portal_config 
*bm_get_unused_portal_idx(uint32_t idx) -+{ -+ struct bm_portal_config *ret; -+ spin_lock(&unused_pcfgs_lock); -+ if (idx == QBMAN_ANY_PORTAL_IDX) -+ ret = get_pcfg(&unused_pcfgs); -+ else -+ ret = get_pcfg_idx(&unused_pcfgs, idx); -+ spin_unlock(&unused_pcfgs_lock); -+ return ret; -+} -+ -+void bm_put_unused_portal(struct bm_portal_config *pcfg) -+{ -+ spin_lock(&unused_pcfgs_lock); -+ list_add(&pcfg->list, &unused_pcfgs); -+ spin_unlock(&unused_pcfgs_lock); -+} -+ -+static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg) -+{ -+ struct bman_portal *p; -+ p = bman_create_affine_portal(pcfg); -+ if (p) { -+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW -+ bman_p_irqsource_add(p, BM_PIRQ_RCRI | BM_PIRQ_BSCN); -+#endif -+ pr_info("Bman portal %sinitialised, cpu %d\n", -+ pcfg->public_cfg.is_shared ? "(shared) " : "", -+ pcfg->public_cfg.cpu); -+ affine_bportals[pcfg->public_cfg.cpu] = p; -+ } else -+ pr_crit("Bman portal failure on cpu %d\n", -+ pcfg->public_cfg.cpu); -+ return p; -+} -+ -+static void init_slave(int cpu) -+{ -+ struct bman_portal *p; -+ p = bman_create_affine_slave(shared_portals[shared_portals_idx++], cpu); -+ if (!p) -+ pr_err("Bman slave portal failure on cpu %d\n", cpu); -+ else -+ pr_info("Bman portal %sinitialised, cpu %d\n", "(slave) ", cpu); -+ if (shared_portals_idx >= num_shared_portals) -+ shared_portals_idx = 0; -+ affine_bportals[cpu] = p; -+} -+ -+/* Bootarg "bportals=[...]" has the same syntax as "qportals=", and so the -+ * parsing is in dpa_sys.h. The syntax is a comma-separated list of indexes -+ * and/or ranges of indexes, with each being optionally prefixed by "s" to -+ * explicitly mark it or them for sharing. -+ * Eg; -+ * bportals=s0,1-3,s4 -+ * means that cpus 1,2,3 get "unshared" portals, cpus 0 and 4 get "shared" -+ * portals, and any remaining cpus share the portals that are assigned to cpus 0 -+ * or 4, selected in a round-robin fashion. (In this example, cpu 5 would share -+ * cpu 0's portal, cpu 6 would share cpu4's portal, and cpu 7 would share cpu -+ * 0's portal.) 
*/ -+static struct cpumask want_unshared __initdata; /* cpus requested without "s" */ -+static struct cpumask want_shared __initdata; /* cpus requested with "s" */ -+ -+static int __init parse_bportals(char *str) -+{ -+ return parse_portals_bootarg(str, &want_shared, &want_unshared, -+ "bportals"); -+} -+__setup("bportals=", parse_bportals); -+ -+static void bman_offline_cpu(unsigned int cpu) -+{ -+ struct bman_portal *p; -+ const struct bm_portal_config *pcfg; -+ p = (struct bman_portal *)affine_bportals[cpu]; -+ if (p) { -+ pcfg = bman_get_bm_portal_config(p); -+ if (pcfg) -+ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0)); -+ } -+} -+ -+#ifdef CONFIG_HOTPLUG_CPU -+static void bman_online_cpu(unsigned int cpu) -+{ -+ struct bman_portal *p; -+ const struct bm_portal_config *pcfg; -+ p = (struct bman_portal *)affine_bportals[cpu]; -+ if (p) { -+ pcfg = bman_get_bm_portal_config(p); -+ if (pcfg) -+ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu)); -+ } -+} -+ -+static int bman_hotplug_cpu_callback(struct notifier_block *nfb, -+ unsigned long action, void *hcpu) -+{ -+ unsigned int cpu = (unsigned long)hcpu; -+ -+ switch (action) { -+ case CPU_ONLINE: -+ case CPU_ONLINE_FROZEN: -+ bman_online_cpu(cpu); -+ break; -+ case CPU_DOWN_PREPARE: -+ case CPU_DOWN_PREPARE_FROZEN: -+ bman_offline_cpu(cpu); -+ default: -+ break; -+ } -+ return NOTIFY_OK; -+} -+ -+static struct notifier_block bman_hotplug_cpu_notifier = { -+ .notifier_call = bman_hotplug_cpu_callback, -+}; -+#endif /* CONFIG_HOTPLUG_CPU */ -+ -+/* Initialise the Bman driver. The meat of this function deals with portals. The -+ * following describes the flow of portal-handling, the code "steps" refer to -+ * this description; -+ * 1. Portal configs are parsed from the device-tree into 'unused_pcfgs', with -+ * ::cpu==-1. Regions and interrupts are mapped (but interrupts are not -+ * bound). -+ * 2. The "want_shared" and "want_unshared" lists (as filled by the -+ * "bportals=[...]" bootarg) are processed, allocating portals and assigning -+ * them to cpus, placing them in the relevant list and setting ::cpu as -+ * appropriate. If no "bportals" bootarg was present, the default is to try to -+ * assign portals to all online cpus at the time of driver initialisation. -+ * Any failure to allocate portals (when parsing the "want" lists or when -+ * using default behaviour) will be silently tolerated (the "fixup" logic in -+ * step 3 will determine what happens in this case). -+ * 3. Do fixups relative to cpu_online_mask(). If no portals are marked for -+ * sharing and sharing is required (because not all cpus have been assigned -+ * portals), then one portal will be marked for sharing. Conversely if no -+ * sharing is required, any portals marked for sharing will not be shared. It -+ * may be that sharing occurs when it wasn't expected, if portal allocation -+ * failed to honour all the requested assignments (including the default -+ * assignments if no bootarg is present). -+ * 4. Unshared portals are initialised on their respective cpus. -+ * 5. Shared portals are initialised on their respective cpus. -+ * 6. Each remaining cpu is initialised to slave to one of the shared portals, -+ * which are selected in a round-robin fashion. -+ * Any portal configs left unused are available for USDPAA allocation.
-+ */ -+__init int bman_init(void) -+{ -+ struct cpumask slave_cpus; -+ struct cpumask unshared_cpus = *cpu_none_mask; -+ struct cpumask shared_cpus = *cpu_none_mask; -+ LIST_HEAD(unshared_pcfgs); -+ LIST_HEAD(shared_pcfgs); -+ struct device_node *dn; -+ struct bm_portal_config *pcfg; -+ struct bman_portal *p; -+ int cpu, ret; -+ struct cpumask offline_cpus; -+ -+ /* Initialise the Bman (CCSR) device */ -+ for_each_compatible_node(dn, NULL, "fsl,bman") { -+ if (!bman_init_ccsr(dn)) -+ pr_info("Bman err interrupt handler present\n"); -+ else -+ pr_err("Bman CCSR setup failed\n"); -+ } -+ /* Initialise any declared buffer pools */ -+ for_each_compatible_node(dn, NULL, "fsl,bpool") { -+ ret = fsl_bpool_init(dn); -+ if (ret) -+ return ret; -+ } -+ /* Step 1. See comments at the beginning of the file. */ -+ for_each_compatible_node(dn, NULL, "fsl,bman-portal") { -+ if (!of_device_is_available(dn)) -+ continue; -+ pcfg = parse_pcfg(dn); -+ if (pcfg) -+ list_add_tail(&pcfg->list, &unused_pcfgs); -+ } -+ /* Step 2. */ -+ for_each_possible_cpu(cpu) { -+ if (cpumask_test_cpu(cpu, &want_shared)) { -+ pcfg = get_pcfg(&unused_pcfgs); -+ if (!pcfg) -+ break; -+ pcfg->public_cfg.cpu = cpu; -+ list_add_tail(&pcfg->list, &shared_pcfgs); -+ cpumask_set_cpu(cpu, &shared_cpus); -+ } -+ if (cpumask_test_cpu(cpu, &want_unshared)) { -+ if (cpumask_test_cpu(cpu, &shared_cpus)) -+ continue; -+ pcfg = get_pcfg(&unused_pcfgs); -+ if (!pcfg) -+ break; -+ pcfg->public_cfg.cpu = cpu; -+ list_add_tail(&pcfg->list, &unshared_pcfgs); -+ cpumask_set_cpu(cpu, &unshared_cpus); -+ } -+ } -+ if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) { -+ /* Default, give an unshared portal to each online cpu */ -+ for_each_online_cpu(cpu) { -+ pcfg = get_pcfg(&unused_pcfgs); -+ if (!pcfg) -+ break; -+ pcfg->public_cfg.cpu = cpu; -+ list_add_tail(&pcfg->list, &unshared_pcfgs); -+ cpumask_set_cpu(cpu, &unshared_cpus); -+ } -+ } -+ /* Step 3. */ -+ cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus); -+ cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus); -+ if (cpumask_empty(&slave_cpus)) { -+ /* No sharing required */ -+ if (!list_empty(&shared_pcfgs)) { -+ /* Migrate "shared" to "unshared" */ -+ cpumask_or(&unshared_cpus, &unshared_cpus, -+ &shared_cpus); -+ cpumask_clear(&shared_cpus); -+ list_splice_tail(&shared_pcfgs, &unshared_pcfgs); -+ INIT_LIST_HEAD(&shared_pcfgs); -+ } -+ } else { -+ /* Sharing required */ -+ if (list_empty(&shared_pcfgs)) { -+ /* Migrate one "unshared" to "shared" */ -+ pcfg = get_pcfg(&unshared_pcfgs); -+ if (!pcfg) { -+ pr_crit("No BMan portals available!\n"); -+ return 0; -+ } -+ cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus); -+ cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus); -+ list_add_tail(&pcfg->list, &shared_pcfgs); -+ } -+ } -+ /* Step 4. */ -+ list_for_each_entry(pcfg, &unshared_pcfgs, list) { -+ pcfg->public_cfg.is_shared = 0; -+ p = init_pcfg(pcfg); -+ if (!p) { -+ pr_crit("Unable to initialize bman portal\n"); -+ return 0; -+ } -+ } -+ /* Step 5. */ -+ list_for_each_entry(pcfg, &shared_pcfgs, list) { -+ pcfg->public_cfg.is_shared = 1; -+ p = init_pcfg(pcfg); -+ if (p) -+ shared_portals[num_shared_portals++] = p; -+ } -+ /* Step 6. 
*/ -+ if (!cpumask_empty(&slave_cpus)) -+ for_each_cpu(cpu, &slave_cpus) -+ init_slave(cpu); -+ pr_info("Bman portals initialised\n"); -+ cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask); -+ for_each_cpu(cpu, &offline_cpus) -+ bman_offline_cpu(cpu); -+#ifdef CONFIG_HOTPLUG_CPU -+ register_hotcpu_notifier(&bman_hotplug_cpu_notifier); -+#endif -+ return 0; -+} -+ -+__init int bman_resource_init(void) -+{ -+ struct device_node *dn; -+ int ret; -+ -+ /* Initialise BPID allocation ranges */ -+ for_each_compatible_node(dn, NULL, "fsl,bpid-range") { -+ ret = fsl_bpid_range_init(dn); -+ if (ret) -+ return ret; -+ } -+ return 0; -+} -+ -+#ifdef CONFIG_SUSPEND -+void suspend_unused_bportal(void) -+{ -+ struct bm_portal_config *pcfg; -+ -+ if (list_empty(&unused_pcfgs)) -+ return; -+ -+ list_for_each_entry(pcfg, &unused_pcfgs, list) { -+#ifdef CONFIG_PM_DEBUG -+ pr_info("Need to save bportal %d\n", pcfg->public_cfg.index); -+#endif -+ /* save isdr, disable all via isdr, clear isr */ -+ pcfg->saved_isdr = -+ __raw_readl(pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08); -+ __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] + -+ 0xe08); -+ __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] + -+ 0xe00); -+ } -+ return; -+} -+ -+void resume_unused_bportal(void) -+{ -+ struct bm_portal_config *pcfg; -+ -+ if (list_empty(&unused_pcfgs)) -+ return; -+ -+ list_for_each_entry(pcfg, &unused_pcfgs, list) { -+#ifdef CONFIG_PM_DEBUG -+ pr_info("Need to resume bportal %d\n", pcfg->public_cfg.index); -+#endif -+ /* restore isdr */ -+ __raw_writel(pcfg->saved_isdr, -+ pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08); -+ } -+ return; -+} -+#endif ---- /dev/null -+++ b/drivers/staging/fsl_qbman/bman_high.c -@@ -0,0 +1,1141 @@ -+/* Copyright 2008-2012 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
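
The save/disable/clear ordering used by suspend_unused_bportal() above, and the matching restore, can be shown with plain memory in place of the portal's cache-inhibited registers. A toy model, assuming only the offsets given in the driver (ISR at +0xe00, ISDR at +0xe08):

#include <stdint.h>
#include <stdio.h>

#define ISR_IDX  (0xe00 / 4)
#define ISDR_IDX (0xe08 / 4)

static uint32_t regs[0x1000 / 4];	/* fake cache-inhibited region */

static uint32_t suspend_portal(void)
{
	uint32_t saved_isdr = regs[ISDR_IDX];	/* save isdr */

	regs[ISDR_IDX] = 0xffffffff;	/* disable all sources via isdr */
	regs[ISR_IDX] = 0;		/* write-to-clear modelled as zeroing */
	return saved_isdr;
}

static void resume_portal(uint32_t saved_isdr)
{
	regs[ISDR_IDX] = saved_isdr;	/* restore the pre-suspend mask */
}

int main(void)
{
	uint32_t saved;

	regs[ISDR_IDX] = 0x3;	/* pretend two sources were disabled */
	saved = suspend_portal();
	resume_portal(saved);
	printf("isdr restored to %#x\n", regs[ISDR_IDX]);
	return 0;
}
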
-+ */ -+ -+#include "bman_low.h" -+ -+/* Compilation constants */ -+#define RCR_THRESH 2 /* reread h/w CI when running out of space */ -+#define IRQNAME "BMan portal %d" -+#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */ -+ -+struct bman_portal { -+ struct bm_portal p; -+ /* 2-element array. pools[0] is mask, pools[1] is snapshot. */ -+ struct bman_depletion *pools; -+ int thresh_set; -+ unsigned long irq_sources; -+ u32 slowpoll; /* only used when interrupts are off */ -+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC -+ struct bman_pool *rcri_owned; /* only 1 release WAIT_SYNC at a time */ -+#endif -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+ raw_spinlock_t sharing_lock; /* only used if is_shared */ -+ int is_shared; -+ struct bman_portal *sharing_redirect; -+#endif -+ /* When the cpu-affine portal is activated, this is non-NULL */ -+ const struct bm_portal_config *config; -+ /* This is needed for power management */ -+ struct platform_device *pdev; -+ /* 64-entry hash-table of pool objects that are tracking depletion -+ * entry/exit (ie. BMAN_POOL_FLAG_DEPLETION). This isn't fast-path, so -+ * we're not fussy about cache-misses and so forth - whereas the above -+ * members should all fit in one cacheline. -+ * BTW, with 64 entries in the hash table and 64 buffer pools to track, -+ * you'll never guess the hash-function ... */ -+ struct bman_pool *cb[64]; -+ char irqname[MAX_IRQNAME]; -+ /* Track if the portal was alloced by the driver */ -+ u8 alloced; -+ /* power management data */ -+ u32 save_isdr; -+}; -+ -+/* For an explanation of the locking, redirection, or affine-portal logic, -+ * please consult the Qman driver for details. This is the same, only simpler -+ * (no fiddly Qman-specific bits.) */ -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+#define PORTAL_IRQ_LOCK(p, irqflags) \ -+ do { \ -+ if ((p)->is_shared) \ -+ raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \ -+ else \ -+ local_irq_save(irqflags); \ -+ } while (0) -+#define PORTAL_IRQ_UNLOCK(p, irqflags) \ -+ do { \ -+ if ((p)->is_shared) \ -+ raw_spin_unlock_irqrestore(&(p)->sharing_lock, \ -+ irqflags); \ -+ else \ -+ local_irq_restore(irqflags); \ -+ } while (0) -+#else -+#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags) -+#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags) -+#endif -+ -+static cpumask_t affine_mask; -+static DEFINE_SPINLOCK(affine_mask_lock); -+static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal); -+static inline struct bman_portal *get_raw_affine_portal(void) -+{ -+ return &get_cpu_var(bman_affine_portal); -+} -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+static inline struct bman_portal *get_affine_portal(void) -+{ -+ struct bman_portal *p = get_raw_affine_portal(); -+ if (p->sharing_redirect) -+ return p->sharing_redirect; -+ return p; -+} -+#else -+#define get_affine_portal() get_raw_affine_portal() -+#endif -+static inline void put_affine_portal(void) -+{ -+ put_cpu_var(bman_affine_portal); -+} -+static inline struct bman_portal *get_poll_portal(void) -+{ -+ return &get_cpu_var(bman_affine_portal); -+} -+#define put_poll_portal() -+ -+/* GOTCHA: this object type refers to a pool, it isn't *the* pool. There may be -+ * more than one such object per Bman buffer pool, eg. if different users of the -+ * pool are operating via different portals. */ -+struct bman_pool { -+ struct bman_pool_params params; -+ /* Used for hash-table admin when using depletion notifications. 
*/ -+ struct bman_portal *portal; -+ struct bman_pool *next; -+ /* stockpile state - NULL unless BMAN_POOL_FLAG_STOCKPILE is set */ -+ struct bm_buffer *sp; -+ unsigned int sp_fill; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ atomic_t in_use; -+#endif -+}; -+ -+/* (De)Registration of depletion notification callbacks */ -+static void depletion_link(struct bman_portal *portal, struct bman_pool *pool) -+{ -+ __maybe_unused unsigned long irqflags; -+ pool->portal = portal; -+ PORTAL_IRQ_LOCK(portal, irqflags); -+ pool->next = portal->cb[pool->params.bpid]; -+ portal->cb[pool->params.bpid] = pool; -+ if (!pool->next) -+ /* First object for that bpid on this portal, enable the BSCN -+ * mask bit. */ -+ bm_isr_bscn_mask(&portal->p, pool->params.bpid, 1); -+ PORTAL_IRQ_UNLOCK(portal, irqflags); -+} -+static void depletion_unlink(struct bman_pool *pool) -+{ -+ struct bman_pool *it, *last = NULL; -+ struct bman_pool **base = &pool->portal->cb[pool->params.bpid]; -+ __maybe_unused unsigned long irqflags; -+ PORTAL_IRQ_LOCK(pool->portal, irqflags); -+ it = *base; /* <-- gotcha, don't do this prior to the irq_save */ -+ while (it != pool) { -+ last = it; -+ it = it->next; -+ } -+ if (!last) -+ *base = pool->next; -+ else -+ last->next = pool->next; -+ if (!last && !pool->next) { -+ /* Last object for that bpid on this portal, disable the BSCN -+ * mask bit. */ -+ bm_isr_bscn_mask(&pool->portal->p, pool->params.bpid, 0); -+ /* And "forget" that we last saw this pool as depleted */ -+ bman_depletion_unset(&pool->portal->pools[1], -+ pool->params.bpid); -+ } -+ PORTAL_IRQ_UNLOCK(pool->portal, irqflags); -+} -+ -+/* In the case that the application's core loop calls qman_poll() and -+ * bman_poll(), we ought to balance how often we incur the overheads of the -+ * slow-path poll. We'll use two decrementer sources. The idle decrementer -+ * constant is used when the last slow-poll detected no work to do, and the busy -+ * decrementer constant when the last slow-poll had work to do. 
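
A toy model of this two-constant countdown, with poll_slow_path() standing in for __poll_portal_slow() and the constants mirroring the definitions just below, shows how the slow path is amortised once the portal goes idle:

#include <stdio.h>

#define SLOW_POLL_IDLE 1000
#define SLOW_POLL_BUSY 10

static unsigned slowpoll;
static unsigned slow_calls;

static int poll_slow_path(void)
{
	static int n;

	return ++n <= 2;	/* pretend only the first two polls find work */
}

static void poll_once(void)
{
	/* same shape as bman_poll(): slow path only when countdown expires,
	 * reload value depends on whether that slow poll found work */
	if (!slowpoll--) {
		slowpoll = poll_slow_path() ? SLOW_POLL_BUSY : SLOW_POLL_IDLE;
		slow_calls++;
	}
}

int main(void)
{
	for (int i = 0; i < 100000; i++)
		poll_once();
	/* roughly 100 slow polls out of 100000 calls once idle */
	printf("%u slow polls\n", slow_calls);
	return 0;
}
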
*/ -+#define SLOW_POLL_IDLE 1000 -+#define SLOW_POLL_BUSY 10 -+static u32 __poll_portal_slow(struct bman_portal *p, u32 is); -+ -+/* Portal interrupt handler */ -+static irqreturn_t portal_isr(__always_unused int irq, void *ptr) -+{ -+ struct bman_portal *p = ptr; -+ u32 clear = p->irq_sources; -+ u32 is = bm_isr_status_read(&p->p) & p->irq_sources; -+ clear |= __poll_portal_slow(p, is); -+ bm_isr_status_clear(&p->p, clear); -+ return IRQ_HANDLED; -+} -+ -+#ifdef CONFIG_SUSPEND -+static int _bman_portal_suspend_noirq(struct device *dev) -+{ -+ struct bman_portal *p = (struct bman_portal *)dev->platform_data; -+#ifdef CONFIG_PM_DEBUG -+ struct platform_device *pdev = to_platform_device(dev); -+#endif -+ p->save_isdr = bm_isr_disable_read(&p->p); -+ bm_isr_disable_write(&p->p, 0xffffffff); -+ bm_isr_status_clear(&p->p, 0xffffffff); -+#ifdef CONFIG_PM_DEBUG -+ pr_info("Suspend for %s\n", pdev->name); -+#endif -+ return 0; -+} -+ -+static int _bman_portal_resume_noirq(struct device *dev) -+{ -+ struct bman_portal *p = (struct bman_portal *)dev->platform_data; -+ -+ /* restore isdr */ -+ bm_isr_disable_write(&p->p, p->save_isdr); -+ return 0; -+} -+#else -+#define _bman_portal_suspend_noirq NULL -+#define _bman_portal_resume_noirq NULL -+#endif -+ -+struct dev_pm_domain bman_portal_device_pm_domain = { -+ .ops = { -+ USE_PLATFORM_PM_SLEEP_OPS -+ .suspend_noirq = _bman_portal_suspend_noirq, -+ .resume_noirq = _bman_portal_resume_noirq, -+ } -+}; -+ -+struct bman_portal *bman_create_portal( -+ struct bman_portal *portal, -+ const struct bm_portal_config *config) -+{ -+ struct bm_portal *__p; -+ const struct bman_depletion *pools = &config->public_cfg.mask; -+ int ret; -+ u8 bpid = 0; -+ char buf[16]; -+ -+ if (!portal) { -+ portal = kmalloc(sizeof(*portal), GFP_KERNEL); -+ if (!portal) -+ return portal; -+ portal->alloced = 1; -+ } else -+ portal->alloced = 0; -+ -+ __p = &portal->p; -+ -+ /* prep the low-level portal struct with the mapped addresses from the -+ * config, everything that follows depends on it and "config" is more -+ * for (de)reference... */ -+ __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE]; -+ __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI]; -+ if (bm_rcr_init(__p, bm_rcr_pvb, bm_rcr_cce)) { -+ pr_err("Bman RCR initialisation failed\n"); -+ goto fail_rcr; -+ } -+ if (bm_mc_init(__p)) { -+ pr_err("Bman MC initialisation failed\n"); -+ goto fail_mc; -+ } -+ if (bm_isr_init(__p)) { -+ pr_err("Bman ISR initialisation failed\n"); -+ goto fail_isr; -+ } -+ portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL); -+ if (!portal->pools) -+ goto fail_pools; -+ portal->pools[0] = *pools; -+ bman_depletion_init(portal->pools + 1); -+ while (bpid < bman_pool_max) { -+ /* Default to all BPIDs disabled, we enable as required at -+ * run-time. 
*/ -+ bm_isr_bscn_mask(__p, bpid, 0); -+ bpid++; -+ } -+ portal->slowpoll = 0; -+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC -+ portal->rcri_owned = NULL; -+#endif -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+ raw_spin_lock_init(&portal->sharing_lock); -+ portal->is_shared = config->public_cfg.is_shared; -+ portal->sharing_redirect = NULL; -+#endif -+ sprintf(buf, "bportal-%u", config->public_cfg.index); -+ portal->pdev = platform_device_alloc(buf, -1); -+ if (!portal->pdev) -+ goto fail_devalloc; -+ portal->pdev->dev.pm_domain = &bman_portal_device_pm_domain; -+ portal->pdev->dev.platform_data = portal; -+ ret = platform_device_add(portal->pdev); -+ if (ret) -+ goto fail_devadd; -+ memset(&portal->cb, 0, sizeof(portal->cb)); -+ /* Write-to-clear any stale interrupt status bits */ -+ bm_isr_disable_write(__p, 0xffffffff); -+ portal->irq_sources = 0; -+ bm_isr_enable_write(__p, portal->irq_sources); -+ bm_isr_status_clear(__p, 0xffffffff); -+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu); -+ if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname, -+ portal)) { -+ pr_err("request_irq() failed\n"); -+ goto fail_irq; -+ } -+ if ((config->public_cfg.cpu != -1) && -+ irq_can_set_affinity(config->public_cfg.irq) && -+ irq_set_affinity(config->public_cfg.irq, -+ cpumask_of(config->public_cfg.cpu))) { -+ pr_err("irq_set_affinity() failed %s\n", portal->irqname); -+ goto fail_affinity; -+ } -+ -+ /* Need RCR to be empty before continuing */ -+ ret = bm_rcr_get_fill(__p); -+ if (ret) { -+ pr_err("Bman RCR unclean\n"); -+ goto fail_rcr_empty; -+ } -+ /* Success */ -+ portal->config = config; -+ -+ bm_isr_disable_write(__p, 0); -+ bm_isr_uninhibit(__p); -+ return portal; -+fail_rcr_empty: -+fail_affinity: -+ free_irq(config->public_cfg.irq, portal); -+fail_irq: -+ platform_device_del(portal->pdev); -+fail_devadd: -+ platform_device_put(portal->pdev); -+fail_devalloc: -+ kfree(portal->pools); -+fail_pools: -+ bm_isr_finish(__p); -+fail_isr: -+ bm_mc_finish(__p); -+fail_mc: -+ bm_rcr_finish(__p); -+fail_rcr: -+ if (portal->alloced) -+ kfree(portal); -+ return NULL; -+} -+ -+struct bman_portal *bman_create_affine_portal( -+ const struct bm_portal_config *config) -+{ -+ struct bman_portal *portal; -+ -+ portal = &per_cpu(bman_affine_portal, config->public_cfg.cpu); -+ portal = bman_create_portal(portal, config); -+ if (portal) { -+ spin_lock(&affine_mask_lock); -+ cpumask_set_cpu(config->public_cfg.cpu, &affine_mask); -+ spin_unlock(&affine_mask_lock); -+ } -+ return portal; -+} -+ -+ -+struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect, -+ int cpu) -+{ -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+ struct bman_portal *p; -+ p = &per_cpu(bman_affine_portal, cpu); -+ BUG_ON(p->config); -+ BUG_ON(p->is_shared); -+ BUG_ON(!redirect->config->public_cfg.is_shared); -+ p->irq_sources = 0; -+ p->sharing_redirect = redirect; -+ return p; -+#else -+ BUG(); -+ return NULL; -+#endif -+} -+ -+void bman_destroy_portal(struct bman_portal *bm) -+{ -+ const struct bm_portal_config *pcfg; -+ pcfg = bm->config; -+ bm_rcr_cce_update(&bm->p); -+ bm_rcr_cce_update(&bm->p); -+ -+ free_irq(pcfg->public_cfg.irq, bm); -+ -+ kfree(bm->pools); -+ bm_isr_finish(&bm->p); -+ bm_mc_finish(&bm->p); -+ bm_rcr_finish(&bm->p); -+ bm->config = NULL; -+ if (bm->alloced) -+ kfree(bm); -+} -+ -+const struct bm_portal_config *bman_destroy_affine_portal(void) -+{ -+ struct bman_portal *bm = get_raw_affine_portal(); -+ const struct bm_portal_config *pcfg; -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+ if 
(bm->sharing_redirect) { -+ bm->sharing_redirect = NULL; -+ put_affine_portal(); -+ return NULL; -+ } -+ bm->is_shared = 0; -+#endif -+ pcfg = bm->config; -+ bman_destroy_portal(bm); -+ spin_lock(&affine_mask_lock); -+ cpumask_clear_cpu(pcfg->public_cfg.cpu, &affine_mask); -+ spin_unlock(&affine_mask_lock); -+ put_affine_portal(); -+ return pcfg; -+} -+ -+/* When release logic waits on available RCR space, we need a global waitqueue -+ * in the case of "affine" use (as the waits wake on different cpus which means -+ * different portals - so we can't wait on any per-portal waitqueue). */ -+static DECLARE_WAIT_QUEUE_HEAD(affine_queue); -+ -+static u32 __poll_portal_slow(struct bman_portal *p, u32 is) -+{ -+ struct bman_depletion tmp; -+ u32 ret = is; -+ -+ /* There is a gotcha to be aware of. If we do the query before clearing -+ * the status register, we may miss state changes that occur between the -+ * two. If we write to clear the status register before the query, the -+ * cache-enabled query command may overtake the status register write -+ * unless we use a heavyweight sync (which we don't want). Instead, we -+ * write-to-clear the status register then *read it back* before doing -+ * the query, hence the odd while loop with the 'is' accumulation. */ -+ if (is & BM_PIRQ_BSCN) { -+ struct bm_mc_result *mcr; -+ __maybe_unused unsigned long irqflags; -+ unsigned int i, j; -+ u32 __is; -+ bm_isr_status_clear(&p->p, BM_PIRQ_BSCN); -+ while ((__is = bm_isr_status_read(&p->p)) & BM_PIRQ_BSCN) { -+ is |= __is; -+ bm_isr_status_clear(&p->p, BM_PIRQ_BSCN); -+ } -+ is &= ~BM_PIRQ_BSCN; -+ PORTAL_IRQ_LOCK(p, irqflags); -+ bm_mc_start(&p->p); -+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY); -+ while (!(mcr = bm_mc_result(&p->p))) -+ cpu_relax(); -+ tmp = mcr->query.ds.state; -+ tmp.__state[0] = be32_to_cpu(tmp.__state[0]); -+ tmp.__state[1] = be32_to_cpu(tmp.__state[1]); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ for (i = 0; i < 2; i++) { -+ int idx = i * 32; -+ /* tmp is a mask of currently-depleted pools. -+ * pools[0] is mask of those we care about. -+ * pools[1] is our previous view (we only want to -+ * be told about changes). */ -+ tmp.__state[i] &= p->pools[0].__state[i]; -+ if (tmp.__state[i] == p->pools[1].__state[i]) -+ /* fast-path, nothing to see, move along */ -+ continue; -+ for (j = 0; j <= 31; j++, idx++) { -+ struct bman_pool *pool = p->cb[idx]; -+ int b4 = bman_depletion_get(&p->pools[1], idx); -+ int af = bman_depletion_get(&tmp, idx); -+ if (b4 == af) -+ continue; -+ while (pool) { -+ pool->params.cb(p, pool, -+ pool->params.cb_ctx, af); -+ pool = pool->next; -+ } -+ } -+ } -+ p->pools[1] = tmp; -+ } -+ -+ if (is & BM_PIRQ_RCRI) { -+ __maybe_unused unsigned long irqflags; -+ PORTAL_IRQ_LOCK(p, irqflags); -+ bm_rcr_cce_update(&p->p); -+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC -+ /* If waiting for sync, we only cancel the interrupt threshold -+ * when the ring utilisation hits zero. 
*/ -+ if (p->rcri_owned) { -+ if (!bm_rcr_get_fill(&p->p)) { -+ p->rcri_owned = NULL; -+ bm_rcr_set_ithresh(&p->p, 0); -+ } -+ } else -+#endif -+ bm_rcr_set_ithresh(&p->p, 0); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ wake_up(&affine_queue); -+ bm_isr_status_clear(&p->p, BM_PIRQ_RCRI); -+ is &= ~BM_PIRQ_RCRI; -+ } -+ -+ /* There should be no status register bits left undefined */ -+ DPA_ASSERT(!is); -+ return ret; -+} -+ -+const struct bman_portal_config *bman_get_portal_config(void) -+{ -+ struct bman_portal *p = get_affine_portal(); -+ const struct bman_portal_config *ret = &p->config->public_cfg; -+ put_affine_portal(); -+ return ret; -+} -+EXPORT_SYMBOL(bman_get_portal_config); -+ -+u32 bman_irqsource_get(void) -+{ -+ struct bman_portal *p = get_raw_affine_portal(); -+ u32 ret = p->irq_sources & BM_PIRQ_VISIBLE; -+ put_affine_portal(); -+ return ret; -+} -+EXPORT_SYMBOL(bman_irqsource_get); -+ -+int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits) -+{ -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+ if (p->sharing_redirect) -+ return -EINVAL; -+ else -+#endif -+ { -+ __maybe_unused unsigned long irqflags; -+ PORTAL_IRQ_LOCK(p, irqflags); -+ set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources); -+ bm_isr_enable_write(&p->p, p->irq_sources); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ } -+ return 0; -+} -+EXPORT_SYMBOL(bman_p_irqsource_add); -+ -+int bman_irqsource_add(__maybe_unused u32 bits) -+{ -+ struct bman_portal *p = get_raw_affine_portal(); -+ int ret = 0; -+ ret = bman_p_irqsource_add(p, bits); -+ put_affine_portal(); -+ return ret; -+} -+EXPORT_SYMBOL(bman_irqsource_add); -+ -+int bman_irqsource_remove(u32 bits) -+{ -+ struct bman_portal *p = get_raw_affine_portal(); -+ __maybe_unused unsigned long irqflags; -+ u32 ier; -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+ if (p->sharing_redirect) { -+ put_affine_portal(); -+ return -EINVAL; -+ } -+#endif -+ /* Our interrupt handler only processes+clears status register bits that -+ * are in p->irq_sources. As we're trimming that mask, if one of them -+ * were to assert in the status register just before we remove it from -+ * the enable register, there would be an interrupt-storm when we -+ * release the IRQ lock. So we wait for the enable register update to -+ * take effect in h/w (by reading it back) and then clear all other bits -+ * in the status register. Ie. we clear them from ISR once it's certain -+ * IER won't allow them to reassert. */ -+ PORTAL_IRQ_LOCK(p, irqflags); -+ bits &= BM_PIRQ_VISIBLE; -+ clear_bits(bits, &p->irq_sources); -+ bm_isr_enable_write(&p->p, p->irq_sources); -+ ier = bm_isr_enable_read(&p->p); -+ /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a -+ * data-dependency, ie. to protect against re-ordering. 
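
A plain-variable model of that ordering, with ier/isr standing in for the portal registers, makes the "~ier" trick explicit: the clear mask is derived from the read-back enable value, so only still-enabled bits can survive in the status register.

#include <stdint.h>
#include <stdio.h>

static uint32_t ier, isr;	/* stand-ins for the portal registers */

static void irqsource_remove_model(uint32_t *irq_sources, uint32_t bits)
{
	uint32_t readback, to_clear;

	*irq_sources &= ~bits;
	ier = *irq_sources;		/* bm_isr_enable_write() */
	readback = ier;			/* bm_isr_enable_read() */
	to_clear = ~readback;		/* everything IER no longer allows */
	isr &= ~to_clear;		/* bm_isr_status_clear(~ier) */
}

int main(void)
{
	uint32_t sources = 0x3;		/* e.g. BM_PIRQ_RCRI | BM_PIRQ_BSCN */

	ier = sources;
	isr = 0x2;			/* one source already asserted */
	irqsource_remove_model(&sources, 0x2);
	printf("ier=%#x isr=%#x\n", ier, isr);	/* ier=0x1 isr=0 */
	return 0;
}
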
*/ -+ bm_isr_status_clear(&p->p, ~ier); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ return 0; -+} -+EXPORT_SYMBOL(bman_irqsource_remove); -+ -+const cpumask_t *bman_affine_cpus(void) -+{ -+ return &affine_mask; -+} -+EXPORT_SYMBOL(bman_affine_cpus); -+ -+u32 bman_poll_slow(void) -+{ -+ struct bman_portal *p = get_poll_portal(); -+ u32 ret; -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+ if (unlikely(p->sharing_redirect)) -+ ret = (u32)-1; -+ else -+#endif -+ { -+ u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources; -+ ret = __poll_portal_slow(p, is); -+ bm_isr_status_clear(&p->p, ret); -+ } -+ put_poll_portal(); -+ return ret; -+} -+EXPORT_SYMBOL(bman_poll_slow); -+ -+/* Legacy wrapper */ -+void bman_poll(void) -+{ -+ struct bman_portal *p = get_poll_portal(); -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+ if (unlikely(p->sharing_redirect)) -+ goto done; -+#endif -+ if (!(p->slowpoll--)) { -+ u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources; -+ u32 active = __poll_portal_slow(p, is); -+ if (active) -+ p->slowpoll = SLOW_POLL_BUSY; -+ else -+ p->slowpoll = SLOW_POLL_IDLE; -+ } -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+done: -+#endif -+ put_poll_portal(); -+} -+EXPORT_SYMBOL(bman_poll); -+ -+static const u32 zero_thresholds[4] = {0, 0, 0, 0}; -+ -+struct bman_pool *bman_new_pool(const struct bman_pool_params *params) -+{ -+ struct bman_pool *pool = NULL; -+ u32 bpid; -+ -+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) { -+ int ret = bman_alloc_bpid(&bpid); -+ if (ret) -+ return NULL; -+ } else { -+ if (params->bpid >= bman_pool_max) -+ return NULL; -+ bpid = params->bpid; -+ } -+#ifdef CONFIG_FSL_BMAN_CONFIG -+ if (params->flags & BMAN_POOL_FLAG_THRESH) { -+ int ret = bm_pool_set(bpid, params->thresholds); -+ if (ret) -+ goto err; -+ } -+#else -+ if (params->flags & BMAN_POOL_FLAG_THRESH) -+ goto err; -+#endif -+ pool = kmalloc(sizeof(*pool), GFP_KERNEL); -+ if (!pool) -+ goto err; -+ pool->sp = NULL; -+ pool->sp_fill = 0; -+ pool->params = *params; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ atomic_set(&pool->in_use, 1); -+#endif -+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) -+ pool->params.bpid = bpid; -+ if (params->flags & BMAN_POOL_FLAG_STOCKPILE) { -+ pool->sp = kmalloc(sizeof(struct bm_buffer) * BMAN_STOCKPILE_SZ, -+ GFP_KERNEL); -+ if (!pool->sp) -+ goto err; -+ } -+ if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION) { -+ struct bman_portal *p = get_affine_portal(); -+ if (!p->pools || !bman_depletion_get(&p->pools[0], bpid)) { -+ pr_err("Depletion events disabled for bpid %d\n", bpid); -+ goto err; -+ } -+ depletion_link(p, pool); -+ put_affine_portal(); -+ } -+ return pool; -+err: -+#ifdef CONFIG_FSL_BMAN_CONFIG -+ if (params->flags & BMAN_POOL_FLAG_THRESH) -+ bm_pool_set(bpid, zero_thresholds); -+#endif -+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) -+ bman_release_bpid(bpid); -+ if (pool) { -+ kfree(pool->sp); -+ kfree(pool); -+ } -+ return NULL; -+} -+EXPORT_SYMBOL(bman_new_pool); -+ -+void bman_free_pool(struct bman_pool *pool) -+{ -+#ifdef CONFIG_FSL_BMAN_CONFIG -+ if (pool->params.flags & BMAN_POOL_FLAG_THRESH) -+ bm_pool_set(pool->params.bpid, zero_thresholds); -+#endif -+ if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION) -+ depletion_unlink(pool); -+ if (pool->params.flags & BMAN_POOL_FLAG_STOCKPILE) { -+ if (pool->sp_fill) -+ pr_err("Stockpile not flushed, has %u in bpid %u.\n", -+ pool->sp_fill, pool->params.bpid); -+ kfree(pool->sp); -+ pool->sp = NULL; -+ pool->params.flags ^= BMAN_POOL_FLAG_STOCKPILE; -+ } -+ if (pool->params.flags & 
BMAN_POOL_FLAG_DYNAMIC_BPID) -+ bman_release_bpid(pool->params.bpid); -+ kfree(pool); -+} -+EXPORT_SYMBOL(bman_free_pool); -+ -+const struct bman_pool_params *bman_get_params(const struct bman_pool *pool) -+{ -+ return &pool->params; -+} -+EXPORT_SYMBOL(bman_get_params); -+ -+static noinline void update_rcr_ci(struct bman_portal *p, u8 avail) -+{ -+ if (avail) -+ bm_rcr_cce_prefetch(&p->p); -+ else -+ bm_rcr_cce_update(&p->p); -+} -+ -+int bman_rcr_is_empty(void) -+{ -+ __maybe_unused unsigned long irqflags; -+ struct bman_portal *p = get_affine_portal(); -+ u8 avail; -+ -+ PORTAL_IRQ_LOCK(p, irqflags); -+ update_rcr_ci(p, 0); -+ avail = bm_rcr_get_fill(&p->p); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ return avail == 0; -+} -+EXPORT_SYMBOL(bman_rcr_is_empty); -+ -+static inline struct bm_rcr_entry *try_rel_start(struct bman_portal **p, -+#ifdef CONFIG_FSL_DPA_CAN_WAIT -+ __maybe_unused struct bman_pool *pool, -+#endif -+ __maybe_unused unsigned long *irqflags, -+ __maybe_unused u32 flags) -+{ -+ struct bm_rcr_entry *r; -+ u8 avail; -+ -+ *p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(*p, (*irqflags)); -+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC -+ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) && -+ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) { -+ if ((*p)->rcri_owned) { -+ PORTAL_IRQ_UNLOCK(*p, (*irqflags)); -+ put_affine_portal(); -+ return NULL; -+ } -+ (*p)->rcri_owned = pool; -+ } -+#endif -+ avail = bm_rcr_get_avail(&(*p)->p); -+ if (avail < 2) -+ update_rcr_ci(*p, avail); -+ r = bm_rcr_start(&(*p)->p); -+ if (unlikely(!r)) { -+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC -+ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) && -+ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) -+ (*p)->rcri_owned = NULL; -+#endif -+ PORTAL_IRQ_UNLOCK(*p, (*irqflags)); -+ put_affine_portal(); -+ } -+ return r; -+} -+ -+#ifdef CONFIG_FSL_DPA_CAN_WAIT -+static noinline struct bm_rcr_entry *__wait_rel_start(struct bman_portal **p, -+ struct bman_pool *pool, -+ __maybe_unused unsigned long *irqflags, -+ u32 flags) -+{ -+ struct bm_rcr_entry *rcr = try_rel_start(p, pool, irqflags, flags); -+ if (!rcr) -+ bm_rcr_set_ithresh(&(*p)->p, 1); -+ return rcr; -+} -+ -+static noinline struct bm_rcr_entry *wait_rel_start(struct bman_portal **p, -+ struct bman_pool *pool, -+ __maybe_unused unsigned long *irqflags, -+ u32 flags) -+{ -+ struct bm_rcr_entry *rcr; -+#ifndef CONFIG_FSL_DPA_CAN_WAIT_SYNC -+ pool = NULL; -+#endif -+ if (flags & BMAN_RELEASE_FLAG_WAIT_INT) -+ /* NB: return NULL if signal occurs before completion. Signal -+ * can occur during return. Caller must check for signal */ -+ wait_event_interruptible(affine_queue, -+ (rcr = __wait_rel_start(p, pool, irqflags, flags))); -+ else -+ wait_event(affine_queue, -+ (rcr = __wait_rel_start(p, pool, irqflags, flags))); -+ return rcr; -+} -+#endif -+ -+static inline int __bman_release(struct bman_pool *pool, -+ const struct bm_buffer *bufs, u8 num, u32 flags) -+{ -+ struct bman_portal *p; -+ struct bm_rcr_entry *r; -+ __maybe_unused unsigned long irqflags; -+ u32 i = num - 1; -+ -+#ifdef CONFIG_FSL_DPA_CAN_WAIT -+ if (flags & BMAN_RELEASE_FLAG_WAIT) -+ r = wait_rel_start(&p, pool, &irqflags, flags); -+ else -+ r = try_rel_start(&p, pool, &irqflags, flags); -+#else -+ r = try_rel_start(&p, &irqflags, flags); -+#endif -+ if (!r) -+ return -EBUSY; -+ /* We can copy all but the first entry, as this can trigger badness -+ * with the valid-bit. Use the overlay to mask the verb byte. 
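
The first-entry encoding can be checked in isolation. A standalone sketch: the BPID is folded in at bit 48 and the top byte is masked off so the verb/valid-bit byte is never written directly. The driver's cpu_to_be64() swap is omitted here so the sketch runs on any host.

#include <stdint.h>
#include <stdio.h>

static uint64_t encode_first_buf(uint64_t opaque, uint8_t bpid)
{
	return (opaque | ((uint64_t)bpid << 48)) & 0x00ffffffffffffffull;
}

int main(void)
{
	uint64_t addr = 0x12345678aaull;	/* a 40-bit buffer address */

	/* bpid 3 -> 0x30012345678aa: verb byte still zero */
	printf("first entry: %#llx\n",
	       (unsigned long long)encode_first_buf(addr, 3));
	return 0;
}
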
*/ -+ r->bufs[0].opaque = -+ ((cpu_to_be64((bufs[0].opaque | -+ ((u64)pool->params.bpid<<48)) -+ & 0x00ffffffffffffff))); -+ if (i) { -+ for (i = 1; i < num; i++) -+ r->bufs[i].opaque = -+ cpu_to_be64(bufs[i].opaque); -+ } -+ -+ bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE | -+ (num & BM_RCR_VERB_BUFCOUNT_MASK)); -+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC -+ /* if we wish to sync we need to set the threshold after h/w sees the -+ * new ring entry. As we're mixing cache-enabled and cache-inhibited -+ * accesses, this requires a heavy-weight sync. */ -+ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) && -+ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) { -+ hwsync(); -+ bm_rcr_set_ithresh(&p->p, 1); -+ } -+#endif -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC -+ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) && -+ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) { -+ if (flags & BMAN_RELEASE_FLAG_WAIT_INT) -+ /* NB: return success even if signal occurs before -+ * condition is true. pvb_commit guarantees success */ -+ wait_event_interruptible(affine_queue, -+ (p->rcri_owned != pool)); -+ else -+ wait_event(affine_queue, (p->rcri_owned != pool)); -+ } -+#endif -+ return 0; -+} -+ -+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num, -+ u32 flags) -+{ -+ int ret; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ if (!num || (num > 8)) -+ return -EINVAL; -+ if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE) -+ return -EINVAL; -+#endif -+ /* Without stockpile, this API is a pass-through to the h/w operation */ -+ if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE)) -+ return __bman_release(pool, bufs, num, flags); -+#ifdef CONFIG_FSL_DPA_CHECKING -+ if (!atomic_dec_and_test(&pool->in_use)) { -+ pr_crit("Parallel attempts to enter bman_released() detected."); -+ panic("only one instance of bman_released/acquired allowed"); -+ } -+#endif -+ /* Two movements of buffers are possible, and can occur in either order. -+ * A: moving buffers from the caller to the stockpile. -+ * B: moving buffers from the stockpile to hardware. -+ * Order 1: if there is already enough space in the stockpile for A -+ * then we want to do A first, and only do B if we trigger the -+ * stockpile-high threshold. -+ * Order 2: if there is not enough space in the stockpile for A, then -+ * we want to do B first, then do A if B had succeeded. However in this -+ * case B is dependent on how many buffers the user needs to release, -+ * not the stockpile-high threshold. -+ * Due to the different handling of B between the two cases, putting A -+ * and B in a while() loop would require quite obscure logic, so handle -+ * the different sequences explicitly. 
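
A toy model of the release-side sequencing, with an always-successful hw_release() stub in place of __bman_release() and ints in place of struct bm_buffer, shows the two orders side by side (thresholds match the BMAN_STOCKPILE_* build constants):

#include <stdio.h>
#include <string.h>

#define SZ   16
#define HIGH 14

static int sp[SZ];
static unsigned sp_fill;

static int hw_release(const int *bufs, unsigned n)
{
	(void)bufs;
	printf("  hw_release of %u bufs\n", n);
	return 0;	/* pretend the RCR ring always has room */
}

static int release(const int *bufs, unsigned num)
{
	if (sp_fill + num <= SZ) {
		/* Order 1: stockpile first, then drain past HIGH */
		memcpy(sp + sp_fill, bufs, num * sizeof(*bufs));
		sp_fill += num;
		while (sp_fill >= HIGH) {
			if (hw_release(sp + sp_fill - 8, 8))
				break;
			sp_fill -= 8;
		}
	} else {
		/* Order 2: drain just enough for 'num', then stockpile */
		do {
			if (hw_release(sp + sp_fill - 8, 8))
				return -1;
			sp_fill -= 8;
		} while (sp_fill + num > SZ);
		memcpy(sp + sp_fill, bufs, num * sizeof(*bufs));
		sp_fill += num;
	}
	return 0;
}

int main(void)
{
	int bufs[8] = { 0 };

	release(bufs, 8);	/* order 1, fill 8 < 14: no drain */
	release(bufs, 8);	/* order 1, fill 16 >= 14: drains 8 */
	printf("fill=%u\n", sp_fill);
	return 0;
}
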
*/ -+ if ((pool->sp_fill + num) <= BMAN_STOCKPILE_SZ) { -+ /* Order 1: do A */ -+ copy_words(pool->sp + pool->sp_fill, bufs, -+ sizeof(struct bm_buffer) * num); -+ pool->sp_fill += num; -+ /* do B relative to STOCKPILE_HIGH */ -+ while (pool->sp_fill >= BMAN_STOCKPILE_HIGH) { -+ ret = __bman_release(pool, -+ pool->sp + (pool->sp_fill - 8), 8, -+ flags); -+ if (ret >= 0) -+ pool->sp_fill -= 8; -+ } -+ } else { -+ /* Order 2: do B relative to 'num' */ -+ do { -+ ret = __bman_release(pool, -+ pool->sp + (pool->sp_fill - 8), 8, -+ flags); -+ if (ret < 0) -+ /* failure */ -+ goto release_done; -+ pool->sp_fill -= 8; -+ } while ((pool->sp_fill + num) > BMAN_STOCKPILE_SZ); -+ /* do A */ -+ copy_words(pool->sp + pool->sp_fill, bufs, -+ sizeof(struct bm_buffer) * num); -+ pool->sp_fill += num; -+ } -+ /* success */ -+ ret = 0; -+release_done: -+#ifdef CONFIG_FSL_DPA_CHECKING -+ atomic_inc(&pool->in_use); -+#endif -+ return ret; -+} -+EXPORT_SYMBOL(bman_release); -+ -+static inline int __bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, -+ u8 num) -+{ -+ struct bman_portal *p = get_affine_portal(); -+ struct bm_mc_command *mcc; -+ struct bm_mc_result *mcr; -+ __maybe_unused unsigned long irqflags; -+ int ret, i; -+ -+ PORTAL_IRQ_LOCK(p, irqflags); -+ mcc = bm_mc_start(&p->p); -+ mcc->acquire.bpid = pool->params.bpid; -+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | -+ (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT)); -+ while (!(mcr = bm_mc_result(&p->p))) -+ cpu_relax(); -+ ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT; -+ if (bufs) { -+ for (i = 0; i < num; i++) -+ bufs[i].opaque = -+ be64_to_cpu(mcr->acquire.bufs[i].opaque); -+ } -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ if (ret != num) -+ ret = -ENOMEM; -+ return ret; -+} -+ -+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num, -+ u32 flags) -+{ -+ int ret; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ if (!num || (num > 8)) -+ return -EINVAL; -+ if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE) -+ return -EINVAL; -+#endif -+ /* Without stockpile, this API is a pass-through to the h/w operation */ -+ if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE)) -+ return __bman_acquire(pool, bufs, num); -+#ifdef CONFIG_FSL_DPA_CHECKING -+ if (!atomic_dec_and_test(&pool->in_use)) { -+ pr_crit("Parallel attempts to enter bman_acquire() detected."); -+ panic("only one instance of bman_released/acquired allowed"); -+ } -+#endif -+ /* Two movements of buffers are possible, and can occur in either order. -+ * A: moving buffers from stockpile to the caller. -+ * B: moving buffers from hardware to the stockpile. -+ * Order 1: if there are already enough buffers in the stockpile for A -+ * then we want to do A first, and only do B if we trigger the -+ * stockpile-low threshold. -+ * Order 2: if there are not enough buffers in the stockpile for A, -+ * then we want to do B first, then do A if B had succeeded. However in -+ * this case B is dependent on how many buffers the user needs, not the -+ * stockpile-low threshold. -+ * Due to the different handling of B between the two cases, putting A -+ * and B in a while() loop would require quite obscure logic, so handle -+ * the different sequences explicitly. 
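
The acquire side adds one more wrinkle: refills try 8 buffers at a time and fall back to single-buffer acquires, so a nearly depleted pool still yields what it has. A sketch with a stubbed hardware pool in place of __bman_acquire():

#include <stdio.h>

static unsigned hw_bufs = 5;	/* pretend only 5 buffers remain in h/w */

static int hw_acquire(unsigned want)
{
	if (hw_bufs < want)
		return -1;	/* partial acquires modelled as failure */
	hw_bufs -= want;
	return (int)want;
}

static int refill_once(void)
{
	int ret = hw_acquire(8);

	if (ret < 0)
		ret = hw_acquire(1);	/* fallback keeps draining */
	return ret;
}

int main(void)
{
	int ret, got = 0;

	while ((ret = refill_once()) > 0)
		got += ret;
	printf("refilled %d bufs, %u left in h/w\n", got, hw_bufs);
	return 0;
}
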
*/ -+ if (num <= pool->sp_fill) { -+ /* Order 1: do A */ -+ copy_words(bufs, pool->sp + (pool->sp_fill - num), -+ sizeof(struct bm_buffer) * num); -+ pool->sp_fill -= num; -+ /* do B relative to STOCKPILE_LOW */ -+ while (pool->sp_fill <= BMAN_STOCKPILE_LOW) { -+ ret = __bman_acquire(pool, pool->sp + pool->sp_fill, 8); -+ if (ret < 0) -+ ret = __bman_acquire(pool, -+ pool->sp + pool->sp_fill, 1); -+ if (ret < 0) -+ break; -+ pool->sp_fill += ret; -+ } -+ } else { -+ /* Order 2: do B relative to 'num' */ -+ do { -+ ret = __bman_acquire(pool, pool->sp + pool->sp_fill, 8); -+ if (ret < 0) -+ ret = __bman_acquire(pool, -+ pool->sp + pool->sp_fill, 1); -+ if (ret < 0) -+ /* failure */ -+ goto acquire_done; -+ pool->sp_fill += ret; -+ } while (pool->sp_fill < num); -+ /* do A */ -+ copy_words(bufs, pool->sp + (pool->sp_fill - num), -+ sizeof(struct bm_buffer) * num); -+ pool->sp_fill -= num; -+ } -+ /* success */ -+ ret = num; -+acquire_done: -+#ifdef CONFIG_FSL_DPA_CHECKING -+ atomic_inc(&pool->in_use); -+#endif -+ return ret; -+} -+EXPORT_SYMBOL(bman_acquire); -+ -+int bman_flush_stockpile(struct bman_pool *pool, u32 flags) -+{ -+ u8 num; -+ int ret; -+ -+ while (pool->sp_fill) { -+ num = ((pool->sp_fill > 8) ? 8 : pool->sp_fill); -+ ret = __bman_release(pool, pool->sp + (pool->sp_fill - num), -+ num, flags); -+ if (ret) -+ return ret; -+ pool->sp_fill -= num; -+ } -+ return 0; -+} -+EXPORT_SYMBOL(bman_flush_stockpile); -+ -+int bman_query_pools(struct bm_pool_state *state) -+{ -+ struct bman_portal *p = get_affine_portal(); -+ struct bm_mc_result *mcr; -+ __maybe_unused unsigned long irqflags; -+ -+ PORTAL_IRQ_LOCK(p, irqflags); -+ bm_mc_start(&p->p); -+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY); -+ while (!(mcr = bm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) == BM_MCR_VERB_CMD_QUERY); -+ *state = mcr->query; -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ return 0; -+} -+EXPORT_SYMBOL(bman_query_pools); -+ -+#ifdef CONFIG_FSL_BMAN_CONFIG -+u32 bman_query_free_buffers(struct bman_pool *pool) -+{ -+ return bm_pool_free_buffers(pool->params.bpid); -+} -+EXPORT_SYMBOL(bman_query_free_buffers); -+ -+int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds) -+{ -+ u32 bpid; -+ -+ bpid = bman_get_params(pool)->bpid; -+ -+ return bm_pool_set(bpid, thresholds); -+} -+EXPORT_SYMBOL(bman_update_pool_thresholds); -+#endif -+ -+int bman_shutdown_pool(u32 bpid) -+{ -+ struct bman_portal *p = get_affine_portal(); -+ __maybe_unused unsigned long irqflags; -+ int ret; -+ -+ PORTAL_IRQ_LOCK(p, irqflags); -+ ret = bm_shutdown_pool(&p->p, bpid); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ return ret; -+} -+EXPORT_SYMBOL(bman_shutdown_pool); -+ -+const struct bm_portal_config *bman_get_bm_portal_config( -+ struct bman_portal *portal) -+{ -+ return portal->sharing_redirect ? NULL : portal->config; -+} ---- /dev/null -+++ b/drivers/staging/fsl_qbman/bman_low.h -@@ -0,0 +1,559 @@ -+/* Copyright 2008-2011 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
-+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include "bman_private.h" -+ -+/***************************/ -+/* Portal register assists */ -+/***************************/ -+ -+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64) -+ -+/* Cache-inhibited register offsets */ -+#define BM_REG_RCR_PI_CINH 0x0000 -+#define BM_REG_RCR_CI_CINH 0x0004 -+#define BM_REG_RCR_ITR 0x0008 -+#define BM_REG_CFG 0x0100 -+#define BM_REG_SCN(n) (0x0200 + ((n) << 2)) -+#define BM_REG_ISR 0x0e00 -+#define BM_REG_IIR 0x0e0c -+ -+/* Cache-enabled register offsets */ -+#define BM_CL_CR 0x0000 -+#define BM_CL_RR0 0x0100 -+#define BM_CL_RR1 0x0140 -+#define BM_CL_RCR 0x1000 -+#define BM_CL_RCR_PI_CENA 0x3000 -+#define BM_CL_RCR_CI_CENA 0x3100 -+ -+#endif -+ -+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) -+ -+/* Cache-inhibited register offsets */ -+#define BM_REG_RCR_PI_CINH 0x3000 -+#define BM_REG_RCR_CI_CINH 0x3100 -+#define BM_REG_RCR_ITR 0x3200 -+#define BM_REG_CFG 0x3300 -+#define BM_REG_SCN(n) (0x3400 + ((n) << 6)) -+#define BM_REG_ISR 0x3e00 -+#define BM_REG_IIR 0x3ec0 -+ -+/* Cache-enabled register offsets */ -+#define BM_CL_CR 0x0000 -+#define BM_CL_RR0 0x0100 -+#define BM_CL_RR1 0x0140 -+#define BM_CL_RCR 0x1000 -+#define BM_CL_RCR_PI_CENA 0x3000 -+#define BM_CL_RCR_CI_CENA 0x3100 -+ -+#endif -+ -+/* BTW, the drivers (and h/w programming model) already obtain the required -+ * synchronisation for portal accesses via lwsync(), hwsync(), and -+ * data-dependencies. Use of barrier()s or other order-preserving primitives -+ * simply degrade performance. Hence the use of the __raw_*() interfaces, which -+ * simply ensure that the compiler treats the portal registers as volatile (ie. -+ * non-coherent). */ -+ -+/* Cache-inhibited register access. 
*/ -+#define __bm_in(bm, o) be32_to_cpu(__raw_readl((bm)->addr_ci + (o))) -+#define __bm_out(bm, o, val) __raw_writel(cpu_to_be32(val), \ -+ (bm)->addr_ci + (o)); -+#define bm_in(reg) __bm_in(&portal->addr, BM_REG_##reg) -+#define bm_out(reg, val) __bm_out(&portal->addr, BM_REG_##reg, val) -+ -+/* Cache-enabled (index) register access */ -+#define __bm_cl_touch_ro(bm, o) dcbt_ro((bm)->addr_ce + (o)) -+#define __bm_cl_touch_rw(bm, o) dcbt_rw((bm)->addr_ce + (o)) -+#define __bm_cl_in(bm, o) be32_to_cpu(__raw_readl((bm)->addr_ce + (o))) -+#define __bm_cl_out(bm, o, val) \ -+ do { \ -+ u32 *__tmpclout = (bm)->addr_ce + (o); \ -+ __raw_writel(cpu_to_be32(val), __tmpclout); \ -+ dcbf(__tmpclout); \ -+ } while (0) -+#define __bm_cl_invalidate(bm, o) dcbi((bm)->addr_ce + (o)) -+#define bm_cl_touch_ro(reg) __bm_cl_touch_ro(&portal->addr, BM_CL_##reg##_CENA) -+#define bm_cl_touch_rw(reg) __bm_cl_touch_rw(&portal->addr, BM_CL_##reg##_CENA) -+#define bm_cl_in(reg) __bm_cl_in(&portal->addr, BM_CL_##reg##_CENA) -+#define bm_cl_out(reg, val) __bm_cl_out(&portal->addr, BM_CL_##reg##_CENA, val) -+#define bm_cl_invalidate(reg)\ -+ __bm_cl_invalidate(&portal->addr, BM_CL_##reg##_CENA) -+ -+/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf -+ * analysis, look at using the "extra" bit in the ring index registers to avoid -+ * cyclic issues. */ -+static inline u8 bm_cyc_diff(u8 ringsize, u8 first, u8 last) -+{ -+ /* 'first' is included, 'last' is excluded */ -+ if (first <= last) -+ return last - first; -+ return ringsize + last - first; -+} -+ -+/* Portal modes. -+ * Enum types; -+ * pmode == production mode -+ * cmode == consumption mode, -+ * Enum values use 3 letter codes. First letter matches the portal mode, -+ * remaining two letters indicate; -+ * ci == cache-inhibited portal register -+ * ce == cache-enabled portal register -+ * vb == in-band valid-bit (cache-enabled) -+ */ -+enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */ -+ bm_rcr_pci = 0, /* PI index, cache-inhibited */ -+ bm_rcr_pce = 1, /* PI index, cache-enabled */ -+ bm_rcr_pvb = 2 /* valid-bit */ -+}; -+enum bm_rcr_cmode { /* s/w-only */ -+ bm_rcr_cci, /* CI index, cache-inhibited */ -+ bm_rcr_cce /* CI index, cache-enabled */ -+}; -+ -+ -+/* ------------------------- */ -+/* --- Portal structures --- */ -+ -+#define BM_RCR_SIZE 8 -+ -+struct bm_rcr { -+ struct bm_rcr_entry *ring, *cursor; -+ u8 ci, available, ithresh, vbit; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ u32 busy; -+ enum bm_rcr_pmode pmode; -+ enum bm_rcr_cmode cmode; -+#endif -+}; -+ -+struct bm_mc { -+ struct bm_mc_command *cr; -+ struct bm_mc_result *rr; -+ u8 rridx, vbit; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ enum { -+ /* Can only be _mc_start()ed */ -+ mc_idle, -+ /* Can only be _mc_commit()ed or _mc_abort()ed */ -+ mc_user, -+ /* Can only be _mc_retry()ed */ -+ mc_hw -+ } state; -+#endif -+}; -+ -+struct bm_addr { -+ void __iomem *addr_ce; /* cache-enabled */ -+ void __iomem *addr_ci; /* cache-inhibited */ -+}; -+ -+struct bm_portal { -+ struct bm_addr addr; -+ struct bm_rcr rcr; -+ struct bm_mc mc; -+ struct bm_portal_config config; -+} ____cacheline_aligned; -+ -+ -+/* --------------- */ -+/* --- RCR API --- */ -+ -+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */ -+#define RCR_CARRYCLEAR(p) \ -+ (void *)((unsigned long)(p) & (~(unsigned long)(BM_RCR_SIZE << 6))) -+ -+/* Bit-wise logic to convert a ring pointer to a ring index */ -+static inline u8 RCR_PTR2IDX(struct bm_rcr_entry *e) -+{ -+ return ((uintptr_t)e >> 6) & (BM_RCR_SIZE - 
1); -+} -+ -+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */ -+static inline void RCR_INC(struct bm_rcr *rcr) -+{ -+ /* NB: this is odd-looking, but experiments show that it generates -+ * fast code with essentially no branching overheads. We increment to -+ * the next RCR pointer and handle overflow and 'vbit'. */ -+ struct bm_rcr_entry *partial = rcr->cursor + 1; -+ rcr->cursor = RCR_CARRYCLEAR(partial); -+ if (partial != rcr->cursor) -+ rcr->vbit ^= BM_RCR_VERB_VBIT; -+} -+ -+static inline int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode, -+ __maybe_unused enum bm_rcr_cmode cmode) -+{ -+ /* This use of 'register', as well as all other occurrences, is because -+ * it has been observed to generate much faster code with gcc than is -+ * otherwise the case. */ -+ register struct bm_rcr *rcr = &portal->rcr; -+ u32 cfg; -+ u8 pi; -+ -+ rcr->ring = portal->addr.addr_ce + BM_CL_RCR; -+ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1); -+ -+ pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1); -+ rcr->cursor = rcr->ring + pi; -+ rcr->vbit = (bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ? BM_RCR_VERB_VBIT : 0; -+ rcr->available = BM_RCR_SIZE - 1 -+ - bm_cyc_diff(BM_RCR_SIZE, rcr->ci, pi); -+ rcr->ithresh = bm_in(RCR_ITR); -+#ifdef CONFIG_FSL_DPA_CHECKING -+ rcr->busy = 0; -+ rcr->pmode = pmode; -+ rcr->cmode = cmode; -+#endif -+ cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */ -+ bm_out(CFG, cfg); -+ return 0; -+} -+ -+static inline void bm_rcr_finish(struct bm_portal *portal) -+{ -+ register struct bm_rcr *rcr = &portal->rcr; -+ u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1); -+ u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1); -+ DPA_ASSERT(!rcr->busy); -+ if (pi != RCR_PTR2IDX(rcr->cursor)) -+ pr_crit("losing uncommited RCR entries\n"); -+ if (ci != rcr->ci) -+ pr_crit("missing existing RCR completions\n"); -+ if (rcr->ci != RCR_PTR2IDX(rcr->cursor)) -+ pr_crit("RCR destroyed unquiesced\n"); -+} -+ -+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal) -+{ -+ register struct bm_rcr *rcr = &portal->rcr; -+ DPA_ASSERT(!rcr->busy); -+ if (!rcr->available) -+ return NULL; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ rcr->busy = 1; -+#endif -+ dcbz_64(rcr->cursor); -+ return rcr->cursor; -+} -+ -+static inline void bm_rcr_abort(struct bm_portal *portal) -+{ -+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr; -+ DPA_ASSERT(rcr->busy); -+#ifdef CONFIG_FSL_DPA_CHECKING -+ rcr->busy = 0; -+#endif -+} -+ -+static inline struct bm_rcr_entry *bm_rcr_pend_and_next( -+ struct bm_portal *portal, u8 myverb) -+{ -+ register struct bm_rcr *rcr = &portal->rcr; -+ DPA_ASSERT(rcr->busy); -+ DPA_ASSERT(rcr->pmode != bm_rcr_pvb); -+ if (rcr->available == 1) -+ return NULL; -+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit; -+ dcbf_64(rcr->cursor); -+ RCR_INC(rcr); -+ rcr->available--; -+ dcbz_64(rcr->cursor); -+ return rcr->cursor; -+} -+ -+static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb) -+{ -+ register struct bm_rcr *rcr = &portal->rcr; -+ DPA_ASSERT(rcr->busy); -+ DPA_ASSERT(rcr->pmode == bm_rcr_pci); -+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit; -+ RCR_INC(rcr); -+ rcr->available--; -+ hwsync(); -+ bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor)); -+#ifdef CONFIG_FSL_DPA_CHECKING -+ rcr->busy = 0; -+#endif -+} -+ -+static inline void bm_rcr_pce_prefetch(struct bm_portal *portal) -+{ -+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr; -+ DPA_ASSERT(rcr->pmode == bm_rcr_pce); -+ 
bm_cl_invalidate(RCR_PI); -+ bm_cl_touch_rw(RCR_PI); -+} -+ -+static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb) -+{ -+ register struct bm_rcr *rcr = &portal->rcr; -+ DPA_ASSERT(rcr->busy); -+ DPA_ASSERT(rcr->pmode == bm_rcr_pce); -+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit; -+ RCR_INC(rcr); -+ rcr->available--; -+ lwsync(); -+ bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor)); -+#ifdef CONFIG_FSL_DPA_CHECKING -+ rcr->busy = 0; -+#endif -+} -+ -+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb) -+{ -+ register struct bm_rcr *rcr = &portal->rcr; -+ struct bm_rcr_entry *rcursor; -+ DPA_ASSERT(rcr->busy); -+ DPA_ASSERT(rcr->pmode == bm_rcr_pvb); -+ lwsync(); -+ rcursor = rcr->cursor; -+ rcursor->__dont_write_directly__verb = myverb | rcr->vbit; -+ dcbf_64(rcursor); -+ RCR_INC(rcr); -+ rcr->available--; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ rcr->busy = 0; -+#endif -+} -+ -+static inline u8 bm_rcr_cci_update(struct bm_portal *portal) -+{ -+ register struct bm_rcr *rcr = &portal->rcr; -+ u8 diff, old_ci = rcr->ci; -+ DPA_ASSERT(rcr->cmode == bm_rcr_cci); -+ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1); -+ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci); -+ rcr->available += diff; -+ return diff; -+} -+ -+static inline void bm_rcr_cce_prefetch(struct bm_portal *portal) -+{ -+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr; -+ DPA_ASSERT(rcr->cmode == bm_rcr_cce); -+ bm_cl_touch_ro(RCR_CI); -+} -+ -+static inline u8 bm_rcr_cce_update(struct bm_portal *portal) -+{ -+ register struct bm_rcr *rcr = &portal->rcr; -+ u8 diff, old_ci = rcr->ci; -+ DPA_ASSERT(rcr->cmode == bm_rcr_cce); -+ rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1); -+ bm_cl_invalidate(RCR_CI); -+ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci); -+ rcr->available += diff; -+ return diff; -+} -+ -+static inline u8 bm_rcr_get_ithresh(struct bm_portal *portal) -+{ -+ register struct bm_rcr *rcr = &portal->rcr; -+ return rcr->ithresh; -+} -+ -+static inline void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh) -+{ -+ register struct bm_rcr *rcr = &portal->rcr; -+ rcr->ithresh = ithresh; -+ bm_out(RCR_ITR, ithresh); -+} -+ -+static inline u8 bm_rcr_get_avail(struct bm_portal *portal) -+{ -+ register struct bm_rcr *rcr = &portal->rcr; -+ return rcr->available; -+} -+ -+static inline u8 bm_rcr_get_fill(struct bm_portal *portal) -+{ -+ register struct bm_rcr *rcr = &portal->rcr; -+ return BM_RCR_SIZE - 1 - rcr->available; -+} -+ -+ -+/* ------------------------------ */ -+/* --- Management command API --- */ -+ -+static inline int bm_mc_init(struct bm_portal *portal) -+{ -+ register struct bm_mc *mc = &portal->mc; -+ mc->cr = portal->addr.addr_ce + BM_CL_CR; -+ mc->rr = portal->addr.addr_ce + BM_CL_RR0; -+ mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) & -+ BM_MCC_VERB_VBIT) ? 0 : 1; -+ mc->vbit = mc->rridx ? 
BM_MCC_VERB_VBIT : 0; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ mc->state = mc_idle; -+#endif -+ return 0; -+} -+ -+static inline void bm_mc_finish(struct bm_portal *portal) -+{ -+ __maybe_unused register struct bm_mc *mc = &portal->mc; -+ DPA_ASSERT(mc->state == mc_idle); -+#ifdef CONFIG_FSL_DPA_CHECKING -+ if (mc->state != mc_idle) -+ pr_crit("Losing incomplete MC command\n"); -+#endif -+} -+ -+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal) -+{ -+ register struct bm_mc *mc = &portal->mc; -+ DPA_ASSERT(mc->state == mc_idle); -+#ifdef CONFIG_FSL_DPA_CHECKING -+ mc->state = mc_user; -+#endif -+ dcbz_64(mc->cr); -+ return mc->cr; -+} -+ -+static inline void bm_mc_abort(struct bm_portal *portal) -+{ -+ __maybe_unused register struct bm_mc *mc = &portal->mc; -+ DPA_ASSERT(mc->state == mc_user); -+#ifdef CONFIG_FSL_DPA_CHECKING -+ mc->state = mc_idle; -+#endif -+} -+ -+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb) -+{ -+ register struct bm_mc *mc = &portal->mc; -+ struct bm_mc_result *rr = mc->rr + mc->rridx; -+ DPA_ASSERT(mc->state == mc_user); -+ lwsync(); -+ mc->cr->__dont_write_directly__verb = myverb | mc->vbit; -+ dcbf(mc->cr); -+ dcbit_ro(rr); -+#ifdef CONFIG_FSL_DPA_CHECKING -+ mc->state = mc_hw; -+#endif -+} -+ -+static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal) -+{ -+ register struct bm_mc *mc = &portal->mc; -+ struct bm_mc_result *rr = mc->rr + mc->rridx; -+ DPA_ASSERT(mc->state == mc_hw); -+ /* The inactive response register's verb byte always returns zero until -+ * its command is submitted and completed. This includes the valid-bit, -+ * in case you were wondering... */ -+ if (!__raw_readb(&rr->verb)) { -+ dcbit_ro(rr); -+ return NULL; -+ } -+ mc->rridx ^= 1; -+ mc->vbit ^= BM_MCC_VERB_VBIT; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ mc->state = mc_idle; -+#endif -+ return rr; -+} -+ -+ -+/* ------------------------------------- */ -+/* --- Portal interrupt register API --- */ -+ -+static inline int bm_isr_init(__always_unused struct bm_portal *portal) -+{ -+ return 0; -+} -+ -+static inline void bm_isr_finish(__always_unused struct bm_portal *portal) -+{ -+} -+ -+#define SCN_REG(bpid) BM_REG_SCN((bpid) / 32) -+#define SCN_BIT(bpid) (0x80000000 >> (bpid & 31)) -+static inline void bm_isr_bscn_mask(struct bm_portal *portal, u8 bpid, -+ int enable) -+{ -+ u32 val; -+ DPA_ASSERT(bpid < bman_pool_max); -+ /* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */ -+ val = __bm_in(&portal->addr, SCN_REG(bpid)); -+ if (enable) -+ val |= SCN_BIT(bpid); -+ else -+ val &= ~SCN_BIT(bpid); -+ __bm_out(&portal->addr, SCN_REG(bpid), val); -+} -+ -+static inline u32 __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n) -+{ -+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) -+ return __bm_in(&portal->addr, BM_REG_ISR + (n << 6)); -+#else -+ return __bm_in(&portal->addr, BM_REG_ISR + (n << 2)); -+#endif -+} -+ -+static inline void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n, -+ u32 val) -+{ -+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) -+ __bm_out(&portal->addr, BM_REG_ISR + (n << 6), val); -+#else -+ __bm_out(&portal->addr, BM_REG_ISR + (n << 2), val); -+#endif -+} -+ -+/* Buffer Pool Cleanup */ -+static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid) -+{ -+ struct bm_mc_command *bm_cmd; -+ struct bm_mc_result *bm_res; -+ -+ int aq_count = 0; -+ bool stop = false; -+ while (!stop) { -+ /* Acquire buffers until empty */ -+ bm_cmd = bm_mc_start(p); -+ bm_cmd->acquire.bpid = bpid; -+ bm_mc_commit(p, 
-+
-+/* Buffer Pool Cleanup */
-+static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid)
-+{
-+	struct bm_mc_command *bm_cmd;
-+	struct bm_mc_result *bm_res;
-+
-+	int aq_count = 0;
-+	bool stop = false;
-+	while (!stop) {
-+		/* Acquire buffers until empty */
-+		bm_cmd = bm_mc_start(p);
-+		bm_cmd->acquire.bpid = bpid;
-+		bm_mc_commit(p, BM_MCC_VERB_CMD_ACQUIRE | 1);
-+		while (!(bm_res = bm_mc_result(p)))
-+			cpu_relax();
-+		if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
-+			/* Pool is empty */
-+			/* TBD: Should we do a few extra iterations in
-+			   case some other blocks keep buffers 'on deck',
-+			   which may also be problematic */
-+			stop = true;
-+		} else
-+			++aq_count;
-+	}
-+	return 0;
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/bman_private.h
-@@ -0,0 +1,166 @@
-+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ *     * Redistributions of source code must retain the above copyright
-+ *       notice, this list of conditions and the following disclaimer.
-+ *     * Redistributions in binary form must reproduce the above copyright
-+ *       notice, this list of conditions and the following disclaimer in the
-+ *       documentation and/or other materials provided with the distribution.
-+ *     * Neither the name of Freescale Semiconductor nor the
-+ *       names of its contributors may be used to endorse or promote products
-+ *       derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "dpa_sys.h"
-+#include
-+
-+/* Revision info (for errata and feature handling) */
-+#define BMAN_REV10 0x0100
-+#define BMAN_REV20 0x0200
-+#define BMAN_REV21 0x0201
-+#define QBMAN_ANY_PORTAL_IDX 0xffffffff
-+extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */
-+
-+/*
-+ * Global variable: the max portal/pool number that this Bman version supports
-+ */
-+extern u16 bman_pool_max;
-+
-+/* used by CCSR and portal interrupt code */
-+enum bm_isr_reg {
-+	bm_isr_status = 0,
-+	bm_isr_enable = 1,
-+	bm_isr_disable = 2,
-+	bm_isr_inhibit = 3
-+};
-+
-+struct bm_portal_config {
-+	/* Corenet portal addresses;
-+	 * [0]==cache-enabled, [1]==cache-inhibited. */
-+	__iomem void *addr_virt[2];
-+	struct resource addr_phys[2];
-+	/* Allow these to be joined in lists */
-+	struct list_head list;
-+	/* User-visible portal configuration settings */
-+	struct bman_portal_config public_cfg;
-+	/* power management saved data */
-+	u32 saved_isdr;
-+};
-+
-+#ifdef CONFIG_FSL_BMAN_CONFIG
-+/* Hooks from bman_driver.c to bman_config.c */
-+int bman_init_ccsr(struct device_node *node);
-+#endif
-+
-+/* Hooks from bman_driver.c into bman_high.c */
-+struct bman_portal *bman_create_portal(
-+				struct bman_portal *portal,
-+				const struct bm_portal_config *config);
-+struct bman_portal *bman_create_affine_portal(
-+			const struct bm_portal_config *config);
-+struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
-+						int cpu);
-+void bman_destroy_portal(struct bman_portal *bm);
-+
-+const struct bm_portal_config *bman_destroy_affine_portal(void);
-+
-+/* Hooks from fsl_usdpaa.c to bman_driver.c */
-+struct bm_portal_config *bm_get_unused_portal(void);
-+struct bm_portal_config *bm_get_unused_portal_idx(uint32_t idx);
-+void bm_put_unused_portal(struct bm_portal_config *pcfg);
-+void bm_set_liodns(struct bm_portal_config *pcfg);
-+
-+/* Pool logic in the portal driver, during initialisation, needs to know if
-+ * there's access to CCSR or not (if not, it'll cripple the pool allocator). */
-+#ifdef CONFIG_FSL_BMAN_CONFIG
-+int bman_have_ccsr(void);
-+#else
-+#define bman_have_ccsr() 0
-+#endif
-+
-+/* Stockpile build constants. The _LOW value: when bman_acquire() is called and
-+ * the stockpile fill-level is <= _LOW, an acquire is attempted from h/w but it
-+ * might fail (if the buffer pool is depleted). So this value provides some
-+ * "stagger" in that the bman_acquire() function will only fail if lots of bufs
-+ * are requested at once or if h/w has been tried a couple of times without
-+ * luck. The _HIGH value: when bman_release() is called and the stockpile
-+ * fill-level is >= _HIGH, a release is attempted to h/w but it might fail (if
-+ * the release ring is full). So this value provides some "stagger" so that
-+ * ring-access is retried a couple of times prior to the API returning a
-+ * failure. The following *must* be true:
-+ *   BMAN_STOCKPILE_HIGH - BMAN_STOCKPILE_LOW > 8
-+ *     (to avoid thrashing)
-+ *   BMAN_STOCKPILE_SZ >= 16
-+ *     (as the release logic expects to either send 8 buffers to hw prior to
-+ *     adding the given buffers to the stockpile or add the buffers to the
-+ *     stockpile before sending 8 to hw, as the API must be an all-or-nothing
-+ *     success/fail.)
-+ */
-+#define BMAN_STOCKPILE_SZ   16u /* number of bufs in per-pool cache */
-+#define BMAN_STOCKPILE_LOW  2u  /* when fill is <= this, acquire from hw */
-+#define BMAN_STOCKPILE_HIGH 14u /* when fill is >= this, release to hw */
-+
-+/*************************************************/
-+/*   BMan s/w corenet portal, low-level i/face   */
-+/*************************************************/
-+
-+/* Used by all portal interrupt registers except 'inhibit'
-+ * This mask contains all the "irqsource" bits visible to API users
-+ */
-+#define BM_PIRQ_VISIBLE	(BM_PIRQ_RCRI | BM_PIRQ_BSCN)
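[Editor's note: to make the stockpile stagger described above concrete, here is a hedged sketch of the acquire-side decision. The real logic lives in bman_high.c, which is outside this excerpt; 'stockpile', 'fill' and hw_acquire() are invented names standing in for the per-pool cache, its level, and the real portal ring access. Not part of the patch.

	/* Illustrative sketch only: serve 'num' buffers from the
	 * stockpile, topping it up from h/w once if it runs low. */
	static int example_stockpile_acquire(struct bman_pool *pool,
				struct bm_buffer *stockpile, int *fill,
				struct bm_buffer *bufs, int num)
	{
		if (*fill <= BMAN_STOCKPILE_LOW) {
			/* Top up from h/w; failure here is tolerated
			 * because the stockpile may still cover 'num'. */
			int got = hw_acquire(pool, stockpile + *fill,
					     BMAN_STOCKPILE_SZ - *fill);
			if (got > 0)
				*fill += got;
		}
		if (*fill < num)
			return -ENOMEM;	/* all-or-nothing: no partial */
		*fill -= num;
		memcpy(bufs, stockpile + *fill, num * sizeof(*bufs));
		return num;
	}
]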
-+
-+/* These are bm_<reg>_<verb>(). So for example, bm_disable_write() means "write
-+ * the disable register" rather than "disable the ability to write". */
-+#define bm_isr_status_read(bm)		__bm_isr_read(bm, bm_isr_status)
-+#define bm_isr_status_clear(bm, m)	__bm_isr_write(bm, bm_isr_status, m)
-+#define bm_isr_enable_read(bm)		__bm_isr_read(bm, bm_isr_enable)
-+#define bm_isr_enable_write(bm, v)	__bm_isr_write(bm, bm_isr_enable, v)
-+#define bm_isr_disable_read(bm)		__bm_isr_read(bm, bm_isr_disable)
-+#define bm_isr_disable_write(bm, v)	__bm_isr_write(bm, bm_isr_disable, v)
-+#define bm_isr_inhibit(bm)		__bm_isr_write(bm, bm_isr_inhibit, 1)
-+#define bm_isr_uninhibit(bm)		__bm_isr_write(bm, bm_isr_inhibit, 0)
-+
-+#ifdef CONFIG_FSL_BMAN_CONFIG
-+/* Set depletion thresholds associated with a buffer pool. Requires that the
-+ * operating system have access to Bman CCSR (ie. compiled in support and
-+ * run-time access courtesy of the device-tree). */
-+int bm_pool_set(u32 bpid, const u32 *thresholds);
-+#define BM_POOL_THRESH_SW_ENTER 0
-+#define BM_POOL_THRESH_SW_EXIT 1
-+#define BM_POOL_THRESH_HW_ENTER 2
-+#define BM_POOL_THRESH_HW_EXIT 3
-+
-+/* Read the free buffer count for a given buffer pool */
-+u32 bm_pool_free_buffers(u32 bpid);
-+
-+__init int bman_init(void);
-+__init int bman_resource_init(void);
-+
-+const struct bm_portal_config *bman_get_bm_portal_config(
-+						struct bman_portal *portal);
-+
-+/* power management */
-+#ifdef CONFIG_SUSPEND
-+void suspend_unused_bportal(void);
-+void resume_unused_bportal(void);
-+#endif
-+
-+#endif /* CONFIG_FSL_BMAN_CONFIG */
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/bman_test.c
-@@ -0,0 +1,56 @@
-+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ *     * Redistributions of source code must retain the above copyright
-+ *       notice, this list of conditions and the following disclaimer.
-+ *     * Redistributions in binary form must reproduce the above copyright
-+ *       notice, this list of conditions and the following disclaimer in the
-+ *       documentation and/or other materials provided with the distribution.
-+ *     * Neither the name of Freescale Semiconductor nor the
-+ *       names of its contributors may be used to endorse or promote products
-+ *       derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */ -+ -+#include "bman_test.h" -+ -+MODULE_AUTHOR("Geoff Thorpe"); -+MODULE_LICENSE("Dual BSD/GPL"); -+MODULE_DESCRIPTION("Bman testing"); -+ -+static int test_init(void) -+{ -+#ifdef CONFIG_FSL_BMAN_TEST_HIGH -+ int loop = 1; -+ while (loop--) -+ bman_test_high(); -+#endif -+#ifdef CONFIG_FSL_BMAN_TEST_THRESH -+ bman_test_thresh(); -+#endif -+ return 0; -+} -+ -+static void test_exit(void) -+{ -+} -+ -+module_init(test_init); -+module_exit(test_exit); ---- /dev/null -+++ b/drivers/staging/fsl_qbman/bman_test.h -@@ -0,0 +1,44 @@ -+/* Copyright 2008-2011 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+void bman_test_high(void); -+void bman_test_thresh(void); ---- /dev/null -+++ b/drivers/staging/fsl_qbman/bman_test_high.c -@@ -0,0 +1,183 @@ -+/* Copyright 2008-2011 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "bman_test.h"
-+#include "bman_private.h"
-+
-+/*************/
-+/* constants */
-+/*************/
-+
-+#define PORTAL_OPAQUE	((void *)0xf00dbeef)
-+#define POOL_OPAQUE	((void *)0xdeadabba)
-+#define NUM_BUFS	93
-+#define LOOPS		3
-+#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
-+
-+/***************/
-+/* global vars */
-+/***************/
-+
-+static struct bman_pool *pool;
-+static int depleted;
-+static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
-+static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
-+static int bufs_received;
-+
-+/* Predeclare the callback so we can instantiate pool parameters */
-+static void depletion_cb(struct bman_portal *, struct bman_pool *, void *, int);
-+
-+/**********************/
-+/* internal functions */
-+/**********************/
-+
-+static void bufs_init(void)
-+{
-+	int i;
-+	for (i = 0; i < NUM_BUFS; i++)
-+		bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i);
-+	bufs_received = 0;
-+}
-+
-+static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
-+{
-+	if ((bman_ip_rev == BMAN_REV20) || (bman_ip_rev == BMAN_REV21)) {
-+
-+		/* On SoCs with Bman revision 2.0, Bman only respects the 40
-+		 * LS-bits of buffer addresses, masking off the upper 8-bits on
-+		 * release commands. The API provides for 48-bit addresses
-+		 * because some SoCs support all 48-bits. When generating
-+		 * garbage addresses for testing, we either need to zero the
-+		 * upper 8-bits when releasing to Bman (otherwise we'll be
-+		 * disappointed when the buffers we acquire back from Bman
-+		 * don't match), or we need to mask the upper 8-bits off when
-+		 * comparing. We do the latter.
-+ */ -+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) -+ < (bm_buffer_get64(b) & BMAN_TOKEN_MASK)) -+ return -1; -+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) -+ > (bm_buffer_get64(b) & BMAN_TOKEN_MASK)) -+ return 1; -+ } else { -+ if (bm_buffer_get64(a) < bm_buffer_get64(b)) -+ return -1; -+ if (bm_buffer_get64(a) > bm_buffer_get64(b)) -+ return 1; -+ } -+ -+ return 0; -+} -+ -+static void bufs_confirm(void) -+{ -+ int i, j; -+ for (i = 0; i < NUM_BUFS; i++) { -+ int matches = 0; -+ for (j = 0; j < NUM_BUFS; j++) -+ if (!bufs_cmp(&bufs_in[i], &bufs_out[j])) -+ matches++; -+ BUG_ON(matches != 1); -+ } -+} -+ -+/********/ -+/* test */ -+/********/ -+ -+static void depletion_cb(struct bman_portal *__portal, struct bman_pool *__pool, -+ void *pool_ctx, int __depleted) -+{ -+ BUG_ON(__pool != pool); -+ BUG_ON(pool_ctx != POOL_OPAQUE); -+ depleted = __depleted; -+} -+ -+void bman_test_high(void) -+{ -+ struct bman_pool_params pparams = { -+ .flags = BMAN_POOL_FLAG_DEPLETION | BMAN_POOL_FLAG_DYNAMIC_BPID, -+ .cb = depletion_cb, -+ .cb_ctx = POOL_OPAQUE, -+ }; -+ int i, loops = LOOPS; -+ struct bm_buffer tmp_buf; -+ -+ bufs_init(); -+ -+ pr_info("BMAN: --- starting high-level test ---\n"); -+ -+ pool = bman_new_pool(&pparams); -+ BUG_ON(!pool); -+ -+ /*******************/ -+ /* Release buffers */ -+ /*******************/ -+do_loop: -+ i = 0; -+ while (i < NUM_BUFS) { -+ u32 flags = BMAN_RELEASE_FLAG_WAIT; -+ int num = 8; -+ if ((i + num) > NUM_BUFS) -+ num = NUM_BUFS - i; -+ if ((i + num) == NUM_BUFS) -+ flags |= BMAN_RELEASE_FLAG_WAIT_SYNC; -+ if (bman_release(pool, bufs_in + i, num, flags)) -+ panic("bman_release() failed\n"); -+ i += num; -+ } -+ -+ /*******************/ -+ /* Acquire buffers */ -+ /*******************/ -+ while (i > 0) { -+ int tmp, num = 8; -+ if (num > i) -+ num = i; -+ tmp = bman_acquire(pool, bufs_out + i - num, num, 0); -+ BUG_ON(tmp != num); -+ i -= num; -+ } -+ -+ i = bman_acquire(pool, &tmp_buf, 1, 0); -+ BUG_ON(i > 0); -+ -+ bufs_confirm(); -+ -+ if (--loops) -+ goto do_loop; -+ -+ /************/ -+ /* Clean up */ -+ /************/ -+ bman_free_pool(pool); -+ pr_info("BMAN: --- finished high-level test ---\n"); -+} ---- /dev/null -+++ b/drivers/staging/fsl_qbman/bman_test_thresh.c -@@ -0,0 +1,196 @@ -+/* Copyright 2010-2011 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "bman_test.h"
-+
-+/* Test constants */
-+#define TEST_NUMBUFS 129728
-+#define TEST_EXIT    129536
-+#define TEST_ENTRY   129024
-+
-+struct affine_test_data {
-+	struct task_struct *t;
-+	int cpu;
-+	int expect_affinity;
-+	int drain;
-+	int num_enter;
-+	int num_exit;
-+	struct list_head node;
-+	struct completion wakethread;
-+	struct completion wakeparent;
-+};
-+
-+static void cb_depletion(struct bman_portal *portal,
-+			struct bman_pool *pool,
-+			void *opaque,
-+			int depleted)
-+{
-+	struct affine_test_data *data = opaque;
-+	int c = smp_processor_id();
-+	pr_info("cb_depletion: bpid=%d, depleted=%d, cpu=%d, original=%d\n",
-+		bman_get_params(pool)->bpid, !!depleted, c, data->cpu);
-+	/* We should be executing on the CPU of the thread that owns the pool,
-+	 * if that CPU has an affine portal (ie. it isn't slaved). */
-+	BUG_ON((c != data->cpu) && data->expect_affinity);
-+	BUG_ON((c == data->cpu) && !data->expect_affinity);
-+	if (depleted)
-+		data->num_enter++;
-+	else
-+		data->num_exit++;
-+}
-+
-+/* Params used to set up a pool, this also dynamically allocates a BPID */
-+static const struct bman_pool_params params_nocb = {
-+	.flags = BMAN_POOL_FLAG_DYNAMIC_BPID | BMAN_POOL_FLAG_THRESH,
-+	.thresholds = { TEST_ENTRY, TEST_EXIT, 0, 0 }
-+};
-+
-+/* Params used to set up each cpu's pool with callbacks enabled */
-+static struct bman_pool_params params_cb = {
-+	.bpid = 0, /* will be replaced to match pool_nocb */
-+	.flags = BMAN_POOL_FLAG_DEPLETION,
-+	.cb = cb_depletion
-+};
-+
-+static struct bman_pool *pool_nocb;
-+static LIST_HEAD(threads);
-+
-+static int affine_test(void *__data)
-+{
-+	struct bman_pool *pool;
-+	struct affine_test_data *data = __data;
-+	struct bman_pool_params my_params = params_cb;
-+
-+	pr_info("thread %d: starting\n", data->cpu);
-+	/* create the pool */
-+	my_params.cb_ctx = data;
-+	pool = bman_new_pool(&my_params);
-+	BUG_ON(!pool);
-+	complete(&data->wakeparent);
-+	wait_for_completion(&data->wakethread);
-+	init_completion(&data->wakethread);
-+
-+	/* if we're the drainer, we get signalled for that */
-+	if (data->drain) {
-+		struct bm_buffer buf;
-+		int ret;
-+		pr_info("thread %d: draining...\n", data->cpu);
-+		do {
-+			ret = bman_acquire(pool, &buf, 1, 0);
-+		} while (ret > 0);
-+		pr_info("thread %d: draining done.\n", data->cpu);
-+		complete(&data->wakeparent);
-+		wait_for_completion(&data->wakethread);
-+		init_completion(&data->wakethread);
-+	}
-+
-+	/* cleanup */
-+	bman_free_pool(pool);
-+	while (!kthread_should_stop())
-+		cpu_relax();
-+	pr_info("thread %d: exiting\n", data->cpu);
-+	return 0;
-+}
-+
-+static struct affine_test_data *start_affine_test(int cpu, int drain)
-+{
-+	struct affine_test_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
-+
-+	if (!data)
-+		return NULL;
-+	data->cpu = cpu;
-+	data->expect_affinity = cpumask_test_cpu(cpu, bman_affine_cpus());
-+	data->drain = drain;
-+	data->num_enter = 0;
-+	data->num_exit = 0;
-+	init_completion(&data->wakethread);
-+
init_completion(&data->wakeparent); -+ list_add_tail(&data->node, &threads); -+ data->t = kthread_create(affine_test, data, "threshtest%d", cpu); -+ BUG_ON(IS_ERR(data->t)); -+ kthread_bind(data->t, cpu); -+ wake_up_process(data->t); -+ return data; -+} -+ -+void bman_test_thresh(void) -+{ -+ int loop = TEST_NUMBUFS; -+ int ret, num_cpus = 0; -+ struct affine_test_data *data, *drainer = NULL; -+ -+ pr_info("bman_test_thresh: start\n"); -+ -+ /* allocate a BPID and seed it */ -+ pool_nocb = bman_new_pool(¶ms_nocb); -+ BUG_ON(!pool_nocb); -+ while (loop--) { -+ struct bm_buffer buf; -+ bm_buffer_set64(&buf, 0x0badbeef + loop); -+ ret = bman_release(pool_nocb, &buf, 1, -+ BMAN_RELEASE_FLAG_WAIT); -+ BUG_ON(ret); -+ } -+ while (!bman_rcr_is_empty()) -+ cpu_relax(); -+ pr_info("bman_test_thresh: buffers are in\n"); -+ -+ /* create threads and wait for them to create pools */ -+ params_cb.bpid = bman_get_params(pool_nocb)->bpid; -+ for_each_cpu(loop, cpu_online_mask) { -+ data = start_affine_test(loop, drainer ? 0 : 1); -+ BUG_ON(!data); -+ if (!drainer) -+ drainer = data; -+ num_cpus++; -+ wait_for_completion(&data->wakeparent); -+ } -+ -+ /* signal the drainer to start draining */ -+ complete(&drainer->wakethread); -+ wait_for_completion(&drainer->wakeparent); -+ init_completion(&drainer->wakeparent); -+ -+ /* tear down */ -+ list_for_each_entry_safe(data, drainer, &threads, node) { -+ complete(&data->wakethread); -+ ret = kthread_stop(data->t); -+ BUG_ON(ret); -+ list_del(&data->node); -+ /* check that we get the expected callbacks (and no others) */ -+ BUG_ON(data->num_enter != 1); -+ BUG_ON(data->num_exit != 0); -+ kfree(data); -+ } -+ bman_free_pool(pool_nocb); -+ -+ pr_info("bman_test_thresh: done\n"); -+} ---- /dev/null -+++ b/drivers/staging/fsl_qbman/dpa_alloc.c -@@ -0,0 +1,706 @@ -+/* Copyright 2009-2012 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "dpa_sys.h"
-+#include
-+#include
-+
-+/* Qman and Bman APIs are front-ends to the common code; */
-+
-+static DECLARE_DPA_ALLOC(bpalloc); /* BPID allocator */
-+static DECLARE_DPA_ALLOC(fqalloc); /* FQID allocator */
-+static DECLARE_DPA_ALLOC(qpalloc); /* pool-channel allocator */
-+static DECLARE_DPA_ALLOC(cgralloc); /* CGR ID allocator */
-+static DECLARE_DPA_ALLOC(ceetm0_challoc); /* CEETM Channel ID allocator */
-+static DECLARE_DPA_ALLOC(ceetm0_lfqidalloc); /* CEETM LFQID allocator */
-+static DECLARE_DPA_ALLOC(ceetm1_challoc); /* CEETM Channel ID allocator */
-+static DECLARE_DPA_ALLOC(ceetm1_lfqidalloc); /* CEETM LFQID allocator */
-+
-+/* This is a sort-of-conditional dpa_alloc_free() routine. Eg. when releasing
-+ * FQIDs (probably from user-space), it can filter out those that aren't in the
-+ * OOS state (better to leak a h/w resource than to crash). This function
-+ * returns the number of invalid IDs that were not released. */
-+static u32 release_id_range(struct dpa_alloc *alloc, u32 id, u32 count,
-+				int (*is_valid)(u32 id))
-+{
-+	int valid_mode = 0;
-+	u32 loop = id, total_invalid = 0;
-+	while (loop < (id + count)) {
-+		int isvalid = is_valid ? is_valid(loop) : 1;
-+		if (!valid_mode) {
-+			/* We're looking for a valid ID to terminate an invalid
-+			 * range */
-+			if (isvalid) {
-+				/* We finished a range of invalid IDs, a valid
-+				 * range is now underway */
-+				valid_mode = 1;
-+				count -= (loop - id);
-+				id = loop;
-+			} else
-+				total_invalid++;
-+		} else {
-+			/* We're looking for an invalid ID to terminate a
-+			 * valid range */
-+			if (!isvalid) {
-+				/* Release the range of valid IDs, an invalid
-+				 * range is now underway */
-+				if (loop > id)
-+					dpa_alloc_free(alloc, id, loop - id);
-+				valid_mode = 0;
-+			}
-+		}
-+		loop++;
-+	}
-+	/* Release any unterminated range of valid IDs */
-+	if (valid_mode && count)
-+		dpa_alloc_free(alloc, id, count);
-+	return total_invalid;
-+}
-+
-+/* BPID allocator front-end */
-+
-+int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial)
-+{
-+	return dpa_alloc_new(&bpalloc, result, count, align, partial);
-+}
-+EXPORT_SYMBOL(bman_alloc_bpid_range);
-+
-+static int bp_cleanup(u32 bpid)
-+{
-+	return bman_shutdown_pool(bpid) == 0;
-+}
-+void bman_release_bpid_range(u32 bpid, u32 count)
-+{
-+	u32 total_invalid = release_id_range(&bpalloc, bpid, count, bp_cleanup);
-+	if (total_invalid)
-+		pr_err("BPID range [%d..%d] (%d) had %d leaks\n",
-+			bpid, bpid + count - 1, count, total_invalid);
-+}
-+EXPORT_SYMBOL(bman_release_bpid_range);
-+
-+void bman_seed_bpid_range(u32 bpid, u32 count)
-+{
-+	dpa_alloc_seed(&bpalloc, bpid, count);
-+}
-+EXPORT_SYMBOL(bman_seed_bpid_range);
-+
-+int bman_reserve_bpid_range(u32 bpid, u32 count)
-+{
-+	return dpa_alloc_reserve(&bpalloc, bpid, count);
-+}
-+EXPORT_SYMBOL(bman_reserve_bpid_range);
-+
-+
-+/* FQID allocator front-end */
-+
-+int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
-+{
-+	return dpa_alloc_new(&fqalloc, result,
count, align, partial); -+} -+EXPORT_SYMBOL(qman_alloc_fqid_range); -+ -+static int fq_cleanup(u32 fqid) -+{ -+ return qman_shutdown_fq(fqid) == 0; -+} -+void qman_release_fqid_range(u32 fqid, u32 count) -+{ -+ u32 total_invalid = release_id_range(&fqalloc, fqid, count, fq_cleanup); -+ if (total_invalid) -+ pr_err("FQID range [%d..%d] (%d) had %d leaks\n", -+ fqid, fqid + count - 1, count, total_invalid); -+} -+EXPORT_SYMBOL(qman_release_fqid_range); -+ -+int qman_reserve_fqid_range(u32 fqid, u32 count) -+{ -+ return dpa_alloc_reserve(&fqalloc, fqid, count); -+} -+EXPORT_SYMBOL(qman_reserve_fqid_range); -+ -+void qman_seed_fqid_range(u32 fqid, u32 count) -+{ -+ dpa_alloc_seed(&fqalloc, fqid, count); -+} -+EXPORT_SYMBOL(qman_seed_fqid_range); -+ -+/* Pool-channel allocator front-end */ -+ -+int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial) -+{ -+ return dpa_alloc_new(&qpalloc, result, count, align, partial); -+} -+EXPORT_SYMBOL(qman_alloc_pool_range); -+ -+static int qpool_cleanup(u32 qp) -+{ -+ /* We query all FQDs starting from -+ * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs -+ * whose destination channel is the pool-channel being released. -+ * When a non-OOS FQD is found we attempt to clean it up */ -+ struct qman_fq fq = { -+ .fqid = 1 -+ }; -+ int err; -+ do { -+ struct qm_mcr_queryfq_np np; -+ err = qman_query_fq_np(&fq, &np); -+ if (err) -+ /* FQID range exceeded, found no problems */ -+ return 1; -+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { -+ struct qm_fqd fqd; -+ err = qman_query_fq(&fq, &fqd); -+ BUG_ON(err); -+ if (fqd.dest.channel == qp) { -+ /* The channel is the FQ's target, clean it */ -+ if (qman_shutdown_fq(fq.fqid) != 0) -+ /* Couldn't shut down the FQ -+ so the pool must be leaked */ -+ return 0; -+ } -+ } -+ /* Move to the next FQID */ -+ fq.fqid++; -+ } while (1); -+} -+void qman_release_pool_range(u32 qp, u32 count) -+{ -+ u32 total_invalid = release_id_range(&qpalloc, qp, -+ count, qpool_cleanup); -+ if (total_invalid) { -+ /* Pool channels are almost always used individually */ -+ if (count == 1) -+ pr_err("Pool channel 0x%x had %d leaks\n", -+ qp, total_invalid); -+ else -+ pr_err("Pool channels [%d..%d] (%d) had %d leaks\n", -+ qp, qp + count - 1, count, total_invalid); -+ } -+} -+EXPORT_SYMBOL(qman_release_pool_range); -+ -+ -+void qman_seed_pool_range(u32 poolid, u32 count) -+{ -+ dpa_alloc_seed(&qpalloc, poolid, count); -+ -+} -+EXPORT_SYMBOL(qman_seed_pool_range); -+ -+int qman_reserve_pool_range(u32 poolid, u32 count) -+{ -+ return dpa_alloc_reserve(&qpalloc, poolid, count); -+} -+EXPORT_SYMBOL(qman_reserve_pool_range); -+ -+ -+/* CGR ID allocator front-end */ -+ -+int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial) -+{ -+ return dpa_alloc_new(&cgralloc, result, count, align, partial); -+} -+EXPORT_SYMBOL(qman_alloc_cgrid_range); -+ -+static int cqr_cleanup(u32 cgrid) -+{ -+ /* We query all FQDs starting from -+ * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs -+ * whose CGR is the CGR being released. 
-+ */
-+	struct qman_fq fq = {
-+		.fqid = 1
-+	};
-+	int err;
-+	do {
-+		struct qm_mcr_queryfq_np np;
-+		err = qman_query_fq_np(&fq, &np);
-+		if (err)
-+			/* FQID range exceeded, found no problems */
-+			return 1;
-+		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
-+			struct qm_fqd fqd;
-+			err = qman_query_fq(&fq, &fqd);
-+			BUG_ON(err);
-+			if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
-+			    (fqd.cgid == cgrid)) {
-+				pr_err("CGRID 0x%x is being used by FQID 0x%x,"
-+					" CGR will be leaked\n",
-+					cgrid, fq.fqid);
-+				return 1;
-+			}
-+		}
-+		/* Move to the next FQID */
-+		fq.fqid++;
-+	} while (1);
-+}
-+
-+void qman_release_cgrid_range(u32 cgrid, u32 count)
-+{
-+	u32 total_invalid = release_id_range(&cgralloc, cgrid,
-+					     count, cqr_cleanup);
-+	if (total_invalid)
-+		pr_err("CGRID range [%d..%d] (%d) had %d leaks\n",
-+			cgrid, cgrid + count - 1, count, total_invalid);
-+}
-+EXPORT_SYMBOL(qman_release_cgrid_range);
-+
-+void qman_seed_cgrid_range(u32 cgrid, u32 count)
-+{
-+	dpa_alloc_seed(&cgralloc, cgrid, count);
-+
-+}
-+EXPORT_SYMBOL(qman_seed_cgrid_range);
-+
-+/* CEETM CHANNEL ID allocator front-end */
-+int qman_alloc_ceetm0_channel_range(u32 *result, u32 count, u32 align,
-+				    int partial)
-+{
-+	return dpa_alloc_new(&ceetm0_challoc, result, count, align, partial);
-+}
-+EXPORT_SYMBOL(qman_alloc_ceetm0_channel_range);
-+
-+int qman_alloc_ceetm1_channel_range(u32 *result, u32 count, u32 align,
-+				    int partial)
-+{
-+	return dpa_alloc_new(&ceetm1_challoc, result, count, align, partial);
-+}
-+EXPORT_SYMBOL(qman_alloc_ceetm1_channel_range);
-+
-+void qman_release_ceetm0_channel_range(u32 channelid, u32 count)
-+{
-+	u32 total_invalid;
-+
-+	total_invalid = release_id_range(&ceetm0_challoc, channelid, count,
-+					 NULL);
-+	if (total_invalid)
-+		pr_err("CEETM channel range [%d..%d] (%d) had %d leaks\n",
-+			channelid, channelid + count - 1, count, total_invalid);
-+}
-+EXPORT_SYMBOL(qman_release_ceetm0_channel_range);
-+
-+void qman_seed_ceetm0_channel_range(u32 channelid, u32 count)
-+{
-+	dpa_alloc_seed(&ceetm0_challoc, channelid, count);
-+
-+}
-+EXPORT_SYMBOL(qman_seed_ceetm0_channel_range);
-+
-+void qman_release_ceetm1_channel_range(u32 channelid, u32 count)
-+{
-+	u32 total_invalid;
-+	total_invalid = release_id_range(&ceetm1_challoc, channelid, count,
-+					 NULL);
-+	if (total_invalid)
-+		pr_err("CEETM channel range [%d..%d] (%d) had %d leaks\n",
-+			channelid, channelid + count - 1, count, total_invalid);
-+}
-+EXPORT_SYMBOL(qman_release_ceetm1_channel_range);
-+
-+void qman_seed_ceetm1_channel_range(u32 channelid, u32 count)
-+{
-+	dpa_alloc_seed(&ceetm1_challoc, channelid, count);
-+
-+}
-+EXPORT_SYMBOL(qman_seed_ceetm1_channel_range);
-+
-+/* CEETM LFQID allocator front-end */
-+int qman_alloc_ceetm0_lfqid_range(u32 *result, u32 count, u32 align,
-+				  int partial)
-+{
-+	return dpa_alloc_new(&ceetm0_lfqidalloc, result, count, align, partial);
-+}
-+EXPORT_SYMBOL(qman_alloc_ceetm0_lfqid_range);
-+
-+int qman_alloc_ceetm1_lfqid_range(u32 *result, u32 count, u32 align,
-+				  int partial)
-+{
-+	return dpa_alloc_new(&ceetm1_lfqidalloc, result, count, align, partial);
-+}
-+EXPORT_SYMBOL(qman_alloc_ceetm1_lfqid_range);
-+
-+void qman_release_ceetm0_lfqid_range(u32 lfqid, u32 count)
-+{
-+	u32 total_invalid;
-+
-+	total_invalid = release_id_range(&ceetm0_lfqidalloc, lfqid, count,
-+					 NULL);
-+	if (total_invalid)
-+		pr_err("CEETM LFQID range [0x%x..0x%x] (%d) had %d leaks\n",
-+			lfqid, lfqid + count - 1, count, total_invalid);
-+}
-+EXPORT_SYMBOL(qman_release_ceetm0_lfqid_range);
-+
-+void
qman_seed_ceetm0_lfqid_range(u32 lfqid, u32 count) -+{ -+ dpa_alloc_seed(&ceetm0_lfqidalloc, lfqid, count); -+ -+} -+EXPORT_SYMBOL(qman_seed_ceetm0_lfqid_range); -+ -+void qman_release_ceetm1_lfqid_range(u32 lfqid, u32 count) -+{ -+ u32 total_invalid; -+ -+ total_invalid = release_id_range(&ceetm1_lfqidalloc, lfqid, count, -+ NULL); -+ if (total_invalid) -+ pr_err("CEETM LFQID range [0x%x..0x%x] (%d) had %d leaks\n", -+ lfqid, lfqid + count - 1, count, total_invalid); -+} -+EXPORT_SYMBOL(qman_release_ceetm1_lfqid_range); -+ -+void qman_seed_ceetm1_lfqid_range(u32 lfqid, u32 count) -+{ -+ dpa_alloc_seed(&ceetm1_lfqidalloc, lfqid, count); -+ -+} -+EXPORT_SYMBOL(qman_seed_ceetm1_lfqid_range); -+ -+ -+/* Everything else is the common backend to all the allocators */ -+ -+/* The allocator is a (possibly-empty) list of these; */ -+struct alloc_node { -+ struct list_head list; -+ u32 base; -+ u32 num; -+ /* refcount and is_alloced are only set -+ when the node is in the used list */ -+ unsigned int refcount; -+ int is_alloced; -+}; -+ -+/* #define DPA_ALLOC_DEBUG */ -+ -+#ifdef DPA_ALLOC_DEBUG -+#define DPRINT pr_info -+static void DUMP(struct dpa_alloc *alloc) -+{ -+ int off = 0; -+ char buf[256]; -+ struct alloc_node *p; -+ pr_info("Free Nodes\n"); -+ list_for_each_entry(p, &alloc->free, list) { -+ if (off < 255) -+ off += snprintf(buf + off, 255-off, "{%d,%d}", -+ p->base, p->base + p->num - 1); -+ } -+ pr_info("%s\n", buf); -+ -+ off = 0; -+ pr_info("Used Nodes\n"); -+ list_for_each_entry(p, &alloc->used, list) { -+ if (off < 255) -+ off += snprintf(buf + off, 255-off, "{%d,%d}", -+ p->base, p->base + p->num - 1); -+ } -+ pr_info("%s\n", buf); -+ -+ -+ -+} -+#else -+#define DPRINT(x...) -+#define DUMP(a) -+#endif -+ -+int dpa_alloc_new(struct dpa_alloc *alloc, u32 *result, u32 count, u32 align, -+ int partial) -+{ -+ struct alloc_node *i = NULL, *next_best = NULL, *used_node = NULL; -+ u32 base, next_best_base = 0, num = 0, next_best_num = 0; -+ struct alloc_node *margin_left, *margin_right; -+ -+ *result = (u32)-1; -+ DPRINT("alloc_range(%d,%d,%d)\n", count, align, partial); -+ DUMP(alloc); -+ /* If 'align' is 0, it should behave as though it was 1 */ -+ if (!align) -+ align = 1; -+ margin_left = kmalloc(sizeof(*margin_left), GFP_KERNEL); -+ if (!margin_left) -+ goto err; -+ margin_right = kmalloc(sizeof(*margin_right), GFP_KERNEL); -+ if (!margin_right) { -+ kfree(margin_left); -+ goto err; -+ } -+ spin_lock_irq(&alloc->lock); -+ list_for_each_entry(i, &alloc->free, list) { -+ base = (i->base + align - 1) / align; -+ base *= align; -+ if ((base - i->base) >= i->num) -+ /* alignment is impossible, regardless of count */ -+ continue; -+ num = i->num - (base - i->base); -+ if (num >= count) { -+ /* this one will do nicely */ -+ num = count; -+ goto done; -+ } -+ if (num > next_best_num) { -+ next_best = i; -+ next_best_base = base; -+ next_best_num = num; -+ } -+ } -+ if (partial && next_best) { -+ i = next_best; -+ base = next_best_base; -+ num = next_best_num; -+ } else -+ i = NULL; -+done: -+ if (i) { -+ if (base != i->base) { -+ margin_left->base = i->base; -+ margin_left->num = base - i->base; -+ list_add_tail(&margin_left->list, &i->list); -+ } else -+ kfree(margin_left); -+ if ((base + num) < (i->base + i->num)) { -+ margin_right->base = base + num; -+ margin_right->num = (i->base + i->num) - -+ (base + num); -+ list_add(&margin_right->list, &i->list); -+ } else -+ kfree(margin_right); -+ list_del(&i->list); -+ kfree(i); -+ *result = base; -+ } else { -+ spin_unlock_irq(&alloc->lock); 
-+ kfree(margin_left); -+ kfree(margin_right); -+ } -+ -+err: -+ DPRINT("returning %d\n", i ? num : -ENOMEM); -+ DUMP(alloc); -+ if (!i) -+ return -ENOMEM; -+ -+ /* Add the allocation to the used list with a refcount of 1 */ -+ used_node = kmalloc(sizeof(*used_node), GFP_KERNEL); -+ if (!used_node) { -+ spin_unlock_irq(&alloc->lock); -+ return -ENOMEM; -+ } -+ used_node->base = *result; -+ used_node->num = num; -+ used_node->refcount = 1; -+ used_node->is_alloced = 1; -+ list_add_tail(&used_node->list, &alloc->used); -+ spin_unlock_irq(&alloc->lock); -+ return (int)num; -+} -+ -+/* Allocate the list node using GFP_ATOMIC, because we *really* want to avoid -+ * forcing error-handling on to users in the deallocation path. */ -+static void _dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count) -+{ -+ struct alloc_node *i, *node = kmalloc(sizeof(*node), GFP_ATOMIC); -+ BUG_ON(!node); -+ DPRINT("release_range(%d,%d)\n", base_id, count); -+ DUMP(alloc); -+ BUG_ON(!count); -+ spin_lock_irq(&alloc->lock); -+ -+ -+ node->base = base_id; -+ node->num = count; -+ list_for_each_entry(i, &alloc->free, list) { -+ if (i->base >= node->base) { -+ /* BUG_ON(any overlapping) */ -+ BUG_ON(i->base < (node->base + node->num)); -+ list_add_tail(&node->list, &i->list); -+ goto done; -+ } -+ } -+ list_add_tail(&node->list, &alloc->free); -+done: -+ /* Merge to the left */ -+ i = list_entry(node->list.prev, struct alloc_node, list); -+ if (node->list.prev != &alloc->free) { -+ BUG_ON((i->base + i->num) > node->base); -+ if ((i->base + i->num) == node->base) { -+ node->base = i->base; -+ node->num += i->num; -+ list_del(&i->list); -+ kfree(i); -+ } -+ } -+ /* Merge to the right */ -+ i = list_entry(node->list.next, struct alloc_node, list); -+ if (node->list.next != &alloc->free) { -+ BUG_ON((node->base + node->num) > i->base); -+ if ((node->base + node->num) == i->base) { -+ node->num += i->num; -+ list_del(&i->list); -+ kfree(i); -+ } -+ } -+ spin_unlock_irq(&alloc->lock); -+ DUMP(alloc); -+} -+ -+ -+void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count) -+{ -+ struct alloc_node *i = NULL; -+ spin_lock_irq(&alloc->lock); -+ -+ /* First find the node in the used list and decrement its ref count */ -+ list_for_each_entry(i, &alloc->used, list) { -+ if (i->base == base_id && i->num == count) { -+ --i->refcount; -+ if (i->refcount == 0) { -+ list_del(&i->list); -+ spin_unlock_irq(&alloc->lock); -+ if (i->is_alloced) -+ _dpa_alloc_free(alloc, base_id, count); -+ kfree(i); -+ return; -+ } -+ spin_unlock_irq(&alloc->lock); -+ return; -+ } -+ } -+ /* Couldn't find the allocation */ -+ pr_err("Attempt to free ID 0x%x COUNT %d that wasn't alloc'd or reserved\n", -+ base_id, count); -+ spin_unlock_irq(&alloc->lock); -+} -+ -+void dpa_alloc_seed(struct dpa_alloc *alloc, u32 base_id, u32 count) -+{ -+ /* Same as free but no previous allocation checking is needed */ -+ _dpa_alloc_free(alloc, base_id, count); -+} -+ -+ -+int dpa_alloc_reserve(struct dpa_alloc *alloc, u32 base, u32 num) -+{ -+ struct alloc_node *i = NULL, *used_node; -+ -+ DPRINT("alloc_reserve(%d,%d)\n", base, num); -+ DUMP(alloc); -+ -+ spin_lock_irq(&alloc->lock); -+ -+ /* Check for the node in the used list. 
If found, increase its refcount */
-+	list_for_each_entry(i, &alloc->used, list) {
-+		if ((i->base == base) && (i->num == num)) {
-+			++i->refcount;
-+			spin_unlock_irq(&alloc->lock);
-+			return 0;
-+		}
-+		if ((base >= i->base) && (base < (i->base + i->num))) {
-+			/* This is an attempt to reserve a region that was
-+			   already reserved or alloced with a different
-+			   base or num */
-+			pr_err("Cannot reserve %d - %d, it overlaps with"
-+				" existing reservation from %d - %d\n",
-+				base, base + num - 1, i->base,
-+				i->base + i->num - 1);
-+			spin_unlock_irq(&alloc->lock);
-+			return -1;
-+		}
-+	}
-+	/* Check to make sure this ID isn't in the free list */
-+	list_for_each_entry(i, &alloc->free, list) {
-+		if ((base >= i->base) && (base < (i->base + i->num))) {
-+			/* yep, the reservation is within this node */
-+			pr_err("Cannot reserve %d - %d, it overlaps with"
-+				" free range %d - %d and must be alloced\n",
-+				base, base + num - 1,
-+				i->base, i->base + i->num - 1);
-+			spin_unlock_irq(&alloc->lock);
-+			return -1;
-+		}
-+	}
-+	/* Add the allocation to the used list with a refcount of 1 */
-+	used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
-+	if (!used_node) {
-+		spin_unlock_irq(&alloc->lock);
-+		return -ENOMEM;
-+
-+	}
-+	used_node->base = base;
-+	used_node->num = num;
-+	used_node->refcount = 1;
-+	used_node->is_alloced = 0;
-+	list_add_tail(&used_node->list, &alloc->used);
-+	spin_unlock_irq(&alloc->lock);
-+	return 0;
-+}
-+
-+
-+int dpa_alloc_pop(struct dpa_alloc *alloc, u32 *result, u32 *count)
-+{
-+	struct alloc_node *i = NULL;
-+	DPRINT("alloc_pop()\n");
-+	DUMP(alloc);
-+	spin_lock_irq(&alloc->lock);
-+	if (!list_empty(&alloc->free)) {
-+		i = list_entry(alloc->free.next, struct alloc_node, list);
-+		list_del(&i->list);
-+	}
-+	spin_unlock_irq(&alloc->lock);
-+	DPRINT("returning %d\n", i ? 0 : -ENOMEM);
-+	DUMP(alloc);
-+	if (!i)
-+		return -ENOMEM;
-+	*result = i->base;
-+	*count = i->num;
-+	kfree(i);
-+	return 0;
-+}
-+
-+int dpa_alloc_check(struct dpa_alloc *list_head, u32 item)
-+{
-+	struct alloc_node *i = NULL;
-+	int res = 0;
-+	DPRINT("alloc_check()\n");
-+	spin_lock_irq(&list_head->lock);
-+
-+	list_for_each_entry(i, &list_head->free, list) {
-+		if ((item >= i->base) && (item < (i->base + i->num))) {
-+			res = 1;
-+			break;
-+		}
-+	}
-+	spin_unlock_irq(&list_head->lock);
-+	return res;
-+}
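[Editor's note: taken together, the allocator supports seed (make IDs allocatable), reserve (pin a fixed range, refcounted), alloc and free; freeing an alloc'd range optionally runs a per-ID cleanup via release_id_range(). A hedged usage sketch through the BPID front-end defined earlier in this file, with error handling trimmed; not part of the patch.

	/* Illustrative sketch only: seed, allocate, then release a BPID. */
	u32 bpid;

	bman_seed_bpid_range(32, 16);	/* BPIDs 32..47 become allocatable */
	if (bman_alloc_bpid_range(&bpid, 1, 0, 0) < 1)
		return -ENOMEM;		/* allocator empty or fragmented */
	/* ... use the pool ... */
	bman_release_bpid_range(bpid, 1); /* runs bp_cleanup()/shutdown */
]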
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/dpa_sys.h
-@@ -0,0 +1,259 @@
-+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ *     * Redistributions of source code must retain the above copyright
-+ *       notice, this list of conditions and the following disclaimer.
-+ *     * Redistributions in binary form must reproduce the above copyright
-+ *       notice, this list of conditions and the following disclaimer in the
-+ *       documentation and/or other materials provided with the distribution.
-+ *     * Neither the name of Freescale Semiconductor nor the
-+ *       names of its contributors may be used to endorse or promote products
-+ *       derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef DPA_SYS_H
-+#define DPA_SYS_H
-+
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+
-+#include
-+
-+/* When copying aligned words or shorts, try to avoid memcpy() */
-+#define CONFIG_TRY_BETTER_MEMCPY
-+
-+/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
-+#define DPA_PORTAL_CE 0
-+#define DPA_PORTAL_CI 1
-+
-+/***********************/
-+/* Misc inline assists */
-+/***********************/
-+
-+#if defined CONFIG_PPC32
-+#include "dpa_sys_ppc32.h"
-+#elif defined CONFIG_PPC64
-+#include "dpa_sys_ppc64.h"
-+#elif defined CONFIG_ARM
-+#include "dpa_sys_arm.h"
-+#elif defined CONFIG_ARM64
-+#include "dpa_sys_arm64.h"
-+#endif
-+
-+
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+#define DPA_ASSERT(x) \
-+	do { \
-+		if (!(x)) { \
-+			pr_crit("ASSERT: (%s:%d) %s\n", __FILE__, __LINE__, \
-+				__stringify_1(x)); \
-+			dump_stack(); \
-+			panic("assertion failure"); \
-+		} \
-+	} while (0)
-+#else
-+#define DPA_ASSERT(x)
-+#endif
-+
-+/* memcpy() stuff - when you know alignments in advance */
-+#ifdef CONFIG_TRY_BETTER_MEMCPY
-+static inline void copy_words(void *dest, const void *src, size_t sz)
-+{
-+	u32 *__dest = dest;
-+	const u32 *__src = src;
-+	size_t __sz = sz >> 2;
-+	BUG_ON((unsigned long)dest & 0x3);
-+	BUG_ON((unsigned long)src & 0x3);
-+	BUG_ON(sz & 0x3);
-+	while (__sz--)
-+		*(__dest++) = *(__src++);
-+}
-+static inline void copy_shorts(void *dest, const void *src, size_t sz)
-+{
-+	u16 *__dest = dest;
-+	const u16 *__src = src;
-+	size_t __sz = sz >> 1;
-+	BUG_ON((unsigned long)dest & 0x1);
-+	BUG_ON((unsigned long)src & 0x1);
-+	BUG_ON(sz & 0x1);
-+	while (__sz--)
-+		*(__dest++) = *(__src++);
-+}
-+static inline void copy_bytes(void *dest, const void *src, size_t sz)
-+{
-+	u8 *__dest = dest;
-+	const u8 *__src = src;
-+	while (sz--)
-+		*(__dest++) = *(__src++);
-+}
-+#else
-+#define copy_words memcpy
-+#define copy_shorts memcpy
-+#define copy_bytes memcpy
-+#endif
-+
-+/************/
-+/* RB-trees */
-+/************/
-+
-+/* We encapsulate RB-trees so that it's easier to use non-linux forms in
-+ * non-linux systems. This also encapsulates the extra plumbing that linux code
-+ * usually provides when using RB-trees. This encapsulation assumes that the
-+ * data type held by the tree is u32.
-+ */
-+
-+struct dpa_rbtree {
-+	struct rb_root root;
-+};
-+#define DPA_RBTREE { .root = RB_ROOT }
-+
-+static inline void dpa_rbtree_init(struct dpa_rbtree *tree)
-+{
-+	tree->root = RB_ROOT;
-+}
-+
-+#define IMPLEMENT_DPA_RBTREE(name, type, node_field, val_field) \
-+static inline int name##_push(struct dpa_rbtree *tree, type *obj) \
-+{ \
-+	struct rb_node *parent = NULL, **p = &tree->root.rb_node; \
-+	while (*p) { \
-+		u32 item; \
-+		parent = *p; \
-+		item = rb_entry(parent, type, node_field)->val_field; \
-+		if (obj->val_field < item) \
-+			p = &parent->rb_left; \
-+		else if (obj->val_field > item) \
-+			p = &parent->rb_right; \
-+		else \
-+			return -EBUSY; \
-+	} \
-+	rb_link_node(&obj->node_field, parent, p); \
-+	rb_insert_color(&obj->node_field, &tree->root); \
-+	return 0; \
-+} \
-+static inline void name##_del(struct dpa_rbtree *tree, type *obj) \
-+{ \
-+	rb_erase(&obj->node_field, &tree->root); \
-+} \
-+static inline type *name##_find(struct dpa_rbtree *tree, u32 val) \
-+{ \
-+	type *ret; \
-+	struct rb_node *p = tree->root.rb_node; \
-+	while (p) { \
-+		ret = rb_entry(p, type, node_field); \
-+		if (val < ret->val_field) \
-+			p = p->rb_left; \
-+		else if (val > ret->val_field) \
-+			p = p->rb_right; \
-+		else \
-+			return ret; \
-+	} \
-+	return NULL; \
-+}
-+
-+/************/
-+/* Bootargs */
-+/************/
-+
-+/* Qman has "qportals=" and Bman has "bportals="; they use the same syntax: a
-+ * comma-separated list of items, each item being a cpu index or a range of
-+ * cpu indices, and each item optionally prefixed by "s" to indicate that the
-+ * portal associated with that cpu should be shared. Eg. "bportals=s0,2-3"
-+ * requests a shared portal on cpu 0 and unshared portals on cpus 2 and 3. See
-+ * bman_driver.c for more specifics. */
-+static int __parse_portals_cpu(const char **s, unsigned int *cpu)
-+{
-+	*cpu = 0;
-+	if (!isdigit(**s))
-+		return -EINVAL;
-+	while (isdigit(**s))
-+		*cpu = *cpu * 10 + (*((*s)++) - '0');
-+	return 0;
-+}
-+static inline int parse_portals_bootarg(char *str, struct cpumask *want_shared,
-+					struct cpumask *want_unshared,
-+					const char *argname)
-+{
-+	const char *s = str;
-+	unsigned int shared, cpu1, cpu2, loop;
-+
-+keep_going:
-+	if (*s == 's') {
-+		shared = 1;
-+		s++;
-+	} else
-+		shared = 0;
-+	if (__parse_portals_cpu(&s, &cpu1))
-+		goto err;
-+	if (*s == '-') {
-+		s++;
-+		if (__parse_portals_cpu(&s, &cpu2))
-+			goto err;
-+		if (cpu2 < cpu1)
-+			goto err;
-+	} else
-+		cpu2 = cpu1;
-+	for (loop = cpu1; loop <= cpu2; loop++)
-+		cpumask_set_cpu(loop, shared ? want_shared : want_unshared);
-+	if (*s == ',') {
-+		s++;
-+		goto keep_going;
-+	} else if ((*s == '\0') || isspace(*s))
-+		return 0;
-+err:
-+	pr_crit("Malformed %s argument: %s, offset: %lu\n", argname, str,
-+		(unsigned long)s - (unsigned long)str);
-+	return -EINVAL;
-+}
-+
-+/* Hooks from fsl_usdpaa_irq.c to fsl_usdpaa.c */
-+int usdpaa_get_portal_config(struct file *filp, void *cinh,
-+			     enum usdpaa_portal_type ptype, unsigned int *irq,
-+			     void **iir_reg);
-+
-+#endif /* DPA_SYS_H */
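[Editor's note: IMPLEMENT_DPA_RBTREE above generates push/del/find helpers for any struct that embeds an rb_node and a u32 key. A hedged instantiation follows; the struct and field names are hypothetical (Qman's FQ lookup in this driver uses the same pattern). Not part of the patch.

	/* Illustrative sketch only: generate my_fq_push()/_del()/_find()
	 * for a hypothetical object keyed by 'fqid'. */
	struct my_fq {
		struct rb_node node;
		u32 fqid;
	};
	IMPLEMENT_DPA_RBTREE(my_fq, struct my_fq, node, fqid)

	static struct dpa_rbtree fqtree = DPA_RBTREE;

	/* my_fq_push() returns -EBUSY on a duplicate key;
	 * my_fq_find() returns NULL when the key is absent. */
]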
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/dpa_sys_arm.h
-@@ -0,0 +1,95 @@
-+/* Copyright 2016 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ *     * Redistributions of source code must retain the above copyright
-+ *       notice, this list of conditions and the following disclaimer.
-+ *     * Redistributions in binary form must reproduce the above copyright
-+ *       notice, this list of conditions and the following disclaimer in the
-+ *       documentation and/or other materials provided with the distribution.
-+ *     * Neither the name of Freescale Semiconductor nor the
-+ *       names of its contributors may be used to endorse or promote products
-+ *       derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef DPA_SYS_ARM_H
-+#define DPA_SYS_ARM_H
-+
-+#include
-+#include
-+
-+/* Implementation of ARM specific routines */
-+
-+/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
-+ * barriers and that dcb*() won't fall victim to compiler or execution
-+ * reordering with respect to other code/instructions that manipulate the same
-+ * cacheline. */
-+#define hwsync() { asm volatile("dmb st" : : : "memory"); }
-+#define lwsync() { asm volatile("dmb st" : : : "memory"); }
-+#define dcbf(p) { asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r" (p) : "memory"); }
-+#define dcbt_ro(p) { asm volatile("pld [%0, #64];": : "r" (p)); }
-+#define dcbt_rw(p) { asm volatile("pldw [%0, #64];": : "r" (p)); }
-+#define dcbi(p) { asm volatile("mcr p15, 0, %0, c7, c6, 1" : : "r" (p) : "memory"); }
-+
-+#define dcbz_64(p) { memset(p, 0, sizeof(*p)); }
-+
-+#define dcbf_64(p) \
-+	do { \
-+		dcbf((u32)p); \
-+	} while (0)
-+/* Commonly used combo */
-+#define dcbit_ro(p) \
-+	do { \
-+		dcbi((u32)p); \
-+		dcbt_ro((u32)p); \
-+	} while (0)
-+
-+static inline u64 mfatb(void)
-+{
-+	return get_cycles();
-+}
-+
-+static inline u32 in_be32(volatile void *addr)
-+{
-+	return be32_to_cpu(*((volatile u32 *) addr));
-+}
-+
-+static inline void out_be32(void *addr, u32 val)
-+{
-+	*((u32 *) addr) = cpu_to_be32(val);
-+}
-+
-+
-+static inline void set_bits(unsigned long mask, volatile unsigned long *p)
-+{
-+	*p |= mask;
-+}
-+static inline void clear_bits(unsigned long mask, volatile unsigned long *p)
-+{
-+	*p &= ~mask;
-+}
-+
-+static inline void flush_dcache_range(unsigned long start, unsigned long stop)
-+{
-+	__cpuc_flush_dcache_area((void *) start, stop - start);
-+}
-+
-+#define hard_smp_processor_id() raw_smp_processor_id()
-+#endif
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/dpa_sys_arm64.h
-@@ -0,0 +1,102 @@
-+/* Copyright 2014 Freescale Semiconductor, Inc.
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#ifndef DPA_SYS_ARM64_H -+#define DPA_SYS_ARM64_H -+ -+#include -+#include -+ -+/* Implementation of ARM 64 bit specific routines */ -+ -+/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler -+ * barriers and that dcb*() won't fall victim to compiler or execution -+ * reordering with respect to other code/instructions that manipulate the same -+ * cacheline. 
*/ -+#define hwsync() { asm volatile("dmb st" : : : "memory"); } -+#define lwsync() { asm volatile("dmb st" : : : "memory"); } -+#define dcbf(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); } -+#define dcbt_ro(p) { asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); } -+#define dcbt_rw(p) { asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); } -+#define dcbi(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); } -+#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); } -+ -+#define dcbz_64(p) \ -+ do { \ -+ dcbz(p); \ -+ } while (0) -+ -+#define dcbf_64(p) \ -+ do { \ -+ dcbf(p); \ -+ } while (0) -+/* Commonly used combo */ -+#define dcbit_ro(p) \ -+ do { \ -+ dcbi(p); \ -+ dcbt_ro(p); \ -+ } while (0) -+ -+static inline u64 mfatb(void) -+{ -+ return get_cycles(); -+} -+ -+static inline u32 in_be32(volatile void *addr) -+{ -+ return be32_to_cpu(*((volatile u32 *) addr)); -+} -+ -+static inline void out_be32(void *addr, u32 val) -+{ -+ *((u32 *) addr) = cpu_to_be32(val); -+} -+ -+ -+static inline void set_bits(unsigned long mask, volatile unsigned long *p) -+{ -+ *p |= mask; -+} -+static inline void clear_bits(unsigned long mask, volatile unsigned long *p) -+{ -+ *p &= ~mask; -+} -+ -+static inline void flush_dcache_range(unsigned long start, unsigned long stop) -+{ -+ __flush_dcache_area((void *) start, stop - start); -+} -+ -+#define hard_smp_processor_id() raw_smp_processor_id() -+ -+ -+ -+#endif ---- /dev/null -+++ b/drivers/staging/fsl_qbman/dpa_sys_ppc32.h -@@ -0,0 +1,70 @@ -+/* Copyright 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#ifndef DPA_SYS_PPC32_H -+#define DPA_SYS_PPC32_H -+ -+/* Implementation of PowerPC 32 bit specific routines */ -+ -+/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler -+ * barriers and that dcb*() won't fall victim to compiler or execution -+ * reordering with respect to other code/instructions that manipulate the same -+ * cacheline. */ -+#define hwsync() __asm__ __volatile__ ("sync" : : : "memory") -+#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory") -+#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory") -+#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p)) -+#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p)) -+#define dcbi(p) dcbf(p) -+ -+#define dcbzl(p) __asm__ __volatile__ ("dcbzl 0,%0" : : "r" (p)) -+#define dcbz_64(p) dcbzl(p) -+#define dcbf_64(p) dcbf(p) -+ -+/* Commonly used combo */ -+#define dcbit_ro(p) \ -+ do { \ -+ dcbi(p); \ -+ dcbt_ro(p); \ -+ } while (0) -+ -+static inline u64 mfatb(void) -+{ -+ u32 hi, lo, chk; -+ do { -+ hi = mfspr(SPRN_ATBU); -+ lo = mfspr(SPRN_ATBL); -+ chk = mfspr(SPRN_ATBU); -+ } while (unlikely(hi != chk)); -+ return ((u64)hi << 32) | (u64)lo; -+} -+ -+#endif ---- /dev/null -+++ b/drivers/staging/fsl_qbman/dpa_sys_ppc64.h -@@ -0,0 +1,79 @@ -+/* Copyright 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#ifndef DPA_SYS_PPC64_H -+#define DPA_SYS_PPC64_H -+ -+/* Implementation of PowerPC 64 bit specific routines */ -+ -+/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler -+ * barriers and that dcb*() won't fall victim to compiler or execution -+ * reordering with respect to other code/instructions that manipulate the same -+ * cacheline. 
*/ -+#define hwsync() __asm__ __volatile__ ("sync" : : : "memory") -+#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory") -+#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory") -+#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p)) -+#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p)) -+#define dcbi(p) dcbf(p) -+ -+#define dcbz(p) __asm__ __volatile__ ("dcbz 0,%0" : : "r" (p)) -+#define dcbz_64(p) \ -+ do { \ -+ dcbz((void*)p + 32); \ -+ dcbz(p); \ -+ } while (0) -+#define dcbf_64(p) \ -+ do { \ -+ dcbf((void*)p + 32); \ -+ dcbf(p); \ -+ } while (0) -+/* Commonly used combo */ -+#define dcbit_ro(p) \ -+ do { \ -+ dcbi(p); \ -+ dcbi((void*)p + 32); \ -+ dcbt_ro(p); \ -+ dcbt_ro((void*)p + 32); \ -+ } while (0) -+ -+static inline u64 mfatb(void) -+{ -+ u32 hi, lo, chk; -+ do { -+ hi = mfspr(SPRN_ATBU); -+ lo = mfspr(SPRN_ATBL); -+ chk = mfspr(SPRN_ATBU); -+ } while (unlikely(hi != chk)); -+ return ((u64)hi << 32) | (u64)lo; -+} -+ -+#endif ---- /dev/null -+++ b/drivers/staging/fsl_qbman/fsl_usdpaa.c -@@ -0,0 +1,1982 @@ -+/* Copyright (C) 2008-2012 Freescale Semiconductor, Inc. -+ * Authors: Andy Fleming -+ * Timur Tabi -+ * Geoff Thorpe -+ * -+ * This file is licensed under the terms of the GNU General Public License -+ * version 2. This program is licensed "as is" without any warranty of any -+ * kind, whether express or implied. -+ */ -+ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#if !(defined(CONFIG_ARM) || defined(CONFIG_ARM64)) -+#include -+#endif -+ -+#include "dpa_sys.h" -+#include -+#include "bman_low.h" -+#include "qman_low.h" -+ -+/* Physical address range of the memory reservation, exported for mm/mem.c */ -+static u64 phys_start; -+static u64 phys_size; -+static u64 arg_phys_size; -+ -+/* PFN versions of the above */ -+static unsigned long pfn_start; -+static unsigned long pfn_size; -+ -+/* Memory reservations are manipulated under this spinlock (which is why 'refs' -+ * isn't atomic_t). */ -+static DEFINE_SPINLOCK(mem_lock); -+ -+/* The range of TLB1 indices */ -+static unsigned int first_tlb; -+static unsigned int num_tlb = 1; -+static unsigned int current_tlb; /* loops around for fault handling */ -+ -+/* Memory reservation is represented as a list of 'mem_fragment's, some of which -+ * may be mapped. Unmapped fragments are always merged where possible. */ -+static LIST_HEAD(mem_list); -+ -+struct mem_mapping; -+ -+/* Memory fragments are in 'mem_list'. */ -+struct mem_fragment { -+ u64 base; -+ u64 len; -+ unsigned long pfn_base; /* PFN version of 'base' */ -+ unsigned long pfn_len; /* PFN version of 'len' */ -+ unsigned int refs; /* zero if unmapped */ -+ u64 root_len; /* Size of the orignal fragment */ -+ unsigned long root_pfn; /* PFN of the orignal fragment */ -+ struct list_head list; -+ /* if mapped, flags+name captured at creation time */ -+ u32 flags; -+ char name[USDPAA_DMA_NAME_MAX]; -+ u64 map_len; -+ /* support multi-process locks per-memory-fragment. */ -+ int has_locking; -+ wait_queue_head_t wq; -+ struct mem_mapping *owner; -+}; -+ -+/* Mappings of memory fragments in 'struct ctx'. These are created from -+ * ioctl(USDPAA_IOCTL_DMA_MAP), though the actual mapping then happens via a -+ * mmap(). 
*/
-+struct mem_mapping {
-+	struct mem_fragment *root_frag;
-+	u32 frag_count;
-+	u64 total_size;
-+	struct list_head list;
-+	int refs;
-+	void *virt_addr;
-+};
-+
-+struct portal_mapping {
-+	struct usdpaa_ioctl_portal_map user;
-+	union {
-+		struct qm_portal_config *qportal;
-+		struct bm_portal_config *bportal;
-+	};
-+	/* Declare space for the portals in case the process
-+	   exits unexpectedly and needs to be cleaned by the kernel */
-+	union {
-+		struct qm_portal qman_portal_low;
-+		struct bm_portal bman_portal_low;
-+	};
-+	struct list_head list;
-+	struct resource *phys;
-+	struct iommu_domain *iommu_domain;
-+};
-+
-+/* Track the DPAA resources the process is using */
-+struct active_resource {
-+	struct list_head list;
-+	u32 id;
-+	u32 num;
-+	unsigned int refcount;
-+};
-+
-+/* Per-FD state (which should also be per-process but we don't enforce that) */
-+struct ctx {
-+	/* Lock to protect the context */
-+	spinlock_t lock;
-+	/* Allocated resources get put here for accounting */
-+	struct list_head resources[usdpaa_id_max];
-+	/* list of DMA maps */
-+	struct list_head maps;
-+	/* list of portal maps */
-+	struct list_head portals;
-+};
-+
-+/* Different resource classes */
-+static const struct alloc_backend {
-+	enum usdpaa_id_type id_type;
-+	int (*alloc)(u32 *, u32, u32, int);
-+	void (*release)(u32 base, unsigned int count);
-+	int (*reserve)(u32 base, unsigned int count);
-+	const char *acronym;
-+} alloc_backends[] = {
-+	{
-+		.id_type = usdpaa_id_fqid,
-+		.alloc = qman_alloc_fqid_range,
-+		.release = qman_release_fqid_range,
-+		.reserve = qman_reserve_fqid_range,
-+		.acronym = "FQID"
-+	},
-+	{
-+		.id_type = usdpaa_id_bpid,
-+		.alloc = bman_alloc_bpid_range,
-+		.release = bman_release_bpid_range,
-+		.reserve = bman_reserve_bpid_range,
-+		.acronym = "BPID"
-+	},
-+	{
-+		.id_type = usdpaa_id_qpool,
-+		.alloc = qman_alloc_pool_range,
-+		.release = qman_release_pool_range,
-+		.reserve = qman_reserve_pool_range,
-+		.acronym = "QPOOL"
-+	},
-+	{
-+		.id_type = usdpaa_id_cgrid,
-+		.alloc = qman_alloc_cgrid_range,
-+		.release = qman_release_cgrid_range,
-+		.acronym = "CGRID"
-+	},
-+	{
-+		.id_type = usdpaa_id_ceetm0_lfqid,
-+		.alloc = qman_alloc_ceetm0_lfqid_range,
-+		.release = qman_release_ceetm0_lfqid_range,
-+		.acronym = "CEETM0_LFQID"
-+	},
-+	{
-+		.id_type = usdpaa_id_ceetm0_channelid,
-+		.alloc = qman_alloc_ceetm0_channel_range,
-+		.release = qman_release_ceetm0_channel_range,
-+		.acronym = "CEETM0_CHANNELID"
-+	},
-+	{
-+		.id_type = usdpaa_id_ceetm1_lfqid,
-+		.alloc = qman_alloc_ceetm1_lfqid_range,
-+		.release = qman_release_ceetm1_lfqid_range,
-+		.acronym = "CEETM1_LFQID"
-+	},
-+	{
-+		.id_type = usdpaa_id_ceetm1_channelid,
-+		.alloc = qman_alloc_ceetm1_channel_range,
-+		.release = qman_release_ceetm1_channel_range,
-+		.acronym = "CEETM1_CHANNELID"
-+	},
-+	{
-+		/* This terminates the array */
-+		.id_type = usdpaa_id_max
-+	}
-+};
-+
-+/* Determine the largest acceptable page size for a given size.
-+   The sizes are determined by what the TLB1 acceptable page sizes are */
-+static u32 largest_page_size(u32 size)
-+{
-+	int shift = 30; /* Start at 1G size */
-+	if (size < 4096)
-+		return 0;
-+	do {
-+		if (size >= (1 << shift))
-+			return 1 << shift;
-+		shift -= 2; /* TLB1 page sizes step in powers of 4 */
-+	} while (shift >= 12); /* Down to the 4k minimum */
-+	return 0;
-+}
-+
-+/* Determine if value is a power of 4 */
-+static inline bool is_power_of_4(u64 x)
-+{
-+	if (x == 0 || ((x & (x - 1)) != 0))
-+		return false;
-+	return !!(x & 0x5555555555555555ull);
-+}
-+
-+/* Helper for ioctl_dma_map() when we have a larger fragment than we need.
This -+ * splits the fragment into 4 and returns the upper-most. (The caller can loop -+ * until it has a suitable fragment size.) */ -+static struct mem_fragment *split_frag(struct mem_fragment *frag) -+{ -+ struct mem_fragment *x[3]; -+ -+ x[0] = kmalloc(sizeof(struct mem_fragment), GFP_ATOMIC); -+ x[1] = kmalloc(sizeof(struct mem_fragment), GFP_ATOMIC); -+ x[2] = kmalloc(sizeof(struct mem_fragment), GFP_ATOMIC); -+ if (!x[0] || !x[1] || !x[2]) { -+ kfree(x[0]); -+ kfree(x[1]); -+ kfree(x[2]); -+ return NULL; -+ } -+ BUG_ON(frag->refs); -+ frag->len >>= 2; -+ frag->pfn_len >>= 2; -+ x[0]->base = frag->base + frag->len; -+ x[1]->base = x[0]->base + frag->len; -+ x[2]->base = x[1]->base + frag->len; -+ x[0]->len = x[1]->len = x[2]->len = frag->len; -+ x[0]->pfn_base = frag->pfn_base + frag->pfn_len; -+ x[1]->pfn_base = x[0]->pfn_base + frag->pfn_len; -+ x[2]->pfn_base = x[1]->pfn_base + frag->pfn_len; -+ x[0]->pfn_len = x[1]->pfn_len = x[2]->pfn_len = frag->pfn_len; -+ x[0]->refs = x[1]->refs = x[2]->refs = 0; -+ x[0]->root_len = x[1]->root_len = x[2]->root_len = frag->root_len; -+ x[0]->root_pfn = x[1]->root_pfn = x[2]->root_pfn = frag->root_pfn; -+ x[0]->name[0] = x[1]->name[0] = x[2]->name[0] = 0; -+ list_add_tail(&x[0]->list, &frag->list); -+ list_add_tail(&x[1]->list, &x[0]->list); -+ list_add_tail(&x[2]->list, &x[1]->list); -+ return x[2]; -+} -+ -+static __maybe_unused void dump_frags(void) -+{ -+ struct mem_fragment *frag; -+ int i = 0; -+ list_for_each_entry(frag, &mem_list, list) { -+ pr_info("FRAG %d: base 0x%llx pfn_base 0x%lx len 0x%llx root_len 0x%llx root_pfn 0x%lx refs %d name %s\n", -+ i, frag->base, frag->pfn_base, -+ frag->len, frag->root_len, frag->root_pfn, -+ frag->refs, frag->name); -+ ++i; -+ } -+} -+ -+/* Walk the list of fragments and adjoin neighbouring segments if possible */ -+static void compress_frags(void) -+{ -+ /* Walk the fragment list and combine fragments */ -+ struct mem_fragment *frag, *nxtfrag; -+ u64 len = 0; -+ -+ int i, numfrags; -+ -+ -+ frag = list_entry(mem_list.next, struct mem_fragment, list); -+ -+ while (&frag->list != &mem_list) { -+ /* Must combine consecutive fragemenst with -+ same root_pfn such that they are power of 4 */ -+ if (frag->refs != 0) { -+ frag = list_entry(frag->list.next, -+ struct mem_fragment, list); -+ continue; /* Not this window */ -+ } -+ len = frag->len; -+ numfrags = 0; -+ nxtfrag = list_entry(frag->list.next, -+ struct mem_fragment, list); -+ while (true) { -+ if (&nxtfrag->list == &mem_list) { -+ numfrags = 0; -+ break; /* End of list */ -+ } -+ if (nxtfrag->refs) { -+ numfrags = 0; -+ break; /* In use still */ -+ } -+ if (nxtfrag->root_pfn != frag->root_pfn) { -+ numfrags = 0; -+ break; /* Crosses root fragment boundary */ -+ } -+ len += nxtfrag->len; -+ numfrags++; -+ if (is_power_of_4(len)) { -+ /* These fragments can be combined */ -+ break; -+ } -+ nxtfrag = list_entry(nxtfrag->list.next, -+ struct mem_fragment, list); -+ } -+ if (numfrags == 0) { -+ frag = list_entry(frag->list.next, -+ struct mem_fragment, list); -+ continue; /* try the next window */ -+ } -+ for (i = 0; i < numfrags; i++) { -+ struct mem_fragment *todel = -+ list_entry(nxtfrag->list.prev, -+ struct mem_fragment, list); -+ nxtfrag->len += todel->len; -+ nxtfrag->pfn_len += todel->pfn_len; -+ list_del(&todel->list); -+ } -+ /* Re evaluate the list, things may merge now */ -+ frag = list_entry(mem_list.next, struct mem_fragment, list); -+ } -+} -+ -+/* Hook from arch/powerpc/mm/mem.c */ -+int usdpaa_test_fault(unsigned long pfn, u64 
*phys_addr, u64 *size) -+{ -+ struct mem_fragment *frag; -+ int idx = -1; -+ if ((pfn < pfn_start) || (pfn >= (pfn_start + pfn_size))) -+ return -1; -+ /* It's in-range, we need to find the fragment */ -+ spin_lock(&mem_lock); -+ list_for_each_entry(frag, &mem_list, list) { -+ if ((pfn >= frag->pfn_base) && (pfn < (frag->pfn_base + -+ frag->pfn_len))) { -+ *phys_addr = frag->base; -+ *size = frag->len; -+ idx = current_tlb++; -+ if (current_tlb >= (first_tlb + num_tlb)) -+ current_tlb = first_tlb; -+ break; -+ } -+ } -+ spin_unlock(&mem_lock); -+ return idx; -+} -+ -+static int usdpaa_open(struct inode *inode, struct file *filp) -+{ -+ const struct alloc_backend *backend = &alloc_backends[0]; -+ struct ctx *ctx = kmalloc(sizeof(struct ctx), GFP_KERNEL); -+ if (!ctx) -+ return -ENOMEM; -+ filp->private_data = ctx; -+ -+ while (backend->id_type != usdpaa_id_max) { -+ INIT_LIST_HEAD(&ctx->resources[backend->id_type]); -+ backend++; -+ } -+ -+ INIT_LIST_HEAD(&ctx->maps); -+ INIT_LIST_HEAD(&ctx->portals); -+ spin_lock_init(&ctx->lock); -+ -+ //filp->f_mapping->backing_dev_info = &directly_mappable_cdev_bdi; -+ -+ return 0; -+} -+ -+#define DQRR_MAXFILL 15 -+ -+/* Reset a QMan portal to its default state */ -+static int init_qm_portal(struct qm_portal_config *config, -+ struct qm_portal *portal) -+{ -+ const struct qm_dqrr_entry *dqrr = NULL; -+ int i; -+ -+ portal->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE]; -+ portal->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI]; -+ -+ /* Make sure interrupts are inhibited */ -+ qm_out(IIR, 1); -+ -+ /* Initialize the DQRR. This will stop any dequeue -+ commands that are in progress */ -+ if (qm_dqrr_init(portal, config, qm_dqrr_dpush, qm_dqrr_pvb, -+ qm_dqrr_cdc, DQRR_MAXFILL)) { -+ pr_err("qm_dqrr_init() failed when trying to" -+ " recover portal, portal will be leaked\n"); -+ return 1; -+ } -+ -+ /* Discard any entries on the DQRR */ -+ /* If we consume the ring twice something is wrong */ -+ for (i = 0; i < DQRR_MAXFILL * 2; i++) { -+ qm_dqrr_pvb_update(portal); -+ dqrr = qm_dqrr_current(portal); -+ if (!dqrr) -+ break; -+ qm_dqrr_cdc_consume_1ptr(portal, dqrr, 0); -+ qm_dqrr_pvb_update(portal); -+ qm_dqrr_next(portal); -+ } -+ /* Initialize the EQCR */ -+ if (qm_eqcr_init(portal, qm_eqcr_pvb, -+ qm_eqcr_get_ci_stashing(portal), 1)) { -+ pr_err("Qman EQCR initialisation failed\n"); -+ return 1; -+ } -+ /* initialize the MR */ -+ if (qm_mr_init(portal, qm_mr_pvb, qm_mr_cci)) { -+ pr_err("Qman MR initialisation failed\n"); -+ return 1; -+ } -+ qm_mr_pvb_update(portal); -+ while (qm_mr_current(portal)) { -+ qm_mr_next(portal); -+ qm_mr_cci_consume_to_current(portal); -+ qm_mr_pvb_update(portal); -+ } -+ -+ if (qm_mc_init(portal)) { -+ pr_err("Qman MC initialisation failed\n"); -+ return 1; -+ } -+ return 0; -+} -+ -+static int init_bm_portal(struct bm_portal_config *config, -+ struct bm_portal *portal) -+{ -+ portal->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE]; -+ portal->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI]; -+ -+ if (bm_rcr_init(portal, bm_rcr_pvb, bm_rcr_cce)) { -+ pr_err("Bman RCR initialisation failed\n"); -+ return 1; -+ } -+ if (bm_mc_init(portal)) { -+ pr_err("Bman MC initialisation failed\n"); -+ return 1; -+ } -+ return 0; -+} -+ -+/* Function that will scan all FQ's in the system. For each FQ that is not -+ OOS it will call the check_channel helper to determine if the FQ should -+ be torn down. 
If the check_channel helper returns true, the FQ will be
-+   transitioned to the OOS state */
-+static int qm_check_and_destroy_fqs(struct qm_portal *portal, void *ctx,
-+				    bool (*check_channel)(void*, u32))
-+{
-+	u32 fq_id = 0;
-+	while (1) {
-+		struct qm_mc_command *mcc;
-+		struct qm_mc_result *mcr;
-+		u8 state;
-+		u32 channel;
-+
-+		/* Determine the channel for the FQID */
-+		mcc = qm_mc_start(portal);
-+		mcc->queryfq.fqid = fq_id;
-+		qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ);
-+		while (!(mcr = qm_mc_result(portal)))
-+			cpu_relax();
-+		DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK)
-+			   == QM_MCR_VERB_QUERYFQ);
-+		if (mcr->result != QM_MCR_RESULT_OK)
-+			break; /* End of valid FQIDs */
-+
-+		channel = mcr->queryfq.fqd.dest.channel;
-+		/* Determine the state of the FQID */
-+		mcc = qm_mc_start(portal);
-+		mcc->queryfq_np.fqid = fq_id;
-+		qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ_NP);
-+		while (!(mcr = qm_mc_result(portal)))
-+			cpu_relax();
-+		DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK)
-+			   == QM_MCR_VERB_QUERYFQ_NP);
-+		state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
-+		if (state == QM_MCR_NP_STATE_OOS)
-+			/* Already OOS, no need to do any more checks */
-+			goto next;
-+
-+		if (check_channel(ctx, channel))
-+			qm_shutdown_fq(&portal, 1, fq_id);
-+next:
-+		++fq_id;
-+	}
-+	return 0;
-+}
-+
-+static bool check_channel_device(void *_ctx, u32 channel)
-+{
-+	struct ctx *ctx = _ctx;
-+	struct portal_mapping *portal, *tmpportal;
-+	struct active_resource *res;
-+
-+	/* See if the FQ is destined for one of the portals we're cleaning up */
-+	list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) {
-+		if (portal->user.type == usdpaa_portal_qman) {
-+			if (portal->qportal->public_cfg.channel == channel) {
-+				/* This FQ's destination is a portal
-+				   we're cleaning, send a retire */
-+				return true;
-+			}
-+		}
-+	}
-+
-+	/* Check the pool channels that will be released as well */
-+	list_for_each_entry(res, &ctx->resources[usdpaa_id_qpool], list) {
-+		if ((channel >= res->id) &&
-+		    (channel <= (res->id + res->num - 1)))
-+			return true;
-+	}
-+	return false;
-+}
-+
-+static bool check_portal_channel(void *ctx, u32 channel)
-+{
-+	u32 portal_channel = *(u32 *)ctx;
-+	if (portal_channel == channel) {
-+		/* This FQ's destination is a portal
-+		   we're cleaning, send a retire */
-+		return true;
-+	}
-+	return false;
-+}
-+
-+static int usdpaa_release(struct inode *inode, struct file *filp)
-+{
-+	struct ctx *ctx = filp->private_data;
-+	struct mem_mapping *map, *tmpmap;
-+	struct portal_mapping *portal, *tmpportal;
-+	const struct alloc_backend *backend = &alloc_backends[0];
-+	struct active_resource *res;
-+	struct qm_portal *qm_cleanup_portal = NULL;
-+	struct bm_portal *bm_cleanup_portal = NULL;
-+	struct qm_portal_config *qm_alloced_portal = NULL;
-+	struct bm_portal_config *bm_alloced_portal = NULL;
-+
-+	struct qm_portal *portal_array[qman_portal_max];
-+	int portal_count = 0;
-+
-+	/* Ensure the release operation cannot be migrated to another
-+	   CPU as CPU specific variables may be needed during cleanup */
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+	migrate_disable();
-+#endif
-+	/* The following logic is used to recover resources that were not
-+	   correctly released by the process that is closing the FD.
-+	   Step 1: synchronize the HW with the qm_portal/bm_portal structures
-+	   in the kernel
-+	*/
-+
-+	list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) {
-+		/* Try to recover any portals that weren't shut down */
-+		if (portal->user.type == usdpaa_portal_qman) {
-+			portal_array[portal_count] = &portal->qman_portal_low;
-+			++portal_count;
-+			init_qm_portal(portal->qportal,
-+				       &portal->qman_portal_low);
-+			if (!qm_cleanup_portal) {
-+				qm_cleanup_portal = &portal->qman_portal_low;
-+			} else {
-+				/* Clean FQs on the dedicated channel */
-+				u32 chan = portal->qportal->public_cfg.channel;
-+				qm_check_and_destroy_fqs(
-+					&portal->qman_portal_low, &chan,
-+					check_portal_channel);
-+			}
-+		} else {
-+			/* BMAN */
-+			init_bm_portal(portal->bportal,
-+				       &portal->bman_portal_low);
-+			if (!bm_cleanup_portal)
-+				bm_cleanup_portal = &portal->bman_portal_low;
-+		}
-+	}
-+	/* If no portal was found, allocate one for cleanup */
-+	if (!qm_cleanup_portal) {
-+		qm_alloced_portal = qm_get_unused_portal();
-+		if (!qm_alloced_portal) {
-+			pr_crit("No QMan portal available for cleanup\n");
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+			migrate_enable();
-+#endif
-+			return -1;
-+		}
-+		qm_cleanup_portal = kmalloc(sizeof(struct qm_portal),
-+					    GFP_KERNEL);
-+		if (!qm_cleanup_portal) {
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+			migrate_enable();
-+#endif
-+			return -ENOMEM;
-+		}
-+		init_qm_portal(qm_alloced_portal, qm_cleanup_portal);
-+		portal_array[portal_count] = qm_cleanup_portal;
-+		++portal_count;
-+	}
-+	if (!bm_cleanup_portal) {
-+		bm_alloced_portal = bm_get_unused_portal();
-+		if (!bm_alloced_portal) {
-+			pr_crit("No BMan portal available for cleanup\n");
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+			migrate_enable();
-+#endif
-+			return -1;
-+		}
-+		bm_cleanup_portal = kmalloc(sizeof(struct bm_portal),
-+					    GFP_KERNEL);
-+		if (!bm_cleanup_portal) {
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+			migrate_enable();
-+#endif
-+			return -ENOMEM;
-+		}
-+		init_bm_portal(bm_alloced_portal, bm_cleanup_portal);
-+	}
-+
-+	/* OOS the FQs associated with this process */
-+	qm_check_and_destroy_fqs(qm_cleanup_portal, ctx, check_channel_device);
-+
-+	while (backend->id_type != usdpaa_id_max) {
-+		int leaks = 0;
-+		list_for_each_entry(res, &ctx->resources[backend->id_type],
-+				    list) {
-+			if (backend->id_type == usdpaa_id_fqid) {
-+				int i = 0;
-+				for (; i < res->num; i++) {
-+					/* Clean FQs with the cleanup portal */
-+					qm_shutdown_fq(portal_array,
-+						       portal_count,
-+						       res->id + i);
-+				}
-+			}
-+			leaks += res->num;
-+			backend->release(res->id, res->num);
-+		}
-+		if (leaks)
-+			pr_crit("USDPAA process leaking %d %s%s\n", leaks,
-+				backend->acronym, (leaks > 1) ?
"s" : ""); -+ backend++; -+ } -+ /* Release any DMA regions */ -+ spin_lock(&mem_lock); -+ list_for_each_entry_safe(map, tmpmap, &ctx->maps, list) { -+ struct mem_fragment *current_frag = map->root_frag; -+ int i; -+ if (map->root_frag->has_locking && -+ (map->root_frag->owner == map)) { -+ map->root_frag->owner = NULL; -+ wake_up(&map->root_frag->wq); -+ } -+ /* Check each fragment and merge if the ref count is 0 */ -+ for (i = 0; i < map->frag_count; i++) { -+ --current_frag->refs; -+ current_frag = list_entry(current_frag->list.prev, -+ struct mem_fragment, list); -+ } -+ -+ compress_frags(); -+ list_del(&map->list); -+ kfree(map); -+ } -+ spin_unlock(&mem_lock); -+ -+ /* Return portals */ -+ list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) { -+ if (portal->user.type == usdpaa_portal_qman) { -+ /* Give the portal back to the allocator */ -+ init_qm_portal(portal->qportal, -+ &portal->qman_portal_low); -+ qm_put_unused_portal(portal->qportal); -+ } else { -+ init_bm_portal(portal->bportal, -+ &portal->bman_portal_low); -+ bm_put_unused_portal(portal->bportal); -+ } -+ list_del(&portal->list); -+ kfree(portal); -+ } -+ if (qm_alloced_portal) { -+ qm_put_unused_portal(qm_alloced_portal); -+ kfree(qm_cleanup_portal); -+ } -+ if (bm_alloced_portal) { -+ bm_put_unused_portal(bm_alloced_portal); -+ kfree(bm_cleanup_portal); -+ } -+ -+ kfree(ctx); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ migrate_enable(); -+#endif -+ return 0; -+} -+ -+static int check_mmap_dma(struct ctx *ctx, struct vm_area_struct *vma, -+ int *match, unsigned long *pfn) -+{ -+ struct mem_mapping *map; -+ -+ list_for_each_entry(map, &ctx->maps, list) { -+ int i; -+ struct mem_fragment *frag = map->root_frag; -+ -+ for (i = 0; i < map->frag_count; i++) { -+ if (frag->pfn_base == vma->vm_pgoff) { -+ *match = 1; -+ *pfn = frag->pfn_base; -+ return 0; -+ } -+ frag = list_entry(frag->list.next, struct mem_fragment, -+ list); -+ } -+ } -+ *match = 0; -+ return 0; -+} -+ -+static int check_mmap_resource(struct resource *res, struct vm_area_struct *vma, -+ int *match, unsigned long *pfn) -+{ -+ *pfn = res->start >> PAGE_SHIFT; -+ if (*pfn == vma->vm_pgoff) { -+ *match = 1; -+ if ((vma->vm_end - vma->vm_start) != resource_size(res)) -+ return -EINVAL; -+ } else -+ *match = 0; -+ return 0; -+} -+ -+static int check_mmap_portal(struct ctx *ctx, struct vm_area_struct *vma, -+ int *match, unsigned long *pfn) -+{ -+ struct portal_mapping *portal; -+ int ret; -+ -+ list_for_each_entry(portal, &ctx->portals, list) { -+ ret = check_mmap_resource(&portal->phys[DPA_PORTAL_CE], vma, -+ match, pfn); -+ if (*match) { -+ vma->vm_page_prot = -+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) -+ pgprot_cached_ns(vma->vm_page_prot); -+#else -+ pgprot_cached_noncoherent(vma->vm_page_prot); -+#endif -+ return ret; -+ } -+ ret = check_mmap_resource(&portal->phys[DPA_PORTAL_CI], vma, -+ match, pfn); -+ if (*match) { -+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); -+ return ret; -+ } -+ } -+ *match = 0; -+ return 0; -+} -+ -+static int usdpaa_mmap(struct file *filp, struct vm_area_struct *vma) -+{ -+ struct ctx *ctx = filp->private_data; -+ unsigned long pfn = 0; -+ int match, ret; -+ -+ spin_lock(&mem_lock); -+ ret = check_mmap_dma(ctx, vma, &match, &pfn); -+ if (!match) -+ ret = check_mmap_portal(ctx, vma, &match, &pfn); -+ spin_unlock(&mem_lock); -+ if (!match) -+ return -EINVAL; -+ if (!ret) -+ ret = remap_pfn_range(vma, vma->vm_start, pfn, -+ vma->vm_end - vma->vm_start, -+ vma->vm_page_prot); -+ return ret; -+} -+ -+/* Return 
the nearest rounded-up address >= 'addr' that is 'sz'-aligned. 'sz' -+ * must be a power of 2, but both 'addr' and 'sz' can be expressions. */ -+#define USDPAA_MEM_ROUNDUP(addr, sz) \ -+ ({ \ -+ unsigned long foo_align = (sz) - 1; \ -+ ((addr) + foo_align) & ~foo_align; \ -+ }) -+/* Searching for a size-aligned virtual address range starting from 'addr' */ -+static unsigned long usdpaa_get_unmapped_area(struct file *file, -+ unsigned long addr, -+ unsigned long len, -+ unsigned long pgoff, -+ unsigned long flags) -+{ -+ struct vm_area_struct *vma; -+ -+ if (len % PAGE_SIZE) -+ return -EINVAL; -+ if (!len) -+ return -EINVAL; -+ -+ /* Need to align the address to the largest pagesize of the mapping -+ * because the MMU requires the virtual address to have the same -+ * alignment as the physical address */ -+ addr = USDPAA_MEM_ROUNDUP(addr, largest_page_size(len)); -+ vma = find_vma(current->mm, addr); -+ /* Keep searching until we reach the end of currently-used virtual -+ * address-space or we find a big enough gap. */ -+ while (vma) { -+ if ((addr + len) < vma->vm_start) -+ return addr; -+ -+ addr = USDPAA_MEM_ROUNDUP(vma->vm_end, largest_page_size(len)); -+ vma = vma->vm_next; -+ } -+ if ((TASK_SIZE - len) < addr) -+ return -ENOMEM; -+ return addr; -+} -+ -+static long ioctl_id_alloc(struct ctx *ctx, void __user *arg) -+{ -+ struct usdpaa_ioctl_id_alloc i; -+ const struct alloc_backend *backend; -+ struct active_resource *res; -+ int ret = copy_from_user(&i, arg, sizeof(i)); -+ if (ret) -+ return ret; -+ if ((i.id_type >= usdpaa_id_max) || !i.num) -+ return -EINVAL; -+ backend = &alloc_backends[i.id_type]; -+ /* Allocate the required resource type */ -+ ret = backend->alloc(&i.base, i.num, i.align, i.partial); -+ if (ret < 0) -+ return ret; -+ i.num = ret; -+ /* Copy the result to user-space */ -+ ret = copy_to_user(arg, &i, sizeof(i)); -+ if (ret) { -+ backend->release(i.base, i.num); -+ return ret; -+ } -+ /* Assign the allocated range to the FD accounting */ -+ res = kmalloc(sizeof(*res), GFP_KERNEL); -+ if (!res) { -+ backend->release(i.base, i.num); -+ return -ENOMEM; -+ } -+ spin_lock(&ctx->lock); -+ res->id = i.base; -+ res->num = i.num; -+ res->refcount = 1; -+ list_add(&res->list, &ctx->resources[i.id_type]); -+ spin_unlock(&ctx->lock); -+ return 0; -+} -+ -+static long ioctl_id_release(struct ctx *ctx, void __user *arg) -+{ -+ struct usdpaa_ioctl_id_release i; -+ const struct alloc_backend *backend; -+ struct active_resource *tmp, *pos; -+ -+ int ret = copy_from_user(&i, arg, sizeof(i)); -+ if (ret) -+ return ret; -+ if ((i.id_type >= usdpaa_id_max) || !i.num) -+ return -EINVAL; -+ backend = &alloc_backends[i.id_type]; -+ /* Pull the range out of the FD accounting - the range is valid iff this -+ * succeeds. 
*/ -+ spin_lock(&ctx->lock); -+ list_for_each_entry_safe(pos, tmp, &ctx->resources[i.id_type], list) { -+ if (pos->id == i.base && pos->num == i.num) { -+ pos->refcount--; -+ if (pos->refcount) { -+ spin_unlock(&ctx->lock); -+ return 0; /* Still being used */ -+ } -+ list_del(&pos->list); -+ kfree(pos); -+ spin_unlock(&ctx->lock); -+ goto found; -+ } -+ } -+ /* Failed to find the resource */ -+ spin_unlock(&ctx->lock); -+ pr_err("Couldn't find resource type %d base 0x%x num %d\n", -+ i.id_type, i.base, i.num); -+ return -EINVAL; -+found: -+ /* Release the resource to the backend */ -+ backend->release(i.base, i.num); -+ return 0; -+} -+ -+static long ioctl_id_reserve(struct ctx *ctx, void __user *arg) -+{ -+ struct usdpaa_ioctl_id_reserve i; -+ const struct alloc_backend *backend; -+ struct active_resource *tmp, *pos; -+ -+ int ret = copy_from_user(&i, arg, sizeof(i)); -+ if (ret) -+ return ret; -+ if ((i.id_type >= usdpaa_id_max) || !i.num) -+ return -EINVAL; -+ backend = &alloc_backends[i.id_type]; -+ if (!backend->reserve) -+ return -EINVAL; -+ /* Pull the range out of the FD accounting - the range is valid iff this -+ * succeeds. */ -+ spin_lock(&ctx->lock); -+ list_for_each_entry_safe(pos, tmp, &ctx->resources[i.id_type], list) { -+ if (pos->id == i.base && pos->num == i.num) { -+ pos->refcount++; -+ spin_unlock(&ctx->lock); -+ return 0; -+ } -+ } -+ -+ /* Failed to find the resource */ -+ spin_unlock(&ctx->lock); -+ -+ /* Reserve the resource in the backend */ -+ ret = backend->reserve(i.base, i.num); -+ if (ret) -+ return ret; -+ /* Assign the reserved range to the FD accounting */ -+ pos = kmalloc(sizeof(*pos), GFP_KERNEL); -+ if (!pos) { -+ backend->release(i.base, i.num); -+ return -ENOMEM; -+ } -+ spin_lock(&ctx->lock); -+ pos->id = i.base; -+ pos->num = i.num; -+ pos->refcount = 1; -+ list_add(&pos->list, &ctx->resources[i.id_type]); -+ spin_unlock(&ctx->lock); -+ return 0; -+} -+ -+static long ioctl_dma_map(struct file *fp, struct ctx *ctx, -+ struct usdpaa_ioctl_dma_map *i) -+{ -+ struct mem_fragment *frag, *start_frag, *next_frag; -+ struct mem_mapping *map, *tmp; -+ int ret = 0; -+ u32 largest_page, so_far = 0; -+ int frag_count = 0; -+ unsigned long next_addr = PAGE_SIZE, populate; -+ -+ /* error checking to ensure values copied from user space are valid */ -+ if (i->len % PAGE_SIZE) -+ return -EINVAL; -+ -+ map = kmalloc(sizeof(*map), GFP_KERNEL); -+ if (!map) -+ return -ENOMEM; -+ -+ spin_lock(&mem_lock); -+ if (i->flags & USDPAA_DMA_FLAG_SHARE) { -+ list_for_each_entry(frag, &mem_list, list) { -+ if (frag->refs && (frag->flags & -+ USDPAA_DMA_FLAG_SHARE) && -+ !strncmp(i->name, frag->name, -+ USDPAA_DMA_NAME_MAX)) { -+ /* Matching entry */ -+ if ((i->flags & USDPAA_DMA_FLAG_CREATE) && -+ !(i->flags & USDPAA_DMA_FLAG_LAZY)) { -+ ret = -EBUSY; -+ goto out; -+ } -+ -+ /* Check to ensure size matches record */ -+ if (i->len != frag->map_len && i->len) { -+ pr_err("ioctl_dma_map() Size requested does not match %s and is none zero. 
This usage will be disallowed in a future release\n",
-+					       frag->name);
-+				}
-+
-+				/* Check if this has already been mapped
-+				   to this process */
-+				list_for_each_entry(tmp, &ctx->maps, list)
-+					if (tmp->root_frag == frag) {
-+						/* Already mapped, just need to
-+						   inc ref count */
-+						tmp->refs++;
-+						kfree(map);
-+						i->did_create = 0;
-+						i->len = tmp->total_size;
-+						i->phys_addr = frag->base;
-+						i->ptr = tmp->virt_addr;
-+						spin_unlock(&mem_lock);
-+						return 0;
-+					}
-+				/* Matching entry - just need to map */
-+				i->has_locking = frag->has_locking;
-+				i->did_create = 0;
-+				i->len = frag->map_len;
-+				start_frag = frag;
-+				goto do_map;
-+			}
-+		}
-+		/* No matching entry */
-+		if (!(i->flags & USDPAA_DMA_FLAG_CREATE)) {
-+			pr_err("ioctl_dma_map() No matching entry\n");
-+			ret = -ENOMEM;
-+			goto out;
-+		}
-+	}
-+	/* New fragment required, size must be provided. */
-+	if (!i->len) {
-+		ret = -EINVAL;
-+		goto out;
-+	}
-+
-+	/* Find one or more contiguous fragments that satisfy the total length,
-+	   trying to minimize the number of fragments; compute the largest
-+	   page size that the allocation could use */
-+	largest_page = largest_page_size(i->len);
-+	start_frag = NULL;
-+	while (largest_page &&
-+	       largest_page <= largest_page_size(phys_size) &&
-+	       start_frag == NULL) {
-+		/* Search the list for a frag of that size */
-+		list_for_each_entry(frag, &mem_list, list) {
-+			if (!frag->refs && (frag->len == largest_page)) {
-+				/* See if the next x fragments are free
-+				   and can accommodate the size */
-+				u32 found_size = largest_page;
-+				next_frag = list_entry(frag->list.prev,
-+						       struct mem_fragment,
-+						       list);
-+				/* If the fragment is too small check
-+				   if the neighbours can support it */
-+				while (found_size < i->len) {
-+					if (&mem_list == &next_frag->list)
-+						break; /* End of list */
-+					if (next_frag->refs != 0 ||
-+					    next_frag->len == 0)
-+						break; /* not enough space */
-+					found_size += next_frag->len;
-+					next_frag = list_entry(
-+						next_frag->list.prev,
-+						struct mem_fragment,
-+						list);
-+				}
-+				if (found_size >= i->len) {
-+					/* Success! there is enough contiguous
-+					   free space */
-+					start_frag = frag;
-+					break;
-+				}
-+			}
-+		} /* next frag loop */
-+		/* Couldn't satisfy the request with fragments of this
-+		   size, try a larger one */
-+		largest_page <<= 2;
-+	}
-+	if (start_frag == NULL) {
-+		/* Couldn't find proper amount of space */
-+		ret = -ENOMEM;
-+		goto out;
-+	}
-+	i->did_create = 1;
-+do_map:
-+	/* Verify there is sufficient space to do the mapping */
-+	down_write(&current->mm->mmap_sem);
-+	next_addr = usdpaa_get_unmapped_area(fp, next_addr, i->len, 0, 0);
-+	up_write(&current->mm->mmap_sem);
-+
-+	if (next_addr & ~PAGE_MASK) {
-+		ret = -ENOMEM;
-+		goto out;
-+	}
-+
-+	/* We may need to divide the final fragment to accommodate the
-+	   mapping */
-+	next_frag = start_frag;
-+	while (so_far != i->len) {
-+		BUG_ON(next_frag->len == 0);
-+		while ((next_frag->len + so_far) > i->len) {
-+			/* Split frag until they match */
-+			split_frag(next_frag);
-+		}
-+		so_far += next_frag->len;
-+		next_frag->refs++;
-+		++frag_count;
-+		next_frag = list_entry(next_frag->list.prev,
-+				       struct mem_fragment, list);
-+	}
-+	if (i->did_create) {
-+		size_t name_len = 0;
-+		start_frag->flags = i->flags;
-+		strncpy(start_frag->name, i->name, USDPAA_DMA_NAME_MAX);
-+		name_len = strnlen(start_frag->name, USDPAA_DMA_NAME_MAX);
-+		if (name_len >= USDPAA_DMA_NAME_MAX) {
-+			ret = -EFAULT;
-+			goto out;
-+		}
-+		start_frag->map_len = i->len;
-+		start_frag->has_locking = i->has_locking;
-+		init_waitqueue_head(&start_frag->wq);
-+		start_frag->owner = NULL;
-+	}
-+
-+	/* Setup the map entry */
-+	map->root_frag = start_frag;
-+	map->total_size = i->len;
-+	map->frag_count = frag_count;
-+	map->refs = 1;
-+	list_add(&map->list, &ctx->maps);
-+	i->phys_addr = start_frag->base;
-+out:
-+	spin_unlock(&mem_lock);
-+
-+	if (!ret) {
-+		unsigned long longret;
-+		down_write(&current->mm->mmap_sem);
-+		longret = do_mmap_pgoff(fp, next_addr, map->total_size,
-+					PROT_READ |
-+					(i->flags &
-+					 USDPAA_DMA_FLAG_RDONLY ?
0
-+					 : PROT_WRITE),
-+					MAP_SHARED,
-+					start_frag->pfn_base,
-+					&populate);
-+		up_write(&current->mm->mmap_sem);
-+		if (longret & ~PAGE_MASK) {
-+			ret = (int)longret;
-+		} else {
-+			i->ptr = (void *)longret;
-+			map->virt_addr = i->ptr;
-+		}
-+	} else
-+		kfree(map);
-+	return ret;
-+}
-+
-+static long ioctl_dma_unmap(struct ctx *ctx, void __user *arg)
-+{
-+	struct mem_mapping *map;
-+	struct vm_area_struct *vma;
-+	int ret, i;
-+	struct mem_fragment *current_frag;
-+	size_t sz;
-+	unsigned long base;
-+	unsigned long vaddr;
-+
-+	down_write(&current->mm->mmap_sem);
-+	vma = find_vma(current->mm, (unsigned long)arg);
-+	if (!vma || (vma->vm_start > (unsigned long)arg)) {
-+		up_write(&current->mm->mmap_sem);
-+		return -EFAULT;
-+	}
-+	spin_lock(&mem_lock);
-+	list_for_each_entry(map, &ctx->maps, list) {
-+		if (map->root_frag->pfn_base == vma->vm_pgoff) {
-+			/* Drop the map lock if we hold it */
-+			if (map->root_frag->has_locking &&
-+			    (map->root_frag->owner == map)) {
-+				map->root_frag->owner = NULL;
-+				wake_up(&map->root_frag->wq);
-+			}
-+			goto map_match;
-+		}
-+	}
-+	/* Failed to find a matching mapping for this process */
-+	ret = -EFAULT;
-+	spin_unlock(&mem_lock);
-+	goto out;
-+map_match:
-+	map->refs--;
-+	if (map->refs != 0) {
-+		/* Another call to dma_map is still referencing this entry */
-+		ret = 0;
-+		spin_unlock(&mem_lock);
-+		goto out;
-+	}
-+
-+	current_frag = map->root_frag;
-+	vaddr = (unsigned long) map->virt_addr;
-+	for (i = 0; i < map->frag_count; i++) {
-+		DPA_ASSERT(current_frag->refs > 0);
-+		--current_frag->refs;
-+#if !(defined(CONFIG_ARM) || defined(CONFIG_ARM64))
-+		/*
-+		 * Make sure we invalidate the TLB entry for
-+		 * this fragment, otherwise a remap of a different
-+		 * page to this vaddr would give access to an
-+		 * incorrect piece of memory
-+		 */
-+		cleartlbcam(vaddr, mfspr(SPRN_PID));
-+#endif
-+		vaddr += current_frag->len;
-+		current_frag = list_entry(current_frag->list.prev,
-+					  struct mem_fragment, list);
-+	}
-+	map->root_frag->name[0] = 0;
-+	list_del(&map->list);
-+	compress_frags();
-+	spin_unlock(&mem_lock);
-+
-+	base = vma->vm_start;
-+	sz = vma->vm_end - vma->vm_start;
-+	do_munmap(current->mm, base, sz);
-+	ret = 0;
-+out:
-+	up_write(&current->mm->mmap_sem);
-+	return ret;
-+}
-+
-+static long ioctl_dma_stats(struct ctx *ctx, void __user *arg)
-+{
-+	struct mem_fragment *frag;
-+	struct usdpaa_ioctl_dma_used result;
-+
-+	result.free_bytes = 0;
-+	result.total_bytes = phys_size;
-+
-+	list_for_each_entry(frag, &mem_list, list) {
-+		if (frag->refs == 0)
-+			result.free_bytes += frag->len;
-+	}
-+
-+	return copy_to_user(arg, &result, sizeof(result));
-+}
-+
-+static int test_lock(struct mem_mapping *map)
-+{
-+	int ret = 0;
-+	spin_lock(&mem_lock);
-+	if (!map->root_frag->owner) {
-+		map->root_frag->owner = map;
-+		ret = 1;
-+	}
-+	spin_unlock(&mem_lock);
-+	return ret;
-+}
-+
-+static long ioctl_dma_lock(struct ctx *ctx, void __user *arg)
-+{
-+	struct mem_mapping *map;
-+	struct vm_area_struct *vma;
-+
-+	down_read(&current->mm->mmap_sem);
-+	vma = find_vma(current->mm, (unsigned long)arg);
-+	if (!vma || (vma->vm_start > (unsigned long)arg)) {
-+		up_read(&current->mm->mmap_sem);
-+		return -EFAULT;
-+	}
-+	spin_lock(&mem_lock);
-+	list_for_each_entry(map, &ctx->maps, list) {
-+		if (map->root_frag->pfn_base == vma->vm_pgoff)
-+			goto map_match;
-+	}
-+	map = NULL;
-+map_match:
-+	spin_unlock(&mem_lock);
-+	up_read(&current->mm->mmap_sem);
-+
-+	if (!map)
-+		return -EFAULT;
-+	if (!map->root_frag->has_locking)
-+		return -ENODEV;
-+	return wait_event_interruptible(map->root_frag->wq, test_lock(map));
-+}
-+
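-+/* Usage note: USDPAA_IOCTL_DMA_LOCK/UNLOCK give user space an advisory,
-+ * cross-process lock on a shared DMA region, keyed by the region's root
-+ * fragment. ioctl_dma_lock() above sleeps on the fragment's waitqueue until
-+ * test_lock() can record this mapping as the owner; the unlock path below
-+ * clears the owner and wakes the next waiter. A minimal user-space sequence
-+ * (sketch only; 'fd' is assumed to come from open("/dev/fsl-usdpaa") and
-+ * 'ptr' from a prior USDPAA_IOCTL_DMA_MAP):
-+ *
-+ *	ioctl(fd, USDPAA_IOCTL_DMA_LOCK, ptr);   // may block until owned
-+ *	// ... exclusive access to the shared region ...
-+ *	ioctl(fd, USDPAA_IOCTL_DMA_UNLOCK, ptr); // hand off to next waiter
-+ */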
-+static long ioctl_dma_unlock(struct ctx *ctx, void __user *arg)
-+{
-+	struct mem_mapping *map;
-+	struct vm_area_struct *vma;
-+	int ret;
-+
-+	down_read(&current->mm->mmap_sem);
-+	vma = find_vma(current->mm, (unsigned long)arg);
-+	if (!vma || (vma->vm_start > (unsigned long)arg))
-+		ret = -EFAULT;
-+	else {
-+		spin_lock(&mem_lock);
-+		list_for_each_entry(map, &ctx->maps, list) {
-+			if (map->root_frag->pfn_base == vma->vm_pgoff) {
-+				if (!map->root_frag->has_locking)
-+					ret = -ENODEV;
-+				else if (map->root_frag->owner == map) {
-+					map->root_frag->owner = NULL;
-+					wake_up(&map->root_frag->wq);
-+					ret = 0;
-+				} else
-+					ret = -EBUSY;
-+				goto map_match;
-+			}
-+		}
-+		ret = -EINVAL;
-+map_match:
-+		spin_unlock(&mem_lock);
-+	}
-+	up_read(&current->mm->mmap_sem);
-+	return ret;
-+}
-+
-+static int portal_mmap(struct file *fp, struct resource *res, void **ptr)
-+{
-+	unsigned long longret = 0, populate;
-+	resource_size_t len;
-+
-+	/* Check the length before taking mmap_sem so the error path
-+	   doesn't return with the semaphore held */
-+	len = resource_size(res);
-+	if (len != (unsigned long)len)
-+		return -EINVAL;
-+
-+	down_write(&current->mm->mmap_sem);
-+	longret = do_mmap_pgoff(fp, PAGE_SIZE, (unsigned long)len,
-+				PROT_READ | PROT_WRITE, MAP_SHARED,
-+				res->start >> PAGE_SHIFT, &populate);
-+	up_write(&current->mm->mmap_sem);
-+
-+	if (longret & ~PAGE_MASK)
-+		return (int)longret;
-+
-+	*ptr = (void *) longret;
-+	return 0;
-+}
-+
-+static void portal_munmap(struct resource *res, void *ptr)
-+{
-+	down_write(&current->mm->mmap_sem);
-+	do_munmap(current->mm, (unsigned long)ptr, resource_size(res));
-+	up_write(&current->mm->mmap_sem);
-+}
-+
-+static long ioctl_portal_map(struct file *fp, struct ctx *ctx,
-+			     struct usdpaa_ioctl_portal_map *arg)
-+{
-+	struct portal_mapping *mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
-+	int ret;
-+
-+	if (!mapping)
-+		return -ENOMEM;
-+
-+	mapping->user = *arg;
-+	mapping->iommu_domain = NULL;
-+
-+	if (mapping->user.type == usdpaa_portal_qman) {
-+		mapping->qportal =
-+			qm_get_unused_portal_idx(mapping->user.index);
-+		if (!mapping->qportal) {
-+			ret = -ENODEV;
-+			goto err_get_portal;
-+		}
-+		mapping->phys = &mapping->qportal->addr_phys[0];
-+		mapping->user.channel = mapping->qportal->public_cfg.channel;
-+		mapping->user.pools = mapping->qportal->public_cfg.pools;
-+		mapping->user.index = mapping->qportal->public_cfg.index;
-+	} else if (mapping->user.type == usdpaa_portal_bman) {
-+		mapping->bportal =
-+			bm_get_unused_portal_idx(mapping->user.index);
-+		if (!mapping->bportal) {
-+			ret = -ENODEV;
-+			goto err_get_portal;
-+		}
-+		mapping->phys = &mapping->bportal->addr_phys[0];
-+		mapping->user.index = mapping->bportal->public_cfg.index;
-+	} else {
-+		ret = -EINVAL;
-+		goto err_copy_from_user;
-+	}
-+	/* Need to put pcfg in ctx's list before the mmaps because the mmap
-+	 * handlers look it up.
*/
-+	spin_lock(&mem_lock);
-+	list_add(&mapping->list, &ctx->portals);
-+	spin_unlock(&mem_lock);
-+	ret = portal_mmap(fp, &mapping->phys[DPA_PORTAL_CE],
-+			  &mapping->user.addr.cena);
-+	if (ret)
-+		goto err_mmap_cena;
-+	ret = portal_mmap(fp, &mapping->phys[DPA_PORTAL_CI],
-+			  &mapping->user.addr.cinh);
-+	if (ret)
-+		goto err_mmap_cinh;
-+	*arg = mapping->user;
-+	return ret;
-+
-+err_mmap_cinh:
-+	portal_munmap(&mapping->phys[DPA_PORTAL_CE], mapping->user.addr.cena);
-+err_mmap_cena:
-+	if ((mapping->user.type == usdpaa_portal_qman) && mapping->qportal)
-+		qm_put_unused_portal(mapping->qportal);
-+	else if ((mapping->user.type == usdpaa_portal_bman) && mapping->bportal)
-+		bm_put_unused_portal(mapping->bportal);
-+	spin_lock(&mem_lock);
-+	list_del(&mapping->list);
-+	spin_unlock(&mem_lock);
-+err_get_portal:
-+err_copy_from_user:
-+	kfree(mapping);
-+	return ret;
-+}
-+
-+static long ioctl_portal_unmap(struct ctx *ctx, struct usdpaa_portal_map *i)
-+{
-+	struct portal_mapping *mapping;
-+	struct vm_area_struct *vma;
-+	unsigned long pfn;
-+	u32 channel;
-+
-+	/* Get the PFN corresponding to one of the virt addresses */
-+	down_read(&current->mm->mmap_sem);
-+	vma = find_vma(current->mm, (unsigned long)i->cinh);
-+	if (!vma || (vma->vm_start > (unsigned long)i->cinh)) {
-+		up_read(&current->mm->mmap_sem);
-+		return -EFAULT;
-+	}
-+	pfn = vma->vm_pgoff;
-+	up_read(&current->mm->mmap_sem);
-+
-+	/* Find the corresponding portal */
-+	spin_lock(&mem_lock);
-+	list_for_each_entry(mapping, &ctx->portals, list) {
-+		if (pfn == (mapping->phys[DPA_PORTAL_CI].start >> PAGE_SHIFT))
-+			goto found;
-+	}
-+	mapping = NULL;
-+found:
-+	if (mapping)
-+		list_del(&mapping->list);
-+	spin_unlock(&mem_lock);
-+	if (!mapping)
-+		return -ENODEV;
-+	portal_munmap(&mapping->phys[DPA_PORTAL_CI], mapping->user.addr.cinh);
-+	portal_munmap(&mapping->phys[DPA_PORTAL_CE], mapping->user.addr.cena);
-+	if (mapping->user.type == usdpaa_portal_qman) {
-+		init_qm_portal(mapping->qportal,
-+			       &mapping->qman_portal_low);
-+
-+		/* Tear down any FQs this portal is referencing */
-+		channel = mapping->qportal->public_cfg.channel;
-+		qm_check_and_destroy_fqs(&mapping->qman_portal_low,
-+					 &channel,
-+					 check_portal_channel);
-+		qm_put_unused_portal(mapping->qportal);
-+	} else if (mapping->user.type == usdpaa_portal_bman) {
-+		init_bm_portal(mapping->bportal,
-+			       &mapping->bman_portal_low);
-+		bm_put_unused_portal(mapping->bportal);
-+	}
-+	kfree(mapping);
-+	return 0;
-+}
-+
-+static void portal_config_pamu(struct qm_portal_config *pcfg, uint8_t sdest,
-+			       uint32_t cpu, uint32_t cache, uint32_t window)
-+{
-+#ifdef CONFIG_FSL_PAMU
-+	int ret;
-+	int window_count = 1;
-+	struct iommu_domain_geometry geom_attr;
-+	struct pamu_stash_attribute stash_attr;
-+
-+	pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
-+	if (!pcfg->iommu_domain) {
-+		pr_err(KBUILD_MODNAME ":%s(): iommu_domain_alloc() failed",
-+		       __func__);
-+		goto _no_iommu;
-+	}
-+	geom_attr.aperture_start = 0;
-+	geom_attr.aperture_end =
-+		((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
-+	geom_attr.force_aperture = true;
-+	ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
-+				    &geom_attr);
-+	if (ret < 0) {
-+		pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
-+		       __func__, ret);
-+		goto _iommu_domain_free;
-+	}
-+	ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
-+				    &window_count);
-+	if (ret < 0) {
-+		pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
-+		       __func__, ret);
-+		goto _iommu_domain_free;
-+	}
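-+
-+	/* Next, describe the stashing target: traffic through window 0 is
-+	 * stashed into the chosen CPU's cache, and the single window enabled
-+	 * below spans the entire 36-bit aperture configured above. */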
-+ stash_attr.cpu = cpu; -+ stash_attr.cache = cache; -+ /* set stash information for the window */ -+ stash_attr.window = 0; -+ -+ ret = iommu_domain_set_attr(pcfg->iommu_domain, -+ DOMAIN_ATTR_FSL_PAMU_STASH, -+ &stash_attr); -+ if (ret < 0) { -+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d", -+ __func__, ret); -+ goto _iommu_domain_free; -+ } -+ ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36, -+ IOMMU_READ | IOMMU_WRITE); -+ if (ret < 0) { -+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_window_enable() = %d", -+ __func__, ret); -+ goto _iommu_domain_free; -+ } -+ ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev); -+ if (ret < 0) { -+ pr_err(KBUILD_MODNAME ":%s(): iommu_device_attach() = %d", -+ __func__, ret); -+ goto _iommu_domain_free; -+ } -+ ret = iommu_domain_set_attr(pcfg->iommu_domain, -+ DOMAIN_ATTR_FSL_PAMU_ENABLE, -+ &window_count); -+ if (ret < 0) { -+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d", -+ __func__, ret); -+ goto _iommu_detach_device; -+ } -+_no_iommu: -+#endif -+ -+#ifdef CONFIG_FSL_QMAN_CONFIG -+ if (qman_set_sdest(pcfg->public_cfg.channel, sdest)) -+#endif -+ pr_warn("Failed to set QMan portal's stash request queue\n"); -+ -+ return; -+ -+#ifdef CONFIG_FSL_PAMU -+_iommu_detach_device: -+ iommu_detach_device(pcfg->iommu_domain, NULL); -+_iommu_domain_free: -+ iommu_domain_free(pcfg->iommu_domain); -+#endif -+} -+ -+static long ioctl_allocate_raw_portal(struct file *fp, struct ctx *ctx, -+ struct usdpaa_ioctl_raw_portal *arg) -+{ -+ struct portal_mapping *mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); -+ int ret; -+ -+ if (!mapping) -+ return -ENOMEM; -+ -+ mapping->user.type = arg->type; -+ mapping->iommu_domain = NULL; -+ if (arg->type == usdpaa_portal_qman) { -+ mapping->qportal = qm_get_unused_portal_idx(arg->index); -+ if (!mapping->qportal) { -+ ret = -ENODEV; -+ goto err; -+ } -+ mapping->phys = &mapping->qportal->addr_phys[0]; -+ arg->index = mapping->qportal->public_cfg.index; -+ arg->cinh = mapping->qportal->addr_phys[DPA_PORTAL_CI].start; -+ arg->cena = mapping->qportal->addr_phys[DPA_PORTAL_CE].start; -+ if (arg->enable_stash) { -+ /* Setup the PAMU with the supplied parameters */ -+ portal_config_pamu(mapping->qportal, arg->sdest, -+ arg->cpu, arg->cache, arg->window); -+ } -+ } else if (mapping->user.type == usdpaa_portal_bman) { -+ mapping->bportal = -+ bm_get_unused_portal_idx(arg->index); -+ if (!mapping->bportal) { -+ ret = -ENODEV; -+ goto err; -+ } -+ mapping->phys = &mapping->bportal->addr_phys[0]; -+ arg->index = mapping->bportal->public_cfg.index; -+ arg->cinh = mapping->bportal->addr_phys[DPA_PORTAL_CI].start; -+ arg->cena = mapping->bportal->addr_phys[DPA_PORTAL_CE].start; -+ } else { -+ ret = -EINVAL; -+ goto err; -+ } -+ /* Need to put pcfg in ctx's list before the mmaps because the mmap -+ * handlers look it up. 
*/ -+ spin_lock(&mem_lock); -+ list_add(&mapping->list, &ctx->portals); -+ spin_unlock(&mem_lock); -+ return 0; -+err: -+ kfree(mapping); -+ return ret; -+} -+ -+static long ioctl_free_raw_portal(struct file *fp, struct ctx *ctx, -+ struct usdpaa_ioctl_raw_portal *arg) -+{ -+ struct portal_mapping *mapping; -+ u32 channel; -+ -+ /* Find the corresponding portal */ -+ spin_lock(&mem_lock); -+ list_for_each_entry(mapping, &ctx->portals, list) { -+ if (mapping->phys[DPA_PORTAL_CI].start == arg->cinh) -+ goto found; -+ } -+ mapping = NULL; -+found: -+ if (mapping) -+ list_del(&mapping->list); -+ spin_unlock(&mem_lock); -+ if (!mapping) -+ return -ENODEV; -+ if (mapping->user.type == usdpaa_portal_qman) { -+ init_qm_portal(mapping->qportal, -+ &mapping->qman_portal_low); -+ -+ /* Tear down any FQs this portal is referencing */ -+ channel = mapping->qportal->public_cfg.channel; -+ qm_check_and_destroy_fqs(&mapping->qman_portal_low, -+ &channel, -+ check_portal_channel); -+ qm_put_unused_portal(mapping->qportal); -+ } else if (mapping->user.type == usdpaa_portal_bman) { -+ init_bm_portal(mapping->bportal, -+ &mapping->bman_portal_low); -+ bm_put_unused_portal(mapping->bportal); -+ } -+ kfree(mapping); -+ return 0; -+} -+ -+static long usdpaa_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) -+{ -+ struct ctx *ctx = fp->private_data; -+ void __user *a = (void __user *)arg; -+ switch (cmd) { -+ case USDPAA_IOCTL_ID_ALLOC: -+ return ioctl_id_alloc(ctx, a); -+ case USDPAA_IOCTL_ID_RELEASE: -+ return ioctl_id_release(ctx, a); -+ case USDPAA_IOCTL_ID_RESERVE: -+ return ioctl_id_reserve(ctx, a); -+ case USDPAA_IOCTL_DMA_MAP: -+ { -+ struct usdpaa_ioctl_dma_map input; -+ int ret; -+ if (copy_from_user(&input, a, sizeof(input))) -+ return -EFAULT; -+ ret = ioctl_dma_map(fp, ctx, &input); -+ if (copy_to_user(a, &input, sizeof(input))) -+ return -EFAULT; -+ return ret; -+ } -+ case USDPAA_IOCTL_DMA_UNMAP: -+ return ioctl_dma_unmap(ctx, a); -+ case USDPAA_IOCTL_DMA_LOCK: -+ return ioctl_dma_lock(ctx, a); -+ case USDPAA_IOCTL_DMA_UNLOCK: -+ return ioctl_dma_unlock(ctx, a); -+ case USDPAA_IOCTL_PORTAL_MAP: -+ { -+ struct usdpaa_ioctl_portal_map input; -+ int ret; -+ if (copy_from_user(&input, a, sizeof(input))) -+ return -EFAULT; -+ ret = ioctl_portal_map(fp, ctx, &input); -+ if (copy_to_user(a, &input, sizeof(input))) -+ return -EFAULT; -+ return ret; -+ } -+ case USDPAA_IOCTL_PORTAL_UNMAP: -+ { -+ struct usdpaa_portal_map input; -+ if (copy_from_user(&input, a, sizeof(input))) -+ return -EFAULT; -+ return ioctl_portal_unmap(ctx, &input); -+ } -+ case USDPAA_IOCTL_DMA_USED: -+ return ioctl_dma_stats(ctx, a); -+ case USDPAA_IOCTL_ALLOC_RAW_PORTAL: -+ { -+ struct usdpaa_ioctl_raw_portal input; -+ int ret; -+ if (copy_from_user(&input, a, sizeof(input))) -+ return -EFAULT; -+ ret = ioctl_allocate_raw_portal(fp, ctx, &input); -+ if (copy_to_user(a, &input, sizeof(input))) -+ return -EFAULT; -+ return ret; -+ } -+ case USDPAA_IOCTL_FREE_RAW_PORTAL: -+ { -+ struct usdpaa_ioctl_raw_portal input; -+ if (copy_from_user(&input, a, sizeof(input))) -+ return -EFAULT; -+ return ioctl_free_raw_portal(fp, ctx, &input); -+ } -+ } -+ return -EINVAL; -+} -+ -+static long usdpaa_ioctl_compat(struct file *fp, unsigned int cmd, -+ unsigned long arg) -+{ -+#ifdef CONFIG_COMPAT -+ struct ctx *ctx = fp->private_data; -+ void __user *a = (void __user *)arg; -+#endif -+ switch (cmd) { -+#ifdef CONFIG_COMPAT -+ case USDPAA_IOCTL_DMA_MAP_COMPAT: -+ { -+ int ret; -+ struct usdpaa_ioctl_dma_map_compat input; -+ struct 
usdpaa_ioctl_dma_map converted; -+ -+ if (copy_from_user(&input, a, sizeof(input))) -+ return -EFAULT; -+ -+ converted.ptr = compat_ptr(input.ptr); -+ converted.phys_addr = input.phys_addr; -+ converted.len = input.len; -+ converted.flags = input.flags; -+ strncpy(converted.name, input.name, USDPAA_DMA_NAME_MAX); -+ converted.has_locking = input.has_locking; -+ converted.did_create = input.did_create; -+ -+ ret = ioctl_dma_map(fp, ctx, &converted); -+ input.ptr = ptr_to_compat(converted.ptr); -+ input.phys_addr = converted.phys_addr; -+ input.len = converted.len; -+ input.flags = converted.flags; -+ strncpy(input.name, converted.name, USDPAA_DMA_NAME_MAX); -+ input.has_locking = converted.has_locking; -+ input.did_create = converted.did_create; -+ if (copy_to_user(a, &input, sizeof(input))) -+ return -EFAULT; -+ return ret; -+ } -+ case USDPAA_IOCTL_PORTAL_MAP_COMPAT: -+ { -+ int ret; -+ struct compat_usdpaa_ioctl_portal_map input; -+ struct usdpaa_ioctl_portal_map converted; -+ if (copy_from_user(&input, a, sizeof(input))) -+ return -EFAULT; -+ converted.type = input.type; -+ converted.index = input.index; -+ ret = ioctl_portal_map(fp, ctx, &converted); -+ input.addr.cinh = ptr_to_compat(converted.addr.cinh); -+ input.addr.cena = ptr_to_compat(converted.addr.cena); -+ input.channel = converted.channel; -+ input.pools = converted.pools; -+ input.index = converted.index; -+ if (copy_to_user(a, &input, sizeof(input))) -+ return -EFAULT; -+ return ret; -+ } -+ case USDPAA_IOCTL_PORTAL_UNMAP_COMPAT: -+ { -+ struct usdpaa_portal_map_compat input; -+ struct usdpaa_portal_map converted; -+ -+ if (copy_from_user(&input, a, sizeof(input))) -+ return -EFAULT; -+ converted.cinh = compat_ptr(input.cinh); -+ converted.cena = compat_ptr(input.cena); -+ return ioctl_portal_unmap(ctx, &converted); -+ } -+ case USDPAA_IOCTL_ALLOC_RAW_PORTAL_COMPAT: -+ { -+ int ret; -+ struct usdpaa_ioctl_raw_portal converted; -+ struct compat_ioctl_raw_portal input; -+ if (copy_from_user(&input, a, sizeof(input))) -+ return -EFAULT; -+ converted.type = input.type; -+ converted.index = input.index; -+ converted.enable_stash = input.enable_stash; -+ converted.cpu = input.cpu; -+ converted.cache = input.cache; -+ converted.window = input.window; -+ converted.sdest = input.sdest; -+ ret = ioctl_allocate_raw_portal(fp, ctx, &converted); -+ -+ input.cinh = converted.cinh; -+ input.cena = converted.cena; -+ input.index = converted.index; -+ -+ if (copy_to_user(a, &input, sizeof(input))) -+ return -EFAULT; -+ return ret; -+ } -+ case USDPAA_IOCTL_FREE_RAW_PORTAL_COMPAT: -+ { -+ struct usdpaa_ioctl_raw_portal converted; -+ struct compat_ioctl_raw_portal input; -+ if (copy_from_user(&input, a, sizeof(input))) -+ return -EFAULT; -+ converted.type = input.type; -+ converted.index = input.index; -+ converted.cinh = input.cinh; -+ converted.cena = input.cena; -+ return ioctl_free_raw_portal(fp, ctx, &converted); -+ } -+#endif -+ default: -+ return usdpaa_ioctl(fp, cmd, arg); -+ } -+ return -EINVAL; -+} -+ -+int usdpaa_get_portal_config(struct file *filp, void *cinh, -+ enum usdpaa_portal_type ptype, unsigned int *irq, -+ void **iir_reg) -+{ -+ /* Walk the list of portals for filp and return the config -+ for the portal that matches the hint */ -+ struct ctx *context; -+ struct portal_mapping *portal; -+ -+ /* First sanitize the filp */ -+ if (filp->f_op->open != usdpaa_open) -+ return -ENODEV; -+ context = filp->private_data; -+ spin_lock(&context->lock); -+ list_for_each_entry(portal, &context->portals, list) { -+ if (portal->user.type 
== ptype &&
-+		    portal->user.addr.cinh == cinh) {
-+			if (ptype == usdpaa_portal_qman) {
-+				*irq = portal->qportal->public_cfg.irq;
-+				*iir_reg = portal->qportal->addr_virt[1] +
-+					QM_REG_IIR;
-+			} else {
-+				*irq = portal->bportal->public_cfg.irq;
-+				*iir_reg = portal->bportal->addr_virt[1] +
-+					BM_REG_IIR;
-+			}
-+			spin_unlock(&context->lock);
-+			return 0;
-+		}
-+	}
-+	spin_unlock(&context->lock);
-+	return -EINVAL;
-+}
-+
-+static const struct file_operations usdpaa_fops = {
-+	.open = usdpaa_open,
-+	.release = usdpaa_release,
-+	.mmap = usdpaa_mmap,
-+	.get_unmapped_area = usdpaa_get_unmapped_area,
-+	.unlocked_ioctl = usdpaa_ioctl,
-+	.compat_ioctl = usdpaa_ioctl_compat
-+};
-+
-+static struct miscdevice usdpaa_miscdev = {
-+	.name = "fsl-usdpaa",
-+	.fops = &usdpaa_fops,
-+	.minor = MISC_DYNAMIC_MINOR,
-+};
-+
-+/* Early-boot memory allocation. The boot-arg "usdpaa_mem=<x>" is used to
-+ * indicate how much memory (if any) to allocate during early boot. If the
-+ * format "usdpaa_mem=<x>,<y>" is used, then <y> will be interpreted as the
-+ * number of TLB1 entries to reserve (default is 1). If there are more mappings
-+ * than there are TLB1 entries, fault-handling will occur. */
-+
-+static __init int usdpaa_mem(char *arg)
-+{
-+	pr_warn("usdpaa_mem argument is deprecated\n");
-+	arg_phys_size = memparse(arg, &arg);
-+	num_tlb = 1;
-+	if (*arg == ',') {
-+		unsigned long ul;
-+		int err = kstrtoul(arg + 1, 0, &ul);
-+		if (err < 0) {
-+			num_tlb = 1;
-+			pr_warn("ERROR, usdpaa_mem arg is invalid\n");
-+		} else
-+			num_tlb = (unsigned int)ul;
-+	}
-+	return 0;
-+}
-+early_param("usdpaa_mem", usdpaa_mem);
-+
-+static int usdpaa_mem_init(struct reserved_mem *rmem)
-+{
-+	phys_start = rmem->base;
-+	phys_size = rmem->size;
-+
-+	WARN_ON(!(phys_start && phys_size));
-+
-+	return 0;
-+}
-+RESERVEDMEM_OF_DECLARE(usdpaa_mem_init, "fsl,usdpaa-mem", usdpaa_mem_init);
-+
-+__init int fsl_usdpaa_init_early(void)
-+{
-+	if (!phys_size || !phys_start) {
-+		pr_info("No USDPAA memory, no 'fsl,usdpaa-mem' in device-tree\n");
-+		return 0;
-+	}
-+	if (phys_size % PAGE_SIZE) {
-+		pr_err("'fsl,usdpaa-mem' size must be a multiple of page size\n");
-+		phys_size = 0;
-+		return 0;
-+	}
-+	if (arg_phys_size && phys_size != arg_phys_size) {
-+		pr_err("usdpaa_mem argument size (0x%llx) does not match device tree size (0x%llx)\n",
-+		       arg_phys_size, phys_size);
-+		phys_size = 0;
-+		return 0;
-+	}
-+	pfn_start = phys_start >> PAGE_SHIFT;
-+	pfn_size = phys_size >> PAGE_SHIFT;
-+#ifdef CONFIG_PPC
-+	first_tlb = current_tlb = tlbcam_index;
-+	tlbcam_index += num_tlb;
-+#endif
-+	pr_info("USDPAA region at %llx:%llx (%lx:%lx), %d TLB1 entries\n",
-+		phys_start, phys_size, pfn_start, pfn_size, num_tlb);
-+	return 0;
-+}
-+subsys_initcall(fsl_usdpaa_init_early);
-+
-+
-+static int __init usdpaa_init(void)
-+{
-+	struct mem_fragment *frag;
-+	int ret;
-+	u64 tmp_size = phys_size;
-+	u64 tmp_start = phys_start;
-+	u64 tmp_pfn_size = pfn_size;
-+	u64 tmp_pfn_start = pfn_start;
-+
-+	pr_info("Freescale USDPAA process driver\n");
-+	if (!phys_start) {
-+		pr_warn("fsl-usdpaa: no region found\n");
-+		return 0;
-+	}
-+
-+	while (tmp_size != 0) {
-+		u32 frag_size = largest_page_size(tmp_size);
-+		frag = kmalloc(sizeof(*frag), GFP_KERNEL);
-+		if (!frag) {
-+			pr_err("Failed to setup USDPAA memory accounting\n");
-+			return -ENOMEM;
-+		}
-+		frag->base = tmp_start;
-+		frag->len = frag->root_len = frag_size;
-+		frag->root_pfn = tmp_pfn_start;
-+		frag->pfn_base = tmp_pfn_start;
-+		frag->pfn_len = frag_size / PAGE_SIZE;
-+		frag->refs = 0;
-+
init_waitqueue_head(&frag->wq); -+ frag->owner = NULL; -+ list_add(&frag->list, &mem_list); -+ -+ /* Adjust for this frag */ -+ tmp_start += frag_size; -+ tmp_size -= frag_size; -+ tmp_pfn_start += frag_size / PAGE_SIZE; -+ tmp_pfn_size -= frag_size / PAGE_SIZE; -+ } -+ ret = misc_register(&usdpaa_miscdev); -+ if (ret) -+ pr_err("fsl-usdpaa: failed to register misc device\n"); -+ return ret; -+} -+ -+static void __exit usdpaa_exit(void) -+{ -+ misc_deregister(&usdpaa_miscdev); -+} -+ -+module_init(usdpaa_init); -+module_exit(usdpaa_exit); -+ -+MODULE_LICENSE("GPL"); -+MODULE_AUTHOR("Freescale Semiconductor"); -+MODULE_DESCRIPTION("Freescale USDPAA process driver"); ---- /dev/null -+++ b/drivers/staging/fsl_qbman/fsl_usdpaa_irq.c -@@ -0,0 +1,289 @@ -+/* Copyright (c) 2013 Freescale Semiconductor, Inc. -+ * All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
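The /dev/fsl-usdpaa device registered above is normally driven from user space through the DMA-map ioctl whose compat conversion appears earlier in this file. A minimal sketch of that usage, not part of the patch: it assumes the uapi definitions from this patch are exported through a header (shown here as a hypothetical <linux/fsl_usdpaa.h>), that the native command is named USDPAA_IOCTL_DMA_MAP to match the _COMPAT variant handled above, and it elides error handling and cleanup.

	#include <fcntl.h>		/* open() */
	#include <string.h>		/* memset(), strncpy() */
	#include <sys/ioctl.h>		/* ioctl() */
	#include <linux/fsl_usdpaa.h>	/* hypothetical uapi header from this patch */

	/* Create-or-map a named USDPAA DMA region; returns the device fd. */
	static int usdpaa_map_region(struct usdpaa_ioctl_dma_map *map, size_t len)
	{
		int fd = open("/dev/fsl-usdpaa", O_RDWR);

		if (fd < 0)
			return -1;
		memset(map, 0, sizeof(*map));
		map->len = len;
		strncpy(map->name, "usdpaa_shmem", USDPAA_DMA_NAME_MAX);
		if (ioctl(fd, USDPAA_IOCTL_DMA_MAP, map) < 0)
			return -1;
		/* On success the kernel has filled in map->ptr, map->phys_addr
		 * and map->did_create, i.e. exactly the fields the compat path
		 * above copies back to 32-bit callers. */
		return fd;
	}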
-+ */
-+
-+/* define a device that allows USDPAA processes to open a file
-+   descriptor and specify which IRQ it wants to monitor using an ioctl().
-+   When an IRQ is received, the device becomes readable so that a process
-+   can use read() or select() type calls to monitor for IRQs */
-+
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+
-+#include "qman_low.h"
-+#include "bman_low.h"
-+
-+struct usdpaa_irq_ctx {
-+	int irq_set; /* Set to true once the irq is set via ioctl */
-+	unsigned int irq_num;
-+	u32 last_irq_count; /* Last value returned from read */
-+	u32 irq_count; /* Number of irqs since last read */
-+	wait_queue_head_t wait_queue; /* Waiting processes */
-+	spinlock_t lock;
-+	void *inhibit_addr; /* inhibit register address */
-+	struct file *usdpaa_filp;
-+	char irq_name[128];
-+};
-+
-+static int usdpaa_irq_open(struct inode *inode, struct file *filp)
-+{
-+	struct usdpaa_irq_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
-+	if (!ctx)
-+		return -ENOMEM;
-+	ctx->irq_set = 0;
-+	ctx->irq_count = 0;
-+	ctx->last_irq_count = 0;
-+	init_waitqueue_head(&ctx->wait_queue);
-+	spin_lock_init(&ctx->lock);
-+	filp->private_data = ctx;
-+	return 0;
-+}
-+
-+static int usdpaa_irq_release(struct inode *inode, struct file *filp)
-+{
-+	struct usdpaa_irq_ctx *ctx = filp->private_data;
-+	if (ctx->irq_set) {
-+		/* Inhibit the IRQ */
-+		out_be32(ctx->inhibit_addr, 0x1);
-+		irq_set_affinity_hint(ctx->irq_num, NULL);
-+		free_irq(ctx->irq_num, ctx);
-+		ctx->irq_set = 0;
-+		fput(ctx->usdpaa_filp);
-+	}
-+	kfree(filp->private_data);
-+	return 0;
-+}
-+
-+static irqreturn_t usdpaa_irq_handler(int irq, void *_ctx)
-+{
-+	unsigned long flags;
-+	struct usdpaa_irq_ctx *ctx = _ctx;
-+	spin_lock_irqsave(&ctx->lock, flags);
-+	++ctx->irq_count;
-+	spin_unlock_irqrestore(&ctx->lock, flags);
-+	wake_up_all(&ctx->wait_queue);
-+	/* Set the inhibit register.
The IRQ will be re-enabled
-+	   once the USDPAA code has handled it */
-+	out_be32(ctx->inhibit_addr, 0x1);
-+	pr_info("Inhibit at %p count %d\n", ctx->inhibit_addr, ctx->irq_count);
-+	return IRQ_HANDLED;
-+}
-+
-+static int map_irq(struct file *fp, struct usdpaa_ioctl_irq_map *irq_map)
-+{
-+	struct usdpaa_irq_ctx *ctx = fp->private_data;
-+	int ret;
-+
-+	if (ctx->irq_set) {
-+		pr_debug("Setting USDPAA IRQ when it was already set!\n");
-+		return -EBUSY;
-+	}
-+
-+	ctx->usdpaa_filp = fget(irq_map->fd);
-+	if (!ctx->usdpaa_filp) {
-+		pr_debug("USDPAA fget(%d) returned NULL\n", irq_map->fd);
-+		return -EINVAL;
-+	}
-+
-+	ret = usdpaa_get_portal_config(ctx->usdpaa_filp, irq_map->portal_cinh,
-+				       irq_map->type, &ctx->irq_num,
-+				       &ctx->inhibit_addr);
-+	if (ret) {
-+		pr_debug("USDPAA IRQ couldn't identify portal\n");
-+		fput(ctx->usdpaa_filp);
-+		return ret;
-+	}
-+
-+	ctx->irq_set = 1;
-+
-+	snprintf(ctx->irq_name, sizeof(ctx->irq_name),
-+		 "usdpaa_irq %d", ctx->irq_num);
-+
-+	ret = request_irq(ctx->irq_num, usdpaa_irq_handler, 0,
-+			  ctx->irq_name, ctx);
-+	if (ret) {
-+		pr_err("USDPAA request_irq(%d) failed, ret = %d\n",
-+		       ctx->irq_num, ret);
-+		ctx->irq_set = 0;
-+		fput(ctx->usdpaa_filp);
-+		return ret;
-+	}
-+	ret = irq_set_affinity(ctx->irq_num, tsk_cpus_allowed(current));
-+	if (ret)
-+		pr_err("USDPAA irq_set_affinity() failed, ret = %d\n", ret);
-+
-+	ret = irq_set_affinity_hint(ctx->irq_num, tsk_cpus_allowed(current));
-+	if (ret)
-+		pr_err("USDPAA irq_set_affinity_hint() failed, ret = %d\n", ret);
-+
-+	return 0;
-+}
-+
-+static long usdpaa_irq_ioctl(struct file *fp, unsigned int cmd,
-+			     unsigned long arg)
-+{
-+	struct usdpaa_ioctl_irq_map irq_map;
-+
-+	if (cmd != USDPAA_IOCTL_PORTAL_IRQ_MAP) {
-+		pr_debug("USDPAA IRQ unknown command 0x%x\n", cmd);
-+		return -EINVAL;
-+	}
-+
-+	if (copy_from_user(&irq_map, (void __user *)arg, sizeof(irq_map)))
-+		return -EFAULT;
-+	return map_irq(fp, &irq_map);
-+}
-+
-+static ssize_t usdpaa_irq_read(struct file *filp, char __user *buff,
-+			       size_t count, loff_t *offp)
-+{
-+	struct usdpaa_irq_ctx *ctx = filp->private_data;
-+	int ret;
-+
-+	if (!ctx->irq_set) {
-+		pr_debug("Reading USDPAA IRQ before it was set\n");
-+		return -EINVAL;
-+	}
-+
-+	if (count < sizeof(ctx->irq_count)) {
-+		pr_debug("USDPAA IRQ Read too small\n");
-+		return -EINVAL;
-+	}
-+	if (ctx->irq_count == ctx->last_irq_count) {
-+		if (filp->f_flags & O_NONBLOCK)
-+			return -EAGAIN;
-+
-+		ret = wait_event_interruptible(ctx->wait_queue,
-+				ctx->irq_count != ctx->last_irq_count);
-+		if (ret == -ERESTARTSYS)
-+			return ret;
-+	}
-+
-+	ctx->last_irq_count = ctx->irq_count;
-+
-+	if (copy_to_user(buff, &ctx->last_irq_count,
-+			 sizeof(ctx->last_irq_count)))
-+		return -EFAULT;
-+	return sizeof(ctx->irq_count);
-+}
-+
-+static unsigned int usdpaa_irq_poll(struct file *filp, poll_table *wait)
-+{
-+	struct usdpaa_irq_ctx *ctx = filp->private_data;
-+	unsigned int ret = 0;
-+	unsigned long flags;
-+
-+	if (!ctx->irq_set)
-+		return POLLHUP;
-+
-+	poll_wait(filp, &ctx->wait_queue, wait);
-+
-+	spin_lock_irqsave(&ctx->lock, flags);
-+	if (ctx->irq_count != ctx->last_irq_count)
-+		ret |= POLLIN | POLLRDNORM;
-+	spin_unlock_irqrestore(&ctx->lock, flags);
-+	return ret;
-+}
-+
-+static long usdpaa_irq_ioctl_compat(struct file *fp, unsigned int cmd,
-+				    unsigned long arg)
-+{
-+#ifdef CONFIG_COMPAT
-+	void __user *a = (void __user *)arg;
-+#endif
-+	switch (cmd) {
-+#ifdef CONFIG_COMPAT
-+	case USDPAA_IOCTL_PORTAL_IRQ_MAP_COMPAT:
-+	{
-+		struct compat_ioctl_irq_map input;
-+		struct
usdpaa_ioctl_irq_map converted; -+ if (copy_from_user(&input, a, sizeof(input))) -+ return -EFAULT; -+ converted.type = input.type; -+ converted.fd = input.fd; -+ converted.portal_cinh = compat_ptr(input.portal_cinh); -+ return map_irq(fp, &converted); -+ } -+#endif -+ default: -+ return usdpaa_irq_ioctl(fp, cmd, arg); -+ } -+} -+ -+static const struct file_operations usdpaa_irq_fops = { -+ .open = usdpaa_irq_open, -+ .release = usdpaa_irq_release, -+ .unlocked_ioctl = usdpaa_irq_ioctl, -+ .compat_ioctl = usdpaa_irq_ioctl_compat, -+ .read = usdpaa_irq_read, -+ .poll = usdpaa_irq_poll -+}; -+ -+static struct miscdevice usdpaa_miscdev = { -+ .name = "fsl-usdpaa-irq", -+ .fops = &usdpaa_irq_fops, -+ .minor = MISC_DYNAMIC_MINOR, -+}; -+ -+static int __init usdpaa_irq_init(void) -+{ -+ int ret; -+ -+ pr_info("Freescale USDPAA process IRQ driver\n"); -+ ret = misc_register(&usdpaa_miscdev); -+ if (ret) -+ pr_err("fsl-usdpaa-irq: failed to register misc device\n"); -+ return ret; -+} -+ -+static void __exit usdpaa_irq_exit(void) -+{ -+ misc_deregister(&usdpaa_miscdev); -+} -+ -+module_init(usdpaa_irq_init); -+module_exit(usdpaa_irq_exit); -+ -+MODULE_LICENSE("GPL"); -+MODULE_AUTHOR("Freescale Semiconductor"); -+MODULE_DESCRIPTION("Freescale USDPAA process IRQ driver"); ---- /dev/null -+++ b/drivers/staging/fsl_qbman/qbman_driver.c -@@ -0,0 +1,88 @@ -+/* Copyright 2013 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
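The fsl-usdpaa-irq device implemented above follows the open/ioctl/read protocol described in its header comment: a process opens the device, binds it to one portal IRQ with USDPAA_IOCTL_PORTAL_IRQ_MAP, and then uses read() or select()-style calls to wait for interrupts. A minimal user-space sketch under the same assumptions as the previous example (hypothetical uapi header, no error handling); the bman variant of the portal-type enum is assumed to be named analogously to usdpaa_portal_qman:

	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fsl_usdpaa.h>	/* hypothetical uapi header from this patch */

	/* Block until the given QMan portal raises an interrupt. */
	static void wait_for_portal_irq(int usdpaa_fd, void *portal_cinh)
	{
		struct usdpaa_ioctl_irq_map map;
		uint32_t count;
		int irq_fd = open("/dev/fsl-usdpaa-irq", O_RDWR);

		map.type = usdpaa_portal_qman;	/* or the bman variant */
		map.fd = usdpaa_fd;		/* fd of an open /dev/fsl-usdpaa */
		map.portal_cinh = portal_cinh;	/* cinh address of the mapped portal */
		ioctl(irq_fd, USDPAA_IOCTL_PORTAL_IRQ_MAP, &map);

		/* The fd becomes readable when the IRQ fires; the read returns
		 * the cumulative interrupt count. */
		read(irq_fd, &count, sizeof(count));
		close(irq_fd);
	}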
-+ */
-+
-+#include
-+#include "qman_private.h"
-+#include "bman_private.h"
-+__init void qman_init_early(void);
-+__init void bman_init_early(void);
-+
-+static __init int qbman_init(void)
-+{
-+	struct device_node *dn;
-+	u32 is_portal_available;
-+
-+	bman_init();
-+	qman_init();
-+
-+	is_portal_available = 0;
-+	for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
-+		if (of_device_is_available(dn))
-+			is_portal_available = 1;
-+	}
-+
-+	if (!qman_have_ccsr() && is_portal_available) {
-+		struct qman_fq fq = {
-+			.fqid = 1
-+		};
-+		struct qm_mcr_queryfq_np np;
-+		int err, retry = CONFIG_FSL_QMAN_INIT_TIMEOUT;
-+		struct timespec nowts, diffts, startts = current_kernel_time();
-+		/* Loop until querying the given fqid succeeds or we time out */
-+		while (1) {
-+			err = qman_query_fq_np(&fq, &np);
-+			if (!err) {
-+				/* success, control-plane has configured QMan */
-+				break;
-+			} else if (err != -ERANGE) {
-+				pr_err("QMan: I/O error, continuing anyway\n");
-+				break;
-+			}
-+			nowts = current_kernel_time();
-+			diffts = timespec_sub(nowts, startts);
-+			if (diffts.tv_sec > 0) {
-+				if (!retry--) {
-+					pr_err("QMan: timed out, control-plane dead?\n");
-+					break;
-+				}
-+				pr_warn("QMan: polling for the control-plane (%d)\n",
-+					retry);
-+			}
-+		}
-+	}
-+	bman_resource_init();
-+	qman_resource_init();
-+	return 0;
-+}
-+subsys_initcall(qbman_init);
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/qman_config.c
-@@ -0,0 +1,1199 @@
-+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */ -+ -+#include -+#include "qman_private.h" -+#include -+#include -+ -+/* Last updated for v00.800 of the BG */ -+ -+/* Register offsets */ -+#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10)) -+#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10)) -+#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10)) -+#define REG_DD_CFG 0x0200 -+#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10)) -+#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10)) -+#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10)) -+#define REG_PFDR_FPC 0x0400 -+#define REG_PFDR_FP_HEAD 0x0404 -+#define REG_PFDR_FP_TAIL 0x0408 -+#define REG_PFDR_FP_LWIT 0x0410 -+#define REG_PFDR_CFG 0x0414 -+#define REG_SFDR_CFG 0x0500 -+#define REG_SFDR_IN_USE 0x0504 -+#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04)) -+#define REG_WQ_DEF_ENC_WQID 0x0630 -+#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04)) -+#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04)) -+#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04)) -+#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04)) -+#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */ -+#define REG_CM_CFG 0x0800 -+#define REG_ECSR 0x0a00 -+#define REG_ECIR 0x0a04 -+#define REG_EADR 0x0a08 -+#define REG_ECIR2 0x0a0c -+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04)) -+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04)) -+#define REG_MCR 0x0b00 -+#define REG_MCP(n) (0x0b04 + ((n) * 0x04)) -+#define REG_MISC_CFG 0x0be0 -+#define REG_HID_CFG 0x0bf0 -+#define REG_IDLE_STAT 0x0bf4 -+#define REG_IP_REV_1 0x0bf8 -+#define REG_IP_REV_2 0x0bfc -+#define REG_FQD_BARE 0x0c00 -+#define REG_PFDR_BARE 0x0c20 -+#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */ -+#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */ -+#define REG_QCSP_BARE 0x0c80 -+#define REG_QCSP_BAR 0x0c84 -+#define REG_CI_SCHED_CFG 0x0d00 -+#define REG_SRCIDR 0x0d04 -+#define REG_LIODNR 0x0d08 -+#define REG_CI_RLM_AVG 0x0d14 -+#define REG_ERR_ISR 0x0e00 /* + "enum qm_isr_reg" */ -+#define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10)) -+#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10)) -+#define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10)) -+#define REG_CEETM_CFG_IDX 0x900 -+#define REG_CEETM_CFG_PRES 0x904 -+#define REG_CEETM_XSFDR_IN_USE 0x908 -+ -+/* Assists for QMAN_MCR */ -+#define MCR_INIT_PFDR 0x01000000 -+#define MCR_get_rslt(v) (u8)((v) >> 24) -+#define MCR_rslt_idle(r) (!rslt || (rslt >= 0xf0)) -+#define MCR_rslt_ok(r) (rslt == 0xf0) -+#define MCR_rslt_eaccess(r) (rslt == 0xf8) -+#define MCR_rslt_inval(r) (rslt == 0xff) -+ -+struct qman; -+ -+/* Follows WQ_CS_CFG0-5 */ -+enum qm_wq_class { -+ qm_wq_portal = 0, -+ qm_wq_pool = 1, -+ qm_wq_fman0 = 2, -+ qm_wq_fman1 = 3, -+ qm_wq_caam = 4, -+ qm_wq_pme = 5, -+ qm_wq_first = qm_wq_portal, -+ qm_wq_last = qm_wq_pme -+}; -+ -+/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */ -+enum qm_memory { -+ qm_memory_fqd, -+ qm_memory_pfdr -+}; -+ -+/* Used by all error interrupt registers except 'inhibit' */ -+#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */ -+#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */ -+#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */ -+#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */ -+#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */ -+#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */ -+#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */ -+#define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */ -+#define QM_EIRQ_ICVI 0x00010000 /* Invalid 
Command Verb */ -+#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */ -+#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */ -+#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */ -+#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */ -+#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */ -+#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */ -+#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */ -+#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */ -+#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */ -+ -+/* QMAN_ECIR valid error bit */ -+#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \ -+ QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \ -+ QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI) -+#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \ -+ QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \ -+ QM_EIRQ_IFSI) -+ -+union qman_ecir { -+ u32 ecir_raw; -+ struct { -+ u32 __reserved:2; -+ u32 portal_type:1; -+ u32 portal_num:5; -+ u32 fqid:24; -+ } __packed info; -+}; -+ -+union qman_ecir2 { -+ u32 ecir2_raw; -+ struct { -+ u32 portal_type:1; -+ u32 __reserved:21; -+ u32 portal_num:10; -+ } __packed info; -+}; -+ -+union qman_eadr { -+ u32 eadr_raw; -+ struct { -+ u32 __reserved1:4; -+ u32 memid:4; -+ u32 __reserved2:12; -+ u32 eadr:12; -+ } __packed info; -+ struct { -+ u32 __reserved1:3; -+ u32 memid:5; -+ u32 __reserved:8; -+ u32 eadr:16; -+ } __packed info_rev3; -+}; -+ -+struct qman_hwerr_txt { -+ u32 mask; -+ const char *txt; -+}; -+ -+#define QMAN_HWE_TXT(a, b) { .mask = QM_EIRQ_##a, .txt = b } -+ -+static const struct qman_hwerr_txt qman_hwerr_txts[] = { -+ QMAN_HWE_TXT(CIDE, "Corenet Initiator Data Error"), -+ QMAN_HWE_TXT(CTDE, "Corenet Target Data Error"), -+ QMAN_HWE_TXT(CITT, "Corenet Invalid Target Transaction"), -+ QMAN_HWE_TXT(PLWI, "PFDR Low Watermark"), -+ QMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"), -+ QMAN_HWE_TXT(SBEI, "Single-bit ECC Error"), -+ QMAN_HWE_TXT(PEBI, "PFDR Enqueues Blocked Interrupt"), -+ QMAN_HWE_TXT(ICVI, "Invalid Command Verb"), -+ QMAN_HWE_TXT(IFSI, "Invalid Flow Control State"), -+ QMAN_HWE_TXT(IDDI, "Invalid Dequeue (Direct-connect)"), -+ QMAN_HWE_TXT(IDFI, "Invalid Dequeue FQ"), -+ QMAN_HWE_TXT(IDSI, "Invalid Dequeue Source"), -+ QMAN_HWE_TXT(IDQI, "Invalid Dequeue Queue"), -+ QMAN_HWE_TXT(IECE, "Invalid Enqueue Configuration"), -+ QMAN_HWE_TXT(IEOI, "Invalid Enqueue Overflow"), -+ QMAN_HWE_TXT(IESI, "Invalid Enqueue State"), -+ QMAN_HWE_TXT(IECI, "Invalid Enqueue Channel"), -+ QMAN_HWE_TXT(IEQI, "Invalid Enqueue Queue") -+}; -+#define QMAN_HWE_COUNT (sizeof(qman_hwerr_txts)/sizeof(struct qman_hwerr_txt)) -+ -+struct qman_error_info_mdata { -+ u16 addr_mask; -+ u16 bits; -+ const char *txt; -+}; -+ -+#define QMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c} -+static const struct qman_error_info_mdata error_mdata[] = { -+ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 0"), -+ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 1"), -+ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 2"), -+ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 3"), -+ QMAN_ERR_MDATA(0x0FFF, 512, "FQD cache memory"), -+ QMAN_ERR_MDATA(0x07FF, 128, "SFDR memory"), -+ QMAN_ERR_MDATA(0x01FF, 72, "WQ context memory"), -+ QMAN_ERR_MDATA(0x00FF, 240, "CGR memory"), -+ QMAN_ERR_MDATA(0x00FF, 302, "Internal Order Restoration List memory"), -+ QMAN_ERR_MDATA(0x01FF, 256, "SW portal ring memory"), -+ QMAN_ERR_MDATA(0x07FF, 181, "CEETM class queue 
descriptor memory"), -+ QMAN_ERR_MDATA(0x0FFF, 140, "CEETM extended SFDR memory"), -+ QMAN_ERR_MDATA(0x0FFF, 25, "CEETM logical FQ mapping memory"), -+ QMAN_ERR_MDATA(0x0FFF, 96, "CEETM dequeue context memory"), -+ QMAN_ERR_MDATA(0x07FF, 396, "CEETM ccgr memory"), -+ QMAN_ERR_MDATA(0x00FF, 146, "CEETM CQ channel shaping memory"), -+ QMAN_ERR_MDATA(0x007F, 256, "CEETM CQ channel scheduling memory"), -+ QMAN_ERR_MDATA(0x01FF, 88, "CEETM dequeue statistics memory"), -+}; -+#define QMAN_ERR_MDATA_COUNT \ -+ (sizeof(error_mdata)/sizeof(struct qman_error_info_mdata)) -+ -+/* Add this in Kconfig */ -+#define QMAN_ERRS_TO_UNENABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI) -+ -+/** -+ * qm_err_isr__ - Manipulate global interrupt registers -+ * @v: for accessors that write values, this is the 32-bit value -+ * -+ * Manipulates QMAN_ERR_ISR, QMAN_ERR_IER, QMAN_ERR_ISDR, QMAN_ERR_IIR. All -+ * manipulations except qm_err_isr_[un]inhibit() use 32-bit masks composed of -+ * the QM_EIRQ_*** definitions. Note that "qm_err_isr_enable_write" means -+ * "write the enable register" rather than "enable the write register"! -+ */ -+#define qm_err_isr_status_read(qm) \ -+ __qm_err_isr_read(qm, qm_isr_status) -+#define qm_err_isr_status_clear(qm, m) \ -+ __qm_err_isr_write(qm, qm_isr_status, m) -+#define qm_err_isr_enable_read(qm) \ -+ __qm_err_isr_read(qm, qm_isr_enable) -+#define qm_err_isr_enable_write(qm, v) \ -+ __qm_err_isr_write(qm, qm_isr_enable, v) -+#define qm_err_isr_disable_read(qm) \ -+ __qm_err_isr_read(qm, qm_isr_disable) -+#define qm_err_isr_disable_write(qm, v) \ -+ __qm_err_isr_write(qm, qm_isr_disable, v) -+#define qm_err_isr_inhibit(qm) \ -+ __qm_err_isr_write(qm, qm_isr_inhibit, 1) -+#define qm_err_isr_uninhibit(qm) \ -+ __qm_err_isr_write(qm, qm_isr_inhibit, 0) -+ -+/* -+ * TODO: unimplemented registers -+ * -+ * Keeping a list here of Qman registers I have not yet covered; -+ * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR, -+ * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG, -+ * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12 -+ */ -+ -+/* Encapsulate "struct qman *" as a cast of the register space address. */ -+ -+static struct qman *qm_create(void *regs) -+{ -+ return (struct qman *)regs; -+} -+ -+static inline u32 __qm_in(struct qman *qm, u32 offset) -+{ -+ return in_be32((void *)qm + offset); -+} -+static inline void __qm_out(struct qman *qm, u32 offset, u32 val) -+{ -+ out_be32((void *)qm + offset, val); -+} -+#define qm_in(reg) __qm_in(qm, REG_##reg) -+#define qm_out(reg, val) __qm_out(qm, REG_##reg, val) -+ -+static u32 __qm_err_isr_read(struct qman *qm, enum qm_isr_reg n) -+{ -+ return __qm_in(qm, REG_ERR_ISR + (n << 2)); -+} -+ -+static void __qm_err_isr_write(struct qman *qm, enum qm_isr_reg n, u32 val) -+{ -+ __qm_out(qm, REG_ERR_ISR + (n << 2), val); -+} -+ -+static void qm_set_dc(struct qman *qm, enum qm_dc_portal portal, -+ int ed, u8 sernd) -+{ -+ DPA_ASSERT(!ed || (portal == qm_dc_portal_fman0) || -+ (portal == qm_dc_portal_fman1)); -+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) -+ qm_out(DCP_CFG(portal), (ed ? 0x1000 : 0) | (sernd & 0x3ff)); -+ else -+ qm_out(DCP_CFG(portal), (ed ? 
0x100 : 0) | (sernd & 0x1f)); -+} -+ -+static void qm_set_wq_scheduling(struct qman *qm, enum qm_wq_class wq_class, -+ u8 cs_elev, u8 csw2, u8 csw3, u8 csw4, u8 csw5, -+ u8 csw6, u8 csw7) -+{ -+ qm_out(WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) | -+ ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) | -+ ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) | -+ ((csw6 & 0x7) << 4) | (csw7 & 0x7)); -+} -+ -+static void qm_set_hid(struct qman *qm) -+{ -+ qm_out(HID_CFG, 0); -+} -+ -+static void qm_set_corenet_initiator(struct qman *qm) -+{ -+ qm_out(CI_SCHED_CFG, -+ 0x80000000 | /* write srcciv enable */ -+ (CONFIG_FSL_QMAN_CI_SCHED_CFG_SRCCIV << 24) | -+ (CONFIG_FSL_QMAN_CI_SCHED_CFG_SRQ_W << 8) | -+ (CONFIG_FSL_QMAN_CI_SCHED_CFG_RW_W << 4) | -+ CONFIG_FSL_QMAN_CI_SCHED_CFG_BMAN_W); -+} -+ -+static void qm_get_version(struct qman *qm, u16 *id, u8 *major, u8 *minor, -+ u8 *cfg) -+{ -+ u32 v = qm_in(IP_REV_1); -+ u32 v2 = qm_in(IP_REV_2); -+ *id = (v >> 16); -+ *major = (v >> 8) & 0xff; -+ *minor = v & 0xff; -+ *cfg = v2 & 0xff; -+} -+ -+static void qm_set_memory(struct qman *qm, enum qm_memory memory, u64 ba, -+ int enable, int prio, int stash, u32 size) -+{ -+ u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE; -+ u32 exp = ilog2(size); -+ /* choke if size isn't within range */ -+ DPA_ASSERT((size >= 4096) && (size <= 1073741824) && -+ is_power_of_2(size)); -+ /* choke if 'ba' has lower-alignment than 'size' */ -+ DPA_ASSERT(!(ba & (size - 1))); -+ __qm_out(qm, offset, upper_32_bits(ba)); -+ __qm_out(qm, offset + REG_offset_BAR, lower_32_bits(ba)); -+ __qm_out(qm, offset + REG_offset_AR, -+ (enable ? 0x80000000 : 0) | -+ (prio ? 0x40000000 : 0) | -+ (stash ? 0x20000000 : 0) | -+ (exp - 1)); -+} -+ -+static void qm_set_pfdr_threshold(struct qman *qm, u32 th, u8 k) -+{ -+ qm_out(PFDR_FP_LWIT, th & 0xffffff); -+ qm_out(PFDR_CFG, k); -+} -+ -+static void qm_set_sfdr_threshold(struct qman *qm, u16 th) -+{ -+ qm_out(SFDR_CFG, th & 0x3ff); -+} -+ -+static int qm_init_pfdr(struct qman *qm, u32 pfdr_start, u32 num) -+{ -+ u8 rslt = MCR_get_rslt(qm_in(MCR)); -+ -+ DPA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num); -+ /* Make sure the command interface is 'idle' */ -+ if (!MCR_rslt_idle(rslt)) -+ panic("QMAN_MCR isn't idle"); -+ -+ /* Write the MCR command params then the verb */ -+ qm_out(MCP(0), pfdr_start); -+ /* TODO: remove this - it's a workaround for a model bug that is -+ * corrected in more recent versions. We use the workaround until -+ * everyone has upgraded. */ -+ qm_out(MCP(1), (pfdr_start + num - 16)); -+ lwsync(); -+ qm_out(MCR, MCR_INIT_PFDR); -+ /* Poll for the result */ -+ do { -+ rslt = MCR_get_rslt(qm_in(MCR)); -+ } while (!MCR_rslt_idle(rslt)); -+ if (MCR_rslt_ok(rslt)) -+ return 0; -+ if (MCR_rslt_eaccess(rslt)) -+ return -EACCES; -+ if (MCR_rslt_inval(rslt)) -+ return -EINVAL; -+ pr_crit("Unexpected result from MCR_INIT_PFDR: %02x\n", rslt); -+ return -ENOSYS; -+} -+ -+/*****************/ -+/* Config driver */ -+/*****************/ -+ -+#define DEFAULT_FQD_SZ (PAGE_SIZE << CONFIG_FSL_QMAN_FQD_SZ) -+#define DEFAULT_PFDR_SZ (PAGE_SIZE << CONFIG_FSL_QMAN_PFDR_SZ) -+ -+/* We support only one of these */ -+static struct qman *qm; -+static struct device_node *qm_node; -+ -+/* And this state belongs to 'qm'. It is set during fsl_qman_init(), but used -+ * during qman_init_ccsr(). 
*/
-+static dma_addr_t fqd_a, pfdr_a;
-+static size_t fqd_sz = DEFAULT_FQD_SZ, pfdr_sz = DEFAULT_PFDR_SZ;
-+
-+static int qman_fqd(struct reserved_mem *rmem)
-+{
-+	fqd_a = rmem->base;
-+	fqd_sz = rmem->size;
-+
-+	WARN_ON(!(fqd_a && fqd_sz));
-+
-+	return 0;
-+}
-+RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
-+
-+static int qman_pfdr(struct reserved_mem *rmem)
-+{
-+	pfdr_a = rmem->base;
-+	pfdr_sz = rmem->size;
-+
-+	WARN_ON(!(pfdr_a && pfdr_sz));
-+
-+	return 0;
-+}
-+RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
-+
-+size_t get_qman_fqd_size(void)
-+{
-+	return fqd_sz;
-+}
-+
-+/* Parse the property to extract the memory location and size and
-+ * memblock_reserve() it. If it isn't supplied, memblock_alloc() the default
-+ * size. Also flush this memory range from the data cache so that
-+ * QMan-originated transactions for this memory region can be marked
-+ * non-coherent.
-+ */
-+static __init int parse_mem_property(struct device_node *node, const char *name,
-+				     dma_addr_t *addr, size_t *sz, int zero)
-+{
-+	int ret;
-+
-+	/* If using a "zero-pma", don't try to zero it, even if you asked */
-+	if (zero && of_find_property(node, "zero-pma", &ret)) {
-+		pr_info(" it's a 'zero-pma', not zeroing from s/w\n");
-+		zero = 0;
-+	}
-+
-+	if (zero) {
-+		/* map as cacheable, non-guarded */
-+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+		void __iomem *tmpp = ioremap_cache(*addr, *sz);
-+#else
-+		void __iomem *tmpp = ioremap(*addr, *sz);
-+#endif
-+
-+		if (!tmpp)
-+			return -ENOMEM;
-+		memset_io(tmpp, 0, *sz);
-+		flush_dcache_range((unsigned long)tmpp,
-+				   (unsigned long)tmpp + *sz);
-+		iounmap(tmpp);
-+	}
-+
-+	return 0;
-+}
-+
-+/* TODO:
-+ * - there is obviously no handling of errors,
-+ * - the calls to qm_set_memory() hard-code the priority and CPC-stashing for
-+ *   both memory resources to zero.
-+ */ -+static int __init fsl_qman_init(struct device_node *node) -+{ -+ struct resource res; -+ resource_size_t len; -+ u32 __iomem *regs; -+ const char *s; -+ int ret, standby = 0; -+ u16 id; -+ u8 major, minor, cfg; -+ ret = of_address_to_resource(node, 0, &res); -+ if (ret) { -+ pr_err("Can't get %s property '%s'\n", node->full_name, "reg"); -+ return ret; -+ } -+ s = of_get_property(node, "fsl,hv-claimable", &ret); -+ if (s && !strcmp(s, "standby")) -+ standby = 1; -+ if (!standby) { -+ ret = parse_mem_property(node, "fsl,qman-fqd", -+ &fqd_a, &fqd_sz, 1); -+ pr_info("qman-fqd addr 0x%llx size 0x%zx\n", -+ (unsigned long long)fqd_a, fqd_sz); -+ BUG_ON(ret); -+ ret = parse_mem_property(node, "fsl,qman-pfdr", -+ &pfdr_a, &pfdr_sz, 0); -+ pr_info("qman-pfdr addr 0x%llx size 0x%zx\n", -+ (unsigned long long)pfdr_a, pfdr_sz); -+ BUG_ON(ret); -+ } -+ /* Global configuration */ -+ len = resource_size(&res); -+ if (len != (unsigned long)len) -+ return -EINVAL; -+ regs = ioremap(res.start, (unsigned long)len); -+ qm = qm_create(regs); -+ qm_node = node; -+ qm_get_version(qm, &id, &major, &minor, &cfg); -+ pr_info("Qman ver:%04x,%02x,%02x,%02x\n", id, major, minor, cfg); -+ if (!qman_ip_rev) { -+ if ((major == 1) && (minor == 0)) { -+ pr_err("QMAN rev1.0 on P4080 rev1 is not supported!\n"); -+ iounmap(regs); -+ return -ENODEV; -+ } else if ((major == 1) && (minor == 1)) -+ qman_ip_rev = QMAN_REV11; -+ else if ((major == 1) && (minor == 2)) -+ qman_ip_rev = QMAN_REV12; -+ else if ((major == 2) && (minor == 0)) -+ qman_ip_rev = QMAN_REV20; -+ else if ((major == 3) && (minor == 0)) -+ qman_ip_rev = QMAN_REV30; -+ else if ((major == 3) && (minor == 1)) -+ qman_ip_rev = QMAN_REV31; -+ else if ((major == 3) && (minor == 2)) -+ qman_ip_rev = QMAN_REV32; -+ else { -+ pr_warn("unknown Qman version, default to rev1.1\n"); -+ qman_ip_rev = QMAN_REV11; -+ } -+ qman_ip_cfg = cfg; -+ } -+ -+ if (standby) { -+ pr_info(" -> in standby mode\n"); -+ return 0; -+ } -+ return 0; -+} -+ -+int qman_have_ccsr(void) -+{ -+ return qm ? 1 : 0; -+} -+ -+__init int qman_init_early(void) -+{ -+ struct device_node *dn; -+ int ret; -+ -+ for_each_compatible_node(dn, NULL, "fsl,qman") { -+ if (qm) -+ pr_err("%s: only one 'fsl,qman' allowed\n", -+ dn->full_name); -+ else { -+ if (!of_device_is_available(dn)) -+ continue; -+ -+ ret = fsl_qman_init(dn); -+ BUG_ON(ret); -+ } -+ } -+ return 0; -+} -+postcore_initcall_sync(qman_init_early); -+ -+static void log_edata_bits(u32 bit_count) -+{ -+ u32 i, j, mask = 0xffffffff; -+ -+ pr_warn("Qman ErrInt, EDATA:\n"); -+ i = bit_count/32; -+ if (bit_count%32) { -+ i++; -+ mask = ~(mask << bit_count%32); -+ } -+ j = 16-i; -+ pr_warn(" 0x%08x\n", qm_in(EDATA(j)) & mask); -+ j++; -+ for (; j < 16; j++) -+ pr_warn(" 0x%08x\n", qm_in(EDATA(j))); -+} -+ -+static void log_additional_error_info(u32 isr_val, u32 ecsr_val) -+{ -+ union qman_ecir ecir_val; -+ union qman_eadr eadr_val; -+ -+ ecir_val.ecir_raw = qm_in(ECIR); -+ /* Is portal info valid */ -+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) { -+ union qman_ecir2 ecir2_val; -+ ecir2_val.ecir2_raw = qm_in(ECIR2); -+ if (ecsr_val & PORTAL_ECSR_ERR) { -+ pr_warn("Qman ErrInt: %s id %d\n", -+ (ecir2_val.info.portal_type) ? 
-+ "DCP" : "SWP", ecir2_val.info.portal_num); -+ } -+ if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE)) { -+ pr_warn("Qman ErrInt: ecir.fqid 0x%x\n", -+ ecir_val.info.fqid); -+ } -+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) { -+ eadr_val.eadr_raw = qm_in(EADR); -+ pr_warn("Qman ErrInt: EADR Memory: %s, 0x%x\n", -+ error_mdata[eadr_val.info_rev3.memid].txt, -+ error_mdata[eadr_val.info_rev3.memid].addr_mask -+ & eadr_val.info_rev3.eadr); -+ log_edata_bits( -+ error_mdata[eadr_val.info_rev3.memid].bits); -+ } -+ } else { -+ if (ecsr_val & PORTAL_ECSR_ERR) { -+ pr_warn("Qman ErrInt: %s id %d\n", -+ (ecir_val.info.portal_type) ? -+ "DCP" : "SWP", ecir_val.info.portal_num); -+ } -+ if (ecsr_val & FQID_ECSR_ERR) { -+ pr_warn("Qman ErrInt: ecir.fqid 0x%x\n", -+ ecir_val.info.fqid); -+ } -+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) { -+ eadr_val.eadr_raw = qm_in(EADR); -+ pr_warn("Qman ErrInt: EADR Memory: %s, 0x%x\n", -+ error_mdata[eadr_val.info.memid].txt, -+ error_mdata[eadr_val.info.memid].addr_mask -+ & eadr_val.info.eadr); -+ log_edata_bits(error_mdata[eadr_val.info.memid].bits); -+ } -+ } -+} -+ -+/* Qman interrupt handler */ -+static irqreturn_t qman_isr(int irq, void *ptr) -+{ -+ u32 isr_val, ier_val, ecsr_val, isr_mask, i; -+ -+ ier_val = qm_err_isr_enable_read(qm); -+ isr_val = qm_err_isr_status_read(qm); -+ ecsr_val = qm_in(ECSR); -+ isr_mask = isr_val & ier_val; -+ -+ if (!isr_mask) -+ return IRQ_NONE; -+ for (i = 0; i < QMAN_HWE_COUNT; i++) { -+ if (qman_hwerr_txts[i].mask & isr_mask) { -+ pr_warn("Qman ErrInt: %s\n", qman_hwerr_txts[i].txt); -+ if (qman_hwerr_txts[i].mask & ecsr_val) { -+ log_additional_error_info(isr_mask, ecsr_val); -+ /* Re-arm error capture registers */ -+ qm_out(ECSR, ecsr_val); -+ } -+ if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_UNENABLE) { -+ pr_devel("Qman un-enabling error 0x%x\n", -+ qman_hwerr_txts[i].mask); -+ ier_val &= ~qman_hwerr_txts[i].mask; -+ qm_err_isr_enable_write(qm, ier_val); -+ } -+ } -+ } -+ qm_err_isr_status_clear(qm, isr_val); -+ return IRQ_HANDLED; -+} -+ -+static int __bind_irq(void) -+{ -+ int ret, err_irq; -+ -+ err_irq = of_irq_to_resource(qm_node, 0, NULL); -+ if (err_irq == 0) { -+ pr_info("Can't get %s property '%s'\n", qm_node->full_name, -+ "interrupts"); -+ return -ENODEV; -+ } -+ ret = request_irq(err_irq, qman_isr, IRQF_SHARED, "qman-err", qm_node); -+ if (ret) { -+ pr_err("request_irq() failed %d for '%s'\n", ret, -+ qm_node->full_name); -+ return -ENODEV; -+ } -+ /* Write-to-clear any stale bits, (eg. starvation being asserted prior -+ * to resource allocation during driver init). 
*/
-+	qm_err_isr_status_clear(qm, 0xffffffff);
-+	/* Enable Error Interrupts */
-+	qm_err_isr_enable_write(qm, 0xffffffff);
-+	return 0;
-+}
-+
-+int qman_init_ccsr(struct device_node *node)
-+{
-+	int ret;
-+	if (!qman_have_ccsr())
-+		return 0;
-+	if (node != qm_node)
-+		return -EINVAL;
-+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+	/* TEMP for LS1043: should be done in u-boot */
-+	qm_out(QCSP_BARE, 0x5);
-+	qm_out(QCSP_BAR, 0x0);
-+#endif
-+	/* FQD memory */
-+	qm_set_memory(qm, qm_memory_fqd, fqd_a, 1, 0, 0, fqd_sz);
-+	/* PFDR memory */
-+	qm_set_memory(qm, qm_memory_pfdr, pfdr_a, 1, 0, 0, pfdr_sz);
-+	qm_init_pfdr(qm, 8, pfdr_sz / 64 - 8);
-+	/* thresholds */
-+	qm_set_pfdr_threshold(qm, 512, 64);
-+	qm_set_sfdr_threshold(qm, 128);
-+	/* clear stale PEBI bit from interrupt status register */
-+	qm_err_isr_status_clear(qm, QM_EIRQ_PEBI);
-+	/* corenet initiator settings */
-+	qm_set_corenet_initiator(qm);
-+	/* HID settings */
-+	qm_set_hid(qm);
-+	/* Set scheduling weights to defaults */
-+	for (ret = qm_wq_first; ret <= qm_wq_last; ret++)
-+		qm_set_wq_scheduling(qm, ret, 0, 0, 0, 0, 0, 0, 0);
-+	/* We are not prepared to accept ERNs for hardware enqueues */
-+	qm_set_dc(qm, qm_dc_portal_fman0, 1, 0);
-+	qm_set_dc(qm, qm_dc_portal_fman1, 1, 0);
-+	/* Initialise Error Interrupt Handler */
-+	ret = __bind_irq();
-+	if (ret)
-+		return ret;
-+	return 0;
-+}
-+
-+#define LIO_CFG_LIODN_MASK 0x0fff0000
-+void qman_liodn_fixup(u16 channel)
-+{
-+	static int done;
-+	static u32 liodn_offset;
-+	u32 before, after;
-+	int idx = channel - QM_CHANNEL_SWPORTAL0;
-+
-+	if (!qman_have_ccsr())
-+		return;
-+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
-+		before = qm_in(REV3_QCSP_LIO_CFG(idx));
-+	else
-+		before = qm_in(QCSP_LIO_CFG(idx));
-+	if (!done) {
-+		liodn_offset = before & LIO_CFG_LIODN_MASK;
-+		done = 1;
-+		return;
-+	}
-+	after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
-+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
-+		qm_out(REV3_QCSP_LIO_CFG(idx), after);
-+	else
-+		qm_out(QCSP_LIO_CFG(idx), after);
-+}
-+
-+#define IO_CFG_SDEST_MASK 0x00ff0000
-+int qman_set_sdest(u16 channel, unsigned int cpu_idx)
-+{
-+	int idx = channel - QM_CHANNEL_SWPORTAL0;
-+	u32 before, after;
-+
-+	if (!qman_have_ccsr())
-+		return -ENODEV;
-+	if ((qman_ip_rev & 0xFF00) == QMAN_REV31) {
-+		/* LS1043A - only one L2 cache */
-+		cpu_idx = 0;
-+	}
-+
-+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
-+		before = qm_in(REV3_QCSP_IO_CFG(idx));
-+		/* Each pair of vCPUs shares the same SRQ (SDEST) */
-+		cpu_idx /= 2;
-+		after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
-+		qm_out(REV3_QCSP_IO_CFG(idx), after);
-+	} else {
-+		before = qm_in(QCSP_IO_CFG(idx));
-+		after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
-+		qm_out(QCSP_IO_CFG(idx), after);
-+	}
-+	return 0;
-+}
-+
-+#define MISC_CFG_WPM_MASK 0x00000002
-+int qm_set_wpm(int wpm)
-+{
-+	u32 before;
-+	u32 after;
-+
-+	if (!qman_have_ccsr())
-+		return -ENODEV;
-+
-+	before = qm_in(MISC_CFG);
-+	after = (before & (~MISC_CFG_WPM_MASK)) | (wpm << 1);
-+	qm_out(MISC_CFG, after);
-+	return 0;
-+}
-+
-+int qm_get_wpm(int *wpm)
-+{
-+	u32 before;
-+
-+	if (!qman_have_ccsr())
-+		return -ENODEV;
-+
-+	before = qm_in(MISC_CFG);
-+	*wpm = (before & MISC_CFG_WPM_MASK) >> 1;
-+	return 0;
-+}
-+
-+/* CEETM_CFG_PRES register has PRES field which is calculated by:
-+ * PRES = (2^22 / credit update reference period) * QMan clock period
-+ *      = ((2^22 * 10^9) / CONFIG_QMAN_CEETM_UPDATE_PERIOD) / qman_clk
-+ */
-+
-+int qman_ceetm_set_prescaler(enum qm_dc_portal portal)
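To make the prescaler comment above concrete, a worked example with assumed figures (neither value comes from this patch): for CONFIG_QMAN_CEETM_UPDATE_PERIOD = 1000 (ns) and qman_clk = 500000000 (Hz),

	PRES = ((2^22 * 10^9) / 1000) / 500000000
	     = 4194304000000 / 500000000
	     ~= 8388

which matches the integer sequence in the function body below: 0x400000 * 100, divided by the update period, scaled by 10^7, then divided by qman_clk.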
-+{ -+ u64 temp; -+ u16 pres; -+ -+ if (!qman_have_ccsr()) -+ return -ENODEV; -+ -+ temp = 0x400000 * 100; -+ do_div(temp, CONFIG_QMAN_CEETM_UPDATE_PERIOD); -+ temp *= 10000000; -+ do_div(temp, qman_clk); -+ pres = (u16) temp; -+ qm_out(CEETM_CFG_IDX, portal); -+ qm_out(CEETM_CFG_PRES, pres); -+ return 0; -+} -+ -+int qman_ceetm_get_prescaler(u16 *pres) -+{ -+ if (!qman_have_ccsr()) -+ return -ENODEV; -+ *pres = (u16)qm_in(CEETM_CFG_PRES); -+ return 0; -+} -+ -+#define DCP_CFG_CEETME_MASK 0xFFFF0000 -+#define QM_SP_ENABLE_CEETM(n) (0x80000000 >> (n)) -+int qman_sp_enable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal) -+{ -+ u32 dcp_cfg; -+ -+ if (!qman_have_ccsr()) -+ return -ENODEV; -+ -+ dcp_cfg = qm_in(DCP_CFG(portal)); -+ dcp_cfg |= QM_SP_ENABLE_CEETM(sub_portal); -+ qm_out(DCP_CFG(portal), dcp_cfg); -+ return 0; -+} -+ -+int qman_sp_disable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal) -+{ -+ u32 dcp_cfg; -+ -+ if (!qman_have_ccsr()) -+ return -ENODEV; -+ dcp_cfg = qm_in(DCP_CFG(portal)); -+ dcp_cfg &= ~(QM_SP_ENABLE_CEETM(sub_portal)); -+ qm_out(DCP_CFG(portal), dcp_cfg); -+ return 0; -+} -+ -+int qman_ceetm_get_xsfdr(enum qm_dc_portal portal, unsigned int *num) -+{ -+ if (!qman_have_ccsr()) -+ return -ENODEV; -+ *num = qm_in(CEETM_XSFDR_IN_USE); -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_get_xsfdr); -+ -+#ifdef CONFIG_SYSFS -+ -+#define DRV_NAME "fsl-qman" -+#define DCP_MAX_ID 3 -+#define DCP_MIN_ID 0 -+ -+static ssize_t show_pfdr_fpc(struct device *dev, -+ struct device_attribute *dev_attr, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_FPC)); -+}; -+ -+static ssize_t show_dlm_avg(struct device *dev, -+ struct device_attribute *dev_attr, char *buf) -+{ -+ u32 data; -+ int i; -+ -+ if (!sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i)) -+ return -EINVAL; -+ if (i < DCP_MIN_ID || i > DCP_MAX_ID) -+ return -EINVAL; -+ data = qm_in(DCP_DLM_AVG(i)); -+ return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8, -+ (data & 0x000000ff)*390625); -+}; -+ -+static ssize_t set_dlm_avg(struct device *dev, -+ struct device_attribute *dev_attr, const char *buf, size_t count) -+{ -+ unsigned long val; -+ int i; -+ -+ if (!sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i)) -+ return -EINVAL; -+ if (i < DCP_MIN_ID || i > DCP_MAX_ID) -+ return -EINVAL; -+ if (kstrtoul(buf, 0, &val)) { -+ dev_dbg(dev, "invalid input %s\n", buf); -+ return -EINVAL; -+ } -+ qm_out(DCP_DLM_AVG(i), val); -+ return count; -+}; -+ -+static ssize_t show_pfdr_cfg(struct device *dev, -+ struct device_attribute *dev_attr, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_CFG)); -+}; -+ -+static ssize_t set_pfdr_cfg(struct device *dev, -+ struct device_attribute *dev_attr, const char *buf, size_t count) -+{ -+ unsigned long val; -+ -+ if (kstrtoul(buf, 0, &val)) { -+ dev_dbg(dev, "invalid input %s\n", buf); -+ return -EINVAL; -+ } -+ qm_out(PFDR_CFG, val); -+ return count; -+}; -+ -+static ssize_t show_sfdr_in_use(struct device *dev, -+ struct device_attribute *dev_attr, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SFDR_IN_USE)); -+}; -+ -+static ssize_t show_idle_stat(struct device *dev, -+ struct device_attribute *dev_attr, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(IDLE_STAT)); -+}; -+ -+static ssize_t show_ci_rlm_avg(struct device *dev, -+ struct device_attribute *dev_attr, char *buf) -+{ -+ u32 data = qm_in(CI_RLM_AVG); -+ return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8, -+ (data & 0x000000ff)*390625); -+}; -+ -+static ssize_t 
set_ci_rlm_avg(struct device *dev, -+ struct device_attribute *dev_attr, const char *buf, size_t count) -+{ -+ unsigned long val; -+ -+ if (kstrtoul(buf, 0, &val)) { -+ dev_dbg(dev, "invalid input %s\n", buf); -+ return -EINVAL; -+ } -+ qm_out(CI_RLM_AVG, val); -+ return count; -+}; -+ -+static ssize_t show_err_isr(struct device *dev, -+ struct device_attribute *dev_attr, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", qm_in(ERR_ISR)); -+}; -+ -+#define SBEC_MAX_ID 14 -+#define SBEC_MIN_ID 0 -+ -+static ssize_t show_sbec(struct device *dev, -+ struct device_attribute *dev_attr, char *buf) -+{ -+ int i; -+ -+ if (!sscanf(dev_attr->attr.name, "sbec_%d", &i)) -+ return -EINVAL; -+ if (i < SBEC_MIN_ID || i > SBEC_MAX_ID) -+ return -EINVAL; -+ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SBEC(i))); -+}; -+ -+static DEVICE_ATTR(pfdr_fpc, S_IRUSR, show_pfdr_fpc, NULL); -+static DEVICE_ATTR(pfdr_cfg, S_IRUSR, show_pfdr_cfg, set_pfdr_cfg); -+static DEVICE_ATTR(idle_stat, S_IRUSR, show_idle_stat, NULL); -+static DEVICE_ATTR(ci_rlm_avg, (S_IRUSR|S_IWUSR), -+ show_ci_rlm_avg, set_ci_rlm_avg); -+static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL); -+static DEVICE_ATTR(sfdr_in_use, S_IRUSR, show_sfdr_in_use, NULL); -+ -+static DEVICE_ATTR(dcp0_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg); -+static DEVICE_ATTR(dcp1_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg); -+static DEVICE_ATTR(dcp2_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg); -+static DEVICE_ATTR(dcp3_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg); -+ -+static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL); -+static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL); -+static DEVICE_ATTR(sbec_2, S_IRUSR, show_sbec, NULL); -+static DEVICE_ATTR(sbec_3, S_IRUSR, show_sbec, NULL); -+static DEVICE_ATTR(sbec_4, S_IRUSR, show_sbec, NULL); -+static DEVICE_ATTR(sbec_5, S_IRUSR, show_sbec, NULL); -+static DEVICE_ATTR(sbec_6, S_IRUSR, show_sbec, NULL); -+static DEVICE_ATTR(sbec_7, S_IRUSR, show_sbec, NULL); -+static DEVICE_ATTR(sbec_8, S_IRUSR, show_sbec, NULL); -+static DEVICE_ATTR(sbec_9, S_IRUSR, show_sbec, NULL); -+static DEVICE_ATTR(sbec_10, S_IRUSR, show_sbec, NULL); -+static DEVICE_ATTR(sbec_11, S_IRUSR, show_sbec, NULL); -+static DEVICE_ATTR(sbec_12, S_IRUSR, show_sbec, NULL); -+static DEVICE_ATTR(sbec_13, S_IRUSR, show_sbec, NULL); -+static DEVICE_ATTR(sbec_14, S_IRUSR, show_sbec, NULL); -+ -+static struct attribute *qman_dev_attributes[] = { -+ &dev_attr_pfdr_fpc.attr, -+ &dev_attr_pfdr_cfg.attr, -+ &dev_attr_idle_stat.attr, -+ &dev_attr_ci_rlm_avg.attr, -+ &dev_attr_err_isr.attr, -+ &dev_attr_dcp0_dlm_avg.attr, -+ &dev_attr_dcp1_dlm_avg.attr, -+ &dev_attr_dcp2_dlm_avg.attr, -+ &dev_attr_dcp3_dlm_avg.attr, -+ /* sfdr_in_use will be added if necessary */ -+ NULL -+}; -+ -+static struct attribute *qman_dev_ecr_attributes[] = { -+ &dev_attr_sbec_0.attr, -+ &dev_attr_sbec_1.attr, -+ &dev_attr_sbec_2.attr, -+ &dev_attr_sbec_3.attr, -+ &dev_attr_sbec_4.attr, -+ &dev_attr_sbec_5.attr, -+ &dev_attr_sbec_6.attr, -+ &dev_attr_sbec_7.attr, -+ &dev_attr_sbec_8.attr, -+ &dev_attr_sbec_9.attr, -+ &dev_attr_sbec_10.attr, -+ &dev_attr_sbec_11.attr, -+ &dev_attr_sbec_12.attr, -+ &dev_attr_sbec_13.attr, -+ &dev_attr_sbec_14.attr, -+ NULL -+}; -+ -+/* root level */ -+static const struct attribute_group qman_dev_attr_grp = { -+ .name = NULL, -+ .attrs = qman_dev_attributes -+}; -+static const struct attribute_group qman_dev_ecr_grp = { -+ .name = "error_capture", -+ .attrs = qman_dev_ecr_attributes -+}; -+ -+static int 
of_fsl_qman_remove(struct platform_device *ofdev) -+{ -+ sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp); -+ return 0; -+}; -+ -+static int of_fsl_qman_probe(struct platform_device *ofdev) -+{ -+ int ret; -+ -+ ret = sysfs_create_group(&ofdev->dev.kobj, &qman_dev_attr_grp); -+ if (ret) -+ goto done; -+ ret = sysfs_add_file_to_group(&ofdev->dev.kobj, -+ &dev_attr_sfdr_in_use.attr, qman_dev_attr_grp.name); -+ if (ret) -+ goto del_group_0; -+ ret = sysfs_create_group(&ofdev->dev.kobj, &qman_dev_ecr_grp); -+ if (ret) -+ goto del_group_0; -+ -+ goto done; -+ -+del_group_0: -+ sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp); -+done: -+ if (ret) -+ dev_err(&ofdev->dev, -+ "Cannot create dev attributes ret=%d\n", ret); -+ return ret; -+}; -+ -+static struct of_device_id of_fsl_qman_ids[] = { -+ { -+ .compatible = "fsl,qman", -+ }, -+ {} -+}; -+MODULE_DEVICE_TABLE(of, of_fsl_qman_ids); -+ -+#ifdef CONFIG_SUSPEND -+ -+static u32 saved_isdr; -+static int qman_pm_suspend_noirq(struct device *dev) -+{ -+ uint32_t idle_state; -+ -+ suspend_unused_qportal(); -+ /* save isdr, disable all, clear isr */ -+ saved_isdr = qm_err_isr_disable_read(qm); -+ qm_err_isr_disable_write(qm, 0xffffffff); -+ qm_err_isr_status_clear(qm, 0xffffffff); -+ idle_state = qm_in(IDLE_STAT); -+ if (!(idle_state & 0x1)) { -+ pr_err("Qman not idle 0x%x aborting\n", idle_state); -+ qm_err_isr_disable_write(qm, saved_isdr); -+ resume_unused_qportal(); -+ return -EBUSY; -+ } -+#ifdef CONFIG_PM_DEBUG -+ pr_info("Qman suspend code, IDLE_STAT = 0x%x\n", idle_state); -+#endif -+ return 0; -+} -+ -+static int qman_pm_resume_noirq(struct device *dev) -+{ -+ /* restore isdr */ -+ qm_err_isr_disable_write(qm, saved_isdr); -+ resume_unused_qportal(); -+ return 0; -+} -+#else -+#define qman_pm_suspend_noirq NULL -+#define qman_pm_resume_noirq NULL -+#endif -+ -+static const struct dev_pm_ops qman_pm_ops = { -+ .suspend_noirq = qman_pm_suspend_noirq, -+ .resume_noirq = qman_pm_resume_noirq, -+}; -+ -+static struct platform_driver of_fsl_qman_driver = { -+ .driver = { -+ .owner = THIS_MODULE, -+ .name = DRV_NAME, -+ .of_match_table = of_fsl_qman_ids, -+ .pm = &qman_pm_ops, -+ }, -+ .probe = of_fsl_qman_probe, -+ .remove = of_fsl_qman_remove, -+}; -+ -+static int qman_ctrl_init(void) -+{ -+ return platform_driver_register(&of_fsl_qman_driver); -+} -+ -+static void qman_ctrl_exit(void) -+{ -+ platform_driver_unregister(&of_fsl_qman_driver); -+} -+ -+module_init(qman_ctrl_init); -+module_exit(qman_ctrl_exit); -+ -+#endif /* CONFIG_SYSFS */ ---- /dev/null -+++ b/drivers/staging/fsl_qbman/qman_debugfs.c -@@ -0,0 +1,1594 @@ -+/* Copyright 2010-2011 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include "qman_private.h" -+ -+#define MAX_FQID (0x00ffffff) -+#define QM_FQD_BLOCK_SIZE 64 -+#define QM_FQD_AR (0xC10) -+ -+static u32 fqid_max; -+static u64 qman_ccsr_start; -+static u64 qman_ccsr_size; -+ -+static const char * const state_txt[] = { -+ "Out of Service", -+ "Retired", -+ "Tentatively Scheduled", -+ "Truly Scheduled", -+ "Parked", -+ "Active, Active Held or Held Suspended", -+ "Unknown State 6", -+ "Unknown State 7", -+ NULL, -+}; -+ -+static const u8 fqd_states[] = { -+ QM_MCR_NP_STATE_OOS, QM_MCR_NP_STATE_RETIRED, QM_MCR_NP_STATE_TEN_SCHED, -+ QM_MCR_NP_STATE_TRU_SCHED, QM_MCR_NP_STATE_PARKED, -+ QM_MCR_NP_STATE_ACTIVE}; -+ -+struct mask_to_text { -+ u16 mask; -+ const char *txt; -+}; -+ -+struct mask_filter_s { -+ u16 mask; -+ u8 filter; -+}; -+ -+static const struct mask_filter_s mask_filter[] = { -+ {QM_FQCTRL_PREFERINCACHE, 0}, -+ {QM_FQCTRL_PREFERINCACHE, 1}, -+ {QM_FQCTRL_HOLDACTIVE, 0}, -+ {QM_FQCTRL_HOLDACTIVE, 1}, -+ {QM_FQCTRL_AVOIDBLOCK, 0}, -+ {QM_FQCTRL_AVOIDBLOCK, 1}, -+ {QM_FQCTRL_FORCESFDR, 0}, -+ {QM_FQCTRL_FORCESFDR, 1}, -+ {QM_FQCTRL_CPCSTASH, 0}, -+ {QM_FQCTRL_CPCSTASH, 1}, -+ {QM_FQCTRL_CTXASTASHING, 0}, -+ {QM_FQCTRL_CTXASTASHING, 1}, -+ {QM_FQCTRL_ORP, 0}, -+ {QM_FQCTRL_ORP, 1}, -+ {QM_FQCTRL_TDE, 0}, -+ {QM_FQCTRL_TDE, 1}, -+ {QM_FQCTRL_CGE, 0}, -+ {QM_FQCTRL_CGE, 1} -+}; -+ -+static const struct mask_to_text fq_ctrl_text_list[] = { -+ { -+ .mask = QM_FQCTRL_PREFERINCACHE, -+ .txt = "Prefer in cache", -+ }, -+ { -+ .mask = QM_FQCTRL_HOLDACTIVE, -+ .txt = "Hold active in portal", -+ }, -+ { -+ .mask = QM_FQCTRL_AVOIDBLOCK, -+ .txt = "Avoid Blocking", -+ }, -+ { -+ .mask = QM_FQCTRL_FORCESFDR, -+ .txt = "High-priority SFDRs", -+ }, -+ { -+ .mask = QM_FQCTRL_CPCSTASH, -+ .txt = "CPC Stash Enable", -+ }, -+ { -+ .mask = QM_FQCTRL_CTXASTASHING, -+ .txt = "Context-A stashing", -+ }, -+ { -+ .mask = QM_FQCTRL_ORP, -+ .txt = "ORP Enable", -+ }, -+ { -+ .mask = QM_FQCTRL_TDE, -+ .txt = "Tail-Drop Enable", -+ }, -+ { -+ .mask = QM_FQCTRL_CGE, -+ .txt = "Congestion Group Enable", -+ }, -+ { -+ .mask = 0, -+ .txt = NULL, -+ } -+}; -+ -+static const char *get_fqd_ctrl_text(u16 mask) -+{ -+ int i = 0; -+ -+ while (fq_ctrl_text_list[i].txt != NULL) { -+ if (fq_ctrl_text_list[i].mask == mask) -+ return fq_ctrl_text_list[i].txt; -+ i++; -+ } -+ return NULL; -+} -+ -+static const struct mask_to_text stashing_text_list[] = { -+ { -+ .mask = QM_STASHING_EXCL_CTX, -+ .txt = "FQ Ctx Stash" -+ }, -+ { -+ .mask = QM_STASHING_EXCL_DATA, -+ .txt 
= "Frame Data Stash", -+ }, -+ { -+ .mask = QM_STASHING_EXCL_ANNOTATION, -+ .txt = "Frame Annotation Stash", -+ }, -+ { -+ .mask = 0, -+ .txt = NULL, -+ }, -+}; -+ -+static int user_input_convert(const char __user *user_buf, size_t count, -+ unsigned long *val) -+{ -+ char buf[12]; -+ -+ if (count > sizeof(buf) - 1) -+ return -EINVAL; -+ if (copy_from_user(buf, user_buf, count)) -+ return -EFAULT; -+ buf[count] = '\0'; -+ if (kstrtoul(buf, 0, val)) -+ return -EINVAL; -+ return 0; -+} -+ -+struct line_buffer_fq { -+ u32 buf[8]; -+ u32 buf_cnt; -+ int line_cnt; -+}; -+ -+static void add_to_line_buffer(struct line_buffer_fq *line_buf, u32 fqid, -+ struct seq_file *file) -+{ -+ line_buf->buf[line_buf->buf_cnt] = fqid; -+ line_buf->buf_cnt++; -+ if (line_buf->buf_cnt == 8) { -+ /* Buffer is full, flush it */ -+ if (line_buf->line_cnt != 0) -+ seq_puts(file, ",\n"); -+ seq_printf(file, "0x%06x,0x%06x,0x%06x,0x%06x,0x%06x," -+ "0x%06x,0x%06x,0x%06x", -+ line_buf->buf[0], line_buf->buf[1], line_buf->buf[2], -+ line_buf->buf[3], line_buf->buf[4], line_buf->buf[5], -+ line_buf->buf[6], line_buf->buf[7]); -+ line_buf->buf_cnt = 0; -+ line_buf->line_cnt++; -+ } -+} -+ -+static void flush_line_buffer(struct line_buffer_fq *line_buf, -+ struct seq_file *file) -+{ -+ if (line_buf->buf_cnt) { -+ int y = 0; -+ if (line_buf->line_cnt != 0) -+ seq_puts(file, ",\n"); -+ while (y != line_buf->buf_cnt) { -+ if (y+1 == line_buf->buf_cnt) -+ seq_printf(file, "0x%06x", line_buf->buf[y]); -+ else -+ seq_printf(file, "0x%06x,", line_buf->buf[y]); -+ y++; -+ } -+ line_buf->line_cnt++; -+ } -+ if (line_buf->line_cnt) -+ seq_putc(file, '\n'); -+} -+ -+static struct dentry *dfs_root; /* debugfs root directory */ -+ -+/******************************************************************************* -+ * Query Frame Queue Non Programmable Fields -+ ******************************************************************************/ -+struct query_fq_np_fields_data_s { -+ u32 fqid; -+}; -+static struct query_fq_np_fields_data_s query_fq_np_fields_data = { -+ .fqid = 1, -+}; -+ -+static int query_fq_np_fields_show(struct seq_file *file, void *offset) -+{ -+ int ret; -+ struct qm_mcr_queryfq_np np; -+ struct qman_fq fq; -+ -+ fq.fqid = query_fq_np_fields_data.fqid; -+ ret = qman_query_fq_np(&fq, &np); -+ if (ret) -+ return ret; -+ /* Print state */ -+ seq_printf(file, "Query FQ Non Programmable Fields Result fqid 0x%x\n", -+ fq.fqid); -+ seq_printf(file, " force eligible pending: %s\n", -+ (np.state & QM_MCR_NP_STATE_FE) ? "yes" : "no"); -+ seq_printf(file, " retirement pending: %s\n", -+ (np.state & QM_MCR_NP_STATE_R) ? "yes" : "no"); -+ seq_printf(file, " state: %s\n", -+ state_txt[np.state & QM_MCR_NP_STATE_MASK]); -+ seq_printf(file, " fq_link: 0x%x\n", np.fqd_link); -+ seq_printf(file, " odp_seq: %u\n", np.odp_seq); -+ seq_printf(file, " orp_nesn: %u\n", np.orp_nesn); -+ seq_printf(file, " orp_ea_hseq: %u\n", np.orp_ea_hseq); -+ seq_printf(file, " orp_ea_tseq: %u\n", np.orp_ea_tseq); -+ seq_printf(file, " orp_ea_hptr: 0x%x\n", np.orp_ea_hptr); -+ seq_printf(file, " orp_ea_tptr: 0x%x\n", np.orp_ea_tptr); -+ seq_printf(file, " pfdr_hptr: 0x%x\n", np.pfdr_hptr); -+ seq_printf(file, " pfdr_tptr: 0x%x\n", np.pfdr_tptr); -+ seq_printf(file, " is: ics_surp contains a %s\n", -+ (np.is) ? 
"deficit" : "surplus"); -+ seq_printf(file, " ics_surp: %u\n", np.ics_surp); -+ seq_printf(file, " byte_cnt: %u\n", np.byte_cnt); -+ seq_printf(file, " frm_cnt: %u\n", np.frm_cnt); -+ seq_printf(file, " ra1_sfdr: 0x%x\n", np.ra1_sfdr); -+ seq_printf(file, " ra2_sfdr: 0x%x\n", np.ra2_sfdr); -+ seq_printf(file, " od1_sfdr: 0x%x\n", np.od1_sfdr); -+ seq_printf(file, " od2_sfdr: 0x%x\n", np.od2_sfdr); -+ seq_printf(file, " od3_sfdr: 0x%x\n", np.od3_sfdr); -+ return 0; -+} -+ -+static int query_fq_np_fields_open(struct inode *inode, -+ struct file *file) -+{ -+ return single_open(file, query_fq_np_fields_show, NULL); -+} -+ -+static ssize_t query_fq_np_fields_write(struct file *f, -+ const char __user *buf, size_t count, loff_t *off) -+{ -+ int ret; -+ unsigned long val; -+ -+ ret = user_input_convert(buf, count, &val); -+ if (ret) -+ return ret; -+ if (val > MAX_FQID) -+ return -EINVAL; -+ query_fq_np_fields_data.fqid = (u32)val; -+ return count; -+} -+ -+static const struct file_operations query_fq_np_fields_fops = { -+ .owner = THIS_MODULE, -+ .open = query_fq_np_fields_open, -+ .read = seq_read, -+ .write = query_fq_np_fields_write, -+ .release = single_release, -+}; -+ -+/******************************************************************************* -+ * Frame Queue Programmable Fields -+ ******************************************************************************/ -+struct query_fq_fields_data_s { -+ u32 fqid; -+}; -+ -+static struct query_fq_fields_data_s query_fq_fields_data = { -+ .fqid = 1, -+}; -+ -+static int query_fq_fields_show(struct seq_file *file, void *offset) -+{ -+ int ret; -+ struct qm_fqd fqd; -+ struct qman_fq fq; -+ int i = 0; -+ -+ memset(&fqd, 0, sizeof(struct qm_fqd)); -+ fq.fqid = query_fq_fields_data.fqid; -+ ret = qman_query_fq(&fq, &fqd); -+ if (ret) -+ return ret; -+ seq_printf(file, "Query FQ Programmable Fields Result fqid 0x%x\n", -+ fq.fqid); -+ seq_printf(file, " orprws: %u\n", fqd.orprws); -+ seq_printf(file, " oa: %u\n", fqd.oa); -+ seq_printf(file, " olws: %u\n", fqd.olws); -+ -+ seq_printf(file, " cgid: %u\n", fqd.cgid); -+ -+ if ((fqd.fq_ctrl & QM_FQCTRL_MASK) == 0) -+ seq_puts(file, " fq_ctrl: None\n"); -+ else { -+ i = 0; -+ seq_puts(file, " fq_ctrl:\n"); -+ while (fq_ctrl_text_list[i].txt != NULL) { -+ if ((fqd.fq_ctrl & QM_FQCTRL_MASK) & -+ fq_ctrl_text_list[i].mask) -+ seq_printf(file, " %s\n", -+ fq_ctrl_text_list[i].txt); -+ i++; -+ } -+ } -+ seq_printf(file, " dest_channel: %u\n", fqd.dest.channel); -+ seq_printf(file, " dest_wq: %u\n", fqd.dest.wq); -+ seq_printf(file, " ics_cred: %u\n", fqd.ics_cred); -+ seq_printf(file, " td_mant: %u\n", fqd.td.mant); -+ seq_printf(file, " td_exp: %u\n", fqd.td.exp); -+ -+ seq_printf(file, " ctx_b: 0x%x\n", fqd.context_b); -+ -+ seq_printf(file, " ctx_a: 0x%llx\n", qm_fqd_stashing_get64(&fqd)); -+ /* Any stashing configured */ -+ if ((fqd.context_a.stashing.exclusive & 0x7) == 0) -+ seq_puts(file, " ctx_a_stash_exclusive: None\n"); -+ else { -+ seq_puts(file, " ctx_a_stash_exclusive:\n"); -+ i = 0; -+ while (stashing_text_list[i].txt != NULL) { -+ if ((fqd.fq_ctrl & 0x7) & stashing_text_list[i].mask) -+ seq_printf(file, " %s\n", -+ stashing_text_list[i].txt); -+ i++; -+ } -+ } -+ seq_printf(file, " ctx_a_stash_annotation_cl: %u\n", -+ fqd.context_a.stashing.annotation_cl); -+ seq_printf(file, " ctx_a_stash_data_cl: %u\n", -+ fqd.context_a.stashing.data_cl); -+ seq_printf(file, " ctx_a_stash_context_cl: %u\n", -+ fqd.context_a.stashing.context_cl); -+ return 0; -+} -+ -+static int 
query_fq_fields_open(struct inode *inode, -+ struct file *file) -+{ -+ return single_open(file, query_fq_fields_show, NULL); -+} -+ -+static ssize_t query_fq_fields_write(struct file *f, -+ const char __user *buf, size_t count, loff_t *off) -+{ -+ int ret; -+ unsigned long val; -+ -+ ret = user_input_convert(buf, count, &val); -+ if (ret) -+ return ret; -+ if (val > MAX_FQID) -+ return -EINVAL; -+ query_fq_fields_data.fqid = (u32)val; -+ return count; -+} -+ -+static const struct file_operations query_fq_fields_fops = { -+ .owner = THIS_MODULE, -+ .open = query_fq_fields_open, -+ .read = seq_read, -+ .write = query_fq_fields_write, -+ .release = single_release, -+}; -+ -+/******************************************************************************* -+ * Query WQ lengths -+ ******************************************************************************/ -+struct query_wq_lengths_data_s { -+ union { -+ u16 channel_wq; /* ignores wq (3 lsbits) */ -+ struct { -+ u16 id:13; /* qm_channel */ -+ u16 __reserved:3; -+ } __packed channel; -+ }; -+}; -+static struct query_wq_lengths_data_s query_wq_lengths_data; -+static int query_wq_lengths_show(struct seq_file *file, void *offset) -+{ -+ int ret; -+ struct qm_mcr_querywq wq; -+ int i; -+ -+ memset(&wq, 0, sizeof(struct qm_mcr_querywq)); -+ wq.channel.id = query_wq_lengths_data.channel.id; -+ ret = qman_query_wq(0, &wq); -+ if (ret) -+ return ret; -+ seq_printf(file, "Query Result For Channel: 0x%x\n", wq.channel.id); -+ for (i = 0; i < 8; i++) -+ /* mask out upper 4 bits since they are not part of length */ -+ seq_printf(file, " wq%d_len : %u\n", i, wq.wq_len[i] & 0x0fff); -+ return 0; -+} -+ -+static int query_wq_lengths_open(struct inode *inode, -+ struct file *file) -+{ -+ return single_open(file, query_wq_lengths_show, NULL); -+} -+ -+static ssize_t query_wq_lengths_write(struct file *f, -+ const char __user *buf, size_t count, loff_t *off) -+{ -+ int ret; -+ unsigned long val; -+ -+ ret = user_input_convert(buf, count, &val); -+ if (ret) -+ return ret; -+ if (val > 0xfff8) -+ return -EINVAL; -+ query_wq_lengths_data.channel.id = (u16)val; -+ return count; -+} -+ -+static const struct file_operations query_wq_lengths_fops = { -+ .owner = THIS_MODULE, -+ .open = query_wq_lengths_open, -+ .read = seq_read, -+ .write = query_wq_lengths_write, -+ .release = single_release, -+}; -+ -+/******************************************************************************* -+ * Query CGR -+ ******************************************************************************/ -+struct query_cgr_s { -+ u8 cgid; -+}; -+static struct query_cgr_s query_cgr_data; -+ -+static int query_cgr_show(struct seq_file *file, void *offset) -+{ -+ int ret; -+ struct qm_mcr_querycgr cgrd; -+ struct qman_cgr cgr; -+ int i, j; -+ u32 mask; -+ -+ memset(&cgr, 0, sizeof(cgr)); -+ memset(&cgrd, 0, sizeof(cgrd)); -+ cgr.cgrid = query_cgr_data.cgid; -+ ret = qman_query_cgr(&cgr, &cgrd); -+ if (ret) -+ return ret; -+ seq_printf(file, "Query CGR id 0x%x\n", cgr.cgrid); -+ seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", -+ cgrd.cgr.wr_parm_g.MA, cgrd.cgr.wr_parm_g.Mn, -+ cgrd.cgr.wr_parm_g.SA, cgrd.cgr.wr_parm_g.Sn, -+ cgrd.cgr.wr_parm_g.Pn); -+ -+ seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", -+ cgrd.cgr.wr_parm_y.MA, cgrd.cgr.wr_parm_y.Mn, -+ cgrd.cgr.wr_parm_y.SA, cgrd.cgr.wr_parm_y.Sn, -+ cgrd.cgr.wr_parm_y.Pn); -+ -+ seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", -+ cgrd.cgr.wr_parm_r.MA, cgrd.cgr.wr_parm_r.Mn, 
-+ cgrd.cgr.wr_parm_r.SA, cgrd.cgr.wr_parm_r.Sn, -+ cgrd.cgr.wr_parm_r.Pn); -+ -+ seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n", -+ cgrd.cgr.wr_en_g, cgrd.cgr.wr_en_y, cgrd.cgr.wr_en_r); -+ -+ seq_printf(file, " cscn_en: %u\n", cgrd.cgr.cscn_en); -+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) { -+ seq_puts(file, " cscn_targ_dcp:\n"); -+ mask = 0x80000000; -+ for (i = 0; i < 32; i++) { -+ if (cgrd.cgr.cscn_targ & mask) -+ seq_printf(file, " send CSCN to dcp %u\n", -+ (31 - i)); -+ mask >>= 1; -+ } -+ -+ seq_puts(file, " cscn_targ_swp:\n"); -+ for (i = 0; i < 4; i++) { -+ mask = 0x80000000; -+ for (j = 0; j < 32; j++) { -+ if (cgrd.cscn_targ_swp[i] & mask) -+ seq_printf(file, " send CSCN to swp" -+ " %u\n", (127 - (i * 32) - j)); -+ mask >>= 1; -+ } -+ } -+ } else { -+ seq_printf(file, " cscn_targ: %u\n", cgrd.cgr.cscn_targ); -+ } -+ seq_printf(file, " cstd_en: %u\n", cgrd.cgr.cstd_en); -+ seq_printf(file, " cs: %u\n", cgrd.cgr.cs); -+ -+ seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n", -+ cgrd.cgr.cs_thres.TA, cgrd.cgr.cs_thres.Tn); -+ -+ seq_printf(file, " mode: %s\n", -+ (cgrd.cgr.mode & QMAN_CGR_MODE_FRAME) ? -+ "frame count" : "byte count"); -+ seq_printf(file, " i_bcnt: %llu\n", qm_mcr_querycgr_i_get64(&cgrd)); -+ seq_printf(file, " a_bcnt: %llu\n", qm_mcr_querycgr_a_get64(&cgrd)); -+ -+ return 0; -+} -+ -+static int query_cgr_open(struct inode *inode, struct file *file) -+{ -+ return single_open(file, query_cgr_show, NULL); -+} -+ -+static ssize_t query_cgr_write(struct file *f, const char __user *buf, -+ size_t count, loff_t *off) -+{ -+ int ret; -+ unsigned long val; -+ -+ ret = user_input_convert(buf, count, &val); -+ if (ret) -+ return ret; -+ if (val > 0xff) -+ return -EINVAL; -+ query_cgr_data.cgid = (u8)val; -+ return count; -+} -+ -+static const struct file_operations query_cgr_fops = { -+ .owner = THIS_MODULE, -+ .open = query_cgr_open, -+ .read = seq_read, -+ .write = query_cgr_write, -+ .release = single_release, -+}; -+ -+/******************************************************************************* -+ * Test Write CGR -+ ******************************************************************************/ -+struct test_write_cgr_s { -+ u64 i_bcnt; -+ u8 cgid; -+}; -+static struct test_write_cgr_s test_write_cgr_data; -+ -+static int testwrite_cgr_show(struct seq_file *file, void *offset) -+{ -+ int ret; -+ struct qm_mcr_cgrtestwrite result; -+ struct qman_cgr cgr; -+ u64 i_bcnt; -+ -+ memset(&cgr, 0, sizeof(struct qman_cgr)); -+ memset(&result, 0, sizeof(struct qm_mcr_cgrtestwrite)); -+ cgr.cgrid = test_write_cgr_data.cgid; -+ i_bcnt = test_write_cgr_data.i_bcnt; -+ ret = qman_testwrite_cgr(&cgr, i_bcnt, &result); -+ if (ret) -+ return ret; -+ seq_printf(file, "CGR Test Write CGR id 0x%x\n", cgr.cgrid); -+ seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", -+ result.cgr.wr_parm_g.MA, result.cgr.wr_parm_g.Mn, -+ result.cgr.wr_parm_g.SA, result.cgr.wr_parm_g.Sn, -+ result.cgr.wr_parm_g.Pn); -+ seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", -+ result.cgr.wr_parm_y.MA, result.cgr.wr_parm_y.Mn, -+ result.cgr.wr_parm_y.SA, result.cgr.wr_parm_y.Sn, -+ result.cgr.wr_parm_y.Pn); -+ seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", -+ result.cgr.wr_parm_r.MA, result.cgr.wr_parm_r.Mn, -+ result.cgr.wr_parm_r.SA, result.cgr.wr_parm_r.Sn, -+ result.cgr.wr_parm_r.Pn); -+ seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n", -+ result.cgr.wr_en_g, result.cgr.wr_en_y, result.cgr.wr_en_r); -+ 
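-+ /* The remaining fields mirror query_cgr_show(); the wr_prob_* values printed at the end are the write-rejection probabilities returned by the CGR test write for the supplied i_bcnt. */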
seq_printf(file, " cscn_en: %u\n", result.cgr.cscn_en); -+ seq_printf(file, " cscn_targ: %u\n", result.cgr.cscn_targ); -+ seq_printf(file, " cstd_en: %u\n", result.cgr.cstd_en); -+ seq_printf(file, " cs: %u\n", result.cgr.cs); -+ seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n", -+ result.cgr.cs_thres.TA, result.cgr.cs_thres.Tn); -+ -+ /* Add Mode for Si 2 */ -+ seq_printf(file, " mode: %s\n", -+ (result.cgr.mode & QMAN_CGR_MODE_FRAME) ? -+ "frame count" : "byte count"); -+ -+ seq_printf(file, " i_bcnt: %llu\n", -+ qm_mcr_cgrtestwrite_i_get64(&result)); -+ seq_printf(file, " a_bcnt: %llu\n", -+ qm_mcr_cgrtestwrite_a_get64(&result)); -+ seq_printf(file, " wr_prob_g: %u\n", result.wr_prob_g); -+ seq_printf(file, " wr_prob_y: %u\n", result.wr_prob_y); -+ seq_printf(file, " wr_prob_r: %u\n", result.wr_prob_r); -+ return 0; -+} -+ -+static int testwrite_cgr_open(struct inode *inode, struct file *file) -+{ -+ return single_open(file, testwrite_cgr_show, NULL); -+} -+ -+static const struct file_operations testwrite_cgr_fops = { -+ .owner = THIS_MODULE, -+ .open = testwrite_cgr_open, -+ .read = seq_read, -+ .release = single_release, -+}; -+ -+ -+static int testwrite_cgr_ibcnt_show(struct seq_file *file, void *offset) -+{ -+ seq_printf(file, "i_bcnt: %llu\n", test_write_cgr_data.i_bcnt); -+ return 0; -+} -+static int testwrite_cgr_ibcnt_open(struct inode *inode, struct file *file) -+{ -+ return single_open(file, testwrite_cgr_ibcnt_show, NULL); -+} -+ -+static ssize_t testwrite_cgr_ibcnt_write(struct file *f, const char __user *buf, -+ size_t count, loff_t *off) -+{ -+ int ret; -+ unsigned long val; -+ -+ ret = user_input_convert(buf, count, &val); -+ if (ret) -+ return ret; -+ test_write_cgr_data.i_bcnt = val; -+ return count; -+} -+ -+static const struct file_operations teswrite_cgr_ibcnt_fops = { -+ .owner = THIS_MODULE, -+ .open = testwrite_cgr_ibcnt_open, -+ .read = seq_read, -+ .write = testwrite_cgr_ibcnt_write, -+ .release = single_release, -+}; -+ -+static int testwrite_cgr_cgrid_show(struct seq_file *file, void *offset) -+{ -+ seq_printf(file, "cgrid: %u\n", (u32)test_write_cgr_data.cgid); -+ return 0; -+} -+static int testwrite_cgr_cgrid_open(struct inode *inode, struct file *file) -+{ -+ return single_open(file, testwrite_cgr_cgrid_show, NULL); -+} -+ -+static ssize_t testwrite_cgr_cgrid_write(struct file *f, const char __user *buf, -+ size_t count, loff_t *off) -+{ -+ int ret; -+ unsigned long val; -+ -+ ret = user_input_convert(buf, count, &val); -+ if (ret) -+ return ret; -+ if (val > 0xff) -+ return -EINVAL; -+ test_write_cgr_data.cgid = (u8)val; -+ return count; -+} -+ -+static const struct file_operations teswrite_cgr_cgrid_fops = { -+ .owner = THIS_MODULE, -+ .open = testwrite_cgr_cgrid_open, -+ .read = seq_read, -+ .write = testwrite_cgr_cgrid_write, -+ .release = single_release, -+}; -+ -+/******************************************************************************* -+ * Query Congestion State -+ ******************************************************************************/ -+static int query_congestion_show(struct seq_file *file, void *offset) -+{ -+ int ret; -+ struct qm_mcr_querycongestion cs; -+ int i, j, in_cong = 0; -+ u32 mask; -+ -+ memset(&cs, 0, sizeof(struct qm_mcr_querycongestion)); -+ ret = qman_query_congestion(&cs); -+ if (ret) -+ return ret; -+ seq_puts(file, "Query Congestion Result\n"); -+ for (i = 0; i < 8; i++) { -+ mask = 0x80000000; -+ for (j = 0; j < 32; j++) { -+ if (cs.state.__state[i] & mask) { -+ in_cong = 1; -+ seq_printf(file, " cg %u: 
%s\n", (i*32)+j, -+ "in congestion"); -+ } -+ mask >>= 1; -+ } -+ } -+ if (!in_cong) -+ seq_puts(file, " All congestion groups not congested.\n"); -+ return 0; -+} -+ -+static int query_congestion_open(struct inode *inode, struct file *file) -+{ -+ return single_open(file, query_congestion_show, NULL); -+} -+ -+static const struct file_operations query_congestion_fops = { -+ .owner = THIS_MODULE, -+ .open = query_congestion_open, -+ .read = seq_read, -+ .release = single_release, -+}; -+ -+/******************************************************************************* -+ * Query CCGR -+ ******************************************************************************/ -+struct query_ccgr_s { -+ u32 ccgid; -+}; -+static struct query_ccgr_s query_ccgr_data; -+ -+static int query_ccgr_show(struct seq_file *file, void *offset) -+{ -+ int ret; -+ struct qm_mcr_ceetm_ccgr_query ccgr_query; -+ struct qm_mcc_ceetm_ccgr_query query_opts; -+ int i, j; -+ u32 mask; -+ -+ memset(&ccgr_query, 0, sizeof(struct qm_mcr_ceetm_ccgr_query)); -+ memset(&query_opts, 0, sizeof(struct qm_mcc_ceetm_ccgr_query)); -+ -+ if ((qman_ip_rev & 0xFF00) < QMAN_REV30) -+ return -EINVAL; -+ -+ seq_printf(file, "Query CCGID %x\n", query_ccgr_data.ccgid); -+ query_opts.dcpid = ((query_ccgr_data.ccgid & 0xFF000000) >> 24); -+ query_opts.ccgrid = query_ccgr_data.ccgid & 0x000001FF; -+ ret = qman_ceetm_query_ccgr(&query_opts, &ccgr_query); -+ if (ret) -+ return ret; -+ seq_printf(file, "Query CCGR id %x in DCP %d\n", query_opts.ccgrid, -+ query_opts.dcpid); -+ seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", -+ ccgr_query.cm_query.wr_parm_g.MA, -+ ccgr_query.cm_query.wr_parm_g.Mn, -+ ccgr_query.cm_query.wr_parm_g.SA, -+ ccgr_query.cm_query.wr_parm_g.Sn, -+ ccgr_query.cm_query.wr_parm_g.Pn); -+ -+ seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", -+ ccgr_query.cm_query.wr_parm_y.MA, -+ ccgr_query.cm_query.wr_parm_y.Mn, -+ ccgr_query.cm_query.wr_parm_y.SA, -+ ccgr_query.cm_query.wr_parm_y.Sn, -+ ccgr_query.cm_query.wr_parm_y.Pn); -+ -+ seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", -+ ccgr_query.cm_query.wr_parm_r.MA, -+ ccgr_query.cm_query.wr_parm_r.Mn, -+ ccgr_query.cm_query.wr_parm_r.SA, -+ ccgr_query.cm_query.wr_parm_r.Sn, -+ ccgr_query.cm_query.wr_parm_r.Pn); -+ -+ seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n", -+ ccgr_query.cm_query.ctl_wr_en_g, -+ ccgr_query.cm_query.ctl_wr_en_y, -+ ccgr_query.cm_query.ctl_wr_en_r); -+ -+ seq_printf(file, " cscn_en: %u\n", ccgr_query.cm_query.ctl_cscn_en); -+ seq_puts(file, " cscn_targ_dcp:\n"); -+ mask = 0x80000000; -+ for (i = 0; i < 32; i++) { -+ if (ccgr_query.cm_query.cscn_targ_dcp & mask) -+ seq_printf(file, " send CSCN to dcp %u\n", (31 - i)); -+ mask >>= 1; -+ } -+ -+ seq_puts(file, " cscn_targ_swp:\n"); -+ for (i = 0; i < 4; i++) { -+ mask = 0x80000000; -+ for (j = 0; j < 32; j++) { -+ if (ccgr_query.cm_query.cscn_targ_swp[i] & mask) -+ seq_printf(file, " send CSCN to swp" -+ "%u\n", (127 - (i * 32) - j)); -+ mask >>= 1; -+ } -+ } -+ -+ seq_printf(file, " td_en: %u\n", ccgr_query.cm_query.ctl_td_en); -+ -+ seq_printf(file, " cs_thresh_in_TA: %u, cs_thresh_in_Tn: %u\n", -+ ccgr_query.cm_query.cs_thres.TA, -+ ccgr_query.cm_query.cs_thres.Tn); -+ -+ seq_printf(file, " cs_thresh_out_TA: %u, cs_thresh_out_Tn: %u\n", -+ ccgr_query.cm_query.cs_thres_x.TA, -+ ccgr_query.cm_query.cs_thres_x.Tn); -+ -+ seq_printf(file, " td_thresh_TA: %u, td_thresh_Tn: %u\n", -+ ccgr_query.cm_query.td_thres.TA, -+ 
ccgr_query.cm_query.td_thres.Tn); -+ -+ seq_printf(file, " mode: %s\n", -+ (ccgr_query.cm_query.ctl_mode & -+ QMAN_CGR_MODE_FRAME) ? -+ "frame count" : "byte count"); -+ seq_printf(file, " i_cnt: %llu\n", (u64)ccgr_query.cm_query.i_cnt); -+ seq_printf(file, " a_cnt: %llu\n", (u64)ccgr_query.cm_query.a_cnt); -+ -+ return 0; -+} -+ -+static int query_ccgr_open(struct inode *inode, struct file *file) -+{ -+ return single_open(file, query_ccgr_show, NULL); -+} -+ -+static ssize_t query_ccgr_write(struct file *f, const char __user *buf, -+ size_t count, loff_t *off) -+{ -+ int ret; -+ unsigned long val; -+ -+ ret = user_input_convert(buf, count, &val); -+ if (ret) -+ return ret; -+ query_ccgr_data.ccgid = val; -+ return count; -+} -+ -+static const struct file_operations query_ccgr_fops = { -+ .owner = THIS_MODULE, -+ .open = query_ccgr_open, -+ .read = seq_read, -+ .write = query_ccgr_write, -+ .release = single_release, -+}; -+/******************************************************************************* -+ * QMan register -+ ******************************************************************************/ -+struct qman_register_s { -+ u32 val; -+}; -+static struct qman_register_s qman_register_data; -+ -+static void init_ccsrmempeek(void) -+{ -+ struct device_node *dn; -+ const u32 *regaddr_p; -+ -+ dn = of_find_compatible_node(NULL, NULL, "fsl,qman"); -+ if (!dn) { -+ pr_info("No fsl,qman node\n"); -+ return; -+ } -+ regaddr_p = of_get_address(dn, 0, &qman_ccsr_size, NULL); -+ if (!regaddr_p) { -+ of_node_put(dn); -+ return; -+ } -+ qman_ccsr_start = of_translate_address(dn, regaddr_p); -+ of_node_put(dn); -+} -+/* This function provides access to QMan ccsr memory map */ -+static int qman_ccsrmempeek(u32 *val, u32 offset) -+{ -+ void __iomem *addr; -+ u64 phys_addr; -+ -+ if (!qman_ccsr_start) -+ return -EINVAL; -+ -+ if (offset > (qman_ccsr_size - sizeof(u32))) -+ return -EINVAL; -+ -+ phys_addr = qman_ccsr_start + offset; -+ addr = ioremap(phys_addr, sizeof(u32)); -+ if (!addr) { -+ pr_err("ccsrmempeek, ioremap failed\n"); -+ return -EINVAL; -+ } -+ *val = in_be32(addr); -+ iounmap(addr); -+ return 0; -+} -+ -+static int qman_ccsrmempeek_show(struct seq_file *file, void *offset) -+{ -+ u32 b; -+ -+ qman_ccsrmempeek(&b, qman_register_data.val); -+ seq_printf(file, "QMan register offset = 0x%x\n", -+ qman_register_data.val); -+ seq_printf(file, "value = 0x%08x\n", b); -+ -+ return 0; -+} -+ -+static int qman_ccsrmempeek_open(struct inode *inode, struct file *file) -+{ -+ return single_open(file, qman_ccsrmempeek_show, NULL); -+} -+ -+static ssize_t qman_ccsrmempeek_write(struct file *f, const char __user *buf, -+ size_t count, loff_t *off) -+{ -+ int ret; -+ unsigned long val; -+ -+ ret = user_input_convert(buf, count, &val); -+ if (ret) -+ return ret; -+ /* multiple of 4 */ -+ if (val > (qman_ccsr_size - sizeof(u32))) { -+ pr_info("Input 0x%lx > 0x%llx\n", -+ val, (qman_ccsr_size - sizeof(u32))); -+ return -EINVAL; -+ } -+ if (val & 0x3) { -+ pr_info("Input 0x%lx not multiple of 4\n", val); -+ return -EINVAL; -+ } -+ qman_register_data.val = val; -+ return count; -+} -+ -+static const struct file_operations qman_ccsrmempeek_fops = { -+ .owner = THIS_MODULE, -+ .open = qman_ccsrmempeek_open, -+ .read = seq_read, -+ .write = qman_ccsrmempeek_write, -+}; -+ -+/******************************************************************************* -+ * QMan state -+ ******************************************************************************/ -+static int qman_fqd_state_show(struct seq_file *file, 
void *offset) -+{ -+ struct qm_mcr_queryfq_np np; -+ struct qman_fq fq; -+ struct line_buffer_fq line_buf; -+ int ret, i; -+ u8 *state = file->private; -+ u32 qm_fq_state_cnt[ARRAY_SIZE(fqd_states)]; -+ -+ memset(qm_fq_state_cnt, 0, sizeof(qm_fq_state_cnt)); -+ memset(&line_buf, 0, sizeof(line_buf)); -+ -+ seq_printf(file, "List of fq ids in state: %s\n", state_txt[*state]); -+ -+ for (i = 1; i < fqid_max; i++) { -+ fq.fqid = i; -+ ret = qman_query_fq_np(&fq, &np); -+ if (ret) -+ return ret; -+ if (*state == (np.state & QM_MCR_NP_STATE_MASK)) -+ add_to_line_buffer(&line_buf, fq.fqid, file); -+ /* Keep a summary count of all states */ -+ if ((np.state & QM_MCR_NP_STATE_MASK) < ARRAY_SIZE(fqd_states)) -+ qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++; -+ } -+ flush_line_buffer(&line_buf, file); -+ -+ for (i = 0; i < ARRAY_SIZE(fqd_states); i++) { -+ seq_printf(file, "%s count = %u\n", state_txt[i], -+ qm_fq_state_cnt[i]); -+ } -+ return 0; -+} -+ -+static int qman_fqd_state_open(struct inode *inode, struct file *file) -+{ -+ return single_open(file, qman_fqd_state_show, inode->i_private); -+} -+ -+static const struct file_operations qman_fqd_state_fops = { -+ .owner = THIS_MODULE, -+ .open = qman_fqd_state_open, -+ .read = seq_read, -+}; -+ -+static int qman_fqd_ctrl_show(struct seq_file *file, void *offset) -+{ -+ struct qm_fqd fqd; -+ struct qman_fq fq; -+ u32 fq_en_cnt = 0, fq_di_cnt = 0; -+ int ret, i; -+ struct mask_filter_s *data = file->private; -+ const char *ctrl_txt = get_fqd_ctrl_text(data->mask); -+ struct line_buffer_fq line_buf; -+ -+ memset(&line_buf, 0, sizeof(line_buf)); -+ seq_printf(file, "List of fq ids with: %s :%s\n", -+ ctrl_txt, (data->filter) ? "enabled" : "disabled"); -+ for (i = 1; i < fqid_max; i++) { -+ fq.fqid = i; -+ memset(&fqd, 0, sizeof(struct qm_fqd)); -+ ret = qman_query_fq(&fq, &fqd); -+ if (ret) -+ return ret; -+ if (data->filter) { -+ if (fqd.fq_ctrl & data->mask) -+ add_to_line_buffer(&line_buf, fq.fqid, file); -+ } else { -+ if (!(fqd.fq_ctrl & data->mask)) -+ add_to_line_buffer(&line_buf, fq.fqid, file); -+ } -+ if (fqd.fq_ctrl & data->mask) -+ fq_en_cnt++; -+ else -+ fq_di_cnt++; -+ } -+ flush_line_buffer(&line_buf, file); -+ -+ seq_printf(file, "Total FQD with: %s : enabled = %u\n", -+ ctrl_txt, fq_en_cnt); -+ seq_printf(file, "Total FQD with: %s : disabled = %u\n", -+ ctrl_txt, fq_di_cnt); -+ return 0; -+} -+ -+/******************************************************************************* -+ * QMan ctrl CGE, TDE, ORP, CTX, CPC, SFDR, BLOCK, HOLD, CACHE -+ ******************************************************************************/ -+static int qman_fqd_ctrl_open(struct inode *inode, struct file *file) -+{ -+ return single_open(file, qman_fqd_ctrl_show, inode->i_private); -+} -+ -+static const struct file_operations qman_fqd_ctrl_fops = { -+ .owner = THIS_MODULE, -+ .open = qman_fqd_ctrl_open, -+ .read = seq_read, -+}; -+ -+/******************************************************************************* -+ * QMan ctrl summary -+ ******************************************************************************/ -+/******************************************************************************* -+ * QMan summary state -+ ******************************************************************************/ -+static int qman_fqd_non_prog_summary_show(struct seq_file *file, void *offset) -+{ -+ struct qm_mcr_queryfq_np np; -+ struct qman_fq fq; -+ int ret, i; -+ u32 qm_fq_state_cnt[ARRAY_SIZE(fqd_states)]; -+ -+ memset(qm_fq_state_cnt, 0, 
sizeof(qm_fq_state_cnt)); -+ -+ for (i = 1; i < fqid_max; i++) { -+ fq.fqid = i; -+ ret = qman_query_fq_np(&fq, &np); -+ if (ret) -+ return ret; -+ /* Keep a summary count of all states */ -+ if ((np.state & QM_MCR_NP_STATE_MASK) < ARRAY_SIZE(fqd_states)) -+ qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++; -+ } -+ -+ for (i = 0; i < ARRAY_SIZE(fqd_states); i++) { -+ seq_printf(file, "%s count = %u\n", state_txt[i], -+ qm_fq_state_cnt[i]); -+ } -+ return 0; -+} -+ -+static int qman_fqd_prog_summary_show(struct seq_file *file, void *offset) -+{ -+ struct qm_fqd fqd; -+ struct qman_fq fq; -+ int ret, i , j; -+ u32 qm_prog_cnt[ARRAY_SIZE(mask_filter)/2]; -+ -+ memset(qm_prog_cnt, 0, sizeof(qm_prog_cnt)); -+ -+ for (i = 1; i < fqid_max; i++) { -+ memset(&fqd, 0, sizeof(struct qm_fqd)); -+ fq.fqid = i; -+ ret = qman_query_fq(&fq, &fqd); -+ if (ret) -+ return ret; -+ /* Keep a summary count of all states */ -+ for (j = 0; j < ARRAY_SIZE(mask_filter); j += 2) -+ if ((fqd.fq_ctrl & QM_FQCTRL_MASK) & -+ mask_filter[j].mask) -+ qm_prog_cnt[j/2]++; -+ } -+ for (i = 0; i < ARRAY_SIZE(mask_filter) / 2; i++) { -+ seq_printf(file, "%s count = %u\n", -+ get_fqd_ctrl_text(mask_filter[i*2].mask), -+ qm_prog_cnt[i]); -+ } -+ return 0; -+} -+ -+static int qman_fqd_summary_show(struct seq_file *file, void *offset) -+{ -+ int ret; -+ -+ /* Display summary of non programmable fields */ -+ ret = qman_fqd_non_prog_summary_show(file, offset); -+ if (ret) -+ return ret; -+ seq_puts(file, "-----------------------------------------\n"); -+ /* Display programmable fields */ -+ ret = qman_fqd_prog_summary_show(file, offset); -+ if (ret) -+ return ret; -+ return 0; -+} -+ -+static int qman_fqd_summary_open(struct inode *inode, struct file *file) -+{ -+ return single_open(file, qman_fqd_summary_show, NULL); -+} -+ -+static const struct file_operations qman_fqd_summary_fops = { -+ .owner = THIS_MODULE, -+ .open = qman_fqd_summary_open, -+ .read = seq_read, -+}; -+ -+/******************************************************************************* -+ * QMan destination work queue -+ ******************************************************************************/ -+struct qman_dest_wq_s { -+ u16 wq_id; -+}; -+static struct qman_dest_wq_s qman_dest_wq_data = { -+ .wq_id = 0, -+}; -+ -+static int qman_fqd_dest_wq_show(struct seq_file *file, void *offset) -+{ -+ struct qm_fqd fqd; -+ struct qman_fq fq; -+ int ret, i; -+ u16 *wq, wq_id = qman_dest_wq_data.wq_id; -+ struct line_buffer_fq line_buf; -+ -+ memset(&line_buf, 0, sizeof(line_buf)); -+ /* use vmalloc : need to allocate large memory region and don't -+ * require the memory to be physically contiguous. 
*/ -+ wq = vzalloc(sizeof(u16) * (0xFFFF+1)); -+ if (!wq) -+ return -ENOMEM; -+ -+ seq_printf(file, "List of fq ids with destination work queue id" -+ " = 0x%x\n", wq_id); -+ -+ for (i = 1; i < fqid_max; i++) { -+ fq.fqid = i; -+ memset(&fqd, 0, sizeof(struct qm_fqd)); -+ ret = qman_query_fq(&fq, &fqd); -+ if (ret) { -+ vfree(wq); -+ return ret; -+ } -+ if (wq_id == fqd.dest_wq) -+ add_to_line_buffer(&line_buf, fq.fqid, file); -+ wq[fqd.dest_wq]++; -+ } -+ flush_line_buffer(&line_buf, file); -+ -+ seq_puts(file, "Summary of all FQD destination work queue values\n"); -+ for (i = 0; i < 0xFFFF; i++) { -+ if (wq[i]) -+ seq_printf(file, "Channel: 0x%x WQ: 0x%x WQ_ID: 0x%x, " -+ "count = %u\n", i >> 3, i & 0x3, i, wq[i]); -+ } -+ vfree(wq); -+ return 0; -+} -+ -+static ssize_t qman_fqd_dest_wq_write(struct file *f, const char __user *buf, -+ size_t count, loff_t *off) -+{ -+ int ret; -+ unsigned long val; -+ -+ ret = user_input_convert(buf, count, &val); -+ if (ret) -+ return ret; -+ if (val > 0xFFFF) -+ return -EINVAL; -+ qman_dest_wq_data.wq_id = val; -+ return count; -+} -+ -+static int qman_fqd_dest_wq_open(struct inode *inode, struct file *file) -+{ -+ return single_open(file, qman_fqd_dest_wq_show, NULL); -+} -+ -+static const struct file_operations qman_fqd_dest_wq_fops = { -+ .owner = THIS_MODULE, -+ .open = qman_fqd_dest_wq_open, -+ .read = seq_read, -+ .write = qman_fqd_dest_wq_write, -+}; -+ -+/******************************************************************************* -+ * QMan Intra-Class Scheduling Credit -+ ******************************************************************************/ -+static int qman_fqd_cred_show(struct seq_file *file, void *offset) -+{ -+ struct qm_fqd fqd; -+ struct qman_fq fq; -+ int ret, i; -+ u32 fq_cnt = 0; -+ struct line_buffer_fq line_buf; -+ -+ memset(&line_buf, 0, sizeof(line_buf)); -+ seq_puts(file, "List of fq ids with Intra-Class Scheduling Credit > 0" -+ "\n"); -+ -+ for (i = 1; i < fqid_max; i++) { -+ fq.fqid = i; -+ memset(&fqd, 0, sizeof(struct qm_fqd)); -+ ret = qman_query_fq(&fq, &fqd); -+ if (ret) -+ return ret; -+ if (fqd.ics_cred > 0) { -+ add_to_line_buffer(&line_buf, fq.fqid, file); -+ fq_cnt++; -+ } -+ } -+ flush_line_buffer(&line_buf, file); -+ -+ seq_printf(file, "Total FQD with ics_cred > 0 = %d\n", fq_cnt); -+ return 0; -+} -+ -+static int qman_fqd_cred_open(struct inode *inode, struct file *file) -+{ -+ return single_open(file, qman_fqd_cred_show, NULL); -+} -+ -+static const struct file_operations qman_fqd_cred_fops = { -+ .owner = THIS_MODULE, -+ .open = qman_fqd_cred_open, -+ .read = seq_read, -+}; -+ -+/******************************************************************************* -+ * Class Queue Fields -+ ******************************************************************************/ -+struct query_cq_fields_data_s { -+ u32 cqid; -+}; -+ -+static struct query_cq_fields_data_s query_cq_fields_data = { -+ .cqid = 1, -+}; -+ -+static int query_cq_fields_show(struct seq_file *file, void *offset) -+{ -+ int ret; -+ struct qm_mcr_ceetm_cq_query query_result; -+ unsigned int cqid; -+ unsigned int portal; -+ -+ if ((qman_ip_rev & 0xFF00) < QMAN_REV30) -+ return -EINVAL; -+ -+ cqid = query_cq_fields_data.cqid & 0x00FFFFFF; -+ portal = query_cq_fields_data.cqid >> 24; -+ if (portal > qm_dc_portal_fman1) -+ return -EINVAL; -+ -+ ret = qman_ceetm_query_cq(cqid, portal, &query_result); -+ if (ret) -+ return ret; -+ seq_printf(file, "Query CQ Fields Result cqid 0x%x on DCP %d\n", -+ cqid, portal); -+ seq_printf(file, " ccgid: %u\n", 
query_result.ccgid); -+ seq_printf(file, " state: %u\n", query_result.state); -+ seq_printf(file, " pfdr_hptr: %u\n", query_result.pfdr_hptr); -+ seq_printf(file, " pfdr_tptr: %u\n", query_result.pfdr_tptr); -+ seq_printf(file, " od1_xsfdr: %u\n", query_result.od1_xsfdr); -+ seq_printf(file, " od2_xsfdr: %u\n", query_result.od2_xsfdr); -+ seq_printf(file, " od3_xsfdr: %u\n", query_result.od3_xsfdr); -+ seq_printf(file, " od4_xsfdr: %u\n", query_result.od4_xsfdr); -+ seq_printf(file, " od5_xsfdr: %u\n", query_result.od5_xsfdr); -+ seq_printf(file, " od6_xsfdr: %u\n", query_result.od6_xsfdr); -+ seq_printf(file, " ra1_xsfdr: %u\n", query_result.ra1_xsfdr); -+ seq_printf(file, " ra2_xsfdr: %u\n", query_result.ra2_xsfdr); -+ seq_printf(file, " frame_count: %u\n", query_result.frm_cnt); -+ -+ return 0; -+} -+ -+static int query_cq_fields_open(struct inode *inode, -+ struct file *file) -+{ -+ return single_open(file, query_cq_fields_show, NULL); -+} -+ -+static ssize_t query_cq_fields_write(struct file *f, -+ const char __user *buf, size_t count, loff_t *off) -+{ -+ int ret; -+ unsigned long val; -+ -+ ret = user_input_convert(buf, count, &val); -+ if (ret) -+ return ret; -+ query_cq_fields_data.cqid = (u32)val; -+ return count; -+} -+ -+static const struct file_operations query_cq_fields_fops = { -+ .owner = THIS_MODULE, -+ .open = query_cq_fields_open, -+ .read = seq_read, -+ .write = query_cq_fields_write, -+ .release = single_release, -+}; -+ -+/******************************************************************************* -+ * READ CEETM_XSFDR_IN_USE -+ ******************************************************************************/ -+struct query_ceetm_xsfdr_data_s { -+ enum qm_dc_portal dcp_portal; -+}; -+ -+static struct query_ceetm_xsfdr_data_s query_ceetm_xsfdr_data; -+ -+static int query_ceetm_xsfdr_show(struct seq_file *file, void *offset) -+{ -+ int ret; -+ unsigned int xsfdr_in_use; -+ enum qm_dc_portal portal; -+ -+ -+ if (qman_ip_rev < QMAN_REV31) -+ return -EINVAL; -+ -+ portal = query_ceetm_xsfdr_data.dcp_portal; -+ ret = qman_ceetm_get_xsfdr(portal, &xsfdr_in_use); -+ if (ret) { -+ seq_printf(file, "Read CEETM_XSFDR_IN_USE on DCP %d failed\n", -+ portal); -+ return ret; -+ } -+ -+ seq_printf(file, "DCP%d: CEETM_XSFDR_IN_USE number is %u\n", portal, -+ (xsfdr_in_use & 0x1FFF)); -+ return 0; -+} -+ -+static int query_ceetm_xsfdr_open(struct inode *inode, -+ struct file *file) -+{ -+ return single_open(file, query_ceetm_xsfdr_show, NULL); -+} -+ -+static ssize_t query_ceetm_xsfdr_write(struct file *f, -+ const char __user *buf, size_t count, loff_t *off) -+{ -+ int ret; -+ unsigned long val; -+ -+ ret = user_input_convert(buf, count, &val); -+ if (ret) -+ return ret; -+ if (val > qm_dc_portal_fman1) -+ return -EINVAL; -+ query_ceetm_xsfdr_data.dcp_portal = (u32)val; -+ return count; -+} -+ -+static const struct file_operations query_ceetm_xsfdr_fops = { -+ .owner = THIS_MODULE, -+ .open = query_ceetm_xsfdr_open, -+ .read = seq_read, -+ .write = query_ceetm_xsfdr_write, -+ .release = single_release, -+}; -+ -+/* helper macros used in qman_debugfs_module_init */ -+#define QMAN_DBGFS_ENTRY(name, mode, parent, data, fops) \ -+ do { \ -+ d = debugfs_create_file(name, \ -+ mode, parent, \ -+ data, \ -+ fops); \ -+ if (d == NULL) { \ -+ ret = -ENOMEM; \ -+ goto _return; \ -+ } \ -+ } while (0) -+ -+/* dfs_root as parent */ -+#define QMAN_DBGFS_ENTRY_ROOT(name, mode, data, fops) \ -+ QMAN_DBGFS_ENTRY(name, mode, dfs_root, data, fops) -+ -+/* fqd_root as parent */ -+#define 
QMAN_DBGFS_ENTRY_FQDROOT(name, mode, data, fops) \ -+ QMAN_DBGFS_ENTRY(name, mode, fqd_root, data, fops) -+ -+/* fqd state */ -+#define QMAN_DBGFS_ENTRY_FQDSTATE(name, index) \ -+ QMAN_DBGFS_ENTRY_FQDROOT(name, S_IRUGO, \ -+ (void *)&mask_filter[index], &qman_fqd_ctrl_fops) -+ -+static int __init qman_debugfs_module_init(void) -+{ -+ int ret = 0; -+ struct dentry *d, *fqd_root; -+ u32 reg; -+ -+ fqid_max = 0; -+ init_ccsrmempeek(); -+ if (qman_ccsr_start) { -+ if (!qman_ccsrmempeek(®, QM_FQD_AR)) { -+ /* extract the size of the FQD window */ -+ reg = reg & 0x3f; -+ /* calculate valid frame queue descriptor range */ -+ fqid_max = (1 << (reg + 1)) / QM_FQD_BLOCK_SIZE; -+ } -+ } -+ dfs_root = debugfs_create_dir("qman", NULL); -+ fqd_root = debugfs_create_dir("fqd", dfs_root); -+ if (dfs_root == NULL || fqd_root == NULL) { -+ ret = -ENOMEM; -+ pr_err("Cannot create qman/fqd debugfs dir\n"); -+ goto _return; -+ } -+ if (fqid_max) { -+ QMAN_DBGFS_ENTRY_ROOT("ccsrmempeek", S_IRUGO | S_IWUGO, -+ NULL, &qman_ccsrmempeek_fops); -+ } -+ QMAN_DBGFS_ENTRY_ROOT("query_fq_np_fields", S_IRUGO | S_IWUGO, -+ &query_fq_np_fields_data, &query_fq_np_fields_fops); -+ -+ QMAN_DBGFS_ENTRY_ROOT("query_fq_fields", S_IRUGO | S_IWUGO, -+ &query_fq_fields_data, &query_fq_fields_fops); -+ -+ QMAN_DBGFS_ENTRY_ROOT("query_wq_lengths", S_IRUGO | S_IWUGO, -+ &query_wq_lengths_data, &query_wq_lengths_fops); -+ -+ QMAN_DBGFS_ENTRY_ROOT("query_cgr", S_IRUGO | S_IWUGO, -+ &query_cgr_data, &query_cgr_fops); -+ -+ QMAN_DBGFS_ENTRY_ROOT("query_congestion", S_IRUGO, -+ NULL, &query_congestion_fops); -+ -+ QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr", S_IRUGO, -+ NULL, &testwrite_cgr_fops); -+ -+ QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_cgrid", S_IRUGO | S_IWUGO, -+ NULL, &teswrite_cgr_cgrid_fops); -+ -+ QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_ibcnt", S_IRUGO | S_IWUGO, -+ NULL, &teswrite_cgr_ibcnt_fops); -+ -+ QMAN_DBGFS_ENTRY_ROOT("query_ceetm_ccgr", S_IRUGO | S_IWUGO, -+ &query_ccgr_data, &query_ccgr_fops); -+ /* Create files with fqd_root as parent */ -+ -+ QMAN_DBGFS_ENTRY_FQDROOT("stateoos", S_IRUGO, -+ (void *)&fqd_states[QM_MCR_NP_STATE_OOS], &qman_fqd_state_fops); -+ -+ QMAN_DBGFS_ENTRY_FQDROOT("state_retired", S_IRUGO, -+ (void *)&fqd_states[QM_MCR_NP_STATE_RETIRED], -+ &qman_fqd_state_fops); -+ -+ QMAN_DBGFS_ENTRY_FQDROOT("state_tentatively_sched", S_IRUGO, -+ (void *)&fqd_states[QM_MCR_NP_STATE_TEN_SCHED], -+ &qman_fqd_state_fops); -+ -+ QMAN_DBGFS_ENTRY_FQDROOT("state_truly_sched", S_IRUGO, -+ (void *)&fqd_states[QM_MCR_NP_STATE_TRU_SCHED], -+ &qman_fqd_state_fops); -+ -+ QMAN_DBGFS_ENTRY_FQDROOT("state_parked", S_IRUGO, -+ (void *)&fqd_states[QM_MCR_NP_STATE_PARKED], -+ &qman_fqd_state_fops); -+ -+ QMAN_DBGFS_ENTRY_FQDROOT("state_active", S_IRUGO, -+ (void *)&fqd_states[QM_MCR_NP_STATE_ACTIVE], -+ &qman_fqd_state_fops); -+ QMAN_DBGFS_ENTRY_ROOT("query_cq_fields", S_IRUGO | S_IWUGO, -+ &query_cq_fields_data, &query_cq_fields_fops); -+ QMAN_DBGFS_ENTRY_ROOT("query_ceetm_xsfdr_in_use", S_IRUGO | S_IWUGO, -+ &query_ceetm_xsfdr_data, &query_ceetm_xsfdr_fops); -+ -+ -+ QMAN_DBGFS_ENTRY_FQDSTATE("cge_enable", 17); -+ -+ QMAN_DBGFS_ENTRY_FQDSTATE("cge_disable", 16); -+ -+ QMAN_DBGFS_ENTRY_FQDSTATE("tde_enable", 15); -+ -+ QMAN_DBGFS_ENTRY_FQDSTATE("tde_disable", 14); -+ -+ QMAN_DBGFS_ENTRY_FQDSTATE("orp_enable", 13); -+ -+ QMAN_DBGFS_ENTRY_FQDSTATE("orp_disable", 12); -+ -+ QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_enable", 11); -+ -+ QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_disable", 10); -+ -+ QMAN_DBGFS_ENTRY_FQDSTATE("cpc_enable", 9); 
-+ -+ QMAN_DBGFS_ENTRY_FQDSTATE("cpc_disable", 8); -+ -+ QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_enable", 7); -+ -+ QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_disable", 6); -+ -+ QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_enable", 5); -+ -+ QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_disable", 4); -+ -+ QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_enable", 3); -+ -+ QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_disable", 2); -+ -+ QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_enable", 1); -+ -+ QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_disable", 0); -+ -+ QMAN_DBGFS_ENTRY_FQDROOT("summary", S_IRUGO, -+ NULL, &qman_fqd_summary_fops); -+ -+ QMAN_DBGFS_ENTRY_FQDROOT("wq", S_IRUGO | S_IWUGO, -+ NULL, &qman_fqd_dest_wq_fops); -+ -+ QMAN_DBGFS_ENTRY_FQDROOT("cred", S_IRUGO, -+ NULL, &qman_fqd_cred_fops); -+ -+ return 0; -+ -+_return: -+ debugfs_remove_recursive(dfs_root); -+ return ret; -+} -+ -+static void __exit qman_debugfs_module_exit(void) -+{ -+ debugfs_remove_recursive(dfs_root); -+} -+ -+module_init(qman_debugfs_module_init); -+module_exit(qman_debugfs_module_exit); -+MODULE_LICENSE("Dual BSD/GPL"); ---- /dev/null -+++ b/drivers/staging/fsl_qbman/qman_driver.c -@@ -0,0 +1,980 @@ -+/* Copyright 2008-2012 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include "qman_private.h" -+ -+#include <asm/smp.h> /* hard_smp_processor_id() if !CONFIG_SMP */ -+#ifdef CONFIG_HOTPLUG_CPU -+#include <linux/cpu.h> -+#endif -+ -+/* Global variable containing revision id (even on non-control plane systems -+ * where CCSR isn't available) */ -+u16 qman_ip_rev; -+EXPORT_SYMBOL(qman_ip_rev); -+u8 qman_ip_cfg; -+EXPORT_SYMBOL(qman_ip_cfg); -+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1; -+EXPORT_SYMBOL(qm_channel_pool1); -+u16 qm_channel_caam = QMAN_CHANNEL_CAAM; -+EXPORT_SYMBOL(qm_channel_caam); -+u16 qm_channel_pme = QMAN_CHANNEL_PME; -+EXPORT_SYMBOL(qm_channel_pme); -+u16 qm_channel_dce = QMAN_CHANNEL_DCE; -+EXPORT_SYMBOL(qm_channel_dce); -+u16 qman_portal_max; -+EXPORT_SYMBOL(qman_portal_max); -+ -+u32 qman_clk; -+struct qm_ceetm qman_ceetms[QMAN_CEETM_MAX]; -+/* the qman ceetm instances on the given SoC */ -+u8 num_ceetms; -+ -+/* For these variables, and the portal-initialisation logic, the -+ * comments in bman_driver.c apply here so won't be repeated. */ -+static struct qman_portal *shared_portals[NR_CPUS]; -+static int num_shared_portals; -+static int shared_portals_idx; -+static LIST_HEAD(unused_pcfgs); -+static DEFINE_SPINLOCK(unused_pcfgs_lock); -+ -+/* A SDQCR mask comprising all the available/visible pool channels */ -+static u32 pools_sdqcr; -+ -+#define STR_ERR_NOPROP "No '%s' property in node %s\n" -+#define STR_ERR_CELL "'%s' is not a %d-cell range in node %s\n" -+#define STR_FQID_RANGE "fsl,fqid-range" -+#define STR_POOL_CHAN_RANGE "fsl,pool-channel-range" -+#define STR_CGRID_RANGE "fsl,cgrid-range" -+ -+/* A "fsl,fqid-range" node; release the given range to the allocator */ -+static __init int fsl_fqid_range_init(struct device_node *node) -+{ -+ int ret; -+ const u32 *range = of_get_property(node, STR_FQID_RANGE, &ret); -+ if (!range) { -+ pr_err(STR_ERR_NOPROP, STR_FQID_RANGE, node->full_name); -+ return -EINVAL; -+ } -+ if (ret != 8) { -+ pr_err(STR_ERR_CELL, STR_FQID_RANGE, 2, node->full_name); -+ return -EINVAL; -+ } -+ qman_seed_fqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1])); -+ pr_info("Qman: FQID allocator includes range %d:%d\n", -+ be32_to_cpu(range[0]), be32_to_cpu(range[1])); -+ return 0; -+} -+ -+/* A "fsl,pool-channel-range" node; add to the SDQCR mask only */ -+static __init int fsl_pool_channel_range_sdqcr(struct device_node *node) -+{ -+ int ret; -+ const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret); -+ if (!chanid) { -+ pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name); -+ return -EINVAL; -+ } -+ if (ret != 8) { -+ pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 1, node->full_name); -+ return -EINVAL; -+ } -+ for (ret = 0; ret < be32_to_cpu(chanid[1]); ret++) -+ pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(be32_to_cpu(chanid[0]) + ret); -+ return 0; -+} -+ -+/* A "fsl,pool-channel-range" node; release the given range to the allocator */ -+static __init int fsl_pool_channel_range_init(struct device_node *node) -+{ -+ int ret; -+ const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret); -+ if (!chanid) { -+ pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name); -+ return -EINVAL; -+ } -+ if (ret != 8) { -+ pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 1, node->full_name); -+ return -EINVAL; -+ } -+ qman_seed_pool_range(be32_to_cpu(chanid[0]), be32_to_cpu(chanid[1])); -+ pr_info("Qman: pool channel allocator includes range %d:%d\n", -+ be32_to_cpu(chanid[0]), be32_to_cpu(chanid[1])); -+ return 0; -+} -+ -+/* A "fsl,cgrid-range" node; release the given range to the allocator */ -+static __init int
fsl_cgrid_range_init(struct device_node *node) -+{ -+ struct qman_cgr cgr; -+ int ret, errors = 0; -+ const u32 *range = of_get_property(node, STR_CGRID_RANGE, &ret); -+ if (!range) { -+ pr_err(STR_ERR_NOPROP, STR_CGRID_RANGE, node->full_name); -+ return -EINVAL; -+ } -+ if (ret != 8) { -+ pr_err(STR_ERR_CELL, STR_CGRID_RANGE, 2, node->full_name); -+ return -EINVAL; -+ } -+ qman_seed_cgrid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1])); -+ pr_info("Qman: CGRID allocator includes range %d:%d\n", -+ be32_to_cpu(range[0]), be32_to_cpu(range[1])); -+ for (cgr.cgrid = 0; cgr.cgrid < __CGR_NUM; cgr.cgrid++) { -+ ret = qman_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL); -+ if (ret) -+ errors++; -+ } -+ if (errors) -+ pr_err("Warning: %d error%s while initialising CGRs %d:%d\n", -+ errors, (errors > 1) ? "s" : "", range[0], range[1]); -+ return 0; -+} -+ -+static __init int fsl_ceetm_init(struct device_node *node) -+{ -+ enum qm_dc_portal dcp_portal; -+ struct qm_ceetm_sp *sp; -+ struct qm_ceetm_lni *lni; -+ int ret, i; -+ const u32 *range; -+ -+ /* Find LFQID range */ -+ range = of_get_property(node, "fsl,ceetm-lfqid-range", &ret); -+ if (!range) { -+ pr_err("No fsl,ceetm-lfqid-range in node %s\n", -+ node->full_name); -+ return -EINVAL; -+ } -+ if (ret != 8) { -+ pr_err("fsl,ceetm-lfqid-range is not a 2-cell range in node" -+ " %s\n", node->full_name); -+ return -EINVAL; -+ } -+ -+ dcp_portal = (be32_to_cpu(range[0]) & 0x0F0000) >> 16; -+ if (dcp_portal > qm_dc_portal_fman1) { -+ pr_err("The DCP portal %d doesn't support CEETM\n", dcp_portal); -+ return -EINVAL; -+ } -+ -+ if (dcp_portal == qm_dc_portal_fman0) -+ qman_seed_ceetm0_lfqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1])); -+ if (dcp_portal == qm_dc_portal_fman1) -+ qman_seed_ceetm1_lfqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1])); -+ pr_debug("Qman: The lfqid allocator of CEETM %d includes range" -+ " 0x%x:0x%x\n", dcp_portal, be32_to_cpu(range[0]), be32_to_cpu(range[1])); -+ -+ qman_ceetms[dcp_portal].idx = dcp_portal; -+ INIT_LIST_HEAD(&qman_ceetms[dcp_portal].sub_portals); -+ INIT_LIST_HEAD(&qman_ceetms[dcp_portal].lnis); -+ -+ /* Find Sub-portal range */ -+ range = of_get_property(node, "fsl,ceetm-sp-range", &ret); -+ if (!range) { -+ pr_err("No fsl,ceetm-sp-range in node %s\n", node->full_name); -+ return -EINVAL; -+ } -+ if (ret != 8) { -+ pr_err("fsl,ceetm-sp-range is not a 2-cell range in node %s\n", -+ node->full_name); -+ return -EINVAL; -+ } -+ -+ for (i = 0; i < be32_to_cpu(range[1]); i++) { -+ sp = kzalloc(sizeof(*sp), GFP_KERNEL); -+ if (!sp) { -+ pr_err("Can't alloc memory for sub-portal %d\n", -+ range[0] + i); -+ return -ENOMEM; -+ } -+ sp->idx = be32_to_cpu(range[0]) + i; -+ sp->dcp_idx = dcp_portal; -+ sp->is_claimed = 0; -+ list_add_tail(&sp->node, &qman_ceetms[dcp_portal].sub_portals); -+ sp++; -+ } -+ pr_debug("Qman: Reserve sub-portal %d:%d for CEETM %d\n", -+ be32_to_cpu(range[0]), be32_to_cpu(range[1]), dcp_portal); -+ qman_ceetms[dcp_portal].sp_range[0] = be32_to_cpu(range[0]); -+ qman_ceetms[dcp_portal].sp_range[1] = be32_to_cpu(range[1]); -+ -+ /* Find LNI range */ -+ range = of_get_property(node, "fsl,ceetm-lni-range", &ret); -+ if (!range) { -+ pr_err("No fsl,ceetm-lni-range in node %s\n", node->full_name); -+ return -EINVAL; -+ } -+ if (ret != 8) { -+ pr_err("fsl,ceetm-lni-range is not a 2-cell range in node %s\n", -+ node->full_name); -+ return -EINVAL; -+ } -+ -+ for (i = 0; i < be32_to_cpu(range[1]); i++) { -+ lni = kzalloc(sizeof(*lni), GFP_KERNEL); -+ if (!lni) { -+ pr_err("Can't 
alloc memory for LNI %d\n", -+ range[0] + i); -+ return -ENOMEM; -+ } -+ lni->idx = be32_to_cpu(range[0]) + i; -+ lni->dcp_idx = dcp_portal; -+ lni->is_claimed = 0; -+ INIT_LIST_HEAD(&lni->channels); -+ list_add_tail(&lni->node, &qman_ceetms[dcp_portal].lnis); -+ lni++; -+ } -+ pr_debug("Qman: Reserve LNI %d:%d for CEETM %d\n", -+ be32_to_cpu(range[0]), be32_to_cpu(range[1]), dcp_portal); -+ qman_ceetms[dcp_portal].lni_range[0] = be32_to_cpu(range[0]); -+ qman_ceetms[dcp_portal].lni_range[1] = be32_to_cpu(range[1]); -+ -+ /* Find CEETM channel range */ -+ range = of_get_property(node, "fsl,ceetm-channel-range", &ret); -+ if (!range) { -+ pr_err("No fsl,ceetm-channel-range in node %s\n", -+ node->full_name); -+ return -EINVAL; -+ } -+ if (ret != 8) { -+ pr_err("fsl,ceetm-channel-range is not a 2-cell range in node" -+ "%s\n", node->full_name); -+ return -EINVAL; -+ } -+ -+ if (dcp_portal == qm_dc_portal_fman0) -+ qman_seed_ceetm0_channel_range(be32_to_cpu(range[0]), be32_to_cpu(range[1])); -+ if (dcp_portal == qm_dc_portal_fman1) -+ qman_seed_ceetm1_channel_range(be32_to_cpu(range[0]), be32_to_cpu(range[1])); -+ pr_debug("Qman: The channel allocator of CEETM %d includes" -+ " range %d:%d\n", dcp_portal, be32_to_cpu(range[0]), be32_to_cpu(range[1])); -+ -+ /* Set CEETM PRES register */ -+ ret = qman_ceetm_set_prescaler(dcp_portal); -+ if (ret) -+ return ret; -+ return 0; -+} -+ -+static void qman_get_ip_revision(struct device_node *dn) -+{ -+ u16 ip_rev = 0; -+ u8 ip_cfg = QMAN_REV_CFG_0; -+ for_each_compatible_node(dn, NULL, "fsl,qman-portal") { -+ if (!of_device_is_available(dn)) -+ continue; -+ if (of_device_is_compatible(dn, "fsl,qman-portal-1.0") || -+ of_device_is_compatible(dn, "fsl,qman-portal-1.0.0")) { -+ pr_err("QMAN rev1.0 on P4080 rev1 is not supported!\n"); -+ BUG_ON(1); -+ } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.1") || -+ of_device_is_compatible(dn, "fsl,qman-portal-1.1.0")) { -+ ip_rev = QMAN_REV11; -+ qman_portal_max = 10; -+ } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.2") || -+ of_device_is_compatible(dn, "fsl,qman-portal-1.2.0")) { -+ ip_rev = QMAN_REV12; -+ qman_portal_max = 10; -+ } else if (of_device_is_compatible(dn, "fsl,qman-portal-2.0") || -+ of_device_is_compatible(dn, "fsl,qman-portal-2.0.0")) { -+ ip_rev = QMAN_REV20; -+ qman_portal_max = 3; -+ } else if (of_device_is_compatible(dn, -+ "fsl,qman-portal-3.0.0")) { -+ ip_rev = QMAN_REV30; -+ qman_portal_max = 50; -+ } else if (of_device_is_compatible(dn, -+ "fsl,qman-portal-3.0.1")) { -+ ip_rev = QMAN_REV30; -+ qman_portal_max = 25; -+ ip_cfg = QMAN_REV_CFG_1; -+ } else if (of_device_is_compatible(dn, -+ "fsl,qman-portal-3.1.0")) { -+ ip_rev = QMAN_REV31; -+ qman_portal_max = 50; -+ } else if (of_device_is_compatible(dn, -+ "fsl,qman-portal-3.1.1")) { -+ ip_rev = QMAN_REV31; -+ qman_portal_max = 25; -+ ip_cfg = QMAN_REV_CFG_1; -+ } else if (of_device_is_compatible(dn, -+ "fsl,qman-portal-3.1.2")) { -+ ip_rev = QMAN_REV31; -+ qman_portal_max = 18; -+ ip_cfg = QMAN_REV_CFG_2; -+ } else if (of_device_is_compatible(dn, -+ "fsl,qman-portal-3.1.3")) { -+ ip_rev = QMAN_REV31; -+ qman_portal_max = 10; -+ ip_cfg = QMAN_REV_CFG_3; -+ } else if (of_device_is_compatible(dn, -+ "fsl,qman-portal-3.2.0")) { -+ ip_rev = QMAN_REV32; -+ qman_portal_max = 10; -+ ip_cfg = QMAN_REV_CFG_3; // TODO: Verify for ls1043 -+ } else { -+ pr_warn("unknown QMan version in portal node," -+ "default to rev1.1\n"); -+ ip_rev = QMAN_REV11; -+ qman_portal_max = 10; -+ } -+ -+ if (!qman_ip_rev) { -+ if (ip_rev) { -+ 
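-+ /* Latch the revision reported by the first usable portal node; later portals that disagree only trigger the mismatch warning below. */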
qman_ip_rev = ip_rev; -+ qman_ip_cfg = ip_cfg; -+ } else { -+ pr_warn("unknown Qman version," -+ " default to rev1.1\n"); -+ qman_ip_rev = QMAN_REV11; -+ qman_ip_cfg = QMAN_REV_CFG_0; -+ } -+ } else if (ip_rev && (qman_ip_rev != ip_rev)) -+ pr_warn("Revision=0x%04x, but portal '%s' has" -+ " 0x%04x\n", -+ qman_ip_rev, dn->full_name, ip_rev); -+ if (qman_ip_rev == ip_rev) -+ break; -+ } -+} -+ -+/* Parse a portal node, perform generic mapping duties and return the config. It -+ * is not known at this stage for what purpose (or even if) the portal will be -+ * used. */ -+static struct qm_portal_config * __init parse_pcfg(struct device_node *node) -+{ -+ struct qm_portal_config *pcfg; -+ const u32 *index_p, *channel_p; -+ u32 index, channel; -+ int irq, ret; -+ resource_size_t len; -+ -+ pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL); -+ if (!pcfg) { -+ pr_err("can't allocate portal config"); -+ return NULL; -+ } -+ -+ /* -+ * This is a *horrible hack*, but the IOMMU/PAMU driver needs a -+ * 'struct device' in order to get the PAMU stashing setup and the QMan -+ * portal [driver] won't function at all without ring stashing -+ * -+ * Making the QMan portal driver nice and proper is part of the -+ * upstreaming effort -+ */ -+ pcfg->dev.bus = &platform_bus_type; -+ pcfg->dev.of_node = node; -+#ifdef CONFIG_FSL_PAMU -+ pcfg->dev.archdata.iommu_domain = NULL; -+#endif -+ -+ ret = of_address_to_resource(node, DPA_PORTAL_CE, -+ &pcfg->addr_phys[DPA_PORTAL_CE]); -+ if (ret) { -+ pr_err("Can't get %s property '%s'\n", node->full_name, -+ "reg::CE"); -+ goto err; -+ } -+ ret = of_address_to_resource(node, DPA_PORTAL_CI, -+ &pcfg->addr_phys[DPA_PORTAL_CI]); -+ if (ret) { -+ pr_err("Can't get %s property '%s'\n", node->full_name, -+ "reg::CI"); -+ goto err; -+ } -+ index_p = of_get_property(node, "cell-index", &ret); -+ if (!index_p || (ret != 4)) { -+ pr_err("Can't get %s property '%s'\n", node->full_name, -+ "cell-index"); -+ goto err; -+ } -+ index = be32_to_cpu(*index_p); -+ if (index >= qman_portal_max) { -+ pr_err("QMan portal index %d is beyond max (%d)\n", -+ index, qman_portal_max); -+ goto err; -+ } -+ -+ channel_p = of_get_property(node, "fsl,qman-channel-id", &ret); -+ if (!channel_p || (ret != 4)) { -+ pr_err("Can't get %s property '%s'\n", node->full_name, -+ "fsl,qman-channel-id"); -+ goto err; -+ } -+ channel = be32_to_cpu(*channel_p); -+ if (channel != (index + QM_CHANNEL_SWPORTAL0)) -+ pr_err("Warning: node %s has mismatched %s and %s\n", -+ node->full_name, "cell-index", "fsl,qman-channel-id"); -+ pcfg->public_cfg.channel = channel; -+ pcfg->public_cfg.cpu = -1; -+ irq = irq_of_parse_and_map(node, 0); -+ if (irq == 0) { -+ pr_err("Can't get %s property '%s'\n", node->full_name, -+ "interrupts"); -+ goto err; -+ } -+ pcfg->public_cfg.irq = irq; -+ pcfg->public_cfg.index = index; -+#ifdef CONFIG_FSL_QMAN_CONFIG -+ /* We need the same LIODN offset for all portals */ -+ qman_liodn_fixup(pcfg->public_cfg.channel); -+#endif -+ -+ len = resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]); -+ if (len != (unsigned long)len) -+ goto err; -+ -+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) -+ pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_cache_ns( -+ pcfg->addr_phys[DPA_PORTAL_CE].start, -+ resource_size(&pcfg->addr_phys[DPA_PORTAL_CE])); -+ -+ pcfg->addr_virt[DPA_PORTAL_CI] = ioremap( -+ pcfg->addr_phys[DPA_PORTAL_CI].start, -+ resource_size(&pcfg->addr_phys[DPA_PORTAL_CI])); -+#else -+ pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot( -+ pcfg->addr_phys[DPA_PORTAL_CE].start, -+ (unsigned long)len, -+ 0); -+ 
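-+ /* The cache-inhibited (CI) register region, by contrast, must be mapped guarded and non-cacheable. */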
pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot( -+ pcfg->addr_phys[DPA_PORTAL_CI].start, -+ resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]), -+ _PAGE_GUARDED | _PAGE_NO_CACHE); -+#endif -+ return pcfg; -+err: -+ kfree(pcfg); -+ return NULL; -+} -+ -+static struct qm_portal_config *get_pcfg(struct list_head *list) -+{ -+ struct qm_portal_config *pcfg; -+ if (list_empty(list)) -+ return NULL; -+ pcfg = list_entry(list->prev, struct qm_portal_config, list); -+ list_del(&pcfg->list); -+ return pcfg; -+} -+ -+static struct qm_portal_config *get_pcfg_idx(struct list_head *list, u32 idx) -+{ -+ struct qm_portal_config *pcfg; -+ if (list_empty(list)) -+ return NULL; -+ list_for_each_entry(pcfg, list, list) { -+ if (pcfg->public_cfg.index == idx) { -+ list_del(&pcfg->list); -+ return pcfg; -+ } -+ } -+ return NULL; -+} -+ -+static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) -+{ -+#ifdef CONFIG_FSL_PAMU -+ int ret; -+ int window_count = 1; -+ struct iommu_domain_geometry geom_attr; -+ struct pamu_stash_attribute stash_attr; -+ -+ pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type); -+ if (!pcfg->iommu_domain) { -+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_alloc() failed", -+ __func__); -+ goto _no_iommu; -+ } -+ geom_attr.aperture_start = 0; -+ geom_attr.aperture_end = -+ ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1; -+ geom_attr.force_aperture = true; -+ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY, -+ &geom_attr); -+ if (ret < 0) { -+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d", -+ __func__, ret); -+ goto _iommu_domain_free; -+ } -+ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS, -+ &window_count); -+ if (ret < 0) { -+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d", -+ __func__, ret); -+ goto _iommu_domain_free; -+ } -+ stash_attr.cpu = cpu; -+ stash_attr.cache = PAMU_ATTR_CACHE_L1; -+ /* set stash information for the window */ -+ stash_attr.window = 0; -+ ret = iommu_domain_set_attr(pcfg->iommu_domain, -+ DOMAIN_ATTR_FSL_PAMU_STASH, -+ &stash_attr); -+ if (ret < 0) { -+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d", -+ __func__, ret); -+ goto _iommu_domain_free; -+ } -+ ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36, -+ IOMMU_READ | IOMMU_WRITE); -+ if (ret < 0) { -+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_window_enable() = %d", -+ __func__, ret); -+ goto _iommu_domain_free; -+ } -+ ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev); -+ if (ret < 0) { -+ pr_err(KBUILD_MODNAME ":%s(): iommu_device_attach() = %d", -+ __func__, ret); -+ goto _iommu_domain_free; -+ } -+ ret = iommu_domain_set_attr(pcfg->iommu_domain, -+ DOMAIN_ATTR_FSL_PAMU_ENABLE, -+ &window_count); -+ if (ret < 0) { -+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d", -+ __func__, ret); -+ goto _iommu_detach_device; -+ } -+ -+_no_iommu: -+#endif -+#ifdef CONFIG_FSL_QMAN_CONFIG -+ if (qman_set_sdest(pcfg->public_cfg.channel, cpu)) -+#endif -+ pr_warn("Failed to set QMan portal's stash request queue\n"); -+ -+ return; -+ -+#ifdef CONFIG_FSL_PAMU -+_iommu_detach_device: -+ iommu_detach_device(pcfg->iommu_domain, NULL); -+_iommu_domain_free: -+ iommu_domain_free(pcfg->iommu_domain); -+#endif -+} -+ -+struct qm_portal_config *qm_get_unused_portal_idx(u32 idx) -+{ -+ struct qm_portal_config *ret; -+ spin_lock(&unused_pcfgs_lock); -+ if (idx == QBMAN_ANY_PORTAL_IDX) -+ ret = get_pcfg(&unused_pcfgs); -+ else -+ ret = get_pcfg_idx(&unused_pcfgs, idx); -+ 
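get_pcfg() and get_pcfg_idx() above are a tiny allocator over a spinlock-protected free list, and qm_put_unused_portal() just below is its release half. The same claim/release shape in isolation, with an invented item type standing in for qm_portal_config:

/* Standalone illustration of the unused-portal free-list pattern;
 * "item" and the function names are made up for this sketch. */
struct item {
	struct list_head list;
	u32 index;
};

static LIST_HEAD(free_items);
static DEFINE_SPINLOCK(free_lock);

/* pass (u32)-1 to claim any item, mirroring QBMAN_ANY_PORTAL_IDX */
static struct item *claim_item(u32 idx)
{
	struct item *it, *found = NULL;

	spin_lock(&free_lock);
	list_for_each_entry(it, &free_items, list) {
		if (idx == (u32)-1 || it->index == idx) {
			list_del(&it->list);
			found = it;
			break;
		}
	}
	spin_unlock(&free_lock);
	return found;
}

static void release_item(struct item *it)
{
	spin_lock(&free_lock);
	list_add(&it->list, &free_items);
	spin_unlock(&free_lock);
}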
spin_unlock(&unused_pcfgs_lock); -+ /* Bind stashing LIODNs to the CPU we are currently executing on, and -+ * set the portal to use the stashing request queue corresponding to the -+ * cpu as well. The user-space driver assumption is that the pthread must -+ * already be affine to a single cpu before opening a portal. If that -+ * check is circumvented, the only risk is a performance degradation - -+ * stashing will go to whatever cpu they happened to be running on when -+ * opening the device file, and if that isn't the cpu they subsequently -+ * bind to and do their polling on, tough. */ -+ if (ret) -+ portal_set_cpu(ret, hard_smp_processor_id()); -+ return ret; -+} -+ -+struct qm_portal_config *qm_get_unused_portal(void) -+{ -+ return qm_get_unused_portal_idx(QBMAN_ANY_PORTAL_IDX); -+} -+ -+void qm_put_unused_portal(struct qm_portal_config *pcfg) -+{ -+ spin_lock(&unused_pcfgs_lock); -+ list_add(&pcfg->list, &unused_pcfgs); -+ spin_unlock(&unused_pcfgs_lock); -+} -+ -+static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg) -+{ -+ struct qman_portal *p; -+ -+ pcfg->iommu_domain = NULL; -+ portal_set_cpu(pcfg, pcfg->public_cfg.cpu); -+ p = qman_create_affine_portal(pcfg, NULL); -+ if (p) { -+ u32 irq_sources = 0; -+ /* Determine what should be interrupt-vs-poll driven */ -+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW -+ irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI | -+ QM_PIRQ_CSCI | QM_PIRQ_CCSCI; -+#endif -+#ifdef CONFIG_FSL_DPA_PIRQ_FAST -+ irq_sources |= QM_PIRQ_DQRI; -+#endif -+ qman_p_irqsource_add(p, irq_sources); -+ pr_info("Qman portal %sinitialised, cpu %d\n", -+ pcfg->public_cfg.is_shared ? "(shared) " : "", -+ pcfg->public_cfg.cpu); -+ } else -+ pr_crit("Qman portal failure on cpu %d\n", -+ pcfg->public_cfg.cpu); -+ return p; -+} -+ -+static void init_slave(int cpu) -+{ -+ struct qman_portal *p; -+ struct cpumask oldmask = *tsk_cpus_allowed(current); -+ set_cpus_allowed_ptr(current, get_cpu_mask(cpu)); -+ p = qman_create_affine_slave(shared_portals[shared_portals_idx++], cpu); -+ if (!p) -+ pr_err("Qman slave portal failure on cpu %d\n", cpu); -+ else -+ pr_info("Qman portal %sinitialised, cpu %d\n", "(slave) ", cpu); -+ set_cpus_allowed_ptr(current, &oldmask); -+ if (shared_portals_idx >= num_shared_portals) -+ shared_portals_idx = 0; -+} -+ -+static struct cpumask want_unshared __initdata; -+static struct cpumask want_shared __initdata; -+ -+static int __init parse_qportals(char *str) -+{ -+ return parse_portals_bootarg(str, &want_shared, &want_unshared, -+ "qportals"); -+} -+__setup("qportals=", parse_qportals); -+ -+static void qman_portal_update_sdest(const struct qm_portal_config *pcfg, -+ unsigned int cpu) -+{ -+#ifdef CONFIG_FSL_PAMU -+ struct pamu_stash_attribute stash_attr; -+ int ret; -+ -+ if (pcfg->iommu_domain) { -+ stash_attr.cpu = cpu; -+ stash_attr.cache = PAMU_ATTR_CACHE_L1; -+ /* set stash information for the window */ -+ stash_attr.window = 0; -+ ret = iommu_domain_set_attr(pcfg->iommu_domain, -+ DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr); -+ if (ret < 0) { -+ pr_err("Failed to update pamu stash setting\n"); -+ return; -+ } -+ } -+#endif -+#ifdef CONFIG_FSL_QMAN_CONFIG -+ if (qman_set_sdest(pcfg->public_cfg.channel, cpu)) -+ pr_warn("Failed to update portal's stash request queue\n"); -+#endif -+} -+ -+static void qman_offline_cpu(unsigned int cpu) -+{ -+ struct qman_portal *p; -+ const struct qm_portal_config *pcfg; -+ p = (struct qman_portal *)affine_portals[cpu]; -+ if (p) { -+ pcfg = qman_get_qm_portal_config(p); -+ if (pcfg) { -+
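init_slave() above gets onto the target cpu by temporarily rewriting the calling thread's affinity mask and restoring it afterwards. On kernels of this vintage the same run-on-cpu-N effect can also be written with work_on_cpu(), which runs the callback on a cpu-bound kworker instead; a sketch under that assumption, with setup_slave_on_cpu an invented name:

/* Hypothetical alternative to the rebind-and-restore dance in
 * init_slave(); not how the driver actually does it. */
static long setup_slave_on_cpu(void *redirect)
{
	/* work_on_cpu() guarantees this runs on the requested cpu */
	int cpu = smp_processor_id();

	return qman_create_affine_slave(redirect, cpu) ? 0 : -ENODEV;
}

/* caller fragment: */
long err = work_on_cpu(cpu, setup_slave_on_cpu, redirect);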
irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0)); -+ qman_portal_update_sdest(pcfg, 0); -+ } -+ } -+} -+ -+#ifdef CONFIG_HOTPLUG_CPU -+static void qman_online_cpu(unsigned int cpu) -+{ -+ struct qman_portal *p; -+ const struct qm_portal_config *pcfg; -+ p = (struct qman_portal *)affine_portals[cpu]; -+ if (p) { -+ pcfg = qman_get_qm_portal_config(p); -+ if (pcfg) { -+ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu)); -+ qman_portal_update_sdest(pcfg, cpu); -+ } -+ } -+} -+ -+static int qman_hotplug_cpu_callback(struct notifier_block *nfb, -+ unsigned long action, void *hcpu) -+{ -+ unsigned int cpu = (unsigned long)hcpu; -+ -+ switch (action) { -+ case CPU_ONLINE: -+ case CPU_ONLINE_FROZEN: -+ qman_online_cpu(cpu); -+ break; -+ case CPU_DOWN_PREPARE: -+ case CPU_DOWN_PREPARE_FROZEN: -+ qman_offline_cpu(cpu); -+ break; -+ default: -+ break; -+ } -+ return NOTIFY_OK; -+} -+ -+static struct notifier_block qman_hotplug_cpu_notifier = { -+ .notifier_call = qman_hotplug_cpu_callback, -+}; -+#endif /* CONFIG_HOTPLUG_CPU */ -+ -+__init int qman_init(void) -+{ -+ struct cpumask slave_cpus; -+ struct cpumask unshared_cpus = *cpu_none_mask; -+ struct cpumask shared_cpus = *cpu_none_mask; -+ LIST_HEAD(unshared_pcfgs); -+ LIST_HEAD(shared_pcfgs); -+ struct device_node *dn; -+ struct qm_portal_config *pcfg; -+ struct qman_portal *p; -+ int cpu, ret; -+ const u32 *clk; -+ struct cpumask offline_cpus; -+ -+ /* Initialise the Qman (CCSR) device */ -+ for_each_compatible_node(dn, NULL, "fsl,qman") { -+ if (!qman_init_ccsr(dn)) -+ pr_info("Qman err interrupt handler present\n"); -+ else -+ pr_err("Qman CCSR setup failed\n"); -+ -+ clk = of_get_property(dn, "clock-frequency", NULL); -+ if (!clk) -+ pr_warn("Can't find Qman clock frequency\n"); -+ else -+ qman_clk = be32_to_cpu(*clk); -+ } -+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP -+ /* Setup lookup table for FQ demux */ -+ ret = qman_setup_fq_lookup_table(get_qman_fqd_size()/64); -+ if (ret) -+ return ret; -+#endif -+ -+ /* Get qman ip revision */ -+ qman_get_ip_revision(dn); -+ if ((qman_ip_rev & 0xff00) >= QMAN_REV30) { -+ qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3; -+ qm_channel_caam = QMAN_CHANNEL_CAAM_REV3; -+ qm_channel_pme = QMAN_CHANNEL_PME_REV3; -+ } -+ -+ if ((qman_ip_rev == QMAN_REV31) && (qman_ip_cfg == QMAN_REV_CFG_2)) -+ qm_channel_dce = QMAN_CHANNEL_DCE_QMANREV312; -+ -+ /* -+ * Parse the ceetm node to get how many ceetm instances are supported -+ * on the current silicon. num_ceetms must be confirmed before portals -+ * are initialized. -+ */ -+ num_ceetms = 0; -+ for_each_compatible_node(dn, NULL, "fsl,qman-ceetm") -+ num_ceetms++; -+ -+ /* Parse pool channels into the SDQCR mask. (Must happen before portals -+ * are initialised.) */ -+ for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") { -+ ret = fsl_pool_channel_range_sdqcr(dn); -+ if (ret) -+ return ret; -+ } -+ -+ memset(affine_portals, 0, sizeof(void *) * num_possible_cpus()); -+ /* Initialise portals.
See bman_driver.c for comments */ -+ for_each_compatible_node(dn, NULL, "fsl,qman-portal") { -+ if (!of_device_is_available(dn)) -+ continue; -+ pcfg = parse_pcfg(dn); -+ if (pcfg) { -+ pcfg->public_cfg.pools = pools_sdqcr; -+ list_add_tail(&pcfg->list, &unused_pcfgs); -+ } -+ } -+ for_each_possible_cpu(cpu) { -+ if (cpumask_test_cpu(cpu, &want_shared)) { -+ pcfg = get_pcfg(&unused_pcfgs); -+ if (!pcfg) -+ break; -+ pcfg->public_cfg.cpu = cpu; -+ list_add_tail(&pcfg->list, &shared_pcfgs); -+ cpumask_set_cpu(cpu, &shared_cpus); -+ } -+ if (cpumask_test_cpu(cpu, &want_unshared)) { -+ if (cpumask_test_cpu(cpu, &shared_cpus)) -+ continue; -+ pcfg = get_pcfg(&unused_pcfgs); -+ if (!pcfg) -+ break; -+ pcfg->public_cfg.cpu = cpu; -+ list_add_tail(&pcfg->list, &unshared_pcfgs); -+ cpumask_set_cpu(cpu, &unshared_cpus); -+ } -+ } -+ if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) { -+ for_each_online_cpu(cpu) { -+ pcfg = get_pcfg(&unused_pcfgs); -+ if (!pcfg) -+ break; -+ pcfg->public_cfg.cpu = cpu; -+ list_add_tail(&pcfg->list, &unshared_pcfgs); -+ cpumask_set_cpu(cpu, &unshared_cpus); -+ } -+ } -+ cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus); -+ cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus); -+ if (cpumask_empty(&slave_cpus)) { -+ if (!list_empty(&shared_pcfgs)) { -+ cpumask_or(&unshared_cpus, &unshared_cpus, -+ &shared_cpus); -+ cpumask_clear(&shared_cpus); -+ list_splice_tail(&shared_pcfgs, &unshared_pcfgs); -+ INIT_LIST_HEAD(&shared_pcfgs); -+ } -+ } else { -+ if (list_empty(&shared_pcfgs)) { -+ pcfg = get_pcfg(&unshared_pcfgs); -+ if (!pcfg) { -+ pr_crit("No QMan portals available!\n"); -+ return 0; -+ } -+ cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus); -+ cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus); -+ list_add_tail(&pcfg->list, &shared_pcfgs); -+ } -+ } -+ list_for_each_entry(pcfg, &unshared_pcfgs, list) { -+ pcfg->public_cfg.is_shared = 0; -+ p = init_pcfg(pcfg); -+ if (!p) { -+ pr_crit("Unable to configure portals\n"); -+ return 0; -+ } -+ } -+ list_for_each_entry(pcfg, &shared_pcfgs, list) { -+ pcfg->public_cfg.is_shared = 1; -+ p = init_pcfg(pcfg); -+ if (p) -+ shared_portals[num_shared_portals++] = p; -+ } -+ if (!cpumask_empty(&slave_cpus)) -+ for_each_cpu(cpu, &slave_cpus) -+ init_slave(cpu); -+ pr_info("Qman portals initialised\n"); -+ cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask); -+ for_each_cpu(cpu, &offline_cpus) -+ qman_offline_cpu(cpu); -+#ifdef CONFIG_HOTPLUG_CPU -+ register_hotcpu_notifier(&qman_hotplug_cpu_notifier); -+#endif -+ return 0; -+} -+ -+__init int qman_resource_init(void) -+{ -+ struct device_node *dn; -+ int ret; -+ -+ /* Initialise FQID allocation ranges */ -+ for_each_compatible_node(dn, NULL, "fsl,fqid-range") { -+ ret = fsl_fqid_range_init(dn); -+ if (ret) -+ return ret; -+ } -+ /* Initialise CGRID allocation ranges */ -+ for_each_compatible_node(dn, NULL, "fsl,cgrid-range") { -+ ret = fsl_cgrid_range_init(dn); -+ if (ret) -+ return ret; -+ } -+ /* Parse pool channels into the allocator. (Must happen after portals -+ * are initialised.) 
*/ -+ for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") { -+ ret = fsl_pool_channel_range_init(dn); -+ if (ret) -+ return ret; -+ } -+ -+ /* Parse CEETM */ -+ for_each_compatible_node(dn, NULL, "fsl,qman-ceetm") { -+ ret = fsl_ceetm_init(dn); -+ if (ret) -+ return ret; -+ } -+ return 0; -+} -+ -+#ifdef CONFIG_SUSPEND -+void suspend_unused_qportal(void) -+{ -+ struct qm_portal_config *pcfg; -+ -+ if (list_empty(&unused_pcfgs)) -+ return; -+ -+ list_for_each_entry(pcfg, &unused_pcfgs, list) { -+#ifdef CONFIG_PM_DEBUG -+ pr_info("Need to save qportal %d\n", pcfg->public_cfg.index); -+#endif -+ /* save isdr, disable all via isdr, clear isr */ -+ pcfg->saved_isdr = -+ __raw_readl(pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08); -+ __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] + -+ 0xe08); -+ __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] + -+ 0xe00); -+ } -+ return; -+} -+ -+void resume_unused_qportal(void) -+{ -+ struct qm_portal_config *pcfg; -+ -+ if (list_empty(&unused_pcfgs)) -+ return; -+ -+ list_for_each_entry(pcfg, &unused_pcfgs, list) { -+#ifdef CONFIG_PM_DEBUG -+ pr_info("Need to resume qportal %d\n", pcfg->public_cfg.index); -+#endif -+ /* restore isdr */ -+ __raw_writel(pcfg->saved_isdr, -+ pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08); -+ } -+ return; -+} -+#endif -+ ---- /dev/null -+++ b/drivers/staging/fsl_qbman/qman_high.c -@@ -0,0 +1,5568 @@ -+/* Copyright 2008-2012 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include "qman_low.h" -+ -+/* Compilation constants */ -+#define DQRR_MAXFILL 15 -+#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */ -+#define IRQNAME "QMan portal %d" -+#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */ -+ -+/* Divide 'n' by 'd', rounding down if 'r' is negative, rounding up if it's -+ * positive, and rounding to the closest value if it's zero. NB, this macro -+ * implicitly upgrades parameters to unsigned 64-bit, so feed it with types -+ * that are compatible with this. NB, these arguments should not be expressions -+ * unless it is safe for them to be evaluated multiple times. Eg. do not pass -+ * in "some_value++" as a parameter to the macro! */ -+#define ROUNDING(n, d, r) \ -+ (((r) < 0) ? div64_u64((n), (d)) : \ -+ (((r) > 0) ? div64_u64(((n) + (d) - 1), (d)) : \ -+ div64_u64(((n) + ((d) / 2)), (d)))) -+ -+/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about -+ * inter-processor locking only. Note, FQLOCK() is always called either under a -+ * local_irq_save() or from interrupt context - hence there's no need for irq -+ * protection (and indeed, attempting to nest irq-protection doesn't work, as -+ * the "irq en/disable" machinery isn't recursive...). */ -+#define FQLOCK(fq) \ -+ do { \ -+ struct qman_fq *__fq478 = (fq); \ -+ if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \ -+ spin_lock(&__fq478->fqlock); \ -+ } while (0) -+#define FQUNLOCK(fq) \ -+ do { \ -+ struct qman_fq *__fq478 = (fq); \ -+ if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \ -+ spin_unlock(&__fq478->fqlock); \ -+ } while (0) -+ -+static inline void fq_set(struct qman_fq *fq, u32 mask) -+{ -+ set_bits(mask, &fq->flags); -+} -+static inline void fq_clear(struct qman_fq *fq, u32 mask) -+{ -+ clear_bits(mask, &fq->flags); -+} -+static inline int fq_isset(struct qman_fq *fq, u32 mask) -+{ -+ return fq->flags & mask; -+} -+static inline int fq_isclear(struct qman_fq *fq, u32 mask) -+{ -+ return !(fq->flags & mask); -+} -+ -+struct qman_portal { -+ struct qm_portal p; -+ unsigned long bits; /* PORTAL_BITS_*** - dynamic, strictly internal */ -+ unsigned long irq_sources; -+ u32 use_eqcr_ci_stashing; -+ u32 slowpoll; /* only used when interrupts are off */ -+ struct qman_fq *vdqcr_owned; /* only 1 volatile dequeue at a time */ -+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC -+ struct qman_fq *eqci_owned; /* only 1 enqueue WAIT_SYNC at a time */ -+#endif -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+ raw_spinlock_t sharing_lock; /* only used if is_shared */ -+ int is_shared; -+ struct qman_portal *sharing_redirect; -+#endif -+ u32 sdqcr; -+ int dqrr_disable_ref; -+ /* A portal-specific handler for DCP ERNs. If this is NULL, the global -+ * handler is called instead. */ -+ qman_cb_dc_ern cb_dc_ern; -+ /* When the cpu-affine portal is activated, this is non-NULL */ -+ const struct qm_portal_config *config; -+ /* This is needed for providing a non-NULL device to dma_map_***() */ -+ struct platform_device *pdev; -+ struct dpa_rbtree retire_table; -+ char irqname[MAX_IRQNAME]; -+ /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */ -+ struct qman_cgrs *cgrs; -+ /* linked-list of CSCN handlers. */ -+ struct list_head cgr_cbs; -+ /* list lock */ -+ spinlock_t cgr_lock; -+ /* 2-element array. ccgrs[0] is mask, ccgrs[1] is snapshot. */ -+ struct qman_ccgrs *ccgrs[QMAN_CEETM_MAX]; -+ /* 256-element array, each is a linked-list of CCSCN handlers. 
*/ -+ struct list_head ccgr_cbs[QMAN_CEETM_MAX]; -+ /* list lock */ -+ spinlock_t ccgr_lock; -+ /* track if memory was allocated by the driver */ -+ u8 alloced; -+ /* power management data */ -+ u32 save_isdr; -+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ -+ /* Keep a shadow copy of the DQRR on LE systems -+ as the SW needs to do byteswaps of read-only -+ memory. Must be aligned to the size of the -+ ring to ensure easy index calculations based -+ on address */ -+ struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE] -+ __attribute__((aligned(512))); -+#endif -+}; -+ -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+#define PORTAL_IRQ_LOCK(p, irqflags) \ -+ do { \ -+ if ((p)->is_shared) \ -+ raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \ -+ else \ -+ local_irq_save(irqflags); \ -+ } while (0) -+#define PORTAL_IRQ_UNLOCK(p, irqflags) \ -+ do { \ -+ if ((p)->is_shared) \ -+ raw_spin_unlock_irqrestore(&(p)->sharing_lock, \ -+ irqflags); \ -+ else \ -+ local_irq_restore(irqflags); \ -+ } while (0) -+#else -+#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags) -+#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags) -+#endif -+ -+/* Global handler for DCP ERNs. Used when the portal receiving the message does -+ * not have a portal-specific handler. */ -+static qman_cb_dc_ern cb_dc_ern; -+ -+static cpumask_t affine_mask; -+static DEFINE_SPINLOCK(affine_mask_lock); -+static u16 affine_channels[NR_CPUS]; -+static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal); -+void *affine_portals[NR_CPUS]; -+ -+/* "raw" gets the cpu-local struct whether it's a redirect or not. */ -+static inline struct qman_portal *get_raw_affine_portal(void) -+{ -+ return &get_cpu_var(qman_affine_portal); -+} -+/* For ops that can redirect, this obtains the portal to use */ -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+static inline struct qman_portal *get_affine_portal(void) -+{ -+ struct qman_portal *p = get_raw_affine_portal(); -+ if (p->sharing_redirect) -+ return p->sharing_redirect; -+ return p; -+} -+#else -+#define get_affine_portal() get_raw_affine_portal() -+#endif -+/* For every "get", there must be a "put" */ -+static inline void put_affine_portal(void) -+{ -+ put_cpu_var(qman_affine_portal); -+} -+/* Exception: poll functions assume the caller is cpu-affine and in no risk of -+ * re-entrance, which are the two reasons we usually use the get/put_cpu_var() -+ * semantic - ie. to disable pre-emption. Some use-cases expect the execution -+ * context to remain as non-atomic during poll-triggered callbacks as it was -+ * when the poll API was first called (eg. NAPI), so we go out of our way in -+ * this case to not disable pre-emption. */ -+static inline struct qman_portal *get_poll_portal(void) -+{ -+ return &get_cpu_var(qman_affine_portal); -+} -+#define put_poll_portal() -+ -+/* This gives a FQID->FQ lookup to cover the fact that we can't directly demux -+ * retirement notifications (the fact they are sometimes h/w-consumed means that -+ * contextB isn't always a s/w demux - and as we can't know which case it is -+ * when looking at the notification, we have to use the slow lookup for all of -+ * them). NB, it's possible to have multiple FQ objects refer to the same FQID -+ * (though at most one of them should be the consumer), so this table isn't for -+ * all FQs - FQs are added when retirement commands are issued, and removed when -+ * they complete, which also massively reduces the size of this table.
*/ -+IMPLEMENT_DPA_RBTREE(fqtree, struct qman_fq, node, fqid); -+ -+/* This is what everything can wait on, even if it migrates to a different cpu -+ * to the one whose affine portal it is waiting on. */ -+static DECLARE_WAIT_QUEUE_HEAD(affine_queue); -+ -+static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq) -+{ -+ int ret = fqtree_push(&p->retire_table, fq); -+ if (ret) -+ pr_err("ERROR: double FQ-retirement %d\n", fq->fqid); -+ return ret; -+} -+ -+static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq) -+{ -+ fqtree_del(&p->retire_table, fq); -+} -+ -+static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid) -+{ -+ return fqtree_find(&p->retire_table, fqid); -+} -+ -+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP -+static void **qman_fq_lookup_table; -+static size_t qman_fq_lookup_table_size; -+ -+int qman_setup_fq_lookup_table(size_t num_entries) -+{ -+ num_entries++; -+ /* Allocate 1 more entry since the first entry is not used */ -+ qman_fq_lookup_table = vzalloc(num_entries * sizeof(void *)); -+ if (!qman_fq_lookup_table) { -+ pr_err("QMan: Could not allocate fq lookup table\n"); -+ return -ENOMEM; -+ } -+ qman_fq_lookup_table_size = num_entries; -+ pr_info("QMan: Allocated lookup table at %p, entry count %lu\n", -+ qman_fq_lookup_table, -+ (unsigned long)qman_fq_lookup_table_size); -+ return 0; -+} -+ -+/* global structure that maintains fq object mapping */ -+static DEFINE_SPINLOCK(fq_hash_table_lock); -+ -+static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq) -+{ -+ u32 i; -+ -+ spin_lock(&fq_hash_table_lock); -+ /* Can't use index zero because this has special meaning -+ * in context_b field. */ -+ for (i = 1; i < qman_fq_lookup_table_size; i++) { -+ if (qman_fq_lookup_table[i] == NULL) { -+ *entry = i; -+ qman_fq_lookup_table[i] = fq; -+ spin_unlock(&fq_hash_table_lock); -+ return 0; -+ } -+ } -+ spin_unlock(&fq_hash_table_lock); -+ return -ENOMEM; -+} -+ -+static void clear_fq_table_entry(u32 entry) -+{ -+ spin_lock(&fq_hash_table_lock); -+ BUG_ON(entry >= qman_fq_lookup_table_size); -+ qman_fq_lookup_table[entry] = NULL; -+ spin_unlock(&fq_hash_table_lock); -+} -+ -+static inline struct qman_fq *get_fq_table_entry(u32 entry) -+{ -+ BUG_ON(entry >= qman_fq_lookup_table_size); -+ return qman_fq_lookup_table[entry]; -+} -+#endif -+ -+static inline void cpu_to_hw_fqd(struct qm_fqd *fqd) -+{ -+ /* Byteswap the FQD to HW format */ -+ fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl); -+ fqd->dest_wq = cpu_to_be16(fqd->dest_wq); -+ fqd->ics_cred = cpu_to_be16(fqd->ics_cred); -+ fqd->context_b = cpu_to_be32(fqd->context_b); -+ fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque); -+} -+ -+static inline void hw_fqd_to_cpu(struct qm_fqd *fqd) -+{ -+ /* Byteswap the FQD to CPU format */ -+ fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl); -+ fqd->dest_wq = be16_to_cpu(fqd->dest_wq); -+ fqd->ics_cred = be16_to_cpu(fqd->ics_cred); -+ fqd->context_b = be32_to_cpu(fqd->context_b); -+ fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque); -+} -+ -+/* Swap a 40 bit address */ -+static inline u64 cpu_to_be40(u64 in) -+{ -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ return in; -+#else -+ u64 out = 0; -+ u8 *p = (u8 *) &out; -+ p[0] = in >> 32; -+ p[1] = in >> 24; -+ p[2] = in >> 16; -+ p[3] = in >> 8; -+ p[4] = in >> 0; -+ return out; -+#endif -+} -+static inline u64 be40_to_cpu(u64 in) -+{ -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ return in; -+#else -+ u64 out = 0; -+ u8 *pout = (u8 *) &out; -+ u8 *pin = (u8 *) &in; -+ pout[0] =
pin[4]; -+ pout[1] = pin[3]; -+ pout[2] = pin[2]; -+ pout[3] = pin[1]; -+ pout[4] = pin[0]; -+ return out; -+#endif -+} -+ -+/* Swap a 24 bit value */ -+static inline u32 cpu_to_be24(u32 in) -+{ -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ return in; -+#else -+ u32 out = 0; -+ u8 *p = (u8 *) &out; -+ p[0] = in >> 16; -+ p[1] = in >> 8; -+ p[2] = in >> 0; -+ return out; -+#endif -+} -+ -+static inline u32 be24_to_cpu(u32 in) -+{ -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ return in; -+#else -+ u32 out = 0; -+ u8 *pout = (u8 *) &out; -+ u8 *pin = (u8 *) &in; -+ pout[0] = pin[2]; -+ pout[1] = pin[1]; -+ pout[2] = pin[0]; -+ return out; -+#endif -+} -+ -+static inline u64 be48_to_cpu(u64 in) -+{ -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ return in; -+#else -+ u64 out = 0; -+ u8 *pout = (u8 *) &out; -+ u8 *pin = (u8 *) &in; -+ -+ pout[0] = pin[5]; -+ pout[1] = pin[4]; -+ pout[2] = pin[3]; -+ pout[3] = pin[2]; -+ pout[4] = pin[1]; -+ pout[5] = pin[0]; -+ return out; -+#endif -+} -+static inline void cpu_to_hw_fd(struct qm_fd *fd) -+{ -+ fd->addr = cpu_to_be40(fd->addr); -+ fd->status = cpu_to_be32(fd->status); -+ fd->opaque = cpu_to_be32(fd->opaque); -+} -+ -+static inline void hw_fd_to_cpu(struct qm_fd *fd) -+{ -+ fd->addr = be40_to_cpu(fd->addr); -+ fd->status = be32_to_cpu(fd->status); -+ fd->opaque = be32_to_cpu(fd->opaque); -+} -+ -+static inline void hw_cq_query_to_cpu(struct qm_mcr_ceetm_cq_query *cq_query) -+{ -+ cq_query->ccgid = be16_to_cpu(cq_query->ccgid); -+ cq_query->state = be16_to_cpu(cq_query->state); -+ cq_query->pfdr_hptr = be24_to_cpu(cq_query->pfdr_hptr); -+ cq_query->pfdr_tptr = be24_to_cpu(cq_query->pfdr_tptr); -+ cq_query->od1_xsfdr = be16_to_cpu(cq_query->od1_xsfdr); -+ cq_query->od2_xsfdr = be16_to_cpu(cq_query->od2_xsfdr); -+ cq_query->od3_xsfdr = be16_to_cpu(cq_query->od3_xsfdr); -+ cq_query->od4_xsfdr = be16_to_cpu(cq_query->od4_xsfdr); -+ cq_query->od5_xsfdr = be16_to_cpu(cq_query->od5_xsfdr); -+ cq_query->od6_xsfdr = be16_to_cpu(cq_query->od6_xsfdr); -+ cq_query->ra1_xsfdr = be16_to_cpu(cq_query->ra1_xsfdr); -+ cq_query->ra2_xsfdr = be16_to_cpu(cq_query->ra2_xsfdr); -+ cq_query->frm_cnt = be24_to_cpu(cq_query->frm_cnt); -+} -+ -+static inline void hw_ccgr_query_to_cpu(struct qm_mcr_ceetm_ccgr_query *ccgr_q) -+{ -+ int i; -+ -+ ccgr_q->cm_query.cscn_targ_dcp = -+ be16_to_cpu(ccgr_q->cm_query.cscn_targ_dcp); -+ ccgr_q->cm_query.i_cnt = be40_to_cpu(ccgr_q->cm_query.i_cnt); -+ ccgr_q->cm_query.a_cnt = be40_to_cpu(ccgr_q->cm_query.a_cnt); -+ for (i = 0; i < ARRAY_SIZE(ccgr_q->cm_query.cscn_targ_swp); i++) -+ ccgr_q->cm_query.cscn_targ_swp[i] = -+ be32_to_cpu(ccgr_q->cm_query.cscn_targ_swp[i]); -+} -+ -+/* In the case that slow- and fast-path handling are both done by qman_poll() -+ * (ie. because there is no interrupt handling), we ought to balance how often -+ * we do the fast-path poll versus the slow-path poll. We'll use two decrementer -+ * sources, so we call the fast poll 'n' times before calling the slow poll -+ * once. The idle decrementer constant is used when the last slow-poll detected -+ * no work to do, and the busy decrementer constant when the last slow-poll had -+ * work to do.
*/ -+#define SLOW_POLL_IDLE 1000 -+#define SLOW_POLL_BUSY 10 -+static u32 __poll_portal_slow(struct qman_portal *p, u32 is); -+static inline unsigned int __poll_portal_fast(struct qman_portal *p, -+ unsigned int poll_limit); -+ -+/* Portal interrupt handler */ -+static irqreturn_t portal_isr(__always_unused int irq, void *ptr) -+{ -+ struct qman_portal *p = ptr; -+ /* -+ * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because -+ * it could race against a Query Congestion State command also given -+ * as part of the handling of this interrupt source. We mustn't -+ * clear it a second time in this top-level function. -+ */ -+ u32 clear = QM_DQAVAIL_MASK | (p->irq_sources & -+ ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI)); -+ u32 is = qm_isr_status_read(&p->p) & p->irq_sources; -+ /* DQRR-handling if it's interrupt-driven */ -+ if (is & QM_PIRQ_DQRI) -+ __poll_portal_fast(p, CONFIG_FSL_QMAN_POLL_LIMIT); -+ /* Handling of anything else that's interrupt-driven */ -+ clear |= __poll_portal_slow(p, is); -+ qm_isr_status_clear(&p->p, clear); -+ return IRQ_HANDLED; -+} -+ -+/* This inner version is used privately by qman_create_affine_portal(), as well -+ * as by the exported qman_stop_dequeues(). */ -+static inline void qman_stop_dequeues_ex(struct qman_portal *p) -+{ -+ unsigned long irqflags __maybe_unused; -+ PORTAL_IRQ_LOCK(p, irqflags); -+ if (!(p->dqrr_disable_ref++)) -+ qm_dqrr_set_maxfill(&p->p, 0); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+} -+ -+static int drain_mr(struct qm_portal *p) -+{ -+ const struct qm_mr_entry *msg; -+loop: -+ msg = qm_mr_current(p); -+ if (!msg) { -+ /* if MR was full and h/w had other FQRNI entries to produce, we -+ * need to allow it time to produce those entries once the -+ * existing entries are consumed. A worst-case situation -+ * (fully-loaded system) means h/w sequencers may have to do 3-4 -+ * other things before servicing the portal's MR pump, each of -+ * which (if slow) may take ~50 qman cycles (which is ~200 -+ * processor cycles). So rounding up and then multiplying this -+ * worst-case estimate by a factor of 10, just to be -+ * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume -+ * one entry at a time, so h/w has an opportunity to produce new -+ * entries well before the ring has been fully consumed, so -+ * we're being *really* paranoid here. 
*/ -+ u64 now, then = mfatb(); -+ do { -+ now = mfatb(); -+ } while ((then + 10000) > now); -+ msg = qm_mr_current(p); -+ if (!msg) -+ return 0; -+ } -+ qm_mr_next(p); -+ qm_mr_cci_consume(p, 1); -+ goto loop; -+} -+ -+#ifdef CONFIG_SUSPEND -+static int _qman_portal_suspend_noirq(struct device *dev) -+{ -+ struct qman_portal *p = (struct qman_portal *)dev->platform_data; -+#ifdef CONFIG_PM_DEBUG -+ struct platform_device *pdev = to_platform_device(dev); -+#endif -+ -+ p->save_isdr = qm_isr_disable_read(&p->p); -+ qm_isr_disable_write(&p->p, 0xffffffff); -+ qm_isr_status_clear(&p->p, 0xffffffff); -+#ifdef CONFIG_PM_DEBUG -+ pr_info("Suspend for %s\n", pdev->name); -+#endif -+ return 0; -+} -+ -+static int _qman_portal_resume_noirq(struct device *dev) -+{ -+ struct qman_portal *p = (struct qman_portal *)dev->platform_data; -+ -+ /* restore isdr */ -+ qm_isr_disable_write(&p->p, p->save_isdr); -+ return 0; -+} -+#else -+#define _qman_portal_suspend_noirq NULL -+#define _qman_portal_resume_noirq NULL -+#endif -+ -+struct dev_pm_domain qman_portal_device_pm_domain = { -+ .ops = { -+ USE_PLATFORM_PM_SLEEP_OPS -+ .suspend_noirq = _qman_portal_suspend_noirq, -+ .resume_noirq = _qman_portal_resume_noirq, -+ } -+}; -+ -+struct qman_portal *qman_create_portal( -+ struct qman_portal *portal, -+ const struct qm_portal_config *config, -+ const struct qman_cgrs *cgrs) -+{ -+ struct qm_portal *__p; -+ char buf[16]; -+ int ret; -+ u32 isdr; -+ -+ if (!portal) { -+ portal = kmalloc(sizeof(*portal), GFP_KERNEL); -+ if (!portal) -+ return portal; -+ portal->alloced = 1; -+ } else -+ portal->alloced = 0; -+ -+ __p = &portal->p; -+ -+#ifdef CONFIG_FSL_PAMU -+ /* PAMU is required for stashing */ -+ portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? -+ 1 : 0); -+#else -+ portal->use_eqcr_ci_stashing = 0; -+#endif -+ -+ /* prep the low-level portal struct with the mapped addresses from the -+ * config, everything that follows depends on it and "config" is more -+ * for (de)reference... */ -+ __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE]; -+ __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI]; -+ /* -+ * If CI-stashing is used, the current defaults use a threshold of 3, -+ * and stash with high-than-DQRR priority. -+ */ -+ if (qm_eqcr_init(__p, qm_eqcr_pvb, -+ portal->use_eqcr_ci_stashing ? 
3 : 0, 1)) { -+ pr_err("Qman EQCR initialisation failed\n"); -+ goto fail_eqcr; -+ } -+ if (qm_dqrr_init(__p, config, qm_dqrr_dpush, qm_dqrr_pvb, -+ qm_dqrr_cdc, DQRR_MAXFILL)) { -+ pr_err("Qman DQRR initialisation failed\n"); -+ goto fail_dqrr; -+ } -+ if (qm_mr_init(__p, qm_mr_pvb, qm_mr_cci)) { -+ pr_err("Qman MR initialisation failed\n"); -+ goto fail_mr; -+ } -+ if (qm_mc_init(__p)) { -+ pr_err("Qman MC initialisation failed\n"); -+ goto fail_mc; -+ } -+ if (qm_isr_init(__p)) { -+ pr_err("Qman ISR initialisation failed\n"); -+ goto fail_isr; -+ } -+ /* static interrupt-gating controls */ -+ qm_dqrr_set_ithresh(__p, CONFIG_FSL_QMAN_PIRQ_DQRR_ITHRESH); -+ qm_mr_set_ithresh(__p, CONFIG_FSL_QMAN_PIRQ_MR_ITHRESH); -+ qm_isr_set_iperiod(__p, CONFIG_FSL_QMAN_PIRQ_IPERIOD); -+ portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL); -+ if (!portal->cgrs) -+ goto fail_cgrs; -+ /* initial snapshot is no-depletion */ -+ qman_cgrs_init(&portal->cgrs[1]); -+ if (cgrs) -+ portal->cgrs[0] = *cgrs; -+ else -+ /* if the given mask is NULL, assume all CGRs can be seen */ -+ qman_cgrs_fill(&portal->cgrs[0]); -+ INIT_LIST_HEAD(&portal->cgr_cbs); -+ spin_lock_init(&portal->cgr_lock); -+ if (num_ceetms) { -+ for (ret = 0; ret < num_ceetms; ret++) { -+ portal->ccgrs[ret] = kmalloc(2 * -+ sizeof(struct qman_ccgrs), GFP_KERNEL); -+ if (!portal->ccgrs[ret]) -+ goto fail_ccgrs; -+ qman_ccgrs_init(&portal->ccgrs[ret][1]); -+ qman_ccgrs_fill(&portal->ccgrs[ret][0]); -+ INIT_LIST_HEAD(&portal->ccgr_cbs[ret]); -+ } -+ } -+ spin_lock_init(&portal->ccgr_lock); -+ portal->bits = 0; -+ portal->slowpoll = 0; -+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC -+ portal->eqci_owned = NULL; -+#endif -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+ raw_spin_lock_init(&portal->sharing_lock); -+ portal->is_shared = config->public_cfg.is_shared; -+ portal->sharing_redirect = NULL; -+#endif -+ portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 | -+ QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS | -+ QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED; -+ portal->dqrr_disable_ref = 0; -+ portal->cb_dc_ern = NULL; -+ sprintf(buf, "qportal-%d", config->public_cfg.channel); -+ portal->pdev = platform_device_alloc(buf, -1); -+ if (!portal->pdev) { -+ pr_err("qman_portal - platform_device_alloc() failed\n"); -+ goto fail_devalloc; -+ } -+#ifdef CONFIG_ARM -+ portal->pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40); -+ portal->pdev->dev.dma_mask = &portal->pdev->dev.coherent_dma_mask; -+#else -+ if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40))) { -+ pr_err("qman_portal - dma_set_mask() failed\n"); -+ goto fail_devadd; -+ } -+#endif -+ portal->pdev->dev.pm_domain = &qman_portal_device_pm_domain; -+ portal->pdev->dev.platform_data = portal; -+ ret = platform_device_add(portal->pdev); -+ if (ret) { -+ pr_err("qman_portal - platform_device_add() failed\n"); -+ goto fail_devadd; -+ } -+ dpa_rbtree_init(&portal->retire_table); -+ isdr = 0xffffffff; -+ qm_isr_disable_write(__p, isdr); -+ portal->irq_sources = 0; -+ qm_isr_enable_write(__p, portal->irq_sources); -+ qm_isr_status_clear(__p, 0xffffffff); -+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu); -+ if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname, -+ portal)) { -+ pr_err("request_irq() failed\n"); -+ goto fail_irq; -+ } -+ if ((config->public_cfg.cpu != -1) && -+ irq_can_set_affinity(config->public_cfg.irq) && -+ irq_set_affinity(config->public_cfg.irq, -+ cpumask_of(config->public_cfg.cpu))) { -+ pr_err("irq_set_affinity() failed\n"); 
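qman_create_portal() tears down with the classic goto ladder: each fail_* label undoes exactly the steps that succeeded before the failure, in reverse order, so every exit path releases every acquired resource once. Reduced to its skeleton, with setup_a/setup_b/setup_c and the teardown_* names as placeholders rather than driver functions:

/* Skeleton of the unwind idiom used above; all names are placeholders. */
static int setup_a(void), setup_b(void), setup_c(void);
static void teardown_a(void), teardown_b(void);

static int create_example(void)
{
	if (setup_a())
		goto fail_a;
	if (setup_b())
		goto fail_b;
	if (setup_c())
		goto fail_c;
	return 0;
fail_c:
	teardown_b();	/* undo setup_b() */
fail_b:
	teardown_a();	/* undo setup_a() */
fail_a:
	return -ENODEV;
}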
-+ goto fail_affinity; -+ } -+ -+ /* Need EQCR to be empty before continuing */ -+ isdr ^= QM_PIRQ_EQCI; -+ qm_isr_disable_write(__p, isdr); -+ ret = qm_eqcr_get_fill(__p); -+ if (ret) { -+ pr_err("Qman EQCR unclean\n"); -+ goto fail_eqcr_empty; -+ } -+ isdr ^= (QM_PIRQ_DQRI | QM_PIRQ_MRI); -+ qm_isr_disable_write(__p, isdr); -+ while (qm_dqrr_current(__p) != NULL) -+ qm_dqrr_cdc_consume_n(__p, 0xffff); -+ drain_mr(__p); -+ /* Success */ -+ portal->config = config; -+ qm_isr_disable_write(__p, 0); -+ qm_isr_uninhibit(__p); -+ /* Write a sane SDQCR */ -+ qm_dqrr_sdqcr_set(__p, portal->sdqcr); -+ return portal; -+fail_eqcr_empty: -+fail_affinity: -+ free_irq(config->public_cfg.irq, portal); -+fail_irq: -+ platform_device_del(portal->pdev); -+fail_devadd: -+ platform_device_put(portal->pdev); -+fail_devalloc: -+ if (num_ceetms) -+ for (ret = 0; ret < num_ceetms; ret++) -+ kfree(portal->ccgrs[ret]); -+fail_ccgrs: -+ kfree(portal->cgrs); -+fail_cgrs: -+ qm_isr_finish(__p); -+fail_isr: -+ qm_mc_finish(__p); -+fail_mc: -+ qm_mr_finish(__p); -+fail_mr: -+ qm_dqrr_finish(__p); -+fail_dqrr: -+ qm_eqcr_finish(__p); -+fail_eqcr: -+ if (portal->alloced) -+ kfree(portal); -+ return NULL; -+} -+ -+struct qman_portal *qman_create_affine_portal( -+ const struct qm_portal_config *config, -+ const struct qman_cgrs *cgrs) -+{ -+ struct qman_portal *res; -+ struct qman_portal *portal; -+ -+ portal = &per_cpu(qman_affine_portal, config->public_cfg.cpu); -+ res = qman_create_portal(portal, config, cgrs); -+ if (res) { -+ spin_lock(&affine_mask_lock); -+ cpumask_set_cpu(config->public_cfg.cpu, &affine_mask); -+ affine_channels[config->public_cfg.cpu] = -+ config->public_cfg.channel; -+ affine_portals[config->public_cfg.cpu] = portal; -+ spin_unlock(&affine_mask_lock); -+ } -+ return res; -+} -+ -+/* These checks are BUG_ON()s because the driver is already supposed to avoid -+ * these cases. */ -+struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect, -+ int cpu) -+{ -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+ struct qman_portal *p; -+ p = &per_cpu(qman_affine_portal, cpu); -+ /* Check that we don't already have our own portal */ -+ BUG_ON(p->config); -+ /* Check that we aren't already slaving to another portal */ -+ BUG_ON(p->is_shared); -+ /* Check that 'redirect' is prepared to have us */ -+ BUG_ON(!redirect->config->public_cfg.is_shared); -+ /* These are the only elements to initialise when redirecting */ -+ p->irq_sources = 0; -+ p->sharing_redirect = redirect; -+ affine_portals[cpu] = p; -+ return p; -+#else -+ BUG(); -+ return NULL; -+#endif -+} -+ -+void qman_destroy_portal(struct qman_portal *qm) -+{ -+ const struct qm_portal_config *pcfg; -+ int i; -+ -+ /* Stop dequeues on the portal */ -+ qm_dqrr_sdqcr_set(&qm->p, 0); -+ -+ /* NB we do this to "quiesce" EQCR. If we add enqueue-completions or -+ * something related to QM_PIRQ_EQCI, this may need fixing. -+ * Also, due to the prefetching model used for CI updates in the enqueue -+ * path, this update will only invalidate the CI cacheline *after* -+ * working on it, so we need to call this twice to ensure a full update -+ * irrespective of where the enqueue processing was at when the teardown -+ * began. 
*/ -+ qm_eqcr_cce_update(&qm->p); -+ qm_eqcr_cce_update(&qm->p); -+ pcfg = qm->config; -+ -+ free_irq(pcfg->public_cfg.irq, qm); -+ -+ kfree(qm->cgrs); -+ if (num_ceetms) -+ for (i = 0; i < num_ceetms; i++) -+ kfree(qm->ccgrs[i]); -+ qm_isr_finish(&qm->p); -+ qm_mc_finish(&qm->p); -+ qm_mr_finish(&qm->p); -+ qm_dqrr_finish(&qm->p); -+ qm_eqcr_finish(&qm->p); -+ -+ platform_device_del(qm->pdev); -+ platform_device_put(qm->pdev); -+ -+ qm->config = NULL; -+ if (qm->alloced) -+ kfree(qm); -+} -+ -+const struct qm_portal_config *qman_destroy_affine_portal(void) -+{ -+ /* We don't want to redirect if we're a slave, use "raw" */ -+ struct qman_portal *qm = get_raw_affine_portal(); -+ const struct qm_portal_config *pcfg; -+ int cpu; -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+ if (qm->sharing_redirect) { -+ qm->sharing_redirect = NULL; -+ put_affine_portal(); -+ return NULL; -+ } -+ qm->is_shared = 0; -+#endif -+ pcfg = qm->config; -+ cpu = pcfg->public_cfg.cpu; -+ -+ qman_destroy_portal(qm); -+ -+ spin_lock(&affine_mask_lock); -+ cpumask_clear_cpu(cpu, &affine_mask); -+ spin_unlock(&affine_mask_lock); -+ put_affine_portal(); -+ return pcfg; -+} -+ -+const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal *p) -+{ -+ return &p->config->public_cfg; -+} -+EXPORT_SYMBOL(qman_p_get_portal_config); -+ -+const struct qman_portal_config *qman_get_portal_config(void) -+{ -+ struct qman_portal *p = get_affine_portal(); -+ const struct qman_portal_config *ret = qman_p_get_portal_config(p); -+ put_affine_portal(); -+ return ret; -+} -+EXPORT_SYMBOL(qman_get_portal_config); -+ -+/* Inline helper to reduce nesting in __poll_portal_slow() */ -+static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq, -+ const struct qm_mr_entry *msg, u8 verb) -+{ -+ FQLOCK(fq); -+ switch (verb) { -+ case QM_MR_VERB_FQRL: -+ DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL)); -+ fq_clear(fq, QMAN_FQ_STATE_ORL); -+ table_del_fq(p, fq); -+ break; -+ case QM_MR_VERB_FQRN: -+ DPA_ASSERT((fq->state == qman_fq_state_parked) || -+ (fq->state == qman_fq_state_sched)); -+ DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING)); -+ fq_clear(fq, QMAN_FQ_STATE_CHANGING); -+ if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY) -+ fq_set(fq, QMAN_FQ_STATE_NE); -+ if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT) -+ fq_set(fq, QMAN_FQ_STATE_ORL); -+ else -+ table_del_fq(p, fq); -+ fq->state = qman_fq_state_retired; -+ break; -+ case QM_MR_VERB_FQPN: -+ DPA_ASSERT(fq->state == qman_fq_state_sched); -+ DPA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING)); -+ fq->state = qman_fq_state_parked; -+ } -+ FQUNLOCK(fq); -+} -+ -+static u32 __poll_portal_slow(struct qman_portal *p, u32 is) -+{ -+ const struct qm_mr_entry *msg; -+ -+ if (is & QM_PIRQ_CSCI) { -+ struct qman_cgrs rr, c; -+ struct qm_mc_result *mcr; -+ struct qman_cgr *cgr; -+ unsigned long irqflags __maybe_unused; -+ -+ spin_lock_irqsave(&p->cgr_lock, irqflags); -+ /* -+ * The CSCI bit must be cleared _before_ issuing the -+ * Query Congestion State command, to ensure that a long -+ * CGR State Change callback cannot miss an intervening -+ * state change. 
-+ */ -+ qm_isr_status_clear(&p->p, QM_PIRQ_CSCI); -+ qm_mc_start(&p->p); -+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ /* mask out the ones I'm not interested in */ -+ qman_cgrs_and(&rr, (const struct qman_cgrs *) -+ &mcr->querycongestion.state, &p->cgrs[0]); -+ /* check previous snapshot for delta, enter/exit congestion */ -+ qman_cgrs_xor(&c, &rr, &p->cgrs[1]); -+ /* update snapshot */ -+ qman_cgrs_cp(&p->cgrs[1], &rr); -+ /* Invoke callback */ -+ list_for_each_entry(cgr, &p->cgr_cbs, node) -+ if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid)) -+ cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid)); -+ spin_unlock_irqrestore(&p->cgr_lock, irqflags); -+ } -+ if (is & QM_PIRQ_CCSCI) { -+ struct qman_ccgrs rr, c, congestion_result; -+ struct qm_mc_result *mcr; -+ struct qm_mc_command *mcc; -+ struct qm_ceetm_ccg *ccg; -+ unsigned long irqflags __maybe_unused; -+ int i, j, k; -+ -+ spin_lock_irqsave(&p->ccgr_lock, irqflags); -+ /* -+ * The CCSCI bit must be cleared _before_ issuing the -+ * Query Congestion State command, to ensure that a long -+ * CCGR State Change callback cannot miss an intervening -+ * state change. -+ */ -+ qm_isr_status_clear(&p->p, QM_PIRQ_CCSCI); -+ -+ for (i = 0; i < num_ceetms; i++) { -+ for (j = 0; j < 2; j++) { -+ mcc = qm_mc_start(&p->p); -+ mcc->ccgr_query.ccgrid = cpu_to_be16( -+ CEETM_QUERY_CONGESTION_STATE | j); -+ mcc->ccgr_query.dcpid = i; -+ qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ for (k = 0; k < 8; k++) -+ mcr->ccgr_query.congestion_state.state. -+ __state[k] = be32_to_cpu( -+ mcr->ccgr_query. -+ congestion_state.state. -+ __state[k]); -+ congestion_result.q[j] = -+ mcr->ccgr_query.congestion_state.state; -+ } -+ /* mask out the ones I'm not interested in */ -+ qman_ccgrs_and(&rr, &congestion_result, -+ &p->ccgrs[i][0]); -+ /* -+ * check previous snapshot for delta, enter/exit -+ * congestion. 
-+ */ -+ qman_ccgrs_xor(&c, &rr, &p->ccgrs[i][1]); -+ /* update snapshot */ -+ qman_ccgrs_cp(&p->ccgrs[i][1], &rr); -+ /* Invoke callback */ -+ list_for_each_entry(ccg, &p->ccgr_cbs[i], cb_node) -+ if (ccg->cb && qman_ccgrs_get(&c, -+ (ccg->parent->idx << 4) | ccg->idx)) -+ ccg->cb(ccg, ccg->cb_ctx, -+ qman_ccgrs_get(&rr, -+ (ccg->parent->idx << 4) -+ | ccg->idx)); -+ } -+ spin_unlock_irqrestore(&p->ccgr_lock, irqflags); -+ } -+ -+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC -+ if (is & QM_PIRQ_EQCI) { -+ unsigned long irqflags; -+ PORTAL_IRQ_LOCK(p, irqflags); -+ p->eqci_owned = NULL; -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ wake_up(&affine_queue); -+ } -+#endif -+ -+ if (is & QM_PIRQ_EQRI) { -+ unsigned long irqflags __maybe_unused; -+ PORTAL_IRQ_LOCK(p, irqflags); -+ qm_eqcr_cce_update(&p->p); -+ qm_eqcr_set_ithresh(&p->p, 0); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ wake_up(&affine_queue); -+ } -+ -+ if (is & QM_PIRQ_MRI) { -+ struct qman_fq *fq; -+ u8 verb, num = 0; -+mr_loop: -+ qm_mr_pvb_update(&p->p); -+ msg = qm_mr_current(&p->p); -+ if (!msg) -+ goto mr_done; -+ verb = msg->verb & QM_MR_VERB_TYPE_MASK; -+ /* The message is a software ERN iff the 0x20 bit is set */ -+ if (verb & 0x20) { -+ switch (verb) { -+ case QM_MR_VERB_FQRNI: -+ /* nada, we drop FQRNIs on the floor */ -+ break; -+ case QM_MR_VERB_FQRN: -+ case QM_MR_VERB_FQRL: -+ /* Lookup in the retirement table */ -+ fq = table_find_fq(p, be32_to_cpu(msg->fq.fqid)); -+ BUG_ON(!fq); -+ fq_state_change(p, fq, msg, verb); -+ if (fq->cb.fqs) -+ fq->cb.fqs(p, fq, msg); -+ break; -+ case QM_MR_VERB_FQPN: -+ /* Parked */ -+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP -+ fq = get_fq_table_entry( -+ be32_to_cpu(msg->fq.contextB)); -+#else -+ fq = (void *)(uintptr_t) -+ be32_to_cpu(msg->fq.contextB); -+#endif -+ fq_state_change(p, fq, msg, verb); -+ if (fq->cb.fqs) -+ fq->cb.fqs(p, fq, msg); -+ break; -+ case QM_MR_VERB_DC_ERN: -+ /* DCP ERN */ -+ if (p->cb_dc_ern) -+ p->cb_dc_ern(p, msg); -+ else if (cb_dc_ern) -+ cb_dc_ern(p, msg); -+ else { -+ static int warn_once; -+ if (!warn_once) { -+ pr_crit("Leaking DCP ERNs!\n"); -+ warn_once = 1; -+ } -+ } -+ break; -+ default: -+ pr_crit("Invalid MR verb 0x%02x\n", verb); -+ } -+ } else { -+ /* Its a software ERN */ -+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP -+ pr_info("ROY\n"); -+ fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag)); -+#else -+ fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag); -+#endif -+ fq->cb.ern(p, fq, msg); -+ } -+ num++; -+ qm_mr_next(&p->p); -+ goto mr_loop; -+mr_done: -+ qm_mr_cci_consume(&p->p, num); -+ } -+ /* -+ * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific -+ * processing. If that interrupt source has meanwhile been re-asserted, -+ * we mustn't clear it here (or in the top-level interrupt handler). -+ */ -+ return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI); -+} -+ -+/* remove some slowish-path stuff from the "fast path" and make sure it isn't -+ * inlined. */ -+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq) -+{ -+ p->vdqcr_owned = NULL; -+ FQLOCK(fq); -+ fq_clear(fq, QMAN_FQ_STATE_VDQCR); -+ FQUNLOCK(fq); -+ wake_up(&affine_queue); -+} -+ -+/* Look: no locks, no irq_save()s, no preempt_disable()s! :-) The only states -+ * that would conflict with other things if they ran at the same time on the -+ * same cpu are; -+ * -+ * (i) setting/clearing vdqcr_owned, and -+ * (ii) clearing the NE (Not Empty) flag. -+ * -+ * Both are safe. 
Because; -+ * -+ * (i) this clearing can only occur after qman_volatile_dequeue() has set the -+ * vdqcr_owned field (which it does before setting VDQCR), and -+ * qman_volatile_dequeue() blocks interrupts and preemption while this is -+ * done so that we can't interfere. -+ * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as -+ * with (i) that API prevents us from interfering until it's safe. -+ * -+ * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far -+ * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett -+ * advantage comes from this function not having to "lock" anything at all. -+ * -+ * Note also that the callbacks are invoked at points which are safe against the -+ * above potential conflicts, but that this function itself is not re-entrant -+ * (this is because the function tracks one end of each FIFO in the portal and -+ * we do *not* want to lock that). So the consequence is that it is safe for -+ * user callbacks to call into any Qman API *except* qman_poll() (as that's the -+ * sole API that could be invoking the callback through this function). -+ */ -+static inline unsigned int __poll_portal_fast(struct qman_portal *p, -+ unsigned int poll_limit) -+{ -+ const struct qm_dqrr_entry *dq; -+ struct qman_fq *fq; -+ enum qman_cb_dqrr_result res; -+ unsigned int limit = 0; -+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ -+ struct qm_dqrr_entry *shadow; -+#endif -+loop: -+ qm_dqrr_pvb_update(&p->p); -+ dq = qm_dqrr_current(&p->p); -+ if (!dq) -+ goto done; -+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ -+ /* If running on an LE system the fields of the -+ dequeue entry must be swapped. Because the -+ QMan HW will ignore writes the DQRR entry is -+ copied and the index stored within the copy */ -+ shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)]; -+ *shadow = *dq; -+ dq = shadow; -+ shadow->fqid = be32_to_cpu(shadow->fqid); -+ shadow->contextB = be32_to_cpu(shadow->contextB); -+ shadow->seqnum = be16_to_cpu(shadow->seqnum); -+ hw_fd_to_cpu(&shadow->fd); -+#endif -+ if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) { -+ /* VDQCR: don't trust contextB as the FQ may have been -+ * configured for h/w consumption and we're draining it -+ * post-retirement. */ -+ fq = p->vdqcr_owned; -+ /* We only set QMAN_FQ_STATE_NE when retiring, so we only need -+ * to check for clearing it when doing volatile dequeues. It's -+ * one less thing to check in the critical path (SDQCR). */ -+ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY) -+ fq_clear(fq, QMAN_FQ_STATE_NE); -+ /* this is duplicated from the SDQCR code, but we have stuff to -+ * do before *and* after this callback, and we don't want -+ * multiple if()s in the critical path (SDQCR). */ -+ res = fq->cb.dqrr(p, fq, dq); -+ if (res == qman_cb_dqrr_stop) -+ goto done; -+ /* Check for VDQCR completion */ -+ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) -+ clear_vdqcr(p, fq); -+ } else { -+ /* SDQCR: contextB points to the FQ */ -+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP -+ fq = get_fq_table_entry(dq->contextB); -+#else -+ fq = (void *)(uintptr_t)dq->contextB; -+#endif -+ /* Now let the callback do its stuff */ -+ res = fq->cb.dqrr(p, fq, dq); -+ -+ /* The callback can request that we exit without consuming this -+ * entry nor advancing; */ -+ if (res == qman_cb_dqrr_stop) -+ goto done; -+ } -+ /* Interpret 'dq' from a driver perspective. */ -+ /* Parking isn't possible unless HELDACTIVE was set. NB, -+ * FORCEELIGIBLE implies HELDACTIVE, so we only need to -+ * check for HELDACTIVE to cover both. 
*/ -+ DPA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) || -+ (res != qman_cb_dqrr_park)); -+ /* Defer just means "skip it, I'll consume it myself later on" */ -+ if (res != qman_cb_dqrr_defer) -+ qm_dqrr_cdc_consume_1ptr(&p->p, dq, (res == qman_cb_dqrr_park)); -+ /* Move forward */ -+ qm_dqrr_next(&p->p); -+ /* Entry processed and consumed, increment our counter. The callback can -+ * request that we exit after consuming the entry, and we also exit if -+ * we reach our processing limit, so loop back only if neither of these -+ * conditions is met. */ -+ if ((++limit < poll_limit) && (res != qman_cb_dqrr_consume_stop)) -+ goto loop; -+done: -+ return limit; -+} -+ -+u32 qman_irqsource_get(void) -+{ -+ /* "irqsource" and "poll" APIs mustn't redirect when sharing, they -+ * should shut the user out if they are not the primary CPU hosting the -+ * portal. That's why we use the "raw" interface. */ -+ struct qman_portal *p = get_raw_affine_portal(); -+ u32 ret = p->irq_sources & QM_PIRQ_VISIBLE; -+ put_affine_portal(); -+ return ret; -+} -+EXPORT_SYMBOL(qman_irqsource_get); -+ -+int qman_p_irqsource_add(struct qman_portal *p, u32 bits __maybe_unused) -+{ -+ __maybe_unused unsigned long irqflags; -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+ if (p->sharing_redirect) -+ return -EINVAL; -+ else -+#endif -+ { -+ PORTAL_IRQ_LOCK(p, irqflags); -+ set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources); -+ qm_isr_enable_write(&p->p, p->irq_sources); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ } -+ return 0; -+} -+EXPORT_SYMBOL(qman_p_irqsource_add); -+ -+int qman_irqsource_add(u32 bits __maybe_unused) -+{ -+ struct qman_portal *p = get_raw_affine_portal(); -+ int ret; -+ ret = qman_p_irqsource_add(p, bits); -+ put_affine_portal(); -+ return ret; -+} -+EXPORT_SYMBOL(qman_irqsource_add); -+ -+int qman_p_irqsource_remove(struct qman_portal *p, u32 bits) -+{ -+ __maybe_unused unsigned long irqflags; -+ u32 ier; -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+ if (p->sharing_redirect) { -+ put_affine_portal(); -+ return -EINVAL; -+ } -+#endif -+ /* Our interrupt handler only processes+clears status register bits that -+ * are in p->irq_sources. As we're trimming that mask, if one of them -+ * were to assert in the status register just before we remove it from -+ * the enable register, there would be an interrupt-storm when we -+ * release the IRQ lock. So we wait for the enable register update to -+ * take effect in h/w (by reading it back) and then clear all other bits -+ * in the status register. Ie. we clear them from ISR once it's certain -+ * IER won't allow them to reassert. */ -+ PORTAL_IRQ_LOCK(p, irqflags); -+ bits &= QM_PIRQ_VISIBLE; -+ clear_bits(bits, &p->irq_sources); -+ qm_isr_enable_write(&p->p, p->irq_sources); -+ -+ ier = qm_isr_enable_read(&p->p); -+ /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a -+ * data-dependency, ie. to protect against re-ordering. 
*/ -+ qm_isr_status_clear(&p->p, ~ier); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ return 0; -+} -+EXPORT_SYMBOL(qman_p_irqsource_remove); -+ -+int qman_irqsource_remove(u32 bits) -+{ -+ struct qman_portal *p = get_raw_affine_portal(); -+ int ret; -+ ret = qman_p_irqsource_remove(p, bits); -+ put_affine_portal(); -+ return ret; -+} -+EXPORT_SYMBOL(qman_irqsource_remove); -+ -+const cpumask_t *qman_affine_cpus(void) -+{ -+ return &affine_mask; -+} -+EXPORT_SYMBOL(qman_affine_cpus); -+ -+u16 qman_affine_channel(int cpu) -+{ -+ if (cpu < 0) { -+ struct qman_portal *portal = get_raw_affine_portal(); -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+ BUG_ON(portal->sharing_redirect); -+#endif -+ cpu = portal->config->public_cfg.cpu; -+ put_affine_portal(); -+ } -+ BUG_ON(!cpumask_test_cpu(cpu, &affine_mask)); -+ return affine_channels[cpu]; -+} -+EXPORT_SYMBOL(qman_affine_channel); -+ -+void *qman_get_affine_portal(int cpu) -+{ -+ return affine_portals[cpu]; -+} -+EXPORT_SYMBOL(qman_get_affine_portal); -+ -+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit) -+{ -+ int ret; -+ -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+ if (unlikely(p->sharing_redirect)) -+ ret = -EINVAL; -+ else -+#endif -+ { -+ BUG_ON(p->irq_sources & QM_PIRQ_DQRI); -+ ret = __poll_portal_fast(p, limit); -+ } -+ return ret; -+} -+EXPORT_SYMBOL(qman_p_poll_dqrr); -+ -+int qman_poll_dqrr(unsigned int limit) -+{ -+ struct qman_portal *p = get_poll_portal(); -+ int ret; -+ ret = qman_p_poll_dqrr(p, limit); -+ put_poll_portal(); -+ return ret; -+} -+EXPORT_SYMBOL(qman_poll_dqrr); -+ -+u32 qman_p_poll_slow(struct qman_portal *p) -+{ -+ u32 ret; -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+ if (unlikely(p->sharing_redirect)) -+ ret = (u32)-1; -+ else -+#endif -+ { -+ u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources; -+ ret = __poll_portal_slow(p, is); -+ qm_isr_status_clear(&p->p, ret); -+ } -+ return ret; -+} -+EXPORT_SYMBOL(qman_p_poll_slow); -+ -+u32 qman_poll_slow(void) -+{ -+ struct qman_portal *p = get_poll_portal(); -+ u32 ret; -+ ret = qman_p_poll_slow(p); -+ put_poll_portal(); -+ return ret; -+} -+EXPORT_SYMBOL(qman_poll_slow); -+ -+/* Legacy wrapper */ -+void qman_p_poll(struct qman_portal *p) -+{ -+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE -+ if (unlikely(p->sharing_redirect)) -+ return; -+#endif -+ if ((~p->irq_sources) & QM_PIRQ_SLOW) { -+ if (!(p->slowpoll--)) { -+ u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources; -+ u32 active = __poll_portal_slow(p, is); -+ if (active) { -+ qm_isr_status_clear(&p->p, active); -+ p->slowpoll = SLOW_POLL_BUSY; -+ } else -+ p->slowpoll = SLOW_POLL_IDLE; -+ } -+ } -+ if ((~p->irq_sources) & QM_PIRQ_DQRI) -+ __poll_portal_fast(p, CONFIG_FSL_QMAN_POLL_LIMIT); -+} -+EXPORT_SYMBOL(qman_p_poll); -+ -+void qman_poll(void) -+{ -+ struct qman_portal *p = get_poll_portal(); -+ qman_p_poll(p); -+ put_poll_portal(); -+} -+EXPORT_SYMBOL(qman_poll); -+ -+void qman_p_stop_dequeues(struct qman_portal *p) -+{ -+ qman_stop_dequeues_ex(p); -+} -+EXPORT_SYMBOL(qman_p_stop_dequeues); -+ -+void qman_stop_dequeues(void) -+{ -+ struct qman_portal *p = get_affine_portal(); -+ qman_p_stop_dequeues(p); -+ put_affine_portal(); -+} -+EXPORT_SYMBOL(qman_stop_dequeues); -+ -+void qman_p_start_dequeues(struct qman_portal *p) -+{ -+ unsigned long irqflags __maybe_unused; -+ PORTAL_IRQ_LOCK(p, irqflags); -+ DPA_ASSERT(p->dqrr_disable_ref > 0); -+ if (!(--p->dqrr_disable_ref)) -+ qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+} -+EXPORT_SYMBOL(qman_p_start_dequeues); -+ -+void qman_start_dequeues(void) 
(void) -+{ -+ struct qman_portal *p = get_affine_portal(); -+ qman_p_start_dequeues(p); -+ put_affine_portal(); -+} -+EXPORT_SYMBOL(qman_start_dequeues); -+ -+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools) -+{ -+ unsigned long irqflags __maybe_unused; -+ PORTAL_IRQ_LOCK(p, irqflags); -+ pools &= p->config->public_cfg.pools; -+ p->sdqcr |= pools; -+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+} -+EXPORT_SYMBOL(qman_p_static_dequeue_add); -+ -+void qman_static_dequeue_add(u32 pools) -+{ -+ struct qman_portal *p = get_affine_portal(); -+ qman_p_static_dequeue_add(p, pools); -+ put_affine_portal(); -+} -+EXPORT_SYMBOL(qman_static_dequeue_add); -+ -+void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools) -+{ -+ unsigned long irqflags __maybe_unused; -+ PORTAL_IRQ_LOCK(p, irqflags); -+ pools &= p->config->public_cfg.pools; -+ p->sdqcr &= ~pools; -+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+} -+EXPORT_SYMBOL(qman_p_static_dequeue_del); -+ -+void qman_static_dequeue_del(u32 pools) -+{ -+ struct qman_portal *p = get_affine_portal(); -+ qman_p_static_dequeue_del(p, pools); -+ put_affine_portal(); -+} -+EXPORT_SYMBOL(qman_static_dequeue_del); -+ -+u32 qman_p_static_dequeue_get(struct qman_portal *p) -+{ -+ return p->sdqcr; -+} -+EXPORT_SYMBOL(qman_p_static_dequeue_get); -+ -+u32 qman_static_dequeue_get(void) -+{ -+ struct qman_portal *p = get_affine_portal(); -+ u32 ret = qman_p_static_dequeue_get(p); -+ put_affine_portal(); -+ return ret; -+} -+EXPORT_SYMBOL(qman_static_dequeue_get); -+ -+void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq, -+ int park_request) -+{ -+ qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request); -+} -+EXPORT_SYMBOL(qman_p_dca); -+ -+void qman_dca(struct qm_dqrr_entry *dq, int park_request) -+{ -+ struct qman_portal *p = get_affine_portal(); -+ qman_p_dca(p, dq, park_request); -+ put_affine_portal(); -+} -+EXPORT_SYMBOL(qman_dca); -+ -+/*******************/ -+/* Frame queue API */ -+/*******************/ -+ -+static const char *mcr_result_str(u8 result) -+{ -+ switch (result) { -+ case QM_MCR_RESULT_NULL: -+ return "QM_MCR_RESULT_NULL"; -+ case QM_MCR_RESULT_OK: -+ return "QM_MCR_RESULT_OK"; -+ case QM_MCR_RESULT_ERR_FQID: -+ return "QM_MCR_RESULT_ERR_FQID"; -+ case QM_MCR_RESULT_ERR_FQSTATE: -+ return "QM_MCR_RESULT_ERR_FQSTATE"; -+ case QM_MCR_RESULT_ERR_NOTEMPTY: -+ return "QM_MCR_RESULT_ERR_NOTEMPTY"; -+ case QM_MCR_RESULT_PENDING: -+ return "QM_MCR_RESULT_PENDING"; -+ case QM_MCR_RESULT_ERR_BADCOMMAND: -+ return "QM_MCR_RESULT_ERR_BADCOMMAND"; -+ } -+ return ""; -+} -+ -+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq) -+{ -+ struct qm_fqd fqd; -+ struct qm_mcr_queryfq_np np; -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ -+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) { -+ int ret = qman_alloc_fqid(&fqid); -+ if (ret) -+ return ret; -+ } -+ spin_lock_init(&fq->fqlock); -+ fq->fqid = fqid; -+ fq->flags = flags; -+ fq->state = qman_fq_state_oos; -+ fq->cgr_groupid = 0; -+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP -+ if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) -+ return -ENOMEM; -+#endif -+ if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY)) -+ return 0; -+ /* Everything else is AS_IS support */ -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ mcc = qm_mc_start(&p->p); -+ mcc->queryfq.fqid = cpu_to_be32(fqid); -+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); -+ while
(!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ); -+ if (mcr->result != QM_MCR_RESULT_OK) { -+ pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result)); -+ goto err; -+ } -+ fqd = mcr->queryfq.fqd; -+ hw_fqd_to_cpu(&fqd); -+ mcc = qm_mc_start(&p->p); -+ mcc->queryfq_np.fqid = cpu_to_be32(fqid); -+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP); -+ if (mcr->result != QM_MCR_RESULT_OK) { -+ pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result)); -+ goto err; -+ } -+ np = mcr->queryfq_np; -+ /* Phew, have queryfq and queryfq_np results, stitch together -+ * the FQ object from those. */ -+ fq->cgr_groupid = fqd.cgid; -+ switch (np.state & QM_MCR_NP_STATE_MASK) { -+ case QM_MCR_NP_STATE_OOS: -+ break; -+ case QM_MCR_NP_STATE_RETIRED: -+ fq->state = qman_fq_state_retired; -+ if (np.frm_cnt) -+ fq_set(fq, QMAN_FQ_STATE_NE); -+ break; -+ case QM_MCR_NP_STATE_TEN_SCHED: -+ case QM_MCR_NP_STATE_TRU_SCHED: -+ case QM_MCR_NP_STATE_ACTIVE: -+ fq->state = qman_fq_state_sched; -+ if (np.state & QM_MCR_NP_STATE_R) -+ fq_set(fq, QMAN_FQ_STATE_CHANGING); -+ break; -+ case QM_MCR_NP_STATE_PARKED: -+ fq->state = qman_fq_state_parked; -+ break; -+ default: -+ DPA_ASSERT(NULL == "invalid FQ state"); -+ } -+ if (fqd.fq_ctrl & QM_FQCTRL_CGE) -+ fq->state |= QMAN_FQ_STATE_CGR_EN; -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ return 0; -+err: -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) -+ qman_release_fqid(fqid); -+ return -EIO; -+} -+EXPORT_SYMBOL(qman_create_fq); -+ -+void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused) -+{ -+ -+ /* We don't need to lock the FQ as it is a pre-condition that the FQ be -+ * quiesced. Instead, run some checks. */ -+ switch (fq->state) { -+ case qman_fq_state_parked: -+ DPA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED); -+ case qman_fq_state_oos: -+ if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID)) -+ qman_release_fqid(fq->fqid); -+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP -+ clear_fq_table_entry(fq->key); -+#endif -+ return; -+ default: -+ break; -+ } -+ DPA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!"); -+} -+EXPORT_SYMBOL(qman_destroy_fq); -+ -+u32 qman_fq_fqid(struct qman_fq *fq) -+{ -+ return fq->fqid; -+} -+EXPORT_SYMBOL(qman_fq_fqid); -+ -+void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags) -+{ -+ if (state) -+ *state = fq->state; -+ if (flags) -+ *flags = fq->flags; -+} -+EXPORT_SYMBOL(qman_fq_state); -+ -+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ? 
-+ QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED; -+ -+ if ((fq->state != qman_fq_state_oos) && -+ (fq->state != qman_fq_state_parked)) -+ return -EINVAL; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) -+ return -EINVAL; -+#endif -+ if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) { -+ /* And can't be set at the same time as TDTHRESH */ -+ if (opts->we_mask & QM_INITFQ_WE_TDTHRESH) -+ return -EINVAL; -+ } -+ /* Issue an INITFQ_[PARKED|SCHED] management command */ -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ FQLOCK(fq); -+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || -+ ((fq->state != qman_fq_state_oos) && -+ (fq->state != qman_fq_state_parked)))) { -+ FQUNLOCK(fq); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ return -EBUSY; -+ } -+ mcc = qm_mc_start(&p->p); -+ if (opts) -+ mcc->initfq = *opts; -+ mcc->initfq.fqid = cpu_to_be32(fq->fqid); -+ mcc->initfq.count = 0; -+ -+ /* If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a -+ * demux pointer. Otherwise, the caller-provided value is allowed to -+ * stand, don't overwrite it. */ -+ if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) { -+ dma_addr_t phys_fq; -+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB; -+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP -+ mcc->initfq.fqd.context_b = fq->key; -+#else -+ mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq; -+#endif -+ /* and the physical address - NB, if the user wasn't trying to -+ * set CONTEXTA, clear the stashing settings. */ -+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) { -+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; -+ memset(&mcc->initfq.fqd.context_a, 0, -+ sizeof(mcc->initfq.fqd.context_a)); -+ } else { -+ phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq), -+ DMA_TO_DEVICE); -+ qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq); -+ } -+ } -+ if (flags & QMAN_INITFQ_FLAG_LOCAL) { -+ mcc->initfq.fqd.dest.channel = p->config->public_cfg.channel; -+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) { -+ mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ; -+ mcc->initfq.fqd.dest.wq = 4; -+ } -+ } -+ mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask); -+ cpu_to_hw_fqd(&mcc->initfq.fqd); -+ qm_mc_commit(&p->p, myverb); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); -+ res = mcr->result; -+ if (res != QM_MCR_RESULT_OK) { -+ FQUNLOCK(fq); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ return -EIO; -+ } -+ if (opts) { -+ if (opts->we_mask & QM_INITFQ_WE_FQCTRL) { -+ if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE) -+ fq_set(fq, QMAN_FQ_STATE_CGR_EN); -+ else -+ fq_clear(fq, QMAN_FQ_STATE_CGR_EN); -+ } -+ if (opts->we_mask & QM_INITFQ_WE_CGID) -+ fq->cgr_groupid = opts->fqd.cgid; -+ } -+ fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ? 
-+ qman_fq_state_sched : qman_fq_state_parked; -+ FQUNLOCK(fq); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ return 0; -+} -+EXPORT_SYMBOL(qman_init_fq); -+ -+int qman_schedule_fq(struct qman_fq *fq) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ int ret = 0; -+ u8 res; -+ -+ if (fq->state != qman_fq_state_parked) -+ return -EINVAL; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) -+ return -EINVAL; -+#endif -+ /* Issue a ALTERFQ_SCHED management command */ -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ FQLOCK(fq); -+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || -+ (fq->state != qman_fq_state_parked))) { -+ ret = -EBUSY; -+ goto out; -+ } -+ mcc = qm_mc_start(&p->p); -+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid); -+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED); -+ res = mcr->result; -+ if (res != QM_MCR_RESULT_OK) { -+ ret = -EIO; -+ goto out; -+ } -+ fq->state = qman_fq_state_sched; -+out: -+ FQUNLOCK(fq); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ return ret; -+} -+EXPORT_SYMBOL(qman_schedule_fq); -+ -+int qman_retire_fq(struct qman_fq *fq, u32 *flags) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ int rval; -+ u8 res; -+ -+ if ((fq->state != qman_fq_state_parked) && -+ (fq->state != qman_fq_state_sched)) -+ return -EINVAL; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) -+ return -EINVAL; -+#endif -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ FQLOCK(fq); -+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || -+ (fq->state == qman_fq_state_retired) || -+ (fq->state == qman_fq_state_oos))) { -+ rval = -EBUSY; -+ goto out; -+ } -+ rval = table_push_fq(p, fq); -+ if (rval) -+ goto out; -+ mcc = qm_mc_start(&p->p); -+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid); -+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE); -+ res = mcr->result; -+ /* "Elegant" would be to treat OK/PENDING the same way; set CHANGING, -+ * and defer the flags until FQRNI or FQRN (respectively) show up. But -+ * "Friendly" is to process OK immediately, and not set CHANGING. We do -+ * friendly, otherwise the caller doesn't necessarily have a fully -+ * "retired" FQ on return even if the retirement was immediate. However -+ * this does mean some code duplication between here and -+ * fq_state_change(). */ -+ if (likely(res == QM_MCR_RESULT_OK)) { -+ rval = 0; -+ /* Process 'fq' right away, we'll ignore FQRNI */ -+ if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) -+ fq_set(fq, QMAN_FQ_STATE_NE); -+ if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT) -+ fq_set(fq, QMAN_FQ_STATE_ORL); -+ else -+ table_del_fq(p, fq); -+ if (flags) -+ *flags = fq->flags; -+ fq->state = qman_fq_state_retired; -+ if (fq->cb.fqs) { -+ /* Another issue with supporting "immediate" retirement -+ * is that we're forced to drop FQRNIs, because by the -+ * time they're seen it may already be "too late" (the -+ * fq may have been OOS'd and free()'d already). 
But if -+ * the upper layer wants a callback whether it's -+ * immediate or not, we have to fake a "MR" entry to -+ * look like an FQRNI... */ -+ struct qm_mr_entry msg; -+ msg.verb = QM_MR_VERB_FQRNI; -+ msg.fq.fqs = mcr->alterfq.fqs; -+ msg.fq.fqid = fq->fqid; -+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP -+ msg.fq.contextB = fq->key; -+#else -+ msg.fq.contextB = (u32)(uintptr_t)fq; -+#endif -+ fq->cb.fqs(p, fq, &msg); -+ } -+ } else if (res == QM_MCR_RESULT_PENDING) { -+ rval = 1; -+ fq_set(fq, QMAN_FQ_STATE_CHANGING); -+ } else { -+ rval = -EIO; -+ table_del_fq(p, fq); -+ } -+out: -+ FQUNLOCK(fq); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ return rval; -+} -+EXPORT_SYMBOL(qman_retire_fq); -+ -+int qman_oos_fq(struct qman_fq *fq) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ int ret = 0; -+ u8 res; -+ -+ if (fq->state != qman_fq_state_retired) -+ return -EINVAL; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) -+ return -EINVAL; -+#endif -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ FQLOCK(fq); -+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) || -+ (fq->state != qman_fq_state_retired))) { -+ ret = -EBUSY; -+ goto out; -+ } -+ mcc = qm_mc_start(&p->p); -+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid); -+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS); -+ res = mcr->result; -+ if (res != QM_MCR_RESULT_OK) { -+ ret = -EIO; -+ goto out; -+ } -+ fq->state = qman_fq_state_oos; -+out: -+ FQUNLOCK(fq); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ return ret; -+} -+EXPORT_SYMBOL(qman_oos_fq); -+ -+int qman_fq_flow_control(struct qman_fq *fq, int xon) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ int ret = 0; -+ u8 res; -+ u8 myverb; -+ -+ if ((fq->state == qman_fq_state_oos) || -+ (fq->state == qman_fq_state_retired) || -+ (fq->state == qman_fq_state_parked)) -+ return -EINVAL; -+ -+#ifdef CONFIG_FSL_DPA_CHECKING -+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) -+ return -EINVAL; -+#endif -+ /* Issue a ALTER_FQXON or ALTER_FQXOFF management command */ -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ FQLOCK(fq); -+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || -+ (fq->state == qman_fq_state_parked) || -+ (fq->state == qman_fq_state_oos) || -+ (fq->state == qman_fq_state_retired))) { -+ ret = -EBUSY; -+ goto out; -+ } -+ mcc = qm_mc_start(&p->p); -+ mcc->alterfq.fqid = fq->fqid; -+ mcc->alterfq.count = 0; -+ myverb = xon ? 
QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF; -+ -+ qm_mc_commit(&p->p, myverb); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); -+ -+ res = mcr->result; -+ if (res != QM_MCR_RESULT_OK) { -+ ret = -EIO; -+ goto out; -+ } -+out: -+ FQUNLOCK(fq); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ return ret; -+} -+EXPORT_SYMBOL(qman_fq_flow_control); -+ -+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p = get_affine_portal(); -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ -+ PORTAL_IRQ_LOCK(p, irqflags); -+ mcc = qm_mc_start(&p->p); -+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid); -+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); -+ res = mcr->result; -+ if (res == QM_MCR_RESULT_OK) -+ memcpy_fromio(fqd, &mcr->queryfq.fqd, sizeof(*fqd)); -+ hw_fqd_to_cpu(fqd); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ if (res != QM_MCR_RESULT_OK) -+ return -EIO; -+ return 0; -+} -+EXPORT_SYMBOL(qman_query_fq); -+ -+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p = get_affine_portal(); -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ -+ PORTAL_IRQ_LOCK(p, irqflags); -+ mcc = qm_mc_start(&p->p); -+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid); -+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); -+ res = mcr->result; -+ if (res == QM_MCR_RESULT_OK) { -+ memcpy_fromio(np, &mcr->queryfq_np, sizeof(*np)); -+ np->fqd_link = be24_to_cpu(np->fqd_link); -+ np->odp_seq = be16_to_cpu(np->odp_seq); -+ np->orp_nesn = be16_to_cpu(np->orp_nesn); -+ np->orp_ea_hseq = be16_to_cpu(np->orp_ea_hseq); -+ np->orp_ea_tseq = be16_to_cpu(np->orp_ea_tseq); -+ np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr); -+ np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr); -+ np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr); -+ np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr); -+ np->ics_surp = be16_to_cpu(np->ics_surp); -+ np->byte_cnt = be32_to_cpu(np->byte_cnt); -+ np->frm_cnt = be24_to_cpu(np->frm_cnt); -+ np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr); -+ np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr); -+ np->od1_sfdr = be16_to_cpu(np->od1_sfdr); -+ np->od2_sfdr = be16_to_cpu(np->od2_sfdr); -+ np->od3_sfdr = be16_to_cpu(np->od3_sfdr); -+ -+ -+ } -+ -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ if (res == QM_MCR_RESULT_ERR_FQID) -+ return -ERANGE; -+ else if (res != QM_MCR_RESULT_OK) -+ return -EIO; -+ return 0; -+} -+EXPORT_SYMBOL(qman_query_fq_np); -+ -+int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p = get_affine_portal(); -+ unsigned long irqflags __maybe_unused; -+ u8 res, myverb; -+ -+ PORTAL_IRQ_LOCK(p, irqflags); -+ myverb = (query_dedicated) ? 
QM_MCR_VERB_QUERYWQ_DEDICATED : -+ QM_MCR_VERB_QUERYWQ; -+ mcc = qm_mc_start(&p->p); -+ mcc->querywq.channel.id = cpu_to_be16(wq->channel.id); -+ qm_mc_commit(&p->p, myverb); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); -+ res = mcr->result; -+ if (res == QM_MCR_RESULT_OK) { -+ int i, array_len; -+ wq->channel.id = be16_to_cpu(mcr->querywq.channel.id); -+ array_len = ARRAY_SIZE(mcr->querywq.wq_len); -+ for (i = 0; i < array_len; i++) -+ wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]); -+ } -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ if (res != QM_MCR_RESULT_OK) { -+ pr_err("QUERYWQ failed: %s\n", mcr_result_str(res)); -+ return -EIO; -+ } -+ return 0; -+} -+EXPORT_SYMBOL(qman_query_wq); -+ -+int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt, -+ struct qm_mcr_cgrtestwrite *result) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p = get_affine_portal(); -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ -+ PORTAL_IRQ_LOCK(p, irqflags); -+ mcc = qm_mc_start(&p->p); -+ mcc->cgrtestwrite.cgid = cgr->cgrid; -+ mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32); -+ mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt; -+ qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE); -+ res = mcr->result; -+ if (res == QM_MCR_RESULT_OK) -+ memcpy_fromio(result, &mcr->cgrtestwrite, sizeof(*result)); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ if (res != QM_MCR_RESULT_OK) { -+ pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res)); -+ return -EIO; -+ } -+ return 0; -+} -+EXPORT_SYMBOL(qman_testwrite_cgr); -+ -+int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p = get_affine_portal(); -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ int i; -+ -+ PORTAL_IRQ_LOCK(p, irqflags); -+ mcc = qm_mc_start(&p->p); -+ mcc->querycgr.cgid = cgr->cgrid; -+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR); -+ res = mcr->result; -+ if (res == QM_MCR_RESULT_OK) -+ memcpy_fromio(cgrd, &mcr->querycgr, sizeof(*cgrd)); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ if (res != QM_MCR_RESULT_OK) { -+ pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res)); -+ return -EIO; -+ } -+ cgrd->cgr.wr_parm_g.word = -+ be32_to_cpu(cgrd->cgr.wr_parm_g.word); -+ cgrd->cgr.wr_parm_y.word = -+ be32_to_cpu(cgrd->cgr.wr_parm_y.word); -+ cgrd->cgr.wr_parm_r.word = -+ be32_to_cpu(cgrd->cgr.wr_parm_r.word); -+ cgrd->cgr.cscn_targ = be32_to_cpu(cgrd->cgr.cscn_targ); -+ cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres); -+ for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++) -+ be32_to_cpus(&cgrd->cscn_targ_swp[i]); -+ return 0; -+} -+EXPORT_SYMBOL(qman_query_cgr); -+ -+int qman_query_congestion(struct qm_mcr_querycongestion *congestion) -+{ -+ struct qm_mc_result *mcr; -+ struct qman_portal *p = get_affine_portal(); -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ int i; -+ -+ PORTAL_IRQ_LOCK(p, irqflags); -+ qm_mc_start(&p->p); -+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == -+ QM_MCC_VERB_QUERYCONGESTION); -+ res = 
mcr->result; -+ if (res == QM_MCR_RESULT_OK) -+ memcpy_fromio(congestion, &mcr->querycongestion, -+ sizeof(*congestion)); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ if (res != QM_MCR_RESULT_OK) { -+ pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res)); -+ return -EIO; -+ } -+ -+ for (i = 0; i < ARRAY_SIZE(congestion->state.__state); i++) -+ be32_to_cpus(&congestion->state.__state[i]); -+ return 0; -+} -+EXPORT_SYMBOL(qman_query_congestion); -+ -+/* internal function used as a wait_event() expression */ -+static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr) -+{ -+ unsigned long irqflags __maybe_unused; -+ int ret = -EBUSY; -+ PORTAL_IRQ_LOCK(p, irqflags); -+ if (!p->vdqcr_owned) { -+ FQLOCK(fq); -+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) -+ goto escape; -+ fq_set(fq, QMAN_FQ_STATE_VDQCR); -+ FQUNLOCK(fq); -+ p->vdqcr_owned = fq; -+ ret = 0; -+ } -+escape: -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ if (!ret) -+ qm_dqrr_vdqcr_set(&p->p, vdqcr); -+ return ret; -+} -+ -+static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr) -+{ -+ int ret; -+ *p = get_affine_portal(); -+ ret = set_p_vdqcr(*p, fq, vdqcr); -+ put_affine_portal(); -+ return ret; -+} -+ -+#ifdef CONFIG_FSL_DPA_CAN_WAIT -+static int wait_p_vdqcr_start(struct qman_portal *p, struct qman_fq *fq, -+ u32 vdqcr, u32 flags) -+{ -+ int ret = 0; -+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) -+ ret = wait_event_interruptible(affine_queue, -+ !(ret = set_p_vdqcr(p, fq, vdqcr))); -+ else -+ wait_event(affine_queue, !(ret = set_p_vdqcr(p, fq, vdqcr))); -+ return ret; -+} -+ -+static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq, -+ u32 vdqcr, u32 flags) -+{ -+ int ret = 0; -+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) -+ ret = wait_event_interruptible(affine_queue, -+ !(ret = set_vdqcr(p, fq, vdqcr))); -+ else -+ wait_event(affine_queue, !(ret = set_vdqcr(p, fq, vdqcr))); -+ return ret; -+} -+#endif -+ -+int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq, -+ u32 flags __maybe_unused, u32 vdqcr) -+{ -+ int ret; -+ -+ if ((fq->state != qman_fq_state_parked) && -+ (fq->state != qman_fq_state_retired)) -+ return -EINVAL; -+ if (vdqcr & QM_VDQCR_FQID_MASK) -+ return -EINVAL; -+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) -+ return -EBUSY; -+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid; -+#ifdef CONFIG_FSL_DPA_CAN_WAIT -+ if (flags & QMAN_VOLATILE_FLAG_WAIT) -+ ret = wait_p_vdqcr_start(p, fq, vdqcr, flags); -+ else -+#endif -+ ret = set_p_vdqcr(p, fq, vdqcr); -+ if (ret) -+ return ret; -+ /* VDQCR is set */ -+#ifdef CONFIG_FSL_DPA_CAN_WAIT -+ if (flags & QMAN_VOLATILE_FLAG_FINISH) { -+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) -+ /* NB: don't propagate any error - the caller wouldn't -+ * know whether the VDQCR was issued or not. A signal -+ * could arrive after returning anyway, so the caller -+ * can check signal_pending() if that's an issue. 
*/ -+ wait_event_interruptible(affine_queue, -+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); -+ else -+ wait_event(affine_queue, -+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); -+ } -+#endif -+ return 0; -+} -+EXPORT_SYMBOL(qman_p_volatile_dequeue); -+ -+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused, -+ u32 vdqcr) -+{ -+ struct qman_portal *p; -+ int ret; -+ -+ if ((fq->state != qman_fq_state_parked) && -+ (fq->state != qman_fq_state_retired)) -+ return -EINVAL; -+ if (vdqcr & QM_VDQCR_FQID_MASK) -+ return -EINVAL; -+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) -+ return -EBUSY; -+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid; -+#ifdef CONFIG_FSL_DPA_CAN_WAIT -+ if (flags & QMAN_VOLATILE_FLAG_WAIT) -+ ret = wait_vdqcr_start(&p, fq, vdqcr, flags); -+ else -+#endif -+ ret = set_vdqcr(&p, fq, vdqcr); -+ if (ret) -+ return ret; -+ /* VDQCR is set */ -+#ifdef CONFIG_FSL_DPA_CAN_WAIT -+ if (flags & QMAN_VOLATILE_FLAG_FINISH) { -+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) -+ /* NB: don't propagate any error - the caller wouldn't -+ * know whether the VDQCR was issued or not. A signal -+ * could arrive after returning anyway, so the caller -+ * can check signal_pending() if that's an issue. */ -+ wait_event_interruptible(affine_queue, -+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); -+ else -+ wait_event(affine_queue, -+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); -+ } -+#endif -+ return 0; -+} -+EXPORT_SYMBOL(qman_volatile_dequeue); -+ -+static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail) -+{ -+ if (avail) -+ qm_eqcr_cce_prefetch(&p->p); -+ else -+ qm_eqcr_cce_update(&p->p); -+} -+ -+int qman_eqcr_is_empty(void) -+{ -+ unsigned long irqflags __maybe_unused; -+ struct qman_portal *p = get_affine_portal(); -+ u8 avail; -+ -+ PORTAL_IRQ_LOCK(p, irqflags); -+ update_eqcr_ci(p, 0); -+ avail = qm_eqcr_get_fill(&p->p); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ return avail == 0; -+} -+EXPORT_SYMBOL(qman_eqcr_is_empty); -+ -+void qman_set_dc_ern(qman_cb_dc_ern handler, int affine) -+{ -+ if (affine) { -+ unsigned long irqflags __maybe_unused; -+ struct qman_portal *p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ p->cb_dc_ern = handler; -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ } else -+ cb_dc_ern = handler; -+} -+EXPORT_SYMBOL(qman_set_dc_ern); -+ -+static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p, -+ unsigned long *irqflags __maybe_unused, -+ struct qman_fq *fq, -+ const struct qm_fd *fd, -+ u32 flags) -+{ -+ struct qm_eqcr_entry *eq; -+ u8 avail; -+ PORTAL_IRQ_LOCK(p, (*irqflags)); -+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC -+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && -+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { -+ if (p->eqci_owned) { -+ PORTAL_IRQ_UNLOCK(p, (*irqflags)); -+ return NULL; -+ } -+ p->eqci_owned = fq; -+ } -+#endif -+ if (p->use_eqcr_ci_stashing) { -+ /* -+ * The stashing case is easy, only update if we need to in -+ * order to try and liberate ring entries. -+ */ -+ eq = qm_eqcr_start_stash(&p->p); -+ } else { -+ /* -+ * The non-stashing case is harder, need to prefetch ahead of -+ * time. 
-+ */ -+ avail = qm_eqcr_get_avail(&p->p); -+ if (avail < 2) -+ update_eqcr_ci(p, avail); -+ eq = qm_eqcr_start_no_stash(&p->p); -+ } -+ -+ if (unlikely(!eq)) { -+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC -+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && -+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) -+ p->eqci_owned = NULL; -+#endif -+ PORTAL_IRQ_UNLOCK(p, (*irqflags)); -+ return NULL; -+ } -+ if (flags & QMAN_ENQUEUE_FLAG_DCA) -+ eq->dca = QM_EQCR_DCA_ENABLE | -+ ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ? -+ QM_EQCR_DCA_PARK : 0) | -+ ((flags >> 8) & QM_EQCR_DCA_IDXMASK); -+ eq->fqid = cpu_to_be32(fq->fqid); -+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP -+ eq->tag = cpu_to_be32(fq->key); -+#else -+ eq->tag = cpu_to_be32((u32)(uintptr_t)fq); -+#endif -+ eq->fd = *fd; -+ cpu_to_hw_fd(&eq->fd); -+ return eq; -+} -+ -+static inline struct qm_eqcr_entry *try_eq_start(struct qman_portal **p, -+ unsigned long *irqflags __maybe_unused, -+ struct qman_fq *fq, -+ const struct qm_fd *fd, -+ u32 flags) -+{ -+ struct qm_eqcr_entry *eq; -+ *p = get_affine_portal(); -+ eq = try_p_eq_start(*p, irqflags, fq, fd, flags); -+ if (!eq) -+ put_affine_portal(); -+ return eq; -+} -+ -+#ifdef CONFIG_FSL_DPA_CAN_WAIT -+static noinline struct qm_eqcr_entry *__wait_eq_start(struct qman_portal **p, -+ unsigned long *irqflags __maybe_unused, -+ struct qman_fq *fq, -+ const struct qm_fd *fd, -+ u32 flags) -+{ -+ struct qm_eqcr_entry *eq = try_eq_start(p, irqflags, fq, fd, flags); -+ if (!eq) -+ qm_eqcr_set_ithresh(&(*p)->p, EQCR_ITHRESH); -+ return eq; -+} -+static noinline struct qm_eqcr_entry *wait_eq_start(struct qman_portal **p, -+ unsigned long *irqflags __maybe_unused, -+ struct qman_fq *fq, -+ const struct qm_fd *fd, -+ u32 flags) -+{ -+ struct qm_eqcr_entry *eq; -+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) -+ /* NB: return NULL if signal occurs before completion. Signal -+ * can occur during return. Caller must check for signal */ -+ wait_event_interruptible(affine_queue, -+ (eq = __wait_eq_start(p, irqflags, fq, fd, flags))); -+ else -+ wait_event(affine_queue, -+ (eq = __wait_eq_start(p, irqflags, fq, fd, flags))); -+ return eq; -+} -+static noinline struct qm_eqcr_entry *__wait_p_eq_start(struct qman_portal *p, -+ unsigned long *irqflags __maybe_unused, -+ struct qman_fq *fq, -+ const struct qm_fd *fd, -+ u32 flags) -+{ -+ struct qm_eqcr_entry *eq = try_p_eq_start(p, irqflags, fq, fd, flags); -+ if (!eq) -+ qm_eqcr_set_ithresh(&p->p, EQCR_ITHRESH); -+ return eq; -+} -+static noinline struct qm_eqcr_entry *wait_p_eq_start(struct qman_portal *p, -+ unsigned long *irqflags __maybe_unused, -+ struct qman_fq *fq, -+ const struct qm_fd *fd, -+ u32 flags) -+{ -+ struct qm_eqcr_entry *eq; -+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) -+ /* NB: return NULL if signal occurs before completion. Signal -+ * can occur during return. 
Caller must check for signal */ -+ wait_event_interruptible(affine_queue, -+ (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags))); -+ else -+ wait_event(affine_queue, -+ (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags))); -+ return eq; -+} -+#endif -+ -+int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq, -+ const struct qm_fd *fd, u32 flags) -+{ -+ struct qm_eqcr_entry *eq; -+ unsigned long irqflags __maybe_unused; -+ -+#ifdef CONFIG_FSL_DPA_CAN_WAIT -+ if (flags & QMAN_ENQUEUE_FLAG_WAIT) -+ eq = wait_p_eq_start(p, &irqflags, fq, fd, flags); -+ else -+#endif -+ eq = try_p_eq_start(p, &irqflags, fq, fd, flags); -+ if (!eq) -+ return -EBUSY; -+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ -+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE | -+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); -+ /* Factor the below out, it's used from qman_enqueue_orp() too */ -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC -+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && -+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { -+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) -+ /* NB: return success even if signal occurs before -+ * condition is true. pvb_commit guarantees success */ -+ wait_event_interruptible(affine_queue, -+ (p->eqci_owned != fq)); -+ else -+ wait_event(affine_queue, (p->eqci_owned != fq)); -+ } -+#endif -+ return 0; -+} -+EXPORT_SYMBOL(qman_p_enqueue); -+ -+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags) -+{ -+ struct qman_portal *p; -+ struct qm_eqcr_entry *eq; -+ unsigned long irqflags __maybe_unused; -+ -+#ifdef CONFIG_FSL_DPA_CAN_WAIT -+ if (flags & QMAN_ENQUEUE_FLAG_WAIT) -+ eq = wait_eq_start(&p, &irqflags, fq, fd, flags); -+ else -+#endif -+ eq = try_eq_start(&p, &irqflags, fq, fd, flags); -+ if (!eq) -+ return -EBUSY; -+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ -+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE | -+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); -+ /* Factor the below out, it's used from qman_enqueue_orp() too */ -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC -+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && -+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { -+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) -+ /* NB: return success even if signal occurs before -+ * condition is true. 
pvb_commit guarantees success */ -+ wait_event_interruptible(affine_queue, -+ (p->eqci_owned != fq)); -+ else -+ wait_event(affine_queue, (p->eqci_owned != fq)); -+ } -+#endif -+ return 0; -+} -+EXPORT_SYMBOL(qman_enqueue); -+ -+int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq, -+ const struct qm_fd *fd, u32 flags, -+ struct qman_fq *orp, u16 orp_seqnum) -+{ -+ struct qm_eqcr_entry *eq; -+ unsigned long irqflags __maybe_unused; -+ -+#ifdef CONFIG_FSL_DPA_CAN_WAIT -+ if (flags & QMAN_ENQUEUE_FLAG_WAIT) -+ eq = wait_p_eq_start(p, &irqflags, fq, fd, flags); -+ else -+#endif -+ eq = try_p_eq_start(p, &irqflags, fq, fd, flags); -+ if (!eq) -+ return -EBUSY; -+ /* Process ORP-specifics here */ -+ if (flags & QMAN_ENQUEUE_FLAG_NLIS) -+ orp_seqnum |= QM_EQCR_SEQNUM_NLIS; -+ else { -+ orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS; -+ if (flags & QMAN_ENQUEUE_FLAG_NESN) -+ orp_seqnum |= QM_EQCR_SEQNUM_NESN; -+ else -+ /* No need to check for QMAN_ENQUEUE_FLAG_HOLE */ -+ orp_seqnum &= ~QM_EQCR_SEQNUM_NESN; -+ } -+ eq->seqnum = cpu_to_be16(orp_seqnum); -+ eq->orp = cpu_to_be32(orp->fqid); -+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ -+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP | -+ ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ? -+ 0 : QM_EQCR_VERB_CMD_ENQUEUE) | -+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC -+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && -+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { -+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) -+ /* NB: return success even if signal occurs before -+ * condition is true. pvb_commit guarantees success */ -+ wait_event_interruptible(affine_queue, -+ (p->eqci_owned != fq)); -+ else -+ wait_event(affine_queue, (p->eqci_owned != fq)); -+ } -+#endif -+ return 0; -+} -+EXPORT_SYMBOL(qman_p_enqueue_orp); -+ -+int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags, -+ struct qman_fq *orp, u16 orp_seqnum) -+{ -+ struct qman_portal *p; -+ struct qm_eqcr_entry *eq; -+ unsigned long irqflags __maybe_unused; -+ -+#ifdef CONFIG_FSL_DPA_CAN_WAIT -+ if (flags & QMAN_ENQUEUE_FLAG_WAIT) -+ eq = wait_eq_start(&p, &irqflags, fq, fd, flags); -+ else -+#endif -+ eq = try_eq_start(&p, &irqflags, fq, fd, flags); -+ if (!eq) -+ return -EBUSY; -+ /* Process ORP-specifics here */ -+ if (flags & QMAN_ENQUEUE_FLAG_NLIS) -+ orp_seqnum |= QM_EQCR_SEQNUM_NLIS; -+ else { -+ orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS; -+ if (flags & QMAN_ENQUEUE_FLAG_NESN) -+ orp_seqnum |= QM_EQCR_SEQNUM_NESN; -+ else -+ /* No need to check for QMAN_ENQUEUE_FLAG_HOLE */ -+ orp_seqnum &= ~QM_EQCR_SEQNUM_NESN; -+ } -+ eq->seqnum = cpu_to_be16(orp_seqnum); -+ eq->orp = cpu_to_be32(orp->fqid); -+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ -+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP | -+ ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ? -+ 0 : QM_EQCR_VERB_CMD_ENQUEUE) | -+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC -+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && -+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { -+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) -+ /* NB: return success even if signal occurs before -+ * condition is true.
pvb_commit guarantees success */ -+ wait_event_interruptible(affine_queue, -+ (p->eqci_owned != fq)); -+ else -+ wait_event(affine_queue, (p->eqci_owned != fq)); -+ } -+#endif -+ return 0; -+} -+EXPORT_SYMBOL(qman_enqueue_orp); -+ -+int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq, -+ const struct qm_fd *fd, u32 flags, -+ qman_cb_precommit cb, void *cb_arg) -+{ -+ struct qm_eqcr_entry *eq; -+ unsigned long irqflags __maybe_unused; -+ -+#ifdef CONFIG_FSL_DPA_CAN_WAIT -+ if (flags & QMAN_ENQUEUE_FLAG_WAIT) -+ eq = wait_p_eq_start(p, &irqflags, fq, fd, flags); -+ else -+#endif -+ eq = try_p_eq_start(p, &irqflags, fq, fd, flags); -+ if (!eq) -+ return -EBUSY; -+ /* invoke user supplied callback function before writing commit verb */ -+ if (cb(cb_arg)) { -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ return -EINVAL; -+ } -+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ -+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE | -+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); -+ /* Factor the below out, it's used from qman_enqueue_orp() too */ -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC -+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && -+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { -+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) -+ /* NB: return success even if signal occurs before -+ * condition is true. pvb_commit guarantees success */ -+ wait_event_interruptible(affine_queue, -+ (p->eqci_owned != fq)); -+ else -+ wait_event(affine_queue, (p->eqci_owned != fq)); -+ } -+#endif -+ return 0; -+} -+EXPORT_SYMBOL(qman_p_enqueue_precommit); -+ -+int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd, -+ u32 flags, qman_cb_precommit cb, void *cb_arg) -+{ -+ struct qman_portal *p; -+ struct qm_eqcr_entry *eq; -+ unsigned long irqflags __maybe_unused; -+ -+#ifdef CONFIG_FSL_DPA_CAN_WAIT -+ if (flags & QMAN_ENQUEUE_FLAG_WAIT) -+ eq = wait_eq_start(&p, &irqflags, fq, fd, flags); -+ else -+#endif -+ eq = try_eq_start(&p, &irqflags, fq, fd, flags); -+ if (!eq) -+ return -EBUSY; -+ /* invoke user supplied callback function before writing commit verb */ -+ if (cb(cb_arg)) { -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ return -EINVAL; -+ } -+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ -+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE | -+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); -+ /* Factor the below out, it's used from qman_enqueue_orp() too */ -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC -+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && -+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { -+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) -+ /* NB: return success even if signal occurs before -+ * condition is true. 
pvb_commit guarantees success */ -+ wait_event_interruptible(affine_queue, -+ (p->eqci_owned != fq)); -+ else -+ wait_event(affine_queue, (p->eqci_owned != fq)); -+ } -+#endif -+ return 0; -+} -+EXPORT_SYMBOL(qman_enqueue_precommit); -+ -+int qman_modify_cgr(struct qman_cgr *cgr, u32 flags, -+ struct qm_mcc_initcgr *opts) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p = get_affine_portal(); -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ u8 verb = QM_MCC_VERB_MODIFYCGR; -+ -+ PORTAL_IRQ_LOCK(p, irqflags); -+ mcc = qm_mc_start(&p->p); -+ if (opts) -+ mcc->initcgr = *opts; -+ mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask); -+ mcc->initcgr.cgr.wr_parm_g.word = -+ cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word); -+ mcc->initcgr.cgr.wr_parm_y.word = -+ cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word); -+ mcc->initcgr.cgr.wr_parm_r.word = -+ cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word); -+ mcc->initcgr.cgr.cscn_targ = cpu_to_be32(mcc->initcgr.cgr.cscn_targ); -+ mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres); -+ -+ mcc->initcgr.cgid = cgr->cgrid; -+ if (flags & QMAN_CGR_FLAG_USE_INIT) -+ verb = QM_MCC_VERB_INITCGR; -+ qm_mc_commit(&p->p, verb); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb); -+ res = mcr->result; -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ return (res == QM_MCR_RESULT_OK) ? 0 : -EIO; -+} -+EXPORT_SYMBOL(qman_modify_cgr); -+ -+#define TARG_MASK(n) (0x80000000 >> (n->config->public_cfg.channel - \ -+ QM_CHANNEL_SWPORTAL0)) -+#define TARG_DCP_MASK(n) (0x80000000 >> (10 + n)) -+#define PORTAL_IDX(n) (n->config->public_cfg.channel - QM_CHANNEL_SWPORTAL0) -+ -+static u8 qman_cgr_cpus[__CGR_NUM]; -+ -+int qman_create_cgr(struct qman_cgr *cgr, u32 flags, -+ struct qm_mcc_initcgr *opts) -+{ -+ unsigned long irqflags __maybe_unused; -+ struct qm_mcr_querycgr cgr_state; -+ struct qm_mcc_initcgr local_opts; -+ int ret; -+ struct qman_portal *p; -+ -+ /* We have to check that the provided CGRID is within the limits of the -+ * data-structures, for obvious reasons. However we'll let h/w take -+ * care of determining whether it's within the limits of what exists on -+ * the SoC. 
*/ -+ if (cgr->cgrid >= __CGR_NUM) -+ return -EINVAL; -+ -+ preempt_disable(); -+ p = get_affine_portal(); -+ qman_cgr_cpus[cgr->cgrid] = smp_processor_id(); -+ preempt_enable(); -+ -+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr)); -+ cgr->chan = p->config->public_cfg.channel; -+ spin_lock_irqsave(&p->cgr_lock, irqflags); -+ -+ /* if no opts specified, just add it to the list */ -+ if (!opts) -+ goto add_list; -+ -+ ret = qman_query_cgr(cgr, &cgr_state); -+ if (ret) -+ goto release_lock; -+ if (opts) -+ local_opts = *opts; -+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) -+ local_opts.cgr.cscn_targ_upd_ctrl = -+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p); -+ else -+ /* Overwrite TARG */ -+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ | -+ TARG_MASK(p); -+ local_opts.we_mask |= QM_CGR_WE_CSCN_TARG; -+ -+ /* send init if flags indicate so */ -+ if (opts && (flags & QMAN_CGR_FLAG_USE_INIT)) -+ ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts); -+ else -+ ret = qman_modify_cgr(cgr, 0, &local_opts); -+ if (ret) -+ goto release_lock; -+add_list: -+ list_add(&cgr->node, &p->cgr_cbs); -+ -+ /* Determine if newly added object requires its callback to be called */ -+ ret = qman_query_cgr(cgr, &cgr_state); -+ if (ret) { -+ /* we can't go back, so proceed and return success, but scream -+ * and wail to the log file */ -+ pr_crit("CGR HW state partially modified\n"); -+ ret = 0; -+ goto release_lock; -+ } -+ if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1], -+ cgr->cgrid)) -+ cgr->cb(p, cgr, 1); -+release_lock: -+ spin_unlock_irqrestore(&p->cgr_lock, irqflags); -+ put_affine_portal(); -+ return ret; -+} -+EXPORT_SYMBOL(qman_create_cgr); -+ -+int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal, -+ struct qm_mcc_initcgr *opts) -+{ -+ unsigned long irqflags __maybe_unused; -+ struct qm_mcc_initcgr local_opts; -+ struct qm_mcr_querycgr cgr_state; -+ int ret; -+ -+ /* We have to check that the provided CGRID is within the limits of the -+ * data-structures, for obvious reasons. However we'll let h/w take -+ * care of determining whether it's within the limits of what exists on -+ * the SoC.
-+ */ -+ if (cgr->cgrid >= __CGR_NUM) -+ return -EINVAL; -+ -+ ret = qman_query_cgr(cgr, &cgr_state); -+ if (ret) -+ return ret; -+ -+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr)); -+ if (opts) -+ local_opts = *opts; -+ -+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) -+ local_opts.cgr.cscn_targ_upd_ctrl = -+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT | -+ QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal; -+ else -+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ | -+ TARG_DCP_MASK(dcp_portal); -+ local_opts.we_mask |= QM_CGR_WE_CSCN_TARG; -+ -+ /* send init if flags indicate so */ -+ if (opts && (flags & QMAN_CGR_FLAG_USE_INIT)) -+ ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, -+ &local_opts); -+ else -+ ret = qman_modify_cgr(cgr, 0, &local_opts); -+ -+ return ret; -+} -+EXPORT_SYMBOL(qman_create_cgr_to_dcp); -+ -+int qman_delete_cgr(struct qman_cgr *cgr) -+{ -+ unsigned long irqflags __maybe_unused; -+ struct qm_mcr_querycgr cgr_state; -+ struct qm_mcc_initcgr local_opts; -+ int ret = 0; -+ struct qman_cgr *i; -+ struct qman_portal *p = get_affine_portal(); -+ -+ if (cgr->chan != p->config->public_cfg.channel) { -+ pr_crit("Attempting to delete cgr from a different portal " -+ "than it was created on: created 0x%x, deleting 0x%x\n", -+ cgr->chan, p->config->public_cfg.channel); -+ ret = -EINVAL; -+ goto put_portal; -+ } -+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr)); -+ spin_lock_irqsave(&p->cgr_lock, irqflags); -+ list_del(&cgr->node); -+ /* -+ * If there are no other CGR objects for this CGRID in the list, update -+ * CSCN_TARG accordingly -+ */ -+ list_for_each_entry(i, &p->cgr_cbs, node) -+ if ((i->cgrid == cgr->cgrid) && i->cb) -+ goto release_lock; -+ ret = qman_query_cgr(cgr, &cgr_state); -+ if (ret) { -+ /* add back to the list */ -+ list_add(&cgr->node, &p->cgr_cbs); -+ goto release_lock; -+ } -+ /* Overwrite TARG */ -+ local_opts.we_mask = QM_CGR_WE_CSCN_TARG; -+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) -+ local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p); -+ else -+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ & -+ ~(TARG_MASK(p)); -+ ret = qman_modify_cgr(cgr, 0, &local_opts); -+ if (ret) -+ /* add back to the list */ -+ list_add(&cgr->node, &p->cgr_cbs); -+release_lock: -+ spin_unlock_irqrestore(&p->cgr_lock, irqflags); -+put_portal: -+ put_affine_portal(); -+ return ret; -+} -+EXPORT_SYMBOL(qman_delete_cgr); -+ -+struct cgr_comp { -+ struct qman_cgr *cgr; -+ struct completion completion; -+}; -+ -+static int qman_delete_cgr_thread(void *p) -+{ -+ struct cgr_comp *cgr_comp = (struct cgr_comp *)p; -+ int res; -+ -+ res = qman_delete_cgr((struct qman_cgr *)cgr_comp->cgr); -+ complete(&cgr_comp->completion); -+ -+ return res; -+} -+ -+void qman_delete_cgr_safe(struct qman_cgr *cgr) -+{ -+ struct task_struct *thread; -+ struct cgr_comp cgr_comp; -+ -+ preempt_disable(); -+ if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) { -+ init_completion(&cgr_comp.completion); -+ cgr_comp.cgr = cgr; -+ thread = kthread_create(qman_delete_cgr_thread, &cgr_comp, -+ "cgr_del"); -+ -+ if (likely(!IS_ERR(thread))) { -+ kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]); -+ wake_up_process(thread); -+ wait_for_completion(&cgr_comp.completion); -+ preempt_enable(); -+ return; -+ } -+ } -+ qman_delete_cgr(cgr); -+ preempt_enable(); -+} -+EXPORT_SYMBOL(qman_delete_cgr_safe); -+ -+int qm_get_clock(u64 *clock_hz) -+{ -+ if (!qman_clk) { -+ pr_warn("Qman clock speed is unknown\n"); -+ return -EINVAL; -+ } -+ *clock_hz = (u64)qman_clk; -+ return 0; -+} -+EXPORT_SYMBOL(qm_get_clock); -+ -+int
qm_set_clock(u64 clock_hz) -+{ -+ if (qman_clk) -+ return -1; -+ qman_clk = (u32)clock_hz; -+ return 0; -+} -+EXPORT_SYMBOL(qm_set_clock); -+ -+/* CEETM management command */ -+static int qman_ceetm_configure_lfqmt(struct qm_mcc_ceetm_lfqmt_config *opts) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ -+ mcc = qm_mc_start(&p->p); -+ mcc->lfqmt_config = *opts; -+ qm_mc_commit(&p->p, QM_CEETM_VERB_LFQMT_CONFIG); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == -+ QM_CEETM_VERB_LFQMT_CONFIG); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ -+ res = mcr->result; -+ if (res != QM_MCR_RESULT_OK) { -+ pr_err("CEETM: CONFIGURE LFQMT failed\n"); -+ return -EIO; -+ } -+ return 0; -+} -+ -+int qman_ceetm_query_lfqmt(int lfqid, -+ struct qm_mcr_ceetm_lfqmt_query *lfqmt_query) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ -+ mcc = qm_mc_start(&p->p); -+ mcc->lfqmt_query.lfqid = lfqid; -+ qm_mc_commit(&p->p, QM_CEETM_VERB_LFQMT_QUERY); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_LFQMT_QUERY); -+ res = mcr->result; -+ if (res == QM_MCR_RESULT_OK) -+ *lfqmt_query = mcr->lfqmt_query; -+ -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ if (res != QM_MCR_RESULT_OK) { -+ pr_err("CEETM: QUERY LFQMT failed\n"); -+ return -EIO; -+ } -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_query_lfqmt); -+ -+static int qman_ceetm_configure_cq(struct qm_mcc_ceetm_cq_config *opts) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ -+ mcc = qm_mc_start(&p->p); -+ mcc->cq_config = *opts; -+ qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_CONFIG); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ res = mcr->result; -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CQ_CONFIG); -+ -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ -+ if (res != QM_MCR_RESULT_OK) { -+ pr_err("CEETM: CONFIGURE CQ failed\n"); -+ return -EIO; -+ } -+ return 0; -+} -+ -+int qman_ceetm_query_cq(unsigned int cqid, unsigned int dcpid, -+ struct qm_mcr_ceetm_cq_query *cq_query) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ -+ mcc = qm_mc_start(&p->p); -+ mcc->cq_query.cqid = cpu_to_be16(cqid); -+ mcc->cq_query.dcpid = dcpid; -+ qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_QUERY); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CQ_QUERY); -+ res = mcr->result; -+ if (res == QM_MCR_RESULT_OK) { -+ *cq_query = mcr->cq_query; -+ hw_cq_query_to_cpu(cq_query); -+ } -+ -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ -+ if (res != QM_MCR_RESULT_OK) { -+ pr_err("CEETM: QUERY CQ failed\n"); -+ return -EIO; -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_query_cq); -+ -+static int qman_ceetm_configure_dct(struct qm_mcc_ceetm_dct_config *opts) -+{ -+ struct qm_mc_command *mcc; -+ 
struct qm_mc_result *mcr; -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ -+ mcc = qm_mc_start(&p->p); -+ mcc->dct_config = *opts; -+ qm_mc_commit(&p->p, QM_CEETM_VERB_DCT_CONFIG); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_DCT_CONFIG); -+ res = mcr->result; -+ -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ -+ if (res != QM_MCR_RESULT_OK) { -+ pr_err("CEETM: CONFIGURE DCT failed\n"); -+ return -EIO; -+ } -+ return 0; -+} -+ -+static int qman_ceetm_query_dct(struct qm_mcc_ceetm_dct_query *opts, -+ struct qm_mcr_ceetm_dct_query *dct_query) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p = get_affine_portal(); -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ -+ PORTAL_IRQ_LOCK(p, irqflags); -+ -+ mcc = qm_mc_start(&p->p); -+ mcc->dct_query = *opts; -+ qm_mc_commit(&p->p, QM_CEETM_VERB_DCT_QUERY); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_DCT_QUERY); -+ res = mcr->result; -+ -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ -+ if (res != QM_MCR_RESULT_OK) { -+ pr_err("CEETM: QUERY DCT failed\n"); -+ return -EIO; -+ } -+ -+ *dct_query = mcr->dct_query; -+ return 0; -+} -+ -+static int qman_ceetm_configure_class_scheduler( -+ struct qm_mcc_ceetm_class_scheduler_config *opts) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ -+ mcc = qm_mc_start(&p->p); -+ mcc->csch_config = *opts; -+ qm_mc_commit(&p->p, QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == -+ QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG); -+ res = mcr->result; -+ -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ -+ if (res != QM_MCR_RESULT_OK) { -+ pr_err("CEETM: CONFIGURE CLASS SCHEDULER failed\n"); -+ return -EIO; -+ } -+ return 0; -+} -+ -+static int qman_ceetm_query_class_scheduler(struct qm_ceetm_channel *channel, -+ struct qm_mcr_ceetm_class_scheduler_query *query) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ -+ mcc = qm_mc_start(&p->p); -+ mcc->csch_query.cqcid = channel->idx; -+ mcc->csch_query.dcpid = channel->dcp_idx; -+ qm_mc_commit(&p->p, QM_CEETM_VERB_CLASS_SCHEDULER_QUERY); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == -+ QM_CEETM_VERB_CLASS_SCHEDULER_QUERY); -+ res = mcr->result; -+ -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ -+ if (res != QM_MCR_RESULT_OK) { -+ pr_err("CEETM: QUERY CLASS SCHEDULER failed\n"); -+ return -EIO; -+ } -+ *query = mcr->csch_query; -+ return 0; -+} -+ -+static int qman_ceetm_configure_mapping_shaper_tcfc( -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config *opts) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ -+ mcc = qm_mc_start(&p->p); -+ mcc->mst_config = *opts; -+ qm_mc_commit(&p->p, QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG); 
-+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == -+ QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG); -+ res = mcr->result; -+ -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ -+ if (res != QM_MCR_RESULT_OK) { -+ pr_err("CEETM: CONFIGURE CHANNEL MAPPING failed\n"); -+ return -EIO; -+ } -+ return 0; -+} -+ -+static int qman_ceetm_query_mapping_shaper_tcfc( -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query *opts, -+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query *response) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ -+ mcc = qm_mc_start(&p->p); -+ mcc->mst_query = *opts; -+ qm_mc_commit(&p->p, QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == -+ QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY); -+ res = mcr->result; -+ -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ -+ if (res != QM_MCR_RESULT_OK) { -+ pr_err("CEETM: QUERY CHANNEL MAPPING failed\n"); -+ return -EIO; -+ } -+ -+ *response = mcr->mst_query; -+ return 0; -+} -+ -+static int qman_ceetm_configure_ccgr(struct qm_mcc_ceetm_ccgr_config *opts) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ -+ mcc = qm_mc_start(&p->p); -+ mcc->ccgr_config = *opts; -+ -+ qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_CONFIG); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CCGR_CONFIG); -+ -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ -+ res = mcr->result; -+ if (res != QM_MCR_RESULT_OK) { -+ pr_err("CEETM: CONFIGURE CCGR failed\n"); -+ return -EIO; -+ } -+ return 0; -+} -+ -+int qman_ceetm_query_ccgr(struct qm_mcc_ceetm_ccgr_query *ccgr_query, -+ struct qm_mcr_ceetm_ccgr_query *response) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ -+ mcc = qm_mc_start(&p->p); -+ mcc->ccgr_query.ccgrid = cpu_to_be16(ccgr_query->ccgrid); -+ mcc->ccgr_query.dcpid = ccgr_query->dcpid; -+ qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY); -+ -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CCGR_QUERY); -+ res = mcr->result; -+ if (res == QM_MCR_RESULT_OK) { -+ *response = mcr->ccgr_query; -+ hw_ccgr_query_to_cpu(response); -+ } -+ -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ if (res != QM_MCR_RESULT_OK) { -+ pr_err("CEETM: QUERY CCGR failed\n"); -+ return -EIO; -+ } -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_query_ccgr); -+ -+static int qman_ceetm_cq_peek_pop_xsfdrread(struct qm_ceetm_cq *cq, -+ u8 command_type, u16 xsfdr, -+ struct qm_mcr_ceetm_cq_peek_pop_xsfdrread *cq_ppxr) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ -+ mcc = qm_mc_start(&p->p); -+ switch (command_type) { -+ case 0: -+ case 1: -+ mcc->cq_ppxr.cqid = (cq->parent->idx << 4) | cq->idx; -+ break; -+ case 2: -+ mcc->cq_ppxr.xsfdr = xsfdr; -+ break; 
-+ default: -+ break; -+ } -+ mcc->cq_ppxr.ct = command_type; -+ mcc->cq_ppxr.dcpid = cq->parent->dcp_idx; -+ qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD); -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == -+ QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD); -+ -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ -+ res = mcr->result; -+ if (res != QM_MCR_RESULT_OK) { -+ pr_err("CEETM: CQ PEEK/POP/XSFDR READ failed\n"); -+ return -EIO; -+ } -+ *cq_ppxr = mcr->cq_ppxr; -+ return 0; -+} -+ -+static int qman_ceetm_query_statistics(u16 cid, -+ enum qm_dc_portal dcp_idx, -+ u16 command_type, -+ struct qm_mcr_ceetm_statistics_query *query_result) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ -+ mcc = qm_mc_start(&p->p); -+ mcc->stats_query_write.cid = cid; -+ mcc->stats_query_write.dcpid = dcp_idx; -+ mcc->stats_query_write.ct = command_type; -+ qm_mc_commit(&p->p, QM_CEETM_VERB_STATISTICS_QUERY_WRITE); -+ -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == -+ QM_CEETM_VERB_STATISTICS_QUERY_WRITE); -+ -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ -+ res = mcr->result; -+ if (res != QM_MCR_RESULT_OK) { -+ pr_err("CEETM: STATISTICS QUERY failed\n"); -+ return -EIO; -+ } -+ *query_result = mcr->stats_query; -+ return 0; -+} -+ -+int qman_ceetm_query_write_statistics(u16 cid, enum qm_dc_portal dcp_idx, -+ u16 command_type, u64 frame_count, -+ u64 byte_count) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ -+ mcc = qm_mc_start(&p->p); -+ mcc->stats_query_write.cid = cid; -+ mcc->stats_query_write.dcpid = dcp_idx; -+ mcc->stats_query_write.ct = command_type; -+ mcc->stats_query_write.frm_cnt = frame_count; -+ mcc->stats_query_write.byte_cnt = byte_count; -+ qm_mc_commit(&p->p, QM_CEETM_VERB_STATISTICS_QUERY_WRITE); -+ -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == -+ QM_CEETM_VERB_STATISTICS_QUERY_WRITE); -+ -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ -+ res = mcr->result; -+ if (res != QM_MCR_RESULT_OK) { -+ pr_err("CEETM: STATISTICS WRITE failed\n"); -+ return -EIO; -+ } -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_query_write_statistics); -+ -+int qman_ceetm_bps2tokenrate(u64 bps, struct qm_ceetm_rate *token_rate, -+ int rounding) -+{ -+ u16 pres; -+ u64 temp; -+ u64 qman_freq; -+ int ret; -+ -+ /* Read PRES from CEET_CFG_PRES register */ -+ ret = qman_ceetm_get_prescaler(&pres); -+ if (ret) -+ return -EINVAL; -+ -+ ret = qm_get_clock(&qman_freq); -+ if (ret) -+ return -EINVAL; -+ -+ /* token-rate = bytes-per-second * update-reference-period -+ * -+ * Where token-rate is N/8192 for an integer N, and -+ * update-reference-period is (2^22)/(PRES*QHz), where PRES -+ * is the prescaler value and QHz is the QMan clock frequency.
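-+ * -+ * (Worked example with illustrative numbers, not taken from this -+ * patch: PRES = 1000 and a 500MHz QMan clock give an update period -+ * of 2^22/(5*10^11) ~= 8.39us; with the formula for N derived below, -+ * bps = 10^9 yields N = (10^9 * 2^32)/(5*10^11) ~= 8589935, so -+ * whole = N >> 13 = 1048 and fraction = N & 0x1FFF = 4719, i.e. -+ * ~1048.58 bytes per update period.)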
-+ * So: -+ * -+ * token-rate = (bytes-per-second*2^22)/(PRES*QHz) -+ * -+ * Converting to bits-per-second gives: -+ * -+ * token-rate = (bps*2^19) / (PRES*QHz) -+ * N = (bps*2^32) / (PRES*QHz) -+ * -+ * And to avoid 64-bit overflow if 'bps' is larger than 4Gbps -+ * (yet minimise rounding error if 'bps' is small), we reorganise -+ * the formula to use two 16-bit shifts rather than one 32-bit shift. -+ * N = (((bps*2^16)/PRES)*2^16)/QHz -+ */ -+ temp = ROUNDING((bps << 16), pres, rounding); -+ temp = ROUNDING((temp << 16), qman_freq, rounding); -+ token_rate->whole = temp >> 13; -+ token_rate->fraction = temp & (((u64)1 << 13) - 1); -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_bps2tokenrate); -+ -+int qman_ceetm_tokenrate2bps(const struct qm_ceetm_rate *token_rate, u64 *bps, -+ int rounding) -+{ -+ u16 pres; -+ u64 temp; -+ u64 qman_freq; -+ int ret; -+ -+ /* Read PRES from CEET_CFG_PRES register */ -+ ret = qman_ceetm_get_prescaler(&pres); -+ if (ret) -+ return -EINVAL; -+ -+ ret = qm_get_clock(&qman_freq); -+ if (ret) -+ return -EINVAL; -+ -+ /* bytes-per-second = token-rate / update-reference-period -+ * -+ * where "token-rate" is N/8192 for an integer N, and -+ * "update-reference-period" is (2^22)/(PRES*QHz), where PRES is -+ * the prescaler value and QHz is the QMan clock frequency. So: -+ * -+ * bytes-per-second = (N/8192) / (4194304/(PRES*QHz)) -+ * = N*PRES*QHz / (4194304*8192) -+ * = N*PRES*QHz / (2^35) -+ * -+ * Converting to bits-per-second gives: -+ * -+ * bps = N*PRES*QHz / (2^32) -+ * -+ * Note, the numerator has a maximum width of 72 bits! So to -+ * avoid 64-bit overflow errors, we calculate PRES*QHz (maximum -+ * width 48 bits) divided by 2^16 (reducing it to a maximum of 32 -+ * bits), before multiplying by N (going to a maximum of 61 bits). -+ * -+ * temp = PRES*QHz / (2^16) -+ * bps = temp*N / (2^16) -+ */ -+ temp = ROUNDING(qman_freq * pres, (u64)1 << 16, rounding); -+ temp *= ((token_rate->whole << 13) + token_rate->fraction); -+ *bps = ROUNDING(temp, (u64)(1) << 16, rounding); -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_tokenrate2bps); -+ -+int qman_ceetm_sp_claim(struct qm_ceetm_sp **sp, enum qm_dc_portal dcp_idx, -+ unsigned int sp_idx) -+{ -+ struct qm_ceetm_sp *p; -+ -+ DPA_ASSERT((dcp_idx == qm_dc_portal_fman0) || -+ (dcp_idx == qm_dc_portal_fman1)); -+ -+ if ((sp_idx < qman_ceetms[dcp_idx].sp_range[0]) || -+ (sp_idx > (qman_ceetms[dcp_idx].sp_range[0] + -+ qman_ceetms[dcp_idx].sp_range[1]))) { -+ pr_err("Sub-portal index doesn't exist\n"); -+ return -EINVAL; -+ } -+ -+ list_for_each_entry(p, &qman_ceetms[dcp_idx].sub_portals, node) { -+ if ((p->idx == sp_idx) && (p->is_claimed == 0)) { -+ p->is_claimed = 1; -+ *sp = p; -+ return 0; -+ } -+ } -+ pr_err("The sub-portal#%d is not available!\n", sp_idx); -+ return -ENODEV; -+} -+EXPORT_SYMBOL(qman_ceetm_sp_claim); -+ -+int qman_ceetm_sp_release(struct qm_ceetm_sp *sp) -+{ -+ struct qm_ceetm_sp *p; -+ -+ if (sp->lni && sp->lni->is_claimed == 1) { -+ pr_err("The sub-portal's LNI dependency has not been released!\n"); -+ return -EBUSY; -+ } -+ -+ list_for_each_entry(p, &qman_ceetms[sp->dcp_idx].sub_portals, node) { -+ if (p->idx == sp->idx) { -+ p->is_claimed = 0; -+ p->lni = NULL; -+ } -+ } -+ /* Disable CEETM mode of this sub-portal */ -+ qman_sp_disable_ceetm_mode(sp->dcp_idx, sp->idx); -+ -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_sp_release); -+ -+int qman_ceetm_lni_claim(struct qm_ceetm_lni **lni, enum qm_dc_portal dcp_idx, -+ unsigned int lni_idx) -+{ -+ struct qm_ceetm_lni *p; -+ -+ if ((lni_idx <
qman_ceetms[dcp_idx].lni_range[0]) || -+ (lni_idx > (qman_ceetms[dcp_idx].lni_range[0] + -+ qman_ceetms[dcp_idx].lni_range[1]))) { -+ pr_err("The lni index is out of range\n"); -+ return -EINVAL; -+ } -+ -+ list_for_each_entry(p, &qman_ceetms[dcp_idx].lnis, node) { -+ if ((p->idx == lni_idx) && (p->is_claimed == 0)) { -+ *lni = p; -+ p->is_claimed = 1; -+ return 0; -+ } -+ } -+ -+ pr_err("The LNI#%d is not available!\n", lni_idx); -+ return -EINVAL; -+} -+EXPORT_SYMBOL(qman_ceetm_lni_claim); -+ -+int qman_ceetm_lni_release(struct qm_ceetm_lni *lni) -+{ -+ struct qm_ceetm_lni *p; -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; -+ -+ if (!list_empty(&lni->channels)) { -+ pr_err("The LNI dependencies are not released!\n"); -+ return -EBUSY; -+ } -+ -+ list_for_each_entry(p, &qman_ceetms[lni->dcp_idx].lnis, node) { -+ if (p->idx == lni->idx) { -+ p->shaper_enable = 0; -+ p->shaper_couple = 0; -+ p->cr_token_rate.whole = 0; -+ p->cr_token_rate.fraction = 0; -+ p->er_token_rate.whole = 0; -+ p->er_token_rate.fraction = 0; -+ p->cr_token_bucket_limit = 0; -+ p->er_token_bucket_limit = 0; -+ p->is_claimed = 0; -+ } -+ } -+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx); -+ config_opts.dcpid = lni->dcp_idx; -+ memset(&config_opts.shaper_config, 0, -+ sizeof(config_opts.shaper_config)); -+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); -+} -+EXPORT_SYMBOL(qman_ceetm_lni_release); -+ -+int qman_ceetm_sp_set_lni(struct qm_ceetm_sp *sp, struct qm_ceetm_lni *lni) -+{ -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; -+ -+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_SP_MAPPING | sp->idx); -+ config_opts.dcpid = sp->dcp_idx; -+ config_opts.sp_mapping.map_lni_id = lni->idx; -+ sp->lni = lni; -+ -+ if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) -+ return -EINVAL; -+ -+ /* Enable CEETM mode for this sub-portal */ -+ return qman_sp_enable_ceetm_mode(sp->dcp_idx, sp->idx); -+} -+EXPORT_SYMBOL(qman_ceetm_sp_set_lni); -+ -+int qman_ceetm_sp_get_lni(struct qm_ceetm_sp *sp, unsigned int *lni_idx) -+{ -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; -+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; -+ -+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_SP_MAPPING | sp->idx); -+ query_opts.dcpid = sp->dcp_idx; -+ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) { -+ pr_err("Can't get SP <-> LNI mapping\n"); -+ return -EINVAL; -+ } -+ *lni_idx = query_result.sp_mapping_query.map_lni_id; -+ sp->lni->idx = query_result.sp_mapping_query.map_lni_id; -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_sp_get_lni); -+ -+int qman_ceetm_lni_enable_shaper(struct qm_ceetm_lni *lni, int coupled, -+ int oal) -+{ -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; -+ -+ lni->shaper_enable = 1; -+ lni->shaper_couple = coupled; -+ lni->oal = oal; -+ -+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx); -+ config_opts.dcpid = lni->dcp_idx; -+ config_opts.shaper_config.cpl = coupled; -+ config_opts.shaper_config.oal = oal; -+ config_opts.shaper_config.crtcr = cpu_to_be24((lni->cr_token_rate.whole -+ << 13) | lni->cr_token_rate.fraction); -+ config_opts.shaper_config.ertcr = cpu_to_be24((lni->er_token_rate.whole -+ << 13) | lni->er_token_rate.fraction); -+ config_opts.shaper_config.crtbl = -+ cpu_to_be16(lni->cr_token_bucket_limit); -+ config_opts.shaper_config.ertbl = -+ cpu_to_be16(lni->er_token_bucket_limit); -+ config_opts.shaper_config.mps = 60; -+ -+ return 
qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); -+} -+EXPORT_SYMBOL(qman_ceetm_lni_enable_shaper); -+ -+int qman_ceetm_lni_disable_shaper(struct qm_ceetm_lni *lni) -+{ -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; -+ -+ if (!lni->shaper_enable) { -+ pr_err("The shaper is already disabled\n"); -+ return -EINVAL; -+ } -+ -+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx); -+ config_opts.dcpid = lni->dcp_idx; -+ config_opts.shaper_config.cpl = lni->shaper_couple; -+ config_opts.shaper_config.oal = lni->oal; -+ config_opts.shaper_config.crtbl = -+ cpu_to_be16(lni->cr_token_bucket_limit); -+ config_opts.shaper_config.ertbl = -+ cpu_to_be16(lni->er_token_bucket_limit); -+ /* Set CR/ER rate with all 1's to configure an infinite rate, thus -+ * disabling the shaping. -+ */ -+ config_opts.shaper_config.crtcr = 0xFFFFFF; -+ config_opts.shaper_config.ertcr = 0xFFFFFF; -+ config_opts.shaper_config.mps = 60; -+ lni->shaper_enable = 0; -+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); -+} -+EXPORT_SYMBOL(qman_ceetm_lni_disable_shaper); -+ -+int qman_ceetm_lni_is_shaper_enabled(struct qm_ceetm_lni *lni) -+{ -+ return lni->shaper_enable; -+} -+EXPORT_SYMBOL(qman_ceetm_lni_is_shaper_enabled); -+ -+int qman_ceetm_lni_set_commit_rate(struct qm_ceetm_lni *lni, -+ const struct qm_ceetm_rate *token_rate, -+ u16 token_limit) -+{ -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; -+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; -+ int ret; -+ -+ lni->cr_token_rate.whole = token_rate->whole; -+ lni->cr_token_rate.fraction = token_rate->fraction; -+ lni->cr_token_bucket_limit = token_limit; -+ if (!lni->shaper_enable) -+ return 0; -+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx); -+ query_opts.dcpid = lni->dcp_idx; -+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, -+ &query_result); -+ if (ret) { -+ pr_err("Failed to get current LNI shaper setting\n"); -+ return -EINVAL; -+ } -+ -+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx); -+ config_opts.dcpid = lni->dcp_idx; -+ config_opts.shaper_config.crtcr = cpu_to_be24((token_rate->whole << 13) -+ | (token_rate->fraction)); -+ config_opts.shaper_config.crtbl = cpu_to_be16(token_limit); -+ config_opts.shaper_config.cpl = query_result.shaper_query.cpl; -+ config_opts.shaper_config.oal = query_result.shaper_query.oal; -+ config_opts.shaper_config.ertcr = query_result.shaper_query.ertcr; -+ config_opts.shaper_config.ertbl = query_result.shaper_query.ertbl; -+ config_opts.shaper_config.mps = query_result.shaper_query.mps; -+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); -+} -+EXPORT_SYMBOL(qman_ceetm_lni_set_commit_rate); -+ -+int qman_ceetm_lni_set_commit_rate_bps(struct qm_ceetm_lni *lni, -+ u64 bps, -+ u16 token_limit) -+{ -+ struct qm_ceetm_rate token_rate; -+ int ret; -+ -+ ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0); -+ if (ret) { -+ pr_err("Cannot convert bps to token rate\n"); -+ return -EINVAL; -+ } -+ -+ return qman_ceetm_lni_set_commit_rate(lni, &token_rate, token_limit); -+} -+EXPORT_SYMBOL(qman_ceetm_lni_set_commit_rate_bps); -+ -+int qman_ceetm_lni_get_commit_rate(struct qm_ceetm_lni *lni, -+ struct qm_ceetm_rate *token_rate, -+ u16 *token_limit) -+{ -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; -+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; -+ int ret; -+ -+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx); -+ query_opts.dcpid = lni->dcp_idx; -+ -+ ret =
qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); -+ if (ret) { -+ pr_err("The LNI CR rate or limit is not set\n"); -+ return -EINVAL; -+ } -+ token_rate->whole = be24_to_cpu(query_result.shaper_query.crtcr) >> 13; -+ token_rate->fraction = be24_to_cpu(query_result.shaper_query.crtcr) & -+ 0x1FFF; -+ *token_limit = be16_to_cpu(query_result.shaper_query.crtbl); -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_lni_get_commit_rate); -+ -+int qman_ceetm_lni_get_commit_rate_bps(struct qm_ceetm_lni *lni, -+ u64 *bps, u16 *token_limit) -+{ -+ struct qm_ceetm_rate token_rate; -+ int ret; -+ -+ ret = qman_ceetm_lni_get_commit_rate(lni, &token_rate, token_limit); -+ if (ret) { -+ pr_err("The LNI CR rate or limit is not available\n"); -+ return -EINVAL; -+ } -+ -+ return qman_ceetm_tokenrate2bps(&token_rate, bps, 0); -+} -+EXPORT_SYMBOL(qman_ceetm_lni_get_commit_rate_bps); -+ -+int qman_ceetm_lni_set_excess_rate(struct qm_ceetm_lni *lni, -+ const struct qm_ceetm_rate *token_rate, -+ u16 token_limit) -+{ -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; -+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; -+ int ret; -+ -+ lni->er_token_rate.whole = token_rate->whole; -+ lni->er_token_rate.fraction = token_rate->fraction; -+ lni->er_token_bucket_limit = token_limit; -+ if (!lni->shaper_enable) -+ return 0; -+ -+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx); -+ query_opts.dcpid = lni->dcp_idx; -+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, -+ &query_result); -+ if (ret) { -+ pr_err("Failed to get current LNI shaper setting\n"); -+ return -EINVAL; -+ } -+ -+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx); -+ config_opts.dcpid = lni->dcp_idx; -+ config_opts.shaper_config.ertcr = cpu_to_be24( -+ (token_rate->whole << 13) | (token_rate->fraction)); -+ config_opts.shaper_config.ertbl = cpu_to_be16(token_limit); -+ config_opts.shaper_config.cpl = query_result.shaper_query.cpl; -+ config_opts.shaper_config.oal = query_result.shaper_query.oal; -+ config_opts.shaper_config.crtcr = query_result.shaper_query.crtcr; -+ config_opts.shaper_config.crtbl = query_result.shaper_query.crtbl; -+ config_opts.shaper_config.mps = query_result.shaper_query.mps; -+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); -+} -+EXPORT_SYMBOL(qman_ceetm_lni_set_excess_rate); -+ -+int qman_ceetm_lni_set_excess_rate_bps(struct qm_ceetm_lni *lni, -+ u64 bps, -+ u16 token_limit) -+{ -+ struct qm_ceetm_rate token_rate; -+ int ret; -+ -+ ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0); -+ if (ret) { -+ pr_err("Cannot convert bps to token rate\n"); -+ return -EINVAL; -+ } -+ return qman_ceetm_lni_set_excess_rate(lni, &token_rate, token_limit); -+} -+EXPORT_SYMBOL(qman_ceetm_lni_set_excess_rate_bps); -+ -+int qman_ceetm_lni_get_excess_rate(struct qm_ceetm_lni *lni, -+ struct qm_ceetm_rate *token_rate, -+ u16 *token_limit) -+{ -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; -+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; -+ int ret; -+ -+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx); -+ query_opts.dcpid = lni->dcp_idx; -+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); -+ if (ret) { -+ pr_err("The LNI ER rate or limit is not set\n"); -+ return -EINVAL; -+ } -+ token_rate->whole = be24_to_cpu(query_result.shaper_query.ertcr) >> 13; -+ token_rate->fraction = be24_to_cpu(query_result.shaper_query.ertcr) & -+
0x1FFF; -+ *token_limit = be16_to_cpu(query_result.shaper_query.ertbl); -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_lni_get_excess_rate); -+ -+int qman_ceetm_lni_get_excess_rate_bps(struct qm_ceetm_lni *lni, -+ u64 *bps, u16 *token_limit) -+{ -+ struct qm_ceetm_rate token_rate; -+ int ret; -+ -+ ret = qman_ceetm_lni_get_excess_rate(lni, &token_rate, token_limit); -+ if (ret) { -+ pr_err("The LNI ER rate or limit is not available\n"); -+ return -EINVAL; -+ } -+ -+ return qman_ceetm_tokenrate2bps(&token_rate, bps, 0); -+} -+EXPORT_SYMBOL(qman_ceetm_lni_get_excess_rate_bps); -+ -+#define QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(n) ((15 - (n)) * 4) -+#define QMAN_CEETM_LNITCFCC_ENABLE 0x8 -+int qman_ceetm_lni_set_tcfcc(struct qm_ceetm_lni *lni, -+ unsigned int cq_level, -+ int traffic_class) -+{ -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; -+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; -+ u64 lnitcfcc; -+ -+ if ((cq_level > 15) || (traffic_class > 7)) { -+ pr_err("The CQ or traffic class id is out of range\n"); -+ return -EINVAL; -+ } -+ -+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_TCFC | lni->idx); -+ query_opts.dcpid = lni->dcp_idx; -+ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) { -+ pr_err("Failed to query tcfcc\n"); -+ return -EINVAL; -+ } -+ -+ lnitcfcc = be64_to_cpu(query_result.tcfc_query.lnitcfcc); -+ if (traffic_class == -1) { -+ /* disable tcfc for this CQ */ -+ lnitcfcc &= ~((u64)QMAN_CEETM_LNITCFCC_ENABLE << -+ QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level)); -+ } else { -+ lnitcfcc &= ~((u64)0xF << -+ QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level)); -+ lnitcfcc |= ((u64)(QMAN_CEETM_LNITCFCC_ENABLE | -+ traffic_class)) << -+ QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level); -+ } -+ config_opts.tcfc_config.lnitcfcc = cpu_to_be64(lnitcfcc); -+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_TCFC | lni->idx); -+ config_opts.dcpid = lni->dcp_idx; -+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); -+} -+EXPORT_SYMBOL(qman_ceetm_lni_set_tcfcc); -+ -+#define QMAN_CEETM_LNITCFCC_TC_MASK 0x7 -+int qman_ceetm_lni_get_tcfcc(struct qm_ceetm_lni *lni, unsigned int cq_level, -+ int *traffic_class) -+{ -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; -+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; -+ int ret; -+ u8 lnitcfcc; -+ -+ if (cq_level > 15) { -+ pr_err("The CQ level is out of range\n"); -+ return -EINVAL; -+ } -+ -+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_TCFC | lni->idx); -+ query_opts.dcpid = lni->dcp_idx; -+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); -+ if (ret) -+ return ret; -+ lnitcfcc = (u8)(be64_to_cpu(query_result.tcfc_query.lnitcfcc) >> -+ QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level)); -+ if (lnitcfcc & QMAN_CEETM_LNITCFCC_ENABLE) -+ *traffic_class = lnitcfcc & QMAN_CEETM_LNITCFCC_TC_MASK; -+ else -+ *traffic_class = -1; -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_lni_get_tcfcc); -+ -+int qman_ceetm_channel_claim(struct qm_ceetm_channel **channel, -+ struct qm_ceetm_lni *lni) -+{ -+ struct qm_ceetm_channel *p; -+ u32 channel_idx; -+ int ret = 0; -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; -+ -+ if (lni->dcp_idx == qm_dc_portal_fman0) { -+ ret = qman_alloc_ceetm0_channel(&channel_idx); -+ } else if (lni->dcp_idx == qm_dc_portal_fman1) { -+ ret = qman_alloc_ceetm1_channel(&channel_idx); -+ } else { -+ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n", -+ lni->dcp_idx);
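-+ /* Note: only the two FMan direct-connect portals (fman0/fman1) -+ * carry CEETM instances, which is why the channel id allocators -+ * above are per-DCP and any other dcp_idx is rejected here. -+ */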
-+ return -EINVAL; -+ } -+ -+ if (ret) { -+ pr_err("There is no channel available for LNI#%d\n", lni->idx); -+ return -ENODEV; -+ } -+ -+ p = kzalloc(sizeof(*p), GFP_KERNEL); -+ if (!p) { -+ if (lni->dcp_idx == qm_dc_portal_fman0) -+ qman_release_ceetm0_channelid(channel_idx); -+ else -+ qman_release_ceetm1_channelid(channel_idx); -+ return -ENOMEM; -+ } -+ p->idx = channel_idx; -+ p->dcp_idx = lni->dcp_idx; -+ list_add_tail(&p->node, &lni->channels); -+ INIT_LIST_HEAD(&p->class_queues); -+ INIT_LIST_HEAD(&p->ccgs); -+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING | -+ channel_idx); -+ config_opts.dcpid = lni->dcp_idx; -+ config_opts.channel_mapping.map_lni_id = lni->idx; -+ config_opts.channel_mapping.map_shaped = 0; -+ if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) { -+ pr_err("Can't map channel#%d for LNI#%d\n", -+ channel_idx, lni->idx); -+ /* don't leave a half-initialised channel on the LNI's list */ -+ list_del(&p->node); -+ kfree(p); -+ if (lni->dcp_idx == qm_dc_portal_fman0) -+ qman_release_ceetm0_channelid(channel_idx); -+ else -+ qman_release_ceetm1_channelid(channel_idx); -+ return -EINVAL; -+ } -+ *channel = p; -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_channel_claim); -+ -+int qman_ceetm_channel_release(struct qm_ceetm_channel *channel) -+{ -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; -+ -+ if (!list_empty(&channel->class_queues)) { -+ pr_err("CEETM channel#%d has unreleased class queues!\n", -+ channel->idx); -+ return -EBUSY; -+ } -+ if (!list_empty(&channel->ccgs)) { -+ pr_err("CEETM channel#%d has unreleased ccgs!\n", -+ channel->idx); -+ return -EBUSY; -+ } -+ -+ /* Validate that channel->dcp_idx corresponds to a known FMan */ -+ if ((channel->dcp_idx != qm_dc_portal_fman0) && -+ (channel->dcp_idx != qm_dc_portal_fman1)) { -+ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n", -+ channel->dcp_idx); -+ return -EINVAL; -+ } -+ -+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER | -+ channel->idx); -+ config_opts.dcpid = channel->dcp_idx; -+ memset(&config_opts.shaper_config, 0, -+ sizeof(config_opts.shaper_config)); -+ if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) { -+ pr_err("Can't reset channel shaping parameters\n"); -+ return -EINVAL; -+ } -+ -+ if (channel->dcp_idx == qm_dc_portal_fman0) { -+ qman_release_ceetm0_channelid(channel->idx); -+ } else if (channel->dcp_idx == qm_dc_portal_fman1) { -+ qman_release_ceetm1_channelid(channel->idx); -+ } else { -+ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n", -+ channel->dcp_idx); -+ return -EINVAL; -+ } -+ list_del(&channel->node); -+ kfree(channel); -+ -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_channel_release); -+ -+int qman_ceetm_channel_enable_shaper(struct qm_ceetm_channel *channel, -+ int coupled) -+{ -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; -+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; -+ -+ if (channel->shaper_enable == 1) { -+ pr_err("This channel shaper is already enabled!\n"); -+ return -EINVAL; -+ } -+ -+ channel->shaper_enable = 1; -+ channel->shaper_couple = coupled; -+ -+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING | -+ channel->idx); -+ query_opts.dcpid = (u8)channel->dcp_idx; -+ -+ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) { -+ pr_err("Can't query channel mapping\n"); -+ return -EINVAL; -+ } -+ -+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING | -+ channel->idx); -+ config_opts.dcpid = channel->dcp_idx; -+ config_opts.channel_mapping.map_lni_id = -+ query_result.channel_mapping_query.map_lni_id; -+ config_opts.channel_mapping.map_shaped = 1; -+ if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) { -+ pr_err("Can't enable shaper for channel #%d\n", channel->idx); -+ return -EINVAL; -+ } -+ -+ config_opts.cid =
cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER | -+ channel->idx); -+ config_opts.shaper_config.cpl = coupled; -+ config_opts.shaper_config.crtcr = cpu_to_be24((channel->cr_token_rate. -+ whole << 13) | -+ channel->cr_token_rate.fraction); -+ config_opts.shaper_config.ertcr = cpu_to_be24((channel->er_token_rate. -+ whole << 13) | -+ channel->er_token_rate.fraction); -+ config_opts.shaper_config.crtbl = -+ cpu_to_be16(channel->cr_token_bucket_limit); -+ config_opts.shaper_config.ertbl = -+ cpu_to_be16(channel->er_token_bucket_limit); -+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); -+} -+EXPORT_SYMBOL(qman_ceetm_channel_enable_shaper); -+ -+int qman_ceetm_channel_disable_shaper(struct qm_ceetm_channel *channel) -+{ -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; -+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; -+ -+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING | -+ channel->idx); -+ query_opts.dcpid = channel->dcp_idx; -+ -+ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) { -+ pr_err("Can't query channel mapping\n"); -+ return -EINVAL; -+ } -+ -+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING | -+ channel->idx); -+ config_opts.dcpid = channel->dcp_idx; -+ config_opts.channel_mapping.map_shaped = 0; -+ config_opts.channel_mapping.map_lni_id = -+ query_result.channel_mapping_query.map_lni_id; -+ -+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); -+} -+EXPORT_SYMBOL(qman_ceetm_channel_disable_shaper); -+ -+int qman_ceetm_channel_is_shaper_enabled(struct qm_ceetm_channel *channel) -+{ -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; -+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; -+ -+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING | -+ channel->idx); -+ query_opts.dcpid = channel->dcp_idx; -+ -+ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) { -+ pr_err("Can't query channel mapping\n"); -+ return -EINVAL; -+ } -+ -+ return query_result.channel_mapping_query.map_shaped; -+} -+EXPORT_SYMBOL(qman_ceetm_channel_is_shaper_enabled); -+ -+int qman_ceetm_channel_set_commit_rate(struct qm_ceetm_channel *channel, -+ const struct qm_ceetm_rate *token_rate, -+ u16 token_limit) -+{ -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; -+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; -+ int ret; -+ -+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER | -+ channel->idx); -+ query_opts.dcpid = channel->dcp_idx; -+ -+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); -+ if (ret) { -+ pr_err("Failed to get the current channel shaper setting\n"); -+ return -EINVAL; -+ } -+ -+ channel->cr_token_rate.whole = token_rate->whole; -+ channel->cr_token_rate.fraction = token_rate->fraction; -+ channel->cr_token_bucket_limit = token_limit; -+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER | -+ channel->idx); -+ config_opts.dcpid = channel->dcp_idx; -+ config_opts.shaper_config.crtcr = cpu_to_be24((token_rate->whole -+ << 13) | (token_rate->fraction)); -+ config_opts.shaper_config.crtbl = cpu_to_be16(token_limit); -+ config_opts.shaper_config.cpl = query_result.shaper_query.cpl; -+ config_opts.shaper_config.ertcr = query_result.shaper_query.ertcr; -+ config_opts.shaper_config.ertbl = query_result.shaper_query.ertbl; -+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); -+}
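-+ /* Illustrative call sequence (a sketch only, not part of the original -+ * patch): with a channel from qman_ceetm_channel_claim() and its -+ * shaper enabled, CR/ER can be programmed straight from bps: -+ * -+ * err = qman_ceetm_channel_enable_shaper(ch, 1); -+ * if (!err) -+ * err = qman_ceetm_channel_set_commit_rate_bps(ch, -+ * 100000000ULL, 0x1000); -+ * if (!err) -+ * err = qman_ceetm_channel_set_excess_rate_bps(ch, -+ * 200000000ULL, 0x1000); -+ * -+ * 'ch' and the 0x1000 token bucket limit are hypothetical values. -+ */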
-+EXPORT_SYMBOL(qman_ceetm_channel_set_commit_rate); -+ -+int qman_ceetm_channel_set_commit_rate_bps(struct qm_ceetm_channel *channel, -+ u64 bps, u16 token_limit) -+{ -+ struct qm_ceetm_rate token_rate; -+ int ret; -+ -+ ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0); -+ if (ret) { -+ pr_err("Cannot convert bps to token rate\n"); -+ return -EINVAL; -+ } -+ return qman_ceetm_channel_set_commit_rate(channel, &token_rate, -+ token_limit); -+} -+EXPORT_SYMBOL(qman_ceetm_channel_set_commit_rate_bps); -+ -+int qman_ceetm_channel_get_commit_rate(struct qm_ceetm_channel *channel, -+ struct qm_ceetm_rate *token_rate, -+ u16 *token_limit) -+{ -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; -+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; -+ int ret; -+ -+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER | -+ channel->idx); -+ query_opts.dcpid = channel->dcp_idx; -+ -+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); -+ if (ret || !query_result.shaper_query.crtcr || -+ !query_result.shaper_query.crtbl) { -+ pr_err("The channel commit rate or limit is not set\n"); -+ return -EINVAL; -+ } -+ token_rate->whole = be24_to_cpu(query_result.shaper_query.crtcr) >> 13; -+ token_rate->fraction = be24_to_cpu(query_result.shaper_query.crtcr) & -+ 0x1FFF; -+ *token_limit = be16_to_cpu(query_result.shaper_query.crtbl); -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_channel_get_commit_rate); -+ -+int qman_ceetm_channel_get_commit_rate_bps(struct qm_ceetm_channel *channel, -+ u64 *bps, u16 *token_limit) -+{ -+ struct qm_ceetm_rate token_rate; -+ int ret; -+ -+ ret = qman_ceetm_channel_get_commit_rate(channel, &token_rate, -+ token_limit); -+ if (ret) { -+ pr_err("The channel CR rate or limit is not available\n"); -+ return -EINVAL; -+ } -+ -+ return qman_ceetm_tokenrate2bps(&token_rate, bps, 0); -+} -+EXPORT_SYMBOL(qman_ceetm_channel_get_commit_rate_bps); -+ -+int qman_ceetm_channel_set_excess_rate(struct qm_ceetm_channel *channel, -+ const struct qm_ceetm_rate *token_rate, -+ u16 token_limit) -+{ -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; -+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; -+ int ret; -+ -+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER | -+ channel->idx); -+ query_opts.dcpid = channel->dcp_idx; -+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); -+ if (ret) { -+ pr_err("Failed to get the current channel shaper setting\n"); -+ return -EINVAL; -+ } -+ -+ channel->er_token_rate.whole = token_rate->whole; -+ channel->er_token_rate.fraction = token_rate->fraction; -+ channel->er_token_bucket_limit = token_limit; -+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER | -+ channel->idx); -+ config_opts.dcpid = channel->dcp_idx; -+ config_opts.shaper_config.ertcr = cpu_to_be24( -+ (token_rate->whole << 13) | (token_rate->fraction)); -+ config_opts.shaper_config.ertbl = cpu_to_be16(token_limit); -+ config_opts.shaper_config.cpl = query_result.shaper_query.cpl; -+ config_opts.shaper_config.crtcr = query_result.shaper_query.crtcr; -+ config_opts.shaper_config.crtbl = query_result.shaper_query.crtbl; -+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); -+} -+EXPORT_SYMBOL(qman_ceetm_channel_set_excess_rate); -+ -+int qman_ceetm_channel_set_excess_rate_bps(struct qm_ceetm_channel *channel, -+ u64 bps, u16 token_limit) -+{ -+ struct qm_ceetm_rate token_rate; -+ int ret; -+ -+ ret =
qman_ceetm_bps2tokenrate(bps, &token_rate, 0); -+ if (ret) { -+ pr_err("Cannot convert bps to token rate\n"); -+ return -EINVAL; -+ } -+ return qman_ceetm_channel_set_excess_rate(channel, &token_rate, -+ token_limit); -+} -+EXPORT_SYMBOL(qman_ceetm_channel_set_excess_rate_bps); -+ -+int qman_ceetm_channel_get_excess_rate(struct qm_ceetm_channel *channel, -+ struct qm_ceetm_rate *token_rate, -+ u16 *token_limit) -+{ -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; -+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; -+ int ret; -+ -+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER | -+ channel->idx); -+ query_opts.dcpid = channel->dcp_idx; -+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); -+ if (ret || !query_result.shaper_query.ertcr || -+ !query_result.shaper_query.ertbl) { -+ pr_err("The channel excess rate or limit is not set\n"); -+ return -EINVAL; -+ } -+ token_rate->whole = be24_to_cpu(query_result.shaper_query.ertcr) >> 13; -+ token_rate->fraction = be24_to_cpu(query_result.shaper_query.ertcr) & -+ 0x1FFF; -+ *token_limit = be16_to_cpu(query_result.shaper_query.ertbl); -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_channel_get_excess_rate); -+ -+int qman_ceetm_channel_get_excess_rate_bps(struct qm_ceetm_channel *channel, -+ u64 *bps, u16 *token_limit) -+{ -+ struct qm_ceetm_rate token_rate; -+ int ret; -+ -+ ret = qman_ceetm_channel_get_excess_rate(channel, &token_rate, -+ token_limit); -+ if (ret) { -+ pr_err("The channel ER rate or limit is not available\n"); -+ return -EINVAL; -+ } -+ -+ return qman_ceetm_tokenrate2bps(&token_rate, bps, 0); -+} -+EXPORT_SYMBOL(qman_ceetm_channel_get_excess_rate_bps); -+ -+int qman_ceetm_channel_set_weight(struct qm_ceetm_channel *channel, -+ u16 token_limit) -+{ -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; -+ -+ if (channel->shaper_enable) { -+ pr_err("Cannot set the weight of a shaped channel\n"); -+ return -EINVAL; -+ } -+ -+ /* only crtbl is meaningful for an unshaped channel; zero the rest */ -+ memset(&config_opts, 0, sizeof(config_opts)); -+ channel->cr_token_bucket_limit = token_limit; -+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER | -+ channel->idx); -+ config_opts.dcpid = channel->dcp_idx; -+ config_opts.shaper_config.crtbl = cpu_to_be16(token_limit); -+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); -+} -+EXPORT_SYMBOL(qman_ceetm_channel_set_weight); -+ -+int qman_ceetm_channel_get_weight(struct qm_ceetm_channel *channel, -+ u16 *token_limit) -+{ -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; -+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; -+ int ret; -+ -+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER | -+ channel->idx); -+ query_opts.dcpid = channel->dcp_idx; -+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); -+ if (ret || !query_result.shaper_query.crtbl) { -+ pr_err("This unshaped channel's uFQ weight is unavailable\n"); -+ return -EINVAL; -+ } -+ *token_limit = be16_to_cpu(query_result.shaper_query.crtbl); -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_channel_get_weight); -+ -+int qman_ceetm_channel_set_group(struct qm_ceetm_channel *channel, int group_b, -+ unsigned int prio_a, unsigned int prio_b) -+{ -+ struct qm_mcc_ceetm_class_scheduler_config config_opts; -+ struct qm_mcr_ceetm_class_scheduler_query query_result; -+ int i; -+ -+ if (prio_a > 7) { -+ pr_err("The priority of group A is out of range\n"); -+ return -EINVAL; -+ } -+ if (group_b && (prio_b > 7)) { -+ pr_err("The priority of group B is out of range\n"); -+ return -EINVAL; -+ } -+ -+ if (qman_ceetm_query_class_scheduler(channel,
&query_result)) { -+ pr_err("Can't query channel#%d's scheduler!\n", channel->idx); -+ return -EINVAL; -+ } -+ -+ config_opts.cqcid = cpu_to_be16(channel->idx); -+ config_opts.dcpid = channel->dcp_idx; -+ config_opts.gpc_combine_flag = !group_b; -+ config_opts.gpc_prio_a = prio_a; -+ config_opts.gpc_prio_b = prio_b; -+ -+ for (i = 0; i < 8; i++) -+ config_opts.w[i] = query_result.w[i]; -+ config_opts.crem = query_result.crem; -+ config_opts.erem = query_result.erem; -+ -+ return qman_ceetm_configure_class_scheduler(&config_opts); -+} -+EXPORT_SYMBOL(qman_ceetm_channel_set_group); -+ -+int qman_ceetm_channel_get_group(struct qm_ceetm_channel *channel, int *group_b, -+ unsigned int *prio_a, unsigned int *prio_b) -+{ -+ struct qm_mcr_ceetm_class_scheduler_query query_result; -+ -+ if (qman_ceetm_query_class_scheduler(channel, &query_result)) { -+ pr_err("Can't query channel#%d's scheduler!\n", channel->idx); -+ return -EINVAL; -+ } -+ *group_b = !query_result.gpc_combine_flag; -+ *prio_a = query_result.gpc_prio_a; -+ *prio_b = query_result.gpc_prio_b; -+ -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_channel_get_group); -+ -+#define GROUP_A_ELIGIBILITY_SET (1 << 8) -+#define GROUP_B_ELIGIBILITY_SET (1 << 9) -+#define CQ_ELIGIBILITY_SET(n) (1 << (7 - n)) -+int qman_ceetm_channel_set_group_cr_eligibility(struct qm_ceetm_channel -+ *channel, int group_b, int cre) -+{ -+ struct qm_mcc_ceetm_class_scheduler_config csch_config; -+ struct qm_mcr_ceetm_class_scheduler_query csch_query; -+ int i; -+ -+ if (qman_ceetm_query_class_scheduler(channel, &csch_query)) { -+ pr_err("Cannot get the channel %d scheduler setting.\n", -+ channel->idx); -+ return -EINVAL; -+ } -+ csch_config.cqcid = cpu_to_be16(channel->idx); -+ csch_config.dcpid = channel->dcp_idx; -+ csch_config.gpc_combine_flag = csch_query.gpc_combine_flag; -+ csch_config.gpc_prio_a = csch_query.gpc_prio_a; -+ csch_config.gpc_prio_b = csch_query.gpc_prio_b; -+ -+ for (i = 0; i < 8; i++) -+ csch_config.w[i] = csch_query.w[i]; -+ csch_config.erem = csch_query.erem; -+ if (group_b) -+ csch_config.crem = (be16_to_cpu(csch_query.crem) -+ & ~GROUP_B_ELIGIBILITY_SET) -+ | (cre ? GROUP_B_ELIGIBILITY_SET : 0); -+ else -+ csch_config.crem = (be16_to_cpu(csch_query.crem) -+ & ~GROUP_A_ELIGIBILITY_SET) -+ | (cre ? GROUP_A_ELIGIBILITY_SET : 0); -+ -+ csch_config.crem = cpu_to_be16(csch_config.crem); -+ -+ if (qman_ceetm_configure_class_scheduler(&csch_config)) { -+ pr_err("Cannot config channel %d's scheduler with " -+ "group_%c's cr eligibility\n", channel->idx, -+ group_b ? 
'b' : 'a'); -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_channel_set_group_cr_eligibility); -+ -+int qman_ceetm_channel_set_group_er_eligibility(struct qm_ceetm_channel -+ *channel, int group_b, int ere) -+{ -+ struct qm_mcc_ceetm_class_scheduler_config csch_config; -+ struct qm_mcr_ceetm_class_scheduler_query csch_query; -+ int i; -+ -+ if (qman_ceetm_query_class_scheduler(channel, &csch_query)) { -+ pr_err("Cannot get the channel %d scheduler setting.\n", -+ channel->idx); -+ return -EINVAL; -+ } -+ csch_config.cqcid = cpu_to_be16(channel->idx); -+ csch_config.dcpid = channel->dcp_idx; -+ csch_config.gpc_combine_flag = csch_query.gpc_combine_flag; -+ csch_config.gpc_prio_a = csch_query.gpc_prio_a; -+ csch_config.gpc_prio_b = csch_query.gpc_prio_b; -+ -+ for (i = 0; i < 8; i++) -+ csch_config.w[i] = csch_query.w[i]; -+ csch_config.crem = csch_query.crem; -+ if (group_b) -+ csch_config.erem = (be16_to_cpu(csch_query.erem) -+ & ~GROUP_B_ELIGIBILITY_SET) -+ | (ere ? GROUP_B_ELIGIBILITY_SET : 0); -+ else -+ csch_config.erem = (be16_to_cpu(csch_query.erem) -+ & ~GROUP_A_ELIGIBILITY_SET) -+ | (ere ? GROUP_A_ELIGIBILITY_SET : 0); -+ -+ csch_config.erem = cpu_to_be16(csch_config.erem); -+ -+ if (qman_ceetm_configure_class_scheduler(&csch_config)) { -+ pr_err("Cannot config channel %d's scheduler with " -+ "group_%c's er eligibility\n", channel->idx, -+ group_b ? 'b' : 'a'); -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_channel_set_group_er_eligibility); -+ -+int qman_ceetm_channel_set_cq_cr_eligibility(struct qm_ceetm_channel *channel, -+ unsigned int idx, int cre) -+{ -+ struct qm_mcc_ceetm_class_scheduler_config csch_config; -+ struct qm_mcr_ceetm_class_scheduler_query csch_query; -+ int i; -+ -+ if (idx > 7) { -+ pr_err("CQ index is out of range\n"); -+ return -EINVAL; -+ } -+ if (qman_ceetm_query_class_scheduler(channel, &csch_query)) { -+ pr_err("Cannot get the channel %d scheduler setting.\n", -+ channel->idx); -+ return -EINVAL; -+ } -+ csch_config.cqcid = cpu_to_be16(channel->idx); -+ csch_config.dcpid = channel->dcp_idx; -+ csch_config.gpc_combine_flag = csch_query.gpc_combine_flag; -+ csch_config.gpc_prio_a = csch_query.gpc_prio_a; -+ csch_config.gpc_prio_b = csch_query.gpc_prio_b; -+ for (i = 0; i < 8; i++) -+ csch_config.w[i] = csch_query.w[i]; -+ csch_config.erem = csch_query.erem; -+ csch_config.crem = (be16_to_cpu(csch_query.crem) -+ & ~CQ_ELIGIBILITY_SET(idx)) | -+ (cre ? 
CQ_ELIGIBILITY_SET(idx) : 0); -+ csch_config.crem = cpu_to_be16(csch_config.crem); -+ if (qman_ceetm_configure_class_scheduler(&csch_config)) { -+ pr_err("Cannot config channel scheduler to set " -+ "cr eligibility mask for CQ#%d\n", idx); -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_channel_set_cq_cr_eligibility); -+ -+int qman_ceetm_channel_set_cq_er_eligibility(struct qm_ceetm_channel *channel, -+ unsigned int idx, int ere) -+{ -+ struct qm_mcc_ceetm_class_scheduler_config csch_config; -+ struct qm_mcr_ceetm_class_scheduler_query csch_query; -+ int i; -+ -+ if (idx > 7) { -+ pr_err("CQ index is out of range\n"); -+ return -EINVAL; -+ } -+ if (qman_ceetm_query_class_scheduler(channel, &csch_query)) { -+ pr_err("Cannot get the channel %d scheduler setting.\n", -+ channel->idx); -+ return -EINVAL; -+ } -+ csch_config.cqcid = cpu_to_be16(channel->idx); -+ csch_config.dcpid = channel->dcp_idx; -+ csch_config.gpc_combine_flag = csch_query.gpc_combine_flag; -+ csch_config.gpc_prio_a = csch_query.gpc_prio_a; -+ csch_config.gpc_prio_b = csch_query.gpc_prio_b; -+ for (i = 0; i < 8; i++) -+ csch_config.w[i] = csch_query.w[i]; -+ csch_config.crem = csch_query.crem; -+ csch_config.erem = (be16_to_cpu(csch_query.erem) -+ & ~CQ_ELIGIBILITY_SET(idx)) | -+ (ere ? CQ_ELIGIBILITY_SET(idx) : 0); -+ csch_config.erem = cpu_to_be16(csch_config.erem); -+ if (qman_ceetm_configure_class_scheduler(&csch_config)) { -+ pr_err("Cannot config channel scheduler to set " -+ "er eligibility mask for CQ#%d\n", idx); -+ return -EINVAL; -+ } -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_channel_set_cq_er_eligibility); -+ -+int qman_ceetm_cq_claim(struct qm_ceetm_cq **cq, -+ struct qm_ceetm_channel *channel, unsigned int idx, -+ struct qm_ceetm_ccg *ccg) -+{ -+ struct qm_ceetm_cq *p; -+ struct qm_mcc_ceetm_cq_config cq_config; -+ -+ if (idx > 7) { -+ pr_err("The independent class queue id is out of range\n"); -+ return -EINVAL; -+ } -+ -+ list_for_each_entry(p, &channel->class_queues, node) { -+ if (p->idx == idx) { -+ pr_err("The CQ#%d has been claimed!\n", idx); -+ return -EINVAL; -+ } -+ } -+ -+ p = kmalloc(sizeof(*p), GFP_KERNEL); -+ if (!p) { -+ pr_err("Can't allocate memory for CQ#%d!\n", idx); -+ return -ENOMEM; -+ } -+ -+ list_add_tail(&p->node, &channel->class_queues); -+ p->idx = idx; -+ p->is_claimed = 1; -+ p->parent = channel; -+ INIT_LIST_HEAD(&p->bound_lfqids); -+ -+ if (ccg) { -+ cq_config.cqid = cpu_to_be16((channel->idx << 4) | idx); -+ cq_config.dcpid = channel->dcp_idx; -+ cq_config.ccgid = cpu_to_be16(ccg->idx); -+ if (qman_ceetm_configure_cq(&cq_config)) { -+ pr_err("Can't configure the CQ#%d with CCGRID#%d\n", -+ idx, ccg->idx); -+ list_del(&p->node); -+ kfree(p); -+ return -EINVAL; -+ } -+ } -+ -+ *cq = p; -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_cq_claim); -+ -+int qman_ceetm_cq_claim_A(struct qm_ceetm_cq **cq, -+ struct qm_ceetm_channel *channel, unsigned int idx, -+ struct qm_ceetm_ccg *ccg) -+{ -+ struct qm_ceetm_cq *p; -+ struct qm_mcc_ceetm_cq_config cq_config; -+ -+ if ((idx < 8) || (idx > 15)) { -+ pr_err("This grouped class queue id is out of range\n"); -+ return -EINVAL; -+ } -+ -+ list_for_each_entry(p, &channel->class_queues, node) { -+ if (p->idx == idx) { -+ pr_err("The CQ#%d has been claimed!\n", idx); -+ return -EINVAL; -+ } -+ } -+ -+ p = kmalloc(sizeof(*p), GFP_KERNEL); -+ if (!p) { -+ pr_err("Can't allocate memory for CQ#%d!\n", idx); -+ return -ENOMEM; -+ } -+ -+ list_add_tail(&p->node, &channel->class_queues); -+ p->idx = idx; -+ p->is_claimed = 1; -+ 
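/* As in qman_ceetm_cq_claim(), the node is linked into the channel's -+ * class_queues list before the optional CCG binding below and is -+ * unlinked again if that binding fails. -+ */ -+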
p->parent = channel; -+ INIT_LIST_HEAD(&p->bound_lfqids); -+ -+ if (ccg) { -+ cq_config.cqid = cpu_to_be16((channel->idx << 4) | idx); -+ cq_config.dcpid = channel->dcp_idx; -+ cq_config.ccgid = cpu_to_be16(ccg->idx); -+ if (qman_ceetm_configure_cq(&cq_config)) { -+ pr_err("Can't configure the CQ#%d with CCGRID#%d\n", -+ idx, ccg->idx); -+ list_del(&p->node); -+ kfree(p); -+ return -EINVAL; -+ } -+ } -+ *cq = p; -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_cq_claim_A); -+ -+int qman_ceetm_cq_claim_B(struct qm_ceetm_cq **cq, -+ struct qm_ceetm_channel *channel, unsigned int idx, -+ struct qm_ceetm_ccg *ccg) -+{ -+ struct qm_ceetm_cq *p; -+ struct qm_mcc_ceetm_cq_config cq_config; -+ -+ if ((idx < 12) || (idx > 15)) { -+ pr_err("This grouped class queue id is out of range\n"); -+ return -EINVAL; -+ } -+ -+ list_for_each_entry(p, &channel->class_queues, node) { -+ if (p->idx == idx) { -+ pr_err("The CQ#%d has been claimed!\n", idx); -+ return -EINVAL; -+ } -+ } -+ -+ p = kmalloc(sizeof(*p), GFP_KERNEL); -+ if (!p) { -+ pr_err("Can't allocate memory for CQ#%d!\n", idx); -+ return -ENOMEM; -+ } -+ -+ list_add_tail(&p->node, &channel->class_queues); -+ p->idx = idx; -+ p->is_claimed = 1; -+ p->parent = channel; -+ INIT_LIST_HEAD(&p->bound_lfqids); -+ -+ if (ccg) { -+ cq_config.cqid = cpu_to_be16((channel->idx << 4) | idx); -+ cq_config.dcpid = channel->dcp_idx; -+ cq_config.ccgid = cpu_to_be16(ccg->idx); -+ if (qman_ceetm_configure_cq(&cq_config)) { -+ pr_err("Can't configure the CQ#%d with CCGRID#%d\n", -+ idx, ccg->idx); -+ list_del(&p->node); -+ kfree(p); -+ return -EINVAL; -+ } -+ } -+ *cq = p; -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_cq_claim_B); -+ -+int qman_ceetm_cq_release(struct qm_ceetm_cq *cq) -+{ -+ if (!list_empty(&cq->bound_lfqids)) { -+ pr_err("The CQ#%d has unreleased LFQID\n", cq->idx); -+ return -EBUSY; -+ } -+ list_del(&cq->node); -+ qman_ceetm_drain_cq(cq); -+ kfree(cq); -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_cq_release); -+ -+int qman_ceetm_set_queue_weight(struct qm_ceetm_cq *cq, -+ struct qm_ceetm_weight_code *weight_code) -+{ -+ struct qm_mcc_ceetm_class_scheduler_config config_opts; -+ struct qm_mcr_ceetm_class_scheduler_query query_result; -+ int i; -+ -+ if (cq->idx < 8) { -+ pr_err("Can not set weight for ungrouped class queue\n"); -+ return -EINVAL; -+ } -+ -+ if (qman_ceetm_query_class_scheduler(cq->parent, &query_result)) { -+ pr_err("Can't query channel#%d's scheduler!\n", -+ cq->parent->idx); -+ return -EINVAL; -+ } -+ -+ config_opts.cqcid = cpu_to_be16(cq->parent->idx); -+ config_opts.dcpid = cq->parent->dcp_idx; -+ config_opts.crem = query_result.crem; -+ config_opts.erem = query_result.erem; -+ config_opts.gpc_combine_flag = query_result.gpc_combine_flag; -+ config_opts.gpc_prio_a = query_result.gpc_prio_a; -+ config_opts.gpc_prio_b = query_result.gpc_prio_b; -+ -+ for (i = 0; i < 8; i++) -+ config_opts.w[i] = query_result.w[i]; -+ config_opts.w[cq->idx - 8] = ((weight_code->y << 3) | -+ (weight_code->x & 0x7)); -+ return qman_ceetm_configure_class_scheduler(&config_opts); -+} -+EXPORT_SYMBOL(qman_ceetm_set_queue_weight); -+ -+int qman_ceetm_get_queue_weight(struct qm_ceetm_cq *cq, -+ struct qm_ceetm_weight_code *weight_code) -+{ -+ struct qm_mcr_ceetm_class_scheduler_query query_result; -+ -+ if (cq->idx < 8) { -+ pr_err("Can not get weight for ungrouped class queue\n"); -+ return -EINVAL; -+ } -+ -+ if (qman_ceetm_query_class_scheduler(cq->parent, -+ &query_result)) { -+ pr_err("Can't get the weight code for CQ#%d!\n", cq->idx); -+ return -EINVAL; -+ 
} -+ weight_code->y = query_result.w[cq->idx - 8] >> 3; -+ weight_code->x = query_result.w[cq->idx - 8] & 0x7; -+ -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_get_queue_weight); -+ -+/* The WBFS code is represented as {x,y}; the effective weight can be -+ * calculated as: -+ * effective weight = 2^x / (1 - (y/64)) -+ * = 2^(x+6) / (64 - y) -+ */ -+static void reduce_fraction(u32 *n, u32 *d) -+{ -+ u32 factor = 2; -+ u32 lesser = (*n < *d) ? *n : *d; -+ /* If factor exceeds the square-root of the lesser of *n and *d, -+ * then there's no point continuing. Proof: if there was a factor -+ * bigger than the square root, that would imply there exists -+ * another factor smaller than the square-root with which it -+ * multiplies to give 'lesser' - but that's a contradiction -+ * because the other factor would have already been found and -+ * divided out. -+ */ -+ while ((factor * factor) <= lesser) { -+ /* If 'factor' is a factor of *n and *d, divide them both -+ * by 'factor' as many times as possible. -+ */ -+ while (!(*n % factor) && !(*d % factor)) { -+ *n /= factor; -+ *d /= factor; -+ lesser /= factor; -+ } -+ if (factor == 2) -+ factor = 3; -+ else -+ factor += 2; -+ } -+} -+ -+int qman_ceetm_wbfs2ratio(struct qm_ceetm_weight_code *weight_code, -+ u32 *numerator, -+ u32 *denominator) -+{ -+ *numerator = (u32) 1 << (weight_code->x + 6); -+ *denominator = 64 - weight_code->y; -+ reduce_fraction(numerator, denominator); -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_wbfs2ratio); -+ -+/* For a given x, the weight is between 2^x (inclusive) and 2^(x+1) (exclusive). -+ * So find 'x' by range, and then estimate 'y' using: -+ * 64 - y = 2^(x + 6) / weight -+ * = 2^(x + 6) / (n/d) -+ * = d * 2^(x+6) / n -+ * y = 64 - (d * 2^(x+6) / n) -+ */ -+int qman_ceetm_ratio2wbfs(u32 numerator, -+ u32 denominator, -+ struct qm_ceetm_weight_code *weight_code, -+ int rounding) -+{ -+ unsigned int y, x = 0; -+ /* search incrementing 'x' until: -+ * weight < 2^(x+1) -+ * n/d < 2^(x+1) -+ * n < d * 2^(x+1) -+ */ -+ while ((x < 8) && (numerator >= (denominator << (x + 1)))) -+ x++; -+ if (x >= 8) -+ return -ERANGE; -+ /* because of the subtraction, use '-rounding' */ -+ y = 64 - ROUNDING(denominator << (x + 6), numerator, -rounding); -+ if (y >= 32) -+ return -ERANGE; -+ weight_code->x = x; -+ weight_code->y = y; -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_ratio2wbfs); -+ -+int qman_ceetm_set_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 ratio) -+{ -+ struct qm_ceetm_weight_code weight_code; -+ -+ if (qman_ceetm_ratio2wbfs(ratio, 100, &weight_code, 0)) { -+ pr_err("Cannot get wbfs code for cq %x\n", cq->idx); -+ return -EINVAL; -+ } -+ return qman_ceetm_set_queue_weight(cq, &weight_code); -+} -+EXPORT_SYMBOL(qman_ceetm_set_queue_weight_in_ratio); -+ -+int qman_ceetm_get_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 *ratio) -+{ -+ struct qm_ceetm_weight_code weight_code; -+ u32 n, d; -+ -+ if (qman_ceetm_get_queue_weight(cq, &weight_code)) { -+ pr_err("Cannot query the weight code for cq%x\n", cq->idx); -+ return -EINVAL; -+ } -+ -+ if (qman_ceetm_wbfs2ratio(&weight_code, &n, &d)) { -+ pr_err("Cannot get the ratio with wbfs code\n"); -+ return -EINVAL; -+ } -+ -+ *ratio = (n * (u32)100) / d; -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_get_queue_weight_in_ratio); -+ -+int qman_ceetm_cq_get_dequeue_statistics(struct qm_ceetm_cq *cq, u32 flags, -+ u64 *frame_count, u64 *byte_count) -+{ -+ struct qm_mcr_ceetm_statistics_query result; -+ u16 cid, command_type; -+ enum qm_dc_portal dcp_idx; -+ int ret; -+ -+ cid =
cpu_to_be16((cq->parent->idx << 4) | cq->idx); -+ dcp_idx = cq->parent->dcp_idx; -+ if (flags == QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER) -+ command_type = CEETM_QUERY_DEQUEUE_CLEAR_STATISTICS; -+ else -+ command_type = CEETM_QUERY_DEQUEUE_STATISTICS; -+ -+ ret = qman_ceetm_query_statistics(cid, dcp_idx, command_type, &result); -+ if (ret) { -+ pr_err("Can't query the statistics of CQ#%d!\n", cq->idx); -+ return -EINVAL; -+ } -+ -+ *frame_count = be40_to_cpu(result.frm_cnt); -+ *byte_count = be48_to_cpu(result.byte_cnt); -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_cq_get_dequeue_statistics); -+ -+int qman_ceetm_drain_cq(struct qm_ceetm_cq *cq) -+{ -+ struct qm_mcr_ceetm_cq_peek_pop_xsfdrread ppxr; -+ int ret; -+ -+ do { -+ ret = qman_ceetm_cq_peek_pop_xsfdrread(cq, 1, 0, &ppxr); -+ if (ret) { -+ pr_err("Failed to pop frame from CQ\n"); -+ return -EINVAL; -+ } -+ } while (!(ppxr.stat & 0x2)); -+ -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_drain_cq); -+ -+#define CEETM_LFQMT_LFQID_MSB 0xF00000 -+#define CEETM_LFQMT_LFQID_LSB 0x000FFF -+int qman_ceetm_lfq_claim(struct qm_ceetm_lfq **lfq, -+ struct qm_ceetm_cq *cq) -+{ -+ struct qm_ceetm_lfq *p; -+ u32 lfqid; -+ int ret = 0; -+ struct qm_mcc_ceetm_lfqmt_config lfqmt_config; -+ -+ if (cq->parent->dcp_idx == qm_dc_portal_fman0) { -+ ret = qman_alloc_ceetm0_lfqid(&lfqid); -+ } else if (cq->parent->dcp_idx == qm_dc_portal_fman1) { -+ ret = qman_alloc_ceetm1_lfqid(&lfqid); -+ } else { -+ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n", -+ cq->parent->dcp_idx); -+ return -EINVAL; -+ } -+ -+ if (ret) { -+ pr_err("There is no lfqid available for CQ#%d!\n", cq->idx); -+ return -ENODEV; -+ } -+ /* zeroed so optional fields such as p->ern start out NULL */ -+ p = kzalloc(sizeof(*p), GFP_KERNEL); -+ if (!p) -+ return -ENOMEM; -+ p->idx = lfqid; -+ p->dctidx = (u16)(lfqid & CEETM_LFQMT_LFQID_LSB); -+ p->parent = cq->parent; -+ list_add_tail(&p->node, &cq->bound_lfqids); -+ -+ lfqmt_config.lfqid = cpu_to_be24(CEETM_LFQMT_LFQID_MSB | -+ (cq->parent->dcp_idx << 16) | -+ (lfqid & CEETM_LFQMT_LFQID_LSB)); -+ lfqmt_config.cqid = cpu_to_be16((cq->parent->idx << 4) | (cq->idx)); -+ lfqmt_config.dctidx = cpu_to_be16(p->dctidx); -+ if (qman_ceetm_configure_lfqmt(&lfqmt_config)) { -+ pr_err("Can't configure LFQMT for LFQID#%d @ CQ#%d\n", -+ lfqid, cq->idx); -+ list_del(&p->node); -+ kfree(p); -+ return -EINVAL; -+ } -+ *lfq = p; -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_lfq_claim); -+ -+int qman_ceetm_lfq_release(struct qm_ceetm_lfq *lfq) -+{ -+ if (lfq->parent->dcp_idx == qm_dc_portal_fman0) { -+ qman_release_ceetm0_lfqid(lfq->idx); -+ } else if (lfq->parent->dcp_idx == qm_dc_portal_fman1) { -+ qman_release_ceetm1_lfqid(lfq->idx); -+ } else { -+ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n", -+ lfq->parent->dcp_idx); -+ return -EINVAL; -+ } -+ list_del(&lfq->node); -+ kfree(lfq); -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_lfq_release); -+ -+int qman_ceetm_lfq_set_context(struct qm_ceetm_lfq *lfq, u64 context_a, -+ u32 context_b) -+{ -+ struct qm_mcc_ceetm_dct_config dct_config; -+ -+ lfq->context_a = context_a; -+ lfq->context_b = context_b; -+ dct_config.dctidx = cpu_to_be16(lfq->dctidx); -+ dct_config.dcpid = lfq->parent->dcp_idx; -+ dct_config.context_b = cpu_to_be32(context_b); -+ dct_config.context_a = cpu_to_be64(context_a); -+ return qman_ceetm_configure_dct(&dct_config); -+} -+EXPORT_SYMBOL(qman_ceetm_lfq_set_context); -+ -+int qman_ceetm_lfq_get_context(struct qm_ceetm_lfq *lfq, u64 *context_a, -+ u32 *context_b) -+{ -+ struct qm_mcc_ceetm_dct_query dct_query; -+
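/* The DCT entry indexed by lfq->dctidx holds the context_a/context_b -+ * pair programmed by qman_ceetm_lfq_set_context() above; "DCT" as -+ * "dequeue context table" is our expansion, not spelled out in this -+ * patch. -+ */ -+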
struct qm_mcr_ceetm_dct_query query_result; -+ -+ dct_query.dctidx = cpu_to_be16(lfq->dctidx); -+ dct_query.dcpid = lfq->parent->dcp_idx; -+ if (qman_ceetm_query_dct(&dct_query, &query_result)) { -+ pr_err("Can't query LFQID#%d's context!\n", lfq->idx); -+ return -EINVAL; -+ } -+ *context_a = be64_to_cpu(query_result.context_a); -+ *context_b = be32_to_cpu(query_result.context_b); -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_lfq_get_context); -+ -+int qman_ceetm_create_fq(struct qm_ceetm_lfq *lfq, struct qman_fq *fq) -+{ -+ spin_lock_init(&fq->fqlock); -+ fq->fqid = lfq->idx; -+ fq->flags = QMAN_FQ_FLAG_NO_MODIFY; -+ if (lfq->ern) -+ fq->cb.ern = lfq->ern; -+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP -+ if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) -+ return -ENOMEM; -+#endif -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_create_fq); -+ -+int qman_ceetm_ccg_claim(struct qm_ceetm_ccg **ccg, -+ struct qm_ceetm_channel *channel, -+ unsigned int idx, -+ void (*cscn)(struct qm_ceetm_ccg *, -+ void *cb_ctx, -+ int congested), -+ void *cb_ctx) -+{ -+ struct qm_ceetm_ccg *p; -+ -+ if (idx > 15) { -+ pr_err("The given ccg index is out of range\n"); -+ return -EINVAL; -+ } -+ -+ list_for_each_entry(p, &channel->ccgs, node) { -+ if (p->idx == idx) { -+ pr_err("The CCG#%d has been claimed\n", idx); -+ return -EINVAL; -+ } -+ } -+ -+ p = kmalloc(sizeof(*p), GFP_KERNEL); -+ if (!p) { -+ pr_err("Can't allocate memory for CCG#%d!\n", idx); -+ return -ENOMEM; -+ } -+ -+ list_add_tail(&p->node, &channel->ccgs); -+ -+ p->idx = idx; -+ p->parent = channel; -+ p->cb = cscn; -+ p->cb_ctx = cb_ctx; -+ INIT_LIST_HEAD(&p->cb_node); -+ -+ *ccg = p; -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_ccg_claim); -+ -+int qman_ceetm_ccg_release(struct qm_ceetm_ccg *ccg) -+{ -+ unsigned long irqflags __maybe_unused; -+ struct qm_mcc_ceetm_ccgr_config config_opts; -+ int ret = 0; -+ struct qman_portal *p = get_affine_portal(); -+ -+ memset(&config_opts, 0, sizeof(struct qm_mcc_ceetm_ccgr_config)); -+ spin_lock_irqsave(&p->ccgr_lock, irqflags); -+ if (!list_empty(&ccg->cb_node)) -+ list_del(&ccg->cb_node); -+ config_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_CONFIGURE | -+ (ccg->parent->idx << 4) | ccg->idx); -+ config_opts.dcpid = ccg->parent->dcp_idx; -+ config_opts.we_mask = cpu_to_be16(QM_CCGR_WE_CSCN_TUPD); -+ config_opts.cm_config.cscn_tupd = cpu_to_be16(PORTAL_IDX(p)); -+ ret = qman_ceetm_configure_ccgr(&config_opts); -+ spin_unlock_irqrestore(&p->ccgr_lock, irqflags); -+ put_affine_portal(); -+ -+ list_del(&ccg->node); -+ kfree(ccg); -+ return ret; -+} -+EXPORT_SYMBOL(qman_ceetm_ccg_release); -+ -+int qman_ceetm_ccg_set(struct qm_ceetm_ccg *ccg, u16 we_mask, -+ const struct qm_ceetm_ccg_params *params) -+{ -+ struct qm_mcc_ceetm_ccgr_config config_opts; -+ unsigned long irqflags __maybe_unused; -+ int ret; -+ struct qman_portal *p; -+ -+ if (((ccg->parent->idx << 4) | ccg->idx) >= (2 * __CGR_NUM)) -+ return -EINVAL; -+ -+ p = get_affine_portal(); -+ -+ memset(&config_opts, 0, sizeof(struct qm_mcc_ceetm_ccgr_config)); -+ spin_lock_irqsave(&p->ccgr_lock, irqflags); -+ -+ config_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_CONFIGURE | -+ (ccg->parent->idx << 4) | ccg->idx); -+ config_opts.dcpid = ccg->parent->dcp_idx; -+ config_opts.we_mask = we_mask; -+ if (we_mask & QM_CCGR_WE_CSCN_EN) { -+ config_opts.we_mask |= QM_CCGR_WE_CSCN_TUPD; -+ config_opts.cm_config.cscn_tupd = cpu_to_be16( -+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p)); -+ } -+ config_opts.we_mask = cpu_to_be16(config_opts.we_mask); -+ config_opts.cm_config.ctl_wr_en_g 
= params->wr_en_g; -+ config_opts.cm_config.ctl_wr_en_y = params->wr_en_y; -+ config_opts.cm_config.ctl_wr_en_r = params->wr_en_r; -+ config_opts.cm_config.ctl_td_en = params->td_en; -+ config_opts.cm_config.ctl_td_mode = params->td_mode; -+ config_opts.cm_config.ctl_cscn_en = params->cscn_en; -+ config_opts.cm_config.ctl_mode = params->mode; -+ config_opts.cm_config.oal = params->oal; -+ config_opts.cm_config.cs_thres.hword = -+ cpu_to_be16(params->cs_thres_in.hword); -+ config_opts.cm_config.cs_thres_x.hword = -+ cpu_to_be16(params->cs_thres_out.hword); -+ config_opts.cm_config.td_thres.hword = -+ cpu_to_be16(params->td_thres.hword); -+ config_opts.cm_config.wr_parm_g.word = -+ cpu_to_be32(params->wr_parm_g.word); -+ config_opts.cm_config.wr_parm_y.word = -+ cpu_to_be32(params->wr_parm_y.word); -+ config_opts.cm_config.wr_parm_r.word = -+ cpu_to_be32(params->wr_parm_r.word); -+ ret = qman_ceetm_configure_ccgr(&config_opts); -+ if (ret) { -+ pr_err("Configure CCGR CM failed!\n"); -+ goto release_lock; -+ } -+ -+ if (we_mask & QM_CCGR_WE_CSCN_EN) -+ if (list_empty(&ccg->cb_node)) -+ list_add(&ccg->cb_node, -+ &p->ccgr_cbs[ccg->parent->dcp_idx]); -+release_lock: -+ spin_unlock_irqrestore(&p->ccgr_lock, irqflags); -+ put_affine_portal(); -+ return ret; -+} -+EXPORT_SYMBOL(qman_ceetm_ccg_set); -+ -+#define CEETM_CCGR_CTL_MASK 0x01 -+int qman_ceetm_ccg_get(struct qm_ceetm_ccg *ccg, -+ struct qm_ceetm_ccg_params *params) -+{ -+ struct qm_mcc_ceetm_ccgr_query query_opts; -+ struct qm_mcr_ceetm_ccgr_query query_result; -+ -+ query_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_QUERY | -+ (ccg->parent->idx << 4) | ccg->idx); -+ query_opts.dcpid = ccg->parent->dcp_idx; -+ -+ if (qman_ceetm_query_ccgr(&query_opts, &query_result)) { -+ pr_err("Can't query CCGR#%d\n", ccg->idx); -+ return -EINVAL; -+ } -+ -+ params->wr_parm_r.word = -+ be32_to_cpu(query_result.cm_query.wr_parm_r.word); -+ params->wr_parm_y.word = -+ be32_to_cpu(query_result.cm_query.wr_parm_y.word); -+ params->wr_parm_g.word = -+ be32_to_cpu(query_result.cm_query.wr_parm_g.word); -+ params->td_thres.hword = -+ be16_to_cpu(query_result.cm_query.td_thres.hword); -+ params->cs_thres_out.hword = -+ be16_to_cpu(query_result.cm_query.cs_thres_x.hword); -+ params->cs_thres_in.hword = -+ be16_to_cpu(query_result.cm_query.cs_thres.hword); -+ params->oal = query_result.cm_query.oal; -+ params->wr_en_g = query_result.cm_query.ctl_wr_en_g; -+ params->wr_en_y = query_result.cm_query.ctl_wr_en_y; -+ params->wr_en_r = query_result.cm_query.ctl_wr_en_r; -+ params->td_en = query_result.cm_query.ctl_td_en; -+ params->td_mode = query_result.cm_query.ctl_td_mode; -+ params->cscn_en = query_result.cm_query.ctl_cscn_en; -+ params->mode = query_result.cm_query.ctl_mode; -+ -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_ccg_get); -+ -+int qman_ceetm_ccg_get_reject_statistics(struct qm_ceetm_ccg *ccg, u32 flags, -+ u64 *frame_count, u64 *byte_count) -+{ -+ struct qm_mcr_ceetm_statistics_query result; -+ u16 cid, command_type; -+ enum qm_dc_portal dcp_idx; -+ int ret; -+ -+ cid = cpu_to_be16((ccg->parent->idx << 4) | ccg->idx); -+ dcp_idx = ccg->parent->dcp_idx; -+ if (flags == QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER) -+ command_type = CEETM_QUERY_REJECT_CLEAR_STATISTICS; -+ else -+ command_type = CEETM_QUERY_REJECT_STATISTICS; -+ -+ ret = qman_ceetm_query_statistics(cid, dcp_idx, command_type, &result); -+ if (ret) { -+ pr_err("Can't query the statistics of CCG#%d!\n", ccg->idx); -+ return -EINVAL; -+ } -+ -+ *frame_count = be40_to_cpu(result.frm_cnt); -+ *byte_count 
= be48_to_cpu(result.byte_cnt); -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_ccg_get_reject_statistics); -+ -+int qman_ceetm_cscn_swp_get(struct qm_ceetm_ccg *ccg, -+ u16 swp_idx, -+ unsigned int *cscn_enabled) -+{ -+ struct qm_mcc_ceetm_ccgr_query query_opts; -+ struct qm_mcr_ceetm_ccgr_query query_result; -+ int i; -+ -+ DPA_ASSERT(swp_idx < 127); -+ query_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_QUERY | -+ (ccg->parent->idx << 4) | ccg->idx); -+ query_opts.dcpid = ccg->parent->dcp_idx; -+ -+ if (qman_ceetm_query_ccgr(&query_opts, &query_result)) { -+ pr_err("Can't query CCGR#%d\n", ccg->idx); -+ return -EINVAL; -+ } -+ -+ i = swp_idx / 32; -+ i = 3 - i; -+ *cscn_enabled = be32_to_cpu(query_result.cm_query.cscn_targ_swp[i]) >> -+ (31 - swp_idx % 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_cscn_swp_get); -+ -+int qman_ceetm_cscn_dcp_set(struct qm_ceetm_ccg *ccg, -+ u16 dcp_idx, -+ u8 vcgid, -+ unsigned int cscn_enabled, -+ u16 we_mask, -+ const struct qm_ceetm_ccg_params *params) -+{ -+ struct qm_mcc_ceetm_ccgr_config config_opts; -+ int ret; -+ -+ config_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_CONFIGURE | -+ (ccg->parent->idx << 4) | ccg->idx); -+ config_opts.dcpid = ccg->parent->dcp_idx; -+ config_opts.we_mask = cpu_to_be16(we_mask | QM_CCGR_WE_CSCN_TUPD | -+ QM_CCGR_WE_CDV); -+ config_opts.cm_config.cdv = vcgid; -+ config_opts.cm_config.cscn_tupd = cpu_to_be16((cscn_enabled << 15) | -+ QM_CGR_TARG_UDP_CTRL_DCP | dcp_idx); -+ config_opts.cm_config.ctl_wr_en_g = params->wr_en_g; -+ config_opts.cm_config.ctl_wr_en_y = params->wr_en_y; -+ config_opts.cm_config.ctl_wr_en_r = params->wr_en_r; -+ config_opts.cm_config.ctl_td_en = params->td_en; -+ config_opts.cm_config.ctl_td_mode = params->td_mode; -+ config_opts.cm_config.ctl_cscn_en = params->cscn_en; -+ config_opts.cm_config.ctl_mode = params->mode; -+ config_opts.cm_config.cs_thres.hword = -+ cpu_to_be16(params->cs_thres_in.hword); -+ config_opts.cm_config.cs_thres_x.hword = -+ cpu_to_be16(params->cs_thres_out.hword); -+ config_opts.cm_config.td_thres.hword = -+ cpu_to_be16(params->td_thres.hword); -+ config_opts.cm_config.wr_parm_g.word = -+ cpu_to_be32(params->wr_parm_g.word); -+ config_opts.cm_config.wr_parm_y.word = -+ cpu_to_be32(params->wr_parm_y.word); -+ config_opts.cm_config.wr_parm_r.word = -+ cpu_to_be32(params->wr_parm_r.word); -+ -+ ret = qman_ceetm_configure_ccgr(&config_opts); -+ if (ret) { -+ pr_err("Configure CSCN_TARG_DCP failed!\n"); -+ return -EINVAL; -+ } -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_cscn_dcp_set); -+ -+int qman_ceetm_cscn_dcp_get(struct qm_ceetm_ccg *ccg, -+ u16 dcp_idx, -+ u8 *vcgid, -+ unsigned int *cscn_enabled) -+{ -+ struct qm_mcc_ceetm_ccgr_query query_opts; -+ struct qm_mcr_ceetm_ccgr_query query_result; -+ -+ query_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_QUERY | -+ (ccg->parent->idx << 4) | ccg->idx); -+ query_opts.dcpid = ccg->parent->dcp_idx; -+ -+ if (qman_ceetm_query_ccgr(&query_opts, &query_result)) { -+ pr_err("Can't query CCGR#%d\n", ccg->idx); -+ return -EINVAL; -+ } -+ -+ *vcgid = query_result.cm_query.cdv; -+ *cscn_enabled = (be16_to_cpu(query_result.cm_query.cscn_targ_dcp >> -+ dcp_idx)) & 0x1; -+ return 0; -+} -+EXPORT_SYMBOL(qman_ceetm_cscn_dcp_get); -+ -+int qman_ceetm_querycongestion(struct __qm_mcr_querycongestion *ccg_state, -+ unsigned int dcp_idx) -+{ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ u8 res; -+ int i, j; -+ -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ -+ 
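-+ /* CEETM implements twice as many congestion groups as the 256 -+ * conventional CGRs, so the congestion state is fetched with two -+ * QUERY_CONGESTION_STATE commands (i = 0 and i = 1); each pass returns -+ * a 256-bit state array whose 32-bit words are byte-swapped below -+ * before being copied out. */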
mcc = qm_mc_start(&p->p); -+ for (i = 0; i < 2; i++) { -+ mcc->ccgr_query.ccgrid = -+ cpu_to_be16(CEETM_QUERY_CONGESTION_STATE | i); -+ mcc->ccgr_query.dcpid = dcp_idx; -+ qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY); -+ -+ while (!(mcr = qm_mc_result(&p->p))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == -+ QM_CEETM_VERB_CCGR_QUERY); -+ res = mcr->result; -+ if (res == QM_MCR_RESULT_OK) { -+ for (j = 0; j < 8; j++) -+ mcr->ccgr_query.congestion_state.state. -+ __state[j] = -+ be32_to_cpu(mcr->ccgr_query. -+ congestion_state.state.__state[j]); -+ -+ *(ccg_state + i) = -+ mcr->ccgr_query.congestion_state.state; -+ } else { -+ pr_err("QUERY CEETM CONGESTION STATE failed\n"); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ return -EIO; -+ } -+ } -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ return 0; -+} -+ -+int qman_set_wpm(int wpm_enable) -+{ -+ return qm_set_wpm(wpm_enable); -+} -+EXPORT_SYMBOL(qman_set_wpm); -+ -+int qman_get_wpm(int *wpm_enable) -+{ -+ return qm_get_wpm(wpm_enable); -+} -+EXPORT_SYMBOL(qman_get_wpm); -+ -+int qman_shutdown_fq(u32 fqid) -+{ -+ struct qman_portal *p; -+ unsigned long irqflags __maybe_unused; -+ int ret; -+ struct qm_portal *low_p; -+ p = get_affine_portal(); -+ PORTAL_IRQ_LOCK(p, irqflags); -+ low_p = &p->p; -+ ret = qm_shutdown_fq(&low_p, 1, fqid); -+ PORTAL_IRQ_UNLOCK(p, irqflags); -+ put_affine_portal(); -+ return ret; -+} -+ -+const struct qm_portal_config *qman_get_qm_portal_config( -+ struct qman_portal *portal) -+{ -+ return portal->sharing_redirect ? NULL : portal->config; -+} ---- /dev/null -+++ b/drivers/staging/fsl_qbman/qman_low.h -@@ -0,0 +1,1407 @@ -+/* Copyright 2008-2011 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */ -+ -+#include "qman_private.h" -+ -+/***************************/ -+/* Portal register assists */ -+/***************************/ -+ -+/* Cache-inhibited register offsets */ -+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64) -+ -+#define QM_REG_EQCR_PI_CINH 0x0000 -+#define QM_REG_EQCR_CI_CINH 0x0004 -+#define QM_REG_EQCR_ITR 0x0008 -+#define QM_REG_DQRR_PI_CINH 0x0040 -+#define QM_REG_DQRR_CI_CINH 0x0044 -+#define QM_REG_DQRR_ITR 0x0048 -+#define QM_REG_DQRR_DCAP 0x0050 -+#define QM_REG_DQRR_SDQCR 0x0054 -+#define QM_REG_DQRR_VDQCR 0x0058 -+#define QM_REG_DQRR_PDQCR 0x005c -+#define QM_REG_MR_PI_CINH 0x0080 -+#define QM_REG_MR_CI_CINH 0x0084 -+#define QM_REG_MR_ITR 0x0088 -+#define QM_REG_CFG 0x0100 -+#define QM_REG_ISR 0x0e00 -+#define QM_REG_IIR 0x0e0c -+#define QM_REG_ITPR 0x0e14 -+ -+/* Cache-enabled register offsets */ -+#define QM_CL_EQCR 0x0000 -+#define QM_CL_DQRR 0x1000 -+#define QM_CL_MR 0x2000 -+#define QM_CL_EQCR_PI_CENA 0x3000 -+#define QM_CL_EQCR_CI_CENA 0x3100 -+#define QM_CL_DQRR_PI_CENA 0x3200 -+#define QM_CL_DQRR_CI_CENA 0x3300 -+#define QM_CL_MR_PI_CENA 0x3400 -+#define QM_CL_MR_CI_CENA 0x3500 -+#define QM_CL_CR 0x3800 -+#define QM_CL_RR0 0x3900 -+#define QM_CL_RR1 0x3940 -+ -+#endif -+ -+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) -+ -+#define QM_REG_EQCR_PI_CINH 0x3000 -+#define QM_REG_EQCR_CI_CINH 0x3040 -+#define QM_REG_EQCR_ITR 0x3080 -+#define QM_REG_DQRR_PI_CINH 0x3100 -+#define QM_REG_DQRR_CI_CINH 0x3140 -+#define QM_REG_DQRR_ITR 0x3180 -+#define QM_REG_DQRR_DCAP 0x31C0 -+#define QM_REG_DQRR_SDQCR 0x3200 -+#define QM_REG_DQRR_VDQCR 0x3240 -+#define QM_REG_DQRR_PDQCR 0x3280 -+#define QM_REG_MR_PI_CINH 0x3300 -+#define QM_REG_MR_CI_CINH 0x3340 -+#define QM_REG_MR_ITR 0x3380 -+#define QM_REG_CFG 0x3500 -+#define QM_REG_ISR 0x3600 -+#define QM_REG_IIR 0x36C0 -+#define QM_REG_ITPR 0x3740 -+ -+/* Cache-enabled register offsets */ -+#define QM_CL_EQCR 0x0000 -+#define QM_CL_DQRR 0x1000 -+#define QM_CL_MR 0x2000 -+#define QM_CL_EQCR_PI_CENA 0x3000 -+#define QM_CL_EQCR_CI_CENA 0x3040 -+#define QM_CL_DQRR_PI_CENA 0x3100 -+#define QM_CL_DQRR_CI_CENA 0x3140 -+#define QM_CL_MR_PI_CENA 0x3300 -+#define QM_CL_MR_CI_CENA 0x3340 -+#define QM_CL_CR 0x3800 -+#define QM_CL_RR0 0x3900 -+#define QM_CL_RR1 0x3940 -+ -+#endif -+ -+ -+/* BTW, the drivers (and h/w programming model) already obtain the required -+ * synchronisation for portal accesses via lwsync(), hwsync(), and -+ * data-dependencies. Use of barrier()s or other order-preserving primitives -+ * simply degrade performance. Hence the use of the __raw_*() interfaces, which -+ * simply ensure that the compiler treats the portal registers as volatile (ie. -+ * non-coherent). */ -+ -+/* Cache-inhibited register access. 
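-+ * Each accessor performs a single volatile 32-bit read or write, -+ * byte-swapping between CPU endianness and the big-endian register -+ * layout.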
*/ -+#define __qm_in(qm, o) be32_to_cpu(__raw_readl((qm)->addr_ci + (o))) -+#define __qm_out(qm, o, val) __raw_writel((cpu_to_be32(val)), \ -+ (qm)->addr_ci + (o)); -+#define qm_in(reg) __qm_in(&portal->addr, QM_REG_##reg) -+#define qm_out(reg, val) __qm_out(&portal->addr, QM_REG_##reg, val) -+ -+/* Cache-enabled (index) register access */ -+#define __qm_cl_touch_ro(qm, o) dcbt_ro((qm)->addr_ce + (o)) -+#define __qm_cl_touch_rw(qm, o) dcbt_rw((qm)->addr_ce + (o)) -+#define __qm_cl_in(qm, o) be32_to_cpu(__raw_readl((qm)->addr_ce + (o))) -+#define __qm_cl_out(qm, o, val) \ -+ do { \ -+ u32 *__tmpclout = (qm)->addr_ce + (o); \ -+ __raw_writel(cpu_to_be32(val), __tmpclout); \ -+ dcbf(__tmpclout); \ -+ } while (0) -+#define __qm_cl_invalidate(qm, o) dcbi((qm)->addr_ce + (o)) -+#define qm_cl_touch_ro(reg) __qm_cl_touch_ro(&portal->addr, QM_CL_##reg##_CENA) -+#define qm_cl_touch_rw(reg) __qm_cl_touch_rw(&portal->addr, QM_CL_##reg##_CENA) -+#define qm_cl_in(reg) __qm_cl_in(&portal->addr, QM_CL_##reg##_CENA) -+#define qm_cl_out(reg, val) __qm_cl_out(&portal->addr, QM_CL_##reg##_CENA, val) -+#define qm_cl_invalidate(reg)\ -+ __qm_cl_invalidate(&portal->addr, QM_CL_##reg##_CENA) -+ -+/* Cache-enabled ring access */ -+#define qm_cl(base, idx) ((void *)base + ((idx) << 6)) -+ -+/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf -+ * analysis, look at using the "extra" bit in the ring index registers to avoid -+ * cyclic issues. */ -+static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last) -+{ -+ /* 'first' is included, 'last' is excluded */ -+ if (first <= last) -+ return last - first; -+ return ringsize + last - first; -+} -+ -+/* Portal modes. -+ * Enum types; -+ * pmode == production mode -+ * cmode == consumption mode, -+ * dmode == h/w dequeue mode. -+ * Enum values use 3 letter codes. First letter matches the portal mode, -+ * remaining two letters indicate; -+ * ci == cache-inhibited portal register -+ * ce == cache-enabled portal register -+ * vb == in-band valid-bit (cache-enabled) -+ * dc == DCA (Discrete Consumption Acknowledgement), DQRR-only -+ * As for "enum qm_dqrr_dmode", it should be self-explanatory. 
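-+ * (qm_dqrr_dpush selects the SDQCR/VDQCR push modes, qm_dqrr_dpull the -+ * PDQCR pull mode).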
-+ */ -+enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */ -+ qm_eqcr_pci = 0, /* PI index, cache-inhibited */ -+ qm_eqcr_pce = 1, /* PI index, cache-enabled */ -+ qm_eqcr_pvb = 2 /* valid-bit */ -+}; -+enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */ -+ qm_dqrr_dpush = 0, /* SDQCR + VDQCR */ -+ qm_dqrr_dpull = 1 /* PDQCR */ -+}; -+enum qm_dqrr_pmode { /* s/w-only */ -+ qm_dqrr_pci, /* reads DQRR_PI_CINH */ -+ qm_dqrr_pce, /* reads DQRR_PI_CENA */ -+ qm_dqrr_pvb /* reads valid-bit */ -+}; -+enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */ -+ qm_dqrr_cci = 0, /* CI index, cache-inhibited */ -+ qm_dqrr_cce = 1, /* CI index, cache-enabled */ -+ qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgement */ -+}; -+enum qm_mr_pmode { /* s/w-only */ -+ qm_mr_pci, /* reads MR_PI_CINH */ -+ qm_mr_pce, /* reads MR_PI_CENA */ -+ qm_mr_pvb /* reads valid-bit */ -+}; -+enum qm_mr_cmode { /* matches QCSP_CFG::MM */ -+ qm_mr_cci = 0, /* CI index, cache-inhibited */ -+ qm_mr_cce = 1 /* CI index, cache-enabled */ -+}; -+ -+ -+/* ------------------------- */ -+/* --- Portal structures --- */ -+ -+#define QM_EQCR_SIZE 8 -+#define QM_DQRR_SIZE 16 -+#define QM_MR_SIZE 8 -+ -+struct qm_eqcr { -+ struct qm_eqcr_entry *ring, *cursor; -+ u8 ci, available, ithresh, vbit; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ u32 busy; -+ enum qm_eqcr_pmode pmode; -+#endif -+}; -+ -+struct qm_dqrr { -+ const struct qm_dqrr_entry *ring, *cursor; -+ u8 pi, ci, fill, ithresh, vbit; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ enum qm_dqrr_dmode dmode; -+ enum qm_dqrr_pmode pmode; -+ enum qm_dqrr_cmode cmode; -+#endif -+}; -+ -+struct qm_mr { -+ const struct qm_mr_entry *ring, *cursor; -+ u8 pi, ci, fill, ithresh, vbit; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ enum qm_mr_pmode pmode; -+ enum qm_mr_cmode cmode; -+#endif -+}; -+ -+struct qm_mc { -+ struct qm_mc_command *cr; -+ struct qm_mc_result *rr; -+ u8 rridx, vbit; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ enum { -+ /* Can be _mc_start()ed */ -+ qman_mc_idle, -+ /* Can be _mc_commit()ed or _mc_abort()ed */ -+ qman_mc_user, -+ /* Can only be _mc_retry()ed */ -+ qman_mc_hw -+ } state; -+#endif -+}; -+ -+#define QM_PORTAL_ALIGNMENT ____cacheline_aligned -+ -+struct qm_addr { -+ void __iomem *addr_ce; /* cache-enabled */ -+ void __iomem *addr_ci; /* cache-inhibited */ -+}; -+ -+struct qm_portal { -+ /* In the non-CONFIG_FSL_DPA_CHECKING case, the following stuff up to -+ * and including 'mc' fits within a cacheline (yay!). The 'config' part -+ * is setup-only, so isn't a cause for a concern. In other words, don't -+ * rearrange this structure on a whim, there be dragons ... */ -+ struct qm_addr addr; -+ struct qm_eqcr eqcr; -+ struct qm_dqrr dqrr; -+ struct qm_mr mr; -+ struct qm_mc mc; -+} QM_PORTAL_ALIGNMENT; -+ -+ -+/* ---------------- */ -+/* --- EQCR API --- */ -+ -+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */ -+#define EQCR_CARRYCLEAR(p) \ -+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6))) -+ -+/* Bit-wise logic to convert a ring pointer to a ring index */ -+static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e) -+{ -+ return ((uintptr_t)e >> 6) & (QM_EQCR_SIZE - 1); -+} -+ -+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */ -+static inline void EQCR_INC(struct qm_eqcr *eqcr) -+{ -+ /* NB: this is odd-looking, but experiments show that it generates fast -+ * code with essentially no branching overheads. We increment to the -+ * next EQCR pointer and handle overflow and 'vbit'. 
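-+ * For example, with QM_EQCR_SIZE == 8 a cursor at ring[7] wraps back to -+ * ring[0], and each wrap toggles the valid bit that the commit routines -+ * stamp into the verb of every new entry.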
*/ -+ struct qm_eqcr_entry *partial = eqcr->cursor + 1; -+ eqcr->cursor = EQCR_CARRYCLEAR(partial); -+ if (partial != eqcr->cursor) -+ eqcr->vbit ^= QM_EQCR_VERB_VBIT; -+} -+ -+static inline int qm_eqcr_init(struct qm_portal *portal, -+ enum qm_eqcr_pmode pmode, -+ unsigned int eq_stash_thresh, -+ int eq_stash_prio) -+{ -+ /* This use of 'register', as well as all other occurrences, is because -+ * it has been observed to generate much faster code with gcc than is -+ * otherwise the case. */ -+ register struct qm_eqcr *eqcr = &portal->eqcr; -+ u32 cfg; -+ u8 pi; -+ -+ eqcr->ring = portal->addr.addr_ce + QM_CL_EQCR; -+ eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); -+ qm_cl_invalidate(EQCR_CI); -+ pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1); -+ eqcr->cursor = eqcr->ring + pi; -+ eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ? -+ QM_EQCR_VERB_VBIT : 0; -+ eqcr->available = QM_EQCR_SIZE - 1 - -+ qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi); -+ eqcr->ithresh = qm_in(EQCR_ITR); -+#ifdef CONFIG_FSL_DPA_CHECKING -+ eqcr->busy = 0; -+ eqcr->pmode = pmode; -+#endif -+ cfg = (qm_in(CFG) & 0x00ffffff) | -+ (eq_stash_thresh << 28) | /* QCSP_CFG: EST */ -+ (eq_stash_prio << 26) | /* QCSP_CFG: EP */ -+ ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */ -+ qm_out(CFG, cfg); -+ return 0; -+} -+ -+static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal) -+{ -+ return (qm_in(CFG) >> 28) & 0x7; -+} -+ -+static inline void qm_eqcr_finish(struct qm_portal *portal) -+{ -+ register struct qm_eqcr *eqcr = &portal->eqcr; -+ u8 pi, ci; -+ u32 cfg; -+ -+ /* -+ * Disable EQCI stashing because the QMan only -+ * presents the value it previously stashed to -+ * maintain coherency. Setting the stash threshold -+ * to 1 then 0 ensures that QMan has resynchronized -+ * its internal copy so that the portal is clean -+ * when it is reinitialized in the future. -+ */ -+ cfg = (qm_in(CFG) & 0x0fffffff) | -+ (1 << 28); /* QCSP_CFG: EST */ -+ qm_out(CFG, cfg); -+ cfg &= 0x0fffffff; /* stash threshold = 0 */ -+ qm_out(CFG, cfg); -+ -+ pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1); -+ ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); -+ -+ /* Refresh EQCR CI cache value */ -+ qm_cl_invalidate(EQCR_CI); -+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1); -+ -+ DPA_ASSERT(!eqcr->busy); -+ if (pi != EQCR_PTR2IDX(eqcr->cursor)) -+ pr_crit("losing uncommitted EQCR entries\n"); -+ if (ci != eqcr->ci) -+ pr_crit("missing existing EQCR completions\n"); -+ if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor)) -+ pr_crit("EQCR destroyed unquiesced\n"); -+} -+ -+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal -+ *portal) -+{ -+ register struct qm_eqcr *eqcr = &portal->eqcr; -+ DPA_ASSERT(!eqcr->busy); -+ if (!eqcr->available) -+ return NULL; -+ -+ -+#ifdef CONFIG_FSL_DPA_CHECKING -+ eqcr->busy = 1; -+#endif -+ dcbz_64(eqcr->cursor); -+ return eqcr->cursor; -+} -+ -+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal -+ *portal) -+{ -+ register struct qm_eqcr *eqcr = &portal->eqcr; -+ u8 diff, old_ci; -+ -+ DPA_ASSERT(!eqcr->busy); -+ if (!eqcr->available) { -+ old_ci = eqcr->ci; -+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1); -+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); -+ eqcr->available += diff; -+ if (!diff) -+ return NULL; -+ } -+#ifdef CONFIG_FSL_DPA_CHECKING -+ eqcr->busy = 1; -+#endif -+ dcbz_64(eqcr->cursor); -+ return eqcr->cursor; -+} -+ -+static inline void qm_eqcr_abort(struct qm_portal *portal) -+{ -+ __maybe_unused register struct qm_eqcr *eqcr =
&portal->eqcr; -+ DPA_ASSERT(eqcr->busy); -+#ifdef CONFIG_FSL_DPA_CHECKING -+ eqcr->busy = 0; -+#endif -+} -+ -+static inline struct qm_eqcr_entry *qm_eqcr_pend_and_next( -+ struct qm_portal *portal, u8 myverb) -+{ -+ register struct qm_eqcr *eqcr = &portal->eqcr; -+ DPA_ASSERT(eqcr->busy); -+ DPA_ASSERT(eqcr->pmode != qm_eqcr_pvb); -+ if (eqcr->available == 1) -+ return NULL; -+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit; -+ dcbf(eqcr->cursor); -+ EQCR_INC(eqcr); -+ eqcr->available--; -+ dcbz_64(eqcr->cursor); -+ return eqcr->cursor; -+} -+ -+#define EQCR_COMMIT_CHECKS(eqcr) \ -+do { \ -+ DPA_ASSERT(eqcr->busy); \ -+ DPA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); \ -+ DPA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff)); \ -+} while (0) -+ -+static inline void qm_eqcr_pci_commit(struct qm_portal *portal, u8 myverb) -+{ -+ register struct qm_eqcr *eqcr = &portal->eqcr; -+ EQCR_COMMIT_CHECKS(eqcr); -+ DPA_ASSERT(eqcr->pmode == qm_eqcr_pci); -+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit; -+ EQCR_INC(eqcr); -+ eqcr->available--; -+ dcbf(eqcr->cursor); -+ hwsync(); -+ qm_out(EQCR_PI_CINH, EQCR_PTR2IDX(eqcr->cursor)); -+#ifdef CONFIG_FSL_DPA_CHECKING -+ eqcr->busy = 0; -+#endif -+} -+ -+static inline void qm_eqcr_pce_prefetch(struct qm_portal *portal) -+{ -+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr; -+ DPA_ASSERT(eqcr->pmode == qm_eqcr_pce); -+ qm_cl_invalidate(EQCR_PI); -+ qm_cl_touch_rw(EQCR_PI); -+} -+ -+static inline void qm_eqcr_pce_commit(struct qm_portal *portal, u8 myverb) -+{ -+ register struct qm_eqcr *eqcr = &portal->eqcr; -+ EQCR_COMMIT_CHECKS(eqcr); -+ DPA_ASSERT(eqcr->pmode == qm_eqcr_pce); -+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit; -+ EQCR_INC(eqcr); -+ eqcr->available--; -+ dcbf(eqcr->cursor); -+ lwsync(); -+ qm_cl_out(EQCR_PI, EQCR_PTR2IDX(eqcr->cursor)); -+#ifdef CONFIG_FSL_DPA_CHECKING -+ eqcr->busy = 0; -+#endif -+} -+ -+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb) -+{ -+ register struct qm_eqcr *eqcr = &portal->eqcr; -+ struct qm_eqcr_entry *eqcursor; -+ EQCR_COMMIT_CHECKS(eqcr); -+ DPA_ASSERT(eqcr->pmode == qm_eqcr_pvb); -+ lwsync(); -+ eqcursor = eqcr->cursor; -+ eqcursor->__dont_write_directly__verb = myverb | eqcr->vbit; -+ dcbf(eqcursor); -+ EQCR_INC(eqcr); -+ eqcr->available--; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ eqcr->busy = 0; -+#endif -+} -+ -+static inline u8 qm_eqcr_cci_update(struct qm_portal *portal) -+{ -+ register struct qm_eqcr *eqcr = &portal->eqcr; -+ u8 diff, old_ci = eqcr->ci; -+ eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); -+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); -+ eqcr->available += diff; -+ return diff; -+} -+ -+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal) -+{ -+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr; -+ qm_cl_touch_ro(EQCR_CI); -+} -+ -+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal) -+{ -+ register struct qm_eqcr *eqcr = &portal->eqcr; -+ u8 diff, old_ci = eqcr->ci; -+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1); -+ qm_cl_invalidate(EQCR_CI); -+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); -+ eqcr->available += diff; -+ return diff; -+} -+ -+static inline u8 qm_eqcr_get_ithresh(struct qm_portal *portal) -+{ -+ register struct qm_eqcr *eqcr = &portal->eqcr; -+ return eqcr->ithresh; -+} -+ -+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh) -+{ -+ register struct qm_eqcr *eqcr = 
&portal->eqcr; -+ eqcr->ithresh = ithresh; -+ qm_out(EQCR_ITR, ithresh); -+} -+ -+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal) -+{ -+ register struct qm_eqcr *eqcr = &portal->eqcr; -+ return eqcr->available; -+} -+ -+static inline u8 qm_eqcr_get_fill(struct qm_portal *portal) -+{ -+ register struct qm_eqcr *eqcr = &portal->eqcr; -+ return QM_EQCR_SIZE - 1 - eqcr->available; -+} -+ -+ -+/* ---------------- */ -+/* --- DQRR API --- */ -+ -+/* FIXME: many possible improvements; -+ * - look at changing the API to use pointer rather than index parameters now -+ * that 'cursor' is a pointer, -+ * - consider moving other parameters to pointer if it could help (ci) -+ */ -+ -+#define DQRR_CARRYCLEAR(p) \ -+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_DQRR_SIZE << 6))) -+ -+static inline u8 DQRR_PTR2IDX(const struct qm_dqrr_entry *e) -+{ -+ return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1); -+} -+ -+static inline const struct qm_dqrr_entry *DQRR_INC( -+ const struct qm_dqrr_entry *e) -+{ -+ return DQRR_CARRYCLEAR(e + 1); -+} -+ -+static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf) -+{ -+ qm_out(CFG, (qm_in(CFG) & 0xff0fffff) | -+ ((mf & (QM_DQRR_SIZE - 1)) << 20)); -+} -+ -+static inline void qm_dqrr_cci_consume(struct qm_portal *portal, u8 num) -+{ -+ register struct qm_dqrr *dqrr = &portal->dqrr; -+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cci); -+ dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1); -+ qm_out(DQRR_CI_CINH, dqrr->ci); -+} -+ -+static inline void qm_dqrr_cce_consume(struct qm_portal *portal, u8 num) -+{ -+ register struct qm_dqrr *dqrr = &portal->dqrr; -+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cce); -+ dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1); -+ qm_cl_out(DQRR_CI, dqrr->ci); -+} -+ -+static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u16 bitmask) -+{ -+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; -+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); -+ qm_out(DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */ -+ ((u32)bitmask << 16)); /* DQRR_DCAP::DCAP_CI */ -+ dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1); -+ dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi); -+} -+ -+static inline int qm_dqrr_init(struct qm_portal *portal, -+ const struct qm_portal_config *config, -+ enum qm_dqrr_dmode dmode, -+ __maybe_unused enum qm_dqrr_pmode pmode, -+ enum qm_dqrr_cmode cmode, u8 max_fill) -+{ -+ register struct qm_dqrr *dqrr = &portal->dqrr; -+ u32 cfg; -+ -+ /* Make sure the DQRR will be idle when we enable */ -+ qm_out(DQRR_SDQCR, 0); -+ qm_out(DQRR_VDQCR, 0); -+ qm_out(DQRR_PDQCR, 0); -+ dqrr->ring = portal->addr.addr_ce + QM_CL_DQRR; -+ dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1); -+ dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1); -+ dqrr->cursor = dqrr->ring + dqrr->ci; -+ dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi); -+ dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ? 
-+ QM_DQRR_VERB_VBIT : 0; -+ dqrr->ithresh = qm_in(DQRR_ITR); -+ -+ /* Free up pending DQRR entries if any as per current DCM */ -+ if (dqrr->fill) { -+ enum qm_dqrr_cmode dcm = (qm_in(CFG) >> 16) & 3; -+ -+#ifdef CONFIG_FSL_DPA_CHECKING -+ dqrr->cmode = dcm; -+#endif -+ switch (dcm) { -+ case qm_dqrr_cci: -+ qm_dqrr_cci_consume(portal, dqrr->fill); -+ break; -+ case qm_dqrr_cce: -+ qm_dqrr_cce_consume(portal, dqrr->fill); -+ break; -+ case qm_dqrr_cdc: -+ qm_dqrr_cdc_consume_n(portal, (QM_DQRR_SIZE - 1)); -+ break; -+ default: -+ DPA_ASSERT(0); -+ } -+ } -+ -+#ifdef CONFIG_FSL_DPA_CHECKING -+ dqrr->dmode = dmode; -+ dqrr->pmode = pmode; -+ dqrr->cmode = cmode; -+#endif -+ /* Invalidate every ring entry before beginning */ -+ for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++) -+ dcbi(qm_cl(dqrr->ring, cfg)); -+ cfg = (qm_in(CFG) & 0xff000f00) | -+ ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */ -+ ((dmode & 1) << 18) | /* DP */ -+ ((cmode & 3) << 16) | /* DCM */ -+ 0xa0 | /* RE+SE */ -+ (0 ? 0x40 : 0) | /* Ignore RP */ -+ (0 ? 0x10 : 0); /* Ignore SP */ -+ qm_out(CFG, cfg); -+ qm_dqrr_set_maxfill(portal, max_fill); -+ return 0; -+} -+ -+static inline void qm_dqrr_finish(struct qm_portal *portal) -+{ -+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ if ((dqrr->cmode != qm_dqrr_cdc) && -+ (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor))) -+ pr_crit("Ignoring completed DQRR entries\n"); -+#endif -+} -+ -+static inline const struct qm_dqrr_entry *qm_dqrr_current( -+ struct qm_portal *portal) -+{ -+ register struct qm_dqrr *dqrr = &portal->dqrr; -+ if (!dqrr->fill) -+ return NULL; -+ return dqrr->cursor; -+} -+ -+static inline u8 qm_dqrr_cursor(struct qm_portal *portal) -+{ -+ register struct qm_dqrr *dqrr = &portal->dqrr; -+ return DQRR_PTR2IDX(dqrr->cursor); -+} -+ -+static inline u8 qm_dqrr_next(struct qm_portal *portal) -+{ -+ register struct qm_dqrr *dqrr = &portal->dqrr; -+ DPA_ASSERT(dqrr->fill); -+ dqrr->cursor = DQRR_INC(dqrr->cursor); -+ return --dqrr->fill; -+} -+ -+static inline u8 qm_dqrr_pci_update(struct qm_portal *portal) -+{ -+ register struct qm_dqrr *dqrr = &portal->dqrr; -+ u8 diff, old_pi = dqrr->pi; -+ DPA_ASSERT(dqrr->pmode == qm_dqrr_pci); -+ dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1); -+ diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi); -+ dqrr->fill += diff; -+ return diff; -+} -+ -+static inline void qm_dqrr_pce_prefetch(struct qm_portal *portal) -+{ -+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; -+ DPA_ASSERT(dqrr->pmode == qm_dqrr_pce); -+ qm_cl_invalidate(DQRR_PI); -+ qm_cl_touch_ro(DQRR_PI); -+} -+ -+static inline u8 qm_dqrr_pce_update(struct qm_portal *portal) -+{ -+ register struct qm_dqrr *dqrr = &portal->dqrr; -+ u8 diff, old_pi = dqrr->pi; -+ DPA_ASSERT(dqrr->pmode == qm_dqrr_pce); -+ dqrr->pi = qm_cl_in(DQRR_PI) & (QM_DQRR_SIZE - 1); -+ diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi); -+ dqrr->fill += diff; -+ return diff; -+} -+ -+static inline void qm_dqrr_pvb_update(struct qm_portal *portal) -+{ -+ register struct qm_dqrr *dqrr = &portal->dqrr; -+ const struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi); -+ DPA_ASSERT(dqrr->pmode == qm_dqrr_pvb); -+#ifndef CONFIG_FSL_PAMU -+ /* -+ * If PAMU is not available we need to invalidate the cache. -+ * When PAMU is available the cache is updated by stash -+ */ -+ dcbi(res); -+ dcbt_ro(res); -+#endif -+ -+ /* when accessing 'verb', use __raw_readb() to ensure that compiler -+ * inlining doesn't try to optimise out "excess reads". 
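-+ * An entry is new precisely when the valid bit in its verb matches the -+ * vbit expected for the current lap of the ring: the producer toggles -+ * the bit it writes on every wrap, so entries left over from the -+ * previous lap fail the comparison.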
*/ -+ if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) { -+ dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1); -+ if (!dqrr->pi) -+ dqrr->vbit ^= QM_DQRR_VERB_VBIT; -+ dqrr->fill++; -+ } -+} -+ -+ -+static inline void qm_dqrr_cci_consume_to_current(struct qm_portal *portal) -+{ -+ register struct qm_dqrr *dqrr = &portal->dqrr; -+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cci); -+ dqrr->ci = DQRR_PTR2IDX(dqrr->cursor); -+ qm_out(DQRR_CI_CINH, dqrr->ci); -+} -+ -+static inline void qm_dqrr_cce_prefetch(struct qm_portal *portal) -+{ -+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; -+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cce); -+ qm_cl_invalidate(DQRR_CI); -+ qm_cl_touch_rw(DQRR_CI); -+} -+ -+static inline void qm_dqrr_cce_consume_to_current(struct qm_portal *portal) -+{ -+ register struct qm_dqrr *dqrr = &portal->dqrr; -+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cce); -+ dqrr->ci = DQRR_PTR2IDX(dqrr->cursor); -+ qm_cl_out(DQRR_CI, dqrr->ci); -+} -+ -+static inline void qm_dqrr_cdc_consume_1(struct qm_portal *portal, u8 idx, -+ int park) -+{ -+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; -+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); -+ DPA_ASSERT(idx < QM_DQRR_SIZE); -+ qm_out(DQRR_DCAP, (0 << 8) | /* S */ -+ ((park ? 1 : 0) << 6) | /* PK */ -+ idx); /* DCAP_CI */ -+} -+ -+static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal, -+ const struct qm_dqrr_entry *dq, -+ int park) -+{ -+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; -+ u8 idx = DQRR_PTR2IDX(dq); -+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); -+ DPA_ASSERT((dqrr->ring + idx) == dq); -+ DPA_ASSERT(idx < QM_DQRR_SIZE); -+ qm_out(DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */ -+ ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */ -+ idx); /* DQRR_DCAP::DCAP_CI */ -+} -+ -+static inline u8 qm_dqrr_cdc_cci(struct qm_portal *portal) -+{ -+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; -+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); -+ return qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1); -+} -+ -+static inline void qm_dqrr_cdc_cce_prefetch(struct qm_portal *portal) -+{ -+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; -+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); -+ qm_cl_invalidate(DQRR_CI); -+ qm_cl_touch_ro(DQRR_CI); -+} -+ -+static inline u8 qm_dqrr_cdc_cce(struct qm_portal *portal) -+{ -+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; -+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); -+ return qm_cl_in(DQRR_CI) & (QM_DQRR_SIZE - 1); -+} -+ -+static inline u8 qm_dqrr_get_ci(struct qm_portal *portal) -+{ -+ register struct qm_dqrr *dqrr = &portal->dqrr; -+ DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc); -+ return dqrr->ci; -+} -+ -+static inline void qm_dqrr_park(struct qm_portal *portal, u8 idx) -+{ -+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; -+ DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc); -+ qm_out(DQRR_DCAP, (0 << 8) | /* S */ -+ (1 << 6) | /* PK */ -+ (idx & (QM_DQRR_SIZE - 1))); /* DCAP_CI */ -+} -+ -+static inline void qm_dqrr_park_current(struct qm_portal *portal) -+{ -+ register struct qm_dqrr *dqrr = &portal->dqrr; -+ DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc); -+ qm_out(DQRR_DCAP, (0 << 8) | /* S */ -+ (1 << 6) | /* PK */ -+ DQRR_PTR2IDX(dqrr->cursor)); /* DCAP_CI */ -+} -+ -+static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr) -+{ -+ qm_out(DQRR_SDQCR, sdqcr); -+} -+ -+static inline u32 qm_dqrr_sdqcr_get(struct qm_portal *portal) -+{ -+ return qm_in(DQRR_SDQCR); -+} -+ -+static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 
vdqcr) -+{ -+ qm_out(DQRR_VDQCR, vdqcr); -+} -+ -+static inline u32 qm_dqrr_vdqcr_get(struct qm_portal *portal) -+{ -+ return qm_in(DQRR_VDQCR); -+} -+ -+static inline void qm_dqrr_pdqcr_set(struct qm_portal *portal, u32 pdqcr) -+{ -+ qm_out(DQRR_PDQCR, pdqcr); -+} -+ -+static inline u32 qm_dqrr_pdqcr_get(struct qm_portal *portal) -+{ -+ return qm_in(DQRR_PDQCR); -+} -+ -+static inline u8 qm_dqrr_get_ithresh(struct qm_portal *portal) -+{ -+ register struct qm_dqrr *dqrr = &portal->dqrr; -+ return dqrr->ithresh; -+} -+ -+static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh) -+{ -+ qm_out(DQRR_ITR, ithresh); -+} -+ -+static inline u8 qm_dqrr_get_maxfill(struct qm_portal *portal) -+{ -+ return (qm_in(CFG) & 0x00f00000) >> 20; -+} -+ -+ -+/* -------------- */ -+/* --- MR API --- */ -+ -+#define MR_CARRYCLEAR(p) \ -+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_MR_SIZE << 6))) -+ -+static inline u8 MR_PTR2IDX(const struct qm_mr_entry *e) -+{ -+ return ((uintptr_t)e >> 6) & (QM_MR_SIZE - 1); -+} -+ -+static inline const struct qm_mr_entry *MR_INC(const struct qm_mr_entry *e) -+{ -+ return MR_CARRYCLEAR(e + 1); -+} -+ -+static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode, -+ enum qm_mr_cmode cmode) -+{ -+ register struct qm_mr *mr = &portal->mr; -+ u32 cfg; -+ -+ mr->ring = portal->addr.addr_ce + QM_CL_MR; -+ mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1); -+ mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1); -+ mr->cursor = mr->ring + mr->ci; -+ mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi); -+ mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0; -+ mr->ithresh = qm_in(MR_ITR); -+#ifdef CONFIG_FSL_DPA_CHECKING -+ mr->pmode = pmode; -+ mr->cmode = cmode; -+#endif -+ cfg = (qm_in(CFG) & 0xfffff0ff) | -+ ((cmode & 1) << 8); /* QCSP_CFG:MM */ -+ qm_out(CFG, cfg); -+ return 0; -+} -+ -+static inline void qm_mr_finish(struct qm_portal *portal) -+{ -+ register struct qm_mr *mr = &portal->mr; -+ if (mr->ci != MR_PTR2IDX(mr->cursor)) -+ pr_crit("Ignoring completed MR entries\n"); -+} -+ -+static inline const struct qm_mr_entry *qm_mr_current(struct qm_portal *portal) -+{ -+ register struct qm_mr *mr = &portal->mr; -+ if (!mr->fill) -+ return NULL; -+ return mr->cursor; -+} -+ -+static inline u8 qm_mr_cursor(struct qm_portal *portal) -+{ -+ register struct qm_mr *mr = &portal->mr; -+ return MR_PTR2IDX(mr->cursor); -+} -+ -+static inline u8 qm_mr_next(struct qm_portal *portal) -+{ -+ register struct qm_mr *mr = &portal->mr; -+ DPA_ASSERT(mr->fill); -+ mr->cursor = MR_INC(mr->cursor); -+ return --mr->fill; -+} -+ -+static inline u8 qm_mr_pci_update(struct qm_portal *portal) -+{ -+ register struct qm_mr *mr = &portal->mr; -+ u8 diff, old_pi = mr->pi; -+ DPA_ASSERT(mr->pmode == qm_mr_pci); -+ mr->pi = qm_in(MR_PI_CINH); -+ diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi); -+ mr->fill += diff; -+ return diff; -+} -+ -+static inline void qm_mr_pce_prefetch(struct qm_portal *portal) -+{ -+ __maybe_unused register struct qm_mr *mr = &portal->mr; -+ DPA_ASSERT(mr->pmode == qm_mr_pce); -+ qm_cl_invalidate(MR_PI); -+ qm_cl_touch_ro(MR_PI); -+} -+ -+static inline u8 qm_mr_pce_update(struct qm_portal *portal) -+{ -+ register struct qm_mr *mr = &portal->mr; -+ u8 diff, old_pi = mr->pi; -+ DPA_ASSERT(mr->pmode == qm_mr_pce); -+ mr->pi = qm_cl_in(MR_PI) & (QM_MR_SIZE - 1); -+ diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi); -+ mr->fill += diff; -+ return diff; -+} -+ -+static inline void qm_mr_pvb_update(struct qm_portal *portal) -+{ -+ register struct 
qm_mr *mr = &portal->mr; -+ const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi); -+ DPA_ASSERT(mr->pmode == qm_mr_pvb); -+ /* when accessing 'verb', use __raw_readb() to ensure that compiler -+ * inlining doesn't try to optimise out "excess reads". */ -+ if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) { -+ mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1); -+ if (!mr->pi) -+ mr->vbit ^= QM_MR_VERB_VBIT; -+ mr->fill++; -+ res = MR_INC(res); -+ } -+ dcbit_ro(res); -+} -+ -+static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num) -+{ -+ register struct qm_mr *mr = &portal->mr; -+ DPA_ASSERT(mr->cmode == qm_mr_cci); -+ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1); -+ qm_out(MR_CI_CINH, mr->ci); -+} -+ -+static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal) -+{ -+ register struct qm_mr *mr = &portal->mr; -+ DPA_ASSERT(mr->cmode == qm_mr_cci); -+ mr->ci = MR_PTR2IDX(mr->cursor); -+ qm_out(MR_CI_CINH, mr->ci); -+} -+ -+static inline void qm_mr_cce_prefetch(struct qm_portal *portal) -+{ -+ __maybe_unused register struct qm_mr *mr = &portal->mr; -+ DPA_ASSERT(mr->cmode == qm_mr_cce); -+ qm_cl_invalidate(MR_CI); -+ qm_cl_touch_rw(MR_CI); -+} -+ -+static inline void qm_mr_cce_consume(struct qm_portal *portal, u8 num) -+{ -+ register struct qm_mr *mr = &portal->mr; -+ DPA_ASSERT(mr->cmode == qm_mr_cce); -+ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1); -+ qm_cl_out(MR_CI, mr->ci); -+} -+ -+static inline void qm_mr_cce_consume_to_current(struct qm_portal *portal) -+{ -+ register struct qm_mr *mr = &portal->mr; -+ DPA_ASSERT(mr->cmode == qm_mr_cce); -+ mr->ci = MR_PTR2IDX(mr->cursor); -+ qm_cl_out(MR_CI, mr->ci); -+} -+ -+static inline u8 qm_mr_get_ci(struct qm_portal *portal) -+{ -+ register struct qm_mr *mr = &portal->mr; -+ return mr->ci; -+} -+ -+static inline u8 qm_mr_get_ithresh(struct qm_portal *portal) -+{ -+ register struct qm_mr *mr = &portal->mr; -+ return mr->ithresh; -+} -+ -+static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh) -+{ -+ qm_out(MR_ITR, ithresh); -+} -+ -+ -+/* ------------------------------ */ -+/* --- Management command API --- */ -+ -+static inline int qm_mc_init(struct qm_portal *portal) -+{ -+ register struct qm_mc *mc = &portal->mc; -+ mc->cr = portal->addr.addr_ce + QM_CL_CR; -+ mc->rr = portal->addr.addr_ce + QM_CL_RR0; -+ mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) & -+ QM_MCC_VERB_VBIT) ? 0 : 1; -+ mc->vbit = mc->rridx ? 
QM_MCC_VERB_VBIT : 0; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ mc->state = qman_mc_idle; -+#endif -+ return 0; -+} -+ -+static inline void qm_mc_finish(struct qm_portal *portal) -+{ -+ __maybe_unused register struct qm_mc *mc = &portal->mc; -+ DPA_ASSERT(mc->state == qman_mc_idle); -+#ifdef CONFIG_FSL_DPA_CHECKING -+ if (mc->state != qman_mc_idle) -+ pr_crit("Losing incomplete MC command\n"); -+#endif -+} -+ -+static inline struct qm_mc_command *qm_mc_start(struct qm_portal *portal) -+{ -+ register struct qm_mc *mc = &portal->mc; -+ DPA_ASSERT(mc->state == qman_mc_idle); -+#ifdef CONFIG_FSL_DPA_CHECKING -+ mc->state = qman_mc_user; -+#endif -+ dcbz_64(mc->cr); -+ return mc->cr; -+} -+ -+static inline void qm_mc_abort(struct qm_portal *portal) -+{ -+ __maybe_unused register struct qm_mc *mc = &portal->mc; -+ DPA_ASSERT(mc->state == qman_mc_user); -+#ifdef CONFIG_FSL_DPA_CHECKING -+ mc->state = qman_mc_idle; -+#endif -+} -+ -+static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb) -+{ -+ register struct qm_mc *mc = &portal->mc; -+ struct qm_mc_result *rr = mc->rr + mc->rridx; -+ DPA_ASSERT(mc->state == qman_mc_user); -+ lwsync(); -+ mc->cr->__dont_write_directly__verb = myverb | mc->vbit; -+ dcbf(mc->cr); -+ dcbit_ro(rr); -+#ifdef CONFIG_FSL_DPA_CHECKING -+ mc->state = qman_mc_hw; -+#endif -+} -+ -+static inline struct qm_mc_result *qm_mc_result(struct qm_portal *portal) -+{ -+ register struct qm_mc *mc = &portal->mc; -+ struct qm_mc_result *rr = mc->rr + mc->rridx; -+ DPA_ASSERT(mc->state == qman_mc_hw); -+ /* The inactive response register's verb byte always returns zero until -+ * its command is submitted and completed. This includes the valid-bit, -+ * in case you were wondering... */ -+ if (!__raw_readb(&rr->verb)) { -+ dcbit_ro(rr); -+ return NULL; -+ } -+ mc->rridx ^= 1; -+ mc->vbit ^= QM_MCC_VERB_VBIT; -+#ifdef CONFIG_FSL_DPA_CHECKING -+ mc->state = qman_mc_idle; -+#endif -+ return rr; -+} -+ -+ -+/* ------------------------------------- */ -+/* --- Portal interrupt register API --- */ -+ -+static inline int qm_isr_init(__always_unused struct qm_portal *portal) -+{ -+ return 0; -+} -+ -+static inline void qm_isr_finish(__always_unused struct qm_portal *portal) -+{ -+} -+ -+static inline void qm_isr_set_iperiod(struct qm_portal *portal, u16 iperiod) -+{ -+ qm_out(ITPR, iperiod); -+} -+ -+static inline u32 __qm_isr_read(struct qm_portal *portal, enum qm_isr_reg n) -+{ -+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) -+ return __qm_in(&portal->addr, QM_REG_ISR + (n << 6)); -+#else -+ return __qm_in(&portal->addr, QM_REG_ISR + (n << 2)); -+#endif -+} -+ -+static inline void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n, -+ u32 val) -+{ -+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) -+ __qm_out(&portal->addr, QM_REG_ISR + (n << 6), val); -+#else -+ __qm_out(&portal->addr, QM_REG_ISR + (n << 2), val); -+#endif -+} -+ -+/* Cleanup FQs */ -+static inline int qm_shutdown_fq(struct qm_portal **portal, int portal_count, -+ u32 fqid) -+{ -+ -+ struct qm_mc_command *mcc; -+ struct qm_mc_result *mcr; -+ u8 state; -+ int orl_empty, fq_empty, i, drain = 0; -+ u32 result; -+ u32 channel, wq; -+ u16 dest_wq; -+ -+ /* Determine the state of the FQID */ -+ mcc = qm_mc_start(portal[0]); -+ mcc->queryfq_np.fqid = cpu_to_be32(fqid); -+ qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ_NP); -+ while (!(mcr = qm_mc_result(portal[0]))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); -+ state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK; -+ if 
(state == QM_MCR_NP_STATE_OOS) -+ return 0; /* Already OOS, no need to do any more checks */ -+ -+ /* Query which channel the FQ is using */ -+ mcc = qm_mc_start(portal[0]); -+ mcc->queryfq.fqid = cpu_to_be32(fqid); -+ qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ); -+ while (!(mcr = qm_mc_result(portal[0]))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); -+ -+ /* Need to store these since the MCR gets reused */ -+ dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq); -+ wq = dest_wq & 0x7; -+ channel = dest_wq>>3; -+ -+ switch (state) { -+ case QM_MCR_NP_STATE_TEN_SCHED: -+ case QM_MCR_NP_STATE_TRU_SCHED: -+ case QM_MCR_NP_STATE_ACTIVE: -+ case QM_MCR_NP_STATE_PARKED: -+ orl_empty = 0; -+ mcc = qm_mc_start(portal[0]); -+ mcc->alterfq.fqid = cpu_to_be32(fqid); -+ qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_RETIRE); -+ while (!(mcr = qm_mc_result(portal[0]))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == -+ QM_MCR_VERB_ALTER_RETIRE); -+ result = mcr->result; /* Make a copy as we reuse MCR below */ -+ -+ if (result == QM_MCR_RESULT_PENDING) { -+ /* Need to wait for the FQRN in the message ring, which -+ will only occur once the FQ has been drained. In -+ order for the FQ to drain, the portal needs to be set -+ to dequeue from the channel the FQ is scheduled on */ -+ const struct qm_mr_entry *msg; -+ const struct qm_dqrr_entry *dqrr = NULL; -+ int found_fqrn = 0; -+ u16 dequeue_wq = 0; -+ -+ /* Flag that we need to drain FQ */ -+ drain = 1; -+ -+ if (channel >= qm_channel_pool1 && -+ channel < (qm_channel_pool1 + 15)) { -+ /* Pool channel, enable the bit in the portal */ -+ dequeue_wq = (channel - -+ qm_channel_pool1 + 1)<<4 | wq; -+ } else if (channel < qm_channel_pool1) { -+ /* Dedicated channel */ -+ dequeue_wq = wq; -+ } else { -+ pr_info("Cannot recover FQ 0x%x, it is " -+ "scheduled on channel 0x%x\n", -+ fqid, channel); -+ return -EBUSY; -+ } -+ /* Set the sdqcr to drain this channel */ -+ if (channel < qm_channel_pool1) -+ for (i = 0; i < portal_count; i++) -+ qm_dqrr_sdqcr_set(portal[i], -+ QM_SDQCR_TYPE_ACTIVE | -+ QM_SDQCR_CHANNELS_DEDICATED); -+ else -+ for (i = 0; i < portal_count; i++) -+ qm_dqrr_sdqcr_set( -+ portal[i], -+ QM_SDQCR_TYPE_ACTIVE | -+ QM_SDQCR_CHANNELS_POOL_CONV -+ (channel)); -+ while (!found_fqrn) { -+ /* Keep draining DQRR while checking the MR */ -+ for (i = 0; i < portal_count; i++) { -+ qm_dqrr_pvb_update(portal[i]); -+ dqrr = qm_dqrr_current(portal[i]); -+ while (dqrr) { -+ qm_dqrr_cdc_consume_1ptr( -+ portal[i], dqrr, 0); -+ qm_dqrr_pvb_update(portal[i]); -+ qm_dqrr_next(portal[i]); -+ dqrr = qm_dqrr_current( -+ portal[i]); -+ } -+ /* Process message ring too */ -+ qm_mr_pvb_update(portal[i]); -+ msg = qm_mr_current(portal[i]); -+ while (msg) { -+ if ((msg->verb & -+ QM_MR_VERB_TYPE_MASK) -+ == QM_MR_VERB_FQRN) -+ found_fqrn = 1; -+ qm_mr_next(portal[i]); -+ qm_mr_cci_consume_to_current( -+ portal[i]); -+ qm_mr_pvb_update(portal[i]); -+ msg = qm_mr_current(portal[i]); -+ } -+ cpu_relax(); -+ } -+ } -+ } -+ if (result != QM_MCR_RESULT_OK && -+ result != QM_MCR_RESULT_PENDING) { -+ /* error */ -+ pr_err("qman_retire_fq failed on FQ 0x%x, result=0x%x\n", -+ fqid, result); -+ return -1; -+ } -+ if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) { -+ /* ORL had no entries, no need to wait until the -+ ERNs come in */ -+ orl_empty = 1; -+ } -+ /* Retirement succeeded, check to see if FQ needs -+ to be drained */ -+ if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) { -+ /* FQ is Not Empty, drain using volatile DQ
commands */ -+ fq_empty = 0; -+ do { -+ const struct qm_dqrr_entry *dqrr = NULL; -+ u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3); -+ qm_dqrr_vdqcr_set(portal[0], vdqcr); -+ -+ /* Wait for a dequeue to occur */ -+ while (dqrr == NULL) { -+ qm_dqrr_pvb_update(portal[0]); -+ dqrr = qm_dqrr_current(portal[0]); -+ if (!dqrr) -+ cpu_relax(); -+ } -+ /* Process the dequeues, making sure to -+ empty the ring completely */ -+ while (dqrr) { -+ if (be32_to_cpu(dqrr->fqid) == fqid && -+ dqrr->stat & QM_DQRR_STAT_FQ_EMPTY) -+ fq_empty = 1; -+ qm_dqrr_cdc_consume_1ptr(portal[0], -+ dqrr, 0); -+ qm_dqrr_pvb_update(portal[0]); -+ qm_dqrr_next(portal[0]); -+ dqrr = qm_dqrr_current(portal[0]); -+ } -+ } while (fq_empty == 0); -+ } -+ for (i = 0; i < portal_count; i++) -+ qm_dqrr_sdqcr_set(portal[i], 0); -+ -+ /* Wait for the ORL to have been completely drained */ -+ while (orl_empty == 0) { -+ const struct qm_mr_entry *msg; -+ qm_mr_pvb_update(portal[0]); -+ msg = qm_mr_current(portal[0]); -+ while (msg) { -+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) == -+ QM_MR_VERB_FQRL) -+ orl_empty = 1; -+ qm_mr_next(portal[0]); -+ qm_mr_cci_consume_to_current(portal[0]); -+ qm_mr_pvb_update(portal[0]); -+ msg = qm_mr_current(portal[0]); -+ } -+ cpu_relax(); -+ } -+ mcc = qm_mc_start(portal[0]); -+ mcc->alterfq.fqid = cpu_to_be32(fqid); -+ qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS); -+ while (!(mcr = qm_mc_result(portal[0]))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == -+ QM_MCR_VERB_ALTER_OOS); -+ if (mcr->result != QM_MCR_RESULT_OK) { -+ pr_err("OOS after drain Failed on FQID 0x%x, result 0x%x\n", -+ fqid, mcr->result); -+ return -1; -+ } -+ return 0; -+ case QM_MCR_NP_STATE_RETIRED: -+ /* Send OOS Command */ -+ mcc = qm_mc_start(portal[0]); -+ mcc->alterfq.fqid = cpu_to_be32(fqid); -+ qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS); -+ while (!(mcr = qm_mc_result(portal[0]))) -+ cpu_relax(); -+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == -+ QM_MCR_VERB_ALTER_OOS); -+ if (mcr->result) { -+ pr_err("OOS Failed on FQID 0x%x\n", fqid); -+ return -1; -+ } -+ return 0; -+ } -+ return -1; -+} ---- /dev/null -+++ b/drivers/staging/fsl_qbman/qman_private.h -@@ -0,0 +1,398 @@ -+/* Copyright 2008-2012 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include "dpa_sys.h" -+#include <linux/dma-mapping.h> -+#include <linux/iommu.h> -+ -+#if defined(CONFIG_FSL_PAMU) -+#include <asm/fsl_pamu_stash.h> -+#endif -+ -+#if !defined(CONFIG_FSL_QMAN_FQ_LOOKUP) && defined(CONFIG_PPC64) -+#error "_PPC64 requires _FSL_QMAN_FQ_LOOKUP" -+#endif -+ -+#define QBMAN_ANY_PORTAL_IDX 0xffffffff -+ /* ----------------- */ -+ /* Congestion Groups */ -+ /* ----------------- */ -+/* This wrapper represents a bit-array for the state of the 256 Qman congestion -+ * groups. It is also used as a *mask* for congestion groups, eg. so we ignore -+ * those that don't concern us. We harness the structure and accessor details -+ * already used in the management command to query congestion groups. */ -+struct qman_cgrs { -+ struct __qm_mcr_querycongestion q; -+}; -+static inline void qman_cgrs_init(struct qman_cgrs *c) -+{ -+ memset(c, 0, sizeof(*c)); -+} -+static inline void qman_cgrs_fill(struct qman_cgrs *c) -+{ -+ memset(c, 0xff, sizeof(*c)); -+} -+static inline int qman_cgrs_get(struct qman_cgrs *c, int num) -+{ -+ return QM_MCR_QUERYCONGESTION(&c->q, num); -+} -+static inline void qman_cgrs_set(struct qman_cgrs *c, int num) -+{ -+ c->q.__state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num)); -+} -+static inline void qman_cgrs_unset(struct qman_cgrs *c, int num) -+{ -+ c->q.__state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num)); -+} -+static inline int qman_cgrs_next(struct qman_cgrs *c, int num) -+{ -+ while ((++num < __CGR_NUM) && !qman_cgrs_get(c, num)) -+ ; -+ return num; -+} -+static inline void qman_cgrs_cp(struct qman_cgrs *dest, -+ const struct qman_cgrs *src) -+{ -+ *dest = *src; -+} -+static inline void qman_cgrs_and(struct qman_cgrs *dest, -+ const struct qman_cgrs *a, const struct qman_cgrs *b) -+{ -+ int ret; -+ u32 *_d = dest->q.__state; -+ const u32 *_a = a->q.__state; -+ const u32 *_b = b->q.__state; -+ for (ret = 0; ret < 8; ret++) -+ *(_d++) = *(_a++) & *(_b++); -+} -+static inline void qman_cgrs_xor(struct qman_cgrs *dest, -+ const struct qman_cgrs *a, const struct qman_cgrs *b) -+{ -+ int ret; -+ u32 *_d = dest->q.__state; -+ const u32 *_a = a->q.__state; -+ const u32 *_b = b->q.__state; -+ for (ret = 0; ret < 8; ret++) -+ *(_d++) = *(_a++) ^ *(_b++); -+} -+ -+ /* ----------------------- */ -+ /* CEETM Congestion Groups */ -+ /* ----------------------- */ -+/* This wrapper represents a bit-array for the state of the 512 Qman CEETM -+ * congestion groups.
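-+ * Indices 0-255 live in q[0] and indices 256-511 in q[1]; the accessors -+ * below implement exactly that split.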
-+ */ -+struct qman_ccgrs { -+ struct __qm_mcr_querycongestion q[2]; -+}; -+static inline void qman_ccgrs_init(struct qman_ccgrs *c) -+{ -+ memset(c, 0, sizeof(*c)); -+} -+static inline void qman_ccgrs_fill(struct qman_ccgrs *c) -+{ -+ memset(c, 0xff, sizeof(*c)); -+} -+static inline int qman_ccgrs_get(struct qman_ccgrs *c, int num) -+{ -+ if (num < __CGR_NUM) -+ return QM_MCR_QUERYCONGESTION(&c->q[0], num); -+ else -+ return QM_MCR_QUERYCONGESTION(&c->q[1], (num - __CGR_NUM)); -+} -+static inline int qman_ccgrs_next(struct qman_ccgrs *c, int num) -+{ -+ while ((++num < __CGR_NUM) && !qman_ccgrs_get(c, num)) -+ ; -+ return num; -+} -+static inline void qman_ccgrs_cp(struct qman_ccgrs *dest, -+ const struct qman_ccgrs *src) -+{ -+ *dest = *src; -+} -+static inline void qman_ccgrs_and(struct qman_ccgrs *dest, -+ const struct qman_ccgrs *a, const struct qman_ccgrs *b) -+{ -+ int ret, i; -+ u32 *_d; -+ const u32 *_a, *_b; -+ for (i = 0; i < 2; i++) { -+ _d = dest->q[i].__state; -+ _a = a->q[i].__state; -+ _b = b->q[i].__state; -+ for (ret = 0; ret < 8; ret++) -+ *(_d++) = *(_a++) & *(_b++); -+ } -+} -+static inline void qman_ccgrs_xor(struct qman_ccgrs *dest, -+ const struct qman_ccgrs *a, const struct qman_ccgrs *b) -+{ -+ int ret, i; -+ u32 *_d; -+ const u32 *_a, *_b; -+ for (i = 0; i < 2; i++) { -+ _d = dest->q[i].__state; -+ _a = a->q[i].__state; -+ _b = b->q[i].__state; -+ for (ret = 0; ret < 8; ret++) -+ *(_d++) = *(_a++) ^ *(_b++); -+ } -+} -+ -+/* used by CCSR and portal interrupt code */ -+enum qm_isr_reg { -+ qm_isr_status = 0, -+ qm_isr_enable = 1, -+ qm_isr_disable = 2, -+ qm_isr_inhibit = 3 -+}; -+ -+struct qm_portal_config { -+ /* Corenet portal addresses; -+ * [0]==cache-enabled, [1]==cache-inhibited. */ -+ __iomem void *addr_virt[2]; -+ struct resource addr_phys[2]; -+ struct device dev; -+ struct iommu_domain *iommu_domain; -+ /* Allow these to be joined in lists */ -+ struct list_head list; -+ /* User-visible portal configuration settings */ -+ struct qman_portal_config public_cfg; -+ /* power management saved data */ -+ u32 saved_isdr; -+}; -+ -+/* Revision info (for errata and feature handling) */ -+#define QMAN_REV11 0x0101 -+#define QMAN_REV12 0x0102 -+#define QMAN_REV20 0x0200 -+#define QMAN_REV30 0x0300 -+#define QMAN_REV31 0x0301 -+#define QMAN_REV32 0x0302 -+ -+/* QMan REV_2 register contains the Cfg option */ -+#define QMAN_REV_CFG_0 0x0 -+#define QMAN_REV_CFG_1 0x1 -+#define QMAN_REV_CFG_2 0x2 -+#define QMAN_REV_CFG_3 0x3 -+ -+extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */ -+extern u8 qman_ip_cfg; -+extern u32 qman_clk; -+extern u16 qman_portal_max; -+ -+#ifdef CONFIG_FSL_QMAN_CONFIG -+/* Hooks from qman_driver.c to qman_config.c */ -+int qman_init_ccsr(struct device_node *node); -+void qman_liodn_fixup(u16 channel); -+int qman_set_sdest(u16 channel, unsigned int cpu_idx); -+size_t get_qman_fqd_size(void); -+#else -+static inline size_t get_qman_fqd_size(void) -+{ -+ return (PAGE_SIZE << CONFIG_FSL_QMAN_FQD_SZ); -+} -+#endif -+ -+int qm_set_wpm(int wpm); -+int qm_get_wpm(int *wpm); -+ -+/* Hooks from qman_driver.c in to qman_high.c */ -+struct qman_portal *qman_create_portal( -+ struct qman_portal *portal, -+ const struct qm_portal_config *config, -+ const struct qman_cgrs *cgrs); -+ -+struct qman_portal *qman_create_affine_portal( -+ const struct qm_portal_config *config, -+ const struct qman_cgrs *cgrs); -+struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect, -+ int cpu); -+const struct qm_portal_config 
*qman_destroy_affine_portal(void); -+void qman_destroy_portal(struct qman_portal *qm); -+ -+/* Hooks from fsl_usdpaa.c to qman_driver.c */ -+struct qm_portal_config *qm_get_unused_portal(void); -+struct qm_portal_config *qm_get_unused_portal_idx(uint32_t idx); -+ -+void qm_put_unused_portal(struct qm_portal_config *pcfg); -+void qm_set_liodns(struct qm_portal_config *pcfg); -+ -+/* This CGR feature is supported by h/w and required by unit-tests and the -+ * debugfs hooks, so is implemented in the driver. However it allows an explicit -+ * corruption of h/w fields by s/w that are usually incorruptible (because the -+ * counters are usually maintained entirely within h/w). As such, we declare -+ * this API internally. */ -+int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt, -+ struct qm_mcr_cgrtestwrite *result); -+ -+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP -+/* If the fq object pointer is greater than the size of context_b field, -+ * then a lookup table is required. */ -+int qman_setup_fq_lookup_table(size_t num_entries); -+#endif -+ -+ -+/*************************************************/ -+/* QMan s/w corenet portal, low-level i/face */ -+/*************************************************/ -+ -+/* Note: most functions are only used by the high-level interface, so are -+ * inlined from qman_low.h. The stuff below is for use by other parts of the -+ * driver. */ -+ -+/* For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one -+ * dequeue TYPE. Choose TOKEN (8-bit). -+ * If SOURCE == CHANNELS, -+ * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n). -+ * You can choose DEDICATED_PRECEDENCE if the portal channel should have -+ * priority. -+ * If SOURCE == SPECIFICWQ, -+ * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the -+ * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the -+ * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the -+ * same value. -+ */ -+#define QM_SDQCR_SOURCE_CHANNELS 0x0 -+#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000 -+#define QM_SDQCR_COUNT_EXACT1 0x0 -+#define QM_SDQCR_COUNT_UPTO3 0x20000000 -+#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000 -+#define QM_SDQCR_TYPE_MASK 0x03000000 -+#define QM_SDQCR_TYPE_NULL 0x0 -+#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000 -+#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000 -+#define QM_SDQCR_TYPE_ACTIVE 0x03000000 -+#define QM_SDQCR_TOKEN_MASK 0x00ff0000 -+#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16) -+#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff) -+#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000 -+#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7 -+#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000 -+#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4) -+#define QM_SDQCR_SPECIFICWQ_WQ(n) (n) -+ -+/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */ -+#define QM_VDQCR_FQID_MASK 0x00ffffff -+#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK) -+ -+/* For qm_dqrr_pdqcr_set(); Choose one MODE. Choose one COUNT. -+ * If MODE==SCHEDULED -+ * Choose SCHEDULED_CHANNELS or SCHEDULED_SPECIFICWQ. Choose one dequeue TYPE. -+ * If CHANNELS, -+ * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL() channels. -+ * You can choose DEDICATED_PRECEDENCE if the portal channel should have -+ * priority. -+ * If SPECIFICWQ, -+ * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the -+ * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the -+ * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the -+ * same value.
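As a concrete reading of the SDQCR recipe above (qman_test_high.c later in this patch composes its PORTAL_SDQCR the same way), a sketch that builds one command word and round-trips the token:

	/* Sketch only: dequeue up to 3 frames from the dedicated channel,
	 * using the priority/QoS dequeue type, tagging the resulting DQRR
	 * entries with token 0x98. */
	u32 sdqcr = QM_SDQCR_SOURCE_CHANNELS |
		    QM_SDQCR_COUNT_UPTO3 |
		    QM_SDQCR_TYPE_PRIO_QOS |
		    QM_SDQCR_TOKEN_SET(0x98) |
		    QM_SDQCR_CHANNELS_DEDICATED;

	BUG_ON(QM_SDQCR_TOKEN_GET(sdqcr) != 0x98);   /* accessors round-trip */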
-+ * If MODE==UNSCHEDULED -+ * Choose FQID(). -+ */ -+#define QM_PDQCR_MODE_SCHEDULED 0x0 -+#define QM_PDQCR_MODE_UNSCHEDULED 0x80000000 -+#define QM_PDQCR_SCHEDULED_CHANNELS 0x0 -+#define QM_PDQCR_SCHEDULED_SPECIFICWQ 0x40000000 -+#define QM_PDQCR_COUNT_EXACT1 0x0 -+#define QM_PDQCR_COUNT_UPTO3 0x20000000 -+#define QM_PDQCR_DEDICATED_PRECEDENCE 0x10000000 -+#define QM_PDQCR_TYPE_MASK 0x03000000 -+#define QM_PDQCR_TYPE_NULL 0x0 -+#define QM_PDQCR_TYPE_PRIO_QOS 0x01000000 -+#define QM_PDQCR_TYPE_ACTIVE_QOS 0x02000000 -+#define QM_PDQCR_TYPE_ACTIVE 0x03000000 -+#define QM_PDQCR_CHANNELS_DEDICATED 0x00008000 -+#define QM_PDQCR_CHANNELS_POOL(n) (0x00008000 >> (n)) -+#define QM_PDQCR_SPECIFICWQ_MASK 0x000000f7 -+#define QM_PDQCR_SPECIFICWQ_DEDICATED 0x00000000 -+#define QM_PDQCR_SPECIFICWQ_POOL(n) ((n) << 4) -+#define QM_PDQCR_SPECIFICWQ_WQ(n) (n) -+#define QM_PDQCR_FQID(n) ((n) & 0xffffff) -+ -+/* Used by all portal interrupt registers except 'inhibit' -+ * Channels with frame availability -+ */ -+#define QM_PIRQ_DQAVAIL 0x0000ffff -+ -+/* The DQAVAIL interrupt fields break down into these bits; */ -+#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */ -+#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */ -+#define QM_DQAVAIL_MASK 0xffff -+/* This mask contains all the "irqsource" bits visible to API users */ -+#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI) -+ -+/* These are qm_<reg>_<verb>(). So for example, qm_disable_write() means "write -+ * the disable register" rather than "disable the ability to write". */ -+#define qm_isr_status_read(qm) __qm_isr_read(qm, qm_isr_status) -+#define qm_isr_status_clear(qm, m) __qm_isr_write(qm, qm_isr_status, m) -+#define qm_isr_enable_read(qm) __qm_isr_read(qm, qm_isr_enable) -+#define qm_isr_enable_write(qm, v) __qm_isr_write(qm, qm_isr_enable, v) -+#define qm_isr_disable_read(qm) __qm_isr_read(qm, qm_isr_disable) -+#define qm_isr_disable_write(qm, v) __qm_isr_write(qm, qm_isr_disable, v) -+/* TODO: unfortunate name-clash here, reword? */ -+#define qm_isr_inhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 1) -+#define qm_isr_uninhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 0) -+ -+#ifdef CONFIG_FSL_QMAN_CONFIG -+int qman_have_ccsr(void); -+#else -+#define qman_have_ccsr 0 -+#endif -+ -+__init int qman_init(void); -+__init int qman_resource_init(void); -+ -+/* CEETM related */ -+#define QMAN_CEETM_MAX 2 -+extern u8 num_ceetms; -+extern struct qm_ceetm qman_ceetms[QMAN_CEETM_MAX]; -+int qman_sp_enable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal); -+int qman_sp_disable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal); -+int qman_ceetm_set_prescaler(enum qm_dc_portal portal); -+int qman_ceetm_get_prescaler(u16 *pres); -+int qman_ceetm_query_cq(unsigned int cqid, unsigned int dcpid, -+ struct qm_mcr_ceetm_cq_query *cq_query); -+int qman_ceetm_query_ccgr(struct qm_mcc_ceetm_ccgr_query *ccgr_query, -+ struct qm_mcr_ceetm_ccgr_query *response); -+int qman_ceetm_get_xsfdr(enum qm_dc_portal portal, unsigned int *num); -+ -+extern void *affine_portals[NR_CPUS]; -+const struct qm_portal_config *qman_get_qm_portal_config( -+ struct qman_portal *portal); -+ -+/* power management */ -+#ifdef CONFIG_SUSPEND -+void suspend_unused_qportal(void); -+void resume_unused_qportal(void); -+#endif ---- /dev/null -+++ b/drivers/staging/fsl_qbman/qman_test.c -@@ -0,0 +1,57 @@ -+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include "qman_test.h" -+ -+MODULE_AUTHOR("Geoff Thorpe"); -+MODULE_LICENSE("Dual BSD/GPL"); -+MODULE_DESCRIPTION("Qman testing"); -+ -+static int test_init(void) -+{ -+ int loop = 1; -+ while (loop--) { -+#ifdef CONFIG_FSL_QMAN_TEST_STASH_POTATO -+ qman_test_hotpotato(); -+#endif -+#ifdef CONFIG_FSL_QMAN_TEST_HIGH -+ qman_test_high(); -+#endif -+ } -+ return 0; -+} -+ -+static void test_exit(void) -+{ -+} -+ -+module_init(test_init); -+module_exit(test_exit); ---- /dev/null -+++ b/drivers/staging/fsl_qbman/qman_test.h -@@ -0,0 +1,45 @@ -+/* Copyright 2008-2011 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+void qman_test_hotpotato(void); -+void qman_test_high(void); -+ ---- /dev/null -+++ b/drivers/staging/fsl_qbman/qman_test_high.c -@@ -0,0 +1,216 @@ -+/* Copyright 2008-2011 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include "qman_test.h" -+ -+/*************/ -+/* constants */ -+/*************/ -+ -+#define CGR_ID 27 -+#define POOL_ID 2 -+#define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID -+#define NUM_ENQUEUES 10 -+#define NUM_PARTIAL 4 -+#define PORTAL_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \ -+ QM_SDQCR_TYPE_PRIO_QOS | \ -+ QM_SDQCR_TOKEN_SET(0x98) | \ -+ QM_SDQCR_CHANNELS_DEDICATED | \ -+ QM_SDQCR_CHANNELS_POOL(POOL_ID)) -+#define PORTAL_OPAQUE ((void *)0xf00dbeef) -+#define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH) -+ -+/*************************************/ -+/* Predeclarations (eg. 
for fq_base) */ -+/*************************************/ -+ -+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *, -+ struct qman_fq *, -+ const struct qm_dqrr_entry *); -+static void cb_ern(struct qman_portal *, struct qman_fq *, -+ const struct qm_mr_entry *); -+static void cb_fqs(struct qman_portal *, struct qman_fq *, -+ const struct qm_mr_entry *); -+ -+/***************/ -+/* global vars */ -+/***************/ -+ -+static struct qm_fd fd, fd_dq; -+static struct qman_fq fq_base = { -+ .cb.dqrr = cb_dqrr, -+ .cb.ern = cb_ern, -+ .cb.fqs = cb_fqs -+}; -+static DECLARE_WAIT_QUEUE_HEAD(waitqueue); -+static int retire_complete, sdqcr_complete; -+ -+/**********************/ -+/* internal functions */ -+/**********************/ -+ -+/* Helpers for initialising and "incrementing" a frame descriptor */ -+static void fd_init(struct qm_fd *__fd) -+{ -+ qm_fd_addr_set64(__fd, 0xabdeadbeefLLU); -+ __fd->format = qm_fd_contig_big; -+ __fd->length29 = 0x0000ffff; -+ __fd->cmd = 0xfeedf00d; -+} -+ -+static void fd_inc(struct qm_fd *__fd) -+{ -+ u64 t = qm_fd_addr_get64(__fd); -+ int z = t >> 40; -+ t <<= 1; -+ if (z) -+ t |= 1; -+ qm_fd_addr_set64(__fd, t); -+ __fd->length29--; -+ __fd->cmd++; -+} -+ -+/* The only part of the 'fd' we can't memcmp() is the ppid */ -+static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b) -+{ -+ int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1; -+ if (!r) -+ r = a->format - b->format; -+ if (!r) -+ r = a->opaque - b->opaque; -+ if (!r) -+ r = a->cmd - b->cmd; -+ return r; -+} -+ -+/********/ -+/* test */ -+/********/ -+ -+static void do_enqueues(struct qman_fq *fq) -+{ -+ unsigned int loop; -+ for (loop = 0; loop < NUM_ENQUEUES; loop++) { -+ if (qman_enqueue(fq, &fd, QMAN_ENQUEUE_FLAG_WAIT | -+ (((loop + 1) == NUM_ENQUEUES) ? -+ QMAN_ENQUEUE_FLAG_WAIT_SYNC : 0))) -+ panic("qman_enqueue() failed\n"); -+ fd_inc(&fd); -+ } -+} -+ -+void qman_test_high(void) -+{ -+ unsigned int flags; -+ int res; -+ struct qman_fq *fq = &fq_base; -+ -+ pr_info("qman_test_high starting\n"); -+ fd_init(&fd); -+ fd_init(&fd_dq); -+ -+ /* Initialise (parked) FQ */ -+ if (qman_create_fq(0, FQ_FLAGS, fq)) -+ panic("qman_create_fq() failed\n"); -+ if (qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL)) -+ panic("qman_init_fq() failed\n"); -+ -+ /* Do enqueues + VDQCR, twice. 
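fd_inc() above shifts the frame address left on every enqueue, folding anything that crosses bit 40 back into bit 0, so producer and consumer can march the expected descriptor forward in lock-step. A standalone model of just the address step, assuming (as the 48-bit addr field implies) that qm_fd_addr_set64() truncates to 48 bits:

	#include <stdint.h>
	#include <stdio.h>

	/* Model of fd_inc()'s address transformation (assumptions above). */
	static uint64_t addr_step(uint64_t t)
	{
		int wrap = (t >> 40) != 0;     /* any bits at/above bit 40? */

		t <<= 1;
		if (wrap)
			t |= 1;                /* fold back into bit 0 */
		return t & 0xffffffffffffULL;  /* 48-bit field */
	}

	int main(void)
	{
		uint64_t a = 0xabdeadbeefULL;  /* fd_init()'s seed */
		int i;

		for (i = 0; i < 3; i++) {
			a = addr_step(a);
			printf("0x%012llx\n", (unsigned long long)a);
		}
		return 0;
	}

cb_dqrr applies the same step to its private copy (fd_dq), which is how the dequeue-side comparison stays in sync without any shared counter.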
(Parked FQ) */ -+ do_enqueues(fq); -+ pr_info("VDQCR (till-empty);\n"); -+ if (qman_volatile_dequeue(fq, VDQCR_FLAGS, -+ QM_VDQCR_NUMFRAMES_TILLEMPTY)) -+ panic("qman_volatile_dequeue() failed\n"); -+ do_enqueues(fq); -+ pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES); -+ if (qman_volatile_dequeue(fq, VDQCR_FLAGS, -+ QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL))) -+ panic("qman_volatile_dequeue() failed\n"); -+ pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL, -+ NUM_ENQUEUES); -+ if (qman_volatile_dequeue(fq, VDQCR_FLAGS, -+ QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL))) -+ panic("qman_volatile_dequeue() failed\n"); -+ -+ do_enqueues(fq); -+ pr_info("scheduled dequeue (till-empty)\n"); -+ if (qman_schedule_fq(fq)) -+ panic("qman_schedule_fq() failed\n"); -+ wait_event(waitqueue, sdqcr_complete); -+ -+ /* Retire and OOS the FQ */ -+ res = qman_retire_fq(fq, &flags); -+ if (res < 0) -+ panic("qman_retire_fq() failed\n"); -+ wait_event(waitqueue, retire_complete); -+ if (flags & QMAN_FQ_STATE_BLOCKOOS) -+ panic("leaking frames\n"); -+ if (qman_oos_fq(fq)) -+ panic("qman_oos_fq() failed\n"); -+ qman_destroy_fq(fq, 0); -+ pr_info("qman_test_high finished\n"); -+} -+ -+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p, -+ struct qman_fq *fq, -+ const struct qm_dqrr_entry *dq) -+{ -+ if (fd_cmp(&fd_dq, &dq->fd)) { -+ pr_err("BADNESS: dequeued frame doesn't match;\n"); -+ pr_err("Expected 0x%llx, got 0x%llx\n", -+ (unsigned long long)fd_dq.length29, -+ (unsigned long long)dq->fd.length29); -+ BUG(); -+ } -+ fd_inc(&fd_dq); -+ if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) { -+ sdqcr_complete = 1; -+ wake_up(&waitqueue); -+ } -+ return qman_cb_dqrr_consume; -+} -+ -+static void cb_ern(struct qman_portal *p, struct qman_fq *fq, -+ const struct qm_mr_entry *msg) -+{ -+ panic("cb_ern() unimplemented"); -+} -+ -+static void cb_fqs(struct qman_portal *p, struct qman_fq *fq, -+ const struct qm_mr_entry *msg) -+{ -+ u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK); -+ if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) -+ panic("unexpected FQS message"); -+ pr_info("Retirement message received\n"); -+ retire_complete = 1; -+ wake_up(&waitqueue); -+} ---- /dev/null -+++ b/drivers/staging/fsl_qbman/qman_test_hotpotato.c -@@ -0,0 +1,499 @@ -+/* Copyright 2009-2012 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include -+#include -+#include -+#include "qman_test.h" -+ -+/* Algorithm: -+ * -+ * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates -+ * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The -+ * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will -+ * shuttle a "hot potato" frame around them such that every forwarding action -+ * moves it from one cpu to another. (The use of more than one handler per cpu -+ * is to allow enough handlers/FQs to truly test the significance of caching - -+ * ie. when cache-expiries are occurring.) -+ * -+ * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the -+ * first and last words of the frame data will undergo a transformation step on -+ * each forwarding action. To achieve this, each handler will be assigned a -+ * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is -+ * received by a handler, the mixer of the expected sender is XOR'd into all -+ * words of the entire frame, which is then validated against the original -+ * values. Then, before forwarding, the entire frame is XOR'd with the mixer of -+ * the current handler. Apart from validating that the frame is taking the -+ * expected path, this also provides some quasi-realistic overheads to each -+ * forwarding action - dereferencing *all* the frame data, computation, and -+ * conditional branching. There is a "special" handler designated to act as the -+ * instigator of the test by creating and enqueuing the "hot potato" frame, and -+ * to determine when the test has completed by counting HP_LOOPS iterations. -+ * -+ * Init phases: -+ * -+ * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them -+ * into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU -+ * handlers and link-list them (but do no other handler setup). -+ * -+ * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each -+ * hp_cpu's 'iterator' to point to its first handler. With each loop, -+ * allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler -+ * and advance the iterator for the next loop. This includes a final fixup, -+ * which connects the last handler to the first (and which is why phase 2 -+ * and 3 are separate). -+ * -+ * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each -+ * hp_cpu's 'iterator' to point to its first handler. With each loop, -+ * initialise FQ objects and advance the iterator for the next loop. -+ * Moreover, do this initialisation on the cpu it applies to so that Rx FQ -+ * initialisation targets the correct cpu.
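The property the mixers rely on is simply that XOR with the same mask twice is the identity: each receiver's rx_mixer equals its sender's tx_mixer, so unmasking always re-exposes the canonical LFSR stream before the next mask goes on. A compressed sketch of one forwarding hop (process_frame_data() further down is the real implementation; HP_FIRST_WORD and do_lfsr() are defined just below):

	/* One hop: strip the sender's mask, validate, apply our own. */
	static int hop(u32 *frame, int nwords, u32 rx_mixer, u32 tx_mixer)
	{
		u32 lfsr = HP_FIRST_WORD;
		int i;

		for (i = 0; i < nwords; i++) {
			frame[i] ^= rx_mixer;   /* undo the sender's mask */
			if (frame[i] != lfsr)
				return -1;      /* frame took a wrong path */
			frame[i] ^= tx_mixer;   /* re-mask for the next hop */
			lfsr = do_lfsr(lfsr);
		}
		return 0;
	}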
-+ */ -+ -+/* helper to run something on all cpus (can't use on_each_cpu(), as that invokes -+ * the fn from irq context, which is too restrictive). */ -+struct bstrap { -+ void (*fn)(void); -+ atomic_t started; -+}; -+static int bstrap_fn(void *__bstrap) -+{ -+ struct bstrap *bstrap = __bstrap; -+ atomic_inc(&bstrap->started); -+ bstrap->fn(); -+ while (!kthread_should_stop()) -+ msleep(1); -+ return 0; -+} -+static int on_all_cpus(void (*fn)(void)) -+{ -+ int cpu; -+ for_each_cpu(cpu, cpu_online_mask) { -+ struct bstrap bstrap = { -+ .fn = fn, -+ .started = ATOMIC_INIT(0) -+ }; -+ struct task_struct *k = kthread_create(bstrap_fn, &bstrap, -+ "hotpotato%d", cpu); -+ int ret; -+ if (IS_ERR(k)) -+ return -ENOMEM; -+ kthread_bind(k, cpu); -+ wake_up_process(k); -+ /* If we call kthread_stop() before the "wake up" has had an -+ * effect, then the thread may exit with -EINTR without ever -+ * running the function. So poll until it's started before -+ * requesting it to stop. */ -+ while (!atomic_read(&bstrap.started)) -+ msleep(10); -+ ret = kthread_stop(k); -+ if (ret) -+ return ret; -+ } -+ return 0; -+} -+ -+struct hp_handler { -+ -+ /* The following data is stashed when 'rx' is dequeued; */ -+ /* -------------- */ -+ /* The Rx FQ, dequeues of which will stash the entire hp_handler */ -+ struct qman_fq rx; -+ /* The Tx FQ we should forward to */ -+ struct qman_fq tx; -+ /* The value we XOR post-dequeue, prior to validating */ -+ u32 rx_mixer; -+ /* The value we XOR pre-enqueue, after validating */ -+ u32 tx_mixer; -+ /* what the hotpotato address should be on dequeue */ -+ dma_addr_t addr; -+ u32 *frame_ptr; -+ -+ /* The following data isn't (necessarily) stashed on dequeue; */ -+ /* -------------- */ -+ u32 fqid_rx, fqid_tx; -+ /* list node for linking us into 'hp_cpu' */ -+ struct list_head node; -+ /* Just to check ... */ -+ unsigned int processor_id; -+} ____cacheline_aligned; -+ -+struct hp_cpu { -+ /* identify the cpu we run on; */ -+ unsigned int processor_id; -+ /* root node for the per-cpu list of handlers */ -+ struct list_head handlers; -+ /* list node for linking us into 'hp_cpu_list' */ -+ struct list_head node; -+ /* when repeatedly scanning 'hp_list', each time linking the n'th -+ * handlers together, this is used as per-cpu iterator state */ -+ struct hp_handler *iterator; -+}; -+ -+/* Each cpu has one of these */ -+static DEFINE_PER_CPU(struct hp_cpu, hp_cpus); -+ -+/* links together the hp_cpu structs, in first-come first-serve order. */ -+static LIST_HEAD(hp_cpu_list); -+static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock); -+ -+static unsigned int hp_cpu_list_length; -+ -+/* the "special" handler, that starts and terminates the test. */ -+static struct hp_handler *special_handler; -+static int loop_counter; -+ -+/* handlers are allocated out of this, so they're properly aligned. 
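One detail of the hp_handler layout above is load-bearing: 'rx' is the first member, which is what lets the dequeue callbacks further down cast the struct qman_fq pointer straight back to the enclosing handler. The same conversion written defensively, so it would survive the field moving:

	#include <linux/kernel.h>   /* container_of() */

	/* Equivalent to the (struct hp_handler *)fq casts in the callbacks
	 * below, but independent of 'rx' being the first member: */
	static inline struct hp_handler *fq_to_handler(struct qman_fq *fq)
	{
		return container_of(fq, struct hp_handler, rx);
	}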
*/ -+static struct kmem_cache *hp_handler_slab; -+ -+/* this is the frame data */ -+static void *__frame_ptr; -+static u32 *frame_ptr; -+static dma_addr_t frame_dma; -+ -+/* the main function waits on this */ -+static DECLARE_WAIT_QUEUE_HEAD(queue); -+ -+#define HP_PER_CPU 2 -+#define HP_LOOPS 8 -+/* 80 bytes, like a small ethernet frame, and bleeds into a second cacheline */ -+#define HP_NUM_WORDS 80 -+/* First word of the LFSR-based frame data */ -+#define HP_FIRST_WORD 0xabbaf00d -+ -+static inline u32 do_lfsr(u32 prev) -+{ -+ return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u); -+} -+ -+static void allocate_frame_data(void) -+{ -+ u32 lfsr = HP_FIRST_WORD; -+ int loop; -+ struct platform_device *pdev = platform_device_alloc("foobar", -1); -+ if (!pdev) -+ panic("platform_device_alloc() failed"); -+ if (platform_device_add(pdev)) -+ panic("platform_device_add() failed"); -+ __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL); -+ if (!__frame_ptr) -+ panic("kmalloc() failed"); -+ frame_ptr = (void *)(((unsigned long)__frame_ptr + 63) & -+ ~(unsigned long)63); -+ for (loop = 0; loop < HP_NUM_WORDS; loop++) { -+ frame_ptr[loop] = lfsr; -+ lfsr = do_lfsr(lfsr); -+ } -+ frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS, -+ DMA_BIDIRECTIONAL); -+ platform_device_del(pdev); -+ platform_device_put(pdev); -+} -+ -+static void deallocate_frame_data(void) -+{ -+ kfree(__frame_ptr); -+} -+ -+static inline void process_frame_data(struct hp_handler *handler, -+ const struct qm_fd *fd) -+{ -+ u32 *p = handler->frame_ptr; -+ u32 lfsr = HP_FIRST_WORD; -+ int loop; -+ if (qm_fd_addr_get64(fd) != handler->addr) -+ panic("bad frame address"); -+ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) { -+ *p ^= handler->rx_mixer; -+ if (*p != lfsr) -+ panic("corrupt frame data"); -+ *p ^= handler->tx_mixer; -+ lfsr = do_lfsr(lfsr); -+ } -+} -+ -+static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal, -+ struct qman_fq *fq, -+ const struct qm_dqrr_entry *dqrr) -+{ -+ struct hp_handler *handler = (struct hp_handler *)fq; -+ -+ process_frame_data(handler, &dqrr->fd); -+ if (qman_enqueue(&handler->tx, &dqrr->fd, 0)) -+ panic("qman_enqueue() failed"); -+ return qman_cb_dqrr_consume; -+} -+ -+static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal, -+ struct qman_fq *fq, -+ const struct qm_dqrr_entry *dqrr) -+{ -+ struct hp_handler *handler = (struct hp_handler *)fq; -+ -+ process_frame_data(handler, &dqrr->fd); -+ if (++loop_counter < HP_LOOPS) { -+ if (qman_enqueue(&handler->tx, &dqrr->fd, 0)) -+ panic("qman_enqueue() failed"); -+ } else { -+ pr_info("Received final (%dth) frame\n", loop_counter); -+ wake_up(&queue); -+ } -+ return qman_cb_dqrr_consume; -+} -+ -+static void create_per_cpu_handlers(void) -+{ -+ struct hp_handler *handler; -+ int loop; -+ struct hp_cpu *hp_cpu = &get_cpu_var(hp_cpus); -+ -+ hp_cpu->processor_id = smp_processor_id(); -+ spin_lock(&hp_lock); -+ list_add_tail(&hp_cpu->node, &hp_cpu_list); -+ hp_cpu_list_length++; -+ spin_unlock(&hp_lock); -+ INIT_LIST_HEAD(&hp_cpu->handlers); -+ for (loop = 0; loop < HP_PER_CPU; loop++) { -+ handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL); -+ if (!handler) -+ panic("kmem_cache_alloc() failed"); -+ handler->processor_id = hp_cpu->processor_id; -+ handler->addr = frame_dma; -+ handler->frame_ptr = frame_ptr; -+ list_add_tail(&handler->node, &hp_cpu->handlers); -+ } -+ put_cpu_var(hp_cpus); -+} -+ -+static void destroy_per_cpu_handlers(void) -+{ -+ struct list_head *loop, *tmp; -+ struct hp_cpu *hp_cpu = 
&get_cpu_var(hp_cpus); -+ -+ spin_lock(&hp_lock); -+ list_del(&hp_cpu->node); -+ spin_unlock(&hp_lock); -+ list_for_each_safe(loop, tmp, &hp_cpu->handlers) { -+ u32 flags; -+ struct hp_handler *handler = list_entry(loop, struct hp_handler, -+ node); -+ if (qman_retire_fq(&handler->rx, &flags)) -+ panic("qman_retire_fq(rx) failed"); -+ BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS); -+ if (qman_oos_fq(&handler->rx)) -+ panic("qman_oos_fq(rx) failed"); -+ qman_destroy_fq(&handler->rx, 0); -+ qman_destroy_fq(&handler->tx, 0); -+ qman_release_fqid(handler->fqid_rx); -+ list_del(&handler->node); -+ kmem_cache_free(hp_handler_slab, handler); -+ } -+ put_cpu_var(hp_cpus); -+} -+ -+static inline u8 num_cachelines(u32 offset) -+{ -+ u8 res = (offset + (L1_CACHE_BYTES - 1)) -+ / (L1_CACHE_BYTES); -+ if (res > 3) -+ return 3; -+ return res; -+} -+#define STASH_DATA_CL \ -+ num_cachelines(HP_NUM_WORDS * 4) -+#define STASH_CTX_CL \ -+ num_cachelines(offsetof(struct hp_handler, fqid_rx)) -+ -+static void init_handler(void *__handler) -+{ -+ struct qm_mcc_initfq opts; -+ struct hp_handler *handler = __handler; -+ BUG_ON(handler->processor_id != smp_processor_id()); -+ /* Set up rx */ -+ memset(&handler->rx, 0, sizeof(handler->rx)); -+ if (handler == special_handler) -+ handler->rx.cb.dqrr = special_dqrr; -+ else -+ handler->rx.cb.dqrr = normal_dqrr; -+ if (qman_create_fq(handler->fqid_rx, 0, &handler->rx)) -+ panic("qman_create_fq(rx) failed"); -+ memset(&opts, 0, sizeof(opts)); -+ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA; -+ opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING; -+ opts.fqd.context_a.stashing.data_cl = STASH_DATA_CL; -+ opts.fqd.context_a.stashing.context_cl = STASH_CTX_CL; -+ if (qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED | -+ QMAN_INITFQ_FLAG_LOCAL, &opts)) -+ panic("qman_init_fq(rx) failed"); -+ /* Set up tx */ -+ memset(&handler->tx, 0, sizeof(handler->tx)); -+ if (qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY, -+ &handler->tx)) -+ panic("qman_create_fq(tx) failed"); -+} -+ -+static void init_phase2(void) -+{ -+ int loop; -+ u32 fqid = 0; -+ u32 lfsr = 0xdeadbeef; -+ struct hp_cpu *hp_cpu; -+ struct hp_handler *handler; -+ -+ for (loop = 0; loop < HP_PER_CPU; loop++) { -+ list_for_each_entry(hp_cpu, &hp_cpu_list, node) { -+ int ret; -+ if (!loop) -+ hp_cpu->iterator = list_first_entry( -+ &hp_cpu->handlers, -+ struct hp_handler, node); -+ else -+ hp_cpu->iterator = list_entry( -+ hp_cpu->iterator->node.next, -+ struct hp_handler, node); -+ /* Rx FQID is the previous handler's Tx FQID */ -+ hp_cpu->iterator->fqid_rx = fqid; -+ /* Allocate new FQID for Tx */ -+ ret = qman_alloc_fqid(&fqid); -+ if (ret) -+ panic("qman_alloc_fqid() failed"); -+ hp_cpu->iterator->fqid_tx = fqid; -+ /* Rx mixer is the previous handler's Tx mixer */ -+ hp_cpu->iterator->rx_mixer = lfsr; -+ /* Get new mixer for Tx */ -+ lfsr = do_lfsr(lfsr); -+ hp_cpu->iterator->tx_mixer = lfsr; -+ } -+ } -+ /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */ -+ hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node); -+ handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node); -+ BUG_ON((handler->fqid_rx != 0) || (handler->rx_mixer != 0xdeadbeef)); -+ handler->fqid_rx = fqid; -+ handler->rx_mixer = lfsr; -+ /* and tag it as our "special" handler */ -+ special_handler = handler; -+} -+ -+static void init_phase3(void) -+{ -+ int loop; -+ struct hp_cpu *hp_cpu; -+ -+ for (loop = 0; loop < HP_PER_CPU; loop++) { -+ list_for_each_entry(hp_cpu, &hp_cpu_list, node) { -+ if (!loop) -+ 
hp_cpu->iterator = list_first_entry( -+ &hp_cpu->handlers, -+ struct hp_handler, node); -+ else -+ hp_cpu->iterator = list_entry( -+ hp_cpu->iterator->node.next, -+ struct hp_handler, node); -+ preempt_disable(); -+ if (hp_cpu->processor_id == smp_processor_id()) -+ init_handler(hp_cpu->iterator); -+ else -+ smp_call_function_single(hp_cpu->processor_id, -+ init_handler, hp_cpu->iterator, 1); -+ preempt_enable(); -+ } -+ } -+} -+ -+static void send_first_frame(void *ignore) -+{ -+ u32 *p = special_handler->frame_ptr; -+ u32 lfsr = HP_FIRST_WORD; -+ int loop; -+ struct qm_fd fd; -+ -+ BUG_ON(special_handler->processor_id != smp_processor_id()); -+ memset(&fd, 0, sizeof(fd)); -+ qm_fd_addr_set64(&fd, special_handler->addr); -+ fd.format = qm_fd_contig_big; -+ fd.length29 = HP_NUM_WORDS * 4; -+ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) { -+ if (*p != lfsr) -+ panic("corrupt frame data"); -+ *p ^= special_handler->tx_mixer; -+ lfsr = do_lfsr(lfsr); -+ } -+ pr_info("Sending first frame\n"); -+ if (qman_enqueue(&special_handler->tx, &fd, 0)) -+ panic("qman_enqueue() failed"); -+} -+ -+void qman_test_hotpotato(void) -+{ -+ if (cpumask_weight(cpu_online_mask) < 2) { -+ pr_info("qman_test_hotpotato, skip - only 1 CPU\n"); -+ return; -+ } -+ -+ pr_info("qman_test_hotpotato starting\n"); -+ -+ hp_cpu_list_length = 0; -+ loop_counter = 0; -+ hp_handler_slab = kmem_cache_create("hp_handler_slab", -+ sizeof(struct hp_handler), L1_CACHE_BYTES, -+ SLAB_HWCACHE_ALIGN, NULL); -+ if (!hp_handler_slab) -+ panic("kmem_cache_create() failed"); -+ -+ allocate_frame_data(); -+ -+ /* Init phase 1 */ -+ pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU); -+ if (on_all_cpus(create_per_cpu_handlers)) -+ panic("on_each_cpu() failed"); -+ pr_info("Number of cpus: %d, total of %d handlers\n", -+ hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU); -+ -+ init_phase2(); -+ -+ init_phase3(); -+ -+ preempt_disable(); -+ if (special_handler->processor_id == smp_processor_id()) -+ send_first_frame(NULL); -+ else -+ smp_call_function_single(special_handler->processor_id, -+ send_first_frame, NULL, 1); -+ preempt_enable(); -+ -+ wait_event(queue, loop_counter == HP_LOOPS); -+ deallocate_frame_data(); -+ if (on_all_cpus(destroy_per_cpu_handlers)) -+ panic("on_each_cpu() failed"); -+ kmem_cache_destroy(hp_handler_slab); -+ pr_info("qman_test_hotpotato finished\n"); -+} ---- /dev/null -+++ b/drivers/staging/fsl_qbman/qman_utility.c -@@ -0,0 +1,129 @@ -+/* Copyright 2008-2011 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
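To make the stashing arithmetic from init_handler() above concrete: taking the code's HP_NUM_WORDS * 4 = 320-byte frame and assuming the usual 64-byte L1 cachelines on these SoCs (L1_CACHE_BYTES is config-dependent, so this is an assumption), the data stash would need five lines, which num_cachelines() clamps to the three-line maximum the FQD stashing fields can encode:

	/* Re-derivation of STASH_DATA_CL under a 64-byte cacheline assumption. */
	static inline u8 lines_needed(u32 bytes)
	{
		u8 res = (bytes + 63) / 64;   /* round up to whole lines */

		return res > 3 ? 3 : res;     /* hw stashes at most 3 lines */
	}
	/* lines_needed(320) == 3, so at most 192 of the 320 bytes arrive
	 * pre-warmed in cache; the rest is fetched on demand. */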
-+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include "qman_private.h" -+ -+/* ----------------- */ -+/* --- FQID Pool --- */ -+ -+struct qman_fqid_pool { -+ /* Base and size of the FQID range */ -+ u32 fqid_base; -+ u32 total; -+ /* Number of FQIDs currently "allocated" */ -+ u32 used; -+ /* Allocation optimisation. When 'used<total', 'next' is the next index -+ * to try allocating from */ -+ u32 next; -+ /* The bit-field, one bit per FQID in the range */ -+ unsigned long *bits; -+}; -+ -+#define QLONG_BYTES sizeof(unsigned long) -+#define QLONG_BITS (QLONG_BYTES * 8) -+/* Bitmask sizing, rounded up to whole 'longs' */ -+#define QNUM_LONGS(b) (((b) + QLONG_BITS - 1) / QLONG_BITS) -+#define QNUM_BYTES(b) (QNUM_LONGS(b) * QLONG_BYTES) -+#define QNUM_BITS(b) (QNUM_LONGS(b) * QLONG_BITS) -+ -+struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num) -+{ -+ struct qman_fqid_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL); -+ unsigned int i; -+ -+ BUG_ON(!num); -+ if (!pool) -+ return NULL; -+ pool->fqid_base = fqid_start; -+ pool->total = num; -+ pool->used = 0; -+ pool->next = 0; -+ pool->bits = kzalloc(QNUM_BYTES(num), GFP_KERNEL); -+ if (!pool->bits) { -+ kfree(pool); -+ return NULL; -+ } -+ /* If num is not an even multiple of QLONG_BITS (or even 8, for -+ * byte-oriented searching) then we fill the trailing bits with 1, to -+ * make them look allocated (permanently). */ -+ for (i = num + 1; i < QNUM_BITS(num); i++) -+ set_bit(i, pool->bits); -+ return pool; -+} -+EXPORT_SYMBOL(qman_fqid_pool_create); -+ -+int qman_fqid_pool_destroy(struct qman_fqid_pool *pool) -+{ -+ int ret = pool->used; -+ kfree(pool->bits); -+ kfree(pool); -+ return ret; -+} -+EXPORT_SYMBOL(qman_fqid_pool_destroy); -+ -+int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid) -+{ -+ int ret; -+ if (pool->used == pool->total) -+ return -ENOMEM; -+ *fqid = pool->fqid_base + pool->next; -+ ret = test_and_set_bit(pool->next, pool->bits); -+ BUG_ON(ret); -+ if (++pool->used == pool->total) -+ return 0; -+ pool->next = find_next_zero_bit(pool->bits, pool->total, pool->next); -+ if (pool->next >= pool->total) -+ pool->next = find_first_zero_bit(pool->bits, pool->total); -+ BUG_ON(pool->next >= pool->total); -+ return 0; -+} -+EXPORT_SYMBOL(qman_fqid_pool_alloc); -+ -+void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid) -+{ -+ int ret; -+ -+ fqid -= pool->fqid_base; -+ ret = test_and_clear_bit(fqid, pool->bits); -+ BUG_ON(!ret); -+ if (pool->used-- == pool->total) -+ pool->next = fqid; -+} -+EXPORT_SYMBOL(qman_fqid_pool_free); -+ -+u32 qman_fqid_pool_used(struct qman_fqid_pool *pool) -+{ -+ return pool->used; -+} -+EXPORT_SYMBOL(qman_fqid_pool_used); ---- /dev/null -+++ b/include/linux/fsl_bman.h -@@ -0,0 +1,532 @@ -+/* Copyright 2008-2012 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#ifndef FSL_BMAN_H -+#define FSL_BMAN_H -+ -+#ifdef __cplusplus -+extern "C" { -+#endif -+ -+/* Last updated for v00.79 of the BG */ -+ -+/* Portal processing (interrupt) sources */ -+#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */ -+#define BM_PIRQ_BSCN 0x00000001 /* Buffer depletion State Change */ -+ -+/* This wrapper represents a bit-array for the depletion state of the 64 Bman -+ * buffer pools. */ -+struct bman_depletion { -+ u32 __state[2]; -+}; -+#define BMAN_DEPLETION_EMPTY { { 0x00000000, 0x00000000 } } -+#define BMAN_DEPLETION_FULL { { 0xffffffff, 0xffffffff } } -+#define __bmdep_word(x) ((x) >> 5) -+#define __bmdep_shift(x) ((x) & 0x1f) -+#define __bmdep_bit(x) (0x80000000 >> __bmdep_shift(x)) -+static inline void bman_depletion_init(struct bman_depletion *c) -+{ -+ c->__state[0] = c->__state[1] = 0; -+} -+static inline void bman_depletion_fill(struct bman_depletion *c) -+{ -+ c->__state[0] = c->__state[1] = ~0; -+} -+static inline int bman_depletion_get(const struct bman_depletion *c, u8 bpid) -+{ -+ return c->__state[__bmdep_word(bpid)] & __bmdep_bit(bpid); -+} -+static inline void bman_depletion_set(struct bman_depletion *c, u8 bpid) -+{ -+ c->__state[__bmdep_word(bpid)] |= __bmdep_bit(bpid); -+} -+static inline void bman_depletion_unset(struct bman_depletion *c, u8 bpid) -+{ -+ c->__state[__bmdep_word(bpid)] &= ~__bmdep_bit(bpid); -+} -+ -+/* ------------------------------------------------------- */ -+/* --- Bman data structures (and associated constants) --- */ -+ -+/* Represents s/w corenet portal mapped data structures */ -+struct bm_rcr_entry; /* RCR (Release Command Ring) entries */ -+struct bm_mc_command; /* MC (Management Command) command */ -+struct bm_mc_result; /* MC result */ -+ -+/* Code-reduction, define a wrapper for 48-bit buffers. In cases where a buffer -+ * pool id specific to this buffer is needed (BM_RCR_VERB_CMD_BPID_MULTI, -+ * BM_MCC_VERB_ACQUIRE), the 'bpid' field is used. 
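These depletion helpers use the same MSB-first, word-indexed layout as the qman_cgrs wrappers earlier in this patch, just sized for 64 pools. A short sketch of building a mask that watches two pools:

	/* Sketch: a depletion mask covering buffer pools 3 and 40. */
	struct bman_depletion mask = BMAN_DEPLETION_EMPTY;

	bman_depletion_set(&mask, 3);    /* 0x80000000 >> 3 in __state[0] */
	bman_depletion_set(&mask, 40);   /* 0x80000000 >> 8 in __state[1] */

	if (bman_depletion_get(&mask, 3))
		;   /* pool 3 is in the mask */

Such a mask is exactly what the bman_portal_config.mask field below constrains per portal.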
*/ -+struct bm_buffer { -+ union { -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u8 __reserved1; -+ u8 bpid; -+ u16 hi; /* High 16-bits of 48-bit address */ -+ u32 lo; /* Low 32-bits of 48-bit address */ -+#else -+ u32 lo; -+ u16 hi; -+ u8 bpid; -+ u8 __reserved; -+#endif -+ }; -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u64 __notaddress:16; -+ u64 addr:48; -+#else -+ u64 addr:48; -+ u64 __notaddress:16; -+#endif -+ }; -+ u64 opaque; -+ }; -+} __aligned(8); -+static inline u64 bm_buffer_get64(const struct bm_buffer *buf) -+{ -+ return buf->addr; -+} -+static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf) -+{ -+ return (dma_addr_t)buf->addr; -+} -+/* Macro, so we compile better if 'v' isn't always 64-bit */ -+#define bm_buffer_set64(buf, v) \ -+ do { \ -+ struct bm_buffer *__buf931 = (buf); \ -+ __buf931->hi = upper_32_bits(v); \ -+ __buf931->lo = lower_32_bits(v); \ -+ } while (0) -+ -+/* See 1.5.3.5.4: "Release Command" */ -+struct bm_rcr_entry { -+ union { -+ struct { -+ u8 __dont_write_directly__verb; -+ u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */ -+ u8 __reserved1[62]; -+ }; -+ struct bm_buffer bufs[8]; -+ }; -+} __packed; -+#define BM_RCR_VERB_VBIT 0x80 -+#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */ -+#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20 -+#define BM_RCR_VERB_CMD_BPID_MULTI 0x30 -+#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */ -+ -+/* See 1.5.3.1: "Acquire Command" */ -+/* See 1.5.3.2: "Query Command" */ -+struct bm_mcc_acquire { -+ u8 bpid; -+ u8 __reserved1[62]; -+} __packed; -+struct bm_mcc_query { -+ u8 __reserved2[63]; -+} __packed; -+struct bm_mc_command { -+ u8 __dont_write_directly__verb; -+ union { -+ struct bm_mcc_acquire acquire; -+ struct bm_mcc_query query; -+ }; -+} __packed; -+#define BM_MCC_VERB_VBIT 0x80 -+#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */ -+#define BM_MCC_VERB_CMD_ACQUIRE 0x10 -+#define BM_MCC_VERB_CMD_QUERY 0x40 -+#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */ -+ -+/* See 1.5.3.3: "Acquire Response" */ -+/* See 1.5.3.4: "Query Response" */ -+struct bm_pool_state { -+ u8 __reserved1[32]; -+ /* "availability state" and "depletion state" */ -+ struct { -+ u8 __reserved1[8]; -+ /* Access using bman_depletion_***() */ -+ struct bman_depletion state; -+ } as, ds; -+}; -+struct bm_mc_result { -+ union { -+ struct { -+ u8 verb; -+ u8 __reserved1[63]; -+ }; -+ union { -+ struct { -+ u8 __reserved1; -+ u8 bpid; -+ u8 __reserved2[62]; -+ }; -+ struct bm_buffer bufs[8]; -+ } acquire; -+ struct bm_pool_state query; -+ }; -+} __packed; -+#define BM_MCR_VERB_VBIT 0x80 -+#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK -+#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE -+#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY -+#define BM_MCR_VERB_CMD_ERR_INVALID 0x60 -+#define BM_MCR_VERB_CMD_ERR_ECC 0x70 -+#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */ -+/* Determine the "availability state" of pool 'p' from a query result 'r' */ -+#define BM_MCR_QUERY_AVAILABILITY(r, p) \ -+ bman_depletion_get(&r->query.as.state, p) -+/* Determine the "depletion state" of pool 'p' from a query result 'r' */ -+#define BM_MCR_QUERY_DEPLETION(r, p) \ -+ bman_depletion_get(&r->query.ds.state, p) -+ -+/*******************************************************************/ -+/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */ -+/*******************************************************************/ -+ -+ /* Portal and Buffer 
Pools */ -+ /* ----------------------- */ -+/* Represents a managed portal */ -+struct bman_portal; -+ -+/* This object type represents Bman buffer pools. */ -+struct bman_pool; -+ -+struct bman_portal_config { -+ /* This is used for any "core-affine" portals, ie. default portals -+ * associated to the corresponding cpu. -1 implies that there is no core -+ * affinity configured. */ -+ int cpu; -+ /* portal interrupt line */ -+ int irq; -+ /* the unique index of this portal */ -+ u32 index; -+ /* Is this portal shared? (If so, it has coarser locking and demuxes -+ * processing on behalf of other CPUs.) */ -+ int is_shared; -+ /* These are the buffer pool IDs that may be used via this portal. */ -+ struct bman_depletion mask; -+}; -+ -+/* This callback type is used when handling pool depletion entry/exit. The -+ * 'cb_ctx' value is the opaque value associated with the pool object in -+ * bman_new_pool(). 'depleted' is non-zero on depletion-entry, and zero on -+ * depletion-exit. */ -+typedef void (*bman_cb_depletion)(struct bman_portal *bm, -+ struct bman_pool *pool, void *cb_ctx, int depleted); -+ -+/* This struct specifies parameters for a bman_pool object. */ -+struct bman_pool_params { -+ /* index of the buffer pool to encapsulate (0-63), ignored if -+ * BMAN_POOL_FLAG_DYNAMIC_BPID is set. */ -+ u32 bpid; -+ /* bit-mask of BMAN_POOL_FLAG_*** options */ -+ u32 flags; -+ /* depletion-entry/exit callback, if BMAN_POOL_FLAG_DEPLETION is set */ -+ bman_cb_depletion cb; -+ /* opaque user value passed as a parameter to 'cb' */ -+ void *cb_ctx; -+ /* depletion-entry/exit thresholds, if BMAN_POOL_FLAG_THRESH is set. NB: -+ * this is only allowed if BMAN_POOL_FLAG_DYNAMIC_BPID is used *and* -+ * when run in the control plane (which controls Bman CCSR). This array -+ * matches the definition of bm_pool_set(). */ -+ u32 thresholds[4]; -+}; -+ -+/* Flags to bman_new_pool() */ -+#define BMAN_POOL_FLAG_NO_RELEASE 0x00000001 /* can't release to pool */ -+#define BMAN_POOL_FLAG_ONLY_RELEASE 0x00000002 /* can only release to pool */ -+#define BMAN_POOL_FLAG_DEPLETION 0x00000004 /* track depletion entry/exit */ -+#define BMAN_POOL_FLAG_DYNAMIC_BPID 0x00000008 /* (de)allocate bpid */ -+#define BMAN_POOL_FLAG_THRESH 0x00000010 /* set depletion thresholds */ -+#define BMAN_POOL_FLAG_STOCKPILE 0x00000020 /* stockpile to reduce hw ops */ -+ -+/* Flags to bman_release() */ -+#ifdef CONFIG_FSL_DPA_CAN_WAIT -+#define BMAN_RELEASE_FLAG_WAIT 0x00000001 /* wait if RCR is full */ -+#define BMAN_RELEASE_FLAG_WAIT_INT 0x00000002 /* if we wait, interruptible? */ -+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC -+#define BMAN_RELEASE_FLAG_WAIT_SYNC 0x00000004 /* if wait, until consumed? */ -+#endif -+#endif -+#define BMAN_RELEASE_FLAG_NOW 0x00000008 /* issue immediate release */ -+ -+/* Flags to bman_acquire() */ -+#define BMAN_ACQUIRE_FLAG_STOCKPILE 0x00000001 /* no hw op, stockpile only */ -+ -+ /* Portal Management */ -+ /* ----------------- */ -+/** -+ * bman_get_portal_config - get portal configuration settings -+ * -+ * This returns a read-only view of the current cpu's affine portal settings. -+ */ -+const struct bman_portal_config *bman_get_portal_config(void); -+ -+/** -+ * bman_irqsource_get - return the portal work that is interrupt-driven -+ * -+ * Returns a bitmask of BM_PIRQ_**I processing sources that are currently -+ * enabled for interrupt handling on the current cpu's affine portal. 
These -+ * sources will trigger the portal interrupt and the interrupt handler (or a -+ * tasklet/bottom-half it defers to) will perform the corresponding processing -+ * work. The bman_poll_***() functions will only process sources that are not in -+ * this bitmask. If the current CPU is sharing a portal hosted on another CPU, -+ * this always returns zero. -+ */ -+u32 bman_irqsource_get(void); -+ -+/** -+ * bman_irqsource_add - add processing sources to be interrupt-driven -+ * @bits: bitmask of BM_PIRQ_**I processing sources -+ * -+ * Adds processing sources that should be interrupt-driven (rather than -+ * processed via bman_poll_***() functions). Returns zero for success, or -+ * -EINVAL if the current CPU is sharing a portal hosted on another CPU. */ -+int bman_irqsource_add(u32 bits); -+ -+/** -+ * bman_irqsource_remove - remove processing sources from being interrupt-driven -+ * @bits: bitmask of BM_PIRQ_**I processing sources -+ * -+ * Removes processing sources from being interrupt-driven, so that they will -+ * instead be processed via bman_poll_***() functions. Returns zero for success, -+ * or -EINVAL if the current CPU is sharing a portal hosted on another CPU. */ -+int bman_irqsource_remove(u32 bits); -+ -+/** -+ * bman_affine_cpus - return a mask of cpus that have affine portals -+ */ -+const cpumask_t *bman_affine_cpus(void); -+ -+/** -+ * bman_poll_slow - process anything that isn't interrupt-driven. -+ * -+ * This function does any portal processing that isn't interrupt-driven. If the -+ * current CPU is sharing a portal hosted on another CPU, this function will -+ * return -EINVAL, otherwise the return value is a bitmask of BM_PIRQ_* sources -+ * indicating what interrupt sources were actually processed by the call. -+ * -+ * NB, unlike the legacy wrapper bman_poll(), this function will -+ * deterministically check for the presence of portal processing work and do it, -+ * which implies some latency even if there's nothing to do. The bman_poll() -+ * wrapper on the other hand (like the qman_poll() wrapper) attenuates this by -+ * checking for (and doing) portal processing infrequently. Ie. such that -+ * qman_poll() and bman_poll() can be called from core-processing loops. Use -+ * bman_poll_slow() when you yourself are deciding when to incur the overhead of -+ * processing. -+ */ -+u32 bman_poll_slow(void); -+ -+/** -+ * bman_poll - process anything that isn't interrupt-driven. -+ * -+ * Dispatcher logic on a cpu can use this to trigger any maintenance of the -+ * affine portal. This function does whatever processing is not triggered by -+ * interrupts. This is a legacy wrapper that can be used in core-processing -+ * loops but mitigates the performance overhead of portal processing by -+ * adaptively bypassing true portal processing most of the time. (Processing is -+ * done once every 10 calls if the previous processing revealed that work needed -+ * to be done, or once every 1000 calls if the previous processing revealed no -+ * work needed doing.) If you wish to control this yourself, call -+ * bman_poll_slow() instead, which always checks for portal processing work. -+ */ -+void bman_poll(void); -+ -+/** -+ * bman_rcr_is_empty - Determine if portal's RCR is empty -+ * -+ * For use in situations where a cpu-affine caller needs to determine when all -+ * releases for the local portal have been processed by Bman but can't use the -+ * BMAN_RELEASE_FLAG_WAIT_SYNC flag to do this from the final bman_release().
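Sketching the two polling disciplines described above; do_application_work() is a hypothetical stand-in for the caller's own loop body:

	/* Adaptive wrapper inside a busy dispatcher loop: */
	for (;;) {
		do_application_work();   /* hypothetical workload */
		bman_poll();             /* usually a no-op (10/1000 heuristic) */
	}

	/* Deterministic draining, at a point the caller chooses to pay for: */
	u32 done = bman_poll_slow();     /* returns the BM_PIRQ_* bits serviced */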
-+ * The function forces tracking of RCR consumption (which normally doesn't -+ * happen until release processing needs to find space to put new release -+ * commands), and returns zero if the ring still has unprocessed entries, -+ * non-zero if it is empty. -+ */ -+int bman_rcr_is_empty(void); -+ -+/** -+ * bman_alloc_bpid_range - Allocate a contiguous range of BPIDs -+ * @result: is set by the API to the base BPID of the allocated range -+ * @count: the number of BPIDs required -+ * @align: required alignment of the allocated range -+ * @partial: non-zero if the API can return fewer than @count BPIDs -+ * -+ * Returns the number of buffer pools allocated, or a negative error code. If -+ * @partial is non zero, the allocation request may return a smaller range of -+ * BPs than requested (though alignment will be as requested). If @partial is -+ * zero, the return value will either be 'count' or negative. -+ */ -+int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial); -+static inline int bman_alloc_bpid(u32 *result) -+{ -+ int ret = bman_alloc_bpid_range(result, 1, 0, 0); -+ return (ret > 0) ? 0 : ret; -+} -+ -+/** -+ * bman_release_bpid_range - Release the specified range of buffer pool IDs -+ * @bpid: the base BPID of the range to deallocate -+ * @count: the number of BPIDs in the range -+ * -+ * This function can also be used to seed the allocator with ranges of BPIDs -+ * that it can subsequently allocate from. -+ */ -+void bman_release_bpid_range(u32 bpid, unsigned int count); -+static inline void bman_release_bpid(u32 bpid) -+{ -+ bman_release_bpid_range(bpid, 1); -+} -+ -+int bman_reserve_bpid_range(u32 bpid, unsigned int count); -+static inline int bman_reserve_bpid(u32 bpid) -+{ -+ return bman_reserve_bpid_range(bpid, 1); -+} -+ -+void bman_seed_bpid_range(u32 bpid, unsigned int count); -+ -+ -+int bman_shutdown_pool(u32 bpid); -+ -+ /* Pool management */ -+ /* --------------- */ -+/** -+ * bman_new_pool - Allocates a Buffer Pool object -+ * @params: parameters specifying the buffer pool ID and behaviour -+ * -+ * Creates a pool object for the given @params. A portal and the depletion -+ * callback field of @params are only used if the BMAN_POOL_FLAG_DEPLETION flag -+ * is set. NB, the fields from @params are copied into the new pool object, so -+ * the structure provided by the caller can be released or reused after the -+ * function returns. -+ */ -+struct bman_pool *bman_new_pool(const struct bman_pool_params *params); -+ -+/** -+ * bman_free_pool - Deallocates a Buffer Pool object -+ * @pool: the pool object to release -+ * -+ */ -+void bman_free_pool(struct bman_pool *pool); -+ -+/** -+ * bman_get_params - Returns a pool object's parameters. -+ * @pool: the pool object -+ * -+ * The returned pointer refers to state within the pool object so must not be -+ * modified and can no longer be read once the pool object is destroyed. -+ */ -+const struct bman_pool_params *bman_get_params(const struct bman_pool *pool); -+ -+/** -+ * bman_release - Release buffer(s) to the buffer pool -+ * @pool: the buffer pool object to release to -+ * @bufs: an array of buffers to release -+ * @num: the number of buffers in @bufs (1-8) -+ * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options -+ * -+ * Adds the given buffers to RCR entries. If the portal @p was created with the -+ * "COMPACT" flag, then it will be using a compaction algorithm to improve -+ * utilisation of RCR. 
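Tying the allocator entry points above together, a sketch of claiming one dynamic BPID and handing it back (error handling trimmed to the essentials):

	u32 bpid;

	if (bman_alloc_bpid(&bpid))   /* wraps bman_alloc_bpid_range(.., 1, 0, 0) */
		return -ENOMEM;
	/* ... use it, e.g. as bman_pool_params.bpid for bman_new_pool() ... */
	bman_release_bpid(bpid);      /* return it to the allocator */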
As such, these buffers may join an existing ring entry
-+ * and/or the entry may not be issued right away so as to allow future releases
-+ * to join the same ring entry. Use the BMAN_RELEASE_FLAG_NOW flag to override
-+ * this behaviour by committing the RCR entry (or entries) right away. If the
-+ * RCR ring is full, the function will return -EBUSY unless
-+ * BMAN_RELEASE_FLAG_WAIT is selected, in which case it will sleep waiting for
-+ * space to become available in RCR. If the function receives a signal before
-+ * such time (and BMAN_RELEASE_FLAG_WAIT_INT is set), the function returns
-+ * -EINTR. Otherwise, it returns zero.
-+ */
-+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
-+			u32 flags);
-+
-+/**
-+ * bman_acquire - Acquire buffer(s) from a buffer pool
-+ * @pool: the buffer pool object to acquire from
-+ * @bufs: array for storing the acquired buffers
-+ * @num: the number of buffers desired (@bufs is at least this big)
-+ *
-+ * Issues an "Acquire" command via the portal's management command interface.
-+ * The return value will be the number of buffers obtained from the pool, or a
-+ * negative error code if a h/w error or pool starvation was encountered. In
-+ * the latter case, the content of @bufs is undefined.
-+ */
-+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
-+			u32 flags);
-+
-+/**
-+ * bman_flush_stockpile - Flush stockpile buffer(s) to the buffer pool
-+ * @pool: the buffer pool object the stockpile belongs to
-+ * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
-+ *
-+ * Adds stockpile buffers to RCR entries until the stockpile is empty.
-+ * The return value will be a negative error code if a h/w error occurred.
-+ * If the BMAN_RELEASE_FLAG_NOW flag is passed and the RCR ring is full,
-+ * -EAGAIN will be returned.
-+ */
-+int bman_flush_stockpile(struct bman_pool *pool, u32 flags);
-+
-+/**
-+ * bman_query_pools - Query all buffer pool states
-+ * @state: storage for the queried availability and depletion states
-+ */
-+int bman_query_pools(struct bm_pool_state *state);
-+
-+#ifdef CONFIG_FSL_BMAN_CONFIG
-+/**
-+ * bman_query_free_buffers - Query how many free buffers are in the buffer pool
-+ * @pool: the buffer pool object to query
-+ *
-+ * Returns the number of free buffers
-+ */
-+u32 bman_query_free_buffers(struct bman_pool *pool);
-+
-+/**
-+ * bman_update_pool_thresholds - Change the buffer pool's depletion thresholds
-+ * @pool: the buffer pool object to which the thresholds will be set
-+ * @thresholds: the new thresholds
-+ */
-+int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds);
-+#endif
-+
-+/**
-+ * The bman_p_***() variant below may be called even when the cpu to which the
-+ * portal is affine is not yet online.
-+ * @p specifies which portal the API will use.
-+ */
-+int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits);
-+#ifdef __cplusplus
-+}
-+#endif
-+
-+#endif /* FSL_BMAN_H */
---- /dev/null
-+++ b/include/linux/fsl_qman.h
-@@ -0,0 +1,3889 @@
-+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ *     * Redistributions of source code must retain the above copyright
-+ *	 notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#ifndef FSL_QMAN_H -+#define FSL_QMAN_H -+ -+#ifdef __cplusplus -+extern "C" { -+#endif -+ -+/* Last updated for v00.800 of the BG */ -+ -+/* Hardware constants */ -+#define QM_CHANNEL_SWPORTAL0 0 -+#define QMAN_CHANNEL_POOL1 0x21 -+#define QMAN_CHANNEL_CAAM 0x80 -+#define QMAN_CHANNEL_PME 0xa0 -+#define QMAN_CHANNEL_POOL1_REV3 0x401 -+#define QMAN_CHANNEL_CAAM_REV3 0x840 -+#define QMAN_CHANNEL_PME_REV3 0x860 -+#define QMAN_CHANNEL_DCE 0x8a0 -+#define QMAN_CHANNEL_DCE_QMANREV312 0x880 -+extern u16 qm_channel_pool1; -+extern u16 qm_channel_caam; -+extern u16 qm_channel_pme; -+extern u16 qm_channel_dce; -+enum qm_dc_portal { -+ qm_dc_portal_fman0 = 0, -+ qm_dc_portal_fman1 = 1, -+ qm_dc_portal_caam = 2, -+ qm_dc_portal_pme = 3, -+ qm_dc_portal_rman = 4, -+ qm_dc_portal_dce = 5 -+}; -+ -+/* Portal processing (interrupt) sources */ -+#define QM_PIRQ_CCSCI 0x00200000 /* CEETM Congestion State Change */ -+#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */ -+#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */ -+#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */ -+#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */ -+#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */ -+/* This mask contains all the interrupt sources that need handling except DQRI, -+ * ie. that if present should trigger slow-path processing. */ -+#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \ -+ QM_PIRQ_MRI | QM_PIRQ_CCSCI) -+ -+/* --- Clock speed --- */ -+/* A qman driver instance may or may not know the current qman clock speed. -+ * However, certain CEETM calculations may not be possible if this is not known. -+ * The 'set' function will only succeed (return zero) if the driver did not -+ * already know the clock speed. Likewise, the 'get' function will only succeed -+ * if the driver does know the clock speed (either because it knew when booting, -+ * or was told via 'set'). In cases where software is running on a driver -+ * instance that does not know the clock speed (eg. 
on a hypervised data-plane), -+ * and the user can obtain the current qman clock speed by other means (eg. from -+ * a message sent from the control-plane), then the 'set' function can be used -+ * to enable rate-calculations in a driver where it would otherwise not be -+ * possible. */ -+int qm_get_clock(u64 *clock_hz); -+int qm_set_clock(u64 clock_hz); -+ -+/* For qman_static_dequeue_*** APIs */ -+#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff -+/* for n in [1,15] */ -+#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n)) -+/* for conversion from n of qm_channel */ -+static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel) -+{ -+ return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1); -+} -+ -+/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use -+ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use -+ * FQID(n) to fill in the frame queue ID. */ -+#define QM_VDQCR_PRECEDENCE_VDQCR 0x0 -+#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000 -+#define QM_VDQCR_EXACT 0x40000000 -+#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000 -+#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24) -+#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f) -+#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0) -+ -+ -+/* ------------------------------------------------------- */ -+/* --- Qman data structures (and associated constants) --- */ -+ -+/* Represents s/w corenet portal mapped data structures */ -+struct qm_eqcr_entry; /* EQCR (EnQueue Command Ring) entries */ -+struct qm_dqrr_entry; /* DQRR (DeQueue Response Ring) entries */ -+struct qm_mr_entry; /* MR (Message Ring) entries */ -+struct qm_mc_command; /* MC (Management Command) command */ -+struct qm_mc_result; /* MC result */ -+ -+/* See David Lapp's "Frame formats" document, "dpateam", Jan 07, 2008 */ -+#define QM_FD_FORMAT_SG 0x4 -+#define QM_FD_FORMAT_LONG 0x2 -+#define QM_FD_FORMAT_COMPOUND 0x1 -+enum qm_fd_format { -+ /* 'contig' implies a contiguous buffer, whereas 'sg' implies a -+ * scatter-gather table. 'big' implies a 29-bit length with no offset -+ * field, otherwise length is 20-bit and offset is 9-bit. 'compound' -+ * implies a s/g-like table, where each entry itself represents a frame -+ * (contiguous or scatter-gather) and the 29-bit "length" is -+ * interpreted purely for congestion calculations, ie. a "congestion -+ * weight". 
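As a hedged
-+	 * illustration (values arbitrary): a 1500-byte contiguous frame at a
-+	 * 64-byte offset would use qm_fd_contig with length20 = 1500 and
-+	 * offset = 64, while a frame longer than 2^20 - 1 bytes would need a
-+	 * 'big' format and length29.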
*/
-+	qm_fd_contig = 0,
-+	qm_fd_contig_big = QM_FD_FORMAT_LONG,
-+	qm_fd_sg = QM_FD_FORMAT_SG,
-+	qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
-+	qm_fd_compound = QM_FD_FORMAT_COMPOUND
-+};
-+
-+/* Capitalised versions are un-typed but can be used in static expressions */
-+#define QM_FD_CONTIG	0
-+#define QM_FD_CONTIG_BIG	QM_FD_FORMAT_LONG
-+#define QM_FD_SG	QM_FD_FORMAT_SG
-+#define QM_FD_SG_BIG	(QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG)
-+#define QM_FD_COMPOUND	QM_FD_FORMAT_COMPOUND
-+
-+/* See 1.5.1.1: "Frame Descriptor (FD)" */
-+struct qm_fd {
-+	union {
-+		struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+			u8 dd:2;	/* dynamic debug */
-+			u8 liodn_offset:6;
-+			u8 bpid:8;	/* Buffer Pool ID */
-+			u8 eliodn_offset:4;
-+			u8 __reserved:4;
-+			u8 addr_hi;	/* high 8-bits of 40-bit address */
-+			u32 addr_lo;	/* low 32-bits of 40-bit address */
-+#else
-+			u8 liodn_offset:6;
-+			u8 dd:2;	/* dynamic debug */
-+			u8 bpid:8;	/* Buffer Pool ID */
-+			u8 __reserved:4;
-+			u8 eliodn_offset:4;
-+			u8 addr_hi;	/* high 8-bits of 40-bit address */
-+			u32 addr_lo;	/* low 32-bits of 40-bit address */
-+#endif
-+		};
-+		struct {
-+			u64 __notaddress:24;
-+			/* More efficient address accessor */
-+			u64 addr:40;
-+		};
-+		u64 opaque_addr;
-+	};
-+	/* The 'format' field indicates the interpretation of the remaining 29
-+	 * bits of the 32-bit word. For packing reasons, it is duplicated in the
-+	 * other union elements. Note, union'd structs are difficult to use with
-+	 * static initialisation under gcc, in which case use the "opaque" form
-+	 * with one of the macros. */
-+	union {
-+		/* For easier/faster copying of this part of the fd (eg. from a
-+		 * DQRR entry to an EQCR entry) copy 'opaque' */
-+		u32 opaque;
-+		/* If 'format' is _contig or _sg, 20b length and 9b offset */
-+		struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+			enum qm_fd_format format:3;
-+			u16 offset:9;
-+			u32 length20:20;
-+#else
-+			u32 length20:20;
-+			u16 offset:9;
-+			enum qm_fd_format format:3;
-+#endif
-+		};
-+		/* If 'format' is _contig_big or _sg_big, 29b length */
-+		struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+			enum qm_fd_format _format1:3;
-+			u32 length29:29;
-+#else
-+			u32 length29:29;
-+			enum qm_fd_format _format1:3;
-+#endif
-+		};
-+		/* If 'format' is _compound, 29b "congestion weight" */
-+		struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+			enum qm_fd_format _format2:3;
-+			u32 cong_weight:29;
-+#else
-+			u32 cong_weight:29;
-+			enum qm_fd_format _format2:3;
-+#endif
-+		};
-+	};
-+	union {
-+		u32 cmd;
-+		u32 status;
-+	};
-+} __aligned(8);
-+#define QM_FD_DD_NULL	0x00
-+#define QM_FD_PID_MASK	0x3f
-+static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
-+{
-+	return fd->addr;
-+}
-+
-+static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
-+{
-+	return (dma_addr_t)fd->addr;
-+}
-+/* Macro, so we compile better if 'v' isn't always 64-bit */
-+#define qm_fd_addr_set64(fd, v) \
-+	do { \
-+		struct qm_fd *__fd931 = (fd); \
-+		__fd931->addr = v; \
-+	} while (0)
-+
-+/* For static initialisation of FDs (which is complicated by the use of unions
-+ * in "struct qm_fd"), use the following macros. Note that;
-+ * - 'dd', 'pid' and 'bpid' are ignored (because there's no static
-+ *   initialisation use-case),
-+ * - use capitalised QM_FD_*** formats for static initialisation.
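-+ * Eg. a hedged sketch (values arbitrary; a 64-byte contiguous frame with a
-+ * zero address and command word):
-+ *	static const struct qm_fd fd = QM_FD_FMT_20(0, 0, 0, QM_FD_CONTIG,
-+ *						    0, 64);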
-+ */ -+#define QM_FD_FMT_20(cmd, addr_hi, addr_lo, fmt, off, len) \ -+ { 0, 0, 0, 0, 0, addr_hi, addr_lo, \ -+ { (((fmt)&0x7) << 29) | (((off)&0x1ff) << 20) | ((len)&0xfffff) }, \ -+ { cmd } } -+#define QM_FD_FMT_29(cmd, addr_hi, addr_lo, fmt, len) \ -+ { 0, 0, 0, 0, 0, addr_hi, addr_lo, \ -+ { (((fmt)&0x7) << 29) | ((len)&0x1fffffff) }, \ -+ { cmd } } -+ -+/* See 2.2.1.3 Multi-Core Datapath Acceleration Architecture */ -+#define QM_SG_OFFSET_MASK 0x1FFF -+struct qm_sg_entry { -+ union { -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u8 __reserved1[3]; -+ u8 addr_hi; /* high 8-bits of 40-bit address */ -+ u32 addr_lo; /* low 32-bits of 40-bit address */ -+#else -+ u32 addr_lo; /* low 32-bits of 40-bit address */ -+ u8 addr_hi; /* high 8-bits of 40-bit address */ -+ u8 __reserved1[3]; -+#endif -+ }; -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u64 __notaddress:24; -+ u64 addr:40; -+#else -+ u64 addr:40; -+ u64 __notaddress:24; -+#endif -+ }; -+ u64 opaque; -+ }; -+ union { -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u32 extension:1; /* Extension bit */ -+ u32 final:1; /* Final bit */ -+ u32 length:30; -+#else -+ u32 length:30; -+ u32 final:1; /* Final bit */ -+ u32 extension:1; /* Extension bit */ -+#endif -+ }; -+ u32 sgt_efl; -+ }; -+ u8 __reserved2; -+ u8 bpid; -+ union { -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u16 __reserved3:3; -+ u16 offset:13; -+#else -+ u16 offset:13; -+ u16 __reserved3:3; -+#endif -+ }; -+ u16 opaque_offset; -+ }; -+} __packed; -+union qm_sg_efl { -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u32 extension:1; /* Extension bit */ -+ u32 final:1; /* Final bit */ -+ u32 length:30; -+#else -+ u32 length:30; -+ u32 final:1; /* Final bit */ -+ u32 extension:1; /* Extension bit */ -+#endif -+ }; -+ u32 efl; -+}; -+static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg) -+{ -+ return be64_to_cpu(sg->opaque); -+} -+static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg) -+{ -+ return (dma_addr_t)be64_to_cpu(sg->opaque); -+} -+static inline u8 qm_sg_entry_get_ext(const struct qm_sg_entry *sg) -+{ -+ union qm_sg_efl u; -+ -+ u.efl = be32_to_cpu(sg->sgt_efl); -+ return u.extension; -+} -+static inline u8 qm_sg_entry_get_final(const struct qm_sg_entry *sg) -+{ -+ union qm_sg_efl u; -+ -+ u.efl = be32_to_cpu(sg->sgt_efl); -+ return u.final; -+} -+static inline u32 qm_sg_entry_get_len(const struct qm_sg_entry *sg) -+{ -+ union qm_sg_efl u; -+ -+ u.efl = be32_to_cpu(sg->sgt_efl); -+ return u.length; -+} -+static inline u8 qm_sg_entry_get_bpid(const struct qm_sg_entry *sg) -+{ -+ return sg->bpid; -+} -+static inline u16 qm_sg_entry_get_offset(const struct qm_sg_entry *sg) -+{ -+ u32 opaque_offset = be16_to_cpu(sg->opaque_offset); -+ -+ return opaque_offset & 0x1fff; -+} -+ -+/* Macro, so we compile better if 'v' isn't always 64-bit */ -+#define qm_sg_entry_set64(sg, v) \ -+ do { \ -+ struct qm_sg_entry *__sg931 = (sg); \ -+ __sg931->opaque = cpu_to_be64(v); \ -+ } while (0) -+#define qm_sg_entry_set_ext(sg, v) \ -+ do { \ -+ union qm_sg_efl __u932; \ -+ __u932.efl = be32_to_cpu((sg)->sgt_efl); \ -+ __u932.extension = v; \ -+ (sg)->sgt_efl = cpu_to_be32(__u932.efl); \ -+ } while (0) -+#define qm_sg_entry_set_final(sg, v) \ -+ do { \ -+ union qm_sg_efl __u933; \ -+ __u933.efl = be32_to_cpu((sg)->sgt_efl); \ -+ __u933.final = v; \ -+ (sg)->sgt_efl = cpu_to_be32(__u933.efl); \ -+ } while (0) -+#define qm_sg_entry_set_len(sg, v) \ -+ do { \ -+ union qm_sg_efl __u934; \ -+ __u934.efl = 
be32_to_cpu((sg)->sgt_efl); \ -+ __u934.length = v; \ -+ (sg)->sgt_efl = cpu_to_be32(__u934.efl); \ -+ } while (0) -+#define qm_sg_entry_set_bpid(sg, v) \ -+ do { \ -+ struct qm_sg_entry *__u935 = (sg); \ -+ __u935->bpid = v; \ -+ } while (0) -+#define qm_sg_entry_set_offset(sg, v) \ -+ do { \ -+ struct qm_sg_entry *__u936 = (sg); \ -+ __u936->opaque_offset = cpu_to_be16(v); \ -+ } while (0) -+ -+/* See 1.5.8.1: "Enqueue Command" */ -+struct qm_eqcr_entry { -+ u8 __dont_write_directly__verb; -+ u8 dca; -+ u16 seqnum; -+ u32 orp; /* 24-bit */ -+ u32 fqid; /* 24-bit */ -+ u32 tag; -+ struct qm_fd fd; -+ u8 __reserved3[32]; -+} __packed; -+#define QM_EQCR_VERB_VBIT 0x80 -+#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */ -+#define QM_EQCR_VERB_CMD_ENQUEUE 0x01 -+#define QM_EQCR_VERB_COLOUR_MASK 0x18 /* 4 possible values; */ -+#define QM_EQCR_VERB_COLOUR_GREEN 0x00 -+#define QM_EQCR_VERB_COLOUR_YELLOW 0x08 -+#define QM_EQCR_VERB_COLOUR_RED 0x10 -+#define QM_EQCR_VERB_COLOUR_OVERRIDE 0x18 -+#define QM_EQCR_VERB_INTERRUPT 0x04 /* on command consumption */ -+#define QM_EQCR_VERB_ORP 0x02 /* enable order restoration */ -+#define QM_EQCR_DCA_ENABLE 0x80 -+#define QM_EQCR_DCA_PARK 0x40 -+#define QM_EQCR_DCA_IDXMASK 0x0f /* "DQRR::idx" goes here */ -+#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */ -+#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */ -+#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */ -+#define QM_EQCR_FQID_NULL 0 /* eg. for an ORP seqnum hole */ -+ -+/* See 1.5.8.2: "Frame Dequeue Response" */ -+struct qm_dqrr_entry { -+ u8 verb; -+ u8 stat; -+ u16 seqnum; /* 15-bit */ -+ u8 tok; -+ u8 __reserved2[3]; -+ u32 fqid; /* 24-bit */ -+ u32 contextB; -+ struct qm_fd fd; -+ u8 __reserved4[32]; -+}; -+#define QM_DQRR_VERB_VBIT 0x80 -+#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */ -+#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */ -+#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */ -+#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */ -+#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */ -+#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */ -+#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */ -+#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/ -+ -+/* See 1.5.8.3: "ERN Message Response" */ -+/* See 1.5.8.4: "FQ State Change Notification" */ -+struct qm_mr_entry { -+ u8 verb; -+ union { -+ struct { -+ u8 dca; -+ u16 seqnum; -+ u8 rc; /* Rejection Code */ -+ u32 orp:24; -+ u32 fqid; /* 24-bit */ -+ u32 tag; -+ struct qm_fd fd; -+ } __packed ern; -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */ -+ u8 __reserved1:3; -+ enum qm_dc_portal portal:3; -+#else -+ enum qm_dc_portal portal:3; -+ u8 __reserved1:3; -+ u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */ -+#endif -+ u16 __reserved2; -+ u8 rc; /* Rejection Code */ -+ u32 __reserved3:24; -+ u32 fqid; /* 24-bit */ -+ u32 tag; -+ struct qm_fd fd; -+ } __packed dcern; -+ struct { -+ u8 fqs; /* Frame Queue Status */ -+ u8 __reserved1[6]; -+ u32 fqid; /* 24-bit */ -+ u32 contextB; -+ u8 __reserved2[16]; -+ } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */ -+ }; -+ u8 __reserved2[32]; -+} __packed; -+#define QM_MR_VERB_VBIT 0x80 -+/* The "ern" VERB bits match QM_EQCR_VERB_*** so aren't reproduced here. ERNs -+ * originating from direct-connect portals ("dcern") use 0x20 as a verb which -+ * would be invalid as a s/w enqueue verb. 
A s/w ERN can be distinguished from
-+ * the other MR types by noting if the 0x20 bit is unset. */
-+#define QM_MR_VERB_TYPE_MASK	0x27
-+#define QM_MR_VERB_DC_ERN	0x20
-+#define QM_MR_VERB_FQRN		0x21
-+#define QM_MR_VERB_FQRNI	0x22
-+#define QM_MR_VERB_FQRL		0x23
-+#define QM_MR_VERB_FQPN		0x24
-+#define QM_MR_RC_MASK		0xf0	/* contains one of; */
-+#define QM_MR_RC_CGR_TAILDROP	0x00
-+#define QM_MR_RC_WRED		0x10
-+#define QM_MR_RC_ERROR		0x20
-+#define QM_MR_RC_ORPWINDOW_EARLY	0x30
-+#define QM_MR_RC_ORPWINDOW_LATE	0x40
-+#define QM_MR_RC_FQ_TAILDROP	0x50
-+#define QM_MR_RC_ORPWINDOW_RETIRED	0x60
-+#define QM_MR_RC_ORP_ZERO	0x70
-+#define QM_MR_FQS_ORLPRESENT	0x02	/* ORL fragments to come */
-+#define QM_MR_FQS_NOTEMPTY	0x01	/* FQ has enqueued frames */
-+#define QM_MR_DCERN_COLOUR_GREEN	0x00
-+#define QM_MR_DCERN_COLOUR_YELLOW	0x01
-+#define QM_MR_DCERN_COLOUR_RED		0x02
-+#define QM_MR_DCERN_COLOUR_OVERRIDE	0x03
-+
-+/* An identical structure of FQD fields is present in the "Init FQ" command and
-+ * the "Query FQ" result; it's suctioned out into the "struct qm_fqd" type.
-+ * Within that, the 'stashing' and 'taildrop' pieces are also factored out; the
-+ * latter has two inlines to assist with converting to/from the mant+exp
-+ * representation. */
-+struct qm_fqd_stashing {
-+	/* See QM_STASHING_EXCL_<...> */
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+	u8 exclusive;
-+	u8 __reserved1:2;
-+	/* Numbers of cachelines */
-+	u8 annotation_cl:2;
-+	u8 data_cl:2;
-+	u8 context_cl:2;
-+#else
-+	u8 context_cl:2;
-+	u8 data_cl:2;
-+	u8 annotation_cl:2;
-+	u8 __reserved1:2;
-+	u8 exclusive;
-+#endif
-+} __packed;
-+struct qm_fqd_taildrop {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+	u16 __reserved1:3;
-+	u16 mant:8;
-+	u16 exp:5;
-+#else
-+	u16 exp:5;
-+	u16 mant:8;
-+	u16 __reserved1:3;
-+#endif
-+} __packed;
-+struct qm_fqd_oac {
-+	/* See QM_OAC_<...> */
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+	u8 oac:2; /* "Overhead Accounting Control" */
-+	u8 __reserved1:6;
-+#else
-+	u8 __reserved1:6;
-+	u8 oac:2; /* "Overhead Accounting Control" */
-+#endif
-+	/* Two's-complement value (-128 to +127) */
-+	signed char oal; /* "Overhead Accounting Length" */
-+} __packed;
-+struct qm_fqd {
-+	union {
-+		u8 orpc;
-+		struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+			u8 __reserved1:2;
-+			u8 orprws:3;
-+			u8 oa:1;
-+			u8 olws:2;
-+#else
-+			u8 olws:2;
-+			u8 oa:1;
-+			u8 orprws:3;
-+			u8 __reserved1:2;
-+#endif
-+		} __packed;
-+	};
-+	u8 cgid;
-+	u16 fq_ctrl;	/* See QM_FQCTRL_<...> */
-+	union {
-+		u16 dest_wq;
-+		struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+			u16 channel:13; /* qm_channel */
-+			u16 wq:3;
-+#else
-+			u16 wq:3;
-+			u16 channel:13; /* qm_channel */
-+#endif
-+		} __packed dest;
-+	};
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+	u16 __reserved2:1;
-+	u16 ics_cred:15;
-+#else
-+	u16 ics_cred:15;
-+	u16 __reserved2:1;
-+#endif
-+	/* For "Initialize Frame Queue" commands, the write-enable mask
-+	 * determines whether 'td' or 'oac_init' is observed. For query
-+	 * commands, this field is always 'td', and 'oac_query' (below) reflects
-+	 * the Overhead ACcounting values. */
-+	union {
-+		struct qm_fqd_taildrop td;
-+		struct qm_fqd_oac oac_init;
-+	};
-+	u32 context_b;
-+	union {
-+		/* Treat it as 64-bit opaque */
-+		u64 opaque;
-+		struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+			u32 hi;
-+			u32 lo;
-+#else
-+			u32 lo;
-+			u32 hi;
-+#endif
-+		};
-+		/* Treat it as s/w portal stashing config */
-+		/* See 1.5.6.7.1: "FQD Context_A field used for [...]
*/ -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ struct qm_fqd_stashing stashing; -+ /* 48-bit address of FQ context to -+ * stash, must be cacheline-aligned */ -+ u16 context_hi; -+ u32 context_lo; -+#else -+ u32 context_lo; -+ u16 context_hi; -+ struct qm_fqd_stashing stashing; -+#endif -+ } __packed; -+ } context_a; -+ struct qm_fqd_oac oac_query; -+} __packed; -+/* 64-bit converters for context_hi/lo */ -+static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd) -+{ -+ return ((u64)fqd->context_a.context_hi << 32) | -+ (u64)fqd->context_a.context_lo; -+} -+static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd) -+{ -+ return (dma_addr_t)qm_fqd_stashing_get64(fqd); -+} -+static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd) -+{ -+ return ((u64)fqd->context_a.hi << 32) | -+ (u64)fqd->context_a.lo; -+} -+/* Macro, so we compile better when 'v' isn't necessarily 64-bit */ -+#define qm_fqd_stashing_set64(fqd, v) \ -+ do { \ -+ struct qm_fqd *__fqd931 = (fqd); \ -+ __fqd931->context_a.context_hi = upper_32_bits(v); \ -+ __fqd931->context_a.context_lo = lower_32_bits(v); \ -+ } while (0) -+#define qm_fqd_context_a_set64(fqd, v) \ -+ do { \ -+ struct qm_fqd *__fqd931 = (fqd); \ -+ __fqd931->context_a.hi = upper_32_bits(v); \ -+ __fqd931->context_a.lo = lower_32_bits(v); \ -+ } while (0) -+/* convert a threshold value into mant+exp representation */ -+static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val, -+ int roundup) -+{ -+ u32 e = 0; -+ int oddbit = 0; -+ if (val > 0xe0000000) -+ return -ERANGE; -+ while (val > 0xff) { -+ oddbit = val & 1; -+ val >>= 1; -+ e++; -+ if (roundup && oddbit) -+ val++; -+ } -+ td->exp = e; -+ td->mant = val; -+ return 0; -+} -+/* and the other direction */ -+static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td) -+{ -+ return (u32)td->mant << td->exp; -+} -+ -+/* See 1.5.2.2: "Frame Queue Descriptor (FQD)" */ -+/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */ -+#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */ -+#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */ -+#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */ -+#define QM_FQCTRL_ORP 0x0100 /* ORP Enable */ -+#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */ -+#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */ -+#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */ -+#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */ -+#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */ -+#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */ -+#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */ -+ -+/* See 1.5.6.7.1: "FQD Context_A field used for [...] */ -+/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */ -+#define QM_STASHING_EXCL_ANNOTATION 0x04 -+#define QM_STASHING_EXCL_DATA 0x02 -+#define QM_STASHING_EXCL_CTX 0x01 -+ -+/* See 1.5.5.3: "Intra Class Scheduling" */ -+/* FQD field 'OAC' (Overhead ACcounting) uses these constants */ -+#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */ -+#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */ -+ -+/* See 1.5.8.4: "FQ State Change Notification" */ -+/* This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields -+ * and associated commands/responses. 
The WRED parameters are calculated from -+ * these fields as follows; -+ * MaxTH = MA * (2 ^ Mn) -+ * Slope = SA / (2 ^ Sn) -+ * MaxP = 4 * (Pn + 1) -+ */ -+struct qm_cgr_wr_parm { -+ union { -+ u32 word; -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u32 MA:8; -+ u32 Mn:5; -+ u32 SA:7; /* must be between 64-127 */ -+ u32 Sn:6; -+ u32 Pn:6; -+#else -+ u32 Pn:6; -+ u32 Sn:6; -+ u32 SA:7; /* must be between 64-127 */ -+ u32 Mn:5; -+ u32 MA:8; -+#endif -+ } __packed; -+ }; -+} __packed; -+/* This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding -+ * management commands, this is padded to a 16-bit structure field, so that's -+ * how we represent it here. The congestion state threshold is calculated from -+ * these fields as follows; -+ * CS threshold = TA * (2 ^ Tn) -+ */ -+struct qm_cgr_cs_thres { -+ union { -+ u16 hword; -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u16 __reserved:3; -+ u16 TA:8; -+ u16 Tn:5; -+#else -+ u16 Tn:5; -+ u16 TA:8; -+ u16 __reserved:3; -+#endif -+ } __packed; -+ }; -+} __packed; -+/* This identical structure of CGR fields is present in the "Init/Modify CGR" -+ * commands and the "Query CGR" result. It's suctioned out here into its own -+ * struct. */ -+struct __qm_mc_cgr { -+ struct qm_cgr_wr_parm wr_parm_g; -+ struct qm_cgr_wr_parm wr_parm_y; -+ struct qm_cgr_wr_parm wr_parm_r; -+ u8 wr_en_g; /* boolean, use QM_CGR_EN */ -+ u8 wr_en_y; /* boolean, use QM_CGR_EN */ -+ u8 wr_en_r; /* boolean, use QM_CGR_EN */ -+ u8 cscn_en; /* boolean, use QM_CGR_EN */ -+ union { -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */ -+ u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */ -+#else -+ u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */ -+ u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */ -+#endif -+ }; -+ u32 cscn_targ; /* use QM_CGR_TARG_* */ -+ }; -+ u8 cstd_en; /* boolean, use QM_CGR_EN */ -+ u8 cs; /* boolean, only used in query response */ -+ union { -+ /* use qm_cgr_cs_thres_set64() */ -+ struct qm_cgr_cs_thres cs_thres; -+ u16 __cs_thres; -+ }; -+ u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */ -+} __packed; -+#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */ -+#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit*/ -+#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */ -+#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */ -+#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */ -+#define QM_CGR_TARG_FMAN1 0x00100000 /* : fman1 */ -+/* Convert CGR thresholds to/from "cs_thres" format */ -+static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th) -+{ -+ return (u64)th->TA << th->Tn; -+} -+static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val, -+ int roundup) -+{ -+ u32 e = 0; -+ int oddbit = 0; -+ while (val > 0xff) { -+ oddbit = val & 1; -+ val >>= 1; -+ e++; -+ if (roundup && oddbit) -+ val++; -+ } -+ th->Tn = e; -+ th->TA = val; -+ return 0; -+} -+ -+/* See 1.5.8.5.1: "Initialize FQ" */ -+/* See 1.5.8.5.2: "Query FQ" */ -+/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */ -+/* See 1.5.8.5.4: "Alter FQ State Commands " */ -+/* See 1.5.8.6.1: "Initialize/Modify CGR" */ -+/* See 1.5.8.6.2: "CGR Test Write" */ -+/* See 1.5.8.6.3: "Query CGR" */ -+/* See 1.5.8.6.4: "Query Congestion Group State" */ -+struct qm_mcc_initfq { -+ u8 __reserved1; -+ u16 we_mask; /* Write Enable Mask */ -+ u32 fqid; /* 24-bit */ -+ u16 count; /* 
Initialises 'count+1' FQDs */ -+ struct qm_fqd fqd; /* the FQD fields go here */ -+ u8 __reserved3[30]; -+} __packed; -+struct qm_mcc_queryfq { -+ u8 __reserved1[3]; -+ u32 fqid; /* 24-bit */ -+ u8 __reserved2[56]; -+} __packed; -+struct qm_mcc_queryfq_np { -+ u8 __reserved1[3]; -+ u32 fqid; /* 24-bit */ -+ u8 __reserved2[56]; -+} __packed; -+struct qm_mcc_alterfq { -+ u8 __reserved1[3]; -+ u32 fqid; /* 24-bit */ -+ u8 __reserved2; -+ u8 count; /* number of consecutive FQID */ -+ u8 __reserved3[10]; -+ u32 context_b; /* frame queue context b */ -+ u8 __reserved4[40]; -+} __packed; -+struct qm_mcc_initcgr { -+ u8 __reserved1; -+ u16 we_mask; /* Write Enable Mask */ -+ struct __qm_mc_cgr cgr; /* CGR fields */ -+ u8 __reserved2[2]; -+ u8 cgid; -+ u8 __reserved4[32]; -+} __packed; -+struct qm_mcc_cgrtestwrite { -+ u8 __reserved1[2]; -+ u8 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */ -+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */ -+ u8 __reserved2[23]; -+ u8 cgid; -+ u8 __reserved3[32]; -+} __packed; -+struct qm_mcc_querycgr { -+ u8 __reserved1[30]; -+ u8 cgid; -+ u8 __reserved2[32]; -+} __packed; -+struct qm_mcc_querycongestion { -+ u8 __reserved[63]; -+} __packed; -+struct qm_mcc_querywq { -+ u8 __reserved; -+ /* select channel if verb != QUERYWQ_DEDICATED */ -+ union { -+ u16 channel_wq; /* ignores wq (3 lsbits) */ -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u16 id:13; /* qm_channel */ -+ u16 __reserved1:3; -+#else -+ u16 __reserved1:3; -+ u16 id:13; /* qm_channel */ -+#endif -+ } __packed channel; -+ }; -+ u8 __reserved2[60]; -+} __packed; -+ -+struct qm_mcc_ceetm_lfqmt_config { -+ u8 __reserved1[4]; -+ u32 lfqid:24; -+ u8 __reserved2[2]; -+ u16 cqid; -+ u8 __reserved3[2]; -+ u16 dctidx; -+ u8 __reserved4[48]; -+} __packed; -+ -+struct qm_mcc_ceetm_lfqmt_query { -+ u8 __reserved1[4]; -+ u32 lfqid:24; -+ u8 __reserved2[56]; -+} __packed; -+ -+struct qm_mcc_ceetm_cq_config { -+ u8 __reserved1; -+ u16 cqid; -+ u8 dcpid; -+ u8 __reserved2; -+ u16 ccgid; -+ u8 __reserved3[56]; -+} __packed; -+ -+struct qm_mcc_ceetm_cq_query { -+ u8 __reserved1; -+ u16 cqid; -+ u8 dcpid; -+ u8 __reserved2[59]; -+} __packed; -+ -+struct qm_mcc_ceetm_dct_config { -+ u8 __reserved1; -+ u16 dctidx; -+ u8 dcpid; -+ u8 __reserved2[15]; -+ u32 context_b; -+ u64 context_a; -+ u8 __reserved3[32]; -+} __packed; -+ -+struct qm_mcc_ceetm_dct_query { -+ u8 __reserved1; -+ u16 dctidx; -+ u8 dcpid; -+ u8 __reserved2[59]; -+} __packed; -+ -+struct qm_mcc_ceetm_class_scheduler_config { -+ u8 __reserved1; -+ u16 cqcid; -+ u8 dcpid; -+ u8 __reserved2[6]; -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u8 gpc_reserved:1; -+ u8 gpc_combine_flag:1; -+ u8 gpc_prio_b:3; -+ u8 gpc_prio_a:3; -+#else -+ u8 gpc_prio_a:3; -+ u8 gpc_prio_b:3; -+ u8 gpc_combine_flag:1; -+ u8 gpc_reserved:1; -+#endif -+ u16 crem; -+ u16 erem; -+ u8 w[8]; -+ u8 __reserved3[40]; -+} __packed; -+ -+struct qm_mcc_ceetm_class_scheduler_query { -+ u8 __reserved1; -+ u16 cqcid; -+ u8 dcpid; -+ u8 __reserved2[59]; -+} __packed; -+ -+#define CEETM_COMMAND_CHANNEL_MAPPING (0 << 12) -+#define CEETM_COMMAND_SP_MAPPING (1 << 12) -+#define CEETM_COMMAND_CHANNEL_SHAPER (2 << 12) -+#define CEETM_COMMAND_LNI_SHAPER (3 << 12) -+#define CEETM_COMMAND_TCFC (4 << 12) -+ -+#define CEETM_CCGRID_MASK 0x01FF -+#define CEETM_CCGR_CM_CONFIGURE (0 << 14) -+#define CEETM_CCGR_DN_CONFIGURE (1 << 14) -+#define CEETM_CCGR_TEST_WRITE (2 << 14) -+#define CEETM_CCGR_CM_QUERY (0 << 14) -+#define CEETM_CCGR_DN_QUERY (1 << 14) -+#define CEETM_CCGR_DN_QUERY_FLUSH (2 << 14) 
-+#define CEETM_QUERY_CONGESTION_STATE (3 << 14) -+ -+struct qm_mcc_ceetm_mapping_shaper_tcfc_config { -+ u8 __reserved1; -+ u16 cid; -+ u8 dcpid; -+ union { -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u8 map_shaped:1; -+ u8 map_reserved:4; -+ u8 map_lni_id:3; -+#else -+ u8 map_lni_id:3; -+ u8 map_reserved:4; -+ u8 map_shaped:1; -+#endif -+ u8 __reserved2[58]; -+ } __packed channel_mapping; -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u8 map_reserved:5; -+ u8 map_lni_id:3; -+#else -+ u8 map_lni_id:3; -+ u8 map_reserved:5; -+#endif -+ u8 __reserved2[58]; -+ } __packed sp_mapping; -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u8 cpl:1; -+ u8 cpl_reserved:2; -+ u8 oal:5; -+#else -+ u8 oal:5; -+ u8 cpl_reserved:2; -+ u8 cpl:1; -+#endif -+ u32 crtcr:24; -+ u32 ertcr:24; -+ u16 crtbl; -+ u16 ertbl; -+ u8 mps; /* This will be hardcoded by driver with 60 */ -+ u8 __reserved2[47]; -+ } __packed shaper_config; -+ struct { -+ u8 __reserved2[11]; -+ u64 lnitcfcc; -+ u8 __reserved3[40]; -+ } __packed tcfc_config; -+ }; -+} __packed; -+ -+struct qm_mcc_ceetm_mapping_shaper_tcfc_query { -+ u8 __reserved1; -+ u16 cid; -+ u8 dcpid; -+ u8 __reserved2[59]; -+} __packed; -+ -+struct qm_mcc_ceetm_ccgr_config { -+ u8 __reserved1; -+ u16 ccgrid; -+ u8 dcpid; -+ u8 __reserved2; -+ u16 we_mask; -+ union { -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u8 ctl_reserved:1; -+ u8 ctl_wr_en_g:1; -+ u8 ctl_wr_en_y:1; -+ u8 ctl_wr_en_r:1; -+ u8 ctl_td_en:1; -+ u8 ctl_td_mode:1; -+ u8 ctl_cscn_en:1; -+ u8 ctl_mode:1; -+#else -+ u8 ctl_mode:1; -+ u8 ctl_cscn_en:1; -+ u8 ctl_td_mode:1; -+ u8 ctl_td_en:1; -+ u8 ctl_wr_en_r:1; -+ u8 ctl_wr_en_y:1; -+ u8 ctl_wr_en_g:1; -+ u8 ctl_reserved:1; -+#endif -+ u8 cdv; -+ u16 cscn_tupd; -+ u8 oal; -+ u8 __reserved3; -+ struct qm_cgr_cs_thres cs_thres; -+ struct qm_cgr_cs_thres cs_thres_x; -+ struct qm_cgr_cs_thres td_thres; -+ struct qm_cgr_wr_parm wr_parm_g; -+ struct qm_cgr_wr_parm wr_parm_y; -+ struct qm_cgr_wr_parm wr_parm_r; -+ } __packed cm_config; -+ struct { -+ u8 dnc; -+ u8 dn0; -+ u8 dn1; -+ u64 dnba:40; -+ u8 __reserved3[2]; -+ u16 dnth_0; -+ u8 __reserved4[2]; -+ u16 dnth_1; -+ u8 __reserved5[8]; -+ } __packed dn_config; -+ struct { -+ u8 __reserved3[3]; -+ u64 i_cnt:40; -+ u8 __reserved4[16]; -+ } __packed test_write; -+ }; -+ u8 __reserved5[32]; -+} __packed; -+ -+struct qm_mcc_ceetm_ccgr_query { -+ u8 __reserved1; -+ u16 ccgrid; -+ u8 dcpid; -+ u8 __reserved2[59]; -+} __packed; -+ -+struct qm_mcc_ceetm_cq_peek_pop_xsfdrread { -+ u8 __reserved1; -+ u16 cqid; -+ u8 dcpid; -+ u8 ct; -+ u16 xsfdr; -+ u8 __reserved2[56]; -+} __packed; -+ -+#define CEETM_QUERY_DEQUEUE_STATISTICS 0x00 -+#define CEETM_QUERY_DEQUEUE_CLEAR_STATISTICS 0x01 -+#define CEETM_WRITE_DEQUEUE_STATISTICS 0x02 -+#define CEETM_QUERY_REJECT_STATISTICS 0x03 -+#define CEETM_QUERY_REJECT_CLEAR_STATISTICS 0x04 -+#define CEETM_WRITE_REJECT_STATISTICS 0x05 -+struct qm_mcc_ceetm_statistics_query_write { -+ u8 __reserved1; -+ u16 cid; -+ u8 dcpid; -+ u8 ct; -+ u8 __reserved2[13]; -+ u64 frm_cnt:40; -+ u8 __reserved3[2]; -+ u64 byte_cnt:48; -+ u8 __reserved[32]; -+} __packed; -+ -+struct qm_mc_command { -+ u8 __dont_write_directly__verb; -+ union { -+ struct qm_mcc_initfq initfq; -+ struct qm_mcc_queryfq queryfq; -+ struct qm_mcc_queryfq_np queryfq_np; -+ struct qm_mcc_alterfq alterfq; -+ struct qm_mcc_initcgr initcgr; -+ struct qm_mcc_cgrtestwrite cgrtestwrite; -+ struct qm_mcc_querycgr querycgr; -+ struct qm_mcc_querycongestion querycongestion; -+ struct 
qm_mcc_querywq querywq; -+ struct qm_mcc_ceetm_lfqmt_config lfqmt_config; -+ struct qm_mcc_ceetm_lfqmt_query lfqmt_query; -+ struct qm_mcc_ceetm_cq_config cq_config; -+ struct qm_mcc_ceetm_cq_query cq_query; -+ struct qm_mcc_ceetm_dct_config dct_config; -+ struct qm_mcc_ceetm_dct_query dct_query; -+ struct qm_mcc_ceetm_class_scheduler_config csch_config; -+ struct qm_mcc_ceetm_class_scheduler_query csch_query; -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config mst_config; -+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query mst_query; -+ struct qm_mcc_ceetm_ccgr_config ccgr_config; -+ struct qm_mcc_ceetm_ccgr_query ccgr_query; -+ struct qm_mcc_ceetm_cq_peek_pop_xsfdrread cq_ppxr; -+ struct qm_mcc_ceetm_statistics_query_write stats_query_write; -+ }; -+} __packed; -+#define QM_MCC_VERB_VBIT 0x80 -+#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */ -+#define QM_MCC_VERB_INITFQ_PARKED 0x40 -+#define QM_MCC_VERB_INITFQ_SCHED 0x41 -+#define QM_MCC_VERB_QUERYFQ 0x44 -+#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */ -+#define QM_MCC_VERB_QUERYWQ 0x46 -+#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47 -+#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */ -+#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */ -+#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */ -+#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */ -+#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */ -+#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */ -+#define QM_MCC_VERB_INITCGR 0x50 -+#define QM_MCC_VERB_MODIFYCGR 0x51 -+#define QM_MCC_VERB_CGRTESTWRITE 0x52 -+#define QM_MCC_VERB_QUERYCGR 0x58 -+#define QM_MCC_VERB_QUERYCONGESTION 0x59 -+/* INITFQ-specific flags */ -+#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */ -+#define QM_INITFQ_WE_OAC 0x0100 -+#define QM_INITFQ_WE_ORPC 0x0080 -+#define QM_INITFQ_WE_CGID 0x0040 -+#define QM_INITFQ_WE_FQCTRL 0x0020 -+#define QM_INITFQ_WE_DESTWQ 0x0010 -+#define QM_INITFQ_WE_ICSCRED 0x0008 -+#define QM_INITFQ_WE_TDTHRESH 0x0004 -+#define QM_INITFQ_WE_CONTEXTB 0x0002 -+#define QM_INITFQ_WE_CONTEXTA 0x0001 -+/* INITCGR/MODIFYCGR-specific flags */ -+#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */ -+#define QM_CGR_WE_WR_PARM_G 0x0400 -+#define QM_CGR_WE_WR_PARM_Y 0x0200 -+#define QM_CGR_WE_WR_PARM_R 0x0100 -+#define QM_CGR_WE_WR_EN_G 0x0080 -+#define QM_CGR_WE_WR_EN_Y 0x0040 -+#define QM_CGR_WE_WR_EN_R 0x0020 -+#define QM_CGR_WE_CSCN_EN 0x0010 -+#define QM_CGR_WE_CSCN_TARG 0x0008 -+#define QM_CGR_WE_CSTD_EN 0x0004 -+#define QM_CGR_WE_CS_THRES 0x0002 -+#define QM_CGR_WE_MODE 0x0001 -+ -+/* See 1.5.9.7 CEETM Management Commands */ -+#define QM_CEETM_VERB_LFQMT_CONFIG 0x70 -+#define QM_CEETM_VERB_LFQMT_QUERY 0x71 -+#define QM_CEETM_VERB_CQ_CONFIG 0x72 -+#define QM_CEETM_VERB_CQ_QUERY 0x73 -+#define QM_CEETM_VERB_DCT_CONFIG 0x74 -+#define QM_CEETM_VERB_DCT_QUERY 0x75 -+#define QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG 0x76 -+#define QM_CEETM_VERB_CLASS_SCHEDULER_QUERY 0x77 -+#define QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG 0x78 -+#define QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY 0x79 -+#define QM_CEETM_VERB_CCGR_CONFIG 0x7A -+#define QM_CEETM_VERB_CCGR_QUERY 0x7B -+#define QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD 0x7C -+#define QM_CEETM_VERB_STATISTICS_QUERY_WRITE 0x7D -+ -+/* See 1.5.8.5.1: "Initialize FQ" */ -+/* See 1.5.8.5.2: "Query FQ" */ -+/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */ -+/* See 1.5.8.5.4: "Alter FQ State Commands " */ -+/* See 1.5.8.6.1: "Initialize/Modify CGR" */ -+/* See 1.5.8.6.2: "CGR Test Write" */ -+/* See 
1.5.8.6.3: "Query CGR" */ -+/* See 1.5.8.6.4: "Query Congestion Group State" */ -+struct qm_mcr_initfq { -+ u8 __reserved1[62]; -+} __packed; -+struct qm_mcr_queryfq { -+ u8 __reserved1[8]; -+ struct qm_fqd fqd; /* the FQD fields are here */ -+ u8 __reserved2[30]; -+} __packed; -+struct qm_mcr_queryfq_np { -+ u8 __reserved1; -+ u8 state; /* QM_MCR_NP_STATE_*** */ -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u8 __reserved2; -+ u32 fqd_link:24; -+ u16 __reserved3:2; -+ u16 odp_seq:14; -+ u16 __reserved4:2; -+ u16 orp_nesn:14; -+ u16 __reserved5:1; -+ u16 orp_ea_hseq:15; -+ u16 __reserved6:1; -+ u16 orp_ea_tseq:15; -+ u8 __reserved7; -+ u32 orp_ea_hptr:24; -+ u8 __reserved8; -+ u32 orp_ea_tptr:24; -+ u8 __reserved9; -+ u32 pfdr_hptr:24; -+ u8 __reserved10; -+ u32 pfdr_tptr:24; -+ u8 __reserved11[5]; -+ u8 __reserved12:7; -+ u8 is:1; -+ u16 ics_surp; -+ u32 byte_cnt; -+ u8 __reserved13; -+ u32 frm_cnt:24; -+ u32 __reserved14; -+ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */ -+ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */ -+ u16 __reserved15; -+ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */ -+ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */ -+ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */ -+#else -+ u8 __reserved2; -+ u32 fqd_link:24; -+ -+ u16 odp_seq:14; -+ u16 __reserved3:2; -+ -+ u16 orp_nesn:14; -+ u16 __reserved4:2; -+ -+ u16 orp_ea_hseq:15; -+ u16 __reserved5:1; -+ -+ u16 orp_ea_tseq:15; -+ u16 __reserved6:1; -+ -+ u8 __reserved7; -+ u32 orp_ea_hptr:24; -+ -+ u8 __reserved8; -+ u32 orp_ea_tptr:24; -+ -+ u8 __reserved9; -+ u32 pfdr_hptr:24; -+ -+ u8 __reserved10; -+ u32 pfdr_tptr:24; -+ -+ u8 __reserved11[5]; -+ u8 is:1; -+ u8 __reserved12:7; -+ u16 ics_surp; -+ u32 byte_cnt; -+ u8 __reserved13; -+ u32 frm_cnt:24; -+ u32 __reserved14; -+ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */ -+ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */ -+ u16 __reserved15; -+ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */ -+ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */ -+ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */ -+#endif -+} __packed; -+ -+ -+struct qm_mcr_alterfq { -+ u8 fqs; /* Frame Queue Status */ -+ u8 __reserved1[61]; -+} __packed; -+struct qm_mcr_initcgr { -+ u8 __reserved1[62]; -+} __packed; -+struct qm_mcr_cgrtestwrite { -+ u16 __reserved1; -+ struct __qm_mc_cgr cgr; /* CGR fields */ -+ u8 __reserved2[3]; -+ u32 __reserved3:24; -+ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */ -+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */ -+ u32 __reserved4:24; -+ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */ -+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */ -+ u16 lgt; /* Last Group Tick */ -+ u16 wr_prob_g; -+ u16 wr_prob_y; -+ u16 wr_prob_r; -+ u8 __reserved5[8]; -+} __packed; -+struct qm_mcr_querycgr { -+ u16 __reserved1; -+ struct __qm_mc_cgr cgr; /* CGR fields */ -+ u8 __reserved2[3]; -+ union { -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u32 __reserved3:24; -+ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */ -+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */ -+#else -+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */ -+ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */ -+ u32 __reserved3:24; -+#endif -+ }; -+ u64 i_bcnt; -+ }; -+ union { -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u32 __reserved4:24; -+ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */ -+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */ -+#else -+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */ -+ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */ -+ u32 __reserved4:24; -+#endif -+ }; -+ u64 a_bcnt; -+ }; -+ union { -+ u32 cscn_targ_swp[4]; -+ u8 __reserved5[16]; -+ }; -+} 
__packed;
-+static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
-+{
-+	return be64_to_cpu(q->i_bcnt);
-+}
-+static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
-+{
-+	return be64_to_cpu(q->a_bcnt);
-+}
-+static inline u64 qm_mcr_cgrtestwrite_i_get64(
-+					const struct qm_mcr_cgrtestwrite *q)
-+{
-+	return be64_to_cpu(((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo);
-+}
-+static inline u64 qm_mcr_cgrtestwrite_a_get64(
-+					const struct qm_mcr_cgrtestwrite *q)
-+{
-+	return be64_to_cpu(((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo);
-+}
-+/* Macro, so we compile better if 'v' isn't always 64-bit */
-+#define qm_mcr_querycgr_i_set64(q, v) \
-+	do { \
-+		struct qm_mcr_querycgr *__q931 = (q); \
-+		__q931->i_bcnt_hi = upper_32_bits(v); \
-+		__q931->i_bcnt_lo = lower_32_bits(v); \
-+	} while (0)
-+#define qm_mcr_querycgr_a_set64(q, v) \
-+	do { \
-+		struct qm_mcr_querycgr *__q931 = (q); \
-+		__q931->a_bcnt_hi = upper_32_bits(v); \
-+		__q931->a_bcnt_lo = lower_32_bits(v); \
-+	} while (0)
-+struct __qm_mcr_querycongestion {
-+	u32 __state[8];
-+};
-+struct qm_mcr_querycongestion {
-+	u8 __reserved[30];
-+	/* Access this struct using QM_MCR_QUERYCONGESTION() */
-+	struct __qm_mcr_querycongestion state;
-+} __packed;
-+struct qm_mcr_querywq {
-+	union {
-+		u16 channel_wq; /* ignores wq (3 lsbits) */
-+		struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+			u16 id:13; /* qm_channel */
-+			u16 __reserved:3;
-+#else
-+			u16 __reserved:3;
-+			u16 id:13; /* qm_channel */
-+#endif
-+		} __packed channel;
-+	};
-+	u8 __reserved[28];
-+	u32 wq_len[8];
-+} __packed;
-+
-+/* QMAN CEETM Management Command Response */
-+struct qm_mcr_ceetm_lfqmt_config {
-+	u8 __reserved1[62];
-+} __packed;
-+struct qm_mcr_ceetm_lfqmt_query {
-+	u8 __reserved1[8];
-+	u16 cqid;
-+	u8 __reserved2[2];
-+	u16 dctidx;
-+	u8 __reserved3[2];
-+	u16 ccgid;
-+	u8 __reserved4[44];
-+} __packed;
-+
-+struct qm_mcr_ceetm_cq_config {
-+	u8 __reserved1[62];
-+} __packed;
-+
-+struct qm_mcr_ceetm_cq_query {
-+	u8 __reserved1[4];
-+	u16 ccgid;
-+	u16 state;
-+	u32 pfdr_hptr:24;
-+	u32 pfdr_tptr:24;
-+	u16 od1_xsfdr;
-+	u16 od2_xsfdr;
-+	u16 od3_xsfdr;
-+	u16 od4_xsfdr;
-+	u16 od5_xsfdr;
-+	u16 od6_xsfdr;
-+	u16 ra1_xsfdr;
-+	u16 ra2_xsfdr;
-+	u8 __reserved2;
-+	u32 frm_cnt:24;
-+	u8 __reserved333[28];
-+} __packed;
-+
-+struct qm_mcr_ceetm_dct_config {
-+	u8 __reserved1[62];
-+} __packed;
-+
-+struct qm_mcr_ceetm_dct_query {
-+	u8 __reserved1[18];
-+	u32 context_b;
-+	u64 context_a;
-+	u8 __reserved2[32];
-+} __packed;
-+
-+struct qm_mcr_ceetm_class_scheduler_config {
-+	u8 __reserved1[62];
-+} __packed;
-+
-+struct qm_mcr_ceetm_class_scheduler_query {
-+	u8 __reserved1[9];
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+	u8 gpc_reserved:1;
-+	u8 gpc_combine_flag:1;
-+	u8 gpc_prio_b:3;
-+	u8 gpc_prio_a:3;
-+#else
-+	u8 gpc_prio_a:3;
-+	u8 gpc_prio_b:3;
-+	u8 gpc_combine_flag:1;
-+	u8 gpc_reserved:1;
-+#endif
-+	u16 crem;
-+	u16 erem;
-+	u8 w[8];
-+	u8 __reserved2[5];
-+	u32 wbfslist:24;
-+	u32 d8;
-+	u32 d9;
-+	u32 d10;
-+	u32 d11;
-+	u32 d12;
-+	u32 d13;
-+	u32 d14;
-+	u32 d15;
-+} __packed;
-+
-+struct qm_mcr_ceetm_mapping_shaper_tcfc_config {
-+	u16 cid;
-+	u8 __reserved2[60];
-+} __packed;
-+
-+struct qm_mcr_ceetm_mapping_shaper_tcfc_query {
-+	u16 cid;
-+	u8 __reserved1;
-+	union {
-+		struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+			u8 map_shaped:1;
-+			u8 map_reserved:4;
-+			u8 map_lni_id:3;
-+#else
-+			u8 map_lni_id:3;
-+			u8 map_reserved:4;
-+			u8 map_shaped:1;
-+#endif
-+			u8 __reserved2[58];
-+ } __packed channel_mapping_query; -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u8 map_reserved:5; -+ u8 map_lni_id:3; -+#else -+ u8 map_lni_id:3; -+ u8 map_reserved:5; -+#endif -+ u8 __reserved2[58]; -+ } __packed sp_mapping_query; -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u8 cpl:1; -+ u8 cpl_reserved:2; -+ u8 oal:5; -+#else -+ u8 oal:5; -+ u8 cpl_reserved:2; -+ u8 cpl:1; -+#endif -+ u32 crtcr:24; -+ u32 ertcr:24; -+ u16 crtbl; -+ u16 ertbl; -+ u8 mps; -+ u8 __reserved2[15]; -+ u32 crat; -+ u32 erat; -+ u8 __reserved3[24]; -+ } __packed shaper_query; -+ struct { -+ u8 __reserved1[11]; -+ u64 lnitcfcc; -+ u8 __reserved3[40]; -+ } __packed tcfc_query; -+ }; -+} __packed; -+ -+struct qm_mcr_ceetm_ccgr_config { -+ u8 __reserved1[46]; -+ union { -+ u8 __reserved2[8]; -+ struct { -+ u16 timestamp; -+ u16 wr_porb_g; -+ u16 wr_prob_y; -+ u16 wr_prob_r; -+ } __packed test_write; -+ }; -+ u8 __reserved3[8]; -+} __packed; -+ -+struct qm_mcr_ceetm_ccgr_query { -+ u8 __reserved1[6]; -+ union { -+ struct { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ u8 ctl_reserved:1; -+ u8 ctl_wr_en_g:1; -+ u8 ctl_wr_en_y:1; -+ u8 ctl_wr_en_r:1; -+ u8 ctl_td_en:1; -+ u8 ctl_td_mode:1; -+ u8 ctl_cscn_en:1; -+ u8 ctl_mode:1; -+#else -+ u8 ctl_mode:1; -+ u8 ctl_cscn_en:1; -+ u8 ctl_td_mode:1; -+ u8 ctl_td_en:1; -+ u8 ctl_wr_en_r:1; -+ u8 ctl_wr_en_y:1; -+ u8 ctl_wr_en_g:1; -+ u8 ctl_reserved:1; -+#endif -+ u8 cdv; -+ u8 __reserved2[2]; -+ u8 oal; -+ u8 __reserved3; -+ struct qm_cgr_cs_thres cs_thres; -+ struct qm_cgr_cs_thres cs_thres_x; -+ struct qm_cgr_cs_thres td_thres; -+ struct qm_cgr_wr_parm wr_parm_g; -+ struct qm_cgr_wr_parm wr_parm_y; -+ struct qm_cgr_wr_parm wr_parm_r; -+ u16 cscn_targ_dcp; -+ u8 dcp_lsn; -+ u64 i_cnt:40; -+ u8 __reserved4[3]; -+ u64 a_cnt:40; -+ u32 cscn_targ_swp[4]; -+ } __packed cm_query; -+ struct { -+ u8 dnc; -+ u8 dn0; -+ u8 dn1; -+ u64 dnba:40; -+ u8 __reserved2[2]; -+ u16 dnth_0; -+ u8 __reserved3[2]; -+ u16 dnth_1; -+ u8 __reserved4[10]; -+ u16 dnacc_0; -+ u8 __reserved5[2]; -+ u16 dnacc_1; -+ u8 __reserved6[24]; -+ } __packed dn_query; -+ struct { -+ u8 __reserved2[24]; -+ struct __qm_mcr_querycongestion state; -+ } __packed congestion_state; -+ -+ }; -+} __packed; -+ -+struct qm_mcr_ceetm_cq_peek_pop_xsfdrread { -+ u8 stat; -+ u8 __reserved1[11]; -+ u16 dctidx; -+ struct qm_fd fd; -+ u8 __reserved2[32]; -+} __packed; -+ -+struct qm_mcr_ceetm_statistics_query { -+ u8 __reserved1[17]; -+ u64 frm_cnt:40; -+ u8 __reserved2[2]; -+ u64 byte_cnt:48; -+ u8 __reserved3[32]; -+} __packed; -+ -+struct qm_mc_result { -+ u8 verb; -+ u8 result; -+ union { -+ struct qm_mcr_initfq initfq; -+ struct qm_mcr_queryfq queryfq; -+ struct qm_mcr_queryfq_np queryfq_np; -+ struct qm_mcr_alterfq alterfq; -+ struct qm_mcr_initcgr initcgr; -+ struct qm_mcr_cgrtestwrite cgrtestwrite; -+ struct qm_mcr_querycgr querycgr; -+ struct qm_mcr_querycongestion querycongestion; -+ struct qm_mcr_querywq querywq; -+ struct qm_mcr_ceetm_lfqmt_config lfqmt_config; -+ struct qm_mcr_ceetm_lfqmt_query lfqmt_query; -+ struct qm_mcr_ceetm_cq_config cq_config; -+ struct qm_mcr_ceetm_cq_query cq_query; -+ struct qm_mcr_ceetm_dct_config dct_config; -+ struct qm_mcr_ceetm_dct_query dct_query; -+ struct qm_mcr_ceetm_class_scheduler_config csch_config; -+ struct qm_mcr_ceetm_class_scheduler_query csch_query; -+ struct qm_mcr_ceetm_mapping_shaper_tcfc_config mst_config; -+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query mst_query; -+ struct qm_mcr_ceetm_ccgr_config ccgr_config; -+ struct 
qm_mcr_ceetm_ccgr_query ccgr_query; -+ struct qm_mcr_ceetm_cq_peek_pop_xsfdrread cq_ppxr; -+ struct qm_mcr_ceetm_statistics_query stats_query; -+ }; -+} __packed; -+ -+#define QM_MCR_VERB_RRID 0x80 -+#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK -+#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED -+#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED -+#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ -+#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP -+#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ -+#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED -+#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED -+#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE -+#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE -+#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS -+#define QM_MCR_RESULT_NULL 0x00 -+#define QM_MCR_RESULT_OK 0xf0 -+#define QM_MCR_RESULT_ERR_FQID 0xf1 -+#define QM_MCR_RESULT_ERR_FQSTATE 0xf2 -+#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */ -+#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4 -+#define QM_MCR_RESULT_PENDING 0xf8 -+#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff -+#define QM_MCR_NP_STATE_FE 0x10 -+#define QM_MCR_NP_STATE_R 0x08 -+#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */ -+#define QM_MCR_NP_STATE_OOS 0x00 -+#define QM_MCR_NP_STATE_RETIRED 0x01 -+#define QM_MCR_NP_STATE_TEN_SCHED 0x02 -+#define QM_MCR_NP_STATE_TRU_SCHED 0x03 -+#define QM_MCR_NP_STATE_PARKED 0x04 -+#define QM_MCR_NP_STATE_ACTIVE 0x05 -+#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */ -+#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */ -+#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */ -+#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */ -+#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */ -+#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */ -+#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */ -+/* This extracts the state for congestion group 'n' from a query response. -+ * Eg. -+ * u8 cgr = [...]; -+ * struct qm_mc_result *res = [...]; -+ * printf("congestion group %d congestion state: %d\n", cgr, -+ * QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr)); -+ */ -+#define __CGR_WORD(num) (num >> 5) -+#define __CGR_SHIFT(num) (num & 0x1f) -+#define __CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3) -+static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p, -+ u8 cgr) -+{ -+ return be32_to_cpu(p->__state[__CGR_WORD(cgr)]) & -+ (0x80000000 >> __CGR_SHIFT(cgr)); -+} -+ -+ -+/*********************/ -+/* Utility interface */ -+/*********************/ -+ -+/* Represents an allocator over a range of FQIDs. NB, accesses are not locked, -+ * spinlock them yourself if needed. */ -+struct qman_fqid_pool; -+ -+/* Create/destroy a FQID pool, num must be a multiple of 32. NB, _destroy() -+ * always succeeds, but returns non-zero if there were "leaked" FQID -+ * allocations. */ -+struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num); -+int qman_fqid_pool_destroy(struct qman_fqid_pool *pool); -+/* Alloc/free a FQID from the range. _alloc() returns zero for success. 
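A hedged usage
-+ * sketch (names hypothetical; error handling elided):
-+ *	struct qman_fqid_pool *p = qman_fqid_pool_create(0x100, 64);
-+ *	u32 fqid;
-+ *	if (p && !qman_fqid_pool_alloc(p, &fqid)) {
-+ *		[... use 'fqid' ...]
-+ *		qman_fqid_pool_free(p, fqid);
-+ *	}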
*/ -+int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid); -+void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid); -+u32 qman_fqid_pool_used(struct qman_fqid_pool *pool); -+ -+/*******************************************************************/ -+/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */ -+/*******************************************************************/ -+ -+ /* Portal and Frame Queues */ -+ /* ----------------------- */ -+/* Represents a managed portal */ -+struct qman_portal; -+ -+/* This object type represents Qman frame queue descriptors (FQD), it is -+ * cacheline-aligned, and initialised by qman_create_fq(). The structure is -+ * defined further down. */ -+struct qman_fq; -+ -+/* This object type represents a Qman congestion group, it is defined further -+ * down. */ -+struct qman_cgr; -+ -+struct qman_portal_config { -+ /* If the caller enables DQRR stashing (and thus wishes to operate the -+ * portal from only one cpu), this is the logical CPU that the portal -+ * will stash to. Whether stashing is enabled or not, this setting is -+ * also used for any "core-affine" portals, ie. default portals -+ * associated to the corresponding cpu. -1 implies that there is no core -+ * affinity configured. */ -+ int cpu; -+ /* portal interrupt line */ -+ int irq; -+ /* the unique index of this portal */ -+ u32 index; -+ /* Is this portal shared? (If so, it has coarser locking and demuxes -+ * processing on behalf of other CPUs.) */ -+ int is_shared; -+ /* The portal's dedicated channel id, use this value for initialising -+ * frame queues to target this portal when scheduled. */ -+ u16 channel; -+ /* A mask of which pool channels this portal has dequeue access to -+ * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask) */ -+ u32 pools; -+}; -+ -+/* This enum, and the callback type that returns it, are used when handling -+ * dequeued frames via DQRR. Note that for "null" callbacks registered with the -+ * portal object (for handling dequeues that do not demux because contextB is -+ * NULL), the return value *MUST* be qman_cb_dqrr_consume. */ -+enum qman_cb_dqrr_result { -+ /* DQRR entry can be consumed */ -+ qman_cb_dqrr_consume, -+ /* Like _consume, but requests parking - FQ must be held-active */ -+ qman_cb_dqrr_park, -+ /* Does not consume, for DCA mode only. This allows out-of-order -+ * consumes by explicit calls to qman_dca() and/or the use of implicit -+ * DCA via EQCR entries. */ -+ qman_cb_dqrr_defer, -+ /* Stop processing without consuming this ring entry. Exits the current -+ * qman_poll_dqrr() or interrupt-handling, as appropriate. If within an -+ * interrupt handler, the callback would typically call -+ * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value, -+ * otherwise the interrupt will reassert immediately. */ -+ qman_cb_dqrr_stop, -+ /* Like qman_cb_dqrr_stop, but consumes the current entry. */ -+ qman_cb_dqrr_consume_stop -+}; -+typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm, -+ struct qman_fq *fq, -+ const struct qm_dqrr_entry *dqrr); -+ -+/* This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They -+ * are always consumed after the callback returns. */ -+typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq, -+ const struct qm_mr_entry *msg); -+ -+/* This callback type is used when handling DCP ERNs */ -+typedef void (*qman_cb_dc_ern)(struct qman_portal *qm, -+ const struct qm_mr_entry *msg); -+ -+/* s/w-visible states. Ie. 
-+ * held-active + held-suspended are just "sched". Things like "retired" will not
-+ * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
-+ * then, to indicate it's completing and to gate attempts to retry the retire
-+ * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
-+ * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
-+ * index rather than the FQ that ring entry corresponds to), so repeated park
-+ * commands are allowed (if you're silly enough to try) but won't change FQ
-+ * state, and the resulting park notifications move FQs from "sched" to
-+ * "parked". */
-+enum qman_fq_state {
-+ qman_fq_state_oos,
-+ qman_fq_state_parked,
-+ qman_fq_state_sched,
-+ qman_fq_state_retired
-+};
-+
-+/* Frame queue objects (struct qman_fq) are stored within memory passed to
-+ * qman_create_fq(), as this allows stashing of caller-provided demux callback
-+ * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
-+ * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
-+ * they should;
-+ *
-+ * (a) extend the qman_fq structure with their state; eg.
-+ *
-+ * // myfq is allocated and driver_fq callbacks filled in;
-+ * struct my_fq {
-+ * struct qman_fq base;
-+ * int an_extra_field;
-+ * [ ... add other fields to be associated with each FQ ...]
-+ * } *myfq = some_my_fq_allocator();
-+ * struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
-+ *
-+ * // in a dequeue callback, access extra fields from 'fq' via a cast;
-+ * struct my_fq *myfq = (struct my_fq *)fq;
-+ * do_something_with(myfq->an_extra_field);
-+ * [...]
-+ *
-+ * (b) when and if configuring the FQ for context stashing, specify however
-+ * many cachelines are required to stash 'struct my_fq', to accelerate not
-+ * only the Qman driver but the callback as well.
-+ */
-+
-+struct qman_fq_cb {
-+ qman_cb_dqrr dqrr; /* for dequeued frames */
-+ qman_cb_mr ern; /* for s/w ERNs */
-+ qman_cb_mr fqs; /* frame-queue state changes */
-+};
-+
-+struct qman_fq {
-+ /* Caller of qman_create_fq() provides these demux callbacks */
-+ struct qman_fq_cb cb;
-+ /* These are internal to the driver, don't touch. In particular, they
-+ * may change, be removed, or extended (so you shouldn't rely on
-+ * sizeof(qman_fq) being a constant). */
-+ spinlock_t fqlock;
-+ u32 fqid;
-+ volatile unsigned long flags;
-+ enum qman_fq_state state;
-+ int cgr_groupid;
-+ struct rb_node node;
-+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-+ u32 key;
-+#endif
-+};
-+
-+/* This callback type is used when handling congestion group entry/exit.
-+ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
*/ -+typedef void (*qman_cb_cgr)(struct qman_portal *qm, -+ struct qman_cgr *cgr, int congested); -+ -+struct qman_cgr { -+ /* Set these prior to qman_create_cgr() */ -+ u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/ -+ qman_cb_cgr cb; -+ /* These are private to the driver */ -+ u16 chan; /* portal channel this object is created on */ -+ struct list_head node; -+}; -+ -+/* Flags to qman_create_fq() */ -+#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */ -+#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */ -+#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */ -+#define QMAN_FQ_FLAG_LOCKED 0x00000008 /* multi-core locking */ -+#define QMAN_FQ_FLAG_AS_IS 0x00000010 /* query h/w state */ -+#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */ -+ -+/* Flags to qman_destroy_fq() */ -+#define QMAN_FQ_DESTROY_PARKED 0x00000001 /* FQ can be parked or OOS */ -+ -+/* Flags from qman_fq_state() */ -+#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */ -+#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */ -+#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */ -+#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */ -+#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */ -+#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */ -+ -+/* Flags to qman_init_fq() */ -+#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */ -+#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */ -+ -+/* Flags to qman_volatile_dequeue() */ -+#ifdef CONFIG_FSL_DPA_CAN_WAIT -+#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */ -+#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */ -+#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */ -+#endif -+ -+/* Flags to qman_enqueue(). NB, the strange numbering is to align with hardware, -+ * bit-wise. (NB: the PME API is sensitive to these precise numberings too, so -+ * any change here should be audited in PME.) */ -+#ifdef CONFIG_FSL_DPA_CAN_WAIT -+#define QMAN_ENQUEUE_FLAG_WAIT 0x00010000 /* wait if EQCR is full */ -+#define QMAN_ENQUEUE_FLAG_WAIT_INT 0x00020000 /* if wait, interruptible? */ -+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC -+#define QMAN_ENQUEUE_FLAG_WAIT_SYNC 0x00000004 /* if wait, until consumed? */ -+#endif -+#endif -+#define QMAN_ENQUEUE_FLAG_WATCH_CGR 0x00080000 /* watch congestion state */ -+#define QMAN_ENQUEUE_FLAG_DCA 0x00008000 /* perform enqueue-DCA */ -+#define QMAN_ENQUEUE_FLAG_DCA_PARK 0x00004000 /* If DCA, requests park */ -+#define QMAN_ENQUEUE_FLAG_DCA_PTR(p) /* If DCA, p is DQRR entry */ \ -+ (((u32)(p) << 2) & 0x00000f00) -+#define QMAN_ENQUEUE_FLAG_C_GREEN 0x00000000 /* choose one C_*** flag */ -+#define QMAN_ENQUEUE_FLAG_C_YELLOW 0x00000008 -+#define QMAN_ENQUEUE_FLAG_C_RED 0x00000010 -+#define QMAN_ENQUEUE_FLAG_C_OVERRIDE 0x00000018 -+/* For the ORP-specific qman_enqueue_orp() variant; -+ * - this flag indicates "Not Last In Sequence", ie. all but the final fragment -+ * of a frame. */ -+#define QMAN_ENQUEUE_FLAG_NLIS 0x01000000 -+/* - this flag performs no enqueue but fills in an ORP sequence number that -+ * would otherwise block it (eg. if a frame has been dropped). */ -+#define QMAN_ENQUEUE_FLAG_HOLE 0x02000000 -+/* - this flag performs no enqueue but advances NESN to the given sequence -+ * number. 
*/
-+#define QMAN_ENQUEUE_FLAG_NESN 0x04000000
-+
-+/* Flags to qman_modify_cgr() */
-+#define QMAN_CGR_FLAG_USE_INIT 0x00000001
-+#define QMAN_CGR_MODE_FRAME 0x00000001
-+
-+ /* Portal Management */
-+ /* ----------------- */
-+/**
-+ * qman_get_portal_config - get portal configuration settings
-+ *
-+ * This returns a read-only view of the current cpu's affine portal settings.
-+ */
-+const struct qman_portal_config *qman_get_portal_config(void);
-+
-+/**
-+ * qman_irqsource_get - return the portal work that is interrupt-driven
-+ *
-+ * Returns a bitmask of QM_PIRQ_**I processing sources that are currently
-+ * enabled for interrupt handling on the current cpu's affine portal. These
-+ * sources will trigger the portal interrupt and the interrupt handler (or a
-+ * tasklet/bottom-half it defers to) will perform the corresponding processing
-+ * work. The qman_poll_***() functions will only process sources that are not in
-+ * this bitmask. If the current CPU is sharing a portal hosted on another CPU,
-+ * this always returns zero.
-+ */
-+u32 qman_irqsource_get(void);
-+
-+/**
-+ * qman_irqsource_add - add processing sources to be interrupt-driven
-+ * @bits: bitmask of QM_PIRQ_**I processing sources
-+ *
-+ * Adds processing sources that should be interrupt-driven (rather than
-+ * processed via qman_poll_***() functions). Returns zero for success, or
-+ * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
-+ */
-+int qman_irqsource_add(u32 bits);
-+
-+/**
-+ * qman_irqsource_remove - remove processing sources from being interrupt-driven
-+ * @bits: bitmask of QM_PIRQ_**I processing sources
-+ *
-+ * Removes processing sources from being interrupt-driven, so that they will
-+ * instead be processed via qman_poll_***() functions. Returns zero for success,
-+ * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
-+ */
-+int qman_irqsource_remove(u32 bits);
-+
-+/**
-+ * qman_affine_cpus - return a mask of cpus that have affine portals
-+ */
-+const cpumask_t *qman_affine_cpus(void);
-+
-+/**
-+ * qman_affine_channel - return the channel ID of a portal
-+ * @cpu: the cpu whose affine portal is the subject of the query
-+ *
-+ * If @cpu is -1, the affine portal for the current CPU will be used. It is a
-+ * bug to call this function for any value of @cpu (other than -1) that is not a
-+ * member of the mask returned from qman_affine_cpus().
-+ */
-+u16 qman_affine_channel(int cpu);
-+
-+/**
-+ * qman_get_affine_portal - return the portal pointer affine to cpu
-+ * @cpu: the cpu whose affine portal is the subject of the query
-+ *
-+ */
-+void *qman_get_affine_portal(int cpu);
-+
-+/**
-+ * qman_poll_dqrr - process DQRR (fast-path) entries
-+ * @limit: the maximum number of DQRR entries to process
-+ *
-+ * Use of this function requires that DQRR processing not be interrupt-driven.
-+ * Ie. the value returned by qman_irqsource_get() should not include
-+ * QM_PIRQ_DQRI. If the current CPU is sharing a portal hosted on another CPU,
-+ * this function will return -EINVAL, otherwise the return value is >=0 and
-+ * represents the number of DQRR entries processed.
-+ */
-+int qman_poll_dqrr(unsigned int limit);
-+
-+/**
-+ * qman_poll_slow - process anything (except DQRR) that isn't interrupt-driven.
-+ *
-+ * This function does any portal processing that isn't interrupt-driven.
If the -+ * current CPU is sharing a portal hosted on another CPU, this function will -+ * return (u32)-1, otherwise the return value is a bitmask of QM_PIRQ_* sources -+ * indicating what interrupt sources were actually processed by the call. -+ */ -+u32 qman_poll_slow(void); -+ -+/** -+ * qman_poll - legacy wrapper for qman_poll_dqrr() and qman_poll_slow() -+ * -+ * Dispatcher logic on a cpu can use this to trigger any maintenance of the -+ * affine portal. There are two classes of portal processing in question; -+ * fast-path (which involves demuxing dequeue ring (DQRR) entries and tracking -+ * enqueue ring (EQCR) consumption), and slow-path (which involves EQCR -+ * thresholds, congestion state changes, etc). This function does whatever -+ * processing is not triggered by interrupts. -+ * -+ * Note, if DQRR and some slow-path processing are poll-driven (rather than -+ * interrupt-driven) then this function uses a heuristic to determine how often -+ * to run slow-path processing - as slow-path processing introduces at least a -+ * minimum latency each time it is run, whereas fast-path (DQRR) processing is -+ * close to zero-cost if there is no work to be done. Applications can tune this -+ * behaviour themselves by using qman_poll_dqrr() and qman_poll_slow() directly -+ * rather than going via this wrapper. -+ */ -+void qman_poll(void); -+ -+/** -+ * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal -+ * -+ * Disables DQRR processing of the portal. This is reference-counted, so -+ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to -+ * truly re-enable dequeuing. -+ */ -+void qman_stop_dequeues(void); -+ -+/** -+ * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal -+ * -+ * Enables DQRR processing of the portal. This is reference-counted, so -+ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to -+ * truly re-enable dequeuing. -+ */ -+void qman_start_dequeues(void); -+ -+/** -+ * qman_static_dequeue_add - Add pool channels to the portal SDQCR -+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n) -+ * -+ * Adds a set of pool channels to the portal's static dequeue command register -+ * (SDQCR). The requested pools are limited to those the portal has dequeue -+ * access to. -+ */ -+void qman_static_dequeue_add(u32 pools); -+ -+/** -+ * qman_static_dequeue_del - Remove pool channels from the portal SDQCR -+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n) -+ * -+ * Removes a set of pool channels from the portal's static dequeue command -+ * register (SDQCR). The requested pools are limited to those the portal has -+ * dequeue access to. -+ */ -+void qman_static_dequeue_del(u32 pools); -+ -+/** -+ * qman_static_dequeue_get - return the portal's current SDQCR -+ * -+ * Returns the portal's current static dequeue command register (SDQCR). The -+ * entire register is returned, so if only the currently-enabled pool channels -+ * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK. -+ */ -+u32 qman_static_dequeue_get(void); -+ -+/** -+ * qman_dca - Perform a Discrete Consumption Acknowledgement -+ * @dq: the DQRR entry to be consumed -+ * @park_request: indicates whether the held-active @fq should be parked -+ * -+ * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had -+ * previously returned 'qman_cb_dqrr_defer'. 
NB, as with the other APIs, this
-+ * does not take a 'portal' argument but implies the core affine portal from the
-+ * cpu that is currently executing the function. For reasons of locking, this
-+ * function must be called from the same CPU as that which processed the DQRR
-+ * entry in the first place.
-+ */
-+void qman_dca(struct qm_dqrr_entry *dq, int park_request);
-+
-+/**
-+ * qman_eqcr_is_empty - Determine if portal's EQCR is empty
-+ *
-+ * For use in situations where a cpu-affine caller needs to determine when all
-+ * enqueues for the local portal have been processed by Qman but can't use the
-+ * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
-+ * The function forces tracking of EQCR consumption (which normally doesn't
-+ * happen until enqueue processing needs to find space to put new enqueue
-+ * commands), and returns zero if the ring still has unprocessed entries,
-+ * non-zero if it is empty.
-+ */
-+int qman_eqcr_is_empty(void);
-+
-+/**
-+ * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
-+ * @handler: callback for processing DCP ERNs
-+ * @affine: whether this handler is specific to the locally affine portal
-+ *
-+ * If a hardware block's interface to Qman (ie. its direct-connect portal, or
-+ * DCP) is configured not to receive enqueue rejections, then any enqueues
-+ * through that DCP that are rejected will be sent to a given software portal.
-+ * If @affine is non-zero, then this handler will only be used for DCP ERNs
-+ * received on the portal affine to the current CPU. If multiple CPUs share a
-+ * portal and they all call this function, they will be setting the handler for
-+ * the same portal! If @affine is zero, then this handler will be global to all
-+ * portals handled by this instance of the driver. Only those portals that do
-+ * not have their own affine handler will use the global handler.
-+ */
-+void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
-+
-+ /* FQ management */
-+ /* ------------- */
-+/**
-+ * qman_create_fq - Allocates a FQ
-+ * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
-+ * @flags: bit-mask of QMAN_FQ_FLAG_*** options
-+ * @fq: memory for storing the 'fq', with callbacks filled in
-+ *
-+ * Creates a frame queue object for the given @fqid, unless the
-+ * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
-+ * dynamically allocated (or the function fails if none are available). Once
-+ * created, the caller should not touch the memory at 'fq' except as extended to
-+ * adjacent memory for user-defined fields (see the definition of "struct
-+ * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
-+ * pre-existing frame-queues that aren't to be otherwise interfered with; it
-+ * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
-+ * causes the driver to honour any contextB modifications requested in the
-+ * qm_init_fq() API, as this indicates the frame queue will be consumed by a
-+ * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
-+ * software portals, the contextB field is controlled by the driver and can't be
-+ * modified by the caller. If the AS_IS flag is specified, management commands
-+ * will be used to query state for frame queue @fqid and construct a frame
-+ * queue object based on that, rather than assuming/requiring that it be Out of
-+ * Service.
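-+ *
-+ * For illustration only, a minimal create sketch (the 'my_*' names are
-+ * hypothetical caller code, not part of this API), building on the
-+ * 'struct my_fq' example given earlier in this header:
-+ *
-+ * // fill in the demux callbacks before creating the FQ
-+ * struct qman_fq *fq = &myfq->base;
-+ * fq->cb.dqrr = my_dqrr_cb; // dequeued frames
-+ * fq->cb.ern = my_ern_cb; // enqueue rejection messages
-+ * fq->cb.fqs = my_fqs_cb; // FQ state-change messages
-+ * if (qman_create_fq(my_fqid, 0, fq))
-+ * pr_err("qman_create_fq failed\n");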
-+ */
-+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
-+
-+/**
-+ * qman_destroy_fq - Deallocates a FQ
-+ * @fq: the frame queue object to release
-+ * @flags: bit-mask of QMAN_FQ_DESTROY_*** options
-+ *
-+ * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
-+ * not deallocated but the caller regains ownership, to do with as desired. The
-+ * FQ must be in the 'out-of-service' state unless the QMAN_FQ_DESTROY_PARKED
-+ * flag is specified, in which case it may also be in the 'parked' state.
-+ */
-+void qman_destroy_fq(struct qman_fq *fq, u32 flags);
-+
-+/**
-+ * qman_fq_fqid - Queries the frame queue ID of a FQ object
-+ * @fq: the frame queue object to query
-+ */
-+u32 qman_fq_fqid(struct qman_fq *fq);
-+
-+/**
-+ * qman_fq_state - Queries the state of a FQ object
-+ * @fq: the frame queue object to query
-+ * @state: pointer to state enum to return the FQ scheduling state
-+ * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
-+ *
-+ * Queries the state of the FQ object, without performing any h/w commands.
-+ * This captures the state, as seen by the driver, at the time the function
-+ * executes.
-+ */
-+void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
-+
-+/**
-+ * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
-+ * @fq: the frame queue object to modify, must be 'parked' or new.
-+ * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
-+ * @opts: the FQ-modification settings, as defined in the low-level API
-+ *
-+ * The @opts parameter comes from the low-level portal API. Select
-+ * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
-+ * rather than parked. NB, @opts can be NULL.
-+ *
-+ * Note that some fields and options within @opts may be ignored or overwritten
-+ * by the driver;
-+ * 1. the 'count' and 'fqid' fields are always ignored (this operation only
-+ * affects one frame queue: @fq).
-+ * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
-+ * 'fqd' structure's 'context_b' field are sometimes overwritten;
-+ * - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
-+ * initialised to a value used by the driver for demux.
-+ * - if context_b is initialised for demux, so is context_a in case stashing
-+ * is requested (see item 4).
-+ * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
-+ * objects.)
-+ * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
-+ * 'dest::channel' field will be overwritten to match the portal used to issue
-+ * the command. If the WE_DESTWQ write-enable bit had already been set by the
-+ * caller, the channel workqueue will be left as-is, otherwise the write-enable
-+ * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
-+ * isn't set, the destination channel/workqueue fields and the write-enable bit
-+ * are left as-is.
-+ * 4. if the driver overwrites context_a/b for demux, then if
-+ * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
-+ * context_a.address fields and will leave the stashing fields provided by the
-+ * user alone, otherwise it will zero out the context_a.stashing fields.
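-+ *
-+ * A sketch of a typical call (illustrative; the macro and field names follow
-+ * the WE_DESTWQ and 'dest::channel' references in item 3 above, and should be
-+ * checked against the low-level API):
-+ *
-+ * struct qm_mcc_initfq opts;
-+ * memset(&opts, 0, sizeof(opts));
-+ * opts.we_mask = QM_INITFQ_WE_DESTWQ; // write the destination fields
-+ * opts.fqd.dest.channel = qman_affine_channel(-1); // this cpu's portal
-+ * opts.fqd.dest.wq = 3; // workqueue/priority 3
-+ * if (qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts))
-+ * pr_err("qman_init_fq failed\n");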
-+ */
-+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
-+
-+/**
-+ * qman_schedule_fq - Schedules a FQ
-+ * @fq: the frame queue object to schedule, must be 'parked'
-+ *
-+ * Schedules the frame queue, which must be Parked; this takes it to
-+ * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
-+ */
-+int qman_schedule_fq(struct qman_fq *fq);
-+
-+/**
-+ * qman_retire_fq - Retires a FQ
-+ * @fq: the frame queue object to retire
-+ * @flags: FQ flags (as per qman_fq_state) if retirement completes immediately
-+ *
-+ * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
-+ * the retirement was started asynchronously, otherwise it returns negative for
-+ * failure. When this function returns zero, @flags is set to indicate whether
-+ * the retired FQ is empty and/or whether it has any ORL fragments (to show up
-+ * as ERNs). Otherwise the corresponding flags will be known when a subsequent
-+ * FQRN message shows up on the portal's message ring.
-+ *
-+ * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
-+ * Active state), the completion will be via the message ring as a FQRN - but
-+ * the corresponding callback may occur before this function returns!! Ie. the
-+ * caller should be prepared to accept the callback as the function is called,
-+ * not only once it has returned.
-+ */
-+int qman_retire_fq(struct qman_fq *fq, u32 *flags);
-+
-+/**
-+ * qman_oos_fq - Puts a FQ "out of service"
-+ * @fq: the frame queue object to be put out-of-service, must be 'retired'
-+ *
-+ * The frame queue must be retired and empty, and if any order restoration list
-+ * was released as ERNs at the time of retirement, they must all be consumed.
-+ */
-+int qman_oos_fq(struct qman_fq *fq);
-+
-+/**
-+ * qman_fq_flow_control - Set the XON/XOFF state of a FQ
-+ * @fq: the frame queue object to be set to XON/XOFF state, must not be in the
-+ * 'oos', 'retired' or 'parked' state
-+ * @xon: boolean to set fq in XON or XOFF state
-+ *
-+ * The frame queue should be in the Tentatively Scheduled or Truly Scheduled
-+ * state, otherwise the IFSI interrupt will be asserted.
-+ */
-+int qman_fq_flow_control(struct qman_fq *fq, int xon);
-+
-+/**
-+ * qman_query_fq - Queries FQD fields (via h/w query command)
-+ * @fq: the frame queue object to be queried
-+ * @fqd: storage for the queried FQD fields
-+ */
-+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
-+
-+/**
-+ * qman_query_fq_np - Queries non-programmable FQD fields
-+ * @fq: the frame queue object to be queried
-+ * @np: storage for the queried FQD fields
-+ */
-+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
-+
-+/**
-+ * qman_query_wq - Queries work queue lengths
-+ * @query_dedicated: If non-zero, query length of WQs in the channel dedicated
-+ * to this software portal. Otherwise, query length of WQs in a
-+ * channel specified in wq.
-+ * @wq: storage for the queried WQs lengths. Also specifies the channel to
-+ * query if query_dedicated is zero.
-+ */
-+int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
-+
-+/**
-+ * qman_volatile_dequeue - Issue a volatile dequeue command
-+ * @fq: the frame queue object to dequeue from
-+ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
-+ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
-+ *
-+ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
-+ * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
-+ * the VDQCR is already in use, otherwise returns non-zero for failure. If
-+ * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
-+ * the VDQCR command has finished executing (ie. once the callback for the last
-+ * DQRR entry resulting from the VDQCR command has been called). If not using
-+ * the FINISH flag, completion can be determined either by detecting the
-+ * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
-+ * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
-+ * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
-+ * "flags" retrieved from qman_fq_state().
-+ */
-+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
-+
-+/**
-+ * qman_enqueue - Enqueue a frame to a frame queue
-+ * @fq: the frame queue object to enqueue to
-+ * @fd: a descriptor of the frame to be enqueued
-+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
-+ *
-+ * Fills an entry in the EQCR of the affine portal to enqueue the frame
-+ * described by @fd. The descriptor details are copied from @fd to the EQCR
-+ * entry; the 'pid' field is ignored. The return value is non-zero on error,
-+ * such as ring full (and FLAG_WAIT not specified), congestion avoidance
-+ * (FLAG_WATCH_CGR specified), etc. If the ring is full and FLAG_WAIT is
-+ * specified, this function will block. If FLAG_INTERRUPT is set, the EQCI bit
-+ * of the portal interrupt will assert when Qman consumes the EQCR entry
-+ * (subject to "status disable", "enable", and "inhibit" registers). If
-+ * FLAG_DCA is set, Qman will perform an implied "discrete consumption
-+ * acknowledgement" on the dequeue ring's (DQRR) entry, at the ring index
-+ * specified by the FLAG_DCA_PTR(p) macro. (As an alternative to issuing
-+ * explicit DCA actions on DQRR entries, this implicit DCA can delay the
-+ * release of a "held active" frame queue corresponding to a DQRR entry until
-+ * Qman consumes the EQCR entry - providing order-preservation semantics in
-+ * packet-forwarding scenarios.) If FLAG_DCA is set, then FLAG_DCA_PARK can
-+ * also be set to imply that the DQRR consumption acknowledgement should "park
-+ * request" the "held active" frame queue. Ie. when the portal eventually
-+ * releases that frame queue, it will be left in the Parked state rather than
-+ * Tentatively Scheduled or Truly Scheduled. If the portal is watching
-+ * congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag is requested, and
-+ * the FQ is a member of a congestion group, then this function returns
-+ * -EAGAIN if the congestion group is currently congested. Note, this does not
-+ * eliminate ERNs, as the async interface means we can be sending enqueue
-+ * commands to an un-congested FQ that becomes congested before the enqueue
-+ * commands are processed, but it does minimise needless thrashing of an
-+ * already busy hardware resource by throttling many of the to-be-dropped
-+ * enqueues "at the source".
-+ */
-+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
-+
-+typedef int (*qman_cb_precommit) (void *arg);
-+/**
-+ * qman_enqueue_precommit - Enqueue a frame to a frame queue and call cb
-+ * @fq: the frame queue object to enqueue to
-+ * @fd: a descriptor of the frame to be enqueued
-+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
-+ * @cb: user supplied callback function to invoke before writing commit verb.
-+ * @cb_arg: callback function argument
-+ *
-+ * This is similar to qman_enqueue except that it will invoke a user supplied
-+ * callback function just before writing the commit verb. This is useful
-+ * when the user wants to do something *just before* enqueuing the request and
-+ * the enqueue can't fail.
-+ */
-+int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd,
-+ u32 flags, qman_cb_precommit cb, void *cb_arg);
-+
-+/**
-+ * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
-+ * @fq: the frame queue object to enqueue to
-+ * @fd: a descriptor of the frame to be enqueued
-+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
-+ * @orp: the frame queue object used as an order restoration point.
-+ * @orp_seqnum: the sequence number of this frame in the order restoration path
-+ *
-+ * Similar to qman_enqueue(), but with the addition of an Order Restoration
-+ * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
-+ * enqueue operation to employ order restoration. Each frame queue object acts
-+ * as an Order Definition Point (ODP) by providing each frame dequeued from it
-+ * with an incrementing sequence number; this value is generally ignored unless
-+ * that sequence of dequeued frames will need order restoration later. Each
-+ * frame queue object also encapsulates an Order Restoration Point (ORP), which
-+ * is a re-assembly context for re-ordering frames relative to their sequence
-+ * numbers as they are enqueued. The ORP does not have to be within the frame
-+ * queue that receives the enqueued frame, in fact it is usually the frame
-+ * queue from which the frames were originally dequeued. For the purposes of
-+ * order restoration, multiple frames (or "fragments") can be enqueued for a
-+ * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
-+ * enqueues except the final fragment of a given sequence number. Ordering
-+ * between sequence numbers is guaranteed, even if fragments of different
-+ * sequence numbers are interlaced with one another. Fragments of the same
-+ * sequence number will retain the order in which they are enqueued. If no
-+ * enqueue is to be performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
-+ * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been
-+ * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
-+ * sequence number should become the ORP's "Next Expected Sequence Number".
-+ *
-+ * Side note: a frame queue object can be used purely as an ORP, without
-+ * carrying any frames at all. Care should be taken not to deallocate a frame
-+ * queue object that is being actively used as an ORP, as a future allocation
-+ * of the frame queue object may start using the internal ORP before the
-+ * previous use has finished.
-+ */
-+int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
-+ struct qman_fq *orp, u16 orp_seqnum);
-+
-+/**
-+ * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
-+ * @result: is set by the API to the base FQID of the allocated range
-+ * @count: the number of FQIDs required
-+ * @align: required alignment of the allocated range
-+ * @partial: non-zero if the API can return fewer than @count FQIDs
-+ *
-+ * Returns the number of frame queues allocated, or a negative error code. If
-+ * @partial is non-zero, the allocation request may return a smaller range of
-+ * FQs than requested (though alignment will be as requested). If @partial is
-+ * zero, the return value will either be 'count' or negative.
-+ */
-+int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial);
-+static inline int qman_alloc_fqid(u32 *result)
-+{
-+ int ret = qman_alloc_fqid_range(result, 1, 0, 0);
-+ return (ret > 0) ? 0 : ret;
-+}
-+
-+/**
-+ * qman_release_fqid_range - Release the specified range of frame queue IDs
-+ * @fqid: the base FQID of the range to deallocate
-+ * @count: the number of FQIDs in the range
-+ *
-+ * This function can also be used to seed the allocator with ranges of FQIDs
-+ * that it can subsequently allocate from.
-+ */
-+void qman_release_fqid_range(u32 fqid, unsigned int count);
-+static inline void qman_release_fqid(u32 fqid)
-+{
-+ qman_release_fqid_range(fqid, 1);
-+}
-+
-+void qman_seed_fqid_range(u32 fqid, unsigned int count);
-+
-+
-+int qman_shutdown_fq(u32 fqid);
-+
-+/**
-+ * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
-+ * @fqid: the base FQID of the range to reserve
-+ * @count: the number of FQIDs in the range
-+ */
-+int qman_reserve_fqid_range(u32 fqid, unsigned int count);
-+static inline int qman_reserve_fqid(u32 fqid)
-+{
-+ return qman_reserve_fqid_range(fqid, 1);
-+}
-+
-+ /* Pool-channel management */
-+ /* ----------------------- */
-+/**
-+ * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
-+ * @result: is set by the API to the base pool-channel ID of the allocated range
-+ * @count: the number of pool-channel IDs required
-+ * @align: required alignment of the allocated range
-+ * @partial: non-zero if the API can return fewer than @count
-+ *
-+ * Returns the number of pool-channel IDs allocated, or a negative error code.
-+ * If @partial is non-zero, the allocation request may return a smaller range
-+ * than requested (though alignment will be as requested). If @partial is zero,
-+ * the return value will either be 'count' or negative.
-+ */
-+int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
-+static inline int qman_alloc_pool(u32 *result)
-+{
-+ int ret = qman_alloc_pool_range(result, 1, 0, 0);
-+ return (ret > 0) ? 0 : ret;
-+}
-+
-+/**
-+ * qman_release_pool_range - Release the specified range of pool-channel IDs
-+ * @id: the base pool-channel ID of the range to deallocate
-+ * @count: the number of pool-channel IDs in the range
-+ */
-+void qman_release_pool_range(u32 id, unsigned int count);
-+static inline void qman_release_pool(u32 id)
-+{
-+ qman_release_pool_range(id, 1);
-+}
-+
-+/**
-+ * qman_reserve_pool_range - Reserve the specified range of pool-channel IDs
-+ * @id: the base pool-channel ID of the range to reserve
-+ * @count: the number of pool-channel IDs in the range
-+ */
-+int qman_reserve_pool_range(u32 id, unsigned int count);
-+static inline int qman_reserve_pool(u32 id)
-+{
-+ return qman_reserve_pool_range(id, 1);
-+}
-+
-+void qman_seed_pool_range(u32 id, unsigned int count);
-+
-+ /* CGR management */
-+ /* -------------- */
-+/**
-+ * qman_create_cgr - Register a congestion group object
-+ * @cgr: the 'cgr' object, with fields filled in
-+ * @flags: QMAN_CGR_FLAG_* values
-+ * @opts: optional state of CGR settings
-+ *
-+ * Registers this object to receive congestion entry/exit callbacks on the
-+ * portal affine to the cpu on which this API is executed. If opts is
-+ * NULL then only the callback (cgr->cb) function is registered. If @flags
-+ * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
-+ * any unspecified parameters) will be used rather than a modify hw command
-+ * (which only modifies the specified parameters).
-+ */
-+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
-+ struct qm_mcc_initcgr *opts);
-+
-+/**
-+ * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal
-+ * @cgr: the 'cgr' object, with fields filled in
-+ * @flags: QMAN_CGR_FLAG_* values
-+ * @dcp_portal: the DCP portal to which the cgr object is registered.
-+ * @opts: optional state of CGR settings
-+ *
-+ */
-+int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
-+ struct qm_mcc_initcgr *opts);
-+
-+/**
-+ * qman_delete_cgr - Deregisters a congestion group object
-+ * @cgr: the 'cgr' object to deregister
-+ *
-+ * "Unplugs" this CGR object from the portal affine to the cpu on which this API
-+ * is executed. This must be executed on the same affine portal on which it was
-+ * created.
-+ */
-+int qman_delete_cgr(struct qman_cgr *cgr);
-+
-+/**
-+ * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
-+ * @cgr: the 'cgr' object to deregister
-+ *
-+ * This will select the proper CPU and run qman_delete_cgr() there.
-+ */
-+void qman_delete_cgr_safe(struct qman_cgr *cgr);
-+
-+/**
-+ * qman_modify_cgr - Modify CGR fields
-+ * @cgr: the 'cgr' object to modify
-+ * @flags: QMAN_CGR_FLAG_* values
-+ * @opts: the CGR-modification settings
-+ *
-+ * The @opts parameter comes from the low-level portal API, and can be NULL.
-+ * Note that some fields and options within @opts may be ignored or overwritten
-+ * by the driver, in particular the 'cgrid' field is ignored (this operation
-+ * only affects the given CGR object). If @flags contains
-+ * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
-+ * unspecified parameters) will be used rather than a modify hw command (which
-+ * only modifies the specified parameters).
-+ */
-+int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
-+ struct qm_mcc_initcgr *opts);
-+
-+/**
-+* qman_query_cgr - Queries CGR fields
-+* @cgr: the 'cgr' object to query
-+* @result: storage for the queried congestion group record
-+*/
-+int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);
-+
-+/**
-+ * qman_query_congestion - Queries the state of all congestion groups
-+ * @congestion: storage for the queried state of all congestion groups
-+ */
-+int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
-+
-+/**
-+ * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
-+ * @result: is set by the API to the base CGR ID of the allocated range
-+ * @count: the number of CGR IDs required
-+ * @align: required alignment of the allocated range
-+ * @partial: non-zero if the API can return fewer than @count
-+ *
-+ * Returns the number of CGR IDs allocated, or a negative error code.
-+ * If @partial is non-zero, the allocation request may return a smaller range
-+ * than requested (though alignment will be as requested). If @partial is zero,
-+ * the return value will either be 'count' or negative.
-+ */
-+int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
-+static inline int qman_alloc_cgrid(u32 *result)
-+{
-+ int ret = qman_alloc_cgrid_range(result, 1, 0, 0);
-+ return (ret > 0) ? 0 : ret;
-+}
-+
-+/**
-+ * qman_release_cgrid_range - Release the specified range of CGR IDs
-+ * @id: the base CGR ID of the range to deallocate
-+ * @count: the number of CGR IDs in the range
-+ */
-+void qman_release_cgrid_range(u32 id, unsigned int count);
-+static inline void qman_release_cgrid(u32 id)
-+{
-+ qman_release_cgrid_range(id, 1);
-+}
-+
-+/**
-+ * qman_reserve_cgrid_range - Reserve the specified range of CGR IDs
-+ * @id: the base CGR ID of the range to reserve
-+ * @count: the number of CGR IDs in the range
-+ */
-+int qman_reserve_cgrid_range(u32 id, unsigned int count);
-+static inline int qman_reserve_cgrid(u32 id)
-+{
-+ return qman_reserve_cgrid_range(id, 1);
-+}
-+
-+void qman_seed_cgrid_range(u32 id, unsigned int count);
-+
-+
-+ /* Helpers */
-+ /* ------- */
-+/**
-+ * qman_poll_fq_for_init - Check if an FQ has been initialised from OOS
-+ * @fq: the FQ object whose FQID will be initialised by other s/w
-+ *
-+ * In many situations, a FQID is provided for communication between s/w
-+ * entities, and whilst the consumer is responsible for initialising and
-+ * scheduling the FQ, the producer(s) generally create a wrapper FQ object
-+ * and only call qman_enqueue() (no FQ initialisation, scheduling, etc). Ie;
-+ * qman_create_fq(..., QMAN_FQ_FLAG_NO_MODIFY, ...);
-+ * However, data cannot be enqueued to the FQ until it is initialised out of
-+ * the OOS state - this function polls for that condition. It is particularly
-+ * useful for users of IPC functions - each endpoint's Rx FQ is the other
-+ * endpoint's Tx FQ, so each side can initialise and schedule their Rx FQ object
-+ * and then use this API on the (NO_MODIFY) Tx FQ object in order to
-+ * synchronise. The function returns zero for success, +1 if the FQ is still in
-+ * the OOS state, or negative if there was an error.
-+ */
-+static inline int qman_poll_fq_for_init(struct qman_fq *fq)
-+{
-+ struct qm_mcr_queryfq_np np;
-+ int err;
-+ err = qman_query_fq_np(fq, &np);
-+ if (err)
-+ return err;
-+ if ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS)
-+ return 1;
-+ return 0;
-+}
-+
-+ /* -------------- */
-+ /* CEETM :: types */
-+ /* -------------- */
-+/**
-+ * Token Rate Structure
-+ * Shaping rates are based on a "credit" system and a pre-configured h/w
-+ * internal timer. The following type represents a shaper "rate" parameter as a
-+ * fractional number of "tokens". Here's how it works. This (fractional) number
-+ * of tokens is added to the shaper's "credit" every time the h/w timer elapses
-+ * (up to a limit which is set by another shaper parameter). Every time a frame
-+ * is enqueued through a shaper, the shaper deducts as many tokens as there are
-+ * bytes of data in the enqueued frame. A shaper will not allow itself to
-+ * enqueue any frames if its token count is negative. As such;
-+ *
-+ * The rate at which data is enqueued is limited by the
-+ * rate at which tokens are added.
-+ *
-+ * Therefore if the user knows the period between these h/w timer updates in
-+ * seconds, they can calculate the maximum traffic rate of the shaper (in
-+ * bytes-per-second) from the token rate. And vice versa, they can calculate
-+ * the token rate to use in order to achieve a given traffic rate.
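-+ *
-+ * A worked example with illustrative numbers: if the timer period were
-+ * 1 microsecond and the token rate whole=1000, fraction=4096, the shaper
-+ * would gain 1000 + 4096/8192 = 1000.5 tokens (ie. bytes) per period,
-+ * ie. about 1.0005e9 bytes-per-second, or roughly 8 Gbit/s. The
-+ * qman_ceetm_bps2tokenrate()/qman_ceetm_tokenrate2bps() helpers below do
-+ * this arithmetic against the actual h/w timer configuration.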
-+ */
-+struct qm_ceetm_rate {
-+ /* The token rate is; whole + (fraction/8192) */
-+ u32 whole:11; /* 0..2047 */
-+ u32 fraction:13; /* 0..8191 */
-+};
-+
-+struct qm_ceetm_weight_code {
-+ /* The weight code is; 5 msbits + 3 lsbits */
-+ u8 y:5;
-+ u8 x:3;
-+};
-+
-+struct qm_ceetm {
-+ unsigned int idx;
-+ struct list_head sub_portals;
-+ struct list_head lnis;
-+ unsigned int sp_range[2];
-+ unsigned int lni_range[2];
-+};
-+
-+struct qm_ceetm_sp {
-+ struct list_head node;
-+ unsigned int idx;
-+ unsigned int dcp_idx;
-+ int is_claimed;
-+ struct qm_ceetm_lni *lni;
-+};
-+
-+/* Logical Network Interface */
-+struct qm_ceetm_lni {
-+ struct list_head node;
-+ unsigned int idx;
-+ unsigned int dcp_idx;
-+ int is_claimed;
-+ struct qm_ceetm_sp *sp;
-+ struct list_head channels;
-+ int shaper_enable;
-+ int shaper_couple;
-+ int oal;
-+ struct qm_ceetm_rate cr_token_rate;
-+ struct qm_ceetm_rate er_token_rate;
-+ u16 cr_token_bucket_limit;
-+ u16 er_token_bucket_limit;
-+};
-+
-+/* Class Queue Channel */
-+struct qm_ceetm_channel {
-+ struct list_head node;
-+ unsigned int idx;
-+ unsigned int lni_idx;
-+ unsigned int dcp_idx;
-+ struct list_head class_queues;
-+ struct list_head ccgs;
-+ u8 shaper_enable;
-+ u8 shaper_couple;
-+ struct qm_ceetm_rate cr_token_rate;
-+ struct qm_ceetm_rate er_token_rate;
-+ u16 cr_token_bucket_limit;
-+ u16 er_token_bucket_limit;
-+};
-+
-+struct qm_ceetm_ccg;
-+
-+/* This callback type is used when handling congestion entry/exit. The
-+ * 'cb_ctx' value is the opaque value associated with the ccg object.
-+ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
-+ */
-+typedef void (*qman_cb_ccgr)(struct qm_ceetm_ccg *ccg, void *cb_ctx,
-+ int congested);
-+
-+/* Class Congestion Group */
-+struct qm_ceetm_ccg {
-+ struct qm_ceetm_channel *parent;
-+ struct list_head node;
-+ struct list_head cb_node;
-+ qman_cb_ccgr cb;
-+ void *cb_ctx;
-+ unsigned int idx;
-+};
-+
-+/* Class Queue */
-+struct qm_ceetm_cq {
-+ struct qm_ceetm_channel *parent;
-+ struct qm_ceetm_ccg *ccg;
-+ struct list_head node;
-+ unsigned int idx;
-+ int is_claimed;
-+ struct list_head bound_lfqids;
-+ struct list_head binding_node;
-+};
-+
-+/* Logical Frame Queue */
-+struct qm_ceetm_lfq {
-+ struct qm_ceetm_channel *parent;
-+ struct list_head node;
-+ unsigned int idx;
-+ unsigned int dctidx;
-+ u64 context_a;
-+ u32 context_b;
-+ qman_cb_mr ern;
-+};
-+
-+/**
-+ * qman_ceetm_bps2tokenrate - Given a desired rate 'bps' measured in bps
-+ * (ie. bits-per-second), compute the 'token_rate' fraction that best
-+ * approximates that rate.
-+ * @bps: the desired shaper rate in bps.
-+ * @token_rate: the output token rate computed with the given bps.
-+ * @rounding: dictates how to round if an exact conversion is not possible; if
-+ * it is negative then 'token_rate' will round down to the highest value that
-+ * does not exceed the desired rate, if it is positive then 'token_rate' will
-+ * round up to the lowest value that is greater than or equal to the desired
-+ * rate, and if it is zero then it will round to the nearest approximation,
-+ * whether that be up or down.
-+ *
-+ * Return 0 for success, or -EINVAL if prescaler or qman clock is not available.
-+ */
-+int qman_ceetm_bps2tokenrate(u64 bps,
-+ struct qm_ceetm_rate *token_rate,
-+ int rounding);
-+
-+/**
-+ * qman_ceetm_tokenrate2bps - Given a 'token_rate', compute the
-+ * corresponding number of 'bps'.
-+ * @token_rate: the input desired token_rate fraction.
-+ * @bps: the output shaper rate in bps computed with the given token rate.
-+ * @rounding: has the same semantics as the previous function.
-+ *
-+ * Return 0 for success, or -EINVAL if prescaler or qman clock is not available.
-+ */
-+int qman_ceetm_tokenrate2bps(const struct qm_ceetm_rate *token_rate,
-+ u64 *bps,
-+ int rounding);
-+
-+int qman_alloc_ceetm0_channel_range(u32 *result, u32 count, u32 align,
-+ int partial);
-+static inline int qman_alloc_ceetm0_channel(u32 *result)
-+{
-+ int ret = qman_alloc_ceetm0_channel_range(result, 1, 0, 0);
-+ return (ret > 0) ? 0 : ret;
-+}
-+void qman_release_ceetm0_channel_range(u32 channelid, u32 count);
-+static inline void qman_release_ceetm0_channelid(u32 channelid)
-+{
-+ qman_release_ceetm0_channel_range(channelid, 1);
-+}
-+
-+int qman_reserve_ceetm0_channel_range(u32 channelid, u32 count);
-+static inline int qman_reserve_ceetm0_channelid(u32 channelid)
-+{
-+ return qman_reserve_ceetm0_channel_range(channelid, 1);
-+}
-+
-+void qman_seed_ceetm0_channel_range(u32 channelid, u32 count);
-+
-+
-+int qman_alloc_ceetm1_channel_range(u32 *result, u32 count, u32 align,
-+ int partial);
-+static inline int qman_alloc_ceetm1_channel(u32 *result)
-+{
-+ int ret = qman_alloc_ceetm1_channel_range(result, 1, 0, 0);
-+ return (ret > 0) ? 0 : ret;
-+}
-+void qman_release_ceetm1_channel_range(u32 channelid, u32 count);
-+static inline void qman_release_ceetm1_channelid(u32 channelid)
-+{
-+ qman_release_ceetm1_channel_range(channelid, 1);
-+}
-+int qman_reserve_ceetm1_channel_range(u32 channelid, u32 count);
-+static inline int qman_reserve_ceetm1_channelid(u32 channelid)
-+{
-+ return qman_reserve_ceetm1_channel_range(channelid, 1);
-+}
-+
-+void qman_seed_ceetm1_channel_range(u32 channelid, u32 count);
-+
-+
-+int qman_alloc_ceetm0_lfqid_range(u32 *result, u32 count, u32 align,
-+ int partial);
-+static inline int qman_alloc_ceetm0_lfqid(u32 *result)
-+{
-+ int ret = qman_alloc_ceetm0_lfqid_range(result, 1, 0, 0);
-+ return (ret > 0) ? 0 : ret;
-+}
-+void qman_release_ceetm0_lfqid_range(u32 lfqid, u32 count);
-+static inline void qman_release_ceetm0_lfqid(u32 lfqid)
-+{
-+ qman_release_ceetm0_lfqid_range(lfqid, 1);
-+}
-+int qman_reserve_ceetm0_lfqid_range(u32 lfqid, u32 count);
-+static inline int qman_reserve_ceetm0_lfqid(u32 lfqid)
-+{
-+ return qman_reserve_ceetm0_lfqid_range(lfqid, 1);
-+}
-+
-+void qman_seed_ceetm0_lfqid_range(u32 lfqid, u32 count);
-+
-+
-+int qman_alloc_ceetm1_lfqid_range(u32 *result, u32 count, u32 align,
-+ int partial);
-+static inline int qman_alloc_ceetm1_lfqid(u32 *result)
-+{
-+ int ret = qman_alloc_ceetm1_lfqid_range(result, 1, 0, 0);
-+ return (ret > 0) ? 0 : ret;
-+}
-+void qman_release_ceetm1_lfqid_range(u32 lfqid, u32 count);
-+static inline void qman_release_ceetm1_lfqid(u32 lfqid)
-+{
-+ qman_release_ceetm1_lfqid_range(lfqid, 1);
-+}
-+int qman_reserve_ceetm1_lfqid_range(u32 lfqid, u32 count);
-+static inline int qman_reserve_ceetm1_lfqid(u32 lfqid)
-+{
-+ return qman_reserve_ceetm1_lfqid_range(lfqid, 1);
-+}
-+
-+void qman_seed_ceetm1_lfqid_range(u32 lfqid, u32 count);
-+
-+
-+ /* ----------------------------- */
-+ /* CEETM :: sub-portals */
-+ /* ----------------------------- */
-+
-+/**
-+ * qman_ceetm_sp_claim - Claims the given sub-portal, provided it is available
-+ * to us and configured for traffic-management.
-+ * @sp: the returned sub-portal object, if successful.
-+ * @dcp_id: specifies the desired Fman block (and thus the relevant CEETM
-+ * instance).
-+ * @sp_idx: is the desired sub-portal index from 0 to 15.
-+ *
-+ * Returns zero for success, or -ENODEV if the sub-portal is in use, or -EINVAL
-+ * if the sp_idx is out of range.
-+ *
-+ * Note that if there are multiple driver domains (eg. a linux kernel versus
-+ * user-space drivers in USDPAA, or multiple guests running under a hypervisor)
-+ * then a sub-portal may be accessible by more than one instance of a qman
-+ * driver and so it may be claimed multiple times. If this is the case, it is
-+ * up to the system architect to prevent conflicting configuration actions
-+ * coming from the different driver domains. The qman drivers do not have any
-+ * behind-the-scenes coordination to prevent this from happening.
-+ */
-+int qman_ceetm_sp_claim(struct qm_ceetm_sp **sp,
-+ enum qm_dc_portal dcp_idx,
-+ unsigned int sp_idx);
-+
-+/**
-+ * qman_ceetm_sp_release - Releases a previously claimed sub-portal.
-+ * @sp: the sub-portal to be released.
-+ *
-+ * Returns 0 for success, or -EBUSY for failure if the dependencies are not
-+ * released.
-+ */
-+int qman_ceetm_sp_release(struct qm_ceetm_sp *sp);
-+
-+ /* ----------------------------------- */
-+ /* CEETM :: logical network interfaces */
-+ /* ----------------------------------- */
-+
-+/**
-+ * qman_ceetm_lni_claim - Claims an unclaimed LNI.
-+ * @lni: the returned LNI object, if successful.
-+ * @dcp_id: specifies the desired Fman block (and thus the relevant CEETM
-+ * instance)
-+ * @lni_idx: is the desired LNI index.
-+ *
-+ * Returns zero for success, or -EINVAL on failure, which will happen if the LNI
-+ * is not available or has already been claimed (and not yet successfully
-+ * released), or lni_idx is out of range.
-+ *
-+ * Note that there may be multiple driver domains (or instances) that need to
-+ * transmit out the same LNI, so this claim is only guaranteeing exclusivity
-+ * within the domain of the driver being called. See qman_ceetm_sp_claim() and
-+ * qman_ceetm_sp_get_lni() for more information.
-+ */
-+int qman_ceetm_lni_claim(struct qm_ceetm_lni **lni,
-+ enum qm_dc_portal dcp_id,
-+ unsigned int lni_idx);
-+
-+/**
-+ * qman_ceetm_lni_release - Releases a previously claimed LNI.
-+ * @lni: the LNI to be released.
-+ *
-+ * This will only succeed if all dependent objects have been released.
-+ * Returns zero for success, or -EBUSY if the dependencies are not released.
-+ */
-+int qman_ceetm_lni_release(struct qm_ceetm_lni *lni);
-+
-+/**
-+ * qman_ceetm_sp_set_lni
-+ * qman_ceetm_sp_get_lni - Set/get the LNI that the sub-portal is currently
-+ * mapped to.
-+ * @sp: the given sub-portal.
-+ * @lni (in "set" function): the LNI object which the sp will be mapped to.
-+ * @lni_idx (in "get" function): the LNI index which the sp is mapped to.
-+ *
-+ * Returns zero for success. The "set" function returns -EINVAL if this sp-lni
-+ * mapping has already been set or the configure mapping command returns error;
-+ * the "get" function returns -EINVAL if this sp-lni mapping is not set or the
-+ * query mapping command returns error.
-+ *
-+ * This may be useful in situations where multiple driver domains have access
-+ * to the same sub-portals in order to all be able to transmit out the same
-+ * physical interface (perhaps they're on different IP addresses or VPNs, so
-+ * Fman is splitting Rx traffic and here we need to converge Tx traffic). In
-+ * that case, a control-plane is likely to use qman_ceetm_lni_claim() followed
-+ * by qman_ceetm_sp_set_lni() to configure the sub-portal, and other domains
-+ * are likely to use qman_ceetm_sp_get_lni() followed by qman_ceetm_lni_claim()
-+ * in order to determine the LNI that the control-plane had assigned. This is
-+ * why the "get" returns an index, whereas the "set" takes an (already claimed)
-+ * LNI object.
-+ */
-+int qman_ceetm_sp_set_lni(struct qm_ceetm_sp *sp,
-+ struct qm_ceetm_lni *lni);
-+int qman_ceetm_sp_get_lni(struct qm_ceetm_sp *sp,
-+ unsigned int *lni_idx);
-+
-+/**
-+ * qman_ceetm_lni_enable_shaper
-+ * qman_ceetm_lni_disable_shaper - Enables/disables shaping on the LNI.
-+ * @lni: the given LNI.
-+ * @coupled: indicates whether CR and ER are coupled.
-+ * @oal: the overhead accounting length which is added to the actual length of
-+ * each frame when performing shaper calculations.
-+ *
-+ * When the number of (unused) committed-rate tokens reach the committed-rate
-+ * token limit, 'coupled' indicates whether surplus tokens should be added to
-+ * the excess-rate token count (up to the excess-rate token limit).
-+ * When an LNI is claimed, the shaper is disabled by default. The enable
-+ * function will turn on the shaper for this LNI.
-+ * Whenever a claimed LNI is first enabled for shaping, its committed and
-+ * excess token rates and limits are zero, so will need to be changed to do
-+ * anything useful. The shaper can subsequently be enabled/disabled without
-+ * resetting the shaping parameters, but the shaping parameters will be reset
-+ * when the LNI is released.
-+ *
-+ * Returns zero for success, or an errno for the "enable" function as follows:
-+ * a) -EINVAL if the shaper is already enabled,
-+ * b) -EIO if the configure shaper command returns error.
-+ * For the "disable" function, returns:
-+ * a) -EINVAL if the shaper has already been disabled,
-+ * b) -EIO if calling the configure shaper command returns error.
-+ */
-+int qman_ceetm_lni_enable_shaper(struct qm_ceetm_lni *lni, int coupled,
-+ int oal);
-+int qman_ceetm_lni_disable_shaper(struct qm_ceetm_lni *lni);
-+
-+/**
-+ * qman_ceetm_lni_is_shaper_enabled - Check LNI shaper status
-+ * @lni: the given LNI
-+ */
-+int qman_ceetm_lni_is_shaper_enabled(struct qm_ceetm_lni *lni);
-+
-+/**
-+ * qman_ceetm_lni_set_commit_rate
-+ * qman_ceetm_lni_get_commit_rate
-+ * qman_ceetm_lni_set_excess_rate
-+ * qman_ceetm_lni_get_excess_rate - Set/get the shaper CR/ER token rate and
-+ * token limit for the given LNI.
-+ * @lni: the given LNI.
-+ * @token_rate: the desired token rate for "set" function, or the token rate of
-+ * the LNI queried by "get" function.
-+ * @token_limit: the desired token bucket limit for "set" function, or the token
-+ * limit of the given LNI queried by "get" function.
-+ *
-+ * Returns zero for success. The "set" function returns -EINVAL if the given
-+ * LNI is unshaped or -EIO if the configure shaper command returns error.
-+ * The "get" function returns -EINVAL if the token rate or the token limit is
-+ * not set or the query command returns error.
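-+ *
-+ * A short sketch (illustrative only; assumes 'lni' was claimed and its
-+ * shaper enabled as described above, and the 0x1000 bucket limit is an
-+ * arbitrary example value):
-+ *
-+ * struct qm_ceetm_rate rate;
-+ * if (!qman_ceetm_bps2tokenrate(1000000000ULL, &rate, 0)) // ~1 Gbit/s
-+ * qman_ceetm_lni_set_commit_rate(lni, &rate, 0x1000);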
-+ */
-+int qman_ceetm_lni_set_commit_rate(struct qm_ceetm_lni *lni,
-+ const struct qm_ceetm_rate *token_rate,
-+ u16 token_limit);
-+int qman_ceetm_lni_get_commit_rate(struct qm_ceetm_lni *lni,
-+ struct qm_ceetm_rate *token_rate,
-+ u16 *token_limit);
-+int qman_ceetm_lni_set_excess_rate(struct qm_ceetm_lni *lni,
-+ const struct qm_ceetm_rate *token_rate,
-+ u16 token_limit);
-+int qman_ceetm_lni_get_excess_rate(struct qm_ceetm_lni *lni,
-+ struct qm_ceetm_rate *token_rate,
-+ u16 *token_limit);
-+/**
-+ * qman_ceetm_lni_set_commit_rate_bps
-+ * qman_ceetm_lni_get_commit_rate_bps
-+ * qman_ceetm_lni_set_excess_rate_bps
-+ * qman_ceetm_lni_get_excess_rate_bps - Set/get the shaper CR/ER rate
-+ * and token limit for the given LNI.
-+ * @lni: the given LNI.
-+ * @bps: the desired shaping rate in bps for "set" function, or the shaping
-+ * rate of the LNI queried by "get" function.
-+ * @token_limit: the desired token bucket limit for "set" function, or the token
-+ * limit of the given LNI queried by "get" function.
-+ *
-+ * Returns zero for success. The "set" function returns -EINVAL if the given
-+ * LNI is unshaped or -EIO if the configure shaper command returns error.
-+ * The "get" function returns -EINVAL if the token rate or the token limit is
-+ * not set or the query command returns error.
-+ */
-+int qman_ceetm_lni_set_commit_rate_bps(struct qm_ceetm_lni *lni,
-+ u64 bps,
-+ u16 token_limit);
-+int qman_ceetm_lni_get_commit_rate_bps(struct qm_ceetm_lni *lni,
-+ u64 *bps, u16 *token_limit);
-+int qman_ceetm_lni_set_excess_rate_bps(struct qm_ceetm_lni *lni,
-+ u64 bps,
-+ u16 token_limit);
-+int qman_ceetm_lni_get_excess_rate_bps(struct qm_ceetm_lni *lni,
-+ u64 *bps, u16 *token_limit);
-+
-+/**
-+ * qman_ceetm_lni_set_tcfcc
-+ * qman_ceetm_lni_get_tcfcc - Configure/query "Traffic Class Flow Control".
-+ * @lni: the given LNI.
-+ * @cq_level: is between 0 and 15, representing individual class queue levels
-+ * (CQ0 to CQ7 for every channel) and grouped class queue levels (CQ8 to CQ15
-+ * for every channel).
-+ * @traffic_class: is between 0 and 7 when associating a given class queue level
-+ * to a traffic class, or -1 when disabling traffic class flow control for this
-+ * class queue level.
-+ *
-+ * Return zero for success, or -EINVAL if the cq_level or traffic_class is out
-+ * of range as indicated above, or -EIO if the configure/query tcfcc command
-+ * returns error.
-+ *
-+ * Refer to the QMan CEETM traffic class flow control section in the
-+ * Reference Manual.
-+ */
-+int qman_ceetm_lni_set_tcfcc(struct qm_ceetm_lni *lni,
-+ unsigned int cq_level,
-+ int traffic_class);
-+int qman_ceetm_lni_get_tcfcc(struct qm_ceetm_lni *lni,
-+ unsigned int cq_level,
-+ int *traffic_class);
-+
-+ /* ----------------------------- */
-+ /* CEETM :: class queue channels */
-+ /* ----------------------------- */
-+
-+/**
-+ * qman_ceetm_channel_claim - Claims an unclaimed CQ channel that is mapped to
-+ * the given LNI.
-+ * @channel: the returned class queue channel object, if successful.
-+ * @lni: the LNI that the channel belongs to.
-+ *
-+ * Channels are always initially "unshaped".
-+ *
-+ * Return zero for success, or -ENODEV if there is no channel available (all 32
-+ * channels are claimed) or -EINVAL if the channel mapping command returns
-+ * error.
-+ */
-+int qman_ceetm_channel_claim(struct qm_ceetm_channel **channel,
-+ struct qm_ceetm_lni *lni);
-+
-+/**
-+ * qman_ceetm_channel_release - Releases a previously claimed CQ channel.
-+ * @channel: the channel to be released. -+ * -+ * Returns zero for success, or -EBUSY if the dependencies are still in use. -+ * -+ * Note any shaping of the channel will be cleared to leave it in an unshaped -+ * state. -+ */ -+int qman_ceetm_channel_release(struct qm_ceetm_channel *channel); -+ -+/** -+ * qman_ceetm_channel_enable_shaper -+ * qman_ceetm_channel_disable_shaper - Enables/disables shaping on the channel. -+ * @channel: the given channel. -+ * @coupled: indicates whether surplus CR tokens should be added to the -+ * excess-rate token count (up to the excess-rate token limit) when the number -+ * of (unused) committed-rate tokens reaches the committed-rate token limit. -+ * -+ * Whenever a claimed channel is first enabled for shaping, its committed and -+ * excess token rates and limits are zero, so they will need to be changed to do -+ * anything useful. The shaper can subsequently be enabled/disabled without -+ * resetting the shaping parameters, but the shaping parameters will be reset -+ * when the channel is released. -+ * -+ * Return 0 for success, or -EINVAL if the channel shaper is already -+ * enabled/disabled (respectively) or the management command returns error. -+ */ -+int qman_ceetm_channel_enable_shaper(struct qm_ceetm_channel *channel, -+ int coupled); -+int qman_ceetm_channel_disable_shaper(struct qm_ceetm_channel *channel); -+ -+/** -+ * qman_ceetm_channel_is_shaper_enabled - Check channel shaper status. -+ * @channel: the given channel. -+ */ -+int qman_ceetm_channel_is_shaper_enabled(struct qm_ceetm_channel *channel); -+ -+/** -+ * qman_ceetm_channel_set_commit_rate -+ * qman_ceetm_channel_get_commit_rate -+ * qman_ceetm_channel_set_excess_rate -+ * qman_ceetm_channel_get_excess_rate - Set/get channel CR/ER shaper parameters. -+ * @channel: the given channel. -+ * @token_rate: the desired token rate for the "set" function, or the queried -+ * token rate for the "get" function. -+ * @token_limit: the desired token limit for the "set" function, or the queried -+ * token limit for the "get" function. -+ * -+ * Return zero for success. The "set" function returns -EINVAL if the channel -+ * is unshaped, or -EIO if the configure shaper command returns error. The -+ * "get" function returns -EINVAL if the token rate or token limit is not set, -+ * or the query shaper command returns error. -+ */ -+int qman_ceetm_channel_set_commit_rate(struct qm_ceetm_channel *channel, -+ const struct qm_ceetm_rate *token_rate, -+ u16 token_limit); -+int qman_ceetm_channel_get_commit_rate(struct qm_ceetm_channel *channel, -+ struct qm_ceetm_rate *token_rate, -+ u16 *token_limit); -+int qman_ceetm_channel_set_excess_rate(struct qm_ceetm_channel *channel, -+ const struct qm_ceetm_rate *token_rate, -+ u16 token_limit); -+int qman_ceetm_channel_get_excess_rate(struct qm_ceetm_channel *channel, -+ struct qm_ceetm_rate *token_rate, -+ u16 *token_limit); -+/** -+ * qman_ceetm_channel_set_commit_rate_bps -+ * qman_ceetm_channel_get_commit_rate_bps -+ * qman_ceetm_channel_set_excess_rate_bps -+ * qman_ceetm_channel_get_excess_rate_bps - Set/get channel CR/ER shaper -+ * parameters. -+ * @channel: the given channel. -+ * @bps: the desired shaper rate in bps for the "set" function, or the queried -+ * shaper rate in bps for the "get" function. -+ * @token_limit: the desired token limit for the "set" function, or the queried -+ * token limit for the "get" function. -+ * -+ * Return zero for success.
The "set" function returns -EINVAL if the channel -+ * is unshaped, or -EIO if the configure shapper command returns error. The -+ * "get" function returns -EINVAL if token rate of token limit is not set, or -+ * the query shaper command returns error. -+ */ -+int qman_ceetm_channel_set_commit_rate_bps(struct qm_ceetm_channel *channel, -+ u64 bps, u16 token_limit); -+int qman_ceetm_channel_get_commit_rate_bps(struct qm_ceetm_channel *channel, -+ u64 *bps, u16 *token_limit); -+int qman_ceetm_channel_set_excess_rate_bps(struct qm_ceetm_channel *channel, -+ u64 bps, u16 token_limit); -+int qman_ceetm_channel_get_excess_rate_bps(struct qm_ceetm_channel *channel, -+ u64 *bps, u16 *token_limit); -+ -+/** -+ * qman_ceetm_channel_set_weight -+ * qman_ceetm_channel_get_weight - Set/get the weight for unshaped channel -+ * @channel: the given channel. -+ * @token_limit: the desired token limit as the weight of the unshaped channel -+ * for "set" function, or the queried token limit for "get" function. -+ * -+ * The algorithm of unshaped fair queuing (uFQ) is used for unshaped channel. -+ * It allows the unshaped channels to be included in the CR time eligible list, -+ * and thus use the configured CR token limit value as their fair queuing -+ * weight. -+ * -+ * Return zero for success, or -EINVAL if the channel is a shaped channel or -+ * the management command returns error. -+ */ -+int qman_ceetm_channel_set_weight(struct qm_ceetm_channel *channel, -+ u16 token_limit); -+int qman_ceetm_channel_get_weight(struct qm_ceetm_channel *channel, -+ u16 *token_limit); -+ -+/** -+ * qman_ceetm_channel_set_group -+ * qman_ceetm_channel_get_group - Set/get the grouping of the class scheduler. -+ * @channel: the given channel. -+ * @group_b: indicates whether there is group B in this channel. -+ * @prio_a: the priority of group A. -+ * @prio_b: the priority of group B. -+ * -+ * There are 8 individual class queues (CQ0-CQ7), and 8 grouped class queues -+ * (CQ8-CQ15). If 'group_b' is zero, then all the grouped class queues are in -+ * group A, otherwise they are split into group A (CQ8-11) and group B -+ * (CQ12-C15). The individual class queues and the group(s) are in strict -+ * priority order relative to each other. Within the group(s), the scheduling -+ * is not strict priority order, but the result of scheduling within a group -+ * is in strict priority order relative to the other class queues in the -+ * channel. 'prio_a' and 'prio_b' control the priority order of the groups -+ * relative to the individual class queues, and take values from 0-7. Eg. if -+ * 'group_b' is non-zero, 'prio_a' is 2 and 'prio_b' is 6, then the strict -+ * priority order would be; -+ * CQ0, CQ1, CQ2, GROUPA, CQ3, CQ4, CQ5, CQ6, GROUPB, CQ7 -+ * -+ * Return 0 for success. For "set" function, returns -EINVAL if prio_a or -+ * prio_b are out of the range 0 - 7 (priority of group A or group B can not -+ * be 0, CQ0 is always the highest class queue in this channel.), or -EIO if -+ * the configure scheduler command returns error. For "get" function, return -+ * -EINVAL if the query scheduler command returns error. 
-+ */ -+int qman_ceetm_channel_set_group(struct qm_ceetm_channel *channel, -+ int group_b, -+ unsigned int prio_a, -+ unsigned int prio_b); -+int qman_ceetm_channel_get_group(struct qm_ceetm_channel *channel, -+ int *group_b, -+ unsigned int *prio_a, -+ unsigned int *prio_b); -+ -+/** -+ * qman_ceetm_channel_set_group_cr_eligibility -+ * qman_ceetm_channel_set_group_er_eligibility - Set channel group eligibility -+ * @channel: the given channel object -+ * @group_b: indicates whether there is group B in this channel. -+ * @cre: the commit rate eligibility, 1 for enable, 0 for disable. -+ * @ere: the excess rate eligibility, 1 for enable, 0 for disable. -+ * -+ * Return zero for success, or -EINVAL if eligibility setting fails. -+*/ -+int qman_ceetm_channel_set_group_cr_eligibility(struct qm_ceetm_channel -+ *channel, int group_b, int cre); -+int qman_ceetm_channel_set_group_er_eligibility(struct qm_ceetm_channel -+ *channel, int group_b, int ere); -+ -+/** -+ * qman_ceetm_channel_set_cq_cr_eligibility -+ * qman_ceetm_channel_set_cq_er_eligibility - Set channel cq eligibility -+ * @channel: the given channel object -+ * @idx: is from 0 to 7 (representing CQ0 to CQ7). -+ * @cre: the commit rate eligibility, 1 for enable, 0 for disable. -+ * @ere: the excess rate eligibility, 1 for enable, 0 for disable. -+ * -+ * Return zero for success, or -EINVAL if eligibility setting fails. -+*/ -+int qman_ceetm_channel_set_cq_cr_eligibility(struct qm_ceetm_channel *channel, -+ unsigned int idx, int cre); -+int qman_ceetm_channel_set_cq_er_eligibility(struct qm_ceetm_channel *channel, -+ unsigned int idx, int ere); -+ -+ /* --------------------- */ -+ /* CEETM :: class queues */ -+ /* --------------------- */ -+ -+/** -+ * qman_ceetm_cq_claim - Claims an individual class queue. -+ * @cq: the returned class queue object, if successful. -+ * @channel: the class queue channel. -+ * @idx: is from 0 to 7 (representing CQ0 to CQ7). -+ * @ccg: represents the class congestion group that this class queue should be -+ * subscribed to, or NULL if no congestion group membership is desired. -+ * -+ * Returns zero for success, or -EINVAL if @idx is out of the range 0 - 7 or -+ * if this class queue has already been claimed, or the configure class queue -+ * command returns error, or returns -ENOMEM if allocating CQ memory fails. -+ */ -+int qman_ceetm_cq_claim(struct qm_ceetm_cq **cq, -+ struct qm_ceetm_channel *channel, -+ unsigned int idx, -+ struct qm_ceetm_ccg *ccg); -+ -+/** -+ * qman_ceetm_cq_claim_A - Claims a class queue in group A. -+ * @cq: the returned class queue object, if successful. -+ * @channel: the class queue channel. -+ * @idx: is from 8 to 15 if only group A exists, otherwise, it is from 8 to 11. -+ * @ccg: represents the class congestion group that this class queue should be -+ * subscribed to, or NULL if no congestion group membership is desired. -+ * -+ * Return zero for success, or -EINVAL if @idx is out of the range or if -+ * this class queue has already been claimed or the configure class queue -+ * command returns error, or returns -ENOMEM if allocating CQ memory fails. -+ */ -+int qman_ceetm_cq_claim_A(struct qm_ceetm_cq **cq, -+ struct qm_ceetm_channel *channel, -+ unsigned int idx, -+ struct qm_ceetm_ccg *ccg); -+ -+/** -+ * qman_ceetm_cq_claim_B - Claims a class queue in group B. -+ * @cq: the returned class queue object, if successful. -+ * @channel: the class queue channel. -+ * @idx: is from 0 to 3 (CQ12 to CQ15). -+ * @ccg: represents the class congestion group that this class queue should be -+ * subscribed to, or NULL if no congestion group membership is desired.
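A claim sketch for the lowest group B class queue (illustrative only; idx 0 maps to CQ12 as described above, and no congestion group is used):

	struct qm_ceetm_cq *cq;
	int err = qman_ceetm_cq_claim_B(&cq, channel, 0, NULL);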
-+ * -+ * Return zero for success, or -EINVAL if @idx is out of the range or if -+ * this class queue has already been claimed or the configure class queue -+ * command returns error, or returns -ENOMEM if allocating CQ memory fails. -+ */ -+int qman_ceetm_cq_claim_B(struct qm_ceetm_cq **cq, -+ struct qm_ceetm_channel *channel, -+ unsigned int idx, -+ struct qm_ceetm_ccg *ccg); -+ -+/** -+ * qman_ceetm_cq_release - Releases a previously claimed class queue. -+ * @cq: The class queue to be released. -+ * -+ * Return zero for success, or -EBUSY if the dependent objects (e.g. logical -+ * FQIDs) have not been released. -+ */ -+int qman_ceetm_cq_release(struct qm_ceetm_cq *cq); -+ -+/** -+ * qman_ceetm_set_queue_weight -+ * qman_ceetm_get_queue_weight - Configure/query the weight of a grouped class -+ * queue. -+ * @cq: the given class queue. -+ * @weight_code: the desired weight code to set for the given class queue for -+ * the "set" function, or the queried weight code for the "get" function. -+ * -+ * Grouped class queues have a default weight code of zero, which corresponds to -+ * a scheduler weighting of 1. This function can be used to modify a grouped -+ * class queue to another weight. (Use the helpers qman_ceetm_wbfs2ratio() -+ * and qman_ceetm_ratio2wbfs() to convert between these 'weight_code' values -+ * and the corresponding sharing weight.) -+ * -+ * Returns zero for success, or -EIO if the configure weight command returns -+ * error for the "set" function, or -EINVAL if the query command returns -+ * error for the "get" function. -+ * See section "CEETM Weighted Scheduling among Grouped Classes" in the -+ * Reference Manual for details on weights and weight codes. -+ */ -+int qman_ceetm_set_queue_weight(struct qm_ceetm_cq *cq, -+ struct qm_ceetm_weight_code *weight_code); -+int qman_ceetm_get_queue_weight(struct qm_ceetm_cq *cq, -+ struct qm_ceetm_weight_code *weight_code); -+ -+/** -+ * qman_ceetm_set_queue_weight_in_ratio -+ * qman_ceetm_get_queue_weight_in_ratio - Configure/query the weight of a -+ * grouped class queue. -+ * @cq: the given class queue. -+ * @ratio: the weight in ratio. It should be the real ratio number multiplied -+ * by 100 to avoid fractions. -+ * -+ * Returns zero for success, or -EIO if the configure weight command returns -+ * error for the "set" function, or -EINVAL if the query command returns -+ * error for the "get" function. -+ */ -+int qman_ceetm_set_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 ratio); -+int qman_ceetm_get_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 *ratio); -+ -+/* Weights are encoded using a pseudo-exponential scheme. The weight codes 0, -+ * 32, 64, [...] correspond to weights of 1, 2, 4, [...]. The weights -+ * corresponding to intermediate weight codes are calculated using linear -+ * interpolation on the inverted values. Or put another way, the inverse weights -+ * for each 32nd weight code are 1, 1/2, 1/4, [...], and so the intervals -+ * between these are divided linearly into 32 intermediate values, the inverses -+ * of which form the remaining weight codes. -+ * -+ * The Weighted Bandwidth Fair Scheduling (WBFS) algorithm provides a form of -+ * scheduling within a group of class queues (group A or B). Weights are used to -+ * normalise the class queues to an underlying BFS algorithm where all class -+ * queues are assumed to require "equal bandwidth". So the weights referred to -+ * by the weight codes act as divisors on the size of frames being enqueued. I.e., if
-+ * one class queue in a group is assigned a weight of 2 whilst the other class -+ * queues in the group keep the default weight of 1, then the WBFS scheduler -+ * will effectively treat all frames enqueued on the weight-2 class queue as -+ * having half the number of bytes they really have. I.e., if all other things -+ * are equal, that class queue would get twice as much bytes-per-second -+ * bandwidth as the others. So weights should be chosen to provide bandwidth -+ * ratios between members of the same class queue group. These weights have no -+ * bearing on behaviour outside that group's WBFS mechanism though. -+ */ -+ -+/** -+ * qman_ceetm_wbfs2ratio - Given a weight code ('wbfs'), return an accurate -+ * fractional representation of the corresponding weight (so that no precision -+ * is lost). -+ * @weight_code: The given weight code in WBFS. -+ * @numerator: the numerator part of the weight computed by the weight code. -+ * @denominator: the denominator part of the weight computed by the weight code. -+ * -+ * Returns zero for success or -EINVAL if the given weight code is illegal. -+ */ -+int qman_ceetm_wbfs2ratio(struct qm_ceetm_weight_code *weight_code, -+ u32 *numerator, -+ u32 *denominator); -+/** -+ * qman_ceetm_ratio2wbfs - Given a weight, find the nearest possible weight -+ * code. If the user needs to know how close this is, convert the resulting -+ * weight code back to a weight and compare. -+ * @numerator: numerator part of the given weight. -+ * @denominator: denominator part of the given weight. -+ * @weight_code: the weight code computed from the given weight. -+ * @rounding: the rounding mode to use when the weight cannot be represented -+ * exactly. -+ * -+ * Returns zero for success, or -ERANGE if "numerator/denominator" is outside -+ * the range of weights. -+ */ -+int qman_ceetm_ratio2wbfs(u32 numerator, -+ u32 denominator, -+ struct qm_ceetm_weight_code *weight_code, -+ int rounding); -+ -+#define QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER 0x1 -+/** -+ * qman_ceetm_cq_get_dequeue_statistics - Get the statistics provided by CEETM -+ * CQ counters. -+ * @cq: the given CQ object. -+ * @flags: indicates whether the statistics counter will be cleared after query. -+ * @frame_count: the number of frames counted since the counter was last -+ * cleared. -+ * @byte_count: the number of bytes in all frames that have been counted. -+ * -+ * Return zero for success or -EINVAL if the query statistics command returns -+ * error. -+ * -+ */ -+int qman_ceetm_cq_get_dequeue_statistics(struct qm_ceetm_cq *cq, u32 flags, -+ u64 *frame_count, u64 *byte_count); -+ -+/** -+ * qman_ceetm_drain_cq - drain the CQ until it is empty. -+ * @cq: the given CQ object. -+ * Return 0 for success, or -EINVAL if the command to empty the CQ fails. -+ */ -+int qman_ceetm_drain_cq(struct qm_ceetm_cq *cq); -+ -+ /* ---------------------- */ -+ /* CEETM :: logical FQIDs */ -+ /* ---------------------- */ -+/** -+ * qman_ceetm_lfq_claim - Claims an unused logical FQID, associates it with -+ * the given class queue. -+ * @lfq: the returned lfq object, if successful. -+ * @cq: the class queue which needs to claim a LFQID. -+ * -+ * Return zero for success, or -ENODEV if no LFQID is available or -ENOMEM if -+ * allocating memory for lfq fails, or -EINVAL if configuring LFQMT fails. -+ */ -+int qman_ceetm_lfq_claim(struct qm_ceetm_lfq **lfq, -+ struct qm_ceetm_cq *cq); -+ -+/** -+ * qman_ceetm_lfq_release - Releases a previously claimed logical FQID. -+ * @lfq: the lfq to be released. -+ * -+ * Return zero for success.
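A claim/release round-trip sketch (illustrative only; assumes 'cq' is a previously claimed class queue):

	struct qm_ceetm_lfq *lfq;

	if (!qman_ceetm_lfq_claim(&lfq, cq)) {
		/* claim succeeded; release the logical FQID again */
		qman_ceetm_lfq_release(lfq);
	}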
-+ */ -+int qman_ceetm_lfq_release(struct qm_ceetm_lfq *lfq); -+ -+/** -+ * qman_ceetm_lfq_set_context -+ * qman_ceetm_lfq_get_context - Set/get the context_a/context_b pair to the -+ * "dequeue context table" associated with the logical FQID. -+ * @lfq: the given logical FQ object. -+ * @context_a: contextA of the dequeue context. -+ * @context_b: contextB of the dequeue context. -+ * -+ * Returns zero for success, or -EINVAL if there is an error setting/getting -+ * the context pair. -+ */ -+int qman_ceetm_lfq_set_context(struct qm_ceetm_lfq *lfq, -+ u64 context_a, -+ u32 context_b); -+int qman_ceetm_lfq_get_context(struct qm_ceetm_lfq *lfq, -+ u64 *context_a, -+ u32 *context_b); -+ -+/** -+ * qman_ceetm_create_fq - Initialise a FQ object for the LFQ. -+ * @lfq: the given logical FQ. -+ * @fq: the fq object created for the given logical FQ. -+ * -+ * The FQ object can be used in qman_enqueue() and qman_enqueue_orp() APIs to -+ * target a logical FQID (and the class queue it is associated with). -+ * Note that this FQ object can only be used for enqueues, and -+ * in the case of qman_enqueue_orp() it cannot be used as the 'orp' parameter, -+ * only as 'fq'. This FQ object cannot (and should not) be destroyed, it is only -+ * valid as long as the underlying 'lfq' remains claimed. It is the user's -+ * responsibility to ensure that the underlying 'lfq' is not released until any -+ * enqueues to this FQ object have completed. The only field the user needs to -+ * fill in is fq->cb.ern, as that enqueue rejection handler is the callback that -+ * could conceivably be called on this FQ object. This API can be called -+ * multiple times to create multiple FQ objects referring to the same logical -+ * FQID, and any enqueue rejections will respect the callback of the object that -+ * issued the enqueue (and will identify the object via the parameter passed to -+ * the callback too). There is no 'flags' parameter to this API as there is for -+ * qman_create_fq() - the created FQ object behaves as though qman_create_fq() -+ * had been called with the single flag QMAN_FQ_FLAG_NO_MODIFY. -+ * -+ * Returns 0 for success. -+ */ -+int qman_ceetm_create_fq(struct qm_ceetm_lfq *lfq, struct qman_fq *fq); -+ -+ /* -------------------------------- */ -+ /* CEETM :: class congestion groups */ -+ /* -------------------------------- */ -+ -+/** -+ * qman_ceetm_ccg_claim - Claims an unused CCG. -+ * @ccg: the returned CCG object, if successful. -+ * @channel: the given class queue channel -+ * @idx: the CCG index within the channel. -+ * @cscn: the callback function of this CCG. -+ * @cb_ctx: the corresponding context to be used if state change -+ * notifications are later enabled for this CCG. -+ * -+ * The congestion group is local to the given class queue channel, so only -+ * class queues within the channel can be associated with that congestion group. -+ * The association of class queues to congestion groups occurs when the class -+ * queues are claimed, see qman_ceetm_cq_claim() and related functions. -+ * Congestion groups are in a "zero" state when initially claimed, and they are -+ * returned to that state when released. -+ * -+ * Return zero for success, or -EINVAL if no CCG in the channel is available. -+ */ -+int qman_ceetm_ccg_claim(struct qm_ceetm_ccg **ccg, -+ struct qm_ceetm_channel *channel, -+ unsigned int idx, -+ void (*cscn)(struct qm_ceetm_ccg *, -+ void *cb_ctx, -+ int congested), -+ void *cb_ctx); -+ -+/** -+ * qman_ceetm_ccg_release - Releases a previously claimed CCG. -+ * @ccg: the given ccg.
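A claim sketch with a congestion-notification callback (illustrative only; 'my_cscn_cb' is a hypothetical handler and CCG index 0 is an arbitrary choice):

	static void my_cscn_cb(struct qm_ceetm_ccg *ccg, void *cb_ctx,
			       int congested)
	{
		pr_info("CCG is %scongested\n", congested ? "" : "no longer ");
	}

	/* ... later, with a claimed 'channel': */
	struct qm_ceetm_ccg *ccg;
	int err = qman_ceetm_ccg_claim(&ccg, channel, 0, my_cscn_cb, NULL);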
-+ * -+ * Returns zero for success, or -EBUSY if the given ccg's dependent objects -+ * (class queues that are associated with the CCG) have not been released. -+ */ -+int qman_ceetm_ccg_release(struct qm_ceetm_ccg *ccg); -+ -+/* This struct is used to specify attributes for a CCG. The 'we_mask' argument -+ * of qman_ceetm_ccg_set() controls which CCG attributes are to be updated, and -+ * the struct fields supply the values for those attributes. A CCG counts either -+ * frames or the bytes within those frames, but not both ('mode'). A CCG can -+ * optionally cause enqueues to be rejected, due to tail-drop or WRED, or both -+ * (they are independent options, 'td_en' and 'wr_en_g,wr_en_y,wr_en_r'). -+ * Tail-drop can be level-triggered due to a single threshold ('td_thres') or -+ * edge-triggered due to a "congestion state", but not both ('td_mode'). -+ * Congestion state has distinct entry and exit thresholds ('cs_thres_in' and -+ * 'cs_thres_out'), and notifications can be sent to software when the CCG goes -+ * into and out of this congested state ('cscn_en'). */ -+struct qm_ceetm_ccg_params { -+ /* Boolean fields together in a single bitfield struct */ -+ struct { -+ /* Whether to count bytes or frames. 1==frames */ -+ u8 mode:1; -+ /* En/disable tail-drop. 1==enable */ -+ u8 td_en:1; -+ /* Tail-drop on congestion-state or threshold. 1=threshold */ -+ u8 td_mode:1; -+ /* Generate congestion state change notifications. 1==enable */ -+ u8 cscn_en:1; -+ /* Enable WRED rejections (per colour). 1==enable */ -+ u8 wr_en_g:1; -+ u8 wr_en_y:1; -+ u8 wr_en_r:1; -+ } __packed; -+ /* Tail-drop threshold. See qm_cgr_thres_[gs]et64(). */ -+ struct qm_cgr_cs_thres td_thres; -+ /* Congestion state thresholds, for entry and exit. */ -+ struct qm_cgr_cs_thres cs_thres_in; -+ struct qm_cgr_cs_thres cs_thres_out; -+ /* Overhead accounting length. Per-packet "tax", from -128 to +127 */ -+ signed char oal; -+ /* Congestion state change notification for DCP portal, virtual CCGID */ -+ /* WRED parameters. */ -+ struct qm_cgr_wr_parm wr_parm_g; -+ struct qm_cgr_wr_parm wr_parm_y; -+ struct qm_cgr_wr_parm wr_parm_r; -+}; -+/* Bits used in the 'we_mask' argument of qman_ceetm_ccg_set(); they control -+ * which attributes of the CCGR are to be updated. */ -+#define QM_CCGR_WE_MODE 0x0001 /* mode (bytes/frames) */ -+#define QM_CCGR_WE_CS_THRES_IN 0x0002 /* congestion state entry threshold */ -+#define QM_CCGR_WE_TD_EN 0x0004 /* congestion state tail-drop enable */ -+#define QM_CCGR_WE_CSCN_TUPD 0x0008 /* CSCN target update */ -+#define QM_CCGR_WE_CSCN_EN 0x0010 /* congestion notification enable */ -+#define QM_CCGR_WE_WR_EN_R 0x0020 /* WRED enable - red */ -+#define QM_CCGR_WE_WR_EN_Y 0x0040 /* WRED enable - yellow */ -+#define QM_CCGR_WE_WR_EN_G 0x0080 /* WRED enable - green */ -+#define QM_CCGR_WE_WR_PARM_R 0x0100 /* WRED parameters - red */ -+#define QM_CCGR_WE_WR_PARM_Y 0x0200 /* WRED parameters - yellow */ -+#define QM_CCGR_WE_WR_PARM_G 0x0400 /* WRED parameters - green */ -+#define QM_CCGR_WE_OAL 0x0800 /* overhead accounting length */ -+#define QM_CCGR_WE_CS_THRES_OUT 0x1000 /* congestion state exit threshold */ -+#define QM_CCGR_WE_TD_THRES 0x2000 /* tail-drop threshold */ -+#define QM_CCGR_WE_TD_MODE 0x4000 /* tail-drop mode (state/threshold) */ -+#define QM_CCGR_WE_CDV 0x8000 /* cdv */ -+ -+/** -+ * qman_ceetm_ccg_set -+ * qman_ceetm_ccg_get - Configure/query a subset of CCG attributes. -+ * @ccg: the given CCG object. -+ * @we_mask: the write enable mask.
-+ * @params: the parameter settings for this CCG. -+ * -+ * Return 0 for success, or -EIO if the configure CCG command returns error for -+ * the "set" function, or -EINVAL if the query CCG command returns error for -+ * the "get" function. -+ */ -+int qman_ceetm_ccg_set(struct qm_ceetm_ccg *ccg, -+ u16 we_mask, -+ const struct qm_ceetm_ccg_params *params); -+int qman_ceetm_ccg_get(struct qm_ceetm_ccg *ccg, -+ struct qm_ceetm_ccg_params *params); -+ -+/** qman_ceetm_cscn_swp_set - Add or remove a software portal from the target -+ * mask. -+ * qman_ceetm_cscn_swp_get - Query whether a given software portal index is -+ * in the cscn target mask. -+ * @ccg: the given CCG object. -+ * @swp_idx: the index of the software portal. -+ * @cscn_enabled: 1 = set the swp as a cscn target, 0 = remove the swp from -+ * the target mask. -+ * @we_mask: the write enable mask. -+ * @params: the parameter settings for this CCG. -+ * -+ * Return 0 for success, or -EINVAL if the command in the set/get function -+ * fails. -+ */ -+int qman_ceetm_cscn_swp_set(struct qm_ceetm_ccg *ccg, -+ u16 swp_idx, -+ unsigned int cscn_enabled, -+ u16 we_mask, -+ const struct qm_ceetm_ccg_params *params); -+int qman_ceetm_cscn_swp_get(struct qm_ceetm_ccg *ccg, -+ u16 swp_idx, -+ unsigned int *cscn_enabled); -+ -+/** qman_ceetm_cscn_dcp_set - Add or remove a direct connect portal from the -+ * target mask. -+ * qman_ceetm_cscn_dcp_get - Query whether a given direct connect portal index -+ * is in the cscn target mask. -+ * @ccg: the given CCG object. -+ * @dcp_idx: the index of the direct connect portal. -+ * @vcgid: congestion state change notification for dcp portal, virtual CGID. -+ * @cscn_enabled: 1 = set the dcp as a cscn target, 0 = remove the dcp from -+ * the target mask. -+ * @we_mask: the write enable mask. -+ * @params: the parameter settings for this CCG. -+ * -+ * Return 0 for success, or -EINVAL if the command in the set/get function -+ * fails. -+ */ -+int qman_ceetm_cscn_dcp_set(struct qm_ceetm_ccg *ccg, -+ u16 dcp_idx, -+ u8 vcgid, -+ unsigned int cscn_enabled, -+ u16 we_mask, -+ const struct qm_ceetm_ccg_params *params); -+int qman_ceetm_cscn_dcp_get(struct qm_ceetm_ccg *ccg, -+ u16 dcp_idx, -+ u8 *vcgid, -+ unsigned int *cscn_enabled); -+ -+/** -+ * qman_ceetm_ccg_get_reject_statistics - Get the statistics provided by -+ * CEETM CCG counters. -+ * @ccg: the given CCG object. -+ * @flags: indicates whether the statistics counter will be cleared after query. -+ * @frame_count: the number of frames counted since the counter was last -+ * cleared. -+ * @byte_count: the number of bytes in all frames that have been counted. -+ * -+ * Return zero for success or -EINVAL if the query statistics command returns -+ * error. -+ * -+ */ -+int qman_ceetm_ccg_get_reject_statistics(struct qm_ceetm_ccg *ccg, u32 flags, -+ u64 *frame_count, u64 *byte_count); -+ -+/** -+ * qman_ceetm_query_lfqmt - Query the logical frame queue mapping table -+ * @lfqid: Logical Frame Queue ID -+ * @lfqmt_query: Results of the query command -+ * -+ * Returns zero for success or -EIO if the query command returns error. -+ * -+ */ -+int qman_ceetm_query_lfqmt(int lfqid, -+ struct qm_mcr_ceetm_lfqmt_query *lfqmt_query); -+ -+/** -+ * qman_ceetm_query_write_statistics - Query (and optionally write) statistics -+ * @cid: Target ID (CQID or CCGRID) -+ * @dcp_idx: CEETM portal ID -+ * @command_type: One of the following: -+ * 0 = Query dequeue statistics. CID carries the CQID to be queried. -+ * 1 = Query and clear dequeue statistics.
CID carries the CQID to be queried. -+ * 2 = Write dequeue statistics. CID carries the CQID to be written. -+ * 3 = Query reject statistics. CID carries the CCGRID to be queried. -+ * 4 = Query and clear reject statistics. CID carries the CCGRID to be queried. -+ * 5 = Write reject statistics. CID carries the CCGRID to be written. -+ * @frame_count: Frame count value to be written if this is a write command -+ * @byte_count: Byte count value to be written if this is a write command -+ * -+ * Returns zero for success or -EIO if the query command returns error. -+ */ -+int qman_ceetm_query_write_statistics(u16 cid, enum qm_dc_portal dcp_idx, -+ u16 command_type, u64 frame_count, -+ u64 byte_count); -+ -+/** -+ * qman_set_wpm - Set waterfall power management -+ * -+ * @wpm_enable: boolean, 1 = enable wpm, 0 = disable wpm. -+ * -+ * Return 0 for success, or -ENODEV if the QMan misc_cfg register is not -+ * accessible. -+ */ -+int qman_set_wpm(int wpm_enable); -+ -+/** -+ * qman_get_wpm - Query the waterfall power management setting -+ * -+ * @wpm_enable: output; 1 = wpm is enabled, 0 = wpm is disabled. -+ * -+ * Return 0 for success, or -ENODEV if the QMan misc_cfg register is not -+ * accessible. -+ */ -+int qman_get_wpm(int *wpm_enable); -+ -+/* The qman_p_***() variants below might be called in a migration situation -+ * (e.g. cpu hotplug). They are used to continue accessing the portal that -+ * execution was affine to prior to migration. -+ * @qman_portal specifies which portal the APIs will use. -+*/ -+const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal -+ *p); -+int qman_p_irqsource_add(struct qman_portal *p, u32 bits); -+int qman_p_irqsource_remove(struct qman_portal *p, u32 bits); -+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit); -+u32 qman_p_poll_slow(struct qman_portal *p); -+void qman_p_poll(struct qman_portal *p); -+void qman_p_stop_dequeues(struct qman_portal *p); -+void qman_p_start_dequeues(struct qman_portal *p); -+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools); -+void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools); -+u32 qman_p_static_dequeue_get(struct qman_portal *p); -+void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq, -+ int park_request); -+int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq, -+ u32 flags __maybe_unused, u32 vdqcr); -+int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq, -+ const struct qm_fd *fd, u32 flags); -+int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq, -+ const struct qm_fd *fd, u32 flags, -+ struct qman_fq *orp, u16 orp_seqnum); -+int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq, -+ const struct qm_fd *fd, u32 flags, -+ qman_cb_precommit cb, void *cb_arg); -+#ifdef __cplusplus -+} -+#endif -+ -+#endif /* FSL_QMAN_H */ ---- /dev/null -+++ b/include/linux/fsl_usdpaa.h -@@ -0,0 +1,372 @@ -+/* Copyright 2011-2012 Freescale Semiconductor, Inc. -+ * -+ * This file is licensed under the terms of the GNU General Public License -+ * version 2. This program is licensed "as is" without any warranty of any -+ * kind, whether express or implied.
-+ */ -+ -+#ifndef FSL_USDPAA_H -+#define FSL_USDPAA_H -+ -+#ifdef __cplusplus -+extern "C" { -+#endif -+ -+#include <linux/uaccess.h> -+#include <linux/ioctl.h> -+#include <linux/fsl_qman.h> /* For "enum qm_channel" */ -+#include <linux/compat.h> -+ -+#ifdef CONFIG_FSL_USDPAA -+ -+/******************************/ -+/* Allocation of resource IDs */ -+/******************************/ -+ -+/* This enum is used to distinguish the type of underlying object being -+ * manipulated. */ -+enum usdpaa_id_type { -+ usdpaa_id_fqid, -+ usdpaa_id_bpid, -+ usdpaa_id_qpool, -+ usdpaa_id_cgrid, -+ usdpaa_id_ceetm0_lfqid, -+ usdpaa_id_ceetm0_channelid, -+ usdpaa_id_ceetm1_lfqid, -+ usdpaa_id_ceetm1_channelid, -+ usdpaa_id_max /* <-- not a valid type, represents the number of types */ -+}; -+#define USDPAA_IOCTL_MAGIC 'u' -+struct usdpaa_ioctl_id_alloc { -+ uint32_t base; /* Return value, the start of the allocated range */ -+ enum usdpaa_id_type id_type; /* what kind of resource(s) to allocate */ -+ uint32_t num; /* how many IDs to allocate (and return value) */ -+ uint32_t align; /* must be a power of 2, 0 is treated like 1 */ -+ int partial; /* whether to allow less than 'num' */ -+}; -+struct usdpaa_ioctl_id_release { -+ /* Input: */ -+ enum usdpaa_id_type id_type; -+ uint32_t base; -+ uint32_t num; -+}; -+struct usdpaa_ioctl_id_reserve { -+ enum usdpaa_id_type id_type; -+ uint32_t base; -+ uint32_t num; -+}; -+ -+ -+/* ioctl() commands */ -+#define USDPAA_IOCTL_ID_ALLOC \ -+ _IOWR(USDPAA_IOCTL_MAGIC, 0x01, struct usdpaa_ioctl_id_alloc) -+#define USDPAA_IOCTL_ID_RELEASE \ -+ _IOW(USDPAA_IOCTL_MAGIC, 0x02, struct usdpaa_ioctl_id_release) -+#define USDPAA_IOCTL_ID_RESERVE \ -+ _IOW(USDPAA_IOCTL_MAGIC, 0x0A, struct usdpaa_ioctl_id_reserve) -+ -+/**********************/ -+/* Mapping DMA memory */ -+/**********************/ -+ -+/* Maximum length for a map name, including NULL-terminator */ -+#define USDPAA_DMA_NAME_MAX 16 -+/* Flags for requesting DMA maps. Maps are private+unnamed or sharable+named. -+ * For a sharable and named map, specify _SHARE (whether creating one or -+ * binding to an existing one). If _SHARE is specified and _CREATE is not, then -+ * the mapping must already exist. If _SHARE and _CREATE are specified and the -+ * mapping doesn't already exist, it will be created. If _SHARE and _CREATE are -+ * specified and the mapping already exists, the mapping will fail unless _LAZY -+ * is specified. When mapping to a pre-existing sharable map, the length must be -+ * an exact match. Lengths must be a power-of-4 multiple of page size. -+ * -+ * Note that this does not actually map the memory to user-space, that is done -+ * by a subsequent mmap() using the page offset returned from this ioctl(). The -+ * ioctl() is what gives the process permission to do this, and a page-offset -+ * with which to do so. -+ */ -+#define USDPAA_DMA_FLAG_SHARE 0x01 -+#define USDPAA_DMA_FLAG_CREATE 0x02 -+#define USDPAA_DMA_FLAG_LAZY 0x04 -+#define USDPAA_DMA_FLAG_RDONLY 0x08
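A user-space sketch of the flag semantics described above (illustrative only; the "/dev/fsl-usdpaa" node path and the map name are assumptions, and error handling is omitted):

	#include <fcntl.h>
	#include <sys/ioctl.h>

	struct usdpaa_ioctl_dma_map map = {
		.len   = 0x1000000,	/* 16 MB: a power-of-4 multiple of a 4 KB page */
		.flags = USDPAA_DMA_FLAG_SHARE | USDPAA_DMA_FLAG_CREATE |
			 USDPAA_DMA_FLAG_LAZY,	/* create, or bind if it already exists */
		.name  = "my_shm",	/* hypothetical map name */
	};
	int fd = open("/dev/fsl-usdpaa", O_RDWR);	/* assumed device node */

	if (!ioctl(fd, USDPAA_IOCTL_DMA_MAP, &map)) {
		/* map succeeded; a subsequent mmap() using the page offset
		 * returned by the ioctl() gives the user-space view */
	}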
-+struct usdpaa_ioctl_dma_map { -+ /* Output parameters - virtual and physical addresses */ -+ void *ptr; -+ uint64_t phys_addr; -+ /* Input parameter, the length of the region to be created (or if -+ * mapping an existing region, this must match it). Must be a power-of-4 -+ * multiple of page size. */ -+ uint64_t len; -+ /* Input parameter, the USDPAA_DMA_FLAG_* settings. */ -+ uint32_t flags; -+ /* If _FLAG_SHARE is specified, the name of the region to be created (or -+ * of the existing mapping to use). */ -+ char name[USDPAA_DMA_NAME_MAX]; -+ /* If this ioctl() creates the mapping, this is an input parameter -+ * stating whether the region supports locking. If mapping an existing -+ * region, this is a return value indicating the same thing. */ -+ int has_locking; -+ /* In the case of a successful map with _CREATE and _LAZY, this return -+ * value indicates whether we created the mapped region or whether it -+ * already existed. */ -+ int did_create; -+}; -+ -+#ifdef CONFIG_COMPAT -+struct usdpaa_ioctl_dma_map_compat { -+ /* Output parameters - virtual and physical addresses */ -+ compat_uptr_t ptr; -+ uint64_t phys_addr; -+ /* Input parameter, the length of the region to be created (or if -+ * mapping an existing region, this must match it). Must be a power-of-4 -+ * multiple of page size. */ -+ uint64_t len; -+ /* Input parameter, the USDPAA_DMA_FLAG_* settings. */ -+ uint32_t flags; -+ /* If _FLAG_SHARE is specified, the name of the region to be created (or -+ * of the existing mapping to use). */ -+ char name[USDPAA_DMA_NAME_MAX]; -+ /* If this ioctl() creates the mapping, this is an input parameter -+ * stating whether the region supports locking. If mapping an existing -+ * region, this is a return value indicating the same thing. */ -+ int has_locking; -+ /* In the case of a successful map with _CREATE and _LAZY, this return -+ * value indicates whether we created the mapped region or whether it -+ * already existed. */ -+ int did_create; -+}; -+ -+#define USDPAA_IOCTL_DMA_MAP_COMPAT \ -+ _IOWR(USDPAA_IOCTL_MAGIC, 0x03, struct usdpaa_ioctl_dma_map_compat) -+#endif -+ -+ -+#define USDPAA_IOCTL_DMA_MAP \ -+ _IOWR(USDPAA_IOCTL_MAGIC, 0x03, struct usdpaa_ioctl_dma_map) -+/* munmap() does not remove the DMA map, just the user-space mapping to it. -+ * This ioctl will do both (though you can munmap() before calling the ioctl -+ * too). */ -+#define USDPAA_IOCTL_DMA_UNMAP \ -+ _IOW(USDPAA_IOCTL_MAGIC, 0x04, unsigned char) -+/* We implement a cross-process locking scheme per DMA map. Call this ioctl() -+ * with a mmap()'d address, and the process will sleep (interruptibly) if the -+ * lock is already held by another process. Process destruction will -+ * automatically clean up any held locks. */ -+#define USDPAA_IOCTL_DMA_LOCK \ -+ _IOW(USDPAA_IOCTL_MAGIC, 0x05, unsigned char) -+#define USDPAA_IOCTL_DMA_UNLOCK \ -+ _IOW(USDPAA_IOCTL_MAGIC, 0x06, unsigned char) -+ -+/***************************************/ -+/* Mapping and using QMan/BMan portals */ -+/***************************************/ -+enum usdpaa_portal_type { -+ usdpaa_portal_qman, -+ usdpaa_portal_bman, -+}; -+ -+#define QBMAN_ANY_PORTAL_IDX 0xffffffff -+ -+struct usdpaa_ioctl_portal_map { -+ /* Input parameter: whether a qman or bman portal is required. */ -+ -+ enum usdpaa_portal_type type; -+ /* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX -+ for don't care. The portal index will be populated by the -+ driver when the ioctl() successfully completes */ -+ uint32_t index; -+ -+ /* Return value: if the map succeeds, this gives the mapped -+ * cache-inhibited (cinh) and cache-enabled (cena) addresses. */ -+ struct usdpaa_portal_map { -+ void *cinh; -+ void *cena; -+ } addr; -+ /* Qman-specific return values */ -+ uint16_t channel; -+ uint32_t pools; -+}; -+ -+#ifdef CONFIG_COMPAT -+struct compat_usdpaa_ioctl_portal_map { -+ /* Input parameter: whether a qman or bman portal is required. */ -+ enum usdpaa_portal_type type; -+ /* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX -+ for don't care.
The portal index will be populated by the -+ driver when the ioctl() successfully completes */ -+ uint32_t index; -+ /* Return value: if the map succeeds, this gives the mapped -+ * cache-inhibited (cinh) and cache-enabled (cena) addresses. */ -+ struct usdpaa_portal_map_compat { -+ compat_uptr_t cinh; -+ compat_uptr_t cena; -+ } addr; -+ /* Qman-specific return values */ -+ uint16_t channel; -+ uint32_t pools; -+}; -+#define USDPAA_IOCTL_PORTAL_MAP_COMPAT \ -+ _IOWR(USDPAA_IOCTL_MAGIC, 0x07, struct compat_usdpaa_ioctl_portal_map) -+#define USDPAA_IOCTL_PORTAL_UNMAP_COMPAT \ -+ _IOW(USDPAA_IOCTL_MAGIC, 0x08, struct usdpaa_portal_map_compat) -+#endif -+ -+#define USDPAA_IOCTL_PORTAL_MAP \ -+ _IOWR(USDPAA_IOCTL_MAGIC, 0x07, struct usdpaa_ioctl_portal_map) -+#define USDPAA_IOCTL_PORTAL_UNMAP \ -+ _IOW(USDPAA_IOCTL_MAGIC, 0x08, struct usdpaa_portal_map) -+ -+struct usdpaa_ioctl_irq_map { -+ enum usdpaa_portal_type type; /* Type of portal to map */ -+ int fd; /* File descriptor that contains the portal */ -+ void *portal_cinh; /* Cache inhibited area to identify the portal */ -+}; -+ -+#define USDPAA_IOCTL_PORTAL_IRQ_MAP \ -+ _IOW(USDPAA_IOCTL_MAGIC, 0x09, struct usdpaa_ioctl_irq_map) -+ -+#ifdef CONFIG_COMPAT -+ -+struct compat_ioctl_irq_map { -+ enum usdpaa_portal_type type; /* Type of portal to map */ -+ compat_int_t fd; /* File descriptor that contains the portal */ -+ compat_uptr_t portal_cinh; /* Used to identify the portal */ -+}; -+ -+#define USDPAA_IOCTL_PORTAL_IRQ_MAP_COMPAT \ -+ _IOW(USDPAA_IOCTL_MAGIC, 0x09, struct compat_ioctl_irq_map) -+#endif -+ -+/* ioctl to query the amount of DMA memory used in the system */ -+struct usdpaa_ioctl_dma_used { -+ uint64_t free_bytes; -+ uint64_t total_bytes; -+}; -+#define USDPAA_IOCTL_DMA_USED \ -+ _IOR(USDPAA_IOCTL_MAGIC, 0x0B, struct usdpaa_ioctl_dma_used) -+ -+/* ioctl to allocate a raw portal */ -+struct usdpaa_ioctl_raw_portal { -+ /* inputs */ -+ enum usdpaa_portal_type type; /* Type of portal to allocate */ -+ -+ /* set to non-zero to turn on stashing */ -+ uint8_t enable_stash; -+ /* Stashing attributes for the portal */ -+ uint32_t cpu; -+ uint32_t cache; -+ uint32_t window; -+ -+ /* Specifies the stash request queue this portal should use */ -+ uint8_t sdest; -+ -+ /* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX -+ * for don't care. The portal index will be populated by the -+ * driver when the ioctl() successfully completes */ -+ uint32_t index; -+ -+ /* outputs */ -+ uint64_t cinh; -+ uint64_t cena; -+}; -+ -+#define USDPAA_IOCTL_ALLOC_RAW_PORTAL \ -+ _IOWR(USDPAA_IOCTL_MAGIC, 0x0C, struct usdpaa_ioctl_raw_portal) -+ -+#define USDPAA_IOCTL_FREE_RAW_PORTAL \ -+ _IOR(USDPAA_IOCTL_MAGIC, 0x0D, struct usdpaa_ioctl_raw_portal) -+ -+#ifdef CONFIG_COMPAT -+ -+struct compat_ioctl_raw_portal { -+ /* inputs */ -+ enum usdpaa_portal_type type; /* Type of portal to allocate */ -+ -+ /* set to non-zero to turn on stashing */ -+ uint8_t enable_stash; -+ /* Stashing attributes for the portal */ -+ uint32_t cpu; -+ uint32_t cache; -+ uint32_t window; -+ /* Specifies the stash request queue this portal should use */ -+ uint8_t sdest; -+ -+ /* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX -+ * for don't care.
The portal index will be populated by the -+ * driver when the ioctl() successfully completes */ -+ uint32_t index; -+ -+ /* outputs */ -+ uint64_t cinh; -+ uint64_t cena; -+}; -+ -+#define USDPAA_IOCTL_ALLOC_RAW_PORTAL_COMPAT \ -+ _IOWR(USDPAA_IOCTL_MAGIC, 0x0C, struct compat_ioctl_raw_portal) -+ -+#define USDPAA_IOCTL_FREE_RAW_PORTAL_COMPAT \ -+ _IOR(USDPAA_IOCTL_MAGIC, 0x0D, struct compat_ioctl_raw_portal) -+ -+#endif -+ -+#ifdef __KERNEL__ -+ -+/* Early-boot hook */ -+int __init fsl_usdpaa_init_early(void); -+ -+/* Fault-handling in arch/powerpc/mm/mem.c gives USDPAA an opportunity to detect -+ * faults within its ranges via this hook. */ -+int usdpaa_test_fault(unsigned long pfn, u64 *phys_addr, u64 *size); -+ -+#endif /* __KERNEL__ */ -+ -+#endif /* CONFIG_FSL_USDPAA */ -+ -+#ifdef __KERNEL__ -+/* This interface is needed in a few places and though it's not specific to -+ * USDPAA as such, creating a new header for it doesn't make any sense. The -+ * qbman kernel driver implements this interface and uses it as the backend for -+ * both the FQID and BPID allocators. The fsl_usdpaa driver also uses this -+ * interface for tracking per-process allocations handed out to user-space. */ -+struct dpa_alloc { -+ struct list_head free; -+ spinlock_t lock; -+ struct list_head used; -+}; -+#define DECLARE_DPA_ALLOC(name) \ -+ struct dpa_alloc name = { \ -+ .free = { \ -+ .prev = &name.free, \ -+ .next = &name.free \ -+ }, \ -+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ -+ .used = { \ -+ .prev = &name.used, \ -+ .next = &name.used \ -+ } \ -+ } -+static inline void dpa_alloc_init(struct dpa_alloc *alloc) -+{ -+ INIT_LIST_HEAD(&alloc->free); -+ INIT_LIST_HEAD(&alloc->used); -+ spin_lock_init(&alloc->lock); -+} -+int dpa_alloc_new(struct dpa_alloc *alloc, u32 *result, u32 count, u32 align, -+ int partial); -+void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count); -+void dpa_alloc_seed(struct dpa_alloc *alloc, u32 fqid, u32 count); -+ -+/* Like 'new' but specifies the desired range, returns -ENOMEM if the entire -+ * desired range is not available, or 0 for success. */ -+int dpa_alloc_reserve(struct dpa_alloc *alloc, u32 base_id, u32 count); -+/* Pops and returns contiguous ranges from the allocator. Returns -ENOMEM when -+ * 'alloc' is empty. */ -+int dpa_alloc_pop(struct dpa_alloc *alloc, u32 *result, u32 *count); -+/* Returns 1 if the specified id is alloced, 0 otherwise */ -+int dpa_alloc_check(struct dpa_alloc *list, u32 id); -+#endif /* __KERNEL__ */ -+ -+#ifdef __cplusplus -+} -+#endif -+ -+#endif /* FSL_USDPAA_H */ diff --git a/target/linux/layerscape/patches-4.4/7018-devres-add-devm_alloc_percpu.patch b/target/linux/layerscape/patches-4.4/7018-devres-add-devm_alloc_percpu.patch deleted file mode 100644 index 835403e58..000000000 --- a/target/linux/layerscape/patches-4.4/7018-devres-add-devm_alloc_percpu.patch +++ /dev/null @@ -1,138 +0,0 @@ -From f7792176d939e79b4f525114a95d0fd8266bef8e Mon Sep 17 00:00:00 2001 -From: Madalin Bucur -Date: Wed, 19 Nov 2014 13:06:54 +0200 -Subject: [PATCH 18/70] devres: add devm_alloc_percpu() - -Introduce managed counterparts for alloc_percpu() and free_percpu(). -Add devm_alloc_percpu() and devm_free_percpu() into the managed -interfaces list. 
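A usage sketch of the managed API this patch introduced (illustrative only; 'my_stats' and the probe function are hypothetical):

	struct my_stats { u64 rx_packets; u64 tx_packets; };

	static int my_probe(struct platform_device *pdev)
	{
		struct my_stats __percpu *stats;

		stats = devm_alloc_percpu(&pdev->dev, struct my_stats);
		if (!stats)
			return -ENOMEM;
		/* use this_cpu_ptr(stats) on the hot path; the memory is
		 * freed automatically on driver detach, or earlier via
		 * devm_free_percpu(&pdev->dev, stats) */
		return 0;
	}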
- -Signed-off-by: Madalin Bucur - -Change-Id: I93546348e7b0e1974fda8b6c7a3b3710ce45b724 -Reviewed-on: http://git.am.freescale.net:8181/24140 -Reviewed-by: Madalin-Cristian Bucur -Tested-by: Madalin-Cristian Bucur - -Conflicts: - Documentation/driver-model/devres.txt - drivers/base/devres.c - -Conflicts: - drivers/base/devres.c ---- - Documentation/driver-model/devres.txt | 4 +++ - drivers/base/devres.c | 63 +++++++++++++++++++++++++++++++++ - include/linux/device.h | 19 ++++++++++ - 3 files changed, 86 insertions(+) - ---- a/Documentation/driver-model/devres.txt -+++ b/Documentation/driver-model/devres.txt -@@ -321,6 +321,10 @@ PHY - devm_usb_get_phy() - devm_usb_put_phy() - -+PER-CPU MEM -+ devm_alloc_percpu() -+ devm_free_percpu() -+ - PINCTRL - devm_pinctrl_get() - devm_pinctrl_put() ---- a/drivers/base/devres.c -+++ b/drivers/base/devres.c -@@ -985,3 +985,66 @@ void devm_free_pages(struct device *dev, - &devres)); - } - EXPORT_SYMBOL_GPL(devm_free_pages); -+ -+static void devm_percpu_release(struct device *dev, void *pdata) -+{ -+ void __percpu *p; -+ -+ p = *(void __percpu **)pdata; -+ free_percpu(p); -+} -+ -+static int devm_percpu_match(struct device *dev, void *data, void *p) -+{ -+ struct devres *devr = container_of(data, struct devres, data); -+ -+ return *(void **)devr->data == p; -+} -+ -+/** -+ * __devm_alloc_percpu - Resource-managed alloc_percpu -+ * @dev: Device to allocate per-cpu memory for -+ * @size: Size of per-cpu memory to allocate -+ * @align: Alignment of per-cpu memory to allocate -+ * -+ * Managed alloc_percpu. Per-cpu memory allocated with this function is -+ * automatically freed on driver detach. -+ * -+ * RETURNS: -+ * Pointer to allocated memory on success, NULL on failure. -+ */ -+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, -+ size_t align) -+{ -+ void *p; -+ void __percpu *pcpu; -+ -+ pcpu = __alloc_percpu(size, align); -+ if (!pcpu) -+ return NULL; -+ -+ p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL); -+ if (!p) -+ return NULL; -+ -+ *(void __percpu **)p = pcpu; -+ -+ devres_add(dev, p); -+ -+ return pcpu; -+} -+EXPORT_SYMBOL_GPL(__devm_alloc_percpu); -+ -+/** -+ * devm_free_percpu - Resource-managed free_percpu -+ * @dev: Device this memory belongs to -+ * @pdata: Per-cpu memory to free -+ * -+ * Free memory allocated with devm_alloc_percpu(). -+ */ -+void devm_free_percpu(struct device *dev, void __percpu *pdata) -+{ -+ WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match, -+ (void *)pdata)); -+} -+EXPORT_SYMBOL_GPL(devm_free_percpu); ---- a/include/linux/device.h -+++ b/include/linux/device.h -@@ -683,6 +683,25 @@ void __iomem *devm_ioremap_resource(stru - int devm_add_action(struct device *dev, void (*action)(void *), void *data); - void devm_remove_action(struct device *dev, void (*action)(void *), void *data); - -+/** -+ * devm_alloc_percpu - Resource-managed alloc_percpu -+ * @dev: Device to allocate per-cpu memory for -+ * @type: Type to allocate per-cpu memory for -+ * -+ * Managed alloc_percpu. Per-cpu memory allocated with this function is -+ * automatically freed on driver detach. -+ * -+ * RETURNS: -+ * Pointer to allocated memory on success, NULL on failure.
-+ */ -+#define devm_alloc_percpu(dev, type) \ -+ (typeof(type) __percpu *)__devm_alloc_percpu(dev, sizeof(type), \ -+ __alignof__(type)) -+ -+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, -+ size_t align); -+void devm_free_percpu(struct device *dev, void __percpu *pdata); -+ - struct device_dma_parameters { - /* - * a low level driver may set these to teach IOMMU code about diff --git a/target/linux/layerscape/patches-4.4/7019-net-readd-skb_recycle.patch b/target/linux/layerscape/patches-4.4/7019-net-readd-skb_recycle.patch deleted file mode 100644 index 062233f18..000000000 --- a/target/linux/layerscape/patches-4.4/7019-net-readd-skb_recycle.patch +++ /dev/null @@ -1,59 +0,0 @@ -From abc24ef7a69f54c3317beea98078831ba9bfa2cd Mon Sep 17 00:00:00 2001 -From: Madalin Bucur -Date: Tue, 5 Jan 2016 12:12:07 +0200 -Subject: [PATCH 19/70] net: readd skb_recycle() - -Adding back skb_recycle() as it's used by the DPAA Ethernet driver. -This was removed from the upstream kernel because it was lacking users. - -Signed-off-by: Madalin Bucur ---- - include/linux/skbuff.h | 1 + - net/core/skbuff.c | 26 ++++++++++++++++++++++++++ - 2 files changed, 27 insertions(+) - ---- a/include/linux/skbuff.h -+++ b/include/linux/skbuff.h -@@ -799,6 +799,7 @@ void kfree_skb(struct sk_buff *skb); - void kfree_skb_list(struct sk_buff *segs); - void skb_tx_error(struct sk_buff *skb); - void consume_skb(struct sk_buff *skb); -+void skb_recycle(struct sk_buff *skb); - void __kfree_skb(struct sk_buff *skb); - extern struct kmem_cache *skbuff_head_cache; - ---- a/net/core/skbuff.c -+++ b/net/core/skbuff.c -@@ -766,6 +766,32 @@ void consume_skb(struct sk_buff *skb) - } - EXPORT_SYMBOL(consume_skb); - -+/** -+ * skb_recycle - clean up an skb for reuse -+ * @skb: buffer -+ * -+ * Recycles the skb to be reused as a receive buffer. This -+ * function does any necessary reference count dropping, and -+ * cleans up the skbuff as if it just came from __alloc_skb(). -+ */ -+void skb_recycle(struct sk_buff *skb) -+{ -+ struct skb_shared_info *shinfo; -+ u8 head_frag = skb->head_frag; -+ -+ skb_release_head_state(skb); -+ -+ shinfo = skb_shinfo(skb); -+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); -+ atomic_set(&shinfo->dataref, 1); -+ -+ memset(skb, 0, offsetof(struct sk_buff, tail)); -+ skb->data = skb->head + NET_SKB_PAD; -+ skb->head_frag = head_frag; -+ skb_reset_tail_pointer(skb); -+} -+EXPORT_SYMBOL(skb_recycle); -+ - /* Make sure a field is enclosed inside headers_start/headers_end section */ - #define CHECK_SKB_FIELD(field) \ - BUILD_BUG_ON(offsetof(struct sk_buff, field) < \ diff --git a/target/linux/layerscape/patches-4.4/7020-net-add-custom-NETIF-flags.patch b/target/linux/layerscape/patches-4.4/7020-net-add-custom-NETIF-flags.patch deleted file mode 100644 index c89a58c37..000000000 --- a/target/linux/layerscape/patches-4.4/7020-net-add-custom-NETIF-flags.patch +++ /dev/null @@ -1,40 +0,0 @@ -From 608986fc6b41633753701d5df9c1ab6a13856608 Mon Sep 17 00:00:00 2001 -From: Madalin Bucur -Date: Mon, 11 Jul 2016 14:14:15 +0800 -Subject: [PATCH 20/70] net: add custom NETIF flags - -commit 6a53650c0c4513429d91e6ad7253340f3d3b9da5 -[context adjustment] - -These flags are used by DPAA Ethernet to impose different behaviors -in the networking stack. 
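A sketch of how a driver might advertise the new bits (illustrative only; 'net_dev' is a hypothetical driver variable — the DPAA Ethernet users of these flags are not part of this patch). The NETIF_F_HW_ACCEL_MQ bit is what the watchdog change in the following patch keys off:

	/* in a hypothetical hardware-multiqueue driver's setup path */
	net_dev->features |= NETIF_F_HW_QDISC | NETIF_F_HW_ACCEL_MQ;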
- -Signed-off-by: Bogdan Hamciuc -Signed-off-by: Madalin Bucur -Integrated-by: Zhao Qiang ---- - include/linux/netdev_features.h | 6 ++++++ - 1 file changed, 6 insertions(+) - ---- a/include/linux/netdev_features.h -+++ b/include/linux/netdev_features.h -@@ -66,6 +66,9 @@ enum { - NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */ - NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */ - NETIF_F_BUSY_POLL_BIT, /* Busy poll */ -+ /* Freescale DPA support */ -+ NETIF_F_HW_QDISC_BIT, /* Supports hardware Qdisc */ -+ NETIF_F_HW_ACCEL_MQ_BIT, /* Hardware-accelerated multiqueue */ - - /* - * Add your fresh new feature above and remember to update -@@ -124,6 +127,9 @@ enum { - #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) - #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) - #define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL) -+/* Freescale DPA support */ -+#define NETIF_F_HW_QDISC __NETIF_F(HW_QDISC) -+#define NETIF_F_HW_ACCEL_MQ __NETIF_F(HW_ACCEL_MQ) - - #define for_each_netdev_feature(mask_addr, bit) \ - for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) diff --git a/target/linux/layerscape/patches-4.4/7021-net-Make-the-netdev-watchdog-aware-of-hardware-multi.patch b/target/linux/layerscape/patches-4.4/7021-net-Make-the-netdev-watchdog-aware-of-hardware-multi.patch deleted file mode 100644 index 69bba053a..000000000 --- a/target/linux/layerscape/patches-4.4/7021-net-Make-the-netdev-watchdog-aware-of-hardware-multi.patch +++ /dev/null @@ -1,48 +0,0 @@ -From 6aa1eca98ca44f515e10d8058d0ff6db3c8a3c11 Mon Sep 17 00:00:00 2001 -From: Bogdan Hamciuc -Date: Fri, 28 Sep 2012 17:04:33 +0300 -Subject: [PATCH 21/70] net: Make the netdev watchdog aware of hardware - multiqueue devices - -If the netdev declares the NETIF_F_HW_ACCEL_MQ (accelerated multiqueue) -capability, tell the watchdog to consider the per-netdev trans_start -field rather than any individual multiqueue's timestamp. That is -justified by the fact that queues only go in and out of congestion -in groups, not individually, as far as the net device is concerned. - -Change-Id: I07a6693bf1f0bb1e9396c5e232452223a511ecc1 -Signed-off-by: Bogdan Hamciuc -Reviewed-on: http://git.am.freescale.net:8181/1033 -Reviewed-by: Fleming Andrew-AFLEMING -Tested-by: Fleming Andrew-AFLEMING ---- - net/sched/sch_generic.c | 17 ++++++++++++++++- - 1 file changed, 16 insertions(+), 1 deletion(-) - ---- a/net/sched/sch_generic.c -+++ b/net/sched/sch_generic.c -@@ -290,8 +290,23 @@ static void dev_watchdog(unsigned long a - txq = netdev_get_tx_queue(dev, i); - /* - * old device drivers set dev->trans_start -+ * -+ * (Actually, not only "old" devices, but also -+ * those which perform queue management in a -+ * separate hw accelerator. So even though the -+ * net device itself is single-queued, it makes -+ * sense (and is safe, too) to use kernel's -+ * multiqueue interface, specifically to avoid -+ * unnecessary device locking in SMP systems. -+ * In this case, we ought to consider not an -+ * individual txq's timestamp as a congestion -+ * indicator, but the "old" per-netdev field.) - */ -- trans_start = txq->trans_start ? : dev->trans_start; -+ if (dev->features & NETIF_F_HW_ACCEL_MQ) -+ trans_start = dev->trans_start; -+ else -+ trans_start = txq->trans_start ? 
: -+ dev->trans_start; - if (netif_xmit_stopped(txq) && - time_after(jiffies, (trans_start + - dev->watchdog_timeo))) { diff --git a/target/linux/layerscape/patches-4.4/7024-Add-APIs-to-setup-HugeTLB-mappings-for-USDPAA.patch b/target/linux/layerscape/patches-4.4/7024-Add-APIs-to-setup-HugeTLB-mappings-for-USDPAA.patch deleted file mode 100644 index f0247f0de..000000000 --- a/target/linux/layerscape/patches-4.4/7024-Add-APIs-to-setup-HugeTLB-mappings-for-USDPAA.patch +++ /dev/null @@ -1,59 +0,0 @@ -From 2dcd28f7b4f5b92686da45f4ec46802026fb8573 Mon Sep 17 00:00:00 2001 -From: Roy Pledge -Date: Mon, 11 Jul 2016 14:34:59 +0800 -Subject: [PATCH 24/70] Add APIs to setup HugeTLB mappings for USDPAA - -commit 189653d40d5dc41b8bd4cfb61f766bd8a89b1e34 -[context adjustment] - -Signed-off-by: Zhao Qiang ---- - arch/powerpc/mm/fsl_booke_mmu.c | 14 +++++++++++++- - arch/powerpc/mm/mmu_decl.h | 6 ++++++ - 2 files changed, 19 insertions(+), 1 deletion(-) - ---- a/arch/powerpc/mm/fsl_booke_mmu.c -+++ b/arch/powerpc/mm/fsl_booke_mmu.c -@@ -105,7 +105,7 @@ unsigned long p_mapped_by_tlbcam(phys_ad - * an unsigned long (for example, 32-bit implementations cannot support a 4GB - * size). - */ --static void settlbcam(int index, unsigned long virt, phys_addr_t phys, -+void settlbcam(int index, unsigned long virt, phys_addr_t phys, - unsigned long size, unsigned long flags, unsigned int pid) - { - unsigned int tsize; -@@ -143,6 +143,18 @@ static void settlbcam(int index, unsigne - tlbcam_addrs[index].phys = phys; - } - -+void cleartlbcam(unsigned long virt, unsigned int pid) -+{ -+ int i = 0; -+ for (i = 0; i < NUM_TLBCAMS; i++) { -+ if (tlbcam_addrs[i].start == virt) { -+ TLBCAM[i].MAS1 = 0; -+ loadcam_entry(i); -+ return; -+ } -+ } -+} -+ - unsigned long calc_cam_sz(unsigned long ram, unsigned long virt, - phys_addr_t phys) - { ---- a/arch/powerpc/mm/mmu_decl.h -+++ b/arch/powerpc/mm/mmu_decl.h -@@ -91,6 +91,12 @@ extern void _tlbia(void); - - #endif /* CONFIG_PPC_MMU_NOHASH */ - -+void settlbcam(int index, unsigned long virt, phys_addr_t phys, -+ unsigned long size, unsigned long flags, unsigned int pid); -+ -+void cleartlbcam(unsigned long virt, unsigned int pid); -+ -+ - #ifdef CONFIG_PPC32 - - extern void mapin_ram(void); diff --git a/target/linux/layerscape/patches-4.4/7029-fmd-SGMII-PCS-needs-to-be-reprogrammed-after-sleep.patch b/target/linux/layerscape/patches-4.4/7029-fmd-SGMII-PCS-needs-to-be-reprogrammed-after-sleep.patch deleted file mode 100644 index 8c10e3d9f..000000000 --- a/target/linux/layerscape/patches-4.4/7029-fmd-SGMII-PCS-needs-to-be-reprogrammed-after-sleep.patch +++ /dev/null @@ -1,228 +0,0 @@ -From afcfdda960da9d9ad4c4d21c7dd0cc7791cf36c7 Mon Sep 17 00:00:00 2001 -From: Madalin Bucur -Date: Wed, 4 May 2016 19:24:53 +0300 -Subject: [PATCH 29/70] fmd: SGMII PCS needs to be reprogrammed after sleep - -Signed-off-by: Madalin Bucur ---- - drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c | 6 ++ - .../freescale/sdk_fman/Peripherals/FM/MAC/dtsec.c | 1 + - .../freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.c | 12 ++++ - .../freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.h | 1 + - .../freescale/sdk_fman/Peripherals/FM/MAC/memac.c | 58 +++++++++++--------- - .../freescale/sdk_fman/Peripherals/FM/MAC/tgec.c | 1 + - .../sdk_fman/inc/Peripherals/fm_mac_ext.h | 13 +++++ - .../sdk_fman/src/inc/wrapper/lnxwrp_fsl_fman.h | 2 + - .../freescale/sdk_fman/src/wrapper/lnxwrp_fm.c | 14 +++++ - 9 files changed, 83 insertions(+), 25 deletions(-) - ---- a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c -+++ 
b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c -@@ -175,6 +175,12 @@ static int dpaa_resume(struct device *de - priv = netdev_priv(net_dev); - mac_dev = priv->mac_dev; - -+ err = fm_mac_resume(mac_dev->get_mac_handle(mac_dev)); -+ if (err) { -+ netdev_err(net_dev, "fm_mac_resume = %d\n", err); -+ goto resume_failed; -+ } -+ - err = fm_port_resume(mac_dev->port_dev[TX]); - if (err) { - netdev_err(net_dev, "fm_port_resume(TX) = %d\n", err); ---- a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec.c -+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec.c -@@ -1371,6 +1371,7 @@ static void InitFmMacControllerDriver(t_ - - p_FmMacControllerDriver->f_FM_MAC_Enable = DtsecEnable; - p_FmMacControllerDriver->f_FM_MAC_Disable = DtsecDisable; -+ p_FmMacControllerDriver->f_FM_MAC_Resume = NULL; - - p_FmMacControllerDriver->f_FM_MAC_SetException = DtsecSetException; - ---- a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.c -+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.c -@@ -299,6 +299,18 @@ t_Error FM_MAC_Disable (t_Handle h_FmMac - RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); - } - -+t_Error FM_MAC_Resume (t_Handle h_FmMac) -+{ -+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; -+ -+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); -+ -+ if (p_FmMacControllerDriver->f_FM_MAC_Resume) -+ return p_FmMacControllerDriver->f_FM_MAC_Resume(h_FmMac); -+ -+ return E_OK; -+} -+ - /* ......................................................................... */ - - t_Error FM_MAC_Enable1588TimeStamp (t_Handle h_FmMac) ---- a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.h -+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.h -@@ -91,6 +91,7 @@ typedef struct { - - t_Error (*f_FM_MAC_Enable) (t_Handle h_FmMac, e_CommMode mode); - t_Error (*f_FM_MAC_Disable) (t_Handle h_FmMac, e_CommMode mode); -+ t_Error (*f_FM_MAC_Resume) (t_Handle h_FmMac); - t_Error (*f_FM_MAC_Enable1588TimeStamp) (t_Handle h_FmMac); - t_Error (*f_FM_MAC_Disable1588TimeStamp) (t_Handle h_FmMac); - t_Error (*f_FM_MAC_Reset) (t_Handle h_FmMac, bool wait); ---- a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.c -+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.c -@@ -812,6 +812,37 @@ static uint16_t MemacGetMaxFrameLength(t - return fman_memac_get_max_frame_len(p_Memac->p_MemMap); - } - -+static t_Error MemacInitInternalPhy(t_Handle h_Memac) -+{ -+ t_Memac *p_Memac = (t_Memac *)h_Memac; -+ uint8_t i, phyAddr; -+ -+ if (ENET_INTERFACE_FROM_MODE(p_Memac->enetMode) == e_ENET_IF_SGMII) -+ { -+ /* Configure internal SGMII PHY */ -+ if (p_Memac->enetMode & ENET_IF_SGMII_BASEX) -+ SetupSgmiiInternalPhyBaseX(p_Memac, PHY_MDIO_ADDR); -+ else -+ SetupSgmiiInternalPhy(p_Memac, PHY_MDIO_ADDR); -+ } -+ else if (ENET_INTERFACE_FROM_MODE(p_Memac->enetMode) == e_ENET_IF_QSGMII) -+ { -+ /* Configure 4 internal SGMII PHYs */ -+ for (i = 0; i < 4; i++) -+ { -+ /* QSGMII PHY address occupies 3 upper bits of 5-bit -+ phyAddress; the lower 2 bits are used to extend -+ register address space and access each one of 4 -+ ports inside QSGMII. 
*/ -+ phyAddr = (uint8_t)((PHY_MDIO_ADDR << 2) | i); -+ if (p_Memac->enetMode & ENET_IF_SGMII_BASEX) -+ SetupSgmiiInternalPhyBaseX(p_Memac, phyAddr); -+ else -+ SetupSgmiiInternalPhy(p_Memac, phyAddr); -+ } -+ } -+ return E_OK; -+} - - /*****************************************************************************/ - /* mEMAC Init & Free API */ -@@ -825,7 +856,6 @@ static t_Error MemacInit(t_Handle h_Mema - struct memac_cfg *p_MemacDriverParam; - enum enet_interface enet_interface; - enum enet_speed enet_speed; -- uint8_t i, phyAddr; - t_EnetAddr ethAddr; - e_FmMacType portType; - t_Error err; -@@ -887,30 +917,7 @@ static t_Error MemacInit(t_Handle h_Mema - } - #endif /* FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 */ - -- if (ENET_INTERFACE_FROM_MODE(p_Memac->enetMode) == e_ENET_IF_SGMII) -- { -- /* Configure internal SGMII PHY */ -- if (p_Memac->enetMode & ENET_IF_SGMII_BASEX) -- SetupSgmiiInternalPhyBaseX(p_Memac, PHY_MDIO_ADDR); -- else -- SetupSgmiiInternalPhy(p_Memac, PHY_MDIO_ADDR); -- } -- else if (ENET_INTERFACE_FROM_MODE(p_Memac->enetMode) == e_ENET_IF_QSGMII) -- { -- /* Configure 4 internal SGMII PHYs */ -- for (i = 0; i < 4; i++) -- { -- /* QSGMII PHY address occupies 3 upper bits of 5-bit -- phyAddress; the lower 2 bits are used to extend -- register address space and access each one of 4 -- ports inside QSGMII. */ -- phyAddr = (uint8_t)((PHY_MDIO_ADDR << 2) | i); -- if (p_Memac->enetMode & ENET_IF_SGMII_BASEX) -- SetupSgmiiInternalPhyBaseX(p_Memac, phyAddr); -- else -- SetupSgmiiInternalPhy(p_Memac, phyAddr); -- } -- } -+ MemacInitInternalPhy(h_Memac); - - /* Max Frame Length */ - err = FmSetMacMaxFrame(p_Memac->fmMacControllerDriver.h_Fm, -@@ -1008,6 +1015,7 @@ static void InitFmMacControllerDriver(t_ - - p_FmMacControllerDriver->f_FM_MAC_Enable = MemacEnable; - p_FmMacControllerDriver->f_FM_MAC_Disable = MemacDisable; -+ p_FmMacControllerDriver->f_FM_MAC_Resume = MemacInitInternalPhy; - - p_FmMacControllerDriver->f_FM_MAC_SetTxAutoPauseFrames = MemacSetTxAutoPauseFrames; - p_FmMacControllerDriver->f_FM_MAC_SetTxPauseFrames = MemacSetTxPauseFrames; ---- a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec.c -+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec.c -@@ -897,6 +897,7 @@ static void InitFmMacControllerDriver(t_ - - p_FmMacControllerDriver->f_FM_MAC_Enable = TgecEnable; - p_FmMacControllerDriver->f_FM_MAC_Disable = TgecDisable; -+ p_FmMacControllerDriver->f_FM_MAC_Resume = NULL; - - p_FmMacControllerDriver->f_FM_MAC_SetTxAutoPauseFrames = TgecTxMacPause; - p_FmMacControllerDriver->f_FM_MAC_SetTxPauseFrames = TgecSetTxPauseFrames; ---- a/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_mac_ext.h -+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_mac_ext.h -@@ -467,6 +467,19 @@ t_Error FM_MAC_Enable(t_Handle h_FmMac, - t_Error FM_MAC_Disable(t_Handle h_FmMac, e_CommMode mode); - - /**************************************************************************//** -+ @Function FM_MAC_Resume -+ -+ @Description Re-init the MAC after suspend -+ -+ @Param[in] h_FmMac A handle to a FM MAC Module. -+ -+ @Return E_OK on success; Error code otherwise. -+ -+ @Cautions Allowed only following FM_MAC_Init(). -+*//***************************************************************************/ -+t_Error FM_MAC_Resume(t_Handle h_FmMac); -+ -+/**************************************************************************//** - @Function FM_MAC_Enable1588TimeStamp - - @Description Enables the TSU operation. 
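The resume hook added above follows an optional-callback convention: MAC types with nothing to reprogram after sleep leave f_FM_MAC_Resume NULL (dtsec, tgec) and the FM_MAC_Resume() wrapper treats that as success, while memac points the hook at MemacInitInternalPhy so the SGMII PCS gets set up again. A minimal sketch of that dispatch, with the driver's t_Error/t_Handle typedefs reduced to int/void * purely for illustration:

/* Illustrative sketch of the optional resume hook; t_Error and t_Handle
 * are simplified to int and void * here -- not the driver's real types. */
struct fm_mac_driver_sketch {
	int (*f_resume)(void *h_fm_mac); /* NULL: nothing to redo after sleep */
};

static int fm_mac_resume_sketch(struct fm_mac_driver_sketch *drv,
				void *h_fm_mac)
{
	if (drv->f_resume)		/* memac: MemacInitInternalPhy */
		return drv->f_resume(h_fm_mac);
	return 0;			/* dtsec/tgec: no-op, report success */
}

Because a NULL hook is not an error, the dpaa_eth resume path can call fm_mac_resume() unconditionally for every MAC type.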
---- a/drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_fsl_fman.h -+++ b/drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_fsl_fman.h -@@ -765,6 +765,8 @@ int fm_mac_enable(struct fm_mac_dev *fm_ - - int fm_mac_disable(struct fm_mac_dev *fm_mac_dev); - -+int fm_mac_resume(struct fm_mac_dev *fm_mac_dev); -+ - int fm_mac_set_promiscuous(struct fm_mac_dev *fm_mac_dev, - bool enable); - ---- a/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm.c -+++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm.c -@@ -1733,6 +1733,20 @@ int fm_mac_disable(struct fm_mac_dev *fm - } - EXPORT_SYMBOL(fm_mac_disable); - -+int fm_mac_resume(struct fm_mac_dev *fm_mac_dev) -+{ -+ int _errno; -+ t_Error err; -+ -+ err = FM_MAC_Resume(fm_mac_dev); -+ _errno = -GET_ERROR_TYPE(err); -+ if (unlikely(_errno < 0)) -+ pr_err("FM_MAC_Resume() = 0x%08x\n", err); -+ -+ return _errno; -+} -+EXPORT_SYMBOL(fm_mac_resume); -+ - int fm_mac_set_promiscuous(struct fm_mac_dev *fm_mac_dev, - bool enable) - { diff --git a/target/linux/layerscape/patches-4.4/7030-fmd-use-kernel-api-for-64bit-division.patch b/target/linux/layerscape/patches-4.4/7030-fmd-use-kernel-api-for-64bit-division.patch deleted file mode 100644 index cd2283ca7..000000000 --- a/target/linux/layerscape/patches-4.4/7030-fmd-use-kernel-api-for-64bit-division.patch +++ /dev/null @@ -1,178 +0,0 @@ -From c86be7b4f9ca4f4c8c916a9c350edbcd0e86a528 Mon Sep 17 00:00:00 2001 -From: Madalin Bucur -Date: Mon, 18 Apr 2016 16:41:59 +0300 -Subject: [PATCH 30/70] fmd: use kernel api for 64bit division - -Signed-off-by: Madalin Bucur ---- - .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.c | 1 + - .../sdk_fman/Peripherals/FM/Pcd/fm_plcr.c | 5 +++-- - .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.c | 1 + - .../freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.c | 12 ++++++------ - .../freescale/sdk_fman/Peripherals/FM/fm.c | 5 +++-- - .../freescale/sdk_fman/Peripherals/FM/fman.c | 7 +++---- - .../net/ethernet/freescale/sdk_fman/inc/math_ext.h | 1 + - .../net/ethernet/freescale/sdk_fman/inc/ncsw_ext.h | 2 +- - 8 files changed, 19 insertions(+), 15 deletions(-) - ---- a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.c -+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.c -@@ -36,6 +36,7 @@ - - @Description FM Coarse Classifier implementation - *//***************************************************************************/ -+#include - #include "std_ext.h" - #include "error_ext.h" - #include "string_ext.h" ---- a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_plcr.c -+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_plcr.c -@@ -36,6 +36,7 @@ - - @Description FM PCD POLICER... 
- *//***************************************************************************/ -+#include - #include "std_ext.h" - #include "error_ext.h" - #include "string_ext.h" -@@ -205,13 +206,13 @@ static void GetInfoRateReg(e_FmPcdPlcrRa - div = 1000000000; /* nano */ - div *= 10; /* 10 nano */ - } -- *p_Integer = (tmp< - #include "std_ext.h" - #include "error_ext.h" - #include "string_ext.h" ---- a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.c -+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.c -@@ -38,7 +38,7 @@ - - @Cautions None - *//***************************************************************************/ -- -+#include - #include "error_ext.h" - #include "debug_ext.h" - #include "string_ext.h" -@@ -470,11 +470,11 @@ t_Error FM_RTC_SetAlarm(t_Handle h_FmRtc - RETURN_ERROR(MAJOR, E_INVALID_SELECTION, - ("Alarm time must be equal or larger than RTC period - %d nanoseconds", - p_Rtc->clockPeriodNanoSec)); -- if (p_FmRtcAlarmParams->alarmTime % (uint64_t)p_Rtc->clockPeriodNanoSec) -+ tmpAlarm = p_FmRtcAlarmParams->alarmTime; -+ if (do_div(tmpAlarm, p_Rtc->clockPeriodNanoSec)) - RETURN_ERROR(MAJOR, E_INVALID_SELECTION, - ("Alarm time must be a multiple of RTC period - %d nanoseconds", - p_Rtc->clockPeriodNanoSec)); -- tmpAlarm = p_FmRtcAlarmParams->alarmTime/(uint64_t)p_Rtc->clockPeriodNanoSec; - - if (p_FmRtcAlarmParams->f_AlarmCallback) - { -@@ -508,11 +508,11 @@ t_Error FM_RTC_SetPeriodicPulse(t_Handle - RETURN_ERROR(MAJOR, E_INVALID_SELECTION, - ("Periodic pulse must be equal or larger than RTC period - %d nanoseconds", - p_Rtc->clockPeriodNanoSec)); -- if (p_FmRtcPeriodicPulseParams->periodicPulsePeriod % (uint64_t)p_Rtc->clockPeriodNanoSec) -+ tmpFiper = p_FmRtcPeriodicPulseParams->periodicPulsePeriod; -+ if (do_div(tmpFiper, p_Rtc->clockPeriodNanoSec)) - RETURN_ERROR(MAJOR, E_INVALID_SELECTION, - ("Periodic pulse must be a multiple of RTC period - %d nanoseconds", - p_Rtc->clockPeriodNanoSec)); -- tmpFiper = p_FmRtcPeriodicPulseParams->periodicPulsePeriod/(uint64_t)p_Rtc->clockPeriodNanoSec; - if (tmpFiper & 0xffffffff00000000LL) - RETURN_ERROR(MAJOR, E_INVALID_SELECTION, - ("Periodic pulse/RTC Period must be smaller than 4294967296", -@@ -628,7 +628,7 @@ t_Error FM_RTC_SetCurrentTime(t_Handle h - SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); - SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE); - -- ts = ts/p_Rtc->clockPeriodNanoSec; -+ do_div(ts, p_Rtc->clockPeriodNanoSec); - fman_rtc_set_timer(p_Rtc->p_MemMap, (int64_t)ts); - - return E_OK; ---- a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm.c -+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm.c -@@ -43,6 +43,7 @@ - #include "sprint_ext.h" - #include "debug_ext.h" - #include "fm_muram_ext.h" -+#include - - #include "fm_common.h" - #include "fm_ipc.h" -@@ -5087,9 +5088,9 @@ t_Error FM_CtrlMonGetCounters(t_Handle h - effValue = (uint64_t) - ((uint64_t)GET_UINT32(p_MonRegs->tpc2h) << 32 | GET_UINT32(p_MonRegs->tpc2l)); - -- p_Mon->percentCnt[0] = (uint8_t)((clkCnt - utilValue) * 100 / clkCnt); -+ p_Mon->percentCnt[0] = (uint8_t)div64_u64((clkCnt - utilValue) * 100, clkCnt); - if (clkCnt != utilValue) -- p_Mon->percentCnt[1] = (uint8_t)(((clkCnt - utilValue) - effValue) * 100 / (clkCnt - utilValue)); -+ p_Mon->percentCnt[1] = (uint8_t)div64_u64(((clkCnt - utilValue) - effValue) * 100, clkCnt - utilValue); - else - p_Mon->percentCnt[1] = 0; - ---- a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fman.c -+++ 
b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fman.c -@@ -31,7 +31,7 @@ - */ - - -- -+#include - #include "fsl_fman.h" - #include "dpaa_integration_ext.h" - -@@ -186,10 +186,9 @@ void fman_enable_time_stamp(struct fman_ - * we do not div back, since we write this value as a fraction - * see spec */ - -- frac = (((uint64_t)ts_freq << 16) - ((uint64_t)intgr << 16) * fm_clk_freq) -- / fm_clk_freq; -+ frac = ((uint64_t)ts_freq << 16) - ((uint64_t)intgr << 16) * fm_clk_freq; - /* we check remainder of the division in order to round up if not int */ -- if (((ts_freq << 16) - (intgr << 16)*fm_clk_freq) % fm_clk_freq) -+ if (do_div(frac, fm_clk_freq)) - frac++; - - tmp = (intgr << FPM_TS_INT_SHIFT) | (uint16_t)frac; ---- a/drivers/net/ethernet/freescale/sdk_fman/inc/math_ext.h -+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/math_ext.h -@@ -37,6 +37,7 @@ - - #if defined(NCSW_LINUX) && defined(__KERNEL__) - #include -+#include - - #elif defined(__MWERKS__) - #define LOW(x) ( sizeof(x)==8 ? *(1+(int32_t*)&x) : (*(int32_t*)&x)) ---- a/drivers/net/ethernet/freescale/sdk_fman/inc/ncsw_ext.h -+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/ncsw_ext.h -@@ -142,7 +142,7 @@ do - } while (0) - - /* Ceiling division - not the fastest way, but safer in terms of overflow */ --#define DIV_CEIL(x,y) (((x)/(y)) + ((((((x)/(y)))*(y)) == (x)) ? 0 : 1)) -+#define DIV_CEIL(x,y) (div64_u64((x),(y)) + (((div64_u64((x),(y))*(y)) == (x)) ? 0 : 1)) - - /* Round up a number to be a multiple of a second number */ - #define ROUND_UP(x,y) ((((x) + (y) - 1) / (y)) * (y)) diff --git a/target/linux/layerscape/patches-4.4/7031-fsl_qbman-Enable-DPAA1-QBMan-for-ARM64-platforms.patch b/target/linux/layerscape/patches-4.4/7031-fsl_qbman-Enable-DPAA1-QBMan-for-ARM64-platforms.patch deleted file mode 100644 index 24f79895d..000000000 --- a/target/linux/layerscape/patches-4.4/7031-fsl_qbman-Enable-DPAA1-QBMan-for-ARM64-platforms.patch +++ /dev/null @@ -1,31 +0,0 @@ -From d0a7d97bdfb7a755d55bb53725d71f81aab90fe7 Mon Sep 17 00:00:00 2001 -From: Roy Pledge -Date: Thu, 14 Apr 2016 10:21:02 +0800 -Subject: [PATCH 31/70] fsl_qbman: Enable DPAA1 QBMan for ARM64 platforms - -commit ddb0ae0f9d638efc70402d4fb570bca3a3178091 -[context adjustment] - -Enable the QBMan device for ARM64 platofrms. 
This is needed for -ARM based SoCs that have DPAA1 such as the LS1043A - -Signed-off-by: Roy Pledge -Integrated-by: Zhao Qiang ---- - arch/arm64/Kconfig | 5 +++++ - 1 file changed, 5 insertions(+) - ---- a/arch/arm64/Kconfig -+++ b/arch/arm64/Kconfig -@@ -187,6 +187,11 @@ source "arch/arm64/Kconfig.platforms" - - menu "Bus support" - -+config HAS_FSL_QBMAN -+ bool "Datapath Acceleration Queue and Buffer management" -+ help -+ Datapath Acceleration Queue and Buffer management -+ - config PCI - bool "PCI support" - help diff --git a/target/linux/layerscape/patches-4.4/7064-dpaa_eth-repair-issue-introduced-with-2.5G-support.patch b/target/linux/layerscape/patches-4.4/7064-dpaa_eth-repair-issue-introduced-with-2.5G-support.patch deleted file mode 100644 index 929e2103b..000000000 --- a/target/linux/layerscape/patches-4.4/7064-dpaa_eth-repair-issue-introduced-with-2.5G-support.patch +++ /dev/null @@ -1,22 +0,0 @@ -From e555d24e5df9b2d5b8a613ff46af68fc3ff3a360 Mon Sep 17 00:00:00 2001 -From: Madalin Bucur -Date: Wed, 30 Mar 2016 16:12:33 +0300 -Subject: [PATCH 64/70] dpaa_eth: repair issue introduced with 2.5G support - -Signed-off-by: Madalin Bucur ---- - drivers/net/ethernet/freescale/sdk_dpaa/mac.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - ---- a/drivers/net/ethernet/freescale/sdk_dpaa/mac.c -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.c -@@ -346,7 +346,8 @@ static int __cold mac_probe(struct platf - SUPPORTED_100baseT_Half); - - /* Gigabit support (no half-duplex) */ -- if (mac_dev->max_speed == 1000) -+ if (mac_dev->max_speed == SPEED_1000 || -+ mac_dev->max_speed == SPEED_2500) - mac_dev->if_support |= SUPPORTED_1000baseT_Full; - - /* The 10G interface only supports one mode */ diff --git a/target/linux/layerscape/patches-4.4/7065-dpaa_eth-replace-sgmii-2500-with-qsgmii.patch b/target/linux/layerscape/patches-4.4/7065-dpaa_eth-replace-sgmii-2500-with-qsgmii.patch deleted file mode 100644 index ed44a1f27..000000000 --- a/target/linux/layerscape/patches-4.4/7065-dpaa_eth-replace-sgmii-2500-with-qsgmii.patch +++ /dev/null @@ -1,39 +0,0 @@ -From a768af05b1ffe644f54303036e5c048952e0f721 Mon Sep 17 00:00:00 2001 -From: Madalin Bucur -Date: Mon, 4 Apr 2016 20:23:21 +0300 -Subject: [PATCH 65/70] dpaa_eth: replace sgmii-2500 with qsgmii - -Signed-off-by: Madalin Bucur ---- - drivers/net/ethernet/freescale/sdk_dpaa/mac.c | 6 ------ - 1 file changed, 6 deletions(-) - ---- a/drivers/net/ethernet/freescale/sdk_dpaa/mac.c -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.c -@@ -74,7 +74,6 @@ static const char phy_str[][11] = { - [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid", - [PHY_INTERFACE_MODE_RTBI] = "rtbi", - [PHY_INTERFACE_MODE_XGMII] = "xgmii", -- [PHY_INTERFACE_MODE_QSGMII] = "sgmii-2500" - }; - - static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str) -@@ -101,7 +100,6 @@ static const uint16_t phy2speed[] = { - [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000, - [PHY_INTERFACE_MODE_RTBI] = SPEED_1000, - [PHY_INTERFACE_MODE_XGMII] = SPEED_10000, -- [PHY_INTERFACE_MODE_QSGMII] = SPEED_2500 - }; - - static struct mac_device * __cold -@@ -341,10 +339,6 @@ static int __cold mac_probe(struct platf - mac_dev->if_support &= ~(SUPPORTED_10baseT_Half | - SUPPORTED_100baseT_Half); - -- if (strstr(char_prop, "sgmii-2500")) -- mac_dev->if_support &= ~(SUPPORTED_10baseT_Half | -- SUPPORTED_100baseT_Half); -- - /* Gigabit support (no half-duplex) */ - if (mac_dev->max_speed == SPEED_1000 || - mac_dev->max_speed == SPEED_2500) diff --git 
a/target/linux/layerscape/patches-4.4/7066-fmd-add-2.5G-SGMII-mode-suport.patch b/target/linux/layerscape/patches-4.4/7066-fmd-add-2.5G-SGMII-mode-suport.patch deleted file mode 100644 index 4014ab4e2..000000000 --- a/target/linux/layerscape/patches-4.4/7066-fmd-add-2.5G-SGMII-mode-suport.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 95d12688fa875f7a00590aaf6de5f34d55531d68 Mon Sep 17 00:00:00 2001 -From: Madalin Bucur -Date: Thu, 14 Apr 2016 14:12:38 +0300 -Subject: [PATCH 66/70] fmd: add 2.5G SGMII mode suport - -Signed-off-by: Madalin Bucur ---- - drivers/net/ethernet/freescale/sdk_dpaa/mac.c | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - ---- a/drivers/net/ethernet/freescale/sdk_dpaa/mac.c -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.c -@@ -74,6 +74,7 @@ static const char phy_str[][11] = { - [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid", - [PHY_INTERFACE_MODE_RTBI] = "rtbi", - [PHY_INTERFACE_MODE_XGMII] = "xgmii", -+ [PHY_INTERFACE_MODE_SGMII_2500] = "sgmii-2500", - }; - - static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str) -@@ -100,6 +101,7 @@ static const uint16_t phy2speed[] = { - [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000, - [PHY_INTERFACE_MODE_RTBI] = SPEED_1000, - [PHY_INTERFACE_MODE_XGMII] = SPEED_10000, -+ [PHY_INTERFACE_MODE_SGMII_2500] = SPEED_2500, - }; - - static struct mac_device * __cold -@@ -335,7 +337,8 @@ static int __cold mac_probe(struct platf - mac_dev->max_speed = mac_dev->speed; - mac_dev->if_support = DTSEC_SUPPORTED; - /* We don't support half-duplex in SGMII mode */ -- if (strstr(char_prop, "sgmii") || strstr(char_prop, "qsgmii")) -+ if (strstr(char_prop, "sgmii") || strstr(char_prop, "qsgmii") || -+ strstr(char_prop, "sgmii-2500")) - mac_dev->if_support &= ~(SUPPORTED_10baseT_Half | - SUPPORTED_100baseT_Half); - diff --git a/target/linux/layerscape/patches-4.4/7067-net-phy-add-SGMII-2500-PHY.patch b/target/linux/layerscape/patches-4.4/7067-net-phy-add-SGMII-2500-PHY.patch deleted file mode 100644 index 42e6b5cd9..000000000 --- a/target/linux/layerscape/patches-4.4/7067-net-phy-add-SGMII-2500-PHY.patch +++ /dev/null @@ -1,20 +0,0 @@ -From 6e4322d938f01f58156b0a5929daa648ecad4754 Mon Sep 17 00:00:00 2001 -From: Madalin Bucur -Date: Thu, 14 Apr 2016 12:03:52 +0300 -Subject: [PATCH 67/70] net: phy: add SGMII 2500 PHY - -Signed-off-by: Madalin Bucur ---- - include/linux/phy.h | 1 + - 1 file changed, 1 insertion(+) - ---- a/include/linux/phy.h -+++ b/include/linux/phy.h -@@ -77,6 +77,7 @@ typedef enum { - PHY_INTERFACE_MODE_XGMII, - PHY_INTERFACE_MODE_MOCA, - PHY_INTERFACE_MODE_QSGMII, -+ PHY_INTERFACE_MODE_SGMII_2500, - PHY_INTERFACE_MODE_MAX, - } phy_interface_t; - diff --git a/target/linux/layerscape/patches-4.4/7068-dpaa_ethernet-fix-link-state-detect-for-10G-interfac.patch b/target/linux/layerscape/patches-4.4/7068-dpaa_ethernet-fix-link-state-detect-for-10G-interfac.patch deleted file mode 100644 index 08dafdda0..000000000 --- a/target/linux/layerscape/patches-4.4/7068-dpaa_ethernet-fix-link-state-detect-for-10G-interfac.patch +++ /dev/null @@ -1,63 +0,0 @@ -From f77f25498902f84d53a64a6397db2fa4b0d0dd4b Mon Sep 17 00:00:00 2001 -From: Shaohui Xie -Date: Thu, 31 Mar 2016 10:53:06 +0800 -Subject: [PATCH 68/70] dpaa_ethernet: fix link state detect for 10G interface - -There are drivers to support 10G PHYs with copper interface, so we -change binding between MAC and 10G PHY to use phy_state_machine to -detect link state. 
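The fix hinges on which helper binds the PHY: of_phy_attach() only associates the PHY device with the net device, which is why 10G links previously had to be polled by hand through drv->read_status(), whereas of_phy_connect() also registers a link-change callback so that phy_start() arms the PHY state machine. A minimal sketch of the binding the patch converts to; the _sketch names and the empty adjust_link body are placeholders, not the driver's real code:

/* Assumes <linux/phy.h> and <linux/of_mdio.h>; identifiers ending in
 * _sketch are illustrative only. */
static void adjust_link_sketch(struct net_device *net_dev)
{
	/* invoked by the PHY state machine on each link change; the real
	 * driver reprograms MAC speed/duplex here */
}

static int init_phy_sketch(struct net_device *net_dev,
			   struct device_node *phy_node,
			   phy_interface_t phy_if)
{
	struct phy_device *phy_dev;

	/* unlike of_phy_attach(), this registers the handler and enables
	 * the PHY state machine */
	phy_dev = of_phy_connect(net_dev, phy_node, &adjust_link_sketch,
				 0, phy_if);
	if (!phy_dev)
		return -ENODEV;

	phy_start(phy_dev);	/* periodic link detection now covers 10G too */
	return 0;
}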
- -Signed-off-by: Shaohui Xie ---- - drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c | 20 ++++++++------------ - 1 file changed, 8 insertions(+), 12 deletions(-) - ---- a/drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c -@@ -279,20 +279,15 @@ static int __cold start(struct mac_devic - - _errno = fm_mac_enable(mac_dev->get_mac_handle(mac_dev)); - -- if (!_errno && phy_dev) { -- if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) -- phy_start(phy_dev); -- else if (phy_dev->drv->read_status) -- phy_dev->drv->read_status(phy_dev); -- } -+ if (!_errno && phy_dev) -+ phy_start(phy_dev); - - return _errno; - } - - static int __cold stop(struct mac_device *mac_dev) - { -- if (mac_dev->phy_dev && -- (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000)) -+ if (mac_dev->phy_dev) - phy_stop(mac_dev->phy_dev); - - return fm_mac_disable(mac_dev->get_mac_handle(mac_dev)); -@@ -477,8 +472,8 @@ static int xgmac_init_phy(struct net_dev - phy_dev = phy_attach(net_dev, mac_dev->fixed_bus_id, - mac_dev->phy_if); - else -- phy_dev = of_phy_attach(net_dev, mac_dev->phy_node, 0, -- mac_dev->phy_if); -+ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node, -+ &adjust_link, 0, mac_dev->phy_if); - if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) { - netdev_err(net_dev, "Could not attach to PHY %s\n", - mac_dev->phy_node ? -@@ -510,8 +505,9 @@ static int memac_init_phy(struct net_dev - mac_dev->phy_dev = NULL; - return 0; - } else -- phy_dev = of_phy_attach(net_dev, mac_dev->phy_node, 0, -- mac_dev->phy_if); -+ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node, -+ &adjust_link, 0, -+ mac_dev->phy_if); - } else { - if (!mac_dev->phy_node) - phy_dev = phy_connect(net_dev, mac_dev->fixed_bus_id, diff --git a/target/linux/layerscape/patches-4.4/7072-LS1012-Add-PPFE-driver-in-Linux.patch b/target/linux/layerscape/patches-4.4/7072-LS1012-Add-PPFE-driver-in-Linux.patch deleted file mode 100644 index 21339e819..000000000 --- a/target/linux/layerscape/patches-4.4/7072-LS1012-Add-PPFE-driver-in-Linux.patch +++ /dev/null @@ -1,15167 +0,0 @@ -From 0157efe2fbe2fe56c34727d326cd74284c06cbd5 Mon Sep 17 00:00:00 2001 -From: Bhaskar Upadhaya -Date: Wed, 24 Aug 2016 10:51:21 +0800 -Subject: [PATCH 072/113] LS1012: Add PPFE driver in Linux - -commit 7584b690d4c8e4e435c2e6abcdb38d6595a0c302 -[context adjustment] -[don't apply fsl-ls1012a-rdb.dts and fsl-ls1012a.dtsi] -[Let PPFE driver can be selectd as a module] - -Signed-off-by: Bhaskar Upadhaya -Integrated-by: Zhao Qiang -Integrated-by: Yutang Jiang ---- - drivers/staging/Kconfig | 2 + - drivers/staging/Makefile | 1 + - drivers/staging/fsl_ppfe/Kconfig | 5 + - drivers/staging/fsl_ppfe/Makefile | 44 + - drivers/staging/fsl_ppfe/config.h | 8 + - drivers/staging/fsl_ppfe/control_link.lds | 32 + - drivers/staging/fsl_ppfe/include/pfe/cbus.h | 88 + - drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h | 55 + - .../staging/fsl_ppfe/include/pfe/cbus/class_csr.h | 242 ++ - drivers/staging/fsl_ppfe/include/pfe/cbus/emac.h | 243 ++ - .../staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h | 250 ++ - drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h | 78 + - drivers/staging/fsl_ppfe/include/pfe/cbus/gpt.h | 29 + - drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h | 96 + - .../staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h | 51 + - .../staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h | 128 + - .../staging/fsl_ppfe/include/pfe/cbus/util_csr.h | 61 + - drivers/staging/fsl_ppfe/include/pfe/class.h | 133 + - 
drivers/staging/fsl_ppfe/include/pfe/class/ccu.h | 28 + - drivers/staging/fsl_ppfe/include/pfe/class/efet.h | 44 + - .../staging/fsl_ppfe/include/pfe/class/mac_hash.h | 55 + - drivers/staging/fsl_ppfe/include/pfe/class/perg.h | 39 + - .../staging/fsl_ppfe/include/pfe/class/vlan_hash.h | 46 + - drivers/staging/fsl_ppfe/include/pfe/gpt.h | 44 + - drivers/staging/fsl_ppfe/include/pfe/pe.h | 626 +++++ - drivers/staging/fsl_ppfe/include/pfe/pfe.h | 444 +++ - drivers/staging/fsl_ppfe/include/pfe/tmu.h | 68 + - .../staging/fsl_ppfe/include/pfe/tmu/phy_queue.h | 56 + - drivers/staging/fsl_ppfe/include/pfe/tmu/sched.h | 72 + - drivers/staging/fsl_ppfe/include/pfe/tmu/shaper.h | 37 + - drivers/staging/fsl_ppfe/include/pfe/uart.h | 31 + - drivers/staging/fsl_ppfe/include/pfe/util.h | 49 + - drivers/staging/fsl_ppfe/include/pfe/util/eape.h | 57 + - drivers/staging/fsl_ppfe/include/pfe/util/efet.h | 119 + - drivers/staging/fsl_ppfe/include/pfe/util/inq.h | 28 + - drivers/staging/fsl_ppfe/pfe_ctrl.c | 363 +++ - drivers/staging/fsl_ppfe/pfe_ctrl.h | 111 + - drivers/staging/fsl_ppfe/pfe_ctrl_hal.c | 207 ++ - drivers/staging/fsl_ppfe/pfe_ctrl_hal.h | 129 + - drivers/staging/fsl_ppfe/pfe_debugfs.c | 109 + - drivers/staging/fsl_ppfe/pfe_debugfs.h | 8 + - drivers/staging/fsl_ppfe/pfe_eth.c | 2956 ++++++++++++++++++++ - drivers/staging/fsl_ppfe/pfe_eth.h | 384 +++ - drivers/staging/fsl_ppfe/pfe_firmware.c | 322 +++ - drivers/staging/fsl_ppfe/pfe_firmware.h | 41 + - drivers/staging/fsl_ppfe/pfe_hal.c | 2217 +++++++++++++++ - drivers/staging/fsl_ppfe/pfe_hif.c | 939 +++++++ - drivers/staging/fsl_ppfe/pfe_hif.h | 322 +++ - drivers/staging/fsl_ppfe/pfe_hif_lib.c | 658 +++++ - drivers/staging/fsl_ppfe/pfe_hif_lib.h | 219 ++ - drivers/staging/fsl_ppfe/pfe_hw.c | 188 ++ - drivers/staging/fsl_ppfe/pfe_hw.h | 32 + - drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c | 341 +++ - drivers/staging/fsl_ppfe/pfe_mod.c | 140 + - drivers/staging/fsl_ppfe/pfe_mod.h | 163 ++ - drivers/staging/fsl_ppfe/pfe_perfmon.c | 175 ++ - drivers/staging/fsl_ppfe/pfe_perfmon.h | 41 + - drivers/staging/fsl_ppfe/pfe_platform.c | 358 +++ - drivers/staging/fsl_ppfe/pfe_sysfs.c | 855 ++++++ - drivers/staging/fsl_ppfe/pfe_sysfs.h | 34 + - drivers/staging/fsl_ppfe/platform.h | 25 + - include/linux/skbuff.h | 11 + - net/core/skbuff.c | 84 + - 63 files changed, 14821 insertions(+) - create mode 100644 drivers/staging/fsl_ppfe/Kconfig - create mode 100644 drivers/staging/fsl_ppfe/Makefile - create mode 100644 drivers/staging/fsl_ppfe/config.h - create mode 100644 drivers/staging/fsl_ppfe/control_link.lds - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/emac.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/gpt.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/class.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/class/ccu.h - create mode 100644 
drivers/staging/fsl_ppfe/include/pfe/class/efet.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/class/mac_hash.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/class/perg.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/class/vlan_hash.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/gpt.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/pe.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/pfe.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/tmu.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/tmu/phy_queue.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/tmu/sched.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/tmu/shaper.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/uart.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/util.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/util/eape.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/util/efet.h - create mode 100644 drivers/staging/fsl_ppfe/include/pfe/util/inq.h - create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.c - create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.h - create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl_hal.c - create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl_hal.h - create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.c - create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.h - create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.c - create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.h - create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.c - create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.h - create mode 100644 drivers/staging/fsl_ppfe/pfe_hal.c - create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.c - create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.h - create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.c - create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.h - create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.c - create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.h - create mode 100644 drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c - create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.c - create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.h - create mode 100644 drivers/staging/fsl_ppfe/pfe_perfmon.c - create mode 100644 drivers/staging/fsl_ppfe/pfe_perfmon.h - create mode 100644 drivers/staging/fsl_ppfe/pfe_platform.c - create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.c - create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.h - create mode 100644 drivers/staging/fsl_ppfe/platform.h - ---- a/drivers/staging/Kconfig -+++ b/drivers/staging/Kconfig -@@ -112,4 +112,6 @@ source "drivers/staging/wilc1000/Kconfig - - source "drivers/staging/most/Kconfig" - -+source "drivers/staging/fsl_ppfe/Kconfig" -+ - endif # STAGING ---- a/drivers/staging/Makefile -+++ b/drivers/staging/Makefile -@@ -48,3 +48,4 @@ obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/ - obj-$(CONFIG_FSL_DPA) += fsl_qbman/ - obj-$(CONFIG_WILC1000) += wilc1000/ - obj-$(CONFIG_MOST) += most/ -+obj-$(CONFIG_FSL_PPFE) += fsl_ppfe/ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/Kconfig -@@ -0,0 +1,5 @@ -+config FSL_PPFE -+ tristate "Freescale PPFE Driver" -+ default m -+ help -+ only compiled as module ! ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/Makefile -@@ -0,0 +1,44 @@ -+# -+# Copyright (C) 2007 Freescale Semiconductor, Inc. 
-+# -+# This program is free software; you can redistribute it and/or modify -+# it under the terms of the GNU General Public License as published by -+# the Free Software Foundation; either version 2 of the License, or -+# (at your option) any later version. -+# -+# This program is distributed in the hope that it will be useful, -+# but WITHOUT ANY WARRANTY; without even the implied warranty of -+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+# GNU General Public License for more details. -+# -+# You should have received a copy of the GNU General Public License -+# along with this program; if not, write to the Free Software -+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ -+ -+all: modules -+ -+modules clean: -+ make CROSS_COMPILE=$(CROSS_COMPILE) ARCH=$(ARCH) -C $(KERNELDIR) M=`pwd` $@ -+ -+EXTRA_CFLAGS += -I$(src)/include -I$(src) -DCOMCERTO_2000 -DCONFIG_PLATFORM_LS1012A -DGEMAC_MTIP -DCONFIG_UTIL_DISABLED -+ -+EXTRA_LDFLAGS += -T$(srctree)/$(src)/control_link.lds -+ -+#only compiled as module ! -+obj-$(CONFIG_FSL_PPFE) += pfe.o -+ -+pfe-y += pfe_mod.o \ -+ pfe_hw.o \ -+ pfe_firmware.o \ -+ pfe_ctrl.o \ -+ pfe_ctrl_hal.o \ -+ pfe_hif.o \ -+ pfe_hif_lib.o\ -+ pfe_eth.o \ -+ pfe_perfmon.o \ -+ pfe_sysfs.o \ -+ pfe_debugfs.o \ -+ pfe_ls1012a_platform.o \ -+ pfe_hal.o \ -+ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/config.h -@@ -0,0 +1,8 @@ -+#ifndef _CONFIG_H_ -+#define _CONFIG_H_ -+#define CFG_WIFI_OFFLOAD (1 << 1) -+#define CFG_ICC (1 << 11) -+#define CFG_RTP (1 << 14) -+#define CFG_ELLIPTIC (1 << 15) -+#define CFG_ALL (0 | CFG_WIFI_OFFLOAD | CFG_ICC | CFG_RTP | CFG_ELLIPTIC ) -+#endif /* _CONFIG_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/control_link.lds -@@ -0,0 +1,32 @@ -+/* -+ * -+ * Copyright (C) 2007 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+SECTIONS -+{ -+ .class_dmem_sh : SUBALIGN(8) { -+ __class_dmem_sh = .; -+ *(SORT(.class_dmem_sh_*)) -+ } -+ -+ .tmu_dmem_sh : SUBALIGN(8) { -+ __tmu_dmem_sh = .; -+ *(SORT(.tmu_dmem_sh_*)) -+ } -+ -+} ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus.h -@@ -0,0 +1,88 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+*/ -+#ifndef _CBUS_H_ -+#define _CBUS_H_ -+ -+#define EMAC1_BASE_ADDR (CBUS_BASE_ADDR + 0x200000) -+#define EGPI1_BASE_ADDR (CBUS_BASE_ADDR + 0x210000) -+#define EMAC2_BASE_ADDR (CBUS_BASE_ADDR + 0x220000) -+#define EGPI2_BASE_ADDR (CBUS_BASE_ADDR + 0x230000) -+#define BMU1_BASE_ADDR (CBUS_BASE_ADDR + 0x240000) -+#define BMU2_BASE_ADDR (CBUS_BASE_ADDR + 0x250000) -+#define ARB_BASE_ADDR (CBUS_BASE_ADDR + 0x260000) /* FIXME not documented */ -+#define DDR_CONFIG_BASE_ADDR (CBUS_BASE_ADDR + 0x270000) /* FIXME not documented */ -+#define HIF_BASE_ADDR (CBUS_BASE_ADDR + 0x280000) -+#define HGPI_BASE_ADDR (CBUS_BASE_ADDR + 0x290000) -+#define LMEM_BASE_ADDR (CBUS_BASE_ADDR + 0x300000) -+#define LMEM_SIZE 0x10000 -+#define LMEM_END (LMEM_BASE_ADDR + LMEM_SIZE) -+#define TMU_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x310000) -+#define CLASS_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x320000) -+#if defined(CONFIG_PLATFORM_C2000) -+#define EMAC3_BASE_ADDR (CBUS_BASE_ADDR + 0x330000) -+#define EGPI3_BASE_ADDR (CBUS_BASE_ADDR + 0x340000) -+#endif -+#define HIF_NOCPY_BASE_ADDR (CBUS_BASE_ADDR + 0x350000) -+#define UTIL_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x360000) -+#define CBUS_GPT_BASE_ADDR (CBUS_BASE_ADDR + 0x370000) -+ -+#define IS_LMEM(addr, len) (((unsigned long)(addr) >= (unsigned long)LMEM_BASE_ADDR) && (((unsigned long)(addr) + (len)) <= (unsigned long)LMEM_END)) -+ -+/** -+* \defgroup XXX_MEM_ACCESS_ADDR PE memory access through CSR -+* XXX_MEM_ACCESS_ADDR register bit definitions. -+* @{ -+*/ -+#define PE_MEM_ACCESS_WRITE (1<<31) /**< Internal Memory Write. */ -+#define PE_MEM_ACCESS_IMEM (1<<15) -+#define PE_MEM_ACCESS_DMEM (1<<16) -+#define PE_MEM_ACCESS_BYTE_ENABLE(offset,size) (((((1 << (size)) - 1) << (4 - (offset) - (size))) & 0xf) << 24) /**< Byte Enables of the Internal memory access. These are interpred in BE */ -+// @} -+#if defined(CONFIG_PLATFORM_LS1012A) -+#include "cbus/emac_mtip.h" -+#else -+#include "cbus/emac.h" -+#endif //CONFIG_PLATFORM_LS1012A -+#include "cbus/gpi.h" -+#include "cbus/bmu.h" -+#include "cbus/hif.h" -+#include "cbus/tmu_csr.h" -+#include "cbus/class_csr.h" -+#include "cbus/hif_nocpy.h" -+#include "cbus/util_csr.h" -+#include "cbus/gpt.h" -+ -+ -+/* PFE cores states */ -+#define CORE_DISABLE 0x00000000 -+#define CORE_ENABLE 0x00000001 -+#define CORE_SW_RESET 0x00000002 -+ -+/* LMEM defines */ -+#define LMEM_HDR_SIZE 0x0010 -+#define LMEM_BUF_SIZE_LN2 0x7 -+#define LMEM_BUF_SIZE (1 << LMEM_BUF_SIZE_LN2) -+ -+/* DDR defines */ -+#define DDR_HDR_SIZE 0x0100 -+#define DDR_BUF_SIZE_LN2 0xb -+#define DDR_BUF_SIZE (1 << DDR_BUF_SIZE_LN2) -+ -+ -+#endif /* _CBUS_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h -@@ -0,0 +1,55 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+*/ -+#ifndef _BMU_H_ -+#define _BMU_H_ -+ -+#define BMU_VERSION 0x000 -+#define BMU_CTRL 0x004 -+#define BMU_UCAST_CONFIG 0x008 -+#define BMU_UCAST_BASE_ADDR 0x00c -+#define BMU_BUF_SIZE 0x010 -+#define BMU_BUF_CNT 0x014 -+#define BMU_THRES 0x018 -+#define BMU_INT_SRC 0x020 -+#define BMU_INT_ENABLE 0x024 -+#define BMU_ALLOC_CTRL 0x030 -+#define BMU_FREE_CTRL 0x034 -+#define BMU_FREE_ERR_ADDR 0x038 -+#define BMU_CURR_BUF_CNT 0x03c -+#define BMU_MCAST_CNT 0x040 -+#define BMU_MCAST_ALLOC_CTRL 0x044 -+#define BMU_REM_BUF_CNT 0x048 -+#define BMU_LOW_WATERMARK 0x050 -+#define BMU_HIGH_WATERMARK 0x054 -+#define BMU_INT_MEM_ACCESS 0x100 -+ -+typedef struct { -+ unsigned long baseaddr; -+ u32 count; -+ u32 size; -+} BMU_CFG; -+ -+ -+#define BMU1_BUF_SIZE LMEM_BUF_SIZE_LN2 -+#define BMU2_BUF_SIZE DDR_BUF_SIZE_LN2 -+ -+#define BMU2_MCAST_ALLOC_CTRL BMU2_BASE_ADDR + BMU_MCAST_ALLOC_CTRL -+ -+#endif /* _BMU_H_ */ -+ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h -@@ -0,0 +1,242 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+*/ -+#ifndef _CLASS_CSR_H_ -+#define _CLASS_CSR_H_ -+ -+/** @file class_csr.h. -+ * class_csr - block containing all the classifier control and status register. Mapped on CBUS and accessible from all PE's and ARM. -+ */ -+ -+ -+#define CLASS_VERSION (CLASS_CSR_BASE_ADDR + 0x000) -+#define CLASS_TX_CTRL (CLASS_CSR_BASE_ADDR + 0x004) -+#define CLASS_INQ_PKTPTR (CLASS_CSR_BASE_ADDR + 0x010) -+#define CLASS_HDR_SIZE (CLASS_CSR_BASE_ADDR + 0x014) /**< (ddr_hdr_size[24:16], lmem_hdr_size[5:0]) */ -+#define CLASS_HDR_SIZE_LMEM(off) ((off) & 0x3f) /**< LMEM header size for the Classifier block.\ Data in the LMEM is written from this offset. */ -+#define CLASS_HDR_SIZE_DDR(off) (((off) & 0x1ff) << 16) /**< DDR header size for the Classifier block.\ Data in the DDR is written from this offset. */ -+ -+#define CLASS_PE0_QB_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x020) /**< DMEM address of first [15:0] and second [31:16] buffers on QB side. */ -+#define CLASS_PE0_QB_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x024) /**< DMEM address of third [15:0] and fourth [31:16] buffers on QB side. */ -+ -+#define CLASS_PE0_RO_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x060) /**< DMEM address of first [15:0] and second [31:16] buffers on RO side. */ -+#define CLASS_PE0_RO_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x064) /**< DMEM address of third [15:0] and fourth [31:16] buffers on RO side. */ -+ -+/** @name Class PE memory access. Allows external PE's and HOST to read/write PMEM/DMEM memory ranges for each classifier PE. 
-+ */ -+//@{ -+#define CLASS_MEM_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x100) /**< {sr_pe_mem_cmd[31], csr_pe_mem_wren[27:24], csr_pe_mem_addr[23:0]}, See \ref XXX_MEM_ACCESS_ADDR for details. */ -+#define CLASS_MEM_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x104) /**< Internal Memory Access Write Data [31:0] */ -+#define CLASS_MEM_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x108) /**< Internal Memory Access Read Data [31:0] */ -+//@} -+#define CLASS_TM_INQ_ADDR (CLASS_CSR_BASE_ADDR + 0x114) -+#define CLASS_PE_STATUS (CLASS_CSR_BASE_ADDR + 0x118) -+ -+#define CLASS_PHY1_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x11c) -+#define CLASS_PHY1_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x120) -+#define CLASS_PHY1_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x124) -+#define CLASS_PHY1_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x128) -+#define CLASS_PHY1_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x12c) -+#define CLASS_PHY1_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x130) -+#define CLASS_PHY1_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x134) -+#define CLASS_PHY1_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x138) -+#define CLASS_PHY1_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x13c) -+#define CLASS_PHY1_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x140) -+#define CLASS_PHY2_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x144) -+#define CLASS_PHY2_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x148) -+#define CLASS_PHY2_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x14c) -+#define CLASS_PHY2_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x150) -+#define CLASS_PHY2_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x154) -+#define CLASS_PHY2_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x158) -+#define CLASS_PHY2_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x15c) -+#define CLASS_PHY2_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x160) -+#define CLASS_PHY2_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x164) -+#define CLASS_PHY2_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x168) -+#define CLASS_PHY3_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x16c) -+#define CLASS_PHY3_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x170) -+#define CLASS_PHY3_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x174) -+#define CLASS_PHY3_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x178) -+#define CLASS_PHY3_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x17c) -+#define CLASS_PHY3_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x180) -+#define CLASS_PHY3_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x184) -+#define CLASS_PHY3_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x188) -+#define CLASS_PHY3_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x18c) -+#define CLASS_PHY3_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x190) -+#define CLASS_PHY1_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x194) -+#define CLASS_PHY1_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x198) -+#define CLASS_PHY1_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x19c) -+#define CLASS_PHY1_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a0) -+#define CLASS_PHY2_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a4) -+#define CLASS_PHY2_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a8) -+#define CLASS_PHY2_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1ac) -+#define CLASS_PHY2_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b0) -+#define CLASS_PHY3_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b4) -+#define CLASS_PHY3_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b8) -+#define CLASS_PHY3_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1bc) -+#define CLASS_PHY3_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c0) -+#define CLASS_PHY4_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c4) -+#define CLASS_PHY4_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c8) -+#define CLASS_PHY4_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1cc) -+#define CLASS_PHY4_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1d0) -+#define CLASS_PHY4_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d4) -+#define CLASS_PHY4_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d8) -+#define CLASS_PHY4_LP_FAIL_PKTS 
(CLASS_CSR_BASE_ADDR + 0x1dc) -+#define CLASS_PHY4_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e0) -+#define CLASS_PHY4_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x1e4) -+#define CLASS_PHY4_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e8) -+#define CLASS_PHY4_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x1ec) -+#define CLASS_PHY4_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x1f0) -+#define CLASS_PHY4_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f4) -+#define CLASS_PHY4_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f8) -+ -+#define CLASS_PE_SYS_CLK_RATIO (CLASS_CSR_BASE_ADDR + 0x200) -+#define CLASS_AFULL_THRES (CLASS_CSR_BASE_ADDR + 0x204) -+#define CLASS_GAP_BETWEEN_READS (CLASS_CSR_BASE_ADDR + 0x208) -+#define CLASS_MAX_BUF_CNT (CLASS_CSR_BASE_ADDR + 0x20c) -+#define CLASS_TSQ_FIFO_THRES (CLASS_CSR_BASE_ADDR + 0x210) -+#define CLASS_TSQ_MAX_CNT (CLASS_CSR_BASE_ADDR + 0x214) -+#define CLASS_IRAM_DATA_0 (CLASS_CSR_BASE_ADDR + 0x218) -+#define CLASS_IRAM_DATA_1 (CLASS_CSR_BASE_ADDR + 0x21c) -+#define CLASS_IRAM_DATA_2 (CLASS_CSR_BASE_ADDR + 0x220) -+#define CLASS_IRAM_DATA_3 (CLASS_CSR_BASE_ADDR + 0x224) -+ -+#define CLASS_BUS_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x228) -+ -+#define CLASS_BUS_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x22c) -+#define CLASS_BUS_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x230) -+ -+#define CLASS_ROUTE_HASH_ENTRY_SIZE (CLASS_CSR_BASE_ADDR + 0x234) /**< (route_entry_size[9:0], route_hash_size[23:16] (this is actually ln2(size))) */ -+#define CLASS_ROUTE_ENTRY_SIZE(size) ((size) & 0x1ff) -+#define CLASS_ROUTE_HASH_SIZE(hash_bits) (((hash_bits) & 0xff) << 16) -+ -+#define CLASS_ROUTE_TABLE_BASE (CLASS_CSR_BASE_ADDR + 0x238) -+ -+#define CLASS_ROUTE_MULTI (CLASS_CSR_BASE_ADDR + 0x23c) -+#define CLASS_SMEM_OFFSET (CLASS_CSR_BASE_ADDR + 0x240) -+#define CLASS_LMEM_BUF_SIZE (CLASS_CSR_BASE_ADDR + 0x244) -+#define CLASS_VLAN_ID (CLASS_CSR_BASE_ADDR + 0x248) -+#define CLASS_BMU1_BUF_FREE (CLASS_CSR_BASE_ADDR + 0x24c) -+#define CLASS_USE_TMU_INQ (CLASS_CSR_BASE_ADDR + 0x250) -+#define CLASS_VLAN_ID1 (CLASS_CSR_BASE_ADDR + 0x254) -+ -+#define CLASS_BUS_ACCESS_BASE (CLASS_CSR_BASE_ADDR + 0x258) -+#define CLASS_BUS_ACCESS_BASE_MASK (0xFF000000) //bit 31:24 of PE peripheral address are stored in CLASS_BUS_ACCESS_BASE -+ -+#define CLASS_HIF_PARSE (CLASS_CSR_BASE_ADDR + 0x25c) -+ -+#define CLASS_HOST_PE0_GP (CLASS_CSR_BASE_ADDR + 0x260) -+#define CLASS_PE0_GP (CLASS_CSR_BASE_ADDR + 0x264) -+#define CLASS_HOST_PE1_GP (CLASS_CSR_BASE_ADDR + 0x268) -+#define CLASS_PE1_GP (CLASS_CSR_BASE_ADDR + 0x26c) -+#define CLASS_HOST_PE2_GP (CLASS_CSR_BASE_ADDR + 0x270) -+#define CLASS_PE2_GP (CLASS_CSR_BASE_ADDR + 0x274) -+#define CLASS_HOST_PE3_GP (CLASS_CSR_BASE_ADDR + 0x278) -+#define CLASS_PE3_GP (CLASS_CSR_BASE_ADDR + 0x27c) -+#define CLASS_HOST_PE4_GP (CLASS_CSR_BASE_ADDR + 0x280) -+#define CLASS_PE4_GP (CLASS_CSR_BASE_ADDR + 0x284) -+#define CLASS_HOST_PE5_GP (CLASS_CSR_BASE_ADDR + 0x288) -+#define CLASS_PE5_GP (CLASS_CSR_BASE_ADDR + 0x28c) -+ -+#define CLASS_PE_INT_SRC (CLASS_CSR_BASE_ADDR + 0x290) -+#define CLASS_PE_INT_ENABLE (CLASS_CSR_BASE_ADDR + 0x294) -+ -+#define CLASS_TPID0_TPID1 (CLASS_CSR_BASE_ADDR + 0x298) -+#define CLASS_TPID2 (CLASS_CSR_BASE_ADDR + 0x29c) -+ -+#define CLASS_L4_CHKSUM_ADDR (CLASS_CSR_BASE_ADDR + 0x2a0) -+ -+#define CLASS_PE0_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a4) -+#define CLASS_PE1_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a8) -+#define CLASS_PE2_DEBUG (CLASS_CSR_BASE_ADDR + 0x2ac) -+#define CLASS_PE3_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b0) -+#define CLASS_PE4_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b4) -+#define CLASS_PE5_DEBUG 
(CLASS_CSR_BASE_ADDR + 0x2b8) -+ -+#define CLASS_STATE (CLASS_CSR_BASE_ADDR + 0x2bc) -+ -+/* CLASS defines */ -+#define CLASS_PBUF_SIZE 0x100 /* Fixed by hardware */ -+#define CLASS_PBUF_HEADER_OFFSET 0x80 /* Can be configured */ -+ -+#define CLASS_PBUF0_BASE_ADDR 0x000 /* Can be configured */ -+#define CLASS_PBUF1_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + CLASS_PBUF_SIZE) /* Can be configured */ -+#define CLASS_PBUF2_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + CLASS_PBUF_SIZE) /* Can be configured */ -+#define CLASS_PBUF3_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + CLASS_PBUF_SIZE) /* Can be configured */ -+ -+#define CLASS_PBUF0_HEADER_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + CLASS_PBUF_HEADER_OFFSET) -+#define CLASS_PBUF1_HEADER_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + CLASS_PBUF_HEADER_OFFSET) -+#define CLASS_PBUF2_HEADER_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + CLASS_PBUF_HEADER_OFFSET) -+#define CLASS_PBUF3_HEADER_BASE_ADDR (CLASS_PBUF3_BASE_ADDR + CLASS_PBUF_HEADER_OFFSET) -+ -+#define CLASS_PE0_RO_DM_ADDR0_VAL ((CLASS_PBUF1_BASE_ADDR << 16) | CLASS_PBUF0_BASE_ADDR) -+#define CLASS_PE0_RO_DM_ADDR1_VAL ((CLASS_PBUF3_BASE_ADDR << 16) | CLASS_PBUF2_BASE_ADDR) -+ -+#define CLASS_PE0_QB_DM_ADDR0_VAL ((CLASS_PBUF1_HEADER_BASE_ADDR << 16) | CLASS_PBUF0_HEADER_BASE_ADDR) -+#define CLASS_PE0_QB_DM_ADDR1_VAL ((CLASS_PBUF3_HEADER_BASE_ADDR << 16) | CLASS_PBUF2_HEADER_BASE_ADDR) -+ -+#define CLASS_ROUTE_SIZE 128 -+#define CLASS_MAX_ROUTE_SIZE 256 -+#define CLASS_ROUTE_HASH_BITS 20 -+#define CLASS_ROUTE_HASH_MASK ((1 << CLASS_ROUTE_HASH_BITS) - 1) -+ -+#define CLASS_ROUTE0_BASE_ADDR 0x400 /* Can be configured */ -+#define CLASS_ROUTE1_BASE_ADDR (CLASS_ROUTE0_BASE_ADDR + CLASS_ROUTE_SIZE) /* Can be configured */ -+#define CLASS_ROUTE2_BASE_ADDR (CLASS_ROUTE1_BASE_ADDR + CLASS_ROUTE_SIZE) /* Can be configured */ -+#define CLASS_ROUTE3_BASE_ADDR (CLASS_ROUTE2_BASE_ADDR + CLASS_ROUTE_SIZE) /* Can be configured */ -+ -+#define CLASS_SA_SIZE 128 -+#define CLASS_IPSEC_SA0_BASE_ADDR 0x600 -+#define CLASS_IPSEC_SA1_BASE_ADDR (CLASS_IPSEC_SA0_BASE_ADDR + CLASS_SA_SIZE) /* not used */ -+#define CLASS_IPSEC_SA2_BASE_ADDR (CLASS_IPSEC_SA1_BASE_ADDR + CLASS_SA_SIZE) /* not used */ -+#define CLASS_IPSEC_SA3_BASE_ADDR (CLASS_IPSEC_SA2_BASE_ADDR + CLASS_SA_SIZE) /* not used */ -+ -+/* generic purpose free dmem buffer, last portion of 2K dmem pbuf */ -+#define CLASS_GP_DMEM_BUF_SIZE (2048 - (CLASS_PBUF_SIZE*4) - (CLASS_ROUTE_SIZE*4) - (CLASS_SA_SIZE)) -+#define CLASS_GP_DMEM_BUF ((void *)(CLASS_IPSEC_SA0_BASE_ADDR + CLASS_SA_SIZE)) -+ -+ -+#define TWO_LEVEL_ROUTE (1 << 0) -+#define PHYNO_IN_HASH (1 << 1) -+#define HW_ROUTE_FETCH (1 << 3) -+#define HW_BRIDGE_FETCH (1 << 5) -+#define IP_ALIGNED (1 << 6) -+#define ARC_HIT_CHECK_EN (1 << 7) -+#define CLASS_TOE (1 << 11) -+#define HASH_NORMAL (0 << 12) -+#define HASH_CRC_PORT (1 << 12) -+#define HASH_CRC_IP (2 << 12) -+#define HASH_CRC_PORT_IP (3 << 12) -+#define QB2BUS_LE (1 << 15) -+ -+#define TCP_CHKSUM_DROP (1 << 0) -+#define UDP_CHKSUM_DROP (1 << 1) -+#define IPV4_CHKSUM_DROP (1 << 9) -+ -+/*CLASS_HIF_PARSE bits*/ -+#define HIF_PKT_CLASS_EN (1 << 0) -+#define HIF_PKT_OFFSET(ofst) ((ofst&0xF) << 1) -+ -+typedef struct { -+ u32 toe_mode; -+ unsigned long route_table_baseaddr; -+ u32 route_table_hash_bits; -+ u32 pe_sys_clk_ratio; -+ u32 resume; -+} CLASS_CFG; -+ -+#endif /* _CLASS_CSR_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/emac.h -@@ -0,0 +1,243 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. 
-+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+*/ -+#ifndef _EMAC_H_ -+#define _EMAC_H_ -+ -+#define EMAC_NETWORK_CONTROL 0x000 -+#define EMAC_NETWORK_CONFIG 0x004 -+#define EMAC_NETWORK_STATUS 0x008 -+#define EMAC_DMA_CONFIG 0x010 -+ -+#define EMAC_PHY_MANAGEMENT 0x034 -+ -+#define EMAC_HASH_BOT 0x080 -+#define EMAC_HASH_TOP 0x084 -+ -+#define EMAC_SPEC1_ADD_BOT 0x088 -+#define EMAC_SPEC1_ADD_TOP 0x08c -+#define EMAC_SPEC2_ADD_BOT 0x090 -+#define EMAC_SPEC2_ADD_TOP 0x094 -+#define EMAC_SPEC3_ADD_BOT 0x098 -+#define EMAC_SPEC3_ADD_TOP 0x09c -+#define EMAC_SPEC4_ADD_BOT 0x0a0 -+#define EMAC_SPEC4_ADD_TOP 0x0a4 -+#define EMAC_WOL 0x0b8 -+ -+#define EMAC_STACKED_VLAN_REG 0x0c0 -+ -+#define EMAC_SPEC1_ADD_MASK_BOT 0x0c8 -+#define EMAC_SPEC1_ADD_MASK_TOP 0x0cc -+ -+#define EMAC_RMON_BASE_OFST 0x100 -+ -+#define EMAC_SPEC5_ADD_BOT 0x300 -+#define EMAC_SPEC5_ADD_TOP 0x304 -+#define EMAC_SPEC6_ADD_BOT 0x308 -+#define EMAC_SPEC6_ADD_TOP 0x30c -+#define EMAC_SPEC7_ADD_BOT 0x310 -+#define EMAC_SPEC7_ADD_TOP 0x314 -+#define EMAC_SPEC8_ADD_BOT 0x318 -+#define EMAC_SPEC8_ADD_TOP 0x31c -+#define EMAC_SPEC9_ADD_BOT 0x320 -+#define EMAC_SPEC9_ADD_TOP 0x324 -+#define EMAC_SPEC10_ADD_BOT 0x328 -+#define EMAC_SPEC10_ADD_TOP 0x32c -+#define EMAC_SPEC11_ADD_BOT 0x330 -+#define EMAC_SPEC11_ADD_TOP 0x334 -+#define EMAC_SPEC12_ADD_BOT 0x338 -+#define EMAC_SPEC12_ADD_TOP 0x33c -+#define EMAC_SPEC13_ADD_BOT 0x340 -+#define EMAC_SPEC13_ADD_TOP 0x344 -+#define EMAC_SPEC14_ADD_BOT 0x348 -+#define EMAC_SPEC14_ADD_TOP 0x34c -+#define EMAC_SPEC15_ADD_BOT 0x350 -+#define EMAC_SPEC15_ADD_TOP 0x354 -+#define EMAC_SPEC16_ADD_BOT 0x358 -+#define EMAC_SPEC16_ADD_TOP 0x35c -+#define EMAC_SPEC17_ADD_BOT 0x360 -+#define EMAC_SPEC17_ADD_TOP 0x364 -+#define EMAC_SPEC18_ADD_BOT 0x368 -+#define EMAC_SPEC18_ADD_TOP 0x36c -+#define EMAC_SPEC19_ADD_BOT 0x370 -+#define EMAC_SPEC19_ADD_TOP 0x374 -+#define EMAC_SPEC20_ADD_BOT 0x378 -+#define EMAC_SPEC20_ADD_TOP 0x37c -+#define EMAC_SPEC21_ADD_BOT 0x380 -+#define EMAC_SPEC21_ADD_TOP 0x384 -+#define EMAC_SPEC22_ADD_BOT 0x388 -+#define EMAC_SPEC22_ADD_TOP 0x38c -+#define EMAC_SPEC23_ADD_BOT 0x390 -+#define EMAC_SPEC23_ADD_TOP 0x394 -+#define EMAC_SPEC24_ADD_BOT 0x398 -+#define EMAC_SPEC24_ADD_TOP 0x39c -+#define EMAC_SPEC25_ADD_BOT 0x3a0 -+#define EMAC_SPEC25_ADD_TOP 0x3a4 -+#define EMAC_SPEC26_ADD_BOT 0x3a8 -+#define EMAC_SPEC26_ADD_TOP 0x3ac -+#define EMAC_SPEC27_ADD_BOT 0x3b0 -+#define EMAC_SPEC27_ADD_TOP 0x3b4 -+#define EMAC_SPEC28_ADD_BOT 0x3b8 -+#define EMAC_SPEC28_ADD_TOP 0x3bc -+#define EMAC_SPEC29_ADD_BOT 0x3c0 -+#define EMAC_SPEC29_ADD_TOP 0x3c4 -+#define EMAC_SPEC30_ADD_BOT 0x3c8 -+#define EMAC_SPEC30_ADD_TOP 0x3cc -+#define EMAC_SPEC31_ADD_BOT 0x3d0 -+#define EMAC_SPEC31_ADD_TOP 0x3d4 -+#define EMAC_SPEC32_ADD_BOT 0x3d8 -+#define EMAC_SPEC32_ADD_TOP 0x3dc -+ -+#define EMAC_SPEC_ADDR_MAX 
32 -+ -+#define EMAC_CONTROL 0x7a0 -+ -+/* GEMAC definitions and settings */ -+ -+#define EMAC_PORT_0 0 -+#define EMAC_PORT_1 1 -+#define EMAC_PORT_2 2 -+ -+/* The possible operating speeds of the MAC, currently supporting 10, 100 and -+ * 1000Mb modes. -+ */ -+typedef enum {SPEED_10M, SPEED_100M, SPEED_1000M, SPEED_1000M_PCS} MAC_SPEED; -+ -+#define GMII 1 -+#define MII 2 -+#define RMII 3 -+#define RGMII 4 -+#define SGMII 5 -+ -+#define DUP_HALF 0x00 -+#define DUP_FULL 0x01 -+ -+/* EMAC_NETWORK_CONTROL bits definition */ -+#define EMAC_LB_PHY (1 << 0) -+#define EMAC_LB_MAC (1 << 1) -+#define EMAC_RX_ENABLE (1 << 2) -+#define EMAC_TX_ENABLE (1 << 3) -+#define EMAC_MDIO_EN (1 << 4) /* Enable MDIO port */ -+ -+/* WoL (Wake-on-LAN) bit definitions */ -+#define EMAC_WOL_MAGIC (1 << 16) -+#define EMAC_WOL_ARP (1 << 17) -+#define EMAC_WOL_SPEC_ADDR (1 << 18) -+#define EMAC_WOL_MULTI (1 << 19) -+ -+/* EMAC_NETWORK_CONFIG bits definition */ -+#define EMAC_SPEED_100 (1 << 0) -+#define EMAC_HALF_DUP (0 << 1) -+#define EMAC_FULL_DUP (1 << 1) -+#define EMAC_DUPLEX_MASK (1 << 1) -+#define EMAC_ENABLE_JUMBO_FRAME (1 << 3) -+#define EMAC_ENABLE_COPY_ALL (1 << 4) -+#define EMAC_NO_BROADCAST (1 << 5) -+#define EMAC_ENABLE_MULTICAST (1 << 6) -+#define EMAC_ENABLE_UNICAST (1 << 7) -+#define EMAC_ENABLE_1536_RX (1 << 8) -+#define EMAC_SPEED_1000 (1 << 10) -+#define EMAC_PCS_ENABLE (1 << 11) -+#define EMAC_ENABLE_PAUSE_RX (1 << 13) -+#define EMAC_REMOVE_FCS (1 << 17) -+#define EMAC_ENABLE_CHKSUM_RX (1 << 24) -+#define EMAC_MDC_DIV_MASK (0x7 << 18) /* PCLK divisor for MDC */ -+#define EMAC_DATA_BUS_WIDTH_SHIFT 21 -+#define EMAC_DATA_BUS_WIDTH_MASK (0x3 << EMAC_DATA_BUS_WIDTH_SHIFT) -+#define EMAC_DATA_BUS_WIDTH_32 (0x00 << EMAC_DATA_BUS_WIDTH_SHIFT) -+#define EMAC_DATA_BUS_WIDTH_64 (0x01 << EMAC_DATA_BUS_WIDTH_SHIFT) -+#define EMAC_DATA_BUS_WIDTH_128 (0x02 << EMAC_DATA_BUS_WIDTH_SHIFT) -+#define EMAC_ENABLE_FCS_RX (1 << 26) -+#define EMAC_SGMII_MODE_ENABLE (1 << 27) -+ -+#define EMAC_SPEED_MASK (EMAC_SPEED_100 | EMAC_SPEED_1000) -+ -+/* EMAC_STACKED_VLAN_REG bits definition */ -+#define EMAC_ENABLE_STACKED_VLAN (1 << 31) -+ -+/* EMAC_CONTROL bits definition */ -+#define EMAC_TWO_BYTES_IP_ALIGN (1 << 0) // two-byte IP alignment -+ -+/* EMAC_NET_STATUS bits definition */ -+#define EMAC_PHY_IDLE (1<<2) /* PHY management is idle */ -+#define EMAC_MDIO_IN (1<<1) /* Status of mdio_in pin */ -+#define EMAC_LINK_STATUS (1<<0) /* Status of link pin */ -+ -+/* EMAC_DMA_CONFIG Bit definitions */ -+#define EMAC_ENABLE_CHKSUM_TX (1<<11) -+ -+//RMII enable - bit 1 / RGMII enable - bit 2 -+#define EMAC_RMII_MODE_ENABLE ((1 << 1) | (0 << 2)) -+#define EMAC_RMII_MODE_DISABLE (0 << 1) -+#define EMAC_RGMII_MODE_ENABLE ((0 << 1) | (1 << 2)) -+#define EMAC_RGMII_MODE_DISABLE (0 << 2) -+#define EMAC_MII_MODE_ENABLE (EMAC_RMII_MODE_DISABLE | EMAC_RGMII_MODE_DISABLE) -+#define EMAC_GMII_MODE_ENABLE (EMAC_RMII_MODE_DISABLE | EMAC_RGMII_MODE_DISABLE) -+#define EMAC_MODE_MASK (0x3 << 1) -+ -+/* Default configuration */ -+#define EMAC0_DEFAULT_DUPLEX_MODE FULLDUPLEX -+#define EMAC0_DEFAULT_EMAC_MODE RGMII -+#define EMAC0_DEFAULT_EMAC_SPEED SPEED_1000M -+ -+#define EMAC1_DEFAULT_DUPLEX_MODE FULLDUPLEX -+#define EMAC1_DEFAULT_EMAC_MODE RGMII -+#define EMAC1_DEFAULT_EMAC_SPEED SPEED_1000M -+ -+#define EMAC2_DEFAULT_DUPLEX_MODE FULLDUPLEX -+#define EMAC2_DEFAULT_EMAC_MODE RGMII -+#define EMAC2_DEFAULT_EMAC_SPEED SPEED_1000M -+ -+/* EMAC Hash size */ -+#define EMAC_HASH_REG_BITS 64 -+ -+/* The Address organisation for the MAC device.
All addresses are split into -+ * two 32-bit register fields. The first one (bottom) is the lower 32-bits of -+ * the address and the other field holds the high-order bits - this may be 16-bits -+ * in the case of MAC addresses, or 32-bits for the hash address. -+ * In terms of memory storage, the first item (bottom) is assumed to be at a -+ * lower address location than 'top', i.e. 'top' should be at the address location of -+ * 'bottom' + 4 bytes. -+ */ -+typedef struct { -+ u32 bottom; /* Lower 32-bits of address. */ -+ u32 top; /* Upper 32-bits of address. */ -+} MAC_ADDR; -+ -+ -+/* The following is the organisation of the address filters section of the MAC -+ * registers. The Cadence MAC contains four possible specific address match -+ * addresses; if an incoming frame matches any one of these four -+ * addresses then the frame will be copied to memory. -+ * It is not necessary for all four of the address match registers to be -+ * programmed; this is application dependent. -+ */ -+typedef struct { -+ MAC_ADDR one; /* Specific address register 1. */ -+ MAC_ADDR two; /* Specific address register 2. */ -+ MAC_ADDR three; /* Specific address register 3. */ -+ MAC_ADDR four; /* Specific address register 4. */ -+} SPEC_ADDR; -+ -+typedef struct { -+ u32 mode; -+ u32 speed; -+ u32 duplex; -+} GEMAC_CFG; -+ -+#endif /* _EMAC_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h -@@ -0,0 +1,250 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
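As an illustration of the MAC_ADDR layout defined in emac.h above, here is a minimal sketch of packing a 6-byte MAC address into the bottom/top register pair. It assumes the usual Cadence GEM byte order (first octet in the least-significant byte of 'bottom'); the helper name is illustrative, not part of the driver.

    static void mac_to_mac_addr(const u8 mac[6], MAC_ADDR *addr)
    {
    	/* First four octets go into 'bottom', first octet least significant. */
    	addr->bottom = mac[0] | (mac[1] << 8) | (mac[2] << 16) |
    		       ((u32)mac[3] << 24);
    	/* Remaining two octets occupy the low 16 bits of 'top'. */
    	addr->top = mac[4] | (mac[5] << 8);
    }

On Cadence GEM parts the specific-address match is typically disarmed by the write to the bottom register and re-armed by the write to the top register, so 'bottom' should be written first.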
-+ * -+*/ -+#ifndef _EMAC_H_ -+#define _EMAC_H_ -+ -+#define EMAC_IEVENT_REG 0x004 -+#define EMAC_IMASK_REG 0x008 -+#define EMAC_R_DES_ACTIVE_REG 0x010 -+#define EMAC_X_DES_ACTIVE_REG 0x014 -+#define EMAC_ECNTRL_REG 0x024 -+#define EMAC_MII_DATA_REG 0x040 -+#define EMAC_MII_CTRL_REG 0x044 -+#define EMAC_MIB_CTRL_STS_REG 0x064 -+#define EMAC_RCNTRL_REG 0x084 -+#define EMAC_TCNTRL_REG 0x0C4 -+#define EMAC_PHY_ADDR_LOW 0x0E4 -+#define EMAC_PHY_ADDR_HIGH 0x0E8 -+#define EMAC_GAUR 0x120 -+#define EMAC_GALR 0x124 -+#define EMAC_TFWR_STR_FWD 0x144 -+#define EMAC_RX_SECTIOM_FULL 0x190 -+#define EMAC_TX_SECTION_EMPTY 0x1A0 -+#define EMAC_TRUNC_FL 0x1B0 -+ -+#define RMON_T_DROP 0x200 /* Count of frames not cntd correctly */ -+#define RMON_T_PACKETS 0x204 /* RMON TX packet count */ -+#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */ -+#define RMON_T_MC_PKT 0x20c /* RMON TX multicast pkts */ -+#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */ -+#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */ -+#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */ -+#define RMON_T_FRAG 0x21c /* RMON TX pkts < 64 bytes, bad CRC */ -+#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */ -+#define RMON_T_COL 0x224 /* RMON TX collision count */ -+#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */ -+#define RMON_T_P65TO127 0x22c /* RMON TX 65 to 127 byte pkts */ -+#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */ -+#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */ -+#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */ -+#define RMON_T_P1024TO2047 0x23c /* RMON TX 1024 to 2047 byte pkts */ -+#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */ -+#define RMON_T_OCTETS 0x244 /* RMON TX octets */ -+#define IEEE_T_DROP 0x248 /* Count of frames not counted crtly */ -+#define IEEE_T_FRAME_OK 0x24c /* Frames tx'd OK */ -+#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */ -+#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */ -+#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */ -+#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */ -+#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */ -+#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */ -+#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */ -+#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */ -+#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */ -+#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */ -+#define RMON_R_PACKETS 0x284 /* RMON RX packet count */ -+#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */ -+#define RMON_R_MC_PKT 0x28c /* RMON RX multicast pkts */ -+#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */ -+#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */ -+#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */ -+#define RMON_R_FRAG 0x29c /* RMON RX pkts < 64 bytes, bad CRC */ -+#define RMON_R_JAB 0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */ -+#define RMON_R_RESVD_O 0x2a4 /* Reserved */ -+#define RMON_R_P64 0x2a8 /* RMON RX 64 byte pkts */ -+#define RMON_R_P65TO127 0x2ac /* RMON RX 65 to 127 byte pkts */ -+#define RMON_R_P128TO255 0x2b0 /* RMON RX 128 to 255 byte pkts */ -+#define RMON_R_P256TO511 0x2b4 /* RMON RX 256 to 511 byte pkts */ -+#define RMON_R_P512TO1023 0x2b8 /* RMON RX 512 to 1023 byte pkts */ -+#define 
RMON_R_P1024TO2047 0x2bc /* RMON RX 1024 to 2047 byte pkts */ -+#define RMON_R_P_GTE2048 0x2c0 /* RMON RX pkts > 2048 bytes */ -+#define RMON_R_OCTETS 0x2c4 /* RMON RX octets */ -+#define IEEE_R_DROP 0x2c8 /* Count frames not counted correctly */ -+#define IEEE_R_FRAME_OK 0x2cc /* Frames rx'd OK */ -+#define IEEE_R_CRC 0x2d0 /* Frames rx'd with CRC err */ -+#define IEEE_R_ALIGN 0x2d4 /* Frames rx'd with alignment err */ -+#define IEEE_R_MACERR 0x2d8 /* Receive FIFO overflow count */ -+#define IEEE_R_FDXFC 0x2dc /* Flow control pause frames rx'd */ -+#define IEEE_R_OCTETS_OK 0x2e0 /* Octet cnt for frames rx'd w/o err */ -+ -+#define EMAC_SMAC_0_0 0x500 /*Supplemental MAC Address 0 (RW).*/ -+#define EMAC_SMAC_0_1 0x504 /*Supplemental MAC Address 0 (RW).*/ -+ -+/* GEMAC definitions and settings */ -+ -+#define EMAC_PORT_0 0 -+#define EMAC_PORT_1 1 -+ -+/* GEMAC Bit definitions */ -+#define EMAC_IEVENT_HBERR 0x80000000 -+#define EMAC_IEVENT_BABR 0x40000000 -+#define EMAC_IEVENT_BABT 0x20000000 -+#define EMAC_IEVENT_GRA 0x10000000 -+#define EMAC_IEVENT_TXF 0x08000000 -+#define EMAC_IEVENT_TXB 0x04000000 -+#define EMAC_IEVENT_RXF 0x02000000 -+#define EMAC_IEVENT_RXB 0x01000000 -+#define EMAC_IEVENT_MII 0x00800000 -+#define EMAC_IEVENT_EBERR 0x00400000 -+#define EMAC_IEVENT_LC 0x00200000 -+#define EMAC_IEVENT_RL 0x00100000 -+#define EMAC_IEVENT_UN 0x00080000 -+ -+#define EMAC_IMASK_HBERR 0x80000000 -+#define EMAC_IMASK_BABR 0x40000000 -+#define EMAC_IMASKT_BABT 0x20000000 -+#define EMAC_IMASK_GRA 0x10000000 -+#define EMAC_IMASKT_TXF 0x08000000 -+#define EMAC_IMASK_TXB 0x04000000 -+#define EMAC_IMASKT_RXF 0x02000000 -+#define EMAC_IMASK_RXB 0x01000000 -+#define EMAC_IMASK_MII 0x00800000 -+#define EMAC_IMASK_EBERR 0x00400000 -+#define EMAC_IMASK_LC 0x00200000 -+#define EMAC_IMASKT_RL 0x00100000 -+#define EMAC_IMASK_UN 0x00080000 -+ -+#define EMAC_RCNTRL_MAX_FL_SHIFT 16 -+#define EMAC_RCNTRL_LOOP 0x00000001 -+#define EMAC_RCNTRL_DRT 0x00000002 -+#define EMAC_RCNTRL_MII_MODE 0x00000004 -+#define EMAC_RCNTRL_PROM 0x00000008 -+#define EMAC_RCNTRL_BC_REJ 0x00000010 -+#define EMAC_RCNTRL_FCE 0x00000020 -+#define EMAC_RCNTRL_RGMII 0x00000040 -+#define EMAC_RCNTRL_SGMII 0x00000080 -+#define EMAC_RCNTRL_RMII 0x00000100 -+#define EMAC_RCNTRL_RMII_10T 0x00000200 -+#define EMAC_RCNTRL_CRC_FWD 0x00004000 -+ -+#define EMAC_TCNTRL_GTS 0x00000001 -+#define EMAC_TCNTRL_HBC 0x00000002 -+#define EMAC_TCNTRL_FDEN 0x00000004 -+#define EMAC_TCNTRL_TFC_PAUSE 0x00000008 -+#define EMAC_TCNTRL_RFC_PAUSE 0x00000010 -+ -+#define EMAC_ECNTRL_RESET 0x00000001 /* reset the EMAC */ -+#define EMAC_ECNTRL_ETHER_EN 0x00000002 /* enable the EMAC */ -+#define EMAC_ECNTRL_SPEED 0x00000020 -+#define EMAC_ECNTRL_DBSWAP 0x00000100 -+ -+#define EMAC_X_WMRK_STRFWD 0x00000100 -+ -+#define EMAC_X_DES_ACTIVE_TDAR 0x01000000 -+#define EMAC_R_DES_ACTIVE_RDAR 0x01000000 -+ -+ -+ -+/* The possible operating speeds of the MAC, currently supporting 10, 100 and -+ * 1000Mb modes. 
-+ */ -+typedef enum {SPEED_10M, SPEED_100M, SPEED_1000M, SPEED_1000M_PCS} MAC_SPEED; -+ -+#define GMII 1 -+#define MII 2 -+#define RMII 3 -+#define RGMII 4 -+#define SGMII 5 -+ -+#define DUPLEX_HALF 0x00 -+#define DUPLEX_FULL 0x01 -+ -+ -+/* Default configuration */ -+#define EMAC0_DEFAULT_DUPLEX_MODE FULLDUPLEX -+#define EMAC0_DEFAULT_EMAC_MODE RGMII -+#define EMAC0_DEFAULT_EMAC_SPEED SPEED_1000M -+ -+#define EMAC1_DEFAULT_DUPLEX_MODE FULLDUPLEX -+#define EMAC1_DEFAULT_EMAC_MODE SGMII -+#define EMAC1_DEFAULT_EMAC_SPEED SPEED_1000M -+ -+/* MII-related definitions */ -+#define EMAC_MII_DATA_ST 0x40000000 /* Start of frame delimiter */ -+#define EMAC_MII_DATA_OP_RD 0x20000000 /* Perform a read operation */ -+#define EMAC_MII_DATA_OP_WR 0x10000000 /* Perform a write operation */ -+#define EMAC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address field mask */ -+#define EMAC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register field mask */ -+#define EMAC_MII_DATA_TA 0x00020000 /* Turnaround */ -+#define EMAC_MII_DATA_DATAMSK 0x0000ffff /* PHY data field */ -+ -+#define EMAC_MII_DATA_RA_SHIFT 18 /* MII Register address bits */ -+#define EMAC_MII_DATA_RA_MASK 0x1F /* MII Register address mask */ -+#define EMAC_MII_DATA_PA_SHIFT 23 /* MII PHY address bits */ -+#define EMAC_MII_DATA_PA_MASK 0x1F /* MII PHY address mask */ -+ -+#define EMAC_MII_DATA_RA(v) ((v & EMAC_MII_DATA_RA_MASK) << EMAC_MII_DATA_RA_SHIFT) -+#define EMAC_MII_DATA_PA(v) ((v & EMAC_MII_DATA_PA_MASK) << EMAC_MII_DATA_PA_SHIFT) -+#define EMAC_MII_DATA(v) (v & 0xffff) -+ -+#define EMAC_MII_SPEED_SHIFT 1 -+#define EMAC_HOLDTIME_SHIFT 8 -+#define EMAC_HOLDTIME_MASK 0x7 -+#define EMAC_HOLDTIME(v) ((v & EMAC_HOLDTIME_MASK) << EMAC_HOLDTIME_SHIFT) -+ -+/* The Address organisation for the MAC device. All addresses are split into -+ * two 32-bit register fields. The first one (bottom) is the lower 32-bits of -+ * the address and the other field holds the high-order bits - this may be 16-bits -+ * in the case of MAC addresses, or 32-bits for the hash address. -+ * In terms of memory storage, the first item (bottom) is assumed to be at a -+ * lower address location than 'top', i.e. 'top' should be at the address location of -+ * 'bottom' + 4 bytes. -+ */ -+typedef struct { -+ u32 bottom; /* Lower 32-bits of address. */ -+ u32 top; /* Upper 32-bits of address. */ -+} MAC_ADDR; -+ -+ -+/* The following is the organisation of the address filters section of the MAC -+ * registers. The Cadence MAC contains four possible specific address match -+ * addresses; if an incoming frame matches any one of these four -+ * addresses then the frame will be copied to memory. -+ * It is not necessary for all four of the address match registers to be -+ * programmed; this is application dependent. -+ */ -+typedef struct { -+ MAC_ADDR one; /* Specific address register 1. */ -+ MAC_ADDR two; /* Specific address register 2. */ -+ MAC_ADDR three; /* Specific address register 3. */ -+ MAC_ADDR four; /* Specific address register 4. */ -+} SPEC_ADDR; -+ -+typedef struct { -+ u32 mode; -+ u32 speed; -+ u32 duplex; -+} GEMAC_CFG; -+ -+/* EMAC Hash size */ -+#define EMAC_HASH_REG_BITS 64 -+ -+#define EMAC_SPEC_ADDR_MAX 4 -+ -+#endif /* _EMAC_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h -@@ -0,0 +1,78 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
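To show how the EMAC_MII_DATA_* fields from emac_mtip.h above combine into a single management-frame word, here is a sketch of an MDIO read transaction. The writel()/readl() accessors and the ioremap'ed 'base' pointer are assumptions of the example, not part of the header.

    /* Request a read of register 'reg' on PHY address 'phy' (sketch). */
    u32 cmd = EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD | EMAC_MII_DATA_TA |
              EMAC_MII_DATA_PA(phy) | EMAC_MII_DATA_RA(reg);
    writel(cmd, base + EMAC_MII_DATA_REG);
    /* ...wait for the MII event (EMAC_IEVENT_MII), then fetch the data: */
    u16 val = EMAC_MII_DATA(readl(base + EMAC_MII_DATA_REG));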
-+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+*/ -+#ifndef _GPI_H_ -+#define _GPI_H_ -+ -+#define GPI_VERSION 0x00 -+#define GPI_CTRL 0x04 -+#define GPI_RX_CONFIG 0x08 -+#define GPI_HDR_SIZE 0x0c -+#define GPI_BUF_SIZE 0x10 -+#define GPI_LMEM_ALLOC_ADDR 0x14 -+#define GPI_LMEM_FREE_ADDR 0x18 -+#define GPI_DDR_ALLOC_ADDR 0x1c -+#define GPI_DDR_FREE_ADDR 0x20 -+#define GPI_CLASS_ADDR 0x24 -+#define GPI_DRX_FIFO 0x28 -+#define GPI_TRX_FIFO 0x2c -+#define GPI_INQ_PKTPTR 0x30 -+#define GPI_DDR_DATA_OFFSET 0x34 -+#define GPI_LMEM_DATA_OFFSET 0x38 -+#define GPI_TMLF_TX 0x4c -+#define GPI_DTX_ASEQ 0x50 -+#define GPI_FIFO_STATUS 0x54 -+#define GPI_FIFO_DEBUG 0x58 -+#define GPI_TX_PAUSE_TIME 0x5c -+#define GPI_LMEM_SEC_BUF_DATA_OFFSET 0x60 -+#define GPI_DDR_SEC_BUF_DATA_OFFSET 0x64 -+#define GPI_TOE_CHKSUM_EN 0x68 -+#define GPI_OVERRUN_DROPCNT 0x6c -+ -+typedef struct { -+ u32 lmem_rtry_cnt; -+ u32 tmlf_txthres; -+ u32 aseq_len; -+} GPI_CFG; -+ -+ -+/* GPI commons defines */ -+#define GPI_LMEM_BUF_EN 0x1 -+#define GPI_DDR_BUF_EN 0x1 -+ -+/* EGPI 1 defines */ -+#define EGPI1_LMEM_RTRY_CNT 0x40 -+#define EGPI1_TMLF_TXTHRES 0xBC -+#define EGPI1_ASEQ_LEN 0x50 -+ -+/* EGPI 2 defines */ -+#define EGPI2_LMEM_RTRY_CNT 0x40 -+#define EGPI2_TMLF_TXTHRES 0xBC -+#define EGPI2_ASEQ_LEN 0x40 -+ -+/* EGPI 3 defines */ -+#define EGPI3_LMEM_RTRY_CNT 0x40 -+#define EGPI3_TMLF_TXTHRES 0xBC -+#define EGPI3_ASEQ_LEN 0x40 -+ -+/* HGPI defines */ -+#define HGPI_LMEM_RTRY_CNT 0x40 -+#define HGPI_TMLF_TXTHRES 0xBC -+#define HGPI_ASEQ_LEN 0x40 -+ -+#endif /* _GPI_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/gpt.h -@@ -0,0 +1,29 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
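The per-instance EGPIx constants in gpi.h above are the values a driver would feed into the GPI_CFG structure; a brief sketch for EGPI1 follows (the field annotations are inferred from the register names and are assumptions, as is any consuming init function).

    GPI_CFG egpi1_cfg = {
    	.lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT, /* LMEM buffer retry count */
    	.tmlf_txthres  = EGPI1_TMLF_TXTHRES,  /* TX fill threshold (cf. GPI_TMLF_TX) */
    	.aseq_len      = EGPI1_ASEQ_LEN,      /* sequence length (cf. GPI_DTX_ASEQ) */
    };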
-+ * -+*/ -+#ifndef _CBUS_GPT_H_ -+#define _CBUS_GPT_H_ -+ -+#define CBUS_GPT_VERSION (CBUS_GPT_BASE_ADDR + 0x00) -+#define CBUS_GPT_STATUS (CBUS_GPT_BASE_ADDR + 0x04) -+#define CBUS_GPT_CONFIG (CBUS_GPT_BASE_ADDR + 0x08) -+#define CBUS_GPT_COUNTER (CBUS_GPT_BASE_ADDR + 0x0c) -+#define CBUS_GPT_PERIOD (CBUS_GPT_BASE_ADDR + 0x10) -+#define CBUS_GPT_WIDTH (CBUS_GPT_BASE_ADDR + 0x14) -+ -+#endif /* _CBUS_GPT_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h -@@ -0,0 +1,96 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+*/ -+#ifndef _HIF_H_ -+#define _HIF_H_ -+ -+/** @file hif.h. -+ * hif - PFE hif block control and status register. Mapped on CBUS and accessible from all PE's and ARM. -+ */ -+#define HIF_VERSION (HIF_BASE_ADDR + 0x00) -+#define HIF_TX_CTRL (HIF_BASE_ADDR + 0x04) -+#define HIF_TX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x08) -+#define HIF_TX_ALLOC (HIF_BASE_ADDR + 0x0c) -+#define HIF_TX_BDP_ADDR (HIF_BASE_ADDR + 0x10) -+#define HIF_TX_STATUS (HIF_BASE_ADDR + 0x14) -+#define HIF_RX_CTRL (HIF_BASE_ADDR + 0x20) -+#define HIF_RX_BDP_ADDR (HIF_BASE_ADDR + 0x24) -+#define HIF_RX_STATUS (HIF_BASE_ADDR + 0x30) -+#define HIF_INT_SRC (HIF_BASE_ADDR + 0x34) -+#define HIF_INT_ENABLE (HIF_BASE_ADDR + 0x38) -+#define HIF_POLL_CTRL (HIF_BASE_ADDR + 0x3c) -+#define HIF_RX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x40) -+#define HIF_RX_ALLOC (HIF_BASE_ADDR + 0x44) -+#define HIF_TX_DMA_STATUS (HIF_BASE_ADDR + 0x48) -+#define HIF_RX_DMA_STATUS (HIF_BASE_ADDR + 0x4c) -+#define HIF_INT_COAL (HIF_BASE_ADDR + 0x50) -+ -+/*HIF_INT_SRC/ HIF_INT_ENABLE control bits */ -+#define HIF_INT (1 << 0) -+#define HIF_RXBD_INT (1 << 1) -+#define HIF_RXPKT_INT (1 << 2) -+#define HIF_TXBD_INT (1 << 3) -+#define HIF_TXPKT_INT (1 << 4) -+ -+/*HIF_TX_CTRL bits */ -+#define HIF_CTRL_DMA_EN (1<<0) -+#define HIF_CTRL_BDP_POLL_CTRL_EN (1<<1) -+#define HIF_CTRL_BDP_CH_START_WSTB (1<<2) -+ -+/*HIF_INT_ENABLE bits */ -+#define HIF_INT_EN (1 << 0) -+#define HIF_RXBD_INT_EN (1 << 1) -+#define HIF_RXPKT_INT_EN (1 << 2) -+#define HIF_TXBD_INT_EN (1 << 3) -+#define HIF_TXPKT_INT_EN (1 << 4) -+ -+/*HIF_POLL_CTRL bits*/ -+#define HIF_RX_POLL_CTRL_CYCLE 0x0400 -+#define HIF_TX_POLL_CTRL_CYCLE 0x0400 -+ -+/*HIF_INT_COAL bits*/ -+#define HIF_INT_COAL_ENABLE (1 << 31) -+ -+/*Buffer descriptor control bits */ -+#define BD_CTRL_BUFLEN_MASK 0x3fff -+#define BD_BUF_LEN(x) (x & BD_CTRL_BUFLEN_MASK) -+#define BD_CTRL_CBD_INT_EN (1 << 16) -+#define BD_CTRL_PKT_INT_EN (1 << 17) -+#define BD_CTRL_LIFM (1 << 18) -+#define BD_CTRL_LAST_BD (1 << 19) -+#define BD_CTRL_DIR (1 << 20) -+#define BD_CTRL_LMEM_CPY (1 << 21) /*Valid only for HIF_NOCPY*/ -+#define BD_CTRL_PKT_XFER (1 << 24) -+#define BD_CTRL_DESC_EN (1 << 31) -+#define BD_CTRL_PARSE_DISABLE (1 << 25) -+#define 
BD_CTRL_BRFETCH_DISABLE (1 << 26) -+#define BD_CTRL_RTFETCH_DISABLE (1 << 27) -+ -+/*Buffer descriptor status bits*/ -+#define BD_STATUS_CONN_ID(x) ((x) & 0xffff) -+#define BD_STATUS_DIR_PROC_ID (1 << 16) -+#define BD_STATUS_CONN_ID_EN (1 << 17) -+#define BD_STATUS_PE2PROC_ID(x) (((x) & 7) << 18) -+#define BD_STATUS_LE_DATA (1 << 21) -+#define BD_STATUS_CHKSUM_EN (1 << 22) -+ -+/*HIF Buffer descriptor status bits */ -+#define DIR_PROC_ID (1 << 16) -+#define PROC_ID(id) ((id) << 18) -+ -+#endif /* _HIF_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h -@@ -0,0 +1,51 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+*/ -+#ifndef _HIF_NOCPY_H_ -+#define _HIF_NOCPY_H_ -+ -+#define HIF_NOCPY_VERSION (HIF_NOCPY_BASE_ADDR + 0x00) -+#define HIF_NOCPY_TX_CTRL (HIF_NOCPY_BASE_ADDR + 0x04) -+#define HIF_NOCPY_TX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x08) -+#define HIF_NOCPY_TX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x0c) -+#define HIF_NOCPY_TX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x10) -+#define HIF_NOCPY_TX_STATUS (HIF_NOCPY_BASE_ADDR + 0x14) -+#define HIF_NOCPY_RX_CTRL (HIF_NOCPY_BASE_ADDR + 0x20) -+#define HIF_NOCPY_RX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x24) -+#define HIF_NOCPY_RX_STATUS (HIF_NOCPY_BASE_ADDR + 0x30) -+#define HIF_NOCPY_INT_SRC (HIF_NOCPY_BASE_ADDR + 0x34) -+#define HIF_NOCPY_INT_ENABLE (HIF_NOCPY_BASE_ADDR + 0x38) -+#define HIF_NOCPY_POLL_CTRL (HIF_NOCPY_BASE_ADDR + 0x3c) -+#define HIF_NOCPY_RX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x40) -+#define HIF_NOCPY_RX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x44) -+#define HIF_NOCPY_TX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x48) -+#define HIF_NOCPY_RX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x4c) -+#define HIF_NOCPY_RX_INQ0_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x50) -+#define HIF_NOCPY_RX_INQ1_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x54) -+#define HIF_NOCPY_TX_PORT_NO (HIF_NOCPY_BASE_ADDR + 0x60) -+#define HIF_NOCPY_LMEM_ALLOC_ADDR (HIF_NOCPY_BASE_ADDR + 0x64) -+#define HIF_NOCPY_CLASS_ADDR (HIF_NOCPY_BASE_ADDR + 0x68) -+#define HIF_NOCPY_TMU_PORT0_ADDR (HIF_NOCPY_BASE_ADDR + 0x70) -+#define HIF_NOCPY_TMU_PORT1_ADDR (HIF_NOCPY_BASE_ADDR + 0x74) -+#define HIF_NOCPY_TMU_PORT2_ADDR (HIF_NOCPY_BASE_ADDR + 0x7c) -+#define HIF_NOCPY_TMU_PORT3_ADDR (HIF_NOCPY_BASE_ADDR + 0x80) -+#define HIF_NOCPY_TMU_PORT4_ADDR (HIF_NOCPY_BASE_ADDR + 0x84) -+#define HIF_NOCPY_INT_COAL (HIF_NOCPY_BASE_ADDR + 0x90) -+ -+ -+#endif /* _HIF_NOCPY_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h -@@ -0,0 +1,128 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
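Putting the hif.h buffer-descriptor control bits together, a plausible control word for a single-buffer TX descriptor is sketched below; the descriptor memory layout itself is not part of these headers, so only the word construction is shown.

    /* One whole packet in one buffer: enable the descriptor, flag it as
     * last-in-frame and end-of-ring, and interrupt once it has been sent. */
    u32 bd_ctrl = BD_CTRL_DESC_EN | BD_CTRL_LIFM | BD_CTRL_LAST_BD |
                  BD_CTRL_PKT_INT_EN | BD_BUF_LEN(pkt_len);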
-+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+*/ -+#ifndef _TMU_CSR_H_ -+#define _TMU_CSR_H_ -+ -+#define TMU_VERSION (TMU_CSR_BASE_ADDR + 0x000) -+#define TMU_INQ_WATERMARK (TMU_CSR_BASE_ADDR + 0x004) -+#define TMU_PHY_INQ_PKTPTR (TMU_CSR_BASE_ADDR + 0x008) -+#define TMU_PHY_INQ_PKTINFO (TMU_CSR_BASE_ADDR + 0x00c) -+#define TMU_PHY_INQ_FIFO_CNT (TMU_CSR_BASE_ADDR + 0x010) -+#define TMU_SYS_GENERIC_CONTROL (TMU_CSR_BASE_ADDR + 0x014) -+#define TMU_SYS_GENERIC_STATUS (TMU_CSR_BASE_ADDR + 0x018) -+#define TMU_SYS_GEN_CON0 (TMU_CSR_BASE_ADDR + 0x01c) -+#define TMU_SYS_GEN_CON1 (TMU_CSR_BASE_ADDR + 0x020) -+#define TMU_SYS_GEN_CON2 (TMU_CSR_BASE_ADDR + 0x024) -+#define TMU_SYS_GEN_CON3 (TMU_CSR_BASE_ADDR + 0x028) -+#define TMU_SYS_GEN_CON4 (TMU_CSR_BASE_ADDR + 0x02c) -+#define TMU_TEQ_DISABLE_DROPCHK (TMU_CSR_BASE_ADDR + 0x030) -+#define TMU_TEQ_CTRL (TMU_CSR_BASE_ADDR + 0x034) -+#define TMU_TEQ_QCFG (TMU_CSR_BASE_ADDR + 0x038) -+#define TMU_TEQ_DROP_STAT (TMU_CSR_BASE_ADDR + 0x03c) -+#define TMU_TEQ_QAVG (TMU_CSR_BASE_ADDR + 0x040) -+#define TMU_TEQ_WREG_PROB (TMU_CSR_BASE_ADDR + 0x044) -+#define TMU_TEQ_TRANS_STAT (TMU_CSR_BASE_ADDR + 0x048) -+#define TMU_TEQ_HW_PROB_CFG0 (TMU_CSR_BASE_ADDR + 0x04c) -+#define TMU_TEQ_HW_PROB_CFG1 (TMU_CSR_BASE_ADDR + 0x050) -+#define TMU_TEQ_HW_PROB_CFG2 (TMU_CSR_BASE_ADDR + 0x054) -+#define TMU_TEQ_HW_PROB_CFG3 (TMU_CSR_BASE_ADDR + 0x058) -+#define TMU_TEQ_HW_PROB_CFG4 (TMU_CSR_BASE_ADDR + 0x05c) -+#define TMU_TEQ_HW_PROB_CFG5 (TMU_CSR_BASE_ADDR + 0x060) -+#define TMU_TEQ_HW_PROB_CFG6 (TMU_CSR_BASE_ADDR + 0x064) -+#define TMU_TEQ_HW_PROB_CFG7 (TMU_CSR_BASE_ADDR + 0x068) -+#define TMU_TEQ_HW_PROB_CFG8 (TMU_CSR_BASE_ADDR + 0x06c) -+#define TMU_TEQ_HW_PROB_CFG9 (TMU_CSR_BASE_ADDR + 0x070) -+#define TMU_TEQ_HW_PROB_CFG10 (TMU_CSR_BASE_ADDR + 0x074) -+#define TMU_TEQ_HW_PROB_CFG11 (TMU_CSR_BASE_ADDR + 0x078) -+#define TMU_TEQ_HW_PROB_CFG12 (TMU_CSR_BASE_ADDR + 0x07c) -+#define TMU_TEQ_HW_PROB_CFG13 (TMU_CSR_BASE_ADDR + 0x080) -+#define TMU_TEQ_HW_PROB_CFG14 (TMU_CSR_BASE_ADDR + 0x084) -+#define TMU_TEQ_HW_PROB_CFG15 (TMU_CSR_BASE_ADDR + 0x088) -+#define TMU_TEQ_HW_PROB_CFG16 (TMU_CSR_BASE_ADDR + 0x08c) -+#define TMU_TEQ_HW_PROB_CFG17 (TMU_CSR_BASE_ADDR + 0x090) -+#define TMU_TEQ_HW_PROB_CFG18 (TMU_CSR_BASE_ADDR + 0x094) -+#define TMU_TEQ_HW_PROB_CFG19 (TMU_CSR_BASE_ADDR + 0x098) -+#define TMU_TEQ_HW_PROB_CFG20 (TMU_CSR_BASE_ADDR + 0x09c) -+#define TMU_TEQ_HW_PROB_CFG21 (TMU_CSR_BASE_ADDR + 0x0a0) -+#define TMU_TEQ_HW_PROB_CFG22 (TMU_CSR_BASE_ADDR + 0x0a4) -+#define TMU_TEQ_HW_PROB_CFG23 (TMU_CSR_BASE_ADDR + 0x0a8) -+#define TMU_TEQ_HW_PROB_CFG24 (TMU_CSR_BASE_ADDR + 0x0ac) -+#define TMU_TEQ_HW_PROB_CFG25 (TMU_CSR_BASE_ADDR + 0x0b0) -+#define TMU_TDQ_IIFG_CFG (TMU_CSR_BASE_ADDR + 0x0b4) -+#define TMU_TDQ0_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x0b8) /**< [9:0] Scheduler 
Enable for each of the scheduler in the TDQ. This is a global Enable for all schedulers in PHY0 */ -+#define TMU_LLM_CTRL (TMU_CSR_BASE_ADDR + 0x0bc) -+#define TMU_LLM_BASE_ADDR (TMU_CSR_BASE_ADDR + 0x0c0) -+#define TMU_LLM_QUE_LEN (TMU_CSR_BASE_ADDR + 0x0c4) -+#define TMU_LLM_QUE_HEADPTR (TMU_CSR_BASE_ADDR + 0x0c8) -+#define TMU_LLM_QUE_TAILPTR (TMU_CSR_BASE_ADDR + 0x0cc) -+#define TMU_LLM_QUE_DROPCNT (TMU_CSR_BASE_ADDR + 0x0d0) -+#define TMU_INT_EN (TMU_CSR_BASE_ADDR + 0x0d4) -+#define TMU_INT_SRC (TMU_CSR_BASE_ADDR + 0x0d8) -+#define TMU_INQ_STAT (TMU_CSR_BASE_ADDR + 0x0dc) -+#define TMU_CTRL (TMU_CSR_BASE_ADDR + 0x0e0) -+ -+#define TMU_MEM_ACCESS_ADDR (TMU_CSR_BASE_ADDR + 0x0e4) /**< [31] Mem Access Command. 0 = Internal Memory Read, 1 = Internal memory Write [27:24] Byte Enables of the Internal memory access [23:0] Address of the internal memory. This address is used to access both the PM and DM of all the PE's */ -+#define TMU_MEM_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x0e8) /**< Internal Memory Access Write Data */ -+#define TMU_MEM_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x0ec) /**< Internal Memory Access Read Data. The commands are blocked at the mem_access only */ -+ -+#define TMU_PHY0_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f0) /**< [31:0] PHY0 in queue address (must be initialized with one of the xxx_INQ_PKTPTR cbus addresses) */ -+#define TMU_PHY1_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f4) /**< [31:0] PHY1 in queue address (must be initialized with one of the xxx_INQ_PKTPTR cbus addresses) */ -+#define TMU_PHY2_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f8) /**< [31:0] PHY2 in queue address (must be initialized with one of the xxx_INQ_PKTPTR cbus addresses) */ -+#define TMU_PHY3_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0fc) /**< [31:0] PHY3 in queue address (must be initialized with one of the xxx_INQ_PKTPTR cbus addresses) */ -+#define TMU_BMU_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x100) -+#define TMU_TX_CTRL (TMU_CSR_BASE_ADDR + 0x104) -+ -+#define TMU_BUS_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x108) -+#define TMU_BUS_ACCESS (TMU_CSR_BASE_ADDR + 0x10c) -+#define TMU_BUS_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x110) -+ -+#define TMU_PE_SYS_CLK_RATIO (TMU_CSR_BASE_ADDR + 0x114) -+#define TMU_PE_STATUS (TMU_CSR_BASE_ADDR + 0x118) -+#define TMU_TEQ_MAX_THRESHOLD (TMU_CSR_BASE_ADDR + 0x11c) -+#define TMU_PHY4_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x134) /**< [31:0] PHY4 in queue address (must be initialized with one of the xxx_INQ_PKTPTR cbus addresses) */ -+#define TMU_TDQ1_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x138) /**< [9:0] Scheduler Enable for each of the scheduler in the TDQ. This is a global Enable for all schedulers in PHY1 */ -+#define TMU_TDQ2_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x13c) /**< [9:0] Scheduler Enable for each of the scheduler in the TDQ. This is a global Enable for all schedulers in PHY2 */ -+#define TMU_TDQ3_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x140) /**< [9:0] Scheduler Enable for each of the scheduler in the TDQ. 
This is a global Enable for all schedulers in PHY3 */ -+#define TMU_BMU_BUF_SIZE (TMU_CSR_BASE_ADDR + 0x144) -+#define TMU_PHY5_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x148) /**< [31:0] PHY5 in queue address (must be initialized with one of the xxx_INQ_PKTPTR cbus addresses) */ -+ -+#define SW_RESET (1 << 0) /**< Global software reset */ -+#define INQ_RESET (1 << 2) -+#define TEQ_RESET (1 << 3) -+#define TDQ_RESET (1 << 4) -+#define PE_RESET (1 << 5) -+#define MEM_INIT (1 << 6) -+#define MEM_INIT_DONE (1 << 7) -+#define LLM_INIT (1 << 8) -+#define LLM_INIT_DONE (1 << 9) -+#define ECC_MEM_INIT_DONE (1<<10) -+ -+typedef struct { -+ u32 pe_sys_clk_ratio; -+ unsigned long llm_base_addr; -+ u32 llm_queue_len; -+} TMU_CFG; -+ -+/* Not HW related for pfe_ctrl / pfe common defines */ -+#define DEFAULT_MAX_QDEPTH 80 -+#define DEFAULT_Q0_QDEPTH 511 //We keep one large queue for host tx qos -+#define DEFAULT_TMU3_QDEPTH 127 -+ -+ -+#endif /* _TMU_CSR_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h -@@ -0,0 +1,61 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
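Following the TMU_MEM_ACCESS_ADDR encoding documented above (bit 31 selects write, bits [27:24] are byte enables, bits [23:0] the internal address), a single-word write to TMU PE internal memory would look roughly like this. The writel() accessor and the assumption that TMU_CSR_BASE_ADDR resolves to a mapped virtual address are part of the sketch, not of the header.

    /* Stage the data, then issue a 4-byte write to internal address 'addr'. */
    writel(val, (void *)TMU_MEM_ACCESS_WDATA);
    writel((1u << 31)          /* command: internal memory write */
           | (0xfu << 24)      /* byte enables: all four bytes */
           | (addr & 0xffffff),/* 24-bit internal address */
           (void *)TMU_MEM_ACCESS_ADDR);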
-+ * -+*/ -+#ifndef _UTIL_CSR_H_ -+#define _UTIL_CSR_H_ -+ -+#define UTIL_VERSION (UTIL_CSR_BASE_ADDR + 0x000) -+#define UTIL_TX_CTRL (UTIL_CSR_BASE_ADDR + 0x004) -+#define UTIL_INQ_PKTPTR (UTIL_CSR_BASE_ADDR + 0x010) -+ -+#define UTIL_HDR_SIZE (UTIL_CSR_BASE_ADDR + 0x014) -+ -+#define UTIL_PE0_QB_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x020) -+#define UTIL_PE0_QB_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x024) -+#define UTIL_PE0_RO_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x060) -+#define UTIL_PE0_RO_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x064) -+ -+#define UTIL_MEM_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x100) -+#define UTIL_MEM_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x104) -+#define UTIL_MEM_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x108) -+ -+#define UTIL_TM_INQ_ADDR (UTIL_CSR_BASE_ADDR + 0x114) -+#define UTIL_PE_STATUS (UTIL_CSR_BASE_ADDR + 0x118) -+ -+#define UTIL_PE_SYS_CLK_RATIO (UTIL_CSR_BASE_ADDR + 0x200) -+#define UTIL_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x204) -+#define UTIL_GAP_BETWEEN_READS (UTIL_CSR_BASE_ADDR + 0x208) -+#define UTIL_MAX_BUF_CNT (UTIL_CSR_BASE_ADDR + 0x20c) -+#define UTIL_TSQ_FIFO_THRES (UTIL_CSR_BASE_ADDR + 0x210) -+#define UTIL_TSQ_MAX_CNT (UTIL_CSR_BASE_ADDR + 0x214) -+#define UTIL_IRAM_DATA_0 (UTIL_CSR_BASE_ADDR + 0x218) -+#define UTIL_IRAM_DATA_1 (UTIL_CSR_BASE_ADDR + 0x21c) -+#define UTIL_IRAM_DATA_2 (UTIL_CSR_BASE_ADDR + 0x220) -+#define UTIL_IRAM_DATA_3 (UTIL_CSR_BASE_ADDR + 0x224) -+ -+#define UTIL_BUS_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x228) -+#define UTIL_BUS_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x22c) -+#define UTIL_BUS_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x230) -+ -+#define UTIL_INQ_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x234) -+ -+typedef struct { -+ u32 pe_sys_clk_ratio; -+} UTIL_CFG; -+ -+#endif /* _UTIL_CSR_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/class.h -@@ -0,0 +1,133 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
-+ * -+*/ -+#ifndef _CLASS_H_ -+#define _CLASS_H_ -+ -+#include "pe.h" -+ -+#define CLASS_DMEM_BASE_ADDR 0x00000000 -+#define CLASS_DMEM_SIZE 0x2000 -+#define CLASS_DMEM_END (CLASS_DMEM_BASE_ADDR + CLASS_DMEM_SIZE) -+#define CLASS_PMEM_BASE_ADDR 0x00010000 -+ -+#define CBUS_BASE_ADDR 0xc0000000 -+#define CLASS_APB_BASE_ADDR 0xc1000000 -+#define CLASS_AHB1_BASE_ADDR 0xc2000000 -+#define CLASS_AHB2_BASE_ADDR 0xc3000000 -+ -+#include "cbus.h" -+ -+#define GPT_BASE_ADDR (CLASS_APB_BASE_ADDR + 0x00000) -+#define UART_BASE_ADDR (CLASS_APB_BASE_ADDR + 0x10000) -+#define PERG_BASE_ADDR (CLASS_APB_BASE_ADDR + 0x20000) -+#define EFET_BASE_ADDR (CLASS_APB_BASE_ADDR + 0x40000) -+ -+#define MAC_HASH_BASE_ADDR (CLASS_AHB1_BASE_ADDR + 0x30000) -+#define VLAN_HASH_BASE_ADDR (CLASS_AHB1_BASE_ADDR + 0x50000) -+ -+#define PE_LMEM_BASE_ADDR (CLASS_AHB2_BASE_ADDR + 0x10000) -+#define PE_LMEM_SIZE 0x8000 -+#define PE_LMEM_END (PE_LMEM_BASE_ADDR + PE_LMEM_SIZE) -+#define CCU_BASE_ADDR (CLASS_AHB2_BASE_ADDR + 0x20000) -+ -+#define IS_DMEM(addr, len) (((unsigned long)(addr) >= CLASS_DMEM_BASE_ADDR) && (((unsigned long)(addr) + (len)) <= CLASS_DMEM_END)) -+#define IS_PE_LMEM(addr, len) (((unsigned long)(addr) >= PE_LMEM_BASE_ADDR) && (((unsigned long)(addr) + (len)) <= PE_LMEM_END)) -+ -+ -+#include "gpt.h" -+#include "uart.h" -+#include "class/perg.h" -+#include "class/efet.h" -+#include "class/mac_hash.h" -+#include "class/vlan_hash.h" -+#include "class/ccu.h" -+ -+ -+#define CLASS_MAX_PBUFFERS 4 -+ -+#define PBUF_HWPARSE_OFFSET 0x10 /* Fixed by hardware */ -+ -+#define PAYLOAD_DMEM_MAX_SIZE (CLASS_PBUF_SIZE - CLASS_PBUF_HEADER_OFFSET - sizeof(class_rx_hdr_t)) -+ -+ -+#define MIN_PKT_SIZE 56 -+ -+#define PARSE_ETH_TYPE (1 << 0) -+#define PARSE_VLAN_TYPE (1 << 1) -+#define PARSE_PPPOE_TYPE (1 << 2) -+#define PARSE_ARP_TYPE (1 << 3) -+#define PARSE_MCAST_TYPE (1 << 4) -+#define PARSE_IP_TYPE (1 << 5) -+#define PARSE_IPV6_TYPE (1 << 6) -+#define PARSE_IPV4_TYPE (1 << 7) -+ -+#define PARSE_IPX_TYPE (1 << 9) -+ -+#define PARSE_UDP_FLOW (1 << 11) -+#define PARSE_TCP_FLOW (1 << 12) -+#define PARSE_ICMP_FLOW (1 << 13) -+#define PARSE_IGMP_FLOW (1 << 14) -+#define PARSE_FRAG_FLOW (1 << 15) -+ -+#define PARSE_HIF_PKT (1 << 23) -+#define PARSE_ARC_HIT (1 << 24) -+#define PARSE_PKT_OVERFLOW (1 << 25) -+ -+#define PARSE_PROTO_MISMATCH (1 << 28) -+#define PARSE_L3_MISMATCH (1 << 29) -+#define PARSE_L2_MISMATCH (1 << 30) -+#define PARSE_INCOMPLETE (1 << 31) -+ -+ -+typedef struct _hwparse_t { -+ u16 sid; -+ u16 connid; -+ u8 toevec; -+ u8 pLayer2Hdr; -+ u8 pLayer3Hdr; -+ u8 pLayer4Hdr; -+ u16 vlanid; -+ u16 ifParseFlags; -+ u32 parseFlags; -+ u16 srcport; -+ u16 dstport; -+ u32 proto:8; -+ u32 port:4; -+ u32 hash:20; -+ u64 rte_res_valid:1; -+ u64 vlan_res_valid:1; -+ u64 dst_res_valid:1; -+ u64 src_res_valid:1; -+ u64 vlan_lookup:20; -+ u64 dst_lookup:20; -+ u64 src_lookup:20; -+} hwparse_t; -+ -+ -+typedef struct { -+ u8 num_cpy; /* no of copies to send out from RO block, for each there must be a corresponding tx pre-header */ -+ u8 dma_len; /* len to be DMAed to DDR mem, including all tx pre-headers */ -+ u16 src_addr; /* class dmem source address, pointing to first tx pre-header */ -+ u32 dst_addr; /* DDR memory destination address of first tx pre-header, must be so packet data is continuous in DDR */ -+ u32 res1; /* reserved for software usage - queue number? 
*/ -+ u16 res2; /* reserved for software usage */ -+ u16 tsv; /* time stamp val */ -+} class_tx_desc_t; -+ -+#endif /* _CLASS_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/class/ccu.h -@@ -0,0 +1,28 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+*/ -+#ifndef _CCU_H_ -+#define _CCU_H_ -+ -+#define CCU_ADDR (CCU_BASE_ADDR + 0x00) -+#define CCU_CNT (CCU_BASE_ADDR + 0x04) -+#define CCU_STATUS (CCU_BASE_ADDR + 0x08) -+#define CCU_VAL (CCU_BASE_ADDR + 0x0c) -+ -+#endif /* _CCU_H_ */ -+ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/class/efet.h -@@ -0,0 +1,44 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+*/ -+#ifndef _CLASS_EFET_H_ -+#define _CLASS_EFET_H_ -+ -+//#define CLASS_EFET_ASYNC 1 -+ -+#define CLASS_EFET_ENTRY_ADDR (EFET_BASE_ADDR + 0x00) -+#define CLASS_EFET_ENTRY_SIZE (EFET_BASE_ADDR + 0x04) -+#define CLASS_EFET_ENTRY_DMEM_ADDR (EFET_BASE_ADDR + 0x08) -+#define CLASS_EFET_ENTRY_STATUS (EFET_BASE_ADDR + 0x0c) -+#define CLASS_EFET_ENTRY_ENDIAN (EFET_BASE_ADDR + 0x10) -+ -+#define CBUS2DMEM 0 -+#define DMEM2CBUS 1 -+ -+#define EFET2BUS_LE (1 << 0) -+#define PE2BUS_LE (1 << 1) -+ -+#ifdef CLASS_EFET_ASYNC -+void class_efet_async(u32 cbus_addr, u32 dmem_addr, u32 len, u32 dir); -+#endif -+ -+void class_efet_sync(u32 cbus_addr, u32 dmem_addr, u32 len, u32 dir); -+ -+ -+#endif /* _CLASS_EFET_H_ */ -+ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/class/mac_hash.h -@@ -0,0 +1,55 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
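class_efet_sync(), declared in efet.h just above, is the blocking variant of the EFET copy; the direction argument is one of the CBUS2DMEM/DMEM2CBUS constants. A one-line usage sketch with placeholder addresses:

    /* Copy 'len' bytes from a CBUS location into class-PE DMEM, blocking. */
    class_efet_sync(cbus_addr, dmem_addr, len, CBUS2DMEM);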
See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+*/ -+#ifndef _MAC_HASH_H_ -+#define _MAC_HASH_H_ -+ -+#define MAC_HASH_REQ1_BASE_ADDR (MAC_HASH_BASE_ADDR + 0x000) -+#define MAC_HASH_REQ2_BASE_ADDR (MAC_HASH_BASE_ADDR + 0x020) -+#define MAC_HASH_REQ3_BASE_ADDR (MAC_HASH_BASE_ADDR + 0x040) -+#define MAC_HASH_REQ4_BASE_ADDR (MAC_HASH_BASE_ADDR + 0x060) -+#define MAC_HASH_REQ5_BASE_ADDR (MAC_HASH_BASE_ADDR + 0x080) -+#define MAC_HASH_REQ6_BASE_ADDR (MAC_HASH_BASE_ADDR + 0x0a0) -+#define MAC_HASH_REQ7_BASE_ADDR (MAC_HASH_BASE_ADDR + 0x0c0) -+#define MAC_HASH_REQ8_BASE_ADDR (MAC_HASH_BASE_ADDR + 0x0e0) -+ -+#define MAC_HASH_REQ_CMD(i) (MAC_HASH_REQ##i##_BASE_ADDR + 0x000) -+#define MAC_HASH_REQ_MAC1_ADDR(i) (MAC_HASH_REQ##i##_BASE_ADDR + 0x004) -+#define MAC_HASH_REQ_MAC2_ADDR(i) (MAC_HASH_REQ##i##_BASE_ADDR + 0x008) -+#define MAC_HASH_REQ_MASK1_ADDR(i) (MAC_HASH_REQ##i##_BASE_ADDR + 0x00c) -+#define MAC_HASH_REQ_MASK2_ADDR(i) (MAC_HASH_REQ##i##_BASE_ADDR + 0x010) -+#define MAC_HASH_REQ_ENTRY(i) (MAC_HASH_REQ##i##_BASE_ADDR + 0x014) -+#define MAC_HASH_REQ_STATUS(i) (MAC_HASH_REQ##i##_BASE_ADDR + 0x018) -+#define MAC_HASH_REQ_ENTRY_MAYCH(i) (MAC_HASH_REQ##i##_BASE_ADDR + 0x01c) -+ -+ -+#define MAC_HASH_FREELIST_PTR_HEAD (MAC_HASH_BASE_ADDR + 0x100) -+#define MAC_HASH_FREELIST_PTR_TAIL (MAC_HASH_BASE_ADDR + 0x104) -+#define MAC_HASH_FREELIST_ENTRIES_ADDR (MAC_HASH_BASE_ADDR + 0x108) -+ -+ -+#define HASH_CMD_INIT 1 -+#define HASH_CMD_ADD 2 -+#define HASH_CMD_DELETE 3 -+#define HASH_CMD_UPDATE 4 -+#define HASH_CMD_SEARCH 5 -+#define HASH_CMD_MEM_READ 6 -+#define HASH_CMD_MEM_WRITE 7 -+ -+#endif /* _MAC_HASH_H_ */ -+ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/class/perg.h -@@ -0,0 +1,39 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
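Because MAC_HASH_REQ_CMD(i) and the related macros in mac_hash.h above paste the index into the register name with ##, 'i' must be a literal 1..8 rather than a runtime variable. A sketch of an add operation through request block 1; the stores are written as direct pointer accesses since these macros expand to absolute class-PE bus addresses, and the exact command/status protocol is an assumption here.

    *(volatile u32 *)MAC_HASH_REQ_MAC1_ADDR(1) = mac_lo; /* key, low word */
    *(volatile u32 *)MAC_HASH_REQ_MAC2_ADDR(1) = mac_hi; /* key, high word */
    *(volatile u32 *)MAC_HASH_REQ_CMD(1) = HASH_CMD_ADD; /* start the command */
    /* completion would then be observed via MAC_HASH_REQ_STATUS(1) */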
-+ * -+*/ -+#ifndef _PERG_H_ -+#define _PERG_H_ -+ -+#define PERG_QB_BUF_STATUS (PERG_BASE_ADDR + 0x00) -+#define PERG_RO_BUF_STATUS (PERG_BASE_ADDR + 0x04) -+#define PERG_CLR_QB_BUF_STATUS (PERG_BASE_ADDR + 0x08) -+#define PERG_SET_RO_BUF_STATUS (PERG_BASE_ADDR + 0x0c) -+#define PERG_CLR_RO_ERR_PKT (PERG_BASE_ADDR + 0x10) -+#define PERG_CLR_BMU2_ERR_PKT (PERG_BASE_ADDR + 0x14) -+ -+#define PERG_ID (PERG_BASE_ADDR + 0x18) -+#define PERG_TIMER1 (PERG_BASE_ADDR + 0x1c) -+//FIXME #define PERG_TIMER2 (PERG_BASE_ADDR + 0x20) -+#define PERG_BMU1_CURRDEPTH (PERG_BASE_ADDR + 0x20) -+#define PERG_BMU2_CURRDEPTH (PERG_BASE_ADDR + 0x24) -+#define PERG_HOST_GP (PERG_BASE_ADDR + 0x2c) -+#define PERG_PE_GP (PERG_BASE_ADDR + 0x30) -+#define PERG_INT_ENABLE (PERG_BASE_ADDR + 0x34) -+#define PERG_INT_SRC (PERG_BASE_ADDR + 0x38) -+ -+#endif /* _PERG_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/class/vlan_hash.h -@@ -0,0 +1,46 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+*/ -+#ifndef _VLAN_HASH_H_ -+#define _VLAN_HASH_H_ -+ -+#define VLAN_HASH_REQ1_BASE_ADDR (VLAN_HASH_BASE_ADDR + 0x000) -+#define VLAN_HASH_REQ2_BASE_ADDR (VLAN_HASH_BASE_ADDR + 0x020) -+#define VLAN_HASH_REQ3_BASE_ADDR (VLAN_HASH_BASE_ADDR + 0x040) -+#define VLAN_HASH_REQ4_BASE_ADDR (VLAN_HASH_BASE_ADDR + 0x060) -+#define VLAN_HASH_REQ5_BASE_ADDR (VLAN_HASH_BASE_ADDR + 0x080) -+#define VLAN_HASH_REQ6_BASE_ADDR (VLAN_HASH_BASE_ADDR + 0x0a0) -+#define VLAN_HASH_REQ7_BASE_ADDR (VLAN_HASH_BASE_ADDR + 0x0c0) -+#define VLAN_HASH_REQ8_BASE_ADDR (VLAN_HASH_BASE_ADDR + 0x0e0) -+ -+#define VLAN_HASH_REQ_CMD(i) (VLAN_HASH_REQ##i##_BASE_ADDR + 0x000) -+#define VLAN_HASH_REQ_MAC1_ADDR(i) (VLAN_HASH_REQ##i##_BASE_ADDR + 0x004) -+#define VLAN_HASH_REQ_MAC2_ADDR(i) (VLAN_HASH_REQ##i##_BASE_ADDR + 0x008) -+#define VLAN_HASH_REQ_MASK1_ADDR(i) (VLAN_HASH_REQ##i##_BASE_ADDR + 0x00c) -+#define VLAN_HASH_REQ_MASK2_ADDR(i) (VLAN_HASH_REQ##i##_BASE_ADDR + 0x010) -+#define VLAN_HASH_REQ_ENTRY(i) (VLAN_HASH_REQ##i##_BASE_ADDR + 0x014) -+#define VLAN_HASH_REQ_STATUS(i) (VLAN_HASH_REQ##i##_BASE_ADDR + 0x018) -+#define VLAN_HASH_REQ_ENTRY_MAYCH(i) (VLAN_HASH_REQ##i##_BASE_ADDR + 0x01c) -+ -+ -+#define VLAN_HASH_FREELIST_PTR_HEAD (VLAN_HASH_BASE_ADDR + 0x100) -+#define VLAN_HASH_FREELIST_PTR_TAIL (VLAN_HASH_BASE_ADDR + 0x104) -+#define VLAN_HASH_FREELIST_ENTRIES_ADDR (VLAN_HASH_BASE_ADDR + 0x108) -+ -+#endif /* _VLAN_HASH_H_ */ -+ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/gpt.h -@@ -0,0 +1,44 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. 
-+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+*/ -+#ifndef _GPT_H_ -+#define _GPT_H_ -+ -+#define GPT_VERSION (GPT_BASE_ADDR + 0x00) -+#define GPT_STATUS (GPT_BASE_ADDR + 0x04) -+#define GPT_CONFIG (GPT_BASE_ADDR + 0x08) -+#define GPT_COUNTER (GPT_BASE_ADDR + 0x0c) -+#define GPT_PERIOD (GPT_BASE_ADDR + 0x10) -+#define GPT_WIDTH (GPT_BASE_ADDR + 0x14) -+ -+/*** These bits are defined for GPT_STATUS register */ -+#define GPT_STAT_IRQ (1<<0) -+#define GPT_STAT_OVERFLOW_ERR (1<<4) -+#define GPT_STAT_TMR_ENABLE (1<<8) -+#define GPT_STAT_TMR_DISABLE (1<<9) -+ -+/*** These bits are defined for GPT_CONFIG register */ -+#define GPT_CONFIG_PWM_MODE 0x1 -+#define GPT_CONFIG_WCAP_MODE 0x2 -+#define GPT_CONFIG_CAP_PULSE_OUT (1<<2) -+#define GPT_CONFIG_PERIOD_CNT (1<<3) -+#define GPT_CONFIG_INTR_ENABLE (1<<4) -+#define GPT_CONFIG_AUX_SEL (1<<5) -+ -+ -+#endif /* _GPT_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/pe.h -@@ -0,0 +1,626 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+*/ -+#ifndef _PE_H_ -+#define _PE_H_ -+ -+#include "hal.h" -+ -+#if defined(COMCERTO_2000_CLASS) -+#include "pfe/class.h" -+#elif defined(COMCERTO_2000_TMU) -+#include "pfe/tmu.h" -+#elif defined(COMCERTO_2000_UTIL) -+#include "pfe/util.h" -+#endif -+ -+enum { -+ CLASS0_ID = 0, -+ CLASS1_ID, -+ CLASS2_ID, -+ CLASS3_ID, -+ CLASS4_ID, -+ CLASS5_ID, -+ TMU0_ID, -+ TMU1_ID, -+ TMU2_ID, -+ TMU3_ID, -+ UTIL_ID, -+ MAX_PE -+}; -+#define PE_ID_ANY MAX_PE -+ -+/* Hardware definition of physical ports */ -+/* CLASS rx header phy number */ -+enum CLASS_RX_PHY { -+ RX_PHY_0 = 0x0, -+ RX_PHY_1, -+ RX_PHY_2, -+ RX_PHY_HIF, -+ RX_PHY_HIF_NOCPY, -+ RX_PHY_CLASS = 1 << 14, /**< Control bit (in PHYNO field) used to inform CLASS PE that packet comes from Class. */ -+ RX_PHY_UTIL = 1 << 15 /**< Control bit (in PHYNO field) used to inform CLASS PE that packet comes from UtilPE. */ -+}; -+ -+#define RX_PHY_SW_INPUT_PORT_OFFSET 11 /**< Offset in PHYNO field where the original input port will be stored for packets coming directly from software (UtilPE or Class). 
*/ -+ -+ -+/* CLASS/TMU tx header phy number */ -+enum TMU_TX_PHY { -+ TX_PHY_TMU0 = 0x0, -+ TX_PHY_TMU1, -+ TX_PHY_TMU2, -+ TX_PHY_TMU3 -+}; -+ -+ -+// NOTE: Any changes to the following drop counter definitions must also -+// be reflected in the pfe/pfe.h file and in pfe_ctrl/pfe_sysfs.c. -+ -+#if defined(COMCERTO_2000_CLASS) -+ -+#define CLASS_DROP_ICC 0 -+#define CLASS_DROP_HOST_PKT_ERROR 1 -+#define CLASS_DROP_RX_ERROR 2 -+#define CLASS_DROP_IPSEC_OUT 3 -+#define CLASS_DROP_IPSEC_IN 4 -+#define CLASS_DROP_EXPT_IPSEC 5 -+#define CLASS_DROP_REASSEMBLY 6 -+#define CLASS_DROP_FRAGMENTER 7 -+#define CLASS_DROP_NATT 8 -+#define CLASS_DROP_SOCKET 9 -+#define CLASS_DROP_MULTICAST 10 -+#define CLASS_DROP_NATPT 11 -+#define CLASS_DROP_TX_DISABLE 12 -+ -+#define CLASS_NUM_DROP_COUNTERS 13 -+ -+extern U32 drop_counter[CLASS_NUM_DROP_COUNTERS]; -+#define DROP_PACKET(pmtd, counter) free_packet(pmtd, CLASS_DROP_##counter) -+#define DROP_BUFFER(addr, counter) free_buffer(addr, CLASS_DROP_##counter) -+ -+#elif defined(COMCERTO_2000_UTIL) -+ -+#define UTIL_DROP_IPSEC_OUT 0 -+#define UTIL_DROP_IPSEC_IN 1 -+#define UTIL_DROP_IPSEC_RATE_LIMIT 2 -+#define UTIL_DROP_FRAGMENTER 3 -+#define UTIL_DROP_SOCKET 4 -+#define UTIL_DROP_TX_DISABLE 5 -+#define UTIL_DROP_RX_ERROR 6 -+#define UTIL_DROP_NO_MTD 7 -+ -+#define UTIL_NUM_DROP_COUNTERS 8 -+ -+extern U32 drop_counter[UTIL_NUM_DROP_COUNTERS]; -+#define DROP_PACKET(pmtd, counter) free_packet(pmtd, UTIL_DROP_##counter) -+#define DROP_BUFFER(addr, counter) free_buffer(addr, UTIL_DROP_##counter) -+ -+#endif -+ -+ -+ -+#define DDR_BASE_ADDR 0x00020000 -+#define DDR_END 0x86000000 /* This includes ACP and IRAM areas */ -+#define IRAM_BASE_ADDR 0x83000000 -+ -+#define IS_DDR(addr, len) (((unsigned long)(addr) >= DDR_BASE_ADDR) && (((unsigned long)(addr) + (len)) <= DDR_END)) -+/* action bits of act_phyno is defined as follows */ -+ -+#define ACT_SRC_MAC_REPLACE (1 << (4 + 0)) -+#define ACT_VLAN_ADD (1 << (4 + 1)) -+#define ACT_TCPCHKSUM_REPLACE (1 << (4 + 2)) -+#define ACT_VLAN_REPLACE (1 << (4 + 3)) -+#define ACT_DONT_FREE_BUFFER (1 << (4 + 5)) -+#define ACT_IPCHKSUM_REPLACE (1 << (4 + 6)) -+ -+typedef struct { -+ u8 start_data_off; /* packet data start offset, relative to start of this tx pre-header */ -+ u8 start_buf_off; /* this tx pre-header start offset, relative to start of DDR buffer */ -+ u16 pkt_length; /* total packet length */ -+ u8 act_phyno; /* action / phy number */ -+ u8 queueno; /* queueno */ -+ u16 unused; -+} class_tx_hdr_t; -+ -+typedef struct { -+ u8 start_data_off; /* packet data start offset, relative to start of this tx pre-header */ -+ u8 start_buf_off; /* this tx pre-header start offset, relative to start of DDR buffer */ -+ u16 pkt_length; /* total packet length */ -+ u8 act_phyno; /* action / phy number */ -+ u8 queueno; /* queueno */ -+ u16 src_mac_msb; /* indicates src_mac 47:32 */ -+ u32 src_mac_lsb; /* indicates src_mac 31:0 */ -+ u32 vlanid; /* vlanid */ -+} class_tx_hdr_mc_t; -+ -+typedef struct { -+ u32 next_ptr; /* ptr to the start of the first DDR buffer */ -+ u16 length; /* total packet length */ -+ u16 phyno; /* input physical port number */ -+ u32 status; /* gemac status bits bits[32:63]*/ -+ u32 status2; /* gemac status bits bits[0:31] */ -+} class_rx_hdr_t; -+/* class_rx_hdr status bits (status0 bits in hardware blocks) -+ * from hif_top/dma_dxr_dtx.v -+ * STATUS[9:0] is the encoding of bits in the LMEM buffer as seen by the QB block, -+ * NOT the encoding of bits as seen by the Class PEs in the DMEM rx header */ -+#define 
STATUS_PARSE_DISABLE (1 << 0) -+#define STATUS_BRFETCH_DISABLE (1 << 1) -+#define STATUS_RTFETCH_DISABLE (1 << 2) -+#define STATUS_DIR_PROC_ID (1 << 3) -+#define STATUS_CONN_ID_EN (1 << 4) -+#define STATUS_PE2PROC_ID(x) (((x) & 7) << 5) -+#define STATUS_LE_DATA (1 << 8) -+#define STATUS_CHKSUM_EN (1 << 9) -+ -+/* from gpi/gpi_rmlf.v */ -+#define STATUS_CUMULATIVE_ERR (1 << 16) -+#define STATUS_LENGTH_ERR (1 << 17) -+#define STATUS_CRC_ERR (1 << 18) -+#define STATUS_TOO_SHORT_ERR (1 << 19) -+#define STATUS_TOO_LONG_ERR (1 << 20) -+#define STATUS_CODE_ERR (1 << 21) -+#define STATUS_MC_HASH_MATCH (1 << 22) -+#define STATUS_CUMULATIVE_ARC_HIT (1 << 23) -+#define STATUS_UNICAST_HASH_MATCH (1 << 24) -+#define STATUS_IP_CHECKSUM_CORRECT (1 << 25) -+#define STATUS_TCP_CHECKSUM_CORRECT (1 << 26) -+#define STATUS_UDP_CHECKSUM_CORRECT (1 << 27) -+#define STATUS_OVERFLOW_ERR (1 << 28) -+ -+#define UTIL_MAGIC_NUM 0xffd8ffe000104a46 -+#define UTIL_DDRC_WA -+ -+/* The following structure is filled in by the class PE when a packet -+ * has to be sent to the util PE, carrying the information it requires */ -+typedef struct { -+ u32 mtd_flags : 16; -+ u32 packet_type : 8; -+ u32 input_port : 4; -+ u32 data_offset : 4; -+ u32 word[MTD_PRIV]; -+#ifdef UTIL_DDRC_WA -+ u64 magic_num; // magic number used to verify data validity in UtilPE -+#endif -+} __attribute__((aligned(8))) util_rx_hdr_t; // Size must be a multiple of 64 bits to allow copies using EFET. -+ -+#define UTIL_RX_IPS_IN_PKT EVENT_IPS_IN -+#define UTIL_RX_IPS_OUT_PKT EVENT_IPS_OUT -+#define UTIL_RX_RTP_PKT EVENT_RTP_RELAY -+#define UTIL_RX_RTP_QOS_PKT EVENT_RTP_QOS -+#define UTIL_RX_FRAG4_PKT EVENT_FRAG4 -+#define UTIL_RX_FRAG6_PKT EVENT_FRAG6 -+ -+/** Structure passed from UtilPE to Class, stored at the end of the LMEM buffer. Defined and used by software only. -+ * -+ */ -+ -+typedef struct -+{ -+ void *next; -+ u16 next_length; -+ u8 next_l3offset; -+ u8 next_l4offset; -+} frag_info; -+ -+typedef struct { -+ u8 packet_type : 6; -+ u8 padding : 2; -+ -+ u8 offset : 3; -+ u8 ddr_offset : 5; -+ -+ u16 mtd_flags; -+ union { -+ u16 half[6]; -+ u8 byte[12]; -+ -+ struct { -+ u16 sa_handle[2]; // SA_MAX_OP value should be used here instead of 2 -+ u8 proto; -+ S8 sa_op; -+ u8 l2hdr_len; -+ u8 adj_dmem; -+ } ipsec; -+ -+ struct { -+ u16 l4offset; -+ u16 socket_id; -+ BOOL update; -+ u8 reserved; -+ u32 payload_diff; -+ } relay; -+ -+ struct { -+ u16 l3offset; -+ u16 l4offset; -+ -+ frag_info frag; -+ } ipv6; -+ -+ struct { -+ u16 l3offset; -+ } ipv4; -+ -+ struct { -+ u32 ddr_addr; -+ u16 length; -+ u8 port; -+ u8 queue; -+ u8 action; -+ } tx; -+ }; -+} lmem_trailer_t; -+ -+/* The following values are defined for packet_type of lmem_trailer_t.
-+ * These represent different types of packets sent from util to class -+ * for processing */ -+enum { -+ UTIL_TX_IPS_IN = 0, -+ UTIL_TX_IPV4_RTP_PKT, -+ UTIL_TX_IPV6_RTP_PKT, -+ UTIL_TX_IPV4_PKT, -+ UTIL_TX_IPV6_PKT, -+ UTIL_TX_EXPT_PKT, -+#ifdef CFG_PCAP -+ UTIL_TX_PKT, -+#endif -+ UTIL_TX_MAX_PKT -+}; -+ -+ -+#define UTIL_TX_TRAILER_SIZE sizeof(lmem_trailer_t) -+#define UTIL_TX_TRAILER(mtd) ((lmem_trailer_t *)ROUND_UP32((u32)(mtd)->rx_dmem_end)) -+ -+typedef struct { -+ u32 pkt_ptr; -+ u8 phyno; -+ u8 queueno; -+ u16 len; -+} tmu_tx_hdr_t; -+ -+struct hif_pkt_hdr { -+ u8 client_id; -+ u8 qNo; -+ u16 client_ctrl_le_lsw; -+ u16 client_ctrl_le_msw; -+}; -+ -+ -+#if defined(CFG_WIFI_OFFLOAD) -+#define MAX_WIFI_VAPS 3 -+#define PFE_WIFI_PKT_HEADROOM 96 /*PFE inserts this headroom for WiFi tx packets only in lro mode */ -+#else -+#define MAX_WIFI_VAPS 0 -+#endif -+ -+/* HIF header client id */ -+enum HIF_CLIENT_ID { -+ CLIENT_ID_GEM0 = 0, -+ CLIENT_ID_GEM1, -+ CLIENT_ID_GEM2, -+ CLIENT_ID_WIFI0, -+ CLIENT_ID_WIFI_LAST = MAX_WIFI_VAPS + CLIENT_ID_GEM2, -+ CLIENT_ID_PCAP, -+ CLIENT_ID_UNKNOWN = 0xff, -+}; -+ -+ -+#define IS_WIFI_CLIENT_ID(_clid) (((_clid) >= CLIENT_ID_WIFI0) && ((_clid) <= CLIENT_ID_WIFI_LAST)) -+ -+/* These match LE definition */ -+#define HIF_CTRL_TX_TSO_NOCPY __cpu_to_le32(1 << 8) -+#define HIF_CTRL_TX_IPSEC_OUT __cpu_to_le32(1 << 7) -+#define HIF_CTRL_TX_WIFI_OWNMAC __cpu_to_le32(1 << 6) -+#define HIF_CTRL_TX_TSO_END __cpu_to_le32(1 << 5) -+#define HIF_CTRL_TX_TSO6 __cpu_to_le32(1 << 4) -+#define HIF_CTRL_TX_TSO __cpu_to_le32(1 << 3) -+#define HIF_CTRL_TX_CHECKSUM __cpu_to_le32(1 << 2) -+#define HIF_CTRL_TX_CSUM_VALIDATE __cpu_to_le32(1 << 1) -+#define HIF_CTRL_TX_WIFI_TXOFLD __cpu_to_le32(1 << 0) -+ -+#define HIF_CTRL_RX_OFFSET_MASK __cpu_to_le32(0xf << 24) -+#define HIF_CTRL_RX_PE_ID_MASK __cpu_to_le32(0xf << 16) -+#define HIF_CTRL_RX_IPSEC_IN __cpu_to_le32(1 << 4) -+#define HIF_CTRL_RX_WIFI_EXPT __cpu_to_le32(1 << 3) -+#define HIF_CTRL_RX_CHECKSUMMED __cpu_to_le32(1 << 2) -+#define HIF_CTRL_RX_CONTINUED __cpu_to_le32(1 << 1) -+#define HIF_CTRL_RX_WIFI_HEADROOM __cpu_to_le32(1 << 0) -+ -+#ifdef CFG_LRO -+struct hif_lro_hdr { -+ u16 data_offset; -+ u16 mss; -+}; -+#endif -+ -+struct hif_ipsec_hdr { -+ u16 sa_handle[2]; -+}; -+ -+#define MAX_TSO_BUF_DESCS 5 -+struct hif_tso_buf_desc { -+ u32 addr; -+ u32 ctrl; -+#define TSO_CTRL_LAST_BUFFER (1 << 31) -+}; -+ -+struct hif_tso_hdr { -+ u16 ip_off; -+ u16 ip_id; -+ u16 ip_len; -+ u16 tcp_off; -+ u32 tcp_seq; -+}; -+ -+struct hif_tso_hdr_nocpy { -+ u16 ip_off; -+ u16 ip_id; -+ u16 ip_len; -+ u16 tcp_off; -+ u32 tcp_seq; -+ struct hif_tso_buf_desc bdesc[MAX_TSO_BUF_DESCS]; -+}; -+ -+struct hif_pcap_hdr { -+ u8 ifindex; -+ u8 unused; -+ u16 seqno; -+ u32 timestamp; -+}; -+ -+ -+struct pe_sync_mailbox -+{ -+ u32 stop; -+ u32 stopped; -+}; -+ -+struct pe_msg_mailbox -+{ -+ u32 dst; -+ u32 src; -+ u32 len; -+ u32 request; -+}; -+ -+ -+/** Basic busy loop delay function -+* -+* @param cycles Number of cycles to delay (actual cpu cycles should be close to 3 x cycles) -+* -+*/ -+static inline void delay(u32 cycles) -+{ -+ volatile int i; -+ -+ for (i = 0; i < cycles; i++); -+} -+ -+ -+/** Read PE id -+* -+* @return PE id (0 - 5 for CLASS-PE's, 6 - 9 for TMU-PE's, 10 for UTIL-PE) -+* -+*/ -+static inline u32 esi_get_mpid(void) -+{ -+ u32 mpid; -+ -+ asm ("rcsr %0, Configuration, MPID" : "=d" (mpid)); -+ -+ return mpid; -+} -+ -+ -+#define esi_get_csr(bank, csr) \ -+({ \ -+ u32 res; \ -+ asm ("rcsr %0, " #bank ", " #csr : "=d" (res)); \ 
-+ res; \ -+}) -+ -+#define esi_get_isa0() esi_get_csr(Configuration, ISA0) -+#define esi_get_isa1() esi_get_csr(Configuration, ISA1) -+#define esi_get_isa2() esi_get_csr(Configuration, ISA2) -+#define esi_get_isa3() esi_get_csr(Configuration, ISA3) -+#define esi_get_epc() esi_get_csr(Thread, EPC) -+#define esi_get_ecas() esi_get_csr(Thread, ECAS) -+#define esi_get_eid() esi_get_csr(Thread, EID) -+#define esi_get_ed() esi_get_csr(Thread, ED) -+ -+static inline void esi_pe_stop(U32 state) -+{ -+ PESTATUS_SETSTATE(state); -+ while (1) -+ { -+ asm("stop"); -+ } -+} -+ -+ -+/** Same 64bit alignment memory copy using efet. -+* Either the source or destination address must be in DMEM, the other address can be in LMEM or DDR. -+* Both the source and destination must have the same 64bit alignment, length should be more than four bytes -+* or dst/src must be 32bit aligned. Otherwise use efet_memcpy_any() -+* Uses efet synchronous interface to copy the data. -+* -+* @param dst Destination address to write to (must have the same 64bit alignment as src) -+* @param src Source address to read from (must have the same 64bit alignment as dst) -+* @param len Number of bytes to copy -+* -+*/ -+void efet_memcpy(void *dst, void *src, unsigned int len); -+ -+/** Same 64bit alignment memory copy using efet. -+* Either the source or destination address must be in DMEM, the other address can be in LMEM or DDR. -+* Both the source and destination must have the same 64bit alignment, there is no restriction on length. -+* For UTIL-PE revA0, this function will still fail to handle small/unaligned writes. -+* Uses efet synchronous interface to copy the data. -+* -+* @param dst Destination address to write to (must have the same 64bit alignment as src) -+* @param src Source address to read from (must have the same 64bit alignment as dst) -+* @param len Number of bytes to copy -+* -+*/ -+void efet_memcpy_any(void *dst, void *src, unsigned int len); -+ -+/** Same 64bit alignment memory copy using efet. -+* Either the source or destination address must be in DMEM, the other address can be in LMEM or DDR. -+* Both the source and destination must have the same 64bit alignment, length should be more than four bytes -+* or dst/src must be 32bit aligned. -+* Uses efet asynchronous interface to copy the data. -+* -+* @param dst Destination address to write to (must have the same 64bit alignment as src) -+* @param src Source address to read from (must have the same 64bit alignment as dst) -+* @param len Number of bytes to copy -+* -+*/ -+void efet_memcpy_nowait(void *dst, void *src, unsigned int len); -+ -+/** Unaligned memory copy using efet. -+* Either the source or destination address must be in DMEM, the other address can be in LMEM or DDR. -+* There is no restriction on source or destination, nor on length. -+* -+* @param dst Destination address to write to -+* @param src Source address to read from -+* @param len Number of bytes to copy -+* @param dmem_buf temp dmem buffer to use, must be 64bit aligned -+* @param dmem_len length of dmem buffer, must be 64bit aligned and at least 16 bytes -+* -+*/ -+void efet_memcpy_unaligned(void *dst, void *src, unsigned int len, void *dmem_buf, unsigned int dmem_len); -+ -+/** Aligned memory copy of 4 bytes to register address. -+* Register address must be 32 bit aligned. -+* -+* @param val value to be copied.
-+* @param addr Register address (must be 32bit aligned) -+* -+*/ -+void __efet_writel(u32 val, void *addr); -+ -+#ifdef REVA_WA -+#define efet_writel(val, addr) __efet_writel((u32)(val), (void *) (addr)) -+#else -+#define efet_writel(val, addr) writel((u32)(val), (void *) (addr)) -+#endif -+ -+ -+/** 32bit aligned memory copy. -+* Source and destination addresses must be 32bit aligned, there is no restriction on the length. -+* -+* @param dst Destination address (must be 32bit aligned) -+* @param src Source address (must be 32bit aligned) -+* @param len Number of bytes to copy -+* -+*/ -+void memcpy_aligned32(void *dst, void *src, unsigned int len); -+ -+/** Aligned memory copy. -+* Source and destination addresses must have the same alignment -+* relative to 32bit boundaries (but otherwise may have any alignment), -+* there is no restriction on the length. -+* -+* @param dst Destination address -+* @param src Source address (must have same 32bit alignment as dst) -+* @param len Number of bytes to copy -+* -+*/ -+void memcpy_aligned(void *dst, void *src, unsigned int len); -+ -+/** Unaligned memory copy. -+* Implements unaligned memory copy. We first align the destination -+* to a 32bit boundary (using byte copies), then the src, and finally use a loop -+* of read, shift, write. -+* -+* @param dst Destination address -+* @param src Source address -+* @param len Number of bytes to copy -+* -+*/ -+void memcpy_unaligned(void *dst, void *src, unsigned int len); -+ -+/** Generic memory set. -+* Implements a generic memory set. Not very optimal (uses byte writes for the entire range) -+* -+* -+* @param dst Destination address -+* @param val Value to set memory to -+* @param len Number of bytes to set -+* -+*/ -+void memset(void *dst, u8 val, unsigned int len); -+ -+/** Generic memory copy. -+* Implements generic memory copy. If source and destination have the same -+* alignment memcpy_aligned() is used, otherwise memcpy_unaligned() -+* -+* @param dst Destination address -+* @param src Source address -+* @param len Number of bytes to copy -+* -+*/ -+void memcpy(void *dst, void *src, unsigned int len); -+ -+/** Generic memory move. -+* Implements a generic memory move, where copies across overlapping -+* memory regions are supported. -+* Uses the dmem_buf passed as a parameter as a temporary buffer. -+* It performs two copies, forcing one of the copies to be aligned. -+* The "dmem_len" being passed should be at least 3 bytes greater than "len". -+* The 3 bytes here are shift bytes used to ensure one aligned copy. -+* -+* @param dst Destination address -+* @param src Source address -+* @param len Number of bytes to copy -+* @param dmem_buf temp dmem buffer to use, must be 32bit aligned -+* @param dmem_len length of dmem buffer, must be 32bit aligned and at least 3 bytes greater -+* than @param len -+* -+*/ -+ -+void *memorymove(void * dst, void * src, unsigned int len, void *dmem_buf, unsigned int dmem_len); -+ -+/** Aligned memory copy in DDR memory. -+ * Implements aligned memory copy between two DDR buffers using efet_memcpy64 and DMEM -+ * Both the source and destination must have the same 64bit alignment, there is no restriction on length. -+ * If start or end are not 64bit aligned, data in destination buffer before start/after end will be corrupted.
-+ * -+ * @param dst DDR Destination address -+ * @param src DDR Source address -+ * @param len Number of bytes to copy -+ * @param dmem_buf temp dmem buffer to use, must be 64bit aligned -+ * @param dmem_len length of dmem buffer, must be 64bit aligned and at least 16 bytes -+ */ -+void memcpy_ddr_to_ddr(void *dst, void *src, unsigned int len, void *dmem_buf, unsigned int dmem_len); -+ -+/** Unaligned memory copy in DDR memory. -+ * Implements generic memory copy between two DDR buffers using efet_memcpy and DMEM -+ * There is no restriction on the source, destination and length alignments. -+ * -+ * @param dst DDR Destination address -+ * @param src DDR Source address -+ * @param len Number of bytes to copy -+ * @param dmem_buf temp dmem buffer to use, must be 64bit aligned -+ * @param dmem_len length of dmem buffer, must be 64bit aligned and at least 16 bytes -+ */ -+void memcpy_ddr_to_ddr_unaligned(void *dst, void *src, unsigned int len, void *dmem_buf, unsigned int dmem_len); -+ -+#endif /* _PE_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/pfe.h -@@ -0,0 +1,444 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+*/ -+#ifndef _PFE_H_ -+#define _PFE_H_ -+ -+#define CLASS_DMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20)) -+#define CLASS_IMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20)) /* Only valid for mem access register interface */ -+#define CLASS_DMEM_SIZE 0x00002000 -+#define CLASS_IMEM_SIZE 0x00008000 -+ -+#define TMU_DMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20)) -+#define TMU_IMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20)) /* Only valid for mem access register interface */ -+#define TMU_DMEM_SIZE 0x00000800 -+#define TMU_IMEM_SIZE 0x00002000 -+ -+#define UTIL_DMEM_BASE_ADDR 0x00000000 -+#define UTIL_DMEM_SIZE 0x00002000 -+ -+#define PE_LMEM_BASE_ADDR 0xc3010000 -+#define PE_LMEM_SIZE 0x8000 -+#define PE_LMEM_END (PE_LMEM_BASE_ADDR + PE_LMEM_SIZE) -+ -+#define DMEM_BASE_ADDR 0x00000000 -+#define DMEM_SIZE 0x2000 /**< TMU has less... */ -+#define DMEM_END (DMEM_BASE_ADDR + DMEM_SIZE) -+ -+#define PMEM_BASE_ADDR 0x00010000 -+#define PMEM_SIZE 0x8000 /**< TMU has less... 
*/ -+#define PMEM_END (PMEM_BASE_ADDR + PMEM_SIZE) -+ -+ -+/* These check memory ranges from PE point of view/memory map */ -+#define IS_DMEM(addr, len) (((unsigned long)(addr) >= DMEM_BASE_ADDR) && (((unsigned long)(addr) + (len)) <= DMEM_END)) -+#define IS_PMEM(addr, len) (((unsigned long)(addr) >= PMEM_BASE_ADDR) && (((unsigned long)(addr) + (len)) <= PMEM_END)) -+#define IS_PE_LMEM(addr, len) (((unsigned long)(addr) >= PE_LMEM_BASE_ADDR) && (((unsigned long)(addr) + (len)) <= PE_LMEM_END)) -+ -+#define IS_PFE_LMEM(addr, len) (((unsigned long)(addr) >= CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR)) && (((unsigned long)(addr) + (len)) <= CBUS_VIRT_TO_PFE(LMEM_END))) -+#define __IS_PHYS_DDR(addr, len) (((unsigned long)(addr) >= DDR_PHYS_BASE_ADDR) && (((unsigned long)(addr) + (len)) <= DDR_PHYS_END)) -+#define IS_PHYS_DDR(addr, len) __IS_PHYS_DDR(DDR_PFE_TO_PHYS(addr), len) -+ -+/* If using a run-time virtual address for the cbus base address use this code */ -+extern void *cbus_base_addr; -+extern void *ddr_base_addr; -+extern unsigned long ddr_phys_base_addr; -+extern unsigned int ddr_size; -+ -+#if defined(COMCERTO_2000_CONTROL) -+#include -+#if defined (CONFIG_PLATFORM_C2000) -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) -+/* This is copied from arch/arm/include/asm/system_info.h */ -+extern unsigned int system_rev; -+#endif -+#endif -+#endif -+ -+#define CBUS_BASE_ADDR cbus_base_addr -+#define DDR_PHYS_BASE_ADDR ddr_phys_base_addr -+#define DDR_BASE_ADDR ddr_base_addr -+#define DDR_SIZE ddr_size -+ -+#define DDR_PHYS_END (DDR_PHYS_BASE_ADDR + DDR_SIZE) -+ -+#if defined(CONFIG_PLATFORM_C2000) -+#define PFE_CBUS_PHYS_BASE_ADDR 0xc0000000 /**< CBUS physical base address as seen by PE's. */ -+#define DDR_PHYS_TO_PFE(p) (p) -+#define DDR_PFE_TO_PHYS(p) (p) -+#define CBUS_PHYS_TO_PFE(p) (p) -+#else -+#define LS1012A_PFE_RESET_WA /* PFE doesn't have a global reset, so re-init must take care of a few things to make PFE functional after reset */ -+#define PFE_CBUS_PHYS_BASE_ADDR 0xc0000000 /**< CBUS physical base address as seen by PE's. */ -+#define PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE 0xc0000000 /**< CBUS physical base address as seen by PE's.
*/ -+#define DDR_PHYS_TO_PFE(p) (((unsigned long int) (p)) & 0x7FFFFFFF) -+#define DDR_PFE_TO_PHYS(p) (((unsigned long int) (p)) | 0x80000000) -+#define CBUS_PHYS_TO_PFE(p) (((p) - PFE_CBUS_PHYS_BASE_ADDR) + PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE) /*Translates to PFE address map */ -+#endif -+ -+#define DDR_PHYS_TO_VIRT(p) (((p) - DDR_PHYS_BASE_ADDR) + DDR_BASE_ADDR) -+#define DDR_VIRT_TO_PHYS(v) (((v) - DDR_BASE_ADDR) + DDR_PHYS_BASE_ADDR) -+#define DDR_VIRT_TO_PFE(p) (DDR_PHYS_TO_PFE(DDR_VIRT_TO_PHYS(p))) -+ -+#define CBUS_VIRT_TO_PFE(v) (((v) - CBUS_BASE_ADDR) + PFE_CBUS_PHYS_BASE_ADDR) -+#define CBUS_PFE_TO_VIRT(p) (((p) - PFE_CBUS_PHYS_BASE_ADDR) + CBUS_BASE_ADDR) -+ -+/* The below part of the code is used in QOS control driver from host */ -+#define TMU_APB_BASE_ADDR 0xc1000000 /** TMU base address seen by pe's */ -+ -+#define SHAPER0_BASE_ADDR (TMU_APB_BASE_ADDR + 0x020000) -+#define SHAPER1_BASE_ADDR (TMU_APB_BASE_ADDR + 0x030000) -+#define SHAPER2_BASE_ADDR (TMU_APB_BASE_ADDR + 0x040000) -+#define SHAPER3_BASE_ADDR (TMU_APB_BASE_ADDR + 0x050000) -+#define SHAPER4_BASE_ADDR (TMU_APB_BASE_ADDR + 0x060000) -+#define SHAPER5_BASE_ADDR (TMU_APB_BASE_ADDR + 0x070000) -+#define SHAPER6_BASE_ADDR (TMU_APB_BASE_ADDR + 0x080000) -+#define SHAPER7_BASE_ADDR (TMU_APB_BASE_ADDR + 0x090000) -+#define SHAPER8_BASE_ADDR (TMU_APB_BASE_ADDR + 0x0a0000) -+#define SHAPER9_BASE_ADDR (TMU_APB_BASE_ADDR + 0x0b0000) -+ -+#define SCHED0_BASE_ADDR (TMU_APB_BASE_ADDR + 0x1c0000) -+#define SCHED1_BASE_ADDR (TMU_APB_BASE_ADDR + 0x1d0000) -+#define SCHED2_BASE_ADDR (TMU_APB_BASE_ADDR + 0x1e0000) -+#define SCHED3_BASE_ADDR (TMU_APB_BASE_ADDR + 0x1f0000) -+#define SCHED4_BASE_ADDR (TMU_APB_BASE_ADDR + 0x200000) -+#define SCHED5_BASE_ADDR (TMU_APB_BASE_ADDR + 0x210000) -+#define SCHED6_BASE_ADDR (TMU_APB_BASE_ADDR + 0x220000) -+#define SCHED7_BASE_ADDR (TMU_APB_BASE_ADDR + 0x230000) -+ -+#define PHY_QUEUE_BASE_ADDR (TMU_APB_BASE_ADDR + 0x260000) -+#define QUEUE_RESULT0 (PHY_QUEUE_BASE_ADDR + 0x48) /**< [7] set to one to indicate output PHY (TMU0->PHY0, TMU1->PHY1, TMU2->PHY2, TMU3->PHY3), [6:0] winner input queue number */ -+#define QUEUE_RESULT1 (PHY_QUEUE_BASE_ADDR + 0x4c) /**< [7] set to one to indicate output PHY (TMU0->PHY0, TMU1->PHY1, TMU2->PHY2, TMU3->PHY4), [6:0] winner input queue number */ -+#define QUEUE_RESULT2 (PHY_QUEUE_BASE_ADDR + 0x50) /**< [7] set to one to indicate output PHY (TMU0->PHY0, TMU1->PHY1, TMU2->PHY2, TMU3->PHY5), [6:0] winner input queue number */ -+ -+#define QUEUE_RESULT0_REGOFFSET (QUEUE_RESULT0 - QUEUE_RESULT0) -+#define QUEUE_RESULT1_REGOFFSET (QUEUE_RESULT1 - QUEUE_RESULT0) -+#define QUEUE_RESULT2_REGOFFSET (QUEUE_RESULT2 - QUEUE_RESULT0) -+ -+ -+#include "cbus.h" -+ -+enum { -+ CLASS0_ID = 0, -+ CLASS1_ID, -+ CLASS2_ID, -+ CLASS3_ID, -+#if !defined(CONFIG_PLATFORM_PCI) -+ CLASS4_ID, -+ CLASS5_ID, -+#endif -+#if !defined(CONFIG_TMU_DUMMY) -+ TMU0_ID, -+ TMU1_ID, -+ TMU2_ID, -+ TMU3_ID, -+#else -+ TMU0_ID, -+#endif -+#if !defined(CONFIG_UTIL_DISABLED) -+ UTIL_ID, -+#endif -+ MAX_PE -+}; -+ -+enum { -+ CLASS_TYPE = 0, -+ TMU_TYPE, -+ UTIL_TYPE -+}; -+ -+#if !defined(CONFIG_PLATFORM_PCI) -+#define CLASS_MASK ((1 << CLASS0_ID) | (1 << CLASS1_ID) | (1 << CLASS2_ID) | (1 << CLASS3_ID) | (1 << CLASS4_ID) | (1 << CLASS5_ID)) -+#define CLASS_MAX_ID CLASS5_ID -+#else -+#define CLASS_MASK ((1 << CLASS0_ID) | (1 << CLASS1_ID) | (1 << CLASS2_ID) | (1 << CLASS3_ID)) -+#define CLASS_MAX_ID CLASS3_ID -+#endif -+ -+#if !defined(CONFIG_TMU_DUMMY) -+#if defined(CONFIG_PLATFORM_LS1012A) -+#define 
TMU_MASK ((1 << TMU0_ID) | (1 << TMU1_ID) | (1 << TMU3_ID)) -+#else -+#define TMU_MASK ((1 << TMU0_ID) | (1 << TMU1_ID) | (1 << TMU2_ID) | (1 << TMU3_ID)) -+#endif -+#define TMU_MAX_ID TMU3_ID -+#else -+#define TMU_MASK (1 << TMU0_ID) -+#define TMU_MAX_ID TMU0_ID -+#endif -+ -+#if !defined(CONFIG_UTIL_DISABLED) -+#define UTIL_MASK (1 << UTIL_ID) -+#endif -+ -+typedef struct tPE_STATUS -+{ -+ u32 cpu_state; -+ u32 activity_counter; -+ u32 rx; -+ union { -+ u32 tx; -+ u32 tmu_qstatus; -+ }; -+ u32 drop; -+#if defined(CFG_PE_DEBUG) -+ u32 debug_indicator; -+ u32 debug[16]; -+#endif -+} __attribute__((aligned(16))) PE_STATUS; -+ -+ -+struct pe_sync_mailbox -+{ -+ u32 stop; -+ u32 stopped; -+}; -+ -+struct pe_msg_mailbox -+{ -+ u32 dst; -+ u32 src; -+ u32 len; -+ u32 request; -+}; -+ -+// Drop counter definitions -+ -+#define CLASS_NUM_DROP_COUNTERS 13 -+#define UTIL_NUM_DROP_COUNTERS 8 -+ -+ -+/** PE information. -+ * Structure containing PE's specific information. It is used to create -+ * generic C functions common to all PE's. -+ * Before using the library functions this structure needs to be initialized with the different registers' virtual addresses -+ * (according to the ARM MMU mapping). The default initialization supports a virtual == physical mapping. -+ * -+ */ -+struct pe_info -+{ -+ u32 dmem_base_addr; /**< PE's dmem base address */ -+ u32 pmem_base_addr; /**< PE's pmem base address */ -+ u32 pmem_size; /**< PE's pmem size */ -+ -+ void *mem_access_wdata; /**< PE's _MEM_ACCESS_WDATA register address */ -+ void *mem_access_addr; /**< PE's _MEM_ACCESS_ADDR register address */ -+ void *mem_access_rdata; /**< PE's _MEM_ACCESS_RDATA register address */ -+}; -+ -+ -+void pe_lmem_read(u32 *dst, u32 len, u32 offset); -+void pe_lmem_write(u32 *src, u32 len, u32 offset); -+ -+void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len); -+void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len); -+ -+u32 pe_pmem_read(int id, u32 addr, u8 size); -+ -+void pe_dmem_write(int id, u32 val, u32 addr, u8 size); -+u32 pe_dmem_read(int id, u32 addr, u8 size); -+void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len); -+void class_pe_lmem_memset(u32 dst, int val, unsigned int len); -+void class_bus_write(u32 val, u32 addr, u8 size); -+u32 class_bus_read(u32 addr, u8 size); -+ -+ -+#define class_bus_readl(addr) class_bus_read(addr, 4) -+#define class_bus_readw(addr) class_bus_read(addr, 2) -+#define class_bus_readb(addr) class_bus_read(addr, 1) -+ -+#define class_bus_writel(val, addr) class_bus_write(val, addr, 4) -+#define class_bus_writew(val, addr) class_bus_write(val, addr, 2) -+#define class_bus_writeb(val, addr) class_bus_write(val, addr, 1) -+ -+#define pe_dmem_readl(id, addr) pe_dmem_read(id, addr, 4) -+#define pe_dmem_readw(id, addr) pe_dmem_read(id, addr, 2) -+#define pe_dmem_readb(id, addr) pe_dmem_read(id, addr, 1) -+ -+#define pe_dmem_writel(id, val, addr) pe_dmem_write(id, val, addr, 4) -+#define pe_dmem_writew(id, val, addr) pe_dmem_write(id, val, addr, 2) -+#define pe_dmem_writeb(id, val, addr) pe_dmem_write(id, val, addr, 1) -+ -+//int pe_load_elf_section(int id, const void *data, Elf32_Shdr *shdr); -+int pe_load_elf_section(int id, const void *data, Elf32_Shdr *shdr, struct device *dev); -+ -+void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base, unsigned int ddr_size); -+void bmu_init(void *base, BMU_CFG *cfg); -+void bmu_reset(void *base); -+void bmu_enable(void *base); -+void bmu_disable(void *base);
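
The pe_dmem_read()/pe_dmem_write() accessors above take an explicit access size, which the readl/writel-style wrapper macros pin to 4, 2 or 1 bytes. As a minimal host-side sketch (not part of the original patch), assuming the declarations above are in scope, a helper that dumps one 32-bit DMEM word from every class PE could look like this; the DMEM offset PE_DBG_WORD_OFS is a hypothetical value chosen only for illustration:

/* Sketch only: dump a 32-bit word from each class PE's DMEM.
 * PE_DBG_WORD_OFS is a hypothetical DMEM offset; CLASS0_ID,
 * CLASS_MAX_ID and pe_dmem_readl() are declared above. */
#define PE_DBG_WORD_OFS 0x0100

static void dump_class_dmem_word(void)
{
	int id;

	for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
		/* pe_dmem_readl() expands to pe_dmem_read(id, addr, 4);
		 * PE memory is big-endian from the host's point of view,
		 * as the cpu_to_be32() usage in pfe_ctrl.c suggests. */
		u32 val = be32_to_cpu(pe_dmem_readl(id, PE_DBG_WORD_OFS));

		printk(KERN_INFO "class pe %d: dmem[0x%x] = 0x%08x\n",
		       id, PE_DBG_WORD_OFS, val);
	}
}
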
-+void bmu_set_config(void *base, BMU_CFG *cfg); -+ -+/* An enumerated type for loopback values: no loopback (normal operation), -+ * local loopback through the MAC's internal loopback module, or PHY loopback -+ * through the external PHY. -+ */ -+#ifndef __MAC_LOOP_ENUM__ -+#define __MAC_LOOP_ENUM__ -+typedef enum {LB_NONE, LB_EXT, LB_LOCAL} MAC_LOOP; -+#endif -+ -+ -+void gemac_init(void *base, void *config); -+void gemac_disable_rx_checksum_offload(void *base); -+void gemac_enable_rx_checksum_offload(void *base); -+void gemac_set_mdc_div(void *base, int mdc_div); -+void gemac_set_speed(void *base, MAC_SPEED gem_speed); -+void gemac_set_duplex(void *base, int duplex); -+void gemac_set_mode(void *base, int mode); -+void gemac_enable(void *base); -+void gemac_tx_disable(void *base); -+void gemac_disable(void *base); -+void gemac_reset(void *base); -+void gemac_set_address(void *base, SPEC_ADDR *addr); -+SPEC_ADDR gemac_get_address(void *base); -+void gemac_set_loop(void *base, MAC_LOOP gem_loop); -+void gemac_set_laddr1(void *base, MAC_ADDR *address); -+void gemac_set_laddr2(void *base, MAC_ADDR *address); -+void gemac_set_laddr3(void *base, MAC_ADDR *address); -+void gemac_set_laddr4(void *base, MAC_ADDR *address); -+void gemac_set_laddrN(void *base, MAC_ADDR *address, unsigned int entry_index); -+void gemac_clear_laddr1(void *base); -+void gemac_clear_laddr2(void *base); -+void gemac_clear_laddr3(void *base); -+void gemac_clear_laddr4(void *base); -+void gemac_clear_laddrN(void *base, unsigned int entry_index); -+MAC_ADDR gemac_get_hash(void *base); -+void gemac_set_hash(void *base, MAC_ADDR *hash); -+MAC_ADDR gem_get_laddr1(void *base); -+MAC_ADDR gem_get_laddr2(void *base); -+MAC_ADDR gem_get_laddr3(void *base); -+MAC_ADDR gem_get_laddr4(void *base); -+MAC_ADDR gem_get_laddrN(void *base, unsigned int entry_index); -+void gemac_set_config(void *base, GEMAC_CFG *cfg); -+void gemac_allow_broadcast(void *base); -+void gemac_no_broadcast(void *base); -+void gemac_enable_unicast(void *base); -+void gemac_disable_unicast(void *base); -+void gemac_enable_multicast(void *base); -+void gemac_disable_multicast(void *base); -+void gemac_enable_fcs_rx(void *base); -+void gemac_disable_fcs_rx(void *base); -+void gemac_enable_1536_rx(void *base); -+void gemac_disable_1536_rx(void *base); -+void gemac_enable_rx_jmb(void *base); -+void gemac_disable_rx_jmb(void *base); -+void gemac_enable_stacked_vlan(void *base); -+void gemac_disable_stacked_vlan(void *base); -+void gemac_enable_pause_rx(void *base); -+void gemac_disable_pause_rx(void *base); -+void gemac_enable_copy_all(void *base); -+void gemac_disable_copy_all(void *base); -+void gemac_set_bus_width(void *base, int width); -+void gemac_set_wol(void *base, u32 wol_conf); -+ -+void gpi_init(void *base, GPI_CFG *cfg); -+void gpi_reset(void *base); -+void gpi_enable(void *base); -+void gpi_disable(void *base); -+void gpi_set_config(void *base, GPI_CFG *cfg); -+ -+void class_init(CLASS_CFG *cfg); -+void class_reset(void); -+void class_enable(void); -+void class_disable(void); -+void class_set_config(CLASS_CFG *cfg); -+ -+void tmu_reset(void); -+void tmu_init(TMU_CFG *cfg); -+void tmu_enable(u32 pe_mask); -+void tmu_disable(u32 pe_mask); -+u32 tmu_qstatus(u32 if_id); -+u32 tmu_pkts_processed(u32 if_id); -+ -+void util_init(UTIL_CFG *cfg); -+void util_reset(void); -+void util_enable(void); -+void util_disable(void); -+ -+void hif_nocpy_init(void); -+void hif_nocpy_tx_enable(void); -+void hif_nocpy_tx_disable(void);
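
The GEMAC helpers above are intentionally fine-grained: each call toggles one feature of the MAC. A plausible bring-up sequence, shown here as a hedged sketch rather than the driver's actual code (the call order and the choice to enable checksum offload and multicast are assumptions for illustration), would be:

/* Sketch only: one possible GEMAC bring-up order using the
 * declarations above; the real driver may configure differently. */
static void example_gemac_bring_up(void *base, GEMAC_CFG *cfg)
{
	gemac_reset(base);                      /* start from a clean state */
	gemac_set_config(base, cfg);            /* apply speed/duplex/mode from cfg */
	gemac_enable_rx_checksum_offload(base); /* hw IP/TCP/UDP checksum validation */
	gemac_allow_broadcast(base);            /* accept broadcast frames */
	gemac_enable_multicast(base);           /* accept multicast frames */
	gemac_enable(base);                     /* finally enable the MAC */
}
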
-+void hif_nocpy_rx_enable(void); -+void hif_nocpy_rx_disable(void); -+ -+void hif_init(void); -+void hif_tx_enable(void); -+void hif_tx_disable(void); -+void hif_rx_enable(void); -+void hif_rx_disable(void); -+ -+ -+/** Get Chip Revision level -+* -+*/ -+ -+static inline unsigned int CHIP_REVISION(void) -+{ -+#if defined (CONFIG_PLATFORM_C2000) -+#if 1 -+ return system_rev; -+ //return 0; -+#else -+ return (readl(COMCERTO_GPIO_DEVICE_ID_REG) >> 24) & 0xf; -+#endif -+ -+#else -+ /*For LS1012A return always 1 */ -+ return 1; -+#endif -+} -+ -+/** Start HIF rx DMA -+* -+*/ -+static inline void hif_rx_dma_start(void) -+{ -+ /*TODO not sure poll_cntrl_en is required or not */ -+ writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_RX_CTRL); -+} -+ -+/** Start HIF tx DMA -+* -+*/ -+static inline void hif_tx_dma_start(void) -+{ -+ /*TODO not sure poll_cntrl_en is required or not */ -+ writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_TX_CTRL); -+} -+ -+/** Start HIF_NOCPY rx DMA -+* -+*/ -+static inline void hif_nocpy_rx_dma_start(void) -+{ -+ /*TODO not sure poll_cntrl_en is required or not */ -+ writel((HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB), HIF_NOCPY_RX_CTRL); -+} -+ -+/** Start HIF_NOCPY tx DMA -+* -+*/ -+static inline void hif_nocpy_tx_dma_start(void) -+{ -+ /*TODO not sure poll_cntrl_en is required or not */ -+ writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_NOCPY_TX_CTRL); -+} -+ -+#endif /* _PFE_H_ */ -+ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/tmu.h -@@ -0,0 +1,68 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
-+ * -+*/ -+#ifndef _TMU_H_ -+#define _TMU_H_ -+ -+#define TMU_DMEM_BASE_ADDR 0x00000000 -+#define TMU_PMEM_BASE_ADDR 0x00010000 -+ -+#define CBUS_BASE_ADDR 0xc0000000 -+#define TMU_APB_BASE_ADDR 0xc1000000 -+ -+#if defined (COMCERTO_2000_TMU) || defined (COMCERTO_2000_CONTROL) -+ -+#include "cbus.h" -+ -+#define GPT_BASE_ADDR (TMU_APB_BASE_ADDR + 0x00000) -+#define UART_BASE_ADDR (TMU_APB_BASE_ADDR + 0x10000) -+ -+#define SHAPER0_BASE_ADDR (TMU_APB_BASE_ADDR + 0x020000) -+#define SHAPER1_BASE_ADDR (TMU_APB_BASE_ADDR + 0x030000) -+#define SHAPER2_BASE_ADDR (TMU_APB_BASE_ADDR + 0x040000) -+#define SHAPER3_BASE_ADDR (TMU_APB_BASE_ADDR + 0x050000) -+#define SHAPER4_BASE_ADDR (TMU_APB_BASE_ADDR + 0x060000) -+#define SHAPER5_BASE_ADDR (TMU_APB_BASE_ADDR + 0x070000) -+#define SHAPER6_BASE_ADDR (TMU_APB_BASE_ADDR + 0x080000) -+#define SHAPER7_BASE_ADDR (TMU_APB_BASE_ADDR + 0x090000) -+#define SHAPER8_BASE_ADDR (TMU_APB_BASE_ADDR + 0x0a0000) -+#define SHAPER9_BASE_ADDR (TMU_APB_BASE_ADDR + 0x0b0000) -+ -+#define SCHED0_BASE_ADDR (TMU_APB_BASE_ADDR + 0x1c0000) -+#define SCHED1_BASE_ADDR (TMU_APB_BASE_ADDR + 0x1d0000) -+#define SCHED2_BASE_ADDR (TMU_APB_BASE_ADDR + 0x1e0000) -+#define SCHED3_BASE_ADDR (TMU_APB_BASE_ADDR + 0x1f0000) -+#define SCHED4_BASE_ADDR (TMU_APB_BASE_ADDR + 0x200000) -+#define SCHED5_BASE_ADDR (TMU_APB_BASE_ADDR + 0x210000) -+#define SCHED6_BASE_ADDR (TMU_APB_BASE_ADDR + 0x220000) -+#define SCHED7_BASE_ADDR (TMU_APB_BASE_ADDR + 0x230000) -+ -+#define SHAPER_STATUS (TMU_APB_BASE_ADDR + 0x270000) /**< [9:0] bitmask of shapers that have positive credit */ -+ -+#include "gpt.h" -+#include "uart.h" -+#include "tmu/shaper.h" -+#include "tmu/sched.h" -+ -+#endif -+ -+#define PHY_QUEUE_BASE_ADDR (TMU_APB_BASE_ADDR + 0x260000) -+ -+#include "tmu/phy_queue.h" -+ -+#endif /* _TMU_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/tmu/phy_queue.h -@@ -0,0 +1,56 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
-+ * -+*/ -+#ifndef _PHY_QUEUE_H_ -+#define _PHY_QUEUE_H_ -+ -+#define PHY_QUEUE_SHAPER_STATUS (PHY_QUEUE_BASE_ADDR + 0x00) /**< [28:19] same as SHAPER_STATUS, [18:3] same as QUEUE_STATUS, [2:0] must be zero before a new packet may be dequeued */ -+#define QUEUE_STATUS (PHY_QUEUE_BASE_ADDR + 0x04) /**< [15:0] bit mask of input queues with pending packets */ -+ -+#define QUEUE0_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x08) -+#define QUEUE1_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x0c) -+#define QUEUE2_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x10) -+#define QUEUE3_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x14) -+#define QUEUE4_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x18) -+#define QUEUE5_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x1c) -+#define QUEUE6_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x20) -+#define QUEUE7_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x24) -+#define QUEUE8_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x28) -+#define QUEUE9_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x2c) -+#define QUEUE10_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x30) -+#define QUEUE11_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x34) -+#define QUEUE12_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x38) -+#define QUEUE13_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x3c) -+#define QUEUE14_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x40) -+#define QUEUE15_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x44) -+#define QUEUE_RESULT0 (PHY_QUEUE_BASE_ADDR + 0x48) /**< [7] set to one to indicate output PHY (TMU0->PHY0, TMU1->PHY1, TMU2->PHY2, TMU3->PHY3), [6:0] winner input queue number */ -+#define QUEUE_RESULT1 (PHY_QUEUE_BASE_ADDR + 0x4c) /**< [7] set to one to indicate output PHY (TMU0->PHY0, TMU1->PHY1, TMU2->PHY2, TMU3->PHY4), [6:0] winner input queue number */ -+#define QUEUE_RESULT2 (PHY_QUEUE_BASE_ADDR + 0x50) /**< [7] set to one to indicate output PHY (TMU0->PHY0, TMU1->PHY1, TMU2->PHY2, TMU3->PHY5), [6:0] winner input queue number */ -+#define TMU_PE_GP_REG (PHY_QUEUE_BASE_ADDR + 0x54) -+#define QUEUE_GBL_PKTLEN (PHY_QUEUE_BASE_ADDR + 0x5c) -+#define QUEUE_GBL_PKTLEN_MASK (PHY_QUEUE_BASE_ADDR + 0x60) -+ -+#define QUEUE_RESULT0_REGOFFSET (QUEUE_RESULT0 - QUEUE_RESULT0) -+#define QUEUE_RESULT1_REGOFFSET (QUEUE_RESULT1 - QUEUE_RESULT0) -+#define QUEUE_RESULT2_REGOFFSET (QUEUE_RESULT2 - QUEUE_RESULT0) -+ -+#define TEQ_HTD (1 << 22) -+#define TEQ_HWRED (1 << 21) -+ -+ -+#endif /* _PHY_QUEUE_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/tmu/sched.h -@@ -0,0 +1,72 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
-+ * -+*/ -+#ifndef _SCHED_H_ -+#define _SCHED_H_ -+ -+/* Offsets from SCHEDx_BASE_ADDR */ -+#define SCHED_CTRL 0x00 -+#define SCHED_SLOT_TIME 0x04 -+#define SCHED_RES 0x08 -+#define SCHED_QUEUE_ALLOC0 0x0c -+#define SCHED_QUEUE_ALLOC1 0x10 -+#define SCHED_BW 0x14 -+#define SCHED_GUR_DEF_CTR 0x18 -+#define SCHED_AVL_CTR 0x1c -+#define SCHED_QU0_WGHT 0x20 -+#define SCHED_QU1_WGHT 0x24 -+#define SCHED_QU2_WGHT 0x28 -+#define SCHED_QU3_WGHT 0x2c -+#define SCHED_QU4_WGHT 0x30 -+#define SCHED_QU5_WGHT 0x34 -+#define SCHED_QU6_WGHT 0x38 -+#define SCHED_QU7_WGHT 0x3c -+#define SCHED_QUE0_DEFICIT_CNT 0x40 -+#define SCHED_QUE1_DEFICIT_CNT 0x44 -+#define SCHED_QUE2_DEFICIT_CNT 0x48 -+#define SCHED_QUE3_DEFICIT_CNT 0x4c -+#define SCHED_QUE4_DEFICIT_CNT 0x50 -+#define SCHED_QUE5_DEFICIT_CNT 0x54 -+#define SCHED_QUE6_DEFICIT_CNT 0x58 -+#define SCHED_QUE7_DEFICIT_CNT 0x5c -+#define SCHED_PKT_LEN 0x60 -+ -+#define SCHED_CTRL_ALGOTYPE(x) (((x) & 0xf) << 0) -+#define SCHED_CTRL_CALQUOTA(x) (((x) & 0x1) << 4) -+#define SCHED_CTRL_ACTIVE_Q(x) (((x) & 0xff) << 8) -+#define SCHED_CTRL_SHARE_BW(x) (((x) & 0xff) << 16) -+#define SCHED_CTRL_BARROW_BW(x) (((x) & 0xff) << 24) -+ -+#define SCHED_QUEUE_ALLOC(x, b) (((x) & 0x1f) << (b)) -+ -+#define SCHED_QUEUE_ALLOC0_QUEUEA(x) (((x) & 0x1f) << 0) -+#define SCHED_QUEUE_ALLOC0_QUEUEB(x) (((x) & 0x1f) << 8) -+#define SCHED_QUEUE_ALLOC0_QUEUEC(x) (((x) & 0x1f) << 16) -+#define SCHED_QUEUE_ALLOC0_QUEUED(x) (((x) & 0x1f) << 24) -+ -+#define SCHED_QUEUE_ALLOC0_RES0(x) (((x) & 0x7) << 5) -+#define SCHED_QUEUE_ALLOC0_RES1(x) (((x) & 0x7) << 13) -+#define SCHED_QUEUE_ALLOC0_RES2(x) (((x) & 0x7) << 21) -+#define SCHED_QUEUE_ALLOC0_RES3(x) (((x) & 0x7) << 29) -+ -+#define SCHED_QUEUE_ALLOC1_QUEUEA(x) (((x) & 0x1f) << 0) -+#define SCHED_QUEUE_ALLOC1_QUEUEB(x) (((x) & 0x1f) << 8) -+#define SCHED_QUEUE_ALLOC1_QUEUEC(x) (((x) & 0x1f) << 16) -+#define SCHED_QUEUE_ALLOC1_QUEUED(x) (((x) & 0x1f) << 24) -+ -+#endif /* _SCHED_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/tmu/shaper.h -@@ -0,0 +1,37 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
-+ * -+*/ -+#ifndef _SHAPER_H_ -+#define _SHAPER_H_ -+ -+/* Offsets from SHAPERx_BASE_ADDR */ -+#define SHAPER_CTRL 0x00 -+#define SHAPER_WEIGHT 0x04 -+#define SHAPER_PKT_LEN 0x08 -+ -+#define SHAPER_CTRL_ENABLE(x) (((x) & 0x1) << 0) -+#define SHAPER_CTRL_QNO(x) (((x) & 0x3f) << 1) -+#define SHAPER_CTRL_CLKDIV(x) (((x) & 0xffff) << 16) -+ -+#define SHAPER_WEIGHT_FRACWT(x) (((x) & 0xff) << 0) -+#define SHAPER_WEIGHT_INTWT(x) (((x) & 0x3) << 8) -+#define SHAPER_WEIGHT_MAXCREDIT(x) (((x) & 0x3fffff) << 10) -+ -+#define PORT_SHAPER_MASK (1 << 0) -+ -+#endif /* _SHAPER_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/uart.h -@@ -0,0 +1,31 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+*/ -+#ifndef _UART_H_ -+#define _UART_H_ -+ -+#define UART_THR (UART_BASE_ADDR + 0x00) -+#define UART_IER (UART_BASE_ADDR + 0x04) -+#define UART_IIR (UART_BASE_ADDR + 0x08) -+#define UART_LCR (UART_BASE_ADDR + 0x0c) -+#define UART_MCR (UART_BASE_ADDR + 0x10) -+#define UART_LSR (UART_BASE_ADDR + 0x14) -+#define UART_MDR (UART_BASE_ADDR + 0x18) -+#define UART_SCRATCH (UART_BASE_ADDR + 0x1c) -+ -+#endif /* _UART_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/util.h -@@ -0,0 +1,49 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-+ * -+*/ -+#ifndef _UTIL_H_ -+#define _UTIL_H_ -+ -+#define UTIL_DMEM_BASE_ADDR 0x00000000 -+#define UTIL_DMEM_SIZE 0x00002000 -+#define UTIL_DMEM_END (UTIL_DMEM_BASE_ADDR + UTIL_DMEM_SIZE) -+ -+#define IS_DMEM(addr, len) (((unsigned long)(addr) >= UTIL_DMEM_BASE_ADDR) && (((unsigned long)(addr) + (len)) <= UTIL_DMEM_END)) -+ -+#define CBUS_BASE_ADDR 0xc0000000 -+#define UTIL_APB_BASE_ADDR 0xc1000000 -+ -+#include "cbus.h" -+ -+#define GPT_BASE_ADDR (UTIL_APB_BASE_ADDR + 0x00000) -+#define UART_BASE_ADDR (UTIL_APB_BASE_ADDR + 0x10000) -+#define EAPE_BASE_ADDR (UTIL_APB_BASE_ADDR + 0x20000) -+#define INQ_BASE_ADDR (UTIL_APB_BASE_ADDR + 0x30000) -+#define EFET1_BASE_ADDR (UTIL_APB_BASE_ADDR + 0x40000) -+#define EFET2_BASE_ADDR (UTIL_APB_BASE_ADDR + 0x50000) -+#define EFET3_BASE_ADDR (UTIL_APB_BASE_ADDR + 0x60000) -+ -+ -+#include "gpt.h" -+#include "uart.h" -+#include "util/eape.h" -+#include "util/inq.h" -+#include "util/efet.h" -+ -+ -+#endif /* _UTIL_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/util/eape.h -@@ -0,0 +1,57 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+*/ -+#ifndef _EAPE_H_ -+#define _EAPE_H_ -+ -+#define EAPE_STATUS (EAPE_BASE_ADDR + 0x0) -+#define EAPE_INT_ENABLE (EAPE_BASE_ADDR + 0x4) -+#define EAPE_INT_SRC (EAPE_BASE_ADDR + 0x8) -+#define EAPE_HOST_INT_ENABLE (EAPE_BASE_ADDR + 0xc) -+ -+/** The following bits enable interrupts into UtilPE (_TO_UTIL) -+* and out of UtilPE (_OUT) */ -+ -+#define IRQ_EN_EFET_TO_UTIL 0x1 -+#define IRQ_EN_QB_TO_UTIL 0x2 -+#define IRQ_EN_INQ_TO_UTIL 0x4 -+#define IRQ_EN_EAPE_TO_UTIL 0x8 -+#define IRQ_EN_GPT_TMR_TO_UTIL 0x10 -+#define IRQ_EN_UART_TO_UTIL 0x20 -+#define IRQ_EN_SYSLP_TO_UTIL 0x40 -+#define IRQ_EN_UPEGP_TO_UTIL 0x80 -+ -+/** Out interrupts */ -+ -+#define IRQ_EN_EFET_OUT 0x100 -+#define IRQ_EN_QB_OUT 0x200 -+#define IRQ_EN_INQ_OUT 0x400 -+#define IRQ_EN_EAPE_OUT 0x800 -+#define IRQ_EN_GPT_TMR_OUT 0x1000 -+#define IRQ_EN_UART_OUT 0x2000 -+#define IRQ_EN_SYSLP_OUT 0x4000 -+#define IRQ_EN_UPEGP_OUT 0x8000 -+ -+/** The following status register bits -+ * are mapped to IPSEC status register bits */ -+#define EAPE_IN_STAT_AVAIL 0x1 -+#define EAPE_OUT_STAT_AVAIL 0x2 -+#define EAPE_IN_CMD_AVAIL 0x4 -+#define EAPE_OUT_CMD_AVAIL 0x8 -+ -+#endif /* _EAPE_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/util/efet.h -@@ -0,0 +1,119 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version.
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+*/ -+#ifndef _UTIL_EFET_H_ -+#define _UTIL_EFET_H_ -+ -+#define EFET_ENTRY_ADDR 0x00 -+#define EFET_ENTRY_SIZE 0x04 -+#define EFET_ENTRY_DMEM_ADDR 0x08 -+#define EFET_ENTRY_STATUS 0x0c -+#define EFET_ENTRY_ENDIAN 0x10 -+ -+#define CBUS2DMEM 0 -+#define DMEM2CBUS 1 -+ -+#define EFET2BUS_LE (1 << 0) -+ -+#define EFET1 0 -+#define EFET2 1 -+#define EFET3 2 -+#define MAX_UTIL_EFET_LEN 128 -+ -+extern const unsigned long util_efet_baseaddr[3]; -+extern u32 util_efet_status; -+ -+/* The barrier call is an empirical work-around for an unknown bug: for some unknown reason, it solves -+ * a UtilPE crash observed with LRO and packet steering. Other solutions also worked (e.g. barrier, -+ * nop calls in other positions). However, no common pattern could be extracted from those solutions -+ * to narrow down the source of the crash. -+ */ -+ -+#define __UTIL_EFET(i, cbus_addr, dmem_addr,len,dir) do { \ -+ __writel((len & 0x3FF) | (dir << 16), util_efet_baseaddr[i] + EFET_ENTRY_SIZE); \ -+ __writel(dmem_addr, util_efet_baseaddr[i] + EFET_ENTRY_DMEM_ADDR);\ -+ __writel(cbus_addr, util_efet_baseaddr[i] + EFET_ENTRY_ADDR);\ -+ nop();\ -+ }while(0) -+ -+#define UTIL_EFET(i, cbus_addr, dmem_addr,len,dir) do { \ -+ __UTIL_EFET(i, cbus_addr, dmem_addr, len, dir); \ -+ util_efet_status |= (1 << i); \ -+ } while(0) -+ -+ -+/** Waits for the util efet to finish a transaction, blocking the caller -+* (without updating the status). -+* Can be called at any time. -+* -+* @param i Efet index -+* -+* -+*/ -+static inline void __util_efet_wait(int i) -+{ -+ while (!(readl(util_efet_baseaddr[i] + EFET_ENTRY_STATUS) & 0x1)) ; -+} -+ -+/** Waits for the util efet to finish a transaction, blocking the caller. -+* Can be called at any time. -+* -+* @param i Efet index -+* -+*/ -+static inline void util_efet_wait(int i) -+{ -+ __util_efet_wait(i); -+ -+ util_efet_status &= ~(1 << i); -+} -+ -+/** Asynchronous interface to util efet read/write functions. -+* It will wait for the efet to finish previous transaction, but does not wait for the current transaction to finish. 
-+* -+* @param i Efet index -+* @param cbus_addr Cbus address (must be 64bits aligned) -+* @param dmem_addr DMEM address (must be 64bits aligned) -+* @param len Number of bytes to copy (must be 64bits aligned size) -+* @param dir Direction of the transaction (0 - cbus to dmem, 1 - dmem to cbus) -+* -+*/ -+static inline void util_efet_async(int i, u32 cbus_addr, u32 dmem_addr, u32 len, u8 dir) -+{ -+ if (util_efet_status & (1 << i)) -+ util_efet_wait(i); -+ -+ UTIL_EFET(i, cbus_addr, dmem_addr, len, dir); -+} -+ -+ -+static inline void util_efet_async0(u32 cbus_addr, u32 dmem_addr, u32 len, u8 dir) -+{ -+ util_efet_async(0, cbus_addr, dmem_addr, len, dir); -+} -+ -+/* EFET 2 is always used for SYNC operations */ -+static inline void util_efet_sync2(u32 cbus_addr, u32 dmem_addr, u32 len, u8 dir) -+{ -+ __UTIL_EFET(2, cbus_addr, dmem_addr, len, dir); -+ __util_efet_wait(2); -+} -+ -+void util_efet_sync0(u32 cbus_addr, u32 dmem_addr, u32 len, u8 dir); -+#endif /* _UTIL_EFET_H_ */ -+ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/include/pfe/util/inq.h -@@ -0,0 +1,28 @@ -+/* -+ * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+*/ -+#ifndef _INQ_H_ -+#define _INQ_H_ -+ -+#define INQ_HOST_GP (INQ_BASE_ADDR + 0x00) /* FIXME what are these for ? */ -+#define INQ_UPE_GP (INQ_BASE_ADDR + 0x04) /* FIXME what are these for ? */ -+ -+#define INQ_QB_PKTPTR (INQ_BASE_ADDR + 0x08) -+#define INQ_FIFO_CNT (INQ_BASE_ADDR + 0x0c) -+ -+#endif /* _INQ_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/pfe_ctrl.c -@@ -0,0 +1,363 @@ -+/* -+ * -+ * Copyright (C) 2007 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details.
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#ifdef __KERNEL__ -+#include -+#include -+#include -+#include -+#include -+#else -+#include "platform.h" -+#endif -+ -+#include "pfe_mod.h" -+#include "pfe_ctrl.h" -+ -+#include "pfe_ctrl_hal.h" -+ -+static struct pe_sync_mailbox CLASS_DMEM_SH2(sync_mailbox); -+static struct pe_sync_mailbox TMU_DMEM_SH2(sync_mailbox); -+ -+static struct pe_msg_mailbox CLASS_DMEM_SH2(msg_mailbox); -+static struct pe_msg_mailbox TMU_DMEM_SH2(msg_mailbox); -+ -+#if !defined(CONFIG_PLATFORM_LS1012A) -+static u32 CLASS_DMEM_SH2(resume); -+static u32 TMU_DMEM_SH2(resume); -+#endif -+ -+#if !defined(CONFIG_UTIL_DISABLED) -+static struct pe_sync_mailbox UTIL_DMEM_SH2(sync_mailbox); -+static struct pe_msg_mailbox UTIL_DMEM_SH2(msg_mailbox); -+static u32 UTIL_DMEM_SH2(resume); -+#endif -+ -+static int pfe_ctrl_timer(void *data); -+ -+static int initialized = 0; -+ -+#define TIMEOUT_MS 1000 -+ -+int relax(unsigned long end) -+{ -+#ifdef __KERNEL__ -+ if (time_after(jiffies, end)) { -+ if (time_after(jiffies, end + (TIMEOUT_MS * HZ) / 1000)) { -+ return -1; -+ } -+ -+ if (need_resched()) -+ schedule(); -+ } -+#else -+ udelay(1); -+#endif -+ -+ return 0; -+} -+ -+#if !defined(CONFIG_PLATFORM_LS1012A) -+void pfe_ctrl_suspend(struct pfe_ctrl *ctrl) -+{ -+ int id; -+ -+ kthread_stop(ctrl->timer_thread); -+ -+ mutex_lock(&ctrl->mutex); -+ -+ initialized = 0; -+ for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) -+ pe_dmem_write(id, cpu_to_be32(0x1), (unsigned long)virt_to_class_dmem(&class_resume), 4); -+ -+ for (id = TMU0_ID; id <= TMU_MAX_ID; id++) { -+#if defined(CONFIG_PLATFORM_LS1012A) -+ if(id == TMU2_ID) continue; -+#endif -+ pe_dmem_write(id, cpu_to_be32(0x1), (unsigned long)virt_to_class_dmem(&tmu_resume), 4); -+ } -+ -+#if !defined(CONFIG_UTIL_DISABLED) -+ pe_dmem_write(UTIL_ID, cpu_to_be32(0x1), (unsigned long)virt_to_class_dmem(&util_resume), 4); -+#endif -+ -+ pe_sync_stop(&pfe->ctrl, 0xFF); -+ -+ mutex_unlock(&ctrl->mutex); -+} -+ -+void pfe_ctrl_resume(struct pfe_ctrl *ctrl) -+{ -+ mutex_lock(&ctrl->mutex); -+ initialized = 1; -+ pe_start(&pfe->ctrl, 0xFF); -+ mutex_unlock(&ctrl->mutex); -+ -+ ctrl->timer_thread = kthread_create(pfe_ctrl_timer, ctrl, "pfe_ctrl_timer"); -+ -+ wake_up_process(ctrl->timer_thread); -+} -+#endif -+ -+/** PE sync stop. -+* Stops packet processing for a list of PE's (specified using a bitmask). -+* The caller must hold ctrl->mutex. 
-+* -+* @param ctrl Control context -+* @param pe_mask Mask of PE id's to stop -+* -+*/ -+int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask) -+{ -+ struct pe_sync_mailbox *mbox; -+ int pe_stopped = 0; -+ unsigned long end = jiffies + 2; -+ int i; -+ -+#if defined(CONFIG_PLATFORM_LS1012A) -+ //TODO Util should be removed after IPSec is ported -+ pe_mask &= 0x2FF; //Exclude Util + TMU2 -+#endif -+ for (i = 0; i < MAX_PE; i++) -+ if (pe_mask & (1 << i)) { -+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i]; -+ -+ pe_dmem_write(i, cpu_to_be32(0x1), (unsigned long)&mbox->stop, 4); -+ } -+ -+ while (pe_stopped != pe_mask) { -+ for (i = 0; i < MAX_PE; i++) -+ if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) { -+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i]; -+ -+ if (pe_dmem_read(i, (unsigned long)&mbox->stopped, 4) & cpu_to_be32(0x1)) -+ pe_stopped |= (1 << i); -+ } -+ -+ if (relax(end) < 0) -+ goto err; -+ } -+ -+ return 0; -+ -+err: -+ printk(KERN_ERR "%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped); -+ -+ for (i = 0; i < MAX_PE; i++) -+ if (pe_mask & (1 << i)) { -+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i]; -+ -+ pe_dmem_write(i, cpu_to_be32(0x0), (unsigned long)&mbox->stop, 4); -+ } -+ -+ return -EIO; -+} -+ -+/** PE start. -+* Starts packet processing for a list of PE's (specified using a bitmask). -+* The caller must hold ctrl->mutex. -+* -+* @param ctrl Control context -+* @param pe_mask Mask of PE id's to start -+* -+*/ -+void pe_start(struct pfe_ctrl *ctrl, int pe_mask) -+{ -+ struct pe_sync_mailbox *mbox; -+ int i; -+ -+#if defined(CONFIG_PLATFORM_LS1012A) -+ //TODO Util should be removed after IPSec is ported -+ pe_mask &= 0x2FF; //Exclude Util + TMU2 -+#endif -+ for (i = 0; i < MAX_PE; i++) -+ if (pe_mask & (1 << i)) { -+ -+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i]; -+ -+ pe_dmem_write(i, cpu_to_be32(0x0), (unsigned long)&mbox->stop, 4); -+ } -+} -+ -+ -+/** Sends a control request to a given PE (to copy data to/from internal memory from/to DDR). -+* The caller must hold ctrl->mutex. -+* -+* @param ctrl Control context -+* @param id PE id -+* @param cmd_type Command type -+* @param dst Physical destination address of data -+* @param src Physical source address of data -+* @param len Data length -+* -+*/ -+int pe_request(struct pfe_ctrl *ctrl, int id, unsigned short cmd_type, unsigned long dst, unsigned long src, int len) -+{ -+ struct pe_msg_mailbox mbox = { -+ .dst = cpu_to_be32(dst), -+ .src = cpu_to_be32(src), -+ .len = cpu_to_be32(len), -+ .request = cpu_to_be32((cmd_type << 16) | 0x1), -+ }; -+ struct pe_msg_mailbox *pmbox = (void *)ctrl->msg_mailbox_baseaddr[id]; -+ unsigned long end = jiffies + 2; -+ u32 rc; -+ -+ /* This works because .request is written last */ -+ pe_dmem_memcpy_to32(id, (unsigned long)pmbox, &mbox, sizeof(mbox)); -+ -+ while ((rc = pe_dmem_read(id, (unsigned long)&pmbox->request, 4)) & cpu_to_be32(0xffff)) { -+ if (relax(end) < 0) -+ goto err; -+ } -+ -+ rc = be32_to_cpu(rc); -+ -+ return rc >> 16; -+ -+err: -+ printk(KERN_ERR "%s: timeout, %x\n", __func__, be32_to_cpu(rc)); -+ pe_dmem_write(id, cpu_to_be32(0), (unsigned long)&pmbox->request, 4); -+ return -EIO; -+} -+ -+ -+/** Control code timer thread. -+* -+* A kernel thread is used so that the timer code can be run under the control path mutex. -+* The thread wakes up regularly and checks if any timer in the timer list has expired. -+* The timers are re-started automatically.
-+* The code tries to keep the number of times a timer runs per unit time constant on average; -+* if the thread scheduling is delayed, it's possible for a particular timer to be scheduled in -+* quick succession to make up for the lost time. -+* -+* @param data Pointer to the control context structure -+* -+* @return 0 on success, a negative value on error -+* -+*/ -+static int pfe_ctrl_timer(void *data) -+{ -+ struct pfe_ctrl *ctrl = data; -+ TIMER_ENTRY *timer, *next; -+ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ while (1) -+ { -+ schedule_timeout_uninterruptible(ctrl->timer_period); -+ -+ mutex_lock(&ctrl->mutex); -+ -+ list_for_each_entry_safe(timer, next, &ctrl->timer_list, list) -+ { -+ if (time_after(jiffies, timer->timeout)) -+ { -+ timer->timeout += timer->period; -+ -+ timer->handler(); -+ } -+ } -+ -+ mutex_unlock(&ctrl->mutex); -+ -+ if (kthread_should_stop()) -+ break; -+ } -+ -+ printk(KERN_INFO "%s exiting\n", __func__); -+ -+ return 0; -+} -+ -+ -+int pfe_ctrl_init(struct pfe *pfe) -+{ -+ struct pfe_ctrl *ctrl = &pfe->ctrl; -+ int id; -+ int rc; -+ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ mutex_init(&ctrl->mutex); -+ spin_lock_init(&ctrl->lock); -+ -+ ctrl->timer_period = HZ / TIMER_TICKS_PER_SEC; -+ -+ INIT_LIST_HEAD(&ctrl->timer_list); -+ -+ /*INIT_WORK(&ctrl->work, comcerto_fpp_workqueue);*/ -+ -+ INIT_LIST_HEAD(&ctrl->msg_list); -+ -+ for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) { -+ ctrl->sync_mailbox_baseaddr[id] = virt_to_class_dmem(&class_sync_mailbox); -+ ctrl->msg_mailbox_baseaddr[id] = virt_to_class_dmem(&class_msg_mailbox); -+ } -+ -+ for (id = TMU0_ID; id <= TMU_MAX_ID; id++) { -+#if defined(CONFIG_PLATFORM_LS1012A) -+ if(id == TMU2_ID) continue; -+#endif -+ ctrl->sync_mailbox_baseaddr[id] = virt_to_tmu_dmem(&tmu_sync_mailbox); -+ ctrl->msg_mailbox_baseaddr[id] = virt_to_tmu_dmem(&tmu_msg_mailbox); -+ } -+ -+#if !defined(CONFIG_UTIL_DISABLED) -+ ctrl->sync_mailbox_baseaddr[UTIL_ID] = virt_to_util_dmem(&util_sync_mailbox); -+ ctrl->msg_mailbox_baseaddr[UTIL_ID] = virt_to_util_dmem(&util_msg_mailbox); -+#endif -+ -+ ctrl->hash_array_baseaddr = pfe->ddr_baseaddr + ROUTE_TABLE_BASEADDR; -+ ctrl->hash_array_phys_baseaddr = pfe->ddr_phys_baseaddr + ROUTE_TABLE_BASEADDR; -+ ctrl->ipsec_lmem_phys_baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR + IPSEC_LMEM_BASEADDR); -+ ctrl->ipsec_lmem_baseaddr = (LMEM_BASE_ADDR + IPSEC_LMEM_BASEADDR); -+ -+ ctrl->timer_thread = kthread_create(pfe_ctrl_timer, ctrl, "pfe_ctrl_timer"); -+ if (IS_ERR(ctrl->timer_thread)) -+ { -+ printk (KERN_ERR "%s: kthread_create() failed\n", __func__); -+ rc = PTR_ERR(ctrl->timer_thread); -+ goto err0; -+ } -+ -+ ctrl->dev = pfe->dev; -+ -+ wake_up_process(ctrl->timer_thread); -+ -+ printk(KERN_INFO "%s finished\n", __func__); -+ -+ initialized = 1; -+ -+ return 0; -+ -+err0: -+ return rc; -+} -+ -+ -+void pfe_ctrl_exit(struct pfe *pfe) -+{ -+ struct pfe_ctrl *ctrl = &pfe->ctrl; -+ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ initialized = 0; -+ -+ kthread_stop(ctrl->timer_thread); -+} ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/pfe_ctrl.h -@@ -0,0 +1,111 @@ -+/* -+ * -+ * Copyright (C) 2007 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version.
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#ifndef _PFE_CTRL_H_ -+#define _PFE_CTRL_H_ -+ -+#include -+ -+#include "pfe_mod.h" -+#include "pfe/pfe.h" -+ -+#define DMA_BUF_SIZE_128 0x80 /* enough for 1 conntracks */ -+#define DMA_BUF_SIZE_256 0x100 /* enough for 2 conntracks, 1 bridge entry or 1 multicast entry */ -+#define DMA_BUF_SIZE_512 0x200 /* 512bytes dma allocated buffers used by rtp relay feature */ -+#define DMA_BUF_MIN_ALIGNMENT 8 -+#define DMA_BUF_BOUNDARY (4 * 1024) /* bursts can not cross 4k boundary */ -+ -+#define CMD_TX_ENABLE 0x0501 -+#define CMD_TX_DISABLE 0x0502 -+ -+#define CMD_RX_LRO 0x0011 -+#define CMD_PKTCAP_ENABLE 0x0d01 -+#define CMD_QM_EXPT_RATE 0x020c -+ -+#define EXPT_TYPE_PCAP 0x3 -+ -+struct pfe_ctrl { -+ struct mutex mutex; -+ spinlock_t lock; -+ -+ void *dma_pool; -+ void *dma_pool_512; -+ void *dma_pool_128; -+ -+ struct device *dev; -+ -+ void *hash_array_baseaddr; /** Virtual base address of the conntrack hash array */ -+ unsigned long hash_array_phys_baseaddr; /** Physical base address of the conntrack hash array */ -+ -+ struct task_struct *timer_thread; -+ struct list_head timer_list; -+ unsigned long timer_period; -+ -+ int (*event_cb)(u16, u16, u16*); -+ -+ unsigned long sync_mailbox_baseaddr[MAX_PE]; /* Sync mailbox PFE internal address, initialized when parsing elf images */ -+ unsigned long msg_mailbox_baseaddr[MAX_PE]; /* Msg mailbox PFE internal address, initialized when parsing elf images */ -+ -+ unsigned long class_dmem_sh; -+ unsigned long class_pe_lmem_sh; -+ unsigned long tmu_dmem_sh; -+ unsigned long util_dmem_sh; -+ unsigned long util_ddr_sh; -+ struct clk *clk_axi; -+ unsigned int sys_clk; // AXI clock value, in KHz -+ void *ipsec_lmem_baseaddr; -+ unsigned long ipsec_lmem_phys_baseaddr; -+ -+ /* used for asynchronous message transfer to PFE */ -+ struct list_head msg_list; -+ struct work_struct work; -+}; -+ -+int pfe_ctrl_init(struct pfe *pfe); -+void pfe_ctrl_exit(struct pfe *pfe); -+ -+int pe_send_cmd(unsigned short cmd_type, unsigned short action, unsigned long param1, unsigned long param2); -+int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask); -+void pe_start(struct pfe_ctrl *ctrl, int pe_mask); -+int pe_request(struct pfe_ctrl *ctrl, int id,unsigned short cmd_type, unsigned long dst, unsigned long src, int len); -+int pe_read(struct pfe_ctrl *ctrl, int id, u32 *dst, unsigned long src, int len, int clear_flag); -+int tmu_pe_request(struct pfe_ctrl *ctrl, int id, unsigned int tmu_cmd_bitmask); -+ -+int pfe_ctrl_set_eth_state(int id, unsigned int state, unsigned char *mac_addr); -+int pfe_ctrl_set_lro(char enable); -+#ifdef CFG_PCAP -+int pfe_ctrl_set_pcap(char enable); -+int pfe_ctrl_set_pcap_ratelimit(u32 pkts_per_msec); -+#endif -+void pfe_ctrl_suspend(struct pfe_ctrl *ctrl); -+void pfe_ctrl_resume(struct pfe_ctrl *ctrl); -+int relax(unsigned long end); -+ -+/* used for asynchronous message transfer to PFE */ -+#define FPP_MAX_MSG_LENGTH 256 /* expressed in U8 -> 256 bytes*/ -+struct fpp_msg { -+ struct list_head list; -+ void (*callback)(unsigned long, int, u16, u16 *); -+ unsigned long 
data; -+ u16 fcode; -+ u16 length; -+ u16 *payload; -+}; -+ -+#endif /* _PFE_CTRL_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/pfe_ctrl_hal.c -@@ -0,0 +1,207 @@ -+/* -+ * -+ * Copyright (C) 2007 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+/* OS abstraction functions used by PFE control code */ -+ -+#include -+ -+#include "pfe_ctrl_hal.h" -+ -+#include "pfe_mod.h" -+ -+extern char *__class_dmem_sh; -+extern char *__tmu_dmem_sh; -+#if !defined(CONFIG_UTIL_DISABLED) -+extern char *__util_dmem_sh; -+extern char *__util_ddr_sh; -+#endif -+ -+HostMessage msg_buf; -+static int msg_buf_used = 0; -+unsigned long virt_to_class_dmem(void *p) -+{ -+ struct pfe_ctrl *ctrl = &pfe->ctrl; -+ -+ if (p) -+ return (unsigned long)p - (unsigned long)&__class_dmem_sh + ctrl->class_dmem_sh; -+ else -+ return 0; -+} -+unsigned long virt_to_tmu_dmem(void *p) -+{ -+ struct pfe_ctrl *ctrl = &pfe->ctrl; -+ -+ if (p) -+ return (unsigned long)p - (unsigned long)&__tmu_dmem_sh + ctrl->tmu_dmem_sh; -+ else -+ return 0; -+} -+ -+ -+#if !defined(CONFIG_UTIL_DISABLED) -+unsigned long virt_to_util_dmem(void *p) -+{ -+ struct pfe_ctrl *ctrl = &pfe->ctrl; -+ -+ if (p) -+ return (unsigned long)p - (unsigned long)&__util_dmem_sh + ctrl->util_dmem_sh; -+ else -+ return 0; -+} -+ -+/** Returns the DDR physical address of a Util PE shared DDR variable. -+ * -+ * @param p pointer (kernel space, virtual) to be converted to a physical address. -+ */ -+unsigned long virt_to_util_ddr(void *p) -+{ -+ struct pfe_ctrl *ctrl = &pfe->ctrl; -+ -+ if (p) -+ return (unsigned long)p - (unsigned long)&__util_ddr_sh + ctrl->util_ddr_sh; -+ else -+ return 0; -+} -+/** Returns the virtual address of a Util PE shared DDR variable. -+ * -+ * @param p pointer (kernel space, virtual) to be converted to a pointer (usable in kernel space) -+ * pointing to the actual data. 
-+ */
-+
-+void *virt_to_util_virt(void *p)
-+{
-+	if (p)
-+		return DDR_PHYS_TO_VIRT(virt_to_util_ddr(p));
-+	else
-+		return NULL;
-+}
-+#endif
-+unsigned long virt_to_phys_iram(void *p)
-+{
-+	if (p)
-+		return (p - pfe->iram_baseaddr) + pfe->iram_phys_baseaddr;
-+	else
-+		return 0;
-+}
-+
-+unsigned long virt_to_phys_ipsec_lmem(void *p)
-+{
-+	struct pfe_ctrl *ctrl = &pfe->ctrl;
-+
-+	if (p)
-+		return (p - ctrl->ipsec_lmem_baseaddr) + ctrl->ipsec_lmem_phys_baseaddr;
-+	else
-+		return 0;
-+}
-+
-+unsigned long virt_to_phys_ipsec_axi(void *p)
-+{
-+	if (p)
-+		return (p - pfe->ipsec_baseaddr) + pfe->ipsec_phys_baseaddr;
-+	else
-+		return 0;
-+}
-+
-+
-+HostMessage *msg_alloc(void)
-+{
-+	if (msg_buf_used)
-+	{
-+		printk(KERN_ERR "%s: failed\n", __func__);
-+		return NULL;
-+	}
-+
-+	msg_buf_used = 1;
-+
-+	return &msg_buf;
-+}
-+
-+void msg_free(HostMessage *msg)
-+{
-+	if (!msg_buf_used)
-+		printk(KERN_ERR "%s: freeing an already free msg buffer\n", __func__);
-+
-+	msg_buf_used = 0;
-+}
-+
-+int msg_send(HostMessage *msg)
-+{
-+	struct pfe_ctrl *ctrl = &pfe->ctrl;
-+	int rc = -1;
-+
-+	if (!ctrl->event_cb)
-+		goto out;
-+
-+	if (ctrl->event_cb(msg->code, msg->length, msg->data) < 0)
-+		goto out;
-+
-+	rc = 0;
-+
-+out:
-+	msg_free(msg);
-+
-+	return rc;
-+}
-+
-+
-+void timer_init(TIMER_ENTRY *timer, TIMER_HANDLER handler)
-+{
-+	timer->handler = handler;
-+	timer->running = 0;
-+}
-+
-+
-+void timer_add(TIMER_ENTRY *timer, u16 granularity)
-+{
-+	struct pfe_ctrl *ctrl = &pfe->ctrl;
-+
-+
-+	timer->period = granularity;
-+	timer->timeout = jiffies + timer->period;
-+
-+	if (!timer->running)
-+	{
-+		list_add(&timer->list, &ctrl->timer_list);
-+		timer->running = 1;
-+	}
-+}
-+
-+
-+void timer_del(TIMER_ENTRY *timer)
-+{
-+
-+	if (timer->running)
-+	{
-+		list_del(&timer->list);
-+		timer->running = 0;
-+	}
-+}
-+
-+
-+void *Heap_Alloc(int size)
-+{
-+	/* FIXME we may want to use dma API's and use non cacheable memory */
-+	return pfe_kmalloc(size, GFP_KERNEL);
-+}
-+
-+
-+void Heap_Free(void *p)
-+{
-+	pfe_kfree(p);
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_ctrl_hal.h
-@@ -0,0 +1,129 @@
-+/*
-+ *
-+ * Copyright (C) 2007 Freescale Semiconductor, Inc.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#ifndef _PFE_CTRL_HAL_H_ -+#define _PFE_CTRL_HAL_H_ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "pfe_mod.h" -+ -+#define CLASS_DMEM_SH(var) __attribute__((section(".class_dmem_sh_" #var))) var -+#define CLASS_PE_LMEM_SH(var) __attribute__((section(".class_pe_lmem_sh_" #var))) var -+#define TMU_DMEM_SH(var) __attribute__((section(".tmu_dmem_sh_" #var))) var -+#define UTIL_DMEM_SH(var) __attribute__((section(".util_dmem_sh_" #var))) var -+#define UTIL_DDR_SH(var) __attribute__((section(".util_ddr_sh_" #var))) var -+ -+#define CLASS_DMEM_SH2(var) __attribute__((section(".class_dmem_sh_" #var))) class_##var -+#define CLASS_PE_LMEM_SH2(var) __attribute__((section(".class_pe_lmem_sh_" #var))) class_##var -+#define TMU_DMEM_SH2(var) __attribute__((section(".tmu_dmem_sh_" #var))) tmu_##var -+#define UTIL_DMEM_SH2(var) __attribute__((section(".util_dmem_sh_" #var))) util_##var -+ -+/** Translate the name of a shared variable to its PFE counterpart. -+ * Those macros may be used to determine the address of a shared variable, -+ * and will work even if the variable is accessed through a macro, as is the case -+ * with most fields of gFppGlobals. -+ */ -+#define CONCAT(str, var) str##var -+#define CLASS_VARNAME2(var) CONCAT(class_, var) -+#define UTIL_VARNAME2(var) CONCAT(util_, var) -+#define TMU_VARNAME2(var) CONCAT(tmu_, var) -+ -+typedef struct tHostMessage { -+ u16 length; -+ u16 code; -+ u16 data[128]; -+} HostMessage; -+ -+HostMessage *msg_alloc(void); -+void msg_free(HostMessage *msg); -+int msg_send(HostMessage *msg); -+ -+ -+unsigned long virt_to_class(void *p); -+unsigned long virt_to_class_dmem(void *p); -+unsigned long virt_to_class_pe_lmem(void *p); -+unsigned long virt_to_tmu_dmem(void *p); -+unsigned long virt_to_util_dmem(void *p); -+unsigned long virt_to_util_ddr(void *p); -+void * virt_to_util_virt(void *p); -+unsigned long virt_to_phys_iram(void *p); -+unsigned long virt_to_phys_ipsec_lmem(void *p); -+unsigned long virt_to_phys_ipsec_axi(void *p); -+ -+ -+#define TIMER_TICKS_PER_SEC 100 -+ -+#if TIMER_TICKS_PER_SEC > HZ -+#error TIMER_TICKS_PER_SEC is too high -+#endif -+ -+ -+typedef void (* TIMER_HANDLER)(void); -+ -+typedef struct { -+ struct list_head list; -+ unsigned long timeout; -+ unsigned long period; -+ TIMER_HANDLER handler; -+ char running; -+} TIMER_ENTRY; -+ -+ -+/** Initializes a timer structure. -+* Must be called once for each TIMER_ENTRY structure. -+* The caller must be holding the ctrl->mutex. -+* -+* @param timer pointer to the timer to be initialized -+* @param handler timer handler function pointer -+* -+*/ -+void timer_init(TIMER_ENTRY *timer, TIMER_HANDLER handler); -+ -+/** Adds a timer to the running timer list. -+* It's safe to call even if the timer was already running. In this case we just update the granularity. -+* The caller must be holding the ctrl->mutex. -+* -+* @param timer pointer to the timer to be added -+* @param granularity granularity of the timer (in timer tick units) -+* -+*/ -+void timer_add(TIMER_ENTRY *timer, u16 granularity); -+ -+/** Deletes a timer from the running timer list. -+* It's safe to call even if the timer is no longer running. -+* The caller must be holding the ctrl->mutex. 
-+*
-+* @param timer	pointer to the timer to be removed
-+*/
-+void timer_del(TIMER_ENTRY *timer);
-+
-+void *Heap_Alloc(int size);
-+
-+#define Heap_Alloc_ARAM(s)	Heap_Alloc(s)
-+#define __Heap_Alloc(h, s)	Heap_Alloc(s)
-+void Heap_Free(void *p);
-+
-+#endif /* _PFE_CTRL_HAL_H_ */
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_debugfs.c
-@@ -0,0 +1,109 @@
-+/*
-+ * (C) Copyright 2013
-+ * Author : Freescale Technologies
-+ *
-+ * See file CREDITS for list of people who contributed to this
-+ * project.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of
-+ * the License, or (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-+ * MA 02111-1307 USA
-+ * */
-+
-+#include
-+#include
-+#include
-+
-+#include "pfe_mod.h"
-+
-+static int dmem_show(struct seq_file *s, void *unused)
-+{
-+	u32 dmem_addr, val;
-+	int id = (long int)s->private;
-+	int i;
-+
-+	for (dmem_addr = 0; dmem_addr < CLASS_DMEM_SIZE; dmem_addr += 8 * 4) {
-+		seq_printf(s, "%04x:", dmem_addr);
-+
-+		for (i = 0; i < 8; i++) {
-+			val = pe_dmem_read(id, dmem_addr + i * 4, 4);
-+			seq_printf(s, " %02x %02x %02x %02x", val & 0xff, (val >> 8) & 0xff, (val >> 16) & 0xff, (val >> 24) & 0xff);
-+		}
-+
-+		seq_printf(s, "\n");
-+	}
-+
-+	return 0;
-+}
-+
-+static int dmem_open(struct inode *inode, struct file *file)
-+{
-+	return single_open(file, dmem_show, inode->i_private);
-+}
-+
-+static const struct file_operations dmem_fops = {
-+	.open		= dmem_open,
-+	.read		= seq_read,
-+	.llseek		= seq_lseek,
-+	.release	= single_release,
-+};
-+
-+int pfe_debugfs_init(struct pfe *pfe)
-+{
-+	struct dentry *d;
-+
-+	printk(KERN_INFO "%s\n", __func__);
-+
-+	pfe->dentry = debugfs_create_dir("pfe", NULL);
-+	if (IS_ERR_OR_NULL(pfe->dentry))
-+		goto err_dir;
-+
-+	d = debugfs_create_file("pe0_dmem", S_IRUGO, pfe->dentry, (void *)0, &dmem_fops);
-+	if (IS_ERR_OR_NULL(d))
-+		goto err_pe;
-+
-+	d = debugfs_create_file("pe1_dmem", S_IRUGO, pfe->dentry, (void *)1, &dmem_fops);
-+	if (IS_ERR_OR_NULL(d))
-+		goto err_pe;
-+
-+	d = debugfs_create_file("pe2_dmem", S_IRUGO, pfe->dentry, (void *)2, &dmem_fops);
-+	if (IS_ERR_OR_NULL(d))
-+		goto err_pe;
-+
-+	d = debugfs_create_file("pe3_dmem", S_IRUGO, pfe->dentry, (void *)3, &dmem_fops);
-+	if (IS_ERR_OR_NULL(d))
-+		goto err_pe;
-+
-+	d = debugfs_create_file("pe4_dmem", S_IRUGO, pfe->dentry, (void *)4, &dmem_fops);
-+	if (IS_ERR_OR_NULL(d))
-+		goto err_pe;
-+
-+	d = debugfs_create_file("pe5_dmem", S_IRUGO, pfe->dentry, (void *)5, &dmem_fops);
-+	if (IS_ERR_OR_NULL(d))
-+		goto err_pe;
-+
-+	return 0;
-+
-+err_pe:
-+	debugfs_remove_recursive(pfe->dentry);
-+
-+err_dir:
-+	return -1;
-+}
-+
-+void pfe_debugfs_exit(struct pfe *pfe)
-+{
-+	debugfs_remove_recursive(pfe->dentry);
-+}
-+
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_debugfs.h
-@@ -0,0 +1,8 @@
-+#ifndef _PFE_DEBUGFS_H_
-+#define _PFE_DEBUGFS_H_
-+
-+int pfe_debugfs_init(struct pfe *pfe);
-+void pfe_debugfs_exit(struct pfe *pfe);
-+#endif /* _PFE_DEBUGFS_H_ */
-+
-+
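dmem_show() above is a textbook seq_file single-shot dump: single_open() stashes the PE id in s->private, and the show callback prints eight DMEM words per row. The same skeleton, reduced to its moving parts (the my_* names are illustrative, not driver symbols):

    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    static int my_show(struct seq_file *s, void *unused)
    {
    	long id = (long)s->private;	/* per-file context, like the PE id */

    	seq_printf(s, "instance %ld\n", id);
    	return 0;
    }

    static int my_open(struct inode *inode, struct file *file)
    {
    	return single_open(file, my_show, inode->i_private);
    }

    static const struct file_operations my_fops = {
    	.open		= my_open,
    	.read		= seq_read,
    	.llseek		= seq_lseek,
    	.release	= single_release,
    };

The six near-identical debugfs_create_file() calls in pfe_debugfs_init() differ only in the (void *)N cookie, which is what lands in inode->i_private.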
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_eth.c
-@@ -0,0 +1,2956 @@
-+/*
-+ *
-+ * Copyright (C) 2007 Freescale Semiconductor, Inc.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ */
-+
-+/** @pfe_eth.c.
-+ * Ethernet driver to handle the exception path for PFE.
-+ * - uses HIF functions to send/receive packets.
-+ * - uses ctrl function to start/stop interfaces.
-+ * - uses direct register accesses to control phy operation.
-+ */
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+
-+#include
-+#include
-+
-+#include
-+#include
-+#include
-+#include
-+#include
-+
-+#if defined(CONFIG_NF_CONNTRACK_MARK)
-+#include
-+#endif
-+
-+#include "pfe_mod.h"
-+#include "pfe_eth.h"
-+
-+const char comcerto_eth_driver_version[]="1.0";
-+static void *cbus_emac_base[3];
-+static void *cbus_gpi_base[3];
-+
-+/* Forward Declaration */
-+static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv);
-+static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv, int force);
-+static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int txQ_num, int from_tx, int n_desc);
-+
-+#if defined(CONFIG_PLATFORM_C2000)
-+static void pfe_eth_set_device_wakeup(struct pfe *pfe);
-+
-+
-+
-+unsigned int gemac_regs[] = {
-+	0x0000,	/* Network control */
-+	0x0004,	/* Network configuration */
-+	0x0008,	/* Network status */
-+	0x0010,	/* DMA configuration */
-+	0x0014,	/* Transmit status */
-+	0x0020,	/* Receive status */
-+	0x0024,	/* Interrupt status */
-+	0x0030,	/* Interrupt mask */
-+	0x0038,	/* Received pause quantum */
-+	0x003c,	/* Transmit pause quantum */
-+	0x0080,	/* Hash register bottom [31:0] */
-+	0x0084,	/* Hash register bottom [63:32] */
-+	0x0088,	/* Specific address 1 bottom [31:0] */
-+	0x008c,	/* Specific address 1 top [47:32] */
-+	0x0090,	/* Specific address 2 bottom [31:0] */
-+	0x0094,	/* Specific address 2 top [47:32] */
-+	0x0098,	/* Specific address 3 bottom [31:0] */
-+	0x009c,	/* Specific address 3 top [47:32] */
-+	0x00a0,	/* Specific address 4 bottom [31:0] */
-+	0x00a4,	/* Specific address 4 top [47:32] */
-+	0x00a8,	/* Type ID Match 1 */
-+	0x00ac,	/* Type ID Match 2 */
-+	0x00b0,	/* Type ID Match 3 */
-+	0x00b4,	/* Type ID Match 4 */
-+	0x00b8,	/* Wake Up ON LAN */
-+	0x00bc,	/* IPG stretch register */
-+	0x00c0,	/* Stacked VLAN Register */
-+	0x00fc,	/* Module ID */
-+	0x07a0	/* EMAC Control register */
-+};
-+#else
-+unsigned int gemac_regs[] = {
-+	0x0004,	/*Interrupt event */
-+	0x0008,	/*Interrupt mask */
-+	0x0024,	/*Ethernet control */
-+	0x0064,	/*MIB Control/Status */
-+	0x0084,	/*Receive control/status */
-+	0x00C4,	/*Transmit control */
-+	0x00E4,	/*Physical address low */
-+	0x00E8,	/*Physical address high */
-+	0x0144,	/*Transmit FIFO Watermark and Store and Forward Control*/
-+	0x0190,	/* Receive FIFO Section Full Threshold */
-+	0x01A0,	/* Transmit FIFO Section Empty Threshold */
-+	0x01B0,	/* Frame Truncation Length */
-+};
-+#endif
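The two gemac_regs[] tables above are plain offset lists; the ethtool get_regs hooks later in this file walk such a table and readl() each offset into the user-visible dump buffer. The consumption pattern, sketched with a hypothetical dump_regs() helper (not a driver function):

    #include <linux/io.h>
    #include <linux/types.h>

    /* Illustrative only; mirrors what pfe_eth_gemac_get_regs() does below. */
    static void dump_regs(void __iomem *base, const unsigned int *offs,
    		      int count, u32 *buf)
    {
    	int i;

    	for (i = 0; i < count; i++)
    		buf[i] = readl(base + offs[i]);
    }

Keeping the offsets in a table means get_regs_len() can be derived from sizeof(gemac_regs), and the two SoC variants stay selectable with a single #if.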
-+/********************************************************************/
-+/* SYSFS INTERFACE */
-+/********************************************************************/
-+
-+
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+/*
-+ * pfe_eth_show_napi_stats
-+ */
-+static ssize_t pfe_eth_show_napi_stats(struct device *dev,
-+		struct device_attribute *attr, char *buf)
-+{
-+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
-+	ssize_t len = 0;
-+
-+	len += sprintf(buf + len, "sched: %u\n", priv->napi_counters[NAPI_SCHED_COUNT]);
-+	len += sprintf(buf + len, "poll: %u\n", priv->napi_counters[NAPI_POLL_COUNT]);
-+	len += sprintf(buf + len, "packet: %u\n", priv->napi_counters[NAPI_PACKET_COUNT]);
-+	len += sprintf(buf + len, "budget: %u\n", priv->napi_counters[NAPI_FULL_BUDGET_COUNT]);
-+	len += sprintf(buf + len, "desc: %u\n", priv->napi_counters[NAPI_DESC_COUNT]);
-+
-+	return len;
-+}
-+
-+/*
-+ * pfe_eth_set_napi_stats
-+ */
-+static ssize_t pfe_eth_set_napi_stats(struct device *dev,
-+		struct device_attribute *attr, const char *buf, size_t count)
-+{
-+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
-+
-+	memset(priv->napi_counters, 0, sizeof(priv->napi_counters));
-+
-+	return count;
-+}
-+#endif
-+#ifdef PFE_ETH_TX_STATS
-+/** pfe_eth_show_tx_stats
-+ *
-+ */
-+static ssize_t pfe_eth_show_tx_stats(struct device *dev,
-+		struct device_attribute *attr, char *buf)
-+{
-+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
-+	ssize_t len = 0;
-+	int i;
-+
-+	len += sprintf(buf + len, "TX queues stats:\n");
-+
-+	for (i = 0; i < emac_txq_cnt; i++) {
-+		struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->dev, i);
-+
-+		len += sprintf(buf + len, "\n");
-+		__netif_tx_lock_bh(tx_queue);
-+
-+		hif_tx_lock(&pfe->hif);
-+		len += sprintf(buf + len, "Queue %2d : credits = %10d\n", i, hif_lib_tx_credit_avail(pfe, priv->id, i));
-+		len += sprintf(buf + len, " tx packets = %10d\n", pfe->tmu_credit.tx_packets[priv->id][i]);
-+		hif_tx_unlock(&pfe->hif);
-+
-+		/* Don't output additional stats if queue never used */
-+		if (!pfe->tmu_credit.tx_packets[priv->id][i])
-+			goto skip;
-+
-+		len += sprintf(buf + len, " clean_fail = %10d\n", priv->clean_fail[i]);
-+		len += sprintf(buf + len, " stop_queue = %10d\n", priv->stop_queue_total[i]);
-+		len += sprintf(buf + len, " stop_queue_hif = %10d\n", priv->stop_queue_hif[i]);
-+		len += sprintf(buf + len, " stop_queue_hif_client = %10d\n", priv->stop_queue_hif_client[i]);
-+		len += sprintf(buf + len, " stop_queue_credit = %10d\n", priv->stop_queue_credit[i]);
-+skip:
-+		__netif_tx_unlock_bh(tx_queue);
-+	}
-+	return len;
-+}
-+
-+/** pfe_eth_set_tx_stats
-+ *
-+ */
-+static ssize_t pfe_eth_set_tx_stats(struct device *dev,
-+		struct device_attribute *attr, const char *buf, size_t count)
-+{
-+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
-+	int i;
-+
-+	for (i = 0; i < emac_txq_cnt; i++) {
-+		struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->dev, i);
-+
-+		__netif_tx_lock_bh(tx_queue);
-+		priv->clean_fail[i] = 0;
-+		priv->stop_queue_total[i] = 0;
-+		priv->stop_queue_hif[i] = 0;
-+		priv->stop_queue_hif_client[i] = 0;
-+		priv->stop_queue_credit[i] = 0;
-+		__netif_tx_unlock_bh(tx_queue);
-+	}
-+
-+	return count;
-+}
-+#endif
-+/** pfe_eth_show_txavail
-+ *
-+ */
-+static ssize_t pfe_eth_show_txavail(struct device *dev,
-+		struct device_attribute *attr, char *buf)
-+{
-+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
-+	ssize_t len = 0;
-+	int i;
-+
-+	for (i = 0; i < emac_txq_cnt; i++) {
-+		struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->dev, i);
-+
-+		__netif_tx_lock_bh(tx_queue);
-+
-+		len += sprintf(buf + len, "%d", hif_lib_tx_avail(&priv->client, i));
-+
-+		__netif_tx_unlock_bh(tx_queue);
-+
-+		if (i == (emac_txq_cnt - 1))
-+			len += sprintf(buf + len, "\n");
-+		else
-+			len += sprintf(buf + len, " ");
-+	}
-+
-+	return len;
-+}
-+
-+
-+/** pfe_eth_show_default_priority
-+ *
-+ */
-+static ssize_t pfe_eth_show_default_priority(struct device *dev,
-+		struct device_attribute *attr,
-+		char *buf)
-+{
-+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
-+	unsigned long flags;
-+	int rc;
-+
-+	spin_lock_irqsave(&priv->lock, flags);
-+	rc = sprintf(buf, "%d\n", priv->default_priority);
-+	spin_unlock_irqrestore(&priv->lock, flags);
-+
-+	return rc;
-+}
-+
-+/** pfe_eth_set_default_priority
-+ *
-+ */
-+
-+static ssize_t pfe_eth_set_default_priority(struct device *dev,
-+		struct device_attribute *attr,
-+		const char *buf, size_t count)
-+{
-+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&priv->lock, flags);
-+	priv->default_priority = simple_strtoul(buf, NULL, 0);
-+	spin_unlock_irqrestore(&priv->lock, flags);
-+
-+	return count;
-+}
-+
-+static DEVICE_ATTR(txavail, 0444, pfe_eth_show_txavail, NULL);
-+static DEVICE_ATTR(default_priority, 0644, pfe_eth_show_default_priority, pfe_eth_set_default_priority);
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+static DEVICE_ATTR(napi_stats, 0644, pfe_eth_show_napi_stats, pfe_eth_set_napi_stats);
-+#endif
-+
-+#ifdef PFE_ETH_TX_STATS
-+static DEVICE_ATTR(tx_stats, 0644, pfe_eth_show_tx_stats, pfe_eth_set_tx_stats);
-+#endif
-+
-+
-+/** pfe_eth_sysfs_init
-+ *
-+ */
-+static int pfe_eth_sysfs_init(struct net_device *dev)
-+{
-+	struct pfe_eth_priv_s *priv = netdev_priv(dev);
-+	int err;
-+
-+	/* Initialize the default values */
-+	/* By default, packets without conntrack will use this default high priority queue */
-+	priv->default_priority = 15;
-+
-+	/* Create our sysfs files */
-+	err = device_create_file(&dev->dev, &dev_attr_default_priority);
-+	if (err) {
-+		netdev_err(dev, "failed to create default_priority sysfs files\n");
-+		goto err_priority;
-+	}
-+
-+	err = device_create_file(&dev->dev, &dev_attr_txavail);
-+	if (err) {
-+		netdev_err(dev, "failed to create txavail sysfs files\n");
-+		goto err_txavail;
-+	}
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+	err = device_create_file(&dev->dev, &dev_attr_napi_stats);
-+	if (err) {
-+		netdev_err(dev, "failed to create napi stats sysfs files\n");
-+		goto err_napi;
-+	}
-+#endif
-+
-+#ifdef PFE_ETH_TX_STATS
-+	err = device_create_file(&dev->dev, &dev_attr_tx_stats);
-+	if (err) {
-+		netdev_err(dev, "failed to create tx stats sysfs files\n");
-+		goto err_tx;
-+	}
-+#endif
-+
-+	return 0;
-+
-+#ifdef PFE_ETH_TX_STATS
-+err_tx:
-+#endif
-+#ifdef PFE_ETH_NAPI_STATS
-+	device_remove_file(&dev->dev, &dev_attr_napi_stats);
-+
-+err_napi:
-+#endif
-+	device_remove_file(&dev->dev, &dev_attr_txavail);
-+
-+err_txavail:
-+	device_remove_file(&dev->dev, &dev_attr_default_priority);
-+
-+err_priority:
-+	return -1;
-+}
-+
-+/** pfe_eth_sysfs_exit
-+ *
-+ */
-+void pfe_eth_sysfs_exit(struct net_device *dev)
-+{
-+#ifdef PFE_ETH_TX_STATS
-+	device_remove_file(&dev->dev, &dev_attr_tx_stats);
-+#endif
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+	device_remove_file(&dev->dev, &dev_attr_napi_stats);
-+#endif
-+	device_remove_file(&dev->dev, &dev_attr_txavail);
-+	device_remove_file(&dev->dev, &dev_attr_default_priority);
-+}
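The init path above creates each attribute individually and unwinds in reverse on failure. An attribute group expresses the same thing declaratively; sysfs then creates and removes the whole set in one call. A hedged sketch reusing the two unconditional attributes (the driver itself does not use groups):

    static struct attribute *pfe_eth_attrs[] = {
    	&dev_attr_default_priority.attr,
    	&dev_attr_txavail.attr,
    	NULL,		/* sentinel required by sysfs */
    };

    static const struct attribute_group pfe_eth_attr_group = {
    	.attrs = pfe_eth_attrs,
    };

    /* err = sysfs_create_group(&dev->dev.kobj, &pfe_eth_attr_group); */

The conditional napi_stats/tx_stats attributes would need .is_visible handling under a group, which is one reason the explicit ladder is a defensible choice here.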
-+
-+/*************************************************************************/
-+/* ETHTOOL INTERFACE */
-+/*************************************************************************/
-+
-+#if defined(CONFIG_PLATFORM_C2000)
-+static char stat_gstrings[][ETH_GSTRING_LEN] = {
-+	"tx- octets",
-+	"tx- packets",
-+	"tx- broadcast",
-+	"tx- multicast",
-+	"tx- pause",
-+	"tx- 64 bytes packets",
-+	"tx- 64 - 127 bytes packets",
-+	"tx- 128 - 255 bytes packets",
-+	"tx- 256 - 511 bytes packets",
-+	"tx- 512 - 1023 bytes packets",
-+	"tx- 1024 - 1518 bytes packets",
-+	"tx- > 1518 bytes packets",
-+	"tx- underruns - errors",
-+	"tx- single collision",
-+	"tx- multi collision",
-+	"tx- excess. collision - errors",
-+	"tx- late collision - errors",
-+	"tx- deferred",
-+	"tx- carrier sense - errors",
-+	"rx- octets",
-+	"rx- packets",
-+	"rx- broadcast",
-+	"rx- multicast",
-+	"rx- pause",
-+	"rx- 64 bytes packets",
-+	"rx- 64 - 127 bytes packets",
-+	"rx- 128 - 255 bytes packets",
-+	"rx- 256 - 511 bytes packets",
-+	"rx- 512 - 1023 bytes packets",
-+	"rx- 1024 - 1518 bytes packets",
-+	"rx- > 1518 bytes packets",
-+	"rx- undersize - errors",
-+	"rx- oversize - errors",
-+	"rx- jabbers - errors",
-+	"rx- fcs - errors",
-+	"rx- length - errors",
-+	"rx- symbol - errors",
-+	"rx- align - errors",
-+	"rx- resource - errors",
-+	"rx- overrun - errors",
-+	"rx- IP cksum - errors",
-+	"rx- TCP cksum - errors",
-+	"rx- UDP cksum - errors"
-+};
-+
-+
-+/**
-+ * pfe_eth_gstrings - Fill in a buffer with the strings which correspond to
-+ *                    the stats.
-+ *
-+ */
-+static void pfe_eth_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
-+{
-+	switch (stringset) {
-+	case ETH_SS_STATS:
-+		memcpy(buf, stat_gstrings, (EMAC_RMON_LEN - 2) * ETH_GSTRING_LEN);
-+		break;
-+
-+	default:
-+		WARN_ON(1);
-+		break;
-+	}
-+}
-+
-+/**
-+ * pfe_eth_fill_stats - Fill in an array of 64-bit statistics from
-+ *			various sources. This array will be appended
-+ *			to the end of the ethtool_stats* structure, and
-+ *			returned to user space
-+ */
-+static void pfe_eth_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf)
-+{
-+	struct pfe_eth_priv_s *priv = netdev_priv(dev);
-+	int i;
-+	for (i = 0; i < EMAC_RMON_LEN; i++, buf++) {
-+		*buf = readl(priv->EMAC_baseaddr + EMAC_RMON_BASE_OFST + (i << 2));
-+		if ( ( i == EMAC_RMON_TXBYTES_POS ) || ( i == EMAC_RMON_RXBYTES_POS ) ){
-+			i++;
-+			*buf |= (u64)readl(priv->EMAC_baseaddr + EMAC_RMON_BASE_OFST + (i << 2)) << 32;
-+		}
-+	}
-+
-+}
-+
-+/**
-+ * pfe_eth_stats_count - Returns the number of stats (and their corresponding strings)
-+ *
-+ */
-+static int pfe_eth_stats_count(struct net_device *dev, int sset)
-+{
-+	switch (sset) {
-+	case ETH_SS_STATS:
-+		return EMAC_RMON_LEN - 2;
-+	default:
-+		return -EOPNOTSUPP;
-+	}
-+}
-+
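The three hooks above form one ethtool contract: get_sset_count() bounds the arrays, and the i-th string from get_strings() labels the i-th u64 from get_ethtool_stats(). The "- 2" accounts for the two 64-bit byte counters that each consume two RMON register slots but only one string. A reduced sketch of the contract (my_names is a hypothetical table, not a driver symbol):

    #include <linux/ethtool.h>

    static const char my_names[][ETH_GSTRING_LEN] = { "rx_ok", "tx_ok" };

    static int my_sset_count(struct net_device *dev, int sset)
    {
    	return sset == ETH_SS_STATS ? ARRAY_SIZE(my_names) : -EOPNOTSUPP;
    }

    static void my_get_strings(struct net_device *dev, u32 sset, u8 *buf)
    {
    	if (sset == ETH_SS_STATS)
    		memcpy(buf, my_names, sizeof(my_names));	/* order must match the stats fill */
    }

If count and strings ever disagree, ethtool -S prints shifted or truncated labels, which is exactly what the TX/RX byte-counter special case in pfe_eth_fill_stats() guards against.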
-+#if defined(CONFIG_PLATFORM_C2000)
-+/**
-+ * pfe_eth_set_wol - Set the magic packet option, in WoL register.
-+ *
-+ */
-+static int pfe_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-+{
-+	struct pfe_eth_priv_s *priv = netdev_priv(dev);
-+
-+	if (wol->wolopts & ~(WAKE_MAGIC | WAKE_ARP | WAKE_MCAST | WAKE_UCAST))
-+		return -EOPNOTSUPP;
-+
-+	priv->wol = 0;
-+
-+	if (wol->wolopts & WAKE_MAGIC)
-+		priv->wol |= EMAC_WOL_MAGIC;
-+	if (wol->wolopts & WAKE_ARP)
-+		priv->wol |= EMAC_WOL_ARP;
-+	if (wol->wolopts & WAKE_MCAST)
-+		priv->wol |= EMAC_WOL_MULTI;
-+	if (wol->wolopts & WAKE_UCAST)
-+		priv->wol |= EMAC_WOL_SPEC_ADDR;
-+
-+	pfe_eth_set_device_wakeup(priv->pfe);
-+
-+	return 0;
-+}
-+
-+/**
-+ *
-+ * pfe_eth_get_wol - Get the WoL options.
-+ *
-+ */
-+static void pfe_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-+{
-+	struct pfe_eth_priv_s *priv = netdev_priv(dev);
-+
-+	wol->supported = (WAKE_MAGIC | WAKE_ARP | WAKE_MCAST | WAKE_UCAST);
-+	wol->wolopts = 0;
-+
-+	if(priv->wol & EMAC_WOL_MAGIC)
-+		wol->wolopts |= WAKE_MAGIC;
-+	if(priv->wol & EMAC_WOL_ARP)
-+		wol->wolopts |= WAKE_ARP;
-+	if(priv->wol & EMAC_WOL_MULTI)
-+		wol->wolopts |= WAKE_MCAST;
-+	if(priv->wol & EMAC_WOL_SPEC_ADDR)
-+		wol->wolopts |= WAKE_UCAST;
-+
-+	memset(&wol->sopass, 0, sizeof(wol->sopass));
-+}
-+#endif
-+/**
-+ * pfe_eth_gemac_reglen - Return the length of the register structure.
-+ *
-+ */
-+static int pfe_eth_gemac_reglen(struct net_device *dev)
-+{
-+	return (sizeof (gemac_regs)/ sizeof(u32)) + (( MAX_UC_SPEC_ADDR_REG - 3 ) * 2);
-+}
-+
-+/**
-+ * pfe_eth_gemac_get_regs - Return the gemac register structure.
-+ *
-+ */
-+static void pfe_eth_gemac_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
-+{
-+	int i,j;
-+	struct pfe_eth_priv_s *priv = netdev_priv(dev);
-+	u32 *buf = (u32 *) regbuf;
-+
-+	for (i = 0; i < sizeof (gemac_regs) / sizeof (u32); i++)
-+		buf[i] = readl( priv->EMAC_baseaddr + gemac_regs[i] );
-+
-+	for (j = 0; j < (( MAX_UC_SPEC_ADDR_REG - 3 ) * 2); j++,i++)
-+		buf[i] = readl( priv->EMAC_baseaddr + EMAC_SPEC5_ADD_BOT + (j<<2) );
-+
-+}
-+
-+
-+#else //if defined(CONFIG_PLATFORM_C2000)
-+/*MTIP GEMAC */
-+static const struct fec_stat {
-+	char name[ETH_GSTRING_LEN];
-+	u16 offset;
-+} fec_stats[] = {
-+	/* RMON TX */
-+	{ "tx_dropped", RMON_T_DROP },
-+	{ "tx_packets", RMON_T_PACKETS },
-+	{ "tx_broadcast", RMON_T_BC_PKT },
-+	{ "tx_multicast", RMON_T_MC_PKT },
-+	{ "tx_crc_errors", RMON_T_CRC_ALIGN },
-+	{ "tx_undersize", RMON_T_UNDERSIZE },
-+	{ "tx_oversize", RMON_T_OVERSIZE },
-+	{ "tx_fragment", RMON_T_FRAG },
-+	{ "tx_jabber", RMON_T_JAB },
-+	{ "tx_collision", RMON_T_COL },
-+	{ "tx_64byte", RMON_T_P64 },
-+	{ "tx_65to127byte", RMON_T_P65TO127 },
-+	{ "tx_128to255byte", RMON_T_P128TO255 },
-+	{ "tx_256to511byte", RMON_T_P256TO511 },
-+	{ "tx_512to1023byte", RMON_T_P512TO1023 },
-+	{ "tx_1024to2047byte", RMON_T_P1024TO2047 },
-+	{ "tx_GTE2048byte", RMON_T_P_GTE2048 },
-+	{ "tx_octets", RMON_T_OCTETS },
-+
-+	/* IEEE TX */
-+	{ "IEEE_tx_drop", IEEE_T_DROP },
-+	{ "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
-+	{ "IEEE_tx_1col", IEEE_T_1COL },
-+	{ "IEEE_tx_mcol", IEEE_T_MCOL },
-+	{ "IEEE_tx_def", IEEE_T_DEF },
-+	{ "IEEE_tx_lcol", IEEE_T_LCOL },
-+	{ "IEEE_tx_excol", IEEE_T_EXCOL },
-+	{ "IEEE_tx_macerr", IEEE_T_MACERR },
-+	{ "IEEE_tx_cserr", IEEE_T_CSERR },
-+	{ "IEEE_tx_sqe", IEEE_T_SQE },
-+	{ "IEEE_tx_fdxfc", IEEE_T_FDXFC },
-+	{ "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
-+
-+	/* RMON RX */
-+	{ "rx_packets", RMON_R_PACKETS },
-+	{ "rx_broadcast", RMON_R_BC_PKT },
-+	{ "rx_multicast", RMON_R_MC_PKT },
-+	{ "rx_crc_errors",
RMON_R_CRC_ALIGN }, -+ { "rx_undersize", RMON_R_UNDERSIZE }, -+ { "rx_oversize", RMON_R_OVERSIZE }, -+ { "rx_fragment", RMON_R_FRAG }, -+ { "rx_jabber", RMON_R_JAB }, -+ { "rx_64byte", RMON_R_P64 }, -+ { "rx_65to127byte", RMON_R_P65TO127 }, -+ { "rx_128to255byte", RMON_R_P128TO255 }, -+ { "rx_256to511byte", RMON_R_P256TO511 }, -+ { "rx_512to1023byte", RMON_R_P512TO1023 }, -+ { "rx_1024to2047byte", RMON_R_P1024TO2047 }, -+ { "rx_GTE2048byte", RMON_R_P_GTE2048 }, -+ { "rx_octets", RMON_R_OCTETS }, -+ -+ /* IEEE RX */ -+ { "IEEE_rx_drop", IEEE_R_DROP }, -+ { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK }, -+ { "IEEE_rx_crc", IEEE_R_CRC }, -+ { "IEEE_rx_align", IEEE_R_ALIGN }, -+ { "IEEE_rx_macerr", IEEE_R_MACERR }, -+ { "IEEE_rx_fdxfc", IEEE_R_FDXFC }, -+ { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, -+}; -+ -+static void pfe_eth_fill_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(fec_stats); i++) -+ data[i] = readl(priv->EMAC_baseaddr + fec_stats[i].offset); -+} -+ -+static void pfe_eth_gstrings(struct net_device *netdev, -+ u32 stringset, u8 *data) -+{ -+ int i; -+ switch (stringset) { -+ case ETH_SS_STATS: -+ for (i = 0; i < ARRAY_SIZE(fec_stats); i++) -+ memcpy(data + i * ETH_GSTRING_LEN, -+ fec_stats[i].name, ETH_GSTRING_LEN); -+ break; -+ } -+} -+ -+static int pfe_eth_stats_count(struct net_device *dev, int sset) -+{ -+ switch (sset) { -+ case ETH_SS_STATS: -+ return ARRAY_SIZE(fec_stats); -+ default: -+ return -EOPNOTSUPP; -+ } -+} -+ -+/** -+ * pfe_eth_gemac_reglen - Return the length of the register structure. -+ * -+ */ -+static int pfe_eth_gemac_reglen(struct net_device *dev) -+{ -+ printk("%s() \n", __func__); -+ return (sizeof (gemac_regs)/ sizeof(u32)) ; -+} -+ -+/** -+ * pfe_eth_gemac_get_regs - Return the gemac register structure. -+ * -+ */ -+static void pfe_eth_gemac_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf) -+{ -+ int i; -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ u32 *buf = (u32 *) regbuf; -+ -+ printk("%s() \n", __func__); -+ for (i = 0; i < sizeof (gemac_regs) / sizeof (u32); i++) -+ buf[i] = readl( priv->EMAC_baseaddr + gemac_regs[i] ); -+ -+} -+ -+ -+#endif -+ -+/** -+ * pfe_eth_get_drvinfo - Fills in the drvinfo structure with some basic info -+ * -+ */ -+static void pfe_eth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) -+{ -+ strncpy(drvinfo->driver, DRV_NAME, COMCERTO_INFOSTR_LEN); -+ strncpy(drvinfo->version, comcerto_eth_driver_version, COMCERTO_INFOSTR_LEN); -+ strncpy(drvinfo->fw_version, "N/A", COMCERTO_INFOSTR_LEN); -+ strncpy(drvinfo->bus_info, "N/A", COMCERTO_INFOSTR_LEN); -+ drvinfo->testinfo_len = 0; -+ drvinfo->regdump_len = 0; -+ drvinfo->eedump_len = 0; -+} -+ -+/** -+ * pfe_eth_set_settings - Used to send commands to PHY. -+ * -+ */ -+ -+static int pfe_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ struct phy_device *phydev = priv->phydev; -+ -+ if (NULL == phydev) -+ return -ENODEV; -+ -+ return phy_ethtool_sset(phydev, cmd); -+} -+ -+ -+/** -+ * pfe_eth_getsettings - Return the current settings in the ethtool_cmd structure. 
-+ * -+ */ -+static int pfe_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ struct phy_device *phydev = priv->phydev; -+ -+ if (NULL == phydev) -+ return -ENODEV; -+ -+ return phy_ethtool_gset(phydev, cmd); -+} -+ -+ -+/** -+ * pfe_eth_get_msglevel - Gets the debug message mask. -+ * -+ */ -+static uint32_t pfe_eth_get_msglevel(struct net_device *dev) -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ -+ return priv->msg_enable; -+} -+ -+/** -+ * pfe_eth_set_msglevel - Sets the debug message mask. -+ * -+ */ -+static void pfe_eth_set_msglevel(struct net_device *dev, uint32_t data) -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ -+ priv->msg_enable = data; -+} -+ -+#define HIF_RX_COAL_MAX_CLKS (~(1<<31)) -+#define HIF_RX_COAL_CLKS_PER_USEC (pfe->ctrl.sys_clk/1000) -+#define HIF_RX_COAL_MAX_USECS (HIF_RX_COAL_MAX_CLKS/HIF_RX_COAL_CLKS_PER_USEC) -+ -+/** -+ * pfe_eth_set_coalesce - Sets rx interrupt coalescing timer. -+ * -+ */ -+static int pfe_eth_set_coalesce(struct net_device *dev, -+ struct ethtool_coalesce *ec) -+{ -+ if (ec->rx_coalesce_usecs > HIF_RX_COAL_MAX_USECS) -+ return -EINVAL; -+ -+ if (!ec->rx_coalesce_usecs) { -+ writel(0, HIF_INT_COAL); -+ return 0; -+ } -+ -+ writel((ec->rx_coalesce_usecs * HIF_RX_COAL_CLKS_PER_USEC) | HIF_INT_COAL_ENABLE, HIF_INT_COAL); -+ -+ return 0; -+} -+ -+/** -+ * pfe_eth_get_coalesce - Gets rx interrupt coalescing timer value. -+ * -+ */ -+static int pfe_eth_get_coalesce(struct net_device *dev, -+ struct ethtool_coalesce *ec) -+{ -+ int reg_val = readl(HIF_INT_COAL); -+ -+ if (reg_val & HIF_INT_COAL_ENABLE) -+ ec->rx_coalesce_usecs = (reg_val & HIF_RX_COAL_MAX_CLKS) / HIF_RX_COAL_CLKS_PER_USEC; -+ else -+ ec->rx_coalesce_usecs = 0; -+ -+ return 0; -+} -+ -+#if defined(CONFIG_PLATFORM_C2000) -+/** -+ * pfe_eth_pause_rx_enabled - Tests if pause rx is enabled on GEM -+ * -+ */ -+static int pfe_eth_pause_rx_enabled(struct pfe_eth_priv_s *priv) -+{ -+ return (readl(priv->EMAC_baseaddr + EMAC_NETWORK_CONFIG) & EMAC_ENABLE_PAUSE_RX) != 0; -+} -+ -+/** -+ * pfe_eth_set_pauseparam - Sets pause parameters -+ * -+ */ -+static int pfe_eth_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ -+ if (epause->rx_pause) -+ { -+ gemac_enable_pause_rx(priv->EMAC_baseaddr); -+ if (priv->phydev) -+ priv->phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; -+ } -+ else -+ { -+ gemac_disable_pause_rx(priv->EMAC_baseaddr); -+ if (priv->phydev) -+ priv->phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); -+ } -+ -+ return 0; -+} -+ -+/** -+ * pfe_eth_get_pauseparam - Gets pause parameters -+ * -+ */ -+static void pfe_eth_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ -+ epause->autoneg = 0; -+ epause->tx_pause = 0; -+ epause->rx_pause = pfe_eth_pause_rx_enabled(priv); -+} -+ -+/** pfe_eth_get_hash -+ */ -+static int pfe_eth_get_hash(u8 * addr) -+{ -+ u8 temp1,temp2,temp3,temp4,temp5,temp6,temp7,temp8; -+ temp1 = addr[0] & 0x3F ; -+ temp2 = ((addr[0] & 0xC0) >> 6)| ((addr[1] & 0x0F) << 2); -+ temp3 = ((addr[1] & 0xF0) >> 4) | ((addr[2] & 0x03) << 4); -+ temp4 = (addr[2] & 0xFC) >> 2; -+ temp5 = addr[3] & 0x3F; -+ temp6 = ((addr[3] & 0xC0) >> 6) | ((addr[4] & 0x0F) << 2); -+ temp7 = ((addr[4] & 0xF0) >>4 ) | ((addr[5] & 0x03) << 4); -+ temp8 = ((addr[5] &0xFC) >> 2); -+ return (temp1 ^ temp2 ^ 
temp3 ^ temp4 ^ temp5 ^ temp6 ^ temp7 ^ temp8); -+} -+ -+#else -+ /*TODO Add pause frame support for LS1012A */ -+ -+/** pfe_eth_get_hash -+ */ -+#define HASH_BITS 6 /* #bits in hash */ -+#define CRC32_POLY 0xEDB88320 -+ -+static int pfe_eth_get_hash(u8 * addr) -+{ -+ unsigned int i, bit, data, crc, hash; -+ -+ /* calculate crc32 value of mac address */ -+ crc = 0xffffffff; -+ -+ for (i = 0; i < 6; i++) { -+ data = addr[i]; -+ for (bit = 0; bit < 8; bit++, data >>= 1) { -+ crc = (crc >> 1) ^ -+ (((crc ^ data) & 1) ? CRC32_POLY : 0); -+ } -+ } -+ -+ /* only upper 6 bits (HASH_BITS) are used -+ * which point to specific bit in he hash registers -+ */ -+ hash = (crc >> (32 - HASH_BITS)) & 0x3f; -+ -+ return hash; -+} -+ -+#endif -+ -+struct ethtool_ops pfe_ethtool_ops = { -+ .get_settings = pfe_eth_get_settings, -+ .set_settings = pfe_eth_set_settings, -+ .get_drvinfo = pfe_eth_get_drvinfo, -+ .get_regs_len = pfe_eth_gemac_reglen, -+ .get_regs = pfe_eth_gemac_get_regs, -+ .get_link = ethtool_op_get_link, -+#if defined(CONFIG_PLATFORM_C2000) -+ .get_wol = pfe_eth_get_wol, -+ .set_wol = pfe_eth_set_wol, -+ .set_pauseparam = pfe_eth_set_pauseparam, -+ .get_pauseparam = pfe_eth_get_pauseparam, -+#endif -+ .get_strings = pfe_eth_gstrings, -+ .get_sset_count = pfe_eth_stats_count, -+ .get_ethtool_stats = pfe_eth_fill_stats, -+ .get_msglevel = pfe_eth_get_msglevel, -+ .set_msglevel = pfe_eth_set_msglevel, -+ .set_coalesce = pfe_eth_set_coalesce, -+ .get_coalesce = pfe_eth_get_coalesce, -+}; -+ -+ -+ -+#if defined(CONFIG_PLATFORM_C2000) -+/** pfe_eth_mdio_reset -+ */ -+int pfe_eth_mdio_reset(struct mii_bus *bus) -+{ -+ struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv; -+ -+ netif_info(priv, hw, priv->dev, "%s\n", __func__); -+ -+#if !defined(CONFIG_PLATFORM_EMULATION) -+ mutex_lock(&bus->mdio_lock); -+ -+ /* Setup the MII Mgmt clock speed */ -+ if (priv->mii_bus) -+ gemac_set_mdc_div(priv->EMAC_baseaddr, priv->mdc_div); -+ -+ /* Reset the management interface */ -+ __raw_writel(__raw_readl(priv->EMAC_baseaddr + EMAC_NETWORK_CONTROL) | EMAC_MDIO_EN, -+ priv->EMAC_baseaddr + EMAC_NETWORK_CONTROL); -+ -+ /* Wait until the bus is free */ -+ while(!(__raw_readl(priv->EMAC_baseaddr + EMAC_NETWORK_STATUS) & EMAC_PHY_IDLE)); -+ -+ mutex_unlock(&bus->mdio_lock); -+#endif -+ -+ return 0; -+} -+ -+ -+/** pfe_eth_gemac_phy_timeout -+ * -+ */ -+static int pfe_eth_gemac_phy_timeout(struct pfe_eth_priv_s *priv, int timeout) -+{ -+ while(!(__raw_readl(priv->EMAC_baseaddr + EMAC_NETWORK_STATUS) & EMAC_PHY_IDLE)) { -+ -+ if (timeout-- <= 0) { -+ return -1; -+ } -+ -+ udelay(10); -+ } -+ -+ return 0; -+} -+ -+ -+/** pfe_eth_mdio_write -+ */ -+static int pfe_eth_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) -+{ -+ struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv; -+ u32 write_data; -+ -+#if !defined(CONFIG_PLATFORM_EMULATION) -+ -+ netif_info(priv, hw, priv->dev, "%s: phy %d\n", __func__, mii_id); -+ -+// netif_info(priv, hw, priv->dev, "%s %d %d %x\n", bus->id, mii_id, regnum, value); -+ -+ write_data = 0x50020000; -+ write_data |= ((mii_id << 23) | (regnum << 18) | value); -+ __raw_writel(write_data, priv->EMAC_baseaddr + EMAC_PHY_MANAGEMENT); -+ -+ if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)){ -+ netdev_err(priv->dev, "%s: phy MDIO write timeout\n", __func__); -+ return -1; -+ } -+ -+#endif -+ -+ return 0; -+} -+ -+ -+/** pfe_eth_mdio_read -+ */ -+static int pfe_eth_mdio_read(struct mii_bus *bus, int mii_id, int regnum) -+{ -+ struct pfe_eth_priv_s *priv 
= (struct pfe_eth_priv_s *)bus->priv; -+ u16 value = 0; -+ u32 write_data; -+ -+#if !defined(CONFIG_PLATFORM_EMULATION) -+ netif_info(priv, hw, priv->dev, "%s: phy %d\n", __func__, mii_id); -+ -+ write_data = 0x60020000; -+ write_data |= ((mii_id << 23) | (regnum << 18)); -+ -+ __raw_writel(write_data, priv->EMAC_baseaddr + EMAC_PHY_MANAGEMENT); -+ -+ if (pfe_eth_gemac_phy_timeout( priv, EMAC_MDIO_TIMEOUT)) { -+ netdev_err(priv->dev, "%s: phy MDIO read timeout\n", __func__); -+ return -1; -+ } -+ -+ value = __raw_readl(priv->EMAC_baseaddr + EMAC_PHY_MANAGEMENT) & 0xFFFF; -+#endif -+ -+// netif_info(priv, hw, priv->dev, "%s %d %d %x\n", bus->id, mii_id, regnum, value); -+ -+ return value; -+} -+ -+#else -+/** pfe_eth_mdio_reset -+ */ -+int pfe_eth_mdio_reset(struct mii_bus *bus) -+{ -+ struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv; -+ u32 phy_speed, pclk = 250000000; /*TODO this needs to be checked read from the correct source*/ -+ -+ netif_info(priv, hw, priv->dev, "%s\n", __func__); -+ -+ mutex_lock(&bus->mdio_lock); -+ -+ /* -+ * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed) -+ * -+ * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while -+ * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. -+ */ -+ phy_speed = (DIV_ROUND_UP(pclk, 4000000) << EMAC_MII_SPEED_SHIFT); -+ phy_speed |= EMAC_HOLDTIME(0x5); -+ __raw_writel(phy_speed, priv->PHY_baseaddr + EMAC_MII_CTRL_REG); -+ -+ mutex_unlock(&bus->mdio_lock); -+ -+ return 0; -+} -+ -+/** pfe_eth_gemac_phy_timeout -+ * -+ */ -+static int pfe_eth_gemac_phy_timeout(struct pfe_eth_priv_s *priv, int timeout) -+{ -+ while(!(__raw_readl(priv->PHY_baseaddr + EMAC_IEVENT_REG) & EMAC_IEVENT_MII)) { -+ -+ if (timeout-- <= 0) { -+ return -1; -+ } -+ -+ udelay(10); -+ } -+ __raw_writel(EMAC_IEVENT_MII, priv->PHY_baseaddr + EMAC_IEVENT_REG); -+ -+ return 0; -+} -+ -+static int pfe_eth_mdio_mux(u8 muxval) -+{ -+ struct i2c_adapter *a; -+ struct i2c_msg msg; -+ unsigned char buf[2]; -+ int ret; -+ -+ a = i2c_get_adapter(0); -+ if (!a) -+ return -ENODEV; -+ -+ /* set bit 1 (the second bit) of chip at 0x09, register 0x13 */ -+ buf[0] = 0x54; //reg number -+ buf[1] = (muxval << 6)| 0x3; //data -+ msg.addr = 0x66; -+ msg.buf = buf; -+ msg.len = 2; -+ msg.flags = 0; -+ ret = i2c_transfer(a, &msg, 1); -+ i2c_put_adapter(a); -+ if (ret != 1) -+ return -ENODEV; -+ return 0; -+ -+ -+} -+ -+static int pfe_eth_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) -+{ -+ struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv; -+ -+ /*FIXME Dirty hack to configure mux */ -+ if(priv->mdio_muxval) { -+ if(mii_id == 0x1) -+ pfe_eth_mdio_mux(0x1); -+ else -+ pfe_eth_mdio_mux(0x2); -+ } -+ -+ /* start a write op */ -+ __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_WR | -+ EMAC_MII_DATA_PA(mii_id) | EMAC_MII_DATA_RA(regnum) | -+ EMAC_MII_DATA_TA | EMAC_MII_DATA(value), -+ priv->PHY_baseaddr + EMAC_MII_DATA_REG); -+ -+ if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)){ -+ netdev_err(priv->dev, "%s: phy MDIO write timeout\n", __func__); -+ return -1; -+ } -+ netif_info(priv, hw, priv->dev, "%s: phy %x reg %x val %x\n", __func__, mii_id, regnum, value); -+ -+ return 0; -+ -+ -+} -+static int pfe_eth_mdio_read(struct mii_bus *bus, int mii_id, int regnum) -+{ -+ struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv; -+ u16 value = 0; -+ -+ /*FIXME Dirty hack to configure mux */ -+ if(priv->mdio_muxval){ -+ if(mii_id == 0x1) -+ pfe_eth_mdio_mux(0x1); -+ else -+ pfe_eth_mdio_mux(0x2); -+ } -+ -+ /* 
start a read op */ -+ __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD | -+ EMAC_MII_DATA_PA(mii_id) | EMAC_MII_DATA_RA(regnum) | -+ EMAC_MII_DATA_TA, priv->PHY_baseaddr + EMAC_MII_DATA_REG); -+ -+ if (pfe_eth_gemac_phy_timeout( priv, EMAC_MDIO_TIMEOUT)) { -+ netdev_err(priv->dev, "%s: phy MDIO read timeout\n", __func__); -+ return -1; -+ } -+ -+ value = EMAC_MII_DATA(__raw_readl(priv->PHY_baseaddr + EMAC_MII_DATA_REG)); -+ netif_info(priv, hw, priv->dev, "%s: phy %x reg %x val %x\n", __func__, mii_id, regnum, value); -+ return value; -+} -+#endif -+static int pfe_eth_mdio_init(struct pfe_eth_priv_s *priv, struct comcerto_mdio_platform_data *minfo) -+{ -+ struct mii_bus *bus; -+ int rc; -+ -+ netif_info(priv, drv, priv->dev, "%s\n", __func__); -+ printk( "%s\n", __func__); -+ -+#if !defined(CONFIG_PLATFORM_EMULATION) -+ bus = mdiobus_alloc(); -+ if (!bus) { -+ netdev_err(priv->dev, "mdiobus_alloc() failed\n"); -+ rc = -ENOMEM; -+ goto err0; -+ } -+ -+ bus->name = "Comcerto MDIO Bus"; -+ bus->read = &pfe_eth_mdio_read; -+ bus->write = &pfe_eth_mdio_write; -+ bus->reset = &pfe_eth_mdio_reset; -+ snprintf(bus->id, MII_BUS_ID_SIZE, "comcerto-%x", priv->id); -+ bus->priv = priv; -+ -+ bus->phy_mask = minfo->phy_mask; -+ priv->mdc_div = minfo->mdc_div; -+ -+ if (!priv->mdc_div) -+ priv->mdc_div = 64; -+ -+ bus->irq = minfo->irq; -+ -+ bus->parent = priv->pfe->dev; -+ -+ netif_info(priv, drv, priv->dev, "%s: mdc_div: %d, phy_mask: %x \n", __func__, priv->mdc_div, bus->phy_mask); -+ rc = mdiobus_register(bus); -+ if (rc) { -+ netdev_err(priv->dev, "mdiobus_register(%s) failed\n", bus->name); -+ goto err1; -+ } -+ -+ priv->mii_bus = bus; -+ pfe_eth_mdio_reset(bus); -+ -+ return 0; -+ -+err1: -+ mdiobus_free(bus); -+err0: -+ return rc; -+#else -+ return 0; -+#endif -+ -+} -+ -+/** pfe_eth_mdio_exit -+ */ -+static void pfe_eth_mdio_exit(struct mii_bus *bus) -+{ -+ if (!bus) -+ return; -+ -+ netif_info((struct pfe_eth_priv_s *)bus->priv, drv, ((struct pfe_eth_priv_s *)(bus->priv))->dev, "%s\n", __func__); -+ -+ mdiobus_unregister(bus); -+ mdiobus_free(bus); -+} -+ -+#if defined(CONFIG_PLATFORM_C2000) -+/** pfe_get_interface -+ */ -+static phy_interface_t pfe_get_interface(struct net_device *dev) -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ u32 mii_mode = priv->einfo->mii_config; -+ -+ netif_info(priv, drv, dev, "%s\n", __func__); -+ -+ if (priv->einfo->gemac_mode & (GEMAC_SW_CONF)) { -+ switch (mii_mode) { -+ case CONFIG_COMCERTO_USE_GMII: -+ return PHY_INTERFACE_MODE_GMII; -+ break; -+ case CONFIG_COMCERTO_USE_RGMII: -+ return PHY_INTERFACE_MODE_RGMII; -+ break; -+ case CONFIG_COMCERTO_USE_RMII: -+ return PHY_INTERFACE_MODE_RMII; -+ break; -+ case CONFIG_COMCERTO_USE_SGMII: -+ return PHY_INTERFACE_MODE_SGMII; -+ break; -+ -+ default : -+ case CONFIG_COMCERTO_USE_MII: -+ return PHY_INTERFACE_MODE_MII; -+ break; -+ -+ } -+ } else { -+ // Bootstrap config read from controller -+ BUG(); -+ return 0; -+ } -+} -+#endif -+ -+/** pfe_get_phydev_speed -+ */ -+static int pfe_get_phydev_speed(struct phy_device *phydev) -+{ -+ switch (phydev->speed) { -+ case 10: -+ return SPEED_10M; -+ case 100: -+ return SPEED_100M; -+ case 1000: -+ default: -+ return SPEED_1000M; -+ } -+ -+} -+ -+/** pfe_set_rgmii_speed -+ */ -+#define RGMIIPCR 0x434 -+/* RGMIIPCR bit definitions*/ -+#define SCFG_RGMIIPCR_EN_AUTO (0x00000008) -+#define SCFG_RGMIIPCR_SETSP_1000M (0x00000004) -+#define SCFG_RGMIIPCR_SETSP_100M (0x00000000) -+#define SCFG_RGMIIPCR_SETSP_10M (0x00000002) -+#define SCFG_RGMIIPCR_SETFD (0x00000001) 
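pfe_set_rgmii_speed() below open-codes a read-modify-write of the SCFG RGMIIPCR register through regmap_read()/regmap_write(): both SETSP bits are cleared, then the bit for the negotiated speed is set (100M being the all-clear encoding). The same update can be written as one call with regmap_update_bits(), which performs the RMW under the regmap's own lock; an illustrative equivalent for the 1 Gb/s case, assuming the pfe->scfg handle used below:

    /* mask = every speed bit we own; val = the one to set */
    regmap_update_bits(pfe->scfg, RGMIIPCR,
    		   SCFG_RGMIIPCR_SETSP_1000M | SCFG_RGMIIPCR_SETSP_10M,
    		   SCFG_RGMIIPCR_SETSP_1000M);

Either form works; the open-coded version simply matches the rest of the driver's regmap usage.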
-+ -+static void pfe_set_rgmii_speed(struct phy_device *phydev) -+{ -+ u32 rgmii_pcr; -+ -+ regmap_read(pfe->scfg, RGMIIPCR, &rgmii_pcr); -+ rgmii_pcr &= ~(SCFG_RGMIIPCR_SETSP_1000M|SCFG_RGMIIPCR_SETSP_10M); -+ -+ switch (phydev->speed) { -+ case 10: -+ rgmii_pcr |= SCFG_RGMIIPCR_SETSP_10M; -+ break; -+ case 1000: -+ rgmii_pcr |= SCFG_RGMIIPCR_SETSP_1000M; -+ break; -+ case 100: -+ default: -+ /* Default is 100M */ -+ break; -+ } -+ regmap_write(pfe->scfg, RGMIIPCR, rgmii_pcr); -+ -+ -+} -+/** pfe_get_phydev_duplex -+ */ -+static int pfe_get_phydev_duplex(struct phy_device *phydev) -+{ -+ //return ( phydev->duplex == DUPLEX_HALF ) ? DUP_HALF:DUP_FULL ; -+ return DUPLEX_FULL; -+} -+ -+/** pfe_eth_adjust_link -+ */ -+static void pfe_eth_adjust_link(struct net_device *dev) -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ unsigned long flags; -+ struct phy_device *phydev = priv->phydev; -+ int new_state = 0; -+ -+ netif_info(priv, drv, dev, "%s\n", __func__); -+ -+ spin_lock_irqsave(&priv->lock, flags); -+ if (phydev->link) { -+ /* Now we make sure that we can be in full duplex mode. -+ * If not, we operate in half-duplex mode. */ -+ if (phydev->duplex != priv->oldduplex) { -+ new_state = 1; -+ gemac_set_duplex(priv->EMAC_baseaddr, pfe_get_phydev_duplex(phydev)); -+ priv->oldduplex = phydev->duplex; -+ } -+ -+ if (phydev->speed != priv->oldspeed) { -+ new_state = 1; -+ gemac_set_speed(priv->EMAC_baseaddr, pfe_get_phydev_speed(phydev)); -+ if(priv->einfo->mii_config == PHY_INTERFACE_MODE_RGMII) -+ pfe_set_rgmii_speed(phydev); -+ priv->oldspeed = phydev->speed; -+ } -+ -+ if (!priv->oldlink) { -+ new_state = 1; -+ priv->oldlink = 1; -+ } -+ -+ } else if (priv->oldlink) { -+ new_state = 1; -+ priv->oldlink = 0; -+ priv->oldspeed = 0; -+ priv->oldduplex = -1; -+ } -+ -+ if (new_state && netif_msg_link(priv)) -+ phy_print_status(phydev); -+ -+ spin_unlock_irqrestore(&priv->lock, flags); -+} -+ -+ -+/** pfe_phy_exit -+ */ -+static void pfe_phy_exit(struct net_device *dev) -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ -+ netif_info(priv, drv, dev, "%s\n", __func__); -+ -+ phy_disconnect(priv->phydev); -+ priv->phydev = NULL; -+} -+ -+/** pfe_eth_stop -+ */ -+static void pfe_eth_stop( struct net_device *dev , int wake) -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ -+ netif_info(priv, drv, dev, "%s\n", __func__); -+ -+ if (wake) -+ gemac_tx_disable(priv->EMAC_baseaddr); -+ else { -+ gemac_disable(priv->EMAC_baseaddr); -+ gpi_disable(priv->GPI_baseaddr); -+ -+ if (priv->phydev) -+ phy_stop(priv->phydev); -+ } -+} -+ -+/** pfe_eth_start -+ */ -+static int pfe_eth_start( struct pfe_eth_priv_s *priv ) -+{ -+ netif_info(priv, drv, priv->dev, "%s\n", __func__); -+ -+ if (priv->phydev) -+ phy_start(priv->phydev); -+ -+ gpi_enable(priv->GPI_baseaddr); -+ gemac_enable(priv->EMAC_baseaddr); -+ -+ return 0; -+} -+ -+/*Configure on chip serdes through mdio -+ * Is there any better way to do this? 
*/
-+static void ls1012a_configure_serdes(struct net_device *dev)
-+{
-+	struct pfe_eth_priv_s *priv = pfe->eth.eth_priv[0]; // FIXME This will not work for EMAC2 as SGMII
-+	/*int value,sgmii_2500=0; */
-+	struct mii_bus *bus = priv->mii_bus;
-+
-+	netif_info(priv, drv, dev, "%s\n", __func__);
-+	/* PCS configuration done with corresponding GEMAC */
-+
-+	pfe_eth_mdio_read(bus, 0, 0);
-+	pfe_eth_mdio_read(bus, 0, 1);
-+#if 1
-+	/*These settings taken from the validation team */
-+	pfe_eth_mdio_write(bus, 0, 0x0, 0x8000);
-+	pfe_eth_mdio_write(bus, 0, 0x14, 0xb);
-+	pfe_eth_mdio_write(bus, 0, 0x4, 0x1a1);
-+	pfe_eth_mdio_write(bus, 0, 0x12, 0x400);
-+	pfe_eth_mdio_write(bus, 0, 0x13, 0x0);
-+	pfe_eth_mdio_write(bus, 0, 0x0, 0x1140);
-+	return;
-+#else
-+	/*Reset serdes */
-+	pfe_eth_mdio_write(bus, 0, 0x0, 0x8000);
-+
-+	/* SGMII IF mode + AN enable only for 1G SGMII, not for 2.5G */
-+	value = PHY_SGMII_IF_MODE_SGMII;
-+	if (!sgmii_2500)
-+		value |= PHY_SGMII_IF_MODE_AN;
-+
-+	pfe_eth_mdio_write(bus, 0, 0x14, value);
-+
-+	/* Dev ability according to SGMII specification */
-+	value = PHY_SGMII_DEV_ABILITY_SGMII;
-+	pfe_eth_mdio_write(bus, 0, 0x4, value);
-+
-+	//These values taken from validation team
-+	pfe_eth_mdio_write(bus, 0, 0x13, 0x0);
-+	pfe_eth_mdio_write(bus, 0, 0x12, 0x400);
-+
-+	/* Restart AN */
-+	value = PHY_SGMII_CR_DEF_VAL;
-+	if (!sgmii_2500)
-+		value |= PHY_SGMII_CR_RESET_AN;
-+	pfe_eth_mdio_write(bus, 0, 0, value);
-+
-+#endif
-+}
-+
-+/** pfe_phy_init
-+ *
-+ */
-+static int pfe_phy_init(struct net_device *dev)
-+{
-+	struct pfe_eth_priv_s *priv = netdev_priv(dev);
-+	struct phy_device *phydev;
-+	char phy_id[MII_BUS_ID_SIZE + 3];
-+	char bus_id[MII_BUS_ID_SIZE];
-+	phy_interface_t interface;
-+
-+	priv->oldlink = 0;
-+	priv->oldspeed = 0;
-+	priv->oldduplex = -1;
-+
-+	snprintf(bus_id, MII_BUS_ID_SIZE, "comcerto-%d", 0);
-+	snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, priv->einfo->phy_id);
-+
-+	netif_info(priv, drv, dev, "%s: %s\n", __func__, phy_id);
-+#if defined(CONFIG_PLATFORM_C2000)
-+	interface = pfe_get_interface(dev);
-+#else
-+	interface = priv->einfo->mii_config;
-+	if(interface == PHY_INTERFACE_MODE_SGMII) {
-+		/*Configure SGMII PCS */
-+		if(pfe->scfg) {
-+			/*Config MDIO from serdes */
-+			regmap_write(pfe->scfg, 0x484, 0x00000000);
-+		}
-+		ls1012a_configure_serdes(dev);
-+	}
-+
-+	if(pfe->scfg) {
-+		/*Config MDIO from PAD */
-+		regmap_write(pfe->scfg, 0x484, 0x80000000);
-+	}
-+#endif
-+
-+
-+	priv->oldlink = 0;
-+	priv->oldspeed = 0;
-+	priv->oldduplex = -1;
-+
-+	printk("%s interface %x \n", __func__, interface);
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
-+	phydev = phy_connect(dev, phy_id, &pfe_eth_adjust_link, interface);
-+#else
-+	phydev = phy_connect(dev, phy_id, &pfe_eth_adjust_link, 0, interface);
-+#endif
-+
-+	if (IS_ERR(phydev)) {
-+		netdev_err(dev, "phy_connect() failed\n");
-+		return PTR_ERR(phydev);
-+	}
-+
-+	priv->phydev = phydev;
-+	phydev->irq = PHY_POLL;
-+
-+#if defined(CONFIG_PLATFORM_C2000)
-+	/* Pause frame support */
-+	phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
-+	if (pfe_eth_pause_rx_enabled(priv))
-+		phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
-+	else
-+		phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
-+#else
-+	/*TODO Add pause frame support for LS1012A */
-+#endif
-+
-+	return 0;
-+}
-+
-+/** pfe_gemac_init
-+ */
-+static int pfe_gemac_init(struct pfe_eth_priv_s *priv)
-+{
-+	GEMAC_CFG cfg;
-+
-+	netif_info(priv, ifup, priv->dev, "%s\n", __func__);
-+
-+ /* software config */ -+ /* MII interface mode selection */ -+ switch (priv->einfo->mii_config) { -+ case CONFIG_COMCERTO_USE_GMII: -+ cfg.mode = GMII; -+ break; -+ -+ case CONFIG_COMCERTO_USE_MII: -+ cfg.mode = MII; -+ break; -+ -+ case CONFIG_COMCERTO_USE_RGMII: -+ cfg.mode = RGMII; -+ break; -+ -+ case CONFIG_COMCERTO_USE_RMII: -+ cfg.mode = RMII; -+ break; -+ -+ case CONFIG_COMCERTO_USE_SGMII: -+ cfg.mode = SGMII; -+ break; -+ -+ default: -+ cfg.mode = RGMII; -+ } -+ -+ /* Speed selection: mask the whole speed field, masking with GEMAC_SW_SPEED_1G alone can never match the 100M case */ -+ switch (priv->einfo->gemac_mode & (GEMAC_SW_SPEED_1G | GEMAC_SW_SPEED_100M)) { -+ case GEMAC_SW_SPEED_1G: -+ cfg.speed = SPEED_1000M; -+ break; -+ -+ case GEMAC_SW_SPEED_100M: -+ cfg.speed = SPEED_100M; -+ break; -+ -+ case GEMAC_SW_SPEED_10M: -+ cfg.speed = SPEED_10M; -+ break; -+ -+ default: -+ cfg.speed = SPEED_1000M; -+ } -+ -+ /* Duplex selection */ -+ cfg.duplex = ( priv->einfo->gemac_mode & GEMAC_SW_FULL_DUPLEX ) ? DUPLEX_FULL : DUPLEX_HALF; -+ -+ gemac_set_config( priv->EMAC_baseaddr, &cfg); -+ gemac_allow_broadcast( priv->EMAC_baseaddr ); -+ gemac_disable_unicast( priv->EMAC_baseaddr ); -+ gemac_disable_multicast( priv->EMAC_baseaddr ); -+ gemac_disable_fcs_rx( priv->EMAC_baseaddr ); -+ gemac_enable_1536_rx( priv->EMAC_baseaddr ); -+ gemac_enable_rx_jmb( priv->EMAC_baseaddr ); -+ gemac_enable_stacked_vlan( priv->EMAC_baseaddr ); -+ gemac_enable_pause_rx( priv->EMAC_baseaddr ); -+ gemac_set_bus_width(priv->EMAC_baseaddr, 64); -+ /*TODO just for testing, remove it later */ -+ gemac_enable_copy_all(priv->EMAC_baseaddr); -+ -+ /*GEM will perform checksum verifications*/ -+ if (priv->dev->features & NETIF_F_RXCSUM) -+ gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr); -+ else -+ gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr); -+ -+ return 0; -+} -+ -+/** pfe_eth_event_handler -+ */ -+static int pfe_eth_event_handler(void *data, int event, int qno) -+{ -+ struct pfe_eth_priv_s *priv = data; -+ -+ switch (event) { -+ case EVENT_RX_PKT_IND: -+ -+ if (qno == 0) { -+ if (napi_schedule_prep(&priv->high_napi)) { -+ netif_info(priv, intr, priv->dev, "%s: schedule high prio poll\n", __func__); -+ -+#ifdef PFE_ETH_NAPI_STATS -+ priv->napi_counters[NAPI_SCHED_COUNT]++; -+#endif -+ -+ __napi_schedule(&priv->high_napi); -+ } -+ } -+ else if (qno == 1) { -+ if (napi_schedule_prep(&priv->low_napi)) { -+ netif_info(priv, intr, priv->dev, "%s: schedule low prio poll\n", __func__); -+ -+#ifdef PFE_ETH_NAPI_STATS -+ priv->napi_counters[NAPI_SCHED_COUNT]++; -+#endif -+ __napi_schedule(&priv->low_napi); -+ } -+ } -+ else if (qno == 2) { -+ if (napi_schedule_prep(&priv->lro_napi)) { -+ netif_info(priv, intr, priv->dev, "%s: schedule lro prio poll\n", __func__); -+ -+#ifdef PFE_ETH_NAPI_STATS -+ priv->napi_counters[NAPI_SCHED_COUNT]++; -+#endif -+ __napi_schedule(&priv->lro_napi); -+ } -+ } -+ -+ break; -+ -+ case EVENT_TXDONE_IND: -+ case EVENT_HIGH_RX_WM: -+ default: -+ break; -+ } -+ -+ return 0; -+} -+ -+/** pfe_eth_open -+ */ -+static int pfe_eth_open(struct net_device *dev) -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ struct hif_client_s *client; -+ int rc; -+ -+ netif_info(priv, ifup, dev, "%s\n", __func__); -+ -+ /* Register client driver with HIF */ -+ client = &priv->client; -+ memset(client, 0, sizeof(*client)); -+ client->id = PFE_CL_GEM0 + priv->id; -+ client->tx_qn = emac_txq_cnt; -+ client->rx_qn = EMAC_RXQ_CNT; -+ client->priv = priv; -+ client->pfe = priv->pfe; -+ client->event_handler = pfe_eth_event_handler; -+ -+ /* FIXME : For now hif lib sets all tx and rx queues to same size */ -+ 
client->tx_qsize = EMAC_TXQ_DEPTH; -+ client->rx_qsize = EMAC_RXQ_DEPTH; -+ -+ if ((rc = hif_lib_client_register(client))) { -+ netdev_err(dev, "%s: hif_lib_client_register(%d) failed\n", __func__, client->id); -+ goto err0; -+ } -+ -+ netif_info(priv, drv, dev, "%s: registered client: %p\n", __func__, client); -+ -+#if defined(CONFIG_PLATFORM_C2000) -+ /* Enable gemac tx clock */ -+ clk_enable(priv->gemtx_clk); -+#endif -+ -+ pfe_gemac_init(priv); -+ -+ if (!is_valid_ether_addr(dev->dev_addr)) { -+ netdev_err(dev, "%s: invalid MAC address\n", __func__); -+ rc = -EADDRNOTAVAIL; -+ goto err1; -+ } -+ -+ gemac_set_laddrN( priv->EMAC_baseaddr, ( MAC_ADDR *)dev->dev_addr, 1 ); -+ -+ napi_enable(&priv->high_napi); -+ napi_enable(&priv->low_napi); -+ napi_enable(&priv->lro_napi); -+ -+ rc = pfe_eth_start(priv); -+ -+ netif_tx_wake_all_queues(dev); -+ -+ //pfe_ctrl_set_eth_state(priv->id, 1, dev->dev_addr); -+ -+ priv->tx_timer.expires = jiffies + ( COMCERTO_TX_RECOVERY_TIMEOUT_MS * HZ )/1000; -+ add_timer(&priv->tx_timer); -+ -+ return rc; -+ -+err1: -+ hif_lib_client_unregister(&priv->client); -+#if defined(CONFIG_PLATFORM_C2000) -+ clk_disable(priv->gemtx_clk); -+#endif -+ -+err0: -+ return rc; -+} -+/* -+ * pfe_eth_shutdown -+ */ -+int pfe_eth_shutdown( struct net_device *dev, int wake) -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ int i, qstatus; -+ unsigned long next_poll = jiffies + 1, end = jiffies + (TX_POLL_TIMEOUT_MS * HZ) / 1000; -+ int tx_pkts, prv_tx_pkts; -+ -+ netif_info(priv, ifdown, dev, "%s\n", __func__); -+ -+ del_timer_sync(&priv->tx_timer); -+ -+ for(i = 0; i < emac_txq_cnt; i++) -+ hrtimer_cancel(&priv->fast_tx_timeout[i].timer); -+ -+ netif_tx_stop_all_queues(dev); -+ -+ do { -+ tx_pkts = 0; -+ pfe_eth_flush_tx(priv, 1); -+ -+ for (i = 0; i < emac_txq_cnt; i++) -+ tx_pkts += hif_lib_tx_pending(&priv->client, i); -+ -+ if (tx_pkts) { -+ /*Don't wait forever, break if we cross max timeout */ -+ if (time_after(jiffies, end)) { -+ printk(KERN_ERR "(%s)Tx is not complete after %dmsec\n", dev->name, TX_POLL_TIMEOUT_MS); -+ break; -+ } -+ -+ printk("%s : (%s) Waiting for tx packets to free. 
Pending tx pkts = %d.\n", __func__, dev->name, tx_pkts); -+ if (need_resched()) -+ schedule(); -+ } -+ -+ } while(tx_pkts); -+ -+ end = jiffies + (TX_POLL_TIMEOUT_MS * HZ) / 1000; -+ /*Disable transmit in PFE before disabling GEMAC */ -+ //pfe_ctrl_set_eth_state(priv->id, 0, NULL); -+ -+ prv_tx_pkts = tmu_pkts_processed(priv->id); -+ /*Wait till the TMU transmits all pending packets -+ * poll tmu_qstatus and pkts processed by the TMU every 10ms -+ * Consider the TMU busy if we see the TMU queue pending or any packets processed by the TMU -+ */ -+ while(1) { -+ -+ if (time_after(jiffies, next_poll)) { -+ -+ tx_pkts = tmu_pkts_processed(priv->id); -+ qstatus = tmu_qstatus(priv->id) & 0x7ffff; -+ -+ if(!qstatus && (tx_pkts == prv_tx_pkts)) { -+ break; -+ } -+ /*Don't wait forever, break if we cross max timeout(TX_POLL_TIMEOUT_MS) */ -+ if (time_after(jiffies, end)) { -+ printk(KERN_ERR "TMU%d is busy after %dmsec\n", priv->id, TX_POLL_TIMEOUT_MS); -+ break; -+ } -+ prv_tx_pkts = tx_pkts; -+ next_poll++; -+ } -+ if (need_resched()) -+ schedule(); -+ -+ -+ } -+ /* Wait some more time for any in-flight packet to finish transmitting */ -+ next_poll = jiffies + 1; -+ while(1) { -+ if (time_after(jiffies, next_poll)) -+ break; -+ if (need_resched()) -+ schedule(); -+ } -+ -+ pfe_eth_stop(dev, wake); -+ -+ napi_disable(&priv->lro_napi); -+ napi_disable(&priv->low_napi); -+ napi_disable(&priv->high_napi); -+ -+#if defined(CONFIG_PLATFORM_C2000) -+ /* Disable gemac tx clock */ -+ clk_disable(priv->gemtx_clk); -+#endif -+ -+ hif_lib_client_unregister(&priv->client); -+ -+ return 0; -+} -+ -+/* pfe_eth_close -+ * -+ */ -+static int pfe_eth_close( struct net_device *dev ) -+{ -+ pfe_eth_shutdown(dev, 0); -+ -+ return 0; -+} -+ -+/* pfe_eth_suspend -+ * -+ * return value : 1 if the netdevice is configured to wake up the system -+ * 0 otherwise -+ */ -+int pfe_eth_suspend(struct net_device *dev) -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ int retval = 0; -+ -+ if (priv->wol) { -+ gemac_set_wol(priv->EMAC_baseaddr, priv->wol); -+ retval = 1; -+ } -+ pfe_eth_shutdown(dev, priv->wol); -+ -+ return retval; -+} -+ -+/** pfe_eth_resume -+ * -+ */ -+int pfe_eth_resume(struct net_device *dev) -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ -+ if (priv->wol) -+ gemac_set_wol(priv->EMAC_baseaddr, 0); -+ -+ return pfe_eth_open(dev); -+} -+ -+#if defined(CONFIG_PLATFORM_C2000) -+/** pfe_eth_set_device_wakeup -+ * -+ * Called when a netdevice changes its wol status. -+ * Scans the state of all interfaces and updates the PFE device -+ * wakeable state -+ */ -+static void pfe_eth_set_device_wakeup(struct pfe *pfe) -+{ -+ int i; -+ int wake = 0; -+ -+ for(i = 0; i < NUM_GEMAC_SUPPORT; i++) -+ wake |= pfe->eth.eth_priv[i]->wol; -+ -+ device_set_wakeup_enable(pfe->dev, wake); -+ //TODO Find correct IRQ mapping. 
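-+ // A possible shape for the wake-IRQ hookup these TODOs ask for, -+ // with a hypothetical pfe->hif_irq field (not present in this driver): -+ // irq_set_irq_wake(pfe->hif_irq, wake);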
-+ //TODO interface with PMU -+ //int irq_set_irq_wake(unsigned int irq, unsigned int on) -+} -+#endif -+/** pfe_eth_get_queuenum -+ * -+ */ -+static int pfe_eth_get_queuenum( struct pfe_eth_priv_s *priv, struct sk_buff *skb ) -+{ -+ int queuenum = 0; -+ unsigned long flags; -+ -+ /* Get the Fast Path queue number */ -+ /* Use conntrack mark (if conntrack exists), then packet mark (if any), then fallback to default */ -+#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK) -+ if (skb->nfct) { -+ enum ip_conntrack_info cinfo; -+ struct nf_conn *ct; -+ ct = nf_ct_get(skb, &cinfo); -+ -+ if (ct) { -+ u_int32_t connmark; -+ connmark = ct->mark; -+ -+ if ((connmark & 0x80000000) && priv->id != 0) -+ connmark >>= 16; -+ -+ queuenum = connmark & EMAC_QUEUENUM_MASK; -+ } -+ } -+ else /* continued after #endif ... */ -+#endif -+ if (skb->mark) -+ queuenum = skb->mark & EMAC_QUEUENUM_MASK; -+ else { -+ spin_lock_irqsave(&priv->lock, flags); -+ queuenum = priv->default_priority & EMAC_QUEUENUM_MASK; -+ spin_unlock_irqrestore(&priv->lock, flags); -+ } -+ -+ return queuenum; -+} -+ -+ -+ -+/** pfe_eth_might_stop_tx -+ * -+ */ -+static int pfe_eth_might_stop_tx(struct pfe_eth_priv_s *priv, int queuenum, struct netdev_queue *tx_queue, unsigned int n_desc, unsigned int n_segs) -+{ -+ int tried = 0; -+ ktime_t kt; -+ -+try_again: -+ if (unlikely((__hif_tx_avail(&pfe->hif) < n_desc) -+ || (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) -+ || (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) < n_segs))) { -+ -+ if (!tried) { -+ hif_tx_unlock(&pfe->hif); -+ pfe_eth_flush_txQ(priv, queuenum, 1, n_desc); -+ hif_lib_update_credit(&priv->client, queuenum); -+ tried = 1; -+ hif_tx_lock(&pfe->hif); -+ goto try_again; -+ } -+#ifdef PFE_ETH_TX_STATS -+ if (__hif_tx_avail(&pfe->hif) < n_desc) -+ priv->stop_queue_hif[queuenum]++; -+ else if (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) { -+ priv->stop_queue_hif_client[queuenum]++; -+ } -+ else if (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) < n_segs) { -+ priv->stop_queue_credit[queuenum]++; -+ } -+ priv->stop_queue_total[queuenum]++; -+#endif -+ netif_tx_stop_queue(tx_queue); -+ -+ kt = ktime_set(0, COMCERTO_TX_FAST_RECOVERY_TIMEOUT_MS * NSEC_PER_MSEC); -+ hrtimer_start(&priv->fast_tx_timeout[queuenum].timer, kt, HRTIMER_MODE_REL); -+ return -1; -+ } -+ else { -+ return 0; -+ } -+} -+ -+#define SA_MAX_OP 2 -+/** pfe_hif_send_packet -+ * -+ * At this level if TX fails we drop the packet -+ */ -+static void pfe_hif_send_packet( struct sk_buff *skb, struct pfe_eth_priv_s *priv, int queuenum) -+{ -+ struct skb_shared_info *sh = skb_shinfo(skb); -+ unsigned int nr_frags; -+ u32 ctrl = 0; -+ -+ netif_info(priv, tx_queued, priv->dev, "%s\n", __func__); -+ -+ if (skb_is_gso(skb)) { -+ priv->stats.tx_dropped++; -+ return; -+ } -+ -+ if (skb->ip_summed == CHECKSUM_PARTIAL) { -+ if (skb->len > 1522) { -+ skb->ip_summed = 0; -+ ctrl = 0; -+ -+ if (pfe_compute_csum(skb)){ -+ kfree_skb(skb); -+ return; -+ } -+ } -+ else -+ ctrl = HIF_CTRL_TX_CHECKSUM; -+ } -+ -+ nr_frags = sh->nr_frags; -+ -+ if (nr_frags) { -+ skb_frag_t *f; -+ int i; -+ -+ __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data, skb_headlen(skb), ctrl, HIF_FIRST_BUFFER, skb); -+ -+ for (i = 0; i < nr_frags - 1; i++) { -+ f = &sh->frags[i]; -+ __hif_lib_xmit_pkt(&priv->client, queuenum, skb_frag_address(f), skb_frag_size(f), 0x0, 0x0, skb); -+ } -+ -+ f = &sh->frags[i]; -+ -+ __hif_lib_xmit_pkt(&priv->client, queuenum, skb_frag_address(f), skb_frag_size(f), 0x0, 
HIF_LAST_BUFFER|HIF_DATA_VALID, skb); -+ -+ netif_info(priv, tx_queued, priv->dev, "%s: pkt sent successfully skb:%p nr_frags:%d len:%d\n", __func__, skb, nr_frags, skb->len); -+ } -+ else { -+ __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data, skb->len, ctrl, HIF_FIRST_BUFFER | HIF_LAST_BUFFER | HIF_DATA_VALID, skb); -+ netif_info(priv, tx_queued, priv->dev, "%s: pkt sent successfully skb:%p len:%d\n", __func__, skb, skb->len); -+ } -+ hif_tx_dma_start(); -+ priv->stats.tx_packets++; -+ priv->stats.tx_bytes += skb->len; -+ hif_lib_tx_credit_use(pfe, priv->id, queuenum, 1); -+} -+ -+/** pfe_eth_flush_txQ -+ */ -+static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int txQ_num, int from_tx, int n_desc) -+{ -+ struct sk_buff *skb; -+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->dev, txQ_num); -+ int count = max(TX_FREE_MAX_COUNT, n_desc); -+ unsigned int flags; -+ -+ netif_info(priv, tx_done, priv->dev, "%s\n", __func__); -+ -+ if (!from_tx) -+ __netif_tx_lock_bh(tx_queue); -+ -+ /* Clean HIF and client queue */ -+ while (count && (skb = hif_lib_tx_get_next_complete(&priv->client, txQ_num, &flags, count))) { -+ -+ /* FIXME : Invalid data can be skipped in hif_lib itself */ -+ if (flags & HIF_DATA_VALID) { -+ dev_kfree_skb_any(skb); -+ -+ } -+ // When called from the timer, flush all descriptors -+ if (from_tx) -+ count--; -+ } -+ -+ if (!from_tx) -+ __netif_tx_unlock_bh(tx_queue); -+} -+ -+/** pfe_eth_flush_tx -+ */ -+static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv, int force) -+{ -+ int ii; -+ -+ netif_info(priv, tx_done, priv->dev, "%s\n", __func__); -+ -+ for (ii = 0; ii < emac_txq_cnt; ii++) { -+ if (force || (time_after(jiffies, priv->client.tx_q[ii].jiffies_last_packet + (COMCERTO_TX_RECOVERY_TIMEOUT_MS * HZ)/1000))) { -+ pfe_eth_flush_txQ(priv, ii, 0, 0); //We will release everything we can based on from_tx param, so the count param can be set to any value -+ hif_lib_update_credit(&priv->client, ii); -+ } -+ } -+} -+ -+void pfe_tx_get_req_desc(struct sk_buff *skb, unsigned int *n_desc, unsigned int *n_segs) -+{ -+ struct skb_shared_info *sh = skb_shinfo(skb); -+ -+ // Scattered data -+ if (sh->nr_frags) { -+ *n_desc = sh->nr_frags + 1; -+ *n_segs = 1; -+ } -+ // Regular case -+ else { -+ *n_desc = 1; -+ *n_segs = 1; -+ } -+ return; -+} -+ -+/** pfe_eth_send_packet -+ */ -+static int pfe_eth_send_packet(struct sk_buff *skb, struct net_device *dev) -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ int txQ_num = skb_get_queue_mapping(skb); -+ int n_desc, n_segs, count; -+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->dev, txQ_num); -+ -+ netif_info(priv, tx_queued, dev, "%s\n", __func__); -+ -+ if ((!skb_is_gso(skb)) && (skb_headroom(skb) < (PFE_PKT_HEADER_SZ + sizeof(unsigned long)))) { -+ -+ netif_warn(priv, tx_err, priv->dev, "%s: copying skb\n", __func__); -+ -+ if (pskb_expand_head(skb, (PFE_PKT_HEADER_SZ + sizeof(unsigned long)), 0, GFP_ATOMIC)) { -+ /* No need to re-transmit, no way to recover*/ -+ kfree_skb(skb); -+ priv->stats.tx_dropped++; -+ return NETDEV_TX_OK; -+ } -+ } -+ -+ pfe_tx_get_req_desc(skb, &n_desc, &n_segs); -+ -+ hif_tx_lock(&pfe->hif); -+ if(unlikely(pfe_eth_might_stop_tx(priv, txQ_num, tx_queue, n_desc, n_segs))) { -+#ifdef PFE_ETH_TX_STATS -+ if(priv->was_stopped[txQ_num]) { -+ priv->clean_fail[txQ_num]++; -+ priv->was_stopped[txQ_num] = 0; -+ } -+#endif -+ hif_tx_unlock(&pfe->hif); -+ return NETDEV_TX_BUSY; -+ } -+ -+ pfe_hif_send_packet(skb, priv, txQ_num); -+ -+ hif_tx_unlock(&pfe->hif); -+ -+ 
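/* Record the transmit time for the netdev tx watchdog; in the recycle check below, ((sk_wmem_alloc << 1) > sk_sndbuf) is the "send buffer more than half full" test described in the comment. */ -+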
dev->trans_start = jiffies; -+ -+ // Recycle buffers if a socket's send buffer becomes half full or if the HIF client queue starts filling up -+ if (((count = (hif_lib_tx_pending(&priv->client, txQ_num) - HIF_CL_TX_FLUSH_MARK)) > 0) -+ || (skb->sk && ((sk_wmem_alloc_get(skb->sk) << 1) > skb->sk->sk_sndbuf))) -+ pfe_eth_flush_txQ(priv, txQ_num, 1, count); -+ -+#ifdef PFE_ETH_TX_STATS -+ priv->was_stopped[txQ_num] = 0; -+#endif -+ -+ return NETDEV_TX_OK; -+} -+ -+/** pfe_eth_select_queue -+ * -+ */ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) -+static u16 pfe_eth_select_queue( struct net_device *dev, struct sk_buff *skb, -+ void *accel_priv, select_queue_fallback_t fallback) -+#else -+static u16 pfe_eth_select_queue( struct net_device *dev, struct sk_buff *skb ) -+#endif -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ -+ return pfe_eth_get_queuenum(priv, skb); -+} -+ -+ -+/** pfe_eth_get_stats -+ */ -+static struct net_device_stats *pfe_eth_get_stats(struct net_device *dev) -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ -+ netif_info(priv, drv, dev, "%s\n", __func__); -+ -+ return &priv->stats; -+} -+ -+ -+/** pfe_eth_change_mtu -+ */ -+static int pfe_eth_change_mtu(struct net_device *dev, int new_mtu) -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ int oldsize = dev->mtu ; -+ int frame_size = new_mtu + ETH_HLEN +4; -+ -+ netif_info(priv, drv, dev, "%s\n", __func__); -+ -+ if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { -+ netif_err(priv, drv, dev, "Invalid MTU setting\n"); -+ return -EINVAL; -+ } -+ -+ if ((new_mtu > 1500) && (dev->features & NETIF_F_TSO)) -+ { -+ priv->usr_features = dev->features; -+ if (dev->features & NETIF_F_TSO) -+ { -+ netdev_err(dev, "MTU cannot be set to more than 1500 while TSO is enabled. 
Disabling TSO.\n"); -+ dev->features &= ~(NETIF_F_TSO); -+ } -+ } -+ else if ((dev->mtu > 1500) && (new_mtu <= 1500)) -+ { -+ if (priv->usr_features & NETIF_F_TSO) -+ { -+ priv->usr_features &= ~(NETIF_F_TSO); -+ dev->features |= NETIF_F_TSO; -+ netdev_err(dev, "MTU is <= 1500, enabling TSO feature.\n"); -+ } -+ } -+ -+ /* The MTU can only be changed while the interface is down */ -+ if ((oldsize != new_mtu) && (dev->flags & IFF_UP)){ -+ netdev_err(dev, "Cannot change MTU - fast_path must be disabled and ifconfig down must be issued first\n"); -+ -+ return -EINVAL; -+ } -+ -+ dev->mtu = new_mtu; -+ -+ return 0; -+} -+ -+/** pfe_eth_set_mac_address -+ */ -+static int pfe_eth_set_mac_address(struct net_device *dev, void *addr) -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ struct sockaddr *sa = addr; -+ -+ netif_info(priv, drv, dev, "%s\n", __func__); -+ -+ if (!is_valid_ether_addr(sa->sa_data)) -+ return -EADDRNOTAVAIL; -+ -+ memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); -+ -+ gemac_set_laddrN(priv->EMAC_baseaddr, (MAC_ADDR *)dev->dev_addr, 1); -+ -+ return 0; -+ -+} -+ -+/** pfe_eth_enet_addr_byte_mac -+ */ -+int pfe_eth_enet_addr_byte_mac(u8 * enet_byte_addr, MAC_ADDR *enet_addr) -+{ -+ if ((enet_byte_addr == NULL) || (enet_addr == NULL)) -+ { -+ return -1; -+ } -+ else -+ { -+ enet_addr->bottom = enet_byte_addr[0] | -+ (enet_byte_addr[1] << 8) | -+ (enet_byte_addr[2] << 16) | -+ (enet_byte_addr[3] << 24); -+ enet_addr->top = enet_byte_addr[4] | -+ (enet_byte_addr[5] << 8); -+ return 0; -+ } -+} -+ -+ -+ -+/** pfe_eth_set_multi -+ */ -+static void pfe_eth_set_multi(struct net_device *dev) -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ MAC_ADDR hash_addr; /* hash register structure */ -+ MAC_ADDR spec_addr; /* specific mac address register structure */ -+ int result; /* index into hash register to set. */ -+ int uc_count = 0; -+ struct netdev_hw_addr *ha; -+ -+ if (dev->flags & IFF_PROMISC) { -+ netif_info(priv, drv, dev, "entering promiscuous mode\n"); -+ -+ priv->promisc = 1; -+ gemac_enable_copy_all(priv->EMAC_baseaddr); -+ } else { -+ priv->promisc = 0; -+ gemac_disable_copy_all(priv->EMAC_baseaddr); -+ } -+ -+ /* Enable broadcast frame reception if required. 
*/ -+ if (dev->flags & IFF_BROADCAST) { -+ gemac_allow_broadcast(priv->EMAC_baseaddr); -+ } else { -+ netif_info(priv, drv, dev, "disabling broadcast frame reception\n"); -+ -+ gemac_no_broadcast(priv->EMAC_baseaddr); -+ } -+ -+ if (dev->flags & IFF_ALLMULTI) { -+ /* Set the hash to rx all multicast frames */ -+ hash_addr.bottom = 0xFFFFFFFF; -+ hash_addr.top = 0xFFFFFFFF; -+ gemac_set_hash(priv->EMAC_baseaddr, &hash_addr); -+ gemac_enable_multicast(priv->EMAC_baseaddr); -+ netdev_for_each_uc_addr(ha, dev) { -+ if(uc_count >= MAX_UC_SPEC_ADDR_REG) break; -+ pfe_eth_enet_addr_byte_mac(ha->addr, &spec_addr); -+ gemac_set_laddrN(priv->EMAC_baseaddr, &spec_addr, uc_count + 2); -+ uc_count++; -+ } -+ } else if ((netdev_mc_count(dev) > 0) || (netdev_uc_count(dev))) { -+ u8 *addr; -+ -+ hash_addr.bottom = 0; -+ hash_addr.top = 0; -+ -+ netdev_for_each_mc_addr(ha, dev) { -+ addr = ha->addr; -+ -+ netif_info(priv, drv, dev, "adding multicast address %X:%X:%X:%X:%X:%X to gem filter\n", -+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); -+ -+ result = pfe_eth_get_hash(addr); -+ -+ if (result >= EMAC_HASH_REG_BITS) { -+ break; -+ } else { -+ if (result < 32) { -+ hash_addr.bottom |= (1 << result); -+ } else { -+ hash_addr.top |= (1 << (result - 32)); -+ } -+ } -+ -+ } -+ -+ uc_count = -1; -+ netdev_for_each_uc_addr(ha, dev) { -+ addr = ha->addr; -+ -+ if(++uc_count < MAX_UC_SPEC_ADDR_REG) -+ { -+ netdev_info(dev, "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem filter\n", -+ addr[0], addr[1], addr[2], -+ addr[3], addr[4], addr[5]); -+ -+ pfe_eth_enet_addr_byte_mac(addr, &spec_addr); -+ gemac_set_laddrN(priv->EMAC_baseaddr, &spec_addr, uc_count + 2); -+ } -+ else -+ { -+ netif_info(priv, drv, dev, "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem hash\n", -+ addr[0], addr[1], addr[2], -+ addr[3], addr[4], addr[5]); -+ -+ result = pfe_eth_get_hash(addr); -+ if (result >= EMAC_HASH_REG_BITS) { -+ break; -+ } else { -+ if (result < 32) -+ hash_addr.bottom |= (1 << result); -+ else -+ hash_addr.top |= (1 << (result - 32)); -+ } -+ -+ -+ } -+ } -+ -+ gemac_set_hash(priv->EMAC_baseaddr, &hash_addr); -+ if(netdev_mc_count(dev)) -+ gemac_enable_multicast(priv->EMAC_baseaddr); -+ else -+ gemac_disable_multicast(priv->EMAC_baseaddr); -+ } -+ -+ if(netdev_uc_count(dev) >= MAX_UC_SPEC_ADDR_REG) -+ gemac_enable_unicast(priv->EMAC_baseaddr); -+ else -+ { -+ /* Check if there are any specific address HW registers that need -+ * to be flushed -+ * */ -+ for(uc_count = netdev_uc_count(dev); uc_count < MAX_UC_SPEC_ADDR_REG; uc_count++) -+ gemac_clear_laddrN(priv->EMAC_baseaddr, uc_count + 2); -+ -+ gemac_disable_unicast(priv->EMAC_baseaddr); -+ } -+ -+ if (dev->flags & IFF_LOOPBACK) { -+ gemac_set_loop(priv->EMAC_baseaddr, LB_LOCAL); -+ } -+ -+ return; -+} -+ -+/** pfe_eth_set_features -+ */ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) -+static int pfe_eth_set_features(struct net_device *dev, netdev_features_t features) -+#else -+static int pfe_eth_set_features(struct net_device *dev, u32 features) -+#endif -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ int rc = 0; -+ -+ if (features & NETIF_F_RXCSUM) -+ gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr); -+ else -+ gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr); -+ return rc; -+} -+ -+/** pfe_eth_fix_features -+ */ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) -+static netdev_features_t pfe_eth_fix_features(struct net_device *dev, netdev_features_t features) -+#else -+static unsigned int 
pfe_eth_fix_features(struct net_device *dev,u32 features) -+#endif -+{ -+ struct pfe_eth_priv_s *priv = netdev_priv(dev); -+ -+ if (dev->mtu > 1500) -+ { -+ if (features & (NETIF_F_TSO)) -+ { -+ priv->usr_features |= NETIF_F_TSO; -+ features &= ~(NETIF_F_TSO); -+ netdev_err(dev, "TSO cannot be enabled when the MTU is larger than 1500. Please set the MTU to 1500 or lower first.\n"); -+ } -+ } -+ -+ return features; -+} -+ -+/** pfe_eth_tx_timeout -+ */ -+void pfe_eth_tx_timeout(unsigned long data ) -+{ -+ struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)data; -+ -+ netif_info(priv, timer, priv->dev, "%s\n", __func__); -+ -+ pfe_eth_flush_tx(priv, 0); -+ -+ priv->tx_timer.expires = jiffies + ( COMCERTO_TX_RECOVERY_TIMEOUT_MS * HZ )/1000; -+ add_timer(&priv->tx_timer); -+} -+ -+/** pfe_eth_fast_tx_timeout -+ */ -+static enum hrtimer_restart pfe_eth_fast_tx_timeout(struct hrtimer *timer) -+{ -+ struct pfe_eth_fast_timer *fast_tx_timeout = container_of(timer, struct pfe_eth_fast_timer, timer); -+ struct pfe_eth_priv_s *priv = container_of(fast_tx_timeout->base, struct pfe_eth_priv_s, fast_tx_timeout); -+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->dev, fast_tx_timeout->queuenum); -+ -+ if(netif_tx_queue_stopped(tx_queue)) { -+#ifdef PFE_ETH_TX_STATS -+ priv->was_stopped[fast_tx_timeout->queuenum] = 1; -+#endif -+ netif_tx_wake_queue(tx_queue); -+ } -+ -+ return HRTIMER_NORESTART; -+} -+ -+/** pfe_eth_fast_tx_timeout_init -+ */ -+static void pfe_eth_fast_tx_timeout_init(struct pfe_eth_priv_s *priv) -+{ -+ int i; -+ for (i = 0; i < emac_txq_cnt; i++) { -+ priv->fast_tx_timeout[i].queuenum = i; -+ hrtimer_init(&priv->fast_tx_timeout[i].timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); -+ priv->fast_tx_timeout[i].timer.function = pfe_eth_fast_tx_timeout; -+ priv->fast_tx_timeout[i].base = priv->fast_tx_timeout; -+ } -+} -+ -+static struct sk_buff *pfe_eth_rx_skb(struct net_device *dev, struct pfe_eth_priv_s *priv, unsigned int qno) -+{ -+ void *buf_addr; -+ unsigned int rx_ctrl; -+ unsigned int desc_ctrl = 0; -+ struct hif_ipsec_hdr *ipsec_hdr = NULL; -+ struct sk_buff *skb; -+ struct sk_buff *skb_frag, *skb_frag_last = NULL; -+ int length = 0, offset; -+ -+ skb = priv->skb_inflight[qno]; -+ -+ if (skb && (skb_frag_last = skb_shinfo(skb)->frag_list)) { -+ while (skb_frag_last->next) -+ skb_frag_last = skb_frag_last->next; -+ } -+ -+ while (!(desc_ctrl & CL_DESC_LAST)) { -+ -+ buf_addr = hif_lib_receive_pkt(&priv->client, qno, &length, &offset, &rx_ctrl, &desc_ctrl, (void **)&ipsec_hdr); -+ if (!buf_addr) -+ goto incomplete; -+ -+#ifdef PFE_ETH_NAPI_STATS -+ priv->napi_counters[NAPI_DESC_COUNT]++; -+#endif -+ -+ /* First frag */ -+ if (desc_ctrl & CL_DESC_FIRST) { -+#if defined(CONFIG_PLATFORM_EMULATION) || defined(CONFIG_PLATFORM_PCI) -+ skb = dev_alloc_skb(PFE_BUF_SIZE); -+ if (unlikely(!skb)) { -+ goto pkt_drop; -+ } -+ -+ skb_copy_to_linear_data(skb, buf_addr, length + offset); -+ kfree(buf_addr); -+#else -+#if defined(CONFIG_COMCERTO_ZONE_DMA_NCNB) -+ skb = alloc_skb(length + offset + 32, GFP_ATOMIC); -+#else -+ skb = alloc_skb_header(PFE_BUF_SIZE, buf_addr, GFP_ATOMIC); -+#endif -+ if (unlikely(!skb)) { -+ goto pkt_drop; -+ } -+#endif -+ skb_reserve(skb, offset); -+#if defined(CONFIG_COMCERTO_ZONE_DMA_NCNB) -+ __memcpy(skb->data, buf_addr + offset, length); -+ if (ipsec_hdr) { -+ sah_local = *(unsigned int *)&ipsec_hdr->sa_handle[0]; -+ } -+ kfree(buf_addr); -+#endif -+ skb_put(skb, length); -+ skb->dev = dev; -+ -+ if ((dev->features & NETIF_F_RXCSUM) && (rx_ctrl & 
HIF_CTRL_RX_CHECKSUMMED)) -+ skb->ip_summed = CHECKSUM_UNNECESSARY; -+ else -+ skb_checksum_none_assert(skb); -+ -+ } else { -+ -+ /* Next frags */ -+ if (unlikely(!skb)) { -+ printk(KERN_ERR "%s: NULL skb_inflight\n", __func__); -+ goto pkt_drop; -+ } -+ -+#if defined(CONFIG_COMCERTO_ZONE_DMA_NCNB) -+ skb_frag = alloc_skb(length + offset + 32, GFP_ATOMIC); -+#else -+ skb_frag = alloc_skb_header(PFE_BUF_SIZE, buf_addr, GFP_ATOMIC); -+#endif -+ if (unlikely(!skb_frag)) { -+ kfree(buf_addr); -+ goto pkt_drop; -+ } -+ -+ skb_reserve(skb_frag, offset); -+#if defined(CONFIG_COMCERTO_ZONE_DMA_NCNB) -+ __memcpy(skb_frag->data, buf_addr + offset, length); -+ kfree(buf_addr); -+#endif -+ skb_put(skb_frag, length); -+ -+ skb_frag->dev = dev; -+ -+ if (skb_shinfo(skb)->frag_list) -+ skb_frag_last->next = skb_frag; -+ else -+ skb_shinfo(skb)->frag_list = skb_frag; -+ -+ skb->truesize += skb_frag->truesize; -+ skb->data_len += length; -+ skb->len += length; -+ skb_frag_last = skb_frag; -+ } -+ } -+ -+ priv->skb_inflight[qno] = NULL; -+ return skb; -+ -+incomplete: -+ priv->skb_inflight[qno] = skb; -+ return NULL; -+ -+pkt_drop: -+ priv->skb_inflight[qno] = NULL; -+ -+ if (skb) { -+ kfree_skb(skb); -+ } else { -+ kfree(buf_addr); -+ } -+ -+ priv->stats.rx_errors++; -+ -+ return NULL; -+} -+ -+ -+/** pfe_eth_poll -+ */ -+static int pfe_eth_poll(struct pfe_eth_priv_s *priv, struct napi_struct *napi, unsigned int qno, int budget) -+{ -+ struct net_device *dev = priv->dev; -+ struct sk_buff *skb; -+ int work_done = 0; -+ unsigned int len; -+ -+ netif_info(priv, intr, priv->dev, "%s\n", __func__); -+ -+#ifdef PFE_ETH_NAPI_STATS -+ priv->napi_counters[NAPI_POLL_COUNT]++; -+#endif -+ -+ do { -+ skb = pfe_eth_rx_skb(dev, priv, qno); -+ -+ if (!skb) -+ break; -+ -+ len = skb->len; -+ -+ /* Packet will be processed */ -+ skb->protocol = eth_type_trans(skb, dev); -+ -+ netif_receive_skb(skb); -+ -+ priv->stats.rx_packets++; -+ priv->stats.rx_bytes += len; -+ -+ dev->last_rx = jiffies; -+ -+ work_done++; -+ -+#ifdef PFE_ETH_NAPI_STATS -+ priv->napi_counters[NAPI_PACKET_COUNT]++; -+#endif -+ -+ } while (work_done < budget); -+ -+ /* If no Rx receive nor cleanup work was done, exit polling mode. -+ * No more netif_running(dev) check is required here , as this is checked in -+ * net/core/dev.c ( 2.6.33.5 kernel specific). 
-+ */ -+ if (work_done < budget) { -+ napi_complete(napi); -+ -+ hif_lib_event_handler_start(&priv->client, EVENT_RX_PKT_IND, qno); -+ } -+#ifdef PFE_ETH_NAPI_STATS -+ else -+ priv->napi_counters[NAPI_FULL_BUDGET_COUNT]++; -+#endif -+ -+ return work_done; -+} -+ -+/** pfe_eth_lro_poll -+ */ -+static int pfe_eth_lro_poll(struct napi_struct *napi, int budget) -+{ -+ struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s, lro_napi); -+ -+ netif_info(priv, intr, priv->dev, "%s\n", __func__); -+ -+ return pfe_eth_poll(priv, napi, 2, budget); -+} -+ -+ -+/** pfe_eth_low_poll -+ */ -+static int pfe_eth_low_poll(struct napi_struct *napi, int budget) -+{ -+ struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s, low_napi); -+ -+ netif_info(priv, intr, priv->dev, "%s\n", __func__); -+ -+ return pfe_eth_poll(priv, napi, 1, budget); -+} -+ -+/** pfe_eth_high_poll -+ */ -+static int pfe_eth_high_poll(struct napi_struct *napi, int budget ) -+{ -+ struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s, high_napi); -+ -+ netif_info(priv, intr, priv->dev, "%s\n", __func__); -+ -+ return pfe_eth_poll(priv, napi, 0, budget); -+} -+ -+static const struct net_device_ops pfe_netdev_ops = { -+ .ndo_open = pfe_eth_open, -+ .ndo_stop = pfe_eth_close, -+ .ndo_start_xmit = pfe_eth_send_packet, -+ .ndo_select_queue = pfe_eth_select_queue, -+ .ndo_get_stats = pfe_eth_get_stats, -+ .ndo_change_mtu = pfe_eth_change_mtu, -+ .ndo_set_mac_address = pfe_eth_set_mac_address, -+ .ndo_set_rx_mode = pfe_eth_set_multi, -+ .ndo_set_features = pfe_eth_set_features, -+ .ndo_fix_features = pfe_eth_fix_features, -+ .ndo_validate_addr = eth_validate_addr, -+}; -+ -+ -+/** pfe_eth_init_one -+ */ -+ -+static int pfe_eth_init_one( struct pfe *pfe, int id ) -+{ -+ struct net_device *dev = NULL; -+ struct pfe_eth_priv_s *priv = NULL; -+ struct comcerto_eth_platform_data *einfo; -+ struct comcerto_mdio_platform_data *minfo; -+ struct comcerto_pfe_platform_data *pfe_info; -+ int err; -+ -+ /* Extract platform data */ -+#if defined(CONFIG_PLATFORM_EMULATION) || defined(CONFIG_PLATFORM_PCI) -+ pfe_info = (struct comcerto_pfe_platform_data *) &comcerto_pfe_pdata; -+#else -+ pfe_info = (struct comcerto_pfe_platform_data *) pfe->dev->platform_data; -+#endif -+ if (!pfe_info) { -+ printk(KERN_ERR "%s: pfe missing additional platform data\n", __func__); -+ err = -ENODEV; -+ goto err0; -+ } -+ -+ einfo = (struct comcerto_eth_platform_data *) pfe_info->comcerto_eth_pdata; -+ -+ /* einfo should never be NULL, but no harm in having this check */ -+ if (!einfo) { -+ printk(KERN_ERR "%s: pfe missing additional gemacs platform data\n", __func__); -+ err = -ENODEV; -+ goto err0; -+ } -+ -+ minfo = (struct comcerto_mdio_platform_data *) pfe_info->comcerto_mdio_pdata; -+ -+ /* minfo should never be NULL, but no harm in having this check */ -+ if (!minfo) { -+ printk(KERN_ERR "%s: pfe missing additional mdios platform data\n", __func__); -+ err = -ENODEV; -+ goto err0; -+ } -+ -+ /* -+ * FIXME: Need to check some flag in "einfo" to know whether -+ * GEMAC is enabled or not. 
-+ */ -+ -+ /* Create an ethernet device instance */ -+ dev = alloc_etherdev_mq(sizeof (*priv), emac_txq_cnt); -+ -+ if (!dev) { -+ printk(KERN_ERR "%s: gemac %d device allocation failed\n", __func__, einfo[id].gem_id); -+ err = -ENOMEM; -+ goto err0; -+ } -+ -+ priv = netdev_priv(dev); -+ priv->dev = dev; -+ priv->id = einfo[id].gem_id; -+ priv->pfe = pfe; -+ -+#if defined(CONFIG_PLATFORM_C2000) -+ /* get gemac tx clock */ -+ priv->gemtx_clk = clk_get(NULL, "gemtx"); -+ -+ if (IS_ERR(priv->gemtx_clk)) { -+ printk(KERN_ERR "%s: Unable to get the clock for gemac %d\n", __func__, priv->id); -+ err = -ENODEV; -+ goto err1; -+ } -+#endif -+ -+ pfe->eth.eth_priv[id] = priv; -+ -+ /* Set the info in the priv to the current info */ -+ priv->einfo = &einfo[id]; -+ priv->EMAC_baseaddr = cbus_emac_base[id]; -+ priv->PHY_baseaddr = cbus_emac_base[0]; -+ priv->mdio_muxval = einfo[id].mdio_muxval; -+ priv->GPI_baseaddr = cbus_gpi_base[id]; -+ -+ /* FIXME : For now TMU queue numbers hardcoded, later should be taken from pfe.h */ -+#define HIF_GEMAC_TMUQ_BASE 6 -+ priv->low_tmuQ = HIF_GEMAC_TMUQ_BASE + (id * 2); -+ priv->high_tmuQ = priv->low_tmuQ + 1; -+ -+ spin_lock_init(&priv->lock); -+ priv->tx_timer.data = (unsigned long)priv; -+ priv->tx_timer.function = pfe_eth_tx_timeout; -+ priv->tx_timer.expires = jiffies + ( COMCERTO_TX_RECOVERY_TIMEOUT_MS * HZ )/1000; -+ init_timer(&priv->tx_timer); -+ -+ pfe_eth_fast_tx_timeout_init(priv); -+ -+ /* Copy the station address into the dev structure, */ -+ memcpy(dev->dev_addr, einfo[id].mac_addr, ETH_ALEN); -+ -+ /* Initialize mdio */ -+ if (minfo[id].enabled) { -+ if ((err = pfe_eth_mdio_init(priv, &minfo[id]))) { -+ netdev_err(dev, "%s: pfe_eth_mdio_init() failed\n", __func__); -+ goto err2; -+ } -+ } -+ -+ dev->mtu = 1500; -+ -+ /* supported features */ -+ dev->hw_features = NETIF_F_SG; -+ /* Enable after checksum offload is validated -+ dev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | -+ NETIF_F_SG; */ -+ -+ /* enabled by default */ -+ dev->features = dev->hw_features; -+ -+ priv->usr_features = dev->features; -+ -+ dev->netdev_ops = &pfe_netdev_ops; -+ -+ dev->ethtool_ops = &pfe_ethtool_ops; -+ -+ /* Enable basic messages by default */ -+ priv->msg_enable = NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK | NETIF_MSG_PROBE; -+ -+ netif_napi_add(dev, &priv->low_napi, pfe_eth_low_poll, HIF_RX_POLL_WEIGHT - 16); -+ netif_napi_add(dev, &priv->high_napi, pfe_eth_high_poll, HIF_RX_POLL_WEIGHT - 16); -+ netif_napi_add(dev, &priv->lro_napi, pfe_eth_lro_poll, HIF_RX_POLL_WEIGHT - 16); -+ -+ err = register_netdev(dev); -+ -+ if (err) { -+ netdev_err(dev, "register_netdev() failed\n"); -+ goto err3; -+ } -+ -+ if (!(priv->einfo->phy_flags & GEMAC_NO_PHY)) { -+ err = pfe_phy_init(dev); -+ if (err) { -+ netdev_err(dev, "%s: pfe_phy_init() failed\n", __func__); -+ goto err4; -+ } -+ } -+ -+ -+ /* Create all the sysfs files */ -+ if(pfe_eth_sysfs_init(dev)) -+ goto err4; -+ -+ netif_info(priv, probe, dev, "%s: created interface, baseaddr: %p\n", __func__, priv->EMAC_baseaddr); -+ -+ return 0; -+err4: -+ unregister_netdev(dev); -+err3: -+ pfe_eth_mdio_exit(priv->mii_bus); -+err2: -+#if defined(CONFIG_PLATFORM_C2000) -+ clk_put(priv->gemtx_clk); -+err1: -+#endif -+ free_netdev(priv->dev); -+ -+err0: -+ return err; -+} -+ -+/** pfe_eth_init -+ */ -+int pfe_eth_init(struct pfe *pfe) -+{ -+ int ii = 0; -+ int err; -+ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ cbus_emac_base[0] = EMAC1_BASE_ADDR; -+ cbus_emac_base[1] = EMAC2_BASE_ADDR; -+ -+ 
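/* Per-GEMAC CBUS base-address tables; on LS1012A only two GEMACs exist (NUM_GEMAC_SUPPORT is 2 there), so the third EMAC/GPI entries below are filled in only for non-LS1012A platforms. */ -+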
cbus_gpi_base[0] = EGPI1_BASE_ADDR; -+ cbus_gpi_base[1] = EGPI2_BASE_ADDR; -+ -+#if !defined(CONFIG_PLATFORM_LS1012A) -+ cbus_emac_base[2] = EMAC3_BASE_ADDR; -+ cbus_gpi_base[2] = EGPI3_BASE_ADDR; -+#endif -+ -+ for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) { -+ if ((err = pfe_eth_init_one(pfe, ii))) -+ goto err0; -+ } -+ -+ return 0; -+ -+err0: -+ /* Unwind the interfaces that were already initialized */ -+ while(ii--){ -+ pfe_eth_exit_one( pfe->eth.eth_priv[ii] ); -+ } -+ -+ return err; -+} -+ -+/** pfe_eth_exit_one -+ */ -+static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv) -+{ -+ netif_info(priv, probe, priv->dev, "%s\n", __func__); -+ -+ pfe_eth_sysfs_exit(priv->dev); -+ -+#if defined(CONFIG_PLATFORM_C2000) -+ clk_put(priv->gemtx_clk); -+#endif -+ -+ unregister_netdev(priv->dev); -+ -+ pfe_eth_mdio_exit(priv->mii_bus); -+ -+ if (!(priv->einfo->phy_flags & GEMAC_NO_PHY)) -+ pfe_phy_exit(priv->dev); -+ -+ free_netdev(priv->dev); -+} -+ -+/** pfe_eth_exit -+ */ -+void pfe_eth_exit(struct pfe *pfe) -+{ -+ int ii; -+ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ for(ii = 0; ii < NUM_GEMAC_SUPPORT; ii++ ) { -+ /* -+ * FIXME: Need to check some flag in "einfo" to know whether -+ * GEMAC is enabled or not. -+ */ -+ -+ pfe_eth_exit_one(pfe->eth.eth_priv[ii]); -+ } -+} -+ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/pfe_eth.h -@@ -0,0 +1,384 @@ -+/* -+ * -+ * Copyright (C) 2007 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#ifndef _PFE_ETH_H_ -+#define _PFE_ETH_H_ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define PFE_ETH_NAPI_STATS -+#define PFE_ETH_TX_STATS -+ -+#define PFE_ETH_FRAGS_MAX (65536/HIF_RX_PKT_MIN_SIZE) -+#define LRO_LEN_COUNT_MAX 32 -+#define LRO_NB_COUNT_MAX 32 -+ -+#if defined(CONFIG_PLATFORM_PCI) || defined(CONFIG_PLATFORM_EMULATION) || defined(CONFIG_PLATFORM_LS1012A) -+ -+#define CONFIG_COMCERTO_GEMAC 1 -+ -+#define CONFIG_COMCERTO_USE_MII 1 -+#define CONFIG_COMCERTO_USE_RMII 2 -+#define CONFIG_COMCERTO_USE_GMII 4 -+#define CONFIG_COMCERTO_USE_RGMII 8 -+#define CONFIG_COMCERTO_USE_SGMII 16 -+ -+#define GEMAC_SW_CONF ((1 << 8) | (1 << 11)) // GEMAC configured by SW -+#define GEMAC_PHY_CONF 0 // GEMAC configured by phy lines (not for MII/GMII) -+#define GEMAC_SW_FULL_DUPLEX (1 << 9) -+#define GEMAC_SW_SPEED_10M (0 << 12) -+#define GEMAC_SW_SPEED_100M (1 << 12) -+#define GEMAC_SW_SPEED_1G (2 << 12) -+ -+#define GEMAC_NO_PHY (1 << 0) // set if no phy connected to MAC (e.g. an ethernet switch). 
In this case use MAC fixed configuration -+#define GEMAC_PHY_RGMII_ADD_DELAY (1 << 1) -+ -+/* gemac to interface name assignment */ -+#define GEMAC0_ITF_NAME "eth5" -+#define GEMAC1_ITF_NAME "eth6" -+#define GEMAC2_ITF_NAME "eth7" -+ -+#define GEMAC0_MAC { 0x00, 0xED, 0xCD, 0xEF, 0xAA, 0xCC } -+#define GEMAC1_MAC { 0x00, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E } -+ -+struct comcerto_eth_platform_data { -+ /* device specific information */ -+ u32 device_flags; -+ char name[16]; -+ -+ -+ /* board specific information */ -+ u32 mii_config; -+ u32 gemac_mode; -+ u32 phy_flags; -+ u32 gem_id; -+ u32 bus_id; -+ u32 phy_id; -+ u32 mdio_muxval; -+ u8 mac_addr[ETH_ALEN]; -+}; -+ -+struct comcerto_mdio_platform_data { -+ int enabled; -+ int irq[32]; -+ u32 phy_mask; -+ int mdc_div; -+}; -+ -+struct comcerto_pfe_platform_data -+{ -+ struct comcerto_eth_platform_data comcerto_eth_pdata[3]; -+ struct comcerto_mdio_platform_data comcerto_mdio_pdata[3]; -+}; -+#if !defined(CONFIG_PLATFORM_LS1012A) -+static struct comcerto_pfe_platform_data comcerto_pfe_pdata = { -+ .comcerto_eth_pdata[0] = { -+ .name = GEMAC0_ITF_NAME, -+ .device_flags = CONFIG_COMCERTO_GEMAC, -+ .mii_config = CONFIG_COMCERTO_USE_MII, -+ .gemac_mode = GEMAC_SW_CONF | GEMAC_SW_FULL_DUPLEX | GEMAC_SW_SPEED_100M, -+#if defined(CONFIG_PLATFORM_EMULATION) || defined(CONFIG_PLATFORM_PCI) -+ .phy_flags = GEMAC_NO_PHY, -+#else -+ .phy_flags = GEMAC_PHY_RGMII_ADD_DELAY, -+#endif -+ .bus_id = 0, -+ .phy_id = 0, -+ .gem_id = 0, -+ .mac_addr = (u8[])GEMAC0_MAC, -+ }, -+ -+ .comcerto_eth_pdata[1] = { -+ .name = GEMAC1_ITF_NAME, -+ .device_flags = CONFIG_COMCERTO_GEMAC, -+ .mii_config = CONFIG_COMCERTO_USE_RGMII, -+ .gemac_mode = GEMAC_SW_CONF | GEMAC_SW_FULL_DUPLEX | GEMAC_SW_SPEED_1G, -+ .phy_flags = GEMAC_NO_PHY, -+ .gem_id = 1, -+ .mac_addr = (u8[])GEMAC1_MAC, -+ }, -+ -+ .comcerto_eth_pdata[2] = { -+ .name = GEMAC2_ITF_NAME, -+ }, -+ -+ .comcerto_mdio_pdata[0] = { -+ .enabled = 1, -+ .phy_mask = 0xFFFFFFFE, -+ .mdc_div = 96, -+ .irq = { -+ [0] = PHY_POLL, -+ }, -+ }, -+}; -+#endif -+#endif -+ -+#if defined(CONFIG_PLATFORM_LS1012A) -+#define NUM_GEMAC_SUPPORT 2 -+#define DRV_NAME "ls1012a-geth" -+#else -+#define NUM_GEMAC_SUPPORT 3 -+#define DRV_NAME "c2000-geth" -+#endif -+#define COMCERTO_INFOSTR_LEN 32 -+#define COMCERTO_TX_RECOVERY_TIMEOUT_MS 500 -+#define COMCERTO_TX_FAST_RECOVERY_TIMEOUT_MS 3 -+#define TX_POLL_TIMEOUT_MS 1000 -+ -+#define EMAC_TXQ_CNT 16 -+#define EMAC_TXQ_DEPTH (HIF_TX_DESC_NT) -+ -+#define JUMBO_FRAME_SIZE 10258 -+/** -+ * Client Tx queue threshold, for txQ flush condition. -+ * It must be smaller than the queue size (in case we ever change it in the future). -+ */ -+#define HIF_CL_TX_FLUSH_MARK 32 -+ -+/** -+ * Max number of TX resources (HIF descriptors or skbs) that will be released -+ * in a single go during batch recycling. -+ * Should be lower than the flush mark so the SW can provide the HW with a -+ * continuous stream of packets instead of bursts. -+ */ -+#define TX_FREE_MAX_COUNT 16 -+#define EMAC_RXQ_CNT 3 -+#define EMAC_RXQ_DEPTH HIF_RX_DESC_NT /* make sure clients can receive a full burst of packets */ -+#define EMAC_RMON_TXBYTES_POS 0x00 -+#define EMAC_RMON_RXBYTES_POS 0x14 -+ -+#define EMAC_QUEUENUM_MASK (emac_txq_cnt - 1) -+#define EMAC_MDIO_TIMEOUT 1000 -+#define MAX_UC_SPEC_ADDR_REG 31 -+ -+ -+/* The set of statistics registers implemented in the Cadence MAC. -+ * The statistics registers implemented are a subset of all the statistics -+ * available, but contains all the compulsory ones. 
-+ * For full descriptions on the registers, refer to the Cadence MAC programmers -+ * guide or the IEEE 802.3 specifications. -+ */ -+struct gemac_stats{ -+ u32 octets_tx_bot; /* Lower 32-bits for number of octets tx'd */ -+ u32 octets_tx_top; /* Upper 16-bits for number of octets tx'd */ -+ u32 frames_tx; /* Number of frames transmitted OK */ -+ u32 broadcast_tx; /* Number of broadcast frames transmitted */ -+ u32 multicast_tx; /* Number of multicast frames transmitted */ -+ u32 pause_tx; /* Number of pause frames transmitted. */ -+ u32 frame64_tx; /* Number of 64byte frames transmitted */ -+ u32 frame65_127_tx; /* Number of 65-127 byte frames transmitted */ -+ u32 frame128_255_tx; /* Number of 128-255 byte frames transmitted */ -+ u32 frame256_511_tx; /* Number of 256-511 byte frames transmitted */ -+ u32 frame512_1023_tx; /* Number of 512-1023 byte frames transmitted */ -+ u32 frame1024_1518_tx; /* Number of 1024-1518 byte frames transmitted*/ -+ u32 frame1519_tx; /* Number of frames greater than 1518 bytes tx*/ -+ u32 tx_urun; /* Transmit underrun errors due to DMA */ -+ u32 single_col; /* Number of single collision frames */ -+ u32 multi_col; /* Number of multi collision frames */ -+ u32 excess_col; /* Number of excessive collision frames. */ -+ u32 late_col; /* Collisions occurring after slot time */ -+ u32 def_tx; /* Frames deferred due to crs */ -+ u32 crs_errors; /* Errors caused by crs not being asserted. */ -+ u32 octets_rx_bot; /* Lower 32-bits for number of octets rx'd */ -+ u32 octets_rx_top; /* Upper 16-bits for number of octets rx'd */ -+ u32 frames_rx; /* Number of frames received OK */ -+ u32 broadcast_rx; /* Number of broadcast frames received */ -+ u32 multicast_rx; /* Number of multicast frames received */ -+ u32 pause_rx; /* Number of pause frames received. */ -+ u32 frame64_rx; /* Number of 64byte frames received */ -+ u32 frame65_127_rx; /* Number of 65-127 byte frames received */ -+ u32 frame128_255_rx; /* Number of 128-255 byte frames received */ -+ u32 frame256_511_rx; /* Number of 256-511 byte frames received */ -+ u32 frame512_1023_rx; /* Number of 512-1023 byte frames received */ -+ u32 frame1024_1518_rx; /* Number of 1024-1518 byte frames received*/ -+ u32 frame1519_rx; /* Number of frames greater than 1518 bytes rx*/ -+ u32 usize_frames; /* Frames received less than min of 64 bytes */ -+ u32 excess_length; /* Number of excessive length frames rx */ -+ u32 jabbers; /* Excessive length + crc or align errors. */ -+ u32 fcs_errors; /* Number of frames received with crc errors */ -+ u32 length_check_errors;/* Number of frames with incorrect length */ -+ u32 rx_symbol_errors; /* Number of times rx_er asserted during rx */ -+ u32 align_errors; /* Frames received without an integer number of 
bytes */ -+ u32 rx_res_errors; /* Number of times buffers ran out during rx */ -+ u32 rx_orun; /* Receive overrun errors due to DMA */ -+ u32 ip_cksum; /* IP header checksum errors */ -+ u32 tcp_cksum; /* TCP checksum errors */ -+ u32 udp_cksum; /* UDP checksum errors */ -+}; -+ -+#define EMAC_REG_SPACE sizeof(struct gemac_reg) -+#define EMAC_RMON_LEN (sizeof(struct gemac_stats)/sizeof(u32)) -+ -+ -+struct pfe_eth_fast_timer { -+ int queuenum; -+ struct hrtimer timer; -+ void * base; -+}; -+ -+typedef struct pfe_eth_priv_s -+{ -+ struct pfe *pfe; -+ struct hif_client_s client; -+ struct napi_struct lro_napi; -+ struct napi_struct low_napi; -+ struct napi_struct high_napi; -+ int low_tmuQ; -+ int high_tmuQ; -+ struct net_device_stats stats; -+ struct net_device *dev; -+ int id; -+ int promisc; -+ unsigned int msg_enable; -+ unsigned int usr_features; -+ -+ spinlock_t lock; -+ unsigned int event_status; -+ int irq; -+ void* EMAC_baseaddr; -+ void* PHY_baseaddr; /* This points to the EMAC base from where we access PHY */ -+ void* GPI_baseaddr; -+ int mdio_muxval; -+ /* PHY stuff */ -+ struct phy_device *phydev; -+ int oldspeed; -+ int oldduplex; -+ int oldlink; -+ /* mdio info */ -+ int mdc_div; -+ struct mii_bus *mii_bus; -+ struct clk *gemtx_clk; -+ int wol; -+ -+ int default_priority; -+ struct timer_list tx_timer; -+ struct pfe_eth_fast_timer fast_tx_timeout[EMAC_TXQ_CNT]; -+ -+ struct comcerto_eth_platform_data *einfo; -+ struct sk_buff *skb_inflight[EMAC_RXQ_CNT + 6]; -+ -+#ifdef PFE_ETH_LRO_STATS -+ unsigned int lro_len_counters[LRO_LEN_COUNT_MAX]; -+ unsigned int lro_nb_counters[LRO_NB_COUNT_MAX]; //TODO change to exact max number when RX scatter done -+#endif -+ -+ -+#ifdef PFE_ETH_TX_STATS -+ unsigned int stop_queue_total[EMAC_TXQ_CNT]; -+ unsigned int stop_queue_hif[EMAC_TXQ_CNT]; -+ unsigned int stop_queue_hif_client[EMAC_TXQ_CNT]; -+ unsigned int stop_queue_credit[EMAC_TXQ_CNT]; -+ unsigned int clean_fail[EMAC_TXQ_CNT]; -+ unsigned int was_stopped[EMAC_TXQ_CNT]; -+#endif -+ -+#ifdef PFE_ETH_NAPI_STATS -+ unsigned int napi_counters[NAPI_MAX_COUNT]; -+#endif -+ unsigned int frags_inflight[EMAC_RXQ_CNT + 6]; -+ -+}pfe_eth_priv_t; -+ -+struct pfe_eth { -+ struct pfe_eth_priv_s *eth_priv[3]; -+}; -+ -+int pfe_eth_init(struct pfe *pfe); -+void pfe_eth_exit(struct pfe *pfe); -+int pfe_eth_suspend(struct net_device *dev); -+int pfe_eth_resume(struct net_device *dev); -+int pfe_eth_mdio_reset(struct mii_bus *bus); -+ -+/** pfe_compute_csum -+ * -+ */ -+static int inline pfe_compute_csum(struct sk_buff *skb) -+{ -+ struct skb_shared_info *sh; -+ unsigned int nr_frags; -+ skb_frag_t *f; -+ u32 csum = 0; -+ int i; -+ int len; -+ -+ /* Make sure that no intermediate buffers/fragments are odd byte aligned */ -+ if (skb_is_nonlinear(skb)) { -+ int linearize = 0; -+ -+ sh = skb_shinfo(skb); -+ nr_frags = sh->nr_frags; -+ len = skb_headlen(skb) - skb_transport_offset(skb); -+ -+ if (len & 0x1) { -+ linearize = 1; -+ //printk("#1 Odd length %d\n", len); -+ } -+ else { -+ for (i = 0; i < nr_frags - 1; i++) { -+ f = &sh->frags[i]; -+ len = skb_frag_size(f); -+ -+ if (len & 0x1) { -+ linearize = 1; -+ //printk("#2 %d Odd length %d\n", i, len); -+ break; -+ } -+ } -+ } -+ -+ if (linearize) -+ if (skb_linearize(skb)) -+ return -1; -+ } -+ -+ /* Compute checksum */ -+ if (!skb_is_nonlinear(skb)) { -+ *(u16*)(skb_transport_header(skb) + skb->csum_offset) = csum_fold(csum_partial(skb_transport_header(skb), skb->len - skb_transport_offset(skb), 0)); -+ } -+ else { -+ sh = skb_shinfo(skb); -+ nr_frags = 
sh->nr_frags; -+ -+ if (nr_frags) { -+ csum = csum_partial(skb_transport_header(skb), skb_headlen(skb) - skb_transport_offset(skb), 0); -+ -+ for (i = 0; i < nr_frags - 1; i++) { -+ f = &sh->frags[i]; -+ csum = csum_partial(skb_frag_address(f), skb_frag_size(f), csum); -+ } -+ -+ f = &sh->frags[i]; -+ *(u16*)(skb_transport_header(skb) + skb->csum_offset) = csum_fold(csum_partial(skb_frag_address(f), skb_frag_size(f), csum)); -+ } -+ } -+ -+ return 0; -+} -+ -+ -+ -+#endif /* _PFE_ETH_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/pfe_firmware.c -@@ -0,0 +1,322 @@ -+/* -+ * -+ * Copyright (C) 2007 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+/** @file -+ * Contains all the functions to handle parsing and loading of PE firmware files. -+ */ -+#include -+ -+#include "pfe_mod.h" -+#include "pfe_firmware.h" -+#include "pfe/pfe.h" -+ -+static Elf32_Shdr * get_elf_section_header(const struct firmware *fw, const char *section) -+{ -+ Elf32_Ehdr *elf_hdr = (Elf32_Ehdr *)fw->data; -+ Elf32_Shdr *shdr, *shdr_shstr; -+ Elf32_Off e_shoff = be32_to_cpu(elf_hdr->e_shoff); -+ Elf32_Half e_shentsize = be16_to_cpu(elf_hdr->e_shentsize); -+ Elf32_Half e_shnum = be16_to_cpu(elf_hdr->e_shnum); -+ Elf32_Half e_shstrndx = be16_to_cpu(elf_hdr->e_shstrndx); -+ Elf32_Off shstr_offset; -+ Elf32_Word sh_name; -+ const char *name; -+ int i; -+ -+ /* Section header strings */ -+ shdr_shstr = (Elf32_Shdr *)(fw->data + e_shoff + e_shstrndx * e_shentsize); -+ shstr_offset = be32_to_cpu(shdr_shstr->sh_offset); -+ -+ for (i = 0; i < e_shnum; i++) { -+ shdr = (Elf32_Shdr *)(fw->data + e_shoff + i * e_shentsize); -+ -+ sh_name = be32_to_cpu(shdr->sh_name); -+ -+ name = (const char *)(fw->data + shstr_offset + sh_name); -+ -+ if (!strcmp(name, section)) -+ return shdr; -+ } -+ -+ printk(KERN_ERR "%s: didn't find section %s\n", __func__, section); -+ -+ return NULL; -+} -+ -+static unsigned long get_elf_section(const struct firmware *fw, const char *section) -+{ -+ Elf32_Shdr *shdr = get_elf_section_header(fw, section); -+ -+ if (shdr) -+ return be32_to_cpu(shdr->sh_addr); -+ else -+ return -1; -+} -+ -+#if defined(CFG_DIAGS) -+static int pfe_get_diags_info(const struct firmware *fw, struct pfe_diags_info *diags_info) -+{ -+ Elf32_Shdr *shdr; -+ unsigned long offset, size; -+ -+ shdr = get_elf_section_header(fw, ".pfe_diags_str"); -+ if (shdr) -+ { -+ offset = be32_to_cpu(shdr->sh_offset); -+ size = be32_to_cpu(shdr->sh_size); -+ diags_info->diags_str_base = be32_to_cpu(shdr->sh_addr); -+ diags_info->diags_str_size = size; -+ diags_info->diags_str_array = pfe_kmalloc(size, GFP_KERNEL); -+ memcpy(diags_info->diags_str_array, fw->data+offset, size); -+ -+ return 0; -+ } else -+ { -+ return -1; -+ } -+} -+#endif -+ -+static void pfe_check_version_info(const struct firmware *fw) -+{ -+ static char 
*version = NULL; -+ -+ Elf32_Shdr *shdr = get_elf_section_header(fw, ".version"); -+ -+ if (shdr) -+ { -+ if(!version) -+ { -+ /* this is the first fw we load, use its version string as reference (whatever it is) */ -+ version = (char *)(fw->data + be32_to_cpu(shdr->sh_offset)); -+ -+ printk(KERN_INFO "PFE binary version: %s\n", version); -+ } -+ else -+ { -+ /* at least one firmware has already been loaded, so check this one against the reference version */ -+ if(strcmp(version, (char *)(fw->data + be32_to_cpu(shdr->sh_offset)))) -+ { -+ printk(KERN_INFO "WARNING: PFE firmware binaries are from incompatible versions\n"); -+ } -+ } -+ } -+ else -+ { -+ /* version cannot be verified, a potential issue that should be reported */ -+ printk(KERN_INFO "WARNING: PFE firmware version cannot be verified\n"); -+ } -+} -+ -+/** PFE elf firmware loader. -+* Loads an elf firmware image into a list of PE's (specified using a bitmask) -+* -+* @param pe_mask Mask of PE id's to load firmware to -+* @param fw Pointer to the firmware image -+* -+* @return 0 on success, a negative value on error -+* -+*/ -+int pfe_load_elf(int pe_mask, const struct firmware *fw, struct pfe *pfe) -+{ -+ Elf32_Ehdr *elf_hdr = (Elf32_Ehdr *)fw->data; -+ Elf32_Half sections = be16_to_cpu(elf_hdr->e_shnum); -+ Elf32_Shdr *shdr = (Elf32_Shdr *) (fw->data + be32_to_cpu(elf_hdr->e_shoff)); -+ int id, section; -+ int rc; -+ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ /* Some sanity checks */ -+ if (strncmp(&elf_hdr->e_ident[EI_MAG0], ELFMAG, SELFMAG)) -+ { -+ printk(KERN_ERR "%s: incorrect elf magic number\n", __func__); -+ return -EINVAL; -+ } -+ -+ if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32) -+ { -+ printk(KERN_ERR "%s: incorrect elf class(%x)\n", __func__, elf_hdr->e_ident[EI_CLASS]); -+ return -EINVAL; -+ } -+ -+ if (elf_hdr->e_ident[EI_DATA] != ELFDATA2MSB) -+ { -+ printk(KERN_ERR "%s: incorrect elf data(%x)\n", __func__, elf_hdr->e_ident[EI_DATA]); -+ return -EINVAL; -+ } -+ -+ if (be16_to_cpu(elf_hdr->e_type) != ET_EXEC) -+ { -+ printk(KERN_ERR "%s: incorrect elf file type(%x)\n", __func__, be16_to_cpu(elf_hdr->e_type)); -+ return -EINVAL; -+ } -+ -+ for (section = 0; section < sections; section++, shdr++) -+ { -+ if (!(be32_to_cpu(shdr->sh_flags) & (SHF_WRITE | SHF_ALLOC | SHF_EXECINSTR))) -+ continue; -+ -+ for (id = 0; id < MAX_PE; id++) -+ if (pe_mask & (1 << id)) -+ { -+ rc = pe_load_elf_section(id, fw->data, shdr, pfe->dev); -+ if (rc < 0) -+ goto err; -+ } -+ } -+ -+ pfe_check_version_info(fw); -+ -+ return 0; -+ -+err: -+ return rc; -+} -+ -+ -+/** PFE firmware initialization. -+* Loads different firmware files from filesystem. 
-+* Initializes PE IMEM/DMEM and UTIL-PE DDR -+* Initializes control path symbol addresses (by looking them up in the elf firmware files) -+* Takes PE's out of reset -+* -+* @return 0 on success, a negative value on error -+* -+*/ -+int pfe_firmware_init(struct pfe *pfe) -+{ -+ const struct firmware *class_fw, *tmu_fw; -+ int rc = 0; -+#if !defined(CONFIG_UTIL_DISABLED) -+ const char* util_fw_name; -+ const struct firmware *util_fw; -+#endif -+ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ if (request_firmware(&class_fw, CLASS_FIRMWARE_FILENAME, pfe->dev)) { -+ printk(KERN_ERR "%s: request firmware %s failed\n", __func__, CLASS_FIRMWARE_FILENAME); -+ rc = -ETIMEDOUT; -+ goto err0; -+ } -+ -+ if (request_firmware(&tmu_fw, TMU_FIRMWARE_FILENAME, pfe->dev)) { -+ printk(KERN_ERR "%s: request firmware %s failed\n", __func__, TMU_FIRMWARE_FILENAME); -+ rc = -ETIMEDOUT; -+ goto err1; -+ } -+#if !defined(CONFIG_UTIL_DISABLED) -+#if defined(CONFIG_PLATFORM_C2000) -+ util_fw_name = (system_rev == 0) ? UTIL_REVA0_FIRMWARE_FILENAME : UTIL_FIRMWARE_FILENAME; -+#else -+ util_fw_name = UTIL_FIRMWARE_FILENAME; -+#endif -+ -+ if (request_firmware(&util_fw, util_fw_name, pfe->dev)) { -+ printk(KERN_ERR "%s: request firmware %s failed\n", __func__, util_fw_name); -+ rc = -ETIMEDOUT; -+ goto err2; -+ } -+#endif -+ rc = pfe_load_elf(CLASS_MASK, class_fw, pfe); -+ if (rc < 0) { -+ printk(KERN_ERR "%s: class firmware load failed\n", __func__); -+ goto err3; -+ } -+ -+ pfe->ctrl.class_dmem_sh = get_elf_section(class_fw, ".dmem_sh"); -+ pfe->ctrl.class_pe_lmem_sh = get_elf_section(class_fw, ".pe_lmem_sh"); -+ -+#if defined(CFG_DIAGS) -+ rc = pfe_get_diags_info(class_fw, &pfe->diags.class_diags_info); -+ if (rc < 0) { -+ printk(KERN_WARNING "PFE diags won't be available for class PEs\n"); -+ rc = 0; -+ } -+#endif -+ -+ printk(KERN_INFO "%s: class firmware loaded %#lx %#lx\n", __func__, pfe->ctrl.class_dmem_sh, pfe->ctrl.class_pe_lmem_sh); -+ -+ rc = pfe_load_elf(TMU_MASK, tmu_fw, pfe); -+ if (rc < 0) { -+ printk(KERN_ERR "%s: tmu firmware load failed\n", __func__); -+ goto err3; -+ } -+ -+ pfe->ctrl.tmu_dmem_sh = get_elf_section(tmu_fw, ".dmem_sh"); -+ -+ printk(KERN_INFO "%s: tmu firmware loaded %#lx\n", __func__, pfe->ctrl.tmu_dmem_sh); -+ -+#if !defined(CONFIG_UTIL_DISABLED) -+ rc = pfe_load_elf(UTIL_MASK, util_fw, pfe); -+ if (rc < 0) { -+ printk(KERN_ERR "%s: util firmware load failed\n", __func__); -+ goto err3; -+ } -+ -+ pfe->ctrl.util_dmem_sh = get_elf_section(util_fw, ".dmem_sh"); -+ pfe->ctrl.util_ddr_sh = get_elf_section(util_fw, ".ddr_sh"); -+ -+#if defined(CFG_DIAGS) -+ rc = pfe_get_diags_info(util_fw, &pfe->diags.util_diags_info); -+ if (rc < 0) { -+ printk(KERN_WARNING "PFE diags won't be available for util PE\n"); -+ rc = 0; -+ } -+#endif -+ -+ printk(KERN_INFO "%s: util firmware loaded %#lx\n", __func__, pfe->ctrl.util_dmem_sh); -+ -+ util_enable(); -+#endif -+ -+ tmu_enable(0xf); -+ class_enable(); -+ -+err3: -+#if !defined(CONFIG_UTIL_DISABLED) -+ release_firmware(util_fw); -+ -+err2: -+#endif -+ release_firmware(tmu_fw); -+ -+err1: -+ release_firmware(class_fw); -+ -+err0: -+ return rc; -+} -+ -+/** PFE firmware cleanup -+* Puts PE's in reset -+* -+* -+*/ -+void pfe_firmware_exit(struct pfe *pfe) -+{ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ class_disable(); -+ tmu_disable(0xf); -+#if !defined(CONFIG_UTIL_DISABLED) -+ util_disable(); -+#endif -+} ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/pfe_firmware.h -@@ -0,0 +1,41 @@ -+/* -+ * -+ * Copyright (C) 2007 Freescale Semiconductor, Inc. 
-+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#ifndef _PFE_FIRMWARE_H_ -+#define _PFE_FIRMWARE_H_ -+ -+#if defined(CONFIG_PLATFORM_C2000) -+#define CLASS_FIRMWARE_FILENAME "class_c2000.elf" -+#define TMU_FIRMWARE_FILENAME "tmu_c2000.elf" -+#define UTIL_FIRMWARE_FILENAME "util_c2000.elf" -+#define UTIL_REVA0_FIRMWARE_FILENAME "util_c2000_revA0.elf" -+#else -+#define CLASS_FIRMWARE_FILENAME "ppfe_class_ls1012a.elf" -+#define TMU_FIRMWARE_FILENAME "ppfe_tmu_ls1012a.elf" -+#endif -+ -+#define PFE_FW_CHECK_PASS 0 -+#define PFE_FW_CHECK_FAIL 1 -+#define NUM_PFE_FW 3 -+ -+int pfe_firmware_init(struct pfe *pfe); -+void pfe_firmware_exit(struct pfe *pfe); -+ -+#endif /* _PFE_FIRMWARE_H_ */ -+ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/pfe_hal.c -@@ -0,0 +1,2217 @@ -+/* -+ * -+ * Copyright (C) 2007 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+ -+#include "pfe_ctrl_hal.h" -+#include "pfe/pfe.h" -+ -+void *cbus_base_addr; -+void *ddr_base_addr; -+unsigned long ddr_phys_base_addr; -+unsigned int ddr_size; -+ -+static struct pe_info pe[MAX_PE]; -+ -+/** Initializes the PFE library. -+* Must be called before using any of the library functions. 
-+* -+* @param[in] cbus_base CBUS virtual base address (as mapped in the host CPU address space) -+* @param[in] ddr_base PFE DDR range virtual base address (as mapped in the host CPU address space) -+* @param[in] ddr_phys_base PFE DDR range physical base address (as mapped in platform) -+* @param[in] size PFE DDR range size (as defined by the host software) -+*/ -+void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base, unsigned int size) -+{ -+ cbus_base_addr = cbus_base; -+ ddr_base_addr = ddr_base; -+ ddr_phys_base_addr = ddr_phys_base; -+ ddr_size = size; -+ -+ pe[CLASS0_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(0); -+ pe[CLASS0_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(0); -+ pe[CLASS0_ID].pmem_size = CLASS_IMEM_SIZE; -+ pe[CLASS0_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA; -+ pe[CLASS0_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR; -+ pe[CLASS0_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA; -+ -+ pe[CLASS1_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(1); -+ pe[CLASS1_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(1); -+ pe[CLASS1_ID].pmem_size = CLASS_IMEM_SIZE; -+ pe[CLASS1_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA; -+ pe[CLASS1_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR; -+ pe[CLASS1_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA; -+ -+ pe[CLASS2_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(2); -+ pe[CLASS2_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(2); -+ pe[CLASS2_ID].pmem_size = CLASS_IMEM_SIZE; -+ pe[CLASS2_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA; -+ pe[CLASS2_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR; -+ pe[CLASS2_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA; -+ -+ pe[CLASS3_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(3); -+ pe[CLASS3_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(3); -+ pe[CLASS3_ID].pmem_size = CLASS_IMEM_SIZE; -+ pe[CLASS3_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA; -+ pe[CLASS3_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR; -+ pe[CLASS3_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA; -+ -+#if !defined(CONFIG_PLATFORM_PCI) -+ pe[CLASS4_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(4); -+ pe[CLASS4_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(4); -+ pe[CLASS4_ID].pmem_size = CLASS_IMEM_SIZE; -+ pe[CLASS4_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA; -+ pe[CLASS4_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR; -+ pe[CLASS4_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA; -+ -+ pe[CLASS5_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(5); -+ pe[CLASS5_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(5); -+ pe[CLASS5_ID].pmem_size = CLASS_IMEM_SIZE; -+ pe[CLASS5_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA; -+ pe[CLASS5_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR; -+ pe[CLASS5_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA; -+#endif -+ pe[TMU0_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(0); -+ pe[TMU0_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(0); -+ pe[TMU0_ID].pmem_size = TMU_IMEM_SIZE; -+ pe[TMU0_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA; -+ pe[TMU0_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR; -+ pe[TMU0_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA; -+ -+#if !defined(CONFIG_TMU_DUMMY) -+ pe[TMU1_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(1); -+ pe[TMU1_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(1); -+ pe[TMU1_ID].pmem_size = TMU_IMEM_SIZE; -+ pe[TMU1_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA; -+ pe[TMU1_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR; -+ pe[TMU1_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA; -+ -+#if !defined(CONFIG_PLATFORM_LS1012A) -+ pe[TMU2_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(2); -+ pe[TMU2_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(2); -+ pe[TMU2_ID].pmem_size 
= TMU_IMEM_SIZE; -+ pe[TMU2_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA; -+ pe[TMU2_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR; -+ pe[TMU2_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA; -+#endif -+ -+ pe[TMU3_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(3); -+ pe[TMU3_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(3); -+ pe[TMU3_ID].pmem_size = TMU_IMEM_SIZE; -+ pe[TMU3_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA; -+ pe[TMU3_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR; -+ pe[TMU3_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA; -+#endif -+ -+#if !defined(CONFIG_UTIL_DISABLED) -+ pe[UTIL_ID].dmem_base_addr = UTIL_DMEM_BASE_ADDR; -+ pe[UTIL_ID].mem_access_wdata = UTIL_MEM_ACCESS_WDATA; -+ pe[UTIL_ID].mem_access_addr = UTIL_MEM_ACCESS_ADDR; -+ pe[UTIL_ID].mem_access_rdata = UTIL_MEM_ACCESS_RDATA; -+#endif -+} -+ -+ -+/** Writes a buffer to PE internal memory from the host -+ * through indirect access registers. -+ * -+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ..., UTIL_ID) -+ * @param[in] src Buffer source address -+ * @param[in] mem_access_addr DMEM destination address (must be 32bit aligned) -+ * @param[in] len Number of bytes to copy -+ */ -+void pe_mem_memcpy_to32(int id, u32 mem_access_addr, const void *src, unsigned int len) -+{ -+ u32 offset = 0, val, addr; -+ unsigned int len32 = len >> 2; -+ int i; -+ -+ addr = mem_access_addr | PE_MEM_ACCESS_WRITE | PE_MEM_ACCESS_BYTE_ENABLE(0, 4); -+ -+ for (i = 0; i < len32; i++, offset += 4, src += 4) { -+ val = *(u32 *)src; -+ writel(cpu_to_be32(val), pe[id].mem_access_wdata); -+ writel(addr + offset, pe[id].mem_access_addr); -+ } -+ -+ if ((len = (len & 0x3))) { -+ val = 0; -+ -+ addr = (mem_access_addr | PE_MEM_ACCESS_WRITE | PE_MEM_ACCESS_BYTE_ENABLE(0, len)) + offset; -+ -+ for (i = 0; i < len; i++, src++) -+ val |= (*(u8 *)src) << (8 * i); -+ -+ writel(cpu_to_be32(val), pe[id].mem_access_wdata); -+ writel(addr, pe[id].mem_access_addr); -+ } -+} -+ -+/** Writes a buffer to PE internal data memory (DMEM) from the host -+ * through indirect access registers. -+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ..., UTIL_ID) -+ * @param[in] src Buffer source address -+ * @param[in] dst DMEM destination address (must be 32bit aligned) -+ * @param[in] len Number of bytes to copy -+ */ -+void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len) -+{ -+ pe_mem_memcpy_to32(id, pe[id].dmem_base_addr | dst | PE_MEM_ACCESS_DMEM, src, len); -+} -+ -+ -+/** Writes a buffer to PE internal program memory (PMEM) from the host -+ * through indirect access registers. -+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ..., TMU3_ID) -+ * @param[in] src Buffer source address -+ * @param[in] dst PMEM destination address (must be 32bit aligned) -+ * @param[in] len Number of bytes to copy -+ */ -+void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len) -+{ -+ pe_mem_memcpy_to32(id, pe[id].pmem_base_addr | (dst & (pe[id].pmem_size - 1)) | PE_MEM_ACCESS_IMEM, src, len); -+} -+ -+ -+/** Reads PE internal program memory (IMEM) from the host -+ * through indirect access registers. -+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ..., TMU3_ID) -+ * @param[in] addr PMEM read address (must be aligned on size) -+ * @param[in] size Number of bytes to read (maximum 4, must not cross 32bit boundaries) -+ * @return the data read (in PE endianess, i.e BE). 
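In pe_mem_memcpy_to32 above, a trailing 1-3 bytes are packed into a single word low byte first, and PE_MEM_ACCESS_BYTE_ENABLE(0, len) tells the hardware which byte lanes to actually write. A standalone distillation of that tail packing, with a worked trace in the comment:

/* Sketch of the tail packing, for len = 3 and src = {0xAA, 0xBB, 0xCC}:
 * i = 0: val = 0x000000AA
 * i = 1: val = 0x0000BBAA
 * i = 2: val = 0x00CCBBAA
 * cpu_to_be32() then presents the bytes in the order the big-endian PE
 * expects, and the byte enables keep the unused lane untouched. */
static u32 pack_tail(const u8 *src, unsigned int len)
{
        u32 val = 0;
        unsigned int i;

        for (i = 0; i < len; i++)
                val |= (u32)src[i] << (8 * i);

        return val;
}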
-+ */ -+u32 pe_pmem_read(int id, u32 addr, u8 size) -+{ -+ u32 offset = addr & 0x3; -+ u32 mask = 0xffffffff >> ((4 - size) << 3); -+ u32 val; -+ -+ addr = pe[id].pmem_base_addr | ((addr & ~0x3) & (pe[id].pmem_size - 1)) | PE_MEM_ACCESS_IMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size); -+ -+ writel(addr, pe[id].mem_access_addr); -+ val = be32_to_cpu(readl(pe[id].mem_access_rdata)); -+ -+ return (val >> (offset << 3)) & mask; -+} -+ -+ -+/** Writes PE internal data memory (DMEM) from the host -+ * through indirect access registers. -+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ..., UTIL_ID) -+ * @param[in] addr DMEM write address (must be aligned on size) -+ * @param[in] val Value to write (in PE endianess, i.e BE) -+ * @param[in] size Number of bytes to write (maximum 4, must not cross 32bit boundaries) -+ */ -+void pe_dmem_write(int id, u32 val, u32 addr, u8 size) -+{ -+ u32 offset = addr & 0x3; -+ -+ addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_WRITE | PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size); -+ -+ /* Indirect access interface is byte swapping data being written */ -+ writel(cpu_to_be32(val << (offset << 3)), pe[id].mem_access_wdata); -+ writel(addr, pe[id].mem_access_addr); -+} -+ -+ -+/** Reads PE internal data memory (DMEM) from the host -+ * through indirect access registers. -+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ..., UTIL_ID) -+ * @param[in] addr DMEM read address (must be aligned on size) -+ * @param[in] size Number of bytes to read (maximum 4, must not cross 32bit boundaries) -+ * @return the data read (in PE endianess, i.e BE). -+ */ -+u32 pe_dmem_read(int id, u32 addr, u8 size) -+{ -+ u32 offset = addr & 0x3; -+ u32 mask = 0xffffffff >> ((4 - size) << 3); -+ u32 val; -+ -+ addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size); -+ -+ writel(addr, pe[id].mem_access_addr); -+ -+ /* Indirect access interface is byte swapping data being read */ -+ val = be32_to_cpu(readl(pe[id].mem_access_rdata)); -+ -+ return (val >> (offset << 3)) & mask; -+} -+ -+ -+/** This function is used to write to CLASS internal bus peripherals (ccu, pe-lem) from the host -+* through indirect access registers. -+* @param[in] val value to write -+* @param[in] addr Address to write to (must be aligned on size) -+* @param[in] size Number of bytes to write (1, 2 or 4) -+* -+*/ -+void class_bus_write(u32 val, u32 addr, u8 size) -+{ -+ u32 offset = addr & 0x3; -+ -+ writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE); -+ -+ addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | PE_MEM_ACCESS_WRITE | (size << 24); -+ -+ writel(cpu_to_be32(val << (offset << 3)), CLASS_BUS_ACCESS_WDATA); -+ writel(addr, CLASS_BUS_ACCESS_ADDR); -+} -+ -+ -+/** Reads from CLASS internal bus peripherals (ccu, pe-lem) from the host -+* through indirect access registers. 
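The read helpers above all share the same sub-word arithmetic: the mask covers exactly size bytes, and the result is shifted down by the byte offset within the 32-bit word. A distilled copy of that arithmetic, with a worked example in the comment:

/* Sub-word extraction as used by pe_pmem_read()/pe_dmem_read().
 * Example, a 2-byte read at addr = 0x102:
 * offset = 0x102 & 0x3 = 2
 * mask = 0xffffffff >> ((4 - 2) << 3) = 0x0000ffff
 * result = (val >> 16) & 0xffff, i.e. bytes 2..3 of the word. */
static u32 extract_subword(u32 val, u32 addr, u8 size)
{
        u32 offset = addr & 0x3;
        u32 mask = 0xffffffff >> ((4 - size) << 3);

        return (val >> (offset << 3)) & mask;
}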
-+* @param[in] addr Address to read from (must be aligned on size) -+* @param[in] size Number of bytes to read (1, 2 or 4) -+* @return the read data -+* -+*/ -+u32 class_bus_read(u32 addr, u8 size) -+{ -+ u32 offset = addr & 0x3; -+ u32 mask = 0xffffffff >> ((4 - size) << 3); -+ u32 val; -+ -+ writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE); -+ -+ addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | (size << 24); -+ -+ writel(addr, CLASS_BUS_ACCESS_ADDR); -+ val = be32_to_cpu(readl(CLASS_BUS_ACCESS_RDATA)); -+ -+ return (val >> (offset << 3)) & mask; -+} -+ -+ -+/** Writes data to the cluster memory (PE_LMEM) -+* @param[in] dst PE LMEM destination address (must be 32bit aligned) -+* @param[in] src Buffer source address -+* @param[in] len Number of bytes to copy -+*/ -+void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len) -+{ -+ u32 len32 = len >> 2; -+ int i; -+ -+ for (i = 0; i < len32; i++, src += 4, dst += 4) -+ class_bus_write(*(u32 *)src, dst, 4); -+ -+ if (len & 0x2) -+ { -+ class_bus_write(*(u16 *)src, dst, 2); -+ src += 2; -+ dst += 2; -+ } -+ -+ if (len & 0x1) -+ { -+ class_bus_write(*(u8 *)src, dst, 1); -+ src++; -+ dst++; -+ } -+} -+ -+/** Writes value to the cluster memory (PE_LMEM) -+* @param[in] dst PE LMEM destination address (must be 32bit aligned) -+* @param[in] val Value to write -+* @param[in] len Number of bytes to write -+*/ -+void class_pe_lmem_memset(u32 dst, int val, unsigned int len) -+{ -+ u32 len32 = len >> 2; -+ int i; -+ -+ val = val | (val << 8) | (val << 16) | (val << 24); -+ -+ for (i = 0; i < len32; i++, dst += 4) -+ class_bus_write(val, dst, 4); -+ -+ if (len & 0x2) -+ { -+ class_bus_write(val, dst, 2); -+ dst += 2; -+ } -+ -+ if (len & 0x1) -+ { -+ class_bus_write(val, dst, 1); -+ dst++; -+ } -+} -+ -+#if !defined(CONFIG_UTIL_DISABLED) -+ -+/** Writes UTIL program memory (DDR) from the host. -+ * -+ * @param[in] addr Address to write (virtual, must be aligned on size) -+ * @param[in] val Value to write (in PE endianess, i.e BE) -+ * @param[in] size Number of bytes to write (2 or 4) -+ */ -+static void util_pmem_write(u32 val, void *addr, u8 size) -+{ -+ void *addr64 = (void *)((unsigned long)addr & ~0x7); -+ unsigned long off = 8 - ((unsigned long)addr & 0x7) - size; -+ -+ //IMEM should be loaded as a 64bit swapped value in a 64bit aligned location -+ if (size == 4) -+ writel(be32_to_cpu(val), addr64 + off); -+ else -+ writew(be16_to_cpu((u16)val), addr64 + off); -+} -+ -+ -+/** Writes a buffer to UTIL program memory (DDR) from the host. 
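util_pmem_write above relies on the UTIL IMEM image being stored 64-bit byte-swapped: the target address is rounded down to an 8-byte boundary and the write lands at the mirrored offset within that doubleword. A distilled copy of the offset computation, with worked values:

/* Mirrored-offset computation from util_pmem_write(), traced:
 * addr & 7 = 0, size = 4  ->  off = 8 - 0 - 4 = 4
 * addr & 7 = 4, size = 4  ->  off = 8 - 4 - 4 = 0
 * addr & 7 = 6, size = 2  ->  off = 8 - 6 - 2 = 0 */
static unsigned long mirrored_off(unsigned long addr, unsigned int size)
{
        return 8 - (addr & 0x7) - size;
}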
-+ * -+ * @param[in] dst Address to write (virtual, must be at least 16bit aligned) -+ * @param[in] src Buffer to write (in PE endianess, i.e BE, must have same alignment as dst) -+ * @param[in] len Number of bytes to write (must be at least 16bit aligned) -+ */ -+static void util_pmem_memcpy(void *dst, const void *src, unsigned int len) -+{ -+ unsigned int len32; -+ int i; -+ -+ if ((unsigned long)src & 0x2) { -+ util_pmem_write(*(u16 *)src, dst, 2); -+ src += 2; -+ dst += 2; -+ len -= 2; -+ } -+ -+ len32 = len >> 2; -+ -+ for (i = 0; i < len32; i++, dst += 4, src += 4) -+ util_pmem_write(*(u32 *)src, dst, 4); -+ -+ if (len & 0x2) -+ util_pmem_write(*(u16 *)src, dst, len & 0x2); -+} -+#endif -+ -+/** Loads an elf section into pmem -+ * Code needs to be at least 16bit aligned and only PROGBITS sections are supported -+ * -+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ..., TMU3_ID) -+ * @param[in] data pointer to the elf firmware -+ * @param[in] shdr pointer to the elf section header -+ * -+ */ -+static int pe_load_pmem_section(int id, const void *data, Elf32_Shdr *shdr) -+{ -+ u32 offset = be32_to_cpu(shdr->sh_offset); -+ u32 addr = be32_to_cpu(shdr->sh_addr); -+ u32 size = be32_to_cpu(shdr->sh_size); -+ u32 type = be32_to_cpu(shdr->sh_type); -+ -+#if !defined(CONFIG_UTIL_DISABLED) -+ if (id == UTIL_ID) -+ { -+ printk(KERN_ERR "%s: unsuported pmem section for UTIL\n", __func__); -+ return -EINVAL; -+ } -+#endif -+ -+ if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) -+ { -+ printk(KERN_ERR "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n", -+ __func__, addr, (unsigned long) data + offset); -+ -+ return -EINVAL; -+ } -+ -+ if (addr & 0x1) -+ { -+ printk(KERN_ERR "%s: load address(%x) is not 16bit aligned\n", __func__, addr); -+ return -EINVAL; -+ } -+ -+ if (size & 0x1) -+ { -+ printk(KERN_ERR "%s: load size(%x) is not 16bit aligned\n", __func__, size); -+ return -EINVAL; -+ } -+ -+ switch (type) -+ { -+ case SHT_PROGBITS: -+ pe_pmem_memcpy_to32(id, addr, data + offset, size); -+ -+ break; -+ -+ default: -+ printk(KERN_ERR "%s: unsuported section type(%x)\n", __func__, type); -+ return -EINVAL; -+ break; -+ } -+ -+ return 0; -+} -+ -+ -+/** Loads an elf section into dmem -+ * Data needs to be at least 32bit aligned, NOBITS sections are correctly initialized to 0 -+ * -+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ..., UTIL_ID) -+ * @param[in] data pointer to the elf firmware -+ * @param[in] shdr pointer to the elf section header -+ * -+ */ -+static int pe_load_dmem_section(int id, const void *data, Elf32_Shdr *shdr) -+{ -+ u32 offset = be32_to_cpu(shdr->sh_offset); -+ u32 addr = be32_to_cpu(shdr->sh_addr); -+ u32 size = be32_to_cpu(shdr->sh_size); -+ u32 type = be32_to_cpu(shdr->sh_type); -+ u32 size32 = size >> 2; -+ int i; -+ -+ if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) -+ { -+ printk(KERN_ERR "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n", -+ __func__, addr, (unsigned long)data + offset); -+ -+ return -EINVAL; -+ } -+ -+ if (addr & 0x3) -+ { -+ printk(KERN_ERR "%s: load address(%x) is not 32bit aligned\n", __func__, addr); -+ return -EINVAL; -+ } -+ -+ switch (type) -+ { -+ case SHT_PROGBITS: -+ pe_dmem_memcpy_to32(id, addr, data + offset, size); -+ break; -+ -+ case SHT_NOBITS: -+ for (i = 0; i < size32; i++, addr += 4) -+ pe_dmem_write(id, 0, addr, 4); -+ -+ if (size & 0x3) -+ pe_dmem_write(id, 0, addr, size & 0x3); -+ -+ break; -+ -+ default: -+ printk(KERN_ERR 
"%s: unsuported section type(%x)\n", __func__, type); -+ return -EINVAL; -+ break; -+ } -+ -+ return 0; -+} -+ -+ -+/** Loads an elf section into DDR -+ * Data needs to be at least 32bit aligned, NOBITS sections are correctly initialized to 0 -+ * -+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ..., UTIL_ID) -+ * @param[in] data pointer to the elf firmware -+ * @param[in] shdr pointer to the elf section header -+ * -+ */ -+static int pe_load_ddr_section(int id, const void *data, Elf32_Shdr *shdr, struct device *dev) -+{ -+ u32 offset = be32_to_cpu(shdr->sh_offset); -+ u32 addr = be32_to_cpu(shdr->sh_addr); -+ u32 size = be32_to_cpu(shdr->sh_size); -+ u32 type = be32_to_cpu(shdr->sh_type); -+ u32 flags = be32_to_cpu(shdr->sh_flags); -+ -+ switch (type) -+ { -+ case SHT_PROGBITS: -+ if (flags & SHF_EXECINSTR) -+ { -+ if (id <= CLASS_MAX_ID) -+ { -+ /* DO the loading only once in DDR */ -+ if (id == CLASS0_ID) -+ { -+ printk(KERN_ERR "%s: load address(%x) and elf file address(%lx) rcvd\n", __func__, addr, (unsigned long)data + offset); -+ if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) -+ { -+ printk(KERN_ERR "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n", -+ __func__, addr, (unsigned long)data + offset); -+ -+ return -EINVAL; -+ } -+ -+ if (addr & 0x1) -+ { -+ printk(KERN_ERR "%s: load address(%x) is not 16bit aligned\n", __func__, addr); -+ return -EINVAL; -+ } -+ -+ if (size & 0x1) -+ { -+ printk(KERN_ERR "%s: load length(%x) is not 16bit aligned\n", __func__, size); -+ return -EINVAL; -+ } -+ memcpy(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), data + offset, size); -+ } -+ } -+#if !defined(CONFIG_UTIL_DISABLED) -+ else if (id == UTIL_ID) -+ { -+ if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) -+ { -+ printk(KERN_ERR "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n", -+ __func__, addr, (unsigned long)data + offset); -+ -+ return -EINVAL; -+ } -+ -+ if (addr & 0x1) -+ { -+ printk(KERN_ERR "%s: load address(%x) is not 16bit aligned\n", __func__, addr); -+ return -EINVAL; -+ } -+ -+ if (size & 0x1) -+ { -+ printk(KERN_ERR "%s: load length(%x) is not 16bit aligned\n", __func__, size); -+ return -EINVAL; -+ } -+ -+ util_pmem_memcpy(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), data + offset, size); -+ } -+#endif -+ else -+ { -+ printk(KERN_ERR "%s: unsuported ddr section type(%x) for PE(%d)\n", __func__, type, id); -+ return -EINVAL; -+ } -+ -+ } -+ else -+ { -+ memcpy(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), data + offset, size); -+ } -+ -+ break; -+ -+ case SHT_NOBITS: -+ memset(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), 0, size); -+ -+ break; -+ -+ default: -+ printk(KERN_ERR "%s: unsuported section type(%x)\n", __func__, type); -+ return -EINVAL; -+ break; -+ } -+ -+ return 0; -+} -+ -+ -+/** Loads an elf section into pe lmem -+ * Data needs to be at least 32bit aligned, NOBITS sections are correctly initialized to 0 -+ * -+ * @param[in] id PE identification (CLASS0_ID,..., CLASS5_ID) -+ * @param[in] data pointer to the elf firmware -+ * @param[in] shdr pointer to the elf section header -+ * -+ */ -+static int pe_load_pe_lmem_section(int id, const void *data, Elf32_Shdr *shdr) -+{ -+ u32 offset = be32_to_cpu(shdr->sh_offset); -+ u32 addr = be32_to_cpu(shdr->sh_addr); -+ u32 size = be32_to_cpu(shdr->sh_size); -+ u32 type = be32_to_cpu(shdr->sh_type); -+ -+ if (id > CLASS_MAX_ID) -+ { -+ printk(KERN_ERR "%s: unsuported pe-lmem section type(%x) for PE(%d)\n", __func__, type, id); -+ return -EINVAL; -+ 
} -+ -+ if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) -+ { -+ printk(KERN_ERR "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n", -+ __func__, addr, (unsigned long)data + offset); -+ -+ return -EINVAL; -+ } -+ -+ if (addr & 0x3) -+ { -+ printk(KERN_ERR "%s: load address(%x) is not 32bit aligned\n", __func__, addr); -+ return -EINVAL; -+ } -+ -+ switch (type) -+ { -+ case SHT_PROGBITS: -+ class_pe_lmem_memcpy_to32(addr, data + offset, size); -+ break; -+ -+ case SHT_NOBITS: -+ class_pe_lmem_memset(addr, 0, size); -+ break; -+ -+ default: -+ printk(KERN_ERR "%s: unsupported section type(%x)\n", __func__, type); -+ return -EINVAL; -+ break; -+ } -+ -+ return 0; -+} -+ -+ -+/** Loads an elf section into a PE -+ * For now only supports loading a section to dmem (all PEs), pmem (class and tmu PEs), -+ * DDR (util PE code) -+ * -+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ..., UTIL_ID) -+ * @param[in] data pointer to the elf firmware -+ * @param[in] shdr pointer to the elf section header -+ * -+ */ -+int pe_load_elf_section(int id, const void *data, Elf32_Shdr *shdr, struct device *dev) -+{ -+ u32 addr = be32_to_cpu(shdr->sh_addr); -+ u32 size = be32_to_cpu(shdr->sh_size); -+ -+ if (IS_DMEM(addr, size)) -+ return pe_load_dmem_section(id, data, shdr); -+ else if (IS_PMEM(addr, size)) -+ return pe_load_pmem_section(id, data, shdr); -+ else if (IS_PFE_LMEM(addr, size)) -+ return 0; /* FIXME */ -+ else if (IS_PHYS_DDR(addr, size)) -+ return pe_load_ddr_section(id, data, shdr, dev); -+ else if (IS_PE_LMEM(addr, size)) -+ return pe_load_pe_lmem_section(id, data, shdr); -+ else { -+ printk(KERN_ERR "%s: unsupported memory range(%x)\n", __func__, addr); -+// return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+ -+/**************************** BMU ***************************/ -+ -+/** Initializes a BMU block. -+* @param[in] base BMU block base address -+* @param[in] cfg BMU configuration -+*/ -+void bmu_init(void *base, BMU_CFG *cfg) -+{ -+ bmu_disable(base); -+ -+ bmu_set_config(base, cfg); -+ -+ bmu_reset(base); -+} -+ -+/** Resets a BMU block. -+* @param[in] base BMU block base address -+*/ -+void bmu_reset(void *base) -+{ -+ writel(CORE_SW_RESET, base + BMU_CTRL); -+ -+ /* Wait for self clear */ -+ while (readl(base + BMU_CTRL) & CORE_SW_RESET) ; -+} -+ -+/** Enables a BMU block. -+* @param[in] base BMU block base address -+*/ -+void bmu_enable(void *base) -+{ -+ writel (CORE_ENABLE, base + BMU_CTRL); -+} -+ -+/** Disables a BMU block. -+* @param[in] base BMU block base address -+*/ -+void bmu_disable(void *base) -+{ -+ writel (CORE_DISABLE, base + BMU_CTRL); -+} -+ -+/** Sets the configuration of a BMU block. -+* @param[in] base BMU block base address -+* @param[in] cfg BMU configuration -+*/ -+void bmu_set_config(void *base, BMU_CFG *cfg) -+{ -+ writel (cfg->baseaddr, base + BMU_UCAST_BASE_ADDR); -+ writel (cfg->count & 0xffff, base + BMU_UCAST_CONFIG); -+ writel (cfg->size & 0xffff, base + BMU_BUF_SIZE); -+// writel (BMU1_THRES_CNT, base + BMU_THRES); -+ -+ /* Interrupts are never used */ -+// writel (0x0, base + BMU_INT_SRC); -+ writel (0x0, base + BMU_INT_ENABLE); -+} -+#if defined(CONFIG_PLATFORM_C2000) -+/**************************** GEMAC ***************************/ -+ -+/** Enable Rx Checksum Engine. With this enabled, frames with bad IP, -+ * TCP or UDP checksums are discarded -+ * -+ * @param[in] base GEMAC base address.
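bmu_init above deliberately configures the block while it is disabled and only then resets it; enabling is left to the caller. A bring-up sketch, for example from a probe path (all field values are illustrative placeholders; the field names are those consumed by bmu_set_config above):

/* Illustrative BMU bring-up; bmu_base and the cfg values are placeholders. */
BMU_CFG cfg = {
        .baseaddr = 0x0,   /* PFE-visible pool base address (placeholder) */
        .count = 1024,     /* number of buffers in the pool (placeholder) */
        .size = 128,       /* buffer size in bytes (placeholder) */
};

bmu_init(bmu_base, &cfg);
bmu_enable(bmu_base);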
-+ */ -+void gemac_enable_rx_checksum_offload(void *base) -+{ -+ writel(readl(base + EMAC_NETWORK_CONFIG) | EMAC_ENABLE_CHKSUM_RX, base + EMAC_NETWORK_CONFIG); -+ writel(readl(CLASS_L4_CHKSUM_ADDR) | IPV4_CHKSUM_DROP, CLASS_L4_CHKSUM_ADDR); -+} -+ -+/** Disable Rx Checksum Engine. -+ * -+ * @param[in] base GEMAC base address. -+ */ -+void gemac_disable_rx_checksum_offload(void *base) -+{ -+ writel(readl(base + EMAC_NETWORK_CONFIG) & ~EMAC_ENABLE_CHKSUM_RX, base + EMAC_NETWORK_CONFIG); -+ writel(readl(CLASS_L4_CHKSUM_ADDR) & ~IPV4_CHKSUM_DROP, CLASS_L4_CHKSUM_ADDR); -+} -+ -+/** Setup the MII Mgmt clock speed. -+ * @param[in] base GEMAC base address (GEMAC0, GEMAC1, GEMAC2) -+ * @param[in] mdc_div MII clock dividor -+ */ -+void gemac_set_mdc_div(void *base, int mdc_div) -+{ -+ u32 val = readl(base + EMAC_NETWORK_CONFIG) & ~EMAC_MDC_DIV_MASK; -+ u32 div; -+ -+ switch (mdc_div) { -+ case 8: -+ div = 0; -+ break; -+ -+ case 16: -+ div = 1; -+ break; -+ -+ case 32: -+ div = 2; -+ break; -+ -+ case 48: -+ div = 3; -+ break; -+ -+ default: -+ case 64: -+ div = 4; -+ break; -+ -+ case 96: -+ div = 5; -+ break; -+ -+ case 128: -+ div = 6; -+ break; -+ -+ case 224: -+ div = 7; -+ break; -+ } -+ -+ val |= div << 18; -+ -+ writel(val, base + EMAC_NETWORK_CONFIG); -+} -+ -+/** GEMAC set speed. -+* @param[in] base GEMAC base address -+* @param[in] speed GEMAC speed (10, 100 or 1000 Mbps) -+*/ -+void gemac_set_speed(void *base, MAC_SPEED gem_speed) -+{ -+ u32 val = readl(base + EMAC_NETWORK_CONFIG); -+ -+ val = val & ~EMAC_SPEED_MASK; -+ -+ switch (gem_speed) -+ { -+ case SPEED_10M: -+ val &= (~EMAC_PCS_ENABLE); -+ break; -+ -+ case SPEED_100M: -+ val = val | EMAC_SPEED_100; -+ val &= (~EMAC_PCS_ENABLE); -+ break; -+ -+ case SPEED_1000M: -+ val = val | EMAC_SPEED_1000; -+ val &= (~EMAC_PCS_ENABLE); -+ break; -+ -+ case SPEED_1000M_PCS: -+ val = val | EMAC_SPEED_1000; -+ val |= EMAC_PCS_ENABLE; -+ break; -+ -+ default: -+ val = val | EMAC_SPEED_100; -+ val &= (~EMAC_PCS_ENABLE); -+ break; -+ } -+ -+ writel (val, base + EMAC_NETWORK_CONFIG); -+} -+ -+/** GEMAC set duplex. -+* @param[in] base GEMAC base address -+* @param[in] duplex GEMAC duplex mode (Full, Half) -+*/ -+void gemac_set_duplex(void *base, int duplex) -+{ -+ u32 val = readl(base + EMAC_NETWORK_CONFIG); -+ -+ if (duplex == DUPLEX_HALF) -+ val = (val & ~EMAC_DUPLEX_MASK) | EMAC_HALF_DUP; -+ else -+ val = (val & ~EMAC_DUPLEX_MASK) | EMAC_FULL_DUP; -+ -+ writel (val, base + EMAC_NETWORK_CONFIG); -+} -+ -+/** GEMAC set mode. 
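gemac_set_speed and gemac_set_duplex above each read-modify-write EMAC_NETWORK_CONFIG independently, so a link-state handler can combine them freely. A sketch of how a phylib adjust_link callback might apply a negotiated link (struct example_priv and its fields are hypothetical placeholders, not names from this patch):

/* Hypothetical adjust_link handler built on the helpers above. */
struct example_priv {
        void *emac_base;
        struct phy_device *phydev;
};

static void example_adjust_link(struct net_device *ndev)
{
        struct example_priv *priv = netdev_priv(ndev);
        struct phy_device *phydev = priv->phydev;
        MAC_SPEED s;

        if (!phydev->link)
                return;

        switch (phydev->speed) {
        case 1000: s = SPEED_1000M; break;
        case 100:  s = SPEED_100M;  break;
        default:   s = SPEED_10M;   break;
        }

        gemac_set_speed(priv->emac_base, s);
        gemac_set_duplex(priv->emac_base, phydev->duplex);
}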
-+* @param[in] base GEMAC base address -+* @param[in] mode GEMAC operation mode (MII, RMII, RGMII, SGMII) -+*/ -+ -+#if defined(CONFIG_IP_ALIGNED) -+#define IP_ALIGNED_BITVAL EMAC_TWO_BYTES_IP_ALIGN -+#else -+#define IP_ALIGNED_BITVAL 0 -+#endif -+ -+void gemac_set_mode(void *base, int mode) -+{ -+ switch (mode) -+ { -+ case GMII: -+ writel ((readl(base + EMAC_CONTROL) & ~EMAC_MODE_MASK) | EMAC_GMII_MODE_ENABLE | IP_ALIGNED_BITVAL, base + EMAC_CONTROL); -+ writel (readl(base + EMAC_NETWORK_CONFIG) & (~EMAC_SGMII_MODE_ENABLE), base + EMAC_NETWORK_CONFIG); -+ break; -+ -+ case RGMII: -+ writel ((readl(base + EMAC_CONTROL) & ~EMAC_MODE_MASK) | EMAC_RGMII_MODE_ENABLE | IP_ALIGNED_BITVAL, base + EMAC_CONTROL); -+ writel (readl(base + EMAC_NETWORK_CONFIG) & (~EMAC_SGMII_MODE_ENABLE), base + EMAC_NETWORK_CONFIG); -+ break; -+ -+ case RMII: -+ writel ((readl(base + EMAC_CONTROL) & ~EMAC_MODE_MASK) | EMAC_RMII_MODE_ENABLE | IP_ALIGNED_BITVAL, base + EMAC_CONTROL); -+ writel (readl(base + EMAC_NETWORK_CONFIG) & (~EMAC_SGMII_MODE_ENABLE), base + EMAC_NETWORK_CONFIG); -+ break; -+ -+ case MII: -+ writel ((readl(base + EMAC_CONTROL) & ~EMAC_MODE_MASK) | EMAC_MII_MODE_ENABLE | IP_ALIGNED_BITVAL, base + EMAC_CONTROL); -+ writel (readl(base + EMAC_NETWORK_CONFIG) & (~EMAC_SGMII_MODE_ENABLE), base + EMAC_NETWORK_CONFIG); -+ break; -+ -+ case SGMII: -+ writel ((readl(base + EMAC_CONTROL) & ~EMAC_MODE_MASK) | (EMAC_RMII_MODE_DISABLE | EMAC_RGMII_MODE_DISABLE) | IP_ALIGNED_BITVAL, base + EMAC_CONTROL); -+ writel (readl(base + EMAC_NETWORK_CONFIG) | EMAC_SGMII_MODE_ENABLE, base + EMAC_NETWORK_CONFIG); -+ break; -+ -+ default: -+ writel ((readl(base + EMAC_CONTROL) & ~EMAC_MODE_MASK) | EMAC_MII_MODE_ENABLE | IP_ALIGNED_BITVAL, base + EMAC_CONTROL); -+ writel (readl(base + EMAC_NETWORK_CONFIG) & (~EMAC_SGMII_MODE_ENABLE), base + EMAC_NETWORK_CONFIG); -+ break; -+ } -+} -+/** GEMAC Enable MDIO: Activate the Management interface. This is required to program the PHY -+ * @param[in] base GEMAC base address -+ */ -+void gemac_enable_mdio(void *base) -+{ -+ u32 data; -+ -+ data = readl(base + EMAC_NETWORK_CONTROL); -+ data |= EMAC_MDIO_EN; -+ writel(data, base + EMAC_NETWORK_CONTROL); -+} -+ -+/** GEMAC Disable MDIO: Disable the Management interface. -+ * @param[in] base GEMAC base address -+ */ -+void gemac_disable_mdio(void *base) -+{ -+ u32 data; -+ -+ data = readl(base + EMAC_NETWORK_CONTROL); -+ data &= ~EMAC_MDIO_EN; -+ writel(data, base + EMAC_NETWORK_CONTROL); -+} -+ -+ -+/** GEMAC reset function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_reset(void *base) -+{ -+} -+ -+/** GEMAC enable function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_enable(void *base) -+{ -+ writel (readl(base + EMAC_NETWORK_CONTROL) | EMAC_TX_ENABLE | EMAC_RX_ENABLE, base + EMAC_NETWORK_CONTROL); -+} -+ -+/** GEMAC disable function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_disable(void *base) -+{ -+ writel (readl(base + EMAC_NETWORK_CONTROL) & ~(EMAC_TX_ENABLE | EMAC_RX_ENABLE), base + EMAC_NETWORK_CONTROL); -+} -+ -+/** GEMAC TX disable function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_tx_disable(void *base) -+{ -+ writel (readl(base + EMAC_NETWORK_CONTROL) & ~(EMAC_TX_ENABLE), base + EMAC_NETWORK_CONTROL); -+} -+ -+/** GEMAC set mac address configuration. 
-+* @param[in] base GEMAC base address -+* @param[in] addr MAC address to be configured -+*/ -+void gemac_set_address(void *base, SPEC_ADDR *addr) -+{ -+ writel(addr->one.bottom, base + EMAC_SPEC1_ADD_BOT); -+ writel(addr->one.top, base + EMAC_SPEC1_ADD_TOP); -+ writel(addr->two.bottom, base + EMAC_SPEC2_ADD_BOT); -+ writel(addr->two.top, base + EMAC_SPEC2_ADD_TOP); -+ writel(addr->three.bottom, base + EMAC_SPEC3_ADD_BOT); -+ writel(addr->three.top, base + EMAC_SPEC3_ADD_TOP); -+ writel(addr->four.bottom, base + EMAC_SPEC4_ADD_BOT); -+ writel(addr->four.top, base + EMAC_SPEC4_ADD_TOP); -+} -+ -+/** GEMAC get mac address configuration. -+* @param[in] base GEMAC base address -+* -+* @return MAC addresses configured -+*/ -+SPEC_ADDR gemac_get_address(void *base) -+{ -+ SPEC_ADDR addr; -+ -+ addr.one.bottom = readl(base + EMAC_SPEC1_ADD_BOT); -+ addr.one.top = readl(base + EMAC_SPEC1_ADD_TOP); -+ addr.two.bottom = readl(base + EMAC_SPEC2_ADD_BOT); -+ addr.two.top = readl(base + EMAC_SPEC2_ADD_TOP); -+ addr.three.bottom = readl(base + EMAC_SPEC3_ADD_BOT); -+ addr.three.top = readl(base + EMAC_SPEC3_ADD_TOP); -+ addr.four.bottom = readl(base + EMAC_SPEC4_ADD_BOT); -+ addr.four.top = readl(base + EMAC_SPEC4_ADD_TOP); -+ -+ return addr; -+} -+ -+/** Sets the hash register of the MAC. -+ * This register is used for matching unicast and multicast frames. -+ * -+ * @param[in] base GEMAC base address. -+ * @param[in] hash 64-bit hash to be configured. -+ */ -+void gemac_set_hash( void *base, MAC_ADDR *hash ) -+{ -+ writel(hash->bottom, base + EMAC_HASH_BOT); -+ writel(hash->top, base + EMAC_HASH_TOP); -+} -+ -+/** Get the current value hash register of the MAC. -+ * This register is used for matching unicast and multicast frames. -+ * -+ * @param[in] base GEMAC base address -+ -+ * @returns 64-bit hash. -+ */ -+MAC_ADDR gemac_get_hash( void *base ) -+{ -+ MAC_ADDR hash; -+ -+ hash.bottom = readl(base + EMAC_HASH_BOT); -+ hash.top = readl(base + EMAC_HASH_TOP); -+ -+ return hash; -+} -+ -+/** GEMAC set specific local addresses of the MAC. -+* Rather than setting up all four specific addresses, this function sets them up individually. 
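The specific-address registers above split a MAC address across a 32-bit bottom word and a top word. Assuming the conventional Cadence GEM layout (first four octets LSB-first in the bottom register, last two in the top; this layout is an assumption, not stated in the patch), a 6-byte address can be packed like this:

/* Packing sketch; assumes bottom = octets 0-3 (LSB first) and
 * top = octets 4-5, the conventional GEM layout. */
static MAC_ADDR pack_mac(const u8 *mac)
{
        MAC_ADDR a;

        a.bottom = mac[0] | (mac[1] << 8) | (mac[2] << 16) | ((u32)mac[3] << 24);
        a.top = mac[4] | (mac[5] << 8);

        return a;
}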
-+* -+* @param[in] base GEMAC base address -+* @param[in] addr MAC address to be configured -+*/ -+void gemac_set_laddr1(void *base, MAC_ADDR *address) -+{ -+ writel(address->bottom, base + EMAC_SPEC1_ADD_BOT); -+ writel(address->top, base + EMAC_SPEC1_ADD_TOP); -+} -+ -+ -+void gemac_set_laddr2(void *base, MAC_ADDR *address) -+{ -+ writel(address->bottom, base + EMAC_SPEC2_ADD_BOT); -+ writel(address->top, base + EMAC_SPEC2_ADD_TOP); -+} -+ -+ -+void gemac_set_laddr3(void *base, MAC_ADDR *address) -+{ -+ writel(address->bottom, base + EMAC_SPEC3_ADD_BOT); -+ writel(address->top, base + EMAC_SPEC3_ADD_TOP); -+} -+ -+ -+void gemac_set_laddr4(void *base, MAC_ADDR *address) -+{ -+ writel(address->bottom, base + EMAC_SPEC4_ADD_BOT); -+ writel(address->top, base + EMAC_SPEC4_ADD_TOP); -+} -+ -+void gemac_set_laddrN(void *base, MAC_ADDR *address, unsigned int entry_index) -+{ -+ if( (entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX) ) -+ return; -+ -+ entry_index = entry_index - 1; -+ -+ if (entry_index < 4) -+ { -+ writel(address->bottom, base + (entry_index * 8) + EMAC_SPEC1_ADD_BOT); -+ writel(address->top, base + (entry_index * 8) + EMAC_SPEC1_ADD_TOP); -+ } -+ else -+ { -+ writel(address->bottom, base + ((entry_index - 4) * 8) + EMAC_SPEC5_ADD_BOT); -+ writel(address->top, base + ((entry_index - 4) * 8) + EMAC_SPEC5_ADD_TOP); -+ } -+} -+ -+/** Get specific local addresses of the MAC. -+* This allows returning of a single specific address stored in the MAC. -+* @param[in] base GEMAC base address -+* -+* @return Specific MAC address 1 -+* -+*/ -+MAC_ADDR gem_get_laddr1(void *base) -+{ -+ MAC_ADDR addr; -+ addr.bottom = readl(base + EMAC_SPEC1_ADD_BOT); -+ addr.top = readl(base + EMAC_SPEC1_ADD_TOP); -+ return addr; -+} -+ -+ -+MAC_ADDR gem_get_laddr2(void *base) -+{ -+ MAC_ADDR addr; -+ addr.bottom = readl(base + EMAC_SPEC2_ADD_BOT); -+ addr.top = readl(base + EMAC_SPEC2_ADD_TOP); -+ return addr; -+} -+ -+ -+MAC_ADDR gem_get_laddr3(void *base) -+{ -+ MAC_ADDR addr; -+ addr.bottom = readl(base + EMAC_SPEC3_ADD_BOT); -+ addr.top = readl(base + EMAC_SPEC3_ADD_TOP); -+ return addr; -+} -+ -+ -+MAC_ADDR gem_get_laddr4(void *base) -+{ -+ MAC_ADDR addr; -+ addr.bottom = readl(base + EMAC_SPEC4_ADD_BOT); -+ addr.top = readl(base + EMAC_SPEC4_ADD_TOP); -+ return addr; -+} -+ -+MAC_ADDR gem_get_laddrN(void *base, unsigned int entry_index) -+{ -+ MAC_ADDR addr = {0xffffffff, 0xffffffff}; -+ -+ if( (entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX) ) -+ return addr; -+ -+ entry_index = entry_index - 1; -+ -+ if (entry_index < 4) -+ { -+ addr.bottom = readl(base + (entry_index * 8) + EMAC_SPEC1_ADD_BOT); -+ addr.top = readl(base + (entry_index * 8) + EMAC_SPEC1_ADD_TOP); -+ } -+ else -+ { -+ addr.bottom = readl(base + ((entry_index - 4) * 8) + EMAC_SPEC5_ADD_BOT); -+ addr.top = readl(base + ((entry_index - 4) * 8) + EMAC_SPEC5_ADD_TOP); -+ } -+ -+ return addr; -+} -+ -+/** Clear specific local addresses of the MAC. 
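gemac_set_laddrN and gem_get_laddrN above convert the 1-based entry_index into a register pair: entries 1-4 sit at 8-byte strides from EMAC_SPEC1_ADD_BOT, entries 5 and up at 8-byte strides from EMAC_SPEC5_ADD_BOT. The offset arithmetic, distilled with worked values:

/* Traced: entry_index = 2 -> i = 1 -> EMAC_SPEC1_ADD_BOT + 8 (SPEC2)
 *         entry_index = 4 -> i = 3 -> EMAC_SPEC1_ADD_BOT + 24 (SPEC4)
 *         entry_index = 6 -> i = 5 -> EMAC_SPEC5_ADD_BOT + 8 (SPEC6) */
static u32 laddr_bot_offset(unsigned int entry_index)
{
        unsigned int i = entry_index - 1;   /* entry_index is 1-based */

        return (i < 4) ? i * 8 + EMAC_SPEC1_ADD_BOT
                       : (i - 4) * 8 + EMAC_SPEC5_ADD_BOT;
}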
-+ * @param[in] base GEMAC base address -+ */ -+ -+void gemac_clear_laddr1(void *base) -+{ -+ writel(0, base + EMAC_SPEC1_ADD_BOT); -+} -+ -+void gemac_clear_laddr2(void *base) -+{ -+ writel(0, base + EMAC_SPEC2_ADD_BOT); -+} -+ -+void gemac_clear_laddr3(void *base) -+{ -+ writel(0, base + EMAC_SPEC3_ADD_BOT); -+} -+ -+void gemac_clear_laddr4(void *base) -+{ -+ writel(0, base + EMAC_SPEC4_ADD_BOT); -+} -+ -+void gemac_clear_laddrN(void *base, unsigned int entry_index) -+{ -+ if( (entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX) ) -+ return; -+ -+ entry_index = entry_index - 1; -+ -+ if ( entry_index < 4 ) -+ writel(0, base + (entry_index * 8) + EMAC_SPEC1_ADD_BOT); -+ else -+ writel(0, base + ((entry_index - 4) * 8) + EMAC_SPEC5_ADD_BOT); -+} -+ -+/** Set the loopback mode of the MAC. This can be either no loopback for normal -+ * operation, local loopback through MAC internal loopback module or PHY -+ * loopback for external loopback through a PHY. This asserts the external loop -+ * pin. -+ * -+ * @param[in] base GEMAC base address. -+ * @param[in] gem_loop Loopback mode to be enabled. LB_LOCAL - MAC Loopback, -+ * LB_EXT - PHY Loopback. -+ */ -+void gemac_set_loop( void *base, MAC_LOOP gem_loop ) -+{ -+ switch (gem_loop) { -+ case LB_LOCAL: -+ writel(readl(base + EMAC_NETWORK_CONTROL) & (~EMAC_LB_PHY), -+ base + EMAC_NETWORK_CONTROL); -+ writel(readl(base + EMAC_NETWORK_CONTROL) | (EMAC_LB_MAC), -+ base + EMAC_NETWORK_CONTROL); -+ break; -+ case LB_EXT: -+ writel(readl(base + EMAC_NETWORK_CONTROL) & (~EMAC_LB_MAC), -+ base + EMAC_NETWORK_CONTROL); -+ writel(readl(base + EMAC_NETWORK_CONTROL) | (EMAC_LB_PHY), -+ base + EMAC_NETWORK_CONTROL); -+ break; -+ default: -+ writel(readl(base + EMAC_NETWORK_CONTROL) & (~(EMAC_LB_MAC | EMAC_LB_PHY)), -+ base + EMAC_NETWORK_CONTROL); -+ } -+} -+ -+/** GEMAC allow frames -+ * @param[in] base GEMAC base address -+ */ -+void gemac_enable_copy_all(void *base) -+{ -+ writel (readl(base + EMAC_NETWORK_CONFIG) | EMAC_ENABLE_COPY_ALL, base + EMAC_NETWORK_CONFIG); -+} -+ -+/** GEMAC do not allow frames -+ * @param[in] base GEMAC base address -+*/ -+void gemac_disable_copy_all(void *base) -+{ -+ writel (readl(base + EMAC_NETWORK_CONFIG) & ~EMAC_ENABLE_COPY_ALL, base + EMAC_NETWORK_CONFIG); -+} -+ -+/** GEMAC allow broadcast function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_allow_broadcast(void *base) -+{ -+ writel (readl(base + EMAC_NETWORK_CONFIG) & ~EMAC_NO_BROADCAST, base + EMAC_NETWORK_CONFIG); -+} -+ -+/** GEMAC no broadcast function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_no_broadcast(void *base) -+{ -+ writel (readl(base + EMAC_NETWORK_CONFIG) | EMAC_NO_BROADCAST, base + EMAC_NETWORK_CONFIG); -+} -+ -+/** GEMAC enable unicast function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_enable_unicast(void *base) -+{ -+ writel (readl(base + EMAC_NETWORK_CONFIG) | EMAC_ENABLE_UNICAST, base + EMAC_NETWORK_CONFIG); -+} -+ -+/** GEMAC disable unicast function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_disable_unicast(void *base) -+{ -+ writel (readl(base + EMAC_NETWORK_CONFIG) & ~EMAC_ENABLE_UNICAST, base + EMAC_NETWORK_CONFIG); -+} -+ -+/** GEMAC enable multicast function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_enable_multicast(void *base) -+{ -+ writel (readl(base + EMAC_NETWORK_CONFIG) | EMAC_ENABLE_MULTICAST, base + EMAC_NETWORK_CONFIG); -+} -+ -+/** GEMAC disable multicast function. 
-+* @param[in] base GEMAC base address -+*/ -+void gemac_disable_multicast(void *base) -+{ -+ writel (readl(base + EMAC_NETWORK_CONFIG) & ~EMAC_ENABLE_MULTICAST, base + EMAC_NETWORK_CONFIG); -+} -+ -+/** GEMAC enable fcs rx function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_enable_fcs_rx(void *base) -+{ -+ writel (readl(base + EMAC_NETWORK_CONFIG) | EMAC_ENABLE_FCS_RX, base + EMAC_NETWORK_CONFIG); -+} -+ -+/** GEMAC disable fcs rx function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_disable_fcs_rx(void *base) -+{ -+ writel (readl(base + EMAC_NETWORK_CONFIG) & ~EMAC_ENABLE_FCS_RX, base + EMAC_NETWORK_CONFIG); -+} -+ -+/** GEMAC enable 1536 rx function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_enable_1536_rx(void *base) -+{ -+ writel (readl(base + EMAC_NETWORK_CONFIG) | EMAC_ENABLE_1536_RX, base + EMAC_NETWORK_CONFIG); -+} -+ -+/** GEMAC disable 1536 rx function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_disable_1536_rx(void *base) -+{ -+ writel (readl(base + EMAC_NETWORK_CONFIG) & ~EMAC_ENABLE_1536_RX, base + EMAC_NETWORK_CONFIG); -+} -+ -+/** GEMAC enable jumbo function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_enable_rx_jmb(void *base) -+{ -+ writel (readl(base + EMAC_NETWORK_CONFIG) | EMAC_ENABLE_JUMBO_FRAME, base + EMAC_NETWORK_CONFIG); -+} -+ -+/** GEMAC disable jumbo function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_disable_rx_jmb(void *base) -+{ -+ writel (readl(base + EMAC_NETWORK_CONFIG) & ~EMAC_ENABLE_JUMBO_FRAME, base + EMAC_NETWORK_CONFIG); -+} -+ -+/** GEMAC enable stacked vlan function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_enable_stacked_vlan(void *base) -+{ -+ writel (readl(base + EMAC_STACKED_VLAN_REG) | EMAC_ENABLE_STACKED_VLAN, base + EMAC_STACKED_VLAN_REG); -+} -+ -+/** GEMAC disable stacked vlan function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_disable_stacked_vlan(void *base) -+{ -+ writel (readl(base + EMAC_STACKED_VLAN_REG) & ~EMAC_ENABLE_STACKED_VLAN, base + EMAC_STACKED_VLAN_REG); -+} -+ -+/** GEMAC enable pause rx function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_enable_pause_rx(void *base) -+{ -+ writel (readl(base + EMAC_NETWORK_CONFIG) | EMAC_ENABLE_PAUSE_RX, base + EMAC_NETWORK_CONFIG); -+} -+ -+/** GEMAC disable pause rx function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_disable_pause_rx(void *base) -+{ -+ writel (readl(base + EMAC_NETWORK_CONFIG) & ~EMAC_ENABLE_PAUSE_RX, base + EMAC_NETWORK_CONFIG); -+} -+ -+/** GEMAC wol configuration -+* @param[in] base GEMAC base address -+* @param[in] wol_conf WoL register configuration -+*/ -+void gemac_set_wol(void *base, u32 wol_conf) -+{ -+ writel(wol_conf, base + EMAC_WOL); -+} -+ -+/** Sets GEMAC data bus width. -+ * @param[in] base GEMAC base address -+ * @param[in] width GEMAC bus width to be set; possible values are 32, 64 or 128 -+ * */ -+void gemac_set_bus_width(void *base, int width) -+{ -+ u32 val = readl(base + EMAC_NETWORK_CONFIG); -+ switch(width) -+ { -+ case 32: -+ val = (val & ~EMAC_DATA_BUS_WIDTH_MASK) | EMAC_DATA_BUS_WIDTH_32; -+ break; -+ case 128: -+ val = (val & ~EMAC_DATA_BUS_WIDTH_MASK) | EMAC_DATA_BUS_WIDTH_128; -+ break; -+ case 64: -+ default: -+ val = (val & ~EMAC_DATA_BUS_WIDTH_MASK) | EMAC_DATA_BUS_WIDTH_64; -+ break; -+ } -+ writel (val, base + EMAC_NETWORK_CONFIG); -+} -+ -+/** Sets Gemac configuration.
-+* @param[in] base GEMAC base address -+* @param[in] cfg GEMAC configuration -+*/ -+void gemac_set_config(void *base, GEMAC_CFG *cfg) -+{ -+ gemac_set_mode(base, cfg->mode); -+ -+ gemac_set_speed(base, cfg->speed); -+ -+ gemac_set_duplex(base,cfg->duplex); -+} -+#elif defined(CONFIG_PLATFORM_LS1012A) -+/**************************** MTIP GEMAC ***************************/ -+ -+/** Enable Rx Checksum Engine. With this enabled, Frame with bad IP, -+ * TCP or UDP checksums are discarded -+ * -+ * @param[in] base GEMAC base address. -+ */ -+void gemac_enable_rx_checksum_offload(void *base) -+{ -+ /*Do not find configuration to do this */ -+} -+ -+/** Disable Rx Checksum Engine. -+ * -+ * @param[in] base GEMAC base address. -+ */ -+void gemac_disable_rx_checksum_offload(void *base) -+{ -+ /*Do not find configuration to do this */ -+} -+ -+/** GEMAC set speed. -+* @param[in] base GEMAC base address -+* @param[in] speed GEMAC speed (10, 100 or 1000 Mbps) -+*/ -+void gemac_set_speed(void *base, MAC_SPEED gem_speed) -+{ -+ u32 ecr = readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_SPEED; -+ u32 rcr = readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_RMII_10T; -+ -+ switch (gem_speed) -+ { -+ case SPEED_10M: -+ rcr |= EMAC_RCNTRL_RMII_10T; -+ break; -+ -+ -+ case SPEED_1000M: -+ ecr |= EMAC_ECNTRL_SPEED; -+ break; -+ -+ case SPEED_100M: -+ default: -+ /*It is in 100M mode */ -+ break; -+ } -+ writel(ecr, (base + EMAC_ECNTRL_REG)); -+ writel(rcr, (base + EMAC_RCNTRL_REG)); -+} -+ -+/** GEMAC set duplex. -+* @param[in] base GEMAC base address -+* @param[in] duplex GEMAC duplex mode (Full, Half) -+*/ -+void gemac_set_duplex(void *base, int duplex) -+{ -+ -+ if (duplex == DUPLEX_HALF) { -+ printk("%s() TODO\n", __func__); -+ writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_FDEN, base + EMAC_TCNTRL_REG); -+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_DRT, (base + EMAC_RCNTRL_REG)); -+ }else{ -+ writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_FDEN, base + EMAC_TCNTRL_REG); -+ writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_DRT, (base + EMAC_RCNTRL_REG)); -+ } -+} -+ -+/** GEMAC set mode. -+* @param[in] base GEMAC base address -+* @param[in] mode GEMAC operation mode (MII, RMII, RGMII, SGMII) -+*/ -+void gemac_set_mode(void *base, int mode) -+{ -+ u32 val = readl(base + EMAC_RCNTRL_REG); -+ -+ /*Remove loopbank*/ -+ val &= ~EMAC_RCNTRL_LOOP; -+ -+ /*Enable flow control and MII mode*/ -+ val |= (EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE); -+ -+ writel(val, base + EMAC_RCNTRL_REG); -+} -+ -+/** GEMAC enable function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_enable(void *base) -+{ -+ writel(readl(base + EMAC_ECNTRL_REG) | EMAC_ECNTRL_ETHER_EN, base + EMAC_ECNTRL_REG); -+} -+ -+/** GEMAC disable function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_disable(void *base) -+{ -+ writel(readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_ETHER_EN, base + EMAC_ECNTRL_REG); -+} -+ -+/** GEMAC TX disable function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_tx_disable(void *base) -+{ -+ writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_GTS, base + EMAC_TCNTRL_REG); -+} -+ -+/** Sets the hash register of the MAC. -+ * This register is used for matching unicast and multicast frames. -+ * -+ * @param[in] base GEMAC base address. -+ * @param[in] hash 64-bit hash to be configured. 
-+ */ -+void gemac_set_hash( void *base, MAC_ADDR *hash ) -+{ -+ writel(hash->bottom, base + EMAC_GALR); -+ writel(hash->top, base + EMAC_GAUR); -+} -+ -+void gemac_set_laddrN(void *base, MAC_ADDR *address, unsigned int entry_index) -+{ -+ if( (entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX) ) -+ return; -+ -+ entry_index = entry_index - 1; -+ if (entry_index < 1) { -+ writel(htonl(address->bottom), base + EMAC_PHY_ADDR_LOW); -+ writel((htonl(address->top) | 0x8808), base + EMAC_PHY_ADDR_HIGH); -+ } -+ else -+ { -+ /* TODO for other entry_index */ -+ /*printk("%s for entry_index %d \n",__func__, entry_index); */ -+ writel(htonl(address->bottom), base + ((entry_index - 1) * 8) + EMAC_SMAC_0_0); -+ writel((htonl(address->top) | 0x8808), base + ((entry_index - 1) * 8) + EMAC_SMAC_0_1); -+ } -+ -+} -+ -+void gemac_clear_laddrN(void *base, unsigned int entry_index) -+{ -+ if( (entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX) ) -+ return; -+ -+ entry_index = entry_index - 1; -+ if (entry_index < 1) { -+ writel(0, base + EMAC_PHY_ADDR_LOW); -+ writel(0, base + EMAC_PHY_ADDR_HIGH); -+ } -+ else -+ { -+ writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_0); -+ writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_1); -+ } -+ -+ -+} -+ -+/** Set the loopback mode of the MAC. This can be either no loopback for normal -+ * operation, local loopback through MAC internal loopback module or PHY -+ * loopback for external loopback through a PHY. This asserts the external loop -+ * pin. -+ * -+ * @param[in] base GEMAC base address. -+ * @param[in] gem_loop Loopback mode to be enabled. LB_LOCAL - MAC Loopback, -+ * LB_EXT - PHY Loopback. -+ */ -+void gemac_set_loop( void *base, MAC_LOOP gem_loop ) -+{ -+ printk("%s()\n", __func__); -+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_LOOP, (base + EMAC_RCNTRL_REG)); -+} -+ -+ -+/** GEMAC allow frames -+ * @param[in] base GEMAC base address -+ */ -+void gemac_enable_copy_all(void *base) -+{ -+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_PROM, (base + EMAC_RCNTRL_REG)); -+} -+ -+/** GEMAC do not allow frames -+ * @param[in] base GEMAC base address -+*/ -+void gemac_disable_copy_all(void *base) -+{ -+ writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_PROM, (base + EMAC_RCNTRL_REG)); -+} -+ -+/** GEMAC allow broadcast function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_allow_broadcast(void *base) -+{ -+ writel (readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_BC_REJ, base + EMAC_RCNTRL_REG); -+} -+ -+/** GEMAC no broadcast function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_no_broadcast(void *base) -+{ -+ writel (readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_BC_REJ, base + EMAC_RCNTRL_REG); -+} -+ -+/** GEMAC enable unicast function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_enable_unicast(void *base) -+{ -+ return; -+} -+ -+/** GEMAC disable unicast function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_disable_unicast(void *base) -+{ -+ return; -+} -+ -+/** GEMAC enable multicast function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_enable_multicast(void *base) -+{ -+ return; -+} -+ -+/** GEMAC disable multicast function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_disable_multicast(void *base) -+{ -+ /* TODO how to disable multicast? */ -+ return; -+} -+ -+/** GEMAC enable fcs rx function. 
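In the MTIP gemac_set_laddrN above, the first entry goes to the EMAC_PHY_ADDR_LOW/HIGH pair with the upper word OR'd with 0x8808. That constant matches the MAC control (pause) frame EtherType, which FEC-style MACs keep in the same register as the two high address octets; the htonl() calls account for the byte order this register family expects, unlike the Cadence path. A trace of what lands in the two registers (a sketch that assumes the LSB-first bottom/top packing shown earlier, on a little-endian host):

/* Trace for mac 00:11:22:33:44:55:
 * bottom = 0x33221100, htonl(bottom) = 0x00112233 -> EMAC_PHY_ADDR_LOW
 * top    = 0x00005544, htonl(top)    = 0x44550000
 * 0x44550000 | 0x8808  = 0x44558808  -> EMAC_PHY_ADDR_HIGH
 * (0x8808 being the pause-frame EtherType carried in the low half) */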
-+* @param[in] base GEMAC base address -+*/ -+void gemac_enable_fcs_rx(void *base) -+{ -+ /*Do not find configuration to do this */ -+} -+ -+/** GEMAC disable fcs rx function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_disable_fcs_rx(void *base) -+{ -+ /*Do not find configuration to do this */ -+} -+ -+ -+/** GEMAC enable 1536 rx function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_enable_1536_rx(void *base) -+{ -+ /* Set 1536 as Maximum frame length */ -+ writel (readl(base + EMAC_RCNTRL_REG) | (1536 << 16), base + EMAC_RCNTRL_REG); -+} -+ -+/** GEMAC enable jumbo function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_enable_rx_jmb(void *base) -+{ -+ /*TODO what is the jumbo size supported by MTIP */ -+ writel (readl(base + EMAC_RCNTRL_REG) | (JUMBO_FRAME_SIZE << 16), base + EMAC_RCNTRL_REG); -+} -+ -+/** GEMAC enable stacked vlan function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_enable_stacked_vlan(void *base) -+{ -+ /* MTIP doesn't support stacked vlan */ -+ return; -+} -+ -+/** GEMAC enable pause rx function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_enable_pause_rx(void *base) -+{ -+ writel (readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_FCE, base + EMAC_RCNTRL_REG); -+} -+ -+/** GEMAC disable pause rx function. -+* @param[in] base GEMAC base address -+*/ -+void gemac_disable_pause_rx(void *base) -+{ -+ writel (readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_FCE, base + EMAC_RCNTRL_REG); -+} -+ -+/** GEMAC wol configuration -+* @param[in] base GEMAC base address -+* @param[in] wol_conf WoL register configuration -+*/ -+void gemac_set_wol(void *base, u32 wol_conf) -+{ -+ printk("%s() TODO\n", __func__); -+} -+ -+/** Sets Gemac bus width to 64bit -+ * @param[in] base GEMAC base address -+ * @param[in] width gemac bus width to be set possible values are 32/64/128 -+ * */ -+void gemac_set_bus_width(void *base, int width) -+{ -+} -+ -+/** Sets Gemac configuration. -+* @param[in] base GEMAC base address -+* @param[in] cfg GEMAC configuration -+*/ -+void gemac_set_config(void *base, GEMAC_CFG *cfg) -+{ -+ -+ /*GEMAC config taken from VLSI */ -+ writel(0x00000004, base + EMAC_TFWR_STR_FWD); -+ writel(0x00000005, base + EMAC_RX_SECTIOM_FULL); -+ writel(0x00003fff, base + EMAC_TRUNC_FL); -+ writel(0x00000030, base + EMAC_TX_SECTION_EMPTY); -+ writel(0x00000000, base + EMAC_MIB_CTRL_STS_REG); -+ -+ gemac_set_mode(base, cfg->mode); -+ -+ gemac_set_speed(base, cfg->speed); -+ -+ gemac_set_duplex(base,cfg->duplex); -+} -+ -+ -+#endif //CONFIG_PLATFORM_LS1012A) -+ -+ -+ -+/**************************** GPI ***************************/ -+ -+/** Initializes a GPI block. -+* @param[in] base GPI base address -+* @param[in] cfg GPI configuration -+*/ -+void gpi_init(void *base, GPI_CFG *cfg) -+{ -+ gpi_reset(base); -+ -+ gpi_disable(base); -+ -+ gpi_set_config(base, cfg); -+} -+ -+/** Resets a GPI block. -+* @param[in] base GPI base address -+*/ -+void gpi_reset(void *base) -+{ -+ writel (CORE_SW_RESET, base + GPI_CTRL); -+} -+ -+/** Enables a GPI block. -+* @param[in] base GPI base address -+*/ -+void gpi_enable(void *base) -+{ -+ writel (CORE_ENABLE, base + GPI_CTRL); -+} -+ -+/** Disables a GPI block. -+* @param[in] base GPI base address -+*/ -+void gpi_disable(void *base) -+{ -+ writel (CORE_DISABLE, base + GPI_CTRL); -+} -+ -+ -+/** Sets the configuration of a GPI block. 
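gpi_init above resets the block, leaves it disabled, then programs it; as with the BMU, enabling is a separate, later step. A bring-up sketch (gpi_base and the cfg values are illustrative placeholders; the field names are those consumed by gpi_set_config, shown just below):

/* Illustrative GPI bring-up. */
GPI_CFG gpi_cfg = {
        .lmem_rtry_cnt = 8,     /* LMEM allocation retry count (placeholder) */
        .tmlf_txthres = 0x100,  /* TMLF TX threshold (placeholder) */
        .aseq_len = 0x40,       /* DTX assert sequence length (placeholder) */
};

gpi_init(gpi_base, &gpi_cfg);
gpi_enable(gpi_base);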
-+* @param[in] base GPI base address -+* @param[in] cfg GPI configuration -+*/ -+void gpi_set_config(void *base, GPI_CFG *cfg) -+{ -+ writel (CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL), base + GPI_LMEM_ALLOC_ADDR); -+ writel (CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_FREE_CTRL), base + GPI_LMEM_FREE_ADDR); -+ writel (CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_ALLOC_CTRL), base + GPI_DDR_ALLOC_ADDR); -+ writel (CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL), base + GPI_DDR_FREE_ADDR); -+ writel (CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), base + GPI_CLASS_ADDR); -+ writel (DDR_HDR_SIZE, base + GPI_DDR_DATA_OFFSET); -+ writel (LMEM_HDR_SIZE, base + GPI_LMEM_DATA_OFFSET); -+ writel (0, base + GPI_LMEM_SEC_BUF_DATA_OFFSET); -+ writel (0, base + GPI_DDR_SEC_BUF_DATA_OFFSET); -+ writel ((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, base + GPI_HDR_SIZE); -+ writel ((DDR_BUF_SIZE << 16) | LMEM_BUF_SIZE, base + GPI_BUF_SIZE); -+ -+ writel (((cfg->lmem_rtry_cnt << 16) | (GPI_DDR_BUF_EN << 1) | GPI_LMEM_BUF_EN), base + GPI_RX_CONFIG); -+ writel (cfg->tmlf_txthres, base + GPI_TMLF_TX); -+ writel (cfg->aseq_len, base + GPI_DTX_ASEQ); -+ writel (1, base + GPI_TOE_CHKSUM_EN); -+} -+ -+/**************************** CLASSIFIER ***************************/ -+ -+/** Initializes CLASSIFIER block. -+* @param[in] cfg CLASSIFIER configuration -+*/ -+void class_init(CLASS_CFG *cfg) -+{ -+ class_reset(); -+ -+ class_disable(); -+ -+ class_set_config(cfg); -+} -+ -+/** Resets CLASSIFIER block. -+* -+*/ -+void class_reset(void) -+{ -+ writel(CORE_SW_RESET, CLASS_TX_CTRL); -+} -+ -+/** Enables all CLASS-PE's cores. -+* -+*/ -+void class_enable(void) -+{ -+ writel(CORE_ENABLE, CLASS_TX_CTRL); -+} -+ -+/** Disables all CLASS-PE's cores. -+* -+*/ -+void class_disable(void) -+{ -+ writel(CORE_DISABLE, CLASS_TX_CTRL); -+} -+ -+/** Sets the configuration of the CLASSIFIER block. -+* @param[in] cfg CLASSIFIER configuration -+*/ -+void class_set_config(CLASS_CFG *cfg) -+{ -+ u32 val; -+ -+ /* Initialize route table */ -+ if (!cfg->resume) -+ memset(DDR_PHYS_TO_VIRT(cfg->route_table_baseaddr), 0, (1 << cfg->route_table_hash_bits) * CLASS_ROUTE_SIZE); -+ -+#if !defined(LS1012A_PFE_RESET_WA) -+ writel(cfg->pe_sys_clk_ratio, CLASS_PE_SYS_CLK_RATIO); -+#endif -+ -+ writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, CLASS_HDR_SIZE); -+ writel(LMEM_BUF_SIZE, CLASS_LMEM_BUF_SIZE); -+ writel(CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) | CLASS_ROUTE_HASH_SIZE(cfg->route_table_hash_bits), CLASS_ROUTE_HASH_ENTRY_SIZE); -+ writel(HIF_PKT_CLASS_EN| HIF_PKT_OFFSET(sizeof(struct hif_hdr)), CLASS_HIF_PARSE); -+ -+ val = HASH_CRC_PORT_IP | QB2BUS_LE; -+ -+#if defined(CONFIG_IP_ALIGNED) -+ val |= IP_ALIGNED; -+#endif -+ -+ /* Class PE packet steering will only work if TOE mode, bridge fetch or -+ * route fetch are enabled (see class/qb_fet.v). Route fetch would trigger -+ * additional memory copies (likely from DDR because of hash table size, which -+ * cannot be reduced because PE software still relies on hash value computed -+ * in HW), so when not in TOE mode we simply enable HW bridge fetch even -+ * though we don't use it. 
-+ */ -+ if (cfg->toe_mode) -+ val |= CLASS_TOE; -+ else -+ val |= HW_BRIDGE_FETCH; -+ -+ writel(val, CLASS_ROUTE_MULTI); -+ -+ writel(DDR_PHYS_TO_PFE(cfg->route_table_baseaddr), CLASS_ROUTE_TABLE_BASE); -+ writel(CLASS_PE0_RO_DM_ADDR0_VAL, CLASS_PE0_RO_DM_ADDR0); -+ writel(CLASS_PE0_RO_DM_ADDR1_VAL, CLASS_PE0_RO_DM_ADDR1); -+ writel(CLASS_PE0_QB_DM_ADDR0_VAL, CLASS_PE0_QB_DM_ADDR0); -+ writel(CLASS_PE0_QB_DM_ADDR1_VAL, CLASS_PE0_QB_DM_ADDR1); -+ writel(CBUS_VIRT_TO_PFE(TMU_PHY_INQ_PKTPTR), CLASS_TM_INQ_ADDR); -+ -+ writel(23, CLASS_AFULL_THRES); -+ writel(23, CLASS_TSQ_FIFO_THRES); -+ -+ writel(24, CLASS_MAX_BUF_CNT); -+ writel(24, CLASS_TSQ_MAX_CNT); -+} -+ -+/**************************** TMU ***************************/ -+ -+void tmu_reset(void) -+{ -+ writel(SW_RESET, TMU_CTRL); -+} -+ -+/** Initializes TMU block. -+* @param[in] cfg TMU configuration -+*/ -+void tmu_init(TMU_CFG *cfg) -+{ -+ int q, phyno; -+ -+ tmu_disable(0xF); -+ mdelay(10); -+ -+#if !defined(LS1012A_PFE_RESET_WA) -+ /* keep in soft reset */ -+ writel(SW_RESET, TMU_CTRL); -+#endif -+ writel(0x3, TMU_SYS_GENERIC_CONTROL); -+ writel(750, TMU_INQ_WATERMARK); -+ writel(CBUS_VIRT_TO_PFE(EGPI1_BASE_ADDR + GPI_INQ_PKTPTR), TMU_PHY0_INQ_ADDR); -+ writel(CBUS_VIRT_TO_PFE(EGPI2_BASE_ADDR + GPI_INQ_PKTPTR), TMU_PHY1_INQ_ADDR); -+#if !defined(CONFIG_PLATFORM_LS1012A) -+ writel(CBUS_VIRT_TO_PFE(EGPI3_BASE_ADDR + GPI_INQ_PKTPTR), TMU_PHY2_INQ_ADDR); -+#endif -+ writel(CBUS_VIRT_TO_PFE(HGPI_BASE_ADDR + GPI_INQ_PKTPTR), TMU_PHY3_INQ_ADDR); -+ writel(CBUS_VIRT_TO_PFE(HIF_NOCPY_RX_INQ0_PKTPTR), TMU_PHY4_INQ_ADDR); -+ writel(CBUS_VIRT_TO_PFE(UTIL_INQ_PKTPTR), TMU_PHY5_INQ_ADDR); -+ writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL), TMU_BMU_INQ_ADDR); -+ -+ writel(0x3FF, TMU_TDQ0_SCH_CTRL); // enabling all 10 schedulers [9:0] of each TDQ -+ writel(0x3FF, TMU_TDQ1_SCH_CTRL); -+#if !defined(CONFIG_PLATFORM_LS1012A) -+ writel(0x3FF, TMU_TDQ2_SCH_CTRL); -+#endif -+ writel(0x3FF, TMU_TDQ3_SCH_CTRL); -+ -+#if !defined(LS1012A_PFE_RESET_WA) -+ writel(cfg->pe_sys_clk_ratio, TMU_PE_SYS_CLK_RATIO); -+#endif -+ -+#if !defined(LS1012A_PFE_RESET_WA) -+ writel(DDR_PHYS_TO_PFE(cfg->llm_base_addr), TMU_LLM_BASE_ADDR); // Extra packet pointers will be stored from this address onwards -+ -+ writel(cfg->llm_queue_len, TMU_LLM_QUE_LEN); -+ writel(5, TMU_TDQ_IIFG_CFG); -+ writel(DDR_BUF_SIZE, TMU_BMU_BUF_SIZE); -+ -+ writel(0x0, TMU_CTRL); -+ -+ /* MEM init */ -+ printk(KERN_INFO "%s: mem init\n", __func__); -+ writel(MEM_INIT, TMU_CTRL); -+ -+ while(!(readl(TMU_CTRL) & MEM_INIT_DONE)) ; -+ -+ /* LLM init */ -+ printk(KERN_INFO "%s: lmem init\n", __func__); -+ writel(LLM_INIT, TMU_CTRL); -+ -+ while(!(readl(TMU_CTRL) & LLM_INIT_DONE)) ; -+#endif -+ // set up each queue for tail drop -+ for (phyno = 0; phyno < 4; phyno++) -+ { -+#if defined(CONFIG_PLATFORM_LS1012A) -+ if(phyno == 2) continue; -+#endif -+ for (q = 0; q < 16; q++) -+ { -+ u32 qdepth; -+ writel((phyno << 8) | q, TMU_TEQ_CTRL); -+ writel(1 << 22, TMU_TEQ_QCFG); //Enable tail drop -+ -+ if (phyno == 3) -+ qdepth = DEFAULT_TMU3_QDEPTH; -+ else -+ qdepth = (q == 0) ? DEFAULT_Q0_QDEPTH : DEFAULT_MAX_QDEPTH; -+ -+ // LOG: 68855 -+ // The following is a workaround for the reordered packet and BMU2 buffer leakage issue. 
-+ if (CHIP_REVISION() == 0) -+ qdepth = 31; -+ -+ writel(qdepth << 18, TMU_TEQ_HW_PROB_CFG2); -+ writel(qdepth >> 14, TMU_TEQ_HW_PROB_CFG3); -+ } -+ } -+ -+#ifdef CFG_LRO -+ /* Set TMU-3 queue 5 (LRO) in no-drop mode */ -+ writel((3 << 8) | TMU_QUEUE_LRO, TMU_TEQ_CTRL); -+ writel(0, TMU_TEQ_QCFG); -+#endif -+ -+ writel(0x05, TMU_TEQ_DISABLE_DROPCHK); -+ -+ writel(0x0, TMU_CTRL); -+} -+ -+/** Enables TMU-PE cores. -+* @param[in] pe_mask TMU PE mask -+*/ -+void tmu_enable(u32 pe_mask) -+{ -+ writel(readl(TMU_TX_CTRL) | (pe_mask & 0xF), TMU_TX_CTRL); -+} -+ -+/** Disables TMU cores. -+* @param[in] pe_mask TMU PE mask -+*/ -+void tmu_disable(u32 pe_mask) -+{ -+ writel(readl(TMU_TX_CTRL) & ~(pe_mask & 0xF), TMU_TX_CTRL); -+} -+/** This will return the tmu queue status -+ * @param[in] if_id gem interface id or TMU index -+ * @return returns the bit mask of busy queues, zero means all queues are empty -+ */ -+u32 tmu_qstatus(u32 if_id) -+{ -+ return cpu_to_be32(pe_dmem_read(TMU0_ID+if_id, PESTATUS_ADDR_TMU + offsetof(PE_STATUS, tmu_qstatus), 4)); -+} -+ -+u32 tmu_pkts_processed(u32 if_id) -+{ -+ return cpu_to_be32(pe_dmem_read(TMU0_ID+if_id, PESTATUS_ADDR_TMU + offsetof(PE_STATUS, rx), 4)); -+} -+/**************************** UTIL ***************************/ -+ -+/** Resets UTIL block. -+*/ -+void util_reset(void) -+{ -+ writel(CORE_SW_RESET, UTIL_TX_CTRL); -+} -+ -+/** Initializes UTIL block. -+* @param[in] cfg UTIL configuration -+*/ -+void util_init(UTIL_CFG *cfg) -+{ -+ writel(cfg->pe_sys_clk_ratio, UTIL_PE_SYS_CLK_RATIO); -+} -+ -+/** Enables UTIL-PE core. -+* -+*/ -+void util_enable(void) -+{ -+ writel(CORE_ENABLE, UTIL_TX_CTRL); -+} -+ -+/** Disables UTIL-PE core. -+* -+*/ -+void util_disable(void) -+{ -+ writel(CORE_DISABLE, UTIL_TX_CTRL); -+} -+ -+/**************************** HIF ***************************/ -+ -+/** Initializes HIF no copy block. -+* -+*/ -+void hif_nocpy_init(void) -+{ -+ writel(4, HIF_NOCPY_TX_PORT_NO); -+ writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL), HIF_NOCPY_LMEM_ALLOC_ADDR); -+ writel(CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), HIF_NOCPY_CLASS_ADDR); -+ writel(CBUS_VIRT_TO_PFE(TMU_PHY_INQ_PKTPTR), HIF_NOCPY_TMU_PORT0_ADDR); -+ writel(HIF_RX_POLL_CTRL_CYCLE<<16|HIF_TX_POLL_CTRL_CYCLE, HIF_NOCPY_POLL_CTRL); -+} -+ -+/** Enable hif_nocpy tx DMA and interrupt -+* -+*/ -+void hif_nocpy_tx_enable(void) -+{ -+ /*TODO not sure poll_cntrl_en is required or not */ -+ writel( HIF_CTRL_DMA_EN, HIF_NOCPY_TX_CTRL); -+ //writel((readl(HIF_NOCPY_INT_ENABLE) | HIF_INT_EN | HIF_TXPKT_INT_EN), HIF_NOCPY_INT_ENABLE); -+} -+ -+/** Disable hif_nocpy tx DMA and interrupt -+* -+*/ -+void hif_nocpy_tx_disable(void) -+{ -+ u32 hif_int; -+ -+ writel(0, HIF_NOCPY_TX_CTRL); -+ -+ hif_int = readl(HIF_NOCPY_INT_ENABLE); -+ hif_int &= HIF_TXPKT_INT_EN; -+ writel(hif_int, HIF_NOCPY_INT_ENABLE); -+} -+ -+/** Enable hif rx DMA and interrupt -+* -+*/ -+void hif_nocpy_rx_enable(void) -+{ -+ hif_nocpy_rx_dma_start(); -+ writel((readl(HIF_NOCPY_INT_ENABLE) | HIF_INT_EN | HIF_RXPKT_INT_EN), HIF_NOCPY_INT_ENABLE); -+} -+ -+/** Disable hif_nocpy rx DMA and interrupt -+* -+*/ -+void hif_nocpy_rx_disable(void) -+{ -+ u32 hif_int; -+ -+ writel(0, HIF_NOCPY_RX_CTRL); -+ -+ hif_int = readl(HIF_NOCPY_INT_ENABLE); -+ hif_int &= HIF_RXPKT_INT_EN; -+ writel(hif_int, HIF_NOCPY_INT_ENABLE); -+ -+} -+/** Initializes HIF copy block. 
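-+* Programs the Rx/Tx descriptor poll cycle counters -+* ((HIF_RX_POLL_CTRL_CYCLE << 16) | HIF_TX_POLL_CTRL_CYCLE) into HIF_POLL_CTRL.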
-+* -+*/ -+void hif_init(void) -+{ -+ /*Initialize HIF registers*/ -+ writel((HIF_RX_POLL_CTRL_CYCLE << 16) | HIF_TX_POLL_CTRL_CYCLE, HIF_POLL_CTRL); -+} -+ -+/** Enable hif tx DMA and interrupt -+* -+*/ -+void hif_tx_enable(void) -+{ -+ /*TODO not sure poll_cntrl_en is required or not */ -+ writel(HIF_CTRL_DMA_EN, HIF_TX_CTRL); -+ writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_TXPKT_INT_EN), HIF_INT_ENABLE); -+} -+ -+/** Disable hif tx DMA and interrupt -+* -+*/ -+void hif_tx_disable(void) -+{ -+ u32 hif_int; -+ -+ writel(0, HIF_TX_CTRL); -+ -+ hif_int = readl(HIF_INT_ENABLE); -+ hif_int &= HIF_TXPKT_INT_EN; -+ writel(hif_int, HIF_INT_ENABLE); -+} -+ -+/** Enable hif rx DMA and interrupt -+* -+*/ -+void hif_rx_enable(void) -+{ -+ hif_rx_dma_start(); -+ writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_RXPKT_INT_EN), HIF_INT_ENABLE); -+} -+ -+/** Disable hif rx DMA and interrupt -+* -+*/ -+void hif_rx_disable(void) -+{ -+ u32 hif_int; -+ -+ writel(0, HIF_RX_CTRL); -+ -+ hif_int = readl(HIF_INT_ENABLE); -+ hif_int &= HIF_RXPKT_INT_EN; -+ writel(hif_int, HIF_INT_ENABLE); -+ -+} ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/pfe_hif.c -@@ -0,0 +1,939 @@ -+/* -+ * -+ * Copyright (C) 2007 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#ifdef __KERNEL__ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#else -+#include "platform.h" -+#endif -+ -+ -+#include "pfe_mod.h" -+#if 0 -+#define DMA_MAP_SINGLE(dev, vaddr, size, direction) dma_map_single(dev, vaddr, size, direction) -+#define DMA_UNMAP_SINGLE(dev, vaddr, size, direction) dma_unmap_single(dev, vaddr, size, direction) -+void ct_flush(void *addr, u32 size) -+{ -+ dma_map_single(pfe->dev, addr, size, DMA_TO_DEVICE); -+} -+#else -+#define DMA_UNMAP_SINGLE(dev, vaddr, size, direction) -+#define DMA_MAP_SINGLE(dev, vaddr, size, direction) virt_to_phys(vaddr) -+#define ct_flush(addr, sz) -+#endif -+ -+#define HIF_INT_MASK (HIF_INT | HIF_RXPKT_INT) -+ -+#define inc_cl_idx(idxname) idxname = (idxname+1) & (queue->size-1) -+#define inc_hif_rxidx(idxname) idxname = (idxname+1) & (hif->RxRingSize-1) -+#define inc_hif_txidx(idxname) idxname = (idxname+1) & (hif->TxRingSize-1) -+ -+unsigned char napi_first_batch = 0; -+ -+static int pfe_hif_alloc_descr(struct pfe_hif *hif) -+{ -+#if !defined(CONFIG_PLATFORM_PCI) -+ void *addr; -+ dma_addr_t dma_addr; -+ int err = 0; -+ -+ printk(KERN_INFO "%s\n", __func__); -+ addr = dma_alloc_coherent(pfe->dev, -+ HIF_RX_DESC_NT * sizeof(struct hif_desc) + HIF_TX_DESC_NT * sizeof(struct hif_desc), -+ &dma_addr, GFP_KERNEL); -+ -+ if (!addr) { -+ printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n", __func__); -+ err = -ENOMEM; -+ goto err0; -+ } -+ -+ hif->descr_baseaddr_p = dma_addr; -+ hif->descr_baseaddr_v = 
addr; -+#else -+ hif->descr_baseaddr_p = pfe->ddr_phys_baseaddr + HIF_DESC_BASEADDR; -+ hif->descr_baseaddr_v = pfe->ddr_baseaddr + HIF_DESC_BASEADDR; -+#endif -+ hif->RxRingSize = HIF_RX_DESC_NT; -+ hif->TxRingSize = HIF_TX_DESC_NT; -+ -+ return 0; -+ -+err0: -+ return err; -+} -+ -+static void pfe_hif_free_descr(struct pfe_hif *hif) -+{ -+ printk(KERN_INFO "%s\n", __func__); -+#if !defined(CONFIG_PLATFORM_PCI) -+ dma_free_coherent(pfe->dev, -+ hif->RxRingSize * sizeof(struct hif_desc) + hif->TxRingSize * sizeof(struct hif_desc), -+ hif->descr_baseaddr_v, hif->descr_baseaddr_p); -+#endif -+} -+void pfe_hif_desc_dump(struct pfe_hif *hif) -+{ -+ struct hif_desc *desc; -+ unsigned long desc_p; -+ int ii = 0; -+ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ desc = hif->RxBase; -+ desc_p = (u32)((u64)desc - (u64)hif->descr_baseaddr_v + hif->descr_baseaddr_p); -+ -+ printk(KERN_INFO "HIF Rx desc base %p physical %x\n", desc, (u32)desc_p); -+ for (ii = 0; ii < hif->RxRingSize; ii++) { -+ printk(KERN_INFO "status: %08x, ctrl: %08x, data: %08x, next: %x\n", -+ desc->status, desc->ctrl, desc->data, desc->next); -+ desc++; -+ } -+ -+ desc = hif->TxBase; -+ desc_p = ((u64)desc - (u64)hif->descr_baseaddr_v + hif->descr_baseaddr_p); -+ -+ printk(KERN_INFO "HIF Tx desc base %p physical %x\n", desc, (u32)desc_p); -+ for (ii = 0; ii < hif->TxRingSize; ii++) { -+ printk(KERN_INFO "status: %08x, ctrl: %08x, data: %08x, next: %x\n", -+ desc->status, desc->ctrl, desc->data, desc->next); -+ desc++; -+ } -+ -+} -+ -+/* pfe_hif_release_buffers -+ * -+ */ -+static void pfe_hif_release_buffers(struct pfe_hif *hif) -+{ -+ struct hif_desc *desc; -+ int i = 0; -+ -+ hif->RxBase = hif->descr_baseaddr_v; -+ -+ printk(KERN_INFO "%s\n", __func__); -+ /*Free Rx buffers */ -+#if !defined(CONFIG_PLATFORM_PCI) -+ desc = hif->RxBase; -+ for (i = 0; i < hif->RxRingSize; i++) { -+ if (desc->data) { -+ if ((i < hif->shm->rx_buf_pool_cnt) && (hif->shm->rx_buf_pool[i] == NULL)) { -+ //dma_unmap_single(hif->dev, desc->data, hif->rx_buf_len[i], DMA_FROM_DEVICE); -+ DMA_UNMAP_SINGLE(hif->dev, desc->data, hif->rx_buf_len[i], DMA_FROM_DEVICE); -+ hif->shm->rx_buf_pool[i] = hif->rx_buf_addr[i]; -+ } -+ else { -+ /* TODO: this should not happen */ -+ printk(KERN_ERR "%s: buffer pool already full\n", __func__); -+ } -+ } -+ -+ desc->data = 0; -+ desc->status = 0; -+ desc->ctrl = 0; -+ desc++; -+ } -+#endif -+} -+ -+ -+/* -+ * pfe_hif_init_buffers -+ * This function initializes the HIF Rx/Tx ring descriptors and -+ * initializes the Rx queue with buffers.
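-+ * -+ * The Rx ring is laid out first in the coherent descriptor block, followed -+ * immediately by the Tx ring, and the last descriptor of each ring is chained -+ * back to the first so the hardware sees a circular list.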
-+ */ -+static int pfe_hif_init_buffers(struct pfe_hif *hif) -+{ -+ struct hif_desc *desc, *first_desc_p; -+ u32 data; -+ int i = 0; -+ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ /* Check that enough Rx buffers are available in the shared memory */ -+ if (hif->shm->rx_buf_pool_cnt < hif->RxRingSize) -+ return -ENOMEM; -+ -+ hif->RxBase = hif->descr_baseaddr_v; -+ memset(hif->RxBase, 0, hif->RxRingSize * sizeof(struct hif_desc)); -+ -+ /*Initialize Rx descriptors */ -+ desc = hif->RxBase; -+ first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p; -+ -+ for (i = 0; i < hif->RxRingSize; i++) { -+ /* Initialize Rx buffers from the shared memory */ -+ -+#if defined(CONFIG_PLATFORM_PCI) -+ data = pfe->ddr_phys_baseaddr + HIF_RX_PKT_DDR_BASEADDR + i * DDR_BUF_SIZE; -+#else -+ data = (u32)DMA_MAP_SINGLE(hif->dev, hif->shm->rx_buf_pool[i], pfe_pkt_size, DMA_FROM_DEVICE); -+ hif->rx_buf_addr[i] = hif->shm->rx_buf_pool[i]; -+ hif->rx_buf_len[i] = pfe_pkt_size; -+ // printk("#%d %p %p %d\n", i, data, hif->rx_buf_addr[i], hif->rx_buf_len[i]); -+ hif->shm->rx_buf_pool[i] = NULL; -+#endif -+ if (likely(dma_mapping_error(hif->dev, data) == 0)) { -+ desc->data = DDR_PHYS_TO_PFE(data); -+ } else { -+ printk(KERN_ERR "%s : low on mem\n", __func__); -+ -+ goto err; -+ } -+ -+ desc->status = 0; -+ wmb(); -+ desc->ctrl = BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR | -+ BD_CTRL_DESC_EN | BD_BUF_LEN(pfe_pkt_size); -+ /* Chain descriptors */ -+ desc->next = (u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1); -+ desc++; -+ } -+ -+ /* Overwrite last descriptor to chain it to first one */ -+ desc--; -+ desc->next = (u32)DDR_PHYS_TO_PFE(first_desc_p); -+ -+ hif->RxtocleanIndex = 0; -+ -+ /*Initialize Rx buffer descriptor ring base address */ -+ writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR); -+ -+ hif->TxBase = hif->RxBase + hif->RxRingSize; -+ first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p + hif->RxRingSize; -+ memset(hif->TxBase, 0, hif->TxRingSize * sizeof(struct hif_desc)); -+ -+ /*Initialize Tx descriptors */ -+ desc = hif->TxBase; -+ -+ for (i = 0; i < hif->TxRingSize; i++) { -+ /* Chain descriptors */ -+ desc->next = (u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1); -+#if defined(CONFIG_PLATFORM_PCI) -+ desc->data = pfe->ddr_phys_baseaddr + HIF_TX_PKT_DDR_BASEADDR + i * DDR_BUF_SIZE; -+#endif -+ desc->ctrl = 0; -+ desc++; -+ } -+ -+ /* Overwrite last descriptor to chain it to first one */ -+ desc--; -+ desc->next = (u32)DDR_PHYS_TO_PFE(first_desc_p); -+ hif->TxAvail = hif->TxRingSize; -+ hif->Txtosend = 0; -+ hif->Txtoclean = 0; -+ hif->Txtoflush = 0; -+ -+ /*Initialize Tx buffer descriptor ring base address */ -+ writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR); -+ -+ return 0; -+ -+err: -+ pfe_hif_release_buffers(hif); -+ return -ENOMEM; -+} -+ -+/* pfe_hif_client_register -+ * -+ * This function is used to register a client driver with the HIF driver.
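-+ * Queue counts are taken from CLIENT_CTRL_RX/TX_Q_CNT(client_shm->ctrl) and -+ * clamped to HIF_CLIENT_QUEUES_MAX; each queue's descriptor base is carved -+ * out of the client's shared rx_qbase/tx_qbase at qsize-sized strides.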
-+ * -+ * Return value: -+ * 0 - on successful registration -+ */ -+static int pfe_hif_client_register(struct pfe_hif *hif, u32 client_id, struct hif_client_shm *client_shm) -+{ -+ struct hif_client *client = &hif->client[client_id]; -+ u32 i, cnt; -+ struct rx_queue_desc *rx_qbase; -+ struct tx_queue_desc *tx_qbase; -+ struct hif_rx_queue *rx_queue; -+ struct hif_tx_queue *tx_queue; -+ int err = 0; -+ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ spin_lock_bh(&hif->tx_lock); -+ -+ if (test_bit(client_id, &hif->shm->gClient_status[0])) { -+ printk(KERN_ERR "%s: client %d already registered\n", __func__, client_id); -+ err = -1; -+ goto unlock; -+ } -+ -+ memset(client, 0, sizeof(struct hif_client)); -+ -+ /*Initialize client Rx queues baseaddr, size */ -+ -+ cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl); -+ /*Check if the client is requesting more queues than supported */ -+ if (cnt > HIF_CLIENT_QUEUES_MAX) -+ cnt = HIF_CLIENT_QUEUES_MAX; -+ -+ client->rx_qn = cnt; -+ rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase; -+ for (i = 0; i < cnt; i++) -+ { -+ rx_queue = &client->rx_q[i]; -+ rx_queue->base = rx_qbase + i * client_shm->rx_qsize; -+ rx_queue->size = client_shm->rx_qsize; -+ rx_queue->write_idx = 0; -+ } -+ -+ /*Initialize client Tx queues baseaddr, size */ -+ cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl); -+ -+ /*Check if the client is requesting more queues than supported */ -+ if (cnt > HIF_CLIENT_QUEUES_MAX) -+ cnt = HIF_CLIENT_QUEUES_MAX; -+ -+ client->tx_qn = cnt; -+ tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase; -+ for (i = 0; i < cnt; i++) -+ { -+ tx_queue = &client->tx_q[i]; -+ tx_queue->base = tx_qbase + i * client_shm->tx_qsize; -+ tx_queue->size = client_shm->tx_qsize; -+ tx_queue->ack_idx = 0; -+ } -+ -+ set_bit(client_id, &hif->shm->gClient_status[0]); -+ -+unlock: -+ spin_unlock_bh(&hif->tx_lock); -+ -+ return err; -+} -+ -+ -+/* pfe_hif_client_unregister -+ * -+ * This function is used to unregister a client from the HIF driver. -+ * -+ */ -+static void pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id) -+{ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ /* Mark client as no longer available (which prevents further packet reception for this client) */ -+ spin_lock_bh(&hif->tx_lock); -+ -+ if (!test_bit(client_id, &hif->shm->gClient_status[0])) { -+ printk(KERN_ERR "%s: client %d not registered\n", __func__, client_id); -+ -+ spin_unlock_bh(&hif->tx_lock); -+ return; -+ } -+ -+ clear_bit(client_id, &hif->shm->gClient_status[0]); -+ -+ spin_unlock_bh(&hif->tx_lock); -+} -+ -+/* client_put_rxpacket- -+ * This function puts the Rx pkt in the given client Rx queue. -+ * It actually swaps the Rx pkt into the client Rx descriptor buffer -+ * and returns the free buffer from it. -+ * -+ * If the function returns NULL, the client Rx queue is full and -+ * the packet could not be delivered.
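-+ * -+ * In page mode the replacement buffer is carved from the same page whenever -+ * ROUND_MIN_RX_SIZE(len + pfe_pkt_headroom) still leaves room in it (raising -+ * the page refcount via get_page()); otherwise a fresh page, or in non-page -+ * mode a PFE_BUF_SIZE kmalloc buffer, is handed back instead.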
-+ */ -+static void *client_put_rxpacket(struct hif_rx_queue *queue, void *pkt, u32 len, u32 flags, u32 client_ctrl, u32 *rem_len) -+{ -+ void *free_pkt = NULL; -+ struct rx_queue_desc *desc = queue->base + queue->write_idx; -+ -+ if (desc->ctrl & CL_DESC_OWN) { -+#if defined(CONFIG_PLATFORM_PCI) -+ memcpy(desc->data, pkt, len); -+ free_pkt = PFE_HOST_TO_PCI(pkt); -+ smp_wmb(); -+ desc->ctrl = CL_DESC_BUF_LEN(len) | flags; -+ inc_cl_idx(queue->write_idx); -+#else -+ //TODO: move allocations after Rx loop to improve instruction cache locality -+ if (page_mode) { -+ int rem_page_size = PAGE_SIZE - PRESENT_OFST_IN_PAGE(pkt); -+ int cur_pkt_size = ROUND_MIN_RX_SIZE(len + pfe_pkt_headroom); -+ *rem_len = (rem_page_size - cur_pkt_size); -+ //printk("%p rem_len %d cur_len %d buf_len %d\n", pkt, rem_page_size, cur_pkt_size, *rem_len); -+ if (*rem_len) -+ { -+ free_pkt = pkt + cur_pkt_size; -+ get_page(virt_to_page(free_pkt)); -+ } else { -+ free_pkt = (void *)__get_free_page(GFP_ATOMIC | GFP_DMA_PFE); -+ *rem_len = pfe_pkt_size; -+ } -+ } else { -+ free_pkt = kmalloc(PFE_BUF_SIZE, GFP_ATOMIC | GFP_DMA_PFE); -+ *rem_len = PFE_BUF_SIZE - pfe_pkt_headroom; -+ } -+ -+ if (free_pkt) { -+ desc->data = pkt; -+ desc->client_ctrl = client_ctrl; -+ smp_wmb(); -+ desc->ctrl = CL_DESC_BUF_LEN(len) | flags; -+ inc_cl_idx(queue->write_idx); -+ free_pkt += pfe_pkt_headroom; -+ } -+#endif -+ } -+ -+ return free_pkt; -+} -+ -+ -+/* pfe_hif_rx_process- -+ * This function does the PFE HIF Rx queue processing: -+ * it dequeues packets from the Rx queue and sends them to the corresponding client queues -+ */ -+static int pfe_hif_rx_process(struct pfe_hif *hif, int budget) -+{ -+ struct hif_desc *desc; -+ struct hif_hdr *pkt_hdr; -+ struct __hif_hdr hif_hdr; -+ void *free_buf; -+ int rtc, len, rx_processed = 0; -+ struct __hif_desc local_desc; -+ int flags; -+ unsigned int desc_p; -+ unsigned int buf_size = 0; -+ -+ spin_lock_bh(&hif->lock); -+ -+ rtc = hif->RxtocleanIndex; -+ -+ while (rx_processed < budget) -+ { -+ /*TODO may need to implement rx process budget */ -+ desc = hif->RxBase + rtc; -+ -+ __memcpy12(&local_desc, desc); -+ -+ /* ACK pending Rx interrupt */ -+ if (local_desc.ctrl & BD_CTRL_DESC_EN) { -+ writel(HIF_INT_MASK, HIF_INT_SRC); -+ -+ if(rx_processed == 0) -+ { -+ if(napi_first_batch == 1) -+ { -+ desc_p = hif->descr_baseaddr_p + ((unsigned long int)(desc) - (unsigned long int)hif->descr_baseaddr_v); -+#if defined(CONFIG_PLATFORM_C2000) -+ outer_inv_range(desc_p, (desc_p + 16)); -+#endif -+ napi_first_batch = 0; -+ } -+ } -+ -+ __memcpy12(&local_desc, desc); -+ -+ if (local_desc.ctrl & BD_CTRL_DESC_EN) -+ break; -+ } -+ -+ napi_first_batch = 0; -+ -+#ifdef HIF_NAPI_STATS -+ hif->napi_counters[NAPI_DESC_COUNT]++; -+#endif -+ len = BD_BUF_LEN(local_desc.ctrl); -+#if defined(CONFIG_PLATFORM_PCI) -+ pkt_hdr = &hif_hdr; -+ memcpy(pkt_hdr, (void *)PFE_PCI_TO_HOST(local_desc.data), sizeof(struct hif_hdr)); -+#else -+ //dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data), hif->rx_buf_len[rtc], DMA_FROM_DEVICE); -+ DMA_UNMAP_SINGLE(hif->dev, DDR_PFE_TO_PHYS(local_desc.data), hif->rx_buf_len[rtc], DMA_FROM_DEVICE); -+ -+ pkt_hdr = (struct hif_hdr *)hif->rx_buf_addr[rtc]; -+ -+ /* Track last HIF header received */ -+ if (!hif->started) { -+ hif->started = 1; -+ -+ __memcpy8(&hif_hdr, pkt_hdr); -+ -+ hif->qno = hif_hdr.hdr.qNo; -+ hif->client_id = hif_hdr.hdr.client_id; -+ hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) | hif_hdr.hdr.client_ctrl; -+ flags = CL_DESC_FIRST; -+ -+// printk(KERN_INFO "start of packet: id %d,
q %d, len %d, flags %x %x\n", hif->client_id, hif->qno, len, local_desc.ctrl, hif->client_ctrl); -+ } -+ else { -+// printk(KERN_INFO "continuation: id %d, q %d, len %d, flags %x\n", hif->client_id, hif->qno, len, local_desc.ctrl); -+ flags = 0; -+ } -+ -+ if (local_desc.ctrl & BD_CTRL_LIFM) -+ flags |= CL_DESC_LAST; -+#endif -+ /* Check that the client id is valid and still registered */ -+ if ((hif->client_id >= HIF_CLIENTS_MAX) || !(test_bit(hif->client_id, &hif->shm->gClient_status[0]))) { -+ if (printk_ratelimit()) -+ printk(KERN_ERR "%s: packet with invalid client id %d qNo %d\n", __func__, hif->client_id, hif->qno); -+ -+#if defined(CONFIG_PLATFORM_PCI) -+ free_buf = local_desc.data; -+#else -+ free_buf = pkt_hdr; -+#endif -+ goto pkt_drop; -+ } -+ -+ /* Check for a valid queue number */ -+ if (hif->client[hif->client_id].rx_qn <= hif->qno) { -+ printk(KERN_INFO "%s: packet with invalid queue: %d\n", __func__, hif->qno); -+ hif->qno = 0; -+ } -+ -+#if defined(CONFIG_PLATFORM_PCI) -+ free_buf = client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno], -+ (void *)PFE_PCI_TO_HOST(desc->data), len, flags, hif->client_ctrl, &buf_size); -+#else -+ free_buf = client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno], -+ (void *)pkt_hdr, len, flags, hif->client_ctrl, &buf_size); -+#endif -+ -+ hif_lib_indicate_client(hif->client_id, EVENT_RX_PKT_IND, hif->qno); -+ -+ if (unlikely(!free_buf)) { -+#ifdef HIF_NAPI_STATS -+ hif->napi_counters[NAPI_CLIENT_FULL_COUNT]++; -+#endif -+ /* If we want to stay in polling mode to retry later, we need to tell napi that we consumed -+ the full budget or we will hit a livelock scenario. The core code keeps this napi instance -+ at the head of the list and none of the other instances get to run */ -+ rx_processed = budget; -+ -+ if (flags & CL_DESC_FIRST) -+ hif->started = 0; -+ -+ break; -+ } -+ -+ pkt_drop: -+#if defined(CONFIG_PLATFORM_PCI) -+ desc->data = (u32)free_buf; -+#else -+ /*Fill free buffer in the descriptor */ -+ hif->rx_buf_addr[rtc] = free_buf; -+ hif->rx_buf_len[rtc] = min(pfe_pkt_size, buf_size); -+ desc->data = DDR_PHYS_TO_PFE((u32)DMA_MAP_SINGLE(hif->dev, free_buf, hif->rx_buf_len[rtc], DMA_FROM_DEVICE)); -+ //printk("#%p %p %d\n", desc->data, hif->rx_buf_addr[rtc], hif->rx_buf_len[rtc]); -+#endif -+ wmb(); -+ desc->ctrl = BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR | -+ BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc]); -+ -+ inc_hif_rxidx(rtc); -+ -+ if (local_desc.ctrl & BD_CTRL_LIFM) { -+ if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED)) { -+ rx_processed++; -+ -+#ifdef HIF_NAPI_STATS -+ hif->napi_counters[NAPI_PACKET_COUNT]++; -+#endif -+ } -+ hif->started = 0; -+ } -+ } -+ -+ hif->RxtocleanIndex = rtc; -+ spin_unlock_bh(&hif->lock); -+ -+ /* we made some progress, re-start rx dma in case it stopped */ -+ hif_rx_dma_start(); -+ -+ return rx_processed; -+} -+ -+ -+/* client_ack_txpacket- -+ * This function acks the Tx packet in the given client Tx queue by resetting the -+ * ownership bit in the descriptor. -+ */ -+static int client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no) -+{ -+ struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no]; -+ struct tx_queue_desc *desc = queue->base + queue->ack_idx; -+ -+ if (desc->ctrl & CL_DESC_OWN) { -+ /*TODO Do we need to match the pkt address also?
*/ -+ desc->ctrl &= ~CL_DESC_OWN; -+ inc_cl_idx(queue->ack_idx); -+ -+ return 0; -+ } -+ else { -+ /*This should not happen */ -+ printk(KERN_ERR "%s: %d %d %d %d %d %p %d\n", __func__, hif->Txtosend, hif->Txtoclean, hif->TxAvail, client_id, q_no, queue, queue->ack_idx); -+ BUG(); -+ return 1; -+ } -+} -+ -+void __hif_tx_done_process(struct pfe_hif *hif, int count) -+{ -+ struct hif_desc *desc; -+ struct hif_desc_sw *desc_sw; -+ int ttc, tx_avl; -+ -+ ttc = hif->Txtoclean; -+ tx_avl = hif->TxAvail; -+ -+ while ((tx_avl < hif->TxRingSize) && count--) { -+ desc = hif->TxBase + ttc; -+ -+ if (desc->ctrl & BD_CTRL_DESC_EN) -+ break; -+ -+ desc_sw = &hif->tx_sw_queue[ttc]; -+ -+ if (desc_sw->data) { -+#if !defined(CONFIG_PLATFORM_PCI) -+ //dma_unmap_single(hif->dev, desc_sw->data, desc_sw->len, DMA_TO_DEVICE); -+ DMA_UNMAP_SINGLE(hif->dev, desc_sw->data, desc_sw->len, DMA_TO_DEVICE); -+#endif -+ } -+ client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no); -+ -+ inc_hif_txidx(ttc); -+ tx_avl++; -+ } -+ -+ hif->Txtoclean = ttc; -+ hif->TxAvail = tx_avl; -+} -+ -+ -+/* __hif_xmit_pkt - -+ * This function puts one packet in the HIF Tx queue -+ */ -+void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no, void *data, u32 len, unsigned int flags) -+{ -+ struct hif_desc *desc; -+ struct hif_desc_sw *desc_sw; -+ -+#if defined(CONFIG_PLATFORM_EMULATION) -+ { -+ struct hif_rx_queue *queue = &hif->client[client_id].rx_q[0]; -+ struct rx_queue_desc *qdesc = queue->base + queue->write_idx; -+ void *buf; -+ -+ printk("%s: packet looped back client_id:%d qno:%d data : %p len:%d\n", __func__, client_id, q_no, data, len); -+#if 1 -+ if (qdesc->ctrl & CL_DESC_OWN) { -+ buf = (void *)qdesc->data; -+ memcpy(buf, data, len); -+ wmb(); -+ qdesc->ctrl = CL_DESC_BUF_LEN(len); -+ inc_cl_idx(queue->write_idx); -+ printk("%s: packet looped back..\n", __func__); -+ hif_lib_indicate_client(client_id, EVENT_RX_PKT_IND, q_no); -+ client_ack_txpacket(hif, client_id, q_no); -+ } -+#endif -+ } -+ -+#else -+ desc = hif->TxBase + hif->Txtosend; -+ desc_sw = &hif->tx_sw_queue[hif->Txtosend]; -+ -+ desc_sw->len = len; -+ desc_sw->client_id = client_id; -+ desc_sw->q_no = q_no; -+ desc_sw->flags = flags; -+ -+#if !defined(CONFIG_PLATFORM_PCI) -+ if (flags & HIF_DONT_DMA_MAP) { -+ desc_sw->data = 0; -+ desc->data = (u32)DDR_PHYS_TO_PFE(data); -+ } else { -+ desc_sw->data = DMA_MAP_SINGLE(hif->dev, data, len, DMA_TO_DEVICE); -+ desc->data = (u32)DDR_PHYS_TO_PFE(desc_sw->data); -+ } -+#else -+#define ALIGN32(x) ((x) & ~0x3) -+ memcpy(PFE_PCI_TO_HOST(desc->data), data, ALIGN32(len+0x3)); -+#endif -+ -+ inc_hif_txidx(hif->Txtosend); -+ hif->TxAvail--; -+ -+ /* For TSO we skip actual TX until the last descriptor */ -+ /* This reduces the number of required wmb() calls */ -+ if ((flags & HIF_TSO) && (!((flags & HIF_DATA_VALID) && (flags & HIF_LAST_BUFFER)))) -+ goto skip_tx; -+ -+ wmb(); -+ -+ do { -+ desc_sw = &hif->tx_sw_queue[hif->Txtoflush]; -+ desc = hif->TxBase + hif->Txtoflush; -+ -+ if (desc_sw->flags & HIF_LAST_BUFFER) { -+ if ((desc_sw->client_id < PFE_CL_VWD0) || (desc_sw->client_id > (PFE_CL_VWD0 + MAX_VAP_SUPPORT))) -+ desc->ctrl = BD_CTRL_LIFM | BD_CTRL_BRFETCH_DISABLE | -+ BD_CTRL_RTFETCH_DISABLE | BD_CTRL_PARSE_DISABLE | -+ BD_CTRL_DESC_EN | BD_BUF_LEN(desc_sw->len); -+ else { -+ -+ desc->ctrl = BD_CTRL_LIFM | BD_CTRL_DESC_EN | BD_BUF_LEN(desc_sw->len); -+ } -+ } -+ else -+ desc->ctrl = BD_CTRL_DESC_EN | BD_BUF_LEN(desc_sw->len); -+ -+ inc_hif_txidx(hif->Txtoflush); -+ } -+ while
(hif->Txtoflush != hif->Txtosend); -+ -+skip_tx: -+ return; -+ -+#endif -+} -+ -+ -+int hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no, void *data, unsigned int len) -+{ -+ int rc = 0; -+ -+ spin_lock_bh(&hif->tx_lock); -+ -+ if (!hif->TxAvail) -+ rc = 1; -+ else { -+ __hif_xmit_pkt(hif, client_id, q_no, data, len, HIF_FIRST_BUFFER | HIF_LAST_BUFFER); -+ hif_tx_dma_start(); -+ } -+ if (hif->TxAvail < (hif->TxRingSize >> 1)) -+ __hif_tx_done_process(hif, TX_FREE_MAX_COUNT); -+ -+ spin_unlock_bh(&hif->tx_lock); -+ -+ return rc; -+} -+ -+/* hif_isr- -+ * This ISR routine processes Rx/Tx done interrupts from the HIF hardware block -+ */ -+static irqreturn_t hif_isr(int irq, void *dev_id) -+{ -+ struct pfe_hif *hif = (struct pfe_hif *) dev_id; -+ int int_status; -+ -+ /*Read hif interrupt source register */ -+ int_status = readl_relaxed(HIF_INT_SRC); -+ -+ if ((int_status & HIF_INT) == 0) -+ return IRQ_NONE; -+ -+ int_status &= ~(HIF_INT); -+ -+ if (int_status & HIF_RXPKT_INT) { -+ int_status &= ~(HIF_RXPKT_INT); -+ -+ /* Disable interrupts */ -+ writel_relaxed(0, HIF_INT_ENABLE); -+ -+ napi_first_batch = 1; -+ -+ if (napi_schedule_prep(&hif->napi)) -+ { -+#ifdef HIF_NAPI_STATS -+ hif->napi_counters[NAPI_SCHED_COUNT]++; -+#endif -+ __napi_schedule(&hif->napi); -+ } -+ } -+ -+ if (int_status) { -+ printk(KERN_INFO "%s : Invalid interrupt : %d\n", __func__, int_status); -+ writel(int_status, HIF_INT_SRC); -+ } -+ -+ return IRQ_HANDLED; -+} -+ -+ -+void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int data2) -+{ -+ unsigned int client_id = data1; -+ -+ if (client_id >= HIF_CLIENTS_MAX) -+ { -+ printk(KERN_ERR "%s: client id %d out of bounds\n", __func__, client_id); -+ return; -+ } -+ -+ switch (req) { -+ case REQUEST_CL_REGISTER: -+ /* Request to register a client */ -+ printk(KERN_INFO "%s: register client_id %d\n", __func__, client_id); -+ pfe_hif_client_register(hif, client_id, (struct hif_client_shm *)&hif->shm->client[client_id]); -+ break; -+ -+ case REQUEST_CL_UNREGISTER: -+ printk(KERN_INFO "%s: unregister client_id %d\n", __func__, client_id); -+ -+ /* Request to unregister a client */ -+ pfe_hif_client_unregister(hif, client_id); -+ -+ break; -+ -+ default: -+ printk(KERN_ERR "%s: unsupported request %d\n", __func__, req); -+ break; -+ } -+ -+ /*TODO check for TMU queue resume request */ -+ -+ /*Process client Tx queues -+ * Currently there is no check for pending Tx*/ -+} -+ -+/** pfe_hif_rx_poll -+ * This function is the NAPI poll function that processes the HIF Rx queue. -+ */ -+static int pfe_hif_rx_poll(struct napi_struct *napi, int budget) -+{ -+ struct pfe_hif *hif = container_of(napi, struct pfe_hif, napi); -+ int work_done; -+ -+#ifdef HIF_NAPI_STATS -+ hif->napi_counters[NAPI_POLL_COUNT]++; -+#endif -+ -+ work_done = pfe_hif_rx_process(hif, budget); -+ -+ if (work_done < budget) -+ { -+ napi_complete(napi); -+ writel_relaxed(HIF_INT_MASK, HIF_INT_ENABLE); -+ } -+#ifdef HIF_NAPI_STATS -+ else -+ hif->napi_counters[NAPI_FULL_BUDGET_COUNT]++; -+#endif -+ -+ return work_done; -+} -+ -+/* pfe_hif_init -+ * This function initializes the base addresses and IRQ, etc.
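-+ * -+ * Bring-up order: descriptor rings and buffers first, then NAPI on a dummy -+ * netdev, then the HIF block itself (hif_init()/hif_rx_enable()/ -+ * hif_tx_enable()), and only then is the IRQ requested.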
-+ */ -+int pfe_hif_init(struct pfe *pfe) -+{ -+ struct pfe_hif *hif = &pfe->hif; -+ int err; -+ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ hif->dev = pfe->dev; -+ hif->irq = pfe->hif_irq; -+ -+ if ((err = pfe_hif_alloc_descr(hif))) { -+ goto err0; -+ } -+ -+ if (pfe_hif_init_buffers(hif)) { -+ printk(KERN_ERR "%s: Could not initialize buffer descriptors\n", __func__); -+ err = -ENOMEM; -+ goto err1; -+ } -+ -+ /* Initialize NAPI for Rx processing */ -+ init_dummy_netdev(&hif->dummy_dev); -+ netif_napi_add(&hif->dummy_dev, &hif->napi, pfe_hif_rx_poll, HIF_RX_POLL_WEIGHT); -+ napi_enable(&hif->napi); -+ -+ spin_lock_init(&hif->tx_lock); -+ spin_lock_init(&hif->lock); -+ -+ hif_init(); -+ hif_rx_enable(); -+ hif_tx_enable(); -+ -+ /* Disable tx done interrupt */ -+ writel(HIF_INT_MASK, HIF_INT_ENABLE); -+ -+ gpi_enable(HGPI_BASE_ADDR); -+ -+#ifdef __KERNEL__ -+ err = request_irq(hif->irq, hif_isr, 0, "pfe_hif", hif); -+ if (err) { -+ printk(KERN_ERR "%s: failed to get the hif IRQ = %d\n", __func__, hif->irq); -+ goto err1; -+ } -+#else -+ /*TODO register interrupts */ -+#endif -+ -+ return 0; -+err1: -+ pfe_hif_free_descr(hif); -+err0: -+ return err; -+} -+ -+/* pfe_hif_exit- -+ */ -+void pfe_hif_exit(struct pfe *pfe) -+{ -+ struct pfe_hif *hif = &pfe->hif; -+ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ spin_lock_bh(&hif->lock); -+ hif->shm->gClient_status[0] = 0; -+ hif->shm->gClient_status[1] = 0; /* Make sure all clients are disabled */ -+ -+ spin_unlock_bh(&hif->lock); -+ -+ /*Disable Rx/Tx */ -+ gpi_disable(HGPI_BASE_ADDR); -+ hif_rx_disable(); -+ hif_tx_disable(); -+ -+ napi_disable(&hif->napi); -+ netif_napi_del(&hif->napi); -+ -+#ifdef __KERNEL__ -+ free_irq(hif->irq, hif); -+#endif -+ pfe_hif_release_buffers(hif); -+ pfe_hif_free_descr(hif); -+} ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/pfe_hif.h -@@ -0,0 +1,322 @@ -+/* -+ * -+ * Copyright (C) 2007 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#ifndef _PFE_HIF_H_ -+#define _PFE_HIF_H_ -+ -+#include -+ -+#define HIF_NAPI_STATS -+ -+#define HIF_CLIENT_QUEUES_MAX 16 -+#define HIF_RX_POLL_WEIGHT 64 -+ -+#define HIF_RX_PKT_MIN_SIZE 0x800 /* 2KB */ -+#define HIF_RX_PKT_MIN_SIZE_MASK ~(HIF_RX_PKT_MIN_SIZE - 1) -+#define ROUND_MIN_RX_SIZE(_sz) ((_sz + (HIF_RX_PKT_MIN_SIZE - 1)) & HIF_RX_PKT_MIN_SIZE_MASK) -+#define PRESENT_OFST_IN_PAGE(_buf) (((unsigned long int)_buf & (PAGE_SIZE - 1)) & HIF_RX_PKT_MIN_SIZE_MASK) -+ -+enum { -+ NAPI_SCHED_COUNT = 0, -+ NAPI_POLL_COUNT, -+ NAPI_PACKET_COUNT, -+ NAPI_DESC_COUNT, -+ NAPI_FULL_BUDGET_COUNT, -+ NAPI_CLIENT_FULL_COUNT, -+ NAPI_MAX_COUNT -+}; -+ -+ -+/* XXX HIF_TX_DESC_NT value should always be greater than 4, -+ * otherwise HIF_TX_POLL_MARK will become zero.
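-+ * Both ring sizes must also stay powers of two, since the ring indices wrap -+ * with idx = (idx + 1) & (RingSize - 1) (see the inc_hif_rxidx()/ -+ * inc_hif_txidx() macros in pfe_hif.c).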
-+ */ -+#if defined(CONFIG_PLATFORM_PCI) -+#define HIF_RX_DESC_NT 4 -+#define HIF_TX_DESC_NT 4 -+#else -+#if defined(CONFIG_COMCERTO_64K_PAGES) -+#define HIF_RX_DESC_NT 64 -+#else -+#define HIF_RX_DESC_NT 256 -+#endif -+#define HIF_TX_DESC_NT 2048 -+#endif -+ -+#define HIF_FIRST_BUFFER (1 << 0) -+#define HIF_LAST_BUFFER (1 << 1) -+#define HIF_DONT_DMA_MAP (1 << 2) //TODO merge it with TSO -+#define HIF_DATA_VALID (1 << 3) -+#define HIF_TSO (1 << 4) -+ -+#define MAX_VAP_SUPPORT 3 -+#define MAX_WIFI_VAPS MAX_VAP_SUPPORT -+ -+enum { -+ PFE_CL_GEM0 = 0, -+ PFE_CL_GEM1, -+ PFE_CL_GEM2, -+ PFE_CL_VWD0, -+ PFE_CL_VWD_LAST = PFE_CL_VWD0 + MAX_VAP_SUPPORT, -+ PFE_CL_PCAP0, -+ HIF_CLIENTS_MAX -+}; -+ -+/*structure to store client queue info */ -+struct hif_rx_queue { -+ struct rx_queue_desc *base; -+ u32 size; -+ u32 write_idx; -+}; -+ -+struct hif_tx_queue { -+ struct tx_queue_desc *base; -+ u32 size; -+ u32 ack_idx; -+}; -+ -+/*Structure to store the client info */ -+struct hif_client { -+ int rx_qn; -+ struct hif_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX]; -+ int tx_qn; -+ struct hif_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX]; -+}; -+ -+/*HIF hardware buffer descriptor */ -+struct hif_desc { -+ volatile u32 ctrl; -+ volatile u32 status; -+ volatile u32 data; -+ volatile u32 next; -+}; -+ -+struct __hif_desc { -+ u32 ctrl; -+ u32 status; -+ u32 data; -+}; -+ -+struct hif_desc_sw { -+ dma_addr_t data; -+ u16 len; -+ u8 client_id; -+ u8 q_no; -+ u16 flags; -+}; -+ -+struct hif_hdr { -+ u8 client_id; -+ u8 qNo; -+ u16 client_ctrl; -+ u16 client_ctrl1; -+}; -+ -+struct __hif_hdr { -+ union { -+ struct hif_hdr hdr; -+ u32 word[2]; -+ }; -+}; -+ -+struct hif_lro_hdr { -+ u16 data_offset; -+ u16 mss; -+}; -+ -+struct hif_ipsec_hdr { -+ u16 sa_handle[2]; -+}__attribute__((packed)); -+ -+#define MAX_TSO_BUF_DESCS 5 -+struct hif_tso_buf_desc { -+ u32 addr; -+ u32 ctrl; -+#define TSO_CTRL_LAST_BUFFER (1 << 31) -+}; -+ -+struct hif_tso_hdr { -+ struct hif_hdr pkt_hdr; -+ u16 ip_off; -+ u16 ip_id; -+ u16 ip_len; -+ u16 tcp_off; -+ u32 tcp_seq; -+} __attribute__((packed)); -+ -+struct hif_tso_hdr_nocpy { -+ struct hif_tso_hdr tso_hdr; -+ struct hif_tso_buf_desc bdesc[MAX_TSO_BUF_DESCS]; -+} __attribute__((packed)); -+ -+struct hif_pcap_hdr { -+ u8 ifindex; -+ u8 unused; -+ u16 seqno; -+ u32 timestamp; -+}__attribute__((packed)); -+ -+/* HIF_CTRL_TX... defines */ -+#define HIF_CTRL_TX_TSO_NOCPY (1 << 8) -+#define HIF_CTRL_TX_IPSEC_OUT (1 << 7) -+#define HIF_CTRL_TX_OWN_MAC (1 << 6) -+#define HIF_CTRL_TX_TSO_END (1 << 5) -+#define HIF_CTRL_TX_TSO6 (1 << 4) -+#define HIF_CTRL_TX_TSO (1 << 3) -+#define HIF_CTRL_TX_CHECKSUM (1 << 2) -+#define HIF_CTRL_TX_CSUM_VALIDATE (1 << 1) -+#define HIF_CTRL_TX_WIFI (1 << 0) -+ -+/* HIF_CTRL_RX... 
defines */ -+#define HIF_CTRL_RX_OFFSET_OFST (24) -+#define HIF_CTRL_RX_PE_ID_OFST (16) -+#define HIF_CTRL_RX_IPSEC_IN (1 << 4) -+#define HIF_CTRL_RX_WIFI_EXPT (1 << 3) -+#define HIF_CTRL_RX_CHECKSUMMED (1 << 2) -+#define HIF_CTRL_RX_CONTINUED (1 << 1) -+#define HIF_CTRL_RX_WIFI_HEADROOM (1 << 0) -+ -+#define HIF_CTRL_VAPID_OFST (8) -+ -+struct pfe_hif { -+ /* To store registered clients in hif layer */ -+ struct hif_client client[HIF_CLIENTS_MAX]; -+ struct hif_shm *shm; -+ int irq; -+ -+ void *descr_baseaddr_v; -+ unsigned long descr_baseaddr_p; -+ -+ struct hif_desc *RxBase; -+ u32 RxRingSize; -+ u32 RxtocleanIndex; -+ void *rx_buf_addr[HIF_RX_DESC_NT]; -+ int rx_buf_len[HIF_RX_DESC_NT]; -+ unsigned int qno; -+ unsigned int client_id; -+ unsigned int client_ctrl; -+ unsigned int started; -+ -+ struct hif_desc *TxBase; -+ u32 TxRingSize; -+ u32 Txtosend; -+ u32 Txtoclean; -+ u32 TxAvail; -+ u32 Txtoflush; -+ struct hif_desc_sw tx_sw_queue[HIF_TX_DESC_NT]; -+ struct hif_tso_hdr_nocpy *tso_hdr_v; -+ dma_addr_t tso_hdr_p; -+ -+ spinlock_t tx_lock; -+ spinlock_t lock; -+ struct net_device dummy_dev; -+ struct napi_struct napi; -+ struct device *dev; -+ -+#ifdef CONFIG_HOTPLUG_CPU -+ struct notifier_block cpu_notify; -+#endif -+ -+#ifdef HIF_NAPI_STATS -+ unsigned int napi_counters[NAPI_MAX_COUNT]; -+#endif -+}; -+ -+void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no, void *data, u32 len, unsigned int flags); -+int hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no, void *data, unsigned int len); -+void __hif_tx_done_process(struct pfe_hif *hif, int count); -+void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int data2); -+int pfe_hif_init(struct pfe *pfe); -+void pfe_hif_exit(struct pfe *pfe); -+ -+static inline void hif_tx_done_process(struct pfe_hif *hif, int count) -+{ -+ spin_lock_bh(&hif->tx_lock); -+ __hif_tx_done_process(hif, count); -+ spin_unlock_bh(&hif->tx_lock); -+} -+ -+static inline void hif_tx_lock(struct pfe_hif *hif) -+{ -+ spin_lock_bh(&hif->tx_lock); -+} -+ -+static inline void hif_tx_unlock(struct pfe_hif *hif) -+{ -+ spin_unlock_bh(&hif->tx_lock); -+} -+ -+static inline int __hif_tx_avail(struct pfe_hif *hif) -+{ -+ return hif->TxAvail; -+} -+ -+#if defined(CONFIG_PLATFORM_C2000) -+static inline void __memcpy8(void *dst, void *src) -+{ -+ asm volatile ( "ldm %1, {r9, r10}\n\t" -+ "stm %0, {r9, r10}\n\t" -+ : -+ : "r" (dst), "r" (src) -+ : "r9", "r10", "memory" -+ ); -+} -+ -+static inline void __memcpy12(void *dst, void *src) -+{ -+ asm volatile ( "ldm %1, {r8, r9, r10}\n\t" -+ "stm %0, {r8, r9, r10}\n\t" -+ : -+ : "r" (dst), "r" (src) -+ : "r8", "r9", "r10", "memory" -+ ); -+} -+ -+static inline void __memcpy16(void *dst, void *src) -+{ -+ asm volatile ( "ldm %1, {r7, r8, r9, r10}\n\t" -+ "stm %0, {r7, r8, r9, r10}\n\t" -+ : -+ : "r"(dst), "r"(src) -+ : "r7", "r8", "r9", "r10", "memory" -+ ); -+} -+ -+#define HIF_MEMCPY_BURSTSIZE 32 /*__memcpy copy 32byte in a burst*/ -+static inline void __memcpy(void *dst, void *src, unsigned int len) -+{ -+ void *end = src + len; -+ -+ dst = (void *)((unsigned long)dst & ~0x3); -+ src = (void *)((unsigned long)src & ~0x3); -+ -+ while (src < end) { -+ asm volatile ( "ldm %1!, {r3, r4, r5, r6, r7, r8, r9, r10}\n\t" -+ "stm %0!, {r3, r4, r5, r6, r7, r8, r9, r10}\n\t" -+ : "+r"(dst), "+r"(src) -+ : -+ : "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "memory" -+ ); -+ } -+} -+#else -+#define __memcpy8(dst, src) memcpy(dst, src, 8) -+#define __memcpy12(dst, src) 
memcpy(dst, src, 12) -+#define __memcpy(dst, src, len) memcpy(dst, src, len) -+#endif -+#endif /* _PFE_HIF_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.c -@@ -0,0 +1,658 @@ -+/* -+ * -+ * Copyright (C) 2007 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#include -+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) -+#include -+#endif -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) -+//#include -+#endif -+ -+#include "pfe_mod.h" -+#include "pfe_hif.h" -+#include "pfe_hif_lib.h" -+#include "pfe_ctrl_hal.h" -+ -+ -+unsigned int lro_mode = 0; -+unsigned int page_mode = 0; -+unsigned int tx_qos = 0; -+unsigned int pfe_pkt_size; -+unsigned int pfe_pkt_headroom; -+unsigned int emac_txq_cnt; -+ -+/** pfe_hif_lib.c -+ * Common functions used by HIF client drivers -+ */ -+ -+/*HIF shared memory Global variable */ -+struct hif_shm ghif_shm; -+ -+/* TMU tx transmitted packets counter, 1 per TMU */ -+unsigned int TMU_DMEM_SH(tx_trans)[EMAC_TXQ_CNT]; -+ -+/* Cleanup the HIF shared memory, release HIF rx_buffer_pool. -+ * This function should be called after pfe_hif_exit -+ * -+ * @param[in] hif_shm Shared memory address location in DDR -+ */ -+static void pfe_hif_shm_clean(struct hif_shm *hif_shm) -+{ -+ int i; -+ void *pkt; -+ -+ for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) { -+ pkt = hif_shm->rx_buf_pool[i]; -+ if (pkt) { -+ hif_shm->rx_buf_pool[i] = NULL; -+ pkt -= pfe_pkt_headroom; -+ -+ if (page_mode) { -+ put_page(virt_to_page(pkt)); -+ } else -+ kfree(pkt); -+ } -+ } -+} -+ -+/* Initialize shared memory used between HIF driver and clients, -+ * allocate rx_buffer_pool required for HIF Rx descriptors. -+ * This function should be called before initializing HIF driver.
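-+ * Each pool entry is stored pfe_pkt_headroom bytes into its backing page or -+ * kmalloc buffer, so the headroom can be stripped again when the pool is -+ * drained (see pfe_hif_shm_clean() above, which does pkt -= pfe_pkt_headroom).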
-+ * -+ * @param[in] hif_shm Shared memory address location in DDR -+ * @return 0 - on success, <0 on failure to initialize -+ */ -+static int pfe_hif_shm_init(struct hif_shm *hif_shm) -+{ -+ int i; -+ void *pkt; -+ -+ memset(hif_shm, 0, sizeof(struct hif_shm)); -+ hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT; -+ -+ for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) { -+ if (page_mode) { -+ pkt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA_PFE); -+ } else -+ pkt = kmalloc(PFE_BUF_SIZE, GFP_KERNEL | GFP_DMA_PFE); -+ -+ if (pkt) -+ hif_shm->rx_buf_pool[i] = pkt + pfe_pkt_headroom; -+ else -+ goto err0; -+ } -+ -+ return 0; -+ -+err0: -+ printk(KERN_ERR "%s Low memory\n", __func__); -+ pfe_hif_shm_clean(hif_shm); -+ return -ENOMEM; -+} -+ -+/*This function sends indication to HIF driver -+ * -+ * @param[in] hif hif context -+ **/ -+static void hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int data2) -+{ -+ //TODO : If we separate HIF and HIF LIB, then send req and data through shared memory. -+ -+ hif_process_client_req(hif, req, data1, data2); -+} -+ -+void hif_lib_indicate_client(int client_id, int event_type, int qno) -+{ -+ struct hif_client_s *client = pfe->hif_client[client_id]; -+ -+ /* -+ * TODO : Right now, all events are queue number based. So we are masking events on a -+ * per-queue basis. Later, if we add any events that do not depend on queue number, -+ * we may need to add per-event masking. -+ */ -+ if (!client || (event_type >= HIF_EVENT_MAX) || (qno >= HIF_CLIENT_QUEUES_MAX) ) -+ return; -+ -+ if (!test_and_set_bit(qno, &client->queue_mask[event_type])) { -+ client->event_handler(client->priv, event_type, qno); -+ } -+ -+} -+ -+ -+/*This function releases the Rx queue descriptor memory and pre-filled buffers -+ * -+ * @param[in] client hif_client context -+ */ -+static void hif_lib_client_release_rx_buffers(struct hif_client_s *client) -+{ -+ struct rx_queue_desc *desc; -+ int qno, ii; -+ void *buf; -+ -+ for (qno = 0; qno < client->rx_qn; qno++) { -+ desc = client->rx_q[qno].base; -+ -+ for (ii = 0; ii < client->rx_q[qno].size; ii++) { -+ buf = (void *)desc->data; -+ if (buf) { -+ buf -= pfe_pkt_headroom; -+ -+ if (page_mode) -+ free_page((unsigned long)buf); -+ else -+ kfree(buf); -+ -+ desc->ctrl = 0; -+ } -+ -+ desc++; -+ } -+ } -+ -+ kfree(client->rx_qbase); -+} -+ -+ -+/*This function allocates memory for the rxq descriptors and pre-fills rx queues -+ * with buffers.
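-+ * All descriptors start out owned by the HIF side: ctrl is initialized to -+ * CL_DESC_BUF_LEN(pfe_pkt_size) | CL_DESC_OWN, i.e. ready to be filled.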
-+ * @param[in] client client context -+ * @param[in] q_size size of the rxQ, all queues are of same size -+ */ -+static int hif_lib_client_init_rx_buffers(struct hif_client_s *client, int q_size) -+{ -+ struct rx_queue_desc *desc; -+ struct hif_client_rx_queue *queue; -+ int ii, qno; -+ -+ /*Allocate memory for the client queues */ -+ client->rx_qbase = kzalloc(client->rx_qn * q_size * sizeof(struct rx_queue_desc), GFP_KERNEL); -+ if (!client->rx_qbase) { -+ goto err; -+ } -+ -+ for (qno = 0; qno < client->rx_qn; qno++) { -+ queue = &client->rx_q[qno]; -+ -+ queue->base = client->rx_qbase + qno * q_size * sizeof(struct rx_queue_desc); -+ queue->size = q_size; -+ queue->read_idx = 0; -+ queue->write_idx = 0; -+ -+ dbg_print_info("rx queue: %d, base: %p, size: %d \n", qno, queue->base, queue->size); -+ } -+ -+ for (qno = 0; qno < client->rx_qn; qno++) { -+ queue = &client->rx_q[qno]; -+ desc = queue->base; -+ -+ for (ii = 0; ii < queue->size; ii++) { -+ desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) | CL_DESC_OWN; -+ desc++; -+ } -+ } -+ -+ return 0; -+ -+err: -+ return 1; -+} -+ -+#define inc_cl_idx(idxname) idxname = (idxname+1) & (queue->size-1) -+ -+static void hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue) -+{ -+ dbg_print_info( "%s\n", __func__); -+ -+ /* Check if there are any pending packets. Client must flush the tx queues -+ before unregistering, by calling hif_lib_tx_get_next_complete() */ -+ /* HIF no longer calls us since we are no longer registered */ -+ -+ if (queue->tx_pending) -+ printk(KERN_ERR "%s: pending transmit packets\n", __func__); -+} -+ -+static void hif_lib_client_release_tx_buffers(struct hif_client_s *client) -+{ -+ int qno; -+ -+ dbg_print_info("%s\n", __func__); -+ -+ for (qno = 0; qno < client->tx_qn; qno++) { -+ hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]); -+ } -+ -+ kfree(client->tx_qbase); -+} -+ -+static int hif_lib_client_init_tx_buffers(struct hif_client_s *client, int q_size) -+{ -+ struct hif_client_tx_queue *queue; -+ int qno; -+ -+ client->tx_qbase = kzalloc(client->tx_qn * q_size * sizeof(struct tx_queue_desc), GFP_KERNEL); -+ if (!client->tx_qbase) { -+ return 1; -+ } -+ -+ for (qno = 0; qno < client->tx_qn; qno++) { -+ queue = &client->tx_q[qno]; -+ -+ queue->base = client->tx_qbase + qno * q_size * sizeof(struct tx_queue_desc); -+ queue->size = q_size; -+ queue->read_idx = 0; -+ queue->write_idx = 0; -+ queue->tx_pending = 0; -+ queue->nocpy_flag = 0; -+ queue->prev_tmu_tx_pkts = 0; -+ queue->done_tmu_tx_pkts = 0; -+ -+ dbg_print_info("tx queue: %d, base: %p, size: %d \n", qno, queue->base, queue->size); -+ } -+ -+ return 0; -+} -+ -+static int hif_lib_event_dummy( void *priv, int event_type, int qno) -+{ -+ return 0; -+} -+ -+int hif_lib_client_register(struct hif_client_s *client) -+{ -+ struct hif_shm *hif_shm; -+ struct hif_client_shm *client_shm; -+ int err, i; -+// int loop_cnt = 0; -+ -+ dbg_print_info("%s\n", __func__); -+ -+ /*Allocate memory before spin_lock*/ -+ if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) { -+ err = -ENOMEM; -+ goto err_rx; -+ } -+ -+ if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) { -+ err = -ENOMEM; -+ goto err_tx; -+ } -+ -+ spin_lock_bh(&pfe->hif.lock); -+ if (!(client->pfe) || (client->id >= HIF_CLIENTS_MAX) || (pfe->hif_client[client->id])) { -+ err = -EINVAL; -+ goto err; -+ } -+ -+ hif_shm = client->pfe->hif.shm; -+ -+ if (!client->event_handler) -+ client->event_handler = hif_lib_event_dummy; -+ -+ /*Initialize client specific shared memory */ -+
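/* The ctrl word packs the queue counts as (tx_qn << 8) | rx_qn; e.g. a client -+ * with 2 Tx and 3 Rx queues stores 0x0203 (see the CLIENT_CTRL_*_Q_CNT_OFST -+ * definitions in pfe_hif_lib.h). */ -+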
client_shm = (struct hif_client_shm *)&hif_shm->client[client->id]; -+ client_shm->rx_qbase = (unsigned long int)client->rx_qbase; -+ client_shm->rx_qsize = client->rx_qsize; -+ client_shm->tx_qbase = (unsigned long int)client->tx_qbase; -+ client_shm->tx_qsize = client->tx_qsize; -+ client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) | (client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST); -+// spin_lock_init(&client->rx_lock); -+ -+ for (i = 0; i < HIF_EVENT_MAX; i++) { -+ client->queue_mask[i] = 0; /* By default all events are unmasked */ -+ } -+ -+ /*Indicate to HIF driver*/ -+ hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_REGISTER, client->id, 0); -+ -+ dbg_print_info("%s: client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d\n", -+ __func__, client, client->id, client->tx_qsize, client->rx_qsize); -+ -+ client->cpu_id = -1; -+ -+ pfe->hif_client[client->id] = client; -+ spin_unlock_bh(&pfe->hif.lock); -+ -+ return 0; -+ -+err: -+ spin_unlock_bh(&pfe->hif.lock); -+ hif_lib_client_release_tx_buffers(client); -+ -+err_tx: -+ hif_lib_client_release_rx_buffers(client); -+ -+err_rx: -+ return err; -+} -+ -+int hif_lib_client_unregister(struct hif_client_s *client) -+{ -+ struct pfe *pfe = client->pfe; -+ u32 client_id = client->id; -+ -+ printk(KERN_INFO "%s : client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d\n", -+ __func__, client, client->id, client->tx_qsize, client->rx_qsize); -+ -+ -+ spin_lock_bh(&pfe->hif.lock); -+ hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0); -+ -+ hif_lib_client_release_tx_buffers(client); -+ hif_lib_client_release_rx_buffers(client); -+ pfe->hif_client[client_id] = NULL; -+ spin_unlock_bh(&pfe->hif.lock); -+ -+ return 0; -+} -+ -+int hif_lib_event_handler_start(struct hif_client_s *client, int event, int qno) -+{ -+ struct hif_client_rx_queue *queue = &client->rx_q[qno]; -+ struct rx_queue_desc *desc = queue->base + queue->read_idx; -+ -+ if ((event >= HIF_EVENT_MAX) || ( qno >= HIF_CLIENT_QUEUES_MAX)) { -+ dbg_print_info("%s: Unsupported event : %d queue number : %d\n", __func__, event, qno); -+ return -1; -+ } -+ -+ test_and_clear_bit(qno, &client->queue_mask[event]); -+ -+ switch (event) { -+ case EVENT_RX_PKT_IND: -+ if (!(desc->ctrl & CL_DESC_OWN)) -+ hif_lib_indicate_client(client->id, EVENT_RX_PKT_IND, qno); -+ break; -+ -+ case EVENT_HIGH_RX_WM: -+ case EVENT_TXDONE_IND: -+ default: -+ break; -+ } -+ -+ return 0; -+} -+ -+ -+/*This function gets one packet from the specified client queue -+ * It also refills the rx buffer */ -+void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int *ofst, unsigned int *rx_ctrl, unsigned int *desc_ctrl, void **priv_data) -+{ -+ struct hif_client_rx_queue *queue = &client->rx_q[qno]; -+ struct rx_queue_desc *desc; -+ void *pkt = NULL; -+ -+ //printk(KERN_INFO "%s\n", __func__); -+#if defined(CONFIG_PLATFORM_EMULATION) -+ printk(KERN_INFO "%s:qno:%d cid:%d desc:%p rdidx:%d \n", -+ __func__, qno, client->id, desc, -+ queue->read_idx); -+#endif -+ -+ /* The following lock protects rx queue access from hif_lib_event_handler_start. -+ * In general the lock is not required, because hif_lib_xmit_pkt and -+ * hif_lib_event_handler_start are called from napi poll, which is not -+ * re-entrant. But if some client uses them differently, this lock is required.
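-+ * -+ * Ownership handshake: a descriptor whose CL_DESC_OWN bit is clear holds a -+ * packet for the client; once the data pointer has been taken, the descriptor -+ * is re-armed with CL_DESC_BUF_LEN(pfe_pkt_size) | CL_DESC_OWN for HIF.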
-+ */ -+ //spin_lock_irqsave(&client->rx_lock, flags); -+ desc = queue->base + queue->read_idx; -+ if (!(desc->ctrl & CL_DESC_OWN)) { -+ pkt = desc->data - pfe_pkt_headroom; -+ -+ *rx_ctrl = desc->client_ctrl; -+ *desc_ctrl = desc->ctrl; -+ -+ if (desc->ctrl & CL_DESC_FIRST) { -+ u16 size = *rx_ctrl >> HIF_CTRL_RX_OFFSET_OFST; -+ -+ if (size) { -+ *len = CL_DESC_BUF_LEN(desc->ctrl) - PFE_PKT_HEADER_SZ - size; -+ *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ + size; -+ *priv_data = desc->data + PFE_PKT_HEADER_SZ; -+ } else { -+ *len = CL_DESC_BUF_LEN(desc->ctrl) - PFE_PKT_HEADER_SZ; -+ *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ; -+ *priv_data = NULL; -+ } -+ -+ } else { -+ *len = CL_DESC_BUF_LEN(desc->ctrl); -+ *ofst = pfe_pkt_headroom; -+ } -+ -+ desc->data = NULL; // Needed so we don't free a buffer/page twice on module_exit -+ smp_wmb(); -+ -+ desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) | CL_DESC_OWN; -+ inc_cl_idx(queue->read_idx); -+ } -+ -+ //spin_unlock_irqrestore(&client->rx_lock, flags); -+ return pkt; -+} -+ -+static inline void hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int client_id, unsigned int qno, u32 client_ctrl) -+{ -+ /* Optimize the write since the destination may be non-cacheable */ -+ if (!((unsigned long)pkt_hdr & 0x3)) { -+ ((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) | client_id; -+ } else { -+ ((u16 *)pkt_hdr)[0] = (qno << 8) | (client_id & 0xFF); -+ ((u16 *)pkt_hdr)[1] = (client_ctrl & 0xFFFF); -+ } -+} -+ -+/*This function puts the given packet in the specified client queue */ -+void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data, unsigned int len, u32 client_ctrl, unsigned int flags, void *client_data) -+{ -+ struct hif_client_tx_queue *queue = &client->tx_q[qno]; -+ struct tx_queue_desc *desc = queue->base + queue->write_idx; -+ -+ //printk(KERN_INFO "%s\n",__func__); -+ -+ /* First buffer */ -+ if (flags & HIF_FIRST_BUFFER) -+ { -+ data -= sizeof(struct hif_hdr); -+ len += sizeof(struct hif_hdr); -+ -+ hif_hdr_write(data, client->id, qno, client_ctrl); -+ } -+ -+ desc->data = client_data; -+ desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags); -+ -+ __hif_xmit_pkt(&pfe->hif, client->id, qno, data, len, flags); -+ -+ inc_cl_idx(queue->write_idx); -+ queue->tx_pending++; -+ queue->jiffies_last_packet = jiffies; -+ -+} -+ -+/*This function puts the given packet in the specified client queue */ -+int hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data, unsigned int len, u32 client_ctrl, void *client_data) -+{ -+ struct hif_client_tx_queue *queue = &client->tx_q[qno]; -+ struct tx_queue_desc *desc = queue->base + queue->write_idx; -+ -+ //printk(KERN_INFO "%s\n",__func__); -+ -+ if (queue->tx_pending < queue->size) { -+ /*Construct pkt header */ -+ -+ data -= sizeof(struct hif_hdr); -+ len += sizeof(struct hif_hdr); -+ -+ hif_hdr_write(data, client->id, qno, client_ctrl); -+ -+ desc->data = client_data; -+ desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(HIF_FIRST_BUFFER | HIF_LAST_BUFFER | HIF_DATA_VALID); -+ -+ if (hif_xmit_pkt(&pfe->hif, client->id, qno, data, len)) -+ return 1; -+ -+ inc_cl_idx(queue->write_idx); -+ queue->tx_pending++; -+ queue->jiffies_last_packet = jiffies; -+ -+ return 0; -+ } -+ -+ dbg_print_info("%s Tx client %d qno %d is full\n",__func__, client->id, qno); -+ return 1; -+} -+ -+void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno, unsigned int *flags, int count) -+{ -+ struct hif_client_tx_queue *queue = &client->tx_q[qno]; -+ struct tx_queue_desc *desc = queue->base
+ queue->read_idx; -+ -+ dbg_print_info("%s: qno : %d rd_indx: %d pending:%d\n",__func__, qno, queue->read_idx, queue->tx_pending); -+ -+ if (!queue->tx_pending ) -+ return NULL; -+ -+ if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) { -+ u32 tmu_tx_pkts = be32_to_cpu(pe_dmem_read(TMU0_ID + client->id, virt_to_tmu_dmem(&tx_trans[qno]), 4)); -+ -+ if (queue->prev_tmu_tx_pkts > tmu_tx_pkts) -+ queue->done_tmu_tx_pkts = UINT_MAX - queue->prev_tmu_tx_pkts + tmu_tx_pkts; -+ else -+ queue->done_tmu_tx_pkts = tmu_tx_pkts - queue->prev_tmu_tx_pkts; -+ -+ queue->prev_tmu_tx_pkts = tmu_tx_pkts; -+ -+ if (!queue->done_tmu_tx_pkts) { -+ return NULL; -+ } -+ } -+ -+ if (desc->ctrl & CL_DESC_OWN) { -+ hif_tx_done_process(&pfe->hif, count); -+ -+ //Check again whether packets have completed in the tx queue. -+ if (desc->ctrl & CL_DESC_OWN) -+ return NULL; -+ } -+ -+ inc_cl_idx(queue->read_idx); -+ queue->tx_pending--; -+ -+ *flags = CL_DESC_GET_FLAGS(desc->ctrl); -+ -+ if (queue->done_tmu_tx_pkts && (*flags & HIF_LAST_BUFFER)) -+ queue->done_tmu_tx_pkts--; -+ -+ -+ return desc->data; -+} -+ -+//FIXME: TMU queues length mapping needs to be declared in shared PFE/PFE_CTRL header -+static void hif_lib_tmu_credit_init(struct pfe *pfe) -+{ -+ int i, q; -+ -+ for (i = 0; i < NUM_GEMAC_SUPPORT; i++) -+ for (q = 0; q < emac_txq_cnt; q++) { -+ pfe->tmu_credit.tx_credit_max[i][q] = (q == 0) ? DEFAULT_Q0_QDEPTH : DEFAULT_MAX_QDEPTH; -+ pfe->tmu_credit.tx_credit[i][q] = pfe->tmu_credit.tx_credit_max[i][q]; -+ } -+} -+/** __hif_lib_update_credit -+ * -+ * @param[in] client hif client context -+ * @param[in] queue queue number matching the TMU queue -+ */ -+void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue) -+{ -+ unsigned int tmu_tx_packets, tmp; -+ -+ if (tx_qos) { -+ tmu_tx_packets = be32_to_cpu(pe_dmem_read(TMU0_ID + client->id, virt_to_tmu_dmem(&tx_trans[queue]), 4)); -+ -+ // tx_packets counter overflowed -+ if (tmu_tx_packets > pfe->tmu_credit.tx_packets[client->id][queue]) { -+ tmp = UINT_MAX - tmu_tx_packets + pfe->tmu_credit.tx_packets[client->id][queue]; -+ pfe->tmu_credit.tx_credit[client->id][queue] = pfe->tmu_credit.tx_credit_max[client->id][queue] - tmp; -+ } -+ // TMU tx <= pfe_eth tx: normal case, or both overflowed since last time -+ else -+ pfe->tmu_credit.tx_credit[client->id][queue] = pfe->tmu_credit.tx_credit_max[client->id][queue] - (pfe->tmu_credit.tx_packets[client->id][queue] - tmu_tx_packets); -+ } -+} -+ -+/** hif_lib_update_credit -+ * -+ * @param[in] client hif client context -+ * @param[in] queue queue number matching the TMU queue -+ */ -+void hif_lib_update_credit(struct hif_client_s *client, unsigned int queue) -+{ -+ spin_lock_bh(&pfe->hif.tx_lock); -+ __hif_lib_update_credit(client, queue); -+ spin_unlock_bh(&pfe->hif.tx_lock); -+} -+ -+int pfe_hif_lib_init(struct pfe *pfe) -+{ -+ int rc; -+ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ if (lro_mode) { -+ page_mode = 1; -+ pfe_pkt_size = min(PAGE_SIZE, MAX_PFE_PKT_SIZE); -+ pfe_pkt_headroom = 0; -+ } else { -+ page_mode = 0; -+ pfe_pkt_size = PFE_PKT_SIZE; -+ pfe_pkt_headroom = PFE_PKT_HEADROOM; -+ } -+ -+ if (tx_qos) -+ emac_txq_cnt = EMAC_TXQ_CNT / 2; -+ else -+ emac_txq_cnt = EMAC_TXQ_CNT; -+ -+ hif_lib_tmu_credit_init(pfe); -+ pfe->hif.shm = &ghif_shm; -+ rc = pfe_hif_shm_init(pfe->hif.shm); -+ -+ return rc; -+} -+ -+ -+void pfe_hif_lib_exit(struct pfe *pfe) -+{ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ pfe_hif_shm_clean(pfe->hif.shm); -+} ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.h -@@ -0,0 +1,219 @@ -+/* -+ * -+ *
Copyright (C) 2007 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#ifndef _PFE_HIF_LIB_H_ -+#define _PFE_HIF_LIB_H_ -+ -+#include "pfe_hif.h" -+ -+#ifdef HIF_LIB_DEBUG -+#define dbg_print_info( fmt, args...) \ -+ printk(KERN_INFO fmt, ##args) -+#else -+#define dbg_print_info( fmt, args...) -+#endif -+ -+#define HIF_CL_REQ_TIMEOUT 10 -+ -+#if defined(CONFIG_COMCERTO_ZONE_DMA_NCNB) -+#define GFP_DMA_PFE (GFP_DMA_NCNB | __GFP_NOWARN) -+#else -+#define GFP_DMA_PFE 0 -+#endif -+ -+enum { -+ REQUEST_CL_REGISTER = 0, -+ REQUEST_CL_UNREGISTER, -+ HIF_REQUEST_MAX -+}; -+ -+enum { -+ EVENT_HIGH_RX_WM = 0, /* Event to indicate that the client rx queue has reached the watermark level */ -+ EVENT_RX_PKT_IND, /* Event to indicate that a packet was received for the client */ -+ EVENT_TXDONE_IND, /* Event to indicate that packet tx is done for the client */ -+ HIF_EVENT_MAX -+}; -+ -+/*structure to store client queue info */ -+struct hif_client_rx_queue { -+ struct rx_queue_desc *base; -+ u32 size; -+ u32 read_idx; -+ u32 write_idx; -+}; -+ -+struct hif_client_tx_queue { -+ struct tx_queue_desc *base; -+ u32 size; -+ u32 read_idx; -+ u32 write_idx; -+ u32 tx_pending; -+ unsigned long jiffies_last_packet; -+ u32 nocpy_flag; -+ u32 prev_tmu_tx_pkts; -+ u32 done_tmu_tx_pkts; -+ u32 cur_tso_hdr_p; -+ int tso_buf_cnt; -+}; -+ -+struct hif_client_s -+{ -+ int id; -+ int tx_qn; -+ int rx_qn; -+ void *rx_qbase; -+ void *tx_qbase; -+ /* FIXME tx/rx_qsize fields can be removed after per queue depth is supported*/ -+ int tx_qsize; -+ int rx_qsize; -+ int cpu_id; -+ -+// spinlock_t rx_lock; -+ struct hif_client_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX]; -+ struct hif_client_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX]; -+ int (*event_handler)(void *priv, int event, int data); -+ unsigned long queue_mask[HIF_EVENT_MAX]; -+ struct pfe *pfe; -+ void *priv; -+}; -+ -+ -+/* Client specific shared memory -+ * It contains number of Rx/Tx queues, base addresses and queue sizes */ -+struct hif_client_shm { -+ u32 ctrl; /*0-7: number of Rx queues, 8-15: number of tx queues */ -+ unsigned long rx_qbase; /*Rx queue base address */ -+ u32 rx_qsize; /*each Rx queue size, all Rx queues are of same size */ -+ unsigned long tx_qbase; /* Tx queue base address */ -+ u32 tx_qsize; /*each Tx queue size, all Tx queues are of same size */ -+}; -+ -+/*Client shared memory ctrl bit description */ -+#define CLIENT_CTRL_RX_Q_CNT_OFST 0 -+#define CLIENT_CTRL_TX_Q_CNT_OFST 8 -+#define CLIENT_CTRL_RX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_RX_Q_CNT_OFST) & 0xFF) -+#define CLIENT_CTRL_TX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_TX_Q_CNT_OFST) & 0xFF) -+ -+ -+ -+/*Shared memory used to communicate between HIF driver and host/client drivers -+ * Before starting the hif driver rx_buf_pool and rx_buf_pool_cnt should be -+ * 
initialized with host buffers and buffers count in the pool. -+ * rx_buf_pool_cnt should be >= HIF_RX_DESC_NT. -+ * -+ */ -+struct hif_shm { -+ u32 rx_buf_pool_cnt; /*Number of rx buffers available*/ -+ void *rx_buf_pool[HIF_RX_DESC_NT];/*Rx buffers required to initialize HIF rx descriptors */ -+ unsigned long gClient_status[2]; /*Global client status bit mask */ -+ u32 hif_qfull; /*TODO Client-id that caused the TMU3 queue stop */ -+ u32 hif_qresume; /*TODO */ -+ struct hif_client_shm client[HIF_CLIENTS_MAX]; /* Client specific shared memory */ -+}; -+ -+ -+#define CL_DESC_OWN (1 << 31) /* This sets ownership to HIF driver */ -+#define CL_DESC_LAST (1 << 30) /* This indicates last packet for multi buffers handling */ -+#define CL_DESC_FIRST (1 << 29) /* This indicates first packet for multi buffers handling */ -+#define CL_DESC_BUF_LEN(x) ((x) & 0xFFFF) -+#define CL_DESC_FLAGS(x) (((x) & 0xF) << 16) -+#define CL_DESC_GET_FLAGS(x) (((x) >> 16) & 0xF) -+ -+struct rx_queue_desc { -+ void *data; -+ u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/ -+ u32 client_ctrl; -+}; -+ -+struct tx_queue_desc { -+ void *data; -+ u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/ -+}; -+ -+/* HIF Rx is not working properly for 2-byte aligned buffers and -+ * ip_header should be 4-byte aligned for better performance. -+ * "ip_header = 64 + 6(hif_header) + 14 (MAC Header)" will be 4-byte aligned. -+ */ -+#define PFE_PKT_HEADER_SZ sizeof(struct hif_hdr) -+#define PFE_BUF_SIZE 2048 /* must be big enough for headroom, pkt size and skb shared info */ -+#define PFE_PKT_HEADROOM 128 -+#define SKB_SHARED_INFO_SIZE 256 /* At least sizeof(struct skb_shared_info) bytes */ -+ -+//#define PFE_PKT_SIZE 1544 /* maximum ethernet packet size */ -+#define PFE_PKT_SIZE (PFE_BUF_SIZE - PFE_PKT_HEADROOM - SKB_SHARED_INFO_SIZE) /* maximum ethernet packet size after reassembly offload*/ -+#define MAX_L2_HDR_SIZE 14 /* Not correct for VLAN/PPPoE */ -+#define MAX_L3_HDR_SIZE 20 /* Not correct for IPv6 */ -+#define MAX_L4_HDR_SIZE 60 /* TCP with maximum options */ -+#define MAX_HDR_SIZE (MAX_L2_HDR_SIZE + MAX_L3_HDR_SIZE + MAX_L4_HDR_SIZE) -+#define MAX_WIFI_HDR_SIZE (MAX_L2_HDR_SIZE + MAX_L3_HDR_SIZE + 6) -+#define MAX_PFE_PKT_SIZE 16380UL /* Used in page mode to clamp packet size to the maximum supported by the hif hw interface (<16KiB) */ -+ -+extern unsigned int pfe_pkt_size; -+extern unsigned int pfe_pkt_headroom; -+extern unsigned int page_mode; -+extern unsigned int lro_mode; -+extern unsigned int tx_qos; -+extern unsigned int emac_txq_cnt; -+ -+int pfe_hif_lib_init(struct pfe *pfe); -+void pfe_hif_lib_exit(struct pfe *pfe); -+int hif_lib_client_register(struct hif_client_s *client); -+int hif_lib_client_unregister(struct hif_client_s *client); -+void __hif_lib_xmit_tso_hdr(struct hif_client_s *client, unsigned int qno, u32 client_ctrl, unsigned int ip_off, unsigned int ip_id, unsigned int ip_len, unsigned int tcp_off, unsigned int tcp_seq); -+void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data, unsigned int len, u32 client_ctrl, unsigned int flags, void *client_data); -+int hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data, unsigned int len, u32 client_ctrl, void *client_data); -+void hif_lib_indicate_client(int cl_id, int event, int data); -+int hif_lib_event_handler_start( struct hif_client_s *client, int event, int data ); -+int hif_lib_tmu_queue_start( struct hif_client_s *client, int qno ); -+int hif_lib_tmu_queue_stop( struct hif_client_s *client, int 
qno ); -+void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno, unsigned int *flags, int count); -+void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int *ofst, unsigned int *rx_ctrl, unsigned int *desc_ctrl, void **priv_data); -+void hif_lib_update_credit(struct hif_client_s *client, unsigned int qno); -+void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue); -+void hif_lib_set_rx_cpu_affinity(struct hif_client_s *client, int cpu_id); -+void hif_lib_set_tx_queue_nocpy(struct hif_client_s *client, int qno, int enable); -+static inline int hif_lib_tx_avail(struct hif_client_s *client, unsigned int qno) -+{ -+ struct hif_client_tx_queue *queue = &client->tx_q[qno]; -+ -+ return (queue->size - queue->tx_pending); -+} -+ -+static inline int hif_lib_get_tx_wrIndex(struct hif_client_s *client, unsigned int qno) -+{ -+ struct hif_client_tx_queue *queue = &client->tx_q[qno]; -+ -+ return queue->write_idx; -+} -+ -+ -+static inline int hif_lib_tx_pending(struct hif_client_s *client, unsigned int qno) -+{ -+ struct hif_client_tx_queue *queue = &client->tx_q[qno]; -+ -+ return queue->tx_pending; -+} -+ -+#define hif_lib_tx_credit_avail(pfe, id, qno) pfe->tmu_credit.tx_credit[id][qno] -+#define hif_lib_tx_credit_max(pfe, id, qno) pfe->tmu_credit.tx_credit_max[id][qno] -+#define hif_lib_tx_credit_use(pfe, id, qno, credit) do {if (tx_qos) {pfe->tmu_credit.tx_credit[id][qno]-= credit; pfe->tmu_credit.tx_packets[id][qno]+=credit;}} while (0) -+ -+#endif /* _PFE_HIF_LIB_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/pfe_hw.c -@@ -0,0 +1,188 @@ -+/* -+ * -+ * Copyright (C) 2007 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
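The hif_lib_tx_credit_* macros above are the consumer side of the TMU flow control seeded by hif_lib_tmu_credit_init() and refreshed by hif_lib_update_credit(). A hedged sketch of how a client transmit path might gate on them when tx_qos is enabled; the NETDEV_TX_BUSY return is illustrative, and the real Ethernet client code lives elsewhere in this patch:

    /* Sketch: gate transmission on available TMU credit (tx_qos) */
    if (tx_qos && !hif_lib_tx_credit_avail(pfe, id, qno)) {
            hif_lib_update_credit(client, qno);  /* re-read TMU counter */
            if (!hif_lib_tx_credit_avail(pfe, id, qno))
                    return NETDEV_TX_BUSY;       /* back-pressure the stack */
    }
    hif_lib_tx_credit_use(pfe, id, qno, 1);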
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#include "pfe_mod.h" -+#include "pfe_hw.h" -+ -+/* Functions to handle most of pfe hw register initialization */ -+ -+int pfe_hw_init(struct pfe *pfe, int resume) -+{ -+ CLASS_CFG class_cfg = { -+ .pe_sys_clk_ratio = PE_SYS_CLK_RATIO, -+ .route_table_baseaddr = pfe->ddr_phys_baseaddr + ROUTE_TABLE_BASEADDR, -+ .route_table_hash_bits = ROUTE_TABLE_HASH_BITS, -+ }; -+ -+ TMU_CFG tmu_cfg = { -+ .pe_sys_clk_ratio = PE_SYS_CLK_RATIO, -+ .llm_base_addr = pfe->ddr_phys_baseaddr + TMU_LLM_BASEADDR, -+ .llm_queue_len = TMU_LLM_QUEUE_LEN, -+ }; -+ -+#if !defined(CONFIG_UTIL_DISABLED) -+ UTIL_CFG util_cfg = { -+ .pe_sys_clk_ratio = PE_SYS_CLK_RATIO, -+ }; -+#endif -+ -+ BMU_CFG bmu1_cfg = { -+ .baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR + BMU1_LMEM_BASEADDR), -+ .count = BMU1_BUF_COUNT, -+ .size = BMU1_BUF_SIZE, -+ }; -+ -+ BMU_CFG bmu2_cfg = { -+ .baseaddr = DDR_PHYS_TO_PFE(pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR), -+ .count = BMU2_BUF_COUNT, -+ .size = BMU2_BUF_SIZE, -+ }; -+ -+ GPI_CFG egpi1_cfg = { -+ .lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT, -+ .tmlf_txthres = EGPI1_TMLF_TXTHRES, -+ .aseq_len = EGPI1_ASEQ_LEN, -+ }; -+ -+ GPI_CFG egpi2_cfg = { -+ .lmem_rtry_cnt = EGPI2_LMEM_RTRY_CNT, -+ .tmlf_txthres = EGPI2_TMLF_TXTHRES, -+ .aseq_len = EGPI2_ASEQ_LEN, -+ }; -+ -+#if defined(CONFIG_PLATFORM_C2000) -+ GPI_CFG egpi3_cfg = { -+ .lmem_rtry_cnt = EGPI3_LMEM_RTRY_CNT, -+ .tmlf_txthres = EGPI3_TMLF_TXTHRES, -+ .aseq_len = EGPI3_ASEQ_LEN, -+ }; -+#endif -+ -+ GPI_CFG hgpi_cfg = { -+ .lmem_rtry_cnt = HGPI_LMEM_RTRY_CNT, -+ .tmlf_txthres = HGPI_TMLF_TXTHRES, -+ .aseq_len = HGPI_ASEQ_LEN, -+ }; -+ -+ printk(KERN_INFO "%s\n", __func__); -+ -+#if defined(CONFIG_PLATFORM_LS1012A) && !defined(LS1012A_PFE_RESET_WA) -+ /* LS1012A needs this to make PE work correctly */ -+ writel(0x3, CLASS_PE_SYS_CLK_RATIO); -+ writel(0x3, TMU_PE_SYS_CLK_RATIO); -+ writel(0x3, UTIL_PE_SYS_CLK_RATIO); -+ udelay(10); -+#endif -+ -+ printk(KERN_INFO "CLASS version: %x\n", readl(CLASS_VERSION)); -+ printk(KERN_INFO "TMU version: %x\n", readl(TMU_VERSION)); -+ -+ printk(KERN_INFO "BMU1 version: %x\n", readl(BMU1_BASE_ADDR + BMU_VERSION)); -+ printk(KERN_INFO "BMU2 version: %x\n", readl(BMU2_BASE_ADDR + BMU_VERSION)); -+#if defined(CONFIG_PLATFORM_C2000) -+ printk(KERN_INFO "EMAC1 network cfg: %x\n", readl(EMAC1_BASE_ADDR + EMAC_NETWORK_CONFIG)); -+ printk(KERN_INFO "EMAC2 network cfg: %x\n", readl(EMAC2_BASE_ADDR + EMAC_NETWORK_CONFIG)); -+#if !defined(CONFIG_PLATFORM_PCI) -+ printk(KERN_INFO "EMAC3 network cfg: %x\n", readl(EMAC3_BASE_ADDR + EMAC_NETWORK_CONFIG)); -+#endif -+#else -+ //TODO print MTIP config -+#endif -+ -+ printk(KERN_INFO "EGPI1 version: %x\n", readl(EGPI1_BASE_ADDR + GPI_VERSION)); -+ printk(KERN_INFO "EGPI2 version: %x\n", readl(EGPI2_BASE_ADDR + GPI_VERSION)); -+#if !defined(CONFIG_PLATFORM_PCI) && !defined(CONFIG_PLATFORM_LS1012A) -+ printk(KERN_INFO "EGPI3 version: %x\n", readl(EGPI3_BASE_ADDR + GPI_VERSION)); -+#endif -+ printk(KERN_INFO "HGPI version: %x\n", readl(HGPI_BASE_ADDR + GPI_VERSION)); -+ -+#if !defined(CONFIG_PLATFORM_PCI) -+ printk(KERN_INFO "GPT version: %x\n", readl(CBUS_GPT_VERSION)); -+#endif -+ -+ printk(KERN_INFO "HIF version: %x\n", readl(HIF_VERSION)); -+ printk(KERN_INFO "HIF NOPCY version: %x\n", readl(HIF_NOCPY_VERSION)); -+ -+#if !defined(CONFIG_UTIL_DISABLED) 
-+ printk(KERN_INFO "UTIL version: %x\n", readl(UTIL_VERSION)); -+#endif -+ while(!(readl(TMU_CTRL) & ECC_MEM_INIT_DONE)) ; -+ -+ hif_rx_disable(); -+ hif_tx_disable(); -+ -+ bmu_init(BMU1_BASE_ADDR, &bmu1_cfg); -+ -+ printk(KERN_INFO "bmu_init(1) done\n"); -+ -+ bmu_init(BMU2_BASE_ADDR, &bmu2_cfg); -+ -+ printk(KERN_INFO "bmu_init(2) done\n"); -+ -+ class_cfg.resume = resume ? 1 : 0; -+ -+ class_init(&class_cfg); -+ -+ printk(KERN_INFO "class_init() done\n"); -+ -+ tmu_init(&tmu_cfg); -+ -+ printk(KERN_INFO "tmu_init() done\n"); -+#if !defined(CONFIG_UTIL_DISABLED) -+ util_init(&util_cfg); -+ -+ printk(KERN_INFO "util_init() done\n"); -+#endif -+ gpi_init(EGPI1_BASE_ADDR, &egpi1_cfg); -+ -+ printk(KERN_INFO "gpi_init(1) done\n"); -+ -+ gpi_init(EGPI2_BASE_ADDR, &egpi2_cfg); -+ -+ printk(KERN_INFO "gpi_init(2) done\n"); -+#if !defined(CONFIG_PLATFORM_PCI) && !defined(CONFIG_PLATFORM_LS1012A) -+ gpi_init(EGPI3_BASE_ADDR, &egpi3_cfg); -+ -+ printk(KERN_INFO "gpi_init(3) done\n"); -+#endif -+ gpi_init(HGPI_BASE_ADDR, &hgpi_cfg); -+ -+ printk(KERN_INFO "gpi_init(hif) done\n"); -+ -+ bmu_enable(BMU1_BASE_ADDR); -+ -+ printk(KERN_INFO "bmu_enable(1) done\n"); -+ -+ bmu_enable(BMU2_BASE_ADDR); -+ -+ printk(KERN_INFO "bmu_enable(2) done\n"); -+ -+ return 0; -+} -+ -+void pfe_hw_exit(struct pfe *pfe) -+{ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ bmu_disable(BMU1_BASE_ADDR); -+ bmu_reset(BMU1_BASE_ADDR); -+ -+ bmu_disable(BMU2_BASE_ADDR); -+ bmu_reset(BMU2_BASE_ADDR); -+} ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/pfe_hw.h -@@ -0,0 +1,32 @@ -+/* -+ * -+ * Copyright (C) 2007 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#ifndef _PFE_HW_H_ -+#define _PFE_HW_H_ -+ -+#if !defined(CONFIG_PLATFORM_PCI) -+#define PE_SYS_CLK_RATIO 1 /* SYS/AXI = 250MHz, HFE = 500MHz */ -+#else -+#define PE_SYS_CLK_RATIO 0 /* SYS = 40MHz, HFE = 40MHz */ -+#endif -+ -+int pfe_hw_init(struct pfe *pfe, int resume); -+void pfe_hw_exit(struct pfe *pfe); -+ -+#endif /* _PFE_HW_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c -@@ -0,0 +1,341 @@ -+/* -+ * -+ * Copyright (C) 2007 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+ -+#include "pfe_mod.h" -+ -+struct comcerto_pfe_platform_data pfe_platform_data; -+ -+ -+ -+static int pfe_get_gemac_if_proprties(struct device_node *parent, int port, int if_cnt, -+ struct comcerto_pfe_platform_data *pdata) -+{ -+ struct device_node *gem = NULL, *phy = NULL; -+ int size; -+ int ii = 0, phy_id = 0; -+ const u32 *addr; -+ const void *mac_addr; -+ -+ for (ii = 0; ii < if_cnt; ii++) { -+ gem = of_get_next_child(parent, gem); -+ if (!gem) -+ goto err; -+ addr = of_get_property(gem, "reg", &size); -+ if (addr && (be32_to_cpup(addr) == port)) -+ break; -+ } -+ -+ if (ii >= if_cnt) { -+ printk(KERN_ERR "%s:%d Failed to find interface = %d\n", __func__, __LINE__, if_cnt); -+ goto err; -+ } -+ -+ pdata->comcerto_eth_pdata[port].gem_id = port; -+ -+ mac_addr = of_get_mac_address(gem); -+ -+ if (mac_addr) { -+ memcpy(pdata->comcerto_eth_pdata[port].mac_addr, mac_addr, ETH_ALEN); -+ } -+ -+ if ((pdata->comcerto_eth_pdata[port].mii_config = of_get_phy_mode(gem)) < 0) -+ printk(KERN_ERR "%s:%d Incorrect Phy mode....\n", __func__, __LINE__); -+ -+ -+ addr = of_get_property(gem, "fsl,gemac-bus-id", &size); -+ if (!addr) -+ printk(KERN_ERR "%s:%d Invalid gemac-bus-id....\n", __func__, __LINE__); -+ else -+ pdata->comcerto_eth_pdata[port].bus_id = be32_to_cpup(addr); -+ -+ addr = of_get_property(gem, "fsl,gemac-phy-id", &size); -+ if (!addr) -+ printk(KERN_ERR "%s:%d Invalid gemac-phy-id....\n", __func__, __LINE__); -+ else -+ phy_id = pdata->comcerto_eth_pdata[port].phy_id = be32_to_cpup(addr); -+ -+ addr = of_get_property(gem, "fsl,mdio-mux-val", &size); -+ if (!addr) -+ printk(KERN_ERR "%s: Invalid mdio-mux-val....\n", __func__); -+ else -+ phy_id = pdata->comcerto_eth_pdata[port].mdio_muxval= be32_to_cpup(addr); -+ -+ -+ addr = of_get_property(gem, "fsl,pfe-phy-if-flags", &size); -+ if (!addr) -+ printk(KERN_ERR "%s:%d Invalid pfe-phy-if-flags....\n", __func__, __LINE__); -+ else -+ pdata->comcerto_eth_pdata[port].phy_flags = be32_to_cpup(addr); -+ -+ addr = of_get_property(gem, "fsl,pfe-gemac-mode", &size); -+ if (!addr) -+ printk(KERN_ERR "%s:%d Invalid pfe-gemac-mode....\n", __func__, __LINE__); -+ else -+ pdata->comcerto_eth_pdata[port].gemac_mode = be32_to_cpup(addr); -+ -+ -+ /* If PHY is enabled, read mdio properties */ -+ if (pdata->comcerto_eth_pdata[port].phy_flags & GEMAC_NO_PHY) -+ goto done; -+ -+ phy = of_get_next_child(gem, NULL); -+ -+ addr = of_get_property(phy, "reg", &size); -+ -+ if (!addr) -+ printk(KERN_ERR "%s:%d Invalid phy enable flag....\n", __func__, __LINE__); -+ else -+ pdata->comcerto_mdio_pdata[port].enabled = be32_to_cpup(addr); -+ -+ addr = of_get_property (phy, "fsl,mdio-phy-mask", &size); -+ if (!addr) -+ printk(KERN_ERR "%s:%d Unable to read mdio-phy-mask....\n", __func__, __LINE__); -+ else -+ pdata->comcerto_mdio_pdata[port].phy_mask= be32_to_cpup(addr); -+ pdata->comcerto_mdio_pdata[port].irq[0] = PHY_POLL; -+ -+done: -+ -+ return 0; -+ -+err: -+ return -1; -+} -+/** -+ * pfe_platform_probe - -+ * -+ * -+ */ -+static int pfe_platform_probe(struct platform_device *pdev) -+{ -+ struct resource res; -+ int ii, rc, interface_count = 0, size = 0; -+ const u32 *prop; -+ struct device_node *np; -+ -+ np = pdev->dev.of_node; -+ -+ 
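pfe_get_gemac_if_proprties() above open-codes of_get_property() plus be32_to_cpup() for each u32 property and only logs on failure. The generic OF accessor available in this kernel folds the presence check and the endian conversion into one call; a sketch for a single property (error handling illustrative):

    /* Sketch: same lookup via the generic OF helper */
    u32 bus_id;

    if (of_property_read_u32(gem, "fsl,gemac-bus-id", &bus_id))
            printk(KERN_ERR "%s: missing fsl,gemac-bus-id\n", __func__);
    else
            pdata->comcerto_eth_pdata[port].bus_id = bus_id;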
if (!np) { -+ printk(KERN_ERR "Invalid device node\n"); -+ return -EINVAL; -+ } -+ -+ pfe = kzalloc(sizeof(struct pfe), GFP_KERNEL); -+ if (!pfe) { -+ rc = -ENOMEM; -+ goto err_alloc; -+ } -+ -+ platform_set_drvdata(pdev, pfe); -+ -+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); -+ -+ if (of_address_to_resource(np, 2, &res)) -+ { -+ rc = -ENOMEM; -+ printk(KERN_ERR "failed to get ddr resource\n"); -+ goto err_ddr; -+ } -+ -+ -+ pfe->ddr_phys_baseaddr = res.start; -+ pfe->ddr_size = resource_size(&res); -+ -+ //pfe->ddr_baseaddr = ioremap(res.start, resource_size(&res)); -+ pfe->ddr_baseaddr = phys_to_virt(res.start); -+ if (!pfe->ddr_baseaddr) { -+ printk(KERN_ERR "ioremap() ddr failed\n"); -+ rc = -ENOMEM; -+ goto err_ddr; -+ } -+ -+ /*printk("%s:%d : DDR Res : Phy addr:len = %x:%x Mapped addr : %x\n", __func__, __LINE__, -+ pfe->ddr_phys_baseaddr, pfe->ddr_size, pfe->ddr_baseaddr);*/ -+ -+ pfe->scfg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,"fsl,pfe-scfg"); -+ if (IS_ERR(pfe->scfg)) { -+ dev_err(&pdev->dev, "No syscfg phandle specified\n"); -+ return PTR_ERR(pfe->scfg); -+ } -+ /*printk("%s scfg %p\n",__func__,pfe->scfg);*/ -+ -+ -+#if 1 -+ if (!(pfe->cbus_baseaddr = of_iomap(np, 1))) -+ { -+ rc = -ENOMEM; -+ printk(KERN_ERR "failed to get axi resource\n"); -+ goto err_axi; -+ } -+ -+ /*printk("%s:%d : AXI Mapped addr : %lx\n", __func__, __LINE__, pfe->cbus_baseaddr); -+ printk("%s:%d : AXI Mapped addr : phys %lx\n", __func__, __LINE__, virt_to_phys(pfe->cbus_baseaddr));*/ -+#else -+ -+ if (of_address_to_resource(np, 1, &res)) -+ { -+ rc = -ENOMEM; -+ printk(KERN_ERR "failed to get AXI resource\n"); -+ goto err_iram; -+ } -+ pfe->cbus_baseaddr = ioremap(res.start, resource_size(&res)); -+ if (!pfe->cbus_baseaddr) { -+ printk(KERN_INFO "ioremap() AXI failed %lx %x\n", res.start, resource_size(&res)); -+ rc = -ENOMEM; -+ goto err_iram; -+ } -+ printk("%s:%d : AXI Mapped addr : %x PHY addr = %x\n", __func__, __LINE__, pfe->cbus_baseaddr, res.start); -+#endif -+ -+ pfe->hif_irq = platform_get_irq(pdev, 0); -+ if (pfe->hif_irq < 0) { -+ printk(KERN_ERR "platform_get_irq_byname(hif) failed\n"); -+ rc = pfe->hif_irq; -+ goto err_hif_irq; -+ } -+ /*printk("hif_irq: %d \n", pfe->hif_irq);*/ -+ -+ /* Read interface count */ -+ prop = of_get_property(np, "fsl,pfe-num-interfaces", &size); -+ if (!prop) { -+ printk(KERN_ERR "Failed to read number of interfaces\n"); -+ rc = -ENXIO; -+ goto err_prop; -+ } -+ -+ interface_count = be32_to_cpup(prop); -+ /*printk(KERN_INFO "%s:%d Number of interfaces : %d\n", __func__, __LINE__, interface_count);*/ -+ if (interface_count <= 0) { -+ printk(KERN_ERR "No ethernet interface count : %d\n", interface_count); -+ rc = -ENXIO; -+ goto err_prop; -+ } -+ -+ for (ii = 0; ii < interface_count; ii++) { -+ pfe_get_gemac_if_proprties(np, ii, interface_count, &pfe_platform_data); -+ } -+ -+ -+ pfe->dev = &pdev->dev; -+ -+ pfe->dev->platform_data = &pfe_platform_data; -+ -+ //FIXME get the correct clock from dts -+ pfe->ctrl.sys_clk = 250000; // save sys_clk value as KHz -+ -+ rc = pfe_probe(pfe); -+ if (rc < 0) -+ goto err_probe; -+ -+ return 0; -+ -+err_probe: -+err_prop: -+ /*TODO complet the code */ -+err_hif_irq: -+ iounmap(pfe->cbus_baseaddr); -+ -+err_axi: -+ iounmap(pfe->ddr_baseaddr); -+ -+err_ddr: -+ platform_set_drvdata(pdev, NULL); -+ -+ kfree(pfe); -+ -+err_alloc: -+ return rc; -+} -+ -+ -+/** -+ * pfe_platform_remove - -+ * -+ * -+ */ -+static int pfe_platform_remove(struct platform_device *pdev) -+{ -+ struct pfe *pfe = 
platform_get_drvdata(pdev); -+ int rc; -+ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ rc = pfe_remove(pfe); -+ -+ iounmap(pfe->cbus_baseaddr); -+ iounmap(pfe->ddr_baseaddr); -+ -+ platform_set_drvdata(pdev, NULL); -+ -+ kfree(pfe); -+ -+ return rc; -+} -+ -+static struct of_device_id pfe_match[] = { -+ { -+ .compatible = "fsl,pfe", -+ }, -+ {}, -+}; -+MODULE_DEVICE_TABLE(of, pfe_match); -+ -+static struct platform_driver pfe_platform_driver = { -+ .probe = pfe_platform_probe, -+ .remove = pfe_platform_remove, -+ .driver = { -+ .name = "pfe", -+ .of_match_table = pfe_match, -+ }, -+}; -+ -+#if 0 -+static int __init pfe_module_init(void) -+{ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ return platform_driver_register(&pfe_platform_driver); -+} -+ -+ -+static void __exit pfe_module_exit(void) -+{ -+ platform_driver_unregister(&pfe_platform_driver); -+ -+ printk(KERN_INFO "%s\n", __func__); -+} -+module_init(pfe_module_init); -+module_exit(pfe_module_exit); -+#endif -+ -+module_platform_driver(pfe_platform_driver); -+MODULE_LICENSE("GPL"); -+MODULE_DESCRIPTION("PFE Ethernet driver"); -+MODULE_AUTHOR("NXP DNCPE"); ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/pfe_mod.c -@@ -0,0 +1,140 @@ -+/* -+ * -+ * Copyright (C) 2007 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
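The #if 0 block above keeps hand-written module init/exit boilerplate that the module_platform_driver() invocation after it already generates, so the two are redundant. Roughly what the macro expands to (helper names follow the kernel's module_driver() convention):

    /* Approximate expansion of module_platform_driver(pfe_platform_driver) */
    static int __init pfe_platform_driver_init(void)
    {
            return platform_driver_register(&pfe_platform_driver);
    }
    module_init(pfe_platform_driver_init);

    static void __exit pfe_platform_driver_exit(void)
    {
            platform_driver_unregister(&pfe_platform_driver);
    }
    module_exit(pfe_platform_driver_exit);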
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#include -+#include "pfe_mod.h" -+ -+struct pfe *pfe; -+ -+/** -+ * pfe_probe - -+ * -+ * -+ */ -+int pfe_probe(struct pfe *pfe) -+{ -+ int rc; -+ -+ -+ if (DDR_MAX_SIZE > pfe->ddr_size) { -+ printk(KERN_ERR "%s: required DDR memory (%x) above platform ddr memory (%x)\n", __func__, DDR_MAX_SIZE, pfe->ddr_size); -+ rc = -ENOMEM; -+ goto err_hw; -+ } -+ -+ if (((int) (pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR) & (8*SZ_1M - 1)) != 0) { -+ printk(KERN_ERR "%s: BMU2 base address (0x%x) must be aligned on 8MB boundary\n", __func__, (int) pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR); -+ rc = -ENOMEM; -+ goto err_hw; -+ } -+ -+ -+ printk(KERN_INFO "cbus_baseaddr: %lx, ddr_baseaddr: %lx, ddr_phys_baseaddr: %lx, ddr_size: %x\n", -+ (unsigned long)pfe->cbus_baseaddr, (unsigned long)pfe->ddr_baseaddr, -+ pfe->ddr_phys_baseaddr, pfe->ddr_size); -+ -+ pfe_lib_init(pfe->cbus_baseaddr, pfe->ddr_baseaddr, pfe->ddr_phys_baseaddr, pfe->ddr_size); -+ -+ rc = pfe_hw_init(pfe, 0); -+ if (rc < 0) -+ goto err_hw; -+ -+ rc = pfe_hif_lib_init(pfe); -+ if (rc < 0) -+ goto err_hif_lib; -+ -+ rc = pfe_hif_init(pfe); -+ if (rc < 0) -+ goto err_hif; -+ -+ rc = pfe_firmware_init(pfe); -+ if (rc < 0) -+ goto err_firmware; -+ -+ rc = pfe_ctrl_init(pfe); -+ if (rc < 0) -+ goto err_ctrl; -+ -+ rc = pfe_eth_init(pfe); -+ if (rc < 0) -+ goto err_eth; -+ -+ rc = pfe_sysfs_init(pfe); -+ if(rc < 0) -+ goto err_sysfs; -+ -+ rc = pfe_debugfs_init(pfe); -+ if (rc < 0) -+ goto err_debugfs; -+ -+ return 0; -+ -+err_debugfs: -+ pfe_sysfs_exit(pfe); -+ -+err_sysfs: -+ pfe_eth_exit(pfe); -+ -+err_eth: -+ pfe_ctrl_exit(pfe); -+ -+err_ctrl: -+ pfe_firmware_exit(pfe); -+ -+err_firmware: -+ pfe_hif_exit(pfe); -+ -+err_hif: -+ pfe_hif_lib_exit(pfe); -+ -+err_hif_lib: -+ pfe_hw_exit(pfe); -+ -+err_hw: -+ return rc; -+} -+ -+ -+/** -+ * pfe_remove - -+ * -+ * -+ */ -+int pfe_remove(struct pfe *pfe) -+{ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ pfe_debugfs_exit(pfe); -+ -+ pfe_sysfs_exit(pfe); -+ -+ pfe_eth_exit(pfe); -+ -+ pfe_ctrl_exit(pfe); -+ -+ pfe_firmware_exit(pfe); -+ -+ pfe_hif_exit(pfe); -+ -+ pfe_hif_lib_exit(pfe); -+ -+ pfe_hw_exit(pfe); -+ -+ return 0; -+} ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/pfe_mod.h -@@ -0,0 +1,163 @@ -+/* -+ * -+ * Copyright (C) 2007 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
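pfe_probe() above verifies the 8 MB alignment of the BMU2 DDR base with a hand-rolled mask after truncating the address to int, which narrows a 64-bit physical address on arm64. A sketch of the same check using the kernel helper, without the cast:

    /* Sketch: alignment check without the narrowing (int) cast */
    if (!IS_ALIGNED(pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR, 8 * SZ_1M)) {
            printk(KERN_ERR "%s: BMU2 base must be 8MB aligned\n", __func__);
            rc = -ENOMEM;
            goto err_hw;
    }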
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#ifndef _PFE_MOD_H_ -+#define _PFE_MOD_H_ -+ -+#include -+#include -+ -+struct pfe; -+ -+#include "config.h" -+#include "pfe_hw.h" -+#include "pfe_firmware.h" -+#include "pfe_ctrl.h" -+#include "pfe_hif.h" -+#include "pfe_hif_lib.h" -+#include "pfe_eth.h" -+#include "pfe_sysfs.h" -+#include "pfe_perfmon.h" -+#include "pfe_debugfs.h" -+ -+struct pfe_tmu_credit { -+ /* Number of allowed TX packet in-flight, matches TMU queue size */ -+ unsigned int tx_credit[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT]; -+ unsigned int tx_credit_max[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT]; -+ unsigned int tx_packets[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT]; -+}; -+ -+struct pfe { -+ struct regmap *scfg; -+ unsigned long ddr_phys_baseaddr; -+ void *ddr_baseaddr; -+ unsigned int ddr_size; -+ void *cbus_baseaddr; -+ void *apb_baseaddr; -+ unsigned long iram_phys_baseaddr; -+ void *iram_baseaddr; -+ unsigned long ipsec_phys_baseaddr; -+ void *ipsec_baseaddr; -+ int hif_irq; -+ int hif_client_irq; -+ struct device *dev; -+ struct dentry *dentry; -+ struct pfe_ctrl ctrl; -+ struct pfe_hif hif; -+ struct pfe_eth eth; -+ struct hif_client_s *hif_client[HIF_CLIENTS_MAX]; -+#if defined(CFG_DIAGS) -+ struct pfe_diags diags; -+#endif -+ struct pfe_tmu_credit tmu_credit; -+ struct pfe_cpumon cpumon; -+ struct pfe_memmon memmon; -+ int wake; -+ struct clk * hfe_clock; -+}; -+ -+extern struct pfe *pfe; -+ -+int pfe_probe(struct pfe *pfe); -+int pfe_remove(struct pfe *pfe); -+ -+#ifndef SZ_1K -+#define SZ_1K 1024 -+#endif -+ -+#ifndef SZ_1M -+#define SZ_1M (1024 * 1024) -+#endif -+ -+/* DDR Mapping */ -+#if !defined(CONFIG_PLATFORM_PCI) -+#define ROUTE_TABLE_BASEADDR 0 -+#define ROUTE_TABLE_HASH_BITS 15 /**< 32K entries */ -+#define ROUTE_TABLE_SIZE ((1 << ROUTE_TABLE_HASH_BITS) * CLASS_ROUTE_SIZE) -+#define BMU2_DDR_BASEADDR (ROUTE_TABLE_BASEADDR + ROUTE_TABLE_SIZE) -+#define BMU2_BUF_COUNT (4096 - 256) /**< This is to get a total DDR size of 12MiB */ -+#define BMU2_DDR_SIZE (DDR_BUF_SIZE * BMU2_BUF_COUNT) -+#define UTIL_CODE_BASEADDR (BMU2_DDR_BASEADDR + BMU2_DDR_SIZE) -+#define UTIL_CODE_SIZE (128 * SZ_1K) -+#define UTIL_DDR_DATA_BASEADDR (UTIL_CODE_BASEADDR + UTIL_CODE_SIZE) -+#define UTIL_DDR_DATA_SIZE (64 * SZ_1K) -+#define CLASS_DDR_DATA_BASEADDR (UTIL_DDR_DATA_BASEADDR + UTIL_DDR_DATA_SIZE) -+#define CLASS_DDR_DATA_SIZE (32 * SZ_1K) -+#define TMU_DDR_DATA_BASEADDR (CLASS_DDR_DATA_BASEADDR + CLASS_DDR_DATA_SIZE) -+#define TMU_DDR_DATA_SIZE (32 * SZ_1K) -+#define TMU_LLM_BASEADDR (TMU_DDR_DATA_BASEADDR + TMU_DDR_DATA_SIZE) -+#define TMU_LLM_QUEUE_LEN (8 * 512) /**< Must be power of two and at least 16 * 8 = 128 bytes */ -+#define TMU_LLM_SIZE (4 * 16 * TMU_LLM_QUEUE_LEN) /**< (4 TMU's x 16 queues x queue_len) */ -+ -+#define DDR_MAX_SIZE (TMU_LLM_BASEADDR + TMU_LLM_SIZE) -+ -+#else -+ -+#define UTIL_CODE_BASEADDR 0 -+#if defined(CONFIG_UTIL_DISABLED) -+#define UTIL_CODE_SIZE (0 * SZ_1K) -+#else -+#define UTIL_CODE_SIZE (8 * SZ_1K) -+#endif -+#define UTIL_DDR_DATA_BASEADDR (UTIL_CODE_BASEADDR + UTIL_CODE_SIZE) -+#define UTIL_DDR_DATA_SIZE (0 * SZ_1K) -+#define CLASS_DDR_DATA_BASEADDR (UTIL_DDR_DATA_BASEADDR + UTIL_DDR_DATA_SIZE) -+#define CLASS_DDR_DATA_SIZE (0 * SZ_1K) -+#define TMU_DDR_DATA_BASEADDR (CLASS_DDR_DATA_BASEADDR + CLASS_DDR_DATA_SIZE) -+#define TMU_DDR_DATA_SIZE (0 * SZ_1K) -+#define ROUTE_TABLE_BASEADDR 
(TMU_DDR_DATA_BASEADDR + TMU_DDR_DATA_SIZE) -+#define ROUTE_TABLE_HASH_BITS 5 /**< 32 entries */ -+#define ROUTE_TABLE_SIZE ((1 << ROUTE_TABLE_HASH_BITS) * CLASS_ROUTE_SIZE) -+#define BMU2_DDR_BASEADDR (ROUTE_TABLE_BASEADDR + ROUTE_TABLE_SIZE) -+#define BMU2_BUF_COUNT 16 -+#define BMU2_DDR_SIZE (DDR_BUF_SIZE * BMU2_BUF_COUNT) -+#define TMU_LLM_BASEADDR (BMU2_DDR_BASEADDR + BMU2_DDR_SIZE) -+#define TMU_LLM_QUEUE_LEN (16 * 8) /**< Must be power of two and at least 16 * 8 = 128 bytes */ -+#define TMU_LLM_SIZE (4 * 16 * TMU_LLM_QUEUE_LEN) /**< (4 TMU's x 16 queues x queue_len) */ -+#define HIF_DESC_BASEADDR (TMU_LLM_BASEADDR + TMU_LLM_SIZE) -+#define HIF_RX_DESC_SIZE (16*HIF_RX_DESC_NT) -+#define HIF_TX_DESC_SIZE (16*HIF_TX_DESC_NT) -+#define HIF_DESC_SIZE (HIF_RX_DESC_SIZE + HIF_TX_DESC_SIZE) -+#define HIF_RX_PKT_DDR_BASEADDR (HIF_DESC_BASEADDR + HIF_DESC_SIZE) -+#define HIF_RX_PKT_DDR_SIZE (HIF_RX_DESC_NT * DDR_BUF_SIZE) -+#define HIF_TX_PKT_DDR_BASEADDR (HIF_RX_PKT_DDR_BASEADDR + HIF_RX_PKT_DDR_SIZE) -+#define HIF_TX_PKT_DDR_SIZE (HIF_TX_DESC_NT * DDR_BUF_SIZE) -+#define ROUTE_BASEADDR (HIF_TX_PKT_DDR_BASEADDR + HIF_TX_PKT_DDR_SIZE) -+#define ROUTE_SIZE (2 * CLASS_ROUTE_SIZE) -+ -+#define DDR_MAX_SIZE (ROUTE_BASEADDR + ROUTE_SIZE) -+ -+#define PFE_HOST_TO_PCI(addr) (((u32)addr)- ((u32)DDR_BASE_ADDR)) -+#define PFE_PCI_TO_HOST(addr) (((u32)addr)+ ((u32)DDR_BASE_ADDR)) -+#endif -+ -+/* IRAM Mapping */ -+#define IPSEC_IRAM_BASEADDR 0 -+#define IPSEC_IRAM_SIZE 0x2000 -+ -+/* LMEM Mapping */ -+#define BMU1_LMEM_BASEADDR 0 -+#define BMU1_BUF_COUNT 256 -+#define BMU1_LMEM_SIZE (LMEM_BUF_SIZE * BMU1_BUF_COUNT) -+#define IPSEC_LMEM_BASEADDR (BMU1_LMEM_BASEADDR + BMU1_LMEM_SIZE) -+#define IPSEC_LMEM_SIZE (30 * 1024) -+ -+ -+ -+#endif /* _PFE_MOD_H */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/pfe_perfmon.c -@@ -0,0 +1,175 @@ -+/* -+ * -+ * Copyright (C) 2007 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+/* PFE performance monitoring functions */ -+ -+#include "pfe_ctrl_hal.h" -+#include "pfe_perfmon.h" -+ -+static TIMER_ENTRY cpumon_timer; -+ -+u32 CLASS_DMEM_SH2(cpu_ticks[2]); -+u32 TMU_DMEM_SH2(cpu_ticks[2]); -+#if !defined(CONFIG_UTIL_DISABLED) -+u32 UTIL_DMEM_SH2(cpu_ticks[2]); -+#endif -+ -+#define compute_active_pct(total_ticks, active_ticks) ((active_ticks * 100 + (total_ticks >> 1)) / total_ticks) -+ -+static void cpumon_timer_handler(void) -+{ -+ int id; -+ u32 dmem_addr; -+ u32 ticks[2]; -+ u32 total, active; -+ struct pfe_ctrl *ctrl = &pfe->ctrl; -+ struct pfe_cpumon *cpumon = &pfe->cpumon; -+ -+ // Process class PE's -+ total = active = 0; -+ dmem_addr = virt_to_class_dmem(&class_cpu_ticks[0]); -+ for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) -+ { -+ cpumon->cpu_usage_pct[id] = 0; -+ if (pe_sync_stop(ctrl, (1 << id)) < 0) -+ continue; -+ ticks[0] = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4)); -+ ticks[1] = be32_to_cpu(pe_dmem_read(id, dmem_addr + 4, 4)); -+ pe_dmem_write(id, 0, dmem_addr, 4); -+ pe_dmem_write(id, 0, dmem_addr + 4, 4); -+ pe_start(ctrl, (1 << id)); -+ ticks[0] >>= 8; // divide both values by 256, so multiply by 100 won't overflow -+ ticks[1] >>= 8; -+ total += ticks[0]; -+ active += ticks[1]; -+ if (ticks[0] != 0) -+ cpumon->cpu_usage_pct[id] = compute_active_pct(ticks[0], ticks[1]); -+ } -+ if (total != 0) -+ cpumon->class_usage_pct = compute_active_pct(total, active); -+ else -+ cpumon->class_usage_pct = 0; -+ -+ // Process TMU PE's -+ total = active = 0; -+ dmem_addr = virt_to_tmu_dmem(&tmu_cpu_ticks[0]); -+ for (id = TMU0_ID; id <= TMU_MAX_ID; id++) -+ { -+#if defined(CONFIG_PLATFORM_LS1012A) -+ if(id == TMU2_ID) continue; -+#endif -+ cpumon->cpu_usage_pct[id] = 0; -+ if (pe_sync_stop(ctrl, (1 << id)) < 0) -+ continue; -+ ticks[0] = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4)); -+ ticks[1] = be32_to_cpu(pe_dmem_read(id, dmem_addr + 4, 4)); -+ pe_dmem_write(id, 0, dmem_addr, 4); -+ pe_dmem_write(id, 0, dmem_addr + 4, 4); -+ pe_start(ctrl, (1 << id)); -+ ticks[0] >>= 8; // divide both values by 256, so multiply by 100 won't overflow -+ ticks[1] >>= 8; -+ if (ticks[0] != 0) -+ cpumon->cpu_usage_pct[id] = compute_active_pct(ticks[0], ticks[1]); -+ } -+#if !defined(CONFIG_UTIL_DISABLED) -+ // Process Util PE -+ dmem_addr = virt_to_util_dmem(&util_cpu_ticks[0]); -+ cpumon->cpu_usage_pct[UTIL_ID] = 0; -+ if (pe_sync_stop(ctrl, (1 << UTIL_ID)) < 0) -+ return; -+ ticks[0] = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr, 4)); -+ ticks[1] = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr + 4, 4)); -+ pe_dmem_write(UTIL_ID, 0, dmem_addr, 4); -+ pe_dmem_write(UTIL_ID, 0, dmem_addr + 4, 4); -+ pe_start(ctrl, (1 << UTIL_ID)); -+ ticks[0] >>= 8; // divide both values by 256, so multiply by 100 won't overflow -+ ticks[1] >>= 8; -+ if (ticks[0] != 0) -+ cpumon->cpu_usage_pct[UTIL_ID] = compute_active_pct(ticks[0], ticks[1]); -+#endif -+} -+ -+static int pfe_cpumon_init(struct pfe *pfe) -+{ -+ timer_init(&cpumon_timer, cpumon_timer_handler); -+ timer_add(&cpumon_timer, CT_CPUMON_INTERVAL); -+ return 0; -+} -+ -+static void pfe_cpumon_exit(struct pfe *pfe) -+{ -+ timer_del(&cpumon_timer); -+} -+ -+ -+/*********************************************************************************/ -+ -+// Memory monitor functions -+ -+void * pfe_kmalloc(size_t size, int 
flags) -+{ -+ struct pfe_memmon *memmon = &pfe->memmon; -+ void *ptr; -+ ptr = kmalloc(size, flags); -+ if (ptr) -+ memmon->kernel_memory_allocated += ksize(ptr); -+ return ptr; -+} -+ -+void * pfe_kzalloc(size_t size, int flags) -+{ -+ struct pfe_memmon *memmon = &pfe->memmon; -+ void *ptr; -+ ptr = kzalloc(size, flags); -+ if (ptr) -+ memmon->kernel_memory_allocated += ksize(ptr); -+ return ptr; -+} -+ -+void pfe_kfree(void *ptr) -+{ -+ struct pfe_memmon *memmon = &pfe->memmon; -+ memmon->kernel_memory_allocated -= ksize(ptr); -+ kfree(ptr); -+} -+ -+static int pfe_memmon_init(struct pfe *pfe) -+{ -+ return 0; -+} -+ -+static void pfe_memmon_exit(struct pfe *pfe) -+{ -+} -+ -+/*********************************************************************************/ -+ -+ -+int pfe_perfmon_init(struct pfe *pfe) -+{ -+ pfe_cpumon_init(pfe); -+ pfe_memmon_init(pfe); -+ return 0; -+} -+ -+void pfe_perfmon_exit(struct pfe *pfe) -+{ -+ pfe_cpumon_exit(pfe); -+ pfe_memmon_exit(pfe); -+} ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/pfe_perfmon.h -@@ -0,0 +1,41 @@ -+/* -+ * -+ * Copyright (C) 2007 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#ifndef _PFE_PERFMON_H_ -+#define _PFE_PERFMON_H_ -+ -+#define CT_CPUMON_INTERVAL (1 * TIMER_TICKS_PER_SEC) -+ -+struct pfe_cpumon { -+ u32 cpu_usage_pct[MAX_PE]; -+ u32 class_usage_pct; -+}; -+ -+struct pfe_memmon { -+ u32 kernel_memory_allocated; -+}; -+ -+void * pfe_kmalloc(size_t size, int flags); -+void * pfe_kzalloc(size_t size, int flags); -+void pfe_kfree(void *ptr); -+ -+int pfe_perfmon_init(struct pfe *pfe); -+void pfe_perfmon_exit(struct pfe *pfe); -+ -+#endif /* _PFE_PERFMON_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/pfe_platform.c -@@ -0,0 +1,358 @@ -+/* -+ * -+ * Copyright (C) 2007 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
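The pfe_kmalloc()/pfe_kzalloc() accounting wrappers above declare their flags argument as int; the kernel's annotated type for allocation flags is gfp_t, which lets sparse type-check callers. A type-clean sketch of the same wrapper (accounting logic unchanged):

    /* Sketch: gfp_t-typed variant of the accounting wrapper */
    void *pfe_kmalloc(size_t size, gfp_t flags)
    {
            void *ptr = kmalloc(size, flags);

            if (ptr)
                    pfe->memmon.kernel_memory_allocated += ksize(ptr);
            return ptr;
    }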
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include "pfe_mod.h" -+ -+/** -+ * pfe_platform_probe - -+ * -+ * -+ */ -+static int pfe_platform_probe(struct platform_device *pdev) -+{ -+ struct resource *r; -+ int rc; -+ struct clk *clk_axi; -+ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ pfe = kzalloc(sizeof(struct pfe), GFP_KERNEL); -+ if (!pfe) { -+ rc = -ENOMEM; -+ goto err_alloc; -+ } -+ -+ platform_set_drvdata(pdev, pfe); -+ -+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ddr"); -+ if (!r) { -+ printk(KERN_INFO "platform_get_resource_byname(ddr) failed\n"); -+ rc = -ENXIO; -+ goto err_ddr; -+ } -+ -+ pfe->ddr_phys_baseaddr = r->start; -+ pfe->ddr_size = resource_size(r); -+ -+ pfe->ddr_baseaddr = ioremap(r->start, resource_size(r)); -+ if (!pfe->ddr_baseaddr) { -+ printk(KERN_INFO "ioremap() ddr failed\n"); -+ rc = -ENOMEM; -+ goto err_ddr; -+ } -+ -+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "axi"); -+ if (!r) { -+ printk(KERN_INFO "platform_get_resource_byname(axi) failed\n"); -+ rc = -ENXIO; -+ goto err_axi; -+ } -+ -+ pfe->cbus_baseaddr = ioremap(r->start, resource_size(r)); -+ if (!pfe->cbus_baseaddr) { -+ printk(KERN_INFO "ioremap() axi failed\n"); -+ rc = -ENOMEM; -+ goto err_axi; -+ } -+ -+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb"); -+ if (!r) { -+ printk(KERN_INFO "platform_get_resource_byname(apb) failed\n"); -+ rc = -ENXIO; -+ goto err_apb; -+ } -+ -+ pfe->apb_baseaddr = ioremap(r->start, resource_size(r)); -+ if (!pfe->apb_baseaddr) { -+ printk(KERN_INFO "ioremap() apb failed\n"); -+ rc = -ENOMEM; -+ goto err_apb; -+ } -+ -+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iram"); -+ if (!r) { -+ printk(KERN_INFO "platform_get_resource_byname(iram) failed\n"); -+ rc = -ENXIO; -+ goto err_iram; -+ } -+ -+ pfe->iram_phys_baseaddr = r->start; -+ pfe->iram_baseaddr = ioremap(r->start, resource_size(r)); -+ if (!pfe->iram_baseaddr) { -+ printk(KERN_INFO "ioremap() iram failed\n"); -+ rc = -ENOMEM; -+ goto err_iram; -+ } -+ -+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipsec"); -+ if (!r) { -+ printk(KERN_INFO "platform_get_resource_byname(ipsec) failed\n"); -+ rc = -ENXIO; -+ goto err_ipsec; -+ } -+ -+ pfe->ipsec_phys_baseaddr = r->start; -+ /* Just map only initial 1MB , as its enough to access espah engine -+ */ -+ //pfe->ipsec_baseaddr = ioremap(r->start, resource_size(r)); -+ pfe->ipsec_baseaddr = ioremap(r->start, 1*1024*1024); -+ if (!pfe->ipsec_baseaddr) { -+ printk(KERN_INFO "ioremap() ipsec failed\n"); -+ rc = -ENOMEM; -+ goto err_ipsec; -+ } -+ -+ printk(KERN_INFO "ipsec: baseaddr :%x --- %x\n", (u32)pfe->ipsec_phys_baseaddr, (u32)pfe->ipsec_baseaddr); -+ -+ pfe->hif_irq = platform_get_irq_byname(pdev, "hif"); -+ if (pfe->hif_irq < 0) { -+ printk(KERN_INFO "platform_get_irq_byname(hif) failed\n"); -+ rc = pfe->hif_irq; -+ goto err_hif_irq; -+ } -+ -+#if 0 -+ pfe->hif_client_irq = platform_get_irq_byname(pdev, "hif_client"); -+ if (pfe->hif_client_irq < 0) { -+ printk(KERN_INFO "platform_get_irq_byname(hif_client) failed\n"); -+ rc = pfe->hif_client_irq; -+ goto err_hif_irq; -+ } -+#endif -+ -+ pfe->dev = &pdev->dev; -+ -+ -+ /* Get the system clock */ -+ clk_axi = clk_get(NULL,"axi"); -+ if (IS_ERR(clk_axi)) { -+ printk(KERN_INFO "clk_get call failed\n"); -+ rc = -ENXIO; -+ goto 
err_clk; -+ } -+ -+ /* HFE core clock */ -+ pfe->hfe_clock = clk_get(NULL, "hfe_core"); -+ if (IS_ERR(pfe->hfe_clock)) { -+ printk(KERN_INFO "clk_get call failed\n"); -+ rc = -ENXIO; -+ goto err_hfe_clock; -+ } -+ -+ clk_disable(pfe->hfe_clock); -+ c2000_block_reset(COMPONENT_PFE_SYS, 1); -+ mdelay(1); -+ c2000_block_reset(COMPONENT_PFE_SYS, 0); -+ clk_enable(pfe->hfe_clock); -+ -+ pfe->ctrl.clk_axi = clk_axi; -+ pfe->ctrl.sys_clk = clk_get_rate(clk_axi) / 1000; // save sys_clk value as KHz -+ -+ rc = pfe_probe(pfe); -+ if (rc < 0) -+ goto err_probe; -+ -+ return 0; -+ -+err_probe: -+ clk_put(pfe->hfe_clock); -+err_hfe_clock: -+ clk_put(clk_axi); -+err_clk: -+err_hif_irq: -+ iounmap(pfe->ipsec_baseaddr); -+err_ipsec: -+ iounmap(pfe->iram_baseaddr); -+err_iram: -+ iounmap(pfe->apb_baseaddr); -+ -+err_apb: -+ iounmap(pfe->cbus_baseaddr); -+ -+err_axi: -+ iounmap(pfe->ddr_baseaddr); -+ -+err_ddr: -+ platform_set_drvdata(pdev, NULL); -+ -+ kfree(pfe); -+ -+err_alloc: -+ return rc; -+} -+ -+ -+/** -+ * pfe_platform_remove - -+ * -+ * -+ */ -+static int pfe_platform_remove(struct platform_device *pdev) -+{ -+ struct pfe *pfe = platform_get_drvdata(pdev); -+ int rc; -+ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ rc = pfe_remove(pfe); -+ -+ c2000_block_reset(COMPONENT_PFE_SYS, 1); -+ clk_disable(pfe->hfe_clock); -+ clk_put(pfe->hfe_clock); -+ clk_put(pfe->ctrl.clk_axi); -+ iounmap(pfe->ipsec_baseaddr); -+ iounmap(pfe->iram_baseaddr); -+ iounmap(pfe->apb_baseaddr); -+ iounmap(pfe->cbus_baseaddr); -+ iounmap(pfe->ddr_baseaddr); -+ -+ platform_set_drvdata(pdev, NULL); -+ -+ kfree(pfe); -+ -+ return rc; -+} -+ -+#ifdef CONFIG_PM -+ -+#ifdef CONFIG_PM_SLEEP -+static int pfe_platform_suspend(struct device *dev) -+{ -+ struct pfe *pfe = platform_get_drvdata(to_platform_device(dev)); -+ struct net_device *netdev; -+ int i; -+ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ pfe->wake = 0; -+ -+ for (i = 0; i < (NUM_GEMAC_SUPPORT - 1); i++ ) { -+ netdev = pfe->eth.eth_priv[i]->dev; -+ -+ netif_device_detach(netdev); -+ -+ if (netif_running(netdev)) -+ if(pfe_eth_suspend(netdev)) -+ pfe->wake =1; -+ } -+ -+ /* Shutdown PFE only if we're not waking up the system */ -+ if (!pfe->wake) { -+ pfe_ctrl_suspend(&pfe->ctrl); -+ pfe_hif_exit(pfe); -+ pfe_hif_lib_exit(pfe); -+ -+ class_disable(); -+ tmu_disable(0xf); -+#if !defined(CONFIG_UTIL_DISABLED) -+ util_disable(); -+#endif -+ pfe_hw_exit(pfe); -+ c2000_block_reset(COMPONENT_PFE_SYS, 1); -+ clk_disable(pfe->hfe_clock); -+ } -+ -+ return 0; -+} -+ -+static int pfe_platform_resume(struct device *dev) -+{ -+ struct pfe *pfe = platform_get_drvdata(to_platform_device(dev)); -+ struct net_device *netdev; -+ int i; -+ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ if (!pfe->wake) { -+ /* Sequence follows VLSI recommendation (bug 71927) */ -+ c2000_block_reset(COMPONENT_PFE_SYS, 1); -+ mdelay(1); -+ c2000_block_reset(COMPONENT_PFE_SYS, 0); -+ clk_enable(pfe->hfe_clock); -+ -+ pfe_hw_init(pfe, 1); -+ pfe_hif_lib_init(pfe); -+ pfe_hif_init(pfe); -+#if !defined(CONFIG_UTIL_DISABLED) -+ util_enable(); -+#endif -+ tmu_enable(0xf); -+ class_enable(); -+ pfe_ctrl_resume(&pfe->ctrl); -+ } -+ -+ for(i = 0; i < (NUM_GEMAC_SUPPORT - 1); i++) { -+ netdev = pfe->eth.eth_priv[i]->dev; -+ -+ if (pfe->eth.eth_priv[i]->mii_bus) -+ pfe_eth_mdio_reset(pfe->eth.eth_priv[i]->mii_bus); -+ -+ if (netif_running(netdev)) -+ pfe_eth_resume(netdev); -+ -+ netif_device_attach(netdev); -+ } -+ return 0; -+} -+#else -+#define pfe_platform_suspend NULL -+#define pfe_platform_resume NULL -+#endif -+ 
-+static const struct dev_pm_ops pfe_platform_pm_ops = { -+ SET_SYSTEM_SLEEP_PM_OPS(pfe_platform_suspend, pfe_platform_resume) -+}; -+ -+#endif -+ -+static struct platform_driver pfe_platform_driver = { -+ .probe = pfe_platform_probe, -+ .remove = pfe_platform_remove, -+ .driver = { -+ .name = "pfe", -+#ifdef CONFIG_PM -+ .pm = &pfe_platform_pm_ops, -+#endif -+ }, -+}; -+ -+ -+static int __init pfe_module_init(void) -+{ -+ printk(KERN_INFO "%s\n", __func__); -+ -+ return platform_driver_register(&pfe_platform_driver); -+} -+ -+ -+static void __exit pfe_module_exit(void) -+{ -+ platform_driver_unregister(&pfe_platform_driver); -+ -+ printk(KERN_INFO "%s\n", __func__); -+} -+ -+MODULE_LICENSE("GPL"); -+module_init(pfe_module_init); -+module_exit(pfe_module_exit); ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/pfe_sysfs.c -@@ -0,0 +1,855 @@ -+/* -+ * (C) Copyright 2011 -+ * Author : Freescale Semiconductor, Inc. -+ * -+ * See file CREDITS for list of people who contributed to this -+ * project. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License as -+ * published by the Free Software Foundation; either version 2 of -+ * the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, -+ * MA 02111-1307 USA -+ * */ -+ -+#include -+#include -+ -+#include "pfe_mod.h" -+#include "pfe_ctrl_hal.h" -+ -+#define PE_EXCEPTION_DUMP_ADDRESS 0x1fa8 -+#define NUM_QUEUES 16 -+ -+static char register_name[20][5] = { -+ "EPC", "ECAS", "EID", "ED", -+ "r0", "r1", "r2", "r3", -+ "r4", "r5", "r6", "r7", -+ "r8", "r9", "r10", "r11", -+ "r12", "r13", "r14", "r15", -+}; -+ -+static char exception_name[14][20] = { -+ "Reset", -+ "HardwareFailure", -+ "NMI", -+ "InstBreakpoint", -+ "DataBreakpoint", -+ "Unsupported", -+ "PrivilegeViolation", -+ "InstBusError", -+ "DataBusError", -+ "AlignmentError", -+ "ArithmeticError", -+ "SystemCall", -+ "MemoryManagement", -+ "Interrupt", -+}; -+ -+static unsigned long class_do_clear = 0; -+static unsigned long tmu_do_clear = 0; -+#if !defined(CONFIG_UTIL_DISABLED) -+static unsigned long util_do_clear = 0; -+#endif -+ -+u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset); -+ -+static ssize_t display_pe_status(char *buf, int id, u32 dmem_addr, unsigned long do_clear) -+{ -+ ssize_t len = 0; -+ u32 val; -+ char statebuf[5]; -+ struct pfe_cpumon *cpumon = &pfe->cpumon; -+ u32 debug_indicator; -+ u32 debug[20]; -+ -+ *(u32 *)statebuf = pe_dmem_read(id, dmem_addr, 4); -+ dmem_addr += 4; -+ -+ statebuf[4] = '\0'; -+ len += sprintf(buf + len, "state=%4s ", statebuf); -+ -+ val = pe_dmem_read(id, dmem_addr, 4); -+ dmem_addr += 4; -+ len += sprintf(buf + len, "ctr=%08x ", cpu_to_be32(val)); -+ -+ val = pe_dmem_read(id, dmem_addr, 4); -+ if (do_clear && val) -+ pe_dmem_write(id, 0, dmem_addr, 4); -+ dmem_addr += 4; -+ len += sprintf(buf + len, "rx=%u ", cpu_to_be32(val)); -+ -+ val = pe_dmem_read(id, dmem_addr, 4); -+ if (do_clear && val) -+ pe_dmem_write(id, 0, dmem_addr, 4); -+ dmem_addr += 4; -+ if (id >= TMU0_ID && id <= TMU_MAX_ID) -+ len += 
sprintf(buf + len, "qstatus=%x", cpu_to_be32(val)); -+ else -+ len += sprintf(buf + len, "tx=%u", cpu_to_be32(val)); -+ -+ val = pe_dmem_read(id, dmem_addr, 4); -+ if (do_clear && val) -+ pe_dmem_write(id, 0, dmem_addr, 4); -+ dmem_addr += 4; -+ if (val) -+ len += sprintf(buf + len, " drop=%u", cpu_to_be32(val)); -+ -+ len += sprintf(buf + len, " load=%d%%", cpumon->cpu_usage_pct[id]); -+ -+ len += sprintf(buf + len, "\n"); -+ -+ debug_indicator = pe_dmem_read(id, dmem_addr, 4); -+ dmem_addr += 4; -+ if (!strncmp((char *)&debug_indicator, "DBUG", 4)) -+ { -+ int j, last = 0; -+ for (j = 0; j < 16; j++) -+ { -+ debug[j] = pe_dmem_read(id, dmem_addr, 4); -+ if (debug[j]) -+ { -+ if (do_clear) -+ pe_dmem_write(id, 0, dmem_addr, 4); -+ last = j + 1; -+ } -+ dmem_addr += 4; -+ } -+ for (j = 0; j < last; j++) -+ { -+ len += sprintf(buf + len, "%08x%s", cpu_to_be32(debug[j]), -+ (j & 0x7) == 0x7 || j == last - 1 ? "\n" : " "); -+ } -+ } -+ -+ if (!strncmp(statebuf, "DEAD", 4)) -+ { -+ u32 i, dump = PE_EXCEPTION_DUMP_ADDRESS; -+ -+ len += sprintf(buf + len, "Exception details:\n"); -+ for (i = 0; i < 20; i++) { -+ debug[i] = pe_dmem_read(id, dump, 4); -+ dump +=4; -+ if (i == 2) -+ len += sprintf(buf + len, "%4s = %08x (=%s) ", register_name[i], cpu_to_be32(debug[i]), exception_name[min((u32) cpu_to_be32(debug[i]), (u32)13)]); -+ else -+ len += sprintf(buf + len, "%4s = %08x%s", register_name[i], cpu_to_be32(debug[i]), -+ (i & 0x3) == 0x3 || i == 19 ? "\n" : " "); -+ } -+ } -+ -+ return len; -+} -+ -+static ssize_t class_phy_stats(char *buf, int phy) -+{ -+ ssize_t len = 0; -+ int off1 = phy * 0x28; -+ int off2 = phy * 0x10; -+ -+ if (phy == 3) -+ off1 = CLASS_PHY4_RX_PKTS - CLASS_PHY1_RX_PKTS; -+ -+ len += sprintf(buf + len, "phy: %d\n", phy); -+ len += sprintf(buf + len, " rx: %10u, tx: %10u, intf: %10u, ipv4: %10u, ipv6: %10u\n", -+ readl(CLASS_PHY1_RX_PKTS + off1), readl(CLASS_PHY1_TX_PKTS + off1), -+ readl(CLASS_PHY1_INTF_MATCH_PKTS + off1), readl(CLASS_PHY1_V4_PKTS + off1), -+ readl(CLASS_PHY1_V6_PKTS + off1)); -+ -+ len += sprintf(buf + len, " icmp: %10u, igmp: %10u, tcp: %10u, udp: %10u\n", -+ readl(CLASS_PHY1_ICMP_PKTS + off2), readl(CLASS_PHY1_IGMP_PKTS + off2), -+ readl(CLASS_PHY1_TCP_PKTS + off2), readl(CLASS_PHY1_UDP_PKTS + off2)); -+ -+ len += sprintf(buf + len, " err\n"); -+ len += sprintf(buf + len, " lp: %10u, intf: %10u, l3: %10u, chcksum: %10u, ttl: %10u\n", -+ readl(CLASS_PHY1_LP_FAIL_PKTS + off1), readl(CLASS_PHY1_INTF_FAIL_PKTS + off1), -+ readl(CLASS_PHY1_L3_FAIL_PKTS + off1), readl(CLASS_PHY1_CHKSUM_ERR_PKTS + off1), -+ readl(CLASS_PHY1_TTL_ERR_PKTS + off1)); -+ -+ return len; -+} -+ -+/** qm_read_drop_stat -+ * This function is used to read the drop statistics from the TMU -+ * hw drop counter. Since the hw counter is always cleared afer -+ * reading, this function maintains the previous drop count, and -+ * adds the new value to it. That value can be retrieved by -+ * passing a pointer to it with the total_drops arg. 
-+ * -+ * @param tmu TMU number (0 - 3) -+ * @param queue queue number (0 - 15) -+ * @param total_drops pointer to location to store total drops (or NULL) -+ * @param do_reset if TRUE, clear total drops after updating -+ * -+ */ -+ -+u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset) -+{ -+ static u32 qtotal[TMU_MAX_ID + 1][NUM_QUEUES]; -+ u32 val; -+ writel((tmu << 8) | queue, TMU_TEQ_CTRL); -+ writel((tmu << 8) | queue, TMU_LLM_CTRL); -+ val = readl(TMU_TEQ_DROP_STAT); -+ qtotal[tmu][queue] += val; -+ if (total_drops) -+ *total_drops = qtotal[tmu][queue]; -+ if (do_reset) -+ qtotal[tmu][queue] = 0; -+ return val; -+} -+ -+static ssize_t tmu_queue_stats(char *buf, int tmu, int queue) -+{ -+ ssize_t len = 0; -+ u32 drops; -+ -+ len += sprintf(buf + len, "%d-%02d, ", tmu, queue); -+ -+ drops = qm_read_drop_stat(tmu, queue, NULL, 0); -+ -+ /* Select queue */ -+ writel((tmu << 8) | queue, TMU_TEQ_CTRL); -+ writel((tmu << 8) | queue, TMU_LLM_CTRL); -+ -+ len += sprintf(buf + len, "(teq) drop: %10u, tx: %10u (llm) head: %08x, tail: %08x, drop: %10u\n", -+ drops, readl(TMU_TEQ_TRANS_STAT), -+ readl(TMU_LLM_QUE_HEADPTR), readl(TMU_LLM_QUE_TAILPTR), -+ readl(TMU_LLM_QUE_DROPCNT)); -+ -+ return len; -+} -+ -+ -+static ssize_t tmu_queues(char *buf, int tmu) -+{ -+ ssize_t len = 0; -+ int queue; -+ -+ for (queue = 0; queue < 16; queue++) -+ len += tmu_queue_stats(buf + len, tmu, queue); -+ -+ return len; -+} -+ -+static ssize_t tmu_ctx(char *buf, int tmu) -+{ -+ ssize_t len = 0; -+ int i; -+ u32 val, tmu_context_addr = TMU_CONTEXT_ADDR; -+ -+ len += sprintf(buf+len, " TMU %d \n", TMU0_ID+tmu); -+ for (i = 1; i <= 160 ; i++, tmu_context_addr += 4) -+ { -+ val = pe_dmem_read(TMU0_ID+tmu, tmu_context_addr , 4); -+ if (i == 5) -+ len += sprintf(buf+len, "\nShapers: Each shaper structure is 8 bytes and there are 10 shapers\n"); -+ -+ if (i == 25) -+ len += sprintf(buf+len, "\nScheduler: Each scheduler structure is 48 bytes and there are 8 schedulers\n"); -+ if (i == 121) -+ len += sprintf(buf+len, "\nQueue: Each queue structure is 2 bytes and there are 16 queues\n"); -+ -+ if (i == 129) -+ len += sprintf(buf+len, "\nqlenmasks array for 16 queues\n"); -+ if (i == 145) -+ len += sprintf(buf+len, "\nqresultmap array for 16 queues\n"); -+ if (i%8 == 0) -+ len += sprintf(buf+len, "%08x \n", cpu_to_be32(val)); -+ else -+ len += sprintf(buf+len, "%08x ", cpu_to_be32(val)); -+ } -+ -+ len += sprintf(buf+len, "\n"); -+ -+ return len; -+} -+ -+static ssize_t block_version(char *buf, void *addr) -+{ -+ ssize_t len = 0; -+ u32 val; -+ -+ val = readl(addr); -+ len += sprintf(buf + len, "revision: %x, version: %x, id: %x\n", (val >> 24) & 0xff, (val >> 16) & 0xff, val & 0xffff); -+ -+ return len; -+} -+ -+static ssize_t bmu(char *buf, int id, void *base) -+{ -+ ssize_t len = 0; -+ -+ len += sprintf(buf + len, "bmu: %d\n ", id); -+ -+ len += block_version(buf + len, base + BMU_VERSION); -+ -+ len += sprintf(buf + len, " buf size: %x\n", (1 << readl(base + BMU_BUF_SIZE))); -+ len += sprintf(buf + len, " buf count: %x\n", readl(base + BMU_BUF_CNT)); -+ len += sprintf(buf + len, " buf rem: %x\n", readl(base + BMU_REM_BUF_CNT)); -+ len += sprintf(buf + len, " buf curr: %x\n", readl(base + BMU_CURR_BUF_CNT)); -+ len += sprintf(buf + len, " free err: %x\n", readl(base + BMU_FREE_ERR_ADDR)); -+ -+ return len; -+} -+ -+static ssize_t gpi(char *buf, int id, void *base) -+{ -+ ssize_t len = 0; -+ u32 val; -+ -+ len += sprintf(buf + len, "gpi%d:\n ", id); -+ len += block_version(buf + len, base + 
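Because the hardware drop counter is clear-on-read, qm_read_drop_stat() above is the only safe accessor: each call folds the hardware value into a static per-queue running total. A short usage sketch (arguments per the function's comment):

    /* Sketch: cumulative drops for TMU 0, queue 5, without resetting */
    u32 total_drops;

    qm_read_drop_stat(0, 5, &total_drops, 0);
    /* total_drops now holds the drops accumulated over all reads */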
GPI_VERSION); -+ -+ len += sprintf(buf + len, " tx under stick: %x\n", readl(base + GPI_FIFO_STATUS)); -+ val = readl(base + GPI_FIFO_DEBUG); -+ len += sprintf(buf + len, " tx pkts: %x\n", (val >> 23) & 0x3f); -+ len += sprintf(buf + len, " rx pkts: %x\n", (val >> 18) & 0x3f); -+ len += sprintf(buf + len, " tx bytes: %x\n", (val >> 9) & 0x1ff); -+ len += sprintf(buf + len, " rx bytes: %x\n", (val >> 0) & 0x1ff); -+ len += sprintf(buf + len, " overrun: %x\n", readl(base + GPI_OVERRUN_DROPCNT)); -+ -+ return len; -+} -+ -+static ssize_t pfe_set_class(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -+{ -+ class_do_clear = simple_strtoul(buf, NULL, 0); -+ return count; -+} -+ -+static ssize_t pfe_show_class(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ ssize_t len = 0; -+ int id; -+ u32 val; -+ struct pfe_cpumon *cpumon = &pfe->cpumon; -+ -+ len += block_version(buf + len, CLASS_VERSION); -+ -+ for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) -+ { -+ len += sprintf(buf + len, "%d: ", id - CLASS0_ID); -+ -+ val = readl(CLASS_PE0_DEBUG + id * 4); -+ len += sprintf(buf + len, "pc=1%04x ", val & 0xffff); -+ -+ len += display_pe_status(buf + len, id, PESTATUS_ADDR_CLASS, class_do_clear); -+ } -+ len += sprintf(buf + len, "aggregate load=%d%%\n\n", cpumon->class_usage_pct); -+ -+ len += sprintf(buf + len, "pe status: 0x%x\n", readl(CLASS_PE_STATUS)); -+ len += sprintf(buf + len, "max buf cnt: 0x%x afull thres: 0x%x\n", readl(CLASS_MAX_BUF_CNT), readl(CLASS_AFULL_THRES)); -+ len += sprintf(buf + len, "tsq max cnt: 0x%x tsq fifo thres: 0x%x\n", readl(CLASS_TSQ_MAX_CNT), readl(CLASS_TSQ_FIFO_THRES)); -+ len += sprintf(buf + len, "state: 0x%x\n", readl(CLASS_STATE)); -+ -+ len += class_phy_stats(buf + len, 0); -+ len += class_phy_stats(buf + len, 1); -+ len += class_phy_stats(buf + len, 2); -+ len += class_phy_stats(buf + len, 3); -+ -+ return len; -+} -+ -+static ssize_t pfe_set_tmu(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -+{ -+ tmu_do_clear = simple_strtoul(buf, NULL, 0); -+ return count; -+} -+ -+static ssize_t pfe_show_tmu(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ ssize_t len = 0; -+ int id; -+ u32 val; -+ -+ len += block_version(buf + len, TMU_VERSION); -+ -+ for (id = TMU0_ID; id <= TMU_MAX_ID; id++) -+ { -+#if defined(CONFIG_PLATFORM_LS1012A) -+ if(id == TMU2_ID) continue; -+#endif -+ len += sprintf(buf + len, "%d: ", id - TMU0_ID); -+ -+ len += display_pe_status(buf + len, id, PESTATUS_ADDR_TMU, tmu_do_clear); -+ } -+ -+ len += sprintf(buf + len, "pe status: %x\n", readl(TMU_PE_STATUS)); -+ len += sprintf(buf + len, "inq fifo cnt: %x\n", readl(TMU_PHY_INQ_FIFO_CNT)); -+ val = readl(TMU_INQ_STAT); -+ len += sprintf(buf + len, "inq wr ptr: %x\n", val & 0x3ff); -+ len += sprintf(buf + len, "inq rd ptr: %x\n", val >> 10); -+ -+ -+ return len; -+} -+ -+ -+static unsigned long drops_do_clear = 0; -+static u32 CLASS_DMEM_SH2(drop_counter)[CLASS_NUM_DROP_COUNTERS]; -+#if !defined(CONFIG_UTIL_DISABLED) -+static u32 UTIL_DMEM_SH2(drop_counter)[UTIL_NUM_DROP_COUNTERS]; -+#endif -+ -+char *class_drop_description[CLASS_NUM_DROP_COUNTERS] = { -+ "ICC", -+ "Host Pkt Error", -+ "Rx Error", -+ "IPsec Outbound", -+ "IPsec Inbound", -+ "EXPT IPsec Error", -+ "Reassembly", -+ "Fragmenter", -+ "NAT-T", -+ "Socket", -+ "Multicast", -+ "NAT-PT", -+ "Tx Disabled", -+}; -+ -+#if !defined(CONFIG_UTIL_DISABLED) -+char *util_drop_description[UTIL_NUM_DROP_COUNTERS] = { -+ "IPsec Outbound", -+ "IPsec 
Inbound", -+ "IPsec Rate Limiter", -+ "Fragmenter", -+ "Socket", -+ "Tx Disabled", -+ "Rx Error", -+}; -+#endif -+ -+static ssize_t pfe_set_drops(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -+{ -+ drops_do_clear = simple_strtoul(buf, NULL, 0); -+ return count; -+} -+ -+static u32 tmu_drops[4][16]; -+static ssize_t pfe_show_drops(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ ssize_t len = 0; -+ int id, dropnum; -+ int tmu, queue; -+ u32 val; -+ u32 dmem_addr; -+ int num_class_drops = 0, num_tmu_drops = 0, num_util_drops = 0; -+ struct pfe_ctrl *ctrl = &pfe->ctrl; -+ -+ memset(class_drop_counter, 0, sizeof(class_drop_counter)); -+ for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) -+ { -+ if (drops_do_clear) -+ pe_sync_stop(ctrl, (1 << id)); -+ for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS; dropnum++) -+ { -+ dmem_addr = virt_to_class_dmem(&class_drop_counter[dropnum]); -+ val = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4)); -+ class_drop_counter[dropnum] += val; -+ num_class_drops += val; -+ if (drops_do_clear) -+ pe_dmem_write(id, 0, dmem_addr, 4); -+ } -+ if (drops_do_clear) -+ pe_start(ctrl, (1 << id)); -+ } -+ -+#if !defined(CONFIG_UTIL_DISABLED) -+ if (drops_do_clear) -+ pe_sync_stop(ctrl, (1 << UTIL_ID)); -+ for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) -+ { -+ dmem_addr = virt_to_util_dmem(&util_drop_counter[dropnum]); -+ val = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr, 4)); -+ util_drop_counter[dropnum] = val; -+ num_util_drops += val; -+ if (drops_do_clear) -+ pe_dmem_write(UTIL_ID, 0, dmem_addr, 4); -+ } -+ if (drops_do_clear) -+ pe_start(ctrl, (1 << UTIL_ID)); -+#endif -+ for (tmu = 0; tmu < 4; tmu++) -+ { -+ for (queue = 0; queue < 16; queue++) -+ { -+ qm_read_drop_stat(tmu, queue, &tmu_drops[tmu][queue], drops_do_clear); -+ num_tmu_drops += tmu_drops[tmu][queue]; -+ } -+ } -+ -+ if (num_class_drops == 0 && num_util_drops == 0 && num_tmu_drops == 0) -+ len += sprintf(buf + len, "No PE drops\n\n"); -+ -+ if (num_class_drops > 0) -+ { -+ len += sprintf(buf + len, "Class PE drops --\n"); -+ for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS; dropnum++) -+ { -+ if (class_drop_counter[dropnum] > 0) -+ len += sprintf(buf + len, " %s: %d\n", class_drop_description[dropnum], class_drop_counter[dropnum]); -+ } -+ len += sprintf(buf + len, "\n"); -+ } -+ -+#if !defined(CONFIG_UTIL_DISABLED) -+ if (num_util_drops > 0) -+ { -+ len += sprintf(buf + len, "Util PE drops --\n"); -+ for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) -+ { -+ if (util_drop_counter[dropnum] > 0) -+ len += sprintf(buf + len, " %s: %d\n", util_drop_description[dropnum], util_drop_counter[dropnum]); -+ } -+ len += sprintf(buf + len, "\n"); -+ } -+#endif -+ if (num_tmu_drops > 0) -+ { -+ len += sprintf(buf + len, "TMU drops --\n"); -+ for (tmu = 0; tmu < 4; tmu++) -+ { -+ for (queue = 0; queue < 16; queue++) -+ { -+ if (tmu_drops[tmu][queue] > 0) -+ len += sprintf(buf + len, " TMU%d-Q%d: %d\n", tmu, queue, tmu_drops[tmu][queue]); -+ } -+ } -+ len += sprintf(buf + len, "\n"); -+ } -+ -+ return len; -+} -+ -+static ssize_t pfe_show_tmu0_queues(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ return tmu_queues(buf, 0); -+} -+ -+static ssize_t pfe_show_tmu1_queues(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ return tmu_queues(buf, 1); -+} -+ -+static ssize_t pfe_show_tmu2_queues(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ return tmu_queues(buf, 2); -+} -+ -+static 
ssize_t pfe_show_tmu3_queues(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ return tmu_queues(buf, 3); -+} -+ -+static ssize_t pfe_show_tmu0_ctx(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ return tmu_ctx(buf, 0); -+} -+static ssize_t pfe_show_tmu1_ctx(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ return tmu_ctx(buf, 1); -+} -+static ssize_t pfe_show_tmu2_ctx(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ return tmu_ctx(buf, 2); -+} -+ -+static ssize_t pfe_show_tmu3_ctx(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ return tmu_ctx(buf, 3); -+} -+ -+ -+#if !defined(CONFIG_UTIL_DISABLED) -+static ssize_t pfe_set_util(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -+{ -+ util_do_clear = simple_strtoul(buf, NULL, 0); -+ return count; -+} -+ -+static ssize_t pfe_show_util(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ ssize_t len = 0; -+ struct pfe_ctrl *ctrl = &pfe->ctrl; -+ -+ -+ len += block_version(buf + len, UTIL_VERSION); -+ -+ pe_sync_stop(ctrl, (1 << UTIL_ID)); -+ len += display_pe_status(buf + len, UTIL_ID, PESTATUS_ADDR_UTIL, util_do_clear); -+ pe_start(ctrl, (1 << UTIL_ID)); -+ -+ len += sprintf(buf + len, "pe status: %x\n", readl(UTIL_PE_STATUS)); -+ len += sprintf(buf + len, "max buf cnt: %x\n", readl(UTIL_MAX_BUF_CNT)); -+ len += sprintf(buf + len, "tsq max cnt: %x\n", readl(UTIL_TSQ_MAX_CNT)); -+ -+ return len; -+} -+#endif -+ -+static ssize_t pfe_show_bmu(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ ssize_t len = 0; -+ -+ len += bmu(buf + len, 1, BMU1_BASE_ADDR); -+ len += bmu(buf + len, 2, BMU2_BASE_ADDR); -+ -+ return len; -+} -+ -+static ssize_t pfe_show_hif(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ ssize_t len = 0; -+ -+ len += sprintf(buf + len, "hif:\n "); -+ len += block_version(buf + len, HIF_VERSION); -+ -+ len += sprintf(buf + len, " tx curr bd: %x\n", readl(HIF_TX_CURR_BD_ADDR)); -+ len += sprintf(buf + len, " tx status: %x\n", readl(HIF_TX_STATUS)); -+ len += sprintf(buf + len, " tx dma status: %x\n", readl(HIF_TX_DMA_STATUS)); -+ -+ len += sprintf(buf + len, " rx curr bd: %x\n", readl(HIF_RX_CURR_BD_ADDR)); -+ len += sprintf(buf + len, " rx status: %x\n", readl(HIF_RX_STATUS)); -+ len += sprintf(buf + len, " rx dma status: %x\n", readl(HIF_RX_DMA_STATUS)); -+ -+ len += sprintf(buf + len, "hif nocopy:\n "); -+ len += block_version(buf + len, HIF_NOCPY_VERSION); -+ -+ len += sprintf(buf + len, " tx curr bd: %x\n", readl(HIF_NOCPY_TX_CURR_BD_ADDR)); -+ len += sprintf(buf + len, " tx status: %x\n", readl(HIF_NOCPY_TX_STATUS)); -+ len += sprintf(buf + len, " tx dma status: %x\n", readl(HIF_NOCPY_TX_DMA_STATUS)); -+ -+ len += sprintf(buf + len, " rx curr bd: %x\n", readl(HIF_NOCPY_RX_CURR_BD_ADDR)); -+ len += sprintf(buf + len, " rx status: %x\n", readl(HIF_NOCPY_RX_STATUS)); -+ len += sprintf(buf + len, " rx dma status: %x\n", readl(HIF_NOCPY_RX_DMA_STATUS)); -+ -+ return len; -+} -+ -+ -+static ssize_t pfe_show_gpi(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ ssize_t len = 0; -+ -+ len += gpi(buf + len, 0, EGPI1_BASE_ADDR); -+ len += gpi(buf + len, 1, EGPI2_BASE_ADDR); -+#if !defined(CONFIG_PLATFORM_LS1012A) -+ len += gpi(buf + len, 2, EGPI3_BASE_ADDR); -+#endif -+ len += gpi(buf + len, 3, HGPI_BASE_ADDR); -+ -+ return len; -+} -+ -+static ssize_t pfe_show_pfemem(struct device *dev, struct device_attribute *attr, char *buf) -+{ 
-+ ssize_t len = 0; -+ struct pfe_memmon *memmon = &pfe->memmon; -+ -+ len += sprintf(buf + len, "Kernel Memory: %d Bytes (%d KB)\n", memmon->kernel_memory_allocated, (memmon->kernel_memory_allocated + 1023) / 1024); -+ -+ return len; -+} -+ -+#ifdef HIF_NAPI_STATS -+static ssize_t pfe_show_hif_napi_stats(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ struct platform_device *pdev = to_platform_device(dev); -+ struct pfe *pfe = platform_get_drvdata(pdev); -+ ssize_t len = 0; -+ -+ len += sprintf(buf + len, "sched: %u\n", pfe->hif.napi_counters[NAPI_SCHED_COUNT]); -+ len += sprintf(buf + len, "poll: %u\n", pfe->hif.napi_counters[NAPI_POLL_COUNT]); -+ len += sprintf(buf + len, "packet: %u\n", pfe->hif.napi_counters[NAPI_PACKET_COUNT]); -+ len += sprintf(buf + len, "budget: %u\n", pfe->hif.napi_counters[NAPI_FULL_BUDGET_COUNT]); -+ len += sprintf(buf + len, "desc: %u\n", pfe->hif.napi_counters[NAPI_DESC_COUNT]); -+ len += sprintf(buf + len, "full: %u\n", pfe->hif.napi_counters[NAPI_CLIENT_FULL_COUNT]); -+ -+ return len; -+} -+ -+static ssize_t pfe_set_hif_napi_stats(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -+{ -+ struct platform_device *pdev = to_platform_device(dev); -+ struct pfe *pfe = platform_get_drvdata(pdev); -+ -+ memset(pfe->hif.napi_counters, 0, sizeof(pfe->hif.napi_counters)); -+ -+ return count; -+} -+ -+static DEVICE_ATTR(hif_napi_stats, 0644, pfe_show_hif_napi_stats, pfe_set_hif_napi_stats); -+#endif -+ -+ -+static DEVICE_ATTR(class, 0644, pfe_show_class, pfe_set_class); -+static DEVICE_ATTR(tmu, 0644, pfe_show_tmu, pfe_set_tmu); -+#if !defined(CONFIG_UTIL_DISABLED) -+static DEVICE_ATTR(util, 0644, pfe_show_util, pfe_set_util); -+#endif -+static DEVICE_ATTR(bmu, 0444, pfe_show_bmu, NULL); -+static DEVICE_ATTR(hif, 0444, pfe_show_hif, NULL); -+static DEVICE_ATTR(gpi, 0444, pfe_show_gpi, NULL); -+static DEVICE_ATTR(drops, 0644, pfe_show_drops, pfe_set_drops); -+static DEVICE_ATTR(tmu0_queues, 0444, pfe_show_tmu0_queues, NULL); -+static DEVICE_ATTR(tmu1_queues, 0444, pfe_show_tmu1_queues, NULL); -+static DEVICE_ATTR(tmu2_queues, 0444, pfe_show_tmu2_queues, NULL); -+static DEVICE_ATTR(tmu3_queues, 0444, pfe_show_tmu3_queues, NULL); -+static DEVICE_ATTR(tmu0_ctx, 0444, pfe_show_tmu0_ctx, NULL); -+static DEVICE_ATTR(tmu1_ctx, 0444, pfe_show_tmu1_ctx, NULL); -+static DEVICE_ATTR(tmu2_ctx, 0444, pfe_show_tmu2_ctx, NULL); -+static DEVICE_ATTR(tmu3_ctx, 0444, pfe_show_tmu3_ctx, NULL); -+static DEVICE_ATTR(pfemem, 0444, pfe_show_pfemem, NULL); -+ -+ -+int pfe_sysfs_init(struct pfe *pfe) -+{ -+ if (device_create_file(pfe->dev, &dev_attr_class)) -+ goto err_class; -+ -+ if (device_create_file(pfe->dev, &dev_attr_tmu)) -+ goto err_tmu; -+ -+#if !defined(CONFIG_UTIL_DISABLED) -+ if (device_create_file(pfe->dev, &dev_attr_util)) -+ goto err_util; -+#endif -+ -+ if (device_create_file(pfe->dev, &dev_attr_bmu)) -+ goto err_bmu; -+ -+ if (device_create_file(pfe->dev, &dev_attr_hif)) -+ goto err_hif; -+ -+ if (device_create_file(pfe->dev, &dev_attr_gpi)) -+ goto err_gpi; -+ -+ if (device_create_file(pfe->dev, &dev_attr_drops)) -+ goto err_drops; -+ -+ if (device_create_file(pfe->dev, &dev_attr_tmu0_queues)) -+ goto err_tmu0_queues; -+ -+ if (device_create_file(pfe->dev, &dev_attr_tmu1_queues)) -+ goto err_tmu1_queues; -+ -+ if (device_create_file(pfe->dev, &dev_attr_tmu2_queues)) -+ goto err_tmu2_queues; -+ -+ if (device_create_file(pfe->dev, &dev_attr_tmu3_queues)) -+ goto err_tmu3_queues; -+ -+ if (device_create_file(pfe->dev, 
&dev_attr_tmu0_ctx)) -+ goto err_tmu0_ctx; -+ -+ if (device_create_file(pfe->dev, &dev_attr_tmu1_ctx)) -+ goto err_tmu1_ctx; -+ -+ if (device_create_file(pfe->dev, &dev_attr_tmu2_ctx)) -+ goto err_tmu2_ctx; -+ -+ if (device_create_file(pfe->dev, &dev_attr_tmu3_ctx)) -+ goto err_tmu3_ctx; -+ -+ if (device_create_file(pfe->dev, &dev_attr_pfemem)) -+ goto err_pfemem; -+ -+#ifdef HIF_NAPI_STATS -+ if (device_create_file(pfe->dev, &dev_attr_hif_napi_stats)) -+ goto err_hif_napi_stats; -+#endif -+ -+ return 0; -+ -+#ifdef HIF_NAPI_STATS -+err_hif_napi_stats: -+ device_remove_file(pfe->dev, &dev_attr_pfemem); -+#endif -+ -+err_pfemem: -+ device_remove_file(pfe->dev, &dev_attr_tmu3_ctx); -+ -+err_tmu3_ctx: -+ device_remove_file(pfe->dev, &dev_attr_tmu2_ctx); -+ -+err_tmu2_ctx: -+ device_remove_file(pfe->dev, &dev_attr_tmu1_ctx); -+ -+err_tmu1_ctx: -+ device_remove_file(pfe->dev, &dev_attr_tmu0_ctx); -+ -+err_tmu0_ctx: -+ device_remove_file(pfe->dev, &dev_attr_tmu3_queues); -+ -+err_tmu3_queues: -+ device_remove_file(pfe->dev, &dev_attr_tmu2_queues); -+ -+err_tmu2_queues: -+ device_remove_file(pfe->dev, &dev_attr_tmu1_queues); -+ -+err_tmu1_queues: -+ device_remove_file(pfe->dev, &dev_attr_tmu0_queues); -+ -+err_tmu0_queues: -+ device_remove_file(pfe->dev, &dev_attr_drops); -+ -+err_drops: -+ device_remove_file(pfe->dev, &dev_attr_gpi); -+ -+err_gpi: -+ device_remove_file(pfe->dev, &dev_attr_hif); -+ -+err_hif: -+ device_remove_file(pfe->dev, &dev_attr_bmu); -+ -+err_bmu: -+#if !defined(CONFIG_UTIL_DISABLED) -+ device_remove_file(pfe->dev, &dev_attr_util); -+ -+err_util: -+#endif -+ device_remove_file(pfe->dev, &dev_attr_tmu); -+ -+err_tmu: -+ device_remove_file(pfe->dev, &dev_attr_class); -+ -+err_class: -+ return -1; -+} -+ -+ -+void pfe_sysfs_exit(struct pfe *pfe) -+{ -+#ifdef HIF_NAPI_STATS -+ device_remove_file(pfe->dev, &dev_attr_hif_napi_stats); -+#endif -+ -+ device_remove_file(pfe->dev, &dev_attr_pfemem); -+ device_remove_file(pfe->dev, &dev_attr_tmu3_ctx); -+ device_remove_file(pfe->dev, &dev_attr_tmu2_ctx); -+ device_remove_file(pfe->dev, &dev_attr_tmu1_ctx); -+ device_remove_file(pfe->dev, &dev_attr_tmu0_ctx); -+ device_remove_file(pfe->dev, &dev_attr_tmu3_queues); -+ device_remove_file(pfe->dev, &dev_attr_tmu2_queues); -+ device_remove_file(pfe->dev, &dev_attr_tmu1_queues); -+ device_remove_file(pfe->dev, &dev_attr_tmu0_queues); -+ device_remove_file(pfe->dev, &dev_attr_drops); -+ device_remove_file(pfe->dev, &dev_attr_gpi); -+ device_remove_file(pfe->dev, &dev_attr_hif); -+ device_remove_file(pfe->dev, &dev_attr_bmu); -+#if !defined(CONFIG_UTIL_DISABLED) -+ device_remove_file(pfe->dev, &dev_attr_util); -+#endif -+ device_remove_file(pfe->dev, &dev_attr_tmu); -+ device_remove_file(pfe->dev, &dev_attr_class); -+} -+ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/pfe_sysfs.h -@@ -0,0 +1,34 @@ -+/* -+ * -+ * Copyright (C) 2007 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#ifndef _PFE_SYSFS_H_ -+#define _PFE_SYSFS_H_ -+ -+#include -+ -+#define PESTATUS_ADDR_CLASS 0x800 -+#define PESTATUS_ADDR_TMU 0x80 -+#define PESTATUS_ADDR_UTIL 0x0 -+ -+#define TMU_CONTEXT_ADDR 0x3c8 -+#define IPSEC_CNTRS_ADDR 0x840 -+ -+int pfe_sysfs_init(struct pfe *pfe); -+void pfe_sysfs_exit(struct pfe *pfe); -+#endif /* _PFE_SYSFS_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl_ppfe/platform.h -@@ -0,0 +1,25 @@ -+/* -+ * -+ * Copyright (C) 2007 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#ifndef _PLATFORM_H_ -+#define _PLATFORM_H_ -+ -+#define virt_to_phys(virt) ((unsigned long)virt) -+ -+#endif /* _PLATFORM_H_ */ ---- a/include/linux/skbuff.h -+++ b/include/linux/skbuff.h -@@ -858,6 +858,17 @@ static inline struct sk_buff *alloc_skb_ - return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE); - } - -+extern struct sk_buff *__alloc_skb_header(unsigned int size, void *data, -+ gfp_t gfp_mask, -+ int fclone, -+ int node); -+static inline struct sk_buff *alloc_skb_header(unsigned int size, -+ u8 *data, -+ gfp_t priority) -+{ -+ return __alloc_skb_header(size, data, priority, 0, -1); -+} -+ - struct sk_buff *__alloc_skb_head(gfp_t priority, int node); - static inline struct sk_buff *alloc_skb_head(gfp_t priority) - { ---- a/net/core/skbuff.c -+++ b/net/core/skbuff.c -@@ -283,6 +283,90 @@ nodata: - EXPORT_SYMBOL(__alloc_skb); - - /** -+ * __alloc_skb_header - allocate a network buffer -+ * @size: size to allocate -+ * @gfp_mask: allocation mask -+ * @fclone: allocate from fclone cache instead of head cache -+ * and allocate a cloned (child) skb -+ * -+ * Allocate a new &sk_buff. The returned buffer has no headroom and a -+ * tail room of size bytes. The object has a reference count of one. -+ * The return is the buffer. On a failure the return is %NULL. -+ * -+ * Buffers may only be allocated from interrupts using a @gfp_mask of -+ * %GFP_ATOMIC. -+ */ -+struct sk_buff *__alloc_skb_header(unsigned int size, void *data, -+ gfp_t gfp_mask, int fclone, int node) -+{ -+ struct kmem_cache *cache; -+ struct skb_shared_info *shinfo; -+ struct sk_buff *skb; -+ -+ cache = fclone ? skbuff_fclone_cache : skbuff_head_cache; -+ -+ if (size <= SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) { -+ skb = NULL; -+ goto out; -+ } -+ -+ /* Get the HEAD */ -+ skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); -+ if (!skb) -+ goto out; -+ prefetchw(skb); -+ -+ /* kmalloc might give us more room than requested. -+ * Put skb_shared_info exactly at the end of allocated zone, -+ * to allow max possible filling before reallocation. 
-+ */ -+ size = SKB_WITH_OVERHEAD(ksize(data)); -+ prefetchw(data + size); -+ -+ /* Only clear those fields we need to clear, not those that we will -+ * actually initialise below. Hence, don't put any more fields after -+ * the tail pointer in struct sk_buff! -+ */ -+ memset(skb, 0, offsetof(struct sk_buff, tail)); -+ /* Account for allocated memory : skb + skb->head */ -+ skb->truesize = SKB_TRUESIZE(size); -+ atomic_set(&skb->users, 1); -+ skb->head = data; -+ skb->data = data; -+ skb_reset_tail_pointer(skb); -+ skb->end = skb->tail + size; -+#ifdef NET_SKBUFF_DATA_USES_OFFSET -+ skb->mac_header = ~0U; -+#endif -+ -+#if defined(CONFIG_COMCERTO_CUSTOM_SKB_LAYOUT) -+ skb->mspd_data = NULL; -+ skb->mspd_len = 0; -+#endif -+ -+ /* make sure we initialize shinfo sequentially */ -+ shinfo = skb_shinfo(skb); -+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); -+ atomic_set(&shinfo->dataref, 1); -+ kmemcheck_annotate_variable(shinfo->destructor_arg); -+ -+ if (fclone) { -+ struct sk_buff *child = skb + 1; -+ atomic_t *fclone_ref = (atomic_t *)(child + 1); -+ -+ kmemcheck_annotate_bitfield(child, flags1); -+ kmemcheck_annotate_bitfield(child, flags2); -+ skb->fclone = SKB_FCLONE_ORIG; -+ atomic_set(fclone_ref, 1); -+ -+ child->fclone = SKB_FCLONE_UNAVAILABLE; -+ } -+out: -+ return skb; -+} -+EXPORT_SYMBOL(__alloc_skb_header); -+ -+/** - * __build_skb - build a network buffer - * @data: data buffer provided by caller - * @frag_size: size of data, or 0 if head was kmalloced diff --git a/target/linux/layerscape/patches-4.4/7126-net-phy-add-driver-for-aquantia-AQR106-107-phy.patch b/target/linux/layerscape/patches-4.4/7126-net-phy-add-driver-for-aquantia-AQR106-107-phy.patch deleted file mode 100644 index 474982c2c..000000000 --- a/target/linux/layerscape/patches-4.4/7126-net-phy-add-driver-for-aquantia-AQR106-107-phy.patch +++ /dev/null @@ -1,63 +0,0 @@ -From 637a6e183edf302111b28461c0c98b9634b30437 Mon Sep 17 00:00:00 2001 -From: Mingkai Hu -Date: Fri, 1 Apr 2016 17:11:10 +0800 -Subject: [PATCH 126/141] net: phy: add driver for aquantia AQR106/107 phy - -Signed-off-by: Mingkai Hu ---- - drivers/net/phy/aquantia.c | 30 ++++++++++++++++++++++++++++++ - 1 file changed, 30 insertions(+) - ---- a/drivers/net/phy/aquantia.c -+++ b/drivers/net/phy/aquantia.c -@@ -21,6 +21,8 @@ - #define PHY_ID_AQ1202 0x03a1b445 - #define PHY_ID_AQ2104 0x03a1b460 - #define PHY_ID_AQR105 0x03a1b4a2 -+#define PHY_ID_AQR106 0x03a1b4d0 -+#define PHY_ID_AQR107 0x03a1b4e0 - #define PHY_ID_AQR405 0x03a1b4b0 - - #define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \ -@@ -157,6 +159,32 @@ static struct phy_driver aquantia_driver - .driver = { .owner = THIS_MODULE,}, - }, - { -+ .phy_id = PHY_ID_AQR106, -+ .phy_id_mask = 0xfffffff0, -+ .name = "Aquantia AQR106", -+ .features = PHY_AQUANTIA_FEATURES, -+ .flags = PHY_HAS_INTERRUPT, -+ .aneg_done = aquantia_aneg_done, -+ .config_aneg = aquantia_config_aneg, -+ .config_intr = aquantia_config_intr, -+ .ack_interrupt = aquantia_ack_interrupt, -+ .read_status = aquantia_read_status, -+ .driver = { .owner = THIS_MODULE,}, -+}, -+{ -+ .phy_id = PHY_ID_AQR107, -+ .phy_id_mask = 0xfffffff0, -+ .name = "Aquantia AQR107", -+ .features = PHY_AQUANTIA_FEATURES, -+ .flags = PHY_HAS_INTERRUPT, -+ .aneg_done = aquantia_aneg_done, -+ .config_aneg = aquantia_config_aneg, -+ .config_intr = aquantia_config_intr, -+ .ack_interrupt = aquantia_ack_interrupt, -+ .read_status = aquantia_read_status, -+ .driver = { .owner = THIS_MODULE,}, -+}, -+{ - .phy_id = PHY_ID_AQR405, - .phy_id_mask = 
0xfffffff0, - .name = "Aquantia AQR405", -@@ -177,6 +205,8 @@ static struct mdio_device_id __maybe_unu - { PHY_ID_AQ1202, 0xfffffff0 }, - { PHY_ID_AQ2104, 0xfffffff0 }, - { PHY_ID_AQR105, 0xfffffff0 }, -+ { PHY_ID_AQR106, 0xfffffff0 }, -+ { PHY_ID_AQR107, 0xfffffff0 }, - { PHY_ID_AQR405, 0xfffffff0 }, - { } - }; diff --git a/target/linux/layerscape/patches-4.4/7144-dpaa-call-arch_setup_dma_ops-before-using-dma_ops.patch b/target/linux/layerscape/patches-4.4/7144-dpaa-call-arch_setup_dma_ops-before-using-dma_ops.patch deleted file mode 100644 index ef8cf01b4..000000000 --- a/target/linux/layerscape/patches-4.4/7144-dpaa-call-arch_setup_dma_ops-before-using-dma_ops.patch +++ /dev/null @@ -1,53 +0,0 @@ -From 0ac69de37277aec31d18a8c7b9d9a3a65b629526 Mon Sep 17 00:00:00 2001 -From: Yangbo Lu -Date: Wed, 12 Oct 2016 16:30:57 +0800 -Subject: [PATCH 144/226] dpaa: call arch_setup_dma_ops before using dma_ops - -A previous patch caused dpaa call trace. This patch provides -a temporary workaround for this until this is fixed by upstream. - -Fixes: 1dccb598df54 ("arm64: simplify dma_get_ops") -Signed-off-by: Yangbo Lu ---- - .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.c | 12 ++++++------ - drivers/staging/fsl_qbman/qman_high.c | 1 + - 2 files changed, 7 insertions(+), 6 deletions(-) - ---- a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c -@@ -754,6 +754,12 @@ dpa_bp_alloc(struct dpa_bp *dpa_bp) - goto pdev_register_failed; - } - -+#ifdef CONFIG_FMAN_ARM -+ /* force coherency */ -+ pdev->dev.archdata.dma_coherent = true; -+ arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, true); -+#endif -+ - err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(40)); - if (err) - goto pdev_mask_failed; -@@ -765,12 +771,6 @@ dpa_bp_alloc(struct dpa_bp *dpa_bp) - goto pdev_mask_failed; - } - --#ifdef CONFIG_FMAN_ARM -- /* force coherency */ -- pdev->dev.archdata.dma_coherent = true; -- arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, true); --#endif -- - dpa_bp->dev = &pdev->dev; - - if (dpa_bp->seed_cb) { ---- a/drivers/staging/fsl_qbman/qman_high.c -+++ b/drivers/staging/fsl_qbman/qman_high.c -@@ -662,6 +662,7 @@ struct qman_portal *qman_create_portal( - portal->pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40); - portal->pdev->dev.dma_mask = &portal->pdev->dev.coherent_dma_mask; - #else -+ arch_setup_dma_ops(&portal->pdev->dev, 0, 0, NULL, false); - if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40))) { - pr_err("qman_portal - dma_set_mask() failed\n"); - goto fail_devadd; diff --git a/target/linux/layerscape/patches-4.4/7145-staging-fsl-mc-Added-generic-MSI-support-for-FSL-MC-.patch b/target/linux/layerscape/patches-4.4/7145-staging-fsl-mc-Added-generic-MSI-support-for-FSL-MC-.patch deleted file mode 100644 index 1f3a9f050..000000000 --- a/target/linux/layerscape/patches-4.4/7145-staging-fsl-mc-Added-generic-MSI-support-for-FSL-MC-.patch +++ /dev/null @@ -1,400 +0,0 @@ -From 8ebb892cd56d14e72580ab36c3b5eb2d4603a7fe Mon Sep 17 00:00:00 2001 -From: "J. German Rivera" -Date: Wed, 6 Jan 2016 16:03:21 -0600 -Subject: [PATCH 145/226] staging: fsl-mc: Added generic MSI support for - FSL-MC devices - -Created an MSI domain for the fsl-mc bus-- including functions -to create a domain, find a domain, alloc/free domain irqs, and -bus specific overrides for domain and irq_chip ops. - -Signed-off-by: J. 
German Rivera -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/Kconfig | 1 + - drivers/staging/fsl-mc/bus/Makefile | 1 + - drivers/staging/fsl-mc/bus/mc-msi.c | 276 +++++++++++++++++++++++++++ - drivers/staging/fsl-mc/include/dprc.h | 2 +- - drivers/staging/fsl-mc/include/mc-private.h | 17 ++ - drivers/staging/fsl-mc/include/mc.h | 17 ++ - 6 files changed, 313 insertions(+), 1 deletion(-) - create mode 100644 drivers/staging/fsl-mc/bus/mc-msi.c - ---- a/drivers/staging/fsl-mc/bus/Kconfig -+++ b/drivers/staging/fsl-mc/bus/Kconfig -@@ -9,6 +9,7 @@ - config FSL_MC_BUS - tristate "Freescale Management Complex (MC) bus driver" - depends on OF && ARM64 -+ select GENERIC_MSI_IRQ_DOMAIN - help - Driver to enable the bus infrastructure for the Freescale - QorIQ Management Complex (fsl-mc). The fsl-mc is a hardware ---- a/drivers/staging/fsl-mc/bus/Makefile -+++ b/drivers/staging/fsl-mc/bus/Makefile -@@ -13,5 +13,6 @@ mc-bus-driver-objs := mc-bus.o \ - dpmng.o \ - dprc-driver.o \ - mc-allocator.o \ -+ mc-msi.o \ - dpmcp.o \ - dpbp.o ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/mc-msi.c -@@ -0,0 +1,276 @@ -+/* -+ * Freescale Management Complex (MC) bus driver MSI support -+ * -+ * Copyright (C) 2015 Freescale Semiconductor, Inc. -+ * Author: German Rivera -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+ -+#include "../include/mc-private.h" -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "../include/mc-sys.h" -+#include "dprc-cmd.h" -+ -+static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg, -+ struct msi_desc *desc) -+{ -+ arg->desc = desc; -+ arg->hwirq = (irq_hw_number_t)desc->fsl_mc.msi_index; -+} -+ -+static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info) -+{ -+ struct msi_domain_ops *ops = info->ops; -+ -+ if (WARN_ON(!ops)) -+ return; -+ -+ /* -+ * set_desc should not be set by the caller -+ */ -+ if (WARN_ON(ops->set_desc)) -+ return; -+ -+ ops->set_desc = fsl_mc_msi_set_desc; -+} -+ -+static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev, -+ struct fsl_mc_device_irq *mc_dev_irq) -+{ -+ int error; -+ struct fsl_mc_device *owner_mc_dev = mc_dev_irq->mc_dev; -+ struct msi_desc *msi_desc = mc_dev_irq->msi_desc; -+ struct dprc_irq_cfg irq_cfg; -+ -+ /* -+ * msi_desc->msg.address is 0x0 when this function is invoked in -+ * the free_irq() code path. In this case, for the MC, we don't -+ * really need to "unprogram" the MSI, so we just return. 
-+ */ -+ if (msi_desc->msg.address_lo == 0x0 && msi_desc->msg.address_hi == 0x0) -+ return; -+ -+ if (WARN_ON(!owner_mc_dev)) -+ return; -+ -+ irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) | -+ msi_desc->msg.address_lo; -+ irq_cfg.val = msi_desc->msg.data; -+ irq_cfg.user_irq_id = msi_desc->irq; -+ -+ if (owner_mc_dev == mc_bus_dev) { -+ /* -+ * IRQ is for the mc_bus_dev's DPRC itself -+ */ -+ error = dprc_set_irq(mc_bus_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI, -+ mc_bus_dev->mc_handle, -+ mc_dev_irq->dev_irq_index, -+ &irq_cfg); -+ if (error < 0) { -+ dev_err(&owner_mc_dev->dev, -+ "dprc_set_irq() failed: %d\n", error); -+ } -+ } else { -+ /* -+ * IRQ is for a child device of mc_bus_dev -+ */ -+ error = dprc_set_obj_irq(mc_bus_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI, -+ mc_bus_dev->mc_handle, -+ owner_mc_dev->obj_desc.type, -+ owner_mc_dev->obj_desc.id, -+ mc_dev_irq->dev_irq_index, -+ &irq_cfg); -+ if (error < 0) { -+ dev_err(&owner_mc_dev->dev, -+ "dprc_obj_set_irq() failed: %d\n", error); -+ } -+ } -+} -+ -+/* -+ * NOTE: This function is invoked with interrupts disabled -+ */ -+static void fsl_mc_msi_write_msg(struct irq_data *irq_data, -+ struct msi_msg *msg) -+{ -+ struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data); -+ struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_desc->dev); -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); -+ struct fsl_mc_device_irq *mc_dev_irq = -+ &mc_bus->irq_resources[msi_desc->fsl_mc.msi_index]; -+ -+ WARN_ON(mc_dev_irq->msi_desc != msi_desc); -+ msi_desc->msg = *msg; -+ -+ /* -+ * Program the MSI (paddr, value) pair in the device: -+ */ -+ __fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq); -+} -+ -+static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info) -+{ -+ struct irq_chip *chip = info->chip; -+ -+ if (WARN_ON((!chip))) -+ return; -+ -+ /* -+ * irq_write_msi_msg should not be set by the caller -+ */ -+ if (WARN_ON(chip->irq_write_msi_msg)) -+ return; -+ -+ chip->irq_write_msi_msg = fsl_mc_msi_write_msg; -+} -+ -+/** -+ * fsl_mc_msi_create_irq_domain - Create a fsl-mc MSI interrupt domain -+ * @fwnode: Optional firmware node of the interrupt controller -+ * @info: MSI domain info -+ * @parent: Parent irq domain -+ * -+ * Updates the domain and chip ops and creates a fsl-mc MSI -+ * interrupt domain. -+ * -+ * Returns: -+ * A domain pointer or NULL in case of failure.
-+ */ -+struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode, -+ struct msi_domain_info *info, -+ struct irq_domain *parent) -+{ -+ struct irq_domain *domain; -+ -+ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) -+ fsl_mc_msi_update_dom_ops(info); -+ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) -+ fsl_mc_msi_update_chip_ops(info); -+ -+ domain = msi_create_irq_domain(fwnode, info, parent); -+ if (domain) -+ domain->bus_token = DOMAIN_BUS_FSL_MC_MSI; -+ -+ return domain; -+} -+ -+int fsl_mc_find_msi_domain(struct device *mc_platform_dev, -+ struct irq_domain **mc_msi_domain) -+{ -+ struct irq_domain *msi_domain; -+ struct device_node *mc_of_node = mc_platform_dev->of_node; -+ -+ msi_domain = of_msi_get_domain(mc_platform_dev, mc_of_node, -+ DOMAIN_BUS_FSL_MC_MSI); -+ if (!msi_domain) { -+ pr_err("Unable to find fsl-mc MSI domain for %s\n", -+ mc_of_node->full_name); -+ -+ return -ENOENT; -+ } -+ -+ *mc_msi_domain = msi_domain; -+ return 0; -+} -+ -+static void fsl_mc_msi_free_descs(struct device *dev) -+{ -+ struct msi_desc *desc, *tmp; -+ -+ list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) { -+ list_del(&desc->list); -+ free_msi_entry(desc); -+ } -+} -+ -+static int fsl_mc_msi_alloc_descs(struct device *dev, unsigned int irq_count) -+ -+{ -+ unsigned int i; -+ int error; -+ struct msi_desc *msi_desc; -+ -+ for (i = 0; i < irq_count; i++) { -+ msi_desc = alloc_msi_entry(dev); -+ if (!msi_desc) { -+ dev_err(dev, "Failed to allocate msi entry\n"); -+ error = -ENOMEM; -+ goto cleanup_msi_descs; -+ } -+ -+ msi_desc->fsl_mc.msi_index = i; -+ msi_desc->nvec_used = 1; -+ INIT_LIST_HEAD(&msi_desc->list); -+ list_add_tail(&msi_desc->list, dev_to_msi_list(dev)); -+ } -+ -+ return 0; -+ -+cleanup_msi_descs: -+ fsl_mc_msi_free_descs(dev); -+ return error; -+} -+ -+int fsl_mc_msi_domain_alloc_irqs(struct device *dev, -+ unsigned int irq_count) -+{ -+ struct irq_domain *msi_domain; -+ int error; -+ -+ if (WARN_ON(!list_empty(dev_to_msi_list(dev)))) -+ return -EINVAL; -+ -+ error = fsl_mc_msi_alloc_descs(dev, irq_count); -+ if (error < 0) -+ return error; -+ -+ msi_domain = dev_get_msi_domain(dev); -+ if (WARN_ON(!msi_domain)) { -+ error = -EINVAL; -+ goto cleanup_msi_descs; -+ } -+ -+ /* -+ * NOTE: Calling this function will trigger the invocation of the -+ * its_fsl_mc_msi_prepare() callback -+ */ -+ error = msi_domain_alloc_irqs(msi_domain, dev, irq_count); -+ -+ if (error) { -+ dev_err(dev, "Failed to allocate IRQs\n"); -+ goto cleanup_msi_descs; -+ } -+ -+ return 0; -+ -+cleanup_msi_descs: -+ fsl_mc_msi_free_descs(dev); -+ return error; -+} -+ -+void fsl_mc_msi_domain_free_irqs(struct device *dev) -+{ -+ struct irq_domain *msi_domain; -+ -+ msi_domain = dev_get_msi_domain(dev); -+ if (WARN_ON(!msi_domain)) -+ return; -+ -+ msi_domain_free_irqs(msi_domain, dev); -+ -+ if (WARN_ON(list_empty(dev_to_msi_list(dev)))) -+ return; -+ -+ fsl_mc_msi_free_descs(dev); -+} ---- a/drivers/staging/fsl-mc/include/dprc.h -+++ b/drivers/staging/fsl-mc/include/dprc.h -@@ -176,7 +176,7 @@ int dprc_reset_container(struct fsl_mc_i - * @user_irq_id: A user defined number associated with this IRQ - */ - struct dprc_irq_cfg { -- u64 paddr; -+ phys_addr_t paddr; - u32 val; - int user_irq_id; - }; ---- a/drivers/staging/fsl-mc/include/mc-private.h -+++ b/drivers/staging/fsl-mc/include/mc-private.h -@@ -26,6 +26,9 @@ - strcmp(_obj_type, "dpmcp") == 0 || \ - strcmp(_obj_type, "dpcon") == 0) - -+struct irq_domain; -+struct msi_domain_info; -+ - /** - * struct fsl_mc - Private data of a 
"fsl,qoriq-mc" platform device - * @root_mc_bus_dev: MC object device representing the root DPRC -@@ -79,11 +82,13 @@ struct fsl_mc_resource_pool { - * @resource_pools: array of resource pools (one pool per resource type) - * for this MC bus. These resources represent allocatable entities - * from the physical DPRC. -+ * @irq_resources: Pointer to array of IRQ objects for the IRQ pool - * @scan_mutex: Serializes bus scanning - */ - struct fsl_mc_bus { - struct fsl_mc_device mc_dev; - struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES]; -+ struct fsl_mc_device_irq *irq_resources; - struct mutex scan_mutex; /* serializes bus scanning */ - }; - -@@ -116,4 +121,16 @@ int __must_check fsl_mc_resource_allocat - - void fsl_mc_resource_free(struct fsl_mc_resource *resource); - -+struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode, -+ struct msi_domain_info *info, -+ struct irq_domain *parent); -+ -+int fsl_mc_find_msi_domain(struct device *mc_platform_dev, -+ struct irq_domain **mc_msi_domain); -+ -+int fsl_mc_msi_domain_alloc_irqs(struct device *dev, -+ unsigned int irq_count); -+ -+void fsl_mc_msi_domain_free_irqs(struct device *dev); -+ - #endif /* _FSL_MC_PRIVATE_H_ */ ---- a/drivers/staging/fsl-mc/include/mc.h -+++ b/drivers/staging/fsl-mc/include/mc.h -@@ -104,6 +104,23 @@ struct fsl_mc_resource { - }; - - /** -+ * struct fsl_mc_device_irq - MC object device message-based interrupt -+ * @msi_desc: pointer to MSI descriptor allocated by fsl_mc_msi_alloc_descs() -+ * @mc_dev: MC object device that owns this interrupt -+ * @dev_irq_index: device-relative IRQ index -+ * @resource: MC generic resource associated with the interrupt -+ */ -+struct fsl_mc_device_irq { -+ struct msi_desc *msi_desc; -+ struct fsl_mc_device *mc_dev; -+ u8 dev_irq_index; -+ struct fsl_mc_resource resource; -+}; -+ -+#define to_fsl_mc_irq(_mc_resource) \ -+ container_of(_mc_resource, struct fsl_mc_device_irq, resource) -+ -+/** - * Bit masks for a MC object device (struct fsl_mc_device) flags - */ - #define FSL_MC_IS_DPRC 0x0001 diff --git a/target/linux/layerscape/patches-4.4/7146-staging-fsl-mc-Added-GICv3-ITS-support-for-FSL-MC-MS.patch b/target/linux/layerscape/patches-4.4/7146-staging-fsl-mc-Added-GICv3-ITS-support-for-FSL-MC-MS.patch deleted file mode 100644 index 5b254d70d..000000000 --- a/target/linux/layerscape/patches-4.4/7146-staging-fsl-mc-Added-GICv3-ITS-support-for-FSL-MC-MS.patch +++ /dev/null @@ -1,167 +0,0 @@ -From 85cb8ae26b6c69f0a118f32b7b7cd4f22d782da3 Mon Sep 17 00:00:00 2001 -From: "J. German Rivera" -Date: Wed, 6 Jan 2016 16:03:22 -0600 -Subject: [PATCH 146/226] staging: fsl-mc: Added GICv3-ITS support for FSL-MC - MSIs - -Added platform-specific MSI support layer for FSL-MC devices. - -Signed-off-by: J. 
German Rivera -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/Makefile | 1 + - .../staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c | 127 ++++++++++++++++++++ - drivers/staging/fsl-mc/include/mc-private.h | 4 + - 3 files changed, 132 insertions(+) - create mode 100644 drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c - ---- a/drivers/staging/fsl-mc/bus/Makefile -+++ b/drivers/staging/fsl-mc/bus/Makefile -@@ -14,5 +14,6 @@ mc-bus-driver-objs := mc-bus.o \ - dprc-driver.o \ - mc-allocator.o \ - mc-msi.o \ -+ irq-gic-v3-its-fsl-mc-msi.o \ - dpmcp.o \ - dpbp.o ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c -@@ -0,0 +1,127 @@ -+/* -+ * Freescale Management Complex (MC) bus driver MSI support -+ * -+ * Copyright (C) 2015 Freescale Semiconductor, Inc. -+ * Author: German Rivera -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+ -+#include "../include/mc-private.h" -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "../include/mc-sys.h" -+#include "dprc-cmd.h" -+ -+static struct irq_chip its_msi_irq_chip = { -+ .name = "fsl-mc-bus-msi", -+ .irq_mask = irq_chip_mask_parent, -+ .irq_unmask = irq_chip_unmask_parent, -+ .irq_eoi = irq_chip_eoi_parent, -+ .irq_set_affinity = msi_domain_set_affinity -+}; -+ -+static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain, -+ struct device *dev, -+ int nvec, msi_alloc_info_t *info) -+{ -+ struct fsl_mc_device *mc_bus_dev; -+ struct msi_domain_info *msi_info; -+ -+ if (WARN_ON(dev->bus != &fsl_mc_bus_type)) -+ return -EINVAL; -+ -+ mc_bus_dev = to_fsl_mc_device(dev); -+ if (WARN_ON(!(mc_bus_dev->flags & FSL_MC_IS_DPRC))) -+ return -EINVAL; -+ -+ /* -+ * Set the device Id to be passed to the GIC-ITS: -+ * -+ * NOTE: This device id corresponds to the IOMMU stream ID -+ * associated with the DPRC object (ICID). 
-+ */ -+ info->scratchpad[0].ul = mc_bus_dev->icid; -+ msi_info = msi_get_domain_info(msi_domain->parent); -+ return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info); -+} -+ -+static struct msi_domain_ops its_fsl_mc_msi_ops = { -+ .msi_prepare = its_fsl_mc_msi_prepare, -+}; -+ -+static struct msi_domain_info its_fsl_mc_msi_domain_info = { -+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS), -+ .ops = &its_fsl_mc_msi_ops, -+ .chip = &its_msi_irq_chip, -+}; -+ -+static const struct of_device_id its_device_id[] = { -+ { .compatible = "arm,gic-v3-its", }, -+ {}, -+}; -+ -+int __init its_fsl_mc_msi_init(void) -+{ -+ struct device_node *np; -+ struct irq_domain *parent; -+ struct irq_domain *mc_msi_domain; -+ -+ for (np = of_find_matching_node(NULL, its_device_id); np; -+ np = of_find_matching_node(np, its_device_id)) { -+ if (!of_property_read_bool(np, "msi-controller")) -+ continue; -+ -+ parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS); -+ if (!parent || !msi_get_domain_info(parent)) { -+ pr_err("%s: unable to locate ITS domain\n", -+ np->full_name); -+ continue; -+ } -+ -+ mc_msi_domain = fsl_mc_msi_create_irq_domain( -+ of_node_to_fwnode(np), -+ &its_fsl_mc_msi_domain_info, -+ parent); -+ if (!mc_msi_domain) { -+ pr_err("%s: unable to create fsl-mc domain\n", -+ np->full_name); -+ continue; -+ } -+ -+ WARN_ON(mc_msi_domain-> -+ host_data != &its_fsl_mc_msi_domain_info); -+ -+ pr_info("fsl-mc MSI: %s domain created\n", np->full_name); -+ } -+ -+ return 0; -+} -+ -+void its_fsl_mc_msi_cleanup(void) -+{ -+ struct device_node *np; -+ -+ for (np = of_find_matching_node(NULL, its_device_id); np; -+ np = of_find_matching_node(np, its_device_id)) { -+ struct irq_domain *mc_msi_domain = irq_find_matching_host( -+ np, -+ DOMAIN_BUS_FSL_MC_MSI); -+ -+ if (!of_property_read_bool(np, "msi-controller")) -+ continue; -+ -+ mc_msi_domain = irq_find_matching_host(np, -+ DOMAIN_BUS_FSL_MC_MSI); -+ if (mc_msi_domain && -+ mc_msi_domain->host_data == &its_fsl_mc_msi_domain_info) -+ irq_domain_remove(mc_msi_domain); -+ } -+} ---- a/drivers/staging/fsl-mc/include/mc-private.h -+++ b/drivers/staging/fsl-mc/include/mc-private.h -@@ -133,4 +133,8 @@ int fsl_mc_msi_domain_alloc_irqs(struct - - void fsl_mc_msi_domain_free_irqs(struct device *dev); - -+int __init its_fsl_mc_msi_init(void); -+ -+void its_fsl_mc_msi_cleanup(void); -+ - #endif /* _FSL_MC_PRIVATE_H_ */ diff --git a/target/linux/layerscape/patches-4.4/7147-staging-fsl-mc-Extended-MC-bus-allocator-to-include-.patch b/target/linux/layerscape/patches-4.4/7147-staging-fsl-mc-Extended-MC-bus-allocator-to-include-.patch deleted file mode 100644 index c02c892b2..000000000 --- a/target/linux/layerscape/patches-4.4/7147-staging-fsl-mc-Extended-MC-bus-allocator-to-include-.patch +++ /dev/null @@ -1,326 +0,0 @@ -From 23b09c6b4162a8264b600f35d7048256a7afc0cd Mon Sep 17 00:00:00 2001 -From: "J. German Rivera" -Date: Wed, 6 Jan 2016 16:03:23 -0600 -Subject: [PATCH 147/226] staging: fsl-mc: Extended MC bus allocator to - include IRQs - -All the IRQs for DPAA2 objects in the same DPRC must use -the ICID of that DPRC, as their device Id in the GIC-ITS. -Thus, all these IRQs must share the same ITT table in the GIC. -As a result, a pool of IRQs with the same device Id must be -preallocated per DPRC (fsl-mc bus instance). So, the fsl-mc -bus object allocator is extended to also provide services -to allocate IRQs to DPAA2 devices, from their parent fsl-mc bus -IRQ pool. - -Signed-off-by: J. 
German Rivera -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/mc-allocator.c | 199 +++++++++++++++++++++++++++ - drivers/staging/fsl-mc/include/mc-private.h | 15 ++ - drivers/staging/fsl-mc/include/mc.h | 9 ++ - 3 files changed, 223 insertions(+) - ---- a/drivers/staging/fsl-mc/bus/mc-allocator.c -+++ b/drivers/staging/fsl-mc/bus/mc-allocator.c -@@ -15,6 +15,7 @@ - #include "../include/dpcon-cmd.h" - #include "dpmcp-cmd.h" - #include "dpmcp.h" -+#include - - /** - * fsl_mc_resource_pool_add_device - add allocatable device to a resource -@@ -160,6 +161,7 @@ static const char *const fsl_mc_pool_typ - [FSL_MC_POOL_DPMCP] = "dpmcp", - [FSL_MC_POOL_DPBP] = "dpbp", - [FSL_MC_POOL_DPCON] = "dpcon", -+ [FSL_MC_POOL_IRQ] = "irq", - }; - - static int __must_check object_type_to_pool_type(const char *object_type, -@@ -465,6 +467,203 @@ void fsl_mc_object_free(struct fsl_mc_de - } - EXPORT_SYMBOL_GPL(fsl_mc_object_free); - -+/* -+ * Initialize the interrupt pool associated with a MC bus. -+ * It allocates a block of IRQs from the GIC-ITS -+ */ -+int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus, -+ unsigned int irq_count) -+{ -+ unsigned int i; -+ struct msi_desc *msi_desc; -+ struct fsl_mc_device_irq *irq_resources; -+ struct fsl_mc_device_irq *mc_dev_irq; -+ int error; -+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev; -+ struct fsl_mc_resource_pool *res_pool = -+ &mc_bus->resource_pools[FSL_MC_POOL_IRQ]; -+ -+ if (WARN_ON(irq_count == 0 || -+ irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS)) -+ return -EINVAL; -+ -+ error = fsl_mc_msi_domain_alloc_irqs(&mc_bus_dev->dev, irq_count); -+ if (error < 0) -+ return error; -+ -+ irq_resources = devm_kzalloc(&mc_bus_dev->dev, -+ sizeof(*irq_resources) * irq_count, -+ GFP_KERNEL); -+ if (!irq_resources) { -+ error = -ENOMEM; -+ goto cleanup_msi_irqs; -+ } -+ -+ for (i = 0; i < irq_count; i++) { -+ mc_dev_irq = &irq_resources[i]; -+ -+ /* -+ * NOTE: This mc_dev_irq's MSI addr/value pair will be set -+ * by the fsl_mc_msi_write_msg() callback -+ */ -+ mc_dev_irq->resource.type = res_pool->type; -+ mc_dev_irq->resource.data = mc_dev_irq; -+ mc_dev_irq->resource.parent_pool = res_pool; -+ INIT_LIST_HEAD(&mc_dev_irq->resource.node); -+ list_add_tail(&mc_dev_irq->resource.node, &res_pool->free_list); -+ } -+ -+ for_each_msi_entry(msi_desc, &mc_bus_dev->dev) { -+ mc_dev_irq = &irq_resources[msi_desc->fsl_mc.msi_index]; -+ mc_dev_irq->msi_desc = msi_desc; -+ mc_dev_irq->resource.id = msi_desc->irq; -+ } -+ -+ res_pool->max_count = irq_count; -+ res_pool->free_count = irq_count; -+ mc_bus->irq_resources = irq_resources; -+ return 0; -+ -+cleanup_msi_irqs: -+ fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev); -+ return error; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool); -+ -+/** -+ * Teardown the interrupt pool associated with an MC bus. -+ * It frees the IRQs that were allocated to the pool, back to the GIC-ITS. 
-+ */ -+void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus) -+{ -+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev; -+ struct fsl_mc_resource_pool *res_pool = -+ &mc_bus->resource_pools[FSL_MC_POOL_IRQ]; -+ -+ if (WARN_ON(!mc_bus->irq_resources)) -+ return; -+ -+ if (WARN_ON(res_pool->max_count == 0)) -+ return; -+ -+ if (WARN_ON(res_pool->free_count != res_pool->max_count)) -+ return; -+ -+ INIT_LIST_HEAD(&res_pool->free_list); -+ res_pool->max_count = 0; -+ res_pool->free_count = 0; -+ mc_bus->irq_resources = NULL; -+ fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev); -+} -+EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool); -+ -+/** -+ * It allocates the IRQs required by a given MC object device. The -+ * IRQs are allocated from the interrupt pool associated with the -+ * MC bus that contains the device, if the device is not a DPRC device. -+ * Otherwise, the IRQs are allocated from the interrupt pool associated -+ * with the MC bus that represents the DPRC device itself. -+ */ -+int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev) -+{ -+ int i; -+ int irq_count; -+ int res_allocated_count = 0; -+ int error = -EINVAL; -+ struct fsl_mc_device_irq **irqs = NULL; -+ struct fsl_mc_bus *mc_bus; -+ struct fsl_mc_resource_pool *res_pool; -+ -+ if (WARN_ON(mc_dev->irqs)) -+ return -EINVAL; -+ -+ irq_count = mc_dev->obj_desc.irq_count; -+ if (WARN_ON(irq_count == 0)) -+ return -EINVAL; -+ -+ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) -+ mc_bus = to_fsl_mc_bus(mc_dev); -+ else -+ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent)); -+ -+ if (WARN_ON(!mc_bus->irq_resources)) -+ return -EINVAL; -+ -+ res_pool = &mc_bus->resource_pools[FSL_MC_POOL_IRQ]; -+ if (res_pool->free_count < irq_count) { -+ dev_err(&mc_dev->dev, -+ "Not able to allocate %u irqs for device\n", irq_count); -+ return -ENOSPC; -+ } -+ -+ irqs = devm_kzalloc(&mc_dev->dev, irq_count * sizeof(irqs[0]), -+ GFP_KERNEL); -+ if (!irqs) -+ return -ENOMEM; -+ -+ for (i = 0; i < irq_count; i++) { -+ struct fsl_mc_resource *resource; -+ -+ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_IRQ, -+ &resource); -+ if (error < 0) -+ goto error_resource_alloc; -+ -+ irqs[i] = to_fsl_mc_irq(resource); -+ res_allocated_count++; -+ -+ WARN_ON(irqs[i]->mc_dev); -+ irqs[i]->mc_dev = mc_dev; -+ irqs[i]->dev_irq_index = i; -+ } -+ -+ mc_dev->irqs = irqs; -+ return 0; -+ -+error_resource_alloc: -+ for (i = 0; i < res_allocated_count; i++) { -+ irqs[i]->mc_dev = NULL; -+ fsl_mc_resource_free(&irqs[i]->resource); -+ } -+ -+ return error; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs); -+ -+/* -+ * It frees the IRQs that were allocated for a MC object device, by -+ * returning them to the corresponding interrupt pool. 
-+ */ -+void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev) -+{ -+ int i; -+ int irq_count; -+ struct fsl_mc_bus *mc_bus; -+ struct fsl_mc_device_irq **irqs = mc_dev->irqs; -+ -+ if (WARN_ON(!irqs)) -+ return; -+ -+ irq_count = mc_dev->obj_desc.irq_count; -+ -+ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) -+ mc_bus = to_fsl_mc_bus(mc_dev); -+ else -+ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent)); -+ -+ if (WARN_ON(!mc_bus->irq_resources)) -+ return; -+ -+ for (i = 0; i < irq_count; i++) { -+ WARN_ON(!irqs[i]->mc_dev); -+ irqs[i]->mc_dev = NULL; -+ fsl_mc_resource_free(&irqs[i]->resource); -+ } -+ -+ mc_dev->irqs = NULL; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_free_irqs); -+ - /** - * fsl_mc_allocator_probe - callback invoked when an allocatable device is - * being added to the system ---- a/drivers/staging/fsl-mc/include/mc-private.h -+++ b/drivers/staging/fsl-mc/include/mc-private.h -@@ -30,6 +30,16 @@ struct irq_domain; - struct msi_domain_info; - - /** -+ * Maximum number of total IRQs that can be pre-allocated for an MC bus' -+ * IRQ pool -+ */ -+#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS 256 -+ -+struct device_node; -+struct irq_domain; -+struct msi_domain_info; -+ -+/** - * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device - * @root_mc_bus_dev: MC object device representing the root DPRC - * @num_translation_ranges: number of entries in addr_translation_ranges -@@ -137,4 +147,9 @@ int __init its_fsl_mc_msi_init(void); - - void its_fsl_mc_msi_cleanup(void); - -+int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus, -+ unsigned int irq_count); -+ -+void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus); -+ - #endif /* _FSL_MC_PRIVATE_H_ */ ---- a/drivers/staging/fsl-mc/include/mc.h -+++ b/drivers/staging/fsl-mc/include/mc.h -@@ -14,12 +14,14 @@ - #include - #include - #include -+#include - #include "../include/dprc.h" - - #define FSL_MC_VENDOR_FREESCALE 0x1957 - - struct fsl_mc_device; - struct fsl_mc_io; -+struct fsl_mc_bus; - - /** - * struct fsl_mc_driver - MC object device driver object -@@ -75,6 +77,7 @@ enum fsl_mc_pool_type { - FSL_MC_POOL_DPMCP = 0x0, /* corresponds to "dpmcp" in the MC */ - FSL_MC_POOL_DPBP, /* corresponds to "dpbp" in the MC */ - FSL_MC_POOL_DPCON, /* corresponds to "dpcon" in the MC */ -+ FSL_MC_POOL_IRQ, - - /* - * NOTE: New resource pool types must be added before this entry -@@ -141,6 +144,7 @@ struct fsl_mc_device_irq { - * NULL if none. - * @obj_desc: MC description of the DPAA device - * @regions: pointer to array of MMIO region entries -+ * @irqs: pointer to array of pointers to interrupts allocated to this device - * @resource: generic resource associated with this MC object device, if any. 
- * - * Generic device object for MC object devices that are "attached" to a -@@ -172,6 +176,7 @@ struct fsl_mc_device { - struct fsl_mc_io *mc_io; - struct dprc_obj_desc obj_desc; - struct resource *regions; -+ struct fsl_mc_device_irq **irqs; - struct fsl_mc_resource *resource; - }; - -@@ -215,6 +220,10 @@ int __must_check fsl_mc_object_allocate( - - void fsl_mc_object_free(struct fsl_mc_device *mc_adev); - -+int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev); -+ -+void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev); -+ - extern struct bus_type fsl_mc_bus_type; - - #endif /* _FSL_MC_H_ */ diff --git a/target/linux/layerscape/patches-4.4/7148-staging-fsl-mc-Changed-DPRC-built-in-portal-s-mc_io-.patch b/target/linux/layerscape/patches-4.4/7148-staging-fsl-mc-Changed-DPRC-built-in-portal-s-mc_io-.patch deleted file mode 100644 index 17dcb7589..000000000 --- a/target/linux/layerscape/patches-4.4/7148-staging-fsl-mc-Changed-DPRC-built-in-portal-s-mc_io-.patch +++ /dev/null @@ -1,44 +0,0 @@ -From 0f2a65dea2024b7898e3c0b42e0a7864d6538567 Mon Sep 17 00:00:00 2001 -From: "J. German Rivera" -Date: Wed, 6 Jan 2016 16:03:24 -0600 -Subject: [PATCH 148/226] staging: fsl-mc: Changed DPRC built-in portal's - mc_io to be atomic - -The DPRC built-in portal's mc_io is used to send commands to the MC -to program MSIs for MC objects. This is done by the -fsl_mc_msi_write_msg() callback, which is invoked by the generic MSI -layer with interrupts disabled. As a result, the mc_io used in -fsl_mc_msi_write_msg needs to be an atomic mc_io. - -Signed-off-by: J. German Rivera -Signed-off-by: Greg Kroah-Hartman --- - drivers/staging/fsl-mc/bus/dprc-driver.c | 4 +++- - drivers/staging/fsl-mc/bus/mc-bus.c | 3 ++- - 2 files changed, 5 insertions(+), 2 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/dprc-driver.c -+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c -@@ -396,7 +396,9 @@ static int dprc_probe(struct fsl_mc_devi - error = fsl_create_mc_io(&mc_dev->dev, - mc_dev->regions[0].start, - region_size, -- NULL, 0, &mc_dev->mc_io); -+ NULL, -+ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, -+ &mc_dev->mc_io); - if (error < 0) - return error; - } ---- a/drivers/staging/fsl-mc/bus/mc-bus.c -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -702,7 +702,8 @@ static int fsl_mc_bus_probe(struct platf - mc_portal_phys_addr = res.start; - mc_portal_size = resource_size(&res); - error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr, -- mc_portal_size, NULL, 0, &mc_io); -+ mc_portal_size, NULL, -+ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io); - if (error < 0) - return error; - diff --git a/target/linux/layerscape/patches-4.4/7149-staging-fsl-mc-Populate-the-IRQ-pool-for-an-MC-bus-i.patch b/target/linux/layerscape/patches-4.4/7149-staging-fsl-mc-Populate-the-IRQ-pool-for-an-MC-bus-i.patch deleted file mode 100644 index e72d5a7ef..000000000 --- a/target/linux/layerscape/patches-4.4/7149-staging-fsl-mc-Populate-the-IRQ-pool-for-an-MC-bus-i.patch +++ /dev/null @@ -1,109 +0,0 @@ -From 78ab7589777526022757e9c95b9d5864786eb4e5 Mon Sep 17 00:00:00 2001 -From: "J. German Rivera" -Date: Wed, 6 Jan 2016 16:03:25 -0600 -Subject: [PATCH 149/226] staging: fsl-mc: Populate the IRQ pool for an MC bus - instance - -Scan the corresponding DPRC container to get total count -of IRQs needed by all its child DPAA2 objects. Then, -preallocate a set of MSI IRQs with the DPRC's ICID -(GIC-ITS device Id) to populate the DPRC's IRQ pool.
-Each child DPAA2 object in the DPRC and the DPRC object itself -will allocate their necessary MSI IRQs from the DPRC's IRQ pool, -in their driver probe function. - -Signed-off-by: J. German Rivera -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/dprc-driver.c | 24 ++++++++++++++++++++++-- - drivers/staging/fsl-mc/include/mc-private.h | 3 ++- - 2 files changed, 24 insertions(+), 3 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/dprc-driver.c -+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c -@@ -241,6 +241,7 @@ static void dprc_cleanup_all_resource_po - * dprc_scan_objects - Discover objects in a DPRC - * - * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object -+ * @total_irq_count: total number of IRQs needed by objects in the DPRC. - * - * Detects objects added and removed from a DPRC and synchronizes the - * state of the Linux bus driver, MC by adding and removing -@@ -254,11 +255,13 @@ static void dprc_cleanup_all_resource_po - * populated before they can get allocation requests from probe callbacks - * of the device drivers for the non-allocatable devices. - */ --int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev) -+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev, -+ unsigned int *total_irq_count) - { - int num_child_objects; - int dprc_get_obj_failures; - int error; -+ unsigned int irq_count = mc_bus_dev->obj_desc.irq_count; - struct dprc_obj_desc *child_obj_desc_array = NULL; - - error = dprc_get_obj_count(mc_bus_dev->mc_io, -@@ -307,6 +310,7 @@ int dprc_scan_objects(struct fsl_mc_devi - continue; - } - -+ irq_count += obj_desc->irq_count; - dev_dbg(&mc_bus_dev->dev, - "Discovered object: type %s, id %d\n", - obj_desc->type, obj_desc->id); -@@ -319,6 +323,7 @@ int dprc_scan_objects(struct fsl_mc_devi - } - } - -+ *total_irq_count = irq_count; - dprc_remove_devices(mc_bus_dev, child_obj_desc_array, - num_child_objects); - -@@ -344,6 +349,7 @@ EXPORT_SYMBOL_GPL(dprc_scan_objects); - int dprc_scan_container(struct fsl_mc_device *mc_bus_dev) - { - int error; -+ unsigned int irq_count; - struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); - - dprc_init_all_resource_pools(mc_bus_dev); -@@ -352,11 +358,25 @@ int dprc_scan_container(struct fsl_mc_de - * Discover objects in the DPRC: - */ - mutex_lock(&mc_bus->scan_mutex); -- error = dprc_scan_objects(mc_bus_dev); -+ error = dprc_scan_objects(mc_bus_dev, &irq_count); - mutex_unlock(&mc_bus->scan_mutex); - if (error < 0) - goto error; - -+ if (dev_get_msi_domain(&mc_bus_dev->dev) && !mc_bus->irq_resources) { -+ if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) { -+ dev_warn(&mc_bus_dev->dev, -+ "IRQs needed (%u) exceed IRQs preallocated (%u)\n", -+ irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS); -+ } -+ -+ error = fsl_mc_populate_irq_pool( -+ mc_bus, -+ FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS); -+ if (error < 0) -+ goto error; -+ } -+ - return 0; - error: - dprc_cleanup_all_resource_pools(mc_bus_dev); ---- a/drivers/staging/fsl-mc/include/mc-private.h -+++ b/drivers/staging/fsl-mc/include/mc-private.h -@@ -114,7 +114,8 @@ void fsl_mc_device_remove(struct fsl_mc_ - - int dprc_scan_container(struct fsl_mc_device *mc_bus_dev); - --int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev); -+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev, -+ unsigned int *total_irq_count); - - int __init dprc_driver_init(void); - diff --git a/target/linux/layerscape/patches-4.4/7150-staging-fsl-mc-set-MSI-domain-for-DPRC-objects.patch 
b/target/linux/layerscape/patches-4.4/7150-staging-fsl-mc-set-MSI-domain-for-DPRC-objects.patch deleted file mode 100644 index 0c69783ad..000000000 --- a/target/linux/layerscape/patches-4.4/7150-staging-fsl-mc-set-MSI-domain-for-DPRC-objects.patch +++ /dev/null @@ -1,103 +0,0 @@ -From 15bfab2641c61fb50a876860e8909ab84d2b8701 Mon Sep 17 00:00:00 2001 -From: "J. German Rivera" -Date: Wed, 6 Jan 2016 16:03:26 -0600 -Subject: [PATCH 150/226] staging: fsl-mc: set MSI domain for DPRC objects - -The MSI domain associated with a root DPRC object is -obtained from the device tree. Child DPRCs inherit -the parent DPRC MSI domain. - -Signed-off-by: J. German Rivera -Signed-off-by: Greg Kroah-Hartman --- - drivers/staging/fsl-mc/bus/dprc-driver.c | 39 ++++++++++++++++++++++++++++++ - 1 file changed, 39 insertions(+) - ---- a/drivers/staging/fsl-mc/bus/dprc-driver.c -+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c -@@ -13,6 +13,7 @@ - #include "../include/mc-sys.h" - #include - #include -+#include - #include "dprc-cmd.h" - - struct dprc_child_objs { -@@ -398,11 +399,16 @@ static int dprc_probe(struct fsl_mc_devi - { - int error; - size_t region_size; -+ struct device *parent_dev = mc_dev->dev.parent; - struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); -+ bool msi_domain_set = false; - - if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0)) - return -EINVAL; - -+ if (WARN_ON(dev_get_msi_domain(&mc_dev->dev))) -+ return -EINVAL; -+ - if (!mc_dev->mc_io) { - /* - * This is a child DPRC: -@@ -421,6 +427,30 @@ static int dprc_probe(struct fsl_mc_devi - &mc_dev->mc_io); - if (error < 0) - return error; -+ /* -+ * Inherit parent MSI domain: -+ */ -+ dev_set_msi_domain(&mc_dev->dev, -+ dev_get_msi_domain(parent_dev)); -+ msi_domain_set = true; -+ } else { -+ /* -+ * This is a root DPRC -+ */ -+ struct irq_domain *mc_msi_domain; -+ -+ if (WARN_ON(parent_dev->bus == &fsl_mc_bus_type)) -+ return -EINVAL; -+ -+ error = fsl_mc_find_msi_domain(parent_dev, -+ &mc_msi_domain); -+ if (error < 0) { -+ dev_warn(&mc_dev->dev, -+ "WARNING: MC bus without interrupt support\n"); -+ } else { -+ dev_set_msi_domain(&mc_dev->dev, mc_msi_domain); -+ msi_domain_set = true; -+ } - } - - error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id, -@@ -446,6 +476,9 @@ error_cleanup_open: - (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); - - error_cleanup_mc_io: -+ if (msi_domain_set) -+ dev_set_msi_domain(&mc_dev->dev, NULL); -+ - fsl_destroy_mc_io(mc_dev->mc_io); - return error; - } -@@ -463,6 +496,7 @@ error_cleanup_mc_io: - static int dprc_remove(struct fsl_mc_device *mc_dev) - { - int error; -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); - - if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0)) - return -EINVAL; -@@ -475,6 +509,11 @@ static int dprc_remove(struct fsl_mc_dev - if (error < 0) - dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error); - -+ if (dev_get_msi_domain(&mc_dev->dev)) { -+ fsl_mc_cleanup_irq_pool(mc_bus); -+ dev_set_msi_domain(&mc_dev->dev, NULL); -+ } -+ - dev_info(&mc_dev->dev, "DPRC device unbound from driver"); - return 0; - } diff --git a/target/linux/layerscape/patches-4.4/7151-staging-fsl-mc-Fixed-bug-in-dprc_probe-error-path.patch b/target/linux/layerscape/patches-4.4/7151-staging-fsl-mc-Fixed-bug-in-dprc_probe-error-path.patch deleted file mode 100644 index 3324048a0..000000000 --- a/target/linux/layerscape/patches-4.4/7151-staging-fsl-mc-Fixed-bug-in-dprc_probe-error-path.patch +++ /dev/null @@ -1,72 +0,0 @@ -From 22aa842ae501ea8724afd45fcb0d7b17a67cb950 Mon Sep 17 00:00:00
2001 -From: "J. German Rivera" -Date: Wed, 6 Jan 2016 16:03:27 -0600 -Subject: [PATCH 151/226] staging: fsl-mc: Fixed bug in dprc_probe() error - path - -Destroy mc_io in error path in dprc_probe() only if the mc_io was -created in this function. - -Signed-off-by: J. German Rivera -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/dprc-driver.c | 17 ++++++++++++++--- - 1 file changed, 14 insertions(+), 3 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/dprc-driver.c -+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c -@@ -401,6 +401,7 @@ static int dprc_probe(struct fsl_mc_devi - size_t region_size; - struct device *parent_dev = mc_dev->dev.parent; - struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); -+ bool mc_io_created = false; - bool msi_domain_set = false; - - if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0)) -@@ -413,6 +414,9 @@ static int dprc_probe(struct fsl_mc_devi - /* - * This is a child DPRC: - */ -+ if (WARN_ON(parent_dev->bus != &fsl_mc_bus_type)) -+ return -EINVAL; -+ - if (WARN_ON(mc_dev->obj_desc.region_count == 0)) - return -EINVAL; - -@@ -427,6 +431,9 @@ static int dprc_probe(struct fsl_mc_devi - &mc_dev->mc_io); - if (error < 0) - return error; -+ -+ mc_io_created = true; -+ - /* - * Inherit parent MSI domain: - */ -@@ -457,7 +464,7 @@ static int dprc_probe(struct fsl_mc_devi - &mc_dev->mc_handle); - if (error < 0) { - dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", error); -- goto error_cleanup_mc_io; -+ goto error_cleanup_msi_domain; - } - - mutex_init(&mc_bus->scan_mutex); -@@ -475,11 +482,15 @@ static int dprc_probe(struct fsl_mc_devi - error_cleanup_open: - (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); - --error_cleanup_mc_io: -+error_cleanup_msi_domain: - if (msi_domain_set) - dev_set_msi_domain(&mc_dev->dev, NULL); - -- fsl_destroy_mc_io(mc_dev->mc_io); -+ if (mc_io_created) { -+ fsl_destroy_mc_io(mc_dev->mc_io); -+ mc_dev->mc_io = NULL; -+ } -+ - return error; - } - diff --git a/target/linux/layerscape/patches-4.4/7152-staging-fsl-mc-Added-DPRC-interrupt-handler.patch b/target/linux/layerscape/patches-4.4/7152-staging-fsl-mc-Added-DPRC-interrupt-handler.patch deleted file mode 100644 index 61b7ee71e..000000000 --- a/target/linux/layerscape/patches-4.4/7152-staging-fsl-mc-Added-DPRC-interrupt-handler.patch +++ /dev/null @@ -1,301 +0,0 @@ -From aa83997b14c31b34d9af24cb42726b55fa630464 Mon Sep 17 00:00:00 2001 -From: "J. German Rivera" -Date: Wed, 6 Jan 2016 16:03:28 -0600 -Subject: [PATCH 152/226] staging: fsl-mc: Added DPRC interrupt handler - -The interrupt handler for DPRC IRQs is added. DPRC IRQs are -generated for hot plug events related to DPAA2 objects in a given -DPRC. These events include, creating/destroying DPAA2 objects in -the DPRC, changing the "plugged" state of DPAA2 objects and moving -objects between DPRCs. - -Signed-off-by: J. 
German Rivera -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/dprc-driver.c | 247 ++++++++++++++++++++++++++++++ - 1 file changed, 247 insertions(+) - ---- a/drivers/staging/fsl-mc/bus/dprc-driver.c -+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c -@@ -14,6 +14,7 @@ - #include - #include - #include -+#include - #include "dprc-cmd.h" - - struct dprc_child_objs { -@@ -386,6 +387,230 @@ error: - EXPORT_SYMBOL_GPL(dprc_scan_container); - - /** -+ * dprc_irq0_handler - Regular ISR for DPRC interrupt 0 -+ * -+ * @irq: IRQ number of the interrupt being handled -+ * @arg: Pointer to device structure -+ */ -+static irqreturn_t dprc_irq0_handler(int irq_num, void *arg) -+{ -+ return IRQ_WAKE_THREAD; -+} -+ -+/** -+ * dprc_irq0_handler_thread - Handler thread function for DPRC interrupt 0 -+ * -+ * @irq: IRQ number of the interrupt being handled -+ * @arg: Pointer to device structure -+ */ -+static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg) -+{ -+ int error; -+ u32 status; -+ struct device *dev = (struct device *)arg; -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); -+ struct fsl_mc_io *mc_io = mc_dev->mc_io; -+ struct msi_desc *msi_desc = mc_dev->irqs[0]->msi_desc; -+ -+ dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n", -+ irq_num, smp_processor_id()); -+ -+ if (WARN_ON(!(mc_dev->flags & FSL_MC_IS_DPRC))) -+ return IRQ_HANDLED; -+ -+ mutex_lock(&mc_bus->scan_mutex); -+ if (WARN_ON(!msi_desc || msi_desc->irq != (u32)irq_num)) -+ goto out; -+ -+ error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, 0, -+ &status); -+ if (error < 0) { -+ dev_err(dev, -+ "dprc_get_irq_status() failed: %d\n", error); -+ goto out; -+ } -+ -+ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0, -+ status); -+ if (error < 0) { -+ dev_err(dev, -+ "dprc_clear_irq_status() failed: %d\n", error); -+ goto out; -+ } -+ -+ if (status & (DPRC_IRQ_EVENT_OBJ_ADDED | -+ DPRC_IRQ_EVENT_OBJ_REMOVED | -+ DPRC_IRQ_EVENT_CONTAINER_DESTROYED | -+ DPRC_IRQ_EVENT_OBJ_DESTROYED | -+ DPRC_IRQ_EVENT_OBJ_CREATED)) { -+ unsigned int irq_count; -+ -+ error = dprc_scan_objects(mc_dev, &irq_count); -+ if (error < 0) { -+ /* -+ * If the error is -ENXIO, we ignore it, as it indicates -+ * that the object scan was aborted, as we detected that -+ * an object was removed from the DPRC in the MC, while -+ * we were scanning the DPRC. 
-+ */ -+ if (error != -ENXIO) { -+ dev_err(dev, "dprc_scan_objects() failed: %d\n", -+ error); -+ } -+ -+ goto out; -+ } -+ -+ if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) { -+ dev_warn(dev, -+ "IRQs needed (%u) exceed IRQs preallocated (%u)\n", -+ irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS); -+ } -+ } -+ -+out: -+ mutex_unlock(&mc_bus->scan_mutex); -+ return IRQ_HANDLED; -+} -+ -+/* -+ * Disable and clear interrupt for a given DPRC object -+ */ -+static int disable_dprc_irq(struct fsl_mc_device *mc_dev) -+{ -+ int error; -+ struct fsl_mc_io *mc_io = mc_dev->mc_io; -+ -+ WARN_ON(mc_dev->obj_desc.irq_count != 1); -+ -+ /* -+ * Disable generation of interrupt, while we configure it: -+ */ -+ error = dprc_set_irq_enable(mc_io, 0, mc_dev->mc_handle, 0, 0); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, -+ "Disabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n", -+ error); -+ return error; -+ } -+ -+ /* -+ * Disable all interrupt causes for the interrupt: -+ */ -+ error = dprc_set_irq_mask(mc_io, 0, mc_dev->mc_handle, 0, 0x0); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, -+ "Disabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n", -+ error); -+ return error; -+ } -+ -+ /* -+ * Clear any leftover interrupts: -+ */ -+ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0, ~0x0U); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, -+ "Disabling DPRC IRQ failed: dprc_clear_irq_status() failed: %d\n", -+ error); -+ return error; -+ } -+ -+ return 0; -+} -+ -+static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev) -+{ -+ int error; -+ struct fsl_mc_device_irq *irq = mc_dev->irqs[0]; -+ -+ WARN_ON(mc_dev->obj_desc.irq_count != 1); -+ -+ /* -+ * NOTE: devm_request_threaded_irq() invokes the device-specific -+ * function that programs the MSI physically in the device -+ */ -+ error = devm_request_threaded_irq(&mc_dev->dev, -+ irq->msi_desc->irq, -+ dprc_irq0_handler, -+ dprc_irq0_handler_thread, -+ IRQF_NO_SUSPEND | IRQF_ONESHOT, -+ "FSL MC DPRC irq0", -+ &mc_dev->dev); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, -+ "devm_request_threaded_irq() failed: %d\n", -+ error); -+ return error; -+ } -+ -+ return 0; -+} -+ -+static int enable_dprc_irq(struct fsl_mc_device *mc_dev) -+{ -+ int error; -+ -+ /* -+ * Enable all interrupt causes for the interrupt: -+ */ -+ error = dprc_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, 0, -+ ~0x0u); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, -+ "Enabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n", -+ error); -+ -+ return error; -+ } -+ -+ /* -+ * Enable generation of the interrupt: -+ */ -+ error = dprc_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, 0, 1); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, -+ "Enabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n", -+ error); -+ -+ return error; -+ } -+ -+ return 0; -+} -+ -+/* -+ * Setup interrupt for a given DPRC device -+ */ -+static int dprc_setup_irq(struct fsl_mc_device *mc_dev) -+{ -+ int error; -+ -+ error = fsl_mc_allocate_irqs(mc_dev); -+ if (error < 0) -+ return error; -+ -+ error = disable_dprc_irq(mc_dev); -+ if (error < 0) -+ goto error_free_irqs; -+ -+ error = register_dprc_irq_handler(mc_dev); -+ if (error < 0) -+ goto error_free_irqs; -+ -+ error = enable_dprc_irq(mc_dev); -+ if (error < 0) -+ goto error_free_irqs; -+ -+ return 0; -+ -+error_free_irqs: -+ fsl_mc_free_irqs(mc_dev); -+ return error; -+} -+ -+/** - * dprc_probe - callback invoked when a DPRC is being bound to this driver - * - * @mc_dev: Pointer to fsl-mc device representing a DPRC -@@ -476,6 
+701,13 @@ static int dprc_probe(struct fsl_mc_devi - if (error < 0) - goto error_cleanup_open; - -+ /* -+ * Configure interrupt for the DPRC object associated with this MC bus: -+ */ -+ error = dprc_setup_irq(mc_dev); -+ if (error < 0) -+ goto error_cleanup_open; -+ - dev_info(&mc_dev->dev, "DPRC device bound to driver"); - return 0; - -@@ -494,6 +726,15 @@ error_cleanup_msi_domain: - return error; - } - -+/* -+ * Tear down interrupt for a given DPRC object -+ */ -+static void dprc_teardown_irq(struct fsl_mc_device *mc_dev) -+{ -+ (void)disable_dprc_irq(mc_dev); -+ fsl_mc_free_irqs(mc_dev); -+} -+ - /** - * dprc_remove - callback invoked when a DPRC is being unbound from this driver - * -@@ -514,6 +755,12 @@ static int dprc_remove(struct fsl_mc_dev - if (WARN_ON(!mc_dev->mc_io)) - return -EINVAL; - -+ if (WARN_ON(!mc_bus->irq_resources)) -+ return -EINVAL; -+ -+ if (dev_get_msi_domain(&mc_dev->dev)) -+ dprc_teardown_irq(mc_dev); -+ - device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove); - dprc_cleanup_all_resource_pools(mc_dev); - error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); diff --git a/target/linux/layerscape/patches-4.4/7153-staging-fsl-mc-Added-MSI-support-to-the-MC-bus-drive.patch b/target/linux/layerscape/patches-4.4/7153-staging-fsl-mc-Added-MSI-support-to-the-MC-bus-drive.patch deleted file mode 100644 index cbc6c5e65..000000000 --- a/target/linux/layerscape/patches-4.4/7153-staging-fsl-mc-Added-MSI-support-to-the-MC-bus-drive.patch +++ /dev/null @@ -1,59 +0,0 @@ -From f588a135d9260f2e7fe29b0bb0b5294fc9c99f6c Mon Sep 17 00:00:00 2001 -From: "J. German Rivera" -Date: Wed, 6 Jan 2016 16:03:29 -0600 -Subject: [PATCH 153/226] staging: fsl-mc: Added MSI support to the MC bus - driver - -Initialize/Cleanup ITS-MSI support for the MC bus driver at driver -init/exit time. Associate an MSI domain with each DPAA2 child device. - -Signed-off-by: J. 
German Rivera -Signed-off-by: Greg Kroah-Hartman --- - drivers/staging/fsl-mc/bus/mc-bus.c | 12 ++++++++++++ - 1 file changed, 12 insertions(+) - ---- a/drivers/staging/fsl-mc/bus/mc-bus.c -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -16,6 +16,8 @@ - #include - #include - #include -+#include -+#include - #include "../include/dpmng.h" - #include "../include/mc-sys.h" - #include "dprc-cmd.h" -@@ -472,6 +474,8 @@ int fsl_mc_device_add(struct dprc_obj_de - mc_dev->icid = parent_mc_dev->icid; - mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK; - mc_dev->dev.dma_mask = &mc_dev->dma_mask; -+ dev_set_msi_domain(&mc_dev->dev, -+ dev_get_msi_domain(&parent_mc_dev->dev)); - } - - /* -@@ -833,8 +837,15 @@ static int __init fsl_mc_bus_driver_init - if (error < 0) - goto error_cleanup_dprc_driver; - -+ error = its_fsl_mc_msi_init(); -+ if (error < 0) -+ goto error_cleanup_mc_allocator; -+ - return 0; - -+error_cleanup_mc_allocator: -+ fsl_mc_allocator_driver_exit(); -+ - error_cleanup_dprc_driver: - dprc_driver_exit(); - -@@ -856,6 +867,7 @@ static void __exit fsl_mc_bus_driver_exi - if (WARN_ON(!mc_dev_cache)) - return; - -+ its_fsl_mc_msi_cleanup(); - fsl_mc_allocator_driver_exit(); - dprc_driver_exit(); - platform_driver_unregister(&fsl_mc_bus_driver); diff --git a/target/linux/layerscape/patches-4.4/7154-staging-fsl-mc-Remove-unneeded-parentheses.patch b/target/linux/layerscape/patches-4.4/7154-staging-fsl-mc-Remove-unneeded-parentheses.patch deleted file mode 100644 index 64af81c19..000000000 --- a/target/linux/layerscape/patches-4.4/7154-staging-fsl-mc-Remove-unneeded-parentheses.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 6ce3c078c4eac406b38de689c8e366d7345a51ba Mon Sep 17 00:00:00 2001 -From: Janani Ravichandran -Date: Thu, 11 Feb 2016 18:00:25 -0500 -Subject: [PATCH 154/226] staging: fsl-mc: Remove unneeded parentheses - -Remove unneeded parentheses on the right hand side of assignment -statements. -Semantic patch: - -@@ -expression a, b, c; -@@ - -( - a = (b == c) -| - a = -- ( - b -- ) -) - -Signed-off-by: Janani Ravichandran -Signed-off-by: Greg Kroah-Hartman --- - drivers/staging/fsl-mc/bus/dprc-driver.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/staging/fsl-mc/bus/dprc-driver.c -+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c -@@ -129,7 +129,7 @@ static void check_plugged_state_change(s - { - int error; - u32 plugged_flag_at_mc = -- (obj_desc->state & DPRC_OBJ_STATE_PLUGGED); -+ obj_desc->state & DPRC_OBJ_STATE_PLUGGED; - - if (plugged_flag_at_mc != - (mc_dev->obj_desc.state & DPRC_OBJ_STATE_PLUGGED)) { diff --git a/target/linux/layerscape/patches-4.4/7155-staging-fsl-mc-Do-not-allow-building-as-a-module.patch b/target/linux/layerscape/patches-4.4/7155-staging-fsl-mc-Do-not-allow-building-as-a-module.patch deleted file mode 100644 index 1ae62537f..000000000 --- a/target/linux/layerscape/patches-4.4/7155-staging-fsl-mc-Do-not-allow-building-as-a-module.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 322ff2fe86ec4dead2d2bceb20b624c72bdd1405 Mon Sep 17 00:00:00 2001 -From: Thierry Reding -Date: Mon, 15 Feb 2016 14:22:22 +0100 -Subject: [PATCH 155/226] staging: fsl-mc: Do not allow building as a module - -This driver uses functionality (MSI IRQ domain) whose symbols aren't -exported, and hence the modular build fails. While arguably there might -be reasons to make these symbols available to modules, that change would -be fairly involved and the set of exported functions should be carefully -audited. Fix the build failure for now by marking the driver boolean. - -Cc: J.
German Rivera -Cc: Greg Kroah-Hartman -Signed-off-by: Thierry Reding -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/staging/fsl-mc/bus/Kconfig -+++ b/drivers/staging/fsl-mc/bus/Kconfig -@@ -7,7 +7,7 @@ - # - - config FSL_MC_BUS -- tristate "Freescale Management Complex (MC) bus driver" -+ bool "Freescale Management Complex (MC) bus driver" - depends on OF && ARM64 - select GENERIC_MSI_IRQ_DOMAIN - help diff --git a/target/linux/layerscape/patches-4.4/7156-staging-fsl-mc-Avoid-section-mismatch.patch b/target/linux/layerscape/patches-4.4/7156-staging-fsl-mc-Avoid-section-mismatch.patch deleted file mode 100644 index 60fecd9b0..000000000 --- a/target/linux/layerscape/patches-4.4/7156-staging-fsl-mc-Avoid-section-mismatch.patch +++ /dev/null @@ -1,43 +0,0 @@ -From b2e5cfb43faf26517d191de65121f1a40166340f Mon Sep 17 00:00:00 2001 -From: Thierry Reding -Date: Mon, 15 Feb 2016 14:22:23 +0100 -Subject: [PATCH 156/226] staging: fsl-mc: Avoid section mismatch - -The fsl_mc_allocator_driver_exit() function is marked __exit, but is -called by the error handling code in fsl_mc_allocator_driver_init(). -This results in a section mismatch, which in turn could lead to -executing random code. - -Remove the __exit annotation to fix this. - -Cc: J. German Rivera -Cc: Greg Kroah-Hartman -Signed-off-by: Thierry Reding -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/mc-allocator.c | 2 +- - drivers/staging/fsl-mc/include/mc-private.h | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/mc-allocator.c -+++ b/drivers/staging/fsl-mc/bus/mc-allocator.c -@@ -756,7 +756,7 @@ int __init fsl_mc_allocator_driver_init( - return fsl_mc_driver_register(&fsl_mc_allocator_driver); - } - --void __exit fsl_mc_allocator_driver_exit(void) -+void fsl_mc_allocator_driver_exit(void) - { - fsl_mc_driver_unregister(&fsl_mc_allocator_driver); - } ---- a/drivers/staging/fsl-mc/include/mc-private.h -+++ b/drivers/staging/fsl-mc/include/mc-private.h -@@ -123,7 +123,7 @@ void dprc_driver_exit(void); - - int __init fsl_mc_allocator_driver_init(void); - --void __exit fsl_mc_allocator_driver_exit(void); -+void fsl_mc_allocator_driver_exit(void); - - int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus, - enum fsl_mc_pool_type pool_type, diff --git a/target/linux/layerscape/patches-4.4/7157-staging-fsl-mc-Remove-unneeded-else-following-a-retu.patch b/target/linux/layerscape/patches-4.4/7157-staging-fsl-mc-Remove-unneeded-else-following-a-retu.patch deleted file mode 100644 index ee0d1f62b..000000000 --- a/target/linux/layerscape/patches-4.4/7157-staging-fsl-mc-Remove-unneeded-else-following-a-retu.patch +++ /dev/null @@ -1,45 +0,0 @@ -From 5f82c6ff69f3a4bb635e619a893292bea711421e Mon Sep 17 00:00:00 2001 -From: Janani Ravichandran -Date: Thu, 18 Feb 2016 17:22:50 -0500 -Subject: [PATCH 157/226] staging: fsl-mc: Remove unneeded else following a - return - -Remove unnecessary else when there is a return statement in the -corresponding if block. Coccinelle patch used: - -@rule1@ -expression e1; -@@ - - if (e1) { ... return ...; } -- else{ - ... -- } - -@rule2@ -expression e2; -statement s1; -@@ - - if(e2) { ... 
return ...; } -- else - s1 - -Signed-off-by: Janani Ravichandran -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/mc-bus.c | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/mc-bus.c -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -248,8 +248,7 @@ static bool fsl_mc_is_root_dprc(struct d - fsl_mc_get_root_dprc(dev, &root_dprc_dev); - if (!root_dprc_dev) - return false; -- else -- return dev == root_dprc_dev; -+ return dev == root_dprc_dev; - } - - static int get_dprc_icid(struct fsl_mc_io *mc_io, diff --git a/target/linux/layerscape/patches-4.4/7158-staging-fsl-mc-Drop-unneeded-void-pointer-cast.patch b/target/linux/layerscape/patches-4.4/7158-staging-fsl-mc-Drop-unneeded-void-pointer-cast.patch deleted file mode 100644 index 51e879298..000000000 --- a/target/linux/layerscape/patches-4.4/7158-staging-fsl-mc-Drop-unneeded-void-pointer-cast.patch +++ /dev/null @@ -1,43 +0,0 @@ -From d9605741556a15dceed105afd7369d644aa46207 Mon Sep 17 00:00:00 2001 -From: Janani Ravichandran -Date: Thu, 25 Feb 2016 14:46:11 -0500 -Subject: [PATCH 158/226] staging: fsl-mc: Drop unneeded void pointer cast - -Void pointers need not be cast to other pointer types. -Semantic patch used: - -@r@ -expression x; -void *e; -type T; -identifier f; -@@ - -( - *((T *)e) -| - ((T *)x) [...] -| - ((T *)x)->f -| -- (T *) - e -) - -Signed-off-by: Janani Ravichandran -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/dprc-driver.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/staging/fsl-mc/bus/dprc-driver.c -+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c -@@ -407,7 +407,7 @@ static irqreturn_t dprc_irq0_handler_thr - { - int error; - u32 status; -- struct device *dev = (struct device *)arg; -+ struct device *dev = arg; - struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); - struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); - struct fsl_mc_io *mc_io = mc_dev->mc_io; diff --git a/target/linux/layerscape/patches-4.4/7159-staging-fsl-mc-bus-Eliminate-double-function-call.patch b/target/linux/layerscape/patches-4.4/7159-staging-fsl-mc-bus-Eliminate-double-function-call.patch deleted file mode 100644 index 74ac4fd28..000000000 --- a/target/linux/layerscape/patches-4.4/7159-staging-fsl-mc-bus-Eliminate-double-function-call.patch +++ /dev/null @@ -1,73 +0,0 @@ -From ecd7b5d9616e50f48a400749f17db19fd8a43f25 Mon Sep 17 00:00:00 2001 -From: Bhaktipriya Shridhar -Date: Sun, 28 Feb 2016 23:58:05 +0530 -Subject: [PATCH 159/226] staging: fsl-mc: bus: Eliminate double function call - -A call to irq_find_matching_host was already made and the result -has been stored in mc_msi_domain. mc_msi_domain is again reassigned -using the same function call which is redundant. - -irq_find_matching_host returns/locates a domain for a given fwnode. -The domain is identified using device node and bus_token(if several -domains have same device node but different purposes they can be -distinguished using bus-specific token). -http://www.bricktou.com/include/linux/irqdomain_irq_find_matching_host_en.html - -Also, of_property_read_bool finds and reads a boolean from a property -device node from which the property value is to be read. It doesn't -alter the device node. -http://lists.infradead.org/pipermail/linux-arm-kernel/2012-February/083698.html - -Since, both the function calls have the same device node and bus_token, -the return values shall be the same. Hence, the second call has been -removed. 
- -This was done using Coccinelle: - -@r@ -idexpression *x; -identifier f; -position p1,p2; -@@ - -x@p1 = f(...) -... when != x -( -x@p2 = f(...) -) - -@script:python@ -p1 << r.p1; -p2 << r.p2; -@@ - -if (p1[0].line == p2[0].line): - cocci.include_match(False) - -@@ -idexpression *x; -identifier f; -position r.p1,r.p2; -@@ - -*x@p1 = f(...) -... -*x@p2 = f(...) - -Signed-off-by: Bhaktipriya Shridhar -Signed-off-by: Greg Kroah-Hartman ---- - .../staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c | 2 -- - 1 file changed, 2 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c -+++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c -@@ -118,8 +118,6 @@ void its_fsl_mc_msi_cleanup(void) - if (!of_property_read_bool(np, "msi-controller")) - continue; - -- mc_msi_domain = irq_find_matching_host(np, -- DOMAIN_BUS_FSL_MC_MSI); - if (mc_msi_domain && - mc_msi_domain->host_data == &its_fsl_mc_msi_domain_info) - irq_domain_remove(mc_msi_domain); diff --git a/target/linux/layerscape/patches-4.4/7160-Staging-fsl-mc-Replace-pr_debug-with-dev_dbg.patch b/target/linux/layerscape/patches-4.4/7160-Staging-fsl-mc-Replace-pr_debug-with-dev_dbg.patch deleted file mode 100644 index 2d9c94726..000000000 --- a/target/linux/layerscape/patches-4.4/7160-Staging-fsl-mc-Replace-pr_debug-with-dev_dbg.patch +++ /dev/null @@ -1,96 +0,0 @@ -From 8727f71717b449a4c74a5a599374c05822d525f7 Mon Sep 17 00:00:00 2001 -From: Bhumika Goyal -Date: Fri, 4 Mar 2016 19:14:52 +0530 -Subject: [PATCH 160/226] Staging: fsl-mc: Replace pr_debug with dev_dbg - -This patch replaces pr_debug calls with dev_dbg when the device structure -is available as dev_* prints identifying information about the struct -device. -Done using coccinelle: - -@r exists@ -identifier f, s; -identifier x; -position p; -@@ -f(...,struct s *x,...) { -<+... -when != x == NULL -\(pr_err@p\|pr_debug@p\|pr_info\)(...); -...+> -} - -@r2@ -identifier fld2; -identifier r.s; -@@ - -struct s { - ... - struct device *fld2; - ... -}; - -@@ -identifier r.x,r2.fld2; -position r.p; -@@ - -( --pr_err@p -+dev_err - ( -+ &x->fld2, -...) -| -- pr_debug@p -+ dev_dbg - ( -+ &x->fld2, -...) -| -- pr_info@p -+ dev_info - ( -+ &x->fld2, -...) 
-) - -Signed-off-by: Bhumika Goyal -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/mc-sys.c | 9 ++++++--- - 1 file changed, 6 insertions(+), 3 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/mc-sys.c -+++ b/drivers/staging/fsl-mc/bus/mc-sys.c -@@ -328,7 +328,8 @@ static int mc_polling_wait_preemptible(s - MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS); - - if (time_after_eq(jiffies, jiffies_until_timeout)) { -- pr_debug("MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", -+ dev_dbg(&mc_io->dev, -+ "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", - mc_io->portal_phys_addr, - (unsigned int) - MC_CMD_HDR_READ_TOKEN(cmd->header), -@@ -369,7 +370,8 @@ static int mc_polling_wait_atomic(struct - udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS); - timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS; - if (timeout_usecs == 0) { -- pr_debug("MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", -+ dev_dbg(&mc_io->dev, -+ "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", - mc_io->portal_phys_addr, - (unsigned int) - MC_CMD_HDR_READ_TOKEN(cmd->header), -@@ -424,7 +426,8 @@ int mc_send_command(struct fsl_mc_io *mc - goto common_exit; - - if (status != MC_CMD_STATUS_OK) { -- pr_debug("MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n", -+ dev_dbg(&mc_io->dev, -+ "MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n", - mc_io->portal_phys_addr, - (unsigned int)MC_CMD_HDR_READ_TOKEN(cmd->header), - (unsigned int)MC_CMD_HDR_READ_CMDID(cmd->header), diff --git a/target/linux/layerscape/patches-4.4/7161-Staging-fsl-mc-Replace-pr_err-with-dev_err.patch b/target/linux/layerscape/patches-4.4/7161-Staging-fsl-mc-Replace-pr_err-with-dev_err.patch deleted file mode 100644 index 3da41c16c..000000000 --- a/target/linux/layerscape/patches-4.4/7161-Staging-fsl-mc-Replace-pr_err-with-dev_err.patch +++ /dev/null @@ -1,83 +0,0 @@ -From 79b4625a6ab72251e00aa94ee22a6bfe32dbeeda Mon Sep 17 00:00:00 2001 -From: Bhumika Goyal -Date: Fri, 4 Mar 2016 19:15:55 +0530 -Subject: [PATCH 161/226] Staging: fsl-mc: Replace pr_err with dev_err - -This patch replaces pr_err calls with dev_err when the device structure -is available as dev_* prints identifying information about the struct device. -Done using coccinelle: - -@r exists@ -identifier f, s; -identifier x; -position p; -@@ -f(...,struct s *x,...) { -<+... -when != x == NULL -\(pr_err@p\|pr_debug@p\|pr_info\)(...); -...+> -} - -@r2@ -identifier fld2; -identifier r.s; -@@ - -struct s { - ... - struct device *fld2; - ... -}; - -@@ -identifier r.x,r2.fld2; -position r.p; -@@ - -( --pr_err@p -+dev_err - ( -+ &x->fld2, -...) -| -- pr_debug@p -+ dev_dbg - ( -+ &x->fld2, -...) -| -- pr_info@p -+ dev_info - ( -+ &x->fld2, -...) 
-) - -Signed-off-by: Bhumika Goyal -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/mc-bus.c | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/mc-bus.c -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -260,14 +260,15 @@ static int get_dprc_icid(struct fsl_mc_i - - error = dprc_open(mc_io, 0, container_id, &dprc_handle); - if (error < 0) { -- pr_err("dprc_open() failed: %d\n", error); -+ dev_err(&mc_io->dev, "dprc_open() failed: %d\n", error); - return error; - } - - memset(&attr, 0, sizeof(attr)); - error = dprc_get_attributes(mc_io, 0, dprc_handle, &attr); - if (error < 0) { -- pr_err("dprc_get_attributes() failed: %d\n", error); -+ dev_err(&mc_io->dev, "dprc_get_attributes() failed: %d\n", -+ error); - goto common_cleanup; - } - diff --git a/target/linux/layerscape/patches-4.4/7162-staging-fsl-mc-fix-incorrect-type-passed-to-dev_dbg-.patch b/target/linux/layerscape/patches-4.4/7162-staging-fsl-mc-fix-incorrect-type-passed-to-dev_dbg-.patch deleted file mode 100644 index 4a326020d..000000000 --- a/target/linux/layerscape/patches-4.4/7162-staging-fsl-mc-fix-incorrect-type-passed-to-dev_dbg-.patch +++ /dev/null @@ -1,48 +0,0 @@ -From 83e0f572a74bceeb3736b19b929c91d12d1d6d80 Mon Sep 17 00:00:00 2001 -From: Cihangir Akturk -Date: Mon, 14 Mar 2016 18:14:06 +0200 -Subject: [PATCH 162/226] staging: fsl-mc: fix incorrect type passed to - dev_dbg macros - -dev_dbg macros expect const struct device ** as its second -argument but here the argument we are passing is of type -struct device ** this patch fixes this error. - -Fixes: de71daf5c839 ("Staging: fsl-mc: Replace pr_debug with dev_dbg") -Cc: Bhumika Goyal -Reported-by: Guenter Roeck -Signed-off-by: Cihangir Akturk -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/mc-sys.c | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/mc-sys.c -+++ b/drivers/staging/fsl-mc/bus/mc-sys.c -@@ -328,7 +328,7 @@ static int mc_polling_wait_preemptible(s - MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS); - - if (time_after_eq(jiffies, jiffies_until_timeout)) { -- dev_dbg(&mc_io->dev, -+ dev_dbg(mc_io->dev, - "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", - mc_io->portal_phys_addr, - (unsigned int) -@@ -370,7 +370,7 @@ static int mc_polling_wait_atomic(struct - udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS); - timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS; - if (timeout_usecs == 0) { -- dev_dbg(&mc_io->dev, -+ dev_dbg(mc_io->dev, - "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", - mc_io->portal_phys_addr, - (unsigned int) -@@ -426,7 +426,7 @@ int mc_send_command(struct fsl_mc_io *mc - goto common_exit; - - if (status != MC_CMD_STATUS_OK) { -- dev_dbg(&mc_io->dev, -+ dev_dbg(mc_io->dev, - "MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n", - mc_io->portal_phys_addr, - (unsigned int)MC_CMD_HDR_READ_TOKEN(cmd->header), diff --git a/target/linux/layerscape/patches-4.4/7163-staging-fsl-mc-fix-incorrect-type-passed-to-dev_err-.patch b/target/linux/layerscape/patches-4.4/7163-staging-fsl-mc-fix-incorrect-type-passed-to-dev_err-.patch deleted file mode 100644 index 024e10f29..000000000 --- a/target/linux/layerscape/patches-4.4/7163-staging-fsl-mc-fix-incorrect-type-passed-to-dev_err-.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 79929c151efbc047a8a82f9cafcb9238465caa86 Mon Sep 17 00:00:00 2001 -From: Cihangir Akturk -Date: Mon, 14 Mar 2016 18:14:07 +0200 
-Subject: [PATCH 163/226] staging: fsl-mc: fix incorrect type passed to - dev_err macros - -dev_err macros expect const struct device ** as its second -argument, but here the argument we are passing is of type -struct device **. This patch fixes this error. - -Fixes: 454b0ec8bf99 ("Staging: fsl-mc: Replace pr_err with dev_err") -Cc: Bhumika Goyal -Reported-by: Guenter Roeck -Signed-off-by: Cihangir Akturk -Signed-off-by: Greg Kroah-Hartman --- - drivers/staging/fsl-mc/bus/mc-bus.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/mc-bus.c -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -260,14 +260,14 @@ static int get_dprc_icid(struct fsl_mc_i - - error = dprc_open(mc_io, 0, container_id, &dprc_handle); - if (error < 0) { -- dev_err(&mc_io->dev, "dprc_open() failed: %d\n", error); -+ dev_err(mc_io->dev, "dprc_open() failed: %d\n", error); - return error; - } - - memset(&attr, 0, sizeof(attr)); - error = dprc_get_attributes(mc_io, 0, dprc_handle, &attr); - if (error < 0) { -- dev_err(&mc_io->dev, "dprc_get_attributes() failed: %d\n", -+ dev_err(mc_io->dev, "dprc_get_attributes() failed: %d\n", - error); - goto common_cleanup; - } diff --git a/target/linux/layerscape/patches-4.4/7164-staging-fsl-mc-get-rid-of-mutex_locked-variables.patch b/target/linux/layerscape/patches-4.4/7164-staging-fsl-mc-get-rid-of-mutex_locked-variables.patch deleted file mode 100644 index d16485f4c..000000000 --- a/target/linux/layerscape/patches-4.4/7164-staging-fsl-mc-get-rid-of-mutex_locked-variables.patch +++ /dev/null @@ -1,207 +0,0 @@ -From d36a6b361a3a181559daebcf32e11ab18431a854 Mon Sep 17 00:00:00 2001 -From: Cihangir Akturk -Date: Sat, 9 Apr 2016 21:45:18 +0300 -Subject: [PATCH 164/226] staging: fsl-mc: get rid of mutex_locked variables - -Remove mutex_locked variables which are used to determine whether mutex is -locked, instead add another label to unlock mutex on premature exits due to -an error.
- -This patch also addresses the following warnings reported by coccinelle: - -drivers/staging/fsl-mc/bus/mc-allocator.c:237:1-7: preceding lock on line 204 -drivers/staging/fsl-mc/bus/mc-allocator.c:89:1-7: preceding lock on line 57 -drivers/staging/fsl-mc/bus/mc-allocator.c:157:1-7: preceding lock on line 124 - -Signed-off-by: Cihangir Akturk -Signed-off-by: Greg Kroah-Hartman --- - drivers/staging/fsl-mc/bus/mc-allocator.c | 61 ++++++++++++----------------- - 1 file changed, 24 insertions(+), 37 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/mc-allocator.c -+++ b/drivers/staging/fsl-mc/bus/mc-allocator.c -@@ -39,7 +39,6 @@ static int __must_check fsl_mc_resource_ - struct fsl_mc_resource *resource; - struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev; - int error = -EINVAL; -- bool mutex_locked = false; - - if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)) - goto out; -@@ -55,13 +54,12 @@ static int __must_check fsl_mc_resource_ - goto out; - - mutex_lock(&res_pool->mutex); -- mutex_locked = true; - - if (WARN_ON(res_pool->max_count < 0)) -- goto out; -+ goto out_unlock; - if (WARN_ON(res_pool->free_count < 0 || - res_pool->free_count > res_pool->max_count)) -- goto out; -+ goto out_unlock; - - resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource), - GFP_KERNEL); -@@ -69,7 +67,7 @@ static int __must_check fsl_mc_resource_ - error = -ENOMEM; - dev_err(&mc_bus_dev->dev, - "Failed to allocate memory for fsl_mc_resource\n"); -- goto out; -+ goto out_unlock; - } - - resource->type = pool_type; -@@ -82,10 +80,9 @@ static int __must_check fsl_mc_resource_ - res_pool->free_count++; - res_pool->max_count++; - error = 0; -+out_unlock: -+ mutex_unlock(&res_pool->mutex); - out: -- if (mutex_locked) -- mutex_unlock(&res_pool->mutex); -- - return error; - } - -@@ -106,7 +103,6 @@ static int __must_check fsl_mc_resource_ - struct fsl_mc_resource_pool *res_pool; - struct fsl_mc_resource *resource; - int error = -EINVAL; -- bool mutex_locked = false; - - if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type))) - goto out; -@@ -122,13 +118,12 @@ static int __must_check fsl_mc_resource_ - goto out; - - mutex_lock(&res_pool->mutex); -- mutex_locked = true; - - if (WARN_ON(res_pool->max_count <= 0)) -- goto out; -+ goto out_unlock; - if (WARN_ON(res_pool->free_count <= 0 || - res_pool->free_count > res_pool->max_count)) -- goto out; -+ goto out_unlock; - - /* - * If the device is currently allocated, its resource is not -@@ -139,7 +134,7 @@ static int __must_check fsl_mc_resource_ - dev_err(&mc_bus_dev->dev, - "Device %s cannot be removed from resource pool\n", - dev_name(&mc_dev->dev)); -- goto out; -+ goto out_unlock; - } - - list_del(&resource->node); -@@ -150,10 +145,9 @@ static int __must_check fsl_mc_resource_ - devm_kfree(&mc_bus_dev->dev, resource); - mc_dev->resource = NULL; - error = 0; -+out_unlock: -+ mutex_unlock(&res_pool->mutex); - out: -- if (mutex_locked) -- mutex_unlock(&res_pool->mutex); -- - return error; - } - -@@ -188,21 +182,19 @@ int __must_check fsl_mc_resource_allocat - struct fsl_mc_resource *resource; - struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev; - int error = -EINVAL; -- bool mutex_locked = false; - - BUILD_BUG_ON(ARRAY_SIZE(fsl_mc_pool_type_strings) != - FSL_MC_NUM_POOL_TYPES); - - *new_resource = NULL; - if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)) -- goto error; -+ goto out; - - res_pool = &mc_bus->resource_pools[pool_type]; - if (WARN_ON(res_pool->mc_bus != mc_bus)) -- goto error; -+ goto out; - -
mutex_lock(&res_pool->mutex); -- mutex_locked = true; - resource = list_first_entry_or_null(&res_pool->free_list, - struct fsl_mc_resource, node); - -@@ -212,28 +204,26 @@ int __must_check fsl_mc_resource_allocat - dev_err(&mc_bus_dev->dev, - "No more resources of type %s left\n", - fsl_mc_pool_type_strings[pool_type]); -- goto error; -+ goto out_unlock; - } - - if (WARN_ON(resource->type != pool_type)) -- goto error; -+ goto out_unlock; - if (WARN_ON(resource->parent_pool != res_pool)) -- goto error; -+ goto out_unlock; - if (WARN_ON(res_pool->free_count <= 0 || - res_pool->free_count > res_pool->max_count)) -- goto error; -+ goto out_unlock; - - list_del(&resource->node); - INIT_LIST_HEAD(&resource->node); - - res_pool->free_count--; -+ error = 0; -+out_unlock: - mutex_unlock(&res_pool->mutex); - *new_resource = resource; -- return 0; --error: -- if (mutex_locked) -- mutex_unlock(&res_pool->mutex); -- -+out: - return error; - } - EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate); -@@ -241,26 +231,23 @@ EXPORT_SYMBOL_GPL(fsl_mc_resource_alloca - void fsl_mc_resource_free(struct fsl_mc_resource *resource) - { - struct fsl_mc_resource_pool *res_pool; -- bool mutex_locked = false; - - res_pool = resource->parent_pool; - if (WARN_ON(resource->type != res_pool->type)) -- goto out; -+ return; - - mutex_lock(&res_pool->mutex); -- mutex_locked = true; - if (WARN_ON(res_pool->free_count < 0 || - res_pool->free_count >= res_pool->max_count)) -- goto out; -+ goto out_unlock; - - if (WARN_ON(!list_empty(&resource->node))) -- goto out; -+ goto out_unlock; - - list_add_tail(&resource->node, &res_pool->free_list); - res_pool->free_count++; --out: -- if (mutex_locked) -- mutex_unlock(&res_pool->mutex); -+out_unlock: -+ mutex_unlock(&res_pool->mutex); - } - EXPORT_SYMBOL_GPL(fsl_mc_resource_free); - diff --git a/target/linux/layerscape/patches-4.4/7165-staging-fsl-mc-TODO-updates.patch b/target/linux/layerscape/patches-4.4/7165-staging-fsl-mc-TODO-updates.patch deleted file mode 100644 index 858c6e21e..000000000 --- a/target/linux/layerscape/patches-4.4/7165-staging-fsl-mc-TODO-updates.patch +++ /dev/null @@ -1,49 +0,0 @@ -From 7b3bffea6d36f396faf1814088f03a6b8efe1ccb Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Mon, 11 Apr 2016 11:48:37 -0500 -Subject: [PATCH 165/226] staging: fsl-mc: TODO updates - -remove 3 of the remaining TODO items: - - -multiple root fsl-mc buses-- done in patch series starting with - commit 14f928054a05 ("staging: fsl-mc: abstract test for existence - of fsl-mc bus") - - -interrupt support-- done in patch series starting with - commit 9b1b282ccd81 ("irqdomain: Added domain bus token - DOMAIN_BUS_FSL_MC_MSI") - - -MC command serialization-- done in commit 63f2be5c3b358 ("staging: - fsl-mc: Added serialization to mc_send_command()") - -Signed-off-by: Stuart Yoder -Acked-by: German Rivera -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/TODO | 13 ------------- - 1 file changed, 13 deletions(-) - ---- a/drivers/staging/fsl-mc/TODO -+++ b/drivers/staging/fsl-mc/TODO -@@ -1,21 +1,8 @@ --* Decide if multiple root fsl-mc buses will be supported per Linux instance, -- and if so add support for this. -- - * Add at least one device driver for a DPAA2 object (child device of the - fsl-mc bus). Most likely candidate for this is adding DPAA2 Ethernet - driver support, which depends on drivers for several objects: DPNI, - DPIO, DPMAC. Other pre-requisites include: - -- * interrupt support. 
for meaningful driver support we need -- interrupts, and thus need message interrupt support by the bus -- driver. -- -Note: this has dependencies on generic MSI support work -- in process upstream, see [1] and [2]. -- -- * Management Complex (MC) command serialization. locking mechanisms -- are needed by drivers to serialize commands sent to the MC, including -- from atomic context. -- - * MC firmware uprev. The MC firmware upon which the fsl-mc - bus driver and DPAA2 object drivers are based is continuing - to evolve, so minor updates are needed to keep in sync with binary diff --git a/target/linux/layerscape/patches-4.4/7166-staging-fsl-mc-DPAA2-overview-readme-update.patch b/target/linux/layerscape/patches-4.4/7166-staging-fsl-mc-DPAA2-overview-readme-update.patch deleted file mode 100644 index f63574d64..000000000 --- a/target/linux/layerscape/patches-4.4/7166-staging-fsl-mc-DPAA2-overview-readme-update.patch +++ /dev/null @@ -1,279 +0,0 @@ -From 720bf9c9a6fdff63ecc4b382a5092c0020fb7b42 Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Mon, 11 Apr 2016 11:48:42 -0500 -Subject: [PATCH 166/226] staging: fsl-mc: DPAA2 overview readme update - -incorporated feedback from review comments, other misc cleanup/tweaks - -Signed-off-by: Stuart Yoder -Acked-by: German Rivera -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/README.txt | 138 +++++++++++++++++++++---------------- - 1 file changed, 80 insertions(+), 58 deletions(-) - ---- a/drivers/staging/fsl-mc/README.txt -+++ b/drivers/staging/fsl-mc/README.txt -@@ -11,11 +11,11 @@ Contents summary - -Overview of DPAA2 objects - -DPAA2 Linux driver architecture overview - -bus driver -- -dprc driver -+ -DPRC driver - -allocator -- -dpio driver -+ -DPIO driver - -Ethernet -- -mac -+ -MAC - - DPAA2 Overview - -------------- -@@ -37,6 +37,9 @@ interfaces, an L2 switch, or accelerator - The MC provides memory-mapped I/O command interfaces (MC portals) - which DPAA2 software drivers use to operate on DPAA2 objects: - -+The diagram below shows an overview of the DPAA2 resource management -+architecture: -+ - +--------------------------------------+ - | OS | - | DPAA2 drivers | -@@ -77,13 +80,13 @@ DPIO objects. - - Overview of DPAA2 Objects - ------------------------- --The section provides a brief overview of some key objects --in the DPAA2 hardware. A simple scenario is described illustrating --the objects involved in creating a network interfaces. -+The section provides a brief overview of some key DPAA2 objects. -+A simple scenario is described illustrating the objects involved -+in creating a network interfaces. - - -DPRC (Datapath Resource Container) - -- A DPRC is an container object that holds all the other -+ A DPRC is a container object that holds all the other - types of DPAA2 objects. In the example diagram below there - are 8 objects of 5 types (DPMCP, DPIO, DPBP, DPNI, and DPMAC) - in the container. -@@ -101,23 +104,23 @@ the objects involved in creating a netwo - | | - +---------------------------------------------------------+ - -- From the point of view of an OS, a DPRC is bus-like. Like -- a plug-and-play bus, such as PCI, DPRC commands can be used to -- enumerate the contents of the DPRC, discover the hardware -- objects present (including mappable regions and interrupts). -+ From the point of view of an OS, a DPRC behaves similar to a plug and -+ play bus, like PCI. 
DPRC commands can be used to enumerate the contents -+ of the DPRC, discover the hardware objects present (including mappable -+ regions and interrupts). - -- dprc.1 (bus) -+ DPRC.1 (bus) - | - +--+--------+-------+-------+-------+ - | | | | | -- dpmcp.1 dpio.1 dpbp.1 dpni.1 dpmac.1 -- dpmcp.2 dpio.2 -- dpmcp.3 -+ DPMCP.1 DPIO.1 DPBP.1 DPNI.1 DPMAC.1 -+ DPMCP.2 DPIO.2 -+ DPMCP.3 - - Hardware objects can be created and destroyed dynamically, providing - the ability to hot plug/unplug objects in and out of the DPRC. - -- A DPRC has a mappable mmio region (an MC portal) that can be used -+ A DPRC has a mappable MMIO region (an MC portal) that can be used - to send MC commands. It has an interrupt for status events (like - hotplug). - -@@ -137,10 +140,11 @@ the objects involved in creating a netwo - A typical Ethernet NIC is monolithic-- the NIC device contains TX/RX - queuing mechanisms, configuration mechanisms, buffer management, - physical ports, and interrupts. DPAA2 uses a more granular approach -- utilizing multiple hardware objects. Each object has specialized -- functions, and are used together by software to provide Ethernet network -- interface functionality. This approach provides efficient use of finite -- hardware resources, flexibility, and performance advantages. -+ utilizing multiple hardware objects. Each object provides specialized -+ functions. Groups of these objects are used by software to provide -+ Ethernet network interface functionality. This approach provides -+ efficient use of finite hardware resources, flexibility, and -+ performance advantages. - - The diagram below shows the objects needed for a simple - network interface configuration on a system with 2 CPUs. -@@ -168,46 +172,52 @@ the objects involved in creating a netwo - - Below the objects are described. For each object a brief description - is provided along with a summary of the kinds of operations the object -- supports and a summary of key resources of the object (mmio regions -- and irqs). -+ supports and a summary of key resources of the object (MMIO regions -+ and IRQs). - - -DPMAC (Datapath Ethernet MAC): represents an Ethernet MAC, a - hardware device that connects to an Ethernet PHY and allows - physical transmission and reception of Ethernet frames. -- -mmio regions: none -- -irqs: dpni link change -+ -MMIO regions: none -+ -IRQs: DPNI link change - -commands: set link up/down, link config, get stats, -- irq config, enable, reset -+ IRQ config, enable, reset - - -DPNI (Datapath Network Interface): contains TX/RX queues, -- network interface configuration, and rx buffer pool configuration -- mechanisms. -- -mmio regions: none -- -irqs: link state -+ network interface configuration, and RX buffer pool configuration -+ mechanisms. The TX/RX queues are in memory and are identified by -+ queue number. -+ -MMIO regions: none -+ -IRQs: link state - -commands: port config, offload config, queue config, -- parse/classify config, irq config, enable, reset -+ parse/classify config, IRQ config, enable, reset - - -DPIO (Datapath I/O): provides interfaces to enqueue and dequeue -- packets and do hardware buffer pool management operations. For -- optimum performance there is typically DPIO per CPU. This allows -- each CPU to perform simultaneous enqueue/dequeue operations. -- -mmio regions: queue operations, buffer mgmt -- -irqs: data availability, congestion notification, buffer -+ packets and do hardware buffer pool management operations. 
The DPAA2 -+ architecture separates the mechanism to access queues (the DPIO object) -+ from the queues themselves. The DPIO provides an MMIO interface to -+ enqueue/dequeue packets. To enqueue something a descriptor is written -+ to the DPIO MMIO region, which includes the target queue number. -+ There will typically be one DPIO assigned to each CPU. This allows all -+ CPUs to simultaneously perform enqueue/dequeued operations. DPIOs are -+ expected to be shared by different DPAA2 drivers. -+ -MMIO regions: queue operations, buffer management -+ -IRQs: data availability, congestion notification, buffer - pool depletion -- -commands: irq config, enable, reset -+ -commands: IRQ config, enable, reset - - -DPBP (Datapath Buffer Pool): represents a hardware buffer - pool. -- -mmio regions: none -- -irqs: none -+ -MMIO regions: none -+ -IRQs: none - -commands: enable, reset - - -DPMCP (Datapath MC Portal): provides an MC command portal. - Used by drivers to send commands to the MC to manage - objects. -- -mmio regions: MC command portal -- -irqs: command completion -- -commands: irq config, enable, reset -+ -MMIO regions: MC command portal -+ -IRQs: command completion -+ -commands: IRQ config, enable, reset - - Object Connections - ------------------ -@@ -268,22 +278,22 @@ of each driver follows. - | Stack | - +------------+ +------------+ - | Allocator |. . . . . . . | Ethernet | -- |(dpmcp,dpbp)| | (dpni) | -+ |(DPMCP,DPBP)| | (DPNI) | - +-.----------+ +---+---+----+ - . . ^ | - . . | | dequeue> - +-------------+ . | | - | DPRC driver | . +---+---V----+ +---------+ -- | (dprc) | . . . . . .| DPIO driver| | MAC | -- +----------+--+ | (dpio) | | (dpmac) | -+ | (DPRC) | . . . . . .| DPIO driver| | MAC | -+ +----------+--+ | (DPIO) | | (DPMAC) | - | +------+-----+ +-----+---+ - | | | - | | | - +----+--------------+ | +--+---+ -- | mc-bus driver | | | PHY | -+ | MC-bus driver | | | PHY | - | | | |driver| -- | /fsl-mc@80c000000 | | +--+---+ -+ | /soc/fsl-mc | | +--+---+ - +-------------------+ | | - | | - ================================ HARDWARE =========|=================|====== -@@ -298,25 +308,27 @@ of each driver follows. - - A brief description of each driver is provided below. - -- mc-bus driver -+ MC-bus driver - ------------- -- The mc-bus driver is a platform driver and is probed from an -- "/fsl-mc@xxxx" node in the device tree passed in by boot firmware. -- It is responsible for bootstrapping the DPAA2 kernel infrastructure. -+ The MC-bus driver is a platform driver and is probed from a -+ node in the device tree (compatible "fsl,qoriq-mc") passed in by boot -+ firmware. It is responsible for bootstrapping the DPAA2 kernel -+ infrastructure. - Key functions include: - -registering a new bus type named "fsl-mc" with the kernel, - and implementing bus call-backs (e.g. match/uevent/dev_groups) -- -implemeting APIs for DPAA2 driver registration and for device -+ -implementing APIs for DPAA2 driver registration and for device - add/remove -- -creates an MSI irq domain -- -do a device add of the 'root' DPRC device, which is needed -- to bootstrap things -+ -creates an MSI IRQ domain -+ -doing a 'device add' to expose the 'root' DPRC, in turn triggering -+ a bind of the root DPRC to the DPRC driver - - DPRC driver - ----------- -- The dprc-driver is bound DPRC objects and does runtime management -+ The DPRC driver is bound to DPRC objects and does runtime management - of a bus instance. 
It performs the initial bus scan of the DPRC -- and handles interrupts for container events such as hot plug. -+ and handles interrupts for container events such as hot plug by -+ re-scanning the DPRC. - - Allocator - ---------- -@@ -334,14 +346,20 @@ A brief description of each driver is pr - DPIO driver - ----------- - The DPIO driver is bound to DPIO objects and provides services that allow -- other drivers such as the Ethernet driver to receive and transmit data. -+ other drivers such as the Ethernet driver to enqueue and dequeue data for -+ their respective objects. - Key services include: - -data availability notifications - -hardware queuing operations (enqueue and dequeue of data) - -hardware buffer pool management - -+ To transmit a packet the Ethernet driver puts data on a queue and -+ invokes a DPIO API. For receive, the Ethernet driver registers -+ a data availability notification callback. To dequeue a packet -+ a DPIO API is used. -+ - There is typically one DPIO object per physical CPU for optimum -- performance, allowing each CPU to simultaneously enqueue -+ performance, allowing different CPUs to simultaneously enqueue - and dequeue data. - - The DPIO driver operates on behalf of all DPAA2 drivers -@@ -362,3 +380,7 @@ A brief description of each driver is pr - by the appropriate PHY driver via an mdio bus. The MAC driver - plays a role of being a proxy between the PHY driver and the - MC. It does this proxy via the MC commands to a DPMAC object. -+ If the PHY driver signals a link change, the MAC driver notifies -+ the MC via a DPMAC command. If a network interface is brought -+ up or down, the MC notifies the DPMAC driver via an interrupt and -+ the driver can take appropriate action. diff --git a/target/linux/layerscape/patches-4.4/7167-staging-fsl-mc-update-dpmcp-binary-interface-to-v3.0.patch b/target/linux/layerscape/patches-4.4/7167-staging-fsl-mc-update-dpmcp-binary-interface-to-v3.0.patch deleted file mode 100644 index b75960614..000000000 --- a/target/linux/layerscape/patches-4.4/7167-staging-fsl-mc-update-dpmcp-binary-interface-to-v3.0.patch +++ /dev/null @@ -1,123 +0,0 @@ -From fa245614c92ffbdaec6a56552032432b5343b1dc Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Mon, 11 Apr 2016 11:48:48 -0500 -Subject: [PATCH 167/226] staging: fsl-mc: update dpmcp binary interface to - v3.0 - -Signed-off-by: Stuart Yoder -Acked-by: German Rivera -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/dpmcp-cmd.h | 5 ++--- - drivers/staging/fsl-mc/bus/dpmcp.c | 35 ++------------------------------ - drivers/staging/fsl-mc/bus/dpmcp.h | 10 ++------- - 3 files changed, 6 insertions(+), 44 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h -+++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h -@@ -33,8 +33,8 @@ - #define _FSL_DPMCP_CMD_H - - /* DPMCP Version */ --#define DPMCP_VER_MAJOR 2 --#define DPMCP_VER_MINOR 1 -+#define DPMCP_VER_MAJOR 3 -+#define DPMCP_VER_MINOR 0 - - /* Command IDs */ - #define DPMCP_CMDID_CLOSE 0x800 -@@ -52,6 +52,5 @@ - #define DPMCP_CMDID_SET_IRQ_MASK 0x014 - #define DPMCP_CMDID_GET_IRQ_MASK 0x015 - #define DPMCP_CMDID_GET_IRQ_STATUS 0x016 --#define DPMCP_CMDID_CLEAR_IRQ_STATUS 0x017 - - #endif /* _FSL_DPMCP_CMD_H */ ---- a/drivers/staging/fsl-mc/bus/dpmcp.c -+++ b/drivers/staging/fsl-mc/bus/dpmcp.c -@@ -213,7 +213,7 @@ int dpmcp_set_irq(struct fsl_mc_io *mc_i - cmd.params[0] |= mc_enc(0, 8, irq_index); - cmd.params[0] |= mc_enc(32, 32, irq_cfg->val); - cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr); -- cmd.params[2] |= mc_enc(0, 32, 
irq_cfg->user_irq_id); -+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num); - - /* send command to mc*/ - return mc_send_command(mc_io, &cmd); -@@ -254,7 +254,7 @@ int dpmcp_get_irq(struct fsl_mc_io *mc_i - /* retrieve response parameters */ - irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32); - irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64); -- irq_cfg->user_irq_id = (int)mc_dec(cmd.params[2], 0, 32); -+ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32); - *type = (int)mc_dec(cmd.params[2], 32, 32); - return 0; - } -@@ -435,37 +435,6 @@ int dpmcp_get_irq_status(struct fsl_mc_i - } - - /** -- * dpmcp_clear_irq_status() - Clear a pending interrupt's status -- * -- * @mc_io: Pointer to MC portal's I/O object -- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -- * @token: Token of DPMCP object -- * @irq_index: The interrupt index to configure -- * @status: Bits to clear (W1C) - one bit per cause: -- * 0 = don't change -- * 1 = clear status bit -- * -- * Return: '0' on Success; Error code otherwise. -- */ --int dpmcp_clear_irq_status(struct fsl_mc_io *mc_io, -- u32 cmd_flags, -- u16 token, -- u8 irq_index, -- u32 status) --{ -- struct mc_command cmd = { 0 }; -- -- /* prepare command */ -- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLEAR_IRQ_STATUS, -- cmd_flags, token); -- cmd.params[0] |= mc_enc(0, 32, status); -- cmd.params[0] |= mc_enc(32, 8, irq_index); -- -- /* send command to mc*/ -- return mc_send_command(mc_io, &cmd); --} -- --/** - * dpmcp_get_attributes - Retrieve DPMCP attributes. - * - * @mc_io: Pointer to MC portal's I/O object ---- a/drivers/staging/fsl-mc/bus/dpmcp.h -+++ b/drivers/staging/fsl-mc/bus/dpmcp.h -@@ -82,12 +82,12 @@ int dpmcp_reset(struct fsl_mc_io *mc_io, - * struct dpmcp_irq_cfg - IRQ configuration - * @paddr: Address that must be written to signal a message-based interrupt - * @val: Value to write into irq_addr address -- * @user_irq_id: A user defined number associated with this IRQ -+ * @irq_num: A user defined number associated with this IRQ - */ - struct dpmcp_irq_cfg { - uint64_t paddr; - uint32_t val; -- int user_irq_id; -+ int irq_num; - }; - - int dpmcp_set_irq(struct fsl_mc_io *mc_io, -@@ -133,12 +133,6 @@ int dpmcp_get_irq_status(struct fsl_mc_i - uint8_t irq_index, - uint32_t *status); - --int dpmcp_clear_irq_status(struct fsl_mc_io *mc_io, -- uint32_t cmd_flags, -- uint16_t token, -- uint8_t irq_index, -- uint32_t status); -- - /** - * struct dpmcp_attr - Structure representing DPMCP attributes - * @id: DPMCP object ID diff --git a/target/linux/layerscape/patches-4.4/7168-staging-fsl-mc-update-dpbp-binary-interface-to-v2.2.patch b/target/linux/layerscape/patches-4.4/7168-staging-fsl-mc-update-dpbp-binary-interface-to-v2.2.patch deleted file mode 100644 index 474baf42b..000000000 --- a/target/linux/layerscape/patches-4.4/7168-staging-fsl-mc-update-dpbp-binary-interface-to-v2.2.patch +++ /dev/null @@ -1,208 +0,0 @@ -From de0fa9842d52e4e80576d378f32aa9ca76a4270b Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Mon, 11 Apr 2016 11:48:54 -0500 -Subject: [PATCH 168/226] staging: fsl-mc: update dpbp binary interface to - v2.2 - -Signed-off-by: Stuart Yoder -Acked-by: German Rivera -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/dpbp.c | 77 ++++++++++++++++++++++++++++- - drivers/staging/fsl-mc/include/dpbp-cmd.h | 4 +- - drivers/staging/fsl-mc/include/dpbp.h | 51 ++++++++++++++++++- - 3 files changed, 127 insertions(+), 5 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/dpbp.c -+++ 
b/drivers/staging/fsl-mc/bus/dpbp.c -@@ -293,7 +293,7 @@ int dpbp_set_irq(struct fsl_mc_io *mc_io - cmd.params[0] |= mc_enc(0, 8, irq_index); - cmd.params[0] |= mc_enc(32, 32, irq_cfg->val); - cmd.params[1] |= mc_enc(0, 64, irq_cfg->addr); -- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id); -+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num); - - /* send command to mc*/ - return mc_send_command(mc_io, &cmd); -@@ -334,7 +334,7 @@ int dpbp_get_irq(struct fsl_mc_io *mc_io - /* retrieve response parameters */ - irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32); - irq_cfg->addr = (u64)mc_dec(cmd.params[1], 0, 64); -- irq_cfg->user_irq_id = (int)mc_dec(cmd.params[2], 0, 32); -+ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32); - *type = (int)mc_dec(cmd.params[2], 32, 32); - return 0; - } -@@ -502,6 +502,7 @@ int dpbp_get_irq_status(struct fsl_mc_io - /* prepare command */ - cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS, - cmd_flags, token); -+ cmd.params[0] |= mc_enc(0, 32, *status); - cmd.params[0] |= mc_enc(32, 8, irq_index); - - /* send command to mc*/ -@@ -580,3 +581,75 @@ int dpbp_get_attributes(struct fsl_mc_io - return 0; - } - EXPORT_SYMBOL(dpbp_get_attributes); -+ -+/** -+ * dpbp_set_notifications() - Set notifications towards software -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @cfg: notifications configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_set_notifications(struct fsl_mc_io *mc_io, -+ u32 cmd_flags, -+ u16 token, -+ struct dpbp_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS, -+ cmd_flags, -+ token); -+ -+ cmd.params[0] |= mc_enc(0, 32, cfg->depletion_entry); -+ cmd.params[0] |= mc_enc(32, 32, cfg->depletion_exit); -+ cmd.params[1] |= mc_enc(0, 32, cfg->surplus_entry); -+ cmd.params[1] |= mc_enc(32, 32, cfg->surplus_exit); -+ cmd.params[2] |= mc_enc(0, 16, cfg->options); -+ cmd.params[3] |= mc_enc(0, 64, cfg->message_ctx); -+ cmd.params[4] |= mc_enc(0, 64, cfg->message_iova); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+/** -+ * dpbp_get_notifications() - Get the notifications configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @cfg: notifications configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpbp_get_notifications(struct fsl_mc_io *mc_io, -+ u32 cmd_flags, -+ u16 token, -+ struct dpbp_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_NOTIFICATIONS, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ cfg->depletion_entry = (u32)mc_dec(cmd.params[0], 0, 32); -+ cfg->depletion_exit = (u32)mc_dec(cmd.params[0], 32, 32); -+ cfg->surplus_entry = (u32)mc_dec(cmd.params[1], 0, 32); -+ cfg->surplus_exit = (u32)mc_dec(cmd.params[1], 32, 32); -+ cfg->options = (u16)mc_dec(cmd.params[2], 0, 16); -+ cfg->message_ctx = (u64)mc_dec(cmd.params[3], 0, 64); -+ cfg->message_iova = (u64)mc_dec(cmd.params[4], 0, 64); -+ -+ return 0; -+} ---- a/drivers/staging/fsl-mc/include/dpbp-cmd.h -+++ b/drivers/staging/fsl-mc/include/dpbp-cmd.h -@@ -34,7 +34,7 @@ - - /* DPBP Version */ - #define DPBP_VER_MAJOR 2 --#define DPBP_VER_MINOR 1 -+#define DPBP_VER_MINOR 2 - - /* Command IDs */ - #define DPBP_CMDID_CLOSE 0x800 -@@ -57,4 +57,6 @@ - #define DPBP_CMDID_GET_IRQ_STATUS 0x016 - #define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017 - -+#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0 -+#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1 - #endif /* _FSL_DPBP_CMD_H */ ---- a/drivers/staging/fsl-mc/include/dpbp.h -+++ b/drivers/staging/fsl-mc/include/dpbp.h -@@ -85,12 +85,12 @@ int dpbp_reset(struct fsl_mc_io *mc_io, - * struct dpbp_irq_cfg - IRQ configuration - * @addr: Address that must be written to signal a message-based interrupt - * @val: Value to write into irq_addr address -- * @user_irq_id: A user defined number associated with this IRQ -+ * @irq_num: A user defined number associated with this IRQ - */ - struct dpbp_irq_cfg { - u64 addr; - u32 val; -- int user_irq_id; -+ int irq_num; - }; - - int dpbp_set_irq(struct fsl_mc_io *mc_io, -@@ -168,6 +168,53 @@ int dpbp_get_attributes(struct fsl_mc_io - u16 token, - struct dpbp_attr *attr); - -+/** -+ * DPBP notifications options -+ */ -+ -+/** -+ * BPSCN write will attempt to allocate into a cache (coherent write) -+ */ -+#define DPBP_NOTIF_OPT_COHERENT_WRITE 0x00000001 -+ -+/** -+ * struct dpbp_notification_cfg - Structure representing DPBP notifications -+ * towards software -+ * @depletion_entry: below this threshold the pool is "depleted"; -+ * set it to '0' to disable it -+ * @depletion_exit: greater than or equal to this threshold the pool exit its -+ * "depleted" state -+ * @surplus_entry: above this threshold the pool is in "surplus" state; -+ * set it to '0' to disable it -+ * @surplus_exit: less than or equal to this threshold the pool exit its -+ * "surplus" state -+ * @message_iova: MUST be given if either 'depletion_entry' or 'surplus_entry' -+ * is not '0' (enable); I/O virtual address (must be in DMA-able memory), -+ * must be 16B aligned. 
-+ * @message_ctx: The context that will be part of the BPSCN message and will -+ * be written to 'message_iova' -+ * @options: Mask of available options; use 'DPBP_NOTIF_OPT_' values -+ */ -+struct dpbp_notification_cfg { -+ u32 depletion_entry; -+ u32 depletion_exit; -+ u32 surplus_entry; -+ u32 surplus_exit; -+ u64 message_iova; -+ u64 message_ctx; -+ u16 options; -+}; -+ -+int dpbp_set_notifications(struct fsl_mc_io *mc_io, -+ u32 cmd_flags, -+ u16 token, -+ struct dpbp_notification_cfg *cfg); -+ -+int dpbp_get_notifications(struct fsl_mc_io *mc_io, -+ u32 cmd_flags, -+ u16 token, -+ struct dpbp_notification_cfg *cfg); -+ - /** @} */ - - #endif /* __FSL_DPBP_H */ diff --git a/target/linux/layerscape/patches-4.4/7169-staging-fsl-mc-update-dprc-binary-interface-to-v5.1.patch b/target/linux/layerscape/patches-4.4/7169-staging-fsl-mc-update-dprc-binary-interface-to-v5.1.patch deleted file mode 100644 index 4db299803..000000000 --- a/target/linux/layerscape/patches-4.4/7169-staging-fsl-mc-update-dprc-binary-interface-to-v5.1.patch +++ /dev/null @@ -1,206 +0,0 @@ -From 45dce4cd82ddc618ade56747620a2a29f7d9a99d Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Mon, 11 Apr 2016 11:48:59 -0500 -Subject: [PATCH 169/226] staging: fsl-mc: update dprc binary interface to - v5.1 - -The meaning of the "status" parameter in dprc_get_irq_status -has changed, and this patch updates the flib and caller -of the API. - -Signed-off-by: Stuart Yoder -Acked-by: German Rivera -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/dprc-cmd.h | 4 ++-- - drivers/staging/fsl-mc/bus/dprc-driver.c | 1 + - drivers/staging/fsl-mc/bus/dprc.c | 26 +++++++++++++------------- - drivers/staging/fsl-mc/bus/mc-msi.c | 2 +- - drivers/staging/fsl-mc/include/dprc.h | 19 ++++++++++++------- - 5 files changed, 29 insertions(+), 23 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/dprc-cmd.h -+++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h -@@ -41,8 +41,8 @@ - #define _FSL_DPRC_CMD_H - - /* DPRC Version */ --#define DPRC_VER_MAJOR 4 --#define DPRC_VER_MINOR 0 -+#define DPRC_VER_MAJOR 5 -+#define DPRC_VER_MINOR 1 - - /* Command IDs */ - #define DPRC_CMDID_CLOSE 0x800 ---- a/drivers/staging/fsl-mc/bus/dprc-driver.c -+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c -@@ -423,6 +423,7 @@ static irqreturn_t dprc_irq0_handler_thr - if (WARN_ON(!msi_desc || msi_desc->irq != (u32)irq_num)) - goto out; - -+ status = 0; - error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, 0, - &status); - if (error < 0) { ---- a/drivers/staging/fsl-mc/bus/dprc.c -+++ b/drivers/staging/fsl-mc/bus/dprc.c -@@ -265,7 +265,7 @@ int dprc_get_irq(struct fsl_mc_io *mc_io - /* retrieve response parameters */ - irq_cfg->val = mc_dec(cmd.params[0], 0, 32); - irq_cfg->paddr = mc_dec(cmd.params[1], 0, 64); -- irq_cfg->user_irq_id = mc_dec(cmd.params[2], 0, 32); -+ irq_cfg->irq_num = mc_dec(cmd.params[2], 0, 32); - *type = mc_dec(cmd.params[2], 32, 32); - - return 0; -@@ -296,7 +296,7 @@ int dprc_set_irq(struct fsl_mc_io *mc_io - cmd.params[0] |= mc_enc(32, 8, irq_index); - cmd.params[0] |= mc_enc(0, 32, irq_cfg->val); - cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr); -- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id); -+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num); - - /* send command to mc*/ - return mc_send_command(mc_io, &cmd); -@@ -466,6 +466,7 @@ int dprc_get_irq_status(struct fsl_mc_io - /* prepare command */ - cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS, - cmd_flags, token); -+ cmd.params[0] |= mc_enc(0, 32, *status); - 
cmd.params[0] |= mc_enc(32, 8, irq_index); - - /* send command to mc*/ -@@ -948,6 +949,7 @@ int dprc_get_obj(struct fsl_mc_io *mc_io - obj_desc->state = mc_dec(cmd.params[1], 32, 32); - obj_desc->ver_major = mc_dec(cmd.params[2], 0, 16); - obj_desc->ver_minor = mc_dec(cmd.params[2], 16, 16); -+ obj_desc->flags = mc_dec(cmd.params[2], 32, 16); - obj_desc->type[0] = mc_dec(cmd.params[3], 0, 8); - obj_desc->type[1] = mc_dec(cmd.params[3], 8, 8); - obj_desc->type[2] = mc_dec(cmd.params[3], 16, 8); -@@ -1042,6 +1044,7 @@ int dprc_get_obj_desc(struct fsl_mc_io * - obj_desc->state = (u32)mc_dec(cmd.params[1], 32, 32); - obj_desc->ver_major = (u16)mc_dec(cmd.params[2], 0, 16); - obj_desc->ver_minor = (u16)mc_dec(cmd.params[2], 16, 16); -+ obj_desc->flags = mc_dec(cmd.params[2], 32, 16); - obj_desc->type[0] = (char)mc_dec(cmd.params[3], 0, 8); - obj_desc->type[1] = (char)mc_dec(cmd.params[3], 8, 8); - obj_desc->type[2] = (char)mc_dec(cmd.params[3], 16, 8); -@@ -1108,7 +1111,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *m - cmd.params[0] |= mc_enc(32, 8, irq_index); - cmd.params[0] |= mc_enc(0, 32, irq_cfg->val); - cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr); -- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id); -+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num); - cmd.params[2] |= mc_enc(32, 32, obj_id); - cmd.params[3] |= mc_enc(0, 8, obj_type[0]); - cmd.params[3] |= mc_enc(8, 8, obj_type[1]); -@@ -1189,7 +1192,7 @@ int dprc_get_obj_irq(struct fsl_mc_io *m - /* retrieve response parameters */ - irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32); - irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64); -- irq_cfg->user_irq_id = (int)mc_dec(cmd.params[2], 0, 32); -+ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32); - *type = (int)mc_dec(cmd.params[2], 32, 32); - - return 0; -@@ -1437,14 +1440,8 @@ EXPORT_SYMBOL(dprc_set_obj_label); - * @endpoint1: Endpoint 1 configuration parameters - * @endpoint2: Endpoint 2 configuration parameters - * @cfg: Connection configuration. The connection configuration is ignored for -- * connections made to DPMAC objects, where rate is set according to -- * MAC configuration. -- * The committed rate is the guaranteed rate for the connection. -- * The maximum rate is an upper limit allowed for the connection; it is -- * expected to be equal or higher than the committed rate. -- * When committed and maximum rates are both zero, the connection is set -- * to "best effort" mode, having lower priority compared to connections -- * with committed or maximum rates. -+ * connections made to DPMAC objects, where rate is retrieved from the -+ * MAC configuration. - * - * Return: '0' on Success; Error code otherwise. - */ -@@ -1555,7 +1552,10 @@ int dprc_disconnect(struct fsl_mc_io *mc - * @token: Token of DPRC object - * @endpoint1: Endpoint 1 configuration parameters - * @endpoint2: Returned endpoint 2 configuration parameters --* @state: Returned link state: 1 - link is up, 0 - link is down -+* @state: Returned link state: -+* 1 - link is up; -+* 0 - link is down; -+* -1 - no connection (endpoint2 information is irrelevant) - * - * Return: '0' on Success; -ENAVAIL if connection does not exist. 
- */ ---- a/drivers/staging/fsl-mc/bus/mc-msi.c -+++ b/drivers/staging/fsl-mc/bus/mc-msi.c -@@ -65,7 +65,7 @@ static void __fsl_mc_msi_write_msg(struc - irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) | - msi_desc->msg.address_lo; - irq_cfg.val = msi_desc->msg.data; -- irq_cfg.user_irq_id = msi_desc->irq; -+ irq_cfg.irq_num = msi_desc->irq; - - if (owner_mc_dev == mc_bus_dev) { - /* ---- a/drivers/staging/fsl-mc/include/dprc.h -+++ b/drivers/staging/fsl-mc/include/dprc.h -@@ -94,11 +94,6 @@ int dprc_close(struct fsl_mc_io *mc_io, - */ - #define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008 - --/* IOMMU bypass - indicates whether objects of this container are permitted -- * to bypass the IOMMU. -- */ --#define DPRC_CFG_OPT_IOMMU_BYPASS 0x00000010 -- - /* AIOP - Indicates that container belongs to AIOP. */ - #define DPRC_CFG_OPT_AIOP 0x00000020 - -@@ -173,12 +168,12 @@ int dprc_reset_container(struct fsl_mc_i - * struct dprc_irq_cfg - IRQ configuration - * @paddr: Address that must be written to signal a message-based interrupt - * @val: Value to write into irq_addr address -- * @user_irq_id: A user defined number associated with this IRQ -+ * @irq_num: A user defined number associated with this IRQ - */ - struct dprc_irq_cfg { - phys_addr_t paddr; - u32 val; -- int user_irq_id; -+ int irq_num; - }; - - int dprc_set_irq(struct fsl_mc_io *mc_io, -@@ -353,6 +348,14 @@ int dprc_get_obj_count(struct fsl_mc_io - #define DPRC_OBJ_STATE_PLUGGED 0x00000002 - - /** -+ * Shareability flag - Object flag indicating no memory shareability. -+ * the object generates memory accesses that are non coherent with other -+ * masters; -+ * user is responsible for proper memory handling through IOMMU configuration. -+ */ -+#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001 -+ -+/** - * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj() - * @type: Type of object: NULL terminated string - * @id: ID of logical object resource -@@ -363,6 +366,7 @@ int dprc_get_obj_count(struct fsl_mc_io - * @region_count: Number of mappable regions supported by the object - * @state: Object state: combination of DPRC_OBJ_STATE_ states - * @label: Object label -+ * @flags: Object's flags - */ - struct dprc_obj_desc { - char type[16]; -@@ -374,6 +378,7 @@ struct dprc_obj_desc { - u8 region_count; - u32 state; - char label[16]; -+ u16 flags; - }; - - int dprc_get_obj(struct fsl_mc_io *mc_io, diff --git a/target/linux/layerscape/patches-4.4/7170-staging-fsl-mc-don-t-use-object-versions-to-make-bin.patch b/target/linux/layerscape/patches-4.4/7170-staging-fsl-mc-don-t-use-object-versions-to-make-bin.patch deleted file mode 100644 index 2224d457e..000000000 --- a/target/linux/layerscape/patches-4.4/7170-staging-fsl-mc-don-t-use-object-versions-to-make-bin.patch +++ /dev/null @@ -1,136 +0,0 @@ -From 9382e1723e4de9832407f7e65bd4812b31e5a51d Mon Sep 17 00:00:00 2001 -From: Itai Katz -Date: Mon, 11 Apr 2016 11:55:40 -0500 -Subject: [PATCH 170/226] staging: fsl-mc: don't use object versions to make - binding decisions - -Up until now if the object version expected by a driver (in the API header -file) did not match the actual object version in the MC hardware the bus -driver refused to bind the object to the driver or printed out WARN_ON -dumps. - -This patch removes those checks, and the responsibility of object version -checking should now be done in the object drivers themselves. If the actual -version discovered is not supported, the driver's probe function should fail. 
-Drivers should use version checks to support new features and provide -backwards compatibility if at all possible. - -This patch also removes the checks that caused bus driver probing to fail -if the overall MC version discovered did not match the firmware version -from the API header...this was too strict. The overall MC version is -informational like a release number, and continues to be printed in the -boot log. - -Signed-off-by: Itai Katz -(Stuart: reworded commit log) -Signed-off-by: Stuart Yoder -Acked-by: German Rivera -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/dprc-driver.c | 4 +-- - drivers/staging/fsl-mc/bus/mc-allocator.c | 6 ----- - drivers/staging/fsl-mc/bus/mc-bus.c | 38 +---------------------------- - 3 files changed, 2 insertions(+), 46 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/dprc-driver.c -+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c -@@ -780,9 +780,7 @@ static int dprc_remove(struct fsl_mc_dev - static const struct fsl_mc_device_match_id match_id_table[] = { - { - .vendor = FSL_MC_VENDOR_FREESCALE, -- .obj_type = "dprc", -- .ver_major = DPRC_VER_MAJOR, -- .ver_minor = DPRC_VER_MINOR}, -+ .obj_type = "dprc"}, - {.vendor = 0x0}, - }; - ---- a/drivers/staging/fsl-mc/bus/mc-allocator.c -+++ b/drivers/staging/fsl-mc/bus/mc-allocator.c -@@ -709,20 +709,14 @@ static const struct fsl_mc_device_match_ - { - .vendor = FSL_MC_VENDOR_FREESCALE, - .obj_type = "dpbp", -- .ver_major = DPBP_VER_MAJOR, -- .ver_minor = DPBP_VER_MINOR - }, - { - .vendor = FSL_MC_VENDOR_FREESCALE, - .obj_type = "dpmcp", -- .ver_major = DPMCP_VER_MAJOR, -- .ver_minor = DPMCP_VER_MINOR - }, - { - .vendor = FSL_MC_VENDOR_FREESCALE, - .obj_type = "dpcon", -- .ver_major = DPCON_VER_MAJOR, -- .ver_minor = DPCON_VER_MINOR - }, - {.vendor = 0x0}, - }; ---- a/drivers/staging/fsl-mc/bus/mc-bus.c -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -40,8 +40,6 @@ static int fsl_mc_bus_match(struct devic - struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); - struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv); - bool found = false; -- bool major_version_mismatch = false; -- bool minor_version_mismatch = false; - - if (WARN_ON(!fsl_mc_bus_exists())) - goto out; -@@ -64,32 +62,12 @@ static int fsl_mc_bus_match(struct devic - for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) { - if (id->vendor == mc_dev->obj_desc.vendor && - strcmp(id->obj_type, mc_dev->obj_desc.type) == 0) { -- if (id->ver_major == mc_dev->obj_desc.ver_major) { -- found = true; -- if (id->ver_minor != mc_dev->obj_desc.ver_minor) -- minor_version_mismatch = true; -- } else { -- major_version_mismatch = true; -- } -+ found = true; - - break; - } - } - -- if (major_version_mismatch) { -- dev_warn(dev, -- "Major version mismatch: driver version %u.%u, MC object version %u.%u\n", -- id->ver_major, id->ver_minor, -- mc_dev->obj_desc.ver_major, -- mc_dev->obj_desc.ver_minor); -- } else if (minor_version_mismatch) { -- dev_warn(dev, -- "Minor version mismatch: driver version %u.%u, MC object version %u.%u\n", -- id->ver_major, id->ver_minor, -- mc_dev->obj_desc.ver_major, -- mc_dev->obj_desc.ver_minor); -- } -- - out: - dev_dbg(dev, "%smatched\n", found ? 
"" : "not "); - return found; -@@ -722,20 +700,6 @@ static int fsl_mc_bus_probe(struct platf - "Freescale Management Complex Firmware version: %u.%u.%u\n", - mc_version.major, mc_version.minor, mc_version.revision); - -- if (mc_version.major < MC_VER_MAJOR) { -- dev_err(&pdev->dev, -- "ERROR: MC firmware version not supported by driver (driver version: %u.%u)\n", -- MC_VER_MAJOR, MC_VER_MINOR); -- error = -ENOTSUPP; -- goto error_cleanup_mc_io; -- } -- -- if (mc_version.major > MC_VER_MAJOR) { -- dev_warn(&pdev->dev, -- "WARNING: driver may not support newer MC firmware features (driver version: %u.%u)\n", -- MC_VER_MAJOR, MC_VER_MINOR); -- } -- - error = get_mc_addr_translation_ranges(&pdev->dev, - &mc->translation_ranges, - &mc->num_translation_ranges); diff --git a/target/linux/layerscape/patches-4.4/7171-staging-fsl-mc-set-up-coherent-dma-ops-for-added-dev.patch b/target/linux/layerscape/patches-4.4/7171-staging-fsl-mc-set-up-coherent-dma-ops-for-added-dev.patch deleted file mode 100644 index d6de805e6..000000000 --- a/target/linux/layerscape/patches-4.4/7171-staging-fsl-mc-set-up-coherent-dma-ops-for-added-dev.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 3657147d6fea1977c07373325626bf50fe15bcfc Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Mon, 11 Apr 2016 11:49:13 -0500 -Subject: [PATCH 171/226] staging: fsl-mc: set up coherent dma ops for added - devices - -Unless discovered devices have the no shareability flag set, -set up coherent dma ops for them. - -Signed-off-by: Stuart Yoder -Acked-by: German Rivera -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/mc-bus.c | 4 ++++ - 1 file changed, 4 insertions(+) - ---- a/drivers/staging/fsl-mc/bus/mc-bus.c -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -469,6 +469,10 @@ int fsl_mc_device_add(struct dprc_obj_de - goto error_cleanup_dev; - } - -+ /* Objects are coherent, unless 'no shareability' flag set. */ -+ if (!(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY)) -+ arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true); -+ - /* - * The device-specific probe callback will get invoked by device_add() - */ diff --git a/target/linux/layerscape/patches-4.4/7172-staging-fsl-mc-set-cacheable-flag-for-added-devices-.patch b/target/linux/layerscape/patches-4.4/7172-staging-fsl-mc-set-cacheable-flag-for-added-devices-.patch deleted file mode 100644 index 7de34d19e..000000000 --- a/target/linux/layerscape/patches-4.4/7172-staging-fsl-mc-set-cacheable-flag-for-added-devices-.patch +++ /dev/null @@ -1,30 +0,0 @@ -From f7011c18a26d40a07b837a79d0efdad795ad7250 Mon Sep 17 00:00:00 2001 -From: Itai Katz -Date: Mon, 11 Apr 2016 11:55:48 -0500 -Subject: [PATCH 172/226] staging: fsl-mc: set cacheable flag for added - devices if applicable - -Some DPAA2 devices have mmio regions that should be mapped as -cacheable by drivers. Set IORESOURCE_CACHEABLE in the region's -flags if applicable. 
- -Signed-off-by: Itai Katz -[Stuart: update subject and commit message] -Signed-off-by: Stuart Yoder -Acked-by: German Rivera -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/mc-bus.c | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/drivers/staging/fsl-mc/bus/mc-bus.c -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -354,6 +354,8 @@ static int fsl_mc_device_get_mmio_region - regions[i].end = regions[i].start + region_desc.size - 1; - regions[i].name = "fsl-mc object MMIO region"; - regions[i].flags = IORESOURCE_IO; -+ if (region_desc.flags & DPRC_REGION_CACHEABLE) -+ regions[i].flags |= IORESOURCE_CACHEABLE; - } - - mc_dev->regions = regions; diff --git a/target/linux/layerscape/patches-4.4/7173-staging-fsl-mc-get-version-of-root-dprc-from-MC-hard.patch b/target/linux/layerscape/patches-4.4/7173-staging-fsl-mc-get-version-of-root-dprc-from-MC-hard.patch deleted file mode 100644 index ade5b33c4..000000000 --- a/target/linux/layerscape/patches-4.4/7173-staging-fsl-mc-get-version-of-root-dprc-from-MC-hard.patch +++ /dev/null @@ -1,106 +0,0 @@ -From 2df13a365ecda7e3321cf9d4e1a9ebd63e58c28b Mon Sep 17 00:00:00 2001 -From: Itai Katz -Date: Mon, 11 Apr 2016 11:55:55 -0500 -Subject: [PATCH 173/226] staging: fsl-mc: get version of root dprc from MC - hardware - -The root dprc is discovered as a platform device in the device tree. The -version of that dprc was previously set using hardcoded values from the API -header in the kernel. This patch removes the use of the hardcoded version -numbers and instead reads the actual dprc version from the hardware. - -Signed-off-by: Itai Katz -(Stuart: resolved merge conflict, updated commit subject/log) -Signed-off-by: Stuart Yoder -Acked-by: German Rivera -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/mc-bus.c | 45 ++++++++++++++++++++++++++++------- - 1 file changed, 37 insertions(+), 8 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/mc-bus.c -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -229,11 +229,10 @@ static bool fsl_mc_is_root_dprc(struct d - return dev == root_dprc_dev; - } - --static int get_dprc_icid(struct fsl_mc_io *mc_io, -- int container_id, u16 *icid) -+static int get_dprc_attr(struct fsl_mc_io *mc_io, -+ int container_id, struct dprc_attributes *attr) - { - u16 dprc_handle; -- struct dprc_attributes attr; - int error; - - error = dprc_open(mc_io, 0, container_id, &dprc_handle); -@@ -242,15 +241,14 @@ static int get_dprc_icid(struct fsl_mc_i - return error; - } - -- memset(&attr, 0, sizeof(attr)); -- error = dprc_get_attributes(mc_io, 0, dprc_handle, &attr); -+ memset(attr, 0, sizeof(struct dprc_attributes)); -+ error = dprc_get_attributes(mc_io, 0, dprc_handle, attr); - if (error < 0) { - dev_err(mc_io->dev, "dprc_get_attributes() failed: %d\n", - error); - goto common_cleanup; - } - -- *icid = attr.icid; - error = 0; - - common_cleanup: -@@ -258,6 +256,34 @@ common_cleanup: - dprc_close(mc_io, 0, dprc_handle); - return error; - } - -+static int get_dprc_icid(struct fsl_mc_io *mc_io, -+ int container_id, u16 *icid) -+{ -+ struct dprc_attributes attr; -+ int error; -+ -+ error = get_dprc_attr(mc_io, container_id, &attr); -+ if (error == 0) -+ *icid = attr.icid; -+ -+ return error; -+} -+ -+static int get_dprc_version(struct fsl_mc_io *mc_io, -+ int container_id, u16 *major, u16 *minor) -+{ -+ struct dprc_attributes attr; -+ int error; -+ -+ error = get_dprc_attr(mc_io, container_id, &attr); -+ if (error == 0) { -+ *major = attr.version.major; -+ *minor = attr.version.minor; -+ } -+ -+ return error; -+} -+ - static int 
translate_mc_addr(struct fsl_mc_device *mc_dev, - enum dprc_region_type mc_region_type, - u64 mc_offset, phys_addr_t *phys_addr) -@@ -719,11 +745,14 @@ static int fsl_mc_bus_probe(struct platf - goto error_cleanup_mc_io; - } - -+ error = get_dprc_version(mc_io, container_id, -+ &obj_desc.ver_major, &obj_desc.ver_minor); -+ if (error < 0) -+ goto error_cleanup_mc_io; -+ - obj_desc.vendor = FSL_MC_VENDOR_FREESCALE; - strcpy(obj_desc.type, "dprc"); - obj_desc.id = container_id; -- obj_desc.ver_major = DPRC_VER_MAJOR; -- obj_desc.ver_minor = DPRC_VER_MINOR; - obj_desc.irq_count = 1; - obj_desc.region_count = 0; - diff --git a/target/linux/layerscape/patches-4.4/7174-staging-fsl-mc-add-dprc-version-check.patch b/target/linux/layerscape/patches-4.4/7174-staging-fsl-mc-add-dprc-version-check.patch deleted file mode 100644 index a5ec35c66..000000000 --- a/target/linux/layerscape/patches-4.4/7174-staging-fsl-mc-add-dprc-version-check.patch +++ /dev/null @@ -1,90 +0,0 @@ -From 653898b483e5448084b15214a8c20959b418dbe7 Mon Sep 17 00:00:00 2001 -From: Itai Katz -Date: Mon, 11 Apr 2016 11:56:05 -0500 -Subject: [PATCH 174/226] staging: fsl-mc: add dprc version check - -The dprc driver supports dprc version 5.0 and above. -This patch adds the code to check the version. - -Signed-off-by: Itai Katz -(Stuart: resolved merge conflicts, split dpseci quirk into separate patch) -Signed-off-by: Stuart Yoder -Acked-by: German Rivera -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/dprc-cmd.h | 6 +++--- - drivers/staging/fsl-mc/bus/dprc-driver.c | 19 +++++++++++++++++++ - drivers/staging/fsl-mc/bus/mc-bus.c | 1 + - drivers/staging/fsl-mc/include/mc-private.h | 2 ++ - 4 files changed, 25 insertions(+), 3 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/dprc-cmd.h -+++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h -@@ -40,9 +40,9 @@ - #ifndef _FSL_DPRC_CMD_H - #define _FSL_DPRC_CMD_H - --/* DPRC Version */ --#define DPRC_VER_MAJOR 5 --#define DPRC_VER_MINOR 1 -+/* Minimal supported DPRC Version */ -+#define DPRC_MIN_VER_MAJOR 5 -+#define DPRC_MIN_VER_MINOR 0 - - /* Command IDs */ - #define DPRC_CMDID_CLOSE 0x800 ---- a/drivers/staging/fsl-mc/bus/dprc-driver.c -+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c -@@ -693,6 +693,25 @@ static int dprc_probe(struct fsl_mc_devi - goto error_cleanup_msi_domain; - } - -+ error = dprc_get_attributes(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ &mc_bus->dprc_attr); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, "dprc_get_attributes() failed: %d\n", -+ error); -+ goto error_cleanup_open; -+ } -+ -+ if (mc_bus->dprc_attr.version.major < DPRC_MIN_VER_MAJOR || -+ (mc_bus->dprc_attr.version.major == DPRC_MIN_VER_MAJOR && -+ mc_bus->dprc_attr.version.minor < DPRC_MIN_VER_MINOR)) { -+ dev_err(&mc_dev->dev, -+ "ERROR: DPRC version %d.%d not supported\n", -+ mc_bus->dprc_attr.version.major, -+ mc_bus->dprc_attr.version.minor); -+ error = -ENOTSUPP; -+ goto error_cleanup_open; -+ } -+ - mutex_init(&mc_bus->scan_mutex); - - /* ---- a/drivers/staging/fsl-mc/bus/mc-bus.c -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -745,6 +745,7 @@ static int fsl_mc_bus_probe(struct platf - goto error_cleanup_mc_io; - } - -+ memset(&obj_desc, 0, sizeof(struct dprc_obj_desc)); - error = get_dprc_version(mc_io, container_id, - &obj_desc.ver_major, &obj_desc.ver_minor); - if (error < 0) ---- a/drivers/staging/fsl-mc/include/mc-private.h -+++ b/drivers/staging/fsl-mc/include/mc-private.h -@@ -94,12 +94,14 @@ struct fsl_mc_resource_pool { - * from the physical DPRC. 
- * @irq_resources: Pointer to array of IRQ objects for the IRQ pool - * @scan_mutex: Serializes bus scanning -+ * @dprc_attr: DPRC attributes - */ - struct fsl_mc_bus { - struct fsl_mc_device mc_dev; - struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES]; - struct fsl_mc_device_irq *irq_resources; - struct mutex scan_mutex; /* serializes bus scanning */ -+ struct dprc_attributes dprc_attr; - }; - - #define to_fsl_mc_bus(_mc_dev) \ diff --git a/target/linux/layerscape/patches-4.4/7175-staging-fsl-mc-add-quirk-handling-for-dpseci-objects.patch b/target/linux/layerscape/patches-4.4/7175-staging-fsl-mc-add-quirk-handling-for-dpseci-objects.patch deleted file mode 100644 index 36c6f89fc..000000000 --- a/target/linux/layerscape/patches-4.4/7175-staging-fsl-mc-add-quirk-handling-for-dpseci-objects.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 5366dc8896ca7cf028db73643860821b189a1dfd Mon Sep 17 00:00:00 2001 -From: Horia Geanta -Date: Mon, 11 Apr 2016 11:50:26 -0500 -Subject: [PATCH 175/226] staging: fsl-mc: add quirk handling for dpseci - objects < 4.0 - -dpseci objects < 4.0 are not coherent-- in spite of the fact -that the MC reports them to be coherent in certain versions. -Add a special case to set the no shareability flag for dpseci -objects < 4.0. - -Signed-off-by: Horia Geanta -(Stuart: reworded commit message, updated comment in patch) -Signed-off-by: Stuart Yoder -Acked-by: German Rivera -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/dprc-driver.c | 9 +++++++++ - 1 file changed, 9 insertions(+) - ---- a/drivers/staging/fsl-mc/bus/dprc-driver.c -+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c -@@ -312,6 +312,15 @@ int dprc_scan_objects(struct fsl_mc_devi - continue; - } - -+ /* -+ * add a quirk for all versions of dpsec < 4.0...none -+ * are coherent regardless of what the MC reports. -+ */ -+ if ((strcmp(obj_desc->type, "dpseci") == 0) && -+ (obj_desc->ver_major < 4)) -+ obj_desc->flags |= -+ DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY; -+ - irq_count += obj_desc->irq_count; - dev_dbg(&mc_bus_dev->dev, - "Discovered object: type %s, id %d\n", diff --git a/target/linux/layerscape/patches-4.4/7176-staging-fsl-mc-add-dpmcp-version-check.patch b/target/linux/layerscape/patches-4.4/7176-staging-fsl-mc-add-dpmcp-version-check.patch deleted file mode 100644 index 148a724c0..000000000 --- a/target/linux/layerscape/patches-4.4/7176-staging-fsl-mc-add-dpmcp-version-check.patch +++ /dev/null @@ -1,56 +0,0 @@ -From 035789ffb3b89b9764d7cc79d209a5795c18fa93 Mon Sep 17 00:00:00 2001 -From: Itai Katz -Date: Mon, 11 Apr 2016 11:56:11 -0500 -Subject: [PATCH 176/226] staging: fsl-mc: add dpmcp version check - -The dpmcp driver supports dpmcp version 3.0 and above. -This patch adds the code to check the version. 
- -Signed-off-by: Itai Katz -Signed-off-by: Stuart Yoder -Acked-by: German Rivera - - drivers/staging/fsl-mc/bus/dpmcp-cmd.h | 6 +++--- - drivers/staging/fsl-mc/bus/mc-allocator.c | 11 +++++++++++ - 2 files changed, 14 insertions(+), 3 deletions(-) -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/dpmcp-cmd.h | 6 +++--- - drivers/staging/fsl-mc/bus/mc-allocator.c | 11 +++++++++++ - 2 files changed, 14 insertions(+), 3 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h -+++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h -@@ -32,9 +32,9 @@ - #ifndef _FSL_DPMCP_CMD_H - #define _FSL_DPMCP_CMD_H - --/* DPMCP Version */ --#define DPMCP_VER_MAJOR 3 --#define DPMCP_VER_MINOR 0 -+/* Minimal supported DPMCP Version */ -+#define DPMCP_MIN_VER_MAJOR 3 -+#define DPMCP_MIN_VER_MINOR 0 - - /* Command IDs */ - #define DPMCP_CMDID_CLOSE 0x800 ---- a/drivers/staging/fsl-mc/bus/mc-allocator.c -+++ b/drivers/staging/fsl-mc/bus/mc-allocator.c -@@ -297,6 +297,17 @@ int __must_check fsl_mc_portal_allocate( - if (WARN_ON(!dpmcp_dev)) - goto error_cleanup_resource; - -+ if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR || -+ (dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR && -+ dpmcp_dev->obj_desc.ver_minor < DPMCP_MIN_VER_MINOR)) { -+ dev_err(&dpmcp_dev->dev, -+ "ERROR: Version %d.%d of DPMCP not supported.\n", -+ dpmcp_dev->obj_desc.ver_major, -+ dpmcp_dev->obj_desc.ver_minor); -+ error = -ENOTSUPP; -+ goto error_cleanup_resource; -+ } -+ - if (WARN_ON(dpmcp_dev->obj_desc.region_count == 0)) - goto error_cleanup_resource; - diff --git a/target/linux/layerscape/patches-4.4/7177-staging-fsl-mc-return-EINVAL-for-all-fsl_mc_portal_a.patch b/target/linux/layerscape/patches-4.4/7177-staging-fsl-mc-return-EINVAL-for-all-fsl_mc_portal_a.patch deleted file mode 100644 index 3a5a3f564..000000000 --- a/target/linux/layerscape/patches-4.4/7177-staging-fsl-mc-return-EINVAL-for-all-fsl_mc_portal_a.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 324147c1a6806301d9441a8d83c7c5ac880140cd Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Horia=20Geant=C4=83?= -Date: Mon, 11 Apr 2016 11:56:16 -0500 -Subject: [PATCH 177/226] staging: fsl-mc: return -EINVAL for all - fsl_mc_portal_allocate() failures -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -There are some error paths that allow for a NULL new_mc_io and err = 0 -return code. Return -EINVAL instead. 
- -Signed-off-by: Horia Geantă -Signed-off-by: Stuart Yoder -Acked-by: German Rivera -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/mc-allocator.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/staging/fsl-mc/bus/mc-allocator.c -+++ b/drivers/staging/fsl-mc/bus/mc-allocator.c -@@ -293,6 +293,7 @@ int __must_check fsl_mc_portal_allocate( - if (error < 0) - return error; - -+ error = -EINVAL; - dpmcp_dev = resource->data; - if (WARN_ON(!dpmcp_dev)) - goto error_cleanup_resource; diff --git a/target/linux/layerscape/patches-4.4/7178-staging-fsl-mc-bus-Drop-warning.patch b/target/linux/layerscape/patches-4.4/7178-staging-fsl-mc-bus-Drop-warning.patch deleted file mode 100644 index dae0a5701..000000000 --- a/target/linux/layerscape/patches-4.4/7178-staging-fsl-mc-bus-Drop-warning.patch +++ /dev/null @@ -1,47 +0,0 @@ -From 9821898bbfa5a21254baafe19b3cc97516fc6019 Mon Sep 17 00:00:00 2001 -From: Matthias Brugger -Date: Thu, 14 Apr 2016 23:24:26 +0200 -Subject: [PATCH 178/226] staging: fsl-mc: bus: Drop warning - -When updating the irq_chip and msi_domain_ops, the code checks for -already present functions. -When more than one ITS controller is present in the system, -irq_chip and msi_domain_ops are already set and a warning is invoked. - -This patch deletes the warning, as the functions are already set to -the needed callbacks. - -Signed-off-by: Matthias Brugger -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/mc-msi.c | 12 ++++-------- - 1 file changed, 4 insertions(+), 8 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/mc-msi.c -+++ b/drivers/staging/fsl-mc/bus/mc-msi.c -@@ -37,10 +37,8 @@ static void fsl_mc_msi_update_dom_ops(st - /* - * set_desc should not be set by the caller - */ -- if (WARN_ON(ops->set_desc)) -- return; -- -- ops->set_desc = fsl_mc_msi_set_desc; -+ if (ops->set_desc == NULL) -+ ops->set_desc = fsl_mc_msi_set_desc; - } - - static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev, -@@ -129,10 +127,8 @@ static void fsl_mc_msi_update_chip_ops(s - /* - * irq_write_msi_msg should not be set by the caller - */ -- if (WARN_ON(chip->irq_write_msi_msg)) -- return; -- -- chip->irq_write_msi_msg = fsl_mc_msi_write_msg; -+ if (chip->irq_write_msi_msg == NULL) -+ chip->irq_write_msi_msg = fsl_mc_msi_write_msg; - } - - /** diff --git a/target/linux/layerscape/patches-4.4/7179-staging-fsl-mc-add-support-for-the-modalias-sysfs-at.patch b/target/linux/layerscape/patches-4.4/7179-staging-fsl-mc-add-support-for-the-modalias-sysfs-at.patch deleted file mode 100644 index 030c195f7..000000000 --- a/target/linux/layerscape/patches-4.4/7179-staging-fsl-mc-add-support-for-the-modalias-sysfs-at.patch +++ /dev/null @@ -1,54 +0,0 @@ -From 227c693741ce1fbf0ad146c87f03369334941f2e Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Wed, 22 Jun 2016 16:40:42 -0500 -Subject: [PATCH 179/226] staging: fsl-mc: add support for the modalias sysfs - attribute - -In order to support uevent-based module loading, implement modalias support -for the fsl-mc bus driver. Aliases are based on vendor and object/device -id and are of the form "fsl-mc:vNdN".
- -Signed-off-by: Stuart Yoder -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/mc-bus.c | 25 +++++++++++++++++++++++++ - 1 file changed, 25 insertions(+) - ---- a/drivers/staging/fsl-mc/bus/mc-bus.c -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -82,10 +82,35 @@ static int fsl_mc_bus_uevent(struct devi - return 0; - } - -+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, -+ char *buf) -+{ -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ -+ return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor, -+ mc_dev->obj_desc.type); -+} -+static DEVICE_ATTR_RO(modalias); -+ -+static struct attribute *fsl_mc_dev_attrs[] = { -+ &dev_attr_modalias.attr, -+ NULL, -+}; -+ -+static const struct attribute_group fsl_mc_dev_group = { -+ .attrs = fsl_mc_dev_attrs, -+}; -+ -+static const struct attribute_group *fsl_mc_dev_groups[] = { -+ &fsl_mc_dev_group, -+ NULL, -+}; -+ - struct bus_type fsl_mc_bus_type = { - .name = "fsl-mc", - .match = fsl_mc_bus_match, - .uevent = fsl_mc_bus_uevent, -+ .dev_groups = fsl_mc_dev_groups, - }; - EXPORT_SYMBOL_GPL(fsl_mc_bus_type); - diff --git a/target/linux/layerscape/patches-4.4/7180-staging-fsl-mc-implement-uevent-callback-and-set-the.patch b/target/linux/layerscape/patches-4.4/7180-staging-fsl-mc-implement-uevent-callback-and-set-the.patch deleted file mode 100644 index 266219b8a..000000000 --- a/target/linux/layerscape/patches-4.4/7180-staging-fsl-mc-implement-uevent-callback-and-set-the.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 721966c3990bc4596c6270afc1ea68c756b72f0d Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Wed, 22 Jun 2016 16:40:43 -0500 -Subject: [PATCH 180/226] staging: fsl-mc: implement uevent callback and set - the modalias - -Replace placeholder code in the uevent callback to properly -set the MODALIAS env variable. 
- -Signed-off-by: Stuart Yoder -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/mc-bus.c | 8 +++++++- - 1 file changed, 7 insertions(+), 1 deletion(-) - ---- a/drivers/staging/fsl-mc/bus/mc-bus.c -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -78,7 +78,13 @@ out: - */ - static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env) - { -- pr_debug("%s invoked\n", __func__); -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ -+ if (add_uevent_var(env, "MODALIAS=fsl-mc:v%08Xd%s", -+ mc_dev->obj_desc.vendor, -+ mc_dev->obj_desc.type)) -+ return -ENOMEM; -+ - return 0; - } - diff --git a/target/linux/layerscape/patches-4.4/7181-staging-fsl-mc-clean-up-the-device-id-struct.patch b/target/linux/layerscape/patches-4.4/7181-staging-fsl-mc-clean-up-the-device-id-struct.patch deleted file mode 100644 index e5c58debd..000000000 --- a/target/linux/layerscape/patches-4.4/7181-staging-fsl-mc-clean-up-the-device-id-struct.patch +++ /dev/null @@ -1,85 +0,0 @@ -From c7b1e04ae4f47323800ca2b3d3430ecf1d9ed7df Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Wed, 22 Jun 2016 16:40:44 -0500 -Subject: [PATCH 181/226] staging: fsl-mc: clean up the device id struct - --rename the struct used for fsl-mc device ids to be more - consistent with other busses --remove the now obsolete and unused version fields - -Signed-off-by: Stuart Yoder -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/dprc-driver.c | 2 +- - drivers/staging/fsl-mc/bus/mc-allocator.c | 2 +- - drivers/staging/fsl-mc/bus/mc-bus.c | 2 +- - drivers/staging/fsl-mc/include/mc.h | 10 +++------- - 4 files changed, 6 insertions(+), 10 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/dprc-driver.c -+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c -@@ -805,7 +805,7 @@ static int dprc_remove(struct fsl_mc_dev - return 0; - } - --static const struct fsl_mc_device_match_id match_id_table[] = { -+static const struct fsl_mc_device_id match_id_table[] = { - { - .vendor = FSL_MC_VENDOR_FREESCALE, - .obj_type = "dprc"}, ---- a/drivers/staging/fsl-mc/bus/mc-allocator.c -+++ b/drivers/staging/fsl-mc/bus/mc-allocator.c -@@ -717,7 +717,7 @@ static int fsl_mc_allocator_remove(struc - return 0; - } - --static const struct fsl_mc_device_match_id match_id_table[] = { -+static const struct fsl_mc_device_id match_id_table[] = { - { - .vendor = FSL_MC_VENDOR_FREESCALE, - .obj_type = "dpbp", ---- a/drivers/staging/fsl-mc/bus/mc-bus.c -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -36,7 +36,7 @@ static bool fsl_mc_is_root_dprc(struct d - */ - static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv) - { -- const struct fsl_mc_device_match_id *id; -+ const struct fsl_mc_device_id *id; - struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); - struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv); - bool found = false; ---- a/drivers/staging/fsl-mc/include/mc.h -+++ b/drivers/staging/fsl-mc/include/mc.h -@@ -39,7 +39,7 @@ struct fsl_mc_bus; - */ - struct fsl_mc_driver { - struct device_driver driver; -- const struct fsl_mc_device_match_id *match_id_table; -+ const struct fsl_mc_device_id *match_id_table; - int (*probe)(struct fsl_mc_device *dev); - int (*remove)(struct fsl_mc_device *dev); - void (*shutdown)(struct fsl_mc_device *dev); -@@ -51,20 +51,16 @@ struct fsl_mc_driver { - container_of(_drv, struct fsl_mc_driver, driver) - - /** -- * struct fsl_mc_device_match_id - MC object device Id entry for driver matching -+ * struct fsl_mc_device_id - MC object device Id entry for driver matching - * 
@vendor: vendor ID - * @obj_type: MC object type -- * @ver_major: MC object version major number -- * @ver_minor: MC object version minor number - * - * Type of entries in the "device Id" table for MC object devices supported by - * a MC object device driver. The last entry of the table has vendor set to 0x0 - */ --struct fsl_mc_device_match_id { -+struct fsl_mc_device_id { - u16 vendor; - const char obj_type[16]; -- u32 ver_major; -- u32 ver_minor; - }; - - /** diff --git a/target/linux/layerscape/patches-4.4/7182-staging-fsl-mc-add-support-for-device-table-matching.patch b/target/linux/layerscape/patches-4.4/7182-staging-fsl-mc-add-support-for-device-table-matching.patch deleted file mode 100644 index 5ebb4591a..000000000 --- a/target/linux/layerscape/patches-4.4/7182-staging-fsl-mc-add-support-for-device-table-matching.patch +++ /dev/null @@ -1,98 +0,0 @@ -From bd83c4253992d263cb83108e26b4687058f11deb Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Wed, 22 Jun 2016 16:40:45 -0500 -Subject: [PATCH 182/226] staging: fsl-mc: add support for device table - matching - -Move the definition of fsl_mc_device_id to its proper location in -mod_devicetable.h, and add fsl-mc bus support to devicetable-offsets.c -and file2alias.c to enable device table matching. With this patch udev -based module loading of fsl-mc drivers is supported. - -Signed-off-by: Stuart Yoder -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/include/mc.h | 13 ------------- - include/linux/mod_devicetable.h | 16 ++++++++++++++++ - scripts/mod/devicetable-offsets.c | 4 ++++ - scripts/mod/file2alias.c | 12 ++++++++++++ - 4 files changed, 32 insertions(+), 13 deletions(-) - ---- a/drivers/staging/fsl-mc/include/mc.h -+++ b/drivers/staging/fsl-mc/include/mc.h -@@ -51,19 +51,6 @@ struct fsl_mc_driver { - container_of(_drv, struct fsl_mc_driver, driver) - - /** -- * struct fsl_mc_device_id - MC object device Id entry for driver matching -- * @vendor: vendor ID -- * @obj_type: MC object type -- * -- * Type of entries in the "device Id" table for MC object devices supported by -- * a MC object device driver. The last entry of the table has vendor set to 0x0 -- */ --struct fsl_mc_device_id { -- u16 vendor; -- const char obj_type[16]; --}; -- --/** - * enum fsl_mc_pool_type - Types of allocatable MC bus resources - * - * Entries in these enum are used as indices in the array of resource ---- a/include/linux/mod_devicetable.h -+++ b/include/linux/mod_devicetable.h -@@ -657,4 +657,20 @@ struct ulpi_device_id { - kernel_ulong_t driver_data; - }; - -+/** -+ * struct fsl_mc_device_id - MC object device identifier -+ * @vendor: vendor ID -+ * @obj_type: MC object type -+ * @ver_major: MC object version major number -+ * @ver_minor: MC object version minor number -+ * -+ * Type of entries in the "device Id" table for MC object devices supported by -+ * a MC object device driver. 
The last entry of the table has vendor set to 0x0 -+ */ -+struct fsl_mc_device_id { -+ __u16 vendor; -+ const char obj_type[16]; -+}; -+ -+ - #endif /* LINUX_MOD_DEVICETABLE_H */ ---- a/scripts/mod/devicetable-offsets.c -+++ b/scripts/mod/devicetable-offsets.c -@@ -202,5 +202,9 @@ int main(void) - DEVID_FIELD(hda_device_id, rev_id); - DEVID_FIELD(hda_device_id, api_version); - -+ DEVID(fsl_mc_device_id); -+ DEVID_FIELD(fsl_mc_device_id, vendor); -+ DEVID_FIELD(fsl_mc_device_id, obj_type); -+ - return 0; - } ---- a/scripts/mod/file2alias.c -+++ b/scripts/mod/file2alias.c -@@ -1271,6 +1271,18 @@ static int do_hda_entry(const char *file - } - ADD_TO_DEVTABLE("hdaudio", hda_device_id, do_hda_entry); - -+/* Looks like: fsl-mc:vNdN */ -+static int do_fsl_mc_entry(const char *filename, void *symval, -+ char *alias) -+{ -+ DEF_FIELD(symval, fsl_mc_device_id, vendor); -+ DEF_FIELD_ADDR(symval, fsl_mc_device_id, obj_type); -+ -+ sprintf(alias, "fsl-mc:v%08Xd%s", vendor, *obj_type); -+ return 1; -+} -+ADD_TO_DEVTABLE("fslmc", fsl_mc_device_id, do_fsl_mc_entry); -+ - /* Does namelen bytes of name exactly match the symbol? */ - static bool sym_is(const char *name, unsigned namelen, const char *symbol) - { diff --git a/target/linux/layerscape/patches-4.4/7183-staging-fsl-mc-export-mc_get_version.patch b/target/linux/layerscape/patches-4.4/7183-staging-fsl-mc-export-mc_get_version.patch deleted file mode 100644 index 330a1a885..000000000 --- a/target/linux/layerscape/patches-4.4/7183-staging-fsl-mc-export-mc_get_version.patch +++ /dev/null @@ -1,23 +0,0 @@ -From 4087dc71f82a71c25f9d051773094f4ae3f4238d Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Wed, 22 Jun 2016 16:40:46 -0500 -Subject: [PATCH 183/226] staging: fsl-mc: export mc_get_version - -some drivers (built as modules) rely on mc_get_version() - -Signed-off-by: Stuart Yoder -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/dpmng.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/staging/fsl-mc/bus/dpmng.c -+++ b/drivers/staging/fsl-mc/bus/dpmng.c -@@ -67,6 +67,7 @@ int mc_get_version(struct fsl_mc_io *mc_ - - return 0; - } -+EXPORT_SYMBOL(mc_get_version); - - /** - * dpmng_get_container_id() - Get container ID associated with a given portal. 
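For context, the deleted patches above (modalias attribute, uevent MODALIAS, device-table matching) together give fsl-mc objects the standard udev autoload path: the bus exports an alias of the form "fsl-mc:v<vendor>d<obj_type>", and file2alias generates the matching aliases from a driver's fsl_mc_device_id table. The following is a minimal sketch, not code from these patches, of an object driver that would bind through that table. It assumes the fsl_mc_driver_register()/fsl_mc_driver_unregister() helpers declared in the staging mc.h and the "fslmc" device-table name registered in file2alias.c above; the dpbp binding and all example_* names are hypothetical.

	#include <linux/module.h>
	#include <linux/device.h>
	#include "../include/mc.h"	/* fsl-mc bus API (staging tree path; adjust for an out-of-tree build) */

	/* Bind to DPBP objects; the vendor == 0x0 entry terminates the table. */
	static const struct fsl_mc_device_id example_match_id_table[] = {
		{ .vendor = FSL_MC_VENDOR_FREESCALE, .obj_type = "dpbp" },
		{ .vendor = 0x0 },
	};
	/* "fslmc" is the table name wired into file2alias.c by the patch above. */
	MODULE_DEVICE_TABLE(fslmc, example_match_id_table);

	static int example_probe(struct fsl_mc_device *mc_dev)
	{
		/* obj_desc carries the type/id the bus matched on. */
		dev_info(&mc_dev->dev, "bound object %s.%d\n",
			 mc_dev->obj_desc.type, mc_dev->obj_desc.id);
		return 0;
	}

	static int example_remove(struct fsl_mc_device *mc_dev)
	{
		return 0;
	}

	static struct fsl_mc_driver example_driver = {
		.driver = {
			.name	= "fsl-mc-example",
			.owner	= THIS_MODULE,
		},
		.match_id_table	= example_match_id_table,
		.probe		= example_probe,
		.remove		= example_remove,
	};

	static int __init example_init(void)
	{
		return fsl_mc_driver_register(&example_driver);
	}
	module_init(example_init);

	static void __exit example_exit(void)
	{
		fsl_mc_driver_unregister(&example_driver);
	}
	module_exit(example_exit);

	MODULE_LICENSE("GPL");

Under these assumptions (and with FSL_MC_VENDOR_FREESCALE defined as 0x1957 in mc.h), a discovered dpbp object would expose the modalias "fsl-mc:v00001957ddpbp" in sysfs and in its uevent, which modprobe resolves to this module via the alias that file2alias emits from the table.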
diff --git a/target/linux/layerscape/patches-4.4/7184-staging-fsl-mc-make-fsl_mc_is_root_dprc-global.patch b/target/linux/layerscape/patches-4.4/7184-staging-fsl-mc-make-fsl_mc_is_root_dprc-global.patch deleted file mode 100644 index 92b9c59a0..000000000 --- a/target/linux/layerscape/patches-4.4/7184-staging-fsl-mc-make-fsl_mc_is_root_dprc-global.patch +++ /dev/null @@ -1,77 +0,0 @@ -From 82981b28f3a8a7f4ac61d8dc87a0abaeebfbe6dc Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Wed, 22 Jun 2016 16:40:47 -0500 -Subject: [PATCH 184/226] staging: fsl-mc: make fsl_mc_is_root_dprc() global - -make fsl_mc_is_root_dprc() global so that the dprc driver -can use it - -Signed-off-by: Stuart Yoder -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/mc-bus.c | 28 +++++++++++++--------------- - drivers/staging/fsl-mc/include/mc.h | 2 ++ - 2 files changed, 15 insertions(+), 15 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/mc-bus.c -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -24,8 +24,6 @@ - - static struct kmem_cache *mc_dev_cache; - --static bool fsl_mc_is_root_dprc(struct device *dev); -- - /** - * fsl_mc_bus_match - device to driver matching callback - * @dev: the MC object device structure to match against -@@ -247,19 +245,6 @@ static void fsl_mc_get_root_dprc(struct - } - } - --/** -- * fsl_mc_is_root_dprc - function to check if a given device is a root dprc -- */ --static bool fsl_mc_is_root_dprc(struct device *dev) --{ -- struct device *root_dprc_dev; -- -- fsl_mc_get_root_dprc(dev, &root_dprc_dev); -- if (!root_dprc_dev) -- return false; -- return dev == root_dprc_dev; --} -- - static int get_dprc_attr(struct fsl_mc_io *mc_io, - int container_id, struct dprc_attributes *attr) - { -@@ -424,6 +409,19 @@ error_cleanup_regions: - } - - /** -+ * fsl_mc_is_root_dprc - function to check if a given device is a root dprc -+ */ -+bool fsl_mc_is_root_dprc(struct device *dev) -+{ -+ struct device *root_dprc_dev; -+ -+ fsl_mc_get_root_dprc(dev, &root_dprc_dev); -+ if (!root_dprc_dev) -+ return false; -+ return dev == root_dprc_dev; -+} -+ -+/** - * Add a newly discovered MC object device to be visible in Linux - */ - int fsl_mc_device_add(struct dprc_obj_desc *obj_desc, ---- a/drivers/staging/fsl-mc/include/mc.h -+++ b/drivers/staging/fsl-mc/include/mc.h -@@ -207,6 +207,8 @@ int __must_check fsl_mc_allocate_irqs(st - - void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev); - -+bool fsl_mc_is_root_dprc(struct device *dev); -+ - extern struct bus_type fsl_mc_bus_type; - - #endif /* _FSL_MC_H_ */ diff --git a/target/linux/layerscape/patches-4.4/7185-staging-fsl-mc-fix-asymmetry-in-destroy-of-mc_io.patch b/target/linux/layerscape/patches-4.4/7185-staging-fsl-mc-fix-asymmetry-in-destroy-of-mc_io.patch deleted file mode 100644 index b148c7606..000000000 --- a/target/linux/layerscape/patches-4.4/7185-staging-fsl-mc-fix-asymmetry-in-destroy-of-mc_io.patch +++ /dev/null @@ -1,62 +0,0 @@ -From 4e55a4c296d3a93c95320cdac0b8e72f3cfefb98 Mon Sep 17 00:00:00 2001 -From: Bharat Bhushan -Date: Wed, 22 Jun 2016 16:40:48 -0500 -Subject: [PATCH 185/226] staging: fsl-mc: fix asymmetry in destroy of mc_io - -An mc_io represents a mapped MC portal. Previously, an mc_io was -created for the root dprc in fsl_mc_bus_probe() and for child dprcs -in dprc_probe(). But the free of that data structure happened in the -general bus remove callback. This asymmetry resulted in some bugs due -to unwanted destroys of mc_io object in some scenarios (e.g. vfio). 
- -Fix this bug by making things symmetric-- mc_io created in -fsl_mc_bus_probe() is freed in fsl_mc_bus_remove(). The mc_io created -in dprc_probe() is freed in dprc_remove(). - -Signed-off-by: Bharat Bhushan -[Stuart: added check for root dprc and reworded commit message] -Signed-off-by: Stuart Yoder -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/dprc-driver.c | 5 +++++ - drivers/staging/fsl-mc/bus/mc-bus.c | 8 ++++---- - 2 files changed, 9 insertions(+), 4 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/dprc-driver.c -+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c -@@ -801,6 +801,11 @@ static int dprc_remove(struct fsl_mc_dev - dev_set_msi_domain(&mc_dev->dev, NULL); - } - -+ if (!fsl_mc_is_root_dprc(&mc_dev->dev)) { -+ fsl_destroy_mc_io(mc_dev->mc_io); -+ mc_dev->mc_io = NULL; -+ } -+ - dev_info(&mc_dev->dev, "DPRC device unbound from driver"); - return 0; - } ---- a/drivers/staging/fsl-mc/bus/mc-bus.c -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -579,10 +579,6 @@ void fsl_mc_device_remove(struct fsl_mc_ - - if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) { - mc_bus = to_fsl_mc_bus(mc_dev); -- if (mc_dev->mc_io) { -- fsl_destroy_mc_io(mc_dev->mc_io); -- mc_dev->mc_io = NULL; -- } - - if (fsl_mc_is_root_dprc(&mc_dev->dev)) { - if (atomic_read(&root_dprc_count) > 0) -@@ -810,6 +806,10 @@ static int fsl_mc_bus_remove(struct plat - return -EINVAL; - - fsl_mc_device_remove(mc->root_mc_bus_dev); -+ -+ fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io); -+ mc->root_mc_bus_dev->mc_io = NULL; -+ - dev_info(&pdev->dev, "Root MC bus device removed"); - return 0; - } diff --git a/target/linux/layerscape/patches-4.4/7186-staging-fsl-mc-dprc-add-missing-irq-free.patch b/target/linux/layerscape/patches-4.4/7186-staging-fsl-mc-dprc-add-missing-irq-free.patch deleted file mode 100644 index 0a485ec3e..000000000 --- a/target/linux/layerscape/patches-4.4/7186-staging-fsl-mc-dprc-add-missing-irq-free.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 159abffaa5e2acf910b5e4cdca81a7b6d2dd958f Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Wed, 22 Jun 2016 16:40:49 -0500 -Subject: [PATCH 186/226] staging: fsl-mc: dprc: add missing irq free - -add missing free of the Linux irq when tearing down interrupts - -Signed-off-by: Stuart Yoder -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/dprc-driver.c | 5 +++++ - 1 file changed, 5 insertions(+) - ---- a/drivers/staging/fsl-mc/bus/dprc-driver.c -+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c -@@ -760,7 +760,12 @@ error_cleanup_msi_domain: - */ - static void dprc_teardown_irq(struct fsl_mc_device *mc_dev) - { -+ struct fsl_mc_device_irq *irq = mc_dev->irqs[0]; -+ - (void)disable_dprc_irq(mc_dev); -+ -+ devm_free_irq(&mc_dev->dev, irq->msi_desc->irq, &mc_dev->dev); -+ - fsl_mc_free_irqs(mc_dev); - } - diff --git a/target/linux/layerscape/patches-4.4/7187-staging-fsl-mc-dprc-fix-ordering-problem-freeing-res.patch b/target/linux/layerscape/patches-4.4/7187-staging-fsl-mc-dprc-fix-ordering-problem-freeing-res.patch deleted file mode 100644 index c03e8d9b1..000000000 --- a/target/linux/layerscape/patches-4.4/7187-staging-fsl-mc-dprc-fix-ordering-problem-freeing-res.patch +++ /dev/null @@ -1,41 +0,0 @@ -From b104ed7497745e2e6da214b37ef22edaf38098c7 Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Wed, 22 Jun 2016 16:40:50 -0500 -Subject: [PATCH 187/226] staging: fsl-mc: dprc: fix ordering problem freeing - resources in remove of dprc - -When unbinding a dprc from the dprc driver the cleanup of -the resource pools must happen after 
irq pool cleanup -is done. - -Signed-off-by: Stuart Yoder -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/dprc-driver.c | 10 ++++++---- - 1 file changed, 6 insertions(+), 4 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/dprc-driver.c -+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c -@@ -796,16 +796,18 @@ static int dprc_remove(struct fsl_mc_dev - dprc_teardown_irq(mc_dev); - - device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove); -- dprc_cleanup_all_resource_pools(mc_dev); -- error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); -- if (error < 0) -- dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error); - - if (dev_get_msi_domain(&mc_dev->dev)) { - fsl_mc_cleanup_irq_pool(mc_bus); - dev_set_msi_domain(&mc_dev->dev, NULL); - } - -+ dprc_cleanup_all_resource_pools(mc_dev); -+ -+ error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); -+ if (error < 0) -+ dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error); -+ - if (!fsl_mc_is_root_dprc(&mc_dev->dev)) { - fsl_destroy_mc_io(mc_dev->mc_io); - mc_dev->mc_io = NULL; diff --git a/target/linux/layerscape/patches-4.4/7188-staging-fsl-mc-properly-set-hwirq-in-msi-set_desc.patch b/target/linux/layerscape/patches-4.4/7188-staging-fsl-mc-properly-set-hwirq-in-msi-set_desc.patch deleted file mode 100644 index 34aadad78..000000000 --- a/target/linux/layerscape/patches-4.4/7188-staging-fsl-mc-properly-set-hwirq-in-msi-set_desc.patch +++ /dev/null @@ -1,48 +0,0 @@ -From f5f9462cb947922817225b69240740e637de0149 Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Wed, 22 Jun 2016 16:40:51 -0500 -Subject: [PATCH 188/226] staging: fsl-mc: properly set hwirq in msi set_desc - -For an MSI domain the hwirq is an arbitrary but unique -id to identify an interrupt. Previously the hwirq was set to -the MSI index of the interrupt, but that only works if there is -one DPRC. Additional DPRCs require an expanded namespace. Use -both the ICID (which is unique per DPRC) and the MSI index to -compose a hwirq value. - -Signed-off-by: Stuart Yoder -Signed-off-by: Greg Kroah-Hartman ---- - drivers/staging/fsl-mc/bus/mc-msi.c | 17 ++++++++++++++++- - 1 file changed, 16 insertions(+), 1 deletion(-) - ---- a/drivers/staging/fsl-mc/bus/mc-msi.c -+++ b/drivers/staging/fsl-mc/bus/mc-msi.c -@@ -20,11 +20,26 @@ - #include "../include/mc-sys.h" - #include "dprc-cmd.h" - -+/* -+ * Generate a unique ID identifying the interrupt (only used within the MSI -+ * irqdomain. Combine the icid with the interrupt index. -+ */ -+static irq_hw_number_t fsl_mc_domain_calc_hwirq(struct fsl_mc_device *dev, -+ struct msi_desc *desc) -+{ -+ /* -+ * Make the base hwirq value for ICID*10000 so it is readable -+ * as a decimal value in /proc/interrupts. 
-+ */ -+ return (irq_hw_number_t)(desc->fsl_mc.msi_index + (dev->icid * 10000)); -+} -+ - static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg, - struct msi_desc *desc) - { - arg->desc = desc; -- arg->hwirq = (irq_hw_number_t)desc->fsl_mc.msi_index; -+ arg->hwirq = fsl_mc_domain_calc_hwirq(to_fsl_mc_device(desc->dev), -+ desc); - } - - static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info) diff --git a/target/linux/layerscape/patches-4.4/7189-staging-fsl-mc-update-dpcon-binary-interface-to-v2.2.patch b/target/linux/layerscape/patches-4.4/7189-staging-fsl-mc-update-dpcon-binary-interface-to-v2.2.patch deleted file mode 100644 index 5f60ea7d8..000000000 --- a/target/linux/layerscape/patches-4.4/7189-staging-fsl-mc-update-dpcon-binary-interface-to-v2.2.patch +++ /dev/null @@ -1,964 +0,0 @@ -From 95c8565453e068db2664b5ee9cb0b7eced9a8d24 Mon Sep 17 00:00:00 2001 -From: Ioana Radulescu -Date: Fri, 3 Jul 2015 19:02:45 +0300 -Subject: [PATCH 189/226] staging: fsl-mc: update dpcon binary interface to - v2.2 - --this includes adding the command building/parsing - wrapper functions - -Signed-off-by: Stuart Yoder ---- - drivers/staging/fsl-mc/bus/Makefile | 3 +- - drivers/staging/fsl-mc/bus/dpcon.c | 407 ++++++++++++++++++++++++++++ - drivers/staging/fsl-mc/include/dpcon-cmd.h | 102 ++++++- - drivers/staging/fsl-mc/include/dpcon.h | 407 ++++++++++++++++++++++++++++ - 4 files changed, 917 insertions(+), 2 deletions(-) - create mode 100644 drivers/staging/fsl-mc/bus/dpcon.c - create mode 100644 drivers/staging/fsl-mc/include/dpcon.h - ---- a/drivers/staging/fsl-mc/bus/Makefile -+++ b/drivers/staging/fsl-mc/bus/Makefile -@@ -16,4 +16,5 @@ mc-bus-driver-objs := mc-bus.o \ - mc-msi.o \ - irq-gic-v3-its-fsl-mc-msi.o \ - dpmcp.o \ -- dpbp.o -+ dpbp.o \ -+ dpcon.o ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpcon.c -@@ -0,0 +1,407 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include "../include/mc-sys.h" -+#include "../include/mc-cmd.h" -+#include "../include/dpcon.h" -+#include "../include/dpcon-cmd.h" -+ -+int dpcon_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpcon_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPCON_CMD_OPEN(cmd, dpcon_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dpcon_open); -+ -+int dpcon_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dpcon_close); -+ -+int dpcon_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpcon_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPCON_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpcon_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dpcon_enable); -+ -+int dpcon_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dpcon_disable); -+ -+int dpcon_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpcon_reset(struct 
fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET, -+ cmd_flags, token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpcon_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpcon_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPCON_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpcon_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpcon_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPCON_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpcon_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ 
-+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpcon_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpcon_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dpcon_get_attributes); -+ -+int dpcon_set_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpcon_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPCON_CMD_SET_NOTIFICATION(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dpcon_set_notification); -+ ---- a/drivers/staging/fsl-mc/include/dpcon-cmd.h -+++ b/drivers/staging/fsl-mc/include/dpcon-cmd.h -@@ -34,7 +34,7 @@ - - /* DPCON Version */ - #define DPCON_VER_MAJOR 2 --#define DPCON_VER_MINOR 1 -+#define DPCON_VER_MINOR 2 - - /* Command IDs */ - #define DPCON_CMDID_CLOSE 0x800 -@@ -59,4 +59,104 @@ - - #define DPCON_CMDID_SET_NOTIFICATION 0x100 - -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_OPEN(cmd, dpcon_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_CREATE(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_priorities) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, 
param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_ch_id);\ -+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_SET_NOTIFICATION(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dpio_id);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->priority);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx);\ -+} while (0) -+ - #endif /* _FSL_DPCON_CMD_H */ ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/dpcon.h -@@ -0,0 +1,407 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPCON_H -+#define __FSL_DPCON_H -+ -+/* Data Path Concentrator API -+ * Contains initialization APIs and runtime control APIs for DPCON -+ */ -+ -+struct fsl_mc_io; -+ -+/** General DPCON macros */ -+ -+/** -+ * Use it to disable notifications; see dpcon_set_notification() -+ */ -+#define DPCON_INVALID_DPIO_ID (int)(-1) -+ -+/** -+ * dpcon_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpcon_id: DPCON unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpcon_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpcon_id, -+ uint16_t *token); -+ -+/** -+ * dpcon_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpcon_cfg - Structure representing DPCON configuration -+ * @num_priorities: Number of priorities for the DPCON channel (1-8) -+ */ -+struct dpcon_cfg { -+ uint8_t num_priorities; -+}; -+ -+/** -+ * dpcon_create() - Create the DPCON object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPCON object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpcon_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpcon_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpcon_destroy() - Destroy the DPCON object and release all its resources. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpcon_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpcon_enable() - Enable the DPCON -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpcon_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpcon_disable() - Disable the DPCON -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpcon_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpcon_is_enabled() - Check if the DPCON is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpcon_reset() - Reset the DPCON, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpcon_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpcon_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpcon_set_irq() - Set IRQ information for the DPCON to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpcon_irq_cfg *irq_cfg); -+ -+/** -+ * dpcon_get_irq() - Get IRQ information from the DPCON. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpcon_irq_cfg *irq_cfg); -+ -+/** -+ * dpcon_set_irq_enable() - Set overall interrupt state. 
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable controls the
-+ * overall interrupt state. If the interrupt is disabled no causes will cause
-+ * an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en);
-+
-+/**
-+ * dpcon_get_irq_enable() - Get overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en);
-+
-+/**
-+ * dpcon_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask);
-+
-+/**
-+ * dpcon_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask);
-+
-+/**
-+ * dpcon_get_irq_status() - Get the current status of any pending interrupts.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ * @irq_index: The interrupt index to configure
-+ * @status: interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */ -+int dpcon_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpcon_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpcon_attr - Structure representing DPCON attributes -+ * @id: DPCON object ID -+ * @version: DPCON version -+ * @qbman_ch_id: Channel ID to be used by dequeue operation -+ * @num_priorities: Number of priorities for the DPCON channel (1-8) -+ */ -+struct dpcon_attr { -+ int id; -+ /** -+ * struct version - DPCON version -+ * @major: DPCON major version -+ * @minor: DPCON minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint16_t qbman_ch_id; -+ uint8_t num_priorities; -+}; -+ -+/** -+ * dpcon_get_attributes() - Retrieve DPCON attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @attr: Object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpcon_attr *attr); -+ -+/** -+ * struct dpcon_notification_cfg - Structure representing notification parameters -+ * @dpio_id: DPIO object ID; must be configured with a notification channel; -+ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID'; -+ * @priority: Priority selection within the DPIO channel; valid values -+ * are 0-7, depending on the number of priorities in that channel -+ * @user_ctx: User context value provided with each CDAN message -+ */ -+struct dpcon_notification_cfg { -+ int dpio_id; -+ uint8_t priority; -+ uint64_t user_ctx; -+}; -+ -+/** -+ * dpcon_set_notification() - Set DPCON notification destination -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @cfg: Notification parameters -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpcon_set_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpcon_notification_cfg *cfg); -+ -+#endif /* __FSL_DPCON_H */ diff --git a/target/linux/layerscape/patches-4.4/7190-staging-fsl-mc-root-dprc-rescan-attribute-to-sync-ke.patch b/target/linux/layerscape/patches-4.4/7190-staging-fsl-mc-root-dprc-rescan-attribute-to-sync-ke.patch deleted file mode 100644 index a38907b0b..000000000 --- a/target/linux/layerscape/patches-4.4/7190-staging-fsl-mc-root-dprc-rescan-attribute-to-sync-ke.patch +++ /dev/null @@ -1,59 +0,0 @@ -From 75b607ff8725eac74f3375b3370f7d121d1827a3 Mon Sep 17 00:00:00 2001 -From: Lijun Pan -Date: Mon, 8 Feb 2016 17:40:14 -0600 -Subject: [PATCH 190/226] staging: fsl-mc: root dprc rescan attribute to sync - kernel with MC - -Introduce the rescan attribute as a device attribute to -synchronize the fsl-mc bus objects and the MC firmware. - -To rescan the root dprc only, e.g. 
-echo 1 > /sys/bus/fsl-mc/devices/dprc.1/rescan - -Signed-off-by: Lijun Pan -[Stuart: resolved merge conflict] -Signed-off-by: Stuart Yoder ---- - drivers/staging/fsl-mc/bus/mc-bus.c | 29 +++++++++++++++++++++++++++++ - 1 file changed, 29 insertions(+) - ---- a/drivers/staging/fsl-mc/bus/mc-bus.c -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -96,8 +96,37 @@ static ssize_t modalias_show(struct devi - } - static DEVICE_ATTR_RO(modalias); - -+static ssize_t rescan_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ unsigned long val; -+ unsigned int irq_count; -+ struct fsl_mc_device *root_mc_dev; -+ struct fsl_mc_bus *root_mc_bus; -+ -+ if (!fsl_mc_is_root_dprc(dev)) -+ return -EINVAL; -+ -+ root_mc_dev = to_fsl_mc_device(dev); -+ root_mc_bus = to_fsl_mc_bus(root_mc_dev); -+ -+ if (kstrtoul(buf, 0, &val) < 0) -+ return -EINVAL; -+ -+ if (val) { -+ mutex_lock(&root_mc_bus->scan_mutex); -+ dprc_scan_objects(root_mc_dev, &irq_count); -+ mutex_unlock(&root_mc_bus->scan_mutex); -+ } -+ -+ return count; -+} -+static DEVICE_ATTR_WO(rescan); -+ - static struct attribute *fsl_mc_dev_attrs[] = { - &dev_attr_modalias.attr, -+ &dev_attr_rescan.attr, - NULL, - }; - diff --git a/target/linux/layerscape/patches-4.4/7191-staging-fsl-mc-bus-rescan-attribute-to-sync-kernel-w.patch b/target/linux/layerscape/patches-4.4/7191-staging-fsl-mc-bus-rescan-attribute-to-sync-kernel-w.patch deleted file mode 100644 index d89815d81..000000000 --- a/target/linux/layerscape/patches-4.4/7191-staging-fsl-mc-bus-rescan-attribute-to-sync-kernel-w.patch +++ /dev/null @@ -1,78 +0,0 @@ -From 417d71b1e291725c01893bf1553478924d05952f Mon Sep 17 00:00:00 2001 -From: Lijun Pan -Date: Mon, 8 Feb 2016 17:40:16 -0600 -Subject: [PATCH 191/226] staging: fsl-mc: bus rescan attribute to sync kernel - with MC - -Introduce the rescan attribute as a bus attribute to -synchronize the fsl-mc bus objects and the MC firmware. 
- -To rescan the fsl-mc bus, e.g., -echo 1 > /sys/bus/fsl-mc/rescan - -Signed-off-by: Lijun Pan ---- - drivers/staging/fsl-mc/bus/mc-bus.c | 47 +++++++++++++++++++++++++++++++++++ - 1 file changed, 47 insertions(+) - ---- a/drivers/staging/fsl-mc/bus/mc-bus.c -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -139,11 +139,58 @@ static const struct attribute_group *fsl - NULL, - }; - -+static int scan_fsl_mc_bus(struct device *dev, void *data) -+{ -+ unsigned int irq_count; -+ struct fsl_mc_device *root_mc_dev; -+ struct fsl_mc_bus *root_mc_bus; -+ -+ if (fsl_mc_is_root_dprc(dev)) { -+ root_mc_dev = to_fsl_mc_device(dev); -+ root_mc_bus = to_fsl_mc_bus(root_mc_dev); -+ mutex_lock(&root_mc_bus->scan_mutex); -+ dprc_scan_objects(root_mc_dev, &irq_count); -+ mutex_unlock(&root_mc_bus->scan_mutex); -+ } -+ -+ return 0; -+} -+ -+static ssize_t bus_rescan_store(struct bus_type *bus, -+ const char *buf, size_t count) -+{ -+ unsigned long val; -+ -+ if (kstrtoul(buf, 0, &val) < 0) -+ return -EINVAL; -+ -+ if (val) -+ bus_for_each_dev(bus, NULL, NULL, scan_fsl_mc_bus); -+ -+ return count; -+} -+static BUS_ATTR(rescan, (S_IWUSR | S_IWGRP), NULL, bus_rescan_store); -+ -+static struct attribute *fsl_mc_bus_attrs[] = { -+ &bus_attr_rescan.attr, -+ NULL, -+}; -+ -+static const struct attribute_group fsl_mc_bus_group = { -+ .attrs = fsl_mc_bus_attrs, -+}; -+ -+static const struct attribute_group *fsl_mc_bus_groups[] = { -+ &fsl_mc_bus_group, -+ NULL, -+}; -+ - struct bus_type fsl_mc_bus_type = { - .name = "fsl-mc", - .match = fsl_mc_bus_match, - .uevent = fsl_mc_bus_uevent, - .dev_groups = fsl_mc_dev_groups, -+ .bus_groups = fsl_mc_bus_groups, - }; - EXPORT_SYMBOL_GPL(fsl_mc_bus_type); - diff --git a/target/linux/layerscape/patches-4.4/7192-staging-fsl-mc-Propagate-driver_override-for-a-child.patch b/target/linux/layerscape/patches-4.4/7192-staging-fsl-mc-Propagate-driver_override-for-a-child.patch deleted file mode 100644 index 20b952e5f..000000000 --- a/target/linux/layerscape/patches-4.4/7192-staging-fsl-mc-Propagate-driver_override-for-a-child.patch +++ /dev/null @@ -1,193 +0,0 @@ -From 2b9110586a96afc0d0e246835da176c48ae7c973 Mon Sep 17 00:00:00 2001 -From: "J. German Rivera" -Date: Fri, 13 Mar 2015 15:03:32 -0500 -Subject: [PATCH 192/226] staging: fsl-mc: Propagate driver_override for a - child DPRC's children - -When a child DPRC is bound to the vfio_fsl_mc driver via driver_override, -its own children should not be bound to corresponding host kernel -drivers, but instead should be bound to the vfio_fsl_mc driver as -well. - -Currently, when a child container is scanned by the vfio_fsl_mc driver, -child devices found are automatically bound to corresponding host kernel -drivers (e.g., DPMCP and DPBP objects are bound to the fsl_mc_allocator -driver, DPNI objects are bound to the ldpaa_eth driver, etc), Then, -the user has to manually unbind these child devices from their drivers, -set the driver_override sysfs attribute to vfio_fsl_mc driver, for each -of them and rebind them. - -Signed-off-by: J. 
German Rivera -Signed-off-by: Stuart Yoder ---- - drivers/staging/fsl-mc/bus/dprc-driver.c | 14 ++++++++++---- - drivers/staging/fsl-mc/bus/mc-bus.c | 20 +++++++++++++++++--- - drivers/staging/fsl-mc/include/mc-private.h | 2 ++ - drivers/staging/fsl-mc/include/mc.h | 2 ++ - 4 files changed, 31 insertions(+), 7 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/dprc-driver.c -+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c -@@ -152,6 +152,8 @@ static void check_plugged_state_change(s - * dprc_add_new_devices - Adds devices to the logical bus for a DPRC - * - * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object -+ * @driver_override: driver override to apply to new objects found in the DPRC, -+ * or NULL, if none. - * @obj_desc_array: array of device descriptors for child devices currently - * present in the physical DPRC. - * @num_child_objects_in_mc: number of entries in obj_desc_array -@@ -161,6 +163,7 @@ static void check_plugged_state_change(s - * in the physical DPRC. - */ - static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev, -+ const char *driver_override, - struct dprc_obj_desc *obj_desc_array, - int num_child_objects_in_mc) - { -@@ -184,7 +187,7 @@ static void dprc_add_new_devices(struct - } - - error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev, -- &child_dev); -+ driver_override, &child_dev); - if (error < 0) - continue; - } -@@ -243,6 +246,8 @@ static void dprc_cleanup_all_resource_po - * dprc_scan_objects - Discover objects in a DPRC - * - * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object -+ * @driver_override: driver override to apply to new objects found in the DPRC, -+ * or NULL, if none. - * @total_irq_count: total number of IRQs needed by objects in the DPRC. - * - * Detects objects added and removed from a DPRC and synchronizes the -@@ -258,6 +263,7 @@ static void dprc_cleanup_all_resource_po - * of the device drivers for the non-allocatable devices. 
- */ - int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev, -+ const char *driver_override, - unsigned int *total_irq_count) - { - int num_child_objects; -@@ -338,7 +344,7 @@ int dprc_scan_objects(struct fsl_mc_devi - dprc_remove_devices(mc_bus_dev, child_obj_desc_array, - num_child_objects); - -- dprc_add_new_devices(mc_bus_dev, child_obj_desc_array, -+ dprc_add_new_devices(mc_bus_dev, driver_override, child_obj_desc_array, - num_child_objects); - - if (child_obj_desc_array) -@@ -369,7 +375,7 @@ int dprc_scan_container(struct fsl_mc_de - * Discover objects in the DPRC: - */ - mutex_lock(&mc_bus->scan_mutex); -- error = dprc_scan_objects(mc_bus_dev, &irq_count); -+ error = dprc_scan_objects(mc_bus_dev, NULL, &irq_count); - mutex_unlock(&mc_bus->scan_mutex); - if (error < 0) - goto error; -@@ -456,7 +462,7 @@ static irqreturn_t dprc_irq0_handler_thr - DPRC_IRQ_EVENT_OBJ_CREATED)) { - unsigned int irq_count; - -- error = dprc_scan_objects(mc_dev, &irq_count); -+ error = dprc_scan_objects(mc_dev, NULL, &irq_count); - if (error < 0) { - /* - * If the error is -ENXIO, we ignore it, as it indicates ---- a/drivers/staging/fsl-mc/bus/mc-bus.c -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -116,7 +116,7 @@ static ssize_t rescan_store(struct devic - - if (val) { - mutex_lock(&root_mc_bus->scan_mutex); -- dprc_scan_objects(root_mc_dev, &irq_count); -+ dprc_scan_objects(root_mc_dev, NULL, &irq_count); - mutex_unlock(&root_mc_bus->scan_mutex); - } - -@@ -149,7 +149,7 @@ static int scan_fsl_mc_bus(struct device - root_mc_dev = to_fsl_mc_device(dev); - root_mc_bus = to_fsl_mc_bus(root_mc_dev); - mutex_lock(&root_mc_bus->scan_mutex); -- dprc_scan_objects(root_mc_dev, &irq_count); -+ dprc_scan_objects(root_mc_dev, NULL, &irq_count); - mutex_unlock(&root_mc_bus->scan_mutex); - } - -@@ -503,6 +503,7 @@ bool fsl_mc_is_root_dprc(struct device * - int fsl_mc_device_add(struct dprc_obj_desc *obj_desc, - struct fsl_mc_io *mc_io, - struct device *parent_dev, -+ const char *driver_override, - struct fsl_mc_device **new_mc_dev) - { - int error; -@@ -535,6 +536,18 @@ int fsl_mc_device_add(struct dprc_obj_de - - mc_dev->obj_desc = *obj_desc; - mc_dev->mc_io = mc_io; -+ if (driver_override) { -+ /* -+ * We trust driver_override, so we don't need to use -+ * kstrndup() here -+ */ -+ mc_dev->driver_override = kstrdup(driver_override, GFP_KERNEL); -+ if (!mc_dev->driver_override) { -+ error = -ENOMEM; -+ goto error_cleanup_dev; -+ } -+ } -+ - device_initialize(&mc_dev->dev); - mc_dev->dev.parent = parent_dev; - mc_dev->dev.bus = &fsl_mc_bus_type; -@@ -858,7 +871,8 @@ static int fsl_mc_bus_probe(struct platf - obj_desc.irq_count = 1; - obj_desc.region_count = 0; - -- error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, &mc_bus_dev); -+ error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, NULL, -+ &mc_bus_dev); - if (error < 0) - goto error_cleanup_mc_io; - ---- a/drivers/staging/fsl-mc/include/mc-private.h -+++ b/drivers/staging/fsl-mc/include/mc-private.h -@@ -110,6 +110,7 @@ struct fsl_mc_bus { - int __must_check fsl_mc_device_add(struct dprc_obj_desc *obj_desc, - struct fsl_mc_io *mc_io, - struct device *parent_dev, -+ const char *driver_override, - struct fsl_mc_device **new_mc_dev); - - void fsl_mc_device_remove(struct fsl_mc_device *mc_dev); -@@ -117,6 +118,7 @@ void fsl_mc_device_remove(struct fsl_mc_ - int dprc_scan_container(struct fsl_mc_device *mc_bus_dev); - - int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev, -+ const char *driver_override, - unsigned int *total_irq_count); - - int __init 
dprc_driver_init(void); ---- a/drivers/staging/fsl-mc/include/mc.h -+++ b/drivers/staging/fsl-mc/include/mc.h -@@ -129,6 +129,7 @@ struct fsl_mc_device_irq { - * @regions: pointer to array of MMIO region entries - * @irqs: pointer to array of pointers to interrupts allocated to this device - * @resource: generic resource associated with this MC object device, if any. -+ * @driver_override: Driver name to force a match - * - * Generic device object for MC object devices that are "attached" to a - * MC bus. -@@ -161,6 +162,7 @@ struct fsl_mc_device { - struct resource *regions; - struct fsl_mc_device_irq **irqs; - struct fsl_mc_resource *resource; -+ const char *driver_override; - }; - - #define to_fsl_mc_device(_dev) \ diff --git a/target/linux/layerscape/patches-4.4/7193-staging-fsl-mc-add-device-binding-path-driver_overri.patch b/target/linux/layerscape/patches-4.4/7193-staging-fsl-mc-add-device-binding-path-driver_overri.patch deleted file mode 100644 index 60d294a1d..000000000 --- a/target/linux/layerscape/patches-4.4/7193-staging-fsl-mc-add-device-binding-path-driver_overri.patch +++ /dev/null @@ -1,111 +0,0 @@ -From 0bda83c15b2ecfc45fac0656df15d4f4fa65afa9 Mon Sep 17 00:00:00 2001 -From: Bharat Bhushan -Date: Wed, 18 Mar 2015 17:32:59 -0500 -Subject: [PATCH 193/226] staging: fsl-mc: add device binding path - 'driver_override' - -This patch is required for vfio-fsl-mc meta driver to successfully bind -layerscape container devices for device passthrough. This patch adds -a mechanism to allow a layerscape device to specify a driver rather than -a layerscape driver provide a device match. - -This patch is based on following proposed patches for PCI and platform devices -- https://lkml.org/lkml/2014/4/8/571 :- For Platform devices -- http://lists-archives.com/linux-kernel/28030441-pci-introduce-new-device-binding-path-using-pci_dev-driver_override.html :- For PCI devices - -Example to allow a device (dprc.1) to specifically bind -with driver (vfio-fsl-mc):- -- echo vfio-fsl-mc > /sys/bus/fsl-mc/devices/dprc.1/driver_override -- echo dprc.1 > /sys/bus/fsl-mc/drivers/fsl_mc_dprc/unbind -- echo dprc.1 > /sys/bus/fsl-mc/drivers/vfio-fsl-mc/bind - -Signed-off-by: J. 
German Rivera -(Stuart: resolved merge conflicts) -Signed-off-by: Stuart Yoder ---- - drivers/staging/fsl-mc/bus/mc-bus.c | 53 +++++++++++++++++++++++++++++++++++ - 1 file changed, 53 insertions(+) - ---- a/drivers/staging/fsl-mc/bus/mc-bus.c -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -42,6 +42,12 @@ static int fsl_mc_bus_match(struct devic - if (WARN_ON(!fsl_mc_bus_exists())) - goto out; - -+ /* When driver_override is set, only bind to the matching driver */ -+ if (mc_dev->driver_override) { -+ found = !strcmp(mc_dev->driver_override, mc_drv->driver.name); -+ goto out; -+ } -+ - if (!mc_drv->match_id_table) - goto out; - -@@ -96,6 +102,50 @@ static ssize_t modalias_show(struct devi - } - static DEVICE_ATTR_RO(modalias); - -+static ssize_t driver_override_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ const char *driver_override, *old = mc_dev->driver_override; -+ char *cp; -+ -+ if (WARN_ON(dev->bus != &fsl_mc_bus_type)) -+ return -EINVAL; -+ -+ if (count > PATH_MAX) -+ return -EINVAL; -+ -+ driver_override = kstrndup(buf, count, GFP_KERNEL); -+ if (!driver_override) -+ return -ENOMEM; -+ -+ cp = strchr(driver_override, '\n'); -+ if (cp) -+ *cp = '\0'; -+ -+ if (strlen(driver_override)) { -+ mc_dev->driver_override = driver_override; -+ } else { -+ kfree(driver_override); -+ mc_dev->driver_override = NULL; -+ } -+ -+ kfree(old); -+ -+ return count; -+} -+ -+static ssize_t driver_override_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ -+ return sprintf(buf, "%s\n", mc_dev->driver_override); -+} -+ -+static DEVICE_ATTR_RW(driver_override); -+ - static ssize_t rescan_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -@@ -127,6 +177,7 @@ static DEVICE_ATTR_WO(rescan); - static struct attribute *fsl_mc_dev_attrs[] = { - &dev_attr_modalias.attr, - &dev_attr_rescan.attr, -+ &dev_attr_driver_override.attr, - NULL, - }; - -@@ -677,6 +728,8 @@ void fsl_mc_device_remove(struct fsl_mc_ - } - } - -+ kfree(mc_dev->driver_override); -+ mc_dev->driver_override = NULL; - if (mc_bus) - devm_kfree(mc_dev->dev.parent, mc_bus); - else diff --git a/target/linux/layerscape/patches-4.4/7194-staging-fsl-mc-export-irq-cleanup-for-vfio-to-use.patch b/target/linux/layerscape/patches-4.4/7194-staging-fsl-mc-export-irq-cleanup-for-vfio-to-use.patch deleted file mode 100644 index 0b926850c..000000000 --- a/target/linux/layerscape/patches-4.4/7194-staging-fsl-mc-export-irq-cleanup-for-vfio-to-use.patch +++ /dev/null @@ -1,47 +0,0 @@ -From 552d628c887d970b9a97d8db2629adc4820fb8e3 Mon Sep 17 00:00:00 2001 -From: Bharat Bhushan -Date: Thu, 16 Jul 2015 14:44:24 +0530 -Subject: [PATCH 194/226] staging: fsl-mc: export irq cleanup for vfio to use - -VFIO driver needs these basic functions for -setting up itt/its of dprc's bound to it. 
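-
-As a minimal sketch of the intended use (the vfio_fsl_mc_probe() and
-vfio_fsl_mc_remove() hooks are hypothetical; only the two exported
-helpers are real), a passthrough driver binding a dprc could do:
-
-  static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
-  {
-    /* manage the dprc's resource pools from vfio */
-    dprc_init_all_resource_pools(mc_dev);
-    /* ...set up itt/its and scan child objects... */
-    return 0;
-  }
-
-  static void vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
-  {
-    dprc_cleanup_all_resource_pools(mc_dev);
-  }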
- -Signed-off-by: Bharat Bhushan -(Stuart: resolved merge conflict, commit log cleanup) -Signed-off-by: Stuart Yoder ---- - drivers/staging/fsl-mc/bus/dprc-driver.c | 4 ++-- - drivers/staging/fsl-mc/include/mc-private.h | 4 ++++ - 2 files changed, 6 insertions(+), 2 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/dprc-driver.c -+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c -@@ -193,7 +193,7 @@ static void dprc_add_new_devices(struct - } - } - --static void dprc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev) -+void dprc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev) - { - int pool_type; - struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); -@@ -234,7 +234,7 @@ static void dprc_cleanup_resource_pool(s - WARN_ON(free_count != res_pool->free_count); - } - --static void dprc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev) -+void dprc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev) - { - int pool_type; - ---- a/drivers/staging/fsl-mc/include/mc-private.h -+++ b/drivers/staging/fsl-mc/include/mc-private.h -@@ -157,4 +157,8 @@ int fsl_mc_populate_irq_pool(struct fsl_ - - void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus); - -+void dprc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev); -+ -+void dprc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev); -+ - #endif /* _FSL_MC_PRIVATE_H_ */ diff --git a/target/linux/layerscape/patches-4.4/7195-increment-MC_CMD_COMPLETION_TIMEOUT_MS.patch b/target/linux/layerscape/patches-4.4/7195-increment-MC_CMD_COMPLETION_TIMEOUT_MS.patch deleted file mode 100644 index 438234b14..000000000 --- a/target/linux/layerscape/patches-4.4/7195-increment-MC_CMD_COMPLETION_TIMEOUT_MS.patch +++ /dev/null @@ -1,88 +0,0 @@ -From 71d19cd1107fa435d056e08e7d7ef7d8f714cf35 Mon Sep 17 00:00:00 2001 -From: Lijun Pan -Date: Fri, 31 Jul 2015 15:07:32 -0500 -Subject: [PATCH 195/226] increment MC_CMD_COMPLETION_TIMEOUT_MS - -5000ms is barely enough for dpsw/dpdmux creation. -If MC firmware could run faster, we would decrement the value later on. 
- -Signed-off-by: Lijun Pan -(Stuart: resolved merge conflict) -Signed-off-by: Stuart Yoder ---- - drivers/staging/fsl-mc/bus/mc-sys.c | 38 +++++++++++++++-------------------- - 1 file changed, 16 insertions(+), 22 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/mc-sys.c -+++ b/drivers/staging/fsl-mc/bus/mc-sys.c -@@ -43,8 +43,10 @@ - - /** - * Timeout in milliseconds to wait for the completion of an MC command -+ * 5000 ms is barely enough for dpsw/dpdmux creation -+ * TODO: if MC firmware could response faster, we should decrease this value - */ --#define MC_CMD_COMPLETION_TIMEOUT_MS 500 -+#define MC_CMD_COMPLETION_TIMEOUT_MS 5000 - - /* - * usleep_range() min and max values used to throttle down polling -@@ -327,17 +329,8 @@ static int mc_polling_wait_preemptible(s - usleep_range(MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS, - MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS); - -- if (time_after_eq(jiffies, jiffies_until_timeout)) { -- dev_dbg(mc_io->dev, -- "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", -- mc_io->portal_phys_addr, -- (unsigned int) -- MC_CMD_HDR_READ_TOKEN(cmd->header), -- (unsigned int) -- MC_CMD_HDR_READ_CMDID(cmd->header)); -- -+ if (time_after_eq(jiffies, jiffies_until_timeout)) - return -ETIMEDOUT; -- } - } - - *mc_status = status; -@@ -369,17 +362,8 @@ static int mc_polling_wait_atomic(struct - - udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS); - timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS; -- if (timeout_usecs == 0) { -- dev_dbg(mc_io->dev, -- "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", -- mc_io->portal_phys_addr, -- (unsigned int) -- MC_CMD_HDR_READ_TOKEN(cmd->header), -- (unsigned int) -- MC_CMD_HDR_READ_CMDID(cmd->header)); -- -+ if (timeout_usecs == 0) - return -ETIMEDOUT; -- } - } - - *mc_status = status; -@@ -422,9 +406,19 @@ int mc_send_command(struct fsl_mc_io *mc - else - error = mc_polling_wait_atomic(mc_io, cmd, &status); - -- if (error < 0) -+ if (error < 0) { -+ if (error == -ETIMEDOUT) { -+ pr_debug("MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", -+ mc_io->portal_phys_addr, -+ (unsigned int) -+ MC_CMD_HDR_READ_TOKEN(cmd->header), -+ (unsigned int) -+ MC_CMD_HDR_READ_CMDID(cmd->header)); -+ } - goto common_exit; - -+ } -+ - if (status != MC_CMD_STATUS_OK) { - dev_dbg(mc_io->dev, - "MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n", diff --git a/target/linux/layerscape/patches-4.4/7196-staging-fsl-mc-make-fsl_mc_get_root_dprc-public.patch b/target/linux/layerscape/patches-4.4/7196-staging-fsl-mc-make-fsl_mc_get_root_dprc-public.patch deleted file mode 100644 index d465f90c3..000000000 --- a/target/linux/layerscape/patches-4.4/7196-staging-fsl-mc-make-fsl_mc_get_root_dprc-public.patch +++ /dev/null @@ -1,45 +0,0 @@ -From 12b1317fb3ab5b56efd833fa3b22965adf1d2c96 Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Fri, 15 Apr 2016 17:07:16 -0500 -Subject: [PATCH 196/226] staging: fsl-mc: make fsl_mc_get_root_dprc public - -this is needed by other components (e.g. 
vfio) to find -the root dprc - -Signed-off-by: Stuart Yoder ---- - drivers/staging/fsl-mc/bus/mc-bus.c | 3 ++- - drivers/staging/fsl-mc/include/mc.h | 3 +++ - 2 files changed, 5 insertions(+), 1 deletion(-) - ---- a/drivers/staging/fsl-mc/bus/mc-bus.c -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -358,7 +358,7 @@ EXPORT_SYMBOL_GPL(fsl_mc_bus_exists); - /** - * fsl_mc_get_root_dprc - function to traverse to the root dprc - */ --static void fsl_mc_get_root_dprc(struct device *dev, -+void fsl_mc_get_root_dprc(struct device *dev, - struct device **root_dprc_dev) - { - if (WARN_ON(!dev)) { -@@ -371,6 +371,7 @@ static void fsl_mc_get_root_dprc(struct - *root_dprc_dev = (*root_dprc_dev)->parent; - } - } -+EXPORT_SYMBOL_GPL(fsl_mc_get_root_dprc); - - static int get_dprc_attr(struct fsl_mc_io *mc_io, - int container_id, struct dprc_attributes *attr) ---- a/drivers/staging/fsl-mc/include/mc.h -+++ b/drivers/staging/fsl-mc/include/mc.h -@@ -191,6 +191,9 @@ void fsl_mc_driver_unregister(struct fsl - - bool fsl_mc_bus_exists(void); - -+void fsl_mc_get_root_dprc(struct device *dev, -+ struct device **root_dprc_dev); -+ - int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev, - u16 mc_io_flags, - struct fsl_mc_io **new_mc_io); diff --git a/target/linux/layerscape/patches-4.4/7197-staging-fsl-mc-Management-Complex-restool-driver.patch b/target/linux/layerscape/patches-4.4/7197-staging-fsl-mc-Management-Complex-restool-driver.patch deleted file mode 100644 index 2a5e5df2b..000000000 --- a/target/linux/layerscape/patches-4.4/7197-staging-fsl-mc-Management-Complex-restool-driver.patch +++ /dev/null @@ -1,489 +0,0 @@ -From fb4881d149742e4c5595aca8bf86c99d2ea155ad Mon Sep 17 00:00:00 2001 -From: Lijun Pan -Date: Mon, 8 Feb 2016 17:40:18 -0600 -Subject: [PATCH 197/226] staging: fsl-mc: Management Complex restool driver - -The kernel support for the restool (a user space resource management -tool) is a driver for the /dev/dprc.N device file. -Its purpose is to provide an ioctl interface, -which the restool uses to interact with the MC bus driver -and with the MC firmware. -We allocate a dpmcp at driver initialization, -and keep that dpmcp until driver exit. -We use that dpmcp by default. -If that dpmcp is in use, we create another portal at run time -and destroy the newly created portal after use. -The ioctl RESTOOL_SEND_MC_COMMAND sends user space command to fsl-mc -bus and utilizes the fsl-mc bus to communicate with MC firmware. -The ioctl RESTOOL_DPRC_SYNC request the mc-bus launch -objects scan under root dprc. -In order to support multiple root dprc, we utilize the bus notify -mechanism to scan fsl_mc_bus_type for the newly added root dprc. -After discovering the root dprc, it creates a miscdevice -/dev/dprc.N to associate with this root dprc. - -Signed-off-by: Lijun Pan -[Stuart: minor fix to resolve compile error] -Signed-off-by: Stuart Yoder ---- - Documentation/ioctl/ioctl-number.txt | 1 + - drivers/staging/fsl-mc/bus/Kconfig | 7 +- - drivers/staging/fsl-mc/bus/Makefile | 3 + - drivers/staging/fsl-mc/bus/mc-ioctl.h | 22 ++ - drivers/staging/fsl-mc/bus/mc-restool.c | 392 +++++++++++++++++++++++++++++++ - 5 files changed, 424 insertions(+), 1 deletion(-) - create mode 100644 drivers/staging/fsl-mc/bus/mc-ioctl.h - create mode 100644 drivers/staging/fsl-mc/bus/mc-restool.c - ---- a/Documentation/ioctl/ioctl-number.txt -+++ b/Documentation/ioctl/ioctl-number.txt -@@ -170,6 +170,7 @@ Code Seq#(hex) Include File Comments - 'R' 00-1F linux/random.h conflict! - 'R' 01 linux/rfkill.h conflict! 
- 'R' C0-DF net/bluetooth/rfcomm.h
-+'R' E0-EF drivers/staging/fsl-mc/bus/mc-ioctl.h
- 'S' all linux/cdrom.h conflict!
- 'S' 80-81 scsi/scsi_ioctl.h conflict!
- 'S' 82-FF scsi/scsi.h conflict!
---- a/drivers/staging/fsl-mc/bus/Kconfig
-+++ b/drivers/staging/fsl-mc/bus/Kconfig
-@@ -22,4 +22,9 @@ config FSL_MC_BUS
- Only enable this option when building the kernel for
- Freescale QorIQ LS2xxxx SoCs.
-
--
-+config FSL_MC_RESTOOL
-+ tristate "Freescale Management Complex (MC) restool driver"
-+ depends on FSL_MC_BUS
-+ help
-+ Driver that provides kernel support for the Freescale Management
-+ Complex resource manager user-space tool.
---- a/drivers/staging/fsl-mc/bus/Makefile
-+++ b/drivers/staging/fsl-mc/bus/Makefile
-@@ -18,3 +18,6 @@ mc-bus-driver-objs := mc-bus.o \
- dpmcp.o \
- dpbp.o \
- dpcon.o
-+
-+# MC restool kernel support
-+obj-$(CONFIG_FSL_MC_RESTOOL) += mc-restool.o
---- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/mc-ioctl.h
-@@ -0,0 +1,22 @@
-+/*
-+ * Freescale Management Complex (MC) ioctl interface
-+ *
-+ * Copyright (C) 2014 Freescale Semiconductor, Inc.
-+ * Author: Lijun Pan
-+ *
-+ * This file is licensed under the terms of the GNU General Public
-+ * License version 2. This program is licensed "as is" without any
-+ * warranty of any kind, whether express or implied.
-+ */
-+#ifndef _FSL_MC_IOCTL_H_
-+#define _FSL_MC_IOCTL_H_
-+
-+#include <linux/ioctl.h>
-+#include "../include/mc-sys.h"
-+
-+#define RESTOOL_IOCTL_TYPE 'R'
-+
-+#define RESTOOL_SEND_MC_COMMAND \
-+ _IOWR(RESTOOL_IOCTL_TYPE, 0xE0, struct mc_command)
-+
-+#endif /* _FSL_MC_IOCTL_H_ */
---- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/mc-restool.c
-@@ -0,0 +1,392 @@
-+/*
-+ * Freescale Management Complex (MC) restool driver
-+ *
-+ * Copyright (C) 2014 Freescale Semiconductor, Inc.
-+ * Author: Lijun Pan
-+ *
-+ * This file is licensed under the terms of the GNU General Public
-+ * License version 2. This program is licensed "as is" without any
-+ * warranty of any kind, whether express or implied.
-+ */ -+ -+#include "../include/mc-private.h" -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "mc-ioctl.h" -+#include "../include/mc-sys.h" -+#include "../include/mc-cmd.h" -+#include "../include/dpmng.h" -+ -+/** -+ * Maximum number of DPRCs that can be opened at the same time -+ */ -+#define MAX_DPRC_HANDLES 64 -+ -+/** -+ * restool_misc - information associated with the newly added miscdevice -+ * @misc: newly created miscdevice associated with root dprc -+ * @miscdevt: device id of this miscdevice -+ * @list: a linked list node representing this miscdevcie -+ * @static_mc_io: pointer to the static MC I/O object used by the restool -+ * @dynamic_instance_count: number of dynamically created instances -+ * @static_instance_in_use: static instance is in use or not -+ * @mutex: mutex lock to serialze the open/release operations -+ * @dev: root dprc associated with this miscdevice -+ */ -+struct restool_misc { -+ struct miscdevice misc; -+ dev_t miscdevt; -+ struct list_head list; -+ struct fsl_mc_io *static_mc_io; -+ u32 dynamic_instance_count; -+ bool static_instance_in_use; -+ struct mutex mutex; /* serialze the open/release operations */ -+ struct device *dev; -+}; -+ -+/* -+ * initialize a global list to link all -+ * the miscdevice nodes (struct restool_misc) -+ */ -+static LIST_HEAD(misc_list); -+static DEFINE_MUTEX(misc_list_mutex); -+ -+static int fsl_mc_restool_dev_open(struct inode *inode, struct file *filep) -+{ -+ struct fsl_mc_device *root_mc_dev; -+ int error; -+ struct fsl_mc_io *dynamic_mc_io = NULL; -+ struct restool_misc *restool_misc = NULL; -+ struct restool_misc *restool_misc_cursor; -+ -+ mutex_lock(&misc_list_mutex); -+ -+ list_for_each_entry(restool_misc_cursor, &misc_list, list) { -+ if (restool_misc_cursor->miscdevt == inode->i_rdev) { -+ restool_misc = restool_misc_cursor; -+ break; -+ } -+ } -+ -+ mutex_unlock(&misc_list_mutex); -+ -+ if (!restool_misc) -+ return -EINVAL; -+ -+ if (WARN_ON(!restool_misc->dev)) -+ return -EINVAL; -+ -+ mutex_lock(&restool_misc->mutex); -+ -+ if (!restool_misc->static_instance_in_use) { -+ restool_misc->static_instance_in_use = true; -+ filep->private_data = restool_misc->static_mc_io; -+ } else { -+ dynamic_mc_io = kzalloc(sizeof(*dynamic_mc_io), GFP_KERNEL); -+ if (!dynamic_mc_io) { -+ error = -ENOMEM; -+ goto err_unlock; -+ } -+ -+ root_mc_dev = to_fsl_mc_device(restool_misc->dev); -+ error = fsl_mc_portal_allocate(root_mc_dev, 0, &dynamic_mc_io); -+ if (error < 0) { -+ pr_err("Not able to allocate MC portal\n"); -+ goto free_dynamic_mc_io; -+ } -+ ++restool_misc->dynamic_instance_count; -+ filep->private_data = dynamic_mc_io; -+ } -+ -+ mutex_unlock(&restool_misc->mutex); -+ -+ return 0; -+ -+free_dynamic_mc_io: -+ kfree(dynamic_mc_io); -+err_unlock: -+ mutex_unlock(&restool_misc->mutex); -+ -+ return error; -+} -+ -+static int fsl_mc_restool_dev_release(struct inode *inode, struct file *filep) -+{ -+ struct fsl_mc_io *local_mc_io = filep->private_data; -+ struct restool_misc *restool_misc = NULL; -+ struct restool_misc *restool_misc_cursor; -+ -+ if (WARN_ON(!filep->private_data)) -+ return -EINVAL; -+ -+ mutex_lock(&misc_list_mutex); -+ -+ list_for_each_entry(restool_misc_cursor, &misc_list, list) { -+ if (restool_misc_cursor->miscdevt == inode->i_rdev) { -+ restool_misc = restool_misc_cursor; -+ break; -+ } -+ } -+ -+ mutex_unlock(&misc_list_mutex); -+ -+ if (!restool_misc) -+ return -EINVAL; -+ -+ mutex_lock(&restool_misc->mutex); -+ -+ if 
(WARN_ON(restool_misc->dynamic_instance_count == 0 && -+ !restool_misc->static_instance_in_use)) { -+ mutex_unlock(&restool_misc->mutex); -+ return -EINVAL; -+ } -+ -+ /* Globally clean up opened/untracked handles */ -+ fsl_mc_portal_reset(local_mc_io); -+ -+ /* -+ * must check -+ * whether local_mc_io is dynamic or static instance -+ * Otherwise it will free up the reserved portal by accident -+ * or even not free up the dynamic allocated portal -+ * if 2 or more instances running concurrently -+ */ -+ if (local_mc_io == restool_misc->static_mc_io) { -+ restool_misc->static_instance_in_use = false; -+ } else { -+ fsl_mc_portal_free(local_mc_io); -+ kfree(filep->private_data); -+ --restool_misc->dynamic_instance_count; -+ } -+ -+ filep->private_data = NULL; -+ mutex_unlock(&restool_misc->mutex); -+ -+ return 0; -+} -+ -+static int restool_send_mc_command(unsigned long arg, -+ struct fsl_mc_io *local_mc_io) -+{ -+ int error; -+ struct mc_command mc_cmd; -+ -+ if (copy_from_user(&mc_cmd, (void __user *)arg, sizeof(mc_cmd))) -+ return -EFAULT; -+ -+ /* -+ * Send MC command to the MC: -+ */ -+ error = mc_send_command(local_mc_io, &mc_cmd); -+ if (error < 0) -+ return error; -+ -+ if (copy_to_user((void __user *)arg, &mc_cmd, sizeof(mc_cmd))) -+ return -EFAULT; -+ -+ return 0; -+} -+ -+static long -+fsl_mc_restool_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -+{ -+ int error; -+ -+ switch (cmd) { -+ case RESTOOL_SEND_MC_COMMAND: -+ error = restool_send_mc_command(arg, file->private_data); -+ break; -+ default: -+ pr_err("%s: unexpected ioctl call number\n", __func__); -+ error = -EINVAL; -+ } -+ -+ return error; -+} -+ -+static const struct file_operations fsl_mc_restool_dev_fops = { -+ .owner = THIS_MODULE, -+ .open = fsl_mc_restool_dev_open, -+ .release = fsl_mc_restool_dev_release, -+ .unlocked_ioctl = fsl_mc_restool_dev_ioctl, -+}; -+ -+static int restool_add_device_file(struct device *dev) -+{ -+ u32 name1 = 0; -+ char name2[20] = {0}; -+ int error; -+ struct fsl_mc_device *root_mc_dev; -+ struct restool_misc *restool_misc; -+ -+ if (dev->bus == &platform_bus_type && dev->driver_data) { -+ if (sscanf(dev_name(dev), "%x.%s", &name1, name2) != 2) -+ return -EINVAL; -+ -+ if (strcmp(name2, "fsl-mc") == 0) -+ pr_debug("platform's root dprc name is: %s\n", -+ dev_name(&(((struct fsl_mc *) -+ (dev->driver_data))->root_mc_bus_dev->dev))); -+ } -+ -+ if (!fsl_mc_is_root_dprc(dev)) -+ return 0; -+ -+ restool_misc = kzalloc(sizeof(*restool_misc), GFP_KERNEL); -+ if (!restool_misc) -+ return -ENOMEM; -+ -+ restool_misc->dev = dev; -+ root_mc_dev = to_fsl_mc_device(dev); -+ error = fsl_mc_portal_allocate(root_mc_dev, 0, -+ &restool_misc->static_mc_io); -+ if (error < 0) { -+ pr_err("Not able to allocate MC portal\n"); -+ goto free_restool_misc; -+ } -+ -+ restool_misc->misc.minor = MISC_DYNAMIC_MINOR; -+ restool_misc->misc.name = dev_name(dev); -+ restool_misc->misc.fops = &fsl_mc_restool_dev_fops; -+ -+ error = misc_register(&restool_misc->misc); -+ if (error < 0) { -+ pr_err("misc_register() failed: %d\n", error); -+ goto free_portal; -+ } -+ -+ restool_misc->miscdevt = restool_misc->misc.this_device->devt; -+ mutex_init(&restool_misc->mutex); -+ mutex_lock(&misc_list_mutex); -+ list_add(&restool_misc->list, &misc_list); -+ mutex_unlock(&misc_list_mutex); -+ -+ pr_info("/dev/%s driver registered\n", dev_name(dev)); -+ -+ return 0; -+ -+free_portal: -+ fsl_mc_portal_free(restool_misc->static_mc_io); -+free_restool_misc: -+ kfree(restool_misc); -+ -+ return error; -+} -+ -+static 
int restool_bus_notifier(struct notifier_block *nb, -+ unsigned long action, void *data) -+{ -+ int error; -+ struct device *dev = data; -+ -+ switch (action) { -+ case BUS_NOTIFY_ADD_DEVICE: -+ error = restool_add_device_file(dev); -+ if (error) -+ return error; -+ break; -+ case BUS_NOTIFY_DEL_DEVICE: -+ case BUS_NOTIFY_REMOVED_DEVICE: -+ case BUS_NOTIFY_BIND_DRIVER: -+ case BUS_NOTIFY_BOUND_DRIVER: -+ case BUS_NOTIFY_UNBIND_DRIVER: -+ case BUS_NOTIFY_UNBOUND_DRIVER: -+ break; -+ default: -+ pr_err("%s: unrecognized device action from %s\n", __func__, -+ dev_name(dev)); -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+static int add_to_restool(struct device *dev, void *data) -+{ -+ return restool_add_device_file(dev); -+} -+ -+static int __init fsl_mc_restool_driver_init(void) -+{ -+ int error; -+ struct notifier_block *nb; -+ -+ nb = kzalloc(sizeof(*nb), GFP_KERNEL); -+ if (!nb) -+ return -ENOMEM; -+ -+ nb->notifier_call = restool_bus_notifier; -+ error = bus_register_notifier(&fsl_mc_bus_type, nb); -+ if (error) -+ goto free_nb; -+ -+ /* -+ * This driver runs after fsl-mc bus driver runs. -+ * Hence, many of the root dprcs are already attached to fsl-mc bus -+ * In order to make sure we find all the root dprcs, -+ * we need to scan the fsl_mc_bus_type. -+ */ -+ error = bus_for_each_dev(&fsl_mc_bus_type, NULL, NULL, add_to_restool); -+ if (error) { -+ bus_unregister_notifier(&fsl_mc_bus_type, nb); -+ kfree(nb); -+ pr_err("restool driver registration failure\n"); -+ return error; -+ } -+ -+ return 0; -+ -+free_nb: -+ kfree(nb); -+ return error; -+} -+ -+module_init(fsl_mc_restool_driver_init); -+ -+static void __exit fsl_mc_restool_driver_exit(void) -+{ -+ struct restool_misc *restool_misc; -+ struct restool_misc *restool_misc_tmp; -+ char name1[20] = {0}; -+ u32 name2 = 0; -+ -+ list_for_each_entry_safe(restool_misc, restool_misc_tmp, -+ &misc_list, list) { -+ if (sscanf(restool_misc->misc.name, "%4s.%u", name1, &name2) -+ != 2) -+ continue; -+ -+ pr_debug("name1=%s,name2=%u\n", name1, name2); -+ pr_debug("misc-device: %s\n", restool_misc->misc.name); -+ if (strcmp(name1, "dprc") != 0) -+ continue; -+ -+ if (WARN_ON(!restool_misc->static_mc_io)) -+ return; -+ -+ if (WARN_ON(restool_misc->dynamic_instance_count != 0)) -+ return; -+ -+ if (WARN_ON(restool_misc->static_instance_in_use)) -+ return; -+ -+ misc_deregister(&restool_misc->misc); -+ pr_info("/dev/%s driver unregistered\n", -+ restool_misc->misc.name); -+ fsl_mc_portal_free(restool_misc->static_mc_io); -+ list_del(&restool_misc->list); -+ kfree(restool_misc); -+ } -+} -+ -+module_exit(fsl_mc_restool_driver_exit); -+ -+MODULE_AUTHOR("Freescale Semiconductor Inc."); -+MODULE_DESCRIPTION("Freescale's MC restool driver"); -+MODULE_LICENSE("GPL"); diff --git a/target/linux/layerscape/patches-4.4/7198-staging-fsl-mc-dpio-services-driver.patch b/target/linux/layerscape/patches-4.4/7198-staging-fsl-mc-dpio-services-driver.patch deleted file mode 100644 index 7613d0a63..000000000 --- a/target/linux/layerscape/patches-4.4/7198-staging-fsl-mc-dpio-services-driver.patch +++ /dev/null @@ -1,8943 +0,0 @@ -From 331b26080961f0289c3a8a8e5e65f6524b23be19 Mon Sep 17 00:00:00 2001 -From: Jeffrey Ladouceur -Date: Tue, 7 Apr 2015 23:24:55 -0400 -Subject: [PATCH 198/226] staging: fsl-mc: dpio services driver -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -This is a commit of a squash of the cummulative dpio services patches -in the sdk 2.0 kernel as of 3/7/2016. 
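
The driver init above combines two mechanisms so that no root DPRC is missed: a bus notifier for devices added later, and a one-time scan of devices already on the bus. As a minimal standalone sketch of that register-then-scan pattern (my_bus_type, setup_device() and the module boilerplate are illustrative placeholders, not symbols from this driver):

#include <linux/device.h>
#include <linux/module.h>
#include <linux/notifier.h>

extern struct bus_type my_bus_type;	/* placeholder bus type */

static int setup_device(struct device *dev, void *data)
{
	/* allocate per-device state, create device nodes, etc. */
	return 0;
}

static int my_bus_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	if (action == BUS_NOTIFY_ADD_DEVICE)
		return notifier_from_errno(setup_device(data, NULL));
	return NOTIFY_DONE;
}

static struct notifier_block my_nb = {
	.notifier_call = my_bus_notifier,
};

static int __init my_driver_init(void)
{
	int err = bus_register_notifier(&my_bus_type, &my_nb);

	if (err)
		return err;
	/* catch devices that were added before the notifier existed */
	err = bus_for_each_dev(&my_bus_type, NULL, NULL, setup_device);
	if (err)
		bus_unregister_notifier(&my_bus_type, &my_nb);
	return err;
}
module_init(my_driver_init);

The ordering matters: registering the notifier first and scanning second means a device that probes in between is seen at most twice but never zero times, which is why the restool code deduplicates by device in its add path.
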
-
-staging: fsl-mc: dpio: initial implementation of dpio services
-
-* Port from kernel 3.16 to 3.19
-* upgrade to match MC fw 7.0.0
-* return -EPROBE_DEFER if fsl_mc_portal_allocate() fails.
-* enable DPIO interrupt support
-* implement service FQDAN handling
-* DPIO service selects DPIO objects using crude algorithms for now, we
- will look to make this smarter later on.
-* Locks all DPIO ops that aren't innately lockless. Smarter selection
- logic may allow locking to be relaxed eventually.
-* Portable QBMan driver source (and low-level MC flib code for DPIO) is
- included and encapsulated within the DPIO driver.
-
-Signed-off-by: Geoff Thorpe
-Signed-off-by: Haiying Wang
-Signed-off-by: Roy Pledge
-Signed-off-by: Bogdan Hamciuc
-Signed-off-by: Ioana Radulescu
-Signed-off-by: Cristian Sovaiala
-Signed-off-by: J. German Rivera
-Signed-off-by: Jeffrey Ladouceur
-[Stuart: resolved merge conflicts]
-Signed-off-by: Stuart Yoder
-
-dpio: Use locks when querying fq state
-
-merged from patch in 3.19-bringup branch.
-
-Signed-off-by: Ioana Radulescu
-Signed-off-by: Jeffrey Ladouceur
-Change-Id: Ia4d09f8a0cf4d8a4a2aa1cb39be789c34425286d
-Reviewed-on: http://git.am.freescale.net:8181/34707
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Haiying Wang
-Reviewed-by: Stuart Yoder
-
-qbman: Fix potential race in VDQCR handling
-
-Remove atomic_read() check of the VDQCR busy marker. These checks were racy
-as the flag could be incorrectly cleared if checked while another thread was
-starting a pull command. The check is unneeded since we can determine the
-owner of the outstanding pull command through other means.
-
-Signed-off-by: Roy Pledge
-Change-Id: Icc64577c0a4ce6dadef208975e980adfc6796c86
-Reviewed-on: http://git.am.freescale.net:8181/34705
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Haiying Wang
-Reviewed-by: Roy Pledge
-Reviewed-by: Stuart Yoder
-
-dpio: Fix IRQ handler and remove useless spinlock
-
-The IRQ handler for a threaded IRQ requires two parts: initially the handler
-should check status and inhibit the IRQ, then the threaded portion should
-process and re-enable.
-
-Also remove a spinlock that was redundant with the QMan driver and a debug
-check that could trigger under a race condition.
-
-Signed-off-by: Roy Pledge
-Signed-off-by: Jeffrey Ladouceur
-Change-Id: I64926583af0be954228de94ae354fa005c8ec88a
-Reviewed-on: http://git.am.freescale.net:8181/34706
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Haiying Wang
-Reviewed-by: Roy Pledge
-Reviewed-by: Stuart Yoder
-
-staging: fsl-mc: dpio: Implement polling if IRQ not available
-
-Temporarily add a polling mode to DPIO in case the IRQ
-registration fails.
-
-Signed-off-by: Roy Pledge
-Change-Id: Iebbd488fd14dd9878ef846e40f3ebcbcd0eb1e80
-Reviewed-on: http://git.am.freescale.net:8181/34775
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Jeffrey Ladouceur
-Reviewed-by: Stuart Yoder
-
-fsl-mc-dpio: Fix to make this work without interrupt
-
-Some additional fixes to make the dpio driver work in poll mode.
-This is needed for direct assignment to a KVM guest.
-
-Signed-off-by: Bharat Bhushan
-Change-Id: Icf66b8c0c7f7e1610118f78396534c067f594934
-Reviewed-on: http://git.am.freescale.net:8181/35333
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Roy Pledge
-Reviewed-by: Stuart Yoder
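
The "Fix IRQ handler" entry above describes the standard two-part threaded-IRQ convention: the hard-IRQ half only checks status and inhibits the source, the threaded half processes and re-enables. A sketch of that shape, with the portal accessors stubbed out as placeholders rather than the driver's real helpers:

#include <linux/interrupt.h>

struct portal { void __iomem *regs; };

/* illustrative stand-ins for the real portal register accessors */
static bool irq_pending(struct portal *p)   { return true; }
static void irq_inhibit(struct portal *p)   { /* mask the source */ }
static void irq_uninhibit(struct portal *p) { /* unmask the source */ }
static void drain_ring(struct portal *p)    { /* consume entries */ }

/* hard-IRQ half: check status and inhibit, nothing more */
static irqreturn_t portal_hardirq(int irq, void *arg)
{
	struct portal *p = arg;

	if (!irq_pending(p))
		return IRQ_NONE;
	irq_inhibit(p);
	return IRQ_WAKE_THREAD;
}

/* threaded half: do the real work, then re-enable the source */
static irqreturn_t portal_thread_fn(int irq, void *arg)
{
	struct portal *p = arg;

	drain_ring(p);
	irq_uninhibit(p);
	return IRQ_HANDLED;
}

/* paired at request time:
 *	request_threaded_irq(irq, portal_hardirq, portal_thread_fn,
 *			     0, "portal", p);
 */
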
-
-fsl-mc-dpio: Make QBMan token tracking internal
-
-Previously the QBMan portal code required the caller to properly set and
-check for a token value used by the driver to detect when the QMan
-hardware had completed a dequeue. This patch simplifies the driver
-interface by internally dealing with token values. The driver will now
-set the token value to 0 once it has dequeued a frame, while a token
-value of 1 indicates the HW has completed the dequeue but SW has not
-consumed the frame yet.
-
-Signed-off-by: Roy Pledge
-Change-Id: If94d9728b0faa0fd79b47108f5cb05a425b89c18
-Reviewed-on: http://git.am.freescale.net:8181/35433
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Haiying Wang
-Reviewed-by: Stuart Yoder
-
-fsl-mc-dpio: Distribute DPIO IRQs among cores
-
-Configure the DPIO IRQ affinities across all available cores.
-
-Signed-off-by: Roy Pledge
-Change-Id: Ib45968a070460b7e9410bfe6067b20ecd3524c54
-Reviewed-on: http://git.am.freescale.net:8181/35540
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Haiying Wang
-Reviewed-by: Bogdan Hamciuc
-Reviewed-by: Stuart Yoder
-
-dpio/qbman: add flush after finishing cena write
-
-Signed-off-by: Haiying Wang
-Change-Id: I19537f101f7f5b443d60c0ad0e5d96c1dc302223
-Reviewed-on: http://git.am.freescale.net:8181/35854
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Roy Pledge
-Reviewed-by: Stuart Yoder
-
-dpio/qbman: rename qbman_dq_entry to qbman_result
-
-Currently qbman_dq_entry is used both for dq results in dqrr
-and memory, and for notifications in dqrr and memory. It doesn't
-make sense to have dq_entry in the name for those notifications
-which have nothing to do with dq. So we rename this as
-qbman_result, which is meaningful for both cases.
-
-Signed-off-by: Haiying Wang
-Change-Id: I62b3e729c571a1195e8802a9fab3fca97a14eae4
-Reviewed-on: http://git.am.freescale.net:8181/35535
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Roy Pledge
-Reviewed-by: Stuart Yoder
-
-dpio/qbman: add APIs to parse BPSCN and CGCU
-
-BPSCN and CGCU are notifications which can only be written to memory.
-We need to consider the host endianness while parsing these notifications.
-Also modify the check of FQRN/CSCN_MEM with the same consideration.
-
-Signed-off-by: Haiying Wang
-Change-Id: I572e0aa126107aed40e1ce326d5df7956882a939
-Reviewed-on: http://git.am.freescale.net:8181/35536
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Roy Pledge
-Reviewed-by: Stuart Yoder
-
-dpio/qbman: remove EXPORT_SYMBOL for qbman APIs
-
-because they are only used by dpio.
-
-Signed-off-by: Haiying Wang
-Change-Id: I12e7b81c2d32f3c7b3df9fd73b742b1b675f4b8b
-Reviewed-on: http://git.am.freescale.net:8181/35537
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Roy Pledge
-Reviewed-by: Stuart Yoder
-
-dpio/qbman: add invalidate and prefetch support
-
-for cacheable memory access.
-Also remove the redundant memory barriers.
-
-Signed-off-by: Haiying Wang
-Change-Id: I452a768278d1c5ef37e5741e9b011d725cb57b30
-Reviewed-on: http://git.am.freescale.net:8181/35873
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Roy Pledge
-Reviewed-by: Stuart Yoder
-
-dpio-driver: Fix qman-portal interrupt masking in poll mode
-
-The DPIO driver should mask qman-portal interrupt reporting when
-working in poll mode. The has_irq flag is used for this, but
-interrupt masking was happening before it was decided whether the
-system would work in poll mode or interrupt mode.
-
-This patch fixes the issue so that IRQ masking/enabling now happens
-after the irq/poll mode is decided.
-
-Signed-off-by: Bharat Bhushan
-Change-Id: I44de07b6142e80b3daea45e7d51a2d2799b2ed8d
-Reviewed-on: http://git.am.freescale.net:8181/37100
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Roy Pledge
-Reviewed-by: Stuart Yoder
-(cherry picked from commit 3579244250dcb287a0fe58bcc3b3780076d040a2)
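
The internal token tracking described above ("Make QBMan token tracking internal") comes down to a one-byte handshake per dequeue entry: hardware writes one value when the entry is valid, software writes another once the frame is consumed. A minimal sketch, assuming an illustrative 64-byte entry layout (the real layout lives in qbman_portal.c):

#include <linux/compiler.h>
#include <linux/types.h>

#define TOKEN_HW_VALID	1	/* hardware finished writing the entry */
#define TOKEN_CONSUMED	0	/* software has taken the frame */

struct dq_entry {
	u8 verb;
	u8 token;
	u8 rsvd[62];		/* 64-byte entry, details omitted */
};

/* return the entry if hardware is done with it, NULL otherwise */
static struct dq_entry *dq_next(struct dq_entry *e)
{
	if (READ_ONCE(e->token) != TOKEN_HW_VALID)
		return NULL;
	WRITE_ONCE(e->token, TOKEN_CONSUMED);
	return e;
}

Keeping the handshake inside the driver is what lets callers stop threading token values through the API: the portal code alone decides when an entry may be recycled.
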
-
-dpio: Add a function to query buffer pool depth
-
-Add a debug function that allows users to query the number
-of buffers in a specific buffer pool.
-
-Signed-off-by: Roy Pledge
-Change-Id: Ie9a5f2e86d6a04ae61868bcc807121780c53cf6c
-Reviewed-on: http://git.am.freescale.net:8181/36069
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Stuart Yoder
-(cherry picked from commit 3c749d860592f62f6b219232580ca35fd1075337)
-
-dpio: Use normal cacheable non-shareable memory for qbman cena
-
-QBMan SWP CENA portal memory requires the memory to be cacheable
-and non-shareable.
-
-Signed-off-by: Haiying Wang
-Change-Id: I1c01cffe9ff2503fea2396d7cc761508f6e1ca85
-Reviewed-on: http://git.am.freescale.net:8181/35487
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Stuart Yoder
-(cherry picked from commit 2a7e1ede7e155d9219006999893912e0b029ce4c)
-
-fsl-dpio: Process frames in IRQ context
-
-Stop using threaded IRQs and move back to hardirq top-halves.
-This is the first patch of a small series adapting the DPIO and Ethernet
-code to these changes.
-
-Signed-off-by: Roy Pledge
-Tested-by: Ioana Radulescu
-Tested-by: Bogdan Hamciuc
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Bogdan Hamciuc
-Reviewed-by: Stuart Yoder
-[Stuart: split out dpaa-eth part separately]
-Signed-off-by: Stuart Yoder
-
-fsl-dpio: Fast DPIO object selection
-
-The DPIO service code had a couple of problems with performance impact:
- - The DPIO service object was protected by a global lock, within
- functions called from the fast datapath on multiple CPUs.
- - The DPIO service code would iterate unnecessarily through its linked
- list, while most of the time it looks for CPU-bound objects.
-
-Add a fast-access array pointing to the same dpaa_io objects as the DPIO
-service's linked list, used in non-preemptible contexts.
-Avoid list access/reordering if a specific CPU was requested. This
-greatly limits contention on the global service lock.
-Make explicit calls for per-CPU DPIO service objects if the current
-context permits (which is the case on most of the Ethernet fastpath).
-
-These changes incidentally fix a functional problem, too: according to
-the specification of struct dpaa_io_notification_ctx, registration should
-fail if the specification of 'desired_cpu' cannot be observed. Instead,
-dpaa_io_service_register() would keep searching for non-affine DPIO
-objects, even when that was not requested.
-
-Signed-off-by: Bogdan Hamciuc
-Change-Id: I2dd78bc56179f97d3fd78052a653456e5f89ed82
-Reviewed-on: http://git.am.freescale.net:8181/37689
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Roy Pledge
-Reviewed-by: Stuart Yoder
-
-DPIO: Implement a missing lock in DPIO
-
-Implement the missing DPIO service notification deregistration lock.
-
-Signed-off-by: Roy Pledge
-Change-Id: Ida9a4d00cc3a66bc215c260a8df2b197366736f7
-Reviewed-on: http://git.am.freescale.net:8181/38497
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Haiying Wang
-Reviewed-by: Stuart Yoder
-
-staging: fsl-mc: migrated dpio flibs for MC fw 8.0.0
-
-Signed-off-by: Stuart Yoder
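
The "Fast DPIO object selection" entry above replaces a locked list walk with a per-CPU array lookup on the fast path. Condensed into a sketch (io_service and service_select() are illustrative names; the locked slow path is elided):

#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/threads.h>

struct dpaa2_io;			/* opaque DPIO object */

struct io_service {
	/* filled at registration; one affine object per CPU */
	struct dpaa2_io *objects_by_cpu[NR_CPUS];
};

/* fast path: array lookup instead of a locked list walk */
static struct dpaa2_io *service_select(struct io_service *s, int cpu)
{
	if (cpu < 0 && !preemptible())
		cpu = smp_processor_id();
	if (cpu >= 0)
		return s->objects_by_cpu[cpu];
	return NULL;	/* preemptible and no CPU named: locked slow path */
}

The array and list reference the same objects, so the global lock is only needed when no CPU can be pinned down; that is exactly the contention reduction the commit message claims.
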
-
-fsl_qbman: Ensure SDQCR is only enabled if a channel is selected
-
-QMan HW considers an SDQCR command that does not indicate any
-channels to dequeue from to be an error. This change ensures that
-a NULL command is set in the case no channels are selected for dequeue.
-
-Signed-off-by: Roy Pledge
-Change-Id: I8861304881885db00df4a29d760848990d706c70
-Reviewed-on: http://git.am.freescale.net:8181/38498
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Haiying Wang
-Reviewed-by: Stuart Yoder
-
-flib: dpio: Fix compiler warning.
-
-Gcc takes the credit here.
-To be merged with other fixes on this branch.
-
-Signed-off-by: Bogdan Hamciuc
-Change-Id: If81f35ab3e8061aae1e03b72ab16a4c1dc390c3a
-Reviewed-on: http://git.am.freescale.net:8181/39148
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Stuart Yoder
-
-staging: fsl-mc: dpio: remove programming of MSIs in dpio driver
-
-This is now handled in the bus driver.
-
-Signed-off-by: Stuart Yoder
-
-fsl_qbman: Enable CDAN generation
-
-Enable CDAN notification registration in both QBMan and DPIO.
-
-Signed-off-by: Roy Pledge
-
-fsl_dpio: Implement API to dequeue from a channel
-
-Implement an API that allows users to dequeue from a channel.
-
-Signed-off-by: Roy Pledge
-
-fsl-dpio: Change dequeue command type
-
-For now CDANs don't work with priority precedence.
-
-Signed-off-by: Ioana Radulescu
-
-fsl-dpio: Export FQD context getter function
-
-Signed-off-by: Ioana Radulescu
-
-fsl_dpio: Fix DPIO polling thread logic
-
-Fix the DPIO polling logic and ensure the thread
-is not parked.
-
-Signed-off-by: Roy Pledge
-[Stuart: fixed typo in comment]
-Signed-off-by: Stuart Yoder
-
-fsl-dpio,qbman: Export functions
-
-A few of the functions used by the Ethernet driver were not exported
-yet. Needed in order to compile the Eth driver as a module.
-
-Signed-off-by: Ioana Radulescu
-Signed-off-by: Stuart Yoder
-
-fsl_qbman: Use proper accessors when reading QBMan portals
-
-Use accessors that properly byteswap when accessing QBMan portals.
-
-Signed-off-by: Roy Pledge
-
-fsl_qbman: Fix encoding of 64 byte values
-
-The QBMan driver encodes commands in 32 bit host endianness then
-converts to little endian before sending to HW. This means 64
-byte values need to be encoded so that the values will be
-correctly swapped when the commands are written to HW.
-
-Signed-off-by: Roy Pledge
-
-dpaa_fd: Add functions for SG entries endianness conversions
-
-Scatter gather entries are little endian at the hardware level.
-Add functions for converting the SG entry structure to cpu
-endianness to avoid incorrect behaviour on BE kernels.
-
-Signed-off-by: Ioana Radulescu
-
-fsl_dpaa: update header files with kernel-doc format
-
-Signed-off-by: Haiying Wang
-
-qbman: update header files to follow kernel-doc format
-
-Plus rename orp_id as opr_id based on the BG.
-
-Signed-off-by: Haiying Wang
-
-fsl/dpio: rename ldpaa to dpaa2
-
-Signed-off-by: Haiying Wang
-(Stuart: removed eth part out into separate patch)
-Signed-off-by: Stuart Yoder
-
-qbman_test: update qbman_test
-
-- Update to sync with latest change in qbman driver.
-- Add bpscn test case
-
-Signed-off-by: Haiying Wang
-
-fsl-dpio: add FLE (Frame List Entry) for FMT=dpaa_fd_list support
-
-Signed-off-by: Horia Geantă
-
-fsl-dpio: add accessors for FD[FRC]
-
-Signed-off-by: Horia Geantă
-
-fsl-dpio: add accessors for FD[FLC]
-
-Signed-off-by: Horia Geantă
-(Stuart: corrected typo in subject)
-Signed-off-by: Stuart Yoder
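
The SG endianness helpers mentioned above follow the usual __le accessor pattern: store fields in hardware (little-endian) byte order and swap on access, so BE kernels behave the same as LE ones. A sketch with an illustrative three-field entry layout (the real dpaa2 SG entry carries more fields):

#include <linux/types.h>
#include <asm/byteorder.h>

/* hardware view of an SG entry: all fields little-endian */
struct sg_entry {
	__le64 addr;
	__le32 len;
	__le32 bpid_offset;
};

static inline u64 sg_entry_addr(const struct sg_entry *sg)
{
	return le64_to_cpu(sg->addr);	/* no-op on LE, swap on BE */
}

static inline void sg_entry_set_addr(struct sg_entry *sg, u64 addr)
{
	sg->addr = cpu_to_le64(addr);
}

The __le64/__le32 annotations also let sparse flag any direct (unswapped) access, which is how mistakes of this kind are usually caught.
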
-
-fsl/dpio: dpaa2_fd: Add the comments for newly added APIs.
-
-Signed-off-by: Haiying Wang
-[Stuart: added fsl/dpio prefix on commit subject]
-Signed-off-by: Stuart Yoder
-
-fsl-dpio: rename dpaa_* structure to dpaa2_*
-
-Signed-off-by: Haiying Wang
-(Stuart: split eth and caam parts out into separate patches)
-Signed-off-by: Stuart Yoder
-
-fsl-dpio: update the header file with more description in comments
-
-plus fix some typos.
-
-Signed-off-by: Haiying Wang
-Signed-off-by: Roy Pledge
-
-fsl-dpio: fix Klocwork issues.
-
-Signed-off-by: Haiying Wang
-
-fsl_dpio: Fix kernel doc issues and add an overview
-
-Signed-off-by: Roy Pledge
-
-fsl-dpio,qbman: Prefer affine portal to acquire/release buffers
-
-The FQ enqueue/dequeue DPIO code attempts to select an affine QBMan
-portal in order to minimize contention (under the assumption that most
-of the calling code runs in affine contexts). Doing the same now for
-buffer acquire/release.
-
-Signed-off-by: Bogdan Hamciuc
-
-fsl-dpio: prefer affine QBMan portal in dpaa2_io_service_enqueue_fq
-
-Commit 7b057d9bc3d31 ("fsl-dpio: Fast DPIO object selection")
-took care of dpaa2_io_service_enqueue_qd, missing
-dpaa2_io_service_enqueue_fq.
-
-Cc: Bogdan Hamciuc
-Signed-off-by: Horia Geantă
-
-fsl/dpio: update the dpio flib files from the mc9.0.0 release
-
-Signed-off-by: Haiying Wang
-
-fsl/dpio: pass qman_version from dpio attributes to swp desc
-
-Signed-off-by: Haiying Wang
-
-fsl/dpio/qbman: Use qman version to determine dqrr size
-
-Signed-off-by: Haiying Wang
-
-fsl-dpio: Fix dequeue type enum values
-
-enum qbman_pull_type_e did not follow the volatile dequeue command
-specification, for which VERB=b'00 is a valid value (but of no
-interest to us).
-
-Signed-off-by: Bogdan Hamciuc
-Signed-off-by: Roy Pledge
-
-fsl-dpio: Volatile dequeue with priority precedence
-
-Use priority precedence to do volatile dequeue from channels, rather
-than active FQ precedence.
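
"Use qman version to determine dqrr size" refers to sizing the dequeue response ring from the portal revision that the DPIO attributes report. A sketch of that check (the constants mirror the convention later used by the upstream qbman portal code and should be treated as illustrative here):

#include <linux/types.h>

#define QMAN_REV_MASK	0xffff0000
#define QMAN_REV_4100	0x04010000

static unsigned int dqrr_size(u32 qman_version)
{
	/* 4.0 portals expose a 4-entry DQRR; 4.1 and later, 8 entries */
	return ((qman_version & QMAN_REV_MASK) < QMAN_REV_4100) ? 4 : 8;
}

This is why the preceding entry threads qman_version from the DPIO attributes into the software portal descriptor: the portal init code cannot size its rings without it.
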
- -Signed-off-by: Bogdan Hamciuc -Signed-off-by: Roy Pledge - -Signed-off-by: Stuart Yoder ---- - drivers/staging/fsl-mc/bus/Kconfig | 16 + - drivers/staging/fsl-mc/bus/Makefile | 3 + - drivers/staging/fsl-mc/bus/dpio/Makefile | 9 + - drivers/staging/fsl-mc/bus/dpio/dpio-drv.c | 405 +++++++ - drivers/staging/fsl-mc/bus/dpio/dpio-drv.h | 33 + - drivers/staging/fsl-mc/bus/dpio/dpio.c | 468 ++++++++ - drivers/staging/fsl-mc/bus/dpio/dpio_service.c | 801 +++++++++++++ - drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h | 460 ++++++++ - drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h | 184 +++ - drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h | 123 ++ - drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h | 753 ++++++++++++ - drivers/staging/fsl-mc/bus/dpio/qbman_debug.c | 846 ++++++++++++++ - drivers/staging/fsl-mc/bus/dpio/qbman_debug.h | 136 +++ - drivers/staging/fsl-mc/bus/dpio/qbman_portal.c | 1212 ++++++++++++++++++++ - drivers/staging/fsl-mc/bus/dpio/qbman_portal.h | 261 +++++ - drivers/staging/fsl-mc/bus/dpio/qbman_private.h | 173 +++ - drivers/staging/fsl-mc/bus/dpio/qbman_sys.h | 307 +++++ - drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h | 86 ++ - drivers/staging/fsl-mc/bus/dpio/qbman_test.c | 664 +++++++++++ - drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h | 774 +++++++++++++ - drivers/staging/fsl-mc/include/fsl_dpaa2_io.h | 619 ++++++++++ - 21 files changed, 8333 insertions(+) - create mode 100644 drivers/staging/fsl-mc/bus/dpio/Makefile - create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-drv.c - create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-drv.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio.c - create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio_service.c - create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.c - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_portal.c - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_portal.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_private.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_sys.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_test.c - create mode 100644 drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h - create mode 100644 drivers/staging/fsl-mc/include/fsl_dpaa2_io.h - ---- a/drivers/staging/fsl-mc/bus/Kconfig -+++ b/drivers/staging/fsl-mc/bus/Kconfig -@@ -28,3 +28,19 @@ config FSL_MC_RESTOOL - help - Driver that provides kernel support for the Freescale Management - Complex resource manager user-space tool. -+ -+config FSL_MC_DPIO -+ tristate "Freescale Data Path I/O (DPIO) driver" -+ depends on FSL_MC_BUS -+ help -+ Driver for Freescale Data Path I/O (DPIO) devices. -+ A DPIO device provides queue and buffer management facilities -+ for software to interact with other Data Path devices. This -+ driver does not expose the DPIO device individually, but -+ groups them under a service layer API. -+ -+config FSL_QBMAN_DEBUG -+ tristate "Freescale QBMAN Debug APIs" -+ depends on FSL_MC_DPIO -+ help -+ QBMan debug assistant APIs. 
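
With the Kconfig section above in place, enabling the service in a build is a matter of selecting the new symbols; per the hunk itself, FSL_MC_DPIO depends on FSL_MC_BUS and FSL_QBMAN_DEBUG depends on FSL_MC_DPIO. An illustrative .config fragment:

CONFIG_FSL_MC_BUS=y
CONFIG_FSL_MC_DPIO=y
CONFIG_FSL_QBMAN_DEBUG=y
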
---- a/drivers/staging/fsl-mc/bus/Makefile -+++ b/drivers/staging/fsl-mc/bus/Makefile -@@ -21,3 +21,6 @@ mc-bus-driver-objs := mc-bus.o \ - - # MC restool kernel support - obj-$(CONFIG_FSL_MC_RESTOOL) += mc-restool.o -+ -+# MC DPIO driver -+obj-$(CONFIG_FSL_MC_DPIO) += dpio/ ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/Makefile -@@ -0,0 +1,9 @@ -+# -+# Freescale DPIO driver -+# -+ -+obj-$(CONFIG_FSL_MC_BUS) += fsl-dpio-drv.o -+ -+fsl-dpio-drv-objs := dpio-drv.o dpio_service.o dpio.o qbman_portal.o -+ -+obj-$(CONFIG_FSL_QBMAN_DEBUG) += qbman_debug.o ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.c -@@ -0,0 +1,405 @@ -+/* Copyright 2014 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "../../include/mc.h" -+#include "../../include/fsl_dpaa2_io.h" -+ -+#include "fsl_qbman_portal.h" -+#include "fsl_dpio.h" -+#include "fsl_dpio_cmd.h" -+ -+#include "dpio-drv.h" -+ -+#define DPIO_DESCRIPTION "DPIO Driver" -+ -+MODULE_LICENSE("Dual BSD/GPL"); -+MODULE_AUTHOR("Freescale Semiconductor, Inc"); -+MODULE_DESCRIPTION(DPIO_DESCRIPTION); -+ -+#define MAX_DPIO_IRQ_NAME 16 /* Big enough for "FSL DPIO %d" */ -+ -+struct dpio_priv { -+ struct dpaa2_io *io; -+ char irq_name[MAX_DPIO_IRQ_NAME]; -+ struct task_struct *thread; -+}; -+ -+static int dpio_thread(void *data) -+{ -+ struct dpaa2_io *io = data; -+ -+ while (!kthread_should_stop()) { -+ int err = dpaa2_io_poll(io); -+ -+ if (err) { -+ pr_err("dpaa2_io_poll() failed\n"); -+ return err; -+ } -+ msleep(50); -+ } -+ return 0; -+} -+ -+static irqreturn_t dpio_irq_handler(int irq_num, void *arg) -+{ -+ struct device *dev = (struct device *)arg; -+ struct dpio_priv *priv = dev_get_drvdata(dev); -+ -+ return dpaa2_io_irq(priv->io); -+} -+ -+static void unregister_dpio_irq_handlers(struct fsl_mc_device *ls_dev) -+{ -+ int i; -+ struct fsl_mc_device_irq *irq; -+ int irq_count = ls_dev->obj_desc.irq_count; -+ -+ for (i = 0; i < irq_count; i++) { -+ irq = ls_dev->irqs[i]; -+ devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev); -+ } -+} -+ -+static int register_dpio_irq_handlers(struct fsl_mc_device *ls_dev, int cpu) -+{ -+ struct dpio_priv *priv; -+ unsigned int i; -+ int error; -+ struct fsl_mc_device_irq *irq; -+ unsigned int num_irq_handlers_registered = 0; -+ int irq_count = ls_dev->obj_desc.irq_count; -+ cpumask_t mask; -+ -+ priv = dev_get_drvdata(&ls_dev->dev); -+ -+ if (WARN_ON(irq_count != 1)) -+ return -EINVAL; -+ -+ for (i = 0; i < irq_count; i++) { -+ irq = ls_dev->irqs[i]; -+ error = devm_request_irq(&ls_dev->dev, -+ irq->msi_desc->irq, -+ dpio_irq_handler, -+ 0, -+ priv->irq_name, -+ &ls_dev->dev); -+ if (error < 0) { -+ dev_err(&ls_dev->dev, -+ "devm_request_irq() failed: %d\n", -+ error); -+ goto error_unregister_irq_handlers; -+ } -+ -+ /* Set the IRQ affinity */ -+ cpumask_clear(&mask); -+ cpumask_set_cpu(cpu, &mask); -+ if (irq_set_affinity(irq->msi_desc->irq, &mask)) -+ pr_err("irq_set_affinity failed irq %d cpu %d\n", -+ irq->msi_desc->irq, cpu); -+ -+ num_irq_handlers_registered++; -+ } -+ -+ return 0; -+ -+error_unregister_irq_handlers: -+ for (i = 0; i < num_irq_handlers_registered; i++) { -+ irq = ls_dev->irqs[i]; -+ devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, -+ &ls_dev->dev); -+ } -+ -+ return error; -+} -+ -+static int __cold -+dpaa2_dpio_probe(struct fsl_mc_device *ls_dev) -+{ -+ struct dpio_attr dpio_attrs; -+ struct dpaa2_io_desc desc; -+ struct dpio_priv *priv; -+ int err = -ENOMEM; -+ struct device *dev = &ls_dev->dev; -+ struct dpaa2_io *defservice; -+ bool irq_allocated = false; -+ static int next_cpu; -+ -+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); -+ if (!priv) -+ goto err_priv_alloc; -+ -+ dev_set_drvdata(dev, priv); -+ -+ err = fsl_mc_portal_allocate(ls_dev, 0, &ls_dev->mc_io); -+ if (err) { -+ dev_err(dev, "MC portal allocation failed\n"); -+ err = -EPROBE_DEFER; -+ goto err_mcportal; -+ } -+ -+ err = dpio_open(ls_dev->mc_io, 0, ls_dev->obj_desc.id, -+ &ls_dev->mc_handle); -+ if (err) { -+ dev_err(dev, "dpio_open() failed\n"); -+ goto err_open; -+ } -+ -+ err = dpio_get_attributes(ls_dev->mc_io, 0, ls_dev->mc_handle, -+ &dpio_attrs); -+ if (err) { -+ 
dev_err(dev, "dpio_get_attributes() failed %d\n", err); -+ goto err_get_attr; -+ } -+ err = dpio_enable(ls_dev->mc_io, 0, ls_dev->mc_handle); -+ if (err) { -+ dev_err(dev, "dpio_enable() failed %d\n", err); -+ goto err_get_attr; -+ } -+ pr_info("ce_paddr=0x%llx, ci_paddr=0x%llx, portalid=%d, prios=%d\n", -+ ls_dev->regions[0].start, -+ ls_dev->regions[1].start, -+ dpio_attrs.qbman_portal_id, -+ dpio_attrs.num_priorities); -+ -+ pr_info("ce_size=0x%llx, ci_size=0x%llx\n", -+ resource_size(&ls_dev->regions[0]), -+ resource_size(&ls_dev->regions[1])); -+ -+ desc.qman_version = dpio_attrs.qbman_version; -+ /* Build DPIO driver object out of raw MC object */ -+ desc.receives_notifications = dpio_attrs.num_priorities ? 1 : 0; -+ desc.has_irq = 1; -+ desc.will_poll = 1; -+ desc.has_8prio = dpio_attrs.num_priorities == 8 ? 1 : 0; -+ desc.cpu = next_cpu; -+ desc.stash_affinity = 1; /* TODO: Figure out how to determine -+ this setting - will we ever have non-affine -+ portals where we stash to a platform cache? */ -+ next_cpu = (next_cpu + 1) % num_active_cpus(); -+ desc.dpio_id = ls_dev->obj_desc.id; -+ desc.regs_cena = ioremap_cache_ns(ls_dev->regions[0].start, -+ resource_size(&ls_dev->regions[0])); -+ desc.regs_cinh = ioremap(ls_dev->regions[1].start, -+ resource_size(&ls_dev->regions[1])); -+ -+ err = fsl_mc_allocate_irqs(ls_dev); -+ if (err) { -+ dev_err(dev, "DPIO fsl_mc_allocate_irqs failed\n"); -+ desc.has_irq = 0; -+ } else { -+ irq_allocated = true; -+ -+ snprintf(priv->irq_name, MAX_DPIO_IRQ_NAME, "FSL DPIO %d", -+ desc.dpio_id); -+ -+ err = register_dpio_irq_handlers(ls_dev, desc.cpu); -+ if (err) -+ desc.has_irq = 0; -+ } -+ -+ priv->io = dpaa2_io_create(&desc); -+ if (!priv->io) { -+ dev_err(dev, "DPIO setup failed\n"); -+ goto err_dpaa2_io_create; -+ } -+ -+ /* If no irq then go to poll mode */ -+ if (desc.has_irq == 0) { -+ dev_info(dev, "Using polling mode for DPIO %d\n", -+ desc.dpio_id); -+ /* goto err_register_dpio_irq; */ -+ /* TEMP: Start polling if IRQ could not -+ be registered. 
This will go away once -+ KVM support for MSI is present */ -+ if (irq_allocated == true) -+ fsl_mc_free_irqs(ls_dev); -+ -+ if (desc.stash_affinity) -+ priv->thread = kthread_create_on_cpu(dpio_thread, -+ priv->io, -+ desc.cpu, -+ "dpio_aff%u"); -+ else -+ priv->thread = -+ kthread_create(dpio_thread, -+ priv->io, -+ "dpio_non%u", -+ dpio_attrs.qbman_portal_id); -+ if (IS_ERR(priv->thread)) { -+ dev_err(dev, "DPIO thread failure\n"); -+ err = PTR_ERR(priv->thread); -+ goto err_dpaa_thread; -+ } -+ kthread_unpark(priv->thread); -+ wake_up_process(priv->thread); -+ } -+ -+ defservice = dpaa2_io_default_service(); -+ err = dpaa2_io_service_add(defservice, priv->io); -+ dpaa2_io_down(defservice); -+ if (err) { -+ dev_err(dev, "DPIO add-to-service failed\n"); -+ goto err_dpaa2_io_add; -+ } -+ -+ dev_info(dev, "dpio: probed object %d\n", ls_dev->obj_desc.id); -+ dev_info(dev, " receives_notifications = %d\n", -+ desc.receives_notifications); -+ dev_info(dev, " has_irq = %d\n", desc.has_irq); -+ dpio_close(ls_dev->mc_io, 0, ls_dev->mc_handle); -+ fsl_mc_portal_free(ls_dev->mc_io); -+ return 0; -+ -+err_dpaa2_io_add: -+ unregister_dpio_irq_handlers(ls_dev); -+/* TEMP: To be restored once polling is removed -+ err_register_dpio_irq: -+ fsl_mc_free_irqs(ls_dev); -+*/ -+err_dpaa_thread: -+err_dpaa2_io_create: -+ dpio_disable(ls_dev->mc_io, 0, ls_dev->mc_handle); -+err_get_attr: -+ dpio_close(ls_dev->mc_io, 0, ls_dev->mc_handle); -+err_open: -+ fsl_mc_portal_free(ls_dev->mc_io); -+err_mcportal: -+ dev_set_drvdata(dev, NULL); -+ devm_kfree(dev, priv); -+err_priv_alloc: -+ return err; -+} -+ -+/* -+ * Tear down interrupts for a given DPIO object -+ */ -+static void dpio_teardown_irqs(struct fsl_mc_device *ls_dev) -+{ -+ /* (void)disable_dpio_irqs(ls_dev); */ -+ unregister_dpio_irq_handlers(ls_dev); -+ fsl_mc_free_irqs(ls_dev); -+} -+ -+static int __cold -+dpaa2_dpio_remove(struct fsl_mc_device *ls_dev) -+{ -+ struct device *dev; -+ struct dpio_priv *priv; -+ int err; -+ -+ dev = &ls_dev->dev; -+ priv = dev_get_drvdata(dev); -+ -+ /* there is no implementation yet for pulling a DPIO object out of a -+ * running service (and they're currently always running). 
-+ */ -+ dev_crit(dev, "DPIO unplugging is broken, the service holds onto it\n"); -+ -+ if (priv->thread) -+ kthread_stop(priv->thread); -+ else -+ dpio_teardown_irqs(ls_dev); -+ -+ err = fsl_mc_portal_allocate(ls_dev, 0, &ls_dev->mc_io); -+ if (err) { -+ dev_err(dev, "MC portal allocation failed\n"); -+ goto err_mcportal; -+ } -+ -+ err = dpio_open(ls_dev->mc_io, 0, ls_dev->obj_desc.id, -+ &ls_dev->mc_handle); -+ if (err) { -+ dev_err(dev, "dpio_open() failed\n"); -+ goto err_open; -+ } -+ -+ dev_set_drvdata(dev, NULL); -+ dpaa2_io_down(priv->io); -+ -+ err = 0; -+ -+ dpio_disable(ls_dev->mc_io, 0, ls_dev->mc_handle); -+ dpio_close(ls_dev->mc_io, 0, ls_dev->mc_handle); -+err_open: -+ fsl_mc_portal_free(ls_dev->mc_io); -+err_mcportal: -+ return err; -+} -+ -+static const struct fsl_mc_device_match_id dpaa2_dpio_match_id_table[] = { -+ { -+ .vendor = FSL_MC_VENDOR_FREESCALE, -+ .obj_type = "dpio", -+ .ver_major = DPIO_VER_MAJOR, -+ .ver_minor = DPIO_VER_MINOR -+ }, -+ { .vendor = 0x0 } -+}; -+ -+static struct fsl_mc_driver dpaa2_dpio_driver = { -+ .driver = { -+ .name = KBUILD_MODNAME, -+ .owner = THIS_MODULE, -+ }, -+ .probe = dpaa2_dpio_probe, -+ .remove = dpaa2_dpio_remove, -+ .match_id_table = dpaa2_dpio_match_id_table -+}; -+ -+static int dpio_driver_init(void) -+{ -+ int err; -+ -+ err = dpaa2_io_service_driver_init(); -+ if (!err) { -+ err = fsl_mc_driver_register(&dpaa2_dpio_driver); -+ if (err) -+ dpaa2_io_service_driver_exit(); -+ } -+ return err; -+} -+static void dpio_driver_exit(void) -+{ -+ fsl_mc_driver_unregister(&dpaa2_dpio_driver); -+ dpaa2_io_service_driver_exit(); -+} -+module_init(dpio_driver_init); -+module_exit(dpio_driver_exit); ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.h -@@ -0,0 +1,33 @@ -+/* Copyright 2014 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+int dpaa2_io_service_driver_init(void); -+void dpaa2_io_service_driver_exit(void); ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/dpio.c -@@ -0,0 +1,468 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#include "../../include/mc-sys.h" -+#include "../../include/mc-cmd.h" -+#include "fsl_dpio.h" -+#include "fsl_dpio_cmd.h" -+ -+int dpio_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpio_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPIO_CMD_OPEN(cmd, dpio_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpio_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpio_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPIO_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpio_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_IS_ENABLED, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpio_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpio_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPIO_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int 
dpio_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpio_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPIO_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpio_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPIO_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPIO_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpio_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPIO_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPIO_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpio_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPIO_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpio_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPIO_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpio_attr *attr) -+{ -+ 
struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t sdest) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST, -+ cmd_flags, -+ token); -+ DPIO_CMD_SET_STASHING_DEST(cmd, sdest); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_stashing_destination(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t *sdest) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_STASHING_DEST, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_STASHING_DEST(cmd, *sdest); -+ -+ return 0; -+} -+ -+int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id, -+ uint8_t *channel_index) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL, -+ cmd_flags, -+ token); -+ DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, *channel_index); -+ -+ return 0; -+} -+ -+int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL, -+ cmd_flags, -+ token); -+ DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/dpio_service.c -@@ -0,0 +1,801 @@ -+/* Copyright 2014 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include "fsl_qbman_portal.h" -+#include "../../include/mc.h" -+#include "../../include/fsl_dpaa2_io.h" -+#include "fsl_dpio.h" -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "dpio-drv.h" -+#include "qbman_debug.h" -+ -+#define UNIMPLEMENTED() pr_err("FOO: %s unimplemented!\n", __func__) -+ -+#define MAGIC_SERVICE 0xabcd9876 -+#define MAGIC_OBJECT 0x1234fedc -+ -+struct dpaa2_io { -+ /* If MAGIC_SERVICE, this is a group of objects, use the 'service' part -+ * of the union. If MAGIC_OBJECT, use the 'object' part of the union. If -+ * it's neither, something got corrupted. This is mainly to satisfy -+ * dpaa2_io_from_registration(), which dereferences a caller- -+ * instantiated struct and so warrants a bug-checking step - hence the -+ * magic rather than a boolean. -+ */ -+ unsigned int magic; -+ atomic_t refs; -+ union { -+ struct dpaa2_io_service { -+ spinlock_t lock; -+ struct list_head list; -+ /* for targeted dpaa2_io selection */ -+ struct dpaa2_io *objects_by_cpu[NR_CPUS]; -+ cpumask_t cpus_notifications; -+ cpumask_t cpus_stashing; -+ int has_nonaffine; -+ /* slight hack. record the special case of the -+ * "default service", because that's the case where we -+ * need to avoid a kfree() ... */ -+ int is_defservice; -+ } service; -+ struct dpaa2_io_object { -+ struct dpaa2_io_desc dpio_desc; -+ struct qbman_swp_desc swp_desc; -+ struct qbman_swp *swp; -+ /* If the object is part of a service, this is it (and -+ * 'node' is linked into the service's list) */ -+ struct dpaa2_io *service; -+ struct list_head node; -+ /* Interrupt mask, as used with -+ * qbman_swp_interrupt_[gs]et_vanish(). This isn't -+ * locked, because the higher layer is driving all -+ * "ingress" processing. */ -+ uint32_t irq_mask; -+ /* As part of simplifying assumptions, we provide an -+ * irq-safe lock for each type of DPIO operation that -+ * isn't innately lockless. The selection algorithms -+ * (which are simplified) require this, whereas -+ * eventually adherence to cpu-affinity will presumably -+ * relax the locking requirements. 
*/ -+ spinlock_t lock_mgmt_cmd; -+ spinlock_t lock_notifications; -+ struct list_head notifications; -+ } object; -+ }; -+}; -+ -+struct dpaa2_io_store { -+ unsigned int max; -+ dma_addr_t paddr; -+ struct dpaa2_dq *vaddr; -+ void *alloced_addr; /* the actual return from kmalloc as it may -+ be adjusted for alignment purposes */ -+ unsigned int idx; /* position of the next-to-be-returned entry */ -+ struct qbman_swp *swp; /* portal used to issue VDQCR */ -+ struct device *dev; /* device used for DMA mapping */ -+}; -+ -+static struct dpaa2_io def_serv; -+ -+/**********************/ -+/* Internal functions */ -+/**********************/ -+ -+static void service_init(struct dpaa2_io *d, int is_defservice) -+{ -+ struct dpaa2_io_service *s = &d->service; -+ -+ d->magic = MAGIC_SERVICE; -+ atomic_set(&d->refs, 1); -+ spin_lock_init(&s->lock); -+ INIT_LIST_HEAD(&s->list); -+ cpumask_clear(&s->cpus_notifications); -+ cpumask_clear(&s->cpus_stashing); -+ s->has_nonaffine = 0; -+ s->is_defservice = is_defservice; -+} -+ -+/* Selection algorithms, stupid ones at that. These are to handle the case where -+ * the given dpaa2_io is a service, by choosing the non-service dpaa2_io within -+ * it to use. -+ */ -+static struct dpaa2_io *_service_select_by_cpu_slow(struct dpaa2_io_service *ss, -+ int cpu) -+{ -+ struct dpaa2_io *o; -+ unsigned long irqflags; -+ -+ spin_lock_irqsave(&ss->lock, irqflags); -+ /* TODO: this is about the dumbest and slowest selection algorithm you -+ * could imagine. (We're looking for something working first, and -+ * something efficient second...) -+ */ -+ list_for_each_entry(o, &ss->list, object.node) -+ if (o->object.dpio_desc.cpu == cpu) -+ goto found; -+ -+ /* No joy. Try the first nonaffine portal (bleurgh) */ -+ if (ss->has_nonaffine) -+ list_for_each_entry(o, &ss->list, object.node) -+ if (!o->object.dpio_desc.stash_affinity) -+ goto found; -+ -+ /* No joy. Try the first object. Told you it was horrible. */ -+ if (!list_empty(&ss->list)) -+ o = list_entry(ss->list.next, struct dpaa2_io, object.node); -+ else -+ o = NULL; -+ -+found: -+ spin_unlock_irqrestore(&ss->lock, irqflags); -+ return o; -+} -+ -+static struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d, int cpu) -+{ -+ struct dpaa2_io_service *ss; -+ unsigned long irqflags; -+ -+ if (!d) -+ d = &def_serv; -+ else if (d->magic == MAGIC_OBJECT) -+ return d; -+ BUG_ON(d->magic != MAGIC_SERVICE); -+ -+ ss = &d->service; -+ -+ /* If cpu==-1, choose the current cpu, with no guarantees about -+ * potentially being migrated away. -+ */ -+ if (unlikely(cpu < 0)) { -+ spin_lock_irqsave(&ss->lock, irqflags); -+ cpu = smp_processor_id(); -+ spin_unlock_irqrestore(&ss->lock, irqflags); -+ -+ return _service_select_by_cpu_slow(ss, cpu); -+ } -+ -+ /* If a specific cpu was requested, pick it up immediately */ -+ return ss->objects_by_cpu[cpu]; -+} -+ -+static inline struct dpaa2_io *service_select_any(struct dpaa2_io *d) -+{ -+ struct dpaa2_io_service *ss; -+ struct dpaa2_io *o; -+ unsigned long irqflags; -+ -+ if (!d) -+ d = &def_serv; -+ else if (d->magic == MAGIC_OBJECT) -+ return d; -+ BUG_ON(d->magic != MAGIC_SERVICE); -+ -+ /* -+ * Lock the service, looking for the first DPIO object in the list, -+ * ignore everything else about that DPIO, and choose it to do the -+ * operation! As a post-selection step, move the DPIO to the end of -+ * the list. 
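A caller that wants to avoid this per-call scan can pin a portal up front. A minimal sketch, assuming a populated service 's'; it uses dpaa2_io_service_get_persistent(), which is defined later in this file and takes a reference on the object it returns:

	struct dpaa2_io *io;
	int err = dpaa2_io_service_get_persistent(s, smp_processor_id(), &io);

	if (!err) {
		/* ... issue enqueue/dequeue work against 'io' ... */
		dpaa2_io_down(io);	/* drop the reference taken above */
	}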
It should improve load-balancing a little, although it -+ * might also incur a performance hit, given that the lock is *global* -+ * and this may be called on the fast-path... -+ */ -+ ss = &d->service; -+ spin_lock_irqsave(&ss->lock, irqflags); -+ if (!list_empty(&ss->list)) { -+ o = list_entry(ss->list.next, struct dpaa2_io, object.node); -+ list_del(&o->object.node); -+ list_add_tail(&o->object.node, &ss->list); -+ } else -+ o = NULL; -+ spin_unlock_irqrestore(&ss->lock, irqflags); -+ return o; -+} -+ -+/* If the context is not preemptible, select the service affine to the -+ * current cpu. Otherwise, "select any". -+ */ -+static inline struct dpaa2_io *_service_select(struct dpaa2_io *d) -+{ -+ struct dpaa2_io *temp = d; -+ -+ if (likely(!preemptible())) { -+ d = service_select_by_cpu(d, smp_processor_id()); -+ if (likely(d)) -+ return d; -+ } -+ return service_select_any(temp); -+} -+ -+/**********************/ -+/* Exported functions */ -+/**********************/ -+ -+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc) -+{ -+ struct dpaa2_io *ret = kmalloc(sizeof(*ret), GFP_KERNEL); -+ struct dpaa2_io_object *o; -+ -+ if (!ret) -+ return NULL; -+ o = &ret->object; -+ ret->magic = MAGIC_OBJECT; -+ atomic_set(&ret->refs, 1); -+ o->dpio_desc = *desc; -+ o->swp_desc.cena_bar = o->dpio_desc.regs_cena; -+ o->swp_desc.cinh_bar = o->dpio_desc.regs_cinh; -+ o->swp_desc.qman_version = o->dpio_desc.qman_version; -+ o->swp = qbman_swp_init(&o->swp_desc); -+ o->service = NULL; -+ if (!o->swp) { -+ kfree(ret); -+ return NULL; -+ } -+ INIT_LIST_HEAD(&o->node); -+ spin_lock_init(&o->lock_mgmt_cmd); -+ spin_lock_init(&o->lock_notifications); -+ INIT_LIST_HEAD(&o->notifications); -+ if (!o->dpio_desc.has_irq) -+ qbman_swp_interrupt_set_vanish(o->swp, 0xffffffff); -+ else { -+ /* For now only enable DQRR interrupts */ -+ qbman_swp_interrupt_set_trigger(o->swp, -+ QBMAN_SWP_INTERRUPT_DQRI); -+ } -+ qbman_swp_interrupt_clear_status(o->swp, 0xffffffff); -+ if (o->dpio_desc.receives_notifications) -+ qbman_swp_push_set(o->swp, 0, 1); -+ return ret; -+} -+EXPORT_SYMBOL(dpaa2_io_create); -+ -+struct dpaa2_io *dpaa2_io_create_service(void) -+{ -+ struct dpaa2_io *ret = kmalloc(sizeof(*ret), GFP_KERNEL); -+ -+ if (ret) -+ service_init(ret, 0); -+ return ret; -+} -+EXPORT_SYMBOL(dpaa2_io_create_service); -+ -+struct dpaa2_io *dpaa2_io_default_service(void) -+{ -+ atomic_inc(&def_serv.refs); -+ return &def_serv; -+} -+EXPORT_SYMBOL(dpaa2_io_default_service); -+ -+void dpaa2_io_down(struct dpaa2_io *d) -+{ -+ if (!atomic_dec_and_test(&d->refs)) -+ return; -+ if (d->magic == MAGIC_SERVICE) { -+ BUG_ON(!list_empty(&d->service.list)); -+ if (d->service.is_defservice) -+ /* avoid the kfree()!
*/ -+ return; -+ } else { -+ BUG_ON(d->magic != MAGIC_OBJECT); -+ BUG_ON(d->object.service); -+ BUG_ON(!list_empty(&d->object.notifications)); -+ } -+ kfree(d); -+} -+EXPORT_SYMBOL(dpaa2_io_down); -+ -+int dpaa2_io_service_add(struct dpaa2_io *s, struct dpaa2_io *o) -+{ -+ struct dpaa2_io_service *ss = &s->service; -+ struct dpaa2_io_object *oo = &o->object; -+ int res = -EINVAL; -+ -+ if ((s->magic != MAGIC_SERVICE) || (o->magic != MAGIC_OBJECT)) -+ return res; -+ atomic_inc(&o->refs); -+ atomic_inc(&s->refs); -+ spin_lock(&ss->lock); -+ /* 'obj' must not already be associated with a service */ -+ if (!oo->service) { -+ oo->service = s; -+ list_add(&oo->node, &ss->list); -+ if (oo->dpio_desc.receives_notifications) { -+ cpumask_set_cpu(oo->dpio_desc.cpu, -+ &ss->cpus_notifications); -+ /* Update the fast-access array */ -+ ss->objects_by_cpu[oo->dpio_desc.cpu] = -+ container_of(oo, struct dpaa2_io, object); -+ } -+ if (oo->dpio_desc.stash_affinity) -+ cpumask_set_cpu(oo->dpio_desc.cpu, -+ &ss->cpus_stashing); -+ if (!oo->dpio_desc.stash_affinity) -+ ss->has_nonaffine = 1; -+ /* success */ -+ res = 0; -+ } -+ spin_unlock(&ss->lock); -+ if (res) { -+ dpaa2_io_down(s); -+ dpaa2_io_down(o); -+ } -+ return res; -+} -+EXPORT_SYMBOL(dpaa2_io_service_add); -+ -+int dpaa2_io_get_descriptor(struct dpaa2_io *obj, struct dpaa2_io_desc *desc) -+{ -+ if (obj->magic == MAGIC_SERVICE) -+ return -EINVAL; -+ BUG_ON(obj->magic != MAGIC_OBJECT); -+ *desc = obj->object.dpio_desc; -+ return 0; -+} -+EXPORT_SYMBOL(dpaa2_io_get_descriptor); -+ -+#define DPAA_POLL_MAX 32 -+ -+int dpaa2_io_poll(struct dpaa2_io *obj) -+{ -+ const struct dpaa2_dq *dq; -+ struct qbman_swp *swp; -+ int max = 0; -+ -+ if (obj->magic != MAGIC_OBJECT) -+ return -EINVAL; -+ swp = obj->object.swp; -+ dq = qbman_swp_dqrr_next(swp); -+ while (dq) { -+ if (qbman_result_is_SCN(dq)) { -+ struct dpaa2_io_notification_ctx *ctx; -+ uint64_t q64; -+ -+ q64 = qbman_result_SCN_ctx(dq); -+ ctx = (void *)q64; -+ ctx->cb(ctx); -+ } else -+ pr_crit("Unrecognised/ignored DQRR entry\n"); -+ qbman_swp_dqrr_consume(swp, dq); -+ ++max; -+ if (max > DPAA_POLL_MAX) -+ return 0; -+ dq = qbman_swp_dqrr_next(swp); -+ } -+ return 0; -+} -+EXPORT_SYMBOL(dpaa2_io_poll); -+ -+int dpaa2_io_irq(struct dpaa2_io *obj) -+{ -+ struct qbman_swp *swp; -+ uint32_t status; -+ -+ if (obj->magic != MAGIC_OBJECT) -+ return -EINVAL; -+ swp = obj->object.swp; -+ status = qbman_swp_interrupt_read_status(swp); -+ if (!status) -+ return IRQ_NONE; -+ dpaa2_io_poll(obj); -+ qbman_swp_interrupt_clear_status(swp, status); -+ qbman_swp_interrupt_set_inhibit(swp, 0); -+ return IRQ_HANDLED; -+} -+EXPORT_SYMBOL(dpaa2_io_irq); -+ -+int dpaa2_io_pause_poll(struct dpaa2_io *obj) -+{ -+ UNIMPLEMENTED(); -+ return -EINVAL; -+} -+EXPORT_SYMBOL(dpaa2_io_pause_poll); -+ -+int dpaa2_io_resume_poll(struct dpaa2_io *obj) -+{ -+ UNIMPLEMENTED(); -+ return -EINVAL; -+} -+EXPORT_SYMBOL(dpaa2_io_resume_poll); -+ -+void dpaa2_io_service_notifications(struct dpaa2_io *s, cpumask_t *mask) -+{ -+ struct dpaa2_io_service *ss = &s->service; -+ -+ BUG_ON(s->magic != MAGIC_SERVICE); -+ cpumask_copy(mask, &ss->cpus_notifications); -+} -+EXPORT_SYMBOL(dpaa2_io_service_notifications); -+ -+void dpaa2_io_service_stashing(struct dpaa2_io *s, cpumask_t *mask) -+{ -+ struct dpaa2_io_service *ss = &s->service; -+ -+ BUG_ON(s->magic != MAGIC_SERVICE); -+ cpumask_copy(mask, &ss->cpus_stashing); -+} -+EXPORT_SYMBOL(dpaa2_io_service_stashing); -+ -+int dpaa2_io_service_has_nonaffine(struct dpaa2_io *s) -+{ -+ struct 
dpaa2_io_service *ss = &s->service; -+ -+ BUG_ON(s->magic != MAGIC_SERVICE); -+ return ss->has_nonaffine; -+} -+EXPORT_SYMBOL(dpaa2_io_service_has_nonaffine); -+ -+int dpaa2_io_service_register(struct dpaa2_io *d, -+ struct dpaa2_io_notification_ctx *ctx) -+{ -+ unsigned long irqflags; -+ -+ d = service_select_by_cpu(d, ctx->desired_cpu); -+ if (!d) -+ return -ENODEV; -+ ctx->dpio_id = d->object.dpio_desc.dpio_id; -+ ctx->qman64 = (uint64_t)ctx; -+ ctx->dpio_private = d; -+ spin_lock_irqsave(&d->object.lock_notifications, irqflags); -+ list_add(&ctx->node, &d->object.notifications); -+ spin_unlock_irqrestore(&d->object.lock_notifications, irqflags); -+ if (ctx->is_cdan) -+ /* Enable the generation of CDAN notifications */ -+ qbman_swp_CDAN_set_context_enable(d->object.swp, -+ (uint16_t)ctx->id, -+ ctx->qman64); -+ return 0; -+} -+EXPORT_SYMBOL(dpaa2_io_service_register); -+ -+int dpaa2_io_service_deregister(struct dpaa2_io *service, -+ struct dpaa2_io_notification_ctx *ctx) -+{ -+ struct dpaa2_io *d = ctx->dpio_private; -+ unsigned long irqflags; -+ -+ if (!service) -+ service = &def_serv; -+ BUG_ON((service != d) && (service != d->object.service)); -+ if (ctx->is_cdan) -+ qbman_swp_CDAN_disable(d->object.swp, -+ (uint16_t)ctx->id); -+ spin_lock_irqsave(&d->object.lock_notifications, irqflags); -+ list_del(&ctx->node); -+ spin_unlock_irqrestore(&d->object.lock_notifications, irqflags); -+ return 0; -+} -+EXPORT_SYMBOL(dpaa2_io_service_deregister); -+ -+int dpaa2_io_service_rearm(struct dpaa2_io *d, -+ struct dpaa2_io_notification_ctx *ctx) -+{ -+ unsigned long irqflags; -+ int err; -+ -+ d = _service_select(d); -+ if (!d) -+ return -ENODEV; -+ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags); -+ if (ctx->is_cdan) -+ err = qbman_swp_CDAN_enable(d->object.swp, (uint16_t)ctx->id); -+ else -+ err = qbman_swp_fq_schedule(d->object.swp, ctx->id); -+ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags); -+ return err; -+} -+EXPORT_SYMBOL(dpaa2_io_service_rearm); -+ -+int dpaa2_io_from_registration(struct dpaa2_io_notification_ctx *ctx, -+ struct dpaa2_io **io) -+{ -+ struct dpaa2_io_notification_ctx *tmp; -+ struct dpaa2_io *d = ctx->dpio_private; -+ unsigned long irqflags; -+ int ret = 0; -+ -+ BUG_ON(d->magic != MAGIC_OBJECT); -+ /* Iterate the notifications associated with 'd' looking for a match. If -+ * not, we've been passed an unregistered ctx! 
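The register/deregister/rearm calls above are what a DPCON or FQ owner drives. A sketch of a CDAN registration, assuming an illustrative channel id 'ch_id', a service 's' and a callback 'my_cb'; the fields shown are the ones dpaa2_io_service_register() consumes:

	static void my_cb(struct dpaa2_io_notification_ctx *ctx)
	{
		/* invoked from dpaa2_io_poll() when the notification fires */
	}

	struct dpaa2_io_notification_ctx nctx = {
		.cb		= my_cb,
		.is_cdan	= 1,
		.id		= ch_id,	/* channel id, since is_cdan=1 */
		.desired_cpu	= -1,		/* no CPU preference */
	};
	int err = dpaa2_io_service_register(s, &nctx);

After each delivered notification, the owner re-arms with dpaa2_io_service_rearm(s, &nctx).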
*/ -+ spin_lock_irqsave(&d->object.lock_notifications, irqflags); -+ list_for_each_entry(tmp, &d->object.notifications, node) -+ if (tmp == ctx) -+ goto found; -+ ret = -EINVAL; -+found: -+ spin_unlock_irqrestore(&d->object.lock_notifications, irqflags); -+ if (!ret) { -+ atomic_inc(&d->refs); -+ *io = d; -+ } -+ return ret; -+} -+EXPORT_SYMBOL(dpaa2_io_from_registration); -+ -+int dpaa2_io_service_get_persistent(struct dpaa2_io *service, int cpu, -+ struct dpaa2_io **ret) -+{ -+ if (cpu == -1) -+ *ret = service_select_any(service); -+ else -+ *ret = service_select_by_cpu(service, cpu); -+ if (*ret) { -+ atomic_inc(&(*ret)->refs); -+ return 0; -+ } -+ return -ENODEV; -+} -+EXPORT_SYMBOL(dpaa2_io_service_get_persistent); -+ -+int dpaa2_io_service_pull_fq(struct dpaa2_io *d, uint32_t fqid, -+ struct dpaa2_io_store *s) -+{ -+ struct qbman_pull_desc pd; -+ int err; -+ -+ qbman_pull_desc_clear(&pd); -+ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1); -+ qbman_pull_desc_set_numframes(&pd, (uint8_t)s->max); -+ qbman_pull_desc_set_fq(&pd, fqid); -+ d = _service_select(d); -+ if (!d) -+ return -ENODEV; -+ s->swp = d->object.swp; -+ err = qbman_swp_pull(d->object.swp, &pd); -+ if (err) -+ s->swp = NULL; -+ return err; -+} -+EXPORT_SYMBOL(dpaa2_io_service_pull_fq); -+ -+int dpaa2_io_service_pull_channel(struct dpaa2_io *d, uint32_t channelid, -+ struct dpaa2_io_store *s) -+{ -+ struct qbman_pull_desc pd; -+ int err; -+ -+ qbman_pull_desc_clear(&pd); -+ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1); -+ qbman_pull_desc_set_numframes(&pd, (uint8_t)s->max); -+ qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio); -+ d = _service_select(d); -+ if (!d) -+ return -ENODEV; -+ s->swp = d->object.swp; -+ err = qbman_swp_pull(d->object.swp, &pd); -+ if (err) -+ s->swp = NULL; -+ return err; -+} -+EXPORT_SYMBOL(dpaa2_io_service_pull_channel); -+ -+int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, -+ uint32_t fqid, -+ const struct dpaa2_fd *fd) -+{ -+ struct qbman_eq_desc ed; -+ -+ d = _service_select(d); -+ if (!d) -+ return -ENODEV; -+ qbman_eq_desc_clear(&ed); -+ qbman_eq_desc_set_no_orp(&ed, 0); -+ qbman_eq_desc_set_fq(&ed, fqid); -+ return qbman_swp_enqueue(d->object.swp, &ed, -+ (const struct qbman_fd *)fd); -+} -+EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq); -+ -+int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, -+ uint32_t qdid, uint8_t prio, uint16_t qdbin, -+ const struct dpaa2_fd *fd) -+{ -+ struct qbman_eq_desc ed; -+ -+ d = _service_select(d); -+ if (!d) -+ return -ENODEV; -+ qbman_eq_desc_clear(&ed); -+ qbman_eq_desc_set_no_orp(&ed, 0); -+ qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio); -+ return qbman_swp_enqueue(d->object.swp, &ed, -+ (const struct qbman_fd *)fd); -+} -+EXPORT_SYMBOL(dpaa2_io_service_enqueue_qd); -+ -+int dpaa2_io_service_release(struct dpaa2_io *d, -+ uint32_t bpid, -+ const uint64_t *buffers, -+ unsigned int num_buffers) -+{ -+ struct qbman_release_desc rd; -+ -+ d = _service_select(d); -+ if (!d) -+ return -ENODEV; -+ qbman_release_desc_clear(&rd); -+ qbman_release_desc_set_bpid(&rd, bpid); -+ return qbman_swp_release(d->object.swp, &rd, buffers, num_buffers); -+} -+EXPORT_SYMBOL(dpaa2_io_service_release); -+ -+int dpaa2_io_service_acquire(struct dpaa2_io *d, -+ uint32_t bpid, -+ uint64_t *buffers, -+ unsigned int num_buffers) -+{ -+ unsigned long irqflags; -+ int err; -+ -+ d = _service_select(d); -+ if (!d) -+ return -ENODEV; -+ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags); -+ err = qbman_swp_acquire(d->object.swp, bpid, buffers, 
num_buffers); -+ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags); -+ return err; -+} -+EXPORT_SYMBOL(dpaa2_io_service_acquire); -+ -+struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames, -+ struct device *dev) -+{ -+ struct dpaa2_io_store *ret = kmalloc(sizeof(*ret), GFP_KERNEL); -+ size_t size; -+ -+ BUG_ON(!max_frames || (max_frames > 16)); -+ if (!ret) -+ return NULL; -+ ret->max = max_frames; -+ size = max_frames * sizeof(struct dpaa2_dq) + 64; -+ ret->alloced_addr = kmalloc(size, GFP_KERNEL); -+ if (!ret->alloced_addr) { -+ kfree(ret); -+ return NULL; -+ } -+ ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64); -+ ret->paddr = dma_map_single(dev, ret->vaddr, -+ sizeof(struct dpaa2_dq) * max_frames, -+ DMA_FROM_DEVICE); -+ if (dma_mapping_error(dev, ret->paddr)) { -+ kfree(ret->alloced_addr); -+ kfree(ret); -+ return NULL; -+ } -+ ret->idx = 0; -+ ret->dev = dev; -+ return ret; -+} -+EXPORT_SYMBOL(dpaa2_io_store_create); -+ -+void dpaa2_io_store_destroy(struct dpaa2_io_store *s) -+{ -+ dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max, -+ DMA_FROM_DEVICE); -+ kfree(s->alloced_addr); -+ kfree(s); -+} -+EXPORT_SYMBOL(dpaa2_io_store_destroy); -+ -+struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last) -+{ -+ int match; -+ struct dpaa2_dq *ret = &s->vaddr[s->idx]; -+ -+ match = qbman_result_has_new_result(s->swp, ret); -+ if (!match) { -+ *is_last = 0; -+ return NULL; -+ } -+ BUG_ON(!qbman_result_is_DQ(ret)); -+ s->idx++; -+ if (dpaa2_dq_is_pull_complete(ret)) { -+ *is_last = 1; -+ s->idx = 0; -+ /* If we get an empty dequeue result to terminate a zero-results -+ * vdqcr, return NULL to the caller rather than expecting him to -+ * check non-NULL results every time. */ -+ if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME)) -+ ret = NULL; -+ } else -+ *is_last = 0; -+ return ret; -+} -+EXPORT_SYMBOL(dpaa2_io_store_next); -+ -+#ifdef CONFIG_FSL_QBMAN_DEBUG -+int dpaa2_io_query_fq_count(struct dpaa2_io *d, uint32_t fqid, -+ uint32_t *fcnt, uint32_t *bcnt) -+{ -+ struct qbman_attr state; -+ struct qbman_swp *swp; -+ unsigned long irqflags; -+ int ret; -+ -+ d = service_select_any(d); -+ if (!d) -+ return -ENODEV; -+ -+ swp = d->object.swp; -+ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags); -+ ret = qbman_fq_query_state(swp, fqid, &state); -+ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags); -+ if (ret) -+ return ret; -+ *fcnt = qbman_fq_state_frame_count(&state); -+ *bcnt = qbman_fq_state_byte_count(&state); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dpaa2_io_query_fq_count); -+ -+int dpaa2_io_query_bp_count(struct dpaa2_io *d, uint32_t bpid, -+ uint32_t *num) -+{ -+ struct qbman_attr state; -+ struct qbman_swp *swp; -+ unsigned long irqflags; -+ int ret; -+ -+ d = service_select_any(d); -+ if (!d) -+ return -ENODEV; -+ -+ swp = d->object.swp; -+ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags); -+ ret = qbman_bp_query(swp, bpid, &state); -+ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags); -+ if (ret) -+ return ret; -+ *num = qbman_bp_info_num_free_bufs(&state); -+ return 0; -+} -+EXPORT_SYMBOL(dpaa2_io_query_bp_count); -+ -+#endif -+ -+/* module init/exit hooks called from dpio-drv.c. These are declared in -+ * dpio-drv.h. 
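Combining the store calls above with the pull interface earlier in this file, a service-level pull dequeue might look like the following sketch; 'dev', 's' and 'fqid' are assumed inputs, and a real caller would bound the polling loop instead of spinning:

	struct dpaa2_io_store *st = dpaa2_io_store_create(8, dev);
	struct dpaa2_dq *dq;
	int is_last = 0;

	if (!st)
		return -ENOMEM;
	if (!dpaa2_io_service_pull_fq(s, fqid, st)) {
		do {
			/* NULL with !is_last means the next result has not
			 * been DMA'd to the store yet; poll again */
			dq = dpaa2_io_store_next(st, &is_last);
			if (dq) {
				/* ... process one dequeue result ... */
			}
		} while (!is_last);
	}
	dpaa2_io_store_destroy(st);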
-+ */ -+int dpaa2_io_service_driver_init(void) -+{ -+ service_init(&def_serv, 1); -+ return 0; -+} -+ -+void dpaa2_io_service_driver_exit(void) -+{ -+ if (atomic_read(&def_serv.refs) != 1) -+ pr_err("default DPIO service leaves dangling DPIO objects!\n"); -+} ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h -@@ -0,0 +1,460 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPIO_H -+#define __FSL_DPIO_H -+ -+/* Data Path I/O Portal API -+ * Contains initialization APIs and runtime control APIs for DPIO -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpio_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpio_id: DPIO unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpio_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpio_id, -+ uint16_t *token); -+ -+/** -+ * dpio_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise. 
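The token returned by dpio_open() is what authenticates every other command in this header, so the usual shape is open, operate, close. A minimal sketch, assuming an MC portal 'mc_io' and a 'dpio_id' taken from the DPL; a cmd_flags of 0 requests no special MC_CMD_FLAG_ behaviour:

	uint16_t token;
	int err;

	err = dpio_open(mc_io, 0, dpio_id, &token);
	if (err)
		return err;
	err = dpio_enable(mc_io, 0, token);	/* declared further below */
	/* ... runtime use of the portal ... */
	dpio_close(mc_io, 0, token);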
-+ */ -+int dpio_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * enum dpio_channel_mode - DPIO notification channel mode -+ * @DPIO_NO_CHANNEL: No support for notification channel -+ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a -+ * dedicated channel in the DPIO; user should point the queue's -+ * destination in the relevant interface to this DPIO -+ */ -+enum dpio_channel_mode { -+ DPIO_NO_CHANNEL = 0, -+ DPIO_LOCAL_CHANNEL = 1, -+}; -+ -+/** -+ * struct dpio_cfg - Structure representing DPIO configuration -+ * @channel_mode: Notification channel mode -+ * @num_priorities: Number of priorities for the notification channel (1-8); -+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL' -+ */ -+struct dpio_cfg { -+ enum dpio_channel_mode channel_mode; -+ uint8_t num_priorities; -+}; -+ -+/** -+ * dpio_create() - Create the DPIO object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPIO object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpio_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpio_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpio_destroy() - Destroy the DPIO object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpio_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpio_enable() - Enable the DPIO, allow I/O portal operations. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpio_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpio_disable() - Disable the DPIO, stop any I/O portal operation. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpio_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpio_is_enabled() - Check if the DPIO is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpio_reset() - Reset the DPIO, returns the object to initial state. 
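For objects that are not declared in the DPL, dpio_create() consumes the dpio_cfg above. A sketch, reusing the assumed 'mc_io' portal; eight priorities is the documented maximum for a local channel:

	struct dpio_cfg cfg = {
		.channel_mode	= DPIO_LOCAL_CHANNEL,
		.num_priorities	= 8,
	};
	uint16_t token;
	int err = dpio_create(mc_io, 0, &cfg, &token);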
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpio_set_stashing_destination() - Set the stashing destination. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @sdest: stashing destination value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t sdest); -+ -+/** -+ * dpio_get_stashing_destination() - Get the stashing destination.. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @sdest: Returns the stashing destination value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_stashing_destination(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t *sdest); -+ -+/** -+ * dpio_add_static_dequeue_channel() - Add a static dequeue channel. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @dpcon_id: DPCON object ID -+ * @channel_index: Returned channel index to be used in qbman API -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id, -+ uint8_t *channel_index); -+ -+/** -+ * dpio_remove_static_dequeue_channel() - Remove a static dequeue channel. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @dpcon_id: DPCON object ID -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id); -+ -+/** -+ * DPIO IRQ Index and Events -+ */ -+ -+/** -+ * Irq software-portal index -+ */ -+#define DPIO_IRQ_SWP_INDEX 0 -+ -+/** -+ * struct dpio_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpio_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpio_set_irq() - Set IRQ information for the DPIO to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpio_irq_cfg *irq_cfg); -+ -+/** -+ * dpio_get_irq() - Get IRQ information from the DPIO. 
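The stashing and static-dequeue commands above are typically issued once at portal setup. A sketch, assuming an open 'token', a 'dpcon_id' for the channel and a platform-specific stashing destination 'sdest':

	uint8_t channel_index;
	int err;

	err = dpio_set_stashing_destination(mc_io, 0, token, sdest);
	if (!err)
		err = dpio_add_static_dequeue_channel(mc_io, 0, token,
						      dpcon_id, &channel_index);
	/* channel_index now identifies this channel to the qbman API */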
-+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpio_irq_cfg *irq_cfg); -+ -+/** -+ * dpio_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable controls the -+ * overall interrupt state. If the interrupt is disabled, no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpio_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpio_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpio_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpio_get_irq_status() - Get the current status of any pending interrupts.
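Because each interrupt index multiplexes up to 32 causes, arming an interrupt is a two-step sequence: unmask the causes of interest, then set the overall enable. A sketch for the software-portal index; the assumption that the cause of interest sits in bit 0 is illustrative, since the real cause layout is hardware-defined:

	int err;

	err = dpio_set_irq_mask(mc_io, 0, token, DPIO_IRQ_SWP_INDEX, 0x1);
	if (!err)
		err = dpio_set_irq_enable(mc_io, 0, token,
					  DPIO_IRQ_SWP_INDEX, 1);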
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpio_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpio_attr - Structure representing DPIO attributes -+ * @id: DPIO object ID -+ * @version: DPIO version -+ * @qbman_portal_ce_offset: offset of the software portal cache-enabled area -+ * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area -+ * @qbman_portal_id: Software portal ID -+ * @channel_mode: Notification channel mode -+ * @num_priorities: Number of priorities for the notification channel (1-8); -+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL' -+ * @qbman_version: QBMAN version -+ */ -+struct dpio_attr { -+ int id; -+ /** -+ * struct version - DPIO version -+ * @major: DPIO major version -+ * @minor: DPIO minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint64_t qbman_portal_ce_offset; -+ uint64_t qbman_portal_ci_offset; -+ uint16_t qbman_portal_id; -+ enum dpio_channel_mode channel_mode; -+ uint8_t num_priorities; -+ uint32_t qbman_version; -+}; -+ -+/** -+ * dpio_get_attributes() - Retrieve DPIO attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpio_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpio_attr *attr); -+#endif /* __FSL_DPIO_H */ ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h -@@ -0,0 +1,184 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPIO_CMD_H -+#define _FSL_DPIO_CMD_H -+ -+/* DPIO Version */ -+#define DPIO_VER_MAJOR 3 -+#define DPIO_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPIO_CMDID_CLOSE 0x800 -+#define DPIO_CMDID_OPEN 0x803 -+#define DPIO_CMDID_CREATE 0x903 -+#define DPIO_CMDID_DESTROY 0x900 -+ -+#define DPIO_CMDID_ENABLE 0x002 -+#define DPIO_CMDID_DISABLE 0x003 -+#define DPIO_CMDID_GET_ATTR 0x004 -+#define DPIO_CMDID_RESET 0x005 -+#define DPIO_CMDID_IS_ENABLED 0x006 -+ -+#define DPIO_CMDID_SET_IRQ 0x010 -+#define DPIO_CMDID_GET_IRQ 0x011 -+#define DPIO_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPIO_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPIO_CMDID_SET_IRQ_MASK 0x014 -+#define DPIO_CMDID_GET_IRQ_MASK 0x015 -+#define DPIO_CMDID_GET_IRQ_STATUS 0x016 -+#define DPIO_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPIO_CMDID_SET_STASHING_DEST 0x120 -+#define DPIO_CMDID_GET_STASHING_DEST 0x121 -+#define DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL 0x122 -+#define DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL 0x123 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_OPEN(cmd, dpio_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpio_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 2, enum dpio_channel_mode, \ -+ cfg->channel_mode);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->num_priorities);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, 
uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_portal_id);\ -+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\ -+ MC_RSP_OP(cmd, 0, 56, 4, enum dpio_channel_mode, attr->channel_mode);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->qbman_portal_ce_offset);\ -+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, attr->qbman_portal_ci_offset);\ -+ MC_RSP_OP(cmd, 3, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 3, 16, 16, uint16_t, attr->version.minor);\ -+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->qbman_version);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_SET_STASHING_DEST(cmd, sdest) \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, sdest) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_STASHING_DEST(cmd, sdest) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, sdest) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, channel_index) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, channel_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) -+#endif /* _FSL_DPIO_CMD_H */ ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h -@@ -0,0 +1,123 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
-+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_QBMAN_BASE_H -+#define _FSL_QBMAN_BASE_H -+ -+/** -+ * struct qbman_block_desc - qbman block descriptor structure -+ * -+ * Descriptor for a QBMan instance on the SoC. On partitions/targets that do not -+ * control this QBMan instance, these values may simply be place-holders. The -+ * idea is simply that we be able to distinguish between them, eg. so that SWP -+ * descriptors can identify which QBMan instance they belong to. -+ */ -+struct qbman_block_desc { -+ void *ccsr_reg_bar; /* CCSR register map */ -+ int irq_rerr; /* Recoverable error interrupt line */ -+ int irq_nrerr; /* Non-recoverable error interrupt line */ -+}; -+ -+/** -+ * struct qbman_swp_desc - qbman software portal descriptor structure -+ * -+ * Descriptor for a QBMan software portal, expressed in terms that make sense to -+ * the user context. Ie. on MC, this information is likely to be true-physical, -+ * and instantiated statically at compile-time. On GPP, this information is -+ * likely to be obtained via "discovery" over a partition's "layerscape bus" -+ * (ie. in response to a MC portal command), and would take into account any -+ * virtualisation of the GPP user's address space and/or interrupt numbering. -+ */ -+struct qbman_swp_desc { -+ const struct qbman_block_desc *block; /* The QBMan instance */ -+ void *cena_bar; /* Cache-enabled portal register map */ -+ void *cinh_bar; /* Cache-inhibited portal register map */ -+ uint32_t qman_version; -+}; -+ -+/* Driver object for managing a QBMan portal */ -+struct qbman_swp; -+ -+/** -+ * struct qbman_fd - basic structure for qbman frame descriptor -+ * -+ * Place-holder for FDs, we represent it via the simplest form that we need for -+ * now. Different overlays may be needed to support different options, etc. (It -+ * is impractical to define One True Struct, because the resulting encoding -+ * routines (lots of read-modify-writes) would be worst-case performance whether -+ * or not circumstances required them.)
-+ * -+ * Note, as with all data-structures exchanged between software and hardware (be -+ * they located in the portal register map or DMA'd to and from main-memory), -+ * the driver ensures that the caller of the driver API sees the data-structures -+ * in host-endianness. "struct qbman_fd" is no exception. The 32-bit words -+ * contained within this structure are represented in host-endianness, even if -+ * hardware always treats them as little-endian. As such, if any of these fields -+ * are interpreted in a binary (rather than numerical) fashion by hardware -+ * blocks (eg. accelerators), then the user should be careful. We illustrate -+ * with an example; -+ * -+ * Suppose the desired behaviour of an accelerator is controlled by the "frc" -+ * field of the FDs that are sent to it. Suppose also that the behaviour desired -+ * by the user corresponds to an "frc" value which is expressed as the literal -+ * sequence of bytes 0xfe, 0xed, 0xab, and 0xba. So "frc" should be the 32-bit -+ * value in which 0xfe is the first byte and 0xba is the last byte, and as -+ * hardware is little-endian, this amounts to a 32-bit "value" of 0xbaabedfe. If -+ * the software is little-endian also, this can simply be achieved by setting -+ * frc=0xbaabedfe. On the other hand, if software is big-endian, it should set -+ * frc=0xfeedabba! The best way of avoiding trouble with this sort of thing is -+ * to treat the 32-bit words as numerical values, in which the offset of a field -+ * from the beginning of the first byte (as required or generated by hardware) -+ * is numerically encoded by a left-shift (ie. by raising the field to a -+ * corresponding power of 2). Ie. in the current example, software could set -+ * "frc" in the following way, and it would work correctly on both little-endian -+ * and big-endian operation; -+ * fd.frc = (0xfe << 0) | (0xed << 8) | (0xab << 16) | (0xba << 24); -+ */ -+struct qbman_fd { -+ union { -+ uint32_t words[8]; -+ struct qbman_fd_simple { -+ uint32_t addr_lo; -+ uint32_t addr_hi; -+ uint32_t len; -+ /* offset in the MS 16 bits, BPID in the LS 16 bits */ -+ uint32_t bpid_offset; -+ uint32_t frc; /* frame context */ -+ /* "err", "va", "cbmt", "asal", [...] */ -+ uint32_t ctrl; -+ /* flow context */ -+ uint32_t flc_lo; -+ uint32_t flc_hi; -+ } simple; -+ }; -+}; -+ -+#endif /* !_FSL_QBMAN_BASE_H */ ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h -@@ -0,0 +1,753 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version.
-+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_QBMAN_PORTAL_H -+#define _FSL_QBMAN_PORTAL_H -+ -+#include "fsl_qbman_base.h" -+ -+/** -+ * qbman_swp_init() - Create a functional object representing the given -+ * QBMan portal descriptor. -+ * @d: the given qbman swp descriptor -+ * -+ * Return qbman_swp portal object for success, NULL if the object cannot -+ * be created. -+ */ -+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d); -+/** -+ * qbman_swp_finish() - Destroy a functional object representing -+ * the given QBMan portal descriptor. -+ * @p: the qbman_swp object to be destroyed. -+ * -+ */ -+void qbman_swp_finish(struct qbman_swp *p); -+ -+/** -+ * qbman_swp_get_desc() - Get the descriptor of the given portal object. -+ * @p: the given portal object. -+ * -+ * Return the descriptor for this portal. -+ */ -+const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p); -+ -+ /**************/ -+ /* Interrupts */ -+ /**************/ -+ -+/* See the QBMan driver API documentation for details on the interrupt -+ * mechanisms. */ -+#define QBMAN_SWP_INTERRUPT_EQRI ((uint32_t)0x00000001) -+#define QBMAN_SWP_INTERRUPT_EQDI ((uint32_t)0x00000002) -+#define QBMAN_SWP_INTERRUPT_DQRI ((uint32_t)0x00000004) -+#define QBMAN_SWP_INTERRUPT_RCRI ((uint32_t)0x00000008) -+#define QBMAN_SWP_INTERRUPT_RCDI ((uint32_t)0x00000010) -+#define QBMAN_SWP_INTERRUPT_VDCI ((uint32_t)0x00000020) -+ -+/** -+ * qbman_swp_interrupt_get_vanish() -+ * qbman_swp_interrupt_set_vanish() - Get/Set the data in software portal -+ * interrupt status disable register. -+ * @p: the given software portal object. -+ * @mask: The mask to set in SWP_IDSR register. -+ * -+ * Return the settings in SWP_ISDR register for Get function. -+ */ -+uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p); -+void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask); -+ -+/** -+ * qbman_swp_interrupt_read_status() -+ * qbman_swp_interrupt_clear_status() - Get/Set the data in software portal -+ * interrupt status register. -+ * @p: the given software portal object. -+ * @mask: The mask to set in SWP_ISR register. -+ * -+ * Return the settings in SWP_ISR register for Get function. -+ * -+ */ -+uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p); -+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask); -+ -+/** -+ * qbman_swp_interrupt_get_trigger() -+ * qbman_swp_interrupt_set_trigger() - Get/Set the data in software portal -+ * interrupt enable register. -+ * @p: the given software portal object. -+ * @mask: The mask to set in SWP_IER register. -+ * -+ * Return the settings in SWP_IER register for Get function.
-+ */ -+uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p); -+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask); -+ -+/** -+ * qbman_swp_interrupt_get_inhibit() -+ * qbman_swp_interrupt_set_inhibit() - Get/Set the data in software portal -+ * interrupt inhibit register. -+ * @p: the given software portal object. -+ * @mask: The mask to set in SWP_IIR register. -+ * -+ * Return the settings in SWP_IIR register for Get function. -+ */ -+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p); -+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit); -+ -+ /************/ -+ /* Dequeues */ -+ /************/ -+ -+/* See the QBMan driver API documentation for details on the enqueue -+ * mechanisms. NB: the use of a 'dpaa2_' prefix for this type is because it is -+ * primarily used by the "DPIO" layer that sits above (and hides) the QBMan -+ * driver. The structure is defined in the DPIO interface, but to avoid circular -+ * dependencies we just pre/re-declare it here opaquely. */ -+struct dpaa2_dq; -+ -+/* ------------------- */ -+/* Push-mode dequeuing */ -+/* ------------------- */ -+ -+/** -+ * qbman_swp_push_get() - Get the push dequeue setup. -+ * @p: the software portal object. -+ * @channel_idx: the channel index to query. -+ * @enabled: returned boolean to show whether the push dequeue is enabled for -+ * the given channel. -+ */ -+void qbman_swp_push_get(struct qbman_swp *, uint8_t channel_idx, int *enabled); -+/** -+ * qbman_swp_push_set() - Enable or disable push dequeue. -+ * @p: the software portal object. -+ * @channel_idx: the channel index. -+ * @enable: enable or disable push dequeue. -+ * -+ * The user of a portal can enable and disable push-mode dequeuing of up to 16 -+ * channels independently. It does not specify this toggling by channel IDs, but -+ * rather by specifying the index (from 0 to 15) that has been mapped to the -+ * desired channel. -+ */ -+void qbman_swp_push_set(struct qbman_swp *, uint8_t channel_idx, int enable); -+ -+/* ------------------- */ -+/* Pull-mode dequeuing */ -+/* ------------------- */ -+ -+/** -+ * struct qbman_pull_desc - the structure for pull dequeue descriptor -+ */ -+struct qbman_pull_desc { -+ uint32_t dont_manipulate_directly[6]; -+}; -+ -+enum qbman_pull_type_e { -+ /* dequeue with priority precedence, respect intra-class scheduling */ -+ qbman_pull_type_prio = 1, -+ /* dequeue with active FQ precedence, respect ICS */ -+ qbman_pull_type_active, -+ /* dequeue with active FQ precedence, no ICS */ -+ qbman_pull_type_active_noics -+}; -+ -+/** -+ * qbman_pull_desc_clear() - Clear the contents of a descriptor to -+ * default/starting state. -+ * @d: the pull dequeue descriptor to be cleared. -+ */ -+void qbman_pull_desc_clear(struct qbman_pull_desc *d); -+ -+/** -+ * qbman_pull_desc_set_storage() - Set the pull dequeue storage -+ * @d: the pull dequeue descriptor to be set. -+ * @storage: the pointer of the memory to store the dequeue result. -+ * @storage_phys: the physical address of the storage memory. -+ * @stash: to indicate whether write allocate is enabled. -+ * -+ * If not called, or if called with 'storage' as NULL, the result pull dequeues -+ * will produce results to DQRR. If 'storage' is non-NULL, then results are -+ * produced to the given memory location (using the physical/DMA address which -+ * the caller provides in 'storage_phys'), and 'stash' controls whether or not -+ * those writes to main-memory express a cache-warming attribute.
-+ */ -+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d, -+ struct dpaa2_dq *storage, -+ dma_addr_t storage_phys, -+ int stash); -+/** -+ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued. -+ * @d: the pull dequeue descriptor to be set. -+ * @numframes: number of frames to be set, must be between 1 and 16, inclusive. -+ */ -+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *, uint8_t numframes); -+ -+/** -+ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues. -+ * @fqid: the frame queue index of the given FQ. -+ * -+ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues. -+ * @wqid: composed of channel id and wqid within the channel. -+ * @dct: the dequeue command type. -+ * -+ * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command -+ * dequeues. -+ * @chid: the channel id to be dequeued. -+ * @dct: the dequeue command type. -+ * -+ * Exactly one of the following descriptor "actions" should be set. (Calling any -+ * one of these will replace the effect of any prior call to one of these.) -+ * - pull dequeue from the given frame queue (FQ) -+ * - pull dequeue from any FQ in the given work queue (WQ) -+ * - pull dequeue from any FQ in any WQ in the given channel -+ */ -+void qbman_pull_desc_set_fq(struct qbman_pull_desc *, uint32_t fqid); -+void qbman_pull_desc_set_wq(struct qbman_pull_desc *, uint32_t wqid, -+ enum qbman_pull_type_e dct); -+void qbman_pull_desc_set_channel(struct qbman_pull_desc *, uint32_t chid, -+ enum qbman_pull_type_e dct); -+ -+/** -+ * qbman_swp_pull() - Issue the pull dequeue command -+ * @s: the software portal object. -+ * @d: the software portal descriptor which has been configured with -+ * the set of qbman_pull_desc_set_*() calls. -+ * -+ * Return 0 for success, and -EBUSY if the software portal is not ready -+ * to do pull dequeue. -+ */ -+int qbman_swp_pull(struct qbman_swp *, struct qbman_pull_desc *d); -+ -+/* -------------------------------- */ -+/* Polling DQRR for dequeue results */ -+/* -------------------------------- */ -+ -+/** -+ * qbman_swp_dqrr_next() - Get a valid DQRR entry. -+ * @s: the software portal object. -+ * -+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry -+ * only once, so repeated calls can return a sequence of DQRR entries, without -+ * requiring they be consumed immediately or in any particular order. -+ */ -+const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s); -+ -+/** -+ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from -+ * qbman_swp_dqrr_next(). -+ * @s: the software portal object. -+ * @dq: the DQRR entry to be consumed. -+ */ -+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq); -+ -+/* ------------------------------------------------- */ -+/* Polling user-provided storage for dequeue results */ -+/* ------------------------------------------------- */ -+/** -+ * qbman_result_has_new_result() - Check and get the dequeue response from the -+ * dq storage memory set in pull dequeue command -+ * @s: the software portal object. -+ * @dq: the dequeue result read from the memory. -+ * -+ * Only used for user-provided storage of dequeue results, not DQRR.
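A pull descriptor that never had storage set produces its results to DQRR, so the two halves above combine directly. A sketch, assuming a portal 'swp' and frame queue 'fqid'; entries may land after a delay, so a real caller keeps polling until the command completes rather than assuming one pass drains everything:

	struct qbman_pull_desc pd;
	const struct dpaa2_dq *dq;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_numframes(&pd, 4);
	qbman_pull_desc_set_fq(&pd, fqid);
	if (!qbman_swp_pull(swp, &pd)) {
		while ((dq = qbman_swp_dqrr_next(swp)) != NULL)
			qbman_swp_dqrr_consume(swp, dq);
	}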
For -+ * efficiency purposes, the driver will perform any required endianness -+ * conversion to ensure that the user's dequeue result storage is in host-endian -+ * format (whether or not that is the same as the little-endian format that -+ * hardware DMA'd to the user's storage). As such, once the user has called -+ * qbman_result_has_new_result() and been returned a valid dequeue result, -+ * they should not call it again on the same memory location (except of course -+ * if another dequeue command has been executed to produce a new result to that -+ * location). -+ * -+ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid -+ * dequeue result. -+ */ -+int qbman_result_has_new_result(struct qbman_swp *, -+ const struct dpaa2_dq *); -+ -+/* -------------------------------------------------------- */ -+/* Parsing dequeue entries (DQRR and user-provided storage) */ -+/* -------------------------------------------------------- */ -+ -+/** -+ * qbman_result_is_DQ() - check the dequeue result is a dequeue response or not -+ * @dq: the dequeue result to be checked. -+ * -+ * DQRR entries may contain non-dequeue results, ie. notifications -+ */ -+int qbman_result_is_DQ(const struct dpaa2_dq *); -+ -+/** -+ * qbman_result_is_SCN() - Check the dequeue result is notification or not -+ * @dq: the dequeue result to be checked. -+ * -+ * All the non-dequeue results (FQDAN/CDAN/CSCN/...) are "state change -+ * notifications" of one type or another. Some APIs apply to all of them, of the -+ * form qbman_result_SCN_***(). -+ */ -+static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq) -+{ -+ return !qbman_result_is_DQ(dq); -+} -+ -+/** -+ * Recognise different notification types, only required if the user allows for -+ * these to occur, and cares about them when they do. -+ */ -+int qbman_result_is_FQDAN(const struct dpaa2_dq *); -+ /* FQ Data Availability */ -+int qbman_result_is_CDAN(const struct dpaa2_dq *); -+ /* Channel Data Availability */ -+int qbman_result_is_CSCN(const struct dpaa2_dq *); -+ /* Congestion State Change */ -+int qbman_result_is_BPSCN(const struct dpaa2_dq *); -+ /* Buffer Pool State Change */ -+int qbman_result_is_CGCU(const struct dpaa2_dq *); -+ /* Congestion Group Count Update */ -+/* Frame queue state change notifications; (FQDAN in theory counts too as it -+ * leaves a FQ parked, but it is primarily a data availability notification) */ -+int qbman_result_is_FQRN(const struct dpaa2_dq *); /* Retirement */ -+int qbman_result_is_FQRNI(const struct dpaa2_dq *); -+ /* Retirement Immediate */ -+int qbman_result_is_FQPN(const struct dpaa2_dq *); /* Park */ -+ -+/* NB: for parsing dequeue results (when "is_DQ" is TRUE), use the higher-layer -+ * dpaa2_dq_*() functions. */ -+ -+/* State-change notifications (FQDAN/CDAN/CSCN/...). */ -+/** -+ * qbman_result_SCN_state() - Get the state field in State-change notification -+ */ -+uint8_t qbman_result_SCN_state(const struct dpaa2_dq *); -+/** -+ * qbman_result_SCN_rid() - Get the resource id in State-change notification -+ */ -+uint32_t qbman_result_SCN_rid(const struct dpaa2_dq *); -+/** -+ * qbman_result_SCN_ctx() - Get the context data in State-change notification -+ */ -+uint64_t qbman_result_SCN_ctx(const struct dpaa2_dq *); -+/** -+ * qbman_result_SCN_state_in_mem() - Get the state field in State-change -+ * notification which is written to memory instead of DQRR. 
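-+ *
-+ * For illustration, a polling loop built from the calls above might dispatch
-+ * portal entries roughly as follows, where handle_frame() and
-+ * handle_congestion() stand in for hypothetical caller-side logic (a real
-+ * frame handler would use the higher-layer dpaa2_dq_*() accessors):
-+ *
-+ *	const struct dpaa2_dq *dq = qbman_swp_dqrr_next(s);
-+ *
-+ *	if (dq) {
-+ *		if (qbman_result_is_DQ(dq))
-+ *			handle_frame(dq);
-+ *		else if (qbman_result_is_CSCN(dq))
-+ *			handle_congestion(qbman_result_SCN_rid(dq));
-+ *		qbman_swp_dqrr_consume(s, dq);
-+ *	}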
-+ */
-+uint8_t qbman_result_SCN_state_in_mem(const struct dpaa2_dq *);
-+/**
-+ * qbman_result_SCN_rid_in_mem() - Get the resource id in State-change
-+ * notification which is written to memory instead of DQRR.
-+ */
-+uint32_t qbman_result_SCN_rid_in_mem(const struct dpaa2_dq *);
-+
-+/* Type-specific "resource IDs". Mainly for illustration purposes, though it
-+ * also gives the appropriate type widths. */
-+#define qbman_result_FQDAN_fqid(dq) qbman_result_SCN_rid(dq)
-+#define qbman_result_FQRN_fqid(dq) qbman_result_SCN_rid(dq)
-+#define qbman_result_FQRNI_fqid(dq) qbman_result_SCN_rid(dq)
-+#define qbman_result_FQPN_fqid(dq) qbman_result_SCN_rid(dq)
-+#define qbman_result_CDAN_cid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
-+#define qbman_result_CSCN_cgid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
-+
-+/**
-+ * qbman_result_bpscn_bpid() - Get the bpid from BPSCN
-+ *
-+ * Return the buffer pool id.
-+ */
-+uint16_t qbman_result_bpscn_bpid(const struct dpaa2_dq *);
-+/**
-+ * qbman_result_bpscn_has_free_bufs() - Check whether there are free
-+ * buffers in the pool from BPSCN.
-+ *
-+ * Return non-zero if the pool has free buffers, otherwise zero.
-+ */
-+int qbman_result_bpscn_has_free_bufs(const struct dpaa2_dq *);
-+/**
-+ * qbman_result_bpscn_is_depleted() - Check BPSCN to see whether the
-+ * buffer pool is depleted.
-+ *
-+ * Return the status of buffer pool depletion.
-+ */
-+int qbman_result_bpscn_is_depleted(const struct dpaa2_dq *);
-+/**
-+ * qbman_result_bpscn_is_surplus() - Check BPSCN to see whether the buffer
-+ * pool is surplus or not.
-+ *
-+ * Return the status of buffer pool surplus.
-+ */
-+int qbman_result_bpscn_is_surplus(const struct dpaa2_dq *);
-+/**
-+ * qbman_result_bpscn_ctx() - Get the BPSCN CTX from BPSCN message
-+ *
-+ * Return the BPSCN context.
-+ */
-+uint64_t qbman_result_bpscn_ctx(const struct dpaa2_dq *);
-+
-+/* Parsing CGCU */
-+/**
-+ * qbman_result_cgcu_cgid() - Get the CGCU resource id, i.e. cgid
-+ *
-+ * Return the CGCU resource id.
-+ */
-+uint16_t qbman_result_cgcu_cgid(const struct dpaa2_dq *);
-+/**
-+ * qbman_result_cgcu_icnt() - Get the I_CNT from CGCU
-+ *
-+ * Return instantaneous count in the CGCU notification.
-+ */
-+uint64_t qbman_result_cgcu_icnt(const struct dpaa2_dq *);
-+
-+ /************/
-+ /* Enqueues */
-+ /************/
-+/**
-+ * struct qbman_eq_desc - structure of enqueue descriptor
-+ */
-+struct qbman_eq_desc {
-+	uint32_t dont_manipulate_directly[8];
-+};
-+
-+/**
-+ * struct qbman_eq_response - structure of enqueue response
-+ */
-+struct qbman_eq_response {
-+	uint32_t dont_manipulate_directly[16];
-+};
-+
-+/**
-+ * qbman_eq_desc_clear() - Clear the contents of a descriptor to
-+ * default/starting state.
-+ */
-+void qbman_eq_desc_clear(struct qbman_eq_desc *);
-+
-+/* Exactly one of the following descriptor "actions" should be set. (Calling
-+ * any one of these will replace the effect of any prior call to one of these.)
-+ * - enqueue without order-restoration
-+ * - enqueue with order-restoration
-+ * - fill a hole in the order-restoration sequence, without any enqueue
-+ * - advance NESN (Next Expected Sequence Number), without any enqueue
-+ * 'respond_success' indicates whether an enqueue response should be DMA'd
-+ * after success (otherwise a response is DMA'd only after failure).
-+ * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to
-+ * be enqueued.
-+ */
-+/**
-+ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
-+ * @d: the enqueue descriptor.
-+ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
-+ * rejections returned on a FQ.
-+ */
-+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
-+
-+/**
-+ * qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor
-+ * @d: the enqueue descriptor.
-+ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
-+ * rejections returned on a FQ.
-+ * @opr_id: the order point record id.
-+ * @seqnum: the order restoration sequence number.
-+ * @incomplete: indicates that other fragments using the same sequence number
-+ * are yet to be enqueued.
-+ */
-+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
-+			   uint32_t opr_id, uint32_t seqnum, int incomplete);
-+
-+/**
-+ * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence
-+ * without any enqueue
-+ * @d: the enqueue descriptor.
-+ * @opr_id: the order point record id.
-+ * @seqnum: the order restoration sequence number.
-+ */
-+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,
-+				uint32_t seqnum);
-+
-+/**
-+ * qbman_eq_desc_set_orp_nesn() - advance NESN (Next Expected Sequence Number)
-+ * without any enqueue
-+ * @d: the enqueue descriptor.
-+ * @opr_id: the order point record id.
-+ * @seqnum: the order restoration sequence number.
-+ */
-+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,
-+				uint32_t seqnum);
-+
-+/**
-+ * qbman_eq_desc_set_response() - Set the enqueue response info.
-+ * @d: the enqueue descriptor
-+ * @storage_phys: the physical address of the enqueue response in memory.
-+ * @stash: indicates whether write allocation is enabled.
-+ *
-+ * In the case where an enqueue response is DMA'd, this determines where that
-+ * response should go. (The physical/DMA address is given for hardware's
-+ * benefit, but software should interpret it as a "struct qbman_eq_response"
-+ * data structure.) 'stash' controls whether or not the write to main-memory
-+ * expresses a cache-warming attribute.
-+ */
-+void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
-+				dma_addr_t storage_phys,
-+				int stash);
-+/**
-+ * qbman_eq_desc_set_token() - Set token for the enqueue command
-+ * @d: the enqueue descriptor
-+ * @token: the token to be set.
-+ *
-+ * The token is the value that shows up in an enqueue response and can be used
-+ * to detect when the results have been published. The easiest technique is to
-+ * zero result "storage" before issuing an enqueue, and use any non-zero 'token'
-+ * value.
-+ */
-+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token);
-+
-+/**
-+ * qbman_eq_desc_set_fq()
-+ * qbman_eq_desc_set_qd() - Set either FQ or Queuing Destination for the enqueue
-+ * command.
-+ * @d: the enqueue descriptor
-+ * @fqid: the id of the frame queue to be enqueued.
-+ * @qdid: the id of the queuing destination to be enqueued.
-+ * @qd_bin: the queuing destination bin
-+ * @qd_prio: the queuing destination priority.
-+ *
-+ * Exactly one of the following descriptor "targets" should be set. (Calling any
-+ * one of these will replace the effect of any prior call to one of these.)
-+ * - enqueue to a frame queue
-+ * - enqueue to a queuing destination
-+ * Note that none of these will have any effect if the "action" type has been
-+ * set to "orp_hole" or "orp_nesn".
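-+ *
-+ * A minimal, illustrative enqueue sequence (assuming a valid portal 's', a
-+ * frame descriptor 'fd' and a frame queue id 'fqid' provided by the caller;
-+ * -EBUSY means the EQCR was not ready, so this sketch simply spins):
-+ *
-+ *	struct qbman_eq_desc ed;
-+ *
-+ *	qbman_eq_desc_clear(&ed);
-+ *	qbman_eq_desc_set_no_orp(&ed, 0);
-+ *	qbman_eq_desc_set_fq(&ed, fqid);
-+ *	while (qbman_swp_enqueue(s, &ed, fd) == -EBUSY)
-+ *		cpu_relax();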
-+ */
-+void qbman_eq_desc_set_fq(struct qbman_eq_desc *, uint32_t fqid);
-+void qbman_eq_desc_set_qd(struct qbman_eq_desc *, uint32_t qdid,
-+			  uint32_t qd_bin, uint32_t qd_prio);
-+
-+/**
-+ * qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt
-+ * @d: the enqueue descriptor
-+ * @enable: boolean to enable/disable EQDI
-+ *
-+ * Determines whether or not the portal's EQDI interrupt source should be
-+ * asserted after the enqueue command is completed.
-+ */
-+void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *, int enable);
-+
-+/**
-+ * qbman_eq_desc_set_dca() - Set DCA mode in the enqueue command.
-+ * @d: the enqueue descriptor.
-+ * @enable: enable/disable DCA mode.
-+ * @dqrr_idx: DCAP_CI, the DCAP consumer index.
-+ * @park: whether to park the FQ once the DQRR entry is consumed.
-+ *
-+ * Determines whether or not a portal DQRR entry should be consumed once the
-+ * enqueue command is completed. (And if so, and the DQRR entry corresponds
-+ * to a held-active (order-preserving) FQ, whether the FQ should be parked
-+ * instead of being rescheduled.)
-+ */
-+void qbman_eq_desc_set_dca(struct qbman_eq_desc *, int enable,
-+			   uint32_t dqrr_idx, int park);
-+
-+/**
-+ * qbman_swp_enqueue() - Issue an enqueue command.
-+ * @s: the software portal used for enqueue.
-+ * @d: the enqueue descriptor.
-+ * @fd: the frame descriptor to be enqueued.
-+ *
-+ * Please note that 'fd' should only be NULL if the "action" of the
-+ * descriptor is "orp_hole" or "orp_nesn".
-+ *
-+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
-+ */
-+int qbman_swp_enqueue(struct qbman_swp *, const struct qbman_eq_desc *,
-+		      const struct qbman_fd *fd);
-+
-+/**
-+ * qbman_swp_enqueue_thresh() - Set the threshold for EQRI interrupt.
-+ *
-+ * An EQRI interrupt can be generated when the fill-level of EQCR falls below
-+ * the 'thresh' value set here. Setting thresh==0 (the default) disables it.
-+ */
-+int qbman_swp_enqueue_thresh(struct qbman_swp *, unsigned int thresh);
-+
-+ /*******************/
-+ /* Buffer releases */
-+ /*******************/
-+/**
-+ * struct qbman_release_desc - The structure for buffer release descriptor
-+ */
-+struct qbman_release_desc {
-+	uint32_t dont_manipulate_directly[1];
-+};
-+
-+/**
-+ * qbman_release_desc_clear() - Clear the contents of a descriptor to
-+ * default/starting state.
-+ */
-+void qbman_release_desc_clear(struct qbman_release_desc *);
-+
-+/**
-+ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
-+ */
-+void qbman_release_desc_set_bpid(struct qbman_release_desc *, uint32_t bpid);
-+
-+/**
-+ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
-+ * interrupt source should be asserted after the release command is completed.
-+ */
-+void qbman_release_desc_set_rcdi(struct qbman_release_desc *, int enable);
-+
-+/**
-+ * qbman_swp_release() - Issue a buffer release command.
-+ * @s: the software portal object.
-+ * @d: the release descriptor.
-+ * @buffers: a pointer to the buffer addresses to be released.
-+ * @num_buffers: number of buffers to be released, must be less than 8.
-+ *
-+ * Return 0 for success, -EBUSY if the release command ring is not ready.
-+ */
-+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
-+		      const uint64_t *buffers, unsigned int num_buffers);
-+
-+ /*******************/
-+ /* Buffer acquires */
-+ /*******************/
-+
-+/**
-+ * qbman_swp_acquire() - Issue a buffer acquire command.
-+ * @s: the software portal object.
-+ * @bpid: the buffer pool index.
-+ * @buffers: a pointer to storage for the acquired buffer address(es).
-+ * @num_buffers: number of buffers to be acquired, must be less than 8.
-+ *
-+ * Return 0 for success, or negative error code if the acquire command
-+ * fails.
-+ */
-+int qbman_swp_acquire(struct qbman_swp *, uint32_t bpid, uint64_t *buffers,
-+		      unsigned int num_buffers);
-+
-+ /*****************/
-+ /* FQ management */
-+ /*****************/
-+
-+/**
-+ * qbman_swp_fq_schedule() - Move the fq to the scheduled state.
-+ * @s: the software portal object.
-+ * @fqid: the index of frame queue to be scheduled.
-+ *
-+ * There are a couple of different ways that a FQ can end up in the parked
-+ * state; this schedules it.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid);
-+
-+/**
-+ * qbman_swp_fq_force() - Force the FQ to fully scheduled state.
-+ * @s: the software portal object.
-+ * @fqid: the index of frame queue to be forced.
-+ *
-+ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
-+ * and thus be available for selection by any channel-dequeuing behaviour (push
-+ * or pull). If the FQ is subsequently "dequeued" from the channel and is still
-+ * empty at the time this happens, the resulting dq_entry will have no FD.
-+ * (qbman_result_DQ_fd() will return NULL.)
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid);
-+
-+/**
-+ * qbman_swp_fq_xon()
-+ * qbman_swp_fq_xoff() - XON/XOFF the frame queue.
-+ * @s: the software portal object.
-+ * @fqid: the index of frame queue.
-+ *
-+ * These functions change the FQ flow-control state between XON/XOFF. (The
-+ * default is XON.) This setting doesn't affect enqueues to the FQ, just
-+ * dequeues. XOFF FQs will remain in the tentatively-scheduled state, even when
-+ * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is
-+ * changed to XOFF after it had already become truly-scheduled to a channel, and
-+ * a pull dequeue of that channel occurs that selects that FQ for dequeuing,
-+ * then the resulting dq_entry will have no FD. (qbman_result_DQ_fd() will
-+ * return NULL.)
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid);
-+int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid);
-+
-+ /**********************/
-+ /* Channel management */
-+ /**********************/
-+
-+/* If the user has been allocated a channel object that is going to generate
-+ * CDANs to another channel, then these functions will be necessary.
-+ * CDAN-enabled channels only generate a single CDAN notification, after which
-+ * they need to be re-enabled before they'll generate another. (The idea is
-+ * that pull dequeuing will occur in reaction to the CDAN, followed by a
-+ * re-enable step.) Each function generates a distinct command to hardware, so a
-+ * combination function is provided if the user wishes to modify the "context"
-+ * (which shows up in each CDAN message) each time they re-enable, as a single
-+ * command to hardware. */
-+/**
-+ * qbman_swp_CDAN_set_context() - Set CDAN context
-+ * @s: the software portal object.
-+ * @channelid: the channel index.
-+ * @ctx: the context to be set in CDAN.
-+ *
-+ * Return 0 for success, or negative error code for failure.
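-+ *
-+ * An illustrative re-arm cycle (assuming caller-provided 's', 'chid' and
-+ * 'ctx'); after each CDAN fires and the resulting pull dequeues are done,
-+ * the channel is re-armed:
-+ *
-+ *	qbman_swp_CDAN_set_context_enable(s, chid, ctx);
-+ *	...
-+ *	qbman_swp_CDAN_enable(s, chid);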
-+ */
-+int qbman_swp_CDAN_set_context(struct qbman_swp *, uint16_t channelid,
-+			       uint64_t ctx);
-+
-+/**
-+ * qbman_swp_CDAN_enable() - Enable CDAN for the channel.
-+ * @s: the software portal object.
-+ * @channelid: the index of the channel to generate CDAN.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_CDAN_enable(struct qbman_swp *, uint16_t channelid);
-+
-+/**
-+ * qbman_swp_CDAN_disable() - Disable CDAN for the channel.
-+ * @s: the software portal object.
-+ * @channelid: the index of the channel to generate CDAN.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_CDAN_disable(struct qbman_swp *, uint16_t channelid);
-+
-+/**
-+ * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
-+ * @s: the software portal object.
-+ * @channelid: the index of the channel to generate CDAN.
-+ * @ctx: the context set in CDAN.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_CDAN_set_context_enable(struct qbman_swp *, uint16_t channelid,
-+				      uint64_t ctx);
-+
-+#endif /* !_FSL_QBMAN_PORTAL_H */
---- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c
-@@ -0,0 +1,846 @@
-+/* Copyright (C) 2015 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "qbman_portal.h"
-+#include "qbman_debug.h"
-+#include "fsl_qbman_portal.h"
-+
-+/* QBMan portal management command codes */
-+#define QBMAN_BP_QUERY			0x32
-+#define QBMAN_FQ_QUERY			0x44
-+#define QBMAN_FQ_QUERY_NP		0x45
-+#define QBMAN_CGR_QUERY			0x51
-+#define QBMAN_WRED_QUERY		0x54
-+#define QBMAN_CGR_STAT_QUERY		0x55
-+#define QBMAN_CGR_STAT_QUERY_CLR	0x56
-+
-+enum qbman_attr_usage_e {
-+	qbman_attr_usage_fq,
-+	qbman_attr_usage_bpool,
-+	qbman_attr_usage_cgr,
-+};
-+
-+struct int_qbman_attr {
-+	uint32_t words[32];
-+	enum qbman_attr_usage_e usage;
-+};
-+
-+#define attr_type_set(a, e) \
-+{ \
-+	struct qbman_attr *__attr = a; \
-+	enum qbman_attr_usage_e __usage = e; \
-+	((struct int_qbman_attr *)__attr)->usage = __usage; \
-+}
-+
-+#define ATTR32(d) (&(d)->dont_manipulate_directly[0])
-+#define ATTR32_1(d) (&(d)->dont_manipulate_directly[16])
-+
-+static struct qb_attr_code code_bp_bpid = QB_CODE(0, 16, 16);
-+static struct qb_attr_code code_bp_bdi = QB_CODE(1, 16, 1);
-+static struct qb_attr_code code_bp_va = QB_CODE(1, 17, 1);
-+static struct qb_attr_code code_bp_wae = QB_CODE(1, 18, 1);
-+static struct qb_attr_code code_bp_swdet = QB_CODE(4, 0, 16);
-+static struct qb_attr_code code_bp_swdxt = QB_CODE(4, 16, 16);
-+static struct qb_attr_code code_bp_hwdet = QB_CODE(5, 0, 16);
-+static struct qb_attr_code code_bp_hwdxt = QB_CODE(5, 16, 16);
-+static struct qb_attr_code code_bp_swset = QB_CODE(6, 0, 16);
-+static struct qb_attr_code code_bp_swsxt = QB_CODE(6, 16, 16);
-+static struct qb_attr_code code_bp_vbpid = QB_CODE(7, 0, 14);
-+static struct qb_attr_code code_bp_icid = QB_CODE(7, 16, 15);
-+static struct qb_attr_code code_bp_pl = QB_CODE(7, 31, 1);
-+static struct qb_attr_code code_bp_bpscn_addr_lo = QB_CODE(8, 0, 32);
-+static struct qb_attr_code code_bp_bpscn_addr_hi = QB_CODE(9, 0, 32);
-+static struct qb_attr_code code_bp_bpscn_ctx_lo = QB_CODE(10, 0, 32);
-+static struct qb_attr_code code_bp_bpscn_ctx_hi = QB_CODE(11, 0, 32);
-+static struct qb_attr_code code_bp_hw_targ = QB_CODE(12, 0, 16);
-+static struct qb_attr_code code_bp_state = QB_CODE(1, 24, 3);
-+static struct qb_attr_code code_bp_fill = QB_CODE(2, 0, 32);
-+static struct qb_attr_code code_bp_hdptr = QB_CODE(3, 0, 32);
-+static struct qb_attr_code code_bp_sdcnt = QB_CODE(13, 0, 8);
-+static struct qb_attr_code code_bp_hdcnt = QB_CODE(13, 1, 8);
-+static struct qb_attr_code code_bp_sscnt = QB_CODE(13, 2, 8);
-+
-+void qbman_bp_attr_clear(struct qbman_attr *a)
-+{
-+	memset(a, 0, sizeof(*a));
-+	attr_type_set(a, qbman_attr_usage_bpool);
-+}
-+
-+int qbman_bp_query(struct qbman_swp *s, uint32_t bpid,
-+		   struct qbman_attr *a)
-+{
-+	uint32_t *p;
-+	uint32_t verb, rslt;
-+	uint32_t *attr = ATTR32(a);
-+
-+	qbman_bp_attr_clear(a);
-+
-+	/* Start the management command */
-+	p = qbman_swp_mc_start(s);
-+	if (!p)
-+		return -EBUSY;
-+
-+	/* Encode the caller-provided attributes */
-+	qb_attr_code_encode(&code_bp_bpid, p, bpid);
-+
-+	/* Complete the management command */
-+	p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_BP_QUERY);
-+
-+	/* Decode the outcome */
-+	verb = qb_attr_code_decode(&code_generic_verb, p);
-+	rslt = qb_attr_code_decode(&code_generic_rslt, p);
-+	BUG_ON(verb != QBMAN_BP_QUERY);
-+
-+	/* Determine success or failure */
-+	if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
-+		pr_err("Query of BPID 0x%x failed, code=0x%02x\n", bpid, rslt);
-+		return -EIO;
-+	}
-+
-+	/* For the query, word[0] of the result contains only the
-+	 * verb/rslt fields, so skip word[0].
-+	 */
-+	word_copy(&attr[1], &p[1], 15);
-+	return 0;
-+}
-+
-+void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae)
-+{
-+	uint32_t *p = ATTR32(a);
-+
-+	*bdi = !!qb_attr_code_decode(&code_bp_bdi, p);
-+	*va = !!qb_attr_code_decode(&code_bp_va, p);
-+	*wae = !!qb_attr_code_decode(&code_bp_wae, p);
-+}
-+
-+static uint32_t qbman_bp_thresh_to_value(uint32_t val)
-+{
-+	return (val & 0xff) << ((val & 0xf00) >> 8);
-+}
-+
-+void qbman_bp_attr_get_swdet(struct qbman_attr *a, uint32_t *swdet)
-+{
-+	uint32_t *p = ATTR32(a);
-+
-+	*swdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdet,
-+					  p));
-+}
-+
-+void qbman_bp_attr_get_swdxt(struct qbman_attr *a, uint32_t *swdxt)
-+{
-+	uint32_t *p = ATTR32(a);
-+
-+	*swdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdxt,
-+					  p));
-+}
-+
-+void qbman_bp_attr_get_hwdet(struct qbman_attr *a, uint32_t *hwdet)
-+{
-+	uint32_t *p = ATTR32(a);
-+
-+	*hwdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdet,
-+					  p));
-+}
-+
-+void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, uint32_t *hwdxt)
-+{
-+	uint32_t *p = ATTR32(a);
-+
-+	*hwdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdxt,
-+					  p));
-+}
-+
-+void qbman_bp_attr_get_swset(struct qbman_attr *a, uint32_t *swset)
-+{
-+	uint32_t *p = ATTR32(a);
-+
-+	*swset = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swset,
-+					  p));
-+}
-+
-+void qbman_bp_attr_get_swsxt(struct qbman_attr *a, uint32_t *swsxt)
-+{
-+	uint32_t *p = ATTR32(a);
-+
-+	*swsxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swsxt,
-+					  p));
-+}
-+
-+void qbman_bp_attr_get_vbpid(struct qbman_attr *a, uint32_t *vbpid)
-+{
-+	uint32_t *p = ATTR32(a);
-+
-+	*vbpid = qb_attr_code_decode(&code_bp_vbpid, p);
-+}
-+
-+void qbman_bp_attr_get_icid(struct qbman_attr *a, uint32_t *icid, int *pl)
-+{
-+	uint32_t *p = ATTR32(a);
-+
-+	*icid = qb_attr_code_decode(&code_bp_icid, p);
-+	*pl = !!qb_attr_code_decode(&code_bp_pl, p);
-+}
-+
-+void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, uint64_t *bpscn_addr)
-+{
-+	uint32_t *p = ATTR32(a);
-+
-+	*bpscn_addr = ((uint64_t)qb_attr_code_decode(&code_bp_bpscn_addr_hi,
-+			p) << 32) |
-+			(uint64_t)qb_attr_code_decode(&code_bp_bpscn_addr_lo,
-+			p);
-+}
-+
-+void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, uint64_t *bpscn_ctx)
-+{
-+	uint32_t *p = ATTR32(a);
-+
-+	*bpscn_ctx = ((uint64_t)qb_attr_code_decode(&code_bp_bpscn_ctx_hi, p)
-+			<< 32) |
-+			(uint64_t)qb_attr_code_decode(&code_bp_bpscn_ctx_lo,
-+			p);
-+}
-+
-+void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, uint32_t *hw_targ)
-+{
-+	uint32_t *p = ATTR32(a);
-+
-+	*hw_targ = qb_attr_code_decode(&code_bp_hw_targ, p);
-+}
-+
-+int qbman_bp_info_has_free_bufs(struct qbman_attr *a)
-+{
-+	uint32_t *p = ATTR32(a);
-+
-+	return !(int)(qb_attr_code_decode(&code_bp_state, p) & 0x1);
-+}
-+
-+int qbman_bp_info_is_depleted(struct qbman_attr *a)
-+{
-+	uint32_t *p = ATTR32(a);
-+
-+	return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x2);
-+}
-+
-+int qbman_bp_info_is_surplus(struct qbman_attr *a)
-+{
-+	uint32_t *p = ATTR32(a);
-+
-+	return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x4);
-+}
-+
-+uint32_t qbman_bp_info_num_free_bufs(struct qbman_attr *a)
-+{
-+	uint32_t *p = ATTR32(a);
-+
-+	return qb_attr_code_decode(&code_bp_fill, p);
-+}
-+
-+uint32_t qbman_bp_info_hdptr(struct qbman_attr *a)
-+{
-+	uint32_t *p = ATTR32(a);
-+
-+	return qb_attr_code_decode(&code_bp_hdptr, p);
-+}
-+
-+uint32_t qbman_bp_info_sdcnt(struct qbman_attr *a)
-+{
-+	uint32_t *p = ATTR32(a);
-+
-+	return qb_attr_code_decode(&code_bp_sdcnt, p);
-+}
-+
-+uint32_t qbman_bp_info_hdcnt(struct qbman_attr *a)
-+{
-+	uint32_t *p = ATTR32(a);
-+
-+	return qb_attr_code_decode(&code_bp_hdcnt, p);
-+}
-+
-+uint32_t qbman_bp_info_sscnt(struct qbman_attr *a)
-+{
-+	uint32_t *p = ATTR32(a);
-+
-+	return qb_attr_code_decode(&code_bp_sscnt, p);
-+}
-+
-+static struct qb_attr_code code_fq_fqid = QB_CODE(1, 0, 24);
-+static struct qb_attr_code code_fq_cgrid = QB_CODE(2, 16, 16);
-+static struct qb_attr_code code_fq_destwq = QB_CODE(3, 0, 15);
-+static struct qb_attr_code code_fq_fqctrl = QB_CODE(3, 24, 8);
-+static struct qb_attr_code code_fq_icscred = QB_CODE(4, 0, 15);
-+static struct qb_attr_code code_fq_tdthresh = QB_CODE(4, 16, 13);
-+static struct qb_attr_code code_fq_oa_len = QB_CODE(5, 0, 12);
-+static struct qb_attr_code code_fq_oa_ics = QB_CODE(5, 14, 1);
-+static struct qb_attr_code code_fq_oa_cgr = QB_CODE(5, 15, 1);
-+static struct qb_attr_code code_fq_mctl_bdi = QB_CODE(5, 24, 1);
-+static struct qb_attr_code code_fq_mctl_ff = QB_CODE(5, 25, 1);
-+static struct qb_attr_code code_fq_mctl_va = QB_CODE(5, 26, 1);
-+static struct qb_attr_code code_fq_mctl_ps = QB_CODE(5, 27, 1);
-+static struct qb_attr_code code_fq_ctx_lower32 = QB_CODE(6, 0, 32);
-+static struct qb_attr_code code_fq_ctx_upper32 = QB_CODE(7, 0, 32);
-+static struct qb_attr_code code_fq_icid = QB_CODE(8, 0, 15);
-+static struct qb_attr_code code_fq_pl = QB_CODE(8, 15, 1);
-+static struct qb_attr_code code_fq_vfqid = QB_CODE(9, 0, 24);
-+static struct qb_attr_code code_fq_erfqid = QB_CODE(10, 0, 24);
-+
-+void qbman_fq_attr_clear(struct qbman_attr *a)
-+{
-+	memset(a, 0, sizeof(*a));
-+	attr_type_set(a, qbman_attr_usage_fq);
-+}
-+
-+/* FQ query function for programmable fields */
-+int qbman_fq_query(struct qbman_swp *s, uint32_t fqid, struct qbman_attr *desc)
-+{
-+	uint32_t *p;
-+	uint32_t verb, rslt;
-+	uint32_t *d = ATTR32(desc);
-+
-+	qbman_fq_attr_clear(desc);
-+
-+	p = qbman_swp_mc_start(s);
-+	if (!p)
-+		return -EBUSY;
-+	qb_attr_code_encode(&code_fq_fqid, p, fqid);
-+	p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY);
-+
-+	/* Decode the outcome */
-+	verb = qb_attr_code_decode(&code_generic_verb, p);
-+	rslt = qb_attr_code_decode(&code_generic_rslt, p);
-+	BUG_ON(verb != QBMAN_FQ_QUERY);
-+
-+	/* Determine success or failure */
-+	if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
-+		pr_err("Query of FQID 0x%x failed, code=0x%02x\n",
-+		       fqid, rslt);
-+		return -EIO;
-+	}
-+	/* For the configure, word[0] of the command contains only the WE-mask.
-+	 * For the query, word[0] of the result contains only the verb/rslt
-+	 * fields. Skip word[0] in the latter case.
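-+	 * The copied words hold the programmable FQ fields that the
-+	 * qbman_fq_attr_get_*() helpers below decode on demand.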
-+	 */
-+	word_copy(&d[1], &p[1], 15);
-+	return 0;
-+}
-+
-+void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, uint32_t *fqctrl)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*fqctrl = qb_attr_code_decode(&code_fq_fqctrl, p);
-+}
-+
-+void qbman_fq_attr_get_cgrid(struct qbman_attr *d, uint32_t *cgrid)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*cgrid = qb_attr_code_decode(&code_fq_cgrid, p);
-+}
-+
-+void qbman_fq_attr_get_destwq(struct qbman_attr *d, uint32_t *destwq)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*destwq = qb_attr_code_decode(&code_fq_destwq, p);
-+}
-+
-+void qbman_fq_attr_get_icscred(struct qbman_attr *d, uint32_t *icscred)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*icscred = qb_attr_code_decode(&code_fq_icscred, p);
-+}
-+
-+static struct qb_attr_code code_tdthresh_exp = QB_CODE(0, 0, 5);
-+static struct qb_attr_code code_tdthresh_mant = QB_CODE(0, 5, 8);
-+static uint32_t qbman_thresh_to_value(uint32_t val)
-+{
-+	uint32_t m, e;
-+
-+	m = qb_attr_code_decode(&code_tdthresh_mant, &val);
-+	e = qb_attr_code_decode(&code_tdthresh_exp, &val);
-+	return m << e;
-+}
-+
-+void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, uint32_t *tdthresh)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*tdthresh = qbman_thresh_to_value(qb_attr_code_decode(&code_fq_tdthresh,
-+					  p));
-+}
-+
-+void qbman_fq_attr_get_oa(struct qbman_attr *d,
-+			  int *oa_ics, int *oa_cgr, int32_t *oa_len)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*oa_ics = !!qb_attr_code_decode(&code_fq_oa_ics, p);
-+	*oa_cgr = !!qb_attr_code_decode(&code_fq_oa_cgr, p);
-+	*oa_len = qb_attr_code_makesigned(&code_fq_oa_len,
-+			qb_attr_code_decode(&code_fq_oa_len, p));
-+}
-+
-+void qbman_fq_attr_get_mctl(struct qbman_attr *d,
-+			    int *bdi, int *ff, int *va, int *ps)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*bdi = !!qb_attr_code_decode(&code_fq_mctl_bdi, p);
-+	*ff = !!qb_attr_code_decode(&code_fq_mctl_ff, p);
-+	*va = !!qb_attr_code_decode(&code_fq_mctl_va, p);
-+	*ps = !!qb_attr_code_decode(&code_fq_mctl_ps, p);
-+}
-+
-+void qbman_fq_attr_get_ctx(struct qbman_attr *d, uint32_t *hi, uint32_t *lo)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*hi = qb_attr_code_decode(&code_fq_ctx_upper32, p);
-+	*lo = qb_attr_code_decode(&code_fq_ctx_lower32, p);
-+}
-+
-+void qbman_fq_attr_get_icid(struct qbman_attr *d, uint32_t *icid, int *pl)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*icid = qb_attr_code_decode(&code_fq_icid, p);
-+	*pl = !!qb_attr_code_decode(&code_fq_pl, p);
-+}
-+
-+void qbman_fq_attr_get_vfqid(struct qbman_attr *d, uint32_t *vfqid)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*vfqid = qb_attr_code_decode(&code_fq_vfqid, p);
-+}
-+
-+void qbman_fq_attr_get_erfqid(struct qbman_attr *d, uint32_t *erfqid)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*erfqid = qb_attr_code_decode(&code_fq_erfqid, p);
-+}
-+
-+/* Query FQ Non-Programmable Fields */
-+static struct qb_attr_code code_fq_np_state = QB_CODE(0, 16, 3);
-+static struct qb_attr_code code_fq_np_fe = QB_CODE(0, 19, 1);
-+static struct qb_attr_code code_fq_np_x = QB_CODE(0, 20, 1);
-+static struct qb_attr_code code_fq_np_r = QB_CODE(0, 21, 1);
-+static struct qb_attr_code code_fq_np_oe = QB_CODE(0, 22, 1);
-+static struct qb_attr_code code_fq_np_frm_cnt = QB_CODE(6, 0, 24);
-+static struct qb_attr_code code_fq_np_byte_cnt = QB_CODE(7, 0, 32);
-+
-+int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid,
-+			 struct qbman_attr *state)
-+{
-+	uint32_t *p;
-+	uint32_t verb, rslt;
-+	uint32_t *d = ATTR32(state);
-+
-+	qbman_fq_attr_clear(state);
-+
-+	p = qbman_swp_mc_start(s);
-+	if (!p)
-+		return -EBUSY;
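-+
-+	/* Encode the FQID into the query command before completing it */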
-+	qb_attr_code_encode(&code_fq_fqid, p, fqid);
-+	p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
-+
-+	/* Decode the outcome */
-+	verb = qb_attr_code_decode(&code_generic_verb, p);
-+	rslt = qb_attr_code_decode(&code_generic_rslt, p);
-+	BUG_ON(verb != QBMAN_FQ_QUERY_NP);
-+
-+	/* Determine success or failure */
-+	if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
-+		pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
-+		       fqid, rslt);
-+		return -EIO;
-+	}
-+	word_copy(&d[0], &p[0], 16);
-+	return 0;
-+}
-+
-+uint32_t qbman_fq_state_schedstate(const struct qbman_attr *state)
-+{
-+	const uint32_t *p = ATTR32(state);
-+
-+	return qb_attr_code_decode(&code_fq_np_state, p);
-+}
-+
-+int qbman_fq_state_force_eligible(const struct qbman_attr *state)
-+{
-+	const uint32_t *p = ATTR32(state);
-+
-+	return !!qb_attr_code_decode(&code_fq_np_fe, p);
-+}
-+
-+int qbman_fq_state_xoff(const struct qbman_attr *state)
-+{
-+	const uint32_t *p = ATTR32(state);
-+
-+	return !!qb_attr_code_decode(&code_fq_np_x, p);
-+}
-+
-+int qbman_fq_state_retirement_pending(const struct qbman_attr *state)
-+{
-+	const uint32_t *p = ATTR32(state);
-+
-+	return !!qb_attr_code_decode(&code_fq_np_r, p);
-+}
-+
-+int qbman_fq_state_overflow_error(const struct qbman_attr *state)
-+{
-+	const uint32_t *p = ATTR32(state);
-+
-+	return !!qb_attr_code_decode(&code_fq_np_oe, p);
-+}
-+
-+uint32_t qbman_fq_state_frame_count(const struct qbman_attr *state)
-+{
-+	const uint32_t *p = ATTR32(state);
-+
-+	return qb_attr_code_decode(&code_fq_np_frm_cnt, p);
-+}
-+
-+uint32_t qbman_fq_state_byte_count(const struct qbman_attr *state)
-+{
-+	const uint32_t *p = ATTR32(state);
-+
-+	return qb_attr_code_decode(&code_fq_np_byte_cnt, p);
-+}
-+
-+/* Query CGR */
-+static struct qb_attr_code code_cgr_cgid = QB_CODE(0, 16, 16);
-+static struct qb_attr_code code_cgr_cscn_wq_en_enter = QB_CODE(2, 0, 1);
-+static struct qb_attr_code code_cgr_cscn_wq_en_exit = QB_CODE(2, 1, 1);
-+static struct qb_attr_code code_cgr_cscn_wq_icd = QB_CODE(2, 2, 1);
-+static struct qb_attr_code code_cgr_mode = QB_CODE(3, 16, 2);
-+static struct qb_attr_code code_cgr_rej_cnt_mode = QB_CODE(3, 18, 1);
-+static struct qb_attr_code code_cgr_cscn_bdi = QB_CODE(3, 19, 1);
-+static struct qb_attr_code code_cgr_cscn_wr_en_enter = QB_CODE(3, 24, 1);
-+static struct qb_attr_code code_cgr_cscn_wr_en_exit = QB_CODE(3, 25, 1);
-+static struct qb_attr_code code_cgr_cg_wr_ae = QB_CODE(3, 26, 1);
-+static struct qb_attr_code code_cgr_cscn_dcp_en = QB_CODE(3, 27, 1);
-+static struct qb_attr_code code_cgr_cg_wr_va = QB_CODE(3, 28, 1);
-+static struct qb_attr_code code_cgr_i_cnt_wr_en = QB_CODE(4, 0, 1);
-+static struct qb_attr_code code_cgr_i_cnt_wr_bnd = QB_CODE(4, 1, 5);
-+static struct qb_attr_code code_cgr_td_en = QB_CODE(4, 8, 1);
-+static struct qb_attr_code code_cgr_cs_thres = QB_CODE(4, 16, 13);
-+static struct qb_attr_code code_cgr_cs_thres_x = QB_CODE(5, 0, 13);
-+static struct qb_attr_code code_cgr_td_thres = QB_CODE(5, 16, 13);
-+static struct qb_attr_code code_cgr_cscn_tdcp = QB_CODE(6, 0, 16);
-+static struct qb_attr_code code_cgr_cscn_wqid = QB_CODE(6, 16, 16);
-+static struct qb_attr_code code_cgr_cscn_vcgid = QB_CODE(7, 0, 16);
-+static struct qb_attr_code code_cgr_cg_icid = QB_CODE(7, 16, 15);
-+static struct qb_attr_code code_cgr_cg_pl = QB_CODE(7, 31, 1);
-+static struct qb_attr_code code_cgr_cg_wr_addr_lo = QB_CODE(8, 0, 32);
-+static struct qb_attr_code code_cgr_cg_wr_addr_hi = QB_CODE(9, 0, 32);
-+static struct qb_attr_code code_cgr_cscn_ctx_lo = QB_CODE(10, 0, 32);
-+static struct qb_attr_code code_cgr_cscn_ctx_hi = QB_CODE(11, 0, 32);
-+
-+void qbman_cgr_attr_clear(struct qbman_attr *a)
-+{
-+	memset(a, 0, sizeof(*a));
-+	attr_type_set(a, qbman_attr_usage_cgr);
-+}
-+
-+int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid, struct qbman_attr *attr)
-+{
-+	uint32_t *p;
-+	uint32_t verb, rslt;
-+	uint32_t *d[2];
-+	int i;
-+	uint32_t query_verb;
-+
-+	d[0] = ATTR32(attr);
-+	d[1] = ATTR32_1(attr);
-+
-+	qbman_cgr_attr_clear(attr);
-+
-+	for (i = 0; i < 2; i++) {
-+		p = qbman_swp_mc_start(s);
-+		if (!p)
-+			return -EBUSY;
-+		query_verb = i ? QBMAN_WRED_QUERY : QBMAN_CGR_QUERY;
-+
-+		qb_attr_code_encode(&code_cgr_cgid, p, cgid);
-+		p = qbman_swp_mc_complete(s, p, p[0] | query_verb);
-+
-+		/* Decode the outcome */
-+		verb = qb_attr_code_decode(&code_generic_verb, p);
-+		rslt = qb_attr_code_decode(&code_generic_rslt, p);
-+		BUG_ON(verb != query_verb);
-+
-+		/* Determine success or failure */
-+		if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
-+			pr_err("Query CGID 0x%x failed,", cgid);
-+			pr_err(" verb=0x%02x, code=0x%02x\n", verb, rslt);
-+			return -EIO;
-+		}
-+		/* For the configure, word[0] of the command contains only the
-+		 * verb/cgid. For the query, word[0] of the result contains
-+		 * only the verb/rslt fields. Skip word[0] in the latter case.
-+		 */
-+		word_copy(&d[i][1], &p[1], 15);
-+	}
-+	return 0;
-+}
-+
-+void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter,
-+			     int *cscn_wq_en_exit, int *cscn_wq_icd)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*cscn_wq_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_enter,
-+						  p);
-+	*cscn_wq_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_exit, p);
-+	*cscn_wq_icd = !!qb_attr_code_decode(&code_cgr_cscn_wq_icd, p);
-+}
-+
-+void qbman_cgr_attr_get_mode(struct qbman_attr *d, uint32_t *mode,
-+			     int *rej_cnt_mode, int *cscn_bdi)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*mode = qb_attr_code_decode(&code_cgr_mode, p);
-+	*rej_cnt_mode = !!qb_attr_code_decode(&code_cgr_rej_cnt_mode, p);
-+	*cscn_bdi = !!qb_attr_code_decode(&code_cgr_cscn_bdi, p);
-+}
-+
-+void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter,
-+			     int *cscn_wr_en_exit, int *cg_wr_ae,
-+			     int *cscn_dcp_en, int *cg_wr_va)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*cscn_wr_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_enter,
-+						  p);
-+	*cscn_wr_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_exit, p);
-+	*cg_wr_ae = !!qb_attr_code_decode(&code_cgr_cg_wr_ae, p);
-+	*cscn_dcp_en = !!qb_attr_code_decode(&code_cgr_cscn_dcp_en, p);
-+	*cg_wr_va = !!qb_attr_code_decode(&code_cgr_cg_wr_va, p);
-+}
-+
-+void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en,
-+			    uint32_t *i_cnt_wr_bnd)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*i_cnt_wr_en = !!qb_attr_code_decode(&code_cgr_i_cnt_wr_en, p);
-+	*i_cnt_wr_bnd = qb_attr_code_decode(&code_cgr_i_cnt_wr_bnd, p);
-+}
-+
-+void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*td_en = !!qb_attr_code_decode(&code_cgr_td_en, p);
-+}
-+
-+void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, uint32_t *cs_thres)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*cs_thres = qbman_thresh_to_value(qb_attr_code_decode(
-+					  &code_cgr_cs_thres, p));
-+}
-+
-+void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d,
-+				   uint32_t *cs_thres_x)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*cs_thres_x = qbman_thresh_to_value(qb_attr_code_decode(
-+					    &code_cgr_cs_thres_x, p));
-+}
-+
-+void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, uint32_t *td_thres)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*td_thres = qbman_thresh_to_value(qb_attr_code_decode(
-+					  &code_cgr_td_thres, p));
-+}
-+
-+void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, uint32_t *cscn_tdcp)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*cscn_tdcp = qb_attr_code_decode(&code_cgr_cscn_tdcp, p);
-+}
-+
-+void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, uint32_t *cscn_wqid)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*cscn_wqid = qb_attr_code_decode(&code_cgr_cscn_wqid, p);
-+}
-+
-+void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d,
-+				   uint32_t *cscn_vcgid)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*cscn_vcgid = qb_attr_code_decode(&code_cgr_cscn_vcgid, p);
-+}
-+
-+void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, uint32_t *icid,
-+				int *pl)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*icid = qb_attr_code_decode(&code_cgr_cg_icid, p);
-+	*pl = !!qb_attr_code_decode(&code_cgr_cg_pl, p);
-+}
-+
-+void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d,
-+				   uint64_t *cg_wr_addr)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*cg_wr_addr = ((uint64_t)qb_attr_code_decode(&code_cgr_cg_wr_addr_hi,
-+			p) << 32) |
-+			(uint64_t)qb_attr_code_decode(&code_cgr_cg_wr_addr_lo,
-+			p);
-+}
-+
-+void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, uint64_t *cscn_ctx)
-+{
-+	uint32_t *p = ATTR32(d);
-+
-+	*cscn_ctx = ((uint64_t)qb_attr_code_decode(&code_cgr_cscn_ctx_hi, p)
-+			<< 32) |
-+			(uint64_t)qb_attr_code_decode(&code_cgr_cscn_ctx_lo, p);
-+}
-+
-+#define WRED_EDP_WORD(n) (18 + (n) / 4)
-+#define WRED_EDP_OFFSET(n) (8 * ((n) % 4))
-+#define WRED_PARM_DP_WORD(n) ((n) + 20)
-+#define WRED_WE_EDP(n) (16 + (n) * 2)
-+#define WRED_WE_PARM_DP(n) (17 + (n) * 2)
-+void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, uint32_t idx,
-+				 int *edp)
-+{
-+	uint32_t *p = ATTR32(d);
-+	struct qb_attr_code code_wred_edp = QB_CODE(WRED_EDP_WORD(idx),
-+						    WRED_EDP_OFFSET(idx), 8);
-+
-+	*edp = (int)qb_attr_code_decode(&code_wred_edp, p);
-+}
-+
-+void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth,
-+				      uint64_t *maxth, uint8_t *maxp)
-+{
-+	uint8_t ma, mn, step_i, step_s, pn;
-+
-+	ma = (uint8_t)(dp >> 24);
-+	mn = (uint8_t)(dp >> 19) & 0x1f;
-+	step_i = (uint8_t)(dp >> 11);
-+	step_s = (uint8_t)(dp >> 6) & 0x1f;
-+	pn = (uint8_t)dp & 0x3f;
-+
-+	*maxp = ((pn << 2) * 100) / 256;
-+
-+	if (mn == 0)
-+		*maxth = ma;
-+	else
-+		*maxth = (ma + 256) * (1 << (mn - 1));
-+
-+	if (step_s == 0)
-+		*minth = *maxth - step_i;
-+	else
-+		*minth = *maxth - (256 + step_i) * (1 << (step_s - 1));
-+}
-+
-+void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, uint32_t idx,
-+				     uint32_t *dp)
-+{
-+	uint32_t *p = ATTR32(d);
-+	struct qb_attr_code code_wred_parm_dp = QB_CODE(WRED_PARM_DP_WORD(idx),
-+							0, 8);
-+
-+	*dp = qb_attr_code_decode(&code_wred_parm_dp, p);
-+}
-+
-+/* Query CGR/CCGR/CQ statistics */
-+static struct qb_attr_code code_cgr_stat_ct = QB_CODE(4, 0, 32);
-+static struct qb_attr_code code_cgr_stat_frame_cnt_lo = QB_CODE(4, 0, 32);
-+static struct qb_attr_code code_cgr_stat_frame_cnt_hi = QB_CODE(5, 0, 8);
-+static struct qb_attr_code code_cgr_stat_byte_cnt_lo = QB_CODE(6, 0, 32);
-+static struct qb_attr_code code_cgr_stat_byte_cnt_hi = QB_CODE(7, 0, 16);
-+static int qbman_cgr_statistics_query(struct qbman_swp *s, uint32_t cgid,
-+				      int clear, uint32_t command_type,
-+				      uint64_t *frame_cnt, uint64_t *byte_cnt)
-+{
-+	uint32_t *p;
-+	uint32_t verb, rslt;
-+	uint32_t query_verb;
-+	uint32_t hi, lo;
-+
-+	p = qbman_swp_mc_start(s);
-+	if (!p)
-+		return -EBUSY;
-+
-+	qb_attr_code_encode(&code_cgr_cgid, p, cgid);
-+	if (command_type < 2)
-+		qb_attr_code_encode(&code_cgr_stat_ct, p, command_type);
-+	query_verb = clear ?
-+		     QBMAN_CGR_STAT_QUERY_CLR : QBMAN_CGR_STAT_QUERY;
-+	p = qbman_swp_mc_complete(s, p, p[0] | query_verb);
-+
-+	/* Decode the outcome */
-+	verb = qb_attr_code_decode(&code_generic_verb, p);
-+	rslt = qb_attr_code_decode(&code_generic_rslt, p);
-+	BUG_ON(verb != query_verb);
-+
-+	/* Determine success or failure */
-+	if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
-+		pr_err("Query statistics of CGID 0x%x failed,", cgid);
-+		pr_err(" verb=0x%02x code=0x%02x\n", verb, rslt);
-+		return -EIO;
-+	}
-+
-+	if (frame_cnt) {
-+		hi = qb_attr_code_decode(&code_cgr_stat_frame_cnt_hi, p);
-+		lo = qb_attr_code_decode(&code_cgr_stat_frame_cnt_lo, p);
-+		*frame_cnt = ((uint64_t)hi << 32) | (uint64_t)lo;
-+	}
-+	if (byte_cnt) {
-+		hi = qb_attr_code_decode(&code_cgr_stat_byte_cnt_hi, p);
-+		lo = qb_attr_code_decode(&code_cgr_stat_byte_cnt_lo, p);
-+		*byte_cnt = ((uint64_t)hi << 32) | (uint64_t)lo;
-+	}
-+
-+	return 0;
-+}
-+
-+int qbman_cgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
-+				uint64_t *frame_cnt, uint64_t *byte_cnt)
-+{
-+	return qbman_cgr_statistics_query(s, cgid, clear, 0xff,
-+					  frame_cnt, byte_cnt);
-+}
-+
-+int qbman_ccgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
-+				 uint64_t *frame_cnt, uint64_t *byte_cnt)
-+{
-+	return qbman_cgr_statistics_query(s, cgid, clear, 1,
-+					  frame_cnt, byte_cnt);
-+}
-+
-+int qbman_cq_dequeue_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
-+				uint64_t *frame_cnt, uint64_t *byte_cnt)
-+{
-+	return qbman_cgr_statistics_query(s, cgid, clear, 0,
-+					  frame_cnt, byte_cnt);
-+}
---- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h
-@@ -0,0 +1,136 @@
-+/* Copyright (C) 2015 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+struct qbman_attr {
-+	uint32_t dont_manipulate_directly[40];
-+};
-+
-+/* Buffer pool query commands */
-+int qbman_bp_query(struct qbman_swp *s, uint32_t bpid,
-+		   struct qbman_attr *a);
-+void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae);
-+void qbman_bp_attr_get_swdet(struct qbman_attr *a, uint32_t *swdet);
-+void qbman_bp_attr_get_swdxt(struct qbman_attr *a, uint32_t *swdxt);
-+void qbman_bp_attr_get_hwdet(struct qbman_attr *a, uint32_t *hwdet);
-+void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, uint32_t *hwdxt);
-+void qbman_bp_attr_get_swset(struct qbman_attr *a, uint32_t *swset);
-+void qbman_bp_attr_get_swsxt(struct qbman_attr *a, uint32_t *swsxt);
-+void qbman_bp_attr_get_vbpid(struct qbman_attr *a, uint32_t *vbpid);
-+void qbman_bp_attr_get_icid(struct qbman_attr *a, uint32_t *icid, int *pl);
-+void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, uint64_t *bpscn_addr);
-+void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, uint64_t *bpscn_ctx);
-+void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, uint32_t *hw_targ);
-+int qbman_bp_info_has_free_bufs(struct qbman_attr *a);
-+int qbman_bp_info_is_depleted(struct qbman_attr *a);
-+int qbman_bp_info_is_surplus(struct qbman_attr *a);
-+uint32_t qbman_bp_info_num_free_bufs(struct qbman_attr *a);
-+uint32_t qbman_bp_info_hdptr(struct qbman_attr *a);
-+uint32_t qbman_bp_info_sdcnt(struct qbman_attr *a);
-+uint32_t qbman_bp_info_hdcnt(struct qbman_attr *a);
-+uint32_t qbman_bp_info_sscnt(struct qbman_attr *a);
-+
-+/* FQ query function for programmable fields */
-+int qbman_fq_query(struct qbman_swp *s, uint32_t fqid,
-+		   struct qbman_attr *desc);
-+void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, uint32_t *fqctrl);
-+void qbman_fq_attr_get_cgrid(struct qbman_attr *d, uint32_t *cgrid);
-+void qbman_fq_attr_get_destwq(struct qbman_attr *d, uint32_t *destwq);
-+void qbman_fq_attr_get_icscred(struct qbman_attr *d, uint32_t *icscred);
-+void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, uint32_t *tdthresh);
-+void qbman_fq_attr_get_oa(struct qbman_attr *d,
-+			  int *oa_ics, int *oa_cgr, int32_t *oa_len);
-+void qbman_fq_attr_get_mctl(struct qbman_attr *d,
-+			    int *bdi, int *ff, int *va, int *ps);
-+void qbman_fq_attr_get_ctx(struct qbman_attr *d, uint32_t *hi, uint32_t *lo);
-+void qbman_fq_attr_get_icid(struct qbman_attr *d, uint32_t *icid, int *pl);
-+void qbman_fq_attr_get_vfqid(struct qbman_attr *d, uint32_t *vfqid);
-+void qbman_fq_attr_get_erfqid(struct qbman_attr *d, uint32_t *erfqid);
-+
-+/* FQ query command for non-programmable fields */
-+enum qbman_fq_schedstate_e {
-+	qbman_fq_schedstate_oos = 0,
-+	qbman_fq_schedstate_retired,
-+	qbman_fq_schedstate_tentatively_scheduled,
-+	qbman_fq_schedstate_truly_scheduled,
-+	qbman_fq_schedstate_parked,
-+	qbman_fq_schedstate_held_active,
-+};
-+
-+int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid,
-+			 struct qbman_attr *state);
-+uint32_t qbman_fq_state_schedstate(const struct qbman_attr *state);
-+int qbman_fq_state_force_eligible(const struct qbman_attr *state);
-+int qbman_fq_state_xoff(const struct qbman_attr *state);
-+int qbman_fq_state_retirement_pending(const struct qbman_attr *state);
-+int qbman_fq_state_overflow_error(const struct qbman_attr *state);
-+uint32_t qbman_fq_state_frame_count(const struct qbman_attr *state);
-+uint32_t qbman_fq_state_byte_count(const struct qbman_attr *state);
-+
-+/* CGR query */
-+int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid,
-+		    struct qbman_attr *attr);
-+void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter,
-+			     int *cscn_wq_en_exit, int *cscn_wq_icd);
-+void qbman_cgr_attr_get_mode(struct qbman_attr *d, uint32_t *mode,
-+			     int *rej_cnt_mode, int *cscn_bdi);
-+void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter,
-+			     int *cscn_wr_en_exit, int *cg_wr_ae,
-+			     int *cscn_dcp_en, int *cg_wr_va);
-+void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en,
-+			    uint32_t *i_cnt_wr_bnd);
-+void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en);
-+void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, uint32_t *cs_thres);
-+void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d,
-+				   uint32_t *cs_thres_x);
-+void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, uint32_t *td_thres);
-+void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, uint32_t *cscn_tdcp);
-+void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, uint32_t *cscn_wqid);
-+void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d,
-+				   uint32_t *cscn_vcgid);
-+void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, uint32_t *icid,
-+				int *pl);
-+void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d,
-+				   uint64_t *cg_wr_addr);
-+void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, uint64_t *cscn_ctx);
-+void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, uint32_t idx,
-+				 int *edp);
-+void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth,
-+				      uint64_t *maxth, uint8_t *maxp);
-+void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, uint32_t idx,
-+				     uint32_t *dp);
-+
-+/* CGR/CCGR/CQ statistics query */
-+int qbman_cgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
-+				uint64_t *frame_cnt, uint64_t *byte_cnt);
-+int qbman_ccgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
-+				 uint64_t *frame_cnt, uint64_t *byte_cnt);
-+int qbman_cq_dequeue_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
-+				uint64_t *frame_cnt, uint64_t *byte_cnt);
---- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.c
-@@ -0,0 +1,1212 @@
-+/* Copyright (C) 2014 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "qbman_portal.h"
-+
-+/* QBMan portal management command codes */
-+#define QBMAN_MC_ACQUIRE	0x30
-+#define QBMAN_WQCHAN_CONFIGURE	0x46
-+
-+/* CINH register offsets */
-+#define QBMAN_CINH_SWP_EQAR	0x8c0
-+#define QBMAN_CINH_SWP_DQPI	0xa00
-+#define QBMAN_CINH_SWP_DCAP	0xac0
-+#define QBMAN_CINH_SWP_SDQCR	0xb00
-+#define QBMAN_CINH_SWP_RAR	0xcc0
-+#define QBMAN_CINH_SWP_ISR	0xe00
-+#define QBMAN_CINH_SWP_IER	0xe40
-+#define QBMAN_CINH_SWP_ISDR	0xe80
-+#define QBMAN_CINH_SWP_IIR	0xec0
-+
-+/* CENA register offsets */
-+#define QBMAN_CENA_SWP_EQCR(n)	(0x000 + ((uint32_t)(n) << 6))
-+#define QBMAN_CENA_SWP_DQRR(n)	(0x200 + ((uint32_t)(n) << 6))
-+#define QBMAN_CENA_SWP_RCR(n)	(0x400 + ((uint32_t)(n) << 6))
-+#define QBMAN_CENA_SWP_CR	0x600
-+#define QBMAN_CENA_SWP_RR(vb)	(0x700 + ((uint32_t)(vb) >> 1))
-+#define QBMAN_CENA_SWP_VDQCR	0x780
-+
-+/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
-+#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
-+
-+/* QBMan FQ management command codes */
-+#define QBMAN_FQ_SCHEDULE	0x48
-+#define QBMAN_FQ_FORCE		0x49
-+#define QBMAN_FQ_XON		0x4d
-+#define QBMAN_FQ_XOFF		0x4e
-+
-+/*******************************/
-+/* Pre-defined attribute codes */
-+/*******************************/
-+
-+struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7);
-+struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8);
-+
-+/*************************/
-+/* SDQCR attribute codes */
-+/*************************/
-+
-+/* we put these here because at least some of them are required by
-+ * qbman_swp_init() */
-+struct qb_attr_code code_sdqcr_dct = QB_CODE(0, 24, 2);
-+struct qb_attr_code code_sdqcr_fc = QB_CODE(0, 29, 1);
-+struct qb_attr_code code_sdqcr_tok = QB_CODE(0, 16, 8);
-+#define CODE_SDQCR_DQSRC(n) QB_CODE(0, (n), 1)
-+enum qbman_sdqcr_dct {
-+	qbman_sdqcr_dct_null = 0,
-+	qbman_sdqcr_dct_prio_ics,
-+	qbman_sdqcr_dct_active_ics,
-+	qbman_sdqcr_dct_active
-+};
-+enum qbman_sdqcr_fc {
-+	qbman_sdqcr_fc_one = 0,
-+	qbman_sdqcr_fc_up_to_3 = 1
-+};
-+struct qb_attr_code code_sdqcr_dqsrc = QB_CODE(0, 0, 16);
-+
-+/*********************************/
-+/* Portal constructor/destructor */
-+/*********************************/
-+
-+/* Software portals should always be in the power-on state when we initialise,
-+ * due to the CCSR-based portal reset functionality that MC has.
-+ *
-+ * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
-+ * valid-bits, so we need to support a workaround where we don't trust
-+ * valid-bits when detecting new entries until any stale ring entries have been
-+ * overwritten at least once. The idea is that we read PI for the first few
-+ * entries, then switch to valid-bit after that. The trick is to clear the
-+ * bug-work-around boolean once the PI wraps around the ring for the first
-+ * time.
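-+ *
-+ * (Background: each DQRR entry carries a valid-bit that the hardware inverts
-+ * on every pass around the ring, so software normally detects a new entry by
-+ * comparing that bit against the polarity it expects next; it is exactly that
-+ * comparison the workaround temporarily distrusts.)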
-+ * -+ * Note: this still carries a slight additional cost once the decrementer hits -+ * zero, so ideally the workaround should only be compiled in if the compiled -+ * image needs to support affected chips. We use WORKAROUND_DQRR_RESET_BUG for -+ * this. -+ */ -+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) -+{ -+ int ret; -+ struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL); -+ -+ if (!p) -+ return NULL; -+ p->desc = d; -+#ifdef QBMAN_CHECKING -+ p->mc.check = swp_mc_can_start; -+#endif -+ p->mc.valid_bit = QB_VALID_BIT; -+ p->sdq = 0; -+ qb_attr_code_encode(&code_sdqcr_dct, &p->sdq, qbman_sdqcr_dct_prio_ics); -+ qb_attr_code_encode(&code_sdqcr_fc, &p->sdq, qbman_sdqcr_fc_up_to_3); -+ qb_attr_code_encode(&code_sdqcr_tok, &p->sdq, 0xbb); -+ atomic_set(&p->vdq.busy, 1); -+ p->vdq.valid_bit = QB_VALID_BIT; -+ p->dqrr.next_idx = 0; -+ p->dqrr.valid_bit = QB_VALID_BIT; -+ /* TODO: should also read PI/CI type registers and check that they're on -+ * PoR values. If we're asked to initialise portals that aren't in reset -+ * state, bad things will follow. */ -+#ifdef WORKAROUND_DQRR_RESET_BUG -+ p->dqrr.reset_bug = 1; -+#endif -+ if ((p->desc->qman_version & 0xFFFF0000) < QMAN_REV_4100) -+ p->dqrr.dqrr_size = 4; -+ else -+ p->dqrr.dqrr_size = 8; -+ ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size); -+ if (ret) { -+ kfree(p); -+ pr_err("qbman_swp_sys_init() failed %d\n", ret); -+ return NULL; -+ } -+ /* SDQCR needs to be initialized to 0 when no channels are -+ being dequeued from or else the QMan HW will indicate an -+ error. The values that were calculated above will be -+ applied when dequeues from a specific channel are enabled */ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0); -+ return p; -+} -+ -+void qbman_swp_finish(struct qbman_swp *p) -+{ -+#ifdef QBMAN_CHECKING -+ BUG_ON(p->mc.check != swp_mc_can_start); -+#endif -+ qbman_swp_sys_finish(&p->sys); -+ kfree(p); -+} -+ -+const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p) -+{ -+ return p->desc; -+} -+ -+/**************/ -+/* Interrupts */ -+/**************/ -+ -+uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p) -+{ -+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR); -+} -+ -+void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask) -+{ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask); -+} -+ -+uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p) -+{ -+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR); -+} -+ -+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask) -+{ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask); -+} -+ -+uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p) -+{ -+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER); -+} -+ -+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask) -+{ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask); -+} -+ -+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p) -+{ -+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR); -+} -+ -+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit) -+{ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0); -+} -+ -+/***********************/ -+/* Management commands */ -+/***********************/ -+ -+/* -+ * Internal code common to all types of management commands. 
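-+ * The expected calling sequence (qbman_swp_acquire() later in this file is a -+ * complete example) is; -+ * p = qbman_swp_mc_start(s); -+ * if (!p) -+ * return -EBUSY; -+ * ... encode the command's attributes into p ... -+ * p = qbman_swp_mc_complete(s, p, p[0] | <command verb>); -+ * ... decode the result's attributes from p ...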
-+ */ -+ -+void *qbman_swp_mc_start(struct qbman_swp *p) -+{ -+ void *ret; -+#ifdef QBMAN_CHECKING -+ BUG_ON(p->mc.check != swp_mc_can_start); -+#endif -+ ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR); -+#ifdef QBMAN_CHECKING -+ if (!ret) -+ p->mc.check = swp_mc_can_submit; -+#endif -+ return ret; -+} -+ -+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb) -+{ -+ uint32_t *v = cmd; -+#ifdef QBMAN_CHECKING -+ BUG_ON(p->mc.check != swp_mc_can_submit); -+#endif -+ /* TBD: "|=" is going to hurt performance. Need to move as many fields as -+ * possible out of word zero, and for those that remain, the "OR" needs -+ * to occur at the caller side. This debug check helps to catch cases -+ * where the caller wants to OR but has forgotten to do so. */ -+ BUG_ON((*v & cmd_verb) != *v); -+ *v = cmd_verb | p->mc.valid_bit; -+ qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd); -+#ifdef QBMAN_CHECKING -+ p->mc.check = swp_mc_can_poll; -+#endif -+} -+ -+void *qbman_swp_mc_result(struct qbman_swp *p) -+{ -+ uint32_t *ret, verb; -+#ifdef QBMAN_CHECKING -+ BUG_ON(p->mc.check != swp_mc_can_poll); -+#endif -+ qbman_cena_invalidate_prefetch(&p->sys, -+ QBMAN_CENA_SWP_RR(p->mc.valid_bit)); -+ ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit)); -+ /* Remove the valid-bit - command completed iff the rest is non-zero */ -+ verb = ret[0] & ~QB_VALID_BIT; -+ if (!verb) -+ return NULL; -+#ifdef QBMAN_CHECKING -+ p->mc.check = swp_mc_can_start; -+#endif -+ p->mc.valid_bit ^= QB_VALID_BIT; -+ return ret; -+} -+ -+/***********/ -+/* Enqueue */ -+/***********/ -+ -+/* These should be const, eventually */ -+static struct qb_attr_code code_eq_cmd = QB_CODE(0, 0, 2); -+static struct qb_attr_code code_eq_eqdi = QB_CODE(0, 3, 1); -+static struct qb_attr_code code_eq_dca_en = QB_CODE(0, 15, 1); -+static struct qb_attr_code code_eq_dca_pk = QB_CODE(0, 14, 1); -+static struct qb_attr_code code_eq_dca_idx = QB_CODE(0, 8, 2); -+static struct qb_attr_code code_eq_orp_en = QB_CODE(0, 2, 1); -+static struct qb_attr_code code_eq_orp_is_nesn = QB_CODE(0, 31, 1); -+static struct qb_attr_code code_eq_orp_nlis = QB_CODE(0, 30, 1); -+static struct qb_attr_code code_eq_orp_seqnum = QB_CODE(0, 16, 14); -+static struct qb_attr_code code_eq_opr_id = QB_CODE(1, 0, 16); -+static struct qb_attr_code code_eq_tgt_id = QB_CODE(2, 0, 24); -+/* static struct qb_attr_code code_eq_tag = QB_CODE(3, 0, 32); */ -+static struct qb_attr_code code_eq_qd_en = QB_CODE(0, 4, 1); -+static struct qb_attr_code code_eq_qd_bin = QB_CODE(4, 0, 16); -+static struct qb_attr_code code_eq_qd_pri = QB_CODE(4, 16, 4); -+static struct qb_attr_code code_eq_rsp_stash = QB_CODE(5, 16, 1); -+static struct qb_attr_code code_eq_rsp_id = QB_CODE(5, 24, 8); -+static struct qb_attr_code code_eq_rsp_lo = QB_CODE(6, 0, 32); -+ -+enum qbman_eq_cmd_e { -+ /* No enqueue, primarily for plugging ORP gaps for dropped frames */ -+ qbman_eq_cmd_empty, -+ /* DMA an enqueue response once complete */ -+ qbman_eq_cmd_respond, -+ /* DMA an enqueue response only if the enqueue fails */ -+ qbman_eq_cmd_respond_reject -+}; -+ -+void qbman_eq_desc_clear(struct qbman_eq_desc *d) -+{ -+ memset(d, 0, sizeof(*d)); -+} -+ -+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_orp_en, cl, 0); -+ qb_attr_code_encode(&code_eq_cmd, cl, -+ respond_success ?
qbman_eq_cmd_respond : -+ qbman_eq_cmd_respond_reject); -+} -+ -+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success, -+ uint32_t opr_id, uint32_t seqnum, int incomplete) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_orp_en, cl, 1); -+ qb_attr_code_encode(&code_eq_cmd, cl, -+ respond_success ? qbman_eq_cmd_respond : -+ qbman_eq_cmd_respond_reject); -+ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id); -+ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum); -+ qb_attr_code_encode(&code_eq_orp_nlis, cl, !!incomplete); -+} -+ -+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id, -+ uint32_t seqnum) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_orp_en, cl, 1); -+ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty); -+ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id); -+ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum); -+ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0); -+ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 0); -+} -+ -+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id, -+ uint32_t seqnum) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_orp_en, cl, 1); -+ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty); -+ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id); -+ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum); -+ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0); -+ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 1); -+} -+ -+void qbman_eq_desc_set_response(struct qbman_eq_desc *d, -+ dma_addr_t storage_phys, -+ int stash) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode_64(&code_eq_rsp_lo, (uint64_t *)cl, storage_phys); -+ qb_attr_code_encode(&code_eq_rsp_stash, cl, !!stash); -+} -+ -+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_rsp_id, cl, (uint32_t)token); -+} -+ -+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_qd_en, cl, 0); -+ qb_attr_code_encode(&code_eq_tgt_id, cl, fqid); -+} -+ -+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid, -+ uint32_t qd_bin, uint32_t qd_prio) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_qd_en, cl, 1); -+ qb_attr_code_encode(&code_eq_tgt_id, cl, qdid); -+ qb_attr_code_encode(&code_eq_qd_bin, cl, qd_bin); -+ qb_attr_code_encode(&code_eq_qd_pri, cl, qd_prio); -+} -+ -+void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_eqdi, cl, !!enable); -+} -+ -+void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable, -+ uint32_t dqrr_idx, int park) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_dca_en, cl, !!enable); -+ if (enable) { -+ qb_attr_code_encode(&code_eq_dca_pk, cl, !!park); -+ qb_attr_code_encode(&code_eq_dca_idx, cl, dqrr_idx); -+ } -+} -+ -+#define EQAR_IDX(eqar) ((eqar) & 0x7) -+#define EQAR_VB(eqar) ((eqar) & 0x80) -+#define EQAR_SUCCESS(eqar) ((eqar) & 0x100) -+ -+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d, -+ const struct qbman_fd *fd) -+{ -+ uint32_t *p; -+ const uint32_t *cl = qb_cl(d); -+ uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR); -+ -+ pr_debug("EQAR=%08x\n", eqar); -+ if (!EQAR_SUCCESS(eqar)) -+ return -EBUSY; -+ p = qbman_cena_write_start(&s->sys, -+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar))); -+ word_copy(&p[1], &cl[1], 7); -+ 
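/* Words 1..7 of the EQCR entry carry the rest of the enqueue descriptor; -+ * the frame descriptor itself follows from word 8. The verb in word 0 is -+ * deliberately written last, below, so the hardware only sees a valid -+ * entry once the whole command is in place. */ -+ 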
word_copy(&p[8], fd, sizeof(*fd) >> 2); -+ /* Set the verb byte, have to substitute in the valid-bit */ -+ p[0] = cl[0] | EQAR_VB(eqar); -+ qbman_cena_write_complete(&s->sys, -+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)), -+ p); -+ return 0; -+} -+ -+/*************************/ -+/* Static (push) dequeue */ -+/*************************/ -+ -+void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled) -+{ -+ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx); -+ -+ BUG_ON(channel_idx > 15); -+ *enabled = (int)qb_attr_code_decode(&code, &s->sdq); -+} -+ -+void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable) -+{ -+ uint16_t dqsrc; -+ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx); -+ -+ BUG_ON(channel_idx > 15); -+ qb_attr_code_encode(&code, &s->sdq, !!enable); -+ /* Read back the complete src map. If no channels are enabled -+ * the SDQCR must be 0 or else QMan will assert errors */ -+ dqsrc = (uint16_t)qb_attr_code_decode(&code_sdqcr_dqsrc, &s->sdq); -+ if (dqsrc != 0) -+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq); -+ else -+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0); -+} -+ -+/***************************/ -+/* Volatile (pull) dequeue */ -+/***************************/ -+ -+/* These should be const, eventually */ -+static struct qb_attr_code code_pull_dct = QB_CODE(0, 0, 2); -+static struct qb_attr_code code_pull_dt = QB_CODE(0, 2, 2); -+static struct qb_attr_code code_pull_rls = QB_CODE(0, 4, 1); -+static struct qb_attr_code code_pull_stash = QB_CODE(0, 5, 1); -+static struct qb_attr_code code_pull_numframes = QB_CODE(0, 8, 4); -+static struct qb_attr_code code_pull_token = QB_CODE(0, 16, 8); -+static struct qb_attr_code code_pull_dqsource = QB_CODE(1, 0, 24); -+static struct qb_attr_code code_pull_rsp_lo = QB_CODE(2, 0, 32); -+ -+enum qb_pull_dt_e { -+ qb_pull_dt_channel, -+ qb_pull_dt_workqueue, -+ qb_pull_dt_framequeue -+}; -+ -+void qbman_pull_desc_clear(struct qbman_pull_desc *d) -+{ -+ memset(d, 0, sizeof(*d)); -+} -+ -+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d, -+ struct dpaa2_dq *storage, -+ dma_addr_t storage_phys, -+ int stash) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ /* Squiggle the pointer 'storage' into the extra 2 words of the -+ * descriptor (which aren't copied to the hw command) */ -+ *(void **)&cl[4] = storage; -+ if (!storage) { -+ qb_attr_code_encode(&code_pull_rls, cl, 0); -+ return; -+ } -+ qb_attr_code_encode(&code_pull_rls, cl, 1); -+ qb_attr_code_encode(&code_pull_stash, cl, !!stash); -+ qb_attr_code_encode_64(&code_pull_rsp_lo, (uint64_t *)cl, storage_phys); -+} -+ -+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ BUG_ON(!numframes || (numframes > 16)); -+ qb_attr_code_encode(&code_pull_numframes, cl, -+ (uint32_t)(numframes - 1)); -+} -+ -+void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_pull_token, cl, token); -+} -+ -+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_pull_dct, cl, 1); -+ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_framequeue); -+ qb_attr_code_encode(&code_pull_dqsource, cl, fqid); -+} -+ -+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid, -+ enum qbman_pull_type_e dct) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_pull_dct, cl, dct); -+ qb_attr_code_encode(&code_pull_dt, cl, 
qb_pull_dt_workqueue); -+ qb_attr_code_encode(&code_pull_dqsource, cl, wqid); -+} -+ -+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid, -+ enum qbman_pull_type_e dct) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_pull_dct, cl, dct); -+ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_channel); -+ qb_attr_code_encode(&code_pull_dqsource, cl, chid); -+} -+ -+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d) -+{ -+ uint32_t *p; -+ uint32_t *cl = qb_cl(d); -+ -+ if (!atomic_dec_and_test(&s->vdq.busy)) { -+ atomic_inc(&s->vdq.busy); -+ return -EBUSY; -+ } -+ s->vdq.storage = *(void **)&cl[4]; -+ qb_attr_code_encode(&code_pull_token, cl, 1); -+ p = qbman_cena_write_start(&s->sys, QBMAN_CENA_SWP_VDQCR); -+ word_copy(&p[1], &cl[1], 3); -+ /* Set the verb byte, have to substitute in the valid-bit */ -+ p[0] = cl[0] | s->vdq.valid_bit; -+ s->vdq.valid_bit ^= QB_VALID_BIT; -+ qbman_cena_write_complete(&s->sys, QBMAN_CENA_SWP_VDQCR, p); -+ return 0; -+} -+ -+/****************/ -+/* Polling DQRR */ -+/****************/ -+ -+static struct qb_attr_code code_dqrr_verb = QB_CODE(0, 0, 8); -+static struct qb_attr_code code_dqrr_response = QB_CODE(0, 0, 7); -+static struct qb_attr_code code_dqrr_stat = QB_CODE(0, 8, 8); -+static struct qb_attr_code code_dqrr_seqnum = QB_CODE(0, 16, 14); -+static struct qb_attr_code code_dqrr_odpid = QB_CODE(1, 0, 16); -+/* static struct qb_attr_code code_dqrr_tok = QB_CODE(1, 24, 8); */ -+static struct qb_attr_code code_dqrr_fqid = QB_CODE(2, 0, 24); -+static struct qb_attr_code code_dqrr_byte_count = QB_CODE(4, 0, 32); -+static struct qb_attr_code code_dqrr_frame_count = QB_CODE(5, 0, 24); -+static struct qb_attr_code code_dqrr_ctx_lo = QB_CODE(6, 0, 32); -+ -+#define QBMAN_RESULT_DQ 0x60 -+#define QBMAN_RESULT_FQRN 0x21 -+#define QBMAN_RESULT_FQRNI 0x22 -+#define QBMAN_RESULT_FQPN 0x24 -+#define QBMAN_RESULT_FQDAN 0x25 -+#define QBMAN_RESULT_CDAN 0x26 -+#define QBMAN_RESULT_CSCN_MEM 0x27 -+#define QBMAN_RESULT_CGCU 0x28 -+#define QBMAN_RESULT_BPSCN 0x29 -+#define QBMAN_RESULT_CSCN_WQ 0x2a -+ -+static struct qb_attr_code code_dqpi_pi = QB_CODE(0, 0, 4); -+ -+/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry -+ * only once, so repeated calls can return a sequence of DQRR entries, without -+ * requiring they be consumed immediately or in any particular order. */ -+const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s) -+{ -+ uint32_t verb; -+ uint32_t response_verb; -+ uint32_t flags; -+ const struct dpaa2_dq *dq; -+ const uint32_t *p; -+ -+ /* Before using valid-bit to detect if something is there, we have to -+ * handle the case of the DQRR reset bug... */ -+#ifdef WORKAROUND_DQRR_RESET_BUG -+ if (unlikely(s->dqrr.reset_bug)) { -+ /* We pick up new entries by cache-inhibited producer index, -+ * which means that a non-coherent mapping would require us to -+ * invalidate and read *only* once that PI has indicated that -+ * there's an entry here. The first trip around the DQRR ring -+ * will be much less efficient than all subsequent trips around -+ * it... -+ */ -+ uint32_t dqpi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI); -+ uint32_t pi = qb_attr_code_decode(&code_dqpi_pi, &dqpi); -+ /* there are new entries iff pi != next_idx */ -+ if (pi == s->dqrr.next_idx) -+ return NULL; -+ /* if next_idx is/was the last ring index, and 'pi' is -+ * different, we can disable the workaround as all the ring -+ * entries have now been DMA'd to so valid-bit checking is -+ * repaired. 
Note: this logic needs to be based on next_idx -+ * (which increments one at a time), rather than on pi (which -+ * can burst and wrap-around between our snapshots of it). -+ */ -+ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) { -+ pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n", -+ s->dqrr.next_idx, pi); -+ s->dqrr.reset_bug = 0; -+ } -+ qbman_cena_invalidate_prefetch(&s->sys, -+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); -+ } -+#endif -+ -+ dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); -+ p = qb_cl(dq); -+ verb = qb_attr_code_decode(&code_dqrr_verb, p); -+ -+ /* If the valid-bit isn't of the expected polarity, nothing there. Note, -+ * in the DQRR reset bug workaround, we shouldn't need to skip this -+ * check, because we've already determined that a new entry is available -+ * and we've invalidated the cacheline before reading it, so the -+ * valid-bit behaviour is repaired and should tell us what we already -+ * knew from reading PI. -+ */ -+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) { -+ qbman_cena_invalidate_prefetch(&s->sys, -+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); -+ return NULL; -+ } -+ /* There's something there. Advance "next_idx" to the next ring -+ * entry (and prefetch it) before returning what we found. */ -+ s->dqrr.next_idx++; -+ s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */ -+ /* TODO: it's possible to do all this without conditionals, optimise it -+ * later. */ -+ if (!s->dqrr.next_idx) -+ s->dqrr.valid_bit ^= QB_VALID_BIT; -+ -+ /* If this is the final response to a volatile dequeue command -+ indicate that the vdq is no longer busy */ -+ flags = dpaa2_dq_flags(dq); -+ response_verb = qb_attr_code_decode(&code_dqrr_response, &verb); -+ if ((response_verb == QBMAN_RESULT_DQ) && -+ (flags & DPAA2_DQ_STAT_VOLATILE) && -+ (flags & DPAA2_DQ_STAT_EXPIRED)) -+ atomic_inc(&s->vdq.busy); -+ -+ qbman_cena_invalidate_prefetch(&s->sys, -+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); -+ return dq; -+} -+ -+/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */ -+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq) -+{ -+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq)); -+} -+ -+/*********************************/ -+/* Polling user-provided storage */ -+/*********************************/ -+ -+int qbman_result_has_new_result(struct qbman_swp *s, -+ const struct dpaa2_dq *dq) -+{ -+ /* To avoid converting the little-endian DQ entry to host-endian prior -+ * to us knowing whether there is a valid entry or not (and running the -+ * risk of corrupting the incoming hardware LE write), we detect in -+ * hardware endianness rather than host. This means we need a different -+ * "code" depending on whether we are BE or LE in software, which is -+ * where DQRR_TOK_OFFSET comes in... */ -+ static struct qb_attr_code code_dqrr_tok_detect = -+ QB_CODE(0, DQRR_TOK_OFFSET, 8); -+ /* The user trying to poll for a result treats "dq" as const. It is -+ * however the same address that was provided to us non-const in the -+ * first place, for directing hardware DMA to. So we can cast away the -+ * const because it is mutable from our perspective. */ -+ uint32_t *p = qb_cl((struct dpaa2_dq *)dq); -+ uint32_t token; -+ -+ token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]); -+ if (token != 1) -+ return 0; -+ qb_attr_code_encode(&code_dqrr_tok_detect, &p[1], 0); -+ -+ /* Only now do we convert from hardware to host endianness. 
Also, as we -+ * are returning success, the user has promised not to call us again, so -+ * there's no risk of us converting the endianness twice... */ -+ make_le32_n(p, 16); -+ -+ /* VDQCR "no longer busy" hook - not quite the same as DQRR, because the -+ * fact "VDQCR" shows busy doesn't mean that the result we're looking at -+ * is from the same command. Eg. we may be looking at our 10th dequeue -+ * result from our first VDQCR command, yet the second dequeue command -+ * could have been kicked off already, after seeing the 1st result. Ie. -+ * the result we're looking at is not necessarily proof that we can -+ * reset "busy". We instead base the decision on whether the current -+ * result is sitting at the first 'storage' location of the busy -+ * command. */ -+ if (s->vdq.storage == dq) { -+ s->vdq.storage = NULL; -+ atomic_inc(&s->vdq.busy); -+ } -+ return 1; -+} -+ -+/********************************/ -+/* Categorising qbman_result */ -+/********************************/ -+ -+static struct qb_attr_code code_result_in_mem = -+ QB_CODE(0, QBMAN_RESULT_VERB_OFFSET_IN_MEM, 7); -+ -+static inline int __qbman_result_is_x(const struct dpaa2_dq *dq, uint32_t x) -+{ -+ const uint32_t *p = qb_cl(dq); -+ uint32_t response_verb = qb_attr_code_decode(&code_dqrr_response, p); -+ -+ return response_verb == x; -+} -+ -+static inline int __qbman_result_is_x_in_mem(const struct dpaa2_dq *dq, -+ uint32_t x) -+{ -+ const uint32_t *p = qb_cl(dq); -+ uint32_t response_verb = qb_attr_code_decode(&code_result_in_mem, p); -+ -+ return (response_verb == x); -+} -+ -+int qbman_result_is_DQ(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x(dq, QBMAN_RESULT_DQ); -+} -+ -+int qbman_result_is_FQDAN(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN); -+} -+ -+int qbman_result_is_CDAN(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN); -+} -+ -+int qbman_result_is_CSCN(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CSCN_MEM) || -+ __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ); -+} -+ -+int qbman_result_is_BPSCN(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_BPSCN); -+} -+ -+int qbman_result_is_CGCU(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CGCU); -+} -+ -+int qbman_result_is_FQRN(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRN); -+} -+ -+int qbman_result_is_FQRNI(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRNI); -+} -+ -+int qbman_result_is_FQPN(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN); -+} -+ -+/*********************************/ -+/* Parsing frame dequeue results */ -+/*********************************/ -+ -+/* These APIs assume qbman_result_is_DQ() is TRUE */ -+ -+uint32_t dpaa2_dq_flags(const struct dpaa2_dq *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return qb_attr_code_decode(&code_dqrr_stat, p); -+} -+ -+uint16_t dpaa2_dq_seqnum(const struct dpaa2_dq *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return (uint16_t)qb_attr_code_decode(&code_dqrr_seqnum, p); -+} -+ -+uint16_t dpaa2_dq_odpid(const struct dpaa2_dq *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return (uint16_t)qb_attr_code_decode(&code_dqrr_odpid, p); -+} -+ -+uint32_t dpaa2_dq_fqid(const struct dpaa2_dq *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return qb_attr_code_decode(&code_dqrr_fqid, p); -+} -+ -+uint32_t 
dpaa2_dq_byte_count(const struct dpaa2_dq *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return qb_attr_code_decode(&code_dqrr_byte_count, p); -+} -+ -+uint32_t dpaa2_dq_frame_count(const struct dpaa2_dq *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return qb_attr_code_decode(&code_dqrr_frame_count, p); -+} -+ -+uint64_t dpaa2_dq_fqd_ctx(const struct dpaa2_dq *dq) -+{ -+ const uint64_t *p = (uint64_t *)qb_cl(dq); -+ -+ return qb_attr_code_decode_64(&code_dqrr_ctx_lo, p); -+} -+EXPORT_SYMBOL(dpaa2_dq_fqd_ctx); -+ -+const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return (const struct dpaa2_fd *)&p[8]; -+} -+EXPORT_SYMBOL(dpaa2_dq_fd); -+ -+/**************************************/ -+/* Parsing state-change notifications */ -+/**************************************/ -+ -+static struct qb_attr_code code_scn_state = QB_CODE(0, 16, 8); -+static struct qb_attr_code code_scn_rid = QB_CODE(1, 0, 24); -+static struct qb_attr_code code_scn_state_in_mem = -+ QB_CODE(0, SCN_STATE_OFFSET_IN_MEM, 8); -+static struct qb_attr_code code_scn_rid_in_mem = -+ QB_CODE(1, SCN_RID_OFFSET_IN_MEM, 24); -+static struct qb_attr_code code_scn_ctx_lo = QB_CODE(2, 0, 32); -+ -+uint8_t qbman_result_SCN_state(const struct dpaa2_dq *scn) -+{ -+ const uint32_t *p = qb_cl(scn); -+ -+ return (uint8_t)qb_attr_code_decode(&code_scn_state, p); -+} -+ -+uint32_t qbman_result_SCN_rid(const struct dpaa2_dq *scn) -+{ -+ const uint32_t *p = qb_cl(scn); -+ -+ return qb_attr_code_decode(&code_scn_rid, p); -+} -+ -+uint64_t qbman_result_SCN_ctx(const struct dpaa2_dq *scn) -+{ -+ const uint64_t *p = (uint64_t *)qb_cl(scn); -+ -+ return qb_attr_code_decode_64(&code_scn_ctx_lo, p); -+} -+ -+uint8_t qbman_result_SCN_state_in_mem(const struct dpaa2_dq *scn) -+{ -+ const uint32_t *p = qb_cl(scn); -+ -+ return (uint8_t)qb_attr_code_decode(&code_scn_state_in_mem, p); -+} -+ -+uint32_t qbman_result_SCN_rid_in_mem(const struct dpaa2_dq *scn) -+{ -+ const uint32_t *p = qb_cl(scn); -+ uint32_t result_rid; -+ -+ result_rid = qb_attr_code_decode(&code_scn_rid_in_mem, p); -+ return make_le24(result_rid); -+} -+ -+/*****************/ -+/* Parsing BPSCN */ -+/*****************/ -+uint16_t qbman_result_bpscn_bpid(const struct dpaa2_dq *scn) -+{ -+ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0x3FFF; -+} -+ -+int qbman_result_bpscn_has_free_bufs(const struct dpaa2_dq *scn) -+{ -+ return !(int)(qbman_result_SCN_state_in_mem(scn) & 0x1); -+} -+ -+int qbman_result_bpscn_is_depleted(const struct dpaa2_dq *scn) -+{ -+ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x2); -+} -+ -+int qbman_result_bpscn_is_surplus(const struct dpaa2_dq *scn) -+{ -+ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x4); -+} -+ -+uint64_t qbman_result_bpscn_ctx(const struct dpaa2_dq *scn) -+{ -+ return qbman_result_SCN_ctx(scn); -+} -+ -+/*****************/ -+/* Parsing CGCU */ -+/*****************/ -+uint16_t qbman_result_cgcu_cgid(const struct dpaa2_dq *scn) -+{ -+ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0xFFFF; -+} -+ -+uint64_t qbman_result_cgcu_icnt(const struct dpaa2_dq *scn) -+{ -+ return qbman_result_SCN_ctx(scn) & 0xFFFFFFFFFF; -+} -+ -+/******************/ -+/* Buffer release */ -+/******************/ -+ -+/* These should be const, eventually */ -+/* static struct qb_attr_code code_release_num = QB_CODE(0, 0, 3); */ -+static struct qb_attr_code code_release_set_me = QB_CODE(0, 5, 1); -+static struct qb_attr_code code_release_rcdi = QB_CODE(0, 6, 1); -+static struct qb_attr_code 
code_release_bpid = QB_CODE(0, 16, 16); -+ -+void qbman_release_desc_clear(struct qbman_release_desc *d) -+{ -+ uint32_t *cl; -+ -+ memset(d, 0, sizeof(*d)); -+ cl = qb_cl(d); -+ qb_attr_code_encode(&code_release_set_me, cl, 1); -+} -+ -+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_release_bpid, cl, bpid); -+} -+ -+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_release_rcdi, cl, !!enable); -+} -+ -+#define RAR_IDX(rar) ((rar) & 0x7) -+#define RAR_VB(rar) ((rar) & 0x80) -+#define RAR_SUCCESS(rar) ((rar) & 0x100) -+ -+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d, -+ const uint64_t *buffers, unsigned int num_buffers) -+{ -+ uint32_t *p; -+ const uint32_t *cl = qb_cl(d); -+ uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR); -+ -+ pr_debug("RAR=%08x\n", rar); -+ if (!RAR_SUCCESS(rar)) -+ return -EBUSY; -+ BUG_ON(!num_buffers || (num_buffers > 7)); -+ /* Start the release command */ -+ p = qbman_cena_write_start(&s->sys, -+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar))); -+ /* Copy the caller's buffer pointers to the command */ -+ u64_to_le32_copy(&p[2], buffers, num_buffers); -+ /* Set the verb byte, have to substitute in the valid-bit and the number -+ * of buffers. */ -+ p[0] = cl[0] | RAR_VB(rar) | num_buffers; -+ qbman_cena_write_complete(&s->sys, -+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)), -+ p); -+ return 0; -+} -+ -+/*******************/ -+/* Buffer acquires */ -+/*******************/ -+ -+/* These should be const, eventually */ -+static struct qb_attr_code code_acquire_bpid = QB_CODE(0, 16, 16); -+static struct qb_attr_code code_acquire_num = QB_CODE(1, 0, 3); -+static struct qb_attr_code code_acquire_r_num = QB_CODE(1, 0, 3); -+ -+int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers, -+ unsigned int num_buffers) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt, num; -+ -+ BUG_ON(!num_buffers || (num_buffers > 7)); -+ -+ /* Start the management command */ -+ p = qbman_swp_mc_start(s); -+ -+ if (!p) -+ return -EBUSY; -+ -+ /* Encode the caller-provided attributes */ -+ qb_attr_code_encode(&code_acquire_bpid, p, bpid); -+ qb_attr_code_encode(&code_acquire_num, p, num_buffers); -+ -+ /* Complete the management command */ -+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_MC_ACQUIRE); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ num = qb_attr_code_decode(&code_acquire_r_num, p); -+ BUG_ON(verb != QBMAN_MC_ACQUIRE); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n", -+ bpid, rslt); -+ return -EIO; -+ } -+ BUG_ON(num > num_buffers); -+ /* Copy the acquired buffers to the caller's array */ -+ u64_from_le32_copy(buffers, &p[2], num); -+ return (int)num; -+} -+ -+/*****************/ -+/* FQ management */ -+/*****************/ -+ -+static struct qb_attr_code code_fqalt_fqid = QB_CODE(1, 0, 32); -+ -+static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid, -+ uint8_t alt_fq_verb) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt; -+ -+ /* Start the management command */ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ -+ qb_attr_code_encode(&code_fqalt_fqid, p, fqid); -+ /* Complete the management command */ -+ p = qbman_swp_mc_complete(s, p, p[0] | alt_fq_verb); 
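 -+ /* qbman_swp_mc_complete() submits the command and then spins (with a -+ * bounded loop when QBMAN_CHECKING is set) until the hardware publishes -+ * the response; on return, p points at the 64-byte result. */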
-+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(verb != alt_fq_verb); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n", -+ fqid, alt_fq_verb, rslt); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid) -+{ -+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE); -+} -+ -+int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid) -+{ -+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE); -+} -+ -+int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid) -+{ -+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON); -+} -+ -+int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid) -+{ -+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF); -+} -+ -+/**********************/ -+/* Channel management */ -+/**********************/ -+ -+static struct qb_attr_code code_cdan_cid = QB_CODE(0, 16, 12); -+static struct qb_attr_code code_cdan_we = QB_CODE(1, 0, 8); -+static struct qb_attr_code code_cdan_en = QB_CODE(1, 8, 1); -+static struct qb_attr_code code_cdan_ctx_lo = QB_CODE(2, 0, 32); -+ -+/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it -+ * would be irresponsible to expose it. */ -+#define CODE_CDAN_WE_EN 0x1 -+#define CODE_CDAN_WE_CTX 0x4 -+ -+static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid, -+ uint8_t we_mask, uint8_t cdan_en, -+ uint64_t ctx) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt; -+ -+ /* Start the management command */ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ -+ /* Encode the caller-provided attributes */ -+ qb_attr_code_encode(&code_cdan_cid, p, channelid); -+ qb_attr_code_encode(&code_cdan_we, p, we_mask); -+ qb_attr_code_encode(&code_cdan_en, p, cdan_en); -+ qb_attr_code_encode_64(&code_cdan_ctx_lo, (uint64_t *)p, ctx); -+ /* Complete the management command */ -+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQCHAN_CONFIGURE); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(verb != QBMAN_WQCHAN_CONFIGURE); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("CDAN cQID %d failed: code = 0x%02x\n", -+ channelid, rslt); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid, -+ uint64_t ctx) -+{ -+ return qbman_swp_CDAN_set(s, channelid, -+ CODE_CDAN_WE_CTX, -+ 0, ctx); -+} -+ -+int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid) -+{ -+ return qbman_swp_CDAN_set(s, channelid, -+ CODE_CDAN_WE_EN, -+ 1, 0); -+} -+int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid) -+{ -+ return qbman_swp_CDAN_set(s, channelid, -+ CODE_CDAN_WE_EN, -+ 0, 0); -+} -+int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid, -+ uint64_t ctx) -+{ -+ return qbman_swp_CDAN_set(s, channelid, -+ CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX, -+ 1, ctx); -+} ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.h -@@ -0,0 +1,261 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include "qbman_private.h" -+#include "fsl_qbman_portal.h" -+#include "../../include/fsl_dpaa2_fd.h" -+ -+/* All QBMan command and result structures use this "valid bit" encoding */ -+#define QB_VALID_BIT ((uint32_t)0x80) -+ -+/* Management command result codes */ -+#define QBMAN_MC_RSLT_OK 0xf0 -+ -+/* TBD: as of QBMan 4.1, DQRR will be 8 rather than 4! */ -+#define QBMAN_DQRR_SIZE 4 -+ -+/* DQRR valid-bit reset bug. See qbman_portal.c::qbman_swp_init(). */ -+#define WORKAROUND_DQRR_RESET_BUG -+ -+/* --------------------- */ -+/* portal data structure */ -+/* --------------------- */ -+ -+struct qbman_swp { -+ const struct qbman_swp_desc *desc; -+ /* The qbman_sys (ie. arch/OS-specific) support code can put anything it -+ * needs in here. */ -+ struct qbman_swp_sys sys; -+ /* Management commands */ -+ struct { -+#ifdef QBMAN_CHECKING -+ enum swp_mc_check { -+ swp_mc_can_start, /* call __qbman_swp_mc_start() */ -+ swp_mc_can_submit, /* call __qbman_swp_mc_submit() */ -+ swp_mc_can_poll, /* call __qbman_swp_mc_result() */ -+ } check; -+#endif -+ uint32_t valid_bit; /* 0x00 or 0x80 */ -+ } mc; -+ /* Push dequeues */ -+ uint32_t sdq; -+ /* Volatile dequeues */ -+ struct { -+ /* VDQCR supports a "1 deep pipeline", meaning that if you know -+ * the last-submitted command is already executing in the -+ * hardware (as evidenced by at least 1 valid dequeue result), -+ * you can write another dequeue command to the register, the -+ * hardware will start executing it as soon as the -+ * already-executing command terminates. (This minimises latency -+ * and stalls.) With that in mind, this "busy" variable refers -+ * to whether or not a command can be submitted, not whether or -+ * not a previously-submitted command is still executing. 
In -+ * other words, once proof is seen that the previously-submitted -+ * command is executing, "vdq" is no longer "busy". -+ */ -+ atomic_t busy; -+ uint32_t valid_bit; /* 0x00 or 0x80 */ -+ /* We need to determine when vdq is no longer busy. This depends -+ * on whether the "busy" (last-submitted) dequeue command is -+ * targeting DQRR or main-memory, and detection is based on the -+ * presence of the dequeue command's "token" showing up in -+ * dequeue entries in DQRR or main-memory (respectively). */ -+ struct dpaa2_dq *storage; /* NULL if DQRR */ -+ } vdq; -+ /* DQRR */ -+ struct { -+ uint32_t next_idx; -+ uint32_t valid_bit; -+ uint8_t dqrr_size; -+#ifdef WORKAROUND_DQRR_RESET_BUG -+ int reset_bug; -+#endif -+ } dqrr; -+}; -+ -+/* -------------------------- */ -+/* portal management commands */ -+/* -------------------------- */ -+ -+/* Different management commands all use this common base layer of code to issue -+ * commands and poll for results. The first function returns a pointer to where -+ * the caller should fill in their MC command (though they should ignore the -+ * verb byte), the second function merges in the caller-supplied command -+ * verb (which should not include the valid-bit) and submits the command to -+ * hardware, and the third function checks for a completed response (returns -+ * non-NULL only if the response is complete). */ -+void *qbman_swp_mc_start(struct qbman_swp *p); -+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb); -+void *qbman_swp_mc_result(struct qbman_swp *p); -+ -+/* Wraps up submit + poll-for-result */ -+static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd, -+ uint32_t cmd_verb) -+{ -+ int loopvar; -+ -+ qbman_swp_mc_submit(swp, cmd, cmd_verb); -+ DBG_POLL_START(loopvar); -+ do { -+ DBG_POLL_CHECK(loopvar); -+ cmd = qbman_swp_mc_result(swp); -+ } while (!cmd); -+ return cmd; -+} -+ -+/* ------------ */ -+/* qb_attr_code */ -+/* ------------ */ -+ -+/* This struct locates a sub-field within a QBMan portal (CENA) cacheline which -+ * is either serving as a configuration command or a query result. The -+ * representation is inherently little-endian, as the indexing of the words is -+ * itself little-endian in nature and Layerscape is little-endian for anything -+ * that crosses a word boundary too (64-bit fields are the obvious examples). -+ */ -+struct qb_attr_code { -+ unsigned int word; /* which uint32_t[] array member encodes the field */ -+ unsigned int lsoffset; /* encoding offset from ls-bit */ -+ unsigned int width; /* encoding width. (bool must be 1.) */ -+}; -+ -+/* Some pre-defined codes */ -+extern struct qb_attr_code code_generic_verb; -+extern struct qb_attr_code code_generic_rslt; -+ -+/* Macros to define codes */ -+#define QB_CODE(a, b, c) { a, b, c} -+#define QB_CODE_NULL \ -+ QB_CODE((unsigned int)-1, (unsigned int)-1, (unsigned int)-1) -+ -+/* Rotate a code "ms", meaning that it moves from less-significant bytes to -+ * more-significant, from less-significant words to more-significant, etc. The -+ * "ls" version does the inverse, from more-significant towards -+ * less-significant. -+ */ -+static inline void qb_attr_code_rotate_ms(struct qb_attr_code *code, -+ unsigned int bits) -+{ -+ code->lsoffset += bits; -+ while (code->lsoffset > 31) { -+ code->word++; -+ code->lsoffset -= 32; -+ } -+} -+static inline void qb_attr_code_rotate_ls(struct qb_attr_code *code, -+ unsigned int bits) -+{ -+ /* Don't be fooled, this trick should work because the types are -+ * unsigned. 
So the case that interests the while loop (the rotate has -+ * gone too far and the word count needs to compensate for it), is -+ * manifested when lsoffset is negative. But that equates to a really -+ * large unsigned value, starting with lots of "F"s. As such, we can -+ * continue adding 32 back to it until it wraps back round above zero, -+ * to a value of 31 or less... -+ */ -+ code->lsoffset -= bits; -+ while (code->lsoffset > 31) { -+ code->word--; -+ code->lsoffset += 32; -+ } -+} -+/* Implement a loop of code rotations until 'expr' evaluates to FALSE (0). */ -+#define qb_attr_code_for_ms(code, bits, expr) \ -+ for (; expr; qb_attr_code_rotate_ms(code, bits)) -+#define qb_attr_code_for_ls(code, bits, expr) \ -+ for (; expr; qb_attr_code_rotate_ls(code, bits)) -+ -+/* decode a field from a cacheline */ -+static inline uint32_t qb_attr_code_decode(const struct qb_attr_code *code, -+ const uint32_t *cacheline) -+{ -+ return d32_uint32_t(code->lsoffset, code->width, cacheline[code->word]); -+} -+static inline uint64_t qb_attr_code_decode_64(const struct qb_attr_code *code, -+ const uint64_t *cacheline) -+{ -+ uint64_t res; -+ u64_from_le32_copy(&res, &cacheline[code->word/2], 1); -+ return res; -+} -+ -+/* encode a field to a cacheline */ -+static inline void qb_attr_code_encode(const struct qb_attr_code *code, -+ uint32_t *cacheline, uint32_t val) -+{ -+ cacheline[code->word] = -+ r32_uint32_t(code->lsoffset, code->width, cacheline[code->word]) -+ | e32_uint32_t(code->lsoffset, code->width, val); -+} -+static inline void qb_attr_code_encode_64(const struct qb_attr_code *code, -+ uint64_t *cacheline, uint64_t val) -+{ -+ u64_to_le32_copy(&cacheline[code->word/2], &val, 1); -+} -+ -+/* Small-width signed values (two's-complement) will decode into medium-width -+ * positives. (Eg. for an 8-bit signed field, which stores values from -128 to -+ * +127, a setting of -7 would appear to decode to the 32-bit unsigned value -+ * 249. Likewise -120 would decode as 136.) This function allows the caller to -+ * "re-sign" such fields to 32-bit signed. (Eg. -7, which was 249 with an 8-bit -+ * encoding, will become 0xfffffff9 if you cast the return value to uint32_t). -+ */ -+static inline int32_t qb_attr_code_makesigned(const struct qb_attr_code *code, -+ uint32_t val) -+{ -+ BUG_ON(val >= (1 << code->width)); -+ /* If the high bit was set, it was encoding a negative */ -+ if (val >= (1 << (code->width - 1))) -+ return (int32_t)0 - (int32_t)(((uint32_t)1 << code->width) - -+ val); -+ /* Otherwise, it was encoding a positive */ -+ return (int32_t)val; -+} -+ -+/* ---------------------- */ -+/* Descriptors/cachelines */ -+/* ---------------------- */ -+ -+/* To avoid needless dynamic allocation, the driver API often gives the caller -+ * a "descriptor" type that the caller can instantiate however they like. -+ * Ultimately though, it is just a cacheline of binary storage (or something -+ * smaller when it is known that the descriptor doesn't need all 64 bytes) for -+ * holding pre-formatted pieces of hardware commands. The performance-critical -+ * code can then copy these descriptors directly into hardware command -+ * registers more efficiently than trying to construct/format commands -+ * on-the-fly. The API user sees the descriptor as an array of 32-bit words in -+ * order for the compiler to know its size, but the internal details are not -+ * exposed. The following macro is used within the driver for converting *any* -+ * descriptor pointer to a usable array pointer. 
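The descriptor helpers in -+ * qbman_portal.c all open with this conversion, eg; -+ * uint32_t *cl = qb_cl(d); -+ * 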
The use of a macro (instead of -+ * an inline) is necessary to work with different descriptor types and to work -+ * correctly with const and non-const inputs (and similarly-qualified outputs). -+ */ -+#define qb_cl(d) (&(d)->dont_manipulate_directly[0]) ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_private.h -@@ -0,0 +1,173 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+*/ -+ -+/* Perform extra checking */ -+#define QBMAN_CHECKING -+ -+/* To maximise the amount of logic that is common between the Linux driver and -+ * other targets (such as the embedded MC firmware), we pivot here between the -+ * inclusion of two platform-specific headers. -+ * -+ * The first, qbman_sys_decl.h, includes any and all required system headers as -+ * well as providing any definitions for the purposes of compatibility. The -+ * second, qbman_sys.h, is where platform-specific routines go. -+ * -+ * The point of the split is that the platform-independent code (including this -+ * header) may depend on platform-specific declarations, yet other -+ * platform-specific routines may depend on platform-independent definitions. -+ */ -+ -+#include "qbman_sys_decl.h" -+ -+#define QMAN_REV_4000 0x04000000 -+#define QMAN_REV_4100 0x04010000 -+#define QMAN_REV_4101 0x04010001 -+ -+/* When things go wrong, it is a convenient trick to insert a few FOO() -+ * statements in the code to trace progress. TODO: remove this once we are -+ * hacking the code less actively. -+ */ -+#define FOO() fsl_os_print("FOO: %s:%d\n", __FILE__, __LINE__) -+ -+/* Any time there is a register interface which we poll on, this provides a -+ * "break after x iterations" scheme for it. It's handy for debugging, eg. 
-+ * where you don't want millions of lines of log output from a polling loop -+ * that won't stop, because such things tend to drown out the earlier log output -+ * that might explain what caused the problem. (NB: put ";" after each macro!) -+ * TODO: we should probably remove this once we're done sanitising the -+ * simulator... -+ */ -+#define DBG_POLL_START(loopvar) (loopvar = 10) -+#define DBG_POLL_CHECK(loopvar) \ -+ do {if (!(loopvar--)) BUG_ON(1); } while (0) -+ -+/* For CCSR or portal-CINH registers that contain fields at arbitrary offsets -+ * and widths, these macro-generated encode/decode/isolate/remove inlines can -+ * be used. -+ * -+ * Eg. to "d"ecode a 14-bit field out of a register (into a "uint16_t" type), -+ * where the field is located 3 bits "up" from the least-significant bit of the -+ * register (ie. the field location within the 32-bit register corresponds to a -+ * mask of 0x0001fff8), you would do; -+ * uint16_t field = d32_uint16_t(3, 14, reg_value); -+ * -+ * Or to "e"ncode a 1-bit boolean value (input type is "int", zero is FALSE, -+ * non-zero is TRUE, so must convert all non-zero inputs to 1, hence the "!!" -+ * operator) into a register at bit location 0x00080000 (19 bits "in" from the -+ * LS bit), do; -+ * reg_value |= e32_int(19, 1, !!field); -+ * -+ * If you wish to read-modify-write a register, such that you leave the 14-bit -+ * field as-is but have all other fields set to zero, then "i"solate the 14-bit -+ * value using; -+ * reg_value = i32_uint16_t(3, 14, reg_value); -+ * -+ * Alternatively, you could "r"emove the 1-bit boolean field (setting it to -+ * zero) but leaving all other fields as-is; -+ * reg_val = r32_int(19, 1, reg_value); -+ * -+ */ -+#define MAKE_MASK32(width) ((width) == 32 ? 0xffffffff : \ -+ (uint32_t)((1 << (width)) - 1)) -+#define DECLARE_CODEC32(t) \ -+static inline uint32_t e32_##t(uint32_t lsoffset, uint32_t width, t val) \ -+{ \ -+ BUG_ON(width > (sizeof(t) * 8)); \ -+ return ((uint32_t)val & MAKE_MASK32(width)) << lsoffset; \ -+} \ -+static inline t d32_##t(uint32_t lsoffset, uint32_t width, uint32_t val) \ -+{ \ -+ BUG_ON(width > (sizeof(t) * 8)); \ -+ return (t)((val >> lsoffset) & MAKE_MASK32(width)); \ -+} \ -+static inline uint32_t i32_##t(uint32_t lsoffset, uint32_t width, \ -+ uint32_t val) \ -+{ \ -+ BUG_ON(width > (sizeof(t) * 8)); \ -+ return e32_##t(lsoffset, width, d32_##t(lsoffset, width, val)); \ -+} \ -+static inline uint32_t r32_##t(uint32_t lsoffset, uint32_t width, \ -+ uint32_t val) \ -+{ \ -+ BUG_ON(width > (sizeof(t) * 8)); \ -+ return ~(MAKE_MASK32(width) << lsoffset) & val; \ -+} -+DECLARE_CODEC32(uint32_t) -+DECLARE_CODEC32(uint16_t) -+DECLARE_CODEC32(uint8_t) -+DECLARE_CODEC32(int) -+ -+ /*********************/ -+ /* Debugging assists */ -+ /*********************/ -+ -+static inline void __hexdump(unsigned long start, unsigned long end, -+ unsigned long p, size_t sz, const unsigned char *c) -+{ -+ while (start < end) { -+ unsigned int pos = 0; -+ char buf[64]; -+ int nl = 0; -+ -+ pos += sprintf(buf + pos, "%08lx: ", start); -+ do { -+ if ((start < p) || (start >= (p + sz))) -+ pos += sprintf(buf + pos, ".."); -+ else -+ pos += sprintf(buf + pos, "%02x", *(c++)); -+ if (!(++start & 15)) { -+ buf[pos++] = '\n'; -+ nl = 1; -+ } else { -+ nl = 0; -+ if (!(start & 1)) -+ buf[pos++] = ' '; -+ if (!(start & 3)) -+ buf[pos++] = ' '; -+ } -+ } while (start & 15); -+ if (!nl) -+ buf[pos++] = '\n'; -+ buf[pos] = '\0'; -+ pr_info("%s", buf); -+ } -+} -+static inline void hexdump(const void *ptr, size_t sz) -+{ -+ 
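/* Round the window out to whole 16-byte lines either side of -+ * [ptr, ptr + sz); __hexdump() renders the padding bytes as "..". */ -+ 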
unsigned long p = (unsigned long)ptr; -+ unsigned long start = p & ~(unsigned long)15; -+ unsigned long end = (p + sz + 15) & ~(unsigned long)15; -+ const unsigned char *c = ptr; -+ -+ __hexdump(start, end, p, sz, c); -+} -+ -+#include "qbman_sys.h" ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_sys.h -@@ -0,0 +1,307 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+/* qbman_sys_decl.h and qbman_sys.h are the two platform-specific files in the -+ * driver. They are only included via qbman_private.h, which is itself a -+ * platform-independent file and is included by all the other driver source. -+ * -+ * qbman_sys_decl.h is included prior to all other declarations and logic, and -+ * it exists to provide compatibility with any linux interfaces our -+ * single-source driver code is dependent on (eg. kmalloc). Ie. this file -+ * provides linux compatibility. -+ * -+ * This qbman_sys.h header, on the other hand, is included *after* any common -+ * and platform-neutral declarations and logic in qbman_private.h, and exists to -+ * implement any platform-specific logic of the qbman driver itself. Ie. it is -+ * *not* to provide linux compatibility. -+ */ -+ -+/* Trace the 3 different classes of read/write access to QBMan. #undef as -+ * required. */ -+#undef QBMAN_CCSR_TRACE -+#undef QBMAN_CINH_TRACE -+#undef QBMAN_CENA_TRACE -+ -+static inline void word_copy(void *d, const void *s, unsigned int cnt) -+{ -+ uint32_t *dd = d; -+ const uint32_t *ss = s; -+ -+ while (cnt--) -+ *(dd++) = *(ss++); -+} -+ -+/* Currently, the CENA support code expects each 32-bit word to be written in -+ * host order, and these are converted to hardware (little-endian) order on -+ * command submission. 
However, 64-bit quantities must be written (and read) -+ * as two 32-bit words with the least-significant word first, irrespective of -+ * host endianness. */ -+static inline void u64_to_le32_copy(void *d, const uint64_t *s, -+ unsigned int cnt) -+{ -+ uint32_t *dd = d; -+ const uint32_t *ss = (const uint32_t *)s; -+ -+ while (cnt--) { -+ /* TBD: the toolchain was choking on the use of 64-bit types up -+ * until recently so this works entirely with 32-bit variables. -+ * When 64-bit types become usable again, investigate better -+ * ways of doing this. */ -+#if defined(__BIG_ENDIAN) -+ *(dd++) = ss[1]; -+ *(dd++) = ss[0]; -+ ss += 2; -+#else -+ *(dd++) = *(ss++); -+ *(dd++) = *(ss++); -+#endif -+ } -+} -+static inline void u64_from_le32_copy(uint64_t *d, const void *s, -+ unsigned int cnt) -+{ -+ const uint32_t *ss = s; -+ uint32_t *dd = (uint32_t *)d; -+ -+ while (cnt--) { -+#if defined(__BIG_ENDIAN) -+ dd[1] = *(ss++); -+ dd[0] = *(ss++); -+ dd += 2; -+#else -+ *(dd++) = *(ss++); -+ *(dd++) = *(ss++); -+#endif -+ } -+} -+ -+/* Convert a host-native 32bit value into little endian */ -+#if defined(__BIG_ENDIAN) -+static inline uint32_t make_le32(uint32_t val) -+{ -+ return ((val & 0xff) << 24) | ((val & 0xff00) << 8) | -+ ((val & 0xff0000) >> 8) | ((val & 0xff000000) >> 24); -+} -+static inline uint32_t make_le24(uint32_t val) -+{ -+ return (((val & 0xff) << 16) | (val & 0xff00) | -+ ((val & 0xff0000) >> 16)); -+} -+#else -+#define make_le32(val) (val) -+#define make_le24(val) (val) -+#endif -+static inline void make_le32_n(uint32_t *val, unsigned int num) -+{ -+ while (num--) { -+ *val = make_le32(*val); -+ val++; -+ } -+} -+ -+ /******************/ -+ /* Portal access */ -+ /******************/ -+struct qbman_swp_sys { -+ /* On GPP, the sys support for qbman_swp is here. The CENA region is -+ * not an mmap() of the real portal registers, but an allocated -+ * place-holder, because the actual writes/reads to/from the portal are -+ * marshalled from these allocated areas using QBMan's "MC access -+ * registers". CINH accesses are atomic so there's no need for a -+ * place-holder. */ -+ void *cena; -+ void __iomem *addr_cena; -+ void __iomem *addr_cinh; -+}; -+ -+/* P_OFFSET is (ACCESS_CMD,0,12) - offset within the portal -+ * C is (ACCESS_CMD,12,1) - is inhibited? 
(0==CENA, 1==CINH) -+ * SWP_IDX is (ACCESS_CMD,16,10) - Software portal index -+ * P is (ACCESS_CMD,28,1) - (0==special portal, 1==any portal) -+ * T is (ACCESS_CMD,29,1) - Command type (0==READ, 1==WRITE) -+ * E is (ACCESS_CMD,31,1) - Command execute (1 to issue, poll for 0==complete) -+ */ -+ -+static inline void qbman_cinh_write(struct qbman_swp_sys *s, uint32_t offset, -+ uint32_t val) -+{ -+ -+ writel_relaxed(val, s->addr_cinh + offset); -+#ifdef QBMAN_CINH_TRACE -+ pr_info("qbman_cinh_write(%p:0x%03x) 0x%08x\n", -+ s->addr_cinh, offset, val); -+#endif -+} -+ -+static inline uint32_t qbman_cinh_read(struct qbman_swp_sys *s, uint32_t offset) -+{ -+ uint32_t reg = readl_relaxed(s->addr_cinh + offset); -+ -+#ifdef QBMAN_CINH_TRACE -+ pr_info("qbman_cinh_read(%p:0x%03x) 0x%08x\n", -+ s->addr_cinh, offset, reg); -+#endif -+ return reg; -+} -+ -+static inline void *qbman_cena_write_start(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+ void *shadow = s->cena + offset; -+ -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_write_start(%p:0x%03x) %p\n", -+ s->addr_cena, offset, shadow); -+#endif -+ BUG_ON(offset & 63); -+ dcbz(shadow); -+ return shadow; -+} -+ -+static inline void qbman_cena_write_complete(struct qbman_swp_sys *s, -+ uint32_t offset, void *cmd) -+{ -+ const uint32_t *shadow = cmd; -+ int loop; -+ -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_write_complete(%p:0x%03x) %p\n", -+ s->addr_cena, offset, shadow); -+ hexdump(cmd, 64); -+#endif -+ for (loop = 15; loop >= 1; loop--) -+ writel_relaxed(shadow[loop], s->addr_cena + -+ offset + loop * 4); -+ lwsync(); -+ writel_relaxed(shadow[0], s->addr_cena + offset); -+ dcbf(s->addr_cena + offset); -+} -+ -+static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset) -+{ -+ uint32_t *shadow = s->cena + offset; -+ unsigned int loop; -+ -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_read(%p:0x%03x) %p\n", -+ s->addr_cena, offset, shadow); -+#endif -+ -+ for (loop = 0; loop < 16; loop++) -+ shadow[loop] = readl_relaxed(s->addr_cena + offset -+ + loop * 4); -+#ifdef QBMAN_CENA_TRACE -+ hexdump(shadow, 64); -+#endif -+ return shadow; -+} -+ -+static inline void qbman_cena_invalidate_prefetch(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+ dcivac(s->addr_cena + offset); -+ prefetch_for_load(s->addr_cena + offset); -+} -+ -+ /******************/ -+ /* Portal support */ -+ /******************/ -+ -+/* The SWP_CFG portal register is special, in that it is used by the -+ * platform-specific code rather than the platform-independent code in -+ * qbman_portal.c. So use of it is declared locally here. 
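Worth pausing on qbman_cena_write_complete() above: words 15..1 are staged first, a store barrier (lwsync(), defined as "dmb st" in qbman_sys_decl.h) is issued, and only then is word 0, the word carrying the valid/verb bit, written, so the hardware can never observe a half-built command. A minimal sketch of the same ordering against plain memory, with the GCC builtin __sync_synchronize() standing in for the barrier:

    #include <stdint.h>

    static void submit_cmd(volatile uint32_t *portal, const uint32_t *shadow)
    {
            int i;

            /* stage every word except the one holding the valid bit */
            for (i = 15; i >= 1; i--)
                    portal[i] = shadow[i];
            __sync_synchronize(); /* order the stores above before word 0 */
            portal[0] = shadow[0]; /* publishing word 0 commits the command */
    }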
*/ -+#define QBMAN_CINH_SWP_CFG 0xd00 -+ -+/* For MC portal use, we always configure with -+ * DQRR_MF is (SWP_CFG,20,3) - DQRR max fill (<- 0x4) -+ * EST is (SWP_CFG,16,3) - EQCR_CI stashing threshold (<- 0x0) -+ * RPM is (SWP_CFG,12,2) - RCR production notification mode (<- 0x3) -+ * DCM is (SWP_CFG,10,2) - DQRR consumption notification mode (<- 0x2) -+ * EPM is (SWP_CFG,8,2) - EQCR production notification mode (<- 0x3) -+ * SD is (SWP_CFG,5,1) - memory stashing drop enable (<- FALSE) -+ * SP is (SWP_CFG,4,1) - memory stashing priority (<- TRUE) -+ * SE is (SWP_CFG,3,1) - memory stashing enable (<- 0x0) -+ * DP is (SWP_CFG,2,1) - dequeue stashing priority (<- TRUE) -+ * DE is (SWP_CFG,1,1) - dequeue stashing enable (<- 0x0) -+ * EP is (SWP_CFG,0,1) - EQCR_CI stashing priority (<- FALSE) -+ */ -+static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn, -+ uint8_t est, uint8_t rpm, uint8_t dcm, -+ uint8_t epm, int sd, int sp, int se, -+ int dp, int de, int ep) -+{ -+ uint32_t reg; -+ -+ reg = e32_uint8_t(20, (uint32_t)(3 + (max_fill >> 3)), max_fill) | -+ e32_uint8_t(16, 3, est) | e32_uint8_t(12, 2, rpm) | -+ e32_uint8_t(10, 2, dcm) | e32_uint8_t(8, 2, epm) | -+ e32_int(5, 1, sd) | e32_int(4, 1, sp) | e32_int(3, 1, se) | -+ e32_int(2, 1, dp) | e32_int(1, 1, de) | e32_int(0, 1, ep) | -+ e32_uint8_t(14, 1, wn); -+ return reg; -+} -+ -+static inline int qbman_swp_sys_init(struct qbman_swp_sys *s, -+ const struct qbman_swp_desc *d, -+ uint8_t dqrr_size) -+{ -+ uint32_t reg; -+ -+ s->addr_cena = d->cena_bar; -+ s->addr_cinh = d->cinh_bar; -+ s->cena = (void *)get_zeroed_page(GFP_KERNEL); -+ if (!s->cena) { -+ pr_err("Could not allocate page for cena shadow\n"); -+ return -1; -+ } -+ -+#ifdef QBMAN_CHECKING -+ /* We should never be asked to initialise for a portal that isn't in -+ * the power-on state. (Ie. don't forget to reset portals when they are -+ * decommissioned!) -+ */ -+ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG); -+ BUG_ON(reg); -+#endif -+ reg = qbman_set_swp_cfg(dqrr_size, 0, 0, 3, 2, 3, 0, 1, 0, 1, 0, 0); -+ qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg); -+ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG); -+ if (!reg) { -+ pr_err("The portal is not enabled!\n"); -+ kfree(s->cena); -+ return -1; -+ } -+ return 0; -+} -+ -+static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s) -+{ -+ free_page((unsigned long)s->cena); -+} ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h -@@ -0,0 +1,86 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
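The e32_uint8_t()/e32_int() packers used by qbman_set_swp_cfg() are defined elsewhere in the driver and are not part of this hunk; a hypothetical stand-in is enough to sanity-check one packing from the SWP_CFG table above by hand:

    #include <assert.h>
    #include <stdint.h>

    /* hypothetical equivalent of the driver's e32_*() helpers: place a
     * 'width'-bit field at bit offset 'lsoffset' */
    static uint32_t e32(int lsoffset, int width, uint32_t val)
    {
            return (val & ((1u << width) - 1)) << lsoffset;
    }

    int main(void)
    {
            /* RPM=0x3 at (SWP_CFG,12,2) and DCM=0x2 at (SWP_CFG,10,2) */
            assert((e32(12, 2, 0x3) | e32(10, 2, 0x2)) == 0x3800);
            return 0;
    }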
-+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "fsl_qbman_base.h" -+ -+/* The platform-independent code shouldn't need endianness, except for -+ * weird/fast-path cases like qbman_result_has_token(), which needs to -+ * perform a passive and endianness-specific test on a read-only data structure -+ * very quickly. It's an exception, and this symbol is used for that case. */ -+#if defined(__BIG_ENDIAN) -+#define DQRR_TOK_OFFSET 0 -+#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 24 -+#define SCN_STATE_OFFSET_IN_MEM 8 -+#define SCN_RID_OFFSET_IN_MEM 8 -+#else -+#define DQRR_TOK_OFFSET 24 -+#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 0 -+#define SCN_STATE_OFFSET_IN_MEM 16 -+#define SCN_RID_OFFSET_IN_MEM 0 -+#endif -+ -+/* Similarly-named functions */ -+#define upper32(a) upper_32_bits(a) -+#define lower32(a) lower_32_bits(a) -+ -+ /****************/ -+ /* arch assists */ -+ /****************/ -+ -+#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); } -+#define lwsync() { asm volatile("dmb st" : : : "memory"); } -+#define dcbf(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); } -+#define dcivac(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); } -+static inline void prefetch_for_load(void *p) -+{ -+ asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); -+} -+static inline void prefetch_for_store(void *p) -+{ -+ asm volatile("prfm pstl1keep, [%0, #64]" : : "r" (p)); -+} ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_test.c -@@ -0,0 +1,664 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
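One way to read the DQRR_TOK_OFFSET values defined in qbman_sys_decl.h above, assuming (as the comment there suggests) that the token is a byte inside a 32-bit word read from a DQRR entry: the byte's position in memory is fixed, so the shift that extracts it flips with host endianness. A hedged sketch, reusing the header's macro name:

    #include <stdint.h>

    #if defined(__BIG_ENDIAN)
    #define DQRR_TOK_OFFSET 0
    #else
    #define DQRR_TOK_OFFSET 24
    #endif

    /* hypothetical helper; the real test lives in qbman_result_has_token() */
    static inline uint8_t dqrr_token(uint32_t first_word)
    {
            return (uint8_t)(first_word >> DQRR_TOK_OFFSET);
    }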
-+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include -+#include -+#include -+ -+#include "qbman_private.h" -+#include "fsl_qbman_portal.h" -+#include "qbman_debug.h" -+#include "../../include/fsl_dpaa2_fd.h" -+ -+#define QBMAN_SWP_CENA_BASE 0x818000000 -+#define QBMAN_SWP_CINH_BASE 0x81c000000 -+ -+#define QBMAN_PORTAL_IDX 2 -+#define QBMAN_TEST_FQID 19 -+#define QBMAN_TEST_BPID 23 -+#define QBMAN_USE_QD -+#ifdef QBMAN_USE_QD -+#define QBMAN_TEST_QDID 1 -+#endif -+#define QBMAN_TEST_LFQID 0xf00010 -+ -+#define NUM_EQ_FRAME 10 -+#define NUM_DQ_FRAME 10 -+#define NUM_DQ_IN_DQRR 5 -+#define NUM_DQ_IN_MEM (NUM_DQ_FRAME - NUM_DQ_IN_DQRR) -+ -+static struct qbman_swp *swp; -+static struct qbman_eq_desc eqdesc; -+static struct qbman_pull_desc pulldesc; -+static struct qbman_release_desc releasedesc; -+static struct qbman_eq_response eq_storage[1]; -+static struct dpaa2_dq dq_storage[NUM_DQ_IN_MEM] __aligned(64); -+static dma_addr_t eq_storage_phys; -+static dma_addr_t dq_storage_phys; -+ -+/* FQ ctx attribute values for the test code. 
*/ -+#define FQCTX_HI 0xabbaf00d -+#define FQCTX_LO 0x98765432 -+#define FQ_VFQID 0x123456 -+ -+/* Sample frame descriptor */ -+static struct qbman_fd_simple fd = { -+ .addr_lo = 0xbabaf33d, -+ .addr_hi = 0x01234567, -+ .len = 0x7777, -+ .frc = 0xdeadbeef, -+ .flc_lo = 0xcafecafe, -+ .flc_hi = 0xbeadabba -+}; -+ -+static void fd_inc(struct qbman_fd_simple *_fd) -+{ -+ _fd->addr_lo += _fd->len; -+ _fd->flc_lo += 0x100; -+ _fd->frc += 0x10; -+} -+ -+static int fd_cmp(struct qbman_fd *fda, struct qbman_fd *fdb) -+{ -+ int i; -+ -+ for (i = 0; i < 8; i++) -+ if (fda->words[i] - fdb->words[i]) -+ return 1; -+ return 0; -+} -+ -+struct qbman_fd fd_eq[NUM_EQ_FRAME]; -+struct qbman_fd fd_dq[NUM_DQ_FRAME]; -+ -+/* "Buffers" to be released (and storage for buffers to be acquired) */ -+static uint64_t rbufs[320]; -+static uint64_t abufs[320]; -+ -+static void do_enqueue(struct qbman_swp *swp) -+{ -+ int i, j, ret; -+ -+#ifdef QBMAN_USE_QD -+ pr_info("*****QBMan_test: Enqueue %d frames to QD %d\n", -+ NUM_EQ_FRAME, QBMAN_TEST_QDID); -+#else -+ pr_info("*****QBMan_test: Enqueue %d frames to FQ %d\n", -+ NUM_EQ_FRAME, QBMAN_TEST_FQID); -+#endif -+ for (i = 0; i < NUM_EQ_FRAME; i++) { -+ /*********************************/ -+ /* Prepare a enqueue descriptor */ -+ /*********************************/ -+ memset(eq_storage, 0, sizeof(eq_storage)); -+ eq_storage_phys = virt_to_phys(eq_storage); -+ qbman_eq_desc_clear(&eqdesc); -+ qbman_eq_desc_set_no_orp(&eqdesc, 0); -+ qbman_eq_desc_set_response(&eqdesc, eq_storage_phys, 0); -+ qbman_eq_desc_set_token(&eqdesc, 0x99); -+#ifdef QBMAN_USE_QD -+ /**********************************/ -+ /* Prepare a Queueing Destination */ -+ /**********************************/ -+ qbman_eq_desc_set_qd(&eqdesc, QBMAN_TEST_QDID, 0, 3); -+#else -+ qbman_eq_desc_set_fq(&eqdesc, QBMAN_TEST_FQID); -+#endif -+ -+ /******************/ -+ /* Try an enqueue */ -+ /******************/ -+ ret = qbman_swp_enqueue(swp, &eqdesc, -+ (const struct qbman_fd *)&fd); -+ BUG_ON(ret); -+ for (j = 0; j < 8; j++) -+ fd_eq[i].words[j] = *((uint32_t *)&fd + j); -+ fd_inc(&fd); -+ } -+} -+ -+static void do_push_dequeue(struct qbman_swp *swp) -+{ -+ int i, j; -+ const struct dpaa2_dq *dq_storage1; -+ const struct qbman_fd *__fd; -+ int loopvar; -+ -+ pr_info("*****QBMan_test: Start push dequeue\n"); -+ for (i = 0; i < NUM_DQ_FRAME; i++) { -+ DBG_POLL_START(loopvar); -+ do { -+ DBG_POLL_CHECK(loopvar); -+ dq_storage1 = qbman_swp_dqrr_next(swp); -+ } while (!dq_storage1); -+ if (dq_storage1) { -+ __fd = (const struct qbman_fd *) -+ dpaa2_dq_fd(dq_storage1); -+ for (j = 0; j < 8; j++) -+ fd_dq[i].words[j] = __fd->words[j]; -+ if (fd_cmp(&fd_eq[i], &fd_dq[i])) { -+ pr_info("enqueue FD is\n"); -+ hexdump(&fd_eq[i], 32); -+ pr_info("dequeue FD is\n"); -+ hexdump(&fd_dq[i], 32); -+ } -+ qbman_swp_dqrr_consume(swp, dq_storage1); -+ } else { -+ pr_info("The push dequeue fails\n"); -+ } -+ } -+} -+ -+static void do_pull_dequeue(struct qbman_swp *swp) -+{ -+ int i, j, ret; -+ const struct dpaa2_dq *dq_storage1; -+ const struct qbman_fd *__fd; -+ int loopvar; -+ -+ pr_info("*****QBMan_test: Dequeue %d frames with dq entry in DQRR\n", -+ NUM_DQ_IN_DQRR); -+ for (i = 0; i < NUM_DQ_IN_DQRR; i++) { -+ qbman_pull_desc_clear(&pulldesc); -+ qbman_pull_desc_set_storage(&pulldesc, NULL, 0, 0); -+ qbman_pull_desc_set_numframes(&pulldesc, 1); -+ qbman_pull_desc_set_fq(&pulldesc, QBMAN_TEST_FQID); -+ -+ ret = qbman_swp_pull(swp, &pulldesc); -+ BUG_ON(ret); -+ DBG_POLL_START(loopvar); -+ do { -+ DBG_POLL_CHECK(loopvar); -+ 
dq_storage1 = qbman_swp_dqrr_next(swp); -+ } while (!dq_storage1); -+ -+ if (dq_storage1) { -+ __fd = (const struct qbman_fd *) -+ dpaa2_dq_fd(dq_storage1); -+ for (j = 0; j < 8; j++) -+ fd_dq[i].words[j] = __fd->words[j]; -+ if (fd_cmp(&fd_eq[i], &fd_dq[i])) { -+ pr_info("enqueue FD is\n"); -+ hexdump(&fd_eq[i], 32); -+ pr_info("dequeue FD is\n"); -+ hexdump(&fd_dq[i], 32); -+ } -+ qbman_swp_dqrr_consume(swp, dq_storage1); -+ } else { -+ pr_info("Dequeue with dq entry in DQRR fails\n"); -+ } -+ } -+ -+ pr_info("*****QBMan_test: Dequeue %d frames with dq entry in memory\n", -+ NUM_DQ_IN_MEM); -+ for (i = 0; i < NUM_DQ_IN_MEM; i++) { -+ dq_storage_phys = virt_to_phys(&dq_storage[i]); -+ qbman_pull_desc_clear(&pulldesc); -+ qbman_pull_desc_set_storage(&pulldesc, &dq_storage[i], -+ dq_storage_phys, 1); -+ qbman_pull_desc_set_numframes(&pulldesc, 1); -+ qbman_pull_desc_set_fq(&pulldesc, QBMAN_TEST_FQID); -+ ret = qbman_swp_pull(swp, &pulldesc); -+ BUG_ON(ret); -+ -+ DBG_POLL_START(loopvar); -+ do { -+ DBG_POLL_CHECK(loopvar); -+ ret = qbman_result_has_new_result(swp, -+ &dq_storage[i]); -+ } while (!ret); -+ -+ if (ret) { -+ for (j = 0; j < 8; j++) -+ fd_dq[i + NUM_DQ_IN_DQRR].words[j] = -+ dq_storage[i].dont_manipulate_directly[j + 8]; -+ j = i + NUM_DQ_IN_DQRR; -+ if (fd_cmp(&fd_eq[j], &fd_dq[j])) { -+ pr_info("enqueue FD is\n"); -+ hexdump(&fd_eq[i + NUM_DQ_IN_DQRR], 32); -+ pr_info("dequeue FD is\n"); -+ hexdump(&fd_dq[i + NUM_DQ_IN_DQRR], 32); -+ hexdump(&dq_storage[i], 64); -+ } -+ } else { -+ pr_info("Dequeue with dq entry in memory fails\n"); -+ } -+ } -+} -+ -+static void release_buffer(struct qbman_swp *swp, unsigned int num) -+{ -+ int ret; -+ unsigned int i, j; -+ -+ qbman_release_desc_clear(&releasedesc); -+ qbman_release_desc_set_bpid(&releasedesc, QBMAN_TEST_BPID); -+ pr_info("*****QBMan_test: Release %d buffers to BP %d\n", -+ num, QBMAN_TEST_BPID); -+ for (i = 0; i < (num / 7 + 1); i++) { -+ j = ((num - i * 7) > 7) ? 7 : (num - i * 7); -+ ret = qbman_swp_release(swp, &releasedesc, &rbufs[i * 7], j); -+ BUG_ON(ret); -+ } -+} -+ -+static void acquire_buffer(struct qbman_swp *swp, unsigned int num) -+{ -+ int ret; -+ unsigned int i, j; -+ -+ pr_info("*****QBMan_test: Acquire %d buffers from BP %d\n", -+ num, QBMAN_TEST_BPID); -+ -+ for (i = 0; i < (num / 7 + 1); i++) { -+ j = ((num - i * 7) > 7) ? 
7 : (num - i * 7); -+ ret = qbman_swp_acquire(swp, QBMAN_TEST_BPID, &abufs[i * 7], j); -+ BUG_ON(ret != j); -+ } -+} -+ -+static void buffer_pool_test(struct qbman_swp *swp) -+{ -+ struct qbman_attr info; -+ struct dpaa2_dq *bpscn_message; -+ dma_addr_t bpscn_phys; -+ uint64_t bpscn_ctx; -+ uint64_t ctx = 0xbbccddaadeadbeefull; -+ int i, ret; -+ uint32_t hw_targ; -+ -+ pr_info("*****QBMan_test: test buffer pool management\n"); -+ ret = qbman_bp_query(swp, QBMAN_TEST_BPID, &info); -+ qbman_bp_attr_get_bpscn_addr(&info, &bpscn_phys); -+ pr_info("The bpscn is %llx, info_phys is %llx\n", bpscn_phys, -+ virt_to_phys(&info)); -+ bpscn_message = phys_to_virt(bpscn_phys); -+ -+ for (i = 0; i < 320; i++) -+ rbufs[i] = 0xf00dabba01234567ull + i * 0x40; -+ -+ release_buffer(swp, 320); -+ -+ pr_info("QBMan_test: query the buffer pool\n"); -+ qbman_bp_query(swp, QBMAN_TEST_BPID, &info); -+ hexdump(&info, 64); -+ qbman_bp_attr_get_hw_targ(&info, &hw_targ); -+ pr_info("hw_targ is %d\n", hw_targ); -+ -+ /* Acquire buffers to trigger BPSCN */ -+ acquire_buffer(swp, 300); -+ /* BPSCN should be written to the memory */ -+ qbman_bp_query(swp, QBMAN_TEST_BPID, &info); -+ hexdump(&info, 64); -+ hexdump(bpscn_message, 64); -+ BUG_ON(!qbman_result_is_BPSCN(bpscn_message)); -+ /* There should be free buffers in the pool */ -+ BUG_ON(!(qbman_result_bpscn_has_free_bufs(bpscn_message))); -+ /* Buffer pool is depleted */ -+ BUG_ON(!qbman_result_bpscn_is_depleted(bpscn_message)); -+ /* The ctx should match */ -+ bpscn_ctx = qbman_result_bpscn_ctx(bpscn_message); -+ pr_info("BPSCN test: ctx %llx, bpscn_ctx %llx\n", ctx, bpscn_ctx); -+ BUG_ON(ctx != bpscn_ctx); -+ memset(bpscn_message, 0, sizeof(struct dpaa2_dq)); -+ -+ /* Re-seed the buffer pool to trigger BPSCN */ -+ release_buffer(swp, 240); -+ /* BPSCN should be written to the memory */ -+ BUG_ON(!qbman_result_is_BPSCN(bpscn_message)); -+ /* There should be free buffers in the pool */ -+ BUG_ON(!(qbman_result_bpscn_has_free_bufs(bpscn_message))); -+ /* Buffer pool is not depleted */ -+ BUG_ON(qbman_result_bpscn_is_depleted(bpscn_message)); -+ memset(bpscn_message, 0, sizeof(struct dpaa2_dq)); -+ -+ acquire_buffer(swp, 260); -+ /* BPSCN should be written to the memory */ -+ BUG_ON(!qbman_result_is_BPSCN(bpscn_message)); -+ /* There should be free buffers in the pool while BPSCN generated */ -+ BUG_ON(!(qbman_result_bpscn_has_free_bufs(bpscn_message))); -+ /* Buffer pool is depletion */ -+ BUG_ON(!qbman_result_bpscn_is_depleted(bpscn_message)); -+} -+ -+static void ceetm_test(struct qbman_swp *swp) -+{ -+ int i, j, ret; -+ -+ qbman_eq_desc_clear(&eqdesc); -+ qbman_eq_desc_set_no_orp(&eqdesc, 0); -+ qbman_eq_desc_set_fq(&eqdesc, QBMAN_TEST_LFQID); -+ pr_info("*****QBMan_test: Enqueue to LFQID %x\n", -+ QBMAN_TEST_LFQID); -+ for (i = 0; i < NUM_EQ_FRAME; i++) { -+ ret = qbman_swp_enqueue(swp, &eqdesc, -+ (const struct qbman_fd *)&fd); -+ BUG_ON(ret); -+ for (j = 0; j < 8; j++) -+ fd_eq[i].words[j] = *((uint32_t *)&fd + j); -+ fd_inc(&fd); -+ } -+} -+ -+int qbman_test(void) -+{ -+ struct qbman_swp_desc pd; -+ uint32_t reg; -+ -+ pd.cena_bar = ioremap_cache_ns(QBMAN_SWP_CENA_BASE + -+ QBMAN_PORTAL_IDX * 0x10000, 0x10000); -+ pd.cinh_bar = ioremap(QBMAN_SWP_CINH_BASE + -+ QBMAN_PORTAL_IDX * 0x10000, 0x10000); -+ -+ /* Detect whether the mc image is the test image with GPP setup */ -+ reg = readl_relaxed(pd.cena_bar + 0x4); -+ if (reg != 0xdeadbeef) { -+ pr_err("The MC image doesn't have GPP test setup, stop!\n"); -+ iounmap(pd.cena_bar); -+ iounmap(pd.cinh_bar); -+ return 
-1; -+ } -+ -+ pr_info("*****QBMan_test: Init QBMan SWP %d\n", QBMAN_PORTAL_IDX); -+ swp = qbman_swp_init(&pd); -+ if (!swp) { -+ iounmap(pd.cena_bar); -+ iounmap(pd.cinh_bar); -+ return -1; -+ } -+ -+ /*******************/ -+ /* Enqueue frames */ -+ /*******************/ -+ do_enqueue(swp); -+ -+ /*******************/ -+ /* Do pull dequeue */ -+ /*******************/ -+ do_pull_dequeue(swp); -+ -+ /*******************/ -+ /* Enqueue frames */ -+ /*******************/ -+ qbman_swp_push_set(swp, 0, 1); -+ qbman_swp_fq_schedule(swp, QBMAN_TEST_FQID); -+ do_enqueue(swp); -+ -+ /*******************/ -+ /* Do push dequeue */ -+ /*******************/ -+ do_push_dequeue(swp); -+ -+ /**************************/ -+ /* Test buffer pool funcs */ -+ /**************************/ -+ buffer_pool_test(swp); -+ -+ /******************/ -+ /* CEETM test */ -+ /******************/ -+ ceetm_test(swp); -+ -+ qbman_swp_finish(swp); -+ pr_info("*****QBMan_test: Kernel test Passed\n"); -+ return 0; -+} -+ -+/* user-space test-case, definitions: -+ * -+ * 1 portal only, using portal index 3. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define QBMAN_TEST_US_SWP 3 /* portal index for user space */ -+ -+#define QBMAN_TEST_MAGIC 'q' -+struct qbman_test_swp_ioctl { -+ unsigned long portal1_cinh; -+ unsigned long portal1_cena; -+}; -+struct qbman_test_dma_ioctl { -+ unsigned long ptr; -+ uint64_t phys_addr; -+}; -+ -+struct qbman_test_priv { -+ int has_swp_map; -+ int has_dma_map; -+ unsigned long pgoff; -+}; -+ -+#define QBMAN_TEST_SWP_MAP \ -+ _IOR(QBMAN_TEST_MAGIC, 0x01, struct qbman_test_swp_ioctl) -+#define QBMAN_TEST_SWP_UNMAP \ -+ _IOR(QBMAN_TEST_MAGIC, 0x02, struct qbman_test_swp_ioctl) -+#define QBMAN_TEST_DMA_MAP \ -+ _IOR(QBMAN_TEST_MAGIC, 0x03, struct qbman_test_dma_ioctl) -+#define QBMAN_TEST_DMA_UNMAP \ -+ _IOR(QBMAN_TEST_MAGIC, 0x04, struct qbman_test_dma_ioctl) -+ -+#define TEST_PORTAL1_CENA_PGOFF ((QBMAN_SWP_CENA_BASE + QBMAN_TEST_US_SWP * \ -+ 0x10000) >> PAGE_SHIFT) -+#define TEST_PORTAL1_CINH_PGOFF ((QBMAN_SWP_CINH_BASE + QBMAN_TEST_US_SWP * \ -+ 0x10000) >> PAGE_SHIFT) -+ -+static int qbman_test_open(struct inode *inode, struct file *filp) -+{ -+ struct qbman_test_priv *priv; -+ -+ priv = kmalloc(sizeof(struct qbman_test_priv), GFP_KERNEL); -+ if (!priv) -+ return -EIO; -+ filp->private_data = priv; -+ priv->has_swp_map = 0; -+ priv->has_dma_map = 0; -+ priv->pgoff = 0; -+ return 0; -+} -+ -+static int qbman_test_mmap(struct file *filp, struct vm_area_struct *vma) -+{ -+ int ret; -+ struct qbman_test_priv *priv = filp->private_data; -+ -+ BUG_ON(!priv); -+ -+ if (vma->vm_pgoff == TEST_PORTAL1_CINH_PGOFF) -+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); -+ else if (vma->vm_pgoff == TEST_PORTAL1_CENA_PGOFF) -+ vma->vm_page_prot = pgprot_cached_ns(vma->vm_page_prot); -+ else if (vma->vm_pgoff == priv->pgoff) -+ vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); -+ else { -+ pr_err("Damn, unrecognised pg_off!!\n"); -+ return -EINVAL; -+ } -+ ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, -+ vma->vm_end - vma->vm_start, -+ vma->vm_page_prot); -+ return ret; -+} -+ -+static long qbman_test_ioctl(struct file *fp, unsigned int cmd, -+ unsigned long arg) -+{ -+ void __user *a = (void __user *)arg; -+ unsigned long longret, populate; -+ int ret = 0; -+ struct qbman_test_priv *priv = fp->private_data; -+ -+ BUG_ON(!priv); -+ -+ switch (cmd) { -+ case QBMAN_TEST_SWP_MAP: -+ { -+ struct qbman_test_swp_ioctl params; -+ -+ if (priv->has_swp_map) 
-+ return -EINVAL;
-+ down_write(&current->mm->mmap_sem);
-+ /* Map portal1 CINH */
-+ longret = do_mmap_pgoff(fp, PAGE_SIZE, 0x10000,
-+ PROT_READ | PROT_WRITE, MAP_SHARED,
-+ TEST_PORTAL1_CINH_PGOFF, &populate);
-+ if (longret & ~PAGE_MASK) {
-+ ret = (int)longret;
-+ goto out;
-+ }
-+ params.portal1_cinh = longret;
-+ /* Map portal1 CENA */
-+ longret = do_mmap_pgoff(fp, PAGE_SIZE, 0x10000,
-+ PROT_READ | PROT_WRITE, MAP_SHARED,
-+ TEST_PORTAL1_CENA_PGOFF, &populate);
-+ if (longret & ~PAGE_MASK) {
-+ ret = (int)longret;
-+ goto out;
-+ }
-+ params.portal1_cena = longret;
-+ priv->has_swp_map = 1;
-+out:
-+ up_write(&current->mm->mmap_sem);
-+ if (!ret && copy_to_user(a, &params, sizeof(params)))
-+ return -EFAULT;
-+ return ret;
-+ }
-+ case QBMAN_TEST_SWP_UNMAP:
-+ {
-+ struct qbman_test_swp_ioctl params;
-+
-+ if (!priv->has_swp_map)
-+ return -EINVAL;
-+
-+ if (copy_from_user(&params, a, sizeof(params)))
-+ return -EFAULT;
-+ down_write(&current->mm->mmap_sem);
-+ do_munmap(current->mm, params.portal1_cena, 0x10000);
-+ do_munmap(current->mm, params.portal1_cinh, 0x10000);
-+ up_write(&current->mm->mmap_sem);
-+ priv->has_swp_map = 0;
-+ return 0;
-+ }
-+ case QBMAN_TEST_DMA_MAP:
-+ {
-+ struct qbman_test_dma_ioctl params;
-+ void *vaddr;
-+
-+ if (priv->has_dma_map)
-+ return -EINVAL;
-+ vaddr = (void *)get_zeroed_page(GFP_KERNEL);
-+ params.phys_addr = virt_to_phys(vaddr);
-+ priv->pgoff = (unsigned long)params.phys_addr >> PAGE_SHIFT;
-+ down_write(&current->mm->mmap_sem);
-+ longret = do_mmap_pgoff(fp, PAGE_SIZE, PAGE_SIZE,
-+ PROT_READ | PROT_WRITE, MAP_SHARED,
-+ priv->pgoff, &populate);
-+ if (longret & ~PAGE_MASK) {
-+ ret = (int)longret;
-+ return ret;
-+ }
-+ params.ptr = longret;
-+ priv->has_dma_map = 1;
-+ up_write(&current->mm->mmap_sem);
-+ if (copy_to_user(a, &params, sizeof(params)))
-+ return -EFAULT;
-+ return 0;
-+ }
-+ case QBMAN_TEST_DMA_UNMAP:
-+ {
-+ struct qbman_test_dma_ioctl params;
-+
-+ if (!priv->has_dma_map)
-+ return -EINVAL;
-+ if (copy_from_user(&params, a, sizeof(params)))
-+ return -EFAULT;
-+ down_write(&current->mm->mmap_sem);
-+ do_munmap(current->mm, params.ptr, PAGE_SIZE);
-+ up_write(&current->mm->mmap_sem);
-+ free_page((unsigned long)phys_to_virt(params.phys_addr));
-+ priv->has_dma_map = 0;
-+ return 0;
-+ }
-+ default:
-+ pr_err("Bad ioctl cmd!\n");
-+ }
-+ return -EINVAL;
-+}
-+
-+static const struct file_operations qbman_fops = {
-+ .open = qbman_test_open,
-+ .mmap = qbman_test_mmap,
-+ .unlocked_ioctl = qbman_test_ioctl
-+};
-+
-+static struct miscdevice qbman_miscdev = {
-+ .name = "qbman-test",
-+ .fops = &qbman_fops,
-+ .minor = MISC_DYNAMIC_MINOR,
-+};
-+
-+static int qbman_miscdev_init;
-+
-+static int test_init(void)
-+{
-+ int ret = qbman_test();
-+
-+ if (!ret) {
-+ /* MC image supports the test cases, so instantiate the
-+ * character device that the user-space test case will use to do
-+ * its memory mappings. */
-+ ret = misc_register(&qbman_miscdev);
-+ if (ret) {
-+ pr_err("qbman-test: failed to register misc device\n");
-+ return ret;
-+ }
-+ pr_info("qbman-test: misc device registered!\n");
-+ qbman_miscdev_init = 1;
-+ }
-+ return 0;
-+}
-+
-+static void test_exit(void)
-+{
-+ if (qbman_miscdev_init) {
-+ misc_deregister(&qbman_miscdev);
-+ qbman_miscdev_init = 0;
-+ }
-+}
-+
-+module_init(test_init);
-+module_exit(test_exit);
---- /dev/null
-+++ b/drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h
-@@ -0,0 +1,774 @@
-+/* Copyright 2014 Freescale Semiconductor Inc.
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPAA2_FD_H -+#define __FSL_DPAA2_FD_H -+ -+/** -+ * DOC: DPAA2 FD - Frame Descriptor APIs for DPAA2 -+ * -+ * Frame Descriptors (FDs) are used to describe frame data in the DPAA2. -+ * Frames can be enqueued and dequeued to Frame Queues which are consumed -+ * by the various DPAA accelerators (WRIOP, SEC, PME, DCE) -+ * -+ * There are three types of frames: Single, Scatter Gather and Frame Lists. -+ * -+ * The set of APIs in this file must be used to create, manipulate and -+ * query Frame Descriptor. -+ * -+ */ -+ -+/** -+ * struct dpaa2_fd - Place-holder for FDs. -+ * @words: for easier/faster copying the whole FD structure. -+ * @addr_lo: the lower 32 bits of the address in FD. -+ * @addr_hi: the upper 32 bits of the address in FD. -+ * @len: the length field in FD. -+ * @bpid_offset: represent the bpid and offset fields in FD -+ * @frc: frame context -+ * @ctrl: the 32bit control bits including dd, sc,... va, err. -+ * @flc_lo: the lower 32bit of flow context. -+ * @flc_hi: the upper 32bits of flow context. -+ * -+ * This structure represents the basic Frame Descriptor used in the system. -+ * We represent it via the simplest form that we need for now. Different -+ * overlays may be needed to support different options, etc. (It is impractical -+ * to define One True Struct, because the resulting encoding routines (lots of -+ * read-modify-writes) would be worst-case performance whether or not -+ * circumstances required them.) -+ */ -+struct dpaa2_fd { -+ union { -+ u32 words[8]; -+ struct dpaa2_fd_simple { -+ u32 addr_lo; -+ u32 addr_hi; -+ u32 len; -+ /* offset in the MS 16 bits, BPID in the LS 16 bits */ -+ u32 bpid_offset; -+ u32 frc; /* frame context */ -+ /* "err", "va", "cbmt", "asal", [...] 
*/ -+ u32 ctrl; -+ /* flow context */ -+ u32 flc_lo; -+ u32 flc_hi; -+ } simple; -+ }; -+}; -+ -+enum dpaa2_fd_format { -+ dpaa2_fd_single = 0, -+ dpaa2_fd_list, -+ dpaa2_fd_sg -+}; -+ -+/* Accessors for SG entry fields -+ * -+ * These setters and getters assume little endian format. For converting -+ * between LE and cpu endianness, the specific conversion functions must be -+ * called before the SGE contents are accessed by the core (on Rx), -+ * respectively before the SG table is sent to hardware (on Tx) -+ */ -+ -+/** -+ * dpaa2_fd_get_addr() - get the addr field of frame descriptor -+ * @fd: the given frame descriptor. -+ * -+ * Return the address in the frame descriptor. -+ */ -+static inline dma_addr_t dpaa2_fd_get_addr(const struct dpaa2_fd *fd) -+{ -+ return (dma_addr_t)((((uint64_t)fd->simple.addr_hi) << 32) -+ + fd->simple.addr_lo); -+} -+ -+/** -+ * dpaa2_fd_set_addr() - Set the addr field of frame descriptor -+ * @fd: the given frame descriptor. -+ * @addr: the address needs to be set in frame descriptor. -+ */ -+static inline void dpaa2_fd_set_addr(struct dpaa2_fd *fd, dma_addr_t addr) -+{ -+ fd->simple.addr_hi = upper_32_bits(addr); -+ fd->simple.addr_lo = lower_32_bits(addr); -+} -+ -+/** -+ * dpaa2_fd_get_frc() - Get the frame context in the frame descriptor -+ * @fd: the given frame descriptor. -+ * -+ * Return the frame context field in the frame descriptor. -+ */ -+static inline u32 dpaa2_fd_get_frc(const struct dpaa2_fd *fd) -+{ -+ return fd->simple.frc; -+} -+ -+/** -+ * dpaa2_fd_set_frc() - Set the frame context in the frame descriptor -+ * @fd: the given frame descriptor. -+ * @frc: the frame context needs to be set in frame descriptor. -+ */ -+static inline void dpaa2_fd_set_frc(struct dpaa2_fd *fd, u32 frc) -+{ -+ fd->simple.frc = frc; -+} -+ -+/** -+ * dpaa2_fd_get_flc() - Get the flow context in the frame descriptor -+ * @fd: the given frame descriptor. -+ * -+ * Return the flow context in the frame descriptor. -+ */ -+static inline dma_addr_t dpaa2_fd_get_flc(const struct dpaa2_fd *fd) -+{ -+ return (dma_addr_t)((((uint64_t)fd->simple.flc_hi) << 32) + -+ fd->simple.flc_lo); -+} -+ -+/** -+ * dpaa2_fd_set_flc() - Set the flow context field of frame descriptor -+ * @fd: the given frame descriptor. -+ * @flc_addr: the flow context needs to be set in frame descriptor. -+ */ -+static inline void dpaa2_fd_set_flc(struct dpaa2_fd *fd, dma_addr_t flc_addr) -+{ -+ fd->simple.flc_hi = upper_32_bits(flc_addr); -+ fd->simple.flc_lo = lower_32_bits(flc_addr); -+} -+ -+/** -+ * dpaa2_fd_get_len() - Get the length in the frame descriptor -+ * @fd: the given frame descriptor. -+ * -+ * Return the length field in the frame descriptor. -+ */ -+static inline u32 dpaa2_fd_get_len(const struct dpaa2_fd *fd) -+{ -+ return fd->simple.len; -+} -+ -+/** -+ * dpaa2_fd_set_len() - Set the length field of frame descriptor -+ * @fd: the given frame descriptor. -+ * @len: the length needs to be set in frame descriptor. -+ */ -+static inline void dpaa2_fd_set_len(struct dpaa2_fd *fd, u32 len) -+{ -+ fd->simple.len = len; -+} -+ -+/** -+ * dpaa2_fd_get_offset() - Get the offset field in the frame descriptor -+ * @fd: the given frame descriptor. -+ * -+ * Return the offset. -+ */ -+static inline uint16_t dpaa2_fd_get_offset(const struct dpaa2_fd *fd) -+{ -+ return (uint16_t)(fd->simple.bpid_offset >> 16) & 0x0FFF; -+} -+ -+/** -+ * dpaa2_fd_set_offset() - Set the offset field of frame descriptor -+ * -+ * @fd: the given frame descriptor. 
-+ * @offset: the offset needs to be set in frame descriptor. -+ */ -+static inline void dpaa2_fd_set_offset(struct dpaa2_fd *fd, uint16_t offset) -+{ -+ fd->simple.bpid_offset &= 0xF000FFFF; -+ fd->simple.bpid_offset |= (u32)offset << 16; -+} -+ -+/** -+ * dpaa2_fd_get_format() - Get the format field in the frame descriptor -+ * @fd: the given frame descriptor. -+ * -+ * Return the format. -+ */ -+static inline enum dpaa2_fd_format dpaa2_fd_get_format( -+ const struct dpaa2_fd *fd) -+{ -+ return (enum dpaa2_fd_format)((fd->simple.bpid_offset >> 28) & 0x3); -+} -+ -+/** -+ * dpaa2_fd_set_format() - Set the format field of frame descriptor -+ * -+ * @fd: the given frame descriptor. -+ * @format: the format needs to be set in frame descriptor. -+ */ -+static inline void dpaa2_fd_set_format(struct dpaa2_fd *fd, -+ enum dpaa2_fd_format format) -+{ -+ fd->simple.bpid_offset &= 0xCFFFFFFF; -+ fd->simple.bpid_offset |= (u32)format << 28; -+} -+ -+/** -+ * dpaa2_fd_get_bpid() - Get the bpid field in the frame descriptor -+ * @fd: the given frame descriptor. -+ * -+ * Return the bpid. -+ */ -+static inline uint16_t dpaa2_fd_get_bpid(const struct dpaa2_fd *fd) -+{ -+ return (uint16_t)(fd->simple.bpid_offset & 0xFFFF); -+} -+ -+/** -+ * dpaa2_fd_set_bpid() - Set the bpid field of frame descriptor -+ * -+ * @fd: the given frame descriptor. -+ * @bpid: the bpid needs to be set in frame descriptor. -+ */ -+static inline void dpaa2_fd_set_bpid(struct dpaa2_fd *fd, uint16_t bpid) -+{ -+ fd->simple.bpid_offset &= 0xFFFF0000; -+ fd->simple.bpid_offset |= (u32)bpid; -+} -+ -+/** -+ * struct dpaa2_sg_entry - the scatter-gathering structure -+ * @addr_lo: the lower 32bit of address -+ * @addr_hi: the upper 32bit of address -+ * @len: the length in this sg entry. -+ * @bpid_offset: offset in the MS 16 bits, BPID in the LS 16 bits. -+ */ -+struct dpaa2_sg_entry { -+ u32 addr_lo; -+ u32 addr_hi; -+ u32 len; -+ u32 bpid_offset; -+}; -+ -+enum dpaa2_sg_format { -+ dpaa2_sg_single = 0, -+ dpaa2_sg_frame_data, -+ dpaa2_sg_sgt_ext -+}; -+ -+/** -+ * dpaa2_sg_get_addr() - Get the address from SG entry -+ * @sg: the given scatter-gathering object. -+ * -+ * Return the address. -+ */ -+static inline dma_addr_t dpaa2_sg_get_addr(const struct dpaa2_sg_entry *sg) -+{ -+ return (dma_addr_t)((((u64)sg->addr_hi) << 32) + sg->addr_lo); -+} -+ -+/** -+ * dpaa2_sg_set_addr() - Set the address in SG entry -+ * @sg: the given scatter-gathering object. -+ * @addr: the address to be set. -+ */ -+static inline void dpaa2_sg_set_addr(struct dpaa2_sg_entry *sg, dma_addr_t addr) -+{ -+ sg->addr_hi = upper_32_bits(addr); -+ sg->addr_lo = lower_32_bits(addr); -+} -+ -+ -+static inline bool dpaa2_sg_short_len(const struct dpaa2_sg_entry *sg) -+{ -+ return (sg->bpid_offset >> 30) & 0x1; -+} -+ -+/** -+ * dpaa2_sg_get_len() - Get the length in SG entry -+ * @sg: the given scatter-gathering object. -+ * -+ * Return the length. -+ */ -+static inline u32 dpaa2_sg_get_len(const struct dpaa2_sg_entry *sg) -+{ -+ if (dpaa2_sg_short_len(sg)) -+ return sg->len & 0x1FFFF; -+ return sg->len; -+} -+ -+/** -+ * dpaa2_sg_set_len() - Set the length in SG entry -+ * @sg: the given scatter-gathering object. -+ * @len: the length to be set. -+ */ -+static inline void dpaa2_sg_set_len(struct dpaa2_sg_entry *sg, u32 len) -+{ -+ sg->len = len; -+} -+ -+/** -+ * dpaa2_sg_get_offset() - Get the offset in SG entry -+ * @sg: the given scatter-gathering object. -+ * -+ * Return the offset. 
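All of the accessors above carve fields out of the same 32-bit bpid_offset word: the BPID in the low 16 bits (14 bits for SG entries), the offset in bits 16..27, the format in bits 28..29, and, for SG and frame-list entries, a final bit at bit 31. A standalone check of the FD masks (illustrative only, not driver code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t bpid_offset = 0;

            bpid_offset |= 23u;            /* bpid */
            bpid_offset |= 0x40u << 16;    /* offset = 64 */
            bpid_offset |= 2u << 28;       /* format */

            assert((bpid_offset & 0xFFFF) == 23);            /* ..._get_bpid */
            assert(((bpid_offset >> 16) & 0x0FFF) == 0x40);  /* ..._get_offset */
            assert(((bpid_offset >> 28) & 0x3) == 2);        /* ..._get_format */
            return 0;
    }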
-+ */ -+static inline u16 dpaa2_sg_get_offset(const struct dpaa2_sg_entry *sg) -+{ -+ return (u16)(sg->bpid_offset >> 16) & 0x0FFF; -+} -+ -+/** -+ * dpaa2_sg_set_offset() - Set the offset in SG entry -+ * @sg: the given scatter-gathering object. -+ * @offset: the offset to be set. -+ */ -+static inline void dpaa2_sg_set_offset(struct dpaa2_sg_entry *sg, -+ u16 offset) -+{ -+ sg->bpid_offset &= 0xF000FFFF; -+ sg->bpid_offset |= (u32)offset << 16; -+} -+ -+/** -+ * dpaa2_sg_get_format() - Get the SG format in SG entry -+ * @sg: the given scatter-gathering object. -+ * -+ * Return the format. -+ */ -+static inline enum dpaa2_sg_format -+ dpaa2_sg_get_format(const struct dpaa2_sg_entry *sg) -+{ -+ return (enum dpaa2_sg_format)((sg->bpid_offset >> 28) & 0x3); -+} -+ -+/** -+ * dpaa2_sg_set_format() - Set the SG format in SG entry -+ * @sg: the given scatter-gathering object. -+ * @format: the format to be set. -+ */ -+static inline void dpaa2_sg_set_format(struct dpaa2_sg_entry *sg, -+ enum dpaa2_sg_format format) -+{ -+ sg->bpid_offset &= 0xCFFFFFFF; -+ sg->bpid_offset |= (u32)format << 28; -+} -+ -+/** -+ * dpaa2_sg_get_bpid() - Get the buffer pool id in SG entry -+ * @sg: the given scatter-gathering object. -+ * -+ * Return the bpid. -+ */ -+static inline u16 dpaa2_sg_get_bpid(const struct dpaa2_sg_entry *sg) -+{ -+ return (u16)(sg->bpid_offset & 0x3FFF); -+} -+ -+/** -+ * dpaa2_sg_set_bpid() - Set the buffer pool id in SG entry -+ * @sg: the given scatter-gathering object. -+ * @bpid: the bpid to be set. -+ */ -+static inline void dpaa2_sg_set_bpid(struct dpaa2_sg_entry *sg, u16 bpid) -+{ -+ sg->bpid_offset &= 0xFFFFC000; -+ sg->bpid_offset |= (u32)bpid; -+} -+ -+/** -+ * dpaa2_sg_is_final() - Check final bit in SG entry -+ * @sg: the given scatter-gathering object. -+ * -+ * Return bool. -+ */ -+static inline bool dpaa2_sg_is_final(const struct dpaa2_sg_entry *sg) -+{ -+ return !!(sg->bpid_offset >> 31); -+} -+ -+/** -+ * dpaa2_sg_set_final() - Set the final bit in SG entry -+ * @sg: the given scatter-gathering object. -+ * @final: the final boolean to be set. -+ */ -+static inline void dpaa2_sg_set_final(struct dpaa2_sg_entry *sg, bool final) -+{ -+ sg->bpid_offset &= 0x7FFFFFFF; -+ sg->bpid_offset |= (u32)final << 31; -+} -+ -+/* Endianness conversion helper functions -+ * The accelerator drivers which construct / read scatter gather entries -+ * need to call these in order to account for endianness mismatches between -+ * hardware and cpu -+ */ -+#ifdef __BIG_ENDIAN -+/** -+ * dpaa2_sg_cpu_to_le() - convert scatter gather entry from native cpu -+ * format little endian format. -+ * @sg: the given scatter gather entry. -+ */ -+static inline void dpaa2_sg_cpu_to_le(struct dpaa2_sg_entry *sg) -+{ -+ uint32_t *p = (uint32_t *)sg; -+ int i; -+ -+ for (i = 0; i < sizeof(*sg) / sizeof(u32); i++) -+ cpu_to_le32s(p++); -+} -+ -+/** -+ * dpaa2_sg_le_to_cpu() - convert scatter gather entry from little endian -+ * format to native cpu format. -+ * @sg: the given scatter gather entry. -+ */ -+static inline void dpaa2_sg_le_to_cpu(struct dpaa2_sg_entry *sg) -+{ -+ uint32_t *p = (uint32_t *)sg; -+ int i; -+ -+ for (i = 0; i < sizeof(*sg) / sizeof(u32); i++) -+ le32_to_cpus(p++); -+} -+#else -+#define dpaa2_sg_cpu_to_le(sg) -+#define dpaa2_sg_le_to_cpu(sg) -+#endif /* __BIG_ENDIAN */ -+ -+ -+/** -+ * struct dpaa2_fl_entry - structure for frame list entry. -+ * @addr_lo: the lower 32bit of address -+ * @addr_hi: the upper 32bit of address -+ * @len: the length in this sg entry. 
-+ * @bpid_offset: offset in the MS 16 bits, BPID in the LS 16 bits. -+ * @frc: frame context -+ * @ctrl: the 32bit control bits including dd, sc,... va, err. -+ * @flc_lo: the lower 32bit of flow context. -+ * @flc_hi: the upper 32bits of flow context. -+ * -+ * Frame List Entry (FLE) -+ * Identical to dpaa2_fd.simple layout, but some bits are different -+ */ -+struct dpaa2_fl_entry { -+ u32 addr_lo; -+ u32 addr_hi; -+ u32 len; -+ u32 bpid_offset; -+ u32 frc; -+ u32 ctrl; -+ u32 flc_lo; -+ u32 flc_hi; -+}; -+ -+enum dpaa2_fl_format { -+ dpaa2_fl_single = 0, -+ dpaa2_fl_res, -+ dpaa2_fl_sg -+}; -+ -+/** -+ * dpaa2_fl_get_addr() - Get address in the frame list entry -+ * @fle: the given frame list entry. -+ * -+ * Return address for the get function. -+ */ -+static inline dma_addr_t dpaa2_fl_get_addr(const struct dpaa2_fl_entry *fle) -+{ -+ return (dma_addr_t)((((uint64_t)fle->addr_hi) << 32) + fle->addr_lo); -+} -+ -+/** -+ * dpaa2_fl_set_addr() - Set the address in the frame list entry -+ * @fle: the given frame list entry. -+ * @addr: the address needs to be set. -+ * -+ */ -+static inline void dpaa2_fl_set_addr(struct dpaa2_fl_entry *fle, -+ dma_addr_t addr) -+{ -+ fle->addr_hi = upper_32_bits(addr); -+ fle->addr_lo = lower_32_bits(addr); -+} -+ -+/** -+ * dpaa2_fl_get_flc() - Get the flow context in the frame list entry -+ * @fle: the given frame list entry. -+ * -+ * Return flow context for the get function. -+ */ -+static inline dma_addr_t dpaa2_fl_get_flc(const struct dpaa2_fl_entry *fle) -+{ -+ return (dma_addr_t)((((uint64_t)fle->flc_hi) << 32) + fle->flc_lo); -+} -+ -+/** -+ * dpaa2_fl_set_flc() - Set the flow context in the frame list entry -+ * @fle: the given frame list entry. -+ * @flc_addr: the flow context address needs to be set. -+ * -+ */ -+static inline void dpaa2_fl_set_flc(struct dpaa2_fl_entry *fle, -+ dma_addr_t flc_addr) -+{ -+ fle->flc_hi = upper_32_bits(flc_addr); -+ fle->flc_lo = lower_32_bits(flc_addr); -+} -+ -+/** -+ * dpaa2_fl_get_len() - Get the length in the frame list entry -+ * @fle: the given frame list entry. -+ * -+ * Return length for the get function. -+ */ -+static inline u32 dpaa2_fl_get_len(const struct dpaa2_fl_entry *fle) -+{ -+ return fle->len; -+} -+ -+/** -+ * dpaa2_fl_set_len() - Set the length in the frame list entry -+ * @fle: the given frame list entry. -+ * @len: the length needs to be set. -+ * -+ */ -+static inline void dpaa2_fl_set_len(struct dpaa2_fl_entry *fle, u32 len) -+{ -+ fle->len = len; -+} -+ -+/** -+ * dpaa2_fl_get_offset() - Get/Set the offset in the frame list entry -+ * @fle: the given frame list entry. -+ * -+ * Return offset for the get function. -+ */ -+static inline uint16_t dpaa2_fl_get_offset(const struct dpaa2_fl_entry *fle) -+{ -+ return (uint16_t)(fle->bpid_offset >> 16) & 0x0FFF; -+} -+ -+/** -+ * dpaa2_fl_set_offset() - Set the offset in the frame list entry -+ * @fle: the given frame list entry. -+ * @offset: the offset needs to be set. -+ * -+ */ -+static inline void dpaa2_fl_set_offset(struct dpaa2_fl_entry *fle, -+ uint16_t offset) -+{ -+ fle->bpid_offset &= 0xF000FFFF; -+ fle->bpid_offset |= (u32)(offset & 0x0FFF) << 16; -+} -+ -+/** -+ * dpaa2_fl_get_format() - Get the format in the frame list entry -+ * @fle: the given frame list entry. -+ * -+ * Return frame list format for the get function. 
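Since struct dpaa2_fl_entry is documented above as layout-identical to dpaa2_fd.simple, a compile-time guard can make that assumption explicit; an illustrative fragment (not part of the patch), assuming this header is on the include path:

    #include <assert.h>
    #include "fsl_dpaa2_fd.h"

    static_assert(sizeof(struct dpaa2_fl_entry) == sizeof(struct dpaa2_fd),
                  "FLE must stay layout-compatible with dpaa2_fd.simple");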
-+ */ -+static inline enum dpaa2_fl_format dpaa2_fl_get_format( -+ const struct dpaa2_fl_entry *fle) -+{ -+ return (enum dpaa2_fl_format)((fle->bpid_offset >> 28) & 0x3); -+} -+ -+/** -+ * dpaa2_fl_set_format() - Set the format in the frame list entry -+ * @fle: the given frame list entry. -+ * @format: the frame list format needs to be set. -+ * -+ */ -+static inline void dpaa2_fl_set_format(struct dpaa2_fl_entry *fle, -+ enum dpaa2_fl_format format) -+{ -+ fle->bpid_offset &= 0xCFFFFFFF; -+ fle->bpid_offset |= (u32)(format & 0x3) << 28; -+} -+ -+/** -+ * dpaa2_fl_get_bpid() - Get the buffer pool id in the frame list entry -+ * @fle: the given frame list entry. -+ * -+ * Return bpid for the get function. -+ */ -+static inline uint16_t dpaa2_fl_get_bpid(const struct dpaa2_fl_entry *fle) -+{ -+ return (uint16_t)(fle->bpid_offset & 0x3FFF); -+} -+ -+/** -+ * dpaa2_fl_set_bpid() - Set the buffer pool id in the frame list entry -+ * @fle: the given frame list entry. -+ * @bpid: the buffer pool id needs to be set. -+ * -+ */ -+static inline void dpaa2_fl_set_bpid(struct dpaa2_fl_entry *fle, uint16_t bpid) -+{ -+ fle->bpid_offset &= 0xFFFFC000; -+ fle->bpid_offset |= (u32)bpid; -+} -+ -+/** dpaa2_fl_is_final() - check the final bit is set or not in the frame list. -+ * @fle: the given frame list entry. -+ * -+ * Return final bit settting. -+ */ -+static inline bool dpaa2_fl_is_final(const struct dpaa2_fl_entry *fle) -+{ -+ return !!(fle->bpid_offset >> 31); -+} -+ -+/** -+ * dpaa2_fl_set_final() - Set the final bit in the frame list entry -+ * @fle: the given frame list entry. -+ * @final: the final bit needs to be set. -+ * -+ */ -+static inline void dpaa2_fl_set_final(struct dpaa2_fl_entry *fle, bool final) -+{ -+ fle->bpid_offset &= 0x7FFFFFFF; -+ fle->bpid_offset |= (u32)final << 31; -+} -+ -+/** -+ * struct dpaa2_dq - the qman result structure -+ * @dont_manipulate_directly: the 16 32bit data to represent the whole -+ * possible qman dequeue result. -+ * -+ * When frames are dequeued, the FDs show up inside "dequeue" result structures -+ * (if at all, not all dequeue results contain valid FDs). This structure type -+ * is intentionally defined without internal detail, and the only reason it -+ * isn't declared opaquely (without size) is to allow the user to provide -+ * suitably-sized (and aligned) memory for these entries. -+ */ -+struct dpaa2_dq { -+ uint32_t dont_manipulate_directly[16]; -+}; -+ -+/* Parsing frame dequeue results */ -+/* FQ empty */ -+#define DPAA2_DQ_STAT_FQEMPTY 0x80 -+/* FQ held active */ -+#define DPAA2_DQ_STAT_HELDACTIVE 0x40 -+/* FQ force eligible */ -+#define DPAA2_DQ_STAT_FORCEELIGIBLE 0x20 -+/* Valid frame */ -+#define DPAA2_DQ_STAT_VALIDFRAME 0x10 -+/* FQ ODP enable */ -+#define DPAA2_DQ_STAT_ODPVALID 0x04 -+/* Volatile dequeue */ -+#define DPAA2_DQ_STAT_VOLATILE 0x02 -+/* volatile dequeue command is expired */ -+#define DPAA2_DQ_STAT_EXPIRED 0x01 -+ -+/** -+ * dpaa2_dq_flags() - Get the stat field of dequeue response -+ * @dq: the dequeue result. -+ */ -+uint32_t dpaa2_dq_flags(const struct dpaa2_dq *dq); -+ -+/** -+ * dpaa2_dq_is_pull() - Check whether the dq response is from a pull -+ * command. -+ * @dq: the dequeue result. -+ * -+ * Return 1 for volatile(pull) dequeue, 0 for static dequeue. -+ */ -+static inline int dpaa2_dq_is_pull(const struct dpaa2_dq *dq) -+{ -+ return (int)(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VOLATILE); -+} -+ -+/** -+ * dpaa2_dq_is_pull_complete() - Check whether the pull command is completed. -+ * @dq: the dequeue result. 
-+ * -+ * Return boolean. -+ */ -+static inline int dpaa2_dq_is_pull_complete( -+ const struct dpaa2_dq *dq) -+{ -+ return (int)(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_EXPIRED); -+} -+ -+/** -+ * dpaa2_dq_seqnum() - Get the seqnum field in dequeue response -+ * seqnum is valid only if VALIDFRAME flag is TRUE -+ * @dq: the dequeue result. -+ * -+ * Return seqnum. -+ */ -+uint16_t dpaa2_dq_seqnum(const struct dpaa2_dq *dq); -+ -+/** -+ * dpaa2_dq_odpid() - Get the seqnum field in dequeue response -+ * odpid is valid only if ODPVAILD flag is TRUE. -+ * @dq: the dequeue result. -+ * -+ * Return odpid. -+ */ -+uint16_t dpaa2_dq_odpid(const struct dpaa2_dq *dq); -+ -+/** -+ * dpaa2_dq_fqid() - Get the fqid in dequeue response -+ * @dq: the dequeue result. -+ * -+ * Return fqid. -+ */ -+uint32_t dpaa2_dq_fqid(const struct dpaa2_dq *dq); -+ -+/** -+ * dpaa2_dq_byte_count() - Get the byte count in dequeue response -+ * @dq: the dequeue result. -+ * -+ * Return the byte count remaining in the FQ. -+ */ -+uint32_t dpaa2_dq_byte_count(const struct dpaa2_dq *dq); -+ -+/** -+ * dpaa2_dq_frame_count() - Get the frame count in dequeue response -+ * @dq: the dequeue result. -+ * -+ * Return the frame count remaining in the FQ. -+ */ -+uint32_t dpaa2_dq_frame_count(const struct dpaa2_dq *dq); -+ -+/** -+ * dpaa2_dq_fd_ctx() - Get the frame queue context in dequeue response -+ * @dq: the dequeue result. -+ * -+ * Return the frame queue context. -+ */ -+uint64_t dpaa2_dq_fqd_ctx(const struct dpaa2_dq *dq); -+ -+/** -+ * dpaa2_dq_fd() - Get the frame descriptor in dequeue response -+ * @dq: the dequeue result. -+ * -+ * Return the frame descriptor. -+ */ -+const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq); -+ -+#endif /* __FSL_DPAA2_FD_H */ ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/fsl_dpaa2_io.h -@@ -0,0 +1,619 @@ -+/* Copyright 2014 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPAA2_IO_H -+#define __FSL_DPAA2_IO_H -+ -+#include "fsl_dpaa2_fd.h" -+ -+struct dpaa2_io; -+struct dpaa2_io_store; -+ -+/** -+ * DOC: DPIO Service Management -+ * -+ * The DPIO service provides APIs for users to interact with the datapath -+ * by enqueueing and dequeing frame descriptors. -+ * -+ * The following set of APIs can be used to enqueue and dequeue frames -+ * as well as producing notification callbacks when data is available -+ * for dequeue. -+ */ -+ -+/** -+ * struct dpaa2_io_desc - The DPIO descriptor. -+ * @receives_notifications: Use notificaton mode. -+ * @has_irq: use irq-based proessing. -+ * @will_poll: use poll processing. -+ * @has_8prio: set for channel with 8 priority WQs. -+ * @cpu: the cpu index that at least interrupt handlers will execute on. -+ * @stash_affinity: the stash affinity for this portal favour 'cpu' -+ * @regs_cena: the cache enabled regs. -+ * @regs_cinh: the cache inhibited regs. -+ * @dpio_id: The dpio index. -+ * @qman_version: the qman version -+ * -+ * Describe the attributes and features of the DPIO object. -+ */ -+struct dpaa2_io_desc { -+ /* non-zero iff the DPIO has a channel */ -+ int receives_notifications; -+ /* non-zero if the DPIO portal interrupt is handled. If so, the -+ * caller/OS handles the interrupt and calls dpaa2_io_service_irq(). */ -+ int has_irq; -+ /* non-zero if the caller/OS is prepared to called the -+ * dpaa2_io_service_poll() routine as part of its run-to-completion (or -+ * scheduling) loop. If so, the DPIO service may dynamically switch some -+ * of its processing between polling-based and irq-based. It is illegal -+ * combination to have (!has_irq && !will_poll). */ -+ int will_poll; -+ /* ignored unless 'receives_notifications'. Non-zero iff the channel has -+ * 8 priority WQs, otherwise the channel has 2. */ -+ int has_8prio; -+ /* the cpu index that at least interrupt handlers will execute on. And -+ * if 'stash_affinity' is non-zero, the cache targeted by stash -+ * transactions is affine to this cpu. */ -+ int cpu; -+ /* non-zero if stash transactions for this portal favour 'cpu' over -+ * other CPUs. (Eg. zero if there's no stashing, or stashing is to -+ * shared cache.) */ -+ int stash_affinity; -+ /* Caller-provided flags, determined by bus-scanning and/or creation of -+ * DPIO objects via MC commands. */ -+ void *regs_cena; -+ void *regs_cinh; -+ int dpio_id; -+ uint32_t qman_version; -+}; -+ -+/** -+ * dpaa2_io_create() - create a dpaa2_io object. -+ * @desc: the dpaa2_io descriptor -+ * -+ * Activates a "struct dpaa2_io" corresponding to the given config of an actual -+ * DPIO object. This handle can be used on it's own (like a one-portal "DPIO -+ * service") or later be added to a service-type "struct dpaa2_io" object. Note, -+ * the information required on 'cfg' is copied so the caller is free to do as -+ * they wish with the input parameter upon return. -+ * -+ * Return a valid dpaa2_io object for success, or NULL for failure. 
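A hedged usage sketch of the descriptor-plus-create flow documented here, using only the struct fields and the function declared in this header (the field values and the wrapper name are illustrative):

    static struct dpaa2_io *bring_up_portal(void *cena, void *cinh, int id)
    {
            struct dpaa2_io_desc desc = {
                    .receives_notifications = 1,
                    .has_irq = 0,
                    .will_poll = 1, /* (!has_irq && !will_poll) is illegal */
                    .has_8prio = 0,
                    .cpu = 0,
                    .stash_affinity = 0,
                    .regs_cena = cena,
                    .regs_cinh = cinh,
                    .dpio_id = id,
            };

            return dpaa2_io_create(&desc); /* NULL on failure */
    }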
-+ */ -+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc); -+ -+/** -+ * dpaa2_io_create_service() - Create an (initially empty) DPIO service. -+ * -+ * Return a valid dpaa2_io object for success, or NULL for failure. -+ */ -+struct dpaa2_io *dpaa2_io_create_service(void); -+ -+/** -+ * dpaa2_io_default_service() - Use the driver's own global (and initially -+ * empty) DPIO service. -+ * -+ * This increments the reference count, so don't forget to use dpaa2_io_down() -+ * for each time this function is called. -+ * -+ * Return a valid dpaa2_io object for success, or NULL for failure. -+ */ -+struct dpaa2_io *dpaa2_io_default_service(void); -+ -+/** -+ * dpaa2_io_down() - release the dpaa2_io object. -+ * @d: the dpaa2_io object to be released. -+ * -+ * The "struct dpaa2_io" type can represent an individual DPIO object (as -+ * described by "struct dpaa2_io_desc") or an instance of a "DPIO service", -+ * which can be used to group/encapsulate multiple DPIO objects. In all cases, -+ * each handle obtained should be released using this function. -+ */ -+void dpaa2_io_down(struct dpaa2_io *d); -+ -+/** -+ * dpaa2_io_service_add() - Add the given DPIO object to the given DPIO service. -+ * @service: the given DPIO service. -+ * @obj: the given DPIO object. -+ * -+ * 'service' must have been created by dpaa2_io_create_service() and 'obj' -+ * must have been created by dpaa2_io_create(). This increments the reference -+ * count on the object that 'obj' refers to, so the user could call -+ * dpaa2_io_down(obj) after this and the object will persist within the service -+ * (and will be destroyed when the service is destroyed). -+ * -+ * Return 0 for success, or -EINVAL for failure. -+ */ -+int dpaa2_io_service_add(struct dpaa2_io *service, struct dpaa2_io *obj); -+ -+/** -+ * dpaa2_io_get_descriptor() - Get the DPIO descriptor of the given DPIO object. -+ * @obj: the given DPIO object. -+ * @desc: the returned DPIO descriptor. -+ * -+ * This function will return failure if the given dpaa2_io struct represents a -+ * service rather than an individual DPIO object, otherwise it returns zero and -+ * the given 'cfg' structure is filled in. -+ * -+ * Return 0 for success, or -EINVAL for failure. -+ */ -+int dpaa2_io_get_descriptor(struct dpaa2_io *obj, struct dpaa2_io_desc *desc); -+ -+/** -+ * dpaa2_io_poll() - Process any notifications and h/w-initiated events that -+ * are polling-driven. -+ * @obj: the given DPIO object. -+ * -+ * Obligatory for DPIO objects that have dpaa2_io_desc::will_poll non-zero. -+ * -+ * Return 0 for success, or -EINVAL for failure. -+ */ -+int dpaa2_io_poll(struct dpaa2_io *obj); -+ -+/** -+ * dpaa2_io_irq() - Process any notifications and h/w-initiated events that are -+ * irq-driven. -+ * @obj: the given DPIO object. -+ * -+ * Obligatory for DPIO objects that have dpaa2_io_desc::has_irq non-zero. -+ * -+ * Return IRQ_HANDLED for success, or -EINVAL for failure. -+ */ -+int dpaa2_io_irq(struct dpaa2_io *obj); -+ -+/** -+ * dpaa2_io_pause_poll() - Used to stop polling. -+ * @obj: the given DPIO object. -+ * -+ * If a polling application is going to stop polling for a period of time and -+ * supports interrupt processing, it can call this function to convert all -+ * processing to IRQ. (Eg. when sleeping.) -+ * -+ * Return -EINVAL. -+ */ -+int dpaa2_io_pause_poll(struct dpaa2_io *obj); -+ -+/** -+ * dpaa2_io_resume_poll() - Resume polling -+ * @obj: the given DPIO object. -+ * -+ * Return -EINVAL. 
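Chaining the service calls declared above, a sketch of the intended lifecycle under the reference-counting rules spelled out in the comments (not a tested sequence):

    static struct dpaa2_io *build_service(struct dpaa2_io *obj)
    {
            struct dpaa2_io *svc = dpaa2_io_create_service();

            if (!svc)
                    return NULL;
            if (dpaa2_io_service_add(svc, obj)) {
                    dpaa2_io_down(svc);
                    return NULL;
            }
            dpaa2_io_down(obj); /* the service now holds its own reference */
            return svc;
    }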
-+ -+/** -+ * dpaa2_io_service_notifications() - Get a mask of cpus that the DPIO service -+ * can receive notifications on. -+ * @s: the given DPIO object. -+ * @mask: the mask of cpus. -+ * -+ * Note that this is a run-time snapshot. If things like cpu-hotplug are -+ * supported in the target system, then an attempt to register notifications -+ * for a cpu that appears present in the given mask might fail if that cpu has -+ * gone offline in the meantime. -+ */ -+void dpaa2_io_service_notifications(struct dpaa2_io *s, cpumask_t *mask); -+ -+/** -+ * dpaa2_io_service_stashing() - Get a mask of cpus that the DPIO service has -+ * stash affinity to. -+ * @s: the given DPIO object. -+ * @mask: the mask of cpus. -+ */ -+void dpaa2_io_service_stashing(struct dpaa2_io *s, cpumask_t *mask); -+ -+/** -+ * dpaa2_io_service_has_nonaffine() - Check the DPIO service's cpu affinity -+ * for stashing. -+ * @s: the given DPIO object. -+ * -+ * Return a boolean, whether or not the DPIO service has resources that have no -+ * particular cpu affinity for stashing. (Useful to know if you wish to operate -+ * on CPUs that the service has no affinity to; you would then choose to use -+ * resources that are neutral, rather than affine to a different CPU.) Unlike -+ * other service-specific APIs, this one doesn't return an error if it is passed -+ * a non-service object. So don't do it. -+ */ -+int dpaa2_io_service_has_nonaffine(struct dpaa2_io *s); -+ -+/*************************/ -+/* Notification handling */ -+/*************************/ -+ -+/** -+ * struct dpaa2_io_notification_ctx - The DPIO notification context structure. -+ * @cb: the callback to be invoked when the notification arrives. -+ * @is_cdan: Zero/FALSE for FQDAN, non-zero/TRUE for CDAN. -+ * @id: FQID or channel ID, needed for rearm. -+ * @desired_cpu: the cpu on which the notifications will show up. -+ * @actual_cpu: the cpu on which the notification actually shows up. -+ * @migration_cb: callback function used for migration. -+ * @dpio_id: the dpio index. -+ * @qman64: the 64-bit context value that shows up in the FQDAN/CDAN. -+ * @node: the list node. -+ * @dpio_private: the dpio object internal to dpio_service. -+ * -+ * When an FQDAN/CDAN registration is made (eg. by DPNI/DPCON/DPAI code), a -+ * context of the following type is used. The caller can embed it within a -+ * larger structure in order to add state that is tracked along with the -+ * notification (this may be useful when callbacks are invoked that pass this -+ * notification context as a parameter). -+ */ -+struct dpaa2_io_notification_ctx { -+ void (*cb)(struct dpaa2_io_notification_ctx *); -+ int is_cdan; -+ uint32_t id; -+ /* This specifies which cpu the user wants notifications to show up on -+ * (ie. to execute 'cb'). If notification-handling on that cpu is not -+ * available at the time of notification registration, the registration -+ * will fail. */ -+ int desired_cpu; -+ /* If the target platform supports cpu-hotplug or other features -+ * (related to power-management, one would expect) that can migrate IRQ -+ * handling of a given DPIO object, then this value will potentially be -+ * different to 'desired_cpu' at run-time. */ -+ int actual_cpu; -+ /* And if migration does occur and this callback is non-NULL, it will -+ * be invoked prior to any further notification callbacks executing on -+ * 'newcpu'. Note that 'oldcpu' is what 'actual_cpu' was prior to the -+ * migration, and 'newcpu' is what it is now.
Both could conceivably be -+ * different to 'desired_cpu'. */ -+ void (*migration_cb)(struct dpaa2_io_notification_ctx *, -+ int oldcpu, int newcpu); -+ /* These are returned from dpaa2_io_service_register(). -+ * 'dpio_id' is the dpaa2_io_desc::dpio_id value of the DPIO object that -+ * has been selected by the service for receiving the notifications. The -+ * caller can use this value in the MC command that attaches the FQ (or -+ * channel) of their DPNI (or DPCON, respectively) to this DPIO for -+ * notification-generation. -+ * 'qman64' is the 64-bit context value that needs to be sent in the -+ * same MC command in order to be programmed into the FQ or channel - -+ * this is the 64-bit value that shows up in the FQDAN/CDAN messages to -+ * the DPIO object, and the DPIO service specifies this value back to -+ * the caller so that the notifications that show up will be -+ * comprehensible/demux-able to the DPIO service. */ -+ int dpio_id; -+ uint64_t qman64; -+ /* These fields are internal to the DPIO service once the context is -+ * registered. TBD: may require more internal state fields. */ -+ struct list_head node; -+ void *dpio_private; -+}; -+ -+/** -+ * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN -+ * notifications on the given DPIO service. -+ * @service: the given DPIO service. -+ * @ctx: the notification context. -+ * -+ * The MC command to attach the caller's DPNI/DPCON/DPAI device to a -+ * DPIO object is performed after this function is called. In that way, (a) the -+ * DPIO service is "ready" to handle a notification arrival (which might happen -+ * before the "attach" command to MC has returned control of execution back to -+ * the caller), and (b) the DPIO service can provide back to the caller the -+ * 'dpio_id' and 'qman64' parameters that it should pass along in the MC command -+ * in order for the DPNI/DPCON/DPAI resources to be configured to produce the -+ * right notification fields to the DPIO service. -+ * -+ * Return 0 for success, or -ENODEV for failure. -+ */ -+int dpaa2_io_service_register(struct dpaa2_io *service, -+ struct dpaa2_io_notification_ctx *ctx); -+ -+/** -+ * dpaa2_io_service_deregister() - The opposite of 'register'. -+ * @service: the given DPIO service. -+ * @ctx: the notification context. -+ * -+ * Note that 'register' should be called *before* -+ * making the MC call to attach the notification-producing device to the -+ * notification-handling DPIO service; the 'deregister' function should be -+ * called *after* making the MC call to detach the notification-producing -+ * device. -+ * -+ * Return 0 for success. -+ */ -+int dpaa2_io_service_deregister(struct dpaa2_io *service, -+ struct dpaa2_io_notification_ctx *ctx); -+ -+/** -+ * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service. -+ * @service: the given DPIO service. -+ * @ctx: the notification context. -+ * -+ * Once an FQDAN/CDAN has been produced, the corresponding FQ/channel is -+ * considered "disarmed". Ie. the user can issue pull dequeue operations on that -+ * traffic source for as long as it likes. Eventually it may wish to "rearm" -+ * that source to allow it to produce another FQDAN/CDAN; that's what this -+ * function achieves. -+ * -+ * Return 0 for success, -ENODEV if no service is available, or -EBUSY/-EIO if -+ * rearming the notification failed while setting the CDAN or scheduling the FQ. -+ */ -+int dpaa2_io_service_rearm(struct dpaa2_io *service, -+ struct dpaa2_io_notification_ctx *ctx);
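[Editor's note: the register/attach/rearm contract described above is easiest to see end to end. The following is an illustrative sketch only: 'my_service', 'process_rx()' and 'attach_fq_to_dpio()' (standing in for the actual MC attach command) are hypothetical names, and error handling is abbreviated.]

    /* FQDAN callback: the FQ is now disarmed, so drain it and rearm it. */
    static void my_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
    {
            process_rx(ctx);                        /* hypothetical consumer */
            dpaa2_io_service_rearm(my_service, ctx);
    }

    static int setup_notifications(struct dpaa2_io *service, uint32_t fqid)
    {
            static struct dpaa2_io_notification_ctx ctx;
            int err;

            ctx.cb = my_fqdan_cb;
            ctx.is_cdan = 0;                        /* FQDAN, not CDAN */
            ctx.id = fqid;
            ctx.desired_cpu = 0;

            /* Register *before* the MC attach command, so the service is
             * ready for an early notification and has filled in the
             * ctx.dpio_id and ctx.qman64 values that the command needs.
             */
            err = dpaa2_io_service_register(service, &ctx);
            if (err)
                    return err;

            /* Hypothetical stand-in for the MC command that programs
             * dpio_id/qman64 into the FQ.
             */
            return attach_fq_to_dpio(fqid, ctx.dpio_id, ctx.qman64);
    }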
-+ -+/** -+ * dpaa2_io_from_registration() - Get the DPIO object from the given notification -+ * context. -+ * @ctx: the given notification context. -+ * @ret: the returned DPIO object. -+ * -+ * Like 'dpaa2_io_service_get_persistent()' (see below), except that the -+ * returned handle is not selected based on a 'cpu' argument, but is the same -+ * DPIO object that the given notification context is registered against. The -+ * returned handle carries a reference count, so a corresponding dpaa2_io_down() -+ * would be required when the reference is no longer needed. -+ * -+ * Return 0 for success, or -EINVAL for failure. -+ */ -+int dpaa2_io_from_registration(struct dpaa2_io_notification_ctx *ctx, -+ struct dpaa2_io **ret); -+ -+/**********************************/ -+/* General usage of DPIO services */ -+/**********************************/ -+ -+/** -+ * dpaa2_io_service_get_persistent() - Get the DPIO resource from the given -+ * DPIO service and cpu. -+ * @service: the DPIO service. -+ * @cpu: the cpu that the DPIO resource has stashing affinity to. -+ * @ret: the returned DPIO resource. -+ * -+ * The various DPIO interfaces can accept a "struct dpaa2_io" handle that refers -+ * to an individual DPIO object or to a whole service. In the latter case, an -+ * internal choice is made for each operation. This function supports the former -+ * case, by selecting an individual DPIO object *from* the service in order for -+ * it to be used multiple times to provide "persistence". The returned handle -+ * also carries a reference count, so a corresponding dpaa2_io_down() would be -+ * required when the reference is no longer needed. Note, a parameter of -1 for -+ * 'cpu' will select a DPIO resource that has no particular stashing affinity to -+ * any cpu (eg. one that stashes to platform cache). -+ * -+ * Return 0 for success, or -ENODEV for failure. -+ */ -+int dpaa2_io_service_get_persistent(struct dpaa2_io *service, int cpu, -+ struct dpaa2_io **ret); -+ -+/*****************/ -+/* Pull dequeues */ -+/*****************/ -+ -+/** -+ * dpaa2_io_service_pull_fq() - pull dequeue frames from a fq. -+ * @d: the given DPIO service. -+ * @fqid: the given frame queue id. -+ * @s: the dpaa2_io_store object for the result. -+ * -+ * To support DCA/order-preservation, it will be necessary to support an -+ * alternative form, because they must ultimately dequeue to DQRR rather than a -+ * user-supplied dpaa2_io_store. Furthermore, those dequeue results will -+ * "complete" using a caller-provided callback (from DQRR processing) rather -+ * than the caller explicitly looking at their dpaa2_io_store for results. Eg. -+ * the alternative form will likely take a callback parameter rather than a -+ * store parameter. Ignoring it for now to keep the picture clearer. -+ * -+ * Return 0 for success, or error code for failure. -+ */ -+int dpaa2_io_service_pull_fq(struct dpaa2_io *d, uint32_t fqid, -+ struct dpaa2_io_store *s); -+ -+/** -+ * dpaa2_io_service_pull_channel() - pull dequeue frames from a channel. -+ * @d: the given DPIO service. -+ * @channelid: the given channel id. -+ * @s: the dpaa2_io_store object for the result. -+ * -+ * To support DCA/order-preservation, it will be necessary to support an -+ * alternative form, because they must ultimately dequeue to DQRR rather than a -+ * user-supplied dpaa2_io_store.
Furthermore, those dequeue results will -+ * "complete" using a caller-provided callback (from DQRR processing) rather -+ * than the caller explicitly looking at their dpaa2_io_store for results. Eg. -+ * the alternative form will likely take a callback parameter rather than a -+ * store parameter. Ignoring it for now to keep the picture clearer. -+ * -+ * Return 0 for success, or error code for failure. -+ */ -+int dpaa2_io_service_pull_channel(struct dpaa2_io *d, uint32_t channelid, -+ struct dpaa2_io_store *s); -+ -+/************/ -+/* Enqueues */ -+/************/ -+ -+/** -+ * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue. -+ * @d: the given DPIO service. -+ * @fqid: the given frame queue id. -+ * @fd: the frame descriptor which is enqueued. -+ * -+ * This definition bypasses some features that are not expected to be priority-1 -+ * features, and may not be needed at all via current assumptions (QBMan's -+ * feature set is wider than the MC object model is intending to support, -+ * initially at least). Plus, keeping them out (for now) keeps the API view -+ * simpler. Missing features are: -+ * - enqueue confirmation (results DMA'd back to the user) -+ * - ORP -+ * - DCA/order-preservation (see note in "pull dequeues") -+ * - enqueue consumption interrupts -+ * -+ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready, -+ * or -ENODEV if there is no dpio service. -+ */ -+int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, -+ uint32_t fqid, -+ const struct dpaa2_fd *fd); -+ -+/** -+ * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD. -+ * @d: the given DPIO service. -+ * @qdid: the given queuing destination id. -+ * @prio: the given queuing priority. -+ * @qdbin: the given queuing destination bin. -+ * @fd: the frame descriptor which is enqueued. -+ * -+ * This definition bypasses some features that are not expected to be priority-1 -+ * features, and may not be needed at all via current assumptions (QBMan's -+ * feature set is wider than the MC object model is intending to support, -+ * initially at least). Plus, keeping them out (for now) keeps the API view -+ * simpler. Missing features are: -+ * - enqueue confirmation (results DMA'd back to the user) -+ * - ORP -+ * - DCA/order-preservation (see note in "pull dequeues") -+ * - enqueue consumption interrupts -+ * -+ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready, -+ * or -ENODEV if there is no dpio service. -+ */ -+int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, -+ uint32_t qdid, uint8_t prio, uint16_t qdbin, -+ const struct dpaa2_fd *fd); -+ -+/*******************/ -+/* Buffer handling */ -+/*******************/ -+ -+/** -+ * dpaa2_io_service_release() - Release buffers to a buffer pool. -+ * @d: the given DPIO object. -+ * @bpid: the buffer pool id. -+ * @buffers: the buffers to be released. -+ * @num_buffers: the number of the buffers to be released. -+ * -+ * Return 0 for success, and negative error code for failure. -+ */ -+int dpaa2_io_service_release(struct dpaa2_io *d, -+ uint32_t bpid, -+ const uint64_t *buffers, -+ unsigned int num_buffers); -+ -+/** -+ * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool. -+ * @d: the given DPIO object. -+ * @bpid: the buffer pool id. -+ * @buffers: the buffer addresses for acquired buffers. -+ * @num_buffers: the expected number of the buffers to acquire. -+ * -+ * Return a negative error code if the command failed, otherwise it returns -+ * the number of buffers acquired, which may be less than the number requested. -+ * Eg. if the buffer pool is empty, this will return zero. -+ */ -+int dpaa2_io_service_acquire(struct dpaa2_io *d, -+ uint32_t bpid, -+ uint64_t *buffers, -+ unsigned int num_buffers);
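[Editor's note: a quick illustration of the release/acquire pair documented above. This is a sketch under assumptions: 'seed_and_check' is a hypothetical helper, the buffer addresses are presumed already DMA-mapped, and error handling is minimal.]

    /* Seed a pool with 'count' buffers, then sanity-check by pulling a few
     * back out and returning them (illustrative only).
     */
    static int seed_and_check(struct dpaa2_io *d, uint32_t bpid,
                              const uint64_t *bufs, unsigned int count)
    {
            uint64_t check[7];
            int err, acquired;

            err = dpaa2_io_service_release(d, bpid, bufs, count);
            if (err)
                    return err;

            /* Fewer buffers than requested (or zero, on an empty pool)
             * is not an error for acquire.
             */
            acquired = dpaa2_io_service_acquire(d, bpid, check, 7);
            if (acquired <= 0)
                    return acquired;

            /* Put back what we grabbed so the pool stays seeded. */
            return dpaa2_io_service_release(d, bpid, check, acquired);
    }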
-+ -+/***************/ -+/* DPIO stores */ -+/***************/ -+ -+/* These are reusable memory blocks for retrieving dequeue results into, and to -+ * assist with parsing those results once they show up. They also hide the -+ * details of how to use "tokens" to make detection of DMA results possible (ie. -+ * comparing memory before the DMA and after it) while minimising the needless -+ * clearing/rewriting of those memory locations between uses. -+ */ -+ -+/** -+ * dpaa2_io_store_create() - Create the dma memory storage for dequeue -+ * result. -+ * @max_frames: the maximum number of dequeue results, must be <= 16. -+ * @dev: the device to allow mapping/unmapping the DMAable region. -+ * -+ * Constructor - max_frames must be <= 16. The user provides the -+ * device struct to allow mapping/unmapping of the DMAable region. Area for -+ * storage will be allocated during create. The size of this storage is -+ * "max_frames*sizeof(struct dpaa2_dq)". The 'dpaa2_io_store' returned is a -+ * wrapper structure allocated within the DPIO code, which owns and manages -+ * the allocated store. -+ * -+ * Return a dpaa2_io_store struct for successfully created storage memory, or -+ * NULL if the storage for dequeue results could not be allocated. -+ */ -+struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames, -+ struct device *dev); -+ -+/** -+ * dpaa2_io_store_destroy() - Destroy the dma memory storage for dequeue -+ * result. -+ * @s: the storage memory to be destroyed. -+ * -+ * Frees the specified storage memory. -+ */ -+void dpaa2_io_store_destroy(struct dpaa2_io_store *s); -+ -+/** -+ * dpaa2_io_store_next() - Determine when the next dequeue result is available. -+ * @s: the dpaa2_io_store object. -+ * @is_last: indicate whether this is the last frame in the pull command. -+ * -+ * Once dpaa2_io_store has been passed to a function that performs dequeues to -+ * it, like dpaa2_ni_rx(), this function can be used to determine when the next -+ * frame result is available. Once this function returns non-NULL, a subsequent -+ * call to it will try to find the *next* dequeue result. -+ * -+ * Note that if a pull-dequeue has a null result because the target FQ/channel -+ * was empty, then this function will return NULL rather than expecting the -+ * caller to always check for this separately. As such, "is_last" can be used to -+ * differentiate between "end-of-empty-dequeue" and "still-waiting". -+ * -+ * Return dequeue result for a valid dequeue result, or NULL for empty dequeue. -+ */ -+struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last);
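[Editor's note: the store API above is typically used in a pull-dequeue loop along the following lines. A sketch under the same caveats: 'drain_fq' and 'consume_fd()' are hypothetical names, and a real caller would bound the busy-wait on the store.]

    static int drain_fq(struct dpaa2_io *d, struct device *dev, uint32_t fqid)
    {
            struct dpaa2_io_store *s;
            struct dpaa2_dq *dq;
            int is_last = 0, err;

            s = dpaa2_io_store_create(16, dev);     /* max_frames <= 16 */
            if (!s)
                    return -ENOMEM;

            err = dpaa2_io_service_pull_fq(d, fqid, s);
            if (err)
                    goto out;

            /* Walk the store until the pull command completes; a NULL
             * result with !is_last means the DMA has not landed yet, so
             * keep polling.
             */
            do {
                    dq = dpaa2_io_store_next(s, &is_last);
                    if (dq)
                            consume_fd(dq);         /* hypothetical consumer */
            } while (!is_last);
    out:
            dpaa2_io_store_destroy(s);
            return err;
    }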
-+ -+#ifdef CONFIG_FSL_QBMAN_DEBUG -+/** -+ * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq. -+ * @d: the given DPIO object. -+ * @fqid: the id of frame queue to be queried. -+ * @fcnt: the queried frame count. -+ * @bcnt: the queried byte count. -+ * -+ * Knowing the FQ count at run-time can be useful in debugging situations. -+ * The instantaneous frame- and byte-count are hereby returned. -+ * -+ * Return 0 for a successful query, and negative error code if query fails. -+ */ -+int dpaa2_io_query_fq_count(struct dpaa2_io *d, uint32_t fqid, -+ uint32_t *fcnt, uint32_t *bcnt); -+ -+/** -+ * dpaa2_io_query_bp_count() - Query the number of buffers currently in a -+ * buffer pool. -+ * @d: the given DPIO object. -+ * @bpid: the index of buffer pool to be queried. -+ * @num: the queried number of buffers in the buffer pool. -+ * -+ * Return 0 for a successful query, and negative error code if query fails. -+ */ -+int dpaa2_io_query_bp_count(struct dpaa2_io *d, uint32_t bpid, -+ uint32_t *num); -+#endif -+#endif /* __FSL_DPAA2_IO_H */ diff --git a/target/linux/layerscape/patches-4.4/7199-dpaa2-dpio-Cosmetic-cleanup.patch b/target/linux/layerscape/patches-4.4/7199-dpaa2-dpio-Cosmetic-cleanup.patch deleted file mode 100644 index dd5eb7e1b..000000000 --- a/target/linux/layerscape/patches-4.4/7199-dpaa2-dpio-Cosmetic-cleanup.patch +++ /dev/null @@ -1,35 +0,0 @@ -From a4150e8ec8da3add3933dd026c7154dcca2ee2e7 Mon Sep 17 00:00:00 2001 -From: Mihai Caraman -Date: Tue, 5 Apr 2016 14:47:57 +0000 -Subject: [PATCH 199/226] dpaa2-dpio: Cosmetic cleanup - -Replace obsolete terms. - -Signed-off-by: Stuart Yoder ---- - drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h | 2 +- - drivers/staging/fsl-mc/bus/dpio/qbman_portal.h | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h -+++ b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h -@@ -51,7 +51,7 @@ struct qbman_block_desc { - * Descriptor for a QBMan software portal, expressed in terms that make sense to - * the user context. Ie. on MC, this information is likely to be true-physical, - * and instantiated statically at compile-time. On GPP, this information is -- * likely to be obtained via "discovery" over a partition's "layerscape bus" -+ * likely to be obtained via "discovery" over a partition's "MC bus" - * (ie. in response to a MC portal command), and would take into account any - * virtualisation of the GPP user's address space and/or interrupt numbering. - */ ---- a/drivers/staging/fsl-mc/bus/dpio/qbman_portal.h -+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.h -@@ -138,7 +138,7 @@ static inline void *qbman_swp_mc_complet - /* This struct locates a sub-field within a QBMan portal (CENA) cacheline which - * is either serving as a configuration command or a query result. The - * representation is inherently little-endian, as the indexing of the words is -- * itself little-endian in nature and layerscape is little endian for anything -+ * itself little-endian in nature and DPAA2 is little endian for anything - * that crosses a word boundary too (64-bit fields are the obvious examples).
- */ - struct qb_attr_code { diff --git a/target/linux/layerscape/patches-4.4/7200-staging-fsl-mc-dpio-driver-match-id-cleanup.patch b/target/linux/layerscape/patches-4.4/7200-staging-fsl-mc-dpio-driver-match-id-cleanup.patch deleted file mode 100644 index 91ff06a38..000000000 --- a/target/linux/layerscape/patches-4.4/7200-staging-fsl-mc-dpio-driver-match-id-cleanup.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 3cc23880ecb98efe2d868254201ac58f945d9e1d Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Wed, 15 Jun 2016 14:05:08 -0500 -Subject: [PATCH 200/226] staging: fsl-mc: dpio driver match id cleanup - -Signed-off-by: Stuart Yoder --- - drivers/staging/fsl-mc/bus/dpio/dpio-drv.c | 4 +--- - 1 file changed, 1 insertion(+), 3 deletions(-) - ---- a/drivers/staging/fsl-mc/bus/dpio/dpio-drv.c -+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.c -@@ -364,12 +364,10 @@ err_mcportal: - return err; - } - --static const struct fsl_mc_device_match_id dpaa2_dpio_match_id_table[] = { -+static const struct fsl_mc_device_id dpaa2_dpio_match_id_table[] = { - { - .vendor = FSL_MC_VENDOR_FREESCALE, - .obj_type = "dpio", -- .ver_major = DPIO_VER_MAJOR, -- .ver_minor = DPIO_VER_MINOR - }, - { .vendor = 0x0 } - }; diff --git a/target/linux/layerscape/patches-4.4/7201-staging-dpaa2-eth-initial-commit-of-dpaa2-eth-driver.patch b/target/linux/layerscape/patches-4.4/7201-staging-dpaa2-eth-initial-commit-of-dpaa2-eth-driver.patch deleted file mode 100644 index cbec14451..000000000 --- a/target/linux/layerscape/patches-4.4/7201-staging-dpaa2-eth-initial-commit-of-dpaa2-eth-driver.patch +++ /dev/null @@ -1,12268 +0,0 @@ -From e588172442093fe22374dc1bfc88a7da751d6b30 Mon Sep 17 00:00:00 2001 -From: Bogdan Hamciuc -Date: Tue, 15 Sep 2015 10:14:16 -0500 -Subject: [PATCH 201/226] staging: dpaa2-eth: initial commit of dpaa2-eth - driver - -commit 3106ece5d96784b63a4eabb26661baaefedd164f -[context adjustment] - -This is a commit of a squash of the cumulative dpaa2-eth patches -in the sdk 2.0 kernel as of 3/7/2016. - -flib,dpaa2-eth: flib header update (Rebasing onto kernel 3.19, MC 0.6) - -this patch was moved from 4.0 branch - -Signed-off-by: Bogdan Hamciuc -[Stuart: split into multiple patches] -Signed-off-by: Stuart Yoder -Integrated-by: Jilong Guo - -flib,dpaa2-eth: updated Eth (was: Rebasing onto kernel 3.19, MC 0.6) - -updated Ethernet driver from 4.0 branch - -Signed-off-by: Bogdan Hamciuc -[Stuart: cherry-picked patch from 4.0 and split it up] -Signed-off-by: Stuart Yoder - -Conflicts: - - drivers/staging/Makefile - -Signed-off-by: Stuart Yoder - -dpaa2-eth: Adjust 'options' size - -The 'options' field of various MC configuration structures has changed -from u64 to u32 as of MC firmware version 7.0. - -Signed-off-by: Bogdan Hamciuc -Change-Id: I9ba0c19fc22f745e6be6cc40862afa18fa3ac3db -Reviewed-on: http://git.am.freescale.net:8181/35579 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Selectively disable preemption - -Temporary workaround for an MC Bus API quirk which only allows us to -specify either a spinlock-protected MC Portal or a -mutex-protected one, but then tries to match the runtime context in -order to enforce their usage. - -To Be Reverted. - -Signed-off-by: Bogdan Hamciuc -Change-Id: Ida2ec1fdbdebfd2e427f96ddad7582880146fda9 -Reviewed-on: http://git.am.freescale.net:8181/35580 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Fix ethtool bug - -We were writing beyond the end of the allocated data area for ethtool -statistics.
- -Signed-off-by: Ioana Radulescu -Change-Id: I6b77498a78dad06970508ebbed7144be73854f7f -Reviewed-on: http://git.am.freescale.net:8181/35583 -Reviewed-by: Bogdan Hamciuc -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Retry read if store unexpectedly empty - -After we place a volatile dequeue command, we might get to inquire the -store before the DMA has actually completed. In such cases, we must -retry, lest the store be overwritten by the next legitimate -volatile dequeue. - -Signed-off-by: Bogdan Hamciuc -Change-Id: I314fbb8b4d9f589715e42d35fc6677d726b8f5ba -Reviewed-on: http://git.am.freescale.net:8181/35584 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -flib: Fix "missing braces around initializer" warning - -Gcc does not support (yet?) the ={0} initializer in the case of an array -of structs. Fixing the Flib in order to make the warning go away. - -Signed-off-by: Bogdan Hamciuc -Change-Id: I8782ecb714c032cfeeecf4c8323cf9dbb702b10f -Reviewed-on: http://git.am.freescale.net:8181/35586 -Reviewed-by: Stuart Yoder -Tested-by: Stuart Yoder - -Revert "dpaa2-eth: Selectively disable preemption" - -This reverts commit e1455823c33b8dd48b5d2d50a7e8a11d3934cc0d. - -dpaa2-eth: Fix memory leak - -A buffer kmalloc'ed at probe time was not freed after it was no -longer needed. - -Signed-off-by: Ioana Radulescu -Change-Id: Iba197209e9203ed306449729c6dcd23ec95f094d -Reviewed-on: http://git.am.freescale.net:8181/35756 -Reviewed-by: Bogdan Hamciuc -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Remove unused field in ldpaa_eth_priv structure - -Signed-off-by: Ioana Radulescu -Change-Id: I124c3e4589b6420b1ea5cc05a03a51ea938b2bea -Reviewed-on: http://git.am.freescale.net:8181/35757 -Reviewed-by: Bogdan Hamciuc -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Fix "NOHZ: local_softirq_pending" warning - -Explicitly run softirqs after we enable NAPI. This in particular gets -rid of the "NOHZ: local_softirq_pending" warnings, but it also solves a -couple of other problems, among them fluctuating performance and high -ping latencies.
- -Signed-off-by: Bogdan Hamciuc -Change-Id: I7e00eecfc5569027c908124726edaf06be357c02 -Reviewed-on: http://git.am.freescale.net:8181/37666 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Ruxandra Ioana Radulescu -Reviewed-by: Stuart Yoder - -dpaa2-eth: Drain bpools when netdev is down - -In a data path layout with potentially a dozen interfaces, not all of -them may be up at the same time, yet they may consume a fair amount of -buffer space. -Drain the buffer pool upon ifdown and re-seed it at ifup. - -Signed-off-by: Bogdan Hamciuc -Change-Id: I24a379b643c8b5161a33b966c3314cf91024ed4a -Reviewed-on: http://git.am.freescale.net:8181/37667 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Ruxandra Ioana Radulescu -Reviewed-by: Stuart Yoder - -dpaa2-eth: Interrupts cleanup - -Add the code for cleaning up interrupts on driver removal. -This was lost during transition from kernel 3.16 to 3.19. - -Also, there's no need to call devm_free_irq() if probe fails -as the kernel will release all driver resources. - -Signed-off-by: Ioana Radulescu -Change-Id: Ifd404bbf399d5ba62e2896371076719c1d6b4214 -Reviewed-on: http://git.am.freescale.net:8181/36199 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Bogdan Hamciuc -Reviewed-by: Bharat Bhushan -Reviewed-by: Stuart Yoder -Reviewed-on: http://git.am.freescale.net:8181/37690 - -dpaa2-eth: Ethtool support for hashing - -Only one set of header fields is supported for all protocols, the driver -silently replaces previous configuration regardless of user selected -protocol. -Following fields are supported: - L2DA - VLAN tag - L3 proto - IP SA - IP DA - L4 bytes 0 & 1 [TCP/UDP src port] - L4 bytes 2 & 3 [TCP/UDP dst port] - -Signed-off-by: Alex Marginean - -Change-Id: I97c9dac1b842fe6bc7115e40c08c42f67dee8c9c -Reviewed-on: http://git.am.freescale.net:8181/37260 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Bogdan Hamciuc -Reviewed-by: Stuart Yoder - -dpaa2-eth: Fix maximum number of FQs - -The maximum number of Rx/Tx conf FQs associated to a DPNI was not -updated when the implementation changed. It just happened to work -by accident. - -Signed-off-by: Ioana Radulescu -Reviewed-by: Bogdan Hamciuc -Change-Id: I03e30e0121a40d0d15fcdc4bee1fb98caa17c0ef -Reviewed-on: http://git.am.freescale.net:8181/37668 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Fix Rx buffer address alignment - -We need to align the start address of the Rx buffers to -LDPAA_ETH_BUF_ALIGN bytes. We were using SMP_CACHE_BYTES instead. -It happened to work because both defines have the value of 64, -but this may change at some point. - -Signed-off-by: Ioana Radulescu -Reviewed-by: Bogdan Hamciuc -Change-Id: I08a0f3f18f82c5581c491bd395e3ad066b25bcf5 -Reviewed-on: http://git.am.freescale.net:8181/37669 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Add buffer count to ethtool statistics - -Print the number of buffers available in the pool for a certain DPNI -along with the rest of the ethtool -S stats. - -Signed-off-by: Ioana Radulescu -Reviewed-by: Bogdan Hamciuc -Change-Id: Ia1f5cf341c8414ae2058a73f6bc81490ef134592 -Reviewed-on: http://git.am.freescale.net:8181/37671 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Add Rx error queue - -Add a Kconfigurable option that allows Rx error frames to be -enqueued on an error FQ. By default error frames are discarded, -but for debug purposes we may want to process them at driver -level. - -Note: Checkpatch issues a false positive about complex macros that -should be parenthesized. 
- -Signed-off-by: Ioana Radulescu -Reviewed-by: Bogdan Hamciuc -Change-Id: I7d19d00b5d5445514ebd112c886ce8ccdbb1f0da -Reviewed-on: http://git.am.freescale.net:8181/37672 -Reviewed-by: Stuart Yoder -Tested-by: Stuart Yoder - -staging: fsl-dpaa2: FLib headers cleanup - -Going with the flow of moving fsl-dpaa2 headers into the drivers' -location rather than keeping them all in one place. - -Signed-off-by: Bogdan Hamciuc -Change-Id: Ia2870cd019a4934c7835d38752a46b2a0045f30e -Reviewed-on: http://git.am.freescale.net:8181/37674 -Reviewed-by: Ruxandra Ioana Radulescu -Reviewed-by: Stuart Yoder -Tested-by: Stuart Yoder - -dpaa2-eth: Klocwork fixes - -Fix several issues reported by Klocwork. - -Signed-off-by: Ioana Radulescu -Reviewed-by: Bogdan Hamciuc -Change-Id: I1e23365765f3b0ff9b6474d8207df7c1f2433ccd -Reviewed-on: http://git.am.freescale.net:8181/37675 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Probe devices with no hash support - -Don't fail at probe if the DPNI doesn't have the hash distribution -option enabled. Instead, initialize a single Rx frame queue and -use it for all incoming traffic. - -Rx flow hashing configuration through ethtool will not work -in this case. - -Signed-off-by: Ioana Radulescu -Reviewed-by: Bogdan Hamciuc -Change-Id: Iaf17e05b15946e6901c39a21b5344b89e9f1d797 -Reviewed-on: http://git.am.freescale.net:8181/37676 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Process frames in IRQ context - -Stop using threaded IRQs and move back to hardirq top-halves. -This is the first patch of a small series adapting the DPIO and Ethernet -code to these changes. - -Signed-off-by: Roy Pledge -Tested-by: Ioana Radulescu -Tested-by: Bogdan Hamciuc -Reviewed-by: Bogdan Hamciuc -Reviewed-by: Stuart Yoder -[Stuart: split dpio and eth into separate patches, updated subject] -Signed-off-by: Stuart Yoder - -dpaa2-eth: Fix bug in NAPI poll - -We incorrectly rearmed FQDAN notifications at the end of a NAPI cycle, -regardless of whether the NAPI budget was consumed or not. We only need -to rearm notifications if the NAPI cycle cleaned fewer frames than its -budget, otherwise a new NAPI poll will be scheduled anyway. - -Signed-off-by: Ioana Radulescu -Reviewed-by: Bogdan Hamciuc -Change-Id: Ib55497bdbd769047420b3150668f2e2aef3c93f6 -Reviewed-on: http://git.am.freescale.net:8181/38317 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Use dma_map_sg on Tx - -Use the simpler dma_map_sg() along with the scatterlist API if the -egress frame is scatter-gather, at the cost of keeping some extra -information in the frame's software annotation area. - -Signed-off-by: Bogdan Hamciuc -Change-Id: If293aeabbd58d031f21456704357d4ff7e53c559 -Reviewed-on: http://git.am.freescale.net:8181/37681 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Reduce retries if Tx portal busy - -Too many retries due to Tx portal contention led to a significant cycle -waste and reduction in performance. -Reduce the number of enqueue retries and drop the frame if eventually -unsuccessful. - -Signed-off-by: Bogdan Hamciuc -Change-Id: Ib111ec61cd4294a7632348c25fa3d7f4002be0c0 -Reviewed-on: http://git.am.freescale.net:8181/37682 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Add sysfs support for TxConf affinity change - -This adds support in sysfs for affining Tx Confirmation queues to GPPs, -via the affine DPIO objects.
- -The user can specify a cpu list in /sys/class/net/ni/txconf_affinity -to which the Ethernet driver will affine the TxConf FQs, in round-robin -fashion. This is naturally a bit coarse, because there is no "official" -mapping of the transmitting CPUs to Tx Confirmation queues. - -Signed-off-by: Bogdan Hamciuc -Change-Id: I4b3da632e202ceeb22986c842d746aafe2a87a81 -Reviewed-on: http://git.am.freescale.net:8181/37684 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Implement ndo_select_queue - -Use a very simple selection function for the egress FQ. The purpose -behind this is to more evenly distribute Tx Confirmation traffic, -especially in the case of multiple egress flows, when bundling it all on -CPU 0 would make that CPU a bottleneck. - -Signed-off-by: Bogdan Hamciuc -Change-Id: Ibfe8aad7ad5c719cc95d7817d7de6d2094f0f7ed -Reviewed-on: http://git.am.freescale.net:8181/37685 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Reduce TxConf NAPI weight back to 64 - -It turns out that not only did the kernel frown upon the old budget of 256, -but the measured values were well below that anyway. - -Signed-off-by: Bogdan Hamciuc -Change-Id: I62ddd3ea1dbfd8b51e2bcb2286e0d5eb10ac7f27 -Reviewed-on: http://git.am.freescale.net:8181/37688 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Try refilling the buffer pool less often - -We used to check if the buffer pool needed refilling at each Rx -frame. Instead, do that check (and the actual buffer release if -needed) only after a pull dequeue. - -Signed-off-by: Ioana Radulescu -Change-Id: Id52fab83873c40a711b8cadfcf909eb7e2e210f3 -Reviewed-on: http://git.am.freescale.net:8181/38318 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Bogdan Hamciuc -Reviewed-by: Stuart Yoder - -dpaa2-eth: Stay in NAPI if exact budget is met - -An off-by-one bug would cause a premature exit from the NAPI cycle. -Performance degradation is particularly severe in IPFWD cases. - -Signed-off-by: Ioana Radulescu -Tested-by: Bogdan Hamciuc -Change-Id: I9de2580c7ff8e46cbca9613890b03737add35e26 -Reviewed-on: http://git.am.freescale.net:8181/37908 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Bogdan Hamciuc -Reviewed-by: Stuart Yoder - -dpaa2-eth: Minor changes to FQ stats - -Signed-off-by: Ioana Radulescu -Change-Id: I0ced0e7b2eee28599cdea79094336c0d44f0d32b -Reviewed-on: http://git.am.freescale.net:8181/38319 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Bogdan Hamciuc -Reviewed-by: Stuart Yoder - -dpaa2-eth: Support fewer DPIOs than CPUs - -The previous DPIO functions would transparently choose a (perhaps -non-affine) CPU if the required CPU was not available. Now that their API -contract is enforced, we must make an explicit request for *any* DPIO if -the request for an *affine* DPIO has failed.
- -Signed-off-by: Bogdan Hamciuc -Change-Id: Ib08047ffa33518993b1ffa4671d0d4f36d6793d0 -Reviewed-on: http://git.am.freescale.net:8181/38320 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Roy Pledge -Reviewed-by: Stuart Yoder - -dpaa2-eth: cosmetic changes in hashing code - -Signed-off-by: Alex Marginean -Change-Id: I79e21a69a6fb68cdbdb8d853c059661f8988dbf9 -Reviewed-on: http://git.am.freescale.net:8181/37258 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Prefetch data before initial access - -Signed-off-by: Ioana Radulescu -Change-Id: Ie8f0163651aea7e3e197a408f89ca98d296d4b8b -Reviewed-on: http://git.am.freescale.net:8181/38753 -Reviewed-by: Bogdan Hamciuc -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Use netif_receive_skb - -netif_rx() is a leftover from our pre-NAPI codebase. - -Signed-off-by: Ioana Radulescu -Change-Id: I02ff0a059862964df1bf81b247853193994c2dfe -Reviewed-on: http://git.am.freescale.net:8181/38754 -Reviewed-by: Bogdan Hamciuc -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Use napi_alloc_frag() on Rx. - -A bit better-suited than netdev_alloc_frag(). - -Signed-off-by: Ioana Radulescu -Change-Id: I8863a783502db963e5dc968f049534c36ad484e2 -Reviewed-on: http://git.am.freescale.net:8181/38755 -Reviewed-by: Bogdan Hamciuc -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Silence skb_realloc_headroom() warning - -pktgen tests tend to be too noisy because pktgen does not observe the -net device's needed_headroom specification and we used to be pretty loud -about that. We'll print the warning message just once. - -Signed-off-by: Bogdan Hamciuc -Change-Id: I3c12eba29c79aa9c487307d367f6d9f4dbe447a3 -Reviewed-on: http://git.am.freescale.net:8181/38756 -Reviewed-by: Ruxandra Ioana Radulescu -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Print message upon device unplugging - -Give a console notification when a DPNI is unplugged. This is useful for -automated tests to know the operation (which is not instantaneous) has -finished. - -Signed-off-by: Bogdan Hamciuc -Change-Id: If33033201fcee7671ad91c2b56badf3fb56a9e3e -Reviewed-on: http://git.am.freescale.net:8181/38757 -Reviewed-by: Ruxandra Ioana Radulescu -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Add debugfs support - -Add debugfs entries for showing detailed per-CPU and per-FQ -counters for each network interface. Also add a knob for -resetting these stats. -The aggregated interface statistics were already available through -ethtool -S. - -Signed-off-by: Ioana Radulescu -Reviewed-by: Bogdan Hamciuc -Change-Id: I55f5bfe07a15b0d1bf0c6175d8829654163a4318 -Reviewed-on: http://git.am.freescale.net:8181/38758 -Reviewed-by: Stuart Yoder -Tested-by: Stuart Yoder - -dpaa2-eth: limited support for flow steering - -Steering is supported on a subset of fields, including DMAC, IP SRC -and DST, L4 ports. -Steering and hashing configurations depend on each other, which makes -the whole thing tricky to configure. Currently FS can be configured -using only the fields selected for hashing and all the hashing fields -must be included in the match key - masking doesn't work yet.
- -Signed-off-by: Alex Marginean -Change-Id: I9fa3199f7818a9a5f9d69d3483ffd839056cc468 -Reviewed-on: http://git.am.freescale.net:8181/38759 -Reviewed-by: Bogdan Hamciuc -Tested-by: Review Code-CDREVIEW -Reviewed-by: Ruxandra Ioana Radulescu -Reviewed-by: Stuart Yoder - -dpaa2-eth: Rename files into the dpaa2 nomenclature - -Signed-off-by: Bogdan Hamciuc -Change-Id: I1c3d62e2f19a59d4b65727234fd7df2dfd8683d9 -Reviewed-on: http://git.am.freescale.net:8181/38965 -Reviewed-by: Alexandru Marginean -Reviewed-by: Ruxandra Ioana Radulescu -Reviewed-by: Stuart Yoder -Tested-by: Stuart Yoder - -staging: dpaa2-eth: migrated remaining flibs for MC fw 8.0.0 - -Signed-off-by: J. German Rivera -[Stuart: split eth part into separate patch, updated subject] -Signed-off-by: Stuart Yoder - -dpaa2-eth: Clear 'backup_pool' attribute - -New MC-0.7 firmware allows specifying an alternate buffer pool, but we -are not using that feature for the moment. - -Signed-off-by: Bogdan Hamciuc -Change-Id: I0a6e6626512b7bbddfef732c71f1400b67f3e619 -Reviewed-on: http://git.am.freescale.net:8181/39149 -Tested-by: Review Code-CDREVIEW -Reviewed-by: Stuart Yoder - -dpaa2-eth: Do programming of MSIs in devm_request_threaded_irq() - -With the new dprc_set_obj_irq() we can now program MSIs in the device -in the callback invoked from devm_request_threaded_irq(). -Since this callback is invoked with interrupts disabled, we need to -use an atomic portal, instead of the root DPRC's built-in portal -which is non-atomic. - -Signed-off-by: Itai Katz -Signed-off-by: J. German Rivera -[Stuart: split original patch into multiple patches] -Signed-off-by: Stuart Yoder - -dpaa2-eth: Do not map beyond skb tail - -On Tx do dma_map only until skb->tail, rather than skb->end. - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Declare NETIF_F_LLTX as a capability - -We are effectively doing lock-less Tx. - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Avoid bitcopy of 'backpointers' struct - -Make 'struct ldpaa_eth_swa bps' a pointer and avoid copying it on both Tx -and TxConf. - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Use CDANs instead of FQDANs - -Use Channel Dequeue Available Notifications (CDANs) instead of -Frame Queue notifications. We allocate a QMan channel (or DPCON -object) for each available cpu and assign to it the Rx and Tx conf -queues associated with that cpu. - -We usually want to have affine DPIOs and DPCONs (one for each core). -If this is not possible due to insufficient resources, we distribute -all ingress traffic on the cores with affine DPIOs. - -NAPI instances are now one per channel instead of one per FQ, as the -interrupt source changes. Statistics counters change accordingly. - -Note that after this commit is applied, one needs to provide sufficient -DPCON objects (either through DPL or restool) in order for the Ethernet -interfaces to work. - -Signed-off-by: Ioana Radulescu -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Cleanup debugfs statistics - -Several minor changes to statistics reporting: -* Fix print alignment of statistics counters -* Fix a naming ambiguity in the cpu_stats debugfs ops -* Add Rx/Tx error counters; these were already used, but not -reported in the per-CPU stats - -Signed-off-by: Ioana Radulescu - -dpaa2-eth: Add tx shaping configuration in sysfs - -Egress traffic can be shaped via a per-DPNI SysFS entry: - echo M N > /sys/class/net/ni/tx_shaping -where: - M is the maximum throughput, expressed in Mbps. - N is the maximum burst size, expressed in bytes, at most 64000. - -To remove shaping, use M=0, N=0.
- -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Fix "Tx busy" counter - -Under heavy egress load, when a large number of the transmitted packets -cannot be sent because of high portal contention, the "Tx busy" counter -was not properly incremented. - -Signed-off-by: Ioana Radulescu - -dpaa2-eth: Fix memory cleanup in case of Tx congestion - -The error path of ldpaa_eth_tx() was not properly freeing the SGT buffer -if the enqueue had failed because of congestion. DMA unmapping was -missing, too. - -Factor the code originally inside the TxConf callback out into a -separate function that would be called on both TxConf and Tx paths. - -Signed-off-by: Bogdan Hamciuc -Signed-off-by: Ioana Radulescu - -dpaa2-eth: Use napi_gro_receive() - -Call napi_gro_receive(), effectively enabling GRO. -NOTE: We could further optimize this by looking ahead in the parse results -received from hardware and only using GRO when the L3+L4 combination is -appropriate. - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Fix compilation of Rx Error FQ code - -Conditionally-compiled code slipped between cracks when FLibs were -updated. - -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: Add Kconfig dependency on DEBUG_FS - -The driver's debugfs support depends on the generic CONFIG_DEBUG_FS. - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Fix interface down/up bug - -If a networking interface was brought down while still receiving -ingress traffic, the delay between DPNI disable and NAPI disable -was not enough to ensure all in-flight frames got processed. -Instead, some frames were left pending in the Rx queues. If the -net device was then removed (i.e. restool unbind/unplug), the -call to dpni_reset() silently failed and the kernel crashed on -device replugging. - -Fix this by increasing the FQ drain time. Also, at ifconfig up -we enable NAPI before starting the DPNI, to make sure we don't -miss any early CDANs. - -Signed-off-by: Ioana Radulescu - -dpaa2-eth: Iterate only through initialized channels - -The number of DPIO objects available to a DPNI may be fewer than the -number of online cores. A typical example would be a DPNI with a -distribution size smaller than 8. Since we only initialize as many -channels (DPCONs) as there are DPIOs, iterating through all online cpus -would produce a nasty oops when retrieving ethtool stats. - -Signed-off-by: Bogdan Hamciuc - -net: pktgen: Observe needed_headroom of the device - -Allocate enough space so as not to force the outgoing net device to do -skb_realloc_headroom(). - -Signed-off-by: Bogdan Hamciuc -Signed-off-by: David S. Miller - -dpaa2-eth: Trace buffer pool seeding - -Add ftrace support for buffer pool seeding. Individual buffers are -described by virtual and dma addresses and sizes, as well as by bpid. - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Explicitly set carrier off at ifconfig up - -If we don't, netif_carrier_ok() will still return true even if the link -state is marked as LINKWATCH_PENDING, which in a dpni-2-dpni case may -last indefinitely long. This will cause "ifconfig up" followed by "ip -link show" to report LOWER_UP when the peer DPNI is still down (and in -fact before we've even received any link notification at all). 
- -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Fix FQ type in stats print - -Fix a bug where the type of the Rx error queue was printed -incorrectly in the debugfs statistics - -Signed-off-by: Ioana Radulescu - -dpaa2-eth: Don't build debugfs support as a separate module - -Instead have module init and exit functions declared explicitly for -the Ethernet driver and initialize/destroy the debugfs directory there. - -Signed-off-by: Ioana Radulescu - -dpaa2-eth: Remove debugfs #ifdefs from dpaa2-eth.c - -Instead of conditionally compiling the calls to debugfs init -functions in dpaa2-eth.c, define no-op stubs for these functions -in case the debugfs Kconfig option is not enabled. This makes -the code more readable. - -Signed-off-by: Ioana Radulescu -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Use napi_complete_done() - -Replace napi_complete() with napi_complete_done(). - -Together with setting /sys/class/net/ethX/gro_flush_timeout, this -allows us to take better advantage of GRO coalescing and improves -throughput and cpu load in TCP termination tests. - -Signed-off-by: Ioana Radulescu - -dpaa2-eth: Fix error path in probe - -NAPI delete was called at the wrong place when exiting the probe -function on an error path. - -Signed-off-by: Ioana Radulescu - -dpaa2-eth: Allocate channels based on queue count - -Limit the number of channels allocated per DPNI to the maximum -between the number of Rx queues per traffic class (distribution size) -and Tx confirmation queues (number of tx flows). -If this happens to be larger than the number of available cores, only -allocate one channel for each core and distribute the frame queues on -the cores/channels in a round robin fashion. - -Signed-off-by: Ioana Radulescu -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Use DPNI setting for number of Tx flows - -Instead of creating one Tx flow for each online cpu, use the DPNI -attributes for deciding how many senders we have. - -Signed-off-by: Ioana Radulescu - -dpaa2-eth: Renounce sentinel in enum dpni_counter - -Bring back the Flib header dpni.h to its initial content by removing the -sentinel value in enum dpni_counter. - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Fix Rx queue count - -We were missing a roundup to the next power of 2 in order to be in sync -with the MC implementation. -Actually, moved that logic into a separate function which we'll remove -once the MC API is updated. - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Unmap the S/G table outside ldpaa_eth_free_rx_fd - -The Scatter-Gather table is already unmapped outside ldpaa_eth_free_rx_fd -so there is no need to try to unmap it once more. - -Signed-off-by: Cristian Sovaiala - -dpaa2-eth: Use napi_schedule_irqoff() - -At the time we schedule NAPI, the Dequeue Available Notifications (which -are the de facto triggers of NAPI processing) are already disabled. - -Signed-off-by: Ioana Radulescu -Signed-off-by: Bogdan Hamciuc - -net: Fix ethernet Kconfig - -Re-add missing 'source' directive. This exists on the integration -branch, but was mistakenly removed by an earlier dpaa2-eth rebase. - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Manually update link state at ifup - -The DPMAC may have handled the link state notification before the DPNI -is up. A new PHY state transition may not subsequently occur, so the -DPNI must initiate a read of the DPMAC state.
- -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Stop carrier upon ifdown - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Fix print messages in link state handling code - -Avoid an "(uninitialized)" message during DPNI probe by replacing -netdev_info() with its corresponding dev_info(). -Purge some related comments and add some netdev messages to assist -link state debugging. -Remove an excessively defensive assertion. - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Do not allow ethtool settings change while the NI is up - -Due to an MC limitation, link state changes while the DPNI is enabled -will fail. For now, we'll just prevent the call from going down to the MC -if we know it will fail. - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Reduce ethtool messages verbosity - -Transform a couple of netdev_info() calls into netdev_dbg(). - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Only unmask IRQs that we actually handle - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Produce fewer boot log messages - -No longer print one line for each all-zero hwaddr that was replaced with -a random MAC address; just inform the user once that this has occurred. -Also reduce redundancy of some printouts in the bootlog. - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Fix big endian issue - -We were not doing any endianness conversions on the scatter gather -table entries, which caused problems on big endian kernels. - -For frame descriptors the QMan driver takes care of this transparently, -but in the case of SG entries we need to do it ourselves. - -Signed-off-by: Ioana Radulescu - -dpaa2-eth: Force atomic context for lazy bpool seeding - -We use the same ldpaa_bp_add_7() function for initial buffer pool -seeding (from .ndo_open) and for hotpath pool replenishing. The function -is using napi_alloc_frag() as an optimization for the Rx datapath, but -that turns out to require atomic execution because of a this_cpu_ptr() -call down its stack. -This patch temporarily disables preemption around the initial seeding of -the Rx buffer pool. - -Signed-off-by: Bogdan Hamciuc - -dpaa-eth: Integrate Flib version 0.7.1.2 - -Although API-compatible with 0.7.1.1, there are some ABI changes -that warrant a new integration. - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: No longer adjust max_dist_per_tc - -The MC firmware until version 0.7.1.1/8.0.2 requires that -max_dist_per_tc have the value expected by the hardware, which would be -different from what the user expects. MC firmware 0.7.1.2/8.0.5 fixes -that, so we remove our transparent conversion. - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Enforce 256-byte Rx alignment - -Hardware erratum enforced by MC requires that Rx buffer lengths and -addresses be 256-byte aligned. - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Rename Tx buf alignment macro - -The existing "BUF_ALIGN" macro remained confined to Tx usage, after -separate alignment was introduced for Rx. Renaming accordingly. - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Fix hashing distribution size - -Commit be3fb62623e4338e60fb60019f134b6055cbc127 -Author: Bogdan Hamciuc -Date: Fri Oct 23 18:26:44 2015 +0300 - - dpaa2-eth: No longer adjust max_dist_per_tc - -missed one usage of the ldpaa_queue_count() function, making -distribution size inadvertently lower. - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Remove ndo_select_queue - -Our implementation of ndo_select_queue would lead to questions regarding -our support for qdiscs.
Until we find an optimal way to select the txq -without breaking future qdisc integration, just remove the -ndo_select_queue callback entirely and let the stack figure out the -flow. -This incurs a ~2-3% penalty on some performance tests. - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Select TxConf FQ based on processor id - -Use smp_processor_id() instead of skb queue mapping to determine the tx -flow id and implicitly the confirmation queue. - -Signed-off-by: Bogdan Hamciuc - -dpaa2-eth: Reduce number of buffers in bpool - -Reduce the maximum number of buffers in each buffer pool associated -with a DPNI. This in turn reduces the number of memory allocations -performed in a single batch when buffers fall below a certain -threshold. - -Provides a significant performance boost (~5-10% increase) on both -termination and forwarding scenarios, while also reducing the driver -memory footprint. - -Signed-off-by: Ioana Radulescu - -dpaa2-eth: Replace "ldpaa" with "dpaa2" - -Replace all instances of "ldpaa"/"LDPAA" in the Ethernet driver -(names of functions, structures, macros, etc), with "dpaa2"/"DPAA2", -except for DPIO API function calls. - -Signed-off-by: Ioana Radulescu - -dpaa2-eth: rename ldpaa to dpaa2 - -Signed-off-by: Haiying Wang -(Stuart: this patch was split out from the original global rename patch) -Signed-off-by: Stuart Yoder - -dpaa2-eth: Rename dpaa_io_query_fq_count to dpaa2_io_query_fq_count - -Signed-off-by: Cristian Sovaiala - -fsl-dpio: rename dpaa_* structure to dpaa2_* - -Signed-off-by: Haiying Wang - -dpaa2-eth, dpni, fsl-mc: Updates for MC0.8.0 - -Several changes need to be performed in sync for supporting -the newest MC version: -* Update mc-cmd.h -* Update the dpni binary interface to v6.0 -* Update the DPAA2 Eth driver to account for several API changes - -Signed-off-by: Ioana Radulescu - -staging: fsl-dpaa2: ethernet: add support for hardware timestamping - -Signed-off-by: Yangbo Lu - -fsl-dpaa2: eth: Do not set bpid in egress fd - -We don't do FD recycling on egress, BPID is therefore not necessary. - -Signed-off-by: Bogdan Hamciuc - -fsl-dpaa2: eth: Amend buffer refill comment - -A change request has been pending for placing an upper bound on the -buffer replenish logic on Rx. However, short of practical alternatives, -resort to amending the relevant comment and rely on ksoftirqd to -guarantee interactivity. - -Signed-off-by: Bogdan Hamciuc - -fsl-dpaa2: eth: Configure a taildrop threshold for each Rx frame queue. - -The selected value allows for Rx jumbo (10K) frame processing -while at the same time helps balance the system in the case of -IP forwarding. - -Also compute the number of buffers in the pool based on the TD -threshold to avoid starving some of the ingress queues in small-frame, -high-throughput scenarios. - -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Check objects' FLIB version - -Make sure we support the DPNI, DPCON and DPBP version, otherwise -abort probing early on and provide an error message.
- -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Remove likely/unlikely from cold paths - -Signed-off-by: Cristian Sovaiala -Signed-off-by: Bogdan Hamciuc - -fsl-dpaa2: eth: Remove __cold attribute - -Signed-off-by: Cristian Sovaiala - -fsl-dpaa2: eth: Replace netdev_XXX with dev_XXX before register_netdevice() - -Signed-off-by: Cristian Sovaiala -Signed-off-by: Bogdan Hamciuc - -fsl-dpaa2: eth: Fix coccinelle issue - -drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c:687:1-36: WARNING: -Assignment of bool to 0/1 - -Signed-off-by: Cristian Sovaiala - -fsl-dpaa2: eth: Fix minor spelling issue - -Signed-off-by: Cristian Sovaiala - -fsl-dpaa2: eth: Add a couple of 'unlikely' on hot path - -Signed-off-by: Cristian Sovaiala -Signed-off-by: Bogdan Hamciuc - -fsl-dpaa2: eth: Fix a bunch of minor issues found by static analysis tools - -As found by Klocwork and Checkpatch: - - Unused variables - - Integer type replacements - - Unchecked memory allocations - - Whitespace, alignment and newlining - -Signed-off-by: Cristian Sovaiala -Signed-off-by: Bogdan Hamciuc - -fsl-dpaa2: eth: Remove "inline" keyword from static functions - -Signed-off-by: Cristian Sovaiala - -fsl-dpaa2: eth: Remove BUG/BUG_ONs - -Signed-off-by: Cristian Sovaiala -Signed-off-by: Bogdan Hamciuc - -fsl-dpaa2: eth: Use NAPI_POLL_WEIGHT - -No need to define our own macro as long as we're using the -default value of 64. - -Signed-off-by: Ioana Radulescu - -dpaa2-eth: Move dpaa2_eth_swa structure to header file - -It was the only structure defined inside dpaa2-eth.c. - -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Replace uintX_t with uX - -Signed-off-by: Ioana Radulescu -Signed-off-by: Bogdan Hamciuc - -fsl-dpaa2: eth: Minor fixes & cosmetics - -- Make driver log level an int, because this is what - netif_msg_init expects. -- Remove driver description macro as it was used only once, - immediately after being defined -- Remove include comment - -Signed-off-by: Ioana Radulescu - -dpaa2-eth: Move bcast address setup to dpaa2_eth_netdev_init - -It seems to fit better there than directly in probe. - -Signed-off-by: Ioana Radulescu - -dpaa2-eth: Fix DMA mapping bug - -During hashing/flow steering configuration via ethtool, we were -doing a DMA unmap from the wrong address. Fix the issue by using -the DMA address that was initially mapped. - -Signed-off-by: Ioana Radulescu - -dpaa2-eth: Associate buffer counting to queues instead of cpu - -Move the buffer counters from being percpu variables to being -associated with QMan channels. This is more natural as we need -to dimension the buffer pool count based on distribution size -rather than number of online cores. - -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Provide driver and fw version to ethtool - -Read fw version from the MC and interpret DPNI FLib major.minor as the -driver's version. Report these in 'ethtool -i'. - -Signed-off-by: Bogdan Hamciuc - -fsl-dpaa2: eth: Remove dependency on GCOV_KERNEL - -Signed-off-by: Cristian Sovaiala - -fsl-dpaa2: eth: Remove FIXME/TODO comments from the code - -Some of the concerns had already been addressed; a couple are being -fixed in place. -Left a few TODOs related to the flow-steering code, which needs to be -revisited before upstreaming anyway. - -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Remove forward declarations - -Instead move the functions such that they are defined prior to -being used.
- -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Remove dead code in IRQ handler - -If any of those conditions were met, it is unlikely we'd ever be there -in the first place. - -Signed-off-by: Bogdan Hamciuc - -fsl-dpaa2: eth: Remove dpaa2_dpbp_drain() - -Its sole caller was __dpaa2_dpbp_free(), so move its content and get rid -of one function call. - -Signed-off-by: Bogdan Hamciuc - -fsl-dpaa2: eth: Remove duplicate define - -We somehow ended up with two defines for the maximum number -of tx queues. - -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Move header comment to .c file - -Signed-off-by: Bogdan Hamciuc - -fsl-dpaa2: eth: Make DPCON allocation failure produce a benign message - -Number of DPCONs may be smaller than the number of CPUs in a number of -valid scenarios. One such scenario is when the DPNI's distribution width -is smaller than the number of cores and we just don't want to -over-allocate DPCONs. -Make the DPCON allocation failure less menacing by changing the logged -message. - -While at it, remove an unused parameter from the function prototype. - -Signed-off-by: Bogdan Hamciuc - -dpaa2 eth: irq update - -Signed-off-by: Stuart Yoder - -Conflicts: - drivers/staging/Kconfig - drivers/staging/Makefile ---- - MAINTAINERS | 15 + - drivers/staging/Kconfig | 2 + - drivers/staging/Makefile | 1 + - drivers/staging/fsl-dpaa2/Kconfig | 11 + - drivers/staging/fsl-dpaa2/Makefile | 5 + - drivers/staging/fsl-dpaa2/ethernet/Kconfig | 42 + - drivers/staging/fsl-dpaa2/ethernet/Makefile | 21 + - .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 319 +++ - .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 61 + - .../staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 185 ++ - drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 2793 ++++++++++++++++++++ - drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 366 +++ - drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 882 +++++++ - drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 175 ++ - drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 1058 ++++++++ - drivers/staging/fsl-dpaa2/ethernet/dpni.c | 1907 +++++++++++++ - drivers/staging/fsl-dpaa2/ethernet/dpni.h | 2581 ++++++++++++++++++ - drivers/staging/fsl-mc/include/mc-cmd.h | 5 +- - drivers/staging/fsl-mc/include/net.h | 481 ++++ - net/core/pktgen.c | 1 + - 20 files changed, 10910 insertions(+), 1 deletion(-) - create mode 100644 drivers/staging/fsl-dpaa2/Kconfig - create mode 100644 drivers/staging/fsl-dpaa2/Makefile - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Kconfig - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Makefile - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpkg.h - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.c - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.h - create mode 100644 drivers/staging/fsl-mc/include/net.h - ---- a/MAINTAINERS -+++ b/MAINTAINERS -@@ -4539,6 +4539,21 @@ L: linux-kernel@vger.kernel.org - S: Maintained - F: drivers/staging/fsl-mc/ - -+FREESCALE DPAA2 ETH DRIVER -+M: Ioana Radulescu -+M: Bogdan Hamciuc -+M: Cristian 
Sovaiala -+L: linux-kernel@vger.kernel.org -+S: Maintained -+F: drivers/staging/fsl-dpaa2/ethernet/ -+ -+FREESCALE QORIQ MANAGEMENT COMPLEX RESTOOL DRIVER -+M: Lijun Pan -+L: linux-kernel@vger.kernel.org -+S: Maintained -+F: drivers/staging/fsl-mc/bus/mc-ioctl.h -+F: drivers/staging/fsl-mc/bus/mc-restool.c -+ - FREEVXFS FILESYSTEM - M: Christoph Hellwig - W: ftp://ftp.openlinux.org/pub/people/hch/vxfs ---- a/drivers/staging/Kconfig -+++ b/drivers/staging/Kconfig -@@ -114,4 +114,6 @@ source "drivers/staging/most/Kconfig" - - source "drivers/staging/fsl_ppfe/Kconfig" - -+source "drivers/staging/fsl-dpaa2/Kconfig" -+ - endif # STAGING ---- a/drivers/staging/Makefile -+++ b/drivers/staging/Makefile -@@ -49,3 +49,4 @@ obj-$(CONFIG_FSL_DPA) += fsl_q - obj-$(CONFIG_WILC1000) += wilc1000/ - obj-$(CONFIG_MOST) += most/ - obj-$(CONFIG_FSL_PPFE) += fsl_ppfe/ -+obj-$(CONFIG_FSL_DPAA2) += fsl-dpaa2/ ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/Kconfig -@@ -0,0 +1,11 @@ -+# -+# Freescale device configuration -+# -+ -+config FSL_DPAA2 -+ bool "Freescale DPAA2 devices" -+ depends on FSL_MC_BUS -+ ---help--- -+ Build drivers for Freescale DataPath Acceleration Architecture (DPAA2) family of SoCs. -+# TODO move DPIO driver in-here? -+source "drivers/staging/fsl-dpaa2/ethernet/Kconfig" ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/Makefile -@@ -0,0 +1,5 @@ -+# -+# Makefile for the Freescale network device drivers. -+# -+ -+obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/ ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/Kconfig -@@ -0,0 +1,42 @@ -+# -+# Freescale DPAA Ethernet driver configuration -+# -+# Copyright (C) 2014-2015 Freescale Semiconductor, Inc. -+# -+# This file is released under the GPLv2 -+# -+ -+menuconfig FSL_DPAA2_ETH -+ tristate "Freescale DPAA2 Ethernet" -+ depends on FSL_DPAA2 && FSL_MC_BUS && FSL_MC_DPIO -+ select FSL_DPAA2_MAC -+ default y -+ ---help--- -+ Freescale Data Path Acceleration Architecture Ethernet -+ driver, using the Freescale MC bus driver. -+ -+if FSL_DPAA2_ETH -+config FSL_DPAA2_ETH_LINK_POLL -+ bool "Use polling mode for link state" -+ default n -+ ---help--- -+ Poll for detecting link state changes instead of using -+ interrupts. -+ -+config FSL_DPAA2_ETH_USE_ERR_QUEUE -+ bool "Enable Rx error queue" -+ default n -+ ---help--- -+ Allow Rx error frames to be enqueued on an error queue -+ and processed by the driver (by default they are dropped -+ in hardware). -+ This may impact performance, recommended for debugging -+ purposes only. -+ -+config FSL_DPAA2_ETH_DEBUGFS -+ depends on DEBUG_FS && FSL_QBMAN_DEBUG -+ bool "Enable debugfs support" -+ default n -+ ---help--- -+ Enable advanced statistics through debugfs interface. -+endif ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile -@@ -0,0 +1,21 @@ -+# -+# Makefile for the Freescale DPAA Ethernet controllers -+# -+# Copyright (C) 2014-2015 Freescale Semiconductor, Inc. -+# -+# This file is released under the GPLv2 -+# -+ -+ccflags-y += -DVERSION=\"\" -+ -+obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o -+ -+fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o -+fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o -+ -+#Needed by the tracing framework -+CFLAGS_dpaa2-eth.o := -I$(src) -+ -+ifeq ($(CONFIG_FSL_DPAA2_ETH_GCOV),y) -+ GCOV_PROFILE := y -+endif ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c -@@ -0,0 +1,319 @@ -+ -+/* Copyright 2015 Freescale Semiconductor Inc. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+ -+#include <linux/module.h> -+#include <linux/debugfs.h> -+#include "dpaa2-eth.h" -+#include "dpaa2-eth-debugfs.h" -+ -+#define DPAA2_ETH_DBG_ROOT "dpaa2-eth" -+ -+ -+static struct dentry *dpaa2_dbg_root; -+ -+static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset) -+{ -+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; -+ struct rtnl_link_stats64 *stats; -+ struct dpaa2_eth_stats *extras; -+ int i; -+ -+ seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name); -+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s\n", -+ "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf", -+ "Tx SG", "Enq busy"); -+ -+ for_each_online_cpu(i) { -+ stats = per_cpu_ptr(priv->percpu_stats, i); -+ extras = per_cpu_ptr(priv->percpu_extras, i); -+ seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n", -+ i, -+ stats->rx_packets, -+ stats->rx_errors, -+ extras->rx_sg_frames, -+ stats->tx_packets, -+ stats->tx_errors, -+ extras->tx_conf_frames, -+ extras->tx_sg_frames, -+ extras->tx_portal_busy); -+ } -+ -+ return 0; -+} -+ -+static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file) -+{ -+ int err; -+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; -+ -+ err = single_open(file, dpaa2_dbg_cpu_show, priv); -+ if (err < 0) -+ netdev_err(priv->net_dev, "single_open() failed\n"); -+ -+ return err; -+} -+ -+static const struct file_operations dpaa2_dbg_cpu_ops = { -+ .open = dpaa2_dbg_cpu_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = single_release, -+}; -+ -+static char *fq_type_to_str(struct dpaa2_eth_fq *fq) -+{ -+ switch (fq->type) { -+ case DPAA2_RX_FQ: -+ return "Rx"; -+ case DPAA2_TX_CONF_FQ: -+ return "Tx conf"; -+ case DPAA2_RX_ERR_FQ: -+ return "Rx err"; -+ default: -+ return "N/A"; 
-+ } -+} -+ -+static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset) -+{ -+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; -+ struct dpaa2_eth_fq *fq; -+ u32 fcnt, bcnt; -+ int i, err; -+ -+ seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name); -+ seq_printf(file, "%s%16s%16s%16s%16s\n", -+ "VFQID", "CPU", "Type", "Frames", "Pending frames"); -+ -+ for (i = 0; i < priv->num_fqs; i++) { -+ fq = &priv->fq[i]; -+ err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt); -+ if (err) -+ fcnt = 0; -+ -+ seq_printf(file, "%5d%16d%16s%16llu%16u\n", -+ fq->fqid, -+ fq->target_cpu, -+ fq_type_to_str(fq), -+ fq->stats.frames, -+ fcnt); -+ } -+ -+ return 0; -+} -+ -+static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file) -+{ -+ int err; -+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; -+ -+ err = single_open(file, dpaa2_dbg_fqs_show, priv); -+ if (err < 0) -+ netdev_err(priv->net_dev, "single_open() failed\n"); -+ -+ return err; -+} -+ -+static const struct file_operations dpaa2_dbg_fq_ops = { -+ .open = dpaa2_dbg_fqs_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = single_release, -+}; -+ -+static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset) -+{ -+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; -+ struct dpaa2_eth_channel *ch; -+ int i; -+ -+ seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name); -+ seq_printf(file, "%s%16s%16s%16s%16s%16s\n", -+ "CHID", "CPU", "Deq busy", "Frames", "CDANs", -+ "Avg frm/CDAN"); -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu\n", -+ ch->ch_id, -+ ch->nctx.desired_cpu, -+ ch->stats.dequeue_portal_busy, -+ ch->stats.frames, -+ ch->stats.cdan, -+ ch->stats.frames / ch->stats.cdan); -+ } -+ -+ return 0; -+} -+ -+static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file) -+{ -+ int err; -+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; -+ -+ err = single_open(file, dpaa2_dbg_ch_show, priv); -+ if (err < 0) -+ netdev_err(priv->net_dev, "single_open() failed\n"); -+ -+ return err; -+} -+ -+static const struct file_operations dpaa2_dbg_ch_ops = { -+ .open = dpaa2_dbg_ch_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = single_release, -+}; -+ -+static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf, -+ size_t count, loff_t *offset) -+{ -+ struct dpaa2_eth_priv *priv = file->private_data; -+ struct rtnl_link_stats64 *percpu_stats; -+ struct dpaa2_eth_stats *percpu_extras; -+ struct dpaa2_eth_fq *fq; -+ struct dpaa2_eth_channel *ch; -+ int i; -+ -+ for_each_online_cpu(i) { -+ percpu_stats = per_cpu_ptr(priv->percpu_stats, i); -+ memset(percpu_stats, 0, sizeof(*percpu_stats)); -+ -+ percpu_extras = per_cpu_ptr(priv->percpu_extras, i); -+ memset(percpu_extras, 0, sizeof(*percpu_extras)); -+ } -+ -+ for (i = 0; i < priv->num_fqs; i++) { -+ fq = &priv->fq[i]; -+ memset(&fq->stats, 0, sizeof(fq->stats)); -+ } -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ memset(&ch->stats, 0, sizeof(ch->stats)); -+ } -+ -+ return count; -+} -+ -+static const struct file_operations dpaa2_dbg_reset_ops = { -+ .open = simple_open, -+ .write = dpaa2_dbg_reset_write, -+}; -+ -+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) -+{ -+ if (!dpaa2_dbg_root) -+ return; -+ -+ /* Create a directory for the interface */ -+ priv->dbg.dir = debugfs_create_dir(priv->net_dev->name, -+ 
dpaa2_dbg_root); -+ if (!priv->dbg.dir) { -+ netdev_err(priv->net_dev, "debugfs_create_dir() failed\n"); -+ return; -+ } -+ -+ /* per-cpu stats file */ -+ priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", S_IRUGO, -+ priv->dbg.dir, priv, -+ &dpaa2_dbg_cpu_ops); -+ if (!priv->dbg.cpu_stats) { -+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); -+ goto err_cpu_stats; -+ } -+ -+ /* per-fq stats file */ -+ priv->dbg.fq_stats = debugfs_create_file("fq_stats", S_IRUGO, -+ priv->dbg.dir, priv, -+ &dpaa2_dbg_fq_ops); -+ if (!priv->dbg.fq_stats) { -+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); -+ goto err_fq_stats; -+ } -+ -+ /* per-channel stats file */ -+ priv->dbg.ch_stats = debugfs_create_file("ch_stats", S_IRUGO, -+ priv->dbg.dir, priv, -+ &dpaa2_dbg_ch_ops); -+ if (!priv->dbg.ch_stats) { -+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); -+ goto err_ch_stats; -+ } -+ -+ /* reset stats */ -+ priv->dbg.reset_stats = debugfs_create_file("reset_stats", S_IWUSR, -+ priv->dbg.dir, priv, -+ &dpaa2_dbg_reset_ops); -+ if (!priv->dbg.reset_stats) { -+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); -+ goto err_reset_stats; -+ } -+ -+ return; -+ -+err_reset_stats: -+ debugfs_remove(priv->dbg.ch_stats); -+err_ch_stats: -+ debugfs_remove(priv->dbg.fq_stats); -+err_fq_stats: -+ debugfs_remove(priv->dbg.cpu_stats); -+err_cpu_stats: -+ debugfs_remove(priv->dbg.dir); -+} -+ -+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) -+{ -+ debugfs_remove(priv->dbg.reset_stats); -+ debugfs_remove(priv->dbg.fq_stats); -+ debugfs_remove(priv->dbg.ch_stats); -+ debugfs_remove(priv->dbg.cpu_stats); -+ debugfs_remove(priv->dbg.dir); -+} -+ -+void dpaa2_eth_dbg_init(void) -+{ -+ dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL); -+ if (!dpaa2_dbg_root) { -+ pr_err("DPAA2-ETH: debugfs create failed\n"); -+ return; -+ } -+ -+ pr_info("DPAA2-ETH: debugfs created\n"); -+} -+ -+void __exit dpaa2_eth_dbg_exit(void) -+{ -+ debugfs_remove(dpaa2_dbg_root); -+} -+ ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h -@@ -0,0 +1,61 @@ -+/* Copyright 2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#ifndef DPAA2_ETH_DEBUGFS_H -+#define DPAA2_ETH_DEBUGFS_H -+ -+#include <linux/dcache.h> -+#include "dpaa2-eth.h" -+ -+extern struct dpaa2_eth_priv *priv; -+ -+struct dpaa2_debugfs { -+ struct dentry *dir; -+ struct dentry *fq_stats; -+ struct dentry *ch_stats; -+ struct dentry *cpu_stats; -+ struct dentry *reset_stats; -+}; -+ -+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS -+void dpaa2_eth_dbg_init(void); -+void dpaa2_eth_dbg_exit(void); -+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv); -+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv); -+#else -+static inline void dpaa2_eth_dbg_init(void) {} -+static inline void dpaa2_eth_dbg_exit(void) {} -+static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {} -+static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {} -+#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */ -+ -+#endif /* DPAA2_ETH_DEBUGFS_H */ -+ ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h -@@ -0,0 +1,185 @@ -+/* Copyright 2014-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#undef TRACE_SYSTEM -+#define TRACE_SYSTEM dpaa2_eth -+ -+#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) -+#define _DPAA2_ETH_TRACE_H -+ -+#include <linux/skbuff.h> -+#include <linux/netdevice.h> -+#include "dpaa2-eth.h" -+#include <linux/tracepoint.h> -+ -+#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u" -+/* trace_printk format for raw buffer event class */ -+#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d" -+ -+/* This is used to declare a class of events. -+ * Individual events of this type will be defined below. -+ */ -+ -+/* Store details about a frame descriptor */ -+DECLARE_EVENT_CLASS(dpaa2_eth_fd, -+ /* Trace function prototype */ -+ TP_PROTO(struct net_device *netdev, -+ const struct dpaa2_fd *fd), -+ -+ /* Repeat argument list here */ -+ TP_ARGS(netdev, fd), -+ -+ /* A structure containing the relevant information we want -+ * to record. Declare name and type for each normal element, -+ * name, type and size for arrays. Use __string for variable -+ * length strings. -+ */ -+ TP_STRUCT__entry( -+ __field(u64, fd_addr) -+ __field(u32, fd_len) -+ __field(u16, fd_offset) -+ __string(name, netdev->name) -+ ), -+ -+ /* The function that assigns values to the above declared -+ * fields -+ */ -+ TP_fast_assign( -+ __entry->fd_addr = dpaa2_fd_get_addr(fd); -+ __entry->fd_len = dpaa2_fd_get_len(fd); -+ __entry->fd_offset = dpaa2_fd_get_offset(fd); -+ __assign_str(name, netdev->name); -+ ), -+ -+ /* This is what gets printed when the trace event is -+ * triggered. -+ */ -+ TP_printk(TR_FMT, -+ __get_str(name), -+ __entry->fd_addr, -+ __entry->fd_len, -+ __entry->fd_offset) -+); -+ -+/* Now declare events of the above type. Format is: -+ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class -+ */ -+ -+/* Tx (egress) fd */ -+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd, -+ TP_PROTO(struct net_device *netdev, -+ const struct dpaa2_fd *fd), -+ -+ TP_ARGS(netdev, fd) -+); -+ -+/* Rx fd */ -+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd, -+ TP_PROTO(struct net_device *netdev, -+ const struct dpaa2_fd *fd), -+ -+ TP_ARGS(netdev, fd) -+); -+ -+/* Tx confirmation fd */ -+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd, -+ TP_PROTO(struct net_device *netdev, -+ const struct dpaa2_fd *fd), -+ -+ TP_ARGS(netdev, fd) -+); -+ -+/* Log data about raw buffers. Useful for tracing DPBP content. */ -+TRACE_EVENT(dpaa2_eth_buf_seed, -+ /* Trace function prototype */ -+ TP_PROTO(struct net_device *netdev, -+ /* virtual address and size */ -+ void *vaddr, -+ size_t size, -+ /* dma map address and size */ -+ dma_addr_t dma_addr, -+ size_t map_size, -+ /* buffer pool id, if relevant */ -+ u16 bpid), -+ -+ /* Repeat argument list here */ -+ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid), -+ -+ /* A structure containing the relevant information we want -+ * to record. Declare name and type for each normal element, -+ * name, type and size for arrays. Use __string for variable -+ * length strings. -+ */ -+ TP_STRUCT__entry( -+ __field(void *, vaddr) -+ __field(size_t, size) -+ __field(dma_addr_t, dma_addr) -+ __field(size_t, map_size) -+ __field(u16, bpid) -+ __string(name, netdev->name) -+ ), -+ -+ /* The function that assigns values to the above declared -+ * fields -+ */ -+ TP_fast_assign( -+ __entry->vaddr = vaddr; -+ __entry->size = size; -+ __entry->dma_addr = dma_addr; -+ __entry->map_size = map_size; -+ __entry->bpid = bpid; -+ __assign_str(name, netdev->name); -+ ), -+ -+ /* This is what gets printed when the trace event is -+ * triggered. 
-+ */ -+ TP_printk(TR_BUF_FMT, -+ __get_str(name), -+ __entry->vaddr, -+ __entry->size, -+ &__entry->dma_addr, -+ __entry->map_size, -+ __entry->bpid) -+); -+ -+/* If only one event of a certain type needs to be declared, use TRACE_EVENT(). -+ * The syntax is the same as for DECLARE_EVENT_CLASS(). -+ */ -+ -+#endif /* _DPAA2_ETH_TRACE_H */ -+ -+/* This must be outside ifdef _DPAA2_ETH_TRACE_H */ -+#undef TRACE_INCLUDE_PATH -+#define TRACE_INCLUDE_PATH . -+#undef TRACE_INCLUDE_FILE -+#define TRACE_INCLUDE_FILE dpaa2-eth-trace -+#include <trace/define_trace.h> ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c -@@ -0,0 +1,2793 @@ -+/* Copyright 2014-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "../../fsl-mc/include/mc.h" -+#include "../../fsl-mc/include/mc-sys.h" -+#include "dpaa2-eth.h" -+ -+/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files -+ * using trace events only need to #include -+ */ -+#define CREATE_TRACE_POINTS -+#include "dpaa2-eth-trace.h" -+ -+MODULE_LICENSE("Dual BSD/GPL"); -+MODULE_AUTHOR("Freescale Semiconductor, Inc"); -+MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver"); -+ -+static int debug = -1; -+module_param(debug, int, S_IRUGO); -+MODULE_PARM_DESC(debug, "Module/Driver verbosity level"); -+ -+/* Oldest DPAA2 objects version we are compatible with */ -+#define DPAA2_SUPPORTED_DPNI_VERSION 6 -+#define DPAA2_SUPPORTED_DPBP_VERSION 2 -+#define DPAA2_SUPPORTED_DPCON_VERSION 2 -+ -+/* Iterate through the cpumask in a round-robin fashion. 
*/ -+#define cpumask_rr(cpu, maskptr) \ -+do { \ -+ (cpu) = cpumask_next((cpu), (maskptr)); \ -+ if ((cpu) >= nr_cpu_ids) \ -+ (cpu) = cpumask_first((maskptr)); \ -+} while (0) -+ -+static void dpaa2_eth_rx_csum(struct dpaa2_eth_priv *priv, -+ u32 fd_status, -+ struct sk_buff *skb) -+{ -+ skb_checksum_none_assert(skb); -+ -+ /* HW checksum validation is disabled, nothing to do here */ -+ if (!(priv->net_dev->features & NETIF_F_RXCSUM)) -+ return; -+ -+ /* Read checksum validation bits */ -+ if (!((fd_status & DPAA2_ETH_FAS_L3CV) && -+ (fd_status & DPAA2_ETH_FAS_L4CV))) -+ return; -+ -+ /* Inform the stack there's no need to compute L3/L4 csum anymore */ -+ skb->ip_summed = CHECKSUM_UNNECESSARY; -+} -+ -+/* Free a received FD. -+ * Not to be used for Tx conf FDs or on any other paths. -+ */ -+static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv, -+ const struct dpaa2_fd *fd, -+ void *vaddr) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ dma_addr_t addr = dpaa2_fd_get_addr(fd); -+ u8 fd_format = dpaa2_fd_get_format(fd); -+ -+ if (fd_format == dpaa2_fd_sg) { -+ struct dpaa2_sg_entry *sgt = vaddr + dpaa2_fd_get_offset(fd); -+ void *sg_vaddr; -+ int i; -+ -+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { -+ dpaa2_sg_le_to_cpu(&sgt[i]); -+ -+ addr = dpaa2_sg_get_addr(&sgt[i]); -+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUFFER_SIZE, -+ DMA_FROM_DEVICE); -+ -+ sg_vaddr = phys_to_virt(addr); -+ put_page(virt_to_head_page(sg_vaddr)); -+ -+ if (dpaa2_sg_is_final(&sgt[i])) -+ break; -+ } -+ } -+ -+ put_page(virt_to_head_page(vaddr)); -+} -+ -+/* Build a linear skb based on a single-buffer frame descriptor */ -+static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch, -+ const struct dpaa2_fd *fd, -+ void *fd_vaddr) -+{ -+ struct sk_buff *skb = NULL; -+ u16 fd_offset = dpaa2_fd_get_offset(fd); -+ u32 fd_length = dpaa2_fd_get_len(fd); -+ -+ skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUFFER_SIZE + -+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); -+ if (unlikely(!skb)) { -+ netdev_err(priv->net_dev, "build_skb() failed\n"); -+ return NULL; -+ } -+ -+ skb_reserve(skb, fd_offset); -+ skb_put(skb, fd_length); -+ -+ ch->buf_count--; -+ -+ return skb; -+} -+ -+/* Build a non linear (fragmented) skb based on a S/G table */ -+static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch, -+ struct dpaa2_sg_entry *sgt) -+{ -+ struct sk_buff *skb = NULL; -+ struct device *dev = priv->net_dev->dev.parent; -+ void *sg_vaddr; -+ dma_addr_t sg_addr; -+ u16 sg_offset; -+ u32 sg_length; -+ struct page *page, *head_page; -+ int page_offset; -+ int i; -+ -+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { -+ struct dpaa2_sg_entry *sge = &sgt[i]; -+ -+ dpaa2_sg_le_to_cpu(sge); -+ -+ /* We don't support anything else yet! 
*/ -+ if (unlikely(dpaa2_sg_get_format(sge) != dpaa2_sg_single)) { -+ dev_warn_once(dev, "Unsupported S/G entry format: %d\n", -+ dpaa2_sg_get_format(sge)); -+ return NULL; -+ } -+ -+ /* Get the address, offset and length from the S/G entry */ -+ sg_addr = dpaa2_sg_get_addr(sge); -+ dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUFFER_SIZE, -+ DMA_FROM_DEVICE); -+ if (unlikely(dma_mapping_error(dev, sg_addr))) { -+ netdev_err(priv->net_dev, "DMA unmap failed\n"); -+ return NULL; -+ } -+ sg_vaddr = phys_to_virt(sg_addr); -+ sg_length = dpaa2_sg_get_len(sge); -+ -+ if (i == 0) { -+ /* We build the skb around the first data buffer */ -+ skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUFFER_SIZE + -+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); -+ if (unlikely(!skb)) { -+ netdev_err(priv->net_dev, "build_skb failed\n"); -+ return NULL; -+ } -+ sg_offset = dpaa2_sg_get_offset(sge); -+ skb_reserve(skb, sg_offset); -+ skb_put(skb, sg_length); -+ } else { -+ /* Subsequent data in SGEntries are stored at -+ * offset 0 in their buffers, we don't need to -+ * compute sg_offset. -+ */ -+ WARN_ONCE(dpaa2_sg_get_offset(sge) != 0, -+ "Non-zero offset in SGE[%d]!\n", i); -+ -+ /* Rest of the data buffers are stored as skb frags */ -+ page = virt_to_page(sg_vaddr); -+ head_page = virt_to_head_page(sg_vaddr); -+ -+ /* Offset in page (which may be compound) */ -+ page_offset = ((unsigned long)sg_vaddr & -+ (PAGE_SIZE - 1)) + -+ (page_address(page) - page_address(head_page)); -+ -+ skb_add_rx_frag(skb, i - 1, head_page, page_offset, -+ sg_length, DPAA2_ETH_RX_BUFFER_SIZE); -+ } -+ -+ if (dpaa2_sg_is_final(sge)) -+ break; -+ } -+ -+ /* Count all data buffers + sgt buffer */ -+ ch->buf_count -= i + 2; -+ -+ return skb; -+} -+ -+static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch, -+ const struct dpaa2_fd *fd, -+ struct napi_struct *napi) -+{ -+ dma_addr_t addr = dpaa2_fd_get_addr(fd); -+ u8 fd_format = dpaa2_fd_get_format(fd); -+ void *vaddr; -+ struct sk_buff *skb; -+ struct rtnl_link_stats64 *percpu_stats; -+ struct dpaa2_eth_stats *percpu_extras; -+ struct device *dev = priv->net_dev->dev.parent; -+ struct dpaa2_fas *fas; -+ u32 status = 0; -+ -+ /* Tracing point */ -+ trace_dpaa2_rx_fd(priv->net_dev, fd); -+ -+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUFFER_SIZE, DMA_FROM_DEVICE); -+ vaddr = phys_to_virt(addr); -+ -+ prefetch(vaddr + priv->buf_layout.private_data_size); -+ prefetch(vaddr + dpaa2_fd_get_offset(fd)); -+ -+ percpu_stats = this_cpu_ptr(priv->percpu_stats); -+ percpu_extras = this_cpu_ptr(priv->percpu_extras); -+ -+ if (fd_format == dpaa2_fd_single) { -+ skb = dpaa2_eth_build_linear_skb(priv, ch, fd, vaddr); -+ } else if (fd_format == dpaa2_fd_sg) { -+ struct dpaa2_sg_entry *sgt = -+ vaddr + dpaa2_fd_get_offset(fd); -+ skb = dpaa2_eth_build_frag_skb(priv, ch, sgt); -+ put_page(virt_to_head_page(vaddr)); -+ percpu_extras->rx_sg_frames++; -+ percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); -+ } else { -+ /* We don't support any other format */ -+ netdev_err(priv->net_dev, "Received invalid frame format\n"); -+ goto err_frame_format; -+ } -+ -+ if (unlikely(!skb)) { -+ dev_err_once(dev, "error building skb\n"); -+ goto err_build_skb; -+ } -+ -+ prefetch(skb->data); -+ -+ if (priv->ts_rx_en) { -+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); -+ u64 *ns = (u64 *) (vaddr + -+ priv->buf_layout.private_data_size + -+ sizeof(struct dpaa2_fas)); -+ -+ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * (*ns); -+ memset(shhwtstamps, 0, sizeof(*shhwtstamps)); -+ 
shhwtstamps->hwtstamp = ns_to_ktime(*ns); -+ } -+ -+ /* Check if we need to validate the L4 csum */ -+ if (likely(fd->simple.frc & DPAA2_FD_FRC_FASV)) { -+ fas = (struct dpaa2_fas *) -+ (vaddr + priv->buf_layout.private_data_size); -+ status = le32_to_cpu(fas->status); -+ dpaa2_eth_rx_csum(priv, status, skb); -+ } -+ -+ skb->protocol = eth_type_trans(skb, priv->net_dev); -+ -+ percpu_stats->rx_packets++; -+ percpu_stats->rx_bytes += skb->len; -+ -+ if (priv->net_dev->features & NETIF_F_GRO) -+ napi_gro_receive(napi, skb); -+ else -+ netif_receive_skb(skb); -+ -+ return; -+err_frame_format: -+err_build_skb: -+ dpaa2_eth_free_rx_fd(priv, fd, vaddr); -+ percpu_stats->rx_dropped++; -+} -+ -+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE -+static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch, -+ const struct dpaa2_fd *fd, -+ struct napi_struct *napi __always_unused) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ dma_addr_t addr = dpaa2_fd_get_addr(fd); -+ void *vaddr; -+ struct rtnl_link_stats64 *percpu_stats; -+ struct dpaa2_fas *fas; -+ u32 status = 0; -+ -+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUFFER_SIZE, DMA_FROM_DEVICE); -+ vaddr = phys_to_virt(addr); -+ -+ if (fd->simple.frc & DPAA2_FD_FRC_FASV) { -+ fas = (struct dpaa2_fas *) -+ (vaddr + priv->buf_layout.private_data_size); -+ status = le32_to_cpu(fas->status); -+ -+ /* All frames received on this queue should have at least -+ * one of the Rx error bits set */ -+ WARN_ON_ONCE((status & DPAA2_ETH_RX_ERR_MASK) == 0); -+ netdev_dbg(priv->net_dev, "Rx frame error: 0x%08x\n", -+ status & DPAA2_ETH_RX_ERR_MASK); -+ } -+ dpaa2_eth_free_rx_fd(priv, fd, vaddr); -+ -+ percpu_stats = this_cpu_ptr(priv->percpu_stats); -+ percpu_stats->rx_errors++; -+} -+#endif -+ -+/* Consume all frames pull-dequeued into the store. This is the simplest way to -+ * make sure we don't accidentally issue another volatile dequeue which would -+ * overwrite (leak) frames already in the store. -+ * -+ * Observance of NAPI budget is not our concern, leaving that to the caller. -+ */ -+static int dpaa2_eth_store_consume(struct dpaa2_eth_channel *ch) -+{ -+ struct dpaa2_eth_priv *priv = ch->priv; -+ struct dpaa2_eth_fq *fq; -+ struct dpaa2_dq *dq; -+ const struct dpaa2_fd *fd; -+ int cleaned = 0; -+ int is_last; -+ -+ do { -+ dq = dpaa2_io_store_next(ch->store, &is_last); -+ if (unlikely(!dq)) { -+ if (unlikely(!is_last)) { -+ netdev_dbg(priv->net_dev, -+ "Channel %d returned no valid frames\n", -+ ch->ch_id); -+ /* MUST retry until we get some sort of -+ * valid response token (be it "empty dequeue" -+ * or a valid frame). -+ */ -+ continue; -+ } -+ break; -+ } -+ -+ /* Obtain FD and process it */ -+ fd = dpaa2_dq_fd(dq); -+ fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq); -+ fq->stats.frames++; -+ -+ fq->consume(priv, ch, fd, &ch->napi); -+ cleaned++; -+ } while (!is_last); -+ -+ return cleaned; -+} -+ -+static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv, -+ struct sk_buff *skb, -+ struct dpaa2_fd *fd) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ void *sgt_buf = NULL; -+ dma_addr_t addr; -+ int nr_frags = skb_shinfo(skb)->nr_frags; -+ struct dpaa2_sg_entry *sgt; -+ int i, j, err; -+ int sgt_buf_size; -+ struct scatterlist *scl, *crt_scl; -+ int num_sg; -+ int num_dma_bufs; -+ struct dpaa2_eth_swa *bps; -+ -+ /* Create and map scatterlist. -+ * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have -+ * to go beyond nr_frags+1. 
-+ * Note: We don't support chained scatterlists -+ */ -+ WARN_ON(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1); -+ scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC); -+ if (unlikely(!scl)) -+ return -ENOMEM; -+ -+ sg_init_table(scl, nr_frags + 1); -+ num_sg = skb_to_sgvec(skb, scl, 0, skb->len); -+ num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE); -+ if (unlikely(!num_dma_bufs)) { -+ netdev_err(priv->net_dev, "dma_map_sg() error\n"); -+ err = -ENOMEM; -+ goto dma_map_sg_failed; -+ } -+ -+ /* Prepare the HW SGT structure */ -+ sgt_buf_size = priv->tx_data_offset + -+ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs); -+ sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC); -+ if (unlikely(!sgt_buf)) { -+ netdev_err(priv->net_dev, "failed to allocate SGT buffer\n"); -+ err = -ENOMEM; -+ goto sgt_buf_alloc_failed; -+ } -+ sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN); -+ -+ /* PTA from egress side is passed as is to the confirmation side so -+ * we need to clear some fields here in order to find consistent values -+ * on TX confirmation. We are clearing FAS (Frame Annotation Status) -+ * field here. -+ */ -+ memset(sgt_buf + priv->buf_layout.private_data_size, 0, 8); -+ -+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); -+ -+ /* Fill in the HW SGT structure. -+ * -+ * sgt_buf is zeroed out, so the following fields are implicit -+ * in all sgt entries: -+ * - offset is 0 -+ * - format is 'dpaa2_sg_single' -+ */ -+ for_each_sg(scl, crt_scl, num_dma_bufs, i) { -+ dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl)); -+ dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl)); -+ } -+ dpaa2_sg_set_final(&sgt[i - 1], true); -+ -+ /* Store the skb backpointer in the SGT buffer. -+ * Fit the scatterlist and the number of buffers alongside the -+ * skb backpointer in the SWA. We'll need all of them on Tx Conf. -+ */ -+ bps = (struct dpaa2_eth_swa *)sgt_buf; -+ bps->skb = skb; -+ bps->scl = scl; -+ bps->num_sg = num_sg; -+ bps->num_dma_bufs = num_dma_bufs; -+ -+ for (j = 0; j < i; j++) -+ dpaa2_sg_cpu_to_le(&sgt[j]); -+ -+ /* Separately map the SGT buffer */ -+ addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_TO_DEVICE); -+ if (unlikely(dma_mapping_error(dev, addr))) { -+ netdev_err(priv->net_dev, "dma_map_single() failed\n"); -+ err = -ENOMEM; -+ goto dma_map_single_failed; -+ } -+ dpaa2_fd_set_offset(fd, priv->tx_data_offset); -+ dpaa2_fd_set_format(fd, dpaa2_fd_sg); -+ dpaa2_fd_set_addr(fd, addr); -+ dpaa2_fd_set_len(fd, skb->len); -+ -+ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA | -+ DPAA2_FD_CTRL_PTV1; -+ -+ return 0; -+ -+dma_map_single_failed: -+ kfree(sgt_buf); -+sgt_buf_alloc_failed: -+ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE); -+dma_map_sg_failed: -+ kfree(scl); -+ return err; -+} -+ -+static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv, -+ struct sk_buff *skb, -+ struct dpaa2_fd *fd) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ u8 *buffer_start; -+ struct sk_buff **skbh; -+ dma_addr_t addr; -+ -+ buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset - -+ DPAA2_ETH_TX_BUF_ALIGN, -+ DPAA2_ETH_TX_BUF_ALIGN); -+ -+ /* PTA from egress side is passed as is to the confirmation side so -+ * we need to clear some fields here in order to find consistent values -+ * on TX confirmation. We are clearing FAS (Frame Annotation Status) -+ * field here. 
-+ */ -+ memset(buffer_start + priv->buf_layout.private_data_size, 0, 8); -+ -+ /* Store a backpointer to the skb at the beginning of the buffer -+ * (in the private data area) such that we can release it -+ * on Tx confirm -+ */ -+ skbh = (struct sk_buff **)buffer_start; -+ *skbh = skb; -+ -+ addr = dma_map_single(dev, -+ buffer_start, -+ skb_tail_pointer(skb) - buffer_start, -+ DMA_TO_DEVICE); -+ if (unlikely(dma_mapping_error(dev, addr))) { -+ dev_err(dev, "dma_map_single() failed\n"); -+ return -EINVAL; -+ } -+ -+ dpaa2_fd_set_addr(fd, addr); -+ dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start)); -+ dpaa2_fd_set_len(fd, skb->len); -+ dpaa2_fd_set_format(fd, dpaa2_fd_single); -+ -+ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA | -+ DPAA2_FD_CTRL_PTV1; -+ -+ return 0; -+} -+ -+/* DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb -+ * back-pointed to is also freed. -+ * This can be called either from dpaa2_eth_tx_conf() or on the error path of -+ * dpaa2_eth_tx(). -+ * Optionally, return the frame annotation status word (FAS), which needs -+ * to be checked if we're on the confirmation path. -+ */ -+static void dpaa2_eth_free_fd(const struct dpaa2_eth_priv *priv, -+ const struct dpaa2_fd *fd, -+ u32 *status) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ dma_addr_t fd_addr; -+ struct sk_buff **skbh, *skb; -+ unsigned char *buffer_start; -+ int unmap_size; -+ struct scatterlist *scl; -+ int num_sg, num_dma_bufs; -+ struct dpaa2_eth_swa *bps; -+ bool fd_single; -+ struct dpaa2_fas *fas; -+ -+ fd_addr = dpaa2_fd_get_addr(fd); -+ skbh = phys_to_virt(fd_addr); -+ fd_single = (dpaa2_fd_get_format(fd) == dpaa2_fd_single); -+ -+ if (fd_single) { -+ skb = *skbh; -+ buffer_start = (unsigned char *)skbh; -+ /* Accessing the skb buffer is safe before dma unmap, because -+ * we didn't map the actual skb shell. -+ */ -+ dma_unmap_single(dev, fd_addr, -+ skb_tail_pointer(skb) - buffer_start, -+ DMA_TO_DEVICE); -+ } else { -+ bps = (struct dpaa2_eth_swa *)skbh; -+ skb = bps->skb; -+ scl = bps->scl; -+ num_sg = bps->num_sg; -+ num_dma_bufs = bps->num_dma_bufs; -+ -+ /* Unmap the scatterlist */ -+ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE); -+ kfree(scl); -+ -+ /* Unmap the SGT buffer */ -+ unmap_size = priv->tx_data_offset + -+ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs); -+ dma_unmap_single(dev, fd_addr, unmap_size, DMA_TO_DEVICE); -+ } -+ -+ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { -+ struct skb_shared_hwtstamps shhwtstamps; -+ u64 *ns; -+ -+ memset(&shhwtstamps, 0, sizeof(shhwtstamps)); -+ -+ ns = (u64 *)((void *)skbh + -+ priv->buf_layout.private_data_size + -+ sizeof(struct dpaa2_fas)); -+ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * (*ns); -+ shhwtstamps.hwtstamp = ns_to_ktime(*ns); -+ skb_tstamp_tx(skb, &shhwtstamps); -+ } -+ -+ /* Check the status from the Frame Annotation after we unmap the first -+ * buffer but before we free it. 
-+ */ -+ if (status && (fd->simple.frc & DPAA2_FD_FRC_FASV)) { -+ fas = (struct dpaa2_fas *) -+ ((void *)skbh + priv->buf_layout.private_data_size); -+ *status = le32_to_cpu(fas->status); -+ } -+ -+ /* Free SGT buffer kmalloc'ed on tx */ -+ if (!fd_single) -+ kfree(skbh); -+ -+ /* Move on with skb release */ -+ dev_kfree_skb(skb); -+} -+ -+static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ struct dpaa2_fd fd; -+ struct rtnl_link_stats64 *percpu_stats; -+ struct dpaa2_eth_stats *percpu_extras; -+ int err, i; -+ /* TxConf FQ selection primarily based on cpu affinity; this is -+ * non-migratable context, so it's safe to call smp_processor_id(). -+ */ -+ u16 queue_mapping = smp_processor_id() % priv->dpni_attrs.max_senders; -+ -+ percpu_stats = this_cpu_ptr(priv->percpu_stats); -+ percpu_extras = this_cpu_ptr(priv->percpu_extras); -+ -+ /* Setup the FD fields */ -+ memset(&fd, 0, sizeof(fd)); -+ -+ if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) { -+ struct sk_buff *ns; -+ -+ dev_info_once(net_dev->dev.parent, -+ "skb headroom too small, must realloc.\n"); -+ ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv)); -+ if (unlikely(!ns)) { -+ percpu_stats->tx_dropped++; -+ goto err_alloc_headroom; -+ } -+ dev_kfree_skb(skb); -+ skb = ns; -+ } -+ -+ /* We'll be holding a back-reference to the skb until Tx Confirmation; -+ * we don't want that overwritten by a concurrent Tx with a cloned skb. -+ */ -+ skb = skb_unshare(skb, GFP_ATOMIC); -+ if (unlikely(!skb)) { -+ netdev_err(net_dev, "Out of memory for skb_unshare()"); -+ /* skb_unshare() has already freed the skb */ -+ percpu_stats->tx_dropped++; -+ return NETDEV_TX_OK; -+ } -+ -+ if (skb_is_nonlinear(skb)) { -+ err = dpaa2_eth_build_sg_fd(priv, skb, &fd); -+ percpu_extras->tx_sg_frames++; -+ percpu_extras->tx_sg_bytes += skb->len; -+ } else { -+ err = dpaa2_eth_build_single_fd(priv, skb, &fd); -+ } -+ -+ if (unlikely(err)) { -+ percpu_stats->tx_dropped++; -+ goto err_build_fd; -+ } -+ -+ /* Tracing point */ -+ trace_dpaa2_tx_fd(net_dev, &fd); -+ -+ for (i = 0; i < (DPAA2_ETH_MAX_TX_QUEUES << 1); i++) { -+ err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0, -+ priv->fq[queue_mapping].flowid, -+ &fd); -+ if (err != -EBUSY) -+ break; -+ } -+ percpu_extras->tx_portal_busy += i; -+ if (unlikely(err < 0)) { -+ netdev_dbg(net_dev, "error enqueueing Tx frame\n"); -+ percpu_stats->tx_errors++; -+ /* Clean up everything, including freeing the skb */ -+ dpaa2_eth_free_fd(priv, &fd, NULL); -+ } else { -+ percpu_stats->tx_packets++; -+ percpu_stats->tx_bytes += skb->len; -+ } -+ -+ return NETDEV_TX_OK; -+ -+err_build_fd: -+err_alloc_headroom: -+ dev_kfree_skb(skb); -+ -+ return NETDEV_TX_OK; -+} -+ -+static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch, -+ const struct dpaa2_fd *fd, -+ struct napi_struct *napi __always_unused) -+{ -+ struct rtnl_link_stats64 *percpu_stats; -+ struct dpaa2_eth_stats *percpu_extras; -+ u32 status = 0; -+ -+ /* Tracing point */ -+ trace_dpaa2_tx_conf_fd(priv->net_dev, fd); -+ -+ percpu_extras = this_cpu_ptr(priv->percpu_extras); -+ percpu_extras->tx_conf_frames++; -+ percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd); -+ -+ dpaa2_eth_free_fd(priv, fd, &status); -+ -+ if (unlikely(status & DPAA2_ETH_TXCONF_ERR_MASK)) { -+ netdev_err(priv->net_dev, "TxConf frame error(s): 0x%08x\n", -+ status & DPAA2_ETH_TXCONF_ERR_MASK); -+ percpu_stats = this_cpu_ptr(priv->percpu_stats); 
-+ /* Tx-conf logically pertains to the egress path. */ -+ percpu_stats->tx_errors++; -+ } -+} -+ -+static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) -+{ -+ int err; -+ -+ err = dpni_set_l3_chksum_validation(priv->mc_io, 0, priv->mc_token, -+ enable); -+ if (err) { -+ netdev_err(priv->net_dev, -+ "dpni_set_l3_chksum_validation() failed\n"); -+ return err; -+ } -+ -+ err = dpni_set_l4_chksum_validation(priv->mc_io, 0, priv->mc_token, -+ enable); -+ if (err) { -+ netdev_err(priv->net_dev, -+ "dpni_set_l4_chksum_validation failed\n"); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) -+{ -+ struct dpaa2_eth_fq *fq; -+ struct dpni_tx_flow_cfg tx_flow_cfg; -+ int err; -+ int i; -+ -+ memset(&tx_flow_cfg, 0, sizeof(tx_flow_cfg)); -+ tx_flow_cfg.options = DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN | -+ DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN; -+ tx_flow_cfg.l3_chksum_gen = enable; -+ tx_flow_cfg.l4_chksum_gen = enable; -+ -+ for (i = 0; i < priv->num_fqs; i++) { -+ fq = &priv->fq[i]; -+ if (fq->type != DPAA2_TX_CONF_FQ) -+ continue; -+ -+ /* The Tx flowid is kept in the corresponding TxConf FQ. */ -+ err = dpni_set_tx_flow(priv->mc_io, 0, priv->mc_token, -+ &fq->flowid, &tx_flow_cfg); -+ if (err) { -+ netdev_err(priv->net_dev, "dpni_set_tx_flow failed\n"); -+ return err; -+ } -+ } -+ -+ return 0; -+} -+ -+static int dpaa2_bp_add_7(struct dpaa2_eth_priv *priv, u16 bpid) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ u64 buf_array[7]; -+ void *buf; -+ dma_addr_t addr; -+ int i; -+ -+ for (i = 0; i < 7; i++) { -+ /* Allocate buffer visible to WRIOP + skb shared info + -+ * alignment padding -+ */ -+ buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE); -+ if (unlikely(!buf)) { -+ dev_err(dev, "buffer allocation failed\n"); -+ goto err_alloc; -+ } -+ buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN); -+ -+ addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUFFER_SIZE, -+ DMA_FROM_DEVICE); -+ if (unlikely(dma_mapping_error(dev, addr))) { -+ dev_err(dev, "dma_map_single() failed\n"); -+ goto err_map; -+ } -+ buf_array[i] = addr; -+ -+ /* tracing point */ -+ trace_dpaa2_eth_buf_seed(priv->net_dev, -+ buf, DPAA2_ETH_BUF_RAW_SIZE, -+ addr, DPAA2_ETH_RX_BUFFER_SIZE, -+ bpid); -+ } -+ -+release_bufs: -+ /* In case the portal is busy, retry until successful. -+ * The buffer release function would only fail if the QBMan portal -+ * was busy, which implies portal contention (i.e. more CPUs than -+ * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes, -+ * there is little we can realistically do, short of giving up - -+ * in which case we'd risk depleting the buffer pool and never again -+ * receiving the Rx interrupt which would kick-start the refill logic. -+ * So just keep retrying, at the risk of being moved to ksoftirqd. -+ */ -+ while (dpaa2_io_service_release(NULL, bpid, buf_array, i)) -+ cpu_relax(); -+ return i; -+ -+err_map: -+ put_page(virt_to_head_page(buf)); -+err_alloc: -+ if (i) -+ goto release_bufs; -+ -+ return 0; -+} -+ -+static int dpaa2_dpbp_seed(struct dpaa2_eth_priv *priv, u16 bpid) -+{ -+ int i, j; -+ int new_count; -+ -+ /* This is the lazy seeding of Rx buffer pools. -+ * dpaa2_bp_add_7() is also used on the Rx hotpath and calls -+ * napi_alloc_frag(). The trouble with that is that it in turn ends up -+ * calling this_cpu_ptr(), which mandates execution in atomic context. -+ * Rather than splitting up the code, do a one-off preempt disable. 
-+ */ -+ preempt_disable(); -+ for (j = 0; j < priv->num_channels; j++) { -+ for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += 7) { -+ new_count = dpaa2_bp_add_7(priv, bpid); -+ priv->channel[j]->buf_count += new_count; -+ -+ if (new_count < 7) { -+ preempt_enable(); -+ goto out_of_memory; -+ } -+ } -+ } -+ preempt_enable(); -+ -+ return 0; -+ -+out_of_memory: -+ return -ENOMEM; -+} -+ -+/** -+ * Drain the specified number of buffers from the DPNI's private buffer pool. -+ * @count must not exceed 7 -+ */ -+static void dpaa2_dpbp_drain_cnt(struct dpaa2_eth_priv *priv, int count) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ u64 buf_array[7]; -+ void *vaddr; -+ int ret, i; -+ -+ do { -+ ret = dpaa2_io_service_acquire(NULL, priv->dpbp_attrs.bpid, -+ buf_array, count); -+ if (ret < 0) { -+ pr_err("dpaa2_io_service_acquire() failed\n"); -+ return; -+ } -+ for (i = 0; i < ret; i++) { -+ /* Same logic as on regular Rx path */ -+ dma_unmap_single(dev, buf_array[i], -+ DPAA2_ETH_RX_BUFFER_SIZE, -+ DMA_FROM_DEVICE); -+ vaddr = phys_to_virt(buf_array[i]); -+ put_page(virt_to_head_page(vaddr)); -+ } -+ } while (ret); -+} -+ -+static void __dpaa2_dpbp_free(struct dpaa2_eth_priv *priv) -+{ -+ int i; -+ -+ dpaa2_dpbp_drain_cnt(priv, 7); -+ dpaa2_dpbp_drain_cnt(priv, 1); -+ -+ for (i = 0; i < priv->num_channels; i++) -+ priv->channel[i]->buf_count = 0; -+} -+ -+/* Function is called from softirq context only, so we don't need to guard -+ * the access to percpu count -+ */ -+static int dpaa2_dpbp_refill(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch, -+ u16 bpid) -+{ -+ int new_count; -+ int err = 0; -+ -+ if (unlikely(ch->buf_count < DPAA2_ETH_REFILL_THRESH)) { -+ do { -+ new_count = dpaa2_bp_add_7(priv, bpid); -+ if (unlikely(!new_count)) { -+ /* Out of memory; abort for now, we'll -+ * try later on -+ */ -+ break; -+ } -+ ch->buf_count += new_count; -+ } while (ch->buf_count < DPAA2_ETH_NUM_BUFS); -+ -+ if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS)) -+ err = -ENOMEM; -+ } -+ -+ return err; -+} -+ -+static int __dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch) -+{ -+ int err; -+ int dequeues = -1; -+ struct dpaa2_eth_priv *priv = ch->priv; -+ -+ /* Retry while portal is busy */ -+ do { -+ err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store); -+ dequeues++; -+ } while (err == -EBUSY); -+ if (unlikely(err)) -+ netdev_err(priv->net_dev, "dpaa2_io_service_pull err %d", err); -+ -+ ch->stats.dequeue_portal_busy += dequeues; -+ return err; -+} -+ -+static int dpaa2_eth_poll(struct napi_struct *napi, int budget) -+{ -+ struct dpaa2_eth_channel *ch; -+ int cleaned = 0, store_cleaned; -+ struct dpaa2_eth_priv *priv; -+ int err; -+ -+ ch = container_of(napi, struct dpaa2_eth_channel, napi); -+ priv = ch->priv; -+ -+ __dpaa2_eth_pull_channel(ch); -+ -+ do { -+ /* Refill pool if appropriate */ -+ dpaa2_dpbp_refill(priv, ch, priv->dpbp_attrs.bpid); -+ -+ store_cleaned = dpaa2_eth_store_consume(ch); -+ cleaned += store_cleaned; -+ -+ if (store_cleaned == 0 || -+ cleaned > budget - DPAA2_ETH_STORE_SIZE) -+ break; -+ -+ /* Try to dequeue some more */ -+ err = __dpaa2_eth_pull_channel(ch); -+ if (unlikely(err)) -+ break; -+ } while (1); -+ -+ if (cleaned < budget) { -+ napi_complete_done(napi, cleaned); -+ err = dpaa2_io_service_rearm(NULL, &ch->nctx); -+ if (unlikely(err)) -+ netdev_err(priv->net_dev, -+ "Notif rearm failed for channel %d\n", -+ ch->ch_id); -+ } -+ -+ ch->stats.frames += cleaned; -+ -+ return cleaned; -+} -+ -+static void dpaa2_eth_napi_enable(struct dpaa2_eth_priv 
*priv) -+{ -+ struct dpaa2_eth_channel *ch; -+ int i; -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ napi_enable(&ch->napi); -+ } -+} -+ -+static void dpaa2_eth_napi_disable(struct dpaa2_eth_priv *priv) -+{ -+ struct dpaa2_eth_channel *ch; -+ int i; -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ napi_disable(&ch->napi); -+ } -+} -+ -+static int dpaa2_link_state_update(struct dpaa2_eth_priv *priv) -+{ -+ struct dpni_link_state state; -+ int err; -+ -+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); -+ if (unlikely(err)) { -+ netdev_err(priv->net_dev, -+ "dpni_get_link_state() failed\n"); -+ return err; -+ } -+ -+ /* Check link state; speed / duplex changes are not treated yet */ -+ if (priv->link_state.up == state.up) -+ return 0; -+ -+ priv->link_state = state; -+ if (state.up) { -+ netif_carrier_on(priv->net_dev); -+ netif_tx_start_all_queues(priv->net_dev); -+ } else { -+ netif_tx_stop_all_queues(priv->net_dev); -+ netif_carrier_off(priv->net_dev); -+ } -+ -+ netdev_info(priv->net_dev, "Link Event: state %s", -+ state.up ? "up" : "down"); -+ -+ return 0; -+} -+ -+static int dpaa2_eth_open(struct net_device *net_dev) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int err; -+ -+ err = dpaa2_dpbp_seed(priv, priv->dpbp_attrs.bpid); -+ if (err) { -+ /* Not much to do; the buffer pool, though not filled up, -+ * may still contain some buffers which would enable us -+ * to limp on. -+ */ -+ netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", -+ priv->dpbp_dev->obj_desc.id, priv->dpbp_attrs.bpid); -+ } -+ -+ /* We'll only start the txqs when the link is actually ready; make sure -+ * we don't race against the link up notification, which may come -+ * immediately after dpni_enable(); -+ */ -+ netif_tx_stop_all_queues(net_dev); -+ dpaa2_eth_napi_enable(priv); -+ /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will -+ * return true and cause 'ip link show' to report the LOWER_UP flag, -+ * even though the link notification wasn't even received. -+ */ -+ netif_carrier_off(net_dev); -+ -+ err = dpni_enable(priv->mc_io, 0, priv->mc_token); -+ if (err < 0) { -+ dev_err(net_dev->dev.parent, "dpni_enable() failed\n"); -+ goto enable_err; -+ } -+ -+ /* If the DPMAC object has already processed the link up interrupt, -+ * we have to learn the link state ourselves. 
-+ */ -+ err = dpaa2_link_state_update(priv); -+ if (err < 0) { -+ dev_err(net_dev->dev.parent, "Can't update link state\n"); -+ goto link_state_err; -+ } -+ -+ return 0; -+ -+link_state_err: -+enable_err: -+ dpaa2_eth_napi_disable(priv); -+ __dpaa2_dpbp_free(priv); -+ return err; -+} -+ -+static int dpaa2_eth_stop(struct net_device *net_dev) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ -+ /* Stop Tx and Rx traffic */ -+ netif_tx_stop_all_queues(net_dev); -+ netif_carrier_off(net_dev); -+ dpni_disable(priv->mc_io, 0, priv->mc_token); -+ -+ msleep(500); -+ -+ dpaa2_eth_napi_disable(priv); -+ msleep(100); -+ -+ __dpaa2_dpbp_free(priv); -+ -+ return 0; -+} -+ -+static int dpaa2_eth_init(struct net_device *net_dev) -+{ -+ u64 supported = 0; -+ u64 not_supported = 0; -+ const struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ u32 options = priv->dpni_attrs.options; -+ -+ /* Capabilities listing */ -+ supported |= IFF_LIVE_ADDR_CHANGE | IFF_PROMISC | IFF_ALLMULTI; -+ -+ if (options & DPNI_OPT_UNICAST_FILTER) -+ supported |= IFF_UNICAST_FLT; -+ else -+ not_supported |= IFF_UNICAST_FLT; -+ -+ if (options & DPNI_OPT_MULTICAST_FILTER) -+ supported |= IFF_MULTICAST; -+ else -+ not_supported |= IFF_MULTICAST; -+ -+ net_dev->priv_flags |= supported; -+ net_dev->priv_flags &= ~not_supported; -+ -+ /* Features */ -+ net_dev->features = NETIF_F_RXCSUM | -+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | -+ NETIF_F_SG | NETIF_F_HIGHDMA | -+ NETIF_F_LLTX; -+ net_dev->hw_features = net_dev->features; -+ -+ return 0; -+} -+ -+static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ struct device *dev = net_dev->dev.parent; -+ int err; -+ -+ err = eth_mac_addr(net_dev, addr); -+ if (err < 0) { -+ dev_err(dev, "eth_mac_addr() failed with error %d\n", err); -+ return err; -+ } -+ -+ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, -+ net_dev->dev_addr); -+ if (err) { -+ dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err); -+ return err; -+ } -+ -+ return 0; -+} -+ -+/** Fill in counters maintained by the GPP driver. These may be different from -+ * the hardware counters obtained by ethtool. -+ */ -+static struct rtnl_link_stats64 -+*dpaa2_eth_get_stats(struct net_device *net_dev, -+ struct rtnl_link_stats64 *stats) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ struct rtnl_link_stats64 *percpu_stats; -+ u64 *cpustats; -+ u64 *netstats = (u64 *)stats; -+ int i, j; -+ int num = sizeof(struct rtnl_link_stats64) / sizeof(u64); -+ -+ for_each_possible_cpu(i) { -+ percpu_stats = per_cpu_ptr(priv->percpu_stats, i); -+ cpustats = (u64 *)percpu_stats; -+ for (j = 0; j < num; j++) -+ netstats[j] += cpustats[j]; -+ } -+ -+ return stats; -+} -+ -+static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int err; -+ -+ if (mtu < 68 || mtu > DPAA2_ETH_MAX_MTU) { -+ netdev_err(net_dev, "Invalid MTU %d. 
Valid range is: 68..%d\n", -+ mtu, DPAA2_ETH_MAX_MTU); -+ return -EINVAL; -+ } -+ -+ /* Set the maximum Rx frame length to match the transmit side; -+ * account for L2 headers when computing the MFL -+ */ -+ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, -+ (u16)DPAA2_ETH_L2_MAX_FRM(mtu)); -+ if (err) { -+ netdev_err(net_dev, "dpni_set_mfl() failed\n"); -+ return err; -+ } -+ -+ net_dev->mtu = mtu; -+ return 0; -+} -+ -+/* Convenience macro to make code littered with error checking more readable */ -+#define DPAA2_ETH_WARN_IF_ERR(err, netdevp, format, ...) \ -+do { \ -+ if (err) \ -+ netdev_warn(netdevp, format, ##__VA_ARGS__); \ -+} while (0) -+ -+/* Copy mac unicast addresses from @net_dev to @priv. -+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. -+ */ -+static void _dpaa2_eth_hw_add_uc_addr(const struct net_device *net_dev, -+ struct dpaa2_eth_priv *priv) -+{ -+ struct netdev_hw_addr *ha; -+ int err; -+ -+ netdev_for_each_uc_addr(ha, net_dev) { -+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, -+ ha->addr); -+ DPAA2_ETH_WARN_IF_ERR(err, priv->net_dev, -+ "Could not add ucast MAC %pM to the filtering table (err %d)\n", -+ ha->addr, err); -+ } -+} -+ -+/* Copy mac multicast addresses from @net_dev to @priv -+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. -+ */ -+static void _dpaa2_eth_hw_add_mc_addr(const struct net_device *net_dev, -+ struct dpaa2_eth_priv *priv) -+{ -+ struct netdev_hw_addr *ha; -+ int err; -+ -+ netdev_for_each_mc_addr(ha, net_dev) { -+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, -+ ha->addr); -+ DPAA2_ETH_WARN_IF_ERR(err, priv->net_dev, -+ "Could not add mcast MAC %pM to the filtering table (err %d)\n", -+ ha->addr, err); -+ } -+} -+ -+static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int uc_count = netdev_uc_count(net_dev); -+ int mc_count = netdev_mc_count(net_dev); -+ u8 max_uc = priv->dpni_attrs.max_unicast_filters; -+ u8 max_mc = priv->dpni_attrs.max_multicast_filters; -+ u32 options = priv->dpni_attrs.options; -+ u16 mc_token = priv->mc_token; -+ struct fsl_mc_io *mc_io = priv->mc_io; -+ int err; -+ -+ /* Basic sanity checks; these probably indicate a misconfiguration */ -+ if (!(options & DPNI_OPT_UNICAST_FILTER) && max_uc != 0) -+ netdev_info(net_dev, -+ "max_unicast_filters=%d, you must have DPNI_OPT_UNICAST_FILTER in the DPL\n", -+ max_uc); -+ if (!(options & DPNI_OPT_MULTICAST_FILTER) && max_mc != 0) -+ netdev_info(net_dev, -+ "max_multicast_filters=%d, you must have DPNI_OPT_MULTICAST_FILTER in the DPL\n", -+ max_mc); -+ -+ /* Force promiscuous if the uc or mc counts exceed our capabilities. */ -+ if (uc_count > max_uc) { -+ netdev_info(net_dev, -+ "Unicast addr count reached %d, max allowed is %d; forcing promisc\n", -+ uc_count, max_uc); -+ goto force_promisc; -+ } -+ if (mc_count > max_mc) { -+ netdev_info(net_dev, -+ "Multicast addr count reached %d, max allowed is %d; forcing promisc\n", -+ mc_count, max_mc); -+ goto force_mc_promisc; -+ } -+ -+ /* Adjust promisc settings due to flag combinations */ -+ if (net_dev->flags & IFF_PROMISC) { -+ goto force_promisc; -+ } else if (net_dev->flags & IFF_ALLMULTI) { -+ /* First, rebuild unicast filtering table. This should be done -+ * in promisc mode, in order to avoid frame loss while we -+ * progressively add entries to the table. 
-+ * We don't know whether we had been in promisc already, and -+ * making an MC call to find it is expensive; so set uc promisc -+ * nonetheless. -+ */ -+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); -+ DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't set uc promisc\n"); -+ -+ /* Actual uc table reconstruction. */ -+ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); -+ DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't clear uc filters\n"); -+ _dpaa2_eth_hw_add_uc_addr(net_dev, priv); -+ -+ /* Finally, clear uc promisc and set mc promisc as requested. */ -+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); -+ DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't clear uc promisc\n"); -+ goto force_mc_promisc; -+ } -+ -+ /* Neither unicast nor multicast promisc will be on... eventually. -+ * For now, rebuild mac filtering tables while forcing both of them on. -+ */ -+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); -+ DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't set uc promisc (%d)\n", err); -+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); -+ DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't set mc promisc (%d)\n", err); -+ -+ /* Actual mac filtering tables reconstruction */ -+ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); -+ DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't clear mac filters\n"); -+ _dpaa2_eth_hw_add_mc_addr(net_dev, priv); -+ _dpaa2_eth_hw_add_uc_addr(net_dev, priv); -+ -+ /* Now we can clear both ucast and mcast promisc, without risk of -+ * dropping legitimate frames anymore. -+ */ -+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); -+ DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't clear ucast promisc\n"); -+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0); -+ DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't clear mcast promisc\n"); -+ -+ return; -+ -+force_promisc: -+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); -+ DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't set ucast promisc\n"); -+force_mc_promisc: -+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); -+ DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't set mcast promisc\n"); -+} -+ -+static int dpaa2_eth_set_features(struct net_device *net_dev, -+ netdev_features_t features) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ netdev_features_t changed = features ^ net_dev->features; -+ int err; -+ -+ if (changed & NETIF_F_RXCSUM) { -+ bool enable = !!(features & NETIF_F_RXCSUM); -+ -+ err = dpaa2_eth_set_rx_csum(priv, enable); -+ if (err) -+ return err; -+ } -+ -+ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { -+ bool enable = !!(features & -+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); -+ err = dpaa2_eth_set_tx_csum(priv, enable); -+ if (err) -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(dev); -+ struct hwtstamp_config config; -+ -+ if (copy_from_user(&config, rq->ifr_data, sizeof(config))) -+ return -EFAULT; -+ -+ switch (config.tx_type) { -+ case HWTSTAMP_TX_OFF: -+ priv->ts_tx_en = false; -+ break; -+ case HWTSTAMP_TX_ON: -+ priv->ts_tx_en = true; -+ break; -+ default: -+ return -ERANGE; -+ } -+ -+ if (config.rx_filter == HWTSTAMP_FILTER_NONE) { -+ priv->ts_rx_en = false; -+ } else { -+ priv->ts_rx_en = true; -+ /* TS is set for all frame types, not only those requested */ -+ config.rx_filter = HWTSTAMP_FILTER_ALL; -+ } -+ -+ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
-+ -EFAULT : 0; -+} -+ -+static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -+{ -+ if (cmd == SIOCSHWTSTAMP) -+ return dpaa2_eth_ts_ioctl(dev, rq, cmd); -+ else -+ return -EINVAL; -+} -+ -+static const struct net_device_ops dpaa2_eth_ops = { -+ .ndo_open = dpaa2_eth_open, -+ .ndo_start_xmit = dpaa2_eth_tx, -+ .ndo_stop = dpaa2_eth_stop, -+ .ndo_init = dpaa2_eth_init, -+ .ndo_set_mac_address = dpaa2_eth_set_addr, -+ .ndo_get_stats64 = dpaa2_eth_get_stats, -+ .ndo_change_mtu = dpaa2_eth_change_mtu, -+ .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, -+ .ndo_set_features = dpaa2_eth_set_features, -+ .ndo_do_ioctl = dpaa2_eth_ioctl, -+}; -+ -+static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx) -+{ -+ struct dpaa2_eth_channel *ch; -+ -+ ch = container_of(ctx, struct dpaa2_eth_channel, nctx); -+ -+ /* Update NAPI statistics */ -+ ch->stats.cdan++; -+ -+ napi_schedule_irqoff(&ch->napi); -+} -+ -+static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv) -+{ -+ int i; -+ -+ /* We have one TxConf FQ per Tx flow */ -+ for (i = 0; i < priv->dpni_attrs.max_senders; i++) { -+ priv->fq[priv->num_fqs].netdev_priv = priv; -+ priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ; -+ priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf; -+ priv->fq[priv->num_fqs++].flowid = DPNI_NEW_FLOW_ID; -+ } -+ -+ /* The number of Rx queues (Rx distribution width) may be different from -+ * the number of cores. -+ * We only support one traffic class for now. -+ */ -+ for (i = 0; i < dpaa2_queue_count(priv); i++) { -+ priv->fq[priv->num_fqs].netdev_priv = priv; -+ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; -+ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; -+ priv->fq[priv->num_fqs++].flowid = (u16)i; -+ } -+ -+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE -+ /* We have exactly one Rx error queue per DPNI */ -+ priv->fq[priv->num_fqs].netdev_priv = priv; -+ priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ; -+ priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err; -+#endif -+} -+ -+static int check_obj_version(struct fsl_mc_device *ls_dev, u16 mc_version) -+{ -+ char *name = ls_dev->obj_desc.type; -+ struct device *dev = &ls_dev->dev; -+ u16 supported_version, flib_version; -+ -+ if (strcmp(name, "dpni") == 0) { -+ flib_version = DPNI_VER_MAJOR; -+ supported_version = DPAA2_SUPPORTED_DPNI_VERSION; -+ } else if (strcmp(name, "dpbp") == 0) { -+ flib_version = DPBP_VER_MAJOR; -+ supported_version = DPAA2_SUPPORTED_DPBP_VERSION; -+ } else if (strcmp(name, "dpcon") == 0) { -+ flib_version = DPCON_VER_MAJOR; -+ supported_version = DPAA2_SUPPORTED_DPCON_VERSION; -+ } else { -+ dev_err(dev, "invalid object type (%s)\n", name); -+ return -EINVAL; -+ } -+ -+ /* Check that the FLIB-defined version matches the one reported by MC */ -+ if (mc_version != flib_version) { -+ dev_err(dev, -+ "%s FLIB version mismatch: MC reports %d, we have %d\n", -+ name, mc_version, flib_version); -+ return -EINVAL; -+ } -+ -+ /* ... 
and that we actually support it */ -+ if (mc_version < supported_version) { -+ dev_err(dev, "Unsupported %s FLIB version (%d)\n", -+ name, mc_version); -+ return -EINVAL; -+ } -+ dev_dbg(dev, "Using %s FLIB version %d\n", name, mc_version); -+ -+ return 0; -+} -+ -+static struct fsl_mc_device *dpaa2_dpcon_setup(struct dpaa2_eth_priv *priv) -+{ -+ struct fsl_mc_device *dpcon; -+ struct device *dev = priv->net_dev->dev.parent; -+ struct dpcon_attr attrs; -+ int err; -+ -+ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), -+ FSL_MC_POOL_DPCON, &dpcon); -+ if (err) { -+ dev_info(dev, "Not enough DPCONs, will go on as-is\n"); -+ return NULL; -+ } -+ -+ err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); -+ if (err) { -+ dev_err(dev, "dpcon_open() failed\n"); -+ goto err_open; -+ } -+ -+ err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs); -+ if (err) { -+ dev_err(dev, "dpcon_get_attributes() failed\n"); -+ goto err_get_attr; -+ } -+ -+ err = check_obj_version(dpcon, attrs.version.major); -+ if (err) -+ goto err_dpcon_ver; -+ -+ err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); -+ if (err) { -+ dev_err(dev, "dpcon_enable() failed\n"); -+ goto err_enable; -+ } -+ -+ return dpcon; -+ -+err_enable: -+err_dpcon_ver: -+err_get_attr: -+ dpcon_close(priv->mc_io, 0, dpcon->mc_handle); -+err_open: -+ fsl_mc_object_free(dpcon); -+ -+ return NULL; -+} -+ -+static void dpaa2_dpcon_free(struct dpaa2_eth_priv *priv, -+ struct fsl_mc_device *dpcon) -+{ -+ dpcon_disable(priv->mc_io, 0, dpcon->mc_handle); -+ dpcon_close(priv->mc_io, 0, dpcon->mc_handle); -+ fsl_mc_object_free(dpcon); -+} -+ -+static struct dpaa2_eth_channel * -+dpaa2_alloc_channel(struct dpaa2_eth_priv *priv) -+{ -+ struct dpaa2_eth_channel *channel; -+ struct dpcon_attr attr; -+ struct device *dev = priv->net_dev->dev.parent; -+ int err; -+ -+ channel = kzalloc(sizeof(*channel), GFP_ATOMIC); -+ if (!channel) { -+ dev_err(dev, "Memory allocation failed\n"); -+ return NULL; -+ } -+ -+ channel->dpcon = dpaa2_dpcon_setup(priv); -+ if (!channel->dpcon) -+ goto err_setup; -+ -+ err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle, -+ &attr); -+ if (err) { -+ dev_err(dev, "dpcon_get_attributes() failed\n"); -+ goto err_get_attr; -+ } -+ -+ channel->dpcon_id = attr.id; -+ channel->ch_id = attr.qbman_ch_id; -+ channel->priv = priv; -+ -+ return channel; -+ -+err_get_attr: -+ dpaa2_dpcon_free(priv, channel->dpcon); -+err_setup: -+ kfree(channel); -+ return NULL; -+} -+ -+static void dpaa2_free_channel(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *channel) -+{ -+ dpaa2_dpcon_free(priv, channel->dpcon); -+ kfree(channel); -+} -+ -+static int dpaa2_dpio_setup(struct dpaa2_eth_priv *priv) -+{ -+ struct dpaa2_io_notification_ctx *nctx; -+ struct dpaa2_eth_channel *channel; -+ struct dpcon_notification_cfg dpcon_notif_cfg; -+ struct device *dev = priv->net_dev->dev.parent; -+ int i, err; -+ -+ /* Don't allocate more channels than strictly necessary and assign -+ * them to cores starting from the first one available in -+ * cpu_online_mask. -+ * If the number of channels is lower than the number of cores, -+ * there will be no rx/tx conf processing on the last cores in the mask. 
-+ */ -+ cpumask_clear(&priv->dpio_cpumask); -+ for_each_online_cpu(i) { -+ /* Try to allocate a channel */ -+ channel = dpaa2_alloc_channel(priv); -+ if (!channel) -+ goto err_alloc_ch; -+ -+ priv->channel[priv->num_channels] = channel; -+ -+ nctx = &channel->nctx; -+ nctx->is_cdan = 1; -+ nctx->cb = dpaa2_eth_cdan_cb; -+ nctx->id = channel->ch_id; -+ nctx->desired_cpu = i; -+ -+ /* Register the new context */ -+ err = dpaa2_io_service_register(NULL, nctx); -+ if (err) { -+ dev_info(dev, "No affine DPIO for core %d\n", i); -+ /* This core doesn't have an affine DPIO, but there's -+ * a chance another one does, so keep trying -+ */ -+ dpaa2_free_channel(priv, channel); -+ continue; -+ } -+ -+ /* Register DPCON notification with MC */ -+ dpcon_notif_cfg.dpio_id = nctx->dpio_id; -+ dpcon_notif_cfg.priority = 0; -+ dpcon_notif_cfg.user_ctx = nctx->qman64; -+ err = dpcon_set_notification(priv->mc_io, 0, -+ channel->dpcon->mc_handle, -+ &dpcon_notif_cfg); -+ if (err) { -+ dev_err(dev, "dpcon_set_notification() failed\n"); -+ goto err_set_cdan; -+ } -+ -+ /* If we managed to allocate a channel and also found an affine -+ * DPIO for this core, add it to the final mask -+ */ -+ cpumask_set_cpu(i, &priv->dpio_cpumask); -+ priv->num_channels++; -+ -+ if (priv->num_channels == dpaa2_max_channels(priv)) -+ break; -+ } -+ -+ /* Tx confirmation queues can only be serviced by cpus -+ * with an affine DPIO/channel -+ */ -+ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask); -+ -+ return 0; -+ -+err_set_cdan: -+ dpaa2_io_service_deregister(NULL, nctx); -+ dpaa2_free_channel(priv, channel); -+err_alloc_ch: -+ if (cpumask_empty(&priv->dpio_cpumask)) { -+ dev_err(dev, "No cpu with an affine DPIO/DPCON\n"); -+ return -ENODEV; -+ } -+ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask); -+ -+ return 0; -+} -+ -+static void dpaa2_dpio_free(struct dpaa2_eth_priv *priv) -+{ -+ int i; -+ struct dpaa2_eth_channel *ch; -+ -+ /* deregister CDAN notifications and free channels */ -+ for (i = 0; i < priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ dpaa2_io_service_deregister(NULL, &ch->nctx); -+ dpaa2_free_channel(priv, ch); -+ } -+} -+ -+static struct dpaa2_eth_channel * -+dpaa2_get_channel_by_cpu(struct dpaa2_eth_priv *priv, int cpu) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ int i; -+ -+ for (i = 0; i < priv->num_channels; i++) -+ if (priv->channel[i]->nctx.desired_cpu == cpu) -+ return priv->channel[i]; -+ -+ /* We should never get here. Issue a warning and return -+ * the first channel, because it's still better than nothing -+ */ -+ dev_warn(dev, "No affine channel found for cpu %d\n", cpu); -+ -+ return priv->channel[0]; -+} -+ -+static void dpaa2_set_fq_affinity(struct dpaa2_eth_priv *priv) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ struct dpaa2_eth_fq *fq; -+ int rx_cpu, txconf_cpu; -+ int i; -+ -+ /* For each FQ, pick one channel/CPU to deliver frames to. -+ * This may well change at runtime, either through irqbalance or -+ * through direct user intervention. -+ */
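-+ /* cpumask_rr() (a helper defined elsewhere in this patch) is assumed -+ * to advance the given CPU to the next one in the mask, wrapping -+ * around, so FQs get spread round-robin across the affine cores. -+ */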
-+ rx_cpu = cpumask_first(&priv->dpio_cpumask); -+ txconf_cpu = cpumask_first(&priv->txconf_cpumask); -+ -+ for (i = 0; i < priv->num_fqs; i++) { -+ fq = &priv->fq[i]; -+ switch (fq->type) { -+ case DPAA2_RX_FQ: -+ case DPAA2_RX_ERR_FQ: -+ fq->target_cpu = rx_cpu; -+ cpumask_rr(rx_cpu, &priv->dpio_cpumask); -+ break; -+ case DPAA2_TX_CONF_FQ: -+ fq->target_cpu = txconf_cpu; -+ cpumask_rr(txconf_cpu, &priv->txconf_cpumask); -+ break; -+ default: -+ dev_err(dev, "Unknown FQ type: %d\n", fq->type); -+ } -+ fq->channel = dpaa2_get_channel_by_cpu(priv, fq->target_cpu); -+ } -+} -+ -+static int dpaa2_dpbp_setup(struct dpaa2_eth_priv *priv) -+{ -+ int err; -+ struct fsl_mc_device *dpbp_dev; -+ struct device *dev = priv->net_dev->dev.parent; -+ -+ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, -+ &dpbp_dev); -+ if (err) { -+ dev_err(dev, "DPBP device allocation failed\n"); -+ return err; -+ } -+ -+ priv->dpbp_dev = dpbp_dev; -+ -+ err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id, -+ &dpbp_dev->mc_handle); -+ if (err) { -+ dev_err(dev, "dpbp_open() failed\n"); -+ goto err_open; -+ } -+ -+ err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle); -+ if (err) { -+ dev_err(dev, "dpbp_enable() failed\n"); -+ goto err_enable; -+ } -+ -+ err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle, -+ &priv->dpbp_attrs); -+ if (err) { -+ dev_err(dev, "dpbp_get_attributes() failed\n"); -+ goto err_get_attr; -+ } -+ -+ err = check_obj_version(dpbp_dev, priv->dpbp_attrs.version.major); -+ if (err) -+ goto err_dpbp_ver; -+ -+ return 0; -+ -+err_dpbp_ver: -+err_get_attr: -+ dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); -+err_enable: -+ dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); -+err_open: -+ fsl_mc_object_free(dpbp_dev); -+ -+ return err; -+} -+ -+static void dpaa2_dpbp_free(struct dpaa2_eth_priv *priv) -+{ -+ __dpaa2_dpbp_free(priv); -+ dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); -+ dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); -+ fsl_mc_object_free(priv->dpbp_dev); -+} -+ -+static int dpaa2_dpni_setup(struct fsl_mc_device *ls_dev) -+{ -+ struct device *dev = &ls_dev->dev; -+ struct dpaa2_eth_priv *priv; -+ struct net_device *net_dev; -+ void *dma_mem; -+ int err; -+ -+ net_dev = dev_get_drvdata(dev); -+ priv = netdev_priv(net_dev); -+ -+ priv->dpni_id = ls_dev->obj_desc.id; -+ -+ /* and get a handle for the DPNI this interface is associated with */ -+ err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token); -+ if (err) { -+ dev_err(dev, "dpni_open() failed\n"); -+ goto err_open; -+ } -+ -+ ls_dev->mc_io = priv->mc_io; -+ ls_dev->mc_handle = priv->mc_token; -+ -+ dma_mem = kzalloc(DPAA2_EXT_CFG_SIZE, GFP_DMA | GFP_KERNEL); -+ if (!dma_mem) { -+ err = -ENOMEM; -+ goto err_alloc; -+ } -+ -+ priv->dpni_attrs.ext_cfg_iova = dma_map_single(dev, dma_mem, -+ DPAA2_EXT_CFG_SIZE, -+ DMA_FROM_DEVICE); -+ if (dma_mapping_error(dev, priv->dpni_attrs.ext_cfg_iova)) { -+ dev_err(dev, "dma mapping for dpni_ext_cfg failed\n"); -+ err = -ENOMEM; -+ goto err_dma_map; -+ } -+ -+ err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, -+ &priv->dpni_attrs); -+ if (err) { -+ dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err); -+ dma_unmap_single(dev, priv->dpni_attrs.ext_cfg_iova, -+ DPAA2_EXT_CFG_SIZE, DMA_FROM_DEVICE); -+ goto err_get_attr; -+ } -+ -+ err = check_obj_version(ls_dev, priv->dpni_attrs.version.major); -+ if (err) -+ goto err_dpni_ver; -+ -+ dma_unmap_single(dev, priv->dpni_attrs.ext_cfg_iova, -+ DPAA2_EXT_CFG_SIZE, DMA_FROM_DEVICE); -+
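-+ /* The unmap above returns ownership of the extended config buffer to -+ * the CPU, so the attributes the MC wrote during dpni_get_attributes() -+ * are coherent and safe to parse below. -+ */ -+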
memset(&priv->dpni_ext_cfg, 0, sizeof(priv->dpni_ext_cfg)); -+ err = dpni_extract_extended_cfg(&priv->dpni_ext_cfg, dma_mem); -+ if (err) { -+ dev_err(dev, "dpni_extract_extended_cfg() failed\n"); -+ goto err_extract; -+ } -+ -+ /* Configure our buffers' layout */ -+ priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | -+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | -+ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE | -+ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN; -+ priv->buf_layout.pass_parser_result = true; -+ priv->buf_layout.pass_frame_status = true; -+ priv->buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; -+ /* HW erratum mandates data alignment in multiples of 256 */ -+ priv->buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN; -+ /* ...rx, ... */ -+ err = dpni_set_rx_buffer_layout(priv->mc_io, 0, priv->mc_token, -+ &priv->buf_layout); -+ if (err) { -+ dev_err(dev, "dpni_set_rx_buffer_layout() failed"); -+ goto err_buf_layout; -+ } -+ /* ... tx, ... */ -+ /* remove Rx-only options */ -+ priv->buf_layout.options &= ~(DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | -+ DPNI_BUF_LAYOUT_OPT_PARSER_RESULT); -+ err = dpni_set_tx_buffer_layout(priv->mc_io, 0, priv->mc_token, -+ &priv->buf_layout); -+ if (err) { -+ dev_err(dev, "dpni_set_tx_buffer_layout() failed"); -+ goto err_buf_layout; -+ } -+ /* ... tx-confirm. */ -+ priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE; -+ priv->buf_layout.options |= DPNI_BUF_LAYOUT_OPT_TIMESTAMP; -+ priv->buf_layout.pass_timestamp = 1; -+ err = dpni_set_tx_conf_buffer_layout(priv->mc_io, 0, priv->mc_token, -+ &priv->buf_layout); -+ if (err) { -+ dev_err(dev, "dpni_set_tx_conf_buffer_layout() failed"); -+ goto err_buf_layout; -+ } -+ /* Now that we've set our tx buffer layout, retrieve the minimum -+ * required tx data offset. -+ */ -+ err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token, -+ &priv->tx_data_offset); -+ if (err) { -+ dev_err(dev, "dpni_get_tx_data_offset() failed\n"); -+ goto err_data_offset; -+ } -+ -+ /* Warn in case TX data offset is not multiple of 64 bytes. */ -+ WARN_ON(priv->tx_data_offset % 64); -+ -+ /* Accommodate SWA space. 
*/ -+ priv->tx_data_offset += DPAA2_ETH_SWA_SIZE; -+ -+ /* allocate classification rule space */ -+ priv->cls_rule = kzalloc(sizeof(*priv->cls_rule) * -+ DPAA2_CLASSIFIER_ENTRY_COUNT, GFP_KERNEL); -+ if (!priv->cls_rule) { -+ err = -ENOMEM; -+ goto err_cls_rule; -+ } -+ -+ kfree(dma_mem); -+ -+ return 0; -+ -+err_cls_rule: -+err_data_offset: -+err_buf_layout: -+err_extract: -+err_dpni_ver: -+err_get_attr: -+err_dma_map: -+ kfree(dma_mem); -+err_alloc: -+ dpni_close(priv->mc_io, 0, priv->mc_token); -+err_open: -+ return err; -+} -+ -+static void dpaa2_dpni_free(struct dpaa2_eth_priv *priv) -+{ -+ int err; -+ -+ err = dpni_reset(priv->mc_io, 0, priv->mc_token); -+ if (err) -+ netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n", -+ err); -+ -+ dpni_close(priv->mc_io, 0, priv->mc_token); -+} -+ -+static int dpaa2_rx_flow_setup(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_fq *fq) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ struct dpni_queue_attr rx_queue_attr; -+ struct dpni_queue_cfg queue_cfg; -+ int err; -+ -+ memset(&queue_cfg, 0, sizeof(queue_cfg)); -+ queue_cfg.options = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST | -+ DPNI_QUEUE_OPT_TAILDROP_THRESHOLD; -+ queue_cfg.dest_cfg.dest_type = DPNI_DEST_DPCON; -+ queue_cfg.dest_cfg.priority = 1; -+ queue_cfg.user_ctx = (u64)fq; -+ queue_cfg.dest_cfg.dest_id = fq->channel->dpcon_id; -+ queue_cfg.tail_drop_threshold = DPAA2_ETH_TAILDROP_THRESH; -+ err = dpni_set_rx_flow(priv->mc_io, 0, priv->mc_token, 0, fq->flowid, -+ &queue_cfg); -+ if (err) { -+ dev_err(dev, "dpni_set_rx_flow() failed\n"); -+ return err; -+ } -+ -+ /* Get the actual FQID that was assigned by MC */ -+ err = dpni_get_rx_flow(priv->mc_io, 0, priv->mc_token, 0, fq->flowid, -+ &rx_queue_attr); -+ if (err) { -+ dev_err(dev, "dpni_get_rx_flow() failed\n"); -+ return err; -+ } -+ fq->fqid = rx_queue_attr.fqid; -+ -+ return 0; -+} -+ -+static int dpaa2_tx_flow_setup(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_fq *fq) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ struct dpni_tx_flow_cfg tx_flow_cfg; -+ struct dpni_tx_conf_cfg tx_conf_cfg; -+ struct dpni_tx_conf_attr tx_conf_attr; -+ int err; -+ -+ memset(&tx_flow_cfg, 0, sizeof(tx_flow_cfg)); -+ tx_flow_cfg.options = DPNI_TX_FLOW_OPT_TX_CONF_ERROR; -+ tx_flow_cfg.use_common_tx_conf_queue = 0; -+ err = dpni_set_tx_flow(priv->mc_io, 0, priv->mc_token, -+ &fq->flowid, &tx_flow_cfg); -+ if (err) { -+ dev_err(dev, "dpni_set_tx_flow() failed\n"); -+ return err; -+ } -+ -+ tx_conf_cfg.errors_only = 0; -+ tx_conf_cfg.queue_cfg.options = DPNI_QUEUE_OPT_USER_CTX | -+ DPNI_QUEUE_OPT_DEST; -+ tx_conf_cfg.queue_cfg.user_ctx = (u64)fq; -+ tx_conf_cfg.queue_cfg.dest_cfg.dest_type = DPNI_DEST_DPCON; -+ tx_conf_cfg.queue_cfg.dest_cfg.dest_id = fq->channel->dpcon_id; -+ tx_conf_cfg.queue_cfg.dest_cfg.priority = 0; -+ -+ err = dpni_set_tx_conf(priv->mc_io, 0, priv->mc_token, fq->flowid, -+ &tx_conf_cfg); -+ if (err) { -+ dev_err(dev, "dpni_set_tx_conf() failed\n"); -+ return err; -+ } -+ -+ err = dpni_get_tx_conf(priv->mc_io, 0, priv->mc_token, fq->flowid, -+ &tx_conf_attr); -+ if (err) { -+ dev_err(dev, "dpni_get_tx_conf() failed\n"); -+ return err; -+ } -+ -+ fq->fqid = tx_conf_attr.queue_attr.fqid; -+ -+ return 0; -+} -+ -+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE -+static int dpaa2_rx_err_setup(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_fq *fq) -+{ -+ struct dpni_queue_attr queue_attr; -+ struct dpni_queue_cfg queue_cfg; -+ int err; -+ -+ /* Configure the Rx error queue to generate CDANs, -+ * just like the Rx queues. -+ */
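-+ /* Zero-init so fields we don't explicitly set below don't pass stale -+ * stack data to the MC; mirrors the regular Rx path in -+ * dpaa2_rx_flow_setup(). -+ */ -+ memset(&queue_cfg, 0, sizeof(queue_cfg)); -+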
queue_cfg.options = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST; -+ queue_cfg.dest_cfg.dest_type = DPNI_DEST_DPCON; -+ queue_cfg.dest_cfg.priority = 1; -+ queue_cfg.user_ctx = (u64)fq; -+ queue_cfg.dest_cfg.dest_id = fq->channel->dpcon_id; -+ err = dpni_set_rx_err_queue(priv->mc_io, 0, priv->mc_token, &queue_cfg); -+ if (err) { -+ netdev_err(priv->net_dev, "dpni_set_rx_err_queue() failed\n"); -+ return err; -+ } -+ -+ /* Get the FQID */ -+ err = dpni_get_rx_err_queue(priv->mc_io, 0, priv->mc_token, &queue_attr); -+ if (err) { -+ netdev_err(priv->net_dev, "dpni_get_rx_err_queue() failed\n"); -+ return err; -+ } -+ fq->fqid = queue_attr.fqid; -+ -+ return 0; -+} -+#endif -+ -+static int dpaa2_dpni_bind(struct dpaa2_eth_priv *priv) -+{ -+ struct net_device *net_dev = priv->net_dev; -+ struct device *dev = net_dev->dev.parent; -+ struct dpni_pools_cfg pools_params; -+ struct dpni_error_cfg err_cfg; -+ int err = 0; -+ int i; -+ -+ pools_params.num_dpbp = 1; -+ pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; -+ pools_params.pools[0].backup_pool = 0; -+ pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUFFER_SIZE; -+ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); -+ if (err) { -+ dev_err(dev, "dpni_set_pools() failed\n"); -+ return err; -+ } -+ -+ dpaa2_cls_check(net_dev); -+ -+ /* Have the interface implicitly distribute traffic based on supported -+ * header fields -+ */ -+ if (dpaa2_eth_hash_enabled(priv)) { -+ err = dpaa2_set_hash(net_dev, DPAA2_RXH_SUPPORTED); -+ if (err) -+ return err; -+ } -+ -+ /* Configure handling of error frames */ -+ err_cfg.errors = DPAA2_ETH_RX_ERR_MASK; -+ err_cfg.set_frame_annotation = 1; -+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE -+ err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE; -+#else -+ err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD; -+#endif -+ err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, -+ &err_cfg); -+ if (err) { -+ dev_err(dev, "dpni_set_errors_behavior() failed\n"); -+ return err; -+ } -+ -+ /* Configure Rx and Tx conf queues to generate CDANs */ -+ for (i = 0; i < priv->num_fqs; i++) { -+ switch (priv->fq[i].type) { -+ case DPAA2_RX_FQ: -+ err = dpaa2_rx_flow_setup(priv, &priv->fq[i]); -+ break; -+ case DPAA2_TX_CONF_FQ: -+ err = dpaa2_tx_flow_setup(priv, &priv->fq[i]); -+ break; -+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE -+ case DPAA2_RX_ERR_FQ: -+ err = dpaa2_rx_err_setup(priv, &priv->fq[i]); -+ break; -+#endif -+ default: -+ dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type); -+ return -EINVAL; -+ } -+ if (err) -+ return err; -+ } -+ -+ err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, &priv->tx_qdid); -+ if (err) { -+ dev_err(dev, "dpni_get_qdid() failed\n"); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv) -+{ -+ struct net_device *net_dev = priv->net_dev; -+ struct device *dev = net_dev->dev.parent; -+ int i; -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ priv->channel[i]->store = -+ dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev); -+ if (!priv->channel[i]->store) { -+ netdev_err(net_dev, "dpaa2_io_store_create() failed\n"); -+ goto err_ring; -+ } -+ } -+ -+ return 0; -+ -+err_ring: -+ for (i = 0; i < priv->num_channels; i++) { -+ if (!priv->channel[i]->store) -+ break; -+ dpaa2_io_store_destroy(priv->channel[i]->store); -+ } -+ -+ return -ENOMEM; -+} -+ -+static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv) -+{ -+ int i; -+ -+ for (i = 0; i < priv->num_channels; i++) -+
dpaa2_io_store_destroy(priv->channel[i]->store); -+} -+ -+static int dpaa2_eth_netdev_init(struct net_device *net_dev) -+{ -+ int err; -+ struct device *dev = net_dev->dev.parent; -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ u8 mac_addr[ETH_ALEN]; -+ u8 bcast_addr[ETH_ALEN]; -+ -+ net_dev->netdev_ops = &dpaa2_eth_ops; -+ -+ /* If the DPL contains all-0 mac_addr, set a random hardware address */ -+ err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token, -+ mac_addr); -+ if (err) { -+ dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)", err); -+ return err; -+ } -+ if (is_zero_ether_addr(mac_addr)) { -+ /* Fills in net_dev->dev_addr, as required by -+ * register_netdevice() -+ */ -+ eth_hw_addr_random(net_dev); -+ /* Make the user aware, without cluttering the boot log */ -+ pr_info_once(KBUILD_MODNAME " device(s) have all-zero hwaddr, replaced with random"); -+ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, -+ net_dev->dev_addr); -+ if (err) { -+ dev_err(dev, "dpni_set_primary_mac_addr(): %d\n", err); -+ return err; -+ } -+ /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all -+ * practical purposes, this will be our "permanent" mac address, -+ * at least until the next reboot. This move will also permit -+ * register_netdevice() to properly fill up net_dev->perm_addr. -+ */ -+ net_dev->addr_assign_type = NET_ADDR_PERM; -+ } else { -+ /* NET_ADDR_PERM is default, all we have to do is -+ * fill in the device addr. -+ */ -+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); -+ } -+ -+ /* Explicitly add the broadcast address to the MAC filtering table; -+ * the MC won't do that for us. -+ */ -+ eth_broadcast_addr(bcast_addr); -+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr); -+ if (err) { -+ dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err); -+ /* Won't return an error; at least, we'd have egress traffic */ -+ } -+ -+ /* Reserve enough space to align buffer as per hardware requirement; -+ * NOTE: priv->tx_data_offset MUST be initialized at this point. 
-+ */ -+ net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv); -+ -+ /* Our .ndo_init will be called herein */ -+ err = register_netdev(net_dev); -+ if (err < 0) { -+ dev_err(dev, "register_netdev() = %d\n", err); -+ return err; -+ } -+ -+ return 0; -+} -+ -+#ifdef CONFIG_FSL_DPAA2_ETH_LINK_POLL -+static int dpaa2_poll_link_state(void *arg) -+{ -+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg; -+ int err; -+ -+ while (!kthread_should_stop()) { -+ err = dpaa2_link_state_update(priv); -+ if (unlikely(err)) -+ return err; -+ -+ msleep(DPAA2_ETH_LINK_STATE_REFRESH); -+ } -+ -+ return 0; -+} -+#else -+static irqreturn_t dpni_irq0_handler(int irq_num, void *arg) -+{ -+ return IRQ_WAKE_THREAD; -+} -+ -+static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) -+{ -+ u8 irq_index = DPNI_IRQ_INDEX; -+ u32 status, clear = 0; -+ struct device *dev = (struct device *)arg; -+ struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev); -+ struct net_device *net_dev = dev_get_drvdata(dev); -+ int err; -+ -+ netdev_dbg(net_dev, "IRQ %d received\n", irq_num); -+ err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, -+ irq_index, &status); -+ if (unlikely(err)) { -+ netdev_err(net_dev, "Can't get irq status (err %d)", err); -+ clear = 0xffffffff; -+ goto out; -+ } -+ -+ if (status & DPNI_IRQ_EVENT_LINK_CHANGED) { -+ clear |= DPNI_IRQ_EVENT_LINK_CHANGED; -+ dpaa2_link_state_update(netdev_priv(net_dev)); -+ } -+ -+out: -+ dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, -+ irq_index, clear); -+ return IRQ_HANDLED; -+} -+ -+static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev) -+{ -+ int err = 0; -+ struct fsl_mc_device_irq *irq; -+ int irq_count = ls_dev->obj_desc.irq_count; -+ u8 irq_index = DPNI_IRQ_INDEX; -+ u32 mask = DPNI_IRQ_EVENT_LINK_CHANGED; -+ -+ /* The only interrupt supported now is the link state notification. */ -+ if (WARN_ON(irq_count != 1)) -+ return -EINVAL; -+ -+ irq = ls_dev->irqs[0]; -+ err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq, -+ dpni_irq0_handler, -+ dpni_irq0_handler_thread, -+ IRQF_NO_SUSPEND | IRQF_ONESHOT, -+ dev_name(&ls_dev->dev), &ls_dev->dev); -+ if (err < 0) { -+ dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d", err); -+ return err; -+ } -+ -+ err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle, -+ irq_index, mask); -+ if (err < 0) { -+ dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d", err); -+ return err; -+ } -+ -+ err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle, -+ irq_index, 1); -+ if (err < 0) { -+ dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d", err); -+ return err; -+ } -+ -+ return 0; -+} -+#endif -+ -+static void dpaa2_eth_napi_add(struct dpaa2_eth_priv *priv) -+{ -+ int i; -+ struct dpaa2_eth_channel *ch; -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */ -+ netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll, -+ NAPI_POLL_WEIGHT); -+ } -+} -+ -+static void dpaa2_eth_napi_del(struct dpaa2_eth_priv *priv) -+{ -+ int i; -+ struct dpaa2_eth_channel *ch; -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ netif_napi_del(&ch->napi); -+ } -+} -+ -+/* SysFS support */ -+ -+static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); -+ /* No MC API for getting the shaping config. We're stateful. 
*/ -+ struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg; -+ -+ return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size); -+} -+ -+static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, -+ size_t count) -+{ -+ int err, items; -+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); -+ struct dpni_tx_shaping_cfg scfg; -+ -+ items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size); -+ if (items != 2) { -+ pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n"); -+ return -EINVAL; -+ } -+ /* Size restriction as per MC API documentation */ -+ if (scfg.max_burst_size > 64000) { -+ pr_err("max_burst_size must be <= 64000, thanks.\n"); -+ return -EINVAL; -+ } -+ -+ err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg); -+ if (err) { -+ dev_err(dev, "dpni_set_tx_shaping() failed\n"); -+ return -EPERM; -+ } -+ /* If successful, save the current configuration for future inquiries */ -+ priv->shaping_cfg = scfg; -+ -+ return count; -+} -+ -+static ssize_t dpaa2_eth_show_txconf_cpumask(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); -+ -+ return cpumap_print_to_pagebuf(1, buf, &priv->txconf_cpumask); -+} -+ -+static ssize_t dpaa2_eth_write_txconf_cpumask(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, -+ size_t count) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); -+ struct dpaa2_eth_fq *fq; -+ bool running = netif_running(priv->net_dev); -+ int i, err; -+ -+ err = cpulist_parse(buf, &priv->txconf_cpumask); -+ if (err) -+ return err; -+ -+ /* Only accept CPUs that have an affine DPIO */ -+ if (!cpumask_subset(&priv->txconf_cpumask, &priv->dpio_cpumask)) { -+ netdev_info(priv->net_dev, -+ "cpumask must be a subset of 0x%lx\n", -+ *cpumask_bits(&priv->dpio_cpumask)); -+ cpumask_and(&priv->txconf_cpumask, &priv->dpio_cpumask, -+ &priv->txconf_cpumask); -+ } -+ -+ /* Rewiring the TxConf FQs requires interface shutdown. -+ */ -+ if (running) { -+ err = dpaa2_eth_stop(priv->net_dev); -+ if (err) -+ return -ENODEV; -+ } -+ -+ /* Set the new TxConf FQ affinities */ -+ dpaa2_set_fq_affinity(priv); -+ -+#ifdef CONFIG_FSL_DPAA2_ETH_LINK_POLL -+ /* dpaa2_eth_open() below will *stop* the Tx queues until an explicit -+ * link up notification is received. Give the polling thread enough time -+ * to detect the link state change, or else we'll end up with the -+ * transmission side forever shut down. 
-+ */ -+ msleep(2 * DPAA2_ETH_LINK_STATE_REFRESH); -+#endif -+ -+ for (i = 0; i < priv->num_fqs; i++) { -+ fq = &priv->fq[i]; -+ if (fq->type != DPAA2_TX_CONF_FQ) -+ continue; -+ dpaa2_tx_flow_setup(priv, fq); -+ } -+ -+ if (running) { -+ err = dpaa2_eth_open(priv->net_dev); -+ if (err) -+ return -ENODEV; -+ } -+ -+ return count; -+} -+ -+static struct device_attribute dpaa2_eth_attrs[] = { -+ __ATTR(txconf_cpumask, -+ S_IRUSR | S_IWUSR, -+ dpaa2_eth_show_txconf_cpumask, -+ dpaa2_eth_write_txconf_cpumask), -+ -+ __ATTR(tx_shaping, -+ S_IRUSR | S_IWUSR, -+ dpaa2_eth_show_tx_shaping, -+ dpaa2_eth_write_tx_shaping), -+}; -+ -+void dpaa2_eth_sysfs_init(struct device *dev) -+{ -+ int i, err; -+ -+ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) { -+ err = device_create_file(dev, &dpaa2_eth_attrs[i]); -+ if (err) { -+ dev_err(dev, "ERROR creating sysfs file\n"); -+ goto undo; -+ } -+ } -+ return; -+ -+undo: -+ while (i > 0) -+ device_remove_file(dev, &dpaa2_eth_attrs[--i]); -+} -+ -+void dpaa2_eth_sysfs_remove(struct device *dev) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) -+ device_remove_file(dev, &dpaa2_eth_attrs[i]); -+} -+ -+static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) -+{ -+ struct device *dev; -+ struct net_device *net_dev = NULL; -+ struct dpaa2_eth_priv *priv = NULL; -+ int err = 0; -+ -+ dev = &dpni_dev->dev; -+ -+ /* Net device */ -+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES); -+ if (!net_dev) { -+ dev_err(dev, "alloc_etherdev_mq() failed\n"); -+ return -ENOMEM; -+ } -+ -+ SET_NETDEV_DEV(net_dev, dev); -+ dev_set_drvdata(dev, net_dev); -+ -+ priv = netdev_priv(net_dev); -+ priv->net_dev = net_dev; -+ priv->msg_enable = netif_msg_init(debug, -1); -+ -+ /* Obtain a MC portal */ -+ err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, -+ &priv->mc_io); -+ if (err) { -+ dev_err(dev, "MC portal allocation failed\n"); -+ goto err_portal_alloc; -+ } -+ -+#ifndef CONFIG_FSL_DPAA2_ETH_LINK_POLL -+ err = fsl_mc_allocate_irqs(dpni_dev); -+ if (err) { -+ dev_err(dev, "MC irqs allocation failed\n"); -+ goto err_irqs_alloc; -+ } -+#endif -+ -+ /* DPNI initialization */ -+ err = dpaa2_dpni_setup(dpni_dev); -+ if (err < 0) -+ goto err_dpni_setup; -+ -+ /* DPIO */ -+ err = dpaa2_dpio_setup(priv); -+ if (err) -+ goto err_dpio_setup; -+ -+ /* FQs */ -+ dpaa2_eth_setup_fqs(priv); -+ dpaa2_set_fq_affinity(priv); -+ -+ /* DPBP */ -+ err = dpaa2_dpbp_setup(priv); -+ if (err) -+ goto err_dpbp_setup; -+ -+ /* DPNI binding to DPIO and DPBPs */ -+ err = dpaa2_dpni_bind(priv); -+ if (err) -+ goto err_bind; -+ -+ dpaa2_eth_napi_add(priv); -+ -+ /* Percpu statistics */ -+ priv->percpu_stats = alloc_percpu(*priv->percpu_stats); -+ if (!priv->percpu_stats) { -+ dev_err(dev, "alloc_percpu(percpu_stats) failed\n"); -+ err = -ENOMEM; -+ goto err_alloc_percpu_stats; -+ } -+ priv->percpu_extras = alloc_percpu(*priv->percpu_extras); -+ if (!priv->percpu_extras) { -+ dev_err(dev, "alloc_percpu(percpu_extras) failed\n"); -+ err = -ENOMEM; -+ goto err_alloc_percpu_extras; -+ } -+ -+ snprintf(net_dev->name, IFNAMSIZ, "ni%d", dpni_dev->obj_desc.id); -+ if (!dev_valid_name(net_dev->name)) { -+ dev_warn(&net_dev->dev, -+ "netdevice name \"%s\" cannot be used, reverting to default..\n", -+ net_dev->name); -+ dev_alloc_name(net_dev, "eth%d"); -+ dev_warn(&net_dev->dev, "using name \"%s\"\n", net_dev->name); -+ } -+ -+ err = dpaa2_eth_netdev_init(net_dev); -+ if (err) -+ goto err_netdev_init; -+ -+ /* Configure checksum offload based on current 
interface flags */ -+ err = dpaa2_eth_set_rx_csum(priv, -+ !!(net_dev->features & NETIF_F_RXCSUM)); -+ if (err) -+ goto err_csum; -+ -+ err = dpaa2_eth_set_tx_csum(priv, -+ !!(net_dev->features & -+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))); -+ if (err) -+ goto err_csum; -+ -+ err = dpaa2_eth_alloc_rings(priv); -+ if (err) -+ goto err_alloc_rings; -+ -+ net_dev->ethtool_ops = &dpaa2_ethtool_ops; -+ -+#ifdef CONFIG_FSL_DPAA2_ETH_LINK_POLL -+ priv->poll_thread = kthread_run(dpaa2_poll_link_state, priv, -+ "%s_poll_link", net_dev->name); -+#else -+ err = dpaa2_eth_setup_irqs(dpni_dev); -+ if (err) { -+ netdev_err(net_dev, "ERROR %d setting up interrupts", err); -+ goto err_setup_irqs; -+ } -+#endif -+ -+ dpaa2_eth_sysfs_init(&net_dev->dev); -+ dpaa2_dbg_add(priv); -+ -+ dev_info(dev, "Probed interface %s\n", net_dev->name); -+ return 0; -+ -+#ifndef CONFIG_FSL_DPAA2_ETH_LINK_POLL -+err_setup_irqs: -+#endif -+ dpaa2_eth_free_rings(priv); -+err_alloc_rings: -+err_csum: -+ unregister_netdev(net_dev); -+err_netdev_init: -+ free_percpu(priv->percpu_extras); -+err_alloc_percpu_extras: -+ free_percpu(priv->percpu_stats); -+err_alloc_percpu_stats: -+ dpaa2_eth_napi_del(priv); -+err_bind: -+ dpaa2_dpbp_free(priv); -+err_dpbp_setup: -+ dpaa2_dpio_free(priv); -+err_dpio_setup: -+ kfree(priv->cls_rule); -+ dpni_close(priv->mc_io, 0, priv->mc_token); -+err_dpni_setup: -+#ifndef CONFIG_FSL_DPAA2_ETH_LINK_POLL -+ fsl_mc_free_irqs(dpni_dev); -+err_irqs_alloc: -+#endif -+ fsl_mc_portal_free(priv->mc_io); -+err_portal_alloc: -+ dev_set_drvdata(dev, NULL); -+ free_netdev(net_dev); -+ -+ return err; -+} -+ -+static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) -+{ -+ struct device *dev; -+ struct net_device *net_dev; -+ struct dpaa2_eth_priv *priv; -+ -+ dev = &ls_dev->dev; -+ net_dev = dev_get_drvdata(dev); -+ priv = netdev_priv(net_dev); -+ -+ dpaa2_dbg_remove(priv); -+ dpaa2_eth_sysfs_remove(&net_dev->dev); -+ -+ unregister_netdev(net_dev); -+ dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); -+ -+ dpaa2_dpio_free(priv); -+ dpaa2_eth_free_rings(priv); -+ dpaa2_eth_napi_del(priv); -+ dpaa2_dpbp_free(priv); -+ dpaa2_dpni_free(priv); -+ -+ fsl_mc_portal_free(priv->mc_io); -+ -+ free_percpu(priv->percpu_stats); -+ free_percpu(priv->percpu_extras); -+ -+#ifdef CONFIG_FSL_DPAA2_ETH_LINK_POLL -+ kthread_stop(priv->poll_thread); -+#else -+ fsl_mc_free_irqs(ls_dev); -+#endif -+ -+ kfree(priv->cls_rule); -+ -+ dev_set_drvdata(dev, NULL); -+ free_netdev(net_dev); -+ -+ return 0; -+} -+ -+static const struct fsl_mc_device_match_id dpaa2_eth_match_id_table[] = { -+ { -+ .vendor = FSL_MC_VENDOR_FREESCALE, -+ .obj_type = "dpni", -+ .ver_major = DPNI_VER_MAJOR, -+ .ver_minor = DPNI_VER_MINOR -+ }, -+ { .vendor = 0x0 } -+}; -+ -+static struct fsl_mc_driver dpaa2_eth_driver = { -+ .driver = { -+ .name = KBUILD_MODNAME, -+ .owner = THIS_MODULE, -+ }, -+ .probe = dpaa2_eth_probe, -+ .remove = dpaa2_eth_remove, -+ .match_id_table = dpaa2_eth_match_id_table -+}; -+ -+static int __init dpaa2_eth_driver_init(void) -+{ -+ int err; -+ -+ dpaa2_eth_dbg_init(); -+ -+ err = fsl_mc_driver_register(&dpaa2_eth_driver); -+ if (err) { -+ dpaa2_eth_dbg_exit(); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static void __exit dpaa2_eth_driver_exit(void) -+{ -+ fsl_mc_driver_unregister(&dpaa2_eth_driver); -+ dpaa2_eth_dbg_exit(); -+} -+ -+module_init(dpaa2_eth_driver_init); -+module_exit(dpaa2_eth_driver_exit); ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h -@@ -0,0 +1,366 @@ -+/* Copyright 2014-2015 
Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#ifndef __DPAA2_ETH_H -+#define __DPAA2_ETH_H -+ -+#include <linux/netdevice.h> -+#include <linux/if_vlan.h> -+#include "../../fsl-mc/include/fsl_dpaa2_io.h" -+#include "../../fsl-mc/include/fsl_dpaa2_fd.h" -+#include "../../fsl-mc/include/dpbp.h" -+#include "../../fsl-mc/include/dpbp-cmd.h" -+#include "../../fsl-mc/include/dpcon.h" -+#include "../../fsl-mc/include/dpcon-cmd.h" -+#include "../../fsl-mc/include/dpmng.h" -+#include "dpni.h" -+#include "dpni-cmd.h" -+ -+#include "dpaa2-eth-trace.h" -+#include "dpaa2-eth-debugfs.h" -+ -+#define DPAA2_ETH_STORE_SIZE 16 -+ -+/* Maximum receive frame size is 64K */ -+#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUFFER_SIZE) -+ -+/* Maximum acceptable MTU value. It is in direct relation with the MC-enforced -+ * Max Frame Length (currently 10k). -+ */ -+#define DPAA2_ETH_MFL (10 * 1024) -+#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN) -+/* Convert L3 MTU to L2 MFL */ -+#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN) -+ -+/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo -+ * frames in the Rx queues (length of the current frame is not -+ * taken into account when making the taildrop decision) -+ */ -+#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024) -+ -+/* Buffer quota per queue. Must be large enough such that for minimum sized -+ * frames taildrop kicks in before the bpool gets depleted, so we compute -+ * how many 64B frames fit inside the taildrop threshold and add a margin -+ * to accommodate the buffer refill delay.
-+ */ -+#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64) -+#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256) -+#define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE -+ -+/* Hardware requires alignment for ingress/egress buffer addresses -+ * and ingress buffer lengths. -+ */ -+#define DPAA2_ETH_RX_BUFFER_SIZE 2048 -+#define DPAA2_ETH_TX_BUF_ALIGN 64 -+#define DPAA2_ETH_RX_BUF_ALIGN 256 -+#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \ -+ ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN) -+ -+#define DPAA2_ETH_BUF_RAW_SIZE \ -+ (DPAA2_ETH_RX_BUFFER_SIZE + \ -+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \ -+ DPAA2_ETH_RX_BUF_ALIGN) -+ -+/* PTP nominal frequency 1MHz */ -+#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1000 -+ -+/* We are accommodating a skb backpointer and some S/G info -+ * in the frame's software annotation. The hardware -+ * options are either 0 or 64, so we choose the latter. -+ */ -+#define DPAA2_ETH_SWA_SIZE 64 -+ -+/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */ -+struct dpaa2_eth_swa { -+ struct sk_buff *skb; -+ struct scatterlist *scl; -+ int num_sg; -+ int num_dma_bufs; -+}; -+ -+/* Annotation valid bits in FD FRC */ -+#define DPAA2_FD_FRC_FASV 0x8000 -+#define DPAA2_FD_FRC_FAEADV 0x4000 -+#define DPAA2_FD_FRC_FAPRV 0x2000 -+#define DPAA2_FD_FRC_FAIADV 0x1000 -+#define DPAA2_FD_FRC_FASWOV 0x0800 -+#define DPAA2_FD_FRC_FAICFDV 0x0400 -+ -+/* Annotation bits in FD CTRL */ -+#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */ -+#define DPAA2_FD_CTRL_PTA 0x00800000 -+#define DPAA2_FD_CTRL_PTV1 0x00400000 -+ -+/* Frame annotation status */ -+struct dpaa2_fas { -+ u8 reserved; -+ u8 ppid; -+ __le16 ifpid; -+ __le32 status; -+} __packed; -+ -+/* Debug frame, otherwise supposed to be discarded */ -+#define DPAA2_ETH_FAS_DISC 0x80000000 -+/* MACSEC frame */ -+#define DPAA2_ETH_FAS_MS 0x40000000 -+#define DPAA2_ETH_FAS_PTP 0x08000000 -+/* Ethernet multicast frame */ -+#define DPAA2_ETH_FAS_MC 0x04000000 -+/* Ethernet broadcast frame */ -+#define DPAA2_ETH_FAS_BC 0x02000000 -+#define DPAA2_ETH_FAS_KSE 0x00040000 -+#define DPAA2_ETH_FAS_EOFHE 0x00020000 -+#define DPAA2_ETH_FAS_MNLE 0x00010000 -+#define DPAA2_ETH_FAS_TIDE 0x00008000 -+#define DPAA2_ETH_FAS_PIEE 0x00004000 -+/* Frame length error */ -+#define DPAA2_ETH_FAS_FLE 0x00002000 -+/* Frame physical error; our favourite pastime */ -+#define DPAA2_ETH_FAS_FPE 0x00001000 -+#define DPAA2_ETH_FAS_PTE 0x00000080 -+#define DPAA2_ETH_FAS_ISP 0x00000040 -+#define DPAA2_ETH_FAS_PHE 0x00000020 -+#define DPAA2_ETH_FAS_BLE 0x00000010 -+/* L3 csum validation performed */ -+#define DPAA2_ETH_FAS_L3CV 0x00000008 -+/* L3 csum error */ -+#define DPAA2_ETH_FAS_L3CE 0x00000004 -+/* L4 csum validation performed */ -+#define DPAA2_ETH_FAS_L4CV 0x00000002 -+/* L4 csum error */ -+#define DPAA2_ETH_FAS_L4CE 0x00000001 -+/* These bits always signal errors */ -+#define DPAA2_ETH_RX_ERR_MASK (DPAA2_ETH_FAS_KSE | \ -+ DPAA2_ETH_FAS_EOFHE | \ -+ DPAA2_ETH_FAS_MNLE | \ -+ DPAA2_ETH_FAS_TIDE | \ -+ DPAA2_ETH_FAS_PIEE | \ -+ DPAA2_ETH_FAS_FLE | \ -+ DPAA2_ETH_FAS_FPE | \ -+ DPAA2_ETH_FAS_PTE | \ -+ DPAA2_ETH_FAS_ISP | \ -+ DPAA2_ETH_FAS_PHE | \ -+ DPAA2_ETH_FAS_BLE | \ -+ DPAA2_ETH_FAS_L3CE | \ -+ DPAA2_ETH_FAS_L4CE) -+/* Unsupported features in the ingress */ -+#define DPAA2_ETH_RX_UNSUPP_MASK DPAA2_ETH_FAS_MS -+/* Tx errors */ -+#define DPAA2_ETH_TXCONF_ERR_MASK (DPAA2_ETH_FAS_KSE | \ -+ DPAA2_ETH_FAS_EOFHE | \ -+ DPAA2_ETH_FAS_MNLE | \ -+ DPAA2_ETH_FAS_TIDE) -+ -+/* Time in milliseconds 
between link state updates */ -+#define DPAA2_ETH_LINK_STATE_REFRESH 1000 -+ -+/* Driver statistics, other than those in struct rtnl_link_stats64. -+ * These are usually collected per-CPU and aggregated by ethtool. -+ */ -+struct dpaa2_eth_stats { -+ __u64 tx_conf_frames; -+ __u64 tx_conf_bytes; -+ __u64 tx_sg_frames; -+ __u64 tx_sg_bytes; -+ __u64 rx_sg_frames; -+ __u64 rx_sg_bytes; -+ /* Enqueues retried due to portal busy */ -+ __u64 tx_portal_busy; -+}; -+ -+/* Per-FQ statistics */ -+struct dpaa2_eth_fq_stats { -+ /* Number of frames received on this queue */ -+ __u64 frames; -+}; -+ -+/* Per-channel statistics */ -+struct dpaa2_eth_ch_stats { -+ /* Volatile dequeues retried due to portal busy */ -+ __u64 dequeue_portal_busy; -+ /* Number of CDANs; useful to estimate avg NAPI len */ -+ __u64 cdan; -+ /* Number of frames received on queues from this channel */ -+ __u64 frames; -+}; -+ -+/* Maximum number of Rx queues associated with a DPNI */ -+#define DPAA2_ETH_MAX_RX_QUEUES 16 -+#define DPAA2_ETH_MAX_TX_QUEUES NR_CPUS -+#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1 -+#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \ -+ DPAA2_ETH_MAX_TX_QUEUES + \ -+ DPAA2_ETH_MAX_RX_ERR_QUEUES) -+ -+#define DPAA2_ETH_MAX_DPCONS NR_CPUS -+ -+enum dpaa2_eth_fq_type { -+ DPAA2_RX_FQ = 0, -+ DPAA2_TX_CONF_FQ, -+ DPAA2_RX_ERR_FQ -+}; -+ -+struct dpaa2_eth_priv; -+ -+struct dpaa2_eth_fq { -+ u32 fqid; -+ u16 flowid; -+ int target_cpu; -+ struct dpaa2_eth_channel *channel; -+ enum dpaa2_eth_fq_type type; -+ -+ void (*consume)(struct dpaa2_eth_priv *, -+ struct dpaa2_eth_channel *, -+ const struct dpaa2_fd *, -+ struct napi_struct *); -+ struct dpaa2_eth_priv *netdev_priv; /* backpointer */ -+ struct dpaa2_eth_fq_stats stats; -+}; -+ -+struct dpaa2_eth_channel { -+ struct dpaa2_io_notification_ctx nctx; -+ struct fsl_mc_device *dpcon; -+ int dpcon_id; -+ int ch_id; -+ int dpio_id; -+ struct napi_struct napi; -+ struct dpaa2_io_store *store; -+ struct dpaa2_eth_priv *priv; -+ int buf_count; -+ struct dpaa2_eth_ch_stats stats; -+}; -+ -+struct dpaa2_cls_rule { -+ struct ethtool_rx_flow_spec fs; -+ bool in_use; -+}; -+ -+struct dpaa2_eth_priv { -+ struct net_device *net_dev; -+ -+ u8 num_fqs; -+ /* First queue is tx conf, the rest are rx */ -+ struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES]; -+ -+ u8 num_channels; -+ struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS]; -+ -+ int dpni_id; -+ struct dpni_attr dpni_attrs; -+ struct dpni_extended_cfg dpni_ext_cfg; -+ /* Insofar as the MC is concerned, we're using one layout on all 3 types -+ * of buffers (Rx, Tx, Tx-Conf). -+ */ -+ struct dpni_buffer_layout buf_layout; -+ u16 tx_data_offset; -+ -+ struct fsl_mc_device *dpbp_dev; -+ struct dpbp_attr dpbp_attrs; -+ -+ u16 tx_qdid; -+ struct fsl_mc_io *mc_io; -+ /* SysFS-controlled affinity mask for TxConf FQs */ -+ struct cpumask txconf_cpumask; -+ /* Cores which have an affine DPIO/DPCON. -+ * This is the cpu set on which Rx frames are processed; -+ * Tx confirmation frames are processed on a subset of this, -+ * depending on user settings. 
-+ */ -+ struct cpumask dpio_cpumask; -+ -+ /* Standard statistics */ -+ struct rtnl_link_stats64 __percpu *percpu_stats; -+ /* Extra stats, in addition to the ones known by the kernel */ -+ struct dpaa2_eth_stats __percpu *percpu_extras; -+ u32 msg_enable; /* net_device message level */ -+ -+ u16 mc_token; -+ -+ struct dpni_link_state link_state; -+ struct task_struct *poll_thread; -+ -+ /* enabled ethtool hashing bits */ -+ u64 rx_hash_fields; -+ -+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS -+ struct dpaa2_debugfs dbg; -+#endif -+ -+ /* array of classification rules */ -+ struct dpaa2_cls_rule *cls_rule; -+ -+ struct dpni_tx_shaping_cfg shaping_cfg; -+ -+ bool ts_tx_en; /* Tx timestamping enabled */ -+ bool ts_rx_en; /* Rx timestamping enabled */ -+}; -+ -+/* default Rx hash options, set during probing */ -+#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \ -+ | RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \ -+ | RXH_L4_B_2_3) -+ -+#define dpaa2_eth_hash_enabled(priv) \ -+ ((priv)->dpni_attrs.options & DPNI_OPT_DIST_HASH) -+ -+#define dpaa2_eth_fs_enabled(priv) \ -+ ((priv)->dpni_attrs.options & DPNI_OPT_DIST_FS) -+ -+#define DPAA2_CLASSIFIER_ENTRY_COUNT 16 -+ -+/* Required by struct dpni_attr::ext_cfg_iova */ -+#define DPAA2_EXT_CFG_SIZE 256 -+ -+extern const struct ethtool_ops dpaa2_ethtool_ops; -+ -+int dpaa2_set_hash(struct net_device *net_dev, u64 flags); -+ -+static int dpaa2_queue_count(struct dpaa2_eth_priv *priv) -+{ -+ if (!dpaa2_eth_hash_enabled(priv)) -+ return 1; -+ -+ return priv->dpni_ext_cfg.tc_cfg[0].max_dist; -+} -+ -+static inline int dpaa2_max_channels(struct dpaa2_eth_priv *priv) -+{ -+ /* Ideally, we want a number of channels large enough -+ * to accommodate both the Rx distribution size -+ * and the max number of Tx confirmation queues -+ */ -+ return max_t(int, dpaa2_queue_count(priv), -+ priv->dpni_attrs.max_senders); -+} -+ -+void dpaa2_cls_check(struct net_device *); -+ -+#endif /* __DPAA2_H */ ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c -@@ -0,0 +1,882 @@ -+/* Copyright 2014-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include "dpni.h" /* DPNI_LINK_OPT_* */ -+#include "dpaa2-eth.h" -+ -+/* size of DMA memory used to pass configuration to classifier, in bytes */ -+#define DPAA2_CLASSIFIER_DMA_SIZE 256 -+ -+/* To be kept in sync with 'enum dpni_counter' */ -+char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = { -+ "rx frames", -+ "rx bytes", -+ "rx frames dropped", -+ "rx err frames", -+ "rx mcast frames", -+ "rx mcast bytes", -+ "rx bcast frames", -+ "rx bcast bytes", -+ "tx frames", -+ "tx bytes", -+ "tx err frames", -+}; -+ -+#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats) -+ -+/* To be kept in sync with 'struct dpaa2_eth_stats' */ -+char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = { -+ /* per-cpu stats */ -+ -+ "tx conf frames", -+ "tx conf bytes", -+ "tx sg frames", -+ "tx sg bytes", -+ "rx sg frames", -+ "rx sg bytes", -+ /* how many times we had to retry the enqueue command */ -+ "tx portal busy", -+ -+ /* Channel stats */ -+ -+ /* How many times we had to retry the volatile dequeue command */ -+ "portal busy", -+ /* Number of notifications received */ -+ "cdan", -+#ifdef CONFIG_FSL_QBMAN_DEBUG -+ /* FQ stats */ -+ "rx pending frames", -+ "rx pending bytes", -+ "tx conf pending frames", -+ "tx conf pending bytes", -+ "buffer count" -+#endif -+}; -+ -+#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras) -+ -+static void dpaa2_get_drvinfo(struct net_device *net_dev, -+ struct ethtool_drvinfo *drvinfo) -+{ -+ struct mc_version mc_ver; -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ char fw_version[ETHTOOL_FWVERS_LEN]; -+ char version[32]; -+ int err; -+ -+ err = mc_get_version(priv->mc_io, 0, &mc_ver); -+ if (err) { -+ strlcpy(drvinfo->fw_version, "Error retrieving MC version", -+ sizeof(drvinfo->fw_version)); -+ } else { -+ scnprintf(fw_version, sizeof(fw_version), "%d.%d.%d", -+ mc_ver.major, mc_ver.minor, mc_ver.revision); -+ strlcpy(drvinfo->fw_version, fw_version, -+ sizeof(drvinfo->fw_version)); -+ } -+ -+ scnprintf(version, sizeof(version), "%d.%d", DPNI_VER_MAJOR, -+ DPNI_VER_MINOR); -+ strlcpy(drvinfo->version, version, sizeof(drvinfo->version)); -+ -+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); -+ strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), -+ sizeof(drvinfo->bus_info)); -+} -+ -+static u32 dpaa2_get_msglevel(struct net_device *net_dev) -+{ -+ return ((struct dpaa2_eth_priv *)netdev_priv(net_dev))->msg_enable; -+} -+ -+static void dpaa2_set_msglevel(struct net_device *net_dev, -+ u32 msg_enable) -+{ -+ ((struct dpaa2_eth_priv *)netdev_priv(net_dev))->msg_enable = -+ msg_enable; -+} -+ -+static int dpaa2_get_settings(struct net_device *net_dev, -+ struct ethtool_cmd *cmd) -+{ -+ struct dpni_link_state state = {0}; -+ int err = 0; -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ -+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); -+ if (err) { -+ netdev_err(net_dev, "ERROR %d getting link state", err); -+ goto out; -+ } -+ -+ /* At the moment, we have no way of 
interrogating the DPMAC -+ * from the DPNI side - and for that matter there may exist -+ * no DPMAC at all. So for now we just don't report anything -+ * beyond the DPNI attributes. -+ */ -+ if (state.options & DPNI_LINK_OPT_AUTONEG) -+ cmd->autoneg = AUTONEG_ENABLE; -+ if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX)) -+ cmd->duplex = DUPLEX_FULL; -+ ethtool_cmd_speed_set(cmd, state.rate); -+ -+out: -+ return err; -+} -+ -+static int dpaa2_set_settings(struct net_device *net_dev, -+ struct ethtool_cmd *cmd) -+{ -+ struct dpni_link_cfg cfg = {0}; -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int err = 0; -+ -+ netdev_dbg(net_dev, "Setting link parameters..."); -+ -+ /* Due to a temporary firmware limitation, the DPNI must be down -+ * in order to be able to change link settings. Taking steps to let -+ * the user know that. -+ */ -+ if (netif_running(net_dev)) { -+ netdev_info(net_dev, "Sorry, interface must be brought down first.\n"); -+ return -EACCES; -+ } -+ -+ cfg.rate = ethtool_cmd_speed(cmd); -+ if (cmd->autoneg == AUTONEG_ENABLE) -+ cfg.options |= DPNI_LINK_OPT_AUTONEG; -+ else -+ cfg.options &= ~DPNI_LINK_OPT_AUTONEG; -+ if (cmd->duplex == DUPLEX_HALF) -+ cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX; -+ else -+ cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX; -+ -+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); -+ if (err) -+ /* ethtool will be loud enough if we return an error; no point -+ * in putting our own error message on the console by default -+ */ -+ netdev_dbg(net_dev, "ERROR %d setting link cfg", err); -+ -+ return err; -+} -+ -+static void dpaa2_get_strings(struct net_device *netdev, u32 stringset, -+ u8 *data) -+{ -+ u8 *p = data; -+ int i; -+ -+ switch (stringset) { -+ case ETH_SS_STATS: -+ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) { -+ strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN); -+ p += ETH_GSTRING_LEN; -+ } -+ for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) { -+ strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN); -+ p += ETH_GSTRING_LEN; -+ } -+ break; -+ } -+} -+ -+static int dpaa2_get_sset_count(struct net_device *net_dev, int sset) -+{ -+ switch (sset) { -+ case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */ -+ return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS; -+ default: -+ return -EOPNOTSUPP; -+ } -+} -+ -+/** Fill in hardware counters, as returned by the MC firmware. 
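For reference, the per-CPU extras declared in dpaa2-eth.h are folded together only at read time, which keeps the datapath counters lock-free. A minimal sketch of that fold for a single counter (the helper name is hypothetical; the loop mirrors the one inside dpaa2_get_ethtool_stats() just below):

/* Sketch only: sum one per-CPU counter across online CPUs.
 * dpaa2_get_ethtool_stats() below does the same for every
 * field of struct dpaa2_eth_stats.
 */
static u64 dpaa2_total_tx_portal_busy(struct dpaa2_eth_priv *priv)
{
        struct dpaa2_eth_stats *extras;
        u64 total = 0;
        int cpu;

        for_each_online_cpu(cpu) {
                extras = per_cpu_ptr(priv->percpu_extras, cpu);
                total += extras->tx_portal_busy;
        }
        return total;
}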
-+ */ -+static void dpaa2_get_ethtool_stats(struct net_device *net_dev, -+ struct ethtool_stats *stats, -+ u64 *data) -+{ -+ int i; /* Current index in the data array */ -+ int j, k, err; -+ -+#ifdef CONFIG_FSL_QBMAN_DEBUG -+ u32 fcnt, bcnt; -+ u32 fcnt_rx_total = 0, fcnt_tx_total = 0; -+ u32 bcnt_rx_total = 0, bcnt_tx_total = 0; -+ u32 buf_cnt; -+#endif -+ u64 cdan = 0; -+ u64 portal_busy = 0; -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ struct dpaa2_eth_stats *extras; -+ struct dpaa2_eth_ch_stats *ch_stats; -+ -+ memset(data, 0, -+ sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS)); -+ -+ /* Print standard counters, from DPNI statistics */ -+ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) { -+ err = dpni_get_counter(priv->mc_io, 0, priv->mc_token, i, -+ data + i); -+ if (err != 0) -+ netdev_warn(net_dev, "Err %d getting DPNI counter %d", -+ err, i); -+ } -+ -+ /* Print per-cpu extra stats */ -+ for_each_online_cpu(k) { -+ extras = per_cpu_ptr(priv->percpu_extras, k); -+ for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++) -+ *((__u64 *)data + i + j) += *((__u64 *)extras + j); -+ } -+ i += j; -+ -+ /* We may be using fewer DPIOs than actual CPUs */ -+ for_each_cpu(j, &priv->dpio_cpumask) { -+ ch_stats = &priv->channel[j]->stats; -+ cdan += ch_stats->cdan; -+ portal_busy += ch_stats->dequeue_portal_busy; -+ } -+ -+ *(data + i++) = portal_busy; -+ *(data + i++) = cdan; -+ -+#ifdef CONFIG_FSL_QBMAN_DEBUG -+ for (j = 0; j < priv->num_fqs; j++) { -+ /* Print FQ instantaneous counts */ -+ err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid, -+ &fcnt, &bcnt); -+ if (err) { -+ netdev_warn(net_dev, "FQ query error %d", err); -+ return; -+ } -+ -+ if (priv->fq[j].type == DPAA2_TX_CONF_FQ) { -+ fcnt_tx_total += fcnt; -+ bcnt_tx_total += bcnt; -+ } else { -+ fcnt_rx_total += fcnt; -+ bcnt_rx_total += bcnt; -+ } -+ } -+ *(data + i++) = fcnt_rx_total; -+ *(data + i++) = bcnt_rx_total; -+ *(data + i++) = fcnt_tx_total; -+ *(data + i++) = bcnt_tx_total; -+ -+ err = dpaa2_io_query_bp_count(NULL, priv->dpbp_attrs.bpid, &buf_cnt); -+ if (err) { -+ netdev_warn(net_dev, "Buffer count query error %d\n", err); -+ return; -+ } -+ *(data + i++) = buf_cnt; -+#endif -+} -+ -+static const struct dpaa2_hash_fields { -+ u64 rxnfc_field; -+ enum net_prot cls_prot; -+ int cls_field; -+ int size; -+} dpaa2_hash_fields[] = { -+ { -+ /* L2 header */ -+ .rxnfc_field = RXH_L2DA, -+ .cls_prot = NET_PROT_ETH, -+ .cls_field = NH_FLD_ETH_DA, -+ .size = 6, -+ }, { -+ /* VLAN header */ -+ .rxnfc_field = RXH_VLAN, -+ .cls_prot = NET_PROT_VLAN, -+ .cls_field = NH_FLD_VLAN_TCI, -+ .size = 2, -+ }, { -+ /* IP header */ -+ .rxnfc_field = RXH_IP_SRC, -+ .cls_prot = NET_PROT_IP, -+ .cls_field = NH_FLD_IP_SRC, -+ .size = 4, -+ }, { -+ .rxnfc_field = RXH_IP_DST, -+ .cls_prot = NET_PROT_IP, -+ .cls_field = NH_FLD_IP_DST, -+ .size = 4, -+ }, { -+ .rxnfc_field = RXH_L3_PROTO, -+ .cls_prot = NET_PROT_IP, -+ .cls_field = NH_FLD_IP_PROTO, -+ .size = 1, -+ }, { -+ /* Using UDP ports, this is functionally equivalent to raw -+ * byte pairs from L4 header. 
-+ */
-+ .rxnfc_field = RXH_L4_B_0_1,
-+ .cls_prot = NET_PROT_UDP,
-+ .cls_field = NH_FLD_UDP_PORT_SRC,
-+ .size = 2,
-+ }, {
-+ .rxnfc_field = RXH_L4_B_2_3,
-+ .cls_prot = NET_PROT_UDP,
-+ .cls_field = NH_FLD_UDP_PORT_DST,
-+ .size = 2,
-+ },
-+};
-+
-+static int dpaa2_cls_is_enabled(struct net_device *net_dev, u64 flag)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+
-+ return !!(priv->rx_hash_fields & flag);
-+}
-+
-+static int dpaa2_cls_key_off(struct net_device *net_dev, u64 flag)
-+{
-+ int i, off = 0;
-+
-+ for (i = 0; i < ARRAY_SIZE(dpaa2_hash_fields); i++) {
-+ if (dpaa2_hash_fields[i].rxnfc_field & flag)
-+ return off;
-+ if (dpaa2_cls_is_enabled(net_dev,
-+ dpaa2_hash_fields[i].rxnfc_field))
-+ off += dpaa2_hash_fields[i].size;
-+ }
-+
-+ return -1;
-+}
-+
-+static u8 dpaa2_cls_key_size(struct net_device *net_dev)
-+{
-+ u8 i, size = 0;
-+
-+ for (i = 0; i < ARRAY_SIZE(dpaa2_hash_fields); i++) {
-+ if (!dpaa2_cls_is_enabled(net_dev,
-+ dpaa2_hash_fields[i].rxnfc_field))
-+ continue;
-+ size += dpaa2_hash_fields[i].size;
-+ }
-+
-+ return size;
-+}
-+
-+static u8 dpaa2_cls_max_key_size(struct net_device *net_dev)
-+{
-+ u8 i, size = 0;
-+
-+ for (i = 0; i < ARRAY_SIZE(dpaa2_hash_fields); i++)
-+ size += dpaa2_hash_fields[i].size;
-+
-+ return size;
-+}
-+
-+void dpaa2_cls_check(struct net_device *net_dev)
-+{
-+ u8 key_size = dpaa2_cls_max_key_size(net_dev);
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+
-+ if (priv->dpni_attrs.options & DPNI_OPT_DIST_FS &&
-+ priv->dpni_attrs.max_dist_key_size < key_size) {
-+ dev_err(&net_dev->dev,
-+ "max_dist_key_size = %d, expected %d. Steering is disabled\n",
-+ priv->dpni_attrs.max_dist_key_size,
-+ key_size);
-+ priv->dpni_attrs.options &= ~DPNI_OPT_DIST_FS;
-+ }
-+}
-+
-+/* Set RX hash options
-+ * flags is a combination of RXH_ bits
-+ */
-+int dpaa2_set_hash(struct net_device *net_dev, u64 flags)
-+{
-+ struct device *dev = net_dev->dev.parent;
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct dpkg_profile_cfg cls_cfg;
-+ struct dpni_rx_tc_dist_cfg dist_cfg;
-+ u8 *dma_mem;
-+ u64 enabled_flags = 0;
-+ int i;
-+ int err = 0;
-+
-+ if (!dpaa2_eth_hash_enabled(priv)) {
-+ dev_err(dev, "Hashing support is not enabled\n");
-+ return -EOPNOTSUPP;
-+ }
-+
-+ if (flags & ~DPAA2_RXH_SUPPORTED) {
-+ /* RXH_DISCARD is not supported */
-+ dev_err(dev, "unsupported option selected, supported options are: mvtsdfn\n");
-+ return -EOPNOTSUPP;
-+ }
-+
-+ memset(&cls_cfg, 0, sizeof(cls_cfg));
-+
-+ for (i = 0; i < ARRAY_SIZE(dpaa2_hash_fields); i++) {
-+ struct dpkg_extract *key =
-+ &cls_cfg.extracts[cls_cfg.num_extracts];
-+
-+ if (!(flags & dpaa2_hash_fields[i].rxnfc_field))
-+ continue;
-+
-+ if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
-+ dev_err(dev, "error adding key extraction rule, too many rules?\n");
-+ return -E2BIG;
-+ }
-+
-+ key->type = DPKG_EXTRACT_FROM_HDR;
-+ key->extract.from_hdr.prot =
-+ dpaa2_hash_fields[i].cls_prot;
-+ key->extract.from_hdr.type = DPKG_FULL_FIELD;
-+ key->extract.from_hdr.field =
-+ dpaa2_hash_fields[i].cls_field;
-+ cls_cfg.num_extracts++;
-+
-+ enabled_flags |= dpaa2_hash_fields[i].rxnfc_field;
-+ }
-+
-+ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL);
-+ if (!dma_mem)
-+ return -ENOMEM;
-+
-+ err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
-+ if (err) {
-+ dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
-+ kfree(dma_mem);
-+ return err;
-+ }
-+
-+ memset(&dist_cfg, 0, sizeof(dist_cfg));
-+
-+ /* Prepare for setting the rx dist */
-+ dist_cfg.key_cfg_iova =
dma_map_single(net_dev->dev.parent, dma_mem, -+ DPAA2_CLASSIFIER_DMA_SIZE, -+ DMA_TO_DEVICE); -+ if (dma_mapping_error(net_dev->dev.parent, dist_cfg.key_cfg_iova)) { -+ dev_err(dev, "DMA mapping failed\n"); -+ kfree(dma_mem); -+ return -ENOMEM; -+ } -+ -+ dist_cfg.dist_size = dpaa2_queue_count(priv); -+ if (dpaa2_eth_fs_enabled(priv)) { -+ dist_cfg.dist_mode = DPNI_DIST_MODE_FS; -+ dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH; -+ } else { -+ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; -+ } -+ -+ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); -+ dma_unmap_single(net_dev->dev.parent, dist_cfg.key_cfg_iova, -+ DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); -+ kfree(dma_mem); -+ if (err) { -+ dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err); -+ return err; -+ } -+ -+ priv->rx_hash_fields = enabled_flags; -+ -+ return 0; -+} -+ -+static int dpaa2_cls_prep_rule(struct net_device *net_dev, -+ struct ethtool_rx_flow_spec *fs, -+ void *key) -+{ -+ struct ethtool_tcpip4_spec *l4ip4_h, *l4ip4_m; -+ struct ethhdr *eth_h, *eth_m; -+ struct ethtool_flow_ext *ext_h, *ext_m; -+ const u8 key_size = dpaa2_cls_key_size(net_dev); -+ void *msk = key + key_size; -+ -+ memset(key, 0, key_size * 2); -+ -+ /* This code is a major mess, it has to be cleaned up after the -+ * classification mask issue is fixed and key format will be made static -+ */ -+ -+ switch (fs->flow_type & 0xff) { -+ case TCP_V4_FLOW: -+ l4ip4_h = &fs->h_u.tcp_ip4_spec; -+ l4ip4_m = &fs->m_u.tcp_ip4_spec; -+ /* TODO: ethertype to match IPv4 and protocol to match TCP */ -+ goto l4ip4; -+ -+ case UDP_V4_FLOW: -+ l4ip4_h = &fs->h_u.udp_ip4_spec; -+ l4ip4_m = &fs->m_u.udp_ip4_spec; -+ goto l4ip4; -+ -+ case SCTP_V4_FLOW: -+ l4ip4_h = &fs->h_u.sctp_ip4_spec; -+ l4ip4_m = &fs->m_u.sctp_ip4_spec; -+ -+l4ip4: -+ if (l4ip4_m->tos) { -+ netdev_err(net_dev, -+ "ToS is not supported for IPv4 L4\n"); -+ return -EOPNOTSUPP; -+ } -+ if (l4ip4_m->ip4src && -+ !dpaa2_cls_is_enabled(net_dev, RXH_IP_SRC)) { -+ netdev_err(net_dev, "IP SRC not supported!\n"); -+ return -EOPNOTSUPP; -+ } -+ if (l4ip4_m->ip4dst && -+ !dpaa2_cls_is_enabled(net_dev, RXH_IP_DST)) { -+ netdev_err(net_dev, "IP DST not supported!\n"); -+ return -EOPNOTSUPP; -+ } -+ if (l4ip4_m->psrc && -+ !dpaa2_cls_is_enabled(net_dev, RXH_L4_B_0_1)) { -+ netdev_err(net_dev, "PSRC not supported, ignored\n"); -+ return -EOPNOTSUPP; -+ } -+ if (l4ip4_m->pdst && -+ !dpaa2_cls_is_enabled(net_dev, RXH_L4_B_2_3)) { -+ netdev_err(net_dev, "PDST not supported, ignored\n"); -+ return -EOPNOTSUPP; -+ } -+ -+ if (dpaa2_cls_is_enabled(net_dev, RXH_IP_SRC)) { -+ *(u32 *)(key + dpaa2_cls_key_off(net_dev, RXH_IP_SRC)) -+ = l4ip4_h->ip4src; -+ *(u32 *)(msk + dpaa2_cls_key_off(net_dev, RXH_IP_SRC)) -+ = l4ip4_m->ip4src; -+ } -+ if (dpaa2_cls_is_enabled(net_dev, RXH_IP_DST)) { -+ *(u32 *)(key + dpaa2_cls_key_off(net_dev, RXH_IP_DST)) -+ = l4ip4_h->ip4dst; -+ *(u32 *)(msk + dpaa2_cls_key_off(net_dev, RXH_IP_DST)) -+ = l4ip4_m->ip4dst; -+ } -+ -+ if (dpaa2_cls_is_enabled(net_dev, RXH_L4_B_0_1)) { -+ *(u32 *)(key + dpaa2_cls_key_off(net_dev, RXH_L4_B_0_1)) -+ = l4ip4_h->psrc; -+ *(u32 *)(msk + dpaa2_cls_key_off(net_dev, RXH_L4_B_0_1)) -+ = l4ip4_m->psrc; -+ } -+ -+ if (dpaa2_cls_is_enabled(net_dev, RXH_L4_B_2_3)) { -+ *(u32 *)(key + dpaa2_cls_key_off(net_dev, RXH_L4_B_2_3)) -+ = l4ip4_h->pdst; -+ *(u32 *)(msk + dpaa2_cls_key_off(net_dev, RXH_L4_B_2_3)) -+ = l4ip4_m->pdst; -+ } -+ break; -+ -+ case ETHER_FLOW: -+ eth_h = &fs->h_u.ether_spec; -+ eth_m = &fs->m_u.ether_spec; -+ -+ if (eth_m->h_proto) 
{ -+ netdev_err(net_dev, "Ethertype is not supported!\n"); -+ return -EOPNOTSUPP; -+ } -+ -+ if (!is_zero_ether_addr(eth_m->h_source)) { -+ netdev_err(net_dev, "ETH SRC is not supported!\n"); -+ return -EOPNOTSUPP; -+ } -+ -+ if (dpaa2_cls_is_enabled(net_dev, RXH_L2DA)) { -+ ether_addr_copy(key -+ + dpaa2_cls_key_off(net_dev, RXH_L2DA), -+ eth_h->h_dest); -+ ether_addr_copy(msk -+ + dpaa2_cls_key_off(net_dev, RXH_L2DA), -+ eth_m->h_dest); -+ } else { -+ if (!is_zero_ether_addr(eth_m->h_dest)) { -+ netdev_err(net_dev, -+ "ETH DST is not supported!\n"); -+ return -EOPNOTSUPP; -+ } -+ } -+ break; -+ -+ default: -+ /* TODO: IP user flow, AH, ESP */ -+ return -EOPNOTSUPP; -+ } -+ -+ if (fs->flow_type & FLOW_EXT) { -+ /* TODO: ETH data, VLAN ethertype, VLAN TCI .. */ -+ return -EOPNOTSUPP; -+ } -+ -+ if (fs->flow_type & FLOW_MAC_EXT) { -+ ext_h = &fs->h_ext; -+ ext_m = &fs->m_ext; -+ -+ if (dpaa2_cls_is_enabled(net_dev, RXH_L2DA)) { -+ ether_addr_copy(key -+ + dpaa2_cls_key_off(net_dev, RXH_L2DA), -+ ext_h->h_dest); -+ ether_addr_copy(msk -+ + dpaa2_cls_key_off(net_dev, RXH_L2DA), -+ ext_m->h_dest); -+ } else { -+ if (!is_zero_ether_addr(ext_m->h_dest)) { -+ netdev_err(net_dev, -+ "ETH DST is not supported!\n"); -+ return -EOPNOTSUPP; -+ } -+ } -+ } -+ return 0; -+} -+ -+static int dpaa2_do_cls(struct net_device *net_dev, -+ struct ethtool_rx_flow_spec *fs, -+ bool add) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ const int rule_cnt = DPAA2_CLASSIFIER_ENTRY_COUNT; -+ struct dpni_rule_cfg rule_cfg; -+ void *dma_mem; -+ int err = 0; -+ -+ if (!dpaa2_eth_fs_enabled(priv)) { -+ netdev_err(net_dev, "dev does not support steering!\n"); -+ /* dev doesn't support steering */ -+ return -EOPNOTSUPP; -+ } -+ -+ if ((fs->ring_cookie != RX_CLS_FLOW_DISC && -+ fs->ring_cookie >= dpaa2_queue_count(priv)) || -+ fs->location >= rule_cnt) -+ return -EINVAL; -+ -+ memset(&rule_cfg, 0, sizeof(rule_cfg)); -+ rule_cfg.key_size = dpaa2_cls_key_size(net_dev); -+ -+ /* allocate twice the key size, for the actual key and for mask */ -+ dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL); -+ if (!dma_mem) -+ return -ENOMEM; -+ -+ err = dpaa2_cls_prep_rule(net_dev, fs, dma_mem); -+ if (err) -+ goto err_free_mem; -+ -+ rule_cfg.key_iova = dma_map_single(net_dev->dev.parent, dma_mem, -+ rule_cfg.key_size * 2, -+ DMA_TO_DEVICE); -+ -+ rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size; -+ -+ if (!(priv->dpni_attrs.options & DPNI_OPT_FS_MASK_SUPPORT)) { -+ int i; -+ u8 *mask = dma_mem + rule_cfg.key_size; -+ -+ /* check that nothing is masked out, otherwise it won't work */ -+ for (i = 0; i < rule_cfg.key_size; i++) { -+ if (mask[i] == 0xff) -+ continue; -+ netdev_err(net_dev, "dev does not support masking!\n"); -+ err = -EOPNOTSUPP; -+ goto err_free_mem; -+ } -+ rule_cfg.mask_iova = 0; -+ } -+ -+ /* No way to control rule order in firmware */ -+ if (add) -+ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0, -+ &rule_cfg, (u16)fs->ring_cookie); -+ else -+ err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0, -+ &rule_cfg); -+ -+ dma_unmap_single(net_dev->dev.parent, rule_cfg.key_iova, -+ rule_cfg.key_size * 2, DMA_TO_DEVICE); -+ if (err) { -+ netdev_err(net_dev, "dpaa2_add_cls() error %d\n", err); -+ goto err_free_mem; -+ } -+ -+ priv->cls_rule[fs->location].fs = *fs; -+ priv->cls_rule[fs->location].in_use = true; -+ -+err_free_mem: -+ kfree(dma_mem); -+ -+ return err; -+} -+ -+static int dpaa2_add_cls(struct net_device *net_dev, -+ struct ethtool_rx_flow_spec *fs) -+{ -+ 
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ int err;
-+
-+ err = dpaa2_do_cls(net_dev, fs, true);
-+ if (err)
-+ return err;
-+
-+ priv->cls_rule[fs->location].in_use = true;
-+ priv->cls_rule[fs->location].fs = *fs;
-+
-+ return 0;
-+}
-+
-+static int dpaa2_del_cls(struct net_device *net_dev, int location)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ int err;
-+
-+ err = dpaa2_do_cls(net_dev, &priv->cls_rule[location].fs, false);
-+ if (err)
-+ return err;
-+
-+ priv->cls_rule[location].in_use = false;
-+
-+ return 0;
-+}
-+
-+static void dpaa2_clear_cls(struct net_device *net_dev)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ int i, err;
-+
-+ for (i = 0; i < DPAA2_CLASSIFIER_ENTRY_COUNT; i++) {
-+ if (!priv->cls_rule[i].in_use)
-+ continue;
-+
-+ err = dpaa2_del_cls(net_dev, i);
-+ if (err)
-+ netdev_warn(net_dev,
-+ "err trying to delete classification entry %d\n",
-+ i);
-+ }
-+}
-+
-+static int dpaa2_set_rxnfc(struct net_device *net_dev,
-+ struct ethtool_rxnfc *rxnfc)
-+{
-+ int err = 0;
-+
-+ switch (rxnfc->cmd) {
-+ case ETHTOOL_SRXFH:
-+ /* first off clear ALL classification rules, changing key
-+ * composition will break them anyway
-+ */
-+ dpaa2_clear_cls(net_dev);
-+ /* we purposely ignore cmd->flow_type for now, because the
-+ * classifier only supports a single set of fields for all
-+ * protocols
-+ */
-+ err = dpaa2_set_hash(net_dev, rxnfc->data);
-+ break;
-+ case ETHTOOL_SRXCLSRLINS:
-+ err = dpaa2_add_cls(net_dev, &rxnfc->fs);
-+ break;
-+
-+ case ETHTOOL_SRXCLSRLDEL:
-+ err = dpaa2_del_cls(net_dev, rxnfc->fs.location);
-+ break;
-+
-+ default:
-+ err = -EOPNOTSUPP;
-+ }
-+
-+ return err;
-+}
-+
-+static int dpaa2_get_rxnfc(struct net_device *net_dev,
-+ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ const int rule_cnt = DPAA2_CLASSIFIER_ENTRY_COUNT;
-+ int i, j;
-+
-+ switch (rxnfc->cmd) {
-+ case ETHTOOL_GRXFH:
-+ /* we purposely ignore cmd->flow_type for now, because the
-+ * classifier only supports a single set of fields for all
-+ * protocols
-+ */
-+ rxnfc->data = priv->rx_hash_fields;
-+ break;
-+
-+ case ETHTOOL_GRXRINGS:
-+ rxnfc->data = dpaa2_queue_count(priv);
-+ break;
-+
-+ case ETHTOOL_GRXCLSRLCNT:
-+ for (i = 0, rxnfc->rule_cnt = 0; i < rule_cnt; i++)
-+ if (priv->cls_rule[i].in_use)
-+ rxnfc->rule_cnt++;
-+ rxnfc->data = rule_cnt;
-+ break;
-+
-+ case ETHTOOL_GRXCLSRULE:
-+ if (!priv->cls_rule[rxnfc->fs.location].in_use)
-+ return -EINVAL;
-+
-+ rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs;
-+ break;
-+
-+ case ETHTOOL_GRXCLSRLALL:
-+ for (i = 0, j = 0; i < rule_cnt; i++) {
-+ if (!priv->cls_rule[i].in_use)
-+ continue;
-+ if (j == rxnfc->rule_cnt)
-+ return -EMSGSIZE;
-+ rule_locs[j++] = i;
-+ }
-+ rxnfc->rule_cnt = j;
-+ rxnfc->data = rule_cnt;
-+ break;
-+
-+ default:
-+ return -EOPNOTSUPP;
-+ }
-+
-+ return 0;
-+}
-+
-+const struct ethtool_ops dpaa2_ethtool_ops = {
-+ .get_drvinfo = dpaa2_get_drvinfo,
-+ .get_msglevel = dpaa2_get_msglevel,
-+ .set_msglevel = dpaa2_set_msglevel,
-+ .get_link = ethtool_op_get_link,
-+ .get_settings = dpaa2_get_settings,
-+ .set_settings = dpaa2_set_settings,
-+ .get_sset_count = dpaa2_get_sset_count,
-+ .get_ethtool_stats = dpaa2_get_ethtool_stats,
-+ .get_strings = dpaa2_get_strings,
-+ .get_rxnfc = dpaa2_get_rxnfc,
-+ .set_rxnfc = dpaa2_set_rxnfc,
-+};
----- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
-@@ -0,0 +1,175 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
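For reference, steering rules reach dpaa2_add_cls() above as a struct ethtool_rx_flow_spec. A sketch of a rule the TCP_V4_FLOW branch of dpaa2_cls_prep_rule() accepts (the helper name and values are illustrative); note that with every DPAA2_RXH_SUPPORTED field enabled, the lookup key built from dpaa2_hash_fields is 6 + 2 + 4 + 4 + 1 + 2 + 2 = 21 bytes:

/* Sketch only: steer TCP/IPv4 traffic to 192.168.1.1 into Rx queue 2,
 * rule slot 0. Roughly what userspace requests with:
 *   ethtool -N <iface> flow-type tcp4 dst-ip 192.168.1.1 action 2 loc 0
 */
static void example_tcp4_rule(struct ethtool_rx_flow_spec *fs)
{
        memset(fs, 0, sizeof(*fs));
        fs->flow_type = TCP_V4_FLOW;
        fs->h_u.tcp_ip4_spec.ip4dst = htonl(0xc0a80101); /* 192.168.1.1 */
        fs->m_u.tcp_ip4_spec.ip4dst = htonl(0xffffffff); /* exact match */
        fs->ring_cookie = 2;    /* must be < dpaa2_queue_count(priv) */
        fs->location = 0;       /* index into priv->cls_rule[] */
}

The all-ones mask matters when the DPNI lacks DPNI_OPT_FS_MASK_SUPPORT, since dpaa2_do_cls() rejects any rule whose mask is not fully set in that case.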
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPKG_H_ -+#define __FSL_DPKG_H_ -+ -+#include -+#include "../../fsl-mc/include/net.h" -+ -+/* Data Path Key Generator API -+ * Contains initialization APIs and runtime APIs for the Key Generator -+ */ -+ -+/** Key Generator properties */ -+ -+/** -+ * Number of masks per key extraction -+ */ -+#define DPKG_NUM_OF_MASKS 4 -+/** -+ * Number of extractions per key profile -+ */ -+#define DPKG_MAX_NUM_OF_EXTRACTS 10 -+ -+/** -+ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types -+ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset -+ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field -+ * @DPKG_FULL_FIELD: Extract a full field -+ */ -+enum dpkg_extract_from_hdr_type { -+ DPKG_FROM_HDR = 0, -+ DPKG_FROM_FIELD = 1, -+ DPKG_FULL_FIELD = 2 -+}; -+ -+/** -+ * enum dpkg_extract_type - Enumeration for selecting extraction type -+ * @DPKG_EXTRACT_FROM_HDR: Extract from the header -+ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header -+ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result; -+ * e.g. 
can be used to extract header existence; -+ * please refer to 'Parse Result definition' section in the parser BG -+ */ -+enum dpkg_extract_type { -+ DPKG_EXTRACT_FROM_HDR = 0, -+ DPKG_EXTRACT_FROM_DATA = 1, -+ DPKG_EXTRACT_FROM_PARSE = 3 -+}; -+ -+/** -+ * struct dpkg_mask - A structure for defining a single extraction mask -+ * @mask: Byte mask for the extracted content -+ * @offset: Offset within the extracted content -+ */ -+struct dpkg_mask { -+ uint8_t mask; -+ uint8_t offset; -+}; -+ -+/** -+ * struct dpkg_extract - A structure for defining a single extraction -+ * @type: Determines how the union below is interpreted: -+ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr'; -+ * DPKG_EXTRACT_FROM_DATA: selects 'from_data'; -+ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse' -+ * @extract: Selects extraction method -+ * @num_of_byte_masks: Defines the number of valid entries in the array below; -+ * This is also the number of bytes to be used as masks -+ * @masks: Masks parameters -+ */ -+struct dpkg_extract { -+ enum dpkg_extract_type type; -+ /** -+ * union extract - Selects extraction method -+ * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR' -+ * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA' -+ * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE' -+ */ -+ union { -+ /** -+ * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR' -+ * @prot: Any of the supported headers -+ * @type: Defines the type of header extraction: -+ * DPKG_FROM_HDR: use size & offset below; -+ * DPKG_FROM_FIELD: use field, size and offset below; -+ * DPKG_FULL_FIELD: use field below -+ * @field: One of the supported fields (NH_FLD_) -+ * -+ * @size: Size in bytes -+ * @offset: Byte offset -+ * @hdr_index: Clear for cases not listed below; -+ * Used for protocols that may have more than a single -+ * header, 0 indicates an outer header; -+ * Supported protocols (possible values): -+ * NET_PROT_VLAN (0, HDR_INDEX_LAST); -+ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST); -+ * NET_PROT_IP(0, HDR_INDEX_LAST); -+ * NET_PROT_IPv4(0, HDR_INDEX_LAST); -+ * NET_PROT_IPv6(0, HDR_INDEX_LAST); -+ */ -+ -+ struct { -+ enum net_prot prot; -+ enum dpkg_extract_from_hdr_type type; -+ uint32_t field; -+ uint8_t size; -+ uint8_t offset; -+ uint8_t hdr_index; -+ } from_hdr; -+ /** -+ * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA' -+ * @size: Size in bytes -+ * @offset: Byte offset -+ */ -+ struct { -+ uint8_t size; -+ uint8_t offset; -+ } from_data; -+ -+ /** -+ * struct from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE' -+ * @size: Size in bytes -+ * @offset: Byte offset -+ */ -+ struct { -+ uint8_t size; -+ uint8_t offset; -+ } from_parse; -+ } extract; -+ -+ uint8_t num_of_byte_masks; -+ struct dpkg_mask masks[DPKG_NUM_OF_MASKS]; -+}; -+ -+/** -+ * struct dpkg_profile_cfg - A structure for defining a full Key Generation -+ * profile (rule) -+ * @num_extracts: Defines the number of valid entries in the array below -+ * @extracts: Array of required extractions -+ */ -+struct dpkg_profile_cfg { -+ uint8_t num_extracts; -+ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS]; -+}; -+ -+#endif /* __FSL_DPKG_H_ */ ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h -@@ -0,0 +1,1058 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. 
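The dpaa2_set_hash() code in dpaa2-ethtool.c builds exactly one such profile from dpaa2_hash_fields. A minimal sketch of the same shape for a single field, using only the DPKG types above (the helper name is hypothetical):

/* Sketch only: a one-extract key profile hashing on the full IPv4
 * source address, the same pattern dpaa2_set_hash() uses per field.
 */
static void example_ip_src_profile(struct dpkg_profile_cfg *cfg)
{
        struct dpkg_extract *key = &cfg->extracts[0];

        memset(cfg, 0, sizeof(*cfg));
        key->type = DPKG_EXTRACT_FROM_HDR;
        key->extract.from_hdr.prot = NET_PROT_IP;
        key->extract.from_hdr.type = DPKG_FULL_FIELD;
        key->extract.from_hdr.field = NH_FLD_IP_SRC;
        cfg->num_extracts = 1;
}

The profile is then serialized into DMA-able memory with dpni_prepare_key_cfg() and handed to the MC through dpni_set_rx_tc_dist(), as the ethtool code above shows.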
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef _FSL_DPNI_CMD_H -+#define _FSL_DPNI_CMD_H -+ -+/* DPNI Version */ -+#define DPNI_VER_MAJOR 6 -+#define DPNI_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPNI_CMDID_OPEN 0x801 -+#define DPNI_CMDID_CLOSE 0x800 -+#define DPNI_CMDID_CREATE 0x901 -+#define DPNI_CMDID_DESTROY 0x900 -+ -+#define DPNI_CMDID_ENABLE 0x002 -+#define DPNI_CMDID_DISABLE 0x003 -+#define DPNI_CMDID_GET_ATTR 0x004 -+#define DPNI_CMDID_RESET 0x005 -+#define DPNI_CMDID_IS_ENABLED 0x006 -+ -+#define DPNI_CMDID_SET_IRQ 0x010 -+#define DPNI_CMDID_GET_IRQ 0x011 -+#define DPNI_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPNI_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPNI_CMDID_SET_IRQ_MASK 0x014 -+#define DPNI_CMDID_GET_IRQ_MASK 0x015 -+#define DPNI_CMDID_GET_IRQ_STATUS 0x016 -+#define DPNI_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPNI_CMDID_SET_POOLS 0x200 -+#define DPNI_CMDID_GET_RX_BUFFER_LAYOUT 0x201 -+#define DPNI_CMDID_SET_RX_BUFFER_LAYOUT 0x202 -+#define DPNI_CMDID_GET_TX_BUFFER_LAYOUT 0x203 -+#define DPNI_CMDID_SET_TX_BUFFER_LAYOUT 0x204 -+#define DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT 0x205 -+#define DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT 0x206 -+#define DPNI_CMDID_SET_L3_CHKSUM_VALIDATION 0x207 -+#define DPNI_CMDID_GET_L3_CHKSUM_VALIDATION 0x208 -+#define DPNI_CMDID_SET_L4_CHKSUM_VALIDATION 0x209 -+#define DPNI_CMDID_GET_L4_CHKSUM_VALIDATION 0x20A -+#define DPNI_CMDID_SET_ERRORS_BEHAVIOR 0x20B -+#define DPNI_CMDID_SET_TX_CONF_REVOKE 0x20C -+ -+#define DPNI_CMDID_GET_QDID 0x210 -+#define DPNI_CMDID_GET_SP_INFO 0x211 -+#define DPNI_CMDID_GET_TX_DATA_OFFSET 0x212 -+#define DPNI_CMDID_GET_COUNTER 0x213 -+#define DPNI_CMDID_SET_COUNTER 0x214 -+#define DPNI_CMDID_GET_LINK_STATE 0x215 -+#define DPNI_CMDID_SET_MAX_FRAME_LENGTH 0x216 -+#define DPNI_CMDID_GET_MAX_FRAME_LENGTH 0x217 -+#define DPNI_CMDID_SET_MTU 0x218 -+#define DPNI_CMDID_GET_MTU 0x219 -+#define DPNI_CMDID_SET_LINK_CFG 0x21A -+#define DPNI_CMDID_SET_TX_SHAPING 0x21B -+ -+#define DPNI_CMDID_SET_MCAST_PROMISC 0x220 -+#define DPNI_CMDID_GET_MCAST_PROMISC 0x221 -+#define DPNI_CMDID_SET_UNICAST_PROMISC 0x222 -+#define DPNI_CMDID_GET_UNICAST_PROMISC 0x223 -+#define DPNI_CMDID_SET_PRIM_MAC 0x224 -+#define DPNI_CMDID_GET_PRIM_MAC 0x225 -+#define DPNI_CMDID_ADD_MAC_ADDR 0x226 -+#define DPNI_CMDID_REMOVE_MAC_ADDR 0x227 -+#define DPNI_CMDID_CLR_MAC_FILTERS 0x228 -+ -+#define DPNI_CMDID_SET_VLAN_FILTERS 0x230 -+#define DPNI_CMDID_ADD_VLAN_ID 0x231 -+#define DPNI_CMDID_REMOVE_VLAN_ID 0x232 -+#define DPNI_CMDID_CLR_VLAN_FILTERS 0x233 -+ -+#define DPNI_CMDID_SET_RX_TC_DIST 0x235 -+#define DPNI_CMDID_SET_TX_FLOW 0x236 -+#define DPNI_CMDID_GET_TX_FLOW 0x237 -+#define DPNI_CMDID_SET_RX_FLOW 0x238 -+#define DPNI_CMDID_GET_RX_FLOW 0x239 -+#define DPNI_CMDID_SET_RX_ERR_QUEUE 0x23A -+#define DPNI_CMDID_GET_RX_ERR_QUEUE 0x23B -+ -+#define DPNI_CMDID_SET_RX_TC_POLICING 0x23E -+#define DPNI_CMDID_SET_RX_TC_EARLY_DROP 0x23F -+ -+#define DPNI_CMDID_SET_QOS_TBL 0x240 -+#define DPNI_CMDID_ADD_QOS_ENT 0x241 -+#define DPNI_CMDID_REMOVE_QOS_ENT 0x242 -+#define DPNI_CMDID_CLR_QOS_TBL 0x243 -+#define DPNI_CMDID_ADD_FS_ENT 0x244 -+#define DPNI_CMDID_REMOVE_FS_ENT 0x245 -+#define DPNI_CMDID_CLR_FS_ENT 0x246 -+#define DPNI_CMDID_SET_VLAN_INSERTION 0x247 -+#define DPNI_CMDID_SET_VLAN_REMOVAL 0x248 -+#define DPNI_CMDID_SET_IPR 0x249 -+#define DPNI_CMDID_SET_IPF 0x24A -+ -+#define DPNI_CMDID_SET_TX_SELECTION 0x250 -+#define DPNI_CMDID_GET_RX_TC_POLICING 0x251 -+#define DPNI_CMDID_GET_RX_TC_EARLY_DROP 0x252 -+#define DPNI_CMDID_SET_RX_TC_CONGESTION_NOTIFICATION 0x253 -+#define 
DPNI_CMDID_GET_RX_TC_CONGESTION_NOTIFICATION 0x254 -+#define DPNI_CMDID_SET_TX_TC_CONGESTION_NOTIFICATION 0x255 -+#define DPNI_CMDID_GET_TX_TC_CONGESTION_NOTIFICATION 0x256 -+#define DPNI_CMDID_SET_TX_CONF 0x257 -+#define DPNI_CMDID_GET_TX_CONF 0x258 -+#define DPNI_CMDID_SET_TX_CONF_CONGESTION_NOTIFICATION 0x259 -+#define DPNI_CMDID_GET_TX_CONF_CONGESTION_NOTIFICATION 0x25A -+#define DPNI_CMDID_SET_TX_TC_EARLY_DROP 0x25B -+#define DPNI_CMDID_GET_TX_TC_EARLY_DROP 0x25C -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_OPEN(cmd, dpni_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id) -+ -+#define DPNI_PREP_EXTENDED_CFG(ext, cfg) \ -+do { \ -+ MC_PREP_OP(ext, 0, 0, 16, uint16_t, cfg->tc_cfg[0].max_dist); \ -+ MC_PREP_OP(ext, 0, 16, 16, uint16_t, cfg->tc_cfg[0].max_fs_entries); \ -+ MC_PREP_OP(ext, 0, 32, 16, uint16_t, cfg->tc_cfg[1].max_dist); \ -+ MC_PREP_OP(ext, 0, 48, 16, uint16_t, cfg->tc_cfg[1].max_fs_entries); \ -+ MC_PREP_OP(ext, 1, 0, 16, uint16_t, cfg->tc_cfg[2].max_dist); \ -+ MC_PREP_OP(ext, 1, 16, 16, uint16_t, cfg->tc_cfg[2].max_fs_entries); \ -+ MC_PREP_OP(ext, 1, 32, 16, uint16_t, cfg->tc_cfg[3].max_dist); \ -+ MC_PREP_OP(ext, 1, 48, 16, uint16_t, cfg->tc_cfg[3].max_fs_entries); \ -+ MC_PREP_OP(ext, 2, 0, 16, uint16_t, cfg->tc_cfg[4].max_dist); \ -+ MC_PREP_OP(ext, 2, 16, 16, uint16_t, cfg->tc_cfg[4].max_fs_entries); \ -+ MC_PREP_OP(ext, 2, 32, 16, uint16_t, cfg->tc_cfg[5].max_dist); \ -+ MC_PREP_OP(ext, 2, 48, 16, uint16_t, cfg->tc_cfg[5].max_fs_entries); \ -+ MC_PREP_OP(ext, 3, 0, 16, uint16_t, cfg->tc_cfg[6].max_dist); \ -+ MC_PREP_OP(ext, 3, 16, 16, uint16_t, cfg->tc_cfg[6].max_fs_entries); \ -+ MC_PREP_OP(ext, 3, 32, 16, uint16_t, cfg->tc_cfg[7].max_dist); \ -+ MC_PREP_OP(ext, 3, 48, 16, uint16_t, cfg->tc_cfg[7].max_fs_entries); \ -+ MC_PREP_OP(ext, 4, 0, 16, uint16_t, \ -+ cfg->ipr_cfg.max_open_frames_ipv4); \ -+ MC_PREP_OP(ext, 4, 16, 16, uint16_t, \ -+ cfg->ipr_cfg.max_open_frames_ipv6); \ -+ MC_PREP_OP(ext, 4, 32, 16, uint16_t, \ -+ cfg->ipr_cfg.max_reass_frm_size); \ -+ MC_PREP_OP(ext, 5, 0, 16, uint16_t, \ -+ cfg->ipr_cfg.min_frag_size_ipv4); \ -+ MC_PREP_OP(ext, 5, 16, 16, uint16_t, \ -+ cfg->ipr_cfg.min_frag_size_ipv6); \ -+} while (0) -+ -+#define DPNI_EXT_EXTENDED_CFG(ext, cfg) \ -+do { \ -+ MC_EXT_OP(ext, 0, 0, 16, uint16_t, cfg->tc_cfg[0].max_dist); \ -+ MC_EXT_OP(ext, 0, 16, 16, uint16_t, cfg->tc_cfg[0].max_fs_entries); \ -+ MC_EXT_OP(ext, 0, 32, 16, uint16_t, cfg->tc_cfg[1].max_dist); \ -+ MC_EXT_OP(ext, 0, 48, 16, uint16_t, cfg->tc_cfg[1].max_fs_entries); \ -+ MC_EXT_OP(ext, 1, 0, 16, uint16_t, cfg->tc_cfg[2].max_dist); \ -+ MC_EXT_OP(ext, 1, 16, 16, uint16_t, cfg->tc_cfg[2].max_fs_entries); \ -+ MC_EXT_OP(ext, 1, 32, 16, uint16_t, cfg->tc_cfg[3].max_dist); \ -+ MC_EXT_OP(ext, 1, 48, 16, uint16_t, cfg->tc_cfg[3].max_fs_entries); \ -+ MC_EXT_OP(ext, 2, 0, 16, uint16_t, cfg->tc_cfg[4].max_dist); \ -+ MC_EXT_OP(ext, 2, 16, 16, uint16_t, cfg->tc_cfg[4].max_fs_entries); \ -+ MC_EXT_OP(ext, 2, 32, 16, uint16_t, cfg->tc_cfg[5].max_dist); \ -+ MC_EXT_OP(ext, 2, 48, 16, uint16_t, cfg->tc_cfg[5].max_fs_entries); \ -+ MC_EXT_OP(ext, 3, 0, 16, uint16_t, cfg->tc_cfg[6].max_dist); \ -+ MC_EXT_OP(ext, 3, 16, 16, uint16_t, cfg->tc_cfg[6].max_fs_entries); \ -+ MC_EXT_OP(ext, 3, 32, 16, uint16_t, cfg->tc_cfg[7].max_dist); \ -+ MC_EXT_OP(ext, 3, 48, 16, uint16_t, cfg->tc_cfg[7].max_fs_entries); \ -+ MC_EXT_OP(ext, 4, 0, 16, uint16_t, \ -+ cfg->ipr_cfg.max_open_frames_ipv4); \ -+ MC_EXT_OP(ext, 4, 16, 16, uint16_t, \ -+ cfg->ipr_cfg.max_open_frames_ipv6); \ 
-+ MC_EXT_OP(ext, 4, 32, 16, uint16_t, \ -+ cfg->ipr_cfg.max_reass_frm_size); \ -+ MC_EXT_OP(ext, 5, 0, 16, uint16_t, \ -+ cfg->ipr_cfg.min_frag_size_ipv4); \ -+ MC_EXT_OP(ext, 5, 16, 16, uint16_t, \ -+ cfg->ipr_cfg.min_frag_size_ipv6); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->adv.max_tcs); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->adv.max_senders); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]); \ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->adv.options); \ -+ MC_CMD_OP(cmd, 2, 0, 8, uint8_t, cfg->adv.max_unicast_filters); \ -+ MC_CMD_OP(cmd, 2, 8, 8, uint8_t, cfg->adv.max_multicast_filters); \ -+ MC_CMD_OP(cmd, 2, 16, 8, uint8_t, cfg->adv.max_vlan_filters); \ -+ MC_CMD_OP(cmd, 2, 24, 8, uint8_t, cfg->adv.max_qos_entries); \ -+ MC_CMD_OP(cmd, 2, 32, 8, uint8_t, cfg->adv.max_qos_key_size); \ -+ MC_CMD_OP(cmd, 2, 48, 8, uint8_t, cfg->adv.max_dist_key_size); \ -+ MC_CMD_OP(cmd, 2, 56, 8, enum net_prot, cfg->adv.start_hdr); \ -+ MC_CMD_OP(cmd, 4, 48, 8, uint8_t, cfg->adv.max_policers); \ -+ MC_CMD_OP(cmd, 4, 56, 8, uint8_t, cfg->adv.max_congestion_ctrl); \ -+ MC_CMD_OP(cmd, 5, 0, 64, uint64_t, cfg->adv.ext_cfg_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_POOLS(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_dpbp); \ -+ MC_CMD_OP(cmd, 0, 8, 1, int, cfg->pools[0].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 9, 1, int, cfg->pools[1].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 10, 1, int, cfg->pools[2].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 11, 1, int, cfg->pools[3].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 12, 1, int, cfg->pools[4].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 13, 1, int, cfg->pools[5].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 14, 1, int, cfg->pools[6].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 15, 1, int, cfg->pools[7].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->pools[0].dpbp_id); \ -+ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->pools[0].buffer_size);\ -+ MC_CMD_OP(cmd, 1, 0, 32, int, cfg->pools[1].dpbp_id); \ -+ MC_CMD_OP(cmd, 4, 48, 16, uint16_t, cfg->pools[1].buffer_size);\ -+ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->pools[2].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->pools[2].buffer_size);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, cfg->pools[3].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 16, 16, uint16_t, cfg->pools[3].buffer_size);\ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->pools[4].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->pools[4].buffer_size);\ -+ MC_CMD_OP(cmd, 3, 0, 32, int, cfg->pools[5].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 48, 16, uint16_t, cfg->pools[5].buffer_size);\ -+ MC_CMD_OP(cmd, 3, 32, 32, int, cfg->pools[6].dpbp_id); \ -+ MC_CMD_OP(cmd, 6, 0, 16, uint16_t, cfg->pools[6].buffer_size);\ -+ MC_CMD_OP(cmd, 4, 0, 32, int, cfg->pools[7].dpbp_id); \ -+ MC_CMD_OP(cmd, 6, 16, 16, uint16_t, cfg->pools[7].buffer_size);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ 
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_ATTR(cmd, attr) \ -+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, attr->ext_cfg_iova) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->max_tcs); \ -+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, attr->max_senders); \ -+ MC_RSP_OP(cmd, 0, 48, 8, enum net_prot, attr->start_hdr); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->options); \ -+ MC_RSP_OP(cmd, 2, 0, 8, uint8_t, attr->max_unicast_filters); \ -+ MC_RSP_OP(cmd, 2, 8, 8, uint8_t, attr->max_multicast_filters);\ -+ MC_RSP_OP(cmd, 2, 16, 8, uint8_t, attr->max_vlan_filters); \ -+ MC_RSP_OP(cmd, 2, 24, 8, uint8_t, attr->max_qos_entries); \ -+ MC_RSP_OP(cmd, 2, 32, 8, uint8_t, attr->max_qos_key_size); \ -+ MC_RSP_OP(cmd, 2, 40, 8, uint8_t, attr->max_dist_key_size); \ -+ MC_RSP_OP(cmd, 4, 48, 8, uint8_t, attr->max_policers); \ -+ MC_RSP_OP(cmd, 4, 56, 8, uint8_t, attr->max_congestion_ctrl); \ -+ MC_RSP_OP(cmd, 5, 32, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 5, 48, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ 
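The repeated /* cmd, param, offset, width, type, arg_name */ legend describes the argument order of these macros: each MC_CMD_OP() line packs one field into the given 64-bit parameter word of the command portal, at the given bit offset and width, while MC_RSP_OP()/MC_EXT_OP() perform the matching extraction. A rough functional equivalent, offered as an assumption about the encoding rather than the fsl-mc bus driver's literal definition:

/* Sketch only: OR 'arg' into bits [offset, offset + width) of command
 * parameter word 'param'. The real MC_CMD_OP in the fsl-mc bus code is
 * macro-based, but this is the field layout the legend describes.
 */
static inline void mc_cmd_op_sketch(u64 *params, int param, int offset,
                                    int width, u64 arg)
{
        u64 mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

        params[param] |= (arg & mask) << offset;
}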
-+#define DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->errors); \ -+ MC_CMD_OP(cmd, 0, 32, 4, enum dpni_error_action, cfg->error_action); \ -+ MC_CMD_OP(cmd, 0, 36, 1, int, cfg->set_frame_annotation); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \ -+ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \ -+ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_CMD_OP(cmd, 0, 32, 32, 
uint32_t, layout->options); \ -+ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_L3_CHKSUM_VALIDATION(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_L3_CHKSUM_VALIDATION(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_L4_CHKSUM_VALIDATION(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_L4_CHKSUM_VALIDATION(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_QDID(cmd, qdid) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, qdid) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_SP_INFO(cmd, sp_info) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, sp_info->spids[0]); \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, sp_info->spids[1]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_TX_DATA_OFFSET(cmd, data_offset) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, data_offset) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_COUNTER(cmd, counter) \ -+ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_COUNTER(cmd, value) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, value) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_COUNTER(cmd, counter, value) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, value); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_LINK_CFG(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_LINK_STATE(cmd, state) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\ -+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_SHAPING(cmd, tx_shaper) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, tx_shaper->max_burst_size);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, tx_shaper->rate_limit);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, max_frame_length) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, max_frame_length) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, max_frame_length) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_MTU(cmd, mtu) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, mtu) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_MTU(cmd, mtu) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, mtu) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, 
param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_MULTICAST_PROMISC(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_UNICAST_PROMISC(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_UNICAST_PROMISC(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ -+ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ -+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ -+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ -+ MC_RSP_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, unicast); \ -+ MC_CMD_OP(cmd, 0, 1, 1, int, multicast); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_VLAN_FILTERS(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_SELECTION(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->tc_sched[0].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 0, 16, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[0].mode); \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, cfg->tc_sched[1].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 0, 48, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[1].mode); \ -+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->tc_sched[2].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 1, 16, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[2].mode); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->tc_sched[3].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 1, 
48, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[3].mode); \ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->tc_sched[4].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 2, 16, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[4].mode); \ -+ MC_CMD_OP(cmd, 2, 32, 16, uint16_t, cfg->tc_sched[5].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 2, 48, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[5].mode); \ -+ MC_CMD_OP(cmd, 3, 0, 16, uint16_t, cfg->tc_sched[6].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 3, 16, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[6].mode); \ -+ MC_CMD_OP(cmd, 3, 32, 16, uint16_t, cfg->tc_sched[7].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 3, 48, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[7].mode); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->dist_size); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 24, 4, enum dpni_dist_mode, cfg->dist_mode); \ -+ MC_CMD_OP(cmd, 0, 28, 4, enum dpni_fs_miss_action, \ -+ cfg->fs_cfg.miss_action); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->fs_cfg.default_flow_id); \ -+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_FLOW(cmd, flow_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 43, 1, int, cfg->l3_chksum_gen);\ -+ MC_CMD_OP(cmd, 0, 44, 1, int, cfg->l4_chksum_gen);\ -+ MC_CMD_OP(cmd, 0, 45, 1, int, cfg->use_common_tx_conf_queue);\ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id);\ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_SET_TX_FLOW(cmd, flow_id) \ -+ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, flow_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_TX_FLOW(cmd, flow_id) \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_TX_FLOW(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 43, 1, int, attr->l3_chksum_gen);\ -+ MC_RSP_OP(cmd, 0, 44, 1, int, attr->l4_chksum_gen);\ -+ MC_RSP_OP(cmd, 0, 45, 1, int, attr->use_common_tx_conf_queue);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\ -+ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, cfg->dest_cfg.dest_type);\ -+ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->order_preservation_en);\ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ -+ MC_CMD_OP(cmd, 2, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->options); \ -+ MC_CMD_OP(cmd, 3, 0, 4, enum dpni_flc_type, cfg->flc_cfg.flc_type); \ -+ MC_CMD_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ -+ cfg->flc_cfg.frame_data_size);\ -+ MC_CMD_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ -+ cfg->flc_cfg.flow_context_size);\ -+ MC_CMD_OP(cmd, 3, 32, 32, uint32_t, cfg->flc_cfg.options);\ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->flc_cfg.flow_context);\ -+ MC_CMD_OP(cmd, 5, 0, 32, uint32_t, cfg->tail_drop_threshold); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+} while (0) -+ -+/* cmd, param, offset, 
width, type, arg_name */ -+#define DPNI_RSP_GET_RX_FLOW(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ -+ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, attr->dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 42, 1, int, attr->order_preservation_en);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx); \ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tail_drop_threshold); \ -+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, attr->fqid); \ -+ MC_RSP_OP(cmd, 3, 0, 4, enum dpni_flc_type, attr->flc_cfg.flc_type); \ -+ MC_RSP_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ -+ attr->flc_cfg.frame_data_size);\ -+ MC_RSP_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ -+ attr->flc_cfg.flow_context_size);\ -+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->flc_cfg.options);\ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, attr->flc_cfg.flow_context);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_ERR_QUEUE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\ -+ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, cfg->dest_cfg.dest_type);\ -+ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->order_preservation_en);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->tail_drop_threshold); \ -+ MC_CMD_OP(cmd, 3, 0, 4, enum dpni_flc_type, cfg->flc_cfg.flc_type); \ -+ MC_CMD_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ -+ cfg->flc_cfg.frame_data_size);\ -+ MC_CMD_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ -+ cfg->flc_cfg.flow_context_size);\ -+ MC_CMD_OP(cmd, 3, 32, 32, uint32_t, cfg->flc_cfg.options);\ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->flc_cfg.flow_context);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_RX_ERR_QUEUE(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ -+ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, attr->dest_cfg.dest_type);\ -+ MC_RSP_OP(cmd, 0, 42, 1, int, attr->order_preservation_en);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx); \ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tail_drop_threshold); \ -+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, attr->fqid); \ -+ MC_RSP_OP(cmd, 3, 0, 4, enum dpni_flc_type, attr->flc_cfg.flc_type); \ -+ MC_RSP_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ -+ attr->flc_cfg.frame_data_size);\ -+ MC_RSP_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ -+ attr->flc_cfg.flow_context_size);\ -+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->flc_cfg.options);\ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, attr->flc_cfg.flow_context);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_CONF_REVOKE(cmd, revoke) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, revoke) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_QOS_TABLE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->default_tc); \ -+ MC_CMD_OP(cmd, 0, 40, 1, int, cfg->discard_on_miss); \ -+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_ADD_QOS_ENTRY(cmd, cfg, tc_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, 
cfg->mask_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_REMOVE_QOS_ENTRY(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_ADD_FS_ENTRY(cmd, tc_id, cfg, flow_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_REMOVE_FS_ENTRY(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_CLEAR_FS_ENTRIES(cmd, tc_id) \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_VLAN_INSERTION(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_VLAN_REMOVAL(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IPR(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IPF(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_TC_POLICING(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 4, enum dpni_policer_mode, cfg->mode); \ -+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_policer_color, cfg->default_color); \ -+ MC_CMD_OP(cmd, 0, 8, 4, enum dpni_policer_unit, cfg->units); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->options); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->cir); \ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->eir); \ -+ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->ebs);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_RX_TC_POLICING(cmd, tc_id) \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_RX_TC_POLICING(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 4, enum dpni_policer_mode, cfg->mode); \ -+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_policer_color, cfg->default_color); \ -+ MC_RSP_OP(cmd, 0, 8, 4, enum dpni_policer_unit, cfg->units); \ -+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, cfg->options); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->cir); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs); \ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, cfg->eir); \ -+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, cfg->ebs);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_PREP_EARLY_DROP(ext, cfg) \ -+do { \ -+ MC_PREP_OP(ext, 0, 0, 2, enum dpni_early_drop_mode, cfg->mode); \ -+ MC_PREP_OP(ext, 0, 2, 2, \ -+ enum dpni_congestion_unit, cfg->units); \ -+ MC_PREP_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \ -+ MC_PREP_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \ -+ MC_PREP_OP(ext, 2, 0, 64, 
uint64_t, cfg->green.max_threshold); \ -+ MC_PREP_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \ -+ MC_PREP_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\ -+ MC_PREP_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \ -+ MC_PREP_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \ -+ MC_PREP_OP(ext, 9, 0, 8, uint8_t, cfg->red.drop_probability); \ -+ MC_PREP_OP(ext, 10, 0, 64, uint64_t, cfg->red.max_threshold); \ -+ MC_PREP_OP(ext, 11, 0, 64, uint64_t, cfg->red.min_threshold); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_EXT_EARLY_DROP(ext, cfg) \ -+do { \ -+ MC_EXT_OP(ext, 0, 0, 2, enum dpni_early_drop_mode, cfg->mode); \ -+ MC_EXT_OP(ext, 0, 2, 2, \ -+ enum dpni_congestion_unit, cfg->units); \ -+ MC_EXT_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \ -+ MC_EXT_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \ -+ MC_EXT_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \ -+ MC_EXT_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \ -+ MC_EXT_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\ -+ MC_EXT_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \ -+ MC_EXT_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \ -+ MC_EXT_OP(ext, 9, 0, 8, uint8_t, cfg->red.drop_probability); \ -+ MC_EXT_OP(ext, 10, 0, 64, uint64_t, cfg->red.max_threshold); \ -+ MC_EXT_OP(ext, 11, 0, 64, uint64_t, cfg->red.min_threshold); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+#define DPNI_CMD_SET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id) \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id) -+ -+#define DPNI_RSP_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, 
cfg->dest_cfg.priority); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_SET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id) \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id) -+ -+#define DPNI_RSP_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_SET_TX_CONF(cmd, flow_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->queue_cfg.dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, \ -+ cfg->queue_cfg.dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->errors_only); \ -+ MC_CMD_OP(cmd, 0, 46, 1, int, cfg->queue_cfg.order_preservation_en); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->queue_cfg.user_ctx); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->queue_cfg.options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->queue_cfg.dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 3, 0, 32, uint32_t, \ -+ cfg->queue_cfg.tail_drop_threshold); \ -+ MC_CMD_OP(cmd, 4, 0, 4, enum dpni_flc_type, \ -+ cfg->queue_cfg.flc_cfg.flc_type); \ -+ MC_CMD_OP(cmd, 4, 4, 4, enum dpni_stash_size, \ -+ cfg->queue_cfg.flc_cfg.frame_data_size); \ -+ MC_CMD_OP(cmd, 4, 8, 4, enum dpni_stash_size, \ -+ cfg->queue_cfg.flc_cfg.flow_context_size); \ -+ MC_CMD_OP(cmd, 4, 32, 32, uint32_t, cfg->queue_cfg.flc_cfg.options); \ -+ MC_CMD_OP(cmd, 5, 0, 64, uint64_t, \ -+ cfg->queue_cfg.flc_cfg.flow_context); \ -+} while (0) -+ -+#define DPNI_CMD_GET_TX_CONF(cmd, flow_id) \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id) -+ -+#define DPNI_RSP_GET_TX_CONF(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, \ -+ attr->queue_attr.dest_cfg.priority); \ -+ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, \ -+ attr->queue_attr.dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 42, 1, int, attr->errors_only); \ -+ MC_RSP_OP(cmd, 0, 46, 1, int, \ -+ attr->queue_attr.order_preservation_en); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->queue_attr.user_ctx); \ -+ 
MC_RSP_OP(cmd, 2, 32, 32, int, attr->queue_attr.dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 3, 0, 32, uint32_t, \ -+ attr->queue_attr.tail_drop_threshold); \ -+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->queue_attr.fqid); \ -+ MC_RSP_OP(cmd, 4, 0, 4, enum dpni_flc_type, \ -+ attr->queue_attr.flc_cfg.flc_type); \ -+ MC_RSP_OP(cmd, 4, 4, 4, enum dpni_stash_size, \ -+ attr->queue_attr.flc_cfg.frame_data_size); \ -+ MC_RSP_OP(cmd, 4, 8, 4, enum dpni_stash_size, \ -+ attr->queue_attr.flc_cfg.flow_context_size); \ -+ MC_RSP_OP(cmd, 4, 32, 32, uint32_t, attr->queue_attr.flc_cfg.options); \ -+ MC_RSP_OP(cmd, 5, 0, 64, uint64_t, \ -+ attr->queue_attr.flc_cfg.flow_context); \ -+} while (0) -+ -+#define DPNI_CMD_SET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id) \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id) -+ -+#define DPNI_RSP_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#endif /* _FSL_DPNI_CMD_H */ ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c -@@ -0,0 +1,1907 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include "../../fsl-mc/include/mc-sys.h" -+#include "../../fsl-mc/include/mc-cmd.h" -+#include "dpni.h" -+#include "dpni-cmd.h" -+ -+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, -+ uint8_t *key_cfg_buf) -+{ -+ int i, j; -+ int offset = 0; -+ int param = 1; -+ uint64_t *params = (uint64_t *)key_cfg_buf; -+ -+ if (!key_cfg_buf || !cfg) -+ return -EINVAL; -+ -+ params[0] |= mc_enc(0, 8, cfg->num_extracts); -+ params[0] = cpu_to_le64(params[0]); -+ -+ if (cfg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) -+ return -EINVAL; -+ -+ for (i = 0; i < cfg->num_extracts; i++) { -+ switch (cfg->extracts[i].type) { -+ case DPKG_EXTRACT_FROM_HDR: -+ params[param] |= mc_enc(0, 8, -+ cfg->extracts[i].extract.from_hdr.prot); -+ params[param] |= mc_enc(8, 4, -+ cfg->extracts[i].extract.from_hdr.type); -+ params[param] |= mc_enc(16, 8, -+ cfg->extracts[i].extract.from_hdr.size); -+ params[param] |= mc_enc(24, 8, -+ cfg->extracts[i].extract. -+ from_hdr.offset); -+ params[param] |= mc_enc(32, 32, -+ cfg->extracts[i].extract. -+ from_hdr.field); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ params[param] |= mc_enc(0, 8, -+ cfg->extracts[i].extract. -+ from_hdr.hdr_index); -+ break; -+ case DPKG_EXTRACT_FROM_DATA: -+ params[param] |= mc_enc(16, 8, -+ cfg->extracts[i].extract. -+ from_data.size); -+ params[param] |= mc_enc(24, 8, -+ cfg->extracts[i].extract. -+ from_data.offset); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ break; -+ case DPKG_EXTRACT_FROM_PARSE: -+ params[param] |= mc_enc(16, 8, -+ cfg->extracts[i].extract. -+ from_parse.size); -+ params[param] |= mc_enc(24, 8, -+ cfg->extracts[i].extract. 
-+ from_parse.offset); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ break; -+ default: -+ return -EINVAL; -+ } -+ params[param] |= mc_enc( -+ 24, 8, cfg->extracts[i].num_of_byte_masks); -+ params[param] |= mc_enc(32, 4, cfg->extracts[i].type); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ for (offset = 0, j = 0; -+ j < DPKG_NUM_OF_MASKS; -+ offset += 16, j++) { -+ params[param] |= mc_enc( -+ (offset), 8, cfg->extracts[i].masks[j].mask); -+ params[param] |= mc_enc( -+ (offset + 8), 8, -+ cfg->extracts[i].masks[j].offset); -+ } -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ } -+ return 0; -+} -+ -+int dpni_prepare_extended_cfg(const struct dpni_extended_cfg *cfg, -+ uint8_t *ext_cfg_buf) -+{ -+ uint64_t *ext_params = (uint64_t *)ext_cfg_buf; -+ -+ DPNI_PREP_EXTENDED_CFG(ext_params, cfg); -+ -+ return 0; -+} -+ -+int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg, -+ const uint8_t *ext_cfg_buf) -+{ -+ uint64_t *ext_params = (uint64_t *)ext_cfg_buf; -+ -+ DPNI_EXT_EXTENDED_CFG(ext_params, cfg); -+ -+ return 0; -+} -+ -+int dpni_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpni_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPNI_CMD_OPEN(cmd, dpni_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpni_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpni_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPNI_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpni_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_pools(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_pools_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_POOLS(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ 
cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpni_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpni_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpni_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ 
DPNI_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpni_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpni_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPNI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_ATTR(cmd, attr); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_error_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout); -+ -+ return 0; -+} -+ -+int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ 
DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout); -+ -+ return 0; -+} -+ -+int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout); -+ -+ return 0; -+} -+ -+int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_L3_CHKSUM_VALIDATION, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_L3_CHKSUM_VALIDATION(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_L3_CHKSUM_VALIDATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_L3_CHKSUM_VALIDATION(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_L4_CHKSUM_VALIDATION, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_L4_CHKSUM_VALIDATION(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_L4_CHKSUM_VALIDATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_L4_CHKSUM_VALIDATION(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_qdid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *qdid) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID, -+ cmd_flags, -+ token); -+ -+ /* 
send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_QDID(cmd, *qdid); -+ -+ return 0; -+} -+ -+int dpni_get_sp_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_sp_info *sp_info) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SP_INFO, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_SP_INFO(cmd, sp_info); -+ -+ return 0; -+} -+ -+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *data_offset) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_TX_DATA_OFFSET(cmd, *data_offset); -+ -+ return 0; -+} -+ -+int dpni_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpni_counter counter, -+ uint64_t *value) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_COUNTER, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_COUNTER(cmd, counter); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_COUNTER(cmd, *value); -+ -+ return 0; -+} -+ -+int dpni_set_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpni_counter counter, -+ uint64_t value) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_COUNTER, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_COUNTER(cmd, counter, value); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_link_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_LINK_CFG(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_link_state *state) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_LINK_STATE(cmd, state); -+ -+ return 0; -+} -+ -+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_tx_shaping_cfg *tx_shaper) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_SHAPING(cmd, tx_shaper); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t 
max_frame_length) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *max_frame_length) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, *max_frame_length); -+ -+ return 0; -+} -+ -+int dpni_set_mtu(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t mtu) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MTU, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_MTU(cmd, mtu); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_mtu(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *mtu) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MTU, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_MTU(cmd, *mtu); -+ -+ return 0; -+} -+ -+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_MULTICAST_PROMISC(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_UNICAST_PROMISC(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_UNICAST_PROMISC(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]) -+{ -+ struct mc_command 
cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t mac_addr[6]) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr); -+ -+ return 0; -+} -+ -+int dpni_add_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR, -+ cmd_flags, -+ token); -+ DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR, -+ cmd_flags, -+ token); -+ DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int unicast, -+ int multicast) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS, -+ cmd_flags, -+ token); -+ DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_vlan_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_FILTERS, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_VLAN_FILTERS(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID, -+ cmd_flags, -+ token); -+ DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID, -+ cmd_flags, -+ token); -+ DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_VLAN_FILTERS, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_tx_selection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct 
dpni_tx_selection_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SELECTION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_SELECTION(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rx_tc_dist_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_tx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *flow_id, -+ const struct dpni_tx_flow_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_FLOW, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_FLOW(cmd, *flow_id, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_SET_TX_FLOW(cmd, *flow_id); -+ -+ return 0; -+} -+ -+int dpni_get_tx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_tx_flow_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_FLOW, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_FLOW(cmd, flow_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_TX_FLOW(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_rx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint16_t flow_id, -+ const struct dpni_queue_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FLOW, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint16_t flow_id, -+ struct dpni_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_FLOW, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_RX_FLOW(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_rx_err_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_queue_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_ERR_QUEUE, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_ERR_QUEUE(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_err_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_ERR_QUEUE, -+ cmd_flags, -+ token); -+ 
-+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_RX_ERR_QUEUE(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_tx_conf_revoke(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int revoke) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF_REVOKE, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_CONF_REVOKE(cmd, revoke); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_qos_table(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_qos_tbl_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_QOS_TABLE(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_add_qos_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_rule_cfg *cfg, -+ uint8_t tc_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_ADD_QOS_ENTRY(cmd, cfg, tc_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_rule_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_REMOVE_QOS_ENTRY(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_clear_qos_table(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_add_fs_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rule_cfg *cfg, -+ uint16_t flow_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_ADD_FS_ENTRY(cmd, tc_id, cfg, flow_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rule_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_REMOVE_FS_ENTRY(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_clear_fs_entries(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_FS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_CLEAR_FS_ENTRIES(cmd, tc_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare 
command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_INSERTION, -+ cmd_flags, token); -+ DPNI_CMD_SET_VLAN_INSERTION(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_vlan_removal(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_REMOVAL, -+ cmd_flags, token); -+ DPNI_CMD_SET_VLAN_REMOVAL(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_ipr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IPR, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IPR(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_ipf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IPF, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IPF(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rx_tc_policing_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_POLICING, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_TC_POLICING(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_rx_tc_policing_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_TC_POLICING, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_RX_TC_POLICING(cmd, tc_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_RX_TC_POLICING(cmd, cfg); -+ -+ return 0; -+} -+ -+void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg, -+ uint8_t *early_drop_buf) -+{ -+ uint64_t *ext_params = (uint64_t *)early_drop_buf; -+ -+ DPNI_PREP_EARLY_DROP(ext_params, cfg); -+} -+ -+void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg, -+ const uint8_t *early_drop_buf) -+{ -+ uint64_t *ext_params = (uint64_t *)early_drop_buf; -+ -+ DPNI_EXT_EARLY_DROP(ext_params, cfg); -+} -+ -+int dpni_set_rx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_TC_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); -+ -+ /* send command to mc*/ -+ return 
mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_tx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_TC_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_TC_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_SET_RX_TC_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_GET_RX_TC_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, cfg); -+ -+ return 0; -+} -+ -+int dpni_set_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_SET_TX_TC_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_GET_TX_TC_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, cfg); -+ -+ return 0; -+} -+ -+int dpni_set_tx_conf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ const struct dpni_tx_conf_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_CONF(cmd, flow_id, cfg); -+ -+ /* send command to mc*/ -+ return 
mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_conf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_tx_conf_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONF, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_CONF(cmd, flow_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_TX_CONF(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ const struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_SET_TX_CONF_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_GET_TX_CONF_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, cfg); -+ -+ return 0; -+} ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h -@@ -0,0 +1,2581 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPNI_H -+#define __FSL_DPNI_H -+ -+#include "dpkg.h" -+ -+struct fsl_mc_io; -+ -+/** -+ * Data Path Network Interface API -+ * Contains initialization APIs and runtime control APIs for DPNI -+ */ -+ -+/** General DPNI macros */ -+ -+/** -+ * Maximum number of traffic classes -+ */ -+#define DPNI_MAX_TC 8 -+/** -+ * Maximum number of buffer pools per DPNI -+ */ -+#define DPNI_MAX_DPBP 8 -+/** -+ * Maximum number of storage-profiles per DPNI -+ */ -+#define DPNI_MAX_SP 2 -+ -+/** -+ * All traffic classes considered; see dpni_set_rx_flow() -+ */ -+#define DPNI_ALL_TCS (uint8_t)(-1) -+/** -+ * All flows within traffic class considered; see dpni_set_rx_flow() -+ */ -+#define DPNI_ALL_TC_FLOWS (uint16_t)(-1) -+/** -+ * Generate new flow ID; see dpni_set_tx_flow() -+ */ -+#define DPNI_NEW_FLOW_ID (uint16_t)(-1) -+/* use for common tx-conf queue; see dpni_set_tx_conf_() */ -+#define DPNI_COMMON_TX_CONF (uint16_t)(-1) -+ -+/** -+ * dpni_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpni_id: DPNI unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpni_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpni_id, -+ uint16_t *token); -+ -+/** -+ * dpni_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/* DPNI configuration options */ -+ -+/** -+ * Allow different distribution key profiles for different traffic classes; -+ * if not set, a single key profile is assumed -+ */ -+#define DPNI_OPT_ALLOW_DIST_KEY_PER_TC 0x00000001 -+ -+/** -+ * Disable all non-error transmit confirmation; error frames are reported -+ * back to a common Tx error queue -+ */ -+#define DPNI_OPT_TX_CONF_DISABLED 0x00000002 -+ -+/** -+ * Disable per-sender private Tx confirmation/error queue -+ */ -+#define DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED 0x00000004 -+ -+/** -+ * Support distribution based on hashed key; -+ * allows statistical distribution over receive queues in a traffic class -+ */ -+#define DPNI_OPT_DIST_HASH 0x00000010 -+ -+/** -+ * DEPRECATED - if this flag is selected and all new 'max_fs_entries' are -+ * '0' then backward compatibility is preserved; -+ * Support distribution based on flow steering; -+ * allows explicit control of distribution over receive queues in a traffic -+ * class -+ */ -+#define DPNI_OPT_DIST_FS 0x00000020 -+ -+/** -+ * Unicast filtering support -+ */ -+#define DPNI_OPT_UNICAST_FILTER 0x00000080 -+/** -+ * Multicast filtering support -+ */ -+#define DPNI_OPT_MULTICAST_FILTER 0x00000100 -+/** -+ * VLAN filtering support -+ */ -+#define DPNI_OPT_VLAN_FILTER 0x00000200 -+/** -+ * Support IP reassembly on received packets -+ */ -+#define DPNI_OPT_IPR 0x00000800 -+/** -+ * Support IP fragmentation on transmitted packets -+ */ -+#define DPNI_OPT_IPF 0x00001000 -+/** -+ * VLAN manipulation support -+ */ -+#define DPNI_OPT_VLAN_MANIPULATION 0x00010000 -+/** -+ * Support masking of QoS lookup keys -+ */ -+#define DPNI_OPT_QOS_MASK_SUPPORT 0x00020000 -+/** -+ * Support masking of Flow Steering lookup keys -+ */ -+#define DPNI_OPT_FS_MASK_SUPPORT 0x00040000 -+ -+/** -+ * struct dpni_extended_cfg - Structure representing extended DPNI configuration -+ * @tc_cfg: TCs configuration -+ * @ipr_cfg: IP reassembly configuration -+ */ -+struct dpni_extended_cfg { -+ /** -+ * struct tc_cfg - TC configuration -+ * @max_dist: Maximum distribution size for Rx traffic class; -+ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96, -+ * 112,128,192,224,256,384,448,512,768,896,1024; -+ * value '0' will be treated as '1'. -+ * Other unsupported values will be rounded down to the nearest -+ * supported value.
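The open/close pair brackets every other call in this header through the returned token; here is a minimal sketch of that session pattern, assuming a probed MC portal and using zero cmd_flags as a placeholder (illustrative only, not part of the patch):

static int dpni_example_session(struct fsl_mc_io *mc_io, int dpni_id)
{
	uint16_t token;
	int err;

	/* open a control session for an existing DPNI object */
	err = dpni_open(mc_io, 0 /* cmd_flags */, dpni_id, &token);
	if (err)
		return err;

	/*
	 * Every runtime call in this header takes 'token' to
	 * authenticate against this specific object and portal.
	 */

	return dpni_close(mc_io, 0, token);
}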
-+ * @max_fs_entries: Maximum FS entries for Rx traffic class; -+ * '0' means no support for this TC; -+ */ -+ struct { -+ uint16_t max_dist; -+ uint16_t max_fs_entries; -+ } tc_cfg[DPNI_MAX_TC]; -+ /** -+ * struct ipr_cfg - Structure representing IP reassembly configuration -+ * @max_reass_frm_size: Maximum size of the reassembled frame -+ * @min_frag_size_ipv4: Minimum fragment size of IPv4 fragments -+ * @min_frag_size_ipv6: Minimum fragment size of IPv6 fragments -+ * @max_open_frames_ipv4: Maximum concurrent IPv4 packets in reassembly -+ * process -+ * @max_open_frames_ipv6: Maximum concurrent IPv6 packets in reassembly -+ * process -+ */ -+ struct { -+ uint16_t max_reass_frm_size; -+ uint16_t min_frag_size_ipv4; -+ uint16_t min_frag_size_ipv6; -+ uint16_t max_open_frames_ipv4; -+ uint16_t max_open_frames_ipv6; -+ } ipr_cfg; -+}; -+ -+/** -+ * dpni_prepare_extended_cfg() - Prepare the extended parameters -+ * @cfg: extended structure -+ * @ext_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called before dpni_create() -+ */ -+int dpni_prepare_extended_cfg(const struct dpni_extended_cfg *cfg, -+ uint8_t *ext_cfg_buf); -+ -+/** -+ * struct dpni_cfg - Structure representing DPNI configuration -+ * @mac_addr: Primary MAC address -+ * @adv: Advanced parameters; default is all zeros; -+ * use this structure to change default settings -+ */ -+struct dpni_cfg { -+ uint8_t mac_addr[6]; -+ /** -+ * struct adv - Advanced parameters -+ * @options: Mask of available options; use 'DPNI_OPT_' values -+ * @start_hdr: Selects the packet starting header for parsing; -+ * 'NET_PROT_NONE' is treated as default: 'NET_PROT_ETH' -+ * @max_senders: Maximum number of different senders; used as the number -+ * of dedicated Tx flows; Non-power-of-2 values are rounded -+ * up to the next power-of-2 value as hardware demands it; -+ * '0' will be treated as '1' -+ * @max_tcs: Maximum number of traffic classes (for both Tx and Rx); -+ * '0' will be treated as '1' -+ * @max_unicast_filters: Maximum number of unicast filters; -+ * '0' is treated as '16' -+ * @max_multicast_filters: Maximum number of multicast filters; -+ * '0' is treated as '64' -+ * @max_vlan_filters: Maximum number of VLAN filters -+ * @max_qos_entries: if 'max_tcs > 1', declares the maximum entries in -+ * the QoS table; '0' is treated as '64' -+ * @max_qos_key_size: Maximum key size for the QoS look-up; -+ * '0' is treated as '24' which is enough for IPv4 -+ * 5-tuple -+ * @max_dist_key_size: Maximum key size for the distribution; -+ * '0' is treated as '24' which is enough for IPv4 5-tuple -+ * @max_policers: Maximum number of policers; -+ * should be between '0' and max_tcs -+ * @max_congestion_ctrl: Maximum number of congestion control groups -+ * (CGs); covers early drop and congestion notification -+ * requirements; -+ * should be between '0' and ('max_tcs' + 'max_senders') -+ * @ext_cfg_iova: I/O virtual address of 256 bytes DMA-able memory -+ * filled with the extended configuration by calling -+ * dpni_prepare_extended_cfg() -+ */ -+ struct { -+ uint32_t options; -+ enum net_prot start_hdr; -+ uint8_t max_senders; -+ uint8_t max_tcs; -+ uint8_t max_unicast_filters; -+ uint8_t max_multicast_filters; -+ uint8_t max_vlan_filters; -+ uint8_t max_qos_entries; -+ uint8_t max_qos_key_size; -+ uint8_t max_dist_key_size; -+ uint8_t max_policers; -+ uint8_t max_congestion_ctrl; -+ uint64_t ext_cfg_iova; -+ } adv; -+}; -+ -+/** -+ * dpni_create() - Create the DPNI object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one 
or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPNI object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpni_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpni_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpni_destroy() - Destroy the DPNI object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpni_pools_cfg - Structure representing buffer pools configuration -+ * @num_dpbp: Number of DPBPs -+ * @pools: Array of buffer pools parameters; the number of valid entries -+ * must match 'num_dpbp' value -+ */ -+struct dpni_pools_cfg { -+ uint8_t num_dpbp; -+ /** -+ * struct pools - Buffer pools parameters -+ * @dpbp_id: DPBP object ID -+ * @buffer_size: Buffer size -+ * @backup_pool: Backup pool -+ */ -+ struct { -+ int dpbp_id; -+ uint16_t buffer_size; -+ int backup_pool; -+ } pools[DPNI_MAX_DPBP]; -+}; -+ -+/** -+ * dpni_set_pools() - Set buffer pools configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: Buffer pools configuration -+ * -+ * Mandatory for DPNI operation -+ * warning: Allowed only when DPNI is disabled -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_pools(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_pools_cfg *cfg); -+ -+/** -+ * dpni_enable() - Enable the DPNI, allow sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpni_disable() - Disable the DPNI, stop sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpni_is_enabled() - Check if the DPNI is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_reset() - Reset the DPNI, returns the object to initial state. 
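dpni_set_pools() followed by dpni_enable() is the minimal bring-up sequence implied above; a hedged sketch, assuming 'token' comes from dpni_open() and with a made-up DPBP object id and buffer size:

static int dpni_example_bringup(struct fsl_mc_io *mc_io, uint16_t token)
{
	struct dpni_pools_cfg pools_cfg = { 0 };
	int err;

	pools_cfg.num_dpbp = 1;
	pools_cfg.pools[0].dpbp_id = 7;		/* example DPBP object id */
	pools_cfg.pools[0].buffer_size = 2048;	/* example buffer size */
	pools_cfg.pools[0].backup_pool = 0;

	/* must be done while the DPNI is still disabled */
	err = dpni_set_pools(mc_io, 0, token, &pools_cfg);
	if (err)
		return err;

	return dpni_enable(mc_io, 0, token);
}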
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * DPNI IRQ Index and Events -+ */ -+ -+/** -+ * IRQ index -+ */ -+#define DPNI_IRQ_INDEX 0 -+/** -+ * IRQ event - indicates a change in link state -+ */ -+#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001 -+ -+/** -+ * struct dpni_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user-defined number associated with this IRQ -+ */ -+struct dpni_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpni_set_irq() - Set IRQ information for the DPNI to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpni_irq_cfg *irq_cfg); -+ -+/** -+ * dpni_get_irq() - Get IRQ information from the DPNI. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpni_irq_cfg *irq_cfg); -+ -+/** -+ * dpni_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state: - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable controls the -+ * overall interrupt state. If the interrupt is disabled, no cause will raise -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpni_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpni_set_irq_mask() - Set interrupt mask. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpni_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpni_get_irq_status() - Get the current status of any pending interrupts. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpni_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. 
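The IRQ calls above compose in a fixed order: program the message-interrupt target, mask in the causes of interest, then enable. A sketch under the assumption that the MSI address/value pair comes from the fsl-mc bus (the values here are placeholders):

static int dpni_example_arm_irq(struct fsl_mc_io *mc_io, uint16_t token,
				uint64_t msi_addr, uint32_t msi_val)
{
	struct dpni_irq_cfg irq_cfg = {
		.addr = msi_addr,	/* MSI doorbell address */
		.val = msi_val,		/* MSI payload */
		.irq_num = 0,
	};
	int err;

	err = dpni_set_irq(mc_io, 0, token, DPNI_IRQ_INDEX, &irq_cfg);
	if (err)
		return err;

	/* only the link-changed cause is of interest here */
	err = dpni_set_irq_mask(mc_io, 0, token, DPNI_IRQ_INDEX,
				DPNI_IRQ_EVENT_LINK_CHANGED);
	if (err)
		return err;

	return dpni_set_irq_enable(mc_io, 0, token, DPNI_IRQ_INDEX, 1);
}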
-+ */ -+int dpni_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpni_attr - Structure representing DPNI attributes -+ * @id: DPNI object ID -+ * @version: DPNI version -+ * @start_hdr: Indicates the packet starting header for parsing -+ * @options: Mask of available options; reflects the value as was given in -+ * object's creation -+ * @max_senders: Maximum number of different senders; used as the number -+ * of dedicated Tx flows; -+ * @max_tcs: Maximum number of traffic classes (for both Tx and Rx) -+ * @max_unicast_filters: Maximum number of unicast filters -+ * @max_multicast_filters: Maximum number of multicast filters -+ * @max_vlan_filters: Maximum number of VLAN filters -+ * @max_qos_entries: if 'max_tcs > 1', declares the maximum entries in QoS table -+ * @max_qos_key_size: Maximum key size for the QoS look-up -+ * @max_dist_key_size: Maximum key size for the distribution look-up -+ * @max_policers: Maximum number of policers; -+ * @max_congestion_ctrl: Maximum number of congestion control groups (CGs); -+ * @ext_cfg_iova: I/O virtual address of 256 bytes DMA-able memory; -+ * call dpni_extract_extended_cfg() to extract the extended configuration -+ */ -+struct dpni_attr { -+ int id; -+ /** -+ * struct version - DPNI version -+ * @major: DPNI major version -+ * @minor: DPNI minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ enum net_prot start_hdr; -+ uint32_t options; -+ uint8_t max_senders; -+ uint8_t max_tcs; -+ uint8_t max_unicast_filters; -+ uint8_t max_multicast_filters; -+ uint8_t max_vlan_filters; -+ uint8_t max_qos_entries; -+ uint8_t max_qos_key_size; -+ uint8_t max_dist_key_size; -+ uint8_t max_policers; -+ uint8_t max_congestion_ctrl; -+ uint64_t ext_cfg_iova; -+}; -+ -+/** -+ * dpni_get_attributes() - Retrieve DPNI attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @attr: Object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_attr *attr); -+ -+/** -+ * dpni_extract_extended_cfg() - Extract the extended parameters -+ * @cfg: extended structure -+ * @ext_cfg_buf: 256 bytes of DMA-able memory -+ * -+ * This function has to be called after dpni_get_attributes() -+ */ -+int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg, -+ const uint8_t *ext_cfg_buf); -+ -+/** -+ * DPNI errors -+ */ -+ -+/** -+ * Extract out of frame header error -+ */ -+#define DPNI_ERROR_EOFHE 0x00020000 -+/** -+ * Frame length error -+ */ -+#define DPNI_ERROR_FLE 0x00002000 -+/** -+ * Frame physical error -+ */ -+#define DPNI_ERROR_FPE 0x00001000 -+/** -+ * Parsing header error -+ */ -+#define DPNI_ERROR_PHE 0x00000020 -+/** -+ * Parser L3 checksum error -+ */ -+#define DPNI_ERROR_L3CE 0x00000004 -+/** -+ * Parser L4 checksum error -+ */ -+#define DPNI_ERROR_L4CE 0x00000001 -+ -+/** -+ * enum dpni_error_action - Defines DPNI behavior for errors -+ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame -+ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow -+ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue -+ */ -+enum dpni_error_action { -+ DPNI_ERROR_ACTION_DISCARD = 0, -+ DPNI_ERROR_ACTION_CONTINUE = 1, -+ DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2 -+}; -+ -+/** -+ * struct dpni_error_cfg - Structure representing DPNI errors treatment -+ * @errors: Errors mask; use 'DPNI_ERROR_' values -+ * @error_action: The desired action for the errors mask -+ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation -+ * status (FAS); relevant only for the non-discard action -+ */ -+struct dpni_error_cfg { -+ uint32_t errors; -+ enum dpni_error_action error_action; -+ int set_frame_annotation; -+}; -+ -+/** -+ * dpni_set_errors_behavior() - Set errors behavior -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: Errors configuration -+ * -+ * This function may be called numerous times with different -+ * error masks -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_error_cfg *cfg); -+ -+/** -+ * DPNI buffer layout modification options -+ */ -+ -+/** -+ * Select to modify the time-stamp setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001 -+/** -+ * Select to modify the parser-result setting; not applicable for Tx -+ */ -+#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002 -+/** -+ * Select to modify the frame-status setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004 -+/** -+ * Select to modify the private-data-size setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008 -+/** -+ * Select to modify the data-alignment setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010 -+/** -+ * Select to modify the data-head-room setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020 -+/** -+ * Select to modify the data-tail-room setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040 -+ -+/** -+ * struct dpni_buffer_layout - Structure representing DPNI buffer layout -+ * @options: Flags representing the suggested modifications to the buffer -+ * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_' flags -+ * @pass_timestamp: Pass timestamp value -+ * @pass_parser_result: Pass parser results -+ * @pass_frame_status: Pass frame status -+ * @private_data_size: Size kept for private data (in bytes) -+ * @data_align: Data alignment -+ * @data_head_room: Data head room -+ * @data_tail_room: Data tail room -+ */ -+struct dpni_buffer_layout { -+ uint32_t options; -+ int pass_timestamp; -+ int pass_parser_result; -+ int pass_frame_status; -+ uint16_t private_data_size; -+ uint16_t data_align; -+ uint16_t data_head_room; -+ uint16_t data_tail_room; -+}; -+ -+/** -+ * dpni_get_rx_buffer_layout() - Retrieve Rx buffer layout attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Returns buffer layout attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_set_rx_buffer_layout() - Set Rx buffer layout configuration. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Buffer layout configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Allowed only when DPNI is disabled -+ */ -+int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_get_tx_buffer_layout() - Retrieve Tx buffer layout attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Returns buffer layout attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_set_tx_buffer_layout() - Set Tx buffer layout configuration. 
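dpni_set_errors_behavior() and the buffer-layout setters are typically applied together while the DPNI is still disabled; an illustrative sketch with example values (the 64-byte private data size is an assumption, not a mandated value):

static int dpni_example_rx_setup(struct fsl_mc_io *mc_io, uint16_t token)
{
	struct dpni_error_cfg err_cfg = {
		.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE,
		.error_action = DPNI_ERROR_ACTION_DISCARD,
		.set_frame_annotation = 0,
	};
	struct dpni_buffer_layout layout = {
		.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
			   DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE,
		.pass_timestamp = 1,
		.private_data_size = 64,	/* example value */
	};
	int err;

	/* drop frames that fail L3/L4 checksum validation */
	err = dpni_set_errors_behavior(mc_io, 0, token, &err_cfg);
	if (err)
		return err;

	/* allowed only while the DPNI is disabled */
	return dpni_set_rx_buffer_layout(mc_io, 0, token, &layout);
}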
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Buffer layout configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Allowed only when DPNI is disabled -+ */ -+int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_get_tx_conf_buffer_layout() - Retrieve Tx confirmation buffer layout -+ * attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Returns buffer layout attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_set_tx_conf_buffer_layout() - Set Tx confirmation buffer layout -+ * configuration. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Buffer layout configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Allowed only when DPNI is disabled -+ */ -+int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_set_l3_chksum_validation() - Enable/disable L3 checksum validation -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_get_l3_chksum_validation() - Get L3 checksum validation mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_set_l4_chksum_validation() - Enable/disable L4 checksum validation -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_get_l4_chksum_validation() - Get L4 checksum validation mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used -+ * for enqueue operations -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @qdid: Returned virtual QDID value that should be used as an argument -+ * in all enqueue operations -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_qdid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *qdid); -+ -+/** -+ * struct dpni_sp_info - Structure representing DPNI storage-profile information -+ * (relevant only for DPNI owned by AIOP) -+ * @spids: array of storage-profiles -+ */ -+struct dpni_sp_info { -+ uint16_t spids[DPNI_MAX_SP]; -+}; -+ -+/** -+ * dpni_get_sp_info() - Get the AIOP storage profile IDs associated with the DPNI -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @sp_info: Returned AIOP storage-profile information -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Only relevant for DPNI that belongs to AIOP container. -+ */ -+int dpni_get_sp_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_sp_info *sp_info); -+ -+/** -+ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @data_offset: Tx data offset (from start of buffer) -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *data_offset); -+ -+/** -+ * enum dpni_counter - DPNI counter types -+ * @DPNI_CNT_ING_FRAME: Counts ingress frames -+ * @DPNI_CNT_ING_BYTE: Counts ingress bytes -+ * @DPNI_CNT_ING_FRAME_DROP: Counts ingress frames dropped due to explicit -+ * 'drop' setting -+ * @DPNI_CNT_ING_FRAME_DISCARD: Counts ingress frames discarded due to errors -+ * @DPNI_CNT_ING_MCAST_FRAME: Counts ingress multicast frames -+ * @DPNI_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes -+ * @DPNI_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames -+ * @DPNI_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes -+ * @DPNI_CNT_EGR_FRAME: Counts egress frames -+ * @DPNI_CNT_EGR_BYTE: Counts egress bytes -+ * @DPNI_CNT_EGR_FRAME_DISCARD: Counts egress frames discarded due to errors -+ */ -+enum dpni_counter { -+ DPNI_CNT_ING_FRAME = 0x0, -+ DPNI_CNT_ING_BYTE = 0x1, -+ DPNI_CNT_ING_FRAME_DROP = 0x2, -+ DPNI_CNT_ING_FRAME_DISCARD = 0x3, -+ DPNI_CNT_ING_MCAST_FRAME = 0x4, -+ DPNI_CNT_ING_MCAST_BYTE = 0x5, -+ DPNI_CNT_ING_BCAST_FRAME = 0x6, -+ DPNI_CNT_ING_BCAST_BYTES = 0x7, -+ DPNI_CNT_EGR_FRAME = 0x8, -+ DPNI_CNT_EGR_BYTE = 0x9, -+ DPNI_CNT_EGR_FRAME_DISCARD = 0xa -+}; -+ -+/** -+ * dpni_get_counter() - Read a specific DPNI counter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @counter: The requested counter -+ * @value: Returned counter's current value -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpni_counter counter, -+ uint64_t *value); -+ -+/** -+ * dpni_set_counter() - Set (or clear) a specific DPNI counter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @counter: The requested counter -+ * @value: New counter value; typically pass '0' for resetting -+ * the counter. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpni_counter counter, -+ uint64_t value); -+ -+/** -+ * Enable auto-negotiation -+ */ -+#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL -+/** -+ * Enable half-duplex mode -+ */ -+#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -+/** -+ * Enable pause frames -+ */ -+#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL -+/** -+ * Enable asymmetric pause frames -+ */ -+#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL -+ -+/** -+ * struct dpni_link_cfg - Structure representing DPNI link configuration -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPNI_LINK_OPT_' values -+ */ -+struct dpni_link_cfg { -+ uint32_t rate; -+ uint64_t options; -+}; -+ -+/** -+ * dpni_set_link_cfg() - Set the link configuration. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: Link configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_link_cfg *cfg); -+ -+/** -+ * struct dpni_link_state - Structure representing DPNI link state -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPNI_LINK_OPT_' values -+ * @up: Link state; '0' for down, '1' for up -+ */ -+struct dpni_link_state { -+ uint32_t rate; -+ uint64_t options; -+ int up; -+}; -+ -+/** -+ * dpni_get_link_state() - Return the link state (either up or down) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @state: Returned link state; -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_link_state *state); -+ -+/** -+ * struct dpni_tx_shaping_cfg - Structure representing DPNI Tx shaping configuration -+ * @rate_limit: rate in Mbps -+ * @max_burst_size: burst size in bytes (up to 64KB) -+ */ -+struct dpni_tx_shaping_cfg { -+ uint32_t rate_limit; -+ uint16_t max_burst_size; -+}; -+ -+/** -+ * dpni_set_tx_shaping() - Set the transmit shaping -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tx_shaper: Tx shaping configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_tx_shaping_cfg *tx_shaper); -+ -+/** -+ * dpni_set_max_frame_length() - Set the maximum received frame length. 
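A short sketch of the counter and link-state queries above, clear-then-read style (all values illustrative):

static int dpni_example_stats(struct fsl_mc_io *mc_io, uint16_t token)
{
	struct dpni_link_state state = { 0 };
	uint64_t frames;
	int err;

	/* reset the ingress frame counter to zero */
	err = dpni_set_counter(mc_io, 0, token, DPNI_CNT_ING_FRAME, 0);
	if (err)
		return err;

	/* 'frames' holds the count accumulated since the clear */
	err = dpni_get_counter(mc_io, 0, token, DPNI_CNT_ING_FRAME, &frames);
	if (err)
		return err;

	/* state.up reports '1' when the link is up */
	return dpni_get_link_state(mc_io, 0, token, &state);
}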
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @max_frame_length: Maximum received frame length (in -+ * bytes); frame is discarded if its -+ * length exceeds this value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t max_frame_length); -+ -+/** -+ * dpni_get_max_frame_length() - Get the maximum received frame length. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @max_frame_length: Maximum received frame length (in -+ * bytes); frame is discarded if its -+ * length exceeds this value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *max_frame_length); -+ -+/** -+ * dpni_set_mtu() - Set the MTU for the interface. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mtu: MTU length (in bytes) -+ * -+ * MTU determines the maximum fragment size for performing IP -+ * fragmentation on egress packets. -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_mtu(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t mtu); -+ -+/** -+ * dpni_get_mtu() - Get the MTU. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mtu: Returned MTU length (in bytes) -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_mtu(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *mtu); -+ -+/** -+ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_get_multicast_promisc() - Get multicast promiscuous mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_get_unicast_promisc() - Get unicast promiscuous mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_set_primary_mac_addr() - Set the primary MAC address -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mac_addr: MAC address to set as primary address -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]); -+ -+/** -+ * dpni_get_primary_mac_addr() - Get the primary MAC address -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mac_addr: Returned MAC address -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t mac_addr[6]); -+ -+/** -+ * dpni_add_mac_addr() - Add MAC address filter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mac_addr: MAC address to add -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_add_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]); -+ -+/** -+ * dpni_remove_mac_addr() - Remove MAC address filter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mac_addr: MAC address to remove -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]); -+ -+/** -+ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @unicast: Set to '1' to clear unicast addresses -+ * @multicast: Set to '1' to clear multicast addresses -+ * -+ * The primary MAC address is not cleared by this operation. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int unicast, -+ int multicast); -+ -+/** -+ * dpni_set_vlan_filters() - Enable/disable VLAN filtering mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. 
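The MAC filter calls above follow the usual primary-plus-whitelist split; a sketch with example addresses (the 00:04:9f OUI is only an illustration):

static int dpni_example_mac(struct fsl_mc_io *mc_io, uint16_t token)
{
	const uint8_t primary[6] = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x01 };
	const uint8_t extra[6]   = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x02 };
	int err;

	/* the primary address is kept even if filters are cleared */
	err = dpni_set_primary_mac_addr(mc_io, 0, token, primary);
	if (err)
		return err;

	/* whitelist one additional unicast address */
	return dpni_add_mac_addr(mc_io, 0, token, extra);
}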
-+ */ -+int dpni_set_vlan_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_add_vlan_id() - Add VLAN ID filter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @vlan_id: VLAN ID to add -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id); -+ -+/** -+ * dpni_remove_vlan_id() - Remove VLAN ID filter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @vlan_id: VLAN ID to remove -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id); -+ -+/** -+ * dpni_clear_vlan_filters() - Clear all VLAN filters -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * enum dpni_tx_schedule_mode - DPNI Tx scheduling mode -+ * @DPNI_TX_SCHED_STRICT_PRIORITY: strict priority -+ * @DPNI_TX_SCHED_WEIGHTED: weighted based scheduling -+ */ -+enum dpni_tx_schedule_mode { -+ DPNI_TX_SCHED_STRICT_PRIORITY, -+ DPNI_TX_SCHED_WEIGHTED, -+}; -+ -+/** -+ * struct dpni_tx_schedule_cfg - Structure representing Tx -+ * scheduling configuration -+ * @mode: scheduling mode -+ * @delta_bandwidth: Bandwidth represented in weights from 100 to 10000; -+ * not applicable for 'strict-priority' mode; -+ */ -+struct dpni_tx_schedule_cfg { -+ enum dpni_tx_schedule_mode mode; -+ uint16_t delta_bandwidth; -+}; -+ -+/** -+ * struct dpni_tx_selection_cfg - Structure representing transmission -+ * selection configuration -+ * @tc_sched: an array of traffic-classes -+ */ -+struct dpni_tx_selection_cfg { -+ struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC]; -+}; -+ -+/** -+ * dpni_set_tx_selection() - Set transmission selection configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: transmission selection configuration -+ * -+ * warning: Allowed only when DPNI is disabled -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_set_tx_selection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_tx_selection_cfg *cfg); -+ -+/** -+ * enum dpni_dist_mode - DPNI distribution mode -+ * @DPNI_DIST_MODE_NONE: No distribution -+ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if -+ * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation -+ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if -+ * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation -+ */ -+enum dpni_dist_mode { -+ DPNI_DIST_MODE_NONE = 0, -+ DPNI_DIST_MODE_HASH = 1, -+ DPNI_DIST_MODE_FS = 2 -+}; -+ -+/** -+ * enum dpni_fs_miss_action - DPNI Flow Steering miss action -+ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame -+ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id -+ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash -+ */ -+enum dpni_fs_miss_action { -+ DPNI_FS_MISS_DROP = 0, -+ DPNI_FS_MISS_EXPLICIT_FLOWID = 1, -+ DPNI_FS_MISS_HASH = 2 -+}; -+ -+/** -+ * struct dpni_fs_tbl_cfg - Flow Steering table configuration -+ * @miss_action: Miss action selection -+ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID' -+ */ -+struct dpni_fs_tbl_cfg { -+ enum dpni_fs_miss_action miss_action; -+ uint16_t default_flow_id; -+}; -+ -+/** -+ * dpni_prepare_key_cfg() - Prepare the extract parameters -+ * @cfg: Definition of a full Key Generation profile (rule) -+ * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called before the following functions: -+ * - dpni_set_rx_tc_dist() -+ * - dpni_set_qos_table() -+ */ -+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, -+ uint8_t *key_cfg_buf); -+ -+/** -+ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration -+ * @dist_size: Set the distribution size; -+ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96, -+ * 112,128,192,224,256,384,448,512,768,896,1024 -+ * @dist_mode: Distribution mode -+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with -+ * the extractions to be used for the distribution key by calling -+ * dpni_prepare_key_cfg(); relevant only when -+ * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0' -+ * @fs_cfg: Flow Steering table configuration; only relevant if -+ * 'dist_mode = DPNI_DIST_MODE_FS' -+ */ -+struct dpni_rx_tc_dist_cfg { -+ uint16_t dist_size; -+ enum dpni_dist_mode dist_mode; -+ uint64_t key_cfg_iova; -+ struct dpni_fs_tbl_cfg fs_cfg; -+}; -+ -+/** -+ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Traffic class distribution configuration -+ * -+ * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg() -+ * first to prepare the key_cfg_iova parameter -+ * -+ * Return: '0' on Success; error code otherwise. 
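The distribution setup above is a two-step protocol: serialize a dpkg extraction profile into a DMA-able buffer with dpni_prepare_key_cfg(), then hand that buffer's IOVA to dpni_set_rx_tc_dist(). A sketch, assuming the caller supplies the dpkg profile and the buffer's DMA mapping (both elided here):

static int dpni_example_hash_dist(struct fsl_mc_io *mc_io, uint16_t token,
				  const struct dpkg_profile_cfg *kg_cfg,
				  uint8_t *key_buf, uint64_t key_iova)
{
	struct dpni_rx_tc_dist_cfg dist_cfg = { 0 };
	int err;

	/* serialize the extraction profile into the zeroed buffer */
	err = dpni_prepare_key_cfg(kg_cfg, key_buf);
	if (err)
		return err;

	dist_cfg.dist_size = 8;			/* spread over 8 Rx queues */
	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
	dist_cfg.key_cfg_iova = key_iova;	/* DMA address of key_buf */

	return dpni_set_rx_tc_dist(mc_io, 0, token, 0 /* tc 0 */, &dist_cfg);
}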
-+ */ -+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rx_tc_dist_cfg *cfg); -+ -+/** -+ * Set to select color aware mode (otherwise - color blind) -+ */ -+#define DPNI_POLICER_OPT_COLOR_AWARE 0x00000001 -+/** -+ * Set to discard frame with RED color -+ */ -+#define DPNI_POLICER_OPT_DISCARD_RED 0x00000002 -+ -+/** -+ * enum dpni_policer_mode - selecting the policer mode -+ * @DPNI_POLICER_MODE_NONE: Policer is disabled -+ * @DPNI_POLICER_MODE_PASS_THROUGH: Policer pass through -+ * @DPNI_POLICER_MODE_RFC_2698: Policer algorithm RFC 2698 -+ * @DPNI_POLICER_MODE_RFC_4115: Policer algorithm RFC 4115 -+ */ -+enum dpni_policer_mode { -+ DPNI_POLICER_MODE_NONE = 0, -+ DPNI_POLICER_MODE_PASS_THROUGH, -+ DPNI_POLICER_MODE_RFC_2698, -+ DPNI_POLICER_MODE_RFC_4115 -+}; -+ -+/** -+ * enum dpni_policer_unit - DPNI policer units -+ * @DPNI_POLICER_UNIT_BYTES: bytes units -+ * @DPNI_POLICER_UNIT_FRAMES: frames units -+ */ -+enum dpni_policer_unit { -+ DPNI_POLICER_UNIT_BYTES = 0, -+ DPNI_POLICER_UNIT_FRAMES -+}; -+ -+/** -+ * enum dpni_policer_color - selecting the policer color -+ * @DPNI_POLICER_COLOR_GREEN: Green color -+ * @DPNI_POLICER_COLOR_YELLOW: Yellow color -+ * @DPNI_POLICER_COLOR_RED: Red color -+ */ -+enum dpni_policer_color { -+ DPNI_POLICER_COLOR_GREEN = 0, -+ DPNI_POLICER_COLOR_YELLOW, -+ DPNI_POLICER_COLOR_RED -+}; -+ -+/** -+ * struct dpni_rx_tc_policing_cfg - Policer configuration -+ * @options: Mask of available options; use 'DPNI_POLICER_OPT_' values -+ * @mode: policer mode -+ * @default_color: For pass-through mode the policer re-colors with this -+ * color any incoming packets. For Color aware non-pass-through mode: -+ * policer re-colors with this color all packets with FD[DROPP]>2. -+ * @units: Bytes or Packets -+ * @cir: Committed information rate (CIR) in Kbps or packets/second -+ * @cbs: Committed burst size (CBS) in bytes or packets -+ * @eir: Peak information rate (PIR, rfc2698) in Kbps or packets/second -+ * Excess information rate (EIR, rfc4115) in Kbps or packets/second -+ * @ebs: Peak burst size (PBS, rfc2698) in bytes or packets -+ * Excess burst size (EBS, rfc4115) in bytes or packets -+ */ -+struct dpni_rx_tc_policing_cfg { -+ uint32_t options; -+ enum dpni_policer_mode mode; -+ enum dpni_policer_unit units; -+ enum dpni_policer_color default_color; -+ uint32_t cir; -+ uint32_t cbs; -+ uint32_t eir; -+ uint32_t ebs; -+}; -+ -+/** -+ * dpni_set_rx_tc_policing() - Set Rx traffic class policing configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Traffic class policing configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rx_tc_policing_cfg *cfg); -+ -+/** -+ * dpni_get_rx_tc_policing() - Get Rx traffic class policing configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Traffic class policing configuration -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_rx_tc_policing_cfg *cfg); -+ -+/** -+ * enum dpni_congestion_unit - DPNI congestion units -+ * @DPNI_CONGESTION_UNIT_BYTES: bytes units -+ * @DPNI_CONGESTION_UNIT_FRAMES: frames units -+ */ -+enum dpni_congestion_unit { -+ DPNI_CONGESTION_UNIT_BYTES = 0, -+ DPNI_CONGESTION_UNIT_FRAMES -+}; -+ -+/** -+ * enum dpni_early_drop_mode - DPNI early drop mode -+ * @DPNI_EARLY_DROP_MODE_NONE: early drop is disabled -+ * @DPNI_EARLY_DROP_MODE_TAIL: early drop in taildrop mode -+ * @DPNI_EARLY_DROP_MODE_WRED: early drop in WRED mode -+ */ -+enum dpni_early_drop_mode { -+ DPNI_EARLY_DROP_MODE_NONE = 0, -+ DPNI_EARLY_DROP_MODE_TAIL, -+ DPNI_EARLY_DROP_MODE_WRED -+}; -+ -+/** -+ * struct dpni_wred_cfg - WRED configuration -+ * @max_threshold: maximum threshold at which packets may be discarded. Above this -+ * threshold all packets are discarded; must be less than 2^39; -+ * approximated to be expressed as (x+256)*2^(y-1) due to HW -+ * implementation. -+ * @min_threshold: minimum threshold at which packets may be discarded -+ * @drop_probability: probability that a packet will be discarded (1-100, -+ * associated with the max_threshold). -+ */ -+struct dpni_wred_cfg { -+ uint64_t max_threshold; -+ uint64_t min_threshold; -+ uint8_t drop_probability; -+}; -+ -+/** -+ * struct dpni_early_drop_cfg - early-drop configuration -+ * @mode: drop mode -+ * @units: units type -+ * @green: WRED - 'green' configuration -+ * @yellow: WRED - 'yellow' configuration -+ * @red: WRED - 'red' configuration -+ * @tail_drop_threshold: tail drop threshold -+ */ -+struct dpni_early_drop_cfg { -+ enum dpni_early_drop_mode mode; -+ enum dpni_congestion_unit units; -+ -+ struct dpni_wred_cfg green; -+ struct dpni_wred_cfg yellow; -+ struct dpni_wred_cfg red; -+ -+ uint32_t tail_drop_threshold; -+}; -+ -+/** -+ * dpni_prepare_early_drop() - prepare an early drop. -+ * @cfg: Early-drop configuration -+ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called before dpni_set_rx_tc_early_drop or -+ * dpni_set_tx_tc_early_drop -+ * -+ */ -+void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg, -+ uint8_t *early_drop_buf); -+ -+/** -+ * dpni_extract_early_drop() - extract the early drop configuration. -+ * @cfg: Early-drop configuration -+ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called after dpni_get_rx_tc_early_drop or -+ * dpni_get_tx_tc_early_drop -+ * -+ */ -+void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg, -+ const uint8_t *early_drop_buf); -+ -+/** -+ * dpni_set_rx_tc_early_drop() - Set Rx traffic class early-drop configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled -+ * with the early-drop configuration by calling dpni_prepare_early_drop() -+ * -+ * warning: Before calling this function, call dpni_prepare_early_drop() to -+ * prepare the early_drop_iova parameter -+ * -+ * Return: '0' on Success; error code otherwise.
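Returning to the Rx policing API above, a hedged sketch of a two-rate RFC 2698 policer in byte units ('mc_io', 'token' and 'tc_id' assumed as before; the rates and burst sizes are picked arbitrarily):

    static int sketch_set_policer(struct fsl_mc_io *mc_io, uint16_t token,
                                  uint8_t tc_id)
    {
            struct dpni_rx_tc_policing_cfg pol_cfg = {
                    .options       = DPNI_POLICER_OPT_COLOR_AWARE,
                    .mode          = DPNI_POLICER_MODE_RFC_2698,
                    .units         = DPNI_POLICER_UNIT_BYTES,
                    .default_color = DPNI_POLICER_COLOR_GREEN,
                    .cir           = 100000, /* committed rate, Kbps */
                    .cbs           = 64000,  /* committed burst, bytes */
                    .eir           = 200000, /* peak rate (PIR), Kbps */
                    .ebs           = 128000, /* peak burst (PBS), bytes */
            };

            return dpni_set_rx_tc_policing(mc_io, 0, token, tc_id, &pol_cfg);
    }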
-+ */ -+int dpni_set_rx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * dpni_get_rx_tc_early_drop() - Get Rx traffic class early-drop configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory -+ * -+ * warning: After calling this function, call dpni_extract_early_drop() to -+ * get the early drop configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_get_rx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * dpni_set_tx_tc_early_drop() - Set Tx traffic class early-drop configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled -+ * with the early-drop configuration by calling dpni_prepare_early_drop() -+ * -+ * warning: Before calling this function, call dpni_prepare_early_drop() to -+ * prepare the early_drop_iova parameter -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_tx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * dpni_get_tx_tc_early_drop() - Get Tx traffic class early-drop configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory -+ * -+ * warning: After calling this function, call dpni_extract_early_drop() to -+ * get the early drop configuration -+ * -+ * Return: '0' on Success; error code otherwise. 
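The prepare/set split above implies a two-step call sequence; a minimal sketch (thresholds arbitrary; 'ed_mem' is a zeroed 256-byte buffer DMA-mapped at 'ed_iova', both illustrative):

    static int sketch_set_wred(struct fsl_mc_io *mc_io, uint16_t token,
                               uint8_t tc_id, uint8_t *ed_mem, uint64_t ed_iova)
    {
            struct dpni_early_drop_cfg ed_cfg = { 0 };

            ed_cfg.mode = DPNI_EARLY_DROP_MODE_WRED;
            ed_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
            ed_cfg.green.min_threshold = 1024;
            ed_cfg.green.max_threshold = 2048;
            ed_cfg.green.drop_probability = 50; /* percent, at max_threshold */
            /* yellow and red left all-zero (disabled) for brevity */

            /* serialize the config into the buffer the hardware will read */
            dpni_prepare_early_drop(&ed_cfg, ed_mem);

            return dpni_set_rx_tc_early_drop(mc_io, 0, token, tc_id, ed_iova);
    }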
-+ */ -+int dpni_get_tx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * enum dpni_dest - DPNI destination types -+ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and -+ * does not generate FQDAN notifications; user is expected to -+ * dequeue from the queue based on polling or other user-defined -+ * method -+ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN -+ * notifications to the specified DPIO; user is expected to dequeue -+ * from the queue only after notification is received -+ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate -+ * FQDAN notifications, but is connected to the specified DPCON -+ * object; user is expected to dequeue from the DPCON channel -+ */ -+enum dpni_dest { -+ DPNI_DEST_NONE = 0, -+ DPNI_DEST_DPIO = 1, -+ DPNI_DEST_DPCON = 2 -+}; -+ -+/** -+ * struct dpni_dest_cfg - Structure representing DPNI destination parameters -+ * @dest_type: Destination type -+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type -+ * @priority: Priority selection within the DPIO or DPCON channel; valid values -+ * are 0-1 or 0-7, depending on the number of priorities in that -+ * channel; not relevant for 'DPNI_DEST_NONE' option -+ */ -+struct dpni_dest_cfg { -+ enum dpni_dest dest_type; -+ int dest_id; -+ uint8_t priority; -+}; -+ -+/* DPNI congestion options */ -+ -+/** -+ * CSCN message is written to message_iova once entering a -+ * congestion state (see 'threshold_entry') -+ */ -+#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001 -+/** -+ * CSCN message is written to message_iova once exiting a -+ * congestion state (see 'threshold_exit') -+ */ -+#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002 -+/** -+ * CSCN write will attempt to allocate into a cache (coherent write); -+ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_' is selected -+ */ -+#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004 -+/** -+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to -+ * DPIO/DPCON's WQ channel once entering a congestion state -+ * (see 'threshold_entry') -+ */ -+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008 -+/** -+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to -+ * DPIO/DPCON's WQ channel once exiting a congestion state -+ * (see 'threshold_exit') -+ */ -+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010 -+/** -+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the -+ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled) -+ */ -+#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020 -+ -+/** -+ * struct dpni_congestion_notification_cfg - congestion notification -+ * configuration -+ * @units: units type -+ * @threshold_entry: above this threshold we enter a congestion state. -+ * Set it to '0' to disable it -+ * @threshold_exit: below this threshold we exit the congestion state.
-+ * @message_ctx: The context that will be part of the CSCN message -+ * @message_iova: I/O virtual address (must be in DMA-able memory), -+ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_' is -+ * contained in 'options' -+ * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel -+ * @options: Mask of available options; use 'DPNI_CONG_OPT_' values -+ */ -+ -+struct dpni_congestion_notification_cfg { -+ enum dpni_congestion_unit units; -+ uint32_t threshold_entry; -+ uint32_t threshold_exit; -+ uint64_t message_ctx; -+ uint64_t message_iova; -+ struct dpni_dest_cfg dest_cfg; -+ uint16_t options; -+}; -+ -+/** -+ * dpni_set_rx_tc_congestion_notification() - Set Rx traffic class congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: congestion notification configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_get_rx_tc_congestion_notification() - Get Rx traffic class congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: congestion notification configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_get_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_set_tx_tc_congestion_notification() - Set Tx traffic class congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: congestion notification configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_get_tx_tc_congestion_notification() - Get Tx traffic class congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: congestion notification configuration -+ * -+ * Return: '0' on Success; error code otherwise.
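A sketch of wiring up the congestion notification described above so that CSCN state records land in memory ('cscn_iova' assumed 16-byte aligned and DMA-able; thresholds arbitrary):

    static int sketch_set_cscn(struct fsl_mc_io *mc_io, uint16_t token,
                               uint8_t tc_id, uint64_t cscn_iova, uint64_t ctx)
    {
            struct dpni_congestion_notification_cfg cn_cfg = { 0 };

            cn_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
            cn_cfg.threshold_entry = 64 * 1024; /* enter congestion at 64 KB */
            cn_cfg.threshold_exit = 32 * 1024;  /* leave it below 32 KB */
            cn_cfg.message_iova = cscn_iova;
            cn_cfg.message_ctx = ctx;           /* echoed back in the CSCN */
            cn_cfg.options = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
                             DPNI_CONG_OPT_WRITE_MEM_ON_EXIT;

            return dpni_set_rx_tc_congestion_notification(mc_io, 0, token,
                                                          tc_id, &cn_cfg);
    }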
-+ */ -+int dpni_get_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * enum dpni_flc_type - DPNI FLC types -+ * @DPNI_FLC_USER_DEFINED: select the FLC to be used for user defined value -+ * @DPNI_FLC_STASH: select the FLC to be used for stash control -+ */ -+enum dpni_flc_type { -+ DPNI_FLC_USER_DEFINED = 0, -+ DPNI_FLC_STASH = 1, -+}; -+ -+/** -+ * enum dpni_stash_size - DPNI FLC stashing size -+ * @DPNI_STASH_SIZE_0B: no stash -+ * @DPNI_STASH_SIZE_64B: stashes 64 bytes -+ * @DPNI_STASH_SIZE_128B: stashes 128 bytes -+ * @DPNI_STASH_SIZE_192B: stashes 192 bytes -+ */ -+enum dpni_stash_size { -+ DPNI_STASH_SIZE_0B = 0, -+ DPNI_STASH_SIZE_64B = 1, -+ DPNI_STASH_SIZE_128B = 2, -+ DPNI_STASH_SIZE_192B = 3, -+}; -+ -+/* DPNI FLC stash options */ -+ -+/** -+ * stashes the whole annotation area (up to 192 bytes) -+ */ -+#define DPNI_FLC_STASH_FRAME_ANNOTATION 0x00000001 -+ -+/** -+ * struct dpni_flc_cfg - Structure representing DPNI FLC configuration -+ * @flc_type: FLC type -+ * @options: Mask of available options; -+ * use 'DPNI_FLC_STASH_' values -+ * @frame_data_size: Size of frame data to be stashed -+ * @flow_context_size: Size of flow context to be stashed -+ * @flow_context: 1. In case flc_type is 'DPNI_FLC_USER_DEFINED': -+ * this value will be provided in the frame descriptor -+ * (FD[FLC]) -+ * 2. In case flc_type is 'DPNI_FLC_STASH': -+ * this value will be I/O virtual address of the -+ * flow-context; -+ * Must be cacheline-aligned and DMA-able memory -+ */ -+struct dpni_flc_cfg { -+ enum dpni_flc_type flc_type; -+ uint32_t options; -+ enum dpni_stash_size frame_data_size; -+ enum dpni_stash_size flow_context_size; -+ uint64_t flow_context; -+}; -+ -+/** -+ * DPNI queue modification options -+ */ -+ -+/** -+ * Select to modify the user's context associated with the queue -+ */ -+#define DPNI_QUEUE_OPT_USER_CTX 0x00000001 -+/** -+ * Select to modify the queue's destination -+ */ -+#define DPNI_QUEUE_OPT_DEST 0x00000002 -+/** Select to modify the flow-context parameters; -+ * not applicable for Tx-conf/Err queues as the FD comes from the user -+ */ -+#define DPNI_QUEUE_OPT_FLC 0x00000004 -+/** -+ * Select to modify the queue's order preservation -+ */ -+#define DPNI_QUEUE_OPT_ORDER_PRESERVATION 0x00000008 -+/* Select to modify the queue's tail-drop threshold */ -+#define DPNI_QUEUE_OPT_TAILDROP_THRESHOLD 0x00000010 -+ -+/** -+ * struct dpni_queue_cfg - Structure representing queue configuration -+ * @options: Flags representing the suggested modifications to the queue; -+ * Use any combination of 'DPNI_QUEUE_OPT_' flags -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame; valid only if 'DPNI_QUEUE_OPT_USER_CTX' -+ * is contained in 'options' -+ * @dest_cfg: Queue destination parameters; -+ * valid only if 'DPNI_QUEUE_OPT_DEST' is contained in 'options' -+ * @flc_cfg: Flow context configuration; in case the TC's distribution -+ * is either NONE or HASH, the FLC's settings of flow#0 are used. -+ * In the case of FS (flow-steering) the flow's FLC settings -+ * are used.
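A sketch of filling an FLC stash configuration with the types above ('flow_ctx_iova' assumed cacheline-aligned and DMA-able); the structure is not sent on its own but embedded in the queue configuration defined just below:

    static void sketch_fill_flc(struct dpni_flc_cfg *flc_cfg,
                                uint64_t flow_ctx_iova)
    {
            flc_cfg->flc_type = DPNI_FLC_STASH;
            flc_cfg->options = DPNI_FLC_STASH_FRAME_ANNOTATION;
            flc_cfg->frame_data_size = DPNI_STASH_SIZE_64B; /* 64 B of frame data */
            flc_cfg->flow_context_size = DPNI_STASH_SIZE_64B;
            flc_cfg->flow_context = flow_ctx_iova;
    }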
-+ * valid only if 'DPNI_QUEUE_OPT_FLC' is contained in 'options' -+ * @order_preservation_en: enable/disable order preservation; -+ * valid only if 'DPNI_QUEUE_OPT_ORDER_PRESERVATION' is contained -+ * in 'options' -+ * @tail_drop_threshold: set the queue's tail drop threshold in bytes; -+ * a '0' value disables the threshold; maximum value is 0xE000000; -+ * valid only if 'DPNI_QUEUE_OPT_TAILDROP_THRESHOLD' is contained -+ * in 'options' -+ */ -+struct dpni_queue_cfg { -+ uint32_t options; -+ uint64_t user_ctx; -+ struct dpni_dest_cfg dest_cfg; -+ struct dpni_flc_cfg flc_cfg; -+ int order_preservation_en; -+ uint32_t tail_drop_threshold; -+}; -+ -+/** -+ * struct dpni_queue_attr - Structure representing queue attributes -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame -+ * @dest_cfg: Queue destination configuration -+ * @flc_cfg: Flow context configuration -+ * @order_preservation_en: enable/disable order preservation -+ * @tail_drop_threshold: queue's tail drop threshold in bytes; -+ * @fqid: Virtual fqid value to be used for dequeue operations -+ */ -+struct dpni_queue_attr { -+ uint64_t user_ctx; -+ struct dpni_dest_cfg dest_cfg; -+ struct dpni_flc_cfg flc_cfg; -+ int order_preservation_en; -+ uint32_t tail_drop_threshold; -+ -+ uint32_t fqid; -+}; -+ -+/** -+ * DPNI Tx flow modification options -+ */ -+ -+/** -+ * Select to modify the settings for dedicated Tx confirmation/error -+ */ -+#define DPNI_TX_FLOW_OPT_TX_CONF_ERROR 0x00000001 -+/** -+ * Select to modify the L3 checksum generation setting -+ */ -+#define DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN 0x00000010 -+/** -+ * Select to modify the L4 checksum generation setting -+ */ -+#define DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN 0x00000020 -+ -+/** -+ * struct dpni_tx_flow_cfg - Structure representing Tx flow configuration -+ * @options: Flags representing the suggested modifications to the Tx flow; -+ * Use any combination of 'DPNI_TX_FLOW_OPT_' flags -+ * @use_common_tx_conf_queue: Set to '1' to use the common (default) Tx -+ * confirmation and error queue; Set to '0' to use the private -+ * Tx confirmation and error queue; valid only if -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' wasn't set at DPNI creation -+ * and 'DPNI_TX_FLOW_OPT_TX_CONF_ERROR' is contained in 'options' -+ * @l3_chksum_gen: Set to '1' to enable L3 checksum generation; '0' to disable; -+ * valid only if 'DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN' is contained in 'options' -+ * @l4_chksum_gen: Set to '1' to enable L4 checksum generation; '0' to disable; -+ * valid only if 'DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN' is contained in 'options' -+ */ -+struct dpni_tx_flow_cfg { -+ uint32_t options; -+ int use_common_tx_conf_queue; -+ int l3_chksum_gen; -+ int l4_chksum_gen; -+}; -+ -+/** -+ * dpni_set_tx_flow() - Set Tx flow configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: Provides (or returns) the sender's flow ID; -+ * for each new sender set (*flow_id) to 'DPNI_NEW_FLOW_ID' to generate -+ * a new flow_id; this ID should be used as the QDBIN argument -+ * in enqueue operations -+ * @cfg: Tx flow configuration -+ * -+ * Return: '0' on Success; Error code otherwise.
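A sketch of a queue configuration built from the structures above, scheduling the queue to a DPIO ('fq_ctx' and 'dpio_id' are placeholders; the resulting cfg is later applied through calls such as dpni_set_rx_flow(), declared further down):

    static void sketch_fill_queue_cfg(struct dpni_queue_cfg *q_cfg,
                                      uint64_t fq_ctx, int dpio_id)
    {
            memset(q_cfg, 0, sizeof(*q_cfg));
            q_cfg->options = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
            q_cfg->user_ctx = fq_ctx;         /* returned in each dequeued FD */
            q_cfg->dest_cfg.dest_type = DPNI_DEST_DPIO; /* schedule mode + FQDAN */
            q_cfg->dest_cfg.dest_id = dpio_id;
            q_cfg->dest_cfg.priority = 0;
    }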
-+ */ -+int dpni_set_tx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *flow_id, -+ const struct dpni_tx_flow_cfg *cfg); -+ -+/** -+ * struct dpni_tx_flow_attr - Structure representing Tx flow attributes -+ * @use_common_tx_conf_queue: '1' if using common (default) Tx confirmation and -+ * error queue; '0' if using private Tx confirmation and error queue -+ * @l3_chksum_gen: '1' if L3 checksum generation is enabled; '0' if disabled -+ * @l4_chksum_gen: '1' if L4 checksum generation is enabled; '0' if disabled -+ */ -+struct dpni_tx_flow_attr { -+ int use_common_tx_conf_queue; -+ int l3_chksum_gen; -+ int l4_chksum_gen; -+}; -+ -+/** -+ * dpni_get_tx_flow() - Get Tx flow attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function -+ * @attr: Returned Tx flow attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_tx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_tx_flow_attr *attr); -+ -+/** -+ * struct dpni_tx_conf_cfg - Structure representing Tx conf configuration -+ * @errors_only: Set to '1' to report back only error frames; -+ * Set to '0' to confirm transmission/error for all transmitted frames; -+ * @queue_cfg: Queue configuration -+ */ -+struct dpni_tx_conf_cfg { -+ int errors_only; -+ struct dpni_queue_cfg queue_cfg; -+}; -+ -+/** -+ * dpni_set_tx_conf() - Set Tx confirmation and error queue configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function; -+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf -+ * @cfg: Queue configuration -+ * -+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, -+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; -+ * i.e. only serve the common tx-conf-err queue; -+ * if 'DPNI_OPT_TX_CONF_DISABLED' was selected, only error frames are reported -+ * back - successfully transmitted frames are not confirmed. Otherwise, all -+ * transmitted frames are sent for confirmation. -+ * -+ * Return: '0' on Success; Error code otherwise. 
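A sketch of allocating a Tx flow with checksum offload via the API above; on success the returned id doubles as the QDBIN for enqueues ('qdbin' is an output placeholder):

    static int sketch_add_tx_flow(struct fsl_mc_io *mc_io, uint16_t token,
                                  uint16_t *qdbin)
    {
            struct dpni_tx_flow_cfg tx_cfg = { 0 };
            uint16_t flow_id = DPNI_NEW_FLOW_ID; /* ask the MC for a new id */
            int err;

            tx_cfg.options = DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN |
                             DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN;
            tx_cfg.l3_chksum_gen = 1;
            tx_cfg.l4_chksum_gen = 1;

            err = dpni_set_tx_flow(mc_io, 0, token, &flow_id, &tx_cfg);
            if (err)
                    return err;

            *qdbin = flow_id; /* use as the QDBIN argument when enqueuing */
            return 0;
    }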
-+ */ -+int dpni_set_tx_conf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ const struct dpni_tx_conf_cfg *cfg); -+ -+/** -+ * struct dpni_tx_conf_attr - Structure representing Tx conf attributes -+ * @errors_only: '1' if only error frames are reported back; '0' if all -+ * transmitted frames are confirmed -+ * @queue_attr: Queue attributes -+ */ -+struct dpni_tx_conf_attr { -+ int errors_only; -+ struct dpni_queue_attr queue_attr; -+}; -+ -+/** -+ * dpni_get_tx_conf() - Get Tx confirmation and error queue attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function; -+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf -+ * @attr: Returned tx-conf attributes -+ * -+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, -+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; -+ * i.e. only serve the common tx-conf-err queue; -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_tx_conf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_tx_conf_attr *attr); -+ -+/** -+ * dpni_set_tx_conf_congestion_notification() - Set Tx conf congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function; -+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf -+ * @cfg: congestion notification configuration -+ * -+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, -+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; -+ * i.e. only serve the common tx-conf-err queue; -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ const struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_get_tx_conf_congestion_notification() - Get Tx conf congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function; -+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf -+ * @cfg: congestion notification -+ * -+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, -+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; -+ * i.e. only serve the common tx-conf-err queue; -+ * -+ * Return: '0' on Success; error code otherwise. 
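A sketch of pointing the common Tx confirmation/error queue at a DPIO and asking for error frames only ('dpio_id' is a placeholder):

    static int sketch_set_tx_conf(struct fsl_mc_io *mc_io, uint16_t token,
                                  int dpio_id)
    {
            struct dpni_tx_conf_cfg conf_cfg = { 0 };

            conf_cfg.errors_only = 1; /* successful Tx is not confirmed */
            conf_cfg.queue_cfg.options = DPNI_QUEUE_OPT_DEST;
            conf_cfg.queue_cfg.dest_cfg.dest_type = DPNI_DEST_DPIO;
            conf_cfg.queue_cfg.dest_cfg.dest_id = dpio_id;

            return dpni_set_tx_conf(mc_io, 0, token, DPNI_COMMON_TX_CONF,
                                    &conf_cfg);
    }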
-+ */ -+int dpni_get_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_set_tx_conf_revoke() - Tx confirmation revocation -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @revoke: revoke or not -+ * -+ * This function is useful only when 'DPNI_OPT_TX_CONF_DISABLED' is not -+ * selected at DPNI creation. -+ * Calling this function with 'revoke' set to '1' disables all transmit -+ * confirmation (including the private confirmation queues), regardless of -+ * previous settings; Note that in this case, Tx error frames are still -+ * enqueued to the general transmit errors queue. -+ * Calling this function with 'revoke' set to '0' restores the previous -+ * settings for both general and private transmit confirmation. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_tx_conf_revoke(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int revoke); -+ -+/** -+ * dpni_set_rx_flow() - Set Rx flow configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7); -+ * use 'DPNI_ALL_TCS' to set all TCs and all flows -+ * @flow_id: Rx flow id within the traffic class; use -+ * 'DPNI_ALL_TC_FLOWS' to set all flows within -+ * this tc_id; ignored if tc_id is set to -+ * 'DPNI_ALL_TCS'; -+ * @cfg: Rx flow configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_rx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint16_t flow_id, -+ const struct dpni_queue_cfg *cfg); -+ -+/** -+ * dpni_get_rx_flow() - Get Rx flow attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @flow_id: Rx flow id within the traffic class -+ * @attr: Returned Rx flow attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_rx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint16_t flow_id, -+ struct dpni_queue_attr *attr); -+ -+/** -+ * dpni_set_rx_err_queue() - Set Rx error queue configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: Queue configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_rx_err_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_queue_cfg *cfg); -+ -+/** -+ * dpni_get_rx_err_queue() - Get Rx error queue attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @attr: Returned Queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_rx_err_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_queue_attr *attr); -+ -+/** -+ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration -+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with -+ * key extractions to be used as the QoS criteria by calling -+ * dpni_prepare_key_cfg() -+ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss); -+ * '0' to use the 'default_tc' in such cases -+ * @default_tc: Used in case of no-match and 'discard_on_miss' = 0 -+ */ -+struct dpni_qos_tbl_cfg { -+ uint64_t key_cfg_iova; -+ int discard_on_miss; -+ uint8_t default_tc; -+}; -+ -+/** -+ * dpni_set_qos_table() - Set QoS mapping table -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: QoS table configuration -+ * -+ * This function and all QoS-related functions require that -+ * 'max_tcs > 1' was set at DPNI creation. -+ * -+ * warning: Before calling this function, call dpni_prepare_key_cfg() to -+ * prepare the key_cfg_iova parameter -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_qos_table(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_qos_tbl_cfg *cfg); -+ -+/** -+ * struct dpni_rule_cfg - Rule configuration for table lookup -+ * @key_iova: I/O virtual address of the key (must be in DMA-able memory) -+ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory) -+ * @key_size: key and mask size (in bytes) -+ */ -+struct dpni_rule_cfg { -+ uint64_t key_iova; -+ uint64_t mask_iova; -+ uint8_t key_size; -+}; -+ -+/** -+ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: QoS rule to add -+ * @tc_id: Traffic class selection (0-7) -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_add_qos_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_rule_cfg *cfg, -+ uint8_t tc_id); -+ -+/** -+ * dpni_remove_qos_entry() - Remove QoS mapping entry -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: QoS rule to remove -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_rule_cfg *cfg); -+ -+/** -+ * dpni_clear_qos_table() - Clear all QoS mapping entries -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Following this function call, all frames are directed to -+ * the default traffic class (0) -+ * -+ * Return: '0' on Success; Error code otherwise.
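A sketch tying the QoS table and rule APIs above together ('key_cfg_iova' prepared beforehand with dpni_prepare_key_cfg(); the rule's key/mask DMA addresses and sizes come from the caller):

    static int sketch_setup_qos(struct fsl_mc_io *mc_io, uint16_t token,
                                uint64_t key_cfg_iova,
                                const struct dpni_rule_cfg *rule)
    {
            struct dpni_qos_tbl_cfg qos_cfg = { 0 };
            int err;

            qos_cfg.key_cfg_iova = key_cfg_iova;
            qos_cfg.discard_on_miss = 0; /* misses fall back to default_tc */
            qos_cfg.default_tc = 0;

            err = dpni_set_qos_table(mc_io, 0, token, &qos_cfg);
            if (err)
                    return err;

            /* frames matching 'rule' are classified to traffic class 5 */
            return dpni_add_qos_entry(mc_io, 0, token, rule, 5);
    }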
-+ */ -+int dpni_clear_qos_table(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class -+ * (to select a flow ID) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Flow steering rule to add -+ * @flow_id: Flow id selection (must be smaller than the -+ * distribution size of the traffic class) -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_add_fs_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rule_cfg *cfg, -+ uint16_t flow_id); -+ -+/** -+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific -+ * traffic class -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Flow steering rule to remove -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rule_cfg *cfg); -+ -+/** -+ * dpni_clear_fs_entries() - Clear all Flow Steering entries of a specific -+ * traffic class -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_clear_fs_entries(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id); -+ -+/** -+ * dpni_set_vlan_insertion() - Enable/disable VLAN insertion for egress frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Requires that the 'DPNI_OPT_VLAN_MANIPULATION' option is set -+ * at DPNI creation. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_set_vlan_removal() - Enable/disable VLAN removal for ingress frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Requires that the 'DPNI_OPT_VLAN_MANIPULATION' option is set -+ * at DPNI creation. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_vlan_removal(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_set_ipr() - Enable/disable IP reassembly of ingress frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Requires that the 'DPNI_OPT_IPR' option is set at DPNI creation. -+ * -+ * Return: '0' on Success; Error code otherwise. 
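And a sketch of a flow-steering entry using the same rule structure ('key_iova', 'mask_iova' and 'key_size' are placeholders; per the comment above, the flow id must stay below the TC's distribution size):

    static int sketch_add_fs(struct fsl_mc_io *mc_io, uint16_t token,
                             uint64_t key_iova, uint64_t mask_iova,
                             uint8_t key_size)
    {
            struct dpni_rule_cfg fs_rule = {
                    .key_iova  = key_iova,
                    .mask_iova = mask_iova,
                    .key_size  = key_size, /* bytes, same for key and mask */
            };

            /* steer matches within traffic class 0 to flow 3 */
            return dpni_add_fs_entry(mc_io, 0, token, 0, &fs_rule, 3);
    }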
-+ */ -+int dpni_set_ipr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_set_ipf() - Enable/disable IP fragmentation of egress frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Requires that the 'DPNI_OPT_IPF' option is set at DPNI -+ * creation. Fragmentation is performed according to MTU value -+ * set by dpni_set_mtu() function -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_ipf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+#endif /* __FSL_DPNI_H */ ---- a/drivers/staging/fsl-mc/include/mc-cmd.h -+++ b/drivers/staging/fsl-mc/include/mc-cmd.h -@@ -103,8 +103,11 @@ enum mc_cmd_status { - #define MC_CMD_HDR_READ_FLAGS(_hdr) \ - ((u32)mc_dec((_hdr), MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S)) - -+#define MC_PREP_OP(_ext, _param, _offset, _width, _type, _arg) \ -+ ((_ext)[_param] |= cpu_to_le64(mc_enc((_offset), (_width), _arg))) -+ - #define MC_EXT_OP(_ext, _param, _offset, _width, _type, _arg) \ -- ((_ext)[_param] |= mc_enc((_offset), (_width), _arg)) -+ (_arg = (_type)mc_dec(cpu_to_le64(_ext[_param]), (_offset), (_width))) - - #define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \ - ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg)) ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/net.h -@@ -0,0 +1,481 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef __FSL_NET_H -+#define __FSL_NET_H -+ -+#define LAST_HDR_INDEX 0xFFFFFFFF -+ -+/*****************************************************************************/ -+/* Protocol fields */ -+/*****************************************************************************/ -+ -+/************************* Ethernet fields *********************************/ -+#define NH_FLD_ETH_DA (1) -+#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1) -+#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2) -+#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3) -+#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4) -+#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5) -+#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1) -+ -+#define NH_FLD_ETH_ADDR_SIZE 6 -+ -+/*************************** VLAN fields ***********************************/ -+#define NH_FLD_VLAN_VPRI (1) -+#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1) -+#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2) -+#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3) -+#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4) -+#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1) -+ -+#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \ -+ NH_FLD_VLAN_CFI | \ -+ NH_FLD_VLAN_VID) -+ -+/************************ IP (generic) fields ******************************/ -+#define NH_FLD_IP_VER (1) -+#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2) -+#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3) -+#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4) -+#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5) -+#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6) -+#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7) -+#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8) -+#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1) -+ -+#define NH_FLD_IP_PROTO_SIZE 1 -+ -+/***************************** IPV4 fields *********************************/ -+#define NH_FLD_IPV4_VER (1) -+#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1) -+#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2) -+#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3) -+#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4) -+#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5) -+#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6) -+#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7) -+#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8) -+#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9) -+#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10) -+#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11) -+#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12) -+#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13) -+#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14) -+#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1) -+ -+#define NH_FLD_IPV4_ADDR_SIZE 4 -+#define NH_FLD_IPV4_PROTO_SIZE 1 -+ -+/***************************** IPV6 fields *********************************/ -+#define NH_FLD_IPV6_VER (1) -+#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1) -+#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2) -+#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3) -+#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4) -+#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5) -+#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6) -+#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7) -+#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1) -+ -+#define NH_FLD_IPV6_ADDR_SIZE 16 -+#define NH_FLD_IPV6_NEXT_HDR_SIZE 1 -+ -+/***************************** ICMP fields *********************************/ -+#define NH_FLD_ICMP_TYPE (1) -+#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1) -+#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2) -+#define 
NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3) -+#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4) -+#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1) -+ -+#define NH_FLD_ICMP_CODE_SIZE 1 -+#define NH_FLD_ICMP_TYPE_SIZE 1 -+ -+/***************************** IGMP fields *********************************/ -+#define NH_FLD_IGMP_VERSION (1) -+#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1) -+#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2) -+#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3) -+#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1) -+ -+/***************************** TCP fields **********************************/ -+#define NH_FLD_TCP_PORT_SRC (1) -+#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1) -+#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2) -+#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3) -+#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4) -+#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5) -+#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6) -+#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7) -+#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8) -+#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9) -+#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10) -+#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1) -+ -+#define NH_FLD_TCP_PORT_SIZE 2 -+ -+/***************************** UDP fields **********************************/ -+#define NH_FLD_UDP_PORT_SRC (1) -+#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1) -+#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2) -+#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3) -+#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1) -+ -+#define NH_FLD_UDP_PORT_SIZE 2 -+ -+/*************************** UDP-lite fields *******************************/ -+#define NH_FLD_UDP_LITE_PORT_SRC (1) -+#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1) -+#define NH_FLD_UDP_LITE_ALL_FIELDS \ -+ ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1) -+ -+#define NH_FLD_UDP_LITE_PORT_SIZE 2 -+ -+/*************************** UDP-encap-ESP fields **************************/ -+#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1) -+#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1) -+#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2) -+#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3) -+#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4) -+#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5) -+#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \ -+ ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1) -+ -+#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2 -+#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4 -+ -+/***************************** SCTP fields *********************************/ -+#define NH_FLD_SCTP_PORT_SRC (1) -+#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1) -+#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2) -+#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3) -+#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1) -+ -+#define NH_FLD_SCTP_PORT_SIZE 2 -+ -+/***************************** DCCP fields *********************************/ -+#define NH_FLD_DCCP_PORT_SRC (1) -+#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1) -+#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1) -+ -+#define NH_FLD_DCCP_PORT_SIZE 2 -+ -+/***************************** IPHC fields *********************************/ -+#define NH_FLD_IPHC_CID (1) -+#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1) -+#define NH_FLD_IPHC_HCINDEX 
(NH_FLD_IPHC_CID << 2) -+#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3) -+#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4) -+#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1) -+ -+/***************************** SCTP fields *********************************/ -+#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1) -+#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1) -+#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2) -+#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3) -+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4) -+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5) -+#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6) -+#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7) -+#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8) -+#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9) -+#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \ -+ ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1) -+ -+/*************************** L2TPV2 fields *********************************/ -+#define NH_FLD_L2TPV2_TYPE_BIT (1) -+#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1) -+#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2) -+#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3) -+#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4) -+#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5) -+#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6) -+#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7) -+#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8) -+#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9) -+#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10) -+#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11) -+#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12) -+#define NH_FLD_L2TPV2_ALL_FIELDS \ -+ ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1) -+ -+/*************************** L2TPV3 fields *********************************/ -+#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1) -+#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1) -+#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2) -+#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3) -+#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4) -+#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5) -+#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6) -+#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7) -+#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8) -+#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \ -+ ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1) -+ -+#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1) -+#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1) -+#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2) -+#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3) -+#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \ -+ ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1) -+ -+/**************************** PPP fields ***********************************/ -+#define NH_FLD_PPP_PID (1) -+#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1) -+#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1) -+ -+/************************** PPPoE fields ***********************************/ -+#define NH_FLD_PPPOE_VER (1) -+#define NH_FLD_PPPOE_TYPE 
(NH_FLD_PPPOE_VER << 1) -+#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2) -+#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3) -+#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4) -+#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5) -+#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6) -+#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1) -+ -+/************************* PPP-Mux fields **********************************/ -+#define NH_FLD_PPPMUX_PID (1) -+#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1) -+#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2) -+#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1) -+ -+/*********************** PPP-Mux sub-frame fields **************************/ -+#define NH_FLD_PPPMUX_SUBFRM_PFF (1) -+#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1) -+#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2) -+#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3) -+#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4) -+#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \ -+ ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1) -+ -+/*************************** LLC fields ************************************/ -+#define NH_FLD_LLC_DSAP (1) -+#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1) -+#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2) -+#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1) -+ -+/*************************** NLPID fields **********************************/ -+#define NH_FLD_NLPID_NLPID (1) -+#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1) -+ -+/*************************** SNAP fields ***********************************/ -+#define NH_FLD_SNAP_OUI (1) -+#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1) -+#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1) -+ -+/*************************** LLC SNAP fields *******************************/ -+#define NH_FLD_LLC_SNAP_TYPE (1) -+#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1) -+ -+#define NH_FLD_ARP_HTYPE (1) -+#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1) -+#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2) -+#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3) -+#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4) -+#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5) -+#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6) -+#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7) -+#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8) -+#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1) -+ -+/*************************** RFC2684 fields ********************************/ -+#define NH_FLD_RFC2684_LLC (1) -+#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1) -+#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2) -+#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3) -+#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4) -+#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5) -+#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1) -+ -+/*************************** User defined fields ***************************/ -+#define NH_FLD_USER_DEFINED_SRCPORT (1) -+#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1) -+#define NH_FLD_USER_DEFINED_ALL_FIELDS \ -+ ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1) -+ -+/*************************** Payload fields ********************************/ -+#define NH_FLD_PAYLOAD_BUFFER (1) -+#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1) -+#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2) -+#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3) -+#define 
NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4) -+#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5) -+#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1) -+ -+/*************************** GRE fields ************************************/ -+#define NH_FLD_GRE_TYPE (1) -+#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1) -+ -+/*************************** MINENCAP fields *******************************/ -+#define NH_FLD_MINENCAP_SRC_IP (1) -+#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1) -+#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2) -+#define NH_FLD_MINENCAP_ALL_FIELDS \ -+ ((NH_FLD_MINENCAP_SRC_IP << 3) - 1) -+ -+/*************************** IPSEC AH fields *******************************/ -+#define NH_FLD_IPSEC_AH_SPI (1) -+#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1) -+#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1) -+ -+/*************************** IPSEC ESP fields ******************************/ -+#define NH_FLD_IPSEC_ESP_SPI (1) -+#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1) -+#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1) -+ -+#define NH_FLD_IPSEC_ESP_SPI_SIZE 4 -+ -+/*************************** MPLS fields ***********************************/ -+#define NH_FLD_MPLS_LABEL_STACK (1) -+#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \ -+ ((NH_FLD_MPLS_LABEL_STACK << 1) - 1) -+ -+/*************************** MACSEC fields *********************************/ -+#define NH_FLD_MACSEC_SECTAG (1) -+#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1) -+ -+/*************************** GTP fields ************************************/ -+#define NH_FLD_GTP_TEID (1) -+ -+ -+/* Protocol options */ -+ -+/* Ethernet options */ -+#define NH_OPT_ETH_BROADCAST 1 -+#define NH_OPT_ETH_MULTICAST 2 -+#define NH_OPT_ETH_UNICAST 3 -+#define NH_OPT_ETH_BPDU 4 -+ -+#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01) -+/* also applicable for broadcast */ -+ -+/* VLAN options */ -+#define NH_OPT_VLAN_CFI 1 -+ -+/* IPV4 options */ -+#define NH_OPT_IPV4_UNICAST 1 -+#define NH_OPT_IPV4_MULTICAST 2 -+#define NH_OPT_IPV4_BROADCAST 3 -+#define NH_OPT_IPV4_OPTION 4 -+#define NH_OPT_IPV4_FRAG 5 -+#define NH_OPT_IPV4_INITIAL_FRAG 6 -+ -+/* IPV6 options */ -+#define NH_OPT_IPV6_UNICAST 1 -+#define NH_OPT_IPV6_MULTICAST 2 -+#define NH_OPT_IPV6_OPTION 3 -+#define NH_OPT_IPV6_FRAG 4 -+#define NH_OPT_IPV6_INITIAL_FRAG 5 -+ -+/* General IP options (may be used for any version) */ -+#define NH_OPT_IP_FRAG 1 -+#define NH_OPT_IP_INITIAL_FRAG 2 -+#define NH_OPT_IP_OPTION 3 -+ -+/* Minenc. options */ -+#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1 -+ -+/* GRE. 
options */ -+#define NH_OPT_GRE_ROUTING_PRESENT 1 -+ -+/* TCP options */ -+#define NH_OPT_TCP_OPTIONS 1 -+#define NH_OPT_TCP_CONTROL_HIGH_BITS 2 -+#define NH_OPT_TCP_CONTROL_LOW_BITS 3 -+ -+/* CAPWAP options */ -+#define NH_OPT_CAPWAP_DTLS 1 -+ -+enum net_prot { -+ NET_PROT_NONE = 0, -+ NET_PROT_PAYLOAD, -+ NET_PROT_ETH, -+ NET_PROT_VLAN, -+ NET_PROT_IPV4, -+ NET_PROT_IPV6, -+ NET_PROT_IP, -+ NET_PROT_TCP, -+ NET_PROT_UDP, -+ NET_PROT_UDP_LITE, -+ NET_PROT_IPHC, -+ NET_PROT_SCTP, -+ NET_PROT_SCTP_CHUNK_DATA, -+ NET_PROT_PPPOE, -+ NET_PROT_PPP, -+ NET_PROT_PPPMUX, -+ NET_PROT_PPPMUX_SUBFRM, -+ NET_PROT_L2TPV2, -+ NET_PROT_L2TPV3_CTRL, -+ NET_PROT_L2TPV3_SESS, -+ NET_PROT_LLC, -+ NET_PROT_LLC_SNAP, -+ NET_PROT_NLPID, -+ NET_PROT_SNAP, -+ NET_PROT_MPLS, -+ NET_PROT_IPSEC_AH, -+ NET_PROT_IPSEC_ESP, -+ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */ -+ NET_PROT_MACSEC, -+ NET_PROT_GRE, -+ NET_PROT_MINENCAP, -+ NET_PROT_DCCP, -+ NET_PROT_ICMP, -+ NET_PROT_IGMP, -+ NET_PROT_ARP, -+ NET_PROT_CAPWAP_DATA, -+ NET_PROT_CAPWAP_CTRL, -+ NET_PROT_RFC2684, -+ NET_PROT_ICMPV6, -+ NET_PROT_FCOE, -+ NET_PROT_FIP, -+ NET_PROT_ISCSI, -+ NET_PROT_GTP, -+ NET_PROT_USER_DEFINED_L2, -+ NET_PROT_USER_DEFINED_L3, -+ NET_PROT_USER_DEFINED_L4, -+ NET_PROT_USER_DEFINED_L5, -+ NET_PROT_USER_DEFINED_SHIM1, -+ NET_PROT_USER_DEFINED_SHIM2, -+ -+ NET_PROT_DUMMY_LAST -+}; -+ -+/*! IEEE8021.Q */ -+#define NH_IEEE8021Q_ETYPE 0x8100 -+#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \ -+ ((((uint32_t)(etype & 0xFFFF)) << 16) | \ -+ (((uint32_t)(pcp & 0x07)) << 13) | \ -+ (((uint32_t)(dei & 0x01)) << 12) | \ -+ (((uint32_t)(vlan_id & 0xFFF)))) -+ -+#endif /* __FSL_NET_H */ ---- a/net/core/pktgen.c -+++ b/net/core/pktgen.c -@@ -2790,6 +2790,7 @@ static struct sk_buff *pktgen_alloc_skb( - } else { - skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT); - } -+ skb_reserve(skb, LL_RESERVED_SPACE(dev)); - - /* the caller pre-fetches from skb->data and reserves for the mac hdr */ - if (likely(skb)) diff --git a/target/linux/layerscape/patches-4.4/7202-staging-fsl-dpaa2-eth-code-cleanup-for-upstreaming.patch b/target/linux/layerscape/patches-4.4/7202-staging-fsl-dpaa2-eth-code-cleanup-for-upstreaming.patch deleted file mode 100644 index d331b6600..000000000 --- a/target/linux/layerscape/patches-4.4/7202-staging-fsl-dpaa2-eth-code-cleanup-for-upstreaming.patch +++ /dev/null @@ -1,3257 +0,0 @@ -From 2a6f0dd5425cf43b8c09a8203e6ee64ba2b3868d Mon Sep 17 00:00:00 2001 -From: Bogdan Hamciuc -Date: Tue, 12 Jan 2016 08:58:40 +0200 -Subject: [PATCH 202/226] staging: fsl-dpaa2: eth: code cleanup for - upstreaming - --this is a squash of cleanup commits (see QLINUX-5338), all commit logs - are below - -Signed-off-by: Stuart Yoder - ---------------------------------------------------------------------- - -fsl-dpaa2: eth: Drain queues upon ifconfig down - -MC firmware assists in draining the Tx FQs; the Eth driver flushes the -Rx and TxConfirm queues then empties the buffer pool. - -Signed-off-by: Bogdan Hamciuc - -fsl-dpaa2: eth: Don't use magic numbers - -Add a define to avoid mentioning directly the maximum number -of buffers released/acquired through a single QBMan command. - -Signed-off-by: Ioana Radulescu - -dpaa2-eth: Remove cpumask_rr macro - -It's only used in one place and not very intuitive - -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Rename a variable - -The old name was a leftover and non-intuitive. 
- -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Rearrange code - -Rearrange the conditional statements in several functions -to avoid excessive indenting, with no change in functionality. - -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Remove incorrect check - -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Fix bug on error path - -We were not doing a DMA unmap on the error path of dpaa2_dpni_setup. -Reorganize the code a bit to avoid this. - -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Error messages cleanup - -This commit cleans up and improves uniformity of messages on -error paths throughout the Ethernet driver: - -* don't use WARN/WARN_ON/WARN_ONCE for warning messages, as -we don't need a stack dump -* give up using the DPAA2_ETH_WARN_IF_ERR custom macro -* ensure dev_err and netdev_err are each used where needed and -not randomly -* remove error messages on memory allocation failures; the kernel -is quite capable of dumping a detailed message when that happens -* remove error messages on the fast path; we don't want to flood -the console and we already increment counters in most error cases -* ratelimit error messages where appropriate - -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Fix name of ethtool counters - -Rename counters in ethtool -S from "portal busy" to "dequeue portal -busy" and from "tx portal busy" to "enqueue portal busy", so it's -less confusing for the user. - -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Retry DAN rearm if portal busy - -There's a chance the data available notification rearming will -fail if the QBMan portal is busy. Keep retrying until portal -becomes available again, like we do for buffer release and -pull dequeue operations. - -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Add cpu_relax() to portal busy loops - -For several DPIO operations, we may need to repeatedly try -until the QBMan portal is no longer busy. Add a cpu_relax() to -those loops, like we were already doing when seeding buffers. - -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Add a counter for channel pull errors - -We no longer print an error message in this case, so add an error -counter so we can at least know something went wrong. - -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Function renames - -Attempt to provide more uniformity for the DPAA2 Ethernet -driver function naming conventions: -* major functions (ndo_ops, driver ops, ethtool, etc) all have -the "dpaa2_eth" prefix -* non-static functions also start with "dpaa2_eth" -* static helper functions don't get any prefix in order to avoid -very long names -* some functions get more intuitive and/or explicit names -* don't use names starting with an underscore - -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Structure and macro renames - -Some more renaming: -* defines of error/status bits in the frame annotation status -word get a "DPAA2_FAS" prefix instead of "DPAA2_ETH_FAS", as they're -not really specific to the ethernet driver. We may consider moving -these defines to a separate header file in the future -* DPAA2_ETH_RX_BUFFER_SIZE is renamed to DPAA2_ETH_RX_BUF_SIZE -to better match the naming style of other defines -* structure "dpaa2_eth_stats" becomes "dpaa2_eth_drv_stats" to -make it clear these are driver specific statistics - -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Cosmetics - -Various coding style fixes and other minor cosmetics, -with no functional impact. Also remove a couple of unused -defines and a structure field. 
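The "retry until the portal frees up" pattern described in the commits above boils down to a loop of this shape (a sketch, assuming the channel keeps its DPIO service and notification context in 'ch->dpio' and 'ch->nctx', as the staging DPIO API does):

    int err;

    /* keep retrying the rearm while the QBMan portal reports busy */
    do {
            err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
            cpu_relax();
    } while (err == -EBUSY);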
- -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Move function call - -Move call to set_fq_affinity() from probe to setup_fqs(), as it -logically belongs there. - -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Comments cleanup - -Add relevant comments where needed, remove obsolete or -useless ones. - -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Remove link poll Kconfig option - -Always try to use interrupts, but if they are not available -fall back to polling the link state. - -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: Remove message level - -We were defining netif message level, but we weren't using -it when printing error/info messages, so remove for now. - -Signed-off-by: Ioana Radulescu - -fsl-dpaa2: eth: fix compile error on 4.5 uprev - -Signed-off-by: Stuart Yoder ---- - drivers/staging/fsl-dpaa2/ethernet/Kconfig | 6 - - .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 6 +- - drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 992 ++++++++++---------- - drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 133 +-- - drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 226 ++--- - 5 files changed, 693 insertions(+), 670 deletions(-) - ---- a/drivers/staging/fsl-dpaa2/ethernet/Kconfig -+++ b/drivers/staging/fsl-dpaa2/ethernet/Kconfig -@@ -16,12 +16,6 @@ menuconfig FSL_DPAA2_ETH - driver, using the Freescale MC bus driver. - - if FSL_DPAA2_ETH --config FSL_DPAA2_ETH_LINK_POLL -- bool "Use polling mode for link state" -- default n -- ---help--- -- Poll for detecting link state changes instead of using -- interrupts. - - config FSL_DPAA2_ETH_USE_ERR_QUEUE - bool "Enable Rx error queue" ---- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c -@@ -30,7 +30,6 @@ - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -- - #include - #include - #include "dpaa2-eth.h" -@@ -38,14 +37,13 @@ - - #define DPAA2_ETH_DBG_ROOT "dpaa2-eth" - -- - static struct dentry *dpaa2_dbg_root; - - static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset) - { - struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; - struct rtnl_link_stats64 *stats; -- struct dpaa2_eth_stats *extras; -+ struct dpaa2_eth_drv_stats *extras; - int i; - - seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name); -@@ -200,7 +198,7 @@ static ssize_t dpaa2_dbg_reset_write(str - { - struct dpaa2_eth_priv *priv = file->private_data; - struct rtnl_link_stats64 *percpu_stats; -- struct dpaa2_eth_stats *percpu_extras; -+ struct dpaa2_eth_drv_stats *percpu_extras; - struct dpaa2_eth_fq *fq; - struct dpaa2_eth_channel *ch; - int i; ---- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c -@@ -53,26 +53,14 @@ MODULE_LICENSE("Dual BSD/GPL"); - MODULE_AUTHOR("Freescale Semiconductor, Inc"); - MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver"); - --static int debug = -1; --module_param(debug, int, S_IRUGO); --MODULE_PARM_DESC(debug, "Module/Driver verbosity level"); -- - /* Oldest DPAA2 objects version we are compatible with */ - #define DPAA2_SUPPORTED_DPNI_VERSION 6 - #define DPAA2_SUPPORTED_DPBP_VERSION 2 - #define DPAA2_SUPPORTED_DPCON_VERSION 2 - --/* Iterate through the cpumask in a round-robin fashion. 
*/ --#define cpumask_rr(cpu, maskptr) \ --do { \ -- (cpu) = cpumask_next((cpu), (maskptr)); \ -- if ((cpu) >= nr_cpu_ids) \ -- (cpu) = cpumask_first((maskptr)); \ --} while (0) -- --static void dpaa2_eth_rx_csum(struct dpaa2_eth_priv *priv, -- u32 fd_status, -- struct sk_buff *skb) -+static void validate_rx_csum(struct dpaa2_eth_priv *priv, -+ u32 fd_status, -+ struct sk_buff *skb) - { - skb_checksum_none_assert(skb); - -@@ -81,8 +69,8 @@ static void dpaa2_eth_rx_csum(struct dpa - return; - - /* Read checksum validation bits */ -- if (!((fd_status & DPAA2_ETH_FAS_L3CV) && -- (fd_status & DPAA2_ETH_FAS_L4CV))) -+ if (!((fd_status & DPAA2_FAS_L3CV) && -+ (fd_status & DPAA2_FAS_L4CV))) - return; - - /* Inform the stack there's no need to compute L3/L4 csum anymore */ -@@ -92,53 +80,55 @@ static void dpaa2_eth_rx_csum(struct dpa - /* Free a received FD. - * Not to be used for Tx conf FDs or on any other paths. - */ --static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv, -- const struct dpaa2_fd *fd, -- void *vaddr) -+static void free_rx_fd(struct dpaa2_eth_priv *priv, -+ const struct dpaa2_fd *fd, -+ void *vaddr) - { - struct device *dev = priv->net_dev->dev.parent; - dma_addr_t addr = dpaa2_fd_get_addr(fd); - u8 fd_format = dpaa2_fd_get_format(fd); -+ struct dpaa2_sg_entry *sgt; -+ void *sg_vaddr; -+ int i; - -- if (fd_format == dpaa2_fd_sg) { -- struct dpaa2_sg_entry *sgt = vaddr + dpaa2_fd_get_offset(fd); -- void *sg_vaddr; -- int i; -+ /* If single buffer frame, just free the data buffer */ -+ if (fd_format == dpaa2_fd_single) -+ goto free_buf; - -- for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { -- dpaa2_sg_le_to_cpu(&sgt[i]); -+ /* For S/G frames, we first need to free all SG entries */ -+ sgt = vaddr + dpaa2_fd_get_offset(fd); -+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { -+ dpaa2_sg_le_to_cpu(&sgt[i]); - -- addr = dpaa2_sg_get_addr(&sgt[i]); -- dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUFFER_SIZE, -- DMA_FROM_DEVICE); -+ addr = dpaa2_sg_get_addr(&sgt[i]); -+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, -+ DMA_FROM_DEVICE); - -- sg_vaddr = phys_to_virt(addr); -- put_page(virt_to_head_page(sg_vaddr)); -+ sg_vaddr = phys_to_virt(addr); -+ put_page(virt_to_head_page(sg_vaddr)); - -- if (dpaa2_sg_is_final(&sgt[i])) -- break; -- } -+ if (dpaa2_sg_is_final(&sgt[i])) -+ break; - } - -+free_buf: - put_page(virt_to_head_page(vaddr)); - } - - /* Build a linear skb based on a single-buffer frame descriptor */ --static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_priv *priv, -- struct dpaa2_eth_channel *ch, -- const struct dpaa2_fd *fd, -- void *fd_vaddr) -+static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch, -+ const struct dpaa2_fd *fd, -+ void *fd_vaddr) - { - struct sk_buff *skb = NULL; - u16 fd_offset = dpaa2_fd_get_offset(fd); - u32 fd_length = dpaa2_fd_get_len(fd); - -- skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUFFER_SIZE + -+ skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); -- if (unlikely(!skb)) { -- netdev_err(priv->net_dev, "build_skb() failed\n"); -+ if (unlikely(!skb)) - return NULL; -- } - - skb_reserve(skb, fd_offset); - skb_put(skb, fd_length); -@@ -149,9 +139,9 @@ static struct sk_buff *dpaa2_eth_build_l - } - - /* Build a non linear (fragmented) skb based on a S/G table */ --static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv, -- struct dpaa2_eth_channel *ch, -- struct dpaa2_sg_entry *sgt) -+static struct sk_buff 
*build_frag_skb(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch, -+ struct dpaa2_sg_entry *sgt) - { - struct sk_buff *skb = NULL; - struct device *dev = priv->net_dev->dev.parent; -@@ -168,66 +158,57 @@ static struct sk_buff *dpaa2_eth_build_f - - dpaa2_sg_le_to_cpu(sge); - -- /* We don't support anything else yet! */ -- if (unlikely(dpaa2_sg_get_format(sge) != dpaa2_sg_single)) { -- dev_warn_once(dev, "Unsupported S/G entry format: %d\n", -- dpaa2_sg_get_format(sge)); -- return NULL; -- } -+ /* NOTE: We only support SG entries in dpaa2_sg_single format, -+ * but this is the only format we may receive from HW anyway -+ */ - -- /* Get the address, offset and length from the S/G entry */ -+ /* Get the address and length from the S/G entry */ - sg_addr = dpaa2_sg_get_addr(sge); -- dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUFFER_SIZE, -+ dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, - DMA_FROM_DEVICE); -- if (unlikely(dma_mapping_error(dev, sg_addr))) { -- netdev_err(priv->net_dev, "DMA unmap failed\n"); -- return NULL; -- } -+ - sg_vaddr = phys_to_virt(sg_addr); - sg_length = dpaa2_sg_get_len(sge); - - if (i == 0) { - /* We build the skb around the first data buffer */ -- skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUFFER_SIZE + -+ skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); -- if (unlikely(!skb)) { -- netdev_err(priv->net_dev, "build_skb failed\n"); -+ if (unlikely(!skb)) - return NULL; -- } -+ - sg_offset = dpaa2_sg_get_offset(sge); - skb_reserve(skb, sg_offset); - skb_put(skb, sg_length); - } else { -- /* Subsequent data in SGEntries are stored at -- * offset 0 in their buffers, we don't need to -- * compute sg_offset. -- */ -- WARN_ONCE(dpaa2_sg_get_offset(sge) != 0, -- "Non-zero offset in SGE[%d]!\n", i); -- - /* Rest of the data buffers are stored as skb frags */ - page = virt_to_page(sg_vaddr); - head_page = virt_to_head_page(sg_vaddr); - -- /* Offset in page (which may be compound) */ -+ /* Offset in page (which may be compound). -+ * Data in subsequent SG entries is stored from the -+ * beginning of the buffer, so we don't need to add the -+ * sg_offset. 
-+ */ - page_offset = ((unsigned long)sg_vaddr & - (PAGE_SIZE - 1)) + - (page_address(page) - page_address(head_page)); - - skb_add_rx_frag(skb, i - 1, head_page, page_offset, -- sg_length, DPAA2_ETH_RX_BUFFER_SIZE); -+ sg_length, DPAA2_ETH_RX_BUF_SIZE); - } - - if (dpaa2_sg_is_final(sge)) - break; - } - -- /* Count all data buffers + sgt buffer */ -+ /* Count all data buffers + SG table buffer */ - ch->buf_count -= i + 2; - - return skb; - } - -+/* Main Rx frame processing routine */ - static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - const struct dpaa2_fd *fd, -@@ -238,7 +219,7 @@ static void dpaa2_eth_rx(struct dpaa2_et - void *vaddr; - struct sk_buff *skb; - struct rtnl_link_stats64 *percpu_stats; -- struct dpaa2_eth_stats *percpu_extras; -+ struct dpaa2_eth_drv_stats *percpu_extras; - struct device *dev = priv->net_dev->dev.parent; - struct dpaa2_fas *fas; - u32 status = 0; -@@ -246,7 +227,7 @@ static void dpaa2_eth_rx(struct dpaa2_et - /* Tracing point */ - trace_dpaa2_rx_fd(priv->net_dev, fd); - -- dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUFFER_SIZE, DMA_FROM_DEVICE); -+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE); - vaddr = phys_to_virt(addr); - - prefetch(vaddr + priv->buf_layout.private_data_size); -@@ -256,32 +237,30 @@ static void dpaa2_eth_rx(struct dpaa2_et - percpu_extras = this_cpu_ptr(priv->percpu_extras); - - if (fd_format == dpaa2_fd_single) { -- skb = dpaa2_eth_build_linear_skb(priv, ch, fd, vaddr); -+ skb = build_linear_skb(priv, ch, fd, vaddr); - } else if (fd_format == dpaa2_fd_sg) { - struct dpaa2_sg_entry *sgt = - vaddr + dpaa2_fd_get_offset(fd); -- skb = dpaa2_eth_build_frag_skb(priv, ch, sgt); -+ skb = build_frag_skb(priv, ch, sgt); - put_page(virt_to_head_page(vaddr)); - percpu_extras->rx_sg_frames++; - percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); - } else { - /* We don't support any other format */ -- netdev_err(priv->net_dev, "Received invalid frame format\n"); - goto err_frame_format; - } - -- if (unlikely(!skb)) { -- dev_err_once(dev, "error building skb\n"); -+ if (unlikely(!skb)) - goto err_build_skb; -- } - - prefetch(skb->data); - -+ /* Get the timestamp value */ - if (priv->ts_rx_en) { - struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); -- u64 *ns = (u64 *) (vaddr + -- priv->buf_layout.private_data_size + -- sizeof(struct dpaa2_fas)); -+ u64 *ns = (u64 *)(vaddr + -+ priv->buf_layout.private_data_size + -+ sizeof(struct dpaa2_fas)); - - *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * (*ns); - memset(shhwtstamps, 0, sizeof(*shhwtstamps)); -@@ -293,7 +272,7 @@ static void dpaa2_eth_rx(struct dpaa2_et - fas = (struct dpaa2_fas *) - (vaddr + priv->buf_layout.private_data_size); - status = le32_to_cpu(fas->status); -- dpaa2_eth_rx_csum(priv, status, skb); -+ validate_rx_csum(priv, status, skb); - } - - skb->protocol = eth_type_trans(skb, priv->net_dev); -@@ -309,11 +288,14 @@ static void dpaa2_eth_rx(struct dpaa2_et - return; - err_frame_format: - err_build_skb: -- dpaa2_eth_free_rx_fd(priv, fd, vaddr); -+ free_rx_fd(priv, fd, vaddr); - percpu_stats->rx_dropped++; - } - - #ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE -+/* Processing of Rx frames received on the error FQ -+ * We check and print the error bits and then free the frame -+ */ - static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - const struct dpaa2_fd *fd, -@@ -326,21 +308,18 @@ static void dpaa2_eth_rx_err(struct dpaa - struct dpaa2_fas *fas; - u32 status = 0; - -- dma_unmap_single(dev, 
addr, DPAA2_ETH_RX_BUFFER_SIZE, DMA_FROM_DEVICE); -+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE); - vaddr = phys_to_virt(addr); - - if (fd->simple.frc & DPAA2_FD_FRC_FASV) { - fas = (struct dpaa2_fas *) - (vaddr + priv->buf_layout.private_data_size); - status = le32_to_cpu(fas->status); -- -- /* All frames received on this queue should have at least -- * one of the Rx error bits set */ -- WARN_ON_ONCE((status & DPAA2_ETH_RX_ERR_MASK) == 0); -- netdev_dbg(priv->net_dev, "Rx frame error: 0x%08x\n", -- status & DPAA2_ETH_RX_ERR_MASK); -+ if (net_ratelimit()) -+ netdev_warn(priv->net_dev, "Rx frame error: 0x%08x\n", -+ status & DPAA2_ETH_RX_ERR_MASK); - } -- dpaa2_eth_free_rx_fd(priv, fd, vaddr); -+ free_rx_fd(priv, fd, vaddr); - - percpu_stats = this_cpu_ptr(priv->percpu_stats); - percpu_stats->rx_errors++; -@@ -353,7 +332,7 @@ static void dpaa2_eth_rx_err(struct dpaa - * - * Observance of NAPI budget is not our concern, leaving that to the caller. - */ --static int dpaa2_eth_store_consume(struct dpaa2_eth_channel *ch) -+static int consume_frames(struct dpaa2_eth_channel *ch) - { - struct dpaa2_eth_priv *priv = ch->priv; - struct dpaa2_eth_fq *fq; -@@ -365,20 +344,14 @@ static int dpaa2_eth_store_consume(struc - do { - dq = dpaa2_io_store_next(ch->store, &is_last); - if (unlikely(!dq)) { -- if (unlikely(!is_last)) { -- netdev_dbg(priv->net_dev, -- "Channel %d reqturned no valid frames\n", -- ch->ch_id); -- /* MUST retry until we get some sort of -- * valid response token (be it "empty dequeue" -- * or a valid frame). -- */ -- continue; -- } -- break; -+ /* If we're here, we *must* have placed a -+ * volatile dequeue comnmand, so keep reading through -+ * the store until we get some sort of valid response -+ * token (either a valid frame or an "empty dequeue") -+ */ -+ continue; - } - -- /* Obtain FD and process it */ - fd = dpaa2_dq_fd(dq); - fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq); - fq->stats.frames++; -@@ -390,9 +363,10 @@ static int dpaa2_eth_store_consume(struc - return cleaned; - } - --static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv, -- struct sk_buff *skb, -- struct dpaa2_fd *fd) -+/* Create a frame descriptor based on a fragmented skb */ -+static int build_sg_fd(struct dpaa2_eth_priv *priv, -+ struct sk_buff *skb, -+ struct dpaa2_fd *fd) - { - struct device *dev = priv->net_dev->dev.parent; - void *sgt_buf = NULL; -@@ -404,14 +378,16 @@ static int dpaa2_eth_build_sg_fd(struct - struct scatterlist *scl, *crt_scl; - int num_sg; - int num_dma_bufs; -- struct dpaa2_eth_swa *bps; -+ struct dpaa2_eth_swa *swa; - - /* Create and map scatterlist. - * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have - * to go beyond nr_frags+1. 
- * Note: We don't support chained scatterlists - */ -- WARN_ON(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1); -+ if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1)) -+ return -EINVAL; -+ - scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC); - if (unlikely(!scl)) - return -ENOMEM; -@@ -420,7 +396,6 @@ static int dpaa2_eth_build_sg_fd(struct - num_sg = skb_to_sgvec(skb, scl, 0, skb->len); - num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE); - if (unlikely(!num_dma_bufs)) { -- netdev_err(priv->net_dev, "dma_map_sg() error\n"); - err = -ENOMEM; - goto dma_map_sg_failed; - } -@@ -430,7 +405,6 @@ static int dpaa2_eth_build_sg_fd(struct - sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs); - sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC); - if (unlikely(!sgt_buf)) { -- netdev_err(priv->net_dev, "failed to allocate SGT buffer\n"); - err = -ENOMEM; - goto sgt_buf_alloc_failed; - } -@@ -462,19 +436,19 @@ static int dpaa2_eth_build_sg_fd(struct - * Fit the scatterlist and the number of buffers alongside the - * skb backpointer in the SWA. We'll need all of them on Tx Conf. - */ -- bps = (struct dpaa2_eth_swa *)sgt_buf; -- bps->skb = skb; -- bps->scl = scl; -- bps->num_sg = num_sg; -- bps->num_dma_bufs = num_dma_bufs; -+ swa = (struct dpaa2_eth_swa *)sgt_buf; -+ swa->skb = skb; -+ swa->scl = scl; -+ swa->num_sg = num_sg; -+ swa->num_dma_bufs = num_dma_bufs; - -+ /* Hardware expects the SG table to be in little endian format */ - for (j = 0; j < i; j++) - dpaa2_sg_cpu_to_le(&sgt[j]); - - /* Separately map the SGT buffer */ - addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, addr))) { -- netdev_err(priv->net_dev, "dma_map_single() failed\n"); - err = -ENOMEM; - goto dma_map_single_failed; - } -@@ -484,7 +458,7 @@ static int dpaa2_eth_build_sg_fd(struct - dpaa2_fd_set_len(fd, skb->len); - - fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA | -- DPAA2_FD_CTRL_PTV1; -+ DPAA2_FD_CTRL_PTV1; - - return 0; - -@@ -497,9 +471,10 @@ dma_map_sg_failed: - return err; - } - --static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv, -- struct sk_buff *skb, -- struct dpaa2_fd *fd) -+/* Create a frame descriptor based on a linear skb */ -+static int build_single_fd(struct dpaa2_eth_priv *priv, -+ struct sk_buff *skb, -+ struct dpaa2_fd *fd) - { - struct device *dev = priv->net_dev->dev.parent; - u8 *buffer_start; -@@ -524,14 +499,11 @@ static int dpaa2_eth_build_single_fd(str - skbh = (struct sk_buff **)buffer_start; - *skbh = skb; - -- addr = dma_map_single(dev, -- buffer_start, -+ addr = dma_map_single(dev, buffer_start, - skb_tail_pointer(skb) - buffer_start, - DMA_TO_DEVICE); -- if (unlikely(dma_mapping_error(dev, addr))) { -- dev_err(dev, "dma_map_single() failed\n"); -- return -EINVAL; -- } -+ if (unlikely(dma_mapping_error(dev, addr))) -+ return -ENOMEM; - - dpaa2_fd_set_addr(fd, addr); - dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start)); -@@ -539,21 +511,23 @@ static int dpaa2_eth_build_single_fd(str - dpaa2_fd_set_format(fd, dpaa2_fd_single); - - fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA | -- DPAA2_FD_CTRL_PTV1; -+ DPAA2_FD_CTRL_PTV1; - - return 0; - } - --/* DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb -+/* FD freeing routine on the Tx path -+ * -+ * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb - * back-pointed to is also freed. 
- * This can be called either from dpaa2_eth_tx_conf() or on the error path of - * dpaa2_eth_tx(). - * Optionally, return the frame annotation status word (FAS), which needs - * to be checked if we're on the confirmation path. - */ --static void dpaa2_eth_free_fd(const struct dpaa2_eth_priv *priv, -- const struct dpaa2_fd *fd, -- u32 *status) -+static void free_tx_fd(const struct dpaa2_eth_priv *priv, -+ const struct dpaa2_fd *fd, -+ u32 *status) - { - struct device *dev = priv->net_dev->dev.parent; - dma_addr_t fd_addr; -@@ -562,7 +536,7 @@ static void dpaa2_eth_free_fd(const stru - int unmap_size; - struct scatterlist *scl; - int num_sg, num_dma_bufs; -- struct dpaa2_eth_swa *bps; -+ struct dpaa2_eth_swa *swa; - bool fd_single; - struct dpaa2_fas *fas; - -@@ -580,11 +554,11 @@ static void dpaa2_eth_free_fd(const stru - skb_tail_pointer(skb) - buffer_start, - DMA_TO_DEVICE); - } else { -- bps = (struct dpaa2_eth_swa *)skbh; -- skb = bps->skb; -- scl = bps->scl; -- num_sg = bps->num_sg; -- num_dma_bufs = bps->num_dma_bufs; -+ swa = (struct dpaa2_eth_swa *)skbh; -+ skb = swa->skb; -+ scl = swa->scl; -+ num_sg = swa->num_sg; -+ num_dma_bufs = swa->num_dma_bufs; - - /* Unmap the scatterlist */ - dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE); -@@ -596,6 +570,7 @@ static void dpaa2_eth_free_fd(const stru - dma_unmap_single(dev, fd_addr, unmap_size, DMA_TO_DEVICE); - } - -+ /* Get the timestamp value */ - if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { - struct skb_shared_hwtstamps shhwtstamps; - u64 *ns; -@@ -610,8 +585,9 @@ static void dpaa2_eth_free_fd(const stru - skb_tstamp_tx(skb, &shhwtstamps); - } - -- /* Check the status from the Frame Annotation after we unmap the first -- * buffer but before we free it. -+ /* Read the status from the Frame Annotation after we unmap the first -+ * buffer but before we free it. The caller function is responsible -+ * for checking the status value. - */ - if (status && (fd->simple.frc & DPAA2_FD_FRC_FASV)) { - fas = (struct dpaa2_fas *) -@@ -632,24 +608,16 @@ static int dpaa2_eth_tx(struct sk_buff * - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - struct dpaa2_fd fd; - struct rtnl_link_stats64 *percpu_stats; -- struct dpaa2_eth_stats *percpu_extras; -+ struct dpaa2_eth_drv_stats *percpu_extras; -+ u16 queue_mapping, flow_id; - int err, i; -- /* TxConf FQ selection primarily based on cpu affinity; this is -- * non-migratable context, so it's safe to call smp_processor_id(). 
-- */ -- u16 queue_mapping = smp_processor_id() % priv->dpni_attrs.max_senders; - - percpu_stats = this_cpu_ptr(priv->percpu_stats); - percpu_extras = this_cpu_ptr(priv->percpu_extras); - -- /* Setup the FD fields */ -- memset(&fd, 0, sizeof(fd)); -- - if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) { - struct sk_buff *ns; - -- dev_info_once(net_dev->dev.parent, -- "skb headroom too small, must realloc.\n"); - ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv)); - if (unlikely(!ns)) { - percpu_stats->tx_dropped++; -@@ -664,18 +632,20 @@ static int dpaa2_eth_tx(struct sk_buff * - */ - skb = skb_unshare(skb, GFP_ATOMIC); - if (unlikely(!skb)) { -- netdev_err(net_dev, "Out of memory for skb_unshare()"); - /* skb_unshare() has already freed the skb */ - percpu_stats->tx_dropped++; - return NETDEV_TX_OK; - } - -+ /* Setup the FD fields */ -+ memset(&fd, 0, sizeof(fd)); -+ - if (skb_is_nonlinear(skb)) { -- err = dpaa2_eth_build_sg_fd(priv, skb, &fd); -+ err = build_sg_fd(priv, skb, &fd); - percpu_extras->tx_sg_frames++; - percpu_extras->tx_sg_bytes += skb->len; - } else { -- err = dpaa2_eth_build_single_fd(priv, skb, &fd); -+ err = build_single_fd(priv, skb, &fd); - } - - if (unlikely(err)) { -@@ -686,19 +656,22 @@ static int dpaa2_eth_tx(struct sk_buff * - /* Tracing point */ - trace_dpaa2_tx_fd(net_dev, &fd); - -+ /* TxConf FQ selection primarily based on cpu affinity; this is -+ * non-migratable context, so it's safe to call smp_processor_id(). -+ */ -+ queue_mapping = smp_processor_id() % priv->dpni_attrs.max_senders; -+ flow_id = priv->fq[queue_mapping].flowid; - for (i = 0; i < (DPAA2_ETH_MAX_TX_QUEUES << 1); i++) { - err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0, -- priv->fq[queue_mapping].flowid, -- &fd); -+ flow_id, &fd); - if (err != -EBUSY) - break; - } - percpu_extras->tx_portal_busy += i; - if (unlikely(err < 0)) { -- netdev_dbg(net_dev, "error enqueueing Tx frame\n"); - percpu_stats->tx_errors++; - /* Clean up everything, including freeing the skb */ -- dpaa2_eth_free_fd(priv, &fd, NULL); -+ free_tx_fd(priv, &fd, NULL); - } else { - percpu_stats->tx_packets++; - percpu_stats->tx_bytes += skb->len; -@@ -713,13 +686,14 @@ err_alloc_headroom: - return NETDEV_TX_OK; - } - -+/* Tx confirmation frame processing routine */ - static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - const struct dpaa2_fd *fd, - struct napi_struct *napi __always_unused) - { - struct rtnl_link_stats64 *percpu_stats; -- struct dpaa2_eth_stats *percpu_extras; -+ struct dpaa2_eth_drv_stats *percpu_extras; - u32 status = 0; - - /* Tracing point */ -@@ -729,18 +703,16 @@ static void dpaa2_eth_tx_conf(struct dpa - percpu_extras->tx_conf_frames++; - percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd); - -- dpaa2_eth_free_fd(priv, fd, &status); -+ free_tx_fd(priv, fd, &status); - - if (unlikely(status & DPAA2_ETH_TXCONF_ERR_MASK)) { -- netdev_err(priv->net_dev, "TxConf frame error(s): 0x%08x\n", -- status & DPAA2_ETH_TXCONF_ERR_MASK); - percpu_stats = this_cpu_ptr(priv->percpu_stats); - /* Tx-conf logically pertains to the egress path. 
*/ - percpu_stats->tx_errors++; - } - } - --static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) -+static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) - { - int err; - -@@ -763,7 +735,7 @@ static int dpaa2_eth_set_rx_csum(struct - return 0; - } - --static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) -+static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) - { - struct dpaa2_eth_fq *fq; - struct dpni_tx_flow_cfg tx_flow_cfg; -@@ -793,37 +765,38 @@ static int dpaa2_eth_set_tx_csum(struct - return 0; - } - --static int dpaa2_bp_add_7(struct dpaa2_eth_priv *priv, u16 bpid) -+/* Perform a single release command to add buffers -+ * to the specified buffer pool -+ */ -+static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid) - { - struct device *dev = priv->net_dev->dev.parent; -- u64 buf_array[7]; -+ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; - void *buf; - dma_addr_t addr; - int i; - -- for (i = 0; i < 7; i++) { -+ for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { - /* Allocate buffer visible to WRIOP + skb shared info + - * alignment padding - */ - buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE); -- if (unlikely(!buf)) { -- dev_err(dev, "buffer allocation failed\n"); -+ if (unlikely(!buf)) - goto err_alloc; -- } -+ - buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN); - -- addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUFFER_SIZE, -+ addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE, - DMA_FROM_DEVICE); -- if (unlikely(dma_mapping_error(dev, addr))) { -- dev_err(dev, "dma_map_single() failed\n"); -+ if (unlikely(dma_mapping_error(dev, addr))) - goto err_map; -- } -+ - buf_array[i] = addr; - - /* tracing point */ - trace_dpaa2_eth_buf_seed(priv->net_dev, - buf, DPAA2_ETH_BUF_RAW_SIZE, -- addr, DPAA2_ETH_RX_BUFFER_SIZE, -+ addr, DPAA2_ETH_RX_BUF_SIZE, - bpid); - } - -@@ -850,59 +823,57 @@ err_alloc: - return 0; - } - --static int dpaa2_dpbp_seed(struct dpaa2_eth_priv *priv, u16 bpid) -+static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) - { - int i, j; - int new_count; - - /* This is the lazy seeding of Rx buffer pools. -- * dpaa2_bp_add_7() is also used on the Rx hotpath and calls -+ * dpaa2_add_bufs() is also used on the Rx hotpath and calls - * napi_alloc_frag(). The trouble with that is that it in turn ends up - * calling this_cpu_ptr(), which mandates execution in atomic context. - * Rather than splitting up the code, do a one-off preempt disable. - */ - preempt_disable(); - for (j = 0; j < priv->num_channels; j++) { -- for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += 7) { -- new_count = dpaa2_bp_add_7(priv, bpid); -+ for (i = 0; i < DPAA2_ETH_NUM_BUFS; -+ i += DPAA2_ETH_BUFS_PER_CMD) { -+ new_count = add_bufs(priv, bpid); - priv->channel[j]->buf_count += new_count; - -- if (new_count < 7) { -+ if (new_count < DPAA2_ETH_BUFS_PER_CMD) { - preempt_enable(); -- goto out_of_memory; -+ return -ENOMEM; - } - } - } - preempt_enable(); - - return 0; -- --out_of_memory: -- return -ENOMEM; - } - - /** - * Drain the specified number of buffers from the DPNI's private buffer pool. 
-- * @count must not exceeed 7 -+ * @count must not exceeed DPAA2_ETH_BUFS_PER_CMD - */ --static void dpaa2_dpbp_drain_cnt(struct dpaa2_eth_priv *priv, int count) -+static void drain_bufs(struct dpaa2_eth_priv *priv, int count) - { - struct device *dev = priv->net_dev->dev.parent; -- u64 buf_array[7]; -+ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; - void *vaddr; - int ret, i; - - do { - ret = dpaa2_io_service_acquire(NULL, priv->dpbp_attrs.bpid, -- buf_array, count); -+ buf_array, count); - if (ret < 0) { -- pr_err("dpaa2_io_service_acquire() failed\n"); -+ netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); - return; - } - for (i = 0; i < ret; i++) { - /* Same logic as on regular Rx path */ - dma_unmap_single(dev, buf_array[i], -- DPAA2_ETH_RX_BUFFER_SIZE, -+ DPAA2_ETH_RX_BUF_SIZE, - DMA_FROM_DEVICE); - vaddr = phys_to_virt(buf_array[i]); - put_page(virt_to_head_page(vaddr)); -@@ -910,12 +881,12 @@ static void dpaa2_dpbp_drain_cnt(struct - } while (ret); - } - --static void __dpaa2_dpbp_free(struct dpaa2_eth_priv *priv) -+static void drain_pool(struct dpaa2_eth_priv *priv) - { - int i; - -- dpaa2_dpbp_drain_cnt(priv, 7); -- dpaa2_dpbp_drain_cnt(priv, 1); -+ drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); -+ drain_bufs(priv, 1); - - for (i = 0; i < priv->num_channels; i++) - priv->channel[i]->buf_count = 0; -@@ -924,50 +895,55 @@ static void __dpaa2_dpbp_free(struct dpa - /* Function is called from softirq context only, so we don't need to guard - * the access to percpu count - */ --static int dpaa2_dpbp_refill(struct dpaa2_eth_priv *priv, -- struct dpaa2_eth_channel *ch, -- u16 bpid) -+static int refill_pool(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch, -+ u16 bpid) - { - int new_count; -- int err = 0; - -- if (unlikely(ch->buf_count < DPAA2_ETH_REFILL_THRESH)) { -- do { -- new_count = dpaa2_bp_add_7(priv, bpid); -- if (unlikely(!new_count)) { -- /* Out of memory; abort for now, we'll -- * try later on -- */ -- break; -- } -- ch->buf_count += new_count; -- } while (ch->buf_count < DPAA2_ETH_NUM_BUFS); -+ if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH)) -+ return 0; - -- if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS)) -- err = -ENOMEM; -- } -+ do { -+ new_count = add_bufs(priv, bpid); -+ if (unlikely(!new_count)) { -+ /* Out of memory; abort for now, we'll try later on */ -+ break; -+ } -+ ch->buf_count += new_count; -+ } while (ch->buf_count < DPAA2_ETH_NUM_BUFS); - -- return err; -+ if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS)) -+ return -ENOMEM; -+ -+ return 0; - } - --static int __dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch) -+static int pull_channel(struct dpaa2_eth_channel *ch) - { - int err; - int dequeues = -1; -- struct dpaa2_eth_priv *priv = ch->priv; - - /* Retry while portal is busy */ - do { - err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store); - dequeues++; -+ cpu_relax(); - } while (err == -EBUSY); -- if (unlikely(err)) -- netdev_err(priv->net_dev, "dpaa2_io_service_pull err %d", err); - - ch->stats.dequeue_portal_busy += dequeues; -+ if (unlikely(err)) -+ ch->stats.pull_err++; -+ - return err; - } - -+/* NAPI poll routine -+ * -+ * Frames are dequeued from the QMan channel associated with this NAPI context. -+ * Rx, Tx confirmation and (if configured) Rx error frames all count -+ * towards the NAPI budget. 
-+ */ - static int dpaa2_eth_poll(struct napi_struct *napi, int budget) - { - struct dpaa2_eth_channel *ch; -@@ -978,32 +954,32 @@ static int dpaa2_eth_poll(struct napi_st - ch = container_of(napi, struct dpaa2_eth_channel, napi); - priv = ch->priv; - -- __dpaa2_eth_pull_channel(ch); -+ while (cleaned < budget) { -+ err = pull_channel(ch); -+ if (unlikely(err)) -+ break; - -- do { - /* Refill pool if appropriate */ -- dpaa2_dpbp_refill(priv, ch, priv->dpbp_attrs.bpid); -+ refill_pool(priv, ch, priv->dpbp_attrs.bpid); - -- store_cleaned = dpaa2_eth_store_consume(ch); -+ store_cleaned = consume_frames(ch); - cleaned += store_cleaned; - -+ /* If we have enough budget left for a full store, -+ * try a new pull dequeue, otherwise we're done here -+ */ - if (store_cleaned == 0 || - cleaned > budget - DPAA2_ETH_STORE_SIZE) - break; -- -- /* Try to dequeue some more */ -- err = __dpaa2_eth_pull_channel(ch); -- if (unlikely(err)) -- break; -- } while (1); -+ } - - if (cleaned < budget) { - napi_complete_done(napi, cleaned); -- err = dpaa2_io_service_rearm(NULL, &ch->nctx); -- if (unlikely(err)) -- netdev_err(priv->net_dev, -- "Notif rearm failed for channel %d\n", -- ch->ch_id); -+ /* Re-enable data available notifications */ -+ do { -+ err = dpaa2_io_service_rearm(NULL, &ch->nctx); -+ cpu_relax(); -+ } while (err == -EBUSY); - } - - ch->stats.frames += cleaned; -@@ -1011,7 +987,7 @@ static int dpaa2_eth_poll(struct napi_st - return cleaned; - } - --static void dpaa2_eth_napi_enable(struct dpaa2_eth_priv *priv) -+static void enable_ch_napi(struct dpaa2_eth_priv *priv) - { - struct dpaa2_eth_channel *ch; - int i; -@@ -1022,7 +998,7 @@ static void dpaa2_eth_napi_enable(struct - } - } - --static void dpaa2_eth_napi_disable(struct dpaa2_eth_priv *priv) -+static void disable_ch_napi(struct dpaa2_eth_priv *priv) - { - struct dpaa2_eth_channel *ch; - int i; -@@ -1033,7 +1009,7 @@ static void dpaa2_eth_napi_disable(struc - } - } - --static int dpaa2_link_state_update(struct dpaa2_eth_priv *priv) -+static int link_state_update(struct dpaa2_eth_priv *priv) - { - struct dpni_link_state state; - int err; -@@ -1069,7 +1045,7 @@ static int dpaa2_eth_open(struct net_dev - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - int err; - -- err = dpaa2_dpbp_seed(priv, priv->dpbp_attrs.bpid); -+ err = seed_pool(priv, priv->dpbp_attrs.bpid); - if (err) { - /* Not much to do; the buffer pool, though not filled up, - * may still contain some buffers which would enable us -@@ -1084,7 +1060,7 @@ static int dpaa2_eth_open(struct net_dev - * immediately after dpni_enable(); - */ - netif_tx_stop_all_queues(net_dev); -- dpaa2_eth_napi_enable(priv); -+ enable_ch_napi(priv); - /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will - * return true and cause 'ip link show' to report the LOWER_UP flag, - * even though the link notification wasn't even received. -@@ -1093,16 +1069,16 @@ static int dpaa2_eth_open(struct net_dev - - err = dpni_enable(priv->mc_io, 0, priv->mc_token); - if (err < 0) { -- dev_err(net_dev->dev.parent, "dpni_enable() failed\n"); -+ netdev_err(net_dev, "dpni_enable() failed\n"); - goto enable_err; - } - - /* If the DPMAC object has already processed the link up interrupt, - * we have to learn the link state ourselves. 
- */ -- err = dpaa2_link_state_update(priv); -+ err = link_state_update(priv); - if (err < 0) { -- dev_err(net_dev->dev.parent, "Can't update link state\n"); -+ netdev_err(net_dev, "Can't update link state\n"); - goto link_state_err; - } - -@@ -1110,26 +1086,84 @@ static int dpaa2_eth_open(struct net_dev - - link_state_err: - enable_err: -- dpaa2_eth_napi_disable(priv); -- __dpaa2_dpbp_free(priv); -+ disable_ch_napi(priv); -+ drain_pool(priv); - return err; - } - -+/* The DPIO store must be empty when we call this, -+ * at the end of every NAPI cycle. -+ */ -+static u32 drain_channel(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch) -+{ -+ u32 drained = 0, total = 0; -+ -+ do { -+ pull_channel(ch); -+ drained = consume_frames(ch); -+ total += drained; -+ } while (drained); -+ -+ return total; -+} -+ -+static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv) -+{ -+ struct dpaa2_eth_channel *ch; -+ int i; -+ u32 drained = 0; -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ drained += drain_channel(priv, ch); -+ } -+ -+ return drained; -+} -+ - static int dpaa2_eth_stop(struct net_device *net_dev) - { - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int dpni_enabled; -+ int retries = 10; -+ u32 drained; - -- /* Stop Tx and Rx traffic */ - netif_tx_stop_all_queues(net_dev); - netif_carrier_off(net_dev); -- dpni_disable(priv->mc_io, 0, priv->mc_token); - -- msleep(500); -+ /* Loop while dpni_disable() attempts to drain the egress FQs -+ * and confirm them back to us. -+ */ -+ do { -+ dpni_disable(priv->mc_io, 0, priv->mc_token); -+ dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); -+ if (dpni_enabled) -+ /* Allow the MC some slack */ -+ msleep(100); -+ } while (dpni_enabled && --retries); -+ if (!retries) { -+ netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n"); -+ /* Must go on and disable NAPI nonetheless, so we don't crash at -+ * the next "ifconfig up" -+ */ -+ } - -- dpaa2_eth_napi_disable(priv); -- msleep(100); -+ /* Wait for NAPI to complete on every core and disable it. -+ * In particular, this will also prevent NAPI from being rescheduled if -+ * a new CDAN is serviced, effectively discarding the CDAN. We therefore -+ * don't even need to disarm the channels, except perhaps for the case -+ * of a huge coalescing value. -+ */ -+ disable_ch_napi(priv); -+ -+ /* Manually drain the Rx and TxConf queues */ -+ drained = drain_ingress_frames(priv); -+ if (drained) -+ netdev_dbg(net_dev, "Drained %d frames.\n", drained); - -- __dpaa2_dpbp_free(priv); -+ /* Empty the buffer pool */ -+ drain_pool(priv); - - return 0; - } -@@ -1138,7 +1172,7 @@ static int dpaa2_eth_init(struct net_dev - { - u64 supported = 0; - u64 not_supported = 0; -- const struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - u32 options = priv->dpni_attrs.options; - - /* Capabilities listing */ -@@ -1230,7 +1264,7 @@ static int dpaa2_eth_change_mtu(struct n - err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, - (u16)DPAA2_ETH_L2_MAX_FRM(mtu)); - if (err) { -- netdev_err(net_dev, "dpni_set_mfl() failed\n"); -+ netdev_err(net_dev, "dpni_set_max_frame_length() failed\n"); - return err; - } - -@@ -1238,18 +1272,11 @@ static int dpaa2_eth_change_mtu(struct n - return 0; - } - --/* Convenience macro to make code littered with error checking more readable */ --#define DPAA2_ETH_WARN_IF_ERR(err, netdevp, format, ...) 
\ --do { \ -- if (err) \ -- netdev_warn(netdevp, format, ##__VA_ARGS__); \ --} while (0) -- - /* Copy mac unicast addresses from @net_dev to @priv. - * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. - */ --static void _dpaa2_eth_hw_add_uc_addr(const struct net_device *net_dev, -- struct dpaa2_eth_priv *priv) -+static void add_uc_hw_addr(const struct net_device *net_dev, -+ struct dpaa2_eth_priv *priv) - { - struct netdev_hw_addr *ha; - int err; -@@ -1257,17 +1284,18 @@ static void _dpaa2_eth_hw_add_uc_addr(co - netdev_for_each_uc_addr(ha, net_dev) { - err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, - ha->addr); -- DPAA2_ETH_WARN_IF_ERR(err, priv->net_dev, -- "Could not add ucast MAC %pM to the filtering table (err %d)\n", -- ha->addr, err); -+ if (err) -+ netdev_warn(priv->net_dev, -+ "Could not add ucast MAC %pM to the filtering table (err %d)\n", -+ ha->addr, err); - } - } - - /* Copy mac multicast addresses from @net_dev to @priv - * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. - */ --static void _dpaa2_eth_hw_add_mc_addr(const struct net_device *net_dev, -- struct dpaa2_eth_priv *priv) -+static void add_mc_hw_addr(const struct net_device *net_dev, -+ struct dpaa2_eth_priv *priv) - { - struct netdev_hw_addr *ha; - int err; -@@ -1275,9 +1303,10 @@ static void _dpaa2_eth_hw_add_mc_addr(co - netdev_for_each_mc_addr(ha, net_dev) { - err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, - ha->addr); -- DPAA2_ETH_WARN_IF_ERR(err, priv->net_dev, -- "Could not add mcast MAC %pM to the filtering table (err %d)\n", -- ha->addr, err); -+ if (err) -+ netdev_warn(priv->net_dev, -+ "Could not add mcast MAC %pM to the filtering table (err %d)\n", -+ ha->addr, err); - } - } - -@@ -1296,11 +1325,11 @@ static void dpaa2_eth_set_rx_mode(struct - /* Basic sanity checks; these probably indicate a misconfiguration */ - if (!(options & DPNI_OPT_UNICAST_FILTER) && max_uc != 0) - netdev_info(net_dev, -- "max_unicast_filters=%d, you must have DPNI_OPT_UNICAST_FILTER in the DPL\n", -+ "max_unicast_filters=%d, DPNI_OPT_UNICAST_FILTER option must be enabled\n", - max_uc); - if (!(options & DPNI_OPT_MULTICAST_FILTER) && max_mc != 0) - netdev_info(net_dev, -- "max_multicast_filters=%d, you must have DPNI_OPT_MULTICAST_FILTER in the DPL\n", -+ "max_multicast_filters=%d, DPNI_OPT_MULTICAST_FILTER option must be enabled\n", - max_mc); - - /* Force promiscuous if the uc or mc counts exceed our capabilities. */ -@@ -1318,9 +1347,9 @@ static void dpaa2_eth_set_rx_mode(struct - } - - /* Adjust promisc settings due to flag combinations */ -- if (net_dev->flags & IFF_PROMISC) { -+ if (net_dev->flags & IFF_PROMISC) - goto force_promisc; -- } else if (net_dev->flags & IFF_ALLMULTI) { -+ if (net_dev->flags & IFF_ALLMULTI) { - /* First, rebuild unicast filtering table. This should be done - * in promisc mode, in order to avoid frame loss while we - * progressively add entries to the table. -@@ -1329,16 +1358,19 @@ static void dpaa2_eth_set_rx_mode(struct - * nonetheless. - */ - err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); -- DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't set uc promisc\n"); -+ if (err) -+ netdev_warn(net_dev, "Can't set uc promisc\n"); - - /* Actual uc table reconstruction. 
*/ - err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); -- DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't clear uc filters\n"); -- _dpaa2_eth_hw_add_uc_addr(net_dev, priv); -+ if (err) -+ netdev_warn(net_dev, "Can't clear uc filters\n"); -+ add_uc_hw_addr(net_dev, priv); - - /* Finally, clear uc promisc and set mc promisc as requested. */ - err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); -- DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't clear uc promisc\n"); -+ if (err) -+ netdev_warn(net_dev, "Can't clear uc promisc\n"); - goto force_mc_promisc; - } - -@@ -1346,32 +1378,39 @@ static void dpaa2_eth_set_rx_mode(struct - * For now, rebuild mac filtering tables while forcing both of them on. - */ - err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); -- DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't set uc promisc (%d)\n", err); -+ if (err) -+ netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err); - err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); -- DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't set mc promisc (%d)\n", err); -+ if (err) -+ netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err); - - /* Actual mac filtering tables reconstruction */ - err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); -- DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't clear mac filters\n"); -- _dpaa2_eth_hw_add_mc_addr(net_dev, priv); -- _dpaa2_eth_hw_add_uc_addr(net_dev, priv); -+ if (err) -+ netdev_warn(net_dev, "Can't clear mac filters\n"); -+ add_mc_hw_addr(net_dev, priv); -+ add_uc_hw_addr(net_dev, priv); - - /* Now we can clear both ucast and mcast promisc, without risking - * to drop legitimate frames anymore. - */ - err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); -- DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't clear ucast promisc\n"); -+ if (err) -+ netdev_warn(net_dev, "Can't clear ucast promisc\n"); - err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0); -- DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't clear mcast promisc\n"); -+ if (err) -+ netdev_warn(net_dev, "Can't clear mcast promisc\n"); - - return; - - force_promisc: - err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); -- DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't set ucast promisc\n"); -+ if (err) -+ netdev_warn(net_dev, "Can't set ucast promisc\n"); - force_mc_promisc: - err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); -- DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't set mcast promisc\n"); -+ if (err) -+ netdev_warn(net_dev, "Can't set mcast promisc\n"); - } - - static int dpaa2_eth_set_features(struct net_device *net_dev, -@@ -1379,20 +1418,19 @@ static int dpaa2_eth_set_features(struct - { - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - netdev_features_t changed = features ^ net_dev->features; -+ bool enable; - int err; - - if (changed & NETIF_F_RXCSUM) { -- bool enable = !!(features & NETIF_F_RXCSUM); -- -- err = dpaa2_eth_set_rx_csum(priv, enable); -+ enable = !!(features & NETIF_F_RXCSUM); -+ err = set_rx_csum(priv, enable); - if (err) - return err; - } - - if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { -- bool enable = !!(features & -- (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); -- err = dpaa2_eth_set_tx_csum(priv, enable); -+ enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); -+ err = set_tx_csum(priv, enable); - if (err) - return err; - } -@@ -1419,9 +1457,9 @@ static int dpaa2_eth_ts_ioctl(struct net - return -ERANGE; - } - -- if (config.rx_filter == HWTSTAMP_FILTER_NONE) -+ if (config.rx_filter == HWTSTAMP_FILTER_NONE) { - priv->ts_rx_en = false; -- else { -+ } else { - priv->ts_rx_en 
= true; - /* TS is set for all frame types, not only those requested */ - config.rx_filter = HWTSTAMP_FILTER_ALL; -@@ -1435,8 +1473,8 @@ static int dpaa2_eth_ioctl(struct net_de - { - if (cmd == SIOCSHWTSTAMP) - return dpaa2_eth_ts_ioctl(dev, rq, cmd); -- else -- return -EINVAL; -+ -+ return -EINVAL; - } - - static const struct net_device_ops dpaa2_eth_ops = { -@@ -1452,7 +1490,7 @@ static const struct net_device_ops dpaa2 - .ndo_do_ioctl = dpaa2_eth_ioctl, - }; - --static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx) -+static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) - { - struct dpaa2_eth_channel *ch; - -@@ -1464,37 +1502,9 @@ static void dpaa2_eth_cdan_cb(struct dpa - napi_schedule_irqoff(&ch->napi); - } - --static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv) --{ -- int i; -- -- /* We have one TxConf FQ per Tx flow */ -- for (i = 0; i < priv->dpni_attrs.max_senders; i++) { -- priv->fq[priv->num_fqs].netdev_priv = priv; -- priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ; -- priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf; -- priv->fq[priv->num_fqs++].flowid = DPNI_NEW_FLOW_ID; -- } -- -- /* The number of Rx queues (Rx distribution width) may be different from -- * the number of cores. -- * We only support one traffic class for now. -- */ -- for (i = 0; i < dpaa2_queue_count(priv); i++) { -- priv->fq[priv->num_fqs].netdev_priv = priv; -- priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; -- priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; -- priv->fq[priv->num_fqs++].flowid = (u16)i; -- } -- --#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE -- /* We have exactly one Rx error queue per DPNI */ -- priv->fq[priv->num_fqs].netdev_priv = priv; -- priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ; -- priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err; --#endif --} -- -+/* Verify that the FLIB API version of various MC objects is supported -+ * by our driver -+ */ - static int check_obj_version(struct fsl_mc_device *ls_dev, u16 mc_version) - { - char *name = ls_dev->obj_desc.type; -@@ -1517,8 +1527,7 @@ static int check_obj_version(struct fsl_ - - /* Check that the FLIB-defined version matches the one reported by MC */ - if (mc_version != flib_version) { -- dev_err(dev, -- "%s FLIB version mismatch: MC reports %d, we have %d\n", -+ dev_err(dev, "%s FLIB version mismatch: MC reports %d, we have %d\n", - name, mc_version, flib_version); - return -EINVAL; - } -@@ -1534,7 +1543,8 @@ static int check_obj_version(struct fsl_ - return 0; - } - --static struct fsl_mc_device *dpaa2_dpcon_setup(struct dpaa2_eth_priv *priv) -+/* Allocate and configure a DPCON object */ -+static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) - { - struct fsl_mc_device *dpcon; - struct device *dev = priv->net_dev->dev.parent; -@@ -1582,8 +1592,8 @@ err_open: - return NULL; - } - --static void dpaa2_dpcon_free(struct dpaa2_eth_priv *priv, -- struct fsl_mc_device *dpcon) -+static void free_dpcon(struct dpaa2_eth_priv *priv, -+ struct fsl_mc_device *dpcon) - { - dpcon_disable(priv->mc_io, 0, dpcon->mc_handle); - dpcon_close(priv->mc_io, 0, dpcon->mc_handle); -@@ -1591,7 +1601,7 @@ static void dpaa2_dpcon_free(struct dpaa - } - - static struct dpaa2_eth_channel * --dpaa2_alloc_channel(struct dpaa2_eth_priv *priv) -+alloc_channel(struct dpaa2_eth_priv *priv) - { - struct dpaa2_eth_channel *channel; - struct dpcon_attr attr; -@@ -1599,12 +1609,10 @@ dpaa2_alloc_channel(struct dpaa2_eth_pri - int err; - - channel = kzalloc(sizeof(*channel), GFP_ATOMIC); -- if (!channel) { -- dev_err(dev, 
"Memory allocation failed\n"); -+ if (!channel) - return NULL; -- } - -- channel->dpcon = dpaa2_dpcon_setup(priv); -+ channel->dpcon = setup_dpcon(priv); - if (!channel->dpcon) - goto err_setup; - -@@ -1622,20 +1630,23 @@ dpaa2_alloc_channel(struct dpaa2_eth_pri - return channel; - - err_get_attr: -- dpaa2_dpcon_free(priv, channel->dpcon); -+ free_dpcon(priv, channel->dpcon); - err_setup: - kfree(channel); - return NULL; - } - --static void dpaa2_free_channel(struct dpaa2_eth_priv *priv, -- struct dpaa2_eth_channel *channel) -+static void free_channel(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *channel) - { -- dpaa2_dpcon_free(priv, channel->dpcon); -+ free_dpcon(priv, channel->dpcon); - kfree(channel); - } - --static int dpaa2_dpio_setup(struct dpaa2_eth_priv *priv) -+/* DPIO setup: allocate and configure QBMan channels, setup core affinity -+ * and register data availability notifications -+ */ -+static int setup_dpio(struct dpaa2_eth_priv *priv) - { - struct dpaa2_io_notification_ctx *nctx; - struct dpaa2_eth_channel *channel; -@@ -1652,7 +1663,7 @@ static int dpaa2_dpio_setup(struct dpaa2 - cpumask_clear(&priv->dpio_cpumask); - for_each_online_cpu(i) { - /* Try to allocate a channel */ -- channel = dpaa2_alloc_channel(priv); -+ channel = alloc_channel(priv); - if (!channel) - goto err_alloc_ch; - -@@ -1660,7 +1671,7 @@ static int dpaa2_dpio_setup(struct dpaa2 - - nctx = &channel->nctx; - nctx->is_cdan = 1; -- nctx->cb = dpaa2_eth_cdan_cb; -+ nctx->cb = cdan_cb; - nctx->id = channel->ch_id; - nctx->desired_cpu = i; - -@@ -1671,7 +1682,7 @@ static int dpaa2_dpio_setup(struct dpaa2 - /* This core doesn't have an affine DPIO, but there's - * a chance another one does, so keep trying - */ -- dpaa2_free_channel(priv, channel); -+ free_channel(priv, channel); - continue; - } - -@@ -1693,7 +1704,7 @@ static int dpaa2_dpio_setup(struct dpaa2 - cpumask_set_cpu(i, &priv->dpio_cpumask); - priv->num_channels++; - -- if (priv->num_channels == dpaa2_max_channels(priv)) -+ if (priv->num_channels == dpaa2_eth_max_channels(priv)) - break; - } - -@@ -1706,7 +1717,7 @@ static int dpaa2_dpio_setup(struct dpaa2 - - err_set_cdan: - dpaa2_io_service_deregister(NULL, nctx); -- dpaa2_free_channel(priv, channel); -+ free_channel(priv, channel); - err_alloc_ch: - if (cpumask_empty(&priv->dpio_cpumask)) { - dev_err(dev, "No cpu with an affine DPIO/DPCON\n"); -@@ -1717,7 +1728,7 @@ err_alloc_ch: - return 0; - } - --static void dpaa2_dpio_free(struct dpaa2_eth_priv *priv) -+static void free_dpio(struct dpaa2_eth_priv *priv) - { - int i; - struct dpaa2_eth_channel *ch; -@@ -1726,12 +1737,12 @@ static void dpaa2_dpio_free(struct dpaa2 - for (i = 0; i < priv->num_channels; i++) { - ch = priv->channel[i]; - dpaa2_io_service_deregister(NULL, &ch->nctx); -- dpaa2_free_channel(priv, ch); -+ free_channel(priv, ch); - } - } - --static struct dpaa2_eth_channel * --dpaa2_get_channel_by_cpu(struct dpaa2_eth_priv *priv, int cpu) -+static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv, -+ int cpu) - { - struct device *dev = priv->net_dev->dev.parent; - int i; -@@ -1748,11 +1759,11 @@ dpaa2_get_channel_by_cpu(struct dpaa2_et - return priv->channel[0]; - } - --static void dpaa2_set_fq_affinity(struct dpaa2_eth_priv *priv) -+static void set_fq_affinity(struct dpaa2_eth_priv *priv) - { - struct device *dev = priv->net_dev->dev.parent; - struct dpaa2_eth_fq *fq; -- int rx_cpu, txconf_cpu; -+ int rx_cpu, txc_cpu; - int i; - - /* For each FQ, pick one channel/CPU to deliver frames to. 
-@@ -1760,7 +1771,7 @@ static void dpaa2_set_fq_affinity(struct - * through direct user intervention. - */ - rx_cpu = cpumask_first(&priv->dpio_cpumask); -- txconf_cpu = cpumask_first(&priv->txconf_cpumask); -+ txc_cpu = cpumask_first(&priv->txconf_cpumask); - - for (i = 0; i < priv->num_fqs; i++) { - fq = &priv->fq[i]; -@@ -1768,20 +1779,56 @@ static void dpaa2_set_fq_affinity(struct - case DPAA2_RX_FQ: - case DPAA2_RX_ERR_FQ: - fq->target_cpu = rx_cpu; -- cpumask_rr(rx_cpu, &priv->dpio_cpumask); -+ rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask); -+ if (rx_cpu >= nr_cpu_ids) -+ rx_cpu = cpumask_first(&priv->dpio_cpumask); - break; - case DPAA2_TX_CONF_FQ: -- fq->target_cpu = txconf_cpu; -- cpumask_rr(txconf_cpu, &priv->txconf_cpumask); -+ fq->target_cpu = txc_cpu; -+ txc_cpu = cpumask_next(txc_cpu, &priv->txconf_cpumask); -+ if (txc_cpu >= nr_cpu_ids) -+ txc_cpu = cpumask_first(&priv->txconf_cpumask); - break; - default: - dev_err(dev, "Unknown FQ type: %d\n", fq->type); - } -- fq->channel = dpaa2_get_channel_by_cpu(priv, fq->target_cpu); -+ fq->channel = get_affine_channel(priv, fq->target_cpu); -+ } -+} -+ -+static void setup_fqs(struct dpaa2_eth_priv *priv) -+{ -+ int i; -+ -+ /* We have one TxConf FQ per Tx flow */ -+ for (i = 0; i < priv->dpni_attrs.max_senders; i++) { -+ priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ; -+ priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf; -+ priv->fq[priv->num_fqs++].flowid = DPNI_NEW_FLOW_ID; -+ } -+ -+ /* The number of Rx queues (Rx distribution width) may be different from -+ * the number of cores. -+ * We only support one traffic class for now. -+ */ -+ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { -+ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; -+ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; -+ priv->fq[priv->num_fqs++].flowid = (u16)i; - } -+ -+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE -+ /* We have exactly one Rx error queue per DPNI */ -+ priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ; -+ priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err; -+#endif -+ -+ /* For each FQ, decide on which core to process incoming frames */ -+ set_fq_affinity(priv); - } - --static int dpaa2_dpbp_setup(struct dpaa2_eth_priv *priv) -+/* Allocate and configure one buffer pool for each interface */ -+static int setup_dpbp(struct dpaa2_eth_priv *priv) - { - int err; - struct fsl_mc_device *dpbp_dev; -@@ -1833,15 +1880,16 @@ err_open: - return err; - } - --static void dpaa2_dpbp_free(struct dpaa2_eth_priv *priv) -+static void free_dpbp(struct dpaa2_eth_priv *priv) - { -- __dpaa2_dpbp_free(priv); -+ drain_pool(priv); - dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); - dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); - fsl_mc_object_free(priv->dpbp_dev); - } - --static int dpaa2_dpni_setup(struct fsl_mc_device *ls_dev) -+/* Configure the DPNI object this interface is associated with */ -+static int setup_dpni(struct fsl_mc_device *ls_dev) - { - struct device *dev = &ls_dev->dev; - struct dpaa2_eth_priv *priv; -@@ -1854,7 +1902,7 @@ static int dpaa2_dpni_setup(struct fsl_m - - priv->dpni_id = ls_dev->obj_desc.id; - -- /* and get a handle for the DPNI this interface is associate with */ -+ /* get a handle for the DPNI object */ - err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token); - if (err) { - dev_err(dev, "dpni_open() failed\n"); -@@ -1864,7 +1912,10 @@ static int dpaa2_dpni_setup(struct fsl_m - ls_dev->mc_io = priv->mc_io; - ls_dev->mc_handle = priv->mc_token; - -- dma_mem = kzalloc(DPAA2_EXT_CFG_SIZE, GFP_DMA | GFP_KERNEL); 
-+ /* Map a memory region which will be used by MC to pass us an -+ * attribute structure -+ */ -+ dma_mem = kzalloc(DPAA2_EXT_CFG_SIZE, GFP_DMA | GFP_KERNEL); - if (!dma_mem) - goto err_alloc; - -@@ -1878,10 +1929,15 @@ static int dpaa2_dpni_setup(struct fsl_m - - err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, - &priv->dpni_attrs); -+ -+ /* We'll check the return code after unmapping, as we need to -+ * do this anyway -+ */ -+ dma_unmap_single(dev, priv->dpni_attrs.ext_cfg_iova, -+ DPAA2_EXT_CFG_SIZE, DMA_FROM_DEVICE); -+ - if (err) { - dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err); -- dma_unmap_single(dev, priv->dpni_attrs.ext_cfg_iova, -- DPAA2_EXT_CFG_SIZE, DMA_FROM_DEVICE); - goto err_get_attr; - } - -@@ -1889,9 +1945,6 @@ static int dpaa2_dpni_setup(struct fsl_m - if (err) - goto err_dpni_ver; - -- dma_unmap_single(dev, priv->dpni_attrs.ext_cfg_iova, -- DPAA2_EXT_CFG_SIZE, DMA_FROM_DEVICE); -- - memset(&priv->dpni_ext_cfg, 0, sizeof(priv->dpni_ext_cfg)); - err = dpni_extract_extended_cfg(&priv->dpni_ext_cfg, dma_mem); - if (err) { -@@ -1909,15 +1962,15 @@ static int dpaa2_dpni_setup(struct fsl_m - priv->buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; - /* HW erratum mandates data alignment in multiples of 256 */ - priv->buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN; -- /* ...rx, ... */ -+ -+ /* rx buffer */ - err = dpni_set_rx_buffer_layout(priv->mc_io, 0, priv->mc_token, - &priv->buf_layout); - if (err) { - dev_err(dev, "dpni_set_rx_buffer_layout() failed"); - goto err_buf_layout; - } -- /* ... tx, ... */ -- /* remove Rx-only options */ -+ /* tx buffer: remove Rx-only options */ - priv->buf_layout.options &= ~(DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | - DPNI_BUF_LAYOUT_OPT_PARSER_RESULT); - err = dpni_set_tx_buffer_layout(priv->mc_io, 0, priv->mc_token, -@@ -1926,7 +1979,7 @@ static int dpaa2_dpni_setup(struct fsl_m - dev_err(dev, "dpni_set_tx_buffer_layout() failed"); - goto err_buf_layout; - } -- /* ... tx-confirm. */ -+ /* tx-confirm: same options as tx */ - priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE; - priv->buf_layout.options |= DPNI_BUF_LAYOUT_OPT_TIMESTAMP; - priv->buf_layout.pass_timestamp = 1; -@@ -1946,8 +1999,9 @@ static int dpaa2_dpni_setup(struct fsl_m - goto err_data_offset; - } - -- /* Warn in case TX data offset is not multiple of 64 bytes. */ -- WARN_ON(priv->tx_data_offset % 64); -+ if ((priv->tx_data_offset % 64) != 0) -+ dev_warn(dev, "Tx data offset (%d) not a multiple of 64B", -+ priv->tx_data_offset); - - /* Accommodate SWA space. 
*/ - priv->tx_data_offset += DPAA2_ETH_SWA_SIZE; -@@ -1976,7 +2030,7 @@ err_open: - return err; - } - --static void dpaa2_dpni_free(struct dpaa2_eth_priv *priv) -+static void free_dpni(struct dpaa2_eth_priv *priv) - { - int err; - -@@ -1988,8 +2042,8 @@ static void dpaa2_dpni_free(struct dpaa2 - dpni_close(priv->mc_io, 0, priv->mc_token); - } - --static int dpaa2_rx_flow_setup(struct dpaa2_eth_priv *priv, -- struct dpaa2_eth_fq *fq) -+static int setup_rx_flow(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_fq *fq) - { - struct device *dev = priv->net_dev->dev.parent; - struct dpni_queue_attr rx_queue_attr; -@@ -2023,8 +2077,8 @@ static int dpaa2_rx_flow_setup(struct dp - return 0; - } - --static int dpaa2_tx_flow_setup(struct dpaa2_eth_priv *priv, -- struct dpaa2_eth_fq *fq) -+static int setup_tx_flow(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_fq *fq) - { - struct device *dev = priv->net_dev->dev.parent; - struct dpni_tx_flow_cfg tx_flow_cfg; -@@ -2070,15 +2124,16 @@ static int dpaa2_tx_flow_setup(struct dp - } - - #ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE --static int dpaa2_rx_err_setup(struct dpaa2_eth_priv *priv, -- struct dpaa2_eth_fq *fq) -+static int setup_rx_err_flow(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_fq *fq) - { - struct dpni_queue_attr queue_attr; - struct dpni_queue_cfg queue_cfg; - int err; - - /* Configure the Rx error queue to generate CDANs, -- * just like the Rx queues */ -+ * just like the Rx queues -+ */ - queue_cfg.options = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST; - queue_cfg.dest_cfg.dest_type = DPNI_DEST_DPCON; - queue_cfg.dest_cfg.priority = 1; -@@ -2091,7 +2146,8 @@ static int dpaa2_rx_err_setup(struct dpa - } - - /* Get the FQID */ -- err = dpni_get_rx_err_queue(priv->mc_io, 0, priv->mc_token, &queue_attr); -+ err = dpni_get_rx_err_queue(priv->mc_io, 0, priv->mc_token, -+ &queue_attr); - if (err) { - netdev_err(priv->net_dev, "dpni_get_rx_err_queue() failed\n"); - return err; -@@ -2102,7 +2158,10 @@ static int dpaa2_rx_err_setup(struct dpa - } - #endif - --static int dpaa2_dpni_bind(struct dpaa2_eth_priv *priv) -+/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, -+ * frame queues and channels -+ */ -+static int bind_dpni(struct dpaa2_eth_priv *priv) - { - struct net_device *net_dev = priv->net_dev; - struct device *dev = net_dev->dev.parent; -@@ -2114,20 +2173,20 @@ static int dpaa2_dpni_bind(struct dpaa2_ - pools_params.num_dpbp = 1; - pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; - pools_params.pools[0].backup_pool = 0; -- pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUFFER_SIZE; -+ pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE; - err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); - if (err) { - dev_err(dev, "dpni_set_pools() failed\n"); - return err; - } - -- dpaa2_cls_check(net_dev); -+ check_fs_support(net_dev); - - /* have the interface implicitly distribute traffic based on supported - * header fields - */ - if (dpaa2_eth_hash_enabled(priv)) { -- err = dpaa2_set_hash(net_dev, DPAA2_RXH_SUPPORTED); -+ err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED); - if (err) - return err; - } -@@ -2151,14 +2210,14 @@ static int dpaa2_dpni_bind(struct dpaa2_ - for (i = 0; i < priv->num_fqs; i++) { - switch (priv->fq[i].type) { - case DPAA2_RX_FQ: -- err = dpaa2_rx_flow_setup(priv, &priv->fq[i]); -+ err = setup_rx_flow(priv, &priv->fq[i]); - break; - case DPAA2_TX_CONF_FQ: -- err = dpaa2_tx_flow_setup(priv, &priv->fq[i]); -+ err = setup_tx_flow(priv, &priv->fq[i]); - 
break; - #ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE - case DPAA2_RX_ERR_FQ: -- err = dpaa2_rx_err_setup(priv, &priv->fq[i]); -+ err = setup_rx_err_flow(priv, &priv->fq[i]); - break; - #endif - default: -@@ -2178,7 +2237,8 @@ static int dpaa2_dpni_bind(struct dpaa2_ - return 0; - } - --static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv) -+/* Allocate rings for storing incoming frame descriptors */ -+static int alloc_rings(struct dpaa2_eth_priv *priv) - { - struct net_device *net_dev = priv->net_dev; - struct device *dev = net_dev->dev.parent; -@@ -2205,7 +2265,7 @@ err_ring: - return -ENOMEM; - } - --static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv) -+static void free_rings(struct dpaa2_eth_priv *priv) - { - int i; - -@@ -2213,7 +2273,7 @@ static void dpaa2_eth_free_rings(struct - dpaa2_io_store_destroy(priv->channel[i]->store); - } - --static int dpaa2_eth_netdev_init(struct net_device *net_dev) -+static int netdev_init(struct net_device *net_dev) - { - int err; - struct device *dev = net_dev->dev.parent; -@@ -2223,7 +2283,9 @@ static int dpaa2_eth_netdev_init(struct - - net_dev->netdev_ops = &dpaa2_eth_ops; - -- /* If the DPL contains all-0 mac_addr, set a random hardware address */ -+ /* If the DPNI attributes contain an all-0 mac_addr, -+ * set a random hardware address -+ */ - err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token, - mac_addr); - if (err) { -@@ -2281,14 +2343,13 @@ static int dpaa2_eth_netdev_init(struct - return 0; - } - --#ifdef CONFIG_FSL_DPAA2_ETH_LINK_POLL --static int dpaa2_poll_link_state(void *arg) -+static int poll_link_state(void *arg) - { - struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg; - int err; - - while (!kthread_should_stop()) { -- err = dpaa2_link_state_update(priv); -+ err = link_state_update(priv); - if (unlikely(err)) - return err; - -@@ -2297,7 +2358,7 @@ static int dpaa2_poll_link_state(void *a - - return 0; - } --#else -+ - static irqreturn_t dpni_irq0_handler(int irq_num, void *arg) - { - return IRQ_WAKE_THREAD; -@@ -2312,7 +2373,6 @@ static irqreturn_t dpni_irq0_handler_thr - struct net_device *net_dev = dev_get_drvdata(dev); - int err; - -- netdev_dbg(net_dev, "IRQ %d received\n", irq_num); - err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, - irq_index, &status); - if (unlikely(err)) { -@@ -2323,7 +2383,7 @@ static irqreturn_t dpni_irq0_handler_thr - - if (status & DPNI_IRQ_EVENT_LINK_CHANGED) { - clear |= DPNI_IRQ_EVENT_LINK_CHANGED; -- dpaa2_link_state_update(netdev_priv(net_dev)); -+ link_state_update(netdev_priv(net_dev)); - } - - out: -@@ -2332,17 +2392,18 @@ out: - return IRQ_HANDLED; - } - --static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev) -+static int setup_irqs(struct fsl_mc_device *ls_dev) - { - int err = 0; - struct fsl_mc_device_irq *irq; -- int irq_count = ls_dev->obj_desc.irq_count; - u8 irq_index = DPNI_IRQ_INDEX; - u32 mask = DPNI_IRQ_EVENT_LINK_CHANGED; - -- /* The only interrupt supported now is the link state notification. 
*/ -- if (WARN_ON(irq_count != 1)) -- return -EINVAL; -+ err = fsl_mc_allocate_irqs(ls_dev); -+ if (err) { -+ dev_err(&ls_dev->dev, "MC irqs allocation failed\n"); -+ return err; -+ } - - irq = ls_dev->irqs[0]; - err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq, -@@ -2352,28 +2413,34 @@ static int dpaa2_eth_setup_irqs(struct f - dev_name(&ls_dev->dev), &ls_dev->dev); - if (err < 0) { - dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d", err); -- return err; -+ goto free_mc_irq; - } - - err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle, - irq_index, mask); - if (err < 0) { - dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d", err); -- return err; -+ goto free_irq; - } - - err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle, - irq_index, 1); - if (err < 0) { - dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d", err); -- return err; -+ goto free_irq; - } - - return 0; -+ -+free_irq: -+ devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev); -+free_mc_irq: -+ fsl_mc_free_irqs(ls_dev); -+ -+ return err; - } --#endif - --static void dpaa2_eth_napi_add(struct dpaa2_eth_priv *priv) -+static void add_ch_napi(struct dpaa2_eth_priv *priv) - { - int i; - struct dpaa2_eth_channel *ch; -@@ -2386,7 +2453,7 @@ static void dpaa2_eth_napi_add(struct dp - } - } - --static void dpaa2_eth_napi_del(struct dpaa2_eth_priv *priv) -+static void del_ch_napi(struct dpaa2_eth_priv *priv) - { - int i; - struct dpaa2_eth_channel *ch; -@@ -2398,7 +2465,6 @@ static void dpaa2_eth_napi_del(struct dp - } - - /* SysFS support */ -- - static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev, - struct device_attribute *attr, - char *buf) -@@ -2482,22 +2548,21 @@ static ssize_t dpaa2_eth_write_txconf_cp - } - - /* Set the new TxConf FQ affinities */ -- dpaa2_set_fq_affinity(priv); -+ set_fq_affinity(priv); - --#ifdef CONFIG_FSL_DPAA2_ETH_LINK_POLL - /* dpaa2_eth_open() below will *stop* the Tx queues until an explicit - * link up notification is received. Give the polling thread enough time - * to detect the link state change, or else we'll end up with the - * transmission side forever shut down. 
- */ -- msleep(2 * DPAA2_ETH_LINK_STATE_REFRESH); --#endif -+ if (priv->do_link_poll) -+ msleep(2 * DPAA2_ETH_LINK_STATE_REFRESH); - - for (i = 0; i < priv->num_fqs; i++) { - fq = &priv->fq[i]; - if (fq->type != DPAA2_TX_CONF_FQ) - continue; -- dpaa2_tx_flow_setup(priv, fq); -+ setup_tx_flow(priv, fq); - } - - if (running) { -@@ -2568,7 +2633,6 @@ static int dpaa2_eth_probe(struct fsl_mc - - priv = netdev_priv(net_dev); - priv->net_dev = net_dev; -- priv->msg_enable = netif_msg_init(debug, -1); - - /* Obtain a MC portal */ - err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, -@@ -2578,39 +2642,27 @@ static int dpaa2_eth_probe(struct fsl_mc - goto err_portal_alloc; - } - --#ifndef CONFIG_FSL_DPAA2_ETH_LINK_POLL -- err = fsl_mc_allocate_irqs(dpni_dev); -- if (err) { -- dev_err(dev, "MC irqs allocation failed\n"); -- goto err_irqs_alloc; -- } --#endif -- -- /* DPNI initialization */ -- err = dpaa2_dpni_setup(dpni_dev); -- if (err < 0) -+ /* MC objects initialization and configuration */ -+ err = setup_dpni(dpni_dev); -+ if (err) - goto err_dpni_setup; - -- /* DPIO */ -- err = dpaa2_dpio_setup(priv); -+ err = setup_dpio(priv); - if (err) - goto err_dpio_setup; - -- /* FQs */ -- dpaa2_eth_setup_fqs(priv); -- dpaa2_set_fq_affinity(priv); -+ setup_fqs(priv); - -- /* DPBP */ -- err = dpaa2_dpbp_setup(priv); -+ err = setup_dpbp(priv); - if (err) - goto err_dpbp_setup; - -- /* DPNI binding to DPIO and DPBPs */ -- err = dpaa2_dpni_bind(priv); -+ err = bind_dpni(priv); - if (err) - goto err_bind; - -- dpaa2_eth_napi_add(priv); -+ /* Add a NAPI context for each channel */ -+ add_ch_napi(priv); - - /* Percpu statistics */ - priv->percpu_stats = alloc_percpu(*priv->percpu_stats); -@@ -2635,38 +2687,37 @@ static int dpaa2_eth_probe(struct fsl_mc - dev_warn(&net_dev->dev, "using name \"%s\"\n", net_dev->name); - } - -- err = dpaa2_eth_netdev_init(net_dev); -+ err = netdev_init(net_dev); - if (err) - goto err_netdev_init; - - /* Configure checksum offload based on current interface flags */ -- err = dpaa2_eth_set_rx_csum(priv, -- !!(net_dev->features & NETIF_F_RXCSUM)); -+ err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM)); - if (err) - goto err_csum; - -- err = dpaa2_eth_set_tx_csum(priv, -- !!(net_dev->features & -- (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))); -+ err = set_tx_csum(priv, !!(net_dev->features & -+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))); - if (err) - goto err_csum; - -- err = dpaa2_eth_alloc_rings(priv); -+ err = alloc_rings(priv); - if (err) - goto err_alloc_rings; - - net_dev->ethtool_ops = &dpaa2_ethtool_ops; - --#ifdef CONFIG_FSL_DPAA2_ETH_LINK_POLL -- priv->poll_thread = kthread_run(dpaa2_poll_link_state, priv, -- "%s_poll_link", net_dev->name); --#else -- err = dpaa2_eth_setup_irqs(dpni_dev); -+ err = setup_irqs(dpni_dev); - if (err) { -- netdev_err(net_dev, "ERROR %d setting up interrupts", err); -- goto err_setup_irqs; -+ netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n"); -+ priv->poll_thread = kthread_run(poll_link_state, priv, -+ "%s_poll_link", net_dev->name); -+ if (IS_ERR(priv->poll_thread)) { -+ netdev_err(net_dev, "Error starting polling thread\n"); -+ goto err_poll_thread; -+ } -+ priv->do_link_poll = true; - } --#endif - - dpaa2_eth_sysfs_init(&net_dev->dev); - dpaa2_dbg_add(priv); -@@ -2674,10 +2725,8 @@ static int dpaa2_eth_probe(struct fsl_mc - dev_info(dev, "Probed interface %s\n", net_dev->name); - return 0; - --#ifndef CONFIG_FSL_DPAA2_ETH_LINK_POLL --err_setup_irqs: --#endif -- dpaa2_eth_free_rings(priv); 
-+err_poll_thread: -+ free_rings(priv); - err_alloc_rings: - err_csum: - unregister_netdev(net_dev); -@@ -2686,19 +2735,15 @@ err_netdev_init: - err_alloc_percpu_extras: - free_percpu(priv->percpu_stats); - err_alloc_percpu_stats: -- dpaa2_eth_napi_del(priv); -+ del_ch_napi(priv); - err_bind: -- dpaa2_dpbp_free(priv); -+ free_dpbp(priv); - err_dpbp_setup: -- dpaa2_dpio_free(priv); -+ free_dpio(priv); - err_dpio_setup: - kfree(priv->cls_rule); - dpni_close(priv->mc_io, 0, priv->mc_token); - err_dpni_setup: --#ifndef CONFIG_FSL_DPAA2_ETH_LINK_POLL -- fsl_mc_free_irqs(dpni_dev); --err_irqs_alloc: --#endif - fsl_mc_portal_free(priv->mc_io); - err_portal_alloc: - dev_set_drvdata(dev, NULL); -@@ -2723,22 +2768,21 @@ static int dpaa2_eth_remove(struct fsl_m - unregister_netdev(net_dev); - dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); - -- dpaa2_dpio_free(priv); -- dpaa2_eth_free_rings(priv); -- dpaa2_eth_napi_del(priv); -- dpaa2_dpbp_free(priv); -- dpaa2_dpni_free(priv); -+ free_dpio(priv); -+ free_rings(priv); -+ del_ch_napi(priv); -+ free_dpbp(priv); -+ free_dpni(priv); - - fsl_mc_portal_free(priv->mc_io); - - free_percpu(priv->percpu_stats); - free_percpu(priv->percpu_extras); - --#ifdef CONFIG_FSL_DPAA2_ETH_LINK_POLL -- kthread_stop(priv->poll_thread); --#else -- fsl_mc_free_irqs(ls_dev); --#endif -+ if (priv->do_link_poll) -+ kthread_stop(priv->poll_thread); -+ else -+ fsl_mc_free_irqs(ls_dev); - - kfree(priv->cls_rule); - ---- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h -@@ -49,8 +49,10 @@ - - #define DPAA2_ETH_STORE_SIZE 16 - --/* Maximum receive frame size is 64K */ --#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUFFER_SIZE) -+/* Maximum number of scatter-gather entries in an ingress frame, -+ * considering the maximum receive frame size is 64K -+ */ -+#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE) - - /* Maximum acceptable MTU value. It is in direct relation with the MC-enforced - * Max Frame Length (currently 10k). -@@ -75,17 +77,26 @@ - #define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256) - #define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE - -+/* Maximum number of buffers that can be acquired/released through a single -+ * QBMan command -+ */ -+#define DPAA2_ETH_BUFS_PER_CMD 7 -+ - /* Hardware requires alignment for ingress/egress buffer addresses - * and ingress buffer lengths. 
- */ --#define DPAA2_ETH_RX_BUFFER_SIZE 2048 -+#define DPAA2_ETH_RX_BUF_SIZE 2048 - #define DPAA2_ETH_TX_BUF_ALIGN 64 - #define DPAA2_ETH_RX_BUF_ALIGN 256 - #define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \ - ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN) - -+/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress -+ * buffers large enough to allow building an skb around them and also account -+ * for alignment restrictions -+ */ - #define DPAA2_ETH_BUF_RAW_SIZE \ -- (DPAA2_ETH_RX_BUFFER_SIZE + \ -+ (DPAA2_ETH_RX_BUF_SIZE + \ - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \ - DPAA2_ETH_RX_BUF_ALIGN) - -@@ -127,57 +138,56 @@ struct dpaa2_fas { - __le32 status; - } __packed; - -+/* Error and status bits in the frame annotation status word */ - /* Debug frame, otherwise supposed to be discarded */ --#define DPAA2_ETH_FAS_DISC 0x80000000 -+#define DPAA2_FAS_DISC 0x80000000 - /* MACSEC frame */ --#define DPAA2_ETH_FAS_MS 0x40000000 --#define DPAA2_ETH_FAS_PTP 0x08000000 -+#define DPAA2_FAS_MS 0x40000000 -+#define DPAA2_FAS_PTP 0x08000000 - /* Ethernet multicast frame */ --#define DPAA2_ETH_FAS_MC 0x04000000 -+#define DPAA2_FAS_MC 0x04000000 - /* Ethernet broadcast frame */ --#define DPAA2_ETH_FAS_BC 0x02000000 --#define DPAA2_ETH_FAS_KSE 0x00040000 --#define DPAA2_ETH_FAS_EOFHE 0x00020000 --#define DPAA2_ETH_FAS_MNLE 0x00010000 --#define DPAA2_ETH_FAS_TIDE 0x00008000 --#define DPAA2_ETH_FAS_PIEE 0x00004000 -+#define DPAA2_FAS_BC 0x02000000 -+#define DPAA2_FAS_KSE 0x00040000 -+#define DPAA2_FAS_EOFHE 0x00020000 -+#define DPAA2_FAS_MNLE 0x00010000 -+#define DPAA2_FAS_TIDE 0x00008000 -+#define DPAA2_FAS_PIEE 0x00004000 - /* Frame length error */ --#define DPAA2_ETH_FAS_FLE 0x00002000 --/* Frame physical error; our favourite pastime */ --#define DPAA2_ETH_FAS_FPE 0x00001000 --#define DPAA2_ETH_FAS_PTE 0x00000080 --#define DPAA2_ETH_FAS_ISP 0x00000040 --#define DPAA2_ETH_FAS_PHE 0x00000020 --#define DPAA2_ETH_FAS_BLE 0x00000010 -+#define DPAA2_FAS_FLE 0x00002000 -+/* Frame physical error */ -+#define DPAA2_FAS_FPE 0x00001000 -+#define DPAA2_FAS_PTE 0x00000080 -+#define DPAA2_FAS_ISP 0x00000040 -+#define DPAA2_FAS_PHE 0x00000020 -+#define DPAA2_FAS_BLE 0x00000010 - /* L3 csum validation performed */ --#define DPAA2_ETH_FAS_L3CV 0x00000008 -+#define DPAA2_FAS_L3CV 0x00000008 - /* L3 csum error */ --#define DPAA2_ETH_FAS_L3CE 0x00000004 -+#define DPAA2_FAS_L3CE 0x00000004 - /* L4 csum validation performed */ --#define DPAA2_ETH_FAS_L4CV 0x00000002 -+#define DPAA2_FAS_L4CV 0x00000002 - /* L4 csum error */ --#define DPAA2_ETH_FAS_L4CE 0x00000001 --/* These bits always signal errors */ --#define DPAA2_ETH_RX_ERR_MASK (DPAA2_ETH_FAS_KSE | \ -- DPAA2_ETH_FAS_EOFHE | \ -- DPAA2_ETH_FAS_MNLE | \ -- DPAA2_ETH_FAS_TIDE | \ -- DPAA2_ETH_FAS_PIEE | \ -- DPAA2_ETH_FAS_FLE | \ -- DPAA2_ETH_FAS_FPE | \ -- DPAA2_ETH_FAS_PTE | \ -- DPAA2_ETH_FAS_ISP | \ -- DPAA2_ETH_FAS_PHE | \ -- DPAA2_ETH_FAS_BLE | \ -- DPAA2_ETH_FAS_L3CE | \ -- DPAA2_ETH_FAS_L4CE) --/* Unsupported features in the ingress */ --#define DPAA2_ETH_RX_UNSUPP_MASK DPAA2_ETH_FAS_MS -+#define DPAA2_FAS_L4CE 0x00000001 -+/* Possible errors on the ingress path */ -+#define DPAA2_ETH_RX_ERR_MASK (DPAA2_FAS_KSE | \ -+ DPAA2_FAS_EOFHE | \ -+ DPAA2_FAS_MNLE | \ -+ DPAA2_FAS_TIDE | \ -+ DPAA2_FAS_PIEE | \ -+ DPAA2_FAS_FLE | \ -+ DPAA2_FAS_FPE | \ -+ DPAA2_FAS_PTE | \ -+ DPAA2_FAS_ISP | \ -+ DPAA2_FAS_PHE | \ -+ DPAA2_FAS_BLE | \ -+ DPAA2_FAS_L3CE | \ -+ DPAA2_FAS_L4CE) - /* Tx errors */ --#define DPAA2_ETH_TXCONF_ERR_MASK 
(DPAA2_ETH_FAS_KSE | \ -- DPAA2_ETH_FAS_EOFHE | \ -- DPAA2_ETH_FAS_MNLE | \ -- DPAA2_ETH_FAS_TIDE) -+#define DPAA2_ETH_TXCONF_ERR_MASK (DPAA2_FAS_KSE | \ -+ DPAA2_FAS_EOFHE | \ -+ DPAA2_FAS_MNLE | \ -+ DPAA2_FAS_TIDE) - - /* Time in milliseconds between link state updates */ - #define DPAA2_ETH_LINK_STATE_REFRESH 1000 -@@ -185,7 +195,7 @@ struct dpaa2_fas { - /* Driver statistics, other than those in struct rtnl_link_stats64. - * These are usually collected per-CPU and aggregated by ethtool. - */ --struct dpaa2_eth_stats { -+struct dpaa2_eth_drv_stats { - __u64 tx_conf_frames; - __u64 tx_conf_bytes; - __u64 tx_sg_frames; -@@ -210,15 +220,17 @@ struct dpaa2_eth_ch_stats { - __u64 cdan; - /* Number of frames received on queues from this channel */ - __u64 frames; -+ /* Pull errors */ -+ __u64 pull_err; - }; - --/* Maximum number of Rx queues associated with a DPNI */ -+/* Maximum number of queues associated with a DPNI */ - #define DPAA2_ETH_MAX_RX_QUEUES 16 - #define DPAA2_ETH_MAX_TX_QUEUES NR_CPUS - #define DPAA2_ETH_MAX_RX_ERR_QUEUES 1 --#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \ -- DPAA2_ETH_MAX_TX_QUEUES + \ -- DPAA2_ETH_MAX_RX_ERR_QUEUES) -+#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \ -+ DPAA2_ETH_MAX_TX_QUEUES + \ -+ DPAA2_ETH_MAX_RX_ERR_QUEUES) - - #define DPAA2_ETH_MAX_DPCONS NR_CPUS - -@@ -241,7 +253,6 @@ struct dpaa2_eth_fq { - struct dpaa2_eth_channel *, - const struct dpaa2_fd *, - struct napi_struct *); -- struct dpaa2_eth_priv *netdev_priv; /* backpointer */ - struct dpaa2_eth_fq_stats stats; - }; - -@@ -258,16 +269,16 @@ struct dpaa2_eth_channel { - struct dpaa2_eth_ch_stats stats; - }; - --struct dpaa2_cls_rule { -+struct dpaa2_eth_cls_rule { - struct ethtool_rx_flow_spec fs; - bool in_use; - }; - -+/* Driver private data */ - struct dpaa2_eth_priv { - struct net_device *net_dev; - - u8 num_fqs; -- /* First queue is tx conf, the rest are rx */ - struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES]; - - u8 num_channels; -@@ -299,12 +310,12 @@ struct dpaa2_eth_priv { - /* Standard statistics */ - struct rtnl_link_stats64 __percpu *percpu_stats; - /* Extra stats, in addition to the ones known by the kernel */ -- struct dpaa2_eth_stats __percpu *percpu_extras; -- u32 msg_enable; /* net_device message level */ -+ struct dpaa2_eth_drv_stats __percpu *percpu_extras; - - u16 mc_token; - - struct dpni_link_state link_state; -+ bool do_link_poll; - struct task_struct *poll_thread; - - /* enabled ethtool hashing bits */ -@@ -315,7 +326,7 @@ struct dpaa2_eth_priv { - #endif - - /* array of classification rules */ -- struct dpaa2_cls_rule *cls_rule; -+ struct dpaa2_eth_cls_rule *cls_rule; - - struct dpni_tx_shaping_cfg shaping_cfg; - -@@ -341,9 +352,9 @@ struct dpaa2_eth_priv { - - extern const struct ethtool_ops dpaa2_ethtool_ops; - --int dpaa2_set_hash(struct net_device *net_dev, u64 flags); -+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags); - --static int dpaa2_queue_count(struct dpaa2_eth_priv *priv) -+static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv) - { - if (!dpaa2_eth_hash_enabled(priv)) - return 1; -@@ -351,16 +362,16 @@ static int dpaa2_queue_count(struct dpaa - return priv->dpni_ext_cfg.tc_cfg[0].max_dist; - } - --static inline int dpaa2_max_channels(struct dpaa2_eth_priv *priv) -+static inline int dpaa2_eth_max_channels(struct dpaa2_eth_priv *priv) - { - /* Ideally, we want a number of channels large enough - * to accommodate both the Rx distribution size - * and the max number of Tx confirmation queues - */ -- return max_t(int, 
dpaa2_queue_count(priv), -+ return max_t(int, dpaa2_eth_queue_count(priv), - priv->dpni_attrs.max_senders); - } - --void dpaa2_cls_check(struct net_device *); -+void check_fs_support(struct net_device *); - - #endif /* __DPAA2_H */ ---- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c -@@ -52,7 +52,7 @@ char dpaa2_ethtool_stats[][ETH_GSTRING_L - - #define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats) - --/* To be kept in sync with 'struct dpaa2_eth_stats' */ -+/* To be kept in sync with 'struct dpaa2_eth_drv_stats' */ - char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = { - /* per-cpu stats */ - -@@ -63,12 +63,12 @@ char dpaa2_ethtool_extras[][ETH_GSTRING_ - "rx sg frames", - "rx sg bytes", - /* how many times we had to retry the enqueue command */ -- "tx portal busy", -+ "enqueue portal busy", - - /* Channel stats */ -- - /* How many times we had to retry the volatile dequeue command */ -- "portal busy", -+ "dequeue portal busy", -+ "channel pull errors", - /* Number of notifications received */ - "cdan", - #ifdef CONFIG_FSL_QBMAN_DEBUG -@@ -83,8 +83,8 @@ char dpaa2_ethtool_extras[][ETH_GSTRING_ - - #define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras) - --static void dpaa2_get_drvinfo(struct net_device *net_dev, -- struct ethtool_drvinfo *drvinfo) -+static void dpaa2_eth_get_drvinfo(struct net_device *net_dev, -+ struct ethtool_drvinfo *drvinfo) - { - struct mc_version mc_ver; - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -@@ -112,20 +112,8 @@ static void dpaa2_get_drvinfo(struct net - sizeof(drvinfo->bus_info)); - } - --static u32 dpaa2_get_msglevel(struct net_device *net_dev) --{ -- return ((struct dpaa2_eth_priv *)netdev_priv(net_dev))->msg_enable; --} -- --static void dpaa2_set_msglevel(struct net_device *net_dev, -- u32 msg_enable) --{ -- ((struct dpaa2_eth_priv *)netdev_priv(net_dev))->msg_enable = -- msg_enable; --} -- --static int dpaa2_get_settings(struct net_device *net_dev, -- struct ethtool_cmd *cmd) -+static int dpaa2_eth_get_settings(struct net_device *net_dev, -+ struct ethtool_cmd *cmd) - { - struct dpni_link_state state = {0}; - int err = 0; -@@ -152,8 +140,8 @@ out: - return err; - } - --static int dpaa2_set_settings(struct net_device *net_dev, -- struct ethtool_cmd *cmd) -+static int dpaa2_eth_set_settings(struct net_device *net_dev, -+ struct ethtool_cmd *cmd) - { - struct dpni_link_cfg cfg = {0}; - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -@@ -190,8 +178,8 @@ static int dpaa2_set_settings(struct net - return err; - } - --static void dpaa2_get_strings(struct net_device *netdev, u32 stringset, -- u8 *data) -+static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset, -+ u8 *data) - { - u8 *p = data; - int i; -@@ -210,7 +198,7 @@ static void dpaa2_get_strings(struct net - } - } - --static int dpaa2_get_sset_count(struct net_device *net_dev, int sset) -+static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset) - { - switch (sset) { - case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */ -@@ -222,9 +210,9 @@ static int dpaa2_get_sset_count(struct n - - /** Fill in hardware counters, as returned by the MC firmware. 
- */ --static void dpaa2_get_ethtool_stats(struct net_device *net_dev, -- struct ethtool_stats *stats, -- u64 *data) -+static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, -+ struct ethtool_stats *stats, -+ u64 *data) - { - int i; /* Current index in the data array */ - int j, k, err; -@@ -236,9 +224,9 @@ static void dpaa2_get_ethtool_stats(stru - u32 buf_cnt; - #endif - u64 cdan = 0; -- u64 portal_busy = 0; -+ u64 portal_busy = 0, pull_err = 0; - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -- struct dpaa2_eth_stats *extras; -+ struct dpaa2_eth_drv_stats *extras; - struct dpaa2_eth_ch_stats *ch_stats; - - memset(data, 0, -@@ -266,16 +254,18 @@ static void dpaa2_get_ethtool_stats(stru - ch_stats = &priv->channel[j]->stats; - cdan += ch_stats->cdan; - portal_busy += ch_stats->dequeue_portal_busy; -+ pull_err += ch_stats->pull_err; - } - - *(data + i++) = portal_busy; -+ *(data + i++) = pull_err; - *(data + i++) = cdan; - - #ifdef CONFIG_FSL_QBMAN_DEBUG - for (j = 0; j < priv->num_fqs; j++) { - /* Print FQ instantaneous counts */ - err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid, -- &fcnt, &bcnt); -+ &fcnt, &bcnt); - if (err) { - netdev_warn(net_dev, "FQ query error %d", err); - return; -@@ -303,12 +293,12 @@ static void dpaa2_get_ethtool_stats(stru - #endif - } - --static const struct dpaa2_hash_fields { -+static const struct dpaa2_eth_hash_fields { - u64 rxnfc_field; - enum net_prot cls_prot; - int cls_field; - int size; --} dpaa2_hash_fields[] = { -+} hash_fields[] = { - { - /* L2 header */ - .rxnfc_field = RXH_L2DA, -@@ -353,55 +343,53 @@ static const struct dpaa2_hash_fields { - }, - }; - --static int dpaa2_cls_is_enabled(struct net_device *net_dev, u64 flag) -+static int cls_is_enabled(struct net_device *net_dev, u64 flag) - { - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - - return !!(priv->rx_hash_fields & flag); - } - --static int dpaa2_cls_key_off(struct net_device *net_dev, u64 flag) -+static int cls_key_off(struct net_device *net_dev, u64 flag) - { - int i, off = 0; - -- for (i = 0; i < ARRAY_SIZE(dpaa2_hash_fields); i++) { -- if (dpaa2_hash_fields[i].rxnfc_field & flag) -+ for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { -+ if (hash_fields[i].rxnfc_field & flag) - return off; -- if (dpaa2_cls_is_enabled(net_dev, -- dpaa2_hash_fields[i].rxnfc_field)) -- off += dpaa2_hash_fields[i].size; -+ if (cls_is_enabled(net_dev, hash_fields[i].rxnfc_field)) -+ off += hash_fields[i].size; - } - - return -1; - } - --static u8 dpaa2_cls_key_size(struct net_device *net_dev) -+static u8 cls_key_size(struct net_device *net_dev) - { - u8 i, size = 0; - -- for (i = 0; i < ARRAY_SIZE(dpaa2_hash_fields); i++) { -- if (!dpaa2_cls_is_enabled(net_dev, -- dpaa2_hash_fields[i].rxnfc_field)) -+ for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { -+ if (!cls_is_enabled(net_dev, hash_fields[i].rxnfc_field)) - continue; -- size += dpaa2_hash_fields[i].size; -+ size += hash_fields[i].size; - } - - return size; - } - --static u8 dpaa2_cls_max_key_size(struct net_device *net_dev) -+static u8 cls_max_key_size(struct net_device *net_dev) - { - u8 i, size = 0; - -- for (i = 0; i < ARRAY_SIZE(dpaa2_hash_fields); i++) -- size += dpaa2_hash_fields[i].size; -+ for (i = 0; i < ARRAY_SIZE(hash_fields); i++) -+ size += hash_fields[i].size; - - return size; - } - --void dpaa2_cls_check(struct net_device *net_dev) -+void check_fs_support(struct net_device *net_dev) - { -- u8 key_size = dpaa2_cls_max_key_size(net_dev); -+ u8 key_size = cls_max_key_size(net_dev); - struct dpaa2_eth_priv *priv = 
netdev_priv(net_dev); - - if (priv->dpni_attrs.options & DPNI_OPT_DIST_FS && -@@ -417,7 +405,7 @@ void dpaa2_cls_check(struct net_device * - /* Set RX hash options - * flags is a combination of RXH_ bits - */ --int dpaa2_set_hash(struct net_device *net_dev, u64 flags) -+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) - { - struct device *dev = net_dev->dev.parent; - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -@@ -441,11 +429,11 @@ int dpaa2_set_hash(struct net_device *ne - - memset(&cls_cfg, 0, sizeof(cls_cfg)); - -- for (i = 0; i < ARRAY_SIZE(dpaa2_hash_fields); i++) { -+ for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { - struct dpkg_extract *key = - &cls_cfg.extracts[cls_cfg.num_extracts]; - -- if (!(flags & dpaa2_hash_fields[i].rxnfc_field)) -+ if (!(flags & hash_fields[i].rxnfc_field)) - continue; - - if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { -@@ -454,14 +442,12 @@ int dpaa2_set_hash(struct net_device *ne - } - - key->type = DPKG_EXTRACT_FROM_HDR; -- key->extract.from_hdr.prot = -- dpaa2_hash_fields[i].cls_prot; -+ key->extract.from_hdr.prot = hash_fields[i].cls_prot; - key->extract.from_hdr.type = DPKG_FULL_FIELD; -- key->extract.from_hdr.field = -- dpaa2_hash_fields[i].cls_field; -+ key->extract.from_hdr.field = hash_fields[i].cls_field; - cls_cfg.num_extracts++; - -- enabled_flags |= dpaa2_hash_fields[i].rxnfc_field; -+ enabled_flags |= hash_fields[i].rxnfc_field; - } - - dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL); -@@ -486,7 +472,7 @@ int dpaa2_set_hash(struct net_device *ne - return -ENOMEM; - } - -- dist_cfg.dist_size = dpaa2_queue_count(priv); -+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv); - if (dpaa2_eth_fs_enabled(priv)) { - dist_cfg.dist_mode = DPNI_DIST_MODE_FS; - dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH; -@@ -508,14 +494,14 @@ int dpaa2_set_hash(struct net_device *ne - return 0; - } - --static int dpaa2_cls_prep_rule(struct net_device *net_dev, -- struct ethtool_rx_flow_spec *fs, -- void *key) -+static int prep_cls_rule(struct net_device *net_dev, -+ struct ethtool_rx_flow_spec *fs, -+ void *key) - { - struct ethtool_tcpip4_spec *l4ip4_h, *l4ip4_m; - struct ethhdr *eth_h, *eth_m; - struct ethtool_flow_ext *ext_h, *ext_m; -- const u8 key_size = dpaa2_cls_key_size(net_dev); -+ const u8 key_size = cls_key_size(net_dev); - void *msk = key + key_size; - - memset(key, 0, key_size * 2); -@@ -546,51 +532,47 @@ l4ip4: - "ToS is not supported for IPv4 L4\n"); - return -EOPNOTSUPP; - } -- if (l4ip4_m->ip4src && -- !dpaa2_cls_is_enabled(net_dev, RXH_IP_SRC)) { -+ if (l4ip4_m->ip4src && !cls_is_enabled(net_dev, RXH_IP_SRC)) { - netdev_err(net_dev, "IP SRC not supported!\n"); - return -EOPNOTSUPP; - } -- if (l4ip4_m->ip4dst && -- !dpaa2_cls_is_enabled(net_dev, RXH_IP_DST)) { -+ if (l4ip4_m->ip4dst && !cls_is_enabled(net_dev, RXH_IP_DST)) { - netdev_err(net_dev, "IP DST not supported!\n"); - return -EOPNOTSUPP; - } -- if (l4ip4_m->psrc && -- !dpaa2_cls_is_enabled(net_dev, RXH_L4_B_0_1)) { -+ if (l4ip4_m->psrc && !cls_is_enabled(net_dev, RXH_L4_B_0_1)) { - netdev_err(net_dev, "PSRC not supported, ignored\n"); - return -EOPNOTSUPP; - } -- if (l4ip4_m->pdst && -- !dpaa2_cls_is_enabled(net_dev, RXH_L4_B_2_3)) { -+ if (l4ip4_m->pdst && !cls_is_enabled(net_dev, RXH_L4_B_2_3)) { - netdev_err(net_dev, "PDST not supported, ignored\n"); - return -EOPNOTSUPP; - } - -- if (dpaa2_cls_is_enabled(net_dev, RXH_IP_SRC)) { -- *(u32 *)(key + dpaa2_cls_key_off(net_dev, RXH_IP_SRC)) -+ if (cls_is_enabled(net_dev, RXH_IP_SRC)) { -+ *(u32 
*)(key + cls_key_off(net_dev, RXH_IP_SRC)) - = l4ip4_h->ip4src; -- *(u32 *)(msk + dpaa2_cls_key_off(net_dev, RXH_IP_SRC)) -+ *(u32 *)(msk + cls_key_off(net_dev, RXH_IP_SRC)) - = l4ip4_m->ip4src; - } -- if (dpaa2_cls_is_enabled(net_dev, RXH_IP_DST)) { -- *(u32 *)(key + dpaa2_cls_key_off(net_dev, RXH_IP_DST)) -+ if (cls_is_enabled(net_dev, RXH_IP_DST)) { -+ *(u32 *)(key + cls_key_off(net_dev, RXH_IP_DST)) - = l4ip4_h->ip4dst; -- *(u32 *)(msk + dpaa2_cls_key_off(net_dev, RXH_IP_DST)) -+ *(u32 *)(msk + cls_key_off(net_dev, RXH_IP_DST)) - = l4ip4_m->ip4dst; - } - -- if (dpaa2_cls_is_enabled(net_dev, RXH_L4_B_0_1)) { -- *(u32 *)(key + dpaa2_cls_key_off(net_dev, RXH_L4_B_0_1)) -+ if (cls_is_enabled(net_dev, RXH_L4_B_0_1)) { -+ *(u32 *)(key + cls_key_off(net_dev, RXH_L4_B_0_1)) - = l4ip4_h->psrc; -- *(u32 *)(msk + dpaa2_cls_key_off(net_dev, RXH_L4_B_0_1)) -+ *(u32 *)(msk + cls_key_off(net_dev, RXH_L4_B_0_1)) - = l4ip4_m->psrc; - } - -- if (dpaa2_cls_is_enabled(net_dev, RXH_L4_B_2_3)) { -- *(u32 *)(key + dpaa2_cls_key_off(net_dev, RXH_L4_B_2_3)) -+ if (cls_is_enabled(net_dev, RXH_L4_B_2_3)) { -+ *(u32 *)(key + cls_key_off(net_dev, RXH_L4_B_2_3)) - = l4ip4_h->pdst; -- *(u32 *)(msk + dpaa2_cls_key_off(net_dev, RXH_L4_B_2_3)) -+ *(u32 *)(msk + cls_key_off(net_dev, RXH_L4_B_2_3)) - = l4ip4_m->pdst; - } - break; -@@ -609,12 +591,10 @@ l4ip4: - return -EOPNOTSUPP; - } - -- if (dpaa2_cls_is_enabled(net_dev, RXH_L2DA)) { -- ether_addr_copy(key -- + dpaa2_cls_key_off(net_dev, RXH_L2DA), -+ if (cls_is_enabled(net_dev, RXH_L2DA)) { -+ ether_addr_copy(key + cls_key_off(net_dev, RXH_L2DA), - eth_h->h_dest); -- ether_addr_copy(msk -- + dpaa2_cls_key_off(net_dev, RXH_L2DA), -+ ether_addr_copy(msk + cls_key_off(net_dev, RXH_L2DA), - eth_m->h_dest); - } else { - if (!is_zero_ether_addr(eth_m->h_dest)) { -@@ -639,12 +619,10 @@ l4ip4: - ext_h = &fs->h_ext; - ext_m = &fs->m_ext; - -- if (dpaa2_cls_is_enabled(net_dev, RXH_L2DA)) { -- ether_addr_copy(key -- + dpaa2_cls_key_off(net_dev, RXH_L2DA), -+ if (cls_is_enabled(net_dev, RXH_L2DA)) { -+ ether_addr_copy(key + cls_key_off(net_dev, RXH_L2DA), - ext_h->h_dest); -- ether_addr_copy(msk -- + dpaa2_cls_key_off(net_dev, RXH_L2DA), -+ ether_addr_copy(msk + cls_key_off(net_dev, RXH_L2DA), - ext_m->h_dest); - } else { - if (!is_zero_ether_addr(ext_m->h_dest)) { -@@ -657,9 +635,9 @@ l4ip4: - return 0; - } - --static int dpaa2_do_cls(struct net_device *net_dev, -- struct ethtool_rx_flow_spec *fs, -- bool add) -+static int do_cls(struct net_device *net_dev, -+ struct ethtool_rx_flow_spec *fs, -+ bool add) - { - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - const int rule_cnt = DPAA2_CLASSIFIER_ENTRY_COUNT; -@@ -674,19 +652,19 @@ static int dpaa2_do_cls(struct net_devic - } - - if ((fs->ring_cookie != RX_CLS_FLOW_DISC && -- fs->ring_cookie >= dpaa2_queue_count(priv)) || -+ fs->ring_cookie >= dpaa2_eth_queue_count(priv)) || - fs->location >= rule_cnt) - return -EINVAL; - - memset(&rule_cfg, 0, sizeof(rule_cfg)); -- rule_cfg.key_size = dpaa2_cls_key_size(net_dev); -+ rule_cfg.key_size = cls_key_size(net_dev); - - /* allocate twice the key size, for the actual key and for mask */ - dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL); - if (!dma_mem) - return -ENOMEM; - -- err = dpaa2_cls_prep_rule(net_dev, fs, dma_mem); -+ err = prep_cls_rule(net_dev, fs, dma_mem); - if (err) - goto err_free_mem; - -@@ -735,13 +713,13 @@ err_free_mem: - return err; - } - --static int dpaa2_add_cls(struct net_device *net_dev, -- struct ethtool_rx_flow_spec *fs) -+static int 
add_cls(struct net_device *net_dev, -+ struct ethtool_rx_flow_spec *fs) - { - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - int err; - -- err = dpaa2_do_cls(net_dev, fs, true); -+ err = do_cls(net_dev, fs, true); - if (err) - return err; - -@@ -751,12 +729,12 @@ static int dpaa2_add_cls(struct net_devi - return 0; - } - --static int dpaa2_del_cls(struct net_device *net_dev, int location) -+static int del_cls(struct net_device *net_dev, int location) - { - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - int err; - -- err = dpaa2_do_cls(net_dev, &priv->cls_rule[location].fs, false); -+ err = do_cls(net_dev, &priv->cls_rule[location].fs, false); - if (err) - return err; - -@@ -765,7 +743,7 @@ static int dpaa2_del_cls(struct net_devi - return 0; - } - --static void dpaa2_clear_cls(struct net_device *net_dev) -+static void clear_cls(struct net_device *net_dev) - { - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - int i, err; -@@ -774,7 +752,7 @@ static void dpaa2_clear_cls(struct net_d - if (!priv->cls_rule[i].in_use) - continue; - -- err = dpaa2_del_cls(net_dev, i); -+ err = del_cls(net_dev, i); - if (err) - netdev_warn(net_dev, - "err trying to delete classification entry %d\n", -@@ -782,8 +760,8 @@ static void dpaa2_clear_cls(struct net_d - } - } - --static int dpaa2_set_rxnfc(struct net_device *net_dev, -- struct ethtool_rxnfc *rxnfc) -+static int dpaa2_eth_set_rxnfc(struct net_device *net_dev, -+ struct ethtool_rxnfc *rxnfc) - { - int err = 0; - -@@ -792,19 +770,19 @@ static int dpaa2_set_rxnfc(struct net_de - /* first off clear ALL classification rules, chaging key - * composition will break them anyway - */ -- dpaa2_clear_cls(net_dev); -+ clear_cls(net_dev); - /* we purposely ignore cmd->flow_type for now, because the - * classifier only supports a single set of fields for all - * protocols - */ -- err = dpaa2_set_hash(net_dev, rxnfc->data); -+ err = dpaa2_eth_set_hash(net_dev, rxnfc->data); - break; - case ETHTOOL_SRXCLSRLINS: -- err = dpaa2_add_cls(net_dev, &rxnfc->fs); -+ err = add_cls(net_dev, &rxnfc->fs); - break; - - case ETHTOOL_SRXCLSRLDEL: -- err = dpaa2_del_cls(net_dev, rxnfc->fs.location); -+ err = del_cls(net_dev, rxnfc->fs.location); - break; - - default: -@@ -814,8 +792,8 @@ static int dpaa2_set_rxnfc(struct net_de - return err; - } - --static int dpaa2_get_rxnfc(struct net_device *net_dev, -- struct ethtool_rxnfc *rxnfc, u32 *rule_locs) -+static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, -+ struct ethtool_rxnfc *rxnfc, u32 *rule_locs) - { - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - const int rule_cnt = DPAA2_CLASSIFIER_ENTRY_COUNT; -@@ -831,7 +809,7 @@ static int dpaa2_get_rxnfc(struct net_de - break; - - case ETHTOOL_GRXRINGS: -- rxnfc->data = dpaa2_queue_count(priv); -+ rxnfc->data = dpaa2_eth_queue_count(priv); - break; - - case ETHTOOL_GRXCLSRLCNT: -@@ -868,15 +846,13 @@ static int dpaa2_get_rxnfc(struct net_de - } - - const struct ethtool_ops dpaa2_ethtool_ops = { -- .get_drvinfo = dpaa2_get_drvinfo, -- .get_msglevel = dpaa2_get_msglevel, -- .set_msglevel = dpaa2_set_msglevel, -+ .get_drvinfo = dpaa2_eth_get_drvinfo, - .get_link = ethtool_op_get_link, -- .get_settings = dpaa2_get_settings, -- .set_settings = dpaa2_set_settings, -- .get_sset_count = dpaa2_get_sset_count, -- .get_ethtool_stats = dpaa2_get_ethtool_stats, -- .get_strings = dpaa2_get_strings, -- .get_rxnfc = dpaa2_get_rxnfc, -- .set_rxnfc = dpaa2_set_rxnfc, -+ .get_settings = dpaa2_eth_get_settings, -+ .set_settings = dpaa2_eth_set_settings, -+ .get_sset_count = 
dpaa2_eth_get_sset_count, -+ .get_ethtool_stats = dpaa2_eth_get_ethtool_stats, -+ .get_strings = dpaa2_eth_get_strings, -+ .get_rxnfc = dpaa2_eth_get_rxnfc, -+ .set_rxnfc = dpaa2_eth_set_rxnfc, - }; diff --git a/target/linux/layerscape/patches-4.4/7203-fsl-dpaa2-eth-Update-description-of-DPNI-counters.patch b/target/linux/layerscape/patches-4.4/7203-fsl-dpaa2-eth-Update-description-of-DPNI-counters.patch deleted file mode 100644 index 41d1c04e2..000000000 --- a/target/linux/layerscape/patches-4.4/7203-fsl-dpaa2-eth-Update-description-of-DPNI-counters.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 727de4692d731655dea96702aa099f4f4d3a5203 Mon Sep 17 00:00:00 2001 -From: Bogdan Hamciuc -Date: Mon, 21 Mar 2016 16:10:01 +0200 -Subject: [PATCH 203/226] fsl-dpaa2: eth: Update description of DPNI counters - -Update description of DPNI counters presented with "ethtool -S". - -Signed-off-by: Bogdan Hamciuc -(cherry picked from commit f68aab60355d00af13fdff2ded7bf38809beacd3) ---- - drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 9 ++++++--- - 1 file changed, 6 insertions(+), 3 deletions(-) - ---- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c -@@ -39,15 +39,18 @@ - char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = { - "rx frames", - "rx bytes", -- "rx frames dropped", -- "rx err frames", -+ /* rx frames filtered/policed */ -+ "rx filtered frames", -+ /* rx frames dropped with errors */ -+ "rx discarded frames", - "rx mcast frames", - "rx mcast bytes", - "rx bcast frames", - "rx bcast bytes", - "tx frames", - "tx bytes", -- "tx err frames", -+ /* tx frames dropped with errors */ -+ "tx discarded frames", - }; - - #define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats) diff --git a/target/linux/layerscape/patches-4.4/7204-fsl-dpaa2-eth-dpni-Clear-compiler-warnings.patch b/target/linux/layerscape/patches-4.4/7204-fsl-dpaa2-eth-dpni-Clear-compiler-warnings.patch deleted file mode 100644 index ff74d1300..000000000 --- a/target/linux/layerscape/patches-4.4/7204-fsl-dpaa2-eth-dpni-Clear-compiler-warnings.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 9a38e2ce3b46a2bdc90b4ad190a26f9418909450 Mon Sep 17 00:00:00 2001 -From: Bogdan Hamciuc -Date: Tue, 29 Mar 2016 13:23:50 +0300 -Subject: [PATCH 204/226] fsl-dpaa2: eth: dpni: Clear compiler warnings -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Clear two warnings given by -Wcast-qual: -warning: cast discards ‘__attribute__((const))’ qualifier from pointer -target type - -Signed-off-by: Bogdan Hamciuc -(cherry picked from commit 96d14f291c2750e8b09268cecb84bfe7f013294d) ---- - drivers/staging/fsl-dpaa2/ethernet/dpni.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/staging/fsl-dpaa2/ethernet/dpni.c -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c -@@ -128,7 +128,7 @@ int dpni_prepare_extended_cfg(const stru - int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg, - const uint8_t *ext_cfg_buf) - { -- uint64_t *ext_params = (uint64_t *)ext_cfg_buf; -+ const uint64_t *ext_params = (const uint64_t *)ext_cfg_buf; - - DPNI_EXT_EXTENDED_CFG(ext_params, cfg); - -@@ -1651,7 +1651,7 @@ void dpni_prepare_early_drop(const struc - void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg, - const uint8_t *early_drop_buf) - { -- uint64_t *ext_params = (uint64_t *)early_drop_buf; -+ const uint64_t *ext_params = (const uint64_t *)early_drop_buf; - - DPNI_EXT_EARLY_DROP(ext_params, cfg); - } diff --git 
a/target/linux/layerscape/patches-4.4/7205-fsl-dpaa2-eth-sanitize-supported-private-flags.patch b/target/linux/layerscape/patches-4.4/7205-fsl-dpaa2-eth-sanitize-supported-private-flags.patch
deleted file mode 100644
index 0f96213a1..000000000
--- a/target/linux/layerscape/patches-4.4/7205-fsl-dpaa2-eth-sanitize-supported-private-flags.patch
+++ /dev/null
@@ -1,57 +0,0 @@
-From 51106cb1fd14dfbf62c2760921463376f56ac732 Mon Sep 17 00:00:00 2001
-From: Bogdan Purcareata
-Date: Tue, 21 Jun 2016 18:40:47 +0000
-Subject: [PATCH 205/226] fsl-dpaa2: eth: sanitize supported private flags
-
-On linux-v4.6 with CONFIG_MACVLAN=y, when bringing up a ni interface, the
-network stack crashes due to a segfault. This is related to the
-macvlan_device_event notifier, which registers itself to all the network
-interfaces in the system.
-
-The notifier reads the netdev private flags and incorrectly qualifies
-the interface as a macvlan port, since both the IFF_MACVLAN_PORT and
-IFF_PROMISC flags have the same offset. Code spelunking reveals that
-IFF_PROMISC is only used as an interface flag, not a private interface
-flag.
-
-A similar situation happens with IFF_ALLMULTI, which overlaps with
-IFF_BRIDGE_PORT. No info on the consequences of this, since I haven't
-tested bridge scenarios. The interface can still be set in allmulti
-mode using userspace tools (e.g. ifconfig).
-
-IFF_MULTICAST overlaps with IFF_UNICAST_FLT, therefore the current code
-has no effect as it is. The closest multicast activation based on device
-capabilities has been seen in the case of the Aeroflex Gaisler Ethernet
-MAC (aeroflex/greth.c) - here, the runtime (not private) flag is set on
-device probe. On a side note, ether_setup enables IFF_MULTICAST by default.
-
-Remove IFF_PROMISC, IFF_ALLMULTI and IFF_MULTICAST from device capabilities
-init.
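The aliasing described above can be shown in a few lines of C. This is a minimal sketch only: the bit values are hypothetical placeholders chosen to collide, not the real constants from linux/netdevice.h.

/* dev->flags and dev->priv_flags are distinct bit spaces; a flag
 * defined for one must not be stored in the other. If an interface
 * flag (EX_IFF_PROMISC here) is ORed into priv_flags, it can alias a
 * private flag (EX_IFF_MACVLAN_PORT) occupying the same bit.
 */
#define EX_IFF_PROMISC		0x100	/* interface flag: dev->flags */
#define EX_IFF_MACVLAN_PORT	0x100	/* private flag: dev->priv_flags */

static int ex_is_macvlan_port(unsigned int priv_flags)
{
	/* Returns 1 for a device that stored EX_IFF_PROMISC in its
	 * priv_flags, even though it was never a macvlan port.
	 */
	return !!(priv_flags & EX_IFF_MACVLAN_PORT);
}

With the change in the hunk below, only genuinely private flags such as IFF_LIVE_ADDR_CHANGE and, when supported, IFF_UNICAST_FLT end up in priv_flags.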
- -Signed-off-by: Bogdan Purcareata ---- - drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 7 +------ - 1 file changed, 1 insertion(+), 6 deletions(-) - ---- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c -@@ -1176,18 +1176,13 @@ static int dpaa2_eth_init(struct net_dev - u32 options = priv->dpni_attrs.options; - - /* Capabilities listing */ -- supported |= IFF_LIVE_ADDR_CHANGE | IFF_PROMISC | IFF_ALLMULTI; -+ supported |= IFF_LIVE_ADDR_CHANGE; - - if (options & DPNI_OPT_UNICAST_FILTER) - supported |= IFF_UNICAST_FLT; - else - not_supported |= IFF_UNICAST_FLT; - -- if (options & DPNI_OPT_MULTICAST_FILTER) -- supported |= IFF_MULTICAST; -- else -- not_supported |= IFF_MULTICAST; -- - net_dev->priv_flags |= supported; - net_dev->priv_flags &= ~not_supported; - diff --git a/target/linux/layerscape/patches-4.4/7206-fsl-dpaa2-eth-match-id-cleanup.patch b/target/linux/layerscape/patches-4.4/7206-fsl-dpaa2-eth-match-id-cleanup.patch deleted file mode 100644 index bdb0f5895..000000000 --- a/target/linux/layerscape/patches-4.4/7206-fsl-dpaa2-eth-match-id-cleanup.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 7e536d0c2f870b39480268c20af6fc3d21abe611 Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Wed, 15 Jun 2016 14:03:43 -0500 -Subject: [PATCH 206/226] fsl-dpaa2: eth: match id cleanup - -Signed-off-by: Stuart Yoder ---- - drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 4 +--- - 1 file changed, 1 insertion(+), 3 deletions(-) - ---- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c -@@ -2787,12 +2787,10 @@ static int dpaa2_eth_remove(struct fsl_m - return 0; - } - --static const struct fsl_mc_device_match_id dpaa2_eth_match_id_table[] = { -+static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = { - { - .vendor = FSL_MC_VENDOR_FREESCALE, - .obj_type = "dpni", -- .ver_major = DPNI_VER_MAJOR, -- .ver_minor = DPNI_VER_MINOR - }, - { .vendor = 0x0 } - }; diff --git a/target/linux/layerscape/patches-4.4/7207-fsl-dpaa2-eth-add-device-table-to-driver.patch b/target/linux/layerscape/patches-4.4/7207-fsl-dpaa2-eth-add-device-table-to-driver.patch deleted file mode 100644 index b7885cf01..000000000 --- a/target/linux/layerscape/patches-4.4/7207-fsl-dpaa2-eth-add-device-table-to-driver.patch +++ /dev/null @@ -1,22 +0,0 @@ -From 8557c8a3823b341607e16048d8318a1958eab3a9 Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Thu, 12 May 2016 17:52:28 -0500 -Subject: [PATCH 207/226] fsl-dpaa2: eth: add device table to driver - -this is needed to have the driver loaded as a module - -Signed-off-by: Stuart Yoder ---- - drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c -@@ -2794,6 +2794,7 @@ static const struct fsl_mc_device_id dpa - }, - { .vendor = 0x0 } - }; -+MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table); - - static struct fsl_mc_driver dpaa2_eth_driver = { - .driver = { diff --git a/target/linux/layerscape/patches-4.4/7208-staging-fsl-dpaa2-mac-Added-MAC-PHY-interface-driver.patch b/target/linux/layerscape/patches-4.4/7208-staging-fsl-dpaa2-mac-Added-MAC-PHY-interface-driver.patch deleted file mode 100644 index 42a23a598..000000000 --- a/target/linux/layerscape/patches-4.4/7208-staging-fsl-dpaa2-mac-Added-MAC-PHY-interface-driver.patch +++ /dev/null @@ -1,2347 +0,0 @@ -From ecaf55d2907835cd0580903e134cdf08416ff694 Mon Sep 17 00:00:00 2001 -From: 
Bogdan Hamciuc
-Date: Tue, 15 Sep 2015 10:27:19 -0500
-Subject: [PATCH 208/226] staging: fsl-dpaa2: mac: Added MAC / PHY interface
- driver
-
-This is a commit of the cumulative, squashed dpmac patches.
-All the commit logs are preserved below.
-
-Signed-off-by: Stuart Yoder
-
--------------------------------------------------------------
-
-flib,dpmac: add dpmac files (Rebasing onto kernel 3.19, MC 0.6)
-
-patches moved from 4.0 kernel
-
-Signed-off-by: Bogdan Hamciuc
-[Stuart: cherry-picked patch and split it up]
-Signed-off-by: Stuart Yoder
-
-staging: fsl-dpaa2: mac: Added MAC / PHY interface driver.
-
-This driver works as a proxy between phylib including phy drivers and
-the MC firmware. It receives updates on link state changes from PHY
-lib and forwards them to MC and receives interrupt from MC whenever
-a request is made to change the link state.
-
-Signed-off-by: Alex Marginean
-Change-Id: I8097ea69ea8effded3bddd43b9d326bbb59ba6c8
-Reviewed-on: http://git.am.freescale.net:8181/35113
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Stuart Yoder
-
-fsl-dpaa2: mac: Change IRQ flags
-
-Signed-off-by: Bogdan Hamciuc
-Change-Id: Ia86570858f9cf7f673089cd7c2078662d56b2f01
-Reviewed-on: http://git.am.freescale.net:8181/35581
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Stuart Yoder
-
-fsl-dpaa2: mac: Check for actual link state change
-
-Do not invoke the MC firmware if the link state hasn't changed.
-
-Signed-off-by: Alex Marginean
-Change-Id: Iba59d8b52c72334efa28f6126e50ec821c802852
-Reviewed-on: http://git.am.freescale.net:8181/35582
-Reviewed-by: Bogdan Hamciuc
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Stuart Yoder
-
-fsl-dpaa2: mac: Fix "dpmac netdevs" probing
-
-Fixup code under DPAA2_MAC_NETDEVS to probe again. In particular, remove
-the temporary addition of "fixed.c" in the mac/ folder.
-
-Signed-off-by: Bogdan Hamciuc
-Change-Id: Iea6768f3c5cd9b2de2c8421c03ecebf155b9792b
-Reviewed-on: http://git.am.freescale.net:8181/37673
-Reviewed-by: Ruxandra Ioana Radulescu
-Reviewed-by: Stuart Yoder
-Tested-by: Stuart Yoder
-
-flib: Remove duplicate header files
-
-These files are included by the DPAA2 mac driver files.
-
-Signed-off-by: Razvan Stefanescu
-Change-Id: Ieff56e3c34393ef65a5ac1123aaf00bacefa050c
-Reviewed-on: http://git.am.freescale.net:8181/37257
-Reviewed-by: Alexandru Marginean
-Reviewed-by: Stuart Yoder
-Tested-by: Stuart Yoder
-
-fsl-dpaa2: mac: Add dependency on CONFIG_FIXED_PHY
-
-The DPAA2 DPMAC driver currently relies on fixed links, so it will fail
-to probe in unusual ways if CONFIG_FIXED_PHY is not enabled.
-
-Signed-off-by: Bogdan Hamciuc
-Change-Id: Ibc53226a215ed85a2ba22c55b18595fb939e7418
-Reviewed-on: http://git.am.freescale.net:8181/37687
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Stuart Yoder
-
-fsl-dpaa2: mac: Fix macro
-
-Remove macro ending backslash.
-
-Signed-off-by: Razvan Stefanescu
-Change-Id: Ib0c4a41eee8fbe4aa7c991fc7fdb87771d3bf594
-Reviewed-on: http://git.am.freescale.net:8181/37254
-Tested-by: Review Code-CDREVIEW
-Reviewed-by: Alexandru Marginean
-Reviewed-by: Stuart Yoder
-
-fsl-dpaa2: mac: migrated remaining flibs for MC fw 8.0.0
-
-Signed-off-by: J.
German Rivera
-[Stuart: split mac part out of original patch, updated subject]
-Signed-off-by: Stuart Yoder
-
-staging: fsl-dpaa2: mac: Port to MC-0.7 Flibs
-
-Change-Id: Ief731e245bdc207f1bf8e7ff4dfdabb445d6010e
-Signed-off-by: Bogdan Hamciuc
-Reviewed-on: http://git.am.freescale.net:8181/39151
-Reviewed-by: Stuart Yoder
-Tested-by: Stuart Yoder
-
-staging: fsl-dpaa2: mac: Do programming of MSIs in devm_request_threaded_irq()
-
-With the new dprc_set_obj_irq() we can now program MSIs in the device
-in the callback invoked from devm_request_threaded_irq().
-Since this callback is invoked with interrupts disabled, we need to
-use an atomic portal, instead of the root DPRC's built-in portal
-which is non-atomic.
-
-Signed-off-by: Itai Katz
-Signed-off-by: J. German Rivera
-[Stuart: split original patch up by component]
-Signed-off-by: Stuart Yoder
-
-fsl-dpaa2: mac: Fix driver probing
-
-The DPMAC probing function was broken in many ways. This patch adds
-the following fixes:
- - Look up PHY nodes based on the phy-handle property of the respective
- DPMAC node;
- - Defer DPMAC device probing until the MDIO MUX driver probes first (we
- depend on that for configuring the PHYs on PCIe riser cards on
- LS2085A QDS boards).
- - Add Kconfig dependencies on XGMAC_MDIO and MDIO_BUS_MUX_MMIOREG.
-
-Signed-off-by: Bogdan Hamciuc
-
-fsl-dpaa2: mac: Fix interrupt handling
-
-The DPMAC has two interrupt events muxed on a single interrupt line.
-Both the PHY and the DPNI can initiate a link event.
-
-When the link event is initiated by the PHY (possibly as the effect of an
-earlier link change request initiated by a DPNI), we must make sure
-dpmac_set_link_state() is explicitly called in order for the event to be
-propagated (back) to the DPNI.
-
-Finally, DPMAC interrupt mask has to be explicitly specified before calling
-dpmac_set_irq_enabled().
-
-Signed-off-by: Bogdan Hamciuc
-
-fsl-dpaa2: mac: Fix print in case device is not initialized
-
-Signed-off-by: Bogdan Hamciuc
-
-fsl-dpaa2: mac: Fix error paths at probe
-
-Merge error condition checks. Add error codes to the early exit paths.
-Fix swapped goto labels.
-
-Signed-off-by: Bogdan Hamciuc
-
-fsl-dpaa2: mac: Remove unused function prototype
-
-fixed_phy_register_2() was a leftover since we had to backport fixed PHY
-implementation on kernel v3.10.
-
-Signed-off-by: Bogdan Hamciuc
-
-fsl-dpaa2/mac: Update dpmac binary interface to v3.2
-
-Signed-off-by: Ioana Radulescu
-
-fsl-dpaa2: mac: Update Flib to MC 0.8.1
-
-In practice, this adds a counter for "good" egress frames.
-
-Signed-off-by: Bogdan Hamciuc
-
-fsl-dpaa2: mac: Add counter for "good" egress frames
-
-Now available with the 0.8.1 Flibs.
-
-Signed-off-by: Bogdan Hamciuc
-
-fsl-dpaa2: mac: Update dpmac_set_link_state() error checking
-
-As of 0.8.1 Flibs, dpmac_set_link_state() no longer returns what we'd
-consider spurious errors. This allows for cleaner error checking on
-DPMAC-side.
-
-Signed-off-by: Bogdan Hamciuc
-
-fsl-dpaa2: mac: Remove __cold attribute
-
-Signed-off-by: Bogdan Hamciuc
-
-fsl-dpaa2: mac: Check DPMAC FLIB version
-
-Make sure we support the DPMAC version, otherwise abort probing
-early on and provide an error message.
-
-Signed-off-by: Ioana Radulescu
-
-fsl-dpaa2: mac: Replace uintX_t with uX
-
-Signed-off-by: Bogdan Hamciuc
-
-fsl-dpaa2: mac: Fix crash on error path
-
-If the fixed-phy cannot be correctly registered, unregister_netdev()
-receives a non-NULL, yet invalid phydev. Force the phydev reference to
-NULL to avoid a crash on the probe routine's error path.
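The "Fix crash on error path" note above reduces to a small pattern. The sketch below assumes a hypothetical ex_mac_priv structure and uses the four-argument fixed_phy_register() signature that the 4.5 uprev note refers to; since fixed_phy_register() reports failure through ERR_PTR() rather than NULL, the stale reference must be cleared by hand before any shared cleanup path runs.

#include <linux/err.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>

struct ex_mac_priv {			/* hypothetical, for illustration */
	struct phy_device *phydev;
};

static int ex_attach_fixed_phy(struct ex_mac_priv *mac)
{
	struct fixed_phy_status status = { .link = 1 };

	mac->phydev = fixed_phy_register(PHY_POLL, &status, -1, NULL);
	if (IS_ERR(mac->phydev)) {
		int err = PTR_ERR(mac->phydev);

		/* Clear the reference so a later unregister_netdev()
		 * or phy_disconnect() never sees the bogus pointer.
		 */
		mac->phydev = NULL;
		return err;
	}

	return 0;
}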
-
-Signed-off-by: Bogdan Hamciuc
-
-fsl-dpaa2: mac: Remove TODO comments from the code
-
-Signed-off-by: Bogdan Hamciuc
-
-fsl-dpaa2: mac: Fix ppx_eth_iface_mode order
-
-ppx_eth_iface_mode must be kept in sync with enum dpmac_eth_if, but some
-array values weren't in the right order.
-
-Signed-off-by: Bogdan Hamciuc
-
-fsl-dpaa2: mac: Remove forward declarations of functions
-
-Signed-off-by: Bogdan Hamciuc
-
-fsl-dpaa2: mac: Remove ppx_{err,warn,info} macros
-
-Replace with their straightforward equivalents, their contexts being
-non-ambiguous.
-
-Signed-off-by: Bogdan Hamciuc
-
-fsl-dpaa2: mac: Use non-atomic MC portal
-
-The DPMAC driver does not make MC calls from atomic contexts, so it is
-safe to request non-atomic MC portals.
-
-Signed-off-by: Bogdan Hamciuc
-
-fsl-dpaa2: mac: Replace "ppx" prefix with "dpaa2_mac"
-
-Use a similar naming convention as for the Ethernet driver,
-replacing "ppx" with "dpaa2_mac" as prefix for functions and
-structures.
-
-Signed-off-by: Ioana Radulescu
-
-fsl-dpaa2: mac: Remove unnecessary blank line
-
-Signed-off-by: Ioana Radulescu
-
-fsl-dpaa2: mac: Do not handle link change confirmation interrupt
-
-That interrupt is mostly useful for debugging.
-
-Signed-off-by: Bogdan Hamciuc
-
-fsl-dpaa2: mac: resolve compile issues on uprev to 4.5
-
--interrupt info in mc struct changed upstream
--fixed_phy_register() had new argument
-
-Signed-off-by: Stuart Yoder
----
- MAINTAINERS | 6 +
- drivers/staging/fsl-dpaa2/Kconfig | 1 +
- drivers/staging/fsl-dpaa2/Makefile | 1 +
- drivers/staging/fsl-dpaa2/mac/Kconfig | 24 +
- drivers/staging/fsl-dpaa2/mac/Makefile | 10 +
- drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 195 ++++++++
- drivers/staging/fsl-dpaa2/mac/dpmac.c | 422 ++++++++++++++++
- drivers/staging/fsl-dpaa2/mac/dpmac.h | 593 ++++++++++++++++++++++
- drivers/staging/fsl-dpaa2/mac/mac.c | 767 +++++++++++++++++++++++++++++
- 9 files changed, 2019 insertions(+)
- create mode 100644 drivers/staging/fsl-dpaa2/mac/Kconfig
- create mode 100644 drivers/staging/fsl-dpaa2/mac/Makefile
- create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
- create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.c
- create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.h
- create mode 100644 drivers/staging/fsl-dpaa2/mac/mac.c
-
---- a/MAINTAINERS
-+++ b/MAINTAINERS
-@@ -4554,6 +4554,12 @@ S: Maintained
- F: drivers/staging/fsl-mc/bus/mc-ioctl.h
- F: drivers/staging/fsl-mc/bus/mc-restool.c
-
-+FREESCALE DPAA2 MAC/PHY INTERFACE DRIVER
-+M: Alex Marginean
-+L: linux-kernel@vger.kernel.org
-+S: Maintained
-+F: drivers/staging/fsl-dpaa2/mac/
-+
- FREEVXFS FILESYSTEM
- M: Christoph Hellwig
- W: ftp://ftp.openlinux.org/pub/people/hch/vxfs
---- a/drivers/staging/fsl-dpaa2/Kconfig
-+++ b/drivers/staging/fsl-dpaa2/Kconfig
-@@ -9,3 +9,4 @@ config FSL_DPAA2
- Build drivers for Freescale DataPath Acceleration Architecture (DPAA2) family of SoCs.
- # TODO move DPIO driver in-here?
- source "drivers/staging/fsl-dpaa2/ethernet/Kconfig" -+source "drivers/staging/fsl-dpaa2/mac/Kconfig" ---- a/drivers/staging/fsl-dpaa2/Makefile -+++ b/drivers/staging/fsl-dpaa2/Makefile -@@ -3,3 +3,4 @@ - # - - obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/ -+obj-$(CONFIG_FSL_DPAA2_MAC) += mac/ ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/mac/Kconfig -@@ -0,0 +1,24 @@ -+config FSL_DPAA2_MAC -+ tristate "DPAA2 MAC / PHY interface" -+ depends on FSL_MC_BUS && FSL_DPAA2 -+ select MDIO_BUS_MUX_MMIOREG -+ select FSL_XGMAC_MDIO -+ select FIXED_PHY -+ ---help--- -+ Prototype driver for the DPAA2 MAC / PHY interface object. -+ This driver works as a proxy between phylib (including PHY drivers) -+ and the MC firmware. It forwards link state change updates from -+ phylib to the MC, and receives an interrupt from the MC whenever -+ a request is made to change the link state. -+ -+ -+config FSL_DPAA2_MAC_NETDEVS -+ bool "Expose net interfaces for PHYs" -+ default n -+ depends on FSL_DPAA2_MAC -+ ---help--- -+ Exposes macX net interfaces which allow direct control over MACs and -+ PHYs. -+ -+ Leave disabled if unsure. -+ ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/mac/Makefile -@@ -0,0 +1,10 @@ -+ -+obj-$(CONFIG_FSL_DPAA2_MAC) += dpaa2-mac.o -+ -+dpaa2-mac-objs := mac.o dpmac.o -+ -+all: -+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules -+ -+clean: -+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h -@@ -0,0 +1,195 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE.
-+ */ -+#ifndef _FSL_DPMAC_CMD_H -+#define _FSL_DPMAC_CMD_H -+ -+/* DPMAC Version */ -+#define DPMAC_VER_MAJOR 3 -+#define DPMAC_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPMAC_CMDID_CLOSE 0x800 -+#define DPMAC_CMDID_OPEN 0x80c -+#define DPMAC_CMDID_CREATE 0x90c -+#define DPMAC_CMDID_DESTROY 0x900 -+ -+#define DPMAC_CMDID_GET_ATTR 0x004 -+#define DPMAC_CMDID_RESET 0x005 -+ -+#define DPMAC_CMDID_SET_IRQ 0x010 -+#define DPMAC_CMDID_GET_IRQ 0x011 -+#define DPMAC_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPMAC_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPMAC_CMDID_SET_IRQ_MASK 0x014 -+#define DPMAC_CMDID_GET_IRQ_MASK 0x015 -+#define DPMAC_CMDID_GET_IRQ_STATUS 0x016 -+#define DPMAC_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPMAC_CMDID_MDIO_READ 0x0c0 -+#define DPMAC_CMDID_MDIO_WRITE 0x0c1 -+#define DPMAC_CMDID_GET_LINK_CFG 0x0c2 -+#define DPMAC_CMDID_SET_LINK_STATE 0x0c3 -+#define DPMAC_CMDID_GET_COUNTER 0x0c4 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_CREATE(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->mac_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_OPEN(cmd, dpmac_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpmac_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) 
\ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_ATTRIBUTES(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->phy_id);\ -+ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+ MC_RSP_OP(cmd, 1, 32, 8, enum dpmac_link_type, attr->link_type);\ -+ MC_RSP_OP(cmd, 1, 40, 8, enum dpmac_eth_if, attr->eth_if);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->max_rate);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_MDIO_READ(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_MDIO_READ(cmd, data) \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, data) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_MDIO_WRITE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->data); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_LINK_CFG(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_LINK_STATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ -+ MC_CMD_OP(cmd, 2, 0, 1, int, cfg->up); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_COUNTER(cmd, type) \ -+ MC_CMD_OP(cmd, 0, 0, 8, enum dpmac_counter, type) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_COUNTER(cmd, counter) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter) -+ -+#endif /* _FSL_DPMAC_CMD_H */ ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/mac/dpmac.c -@@ -0,0 +1,422 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include "../../fsl-mc/include/mc-sys.h" -+#include "../../fsl-mc/include/mc-cmd.h" -+#include "dpmac.h" -+#include "dpmac-cmd.h" -+ -+int dpmac_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpmac_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPMAC_CMD_OPEN(cmd, dpmac_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return err; -+} -+ -+int dpmac_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpmac_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPMAC_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpmac_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpmac_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpmac_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpmac_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = 
mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpmac_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpmac_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpmac_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_ATTRIBUTES(cmd, attr); -+ -+ return 0; -+} -+ -+int dpmac_mdio_read(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_mdio_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_MDIO_READ, -+ cmd_flags, -+ token); -+ DPMAC_CMD_MDIO_READ(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, 
&cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_MDIO_READ(cmd, cfg->data); -+ -+ return 0; -+} -+ -+int dpmac_mdio_write(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_mdio_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_MDIO_WRITE, -+ cmd_flags, -+ token); -+ DPMAC_CMD_MDIO_WRITE(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_link_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err = 0; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPMAC_RSP_GET_LINK_CFG(cmd, cfg); -+ -+ return 0; -+} -+ -+int dpmac_set_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_link_state *link_state) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE, -+ cmd_flags, -+ token); -+ DPMAC_CMD_SET_LINK_STATE(cmd, link_state); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpmac_counter type, -+ uint64_t *counter) -+{ -+ struct mc_command cmd = { 0 }; -+ int err = 0; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_COUNTER(cmd, type); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPMAC_RSP_GET_COUNTER(cmd, *counter); -+ -+ return 0; -+} ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/mac/dpmac.h -@@ -0,0 +1,593 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPMAC_H -+#define __FSL_DPMAC_H -+ -+/* Data Path MAC API -+ * Contains initialization APIs and runtime control APIs for DPMAC -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpmac_open() - Open a control session for the specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpmac_id: DPMAC unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpmac_create function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpmac_id, -+ uint16_t *token); -+ -+/** -+ * dpmac_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * enum dpmac_link_type - DPMAC link type -+ * @DPMAC_LINK_TYPE_NONE: No link -+ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type -+ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID -+ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type -+ */ -+enum dpmac_link_type { -+ DPMAC_LINK_TYPE_NONE, -+ DPMAC_LINK_TYPE_FIXED, -+ DPMAC_LINK_TYPE_PHY, -+ DPMAC_LINK_TYPE_BACKPLANE -+}; -+ -+/** -+ * enum dpmac_eth_if - DPMAC Ethernet interface -+ * @DPMAC_ETH_IF_MII: MII interface -+ * @DPMAC_ETH_IF_RMII: RMII interface -+ * @DPMAC_ETH_IF_SMII: SMII interface -+ * @DPMAC_ETH_IF_GMII: GMII interface -+ * @DPMAC_ETH_IF_RGMII: RGMII interface -+ * @DPMAC_ETH_IF_SGMII: SGMII interface -+ * @DPMAC_ETH_IF_QSGMII: QSGMII interface -+ * @DPMAC_ETH_IF_XAUI: XAUI interface -+ * @DPMAC_ETH_IF_XFI: XFI interface -+ */ -+enum dpmac_eth_if { -+ DPMAC_ETH_IF_MII, -+ DPMAC_ETH_IF_RMII, -+ DPMAC_ETH_IF_SMII, -+ DPMAC_ETH_IF_GMII, -+ DPMAC_ETH_IF_RGMII, -+ DPMAC_ETH_IF_SGMII, -+ DPMAC_ETH_IF_QSGMII, -+ DPMAC_ETH_IF_XAUI, -+ DPMAC_ETH_IF_XFI -+}; -+ -+/** -+ * struct dpmac_cfg - Structure representing DPMAC configuration -+ * @mac_id: Represents the Hardware MAC ID; in case of multiple WRIOPs, -+ * the MAC IDs are continuous. -+ * For example: 2 WRIOPs, 16 MACs in each: -+ * MAC IDs for the 1st WRIOP: 1-16, -+ * MAC IDs for the 2nd WRIOP: 17-32. -+ */ -+struct dpmac_cfg { -+ int mac_id; -+}; -+ -+/** -+ * dpmac_create() - Create the DPMAC object.
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPMAC object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call the dpmac_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpmac_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpmac_destroy() - Destroy the DPMAC object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpmac_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * DPMAC IRQ Index and Events -+ */ -+ -+/** -+ * IRQ index -+ */ -+#define DPMAC_IRQ_INDEX 0 -+/** -+ * IRQ event - indicates a request to change the link state -+ */ -+#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001 -+/** -+ * IRQ event - Indicates that the link state changed -+ */ -+#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002 -+ -+/** -+ * struct dpmac_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpmac_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpmac_set_irq() - Set IRQ information for the DPMAC to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpmac_irq_cfg *irq_cfg); -+ -+/** -+ * dpmac_get_irq() - Get IRQ information from the DPMAC. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpmac_irq_cfg *irq_cfg); -+ -+/** -+ * dpmac_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable controls the -+ * overall interrupt state. If the interrupt is disabled, none of the causes -+ * will trigger an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpmac_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpmac_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpmac_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpmac_get_irq_status() - Get the current status of any pending interrupts. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpmac_clear_irq_status() - Clear a pending interrupt's status -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @status: Bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise.
-+ */ -+int dpmac_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpmac_attr - Structure representing DPMAC attributes -+ * @id: DPMAC object ID -+ * @phy_id: PHY ID -+ * @link_type: link type -+ * @eth_if: Ethernet interface -+ * @max_rate: Maximum supported rate - in Mbps -+ * @version: DPMAC version -+ */ -+struct dpmac_attr { -+ int id; -+ int phy_id; -+ enum dpmac_link_type link_type; -+ enum dpmac_eth_if eth_if; -+ uint32_t max_rate; -+ /** -+ * struct version - Structure representing DPMAC version -+ * @major: DPMAC major version -+ * @minor: DPMAC minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dpmac_get_attributes - Retrieve DPMAC attributes. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_attr *attr); -+ -+/** -+ * struct dpmac_mdio_cfg - DPMAC MDIO read/write parameters -+ * @phy_addr: MDIO device address -+ * @reg: Address of the register within the Clause 45 PHY device from which data -+ * is to be read -+ * @data: Data read/write from/to MDIO -+ */ -+struct dpmac_mdio_cfg { -+ uint8_t phy_addr; -+ uint8_t reg; -+ uint16_t data; -+}; -+ -+/** -+ * dpmac_mdio_read() - Perform MDIO read transaction -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @cfg: Structure with MDIO transaction parameters -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_mdio_read(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_mdio_cfg *cfg); -+ -+/** -+ * dpmac_mdio_write() - Perform MDIO write transaction -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @cfg: Structure with MDIO transaction parameters -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_mdio_write(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_mdio_cfg *cfg); -+ -+/** -+ * DPMAC link configuration/state options -+ */ -+ -+/** -+ * Enable auto-negotiation -+ */ -+#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL -+/** -+ * Enable half-duplex mode -+ */ -+#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -+/** -+ * Enable pause frames -+ */ -+#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL -+/** -+ * Enable a-symmetric pause frames -+ */ -+#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL -+ -+/** -+ * struct dpmac_link_cfg - Structure representing DPMAC link configuration -+ * @rate: Link's rate - in Mbps -+ * @options: Enable/Disable DPMAC link cfg features (bitmap) -+ */ -+struct dpmac_link_cfg { -+ uint32_t rate; -+ uint64_t options; -+}; -+ -+/** -+ * dpmac_get_link_cfg() - Get Ethernet link configuration -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @cfg: Returned structure with the link configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmac_get_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_link_cfg *cfg); -+ -+/** -+ * struct dpmac_link_state - DPMAC link configuration request -+ * @rate: Rate in Mbps -+ * @options: Enable/Disable DPMAC link cfg features (bitmap) -+ * @up: Link state -+ */ -+struct dpmac_link_state { -+ uint32_t rate; -+ uint64_t options; -+ int up; -+}; -+ -+/** -+ * dpmac_set_link_state() - Set the Ethernet link status -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @link_state: Link state configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_link_state *link_state); -+ -+/** -+ * enum dpmac_counter - DPMAC counter types -+ * @DPMAC_CNT_ING_FRAME_64: counts 64-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-bytes frames and larger -+ * (up to max frame length specified), -+ * good or bad. -+ * @DPMAC_CNT_ING_FRAG: counts frames which are shorter than 64 bytes received -+ * with a wrong CRC. -+ * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length -+ * specified, with a bad frame check sequence. -+ * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors. -+ * Occurs when a receive FIFO overflows. -+ * Also includes frames truncated as a result of -+ * the receive FIFO overflow. -+ * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error -+ * (optionally used for wrong SFD). -+ * @DPMAC_CNT_EGR_UNDERSIZED: counts frames transmitted that were less than 64 -+ * bytes long, with a good CRC. -+ * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length -+ * specified, with a good frame check sequence. -+ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC) -+ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted -+ * (regular and PFC). -+ * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid -+ * frames and valid pause frames. -+ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames. -+ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames. -+ * @DPMAC_CNT_ING_ALL_FRAME: counts all frames received, good or bad. -+ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames. -+ * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error -+ * (except for undersized/fragment frame). -+ * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid -+ * frames and valid pause frames transmitted. -+ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames. -+ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames. -+ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames. -+ * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error. -+ * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including -+ * pause frames.
-+ * @DPMAC_CNT_ENG_GOOD_FRAME: counts frames transmitted without error, including -+ * pause frames. -+ */ -+enum dpmac_counter { -+ DPMAC_CNT_ING_FRAME_64, -+ DPMAC_CNT_ING_FRAME_127, -+ DPMAC_CNT_ING_FRAME_255, -+ DPMAC_CNT_ING_FRAME_511, -+ DPMAC_CNT_ING_FRAME_1023, -+ DPMAC_CNT_ING_FRAME_1518, -+ DPMAC_CNT_ING_FRAME_1519_MAX, -+ DPMAC_CNT_ING_FRAG, -+ DPMAC_CNT_ING_JABBER, -+ DPMAC_CNT_ING_FRAME_DISCARD, -+ DPMAC_CNT_ING_ALIGN_ERR, -+ DPMAC_CNT_EGR_UNDERSIZED, -+ DPMAC_CNT_ING_OVERSIZED, -+ DPMAC_CNT_ING_VALID_PAUSE_FRAME, -+ DPMAC_CNT_EGR_VALID_PAUSE_FRAME, -+ DPMAC_CNT_ING_BYTE, -+ DPMAC_CNT_ING_MCAST_FRAME, -+ DPMAC_CNT_ING_BCAST_FRAME, -+ DPMAC_CNT_ING_ALL_FRAME, -+ DPMAC_CNT_ING_UCAST_FRAME, -+ DPMAC_CNT_ING_ERR_FRAME, -+ DPMAC_CNT_EGR_BYTE, -+ DPMAC_CNT_EGR_MCAST_FRAME, -+ DPMAC_CNT_EGR_BCAST_FRAME, -+ DPMAC_CNT_EGR_UCAST_FRAME, -+ DPMAC_CNT_EGR_ERR_FRAME, -+ DPMAC_CNT_ING_GOOD_FRAME, -+ DPMAC_CNT_ENG_GOOD_FRAME -+}; -+ -+/** -+ * dpmac_get_counter() - Read a specific DPMAC counter -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @type: The requested counter -+ * @counter: Returned counter value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpmac_counter type, -+ uint64_t *counter); -+ -+#endif /* __FSL_DPMAC_H */ ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/mac/mac.c -@@ -0,0 +1,767 @@ -+/* Copyright 2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */ -+ -+#include - -+#include -+#include -+#include -+#include - -+#include -+#include - -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "../../fsl-mc/include/mc.h" -+#include "../../fsl-mc/include/mc-sys.h" -+ -+#include "dpmac.h" -+#include "dpmac-cmd.h" -+ -+#define DPAA2_SUPPORTED_DPMAC_VERSION 3 -+ -+struct dpaa2_mac_priv { -+ struct net_device *netdev; -+ struct fsl_mc_device *mc_dev; -+ struct dpmac_attr attr; -+ struct dpmac_link_state old_state; -+}; -+ -+/* TODO: fix the 10G modes, mapping can't be right: -+ * XGMII is parallel -+ * XAUI is serial, using 8b/10b encoding -+ * XFI is also serial but using 64b/66b encoding -+ * they can't all map to XGMII... -+ * -+ * This must be kept in sync with enum dpmac_eth_if. -+ */ -+static phy_interface_t dpaa2_mac_iface_mode[] = { -+ /* DPMAC_ETH_IF_MII */ -+ PHY_INTERFACE_MODE_MII, -+ /* DPMAC_ETH_IF_RMII */ -+ PHY_INTERFACE_MODE_RMII, -+ /* DPMAC_ETH_IF_SMII */ -+ PHY_INTERFACE_MODE_SMII, -+ /* DPMAC_ETH_IF_GMII */ -+ PHY_INTERFACE_MODE_GMII, -+ /* DPMAC_ETH_IF_RGMII */ -+ PHY_INTERFACE_MODE_RGMII, -+ /* DPMAC_ETH_IF_SGMII */ -+ PHY_INTERFACE_MODE_SGMII, -+ /* DPMAC_ETH_IF_QSGMII */ -+ PHY_INTERFACE_MODE_QSGMII, -+ /* DPMAC_ETH_IF_XAUI */ -+ PHY_INTERFACE_MODE_XGMII, -+ /* DPMAC_ETH_IF_XFI */ -+ PHY_INTERFACE_MODE_XGMII, -+}; -+ -+static void dpaa2_mac_link_changed(struct net_device *netdev) -+{ -+ struct phy_device *phydev; -+ struct dpmac_link_state state = { 0 }; -+ struct dpaa2_mac_priv *priv = netdev_priv(netdev); -+ int err; -+ -+ /* the PHY just notified us of link state change */ -+ phydev = netdev->phydev; -+ -+ state.up = !!phydev->link; -+ if (phydev->link) { -+ state.rate = phydev->speed; -+ -+ if (!phydev->duplex) -+ state.options |= DPMAC_LINK_OPT_HALF_DUPLEX; -+ if (phydev->autoneg) -+ state.options |= DPMAC_LINK_OPT_AUTONEG; -+ -+ netif_carrier_on(netdev); -+ } else { -+ netif_carrier_off(netdev); -+ } -+ -+ if (priv->old_state.up != state.up || -+ priv->old_state.rate != state.rate || -+ priv->old_state.options != state.options) { -+ priv->old_state = state; -+ phy_print_status(phydev); -+ } -+ -+ /* We must call into the MC firmware at all times, because we don't know -+ * when and whether a potential DPNI may have read the link state.
-+ */ -+ err = dpmac_set_link_state(priv->mc_dev->mc_io, 0, -+ priv->mc_dev->mc_handle, &state); -+ if (unlikely(err)) -+ dev_err(&priv->mc_dev->dev, "dpmac_set_link_state: %d\n", err); -+} -+ -+/* IRQ bits that we handle */ -+static const u32 dpmac_irq_mask = DPMAC_IRQ_EVENT_LINK_CFG_REQ; -+ -+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS -+static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb, -+ struct net_device *dev) -+{ -+ /* we don't support I/O for now, drop the frame */ -+ dev_kfree_skb_any(skb); -+ return NETDEV_TX_OK; -+} -+ -+static int dpaa2_mac_open(struct net_device *netdev) -+{ -+ /* start PHY state machine */ -+ phy_start(netdev->phydev); -+ -+ return 0; -+} -+ -+static int dpaa2_mac_stop(struct net_device *netdev) -+{ -+ if (!netdev->phydev) -+ goto done; -+ -+ /* stop PHY state machine */ -+ phy_stop(netdev->phydev); -+ -+ /* signal link down to firmware */ -+ netdev->phydev->link = 0; -+ dpaa2_mac_link_changed(netdev); -+ -+done: -+ return 0; -+} -+ -+static int dpaa2_mac_get_settings(struct net_device *netdev, -+ struct ethtool_cmd *cmd) -+{ -+ return phy_ethtool_gset(netdev->phydev, cmd); -+} -+ -+static int dpaa2_mac_set_settings(struct net_device *netdev, -+ struct ethtool_cmd *cmd) -+{ -+ return phy_ethtool_sset(netdev->phydev, cmd); -+} -+ -+static struct rtnl_link_stats64 -+*dpaa2_mac_get_stats(struct net_device *netdev, -+ struct rtnl_link_stats64 *storage) -+{ -+ struct dpaa2_mac_priv *priv = netdev_priv(netdev); -+ u64 tmp; -+ int err; -+ -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_EGR_MCAST_FRAME, -+ &storage->tx_packets); -+ if (err) -+ goto error; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_EGR_BCAST_FRAME, &tmp); -+ if (err) -+ goto error; -+ storage->tx_packets += tmp; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_EGR_UCAST_FRAME, &tmp); -+ if (err) -+ goto error; -+ storage->tx_packets += tmp; -+ -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_EGR_UNDERSIZED, &storage->tx_dropped); -+ if (err) -+ goto error; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_EGR_BYTE, &storage->tx_bytes); -+ if (err) -+ goto error; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_EGR_ERR_FRAME, &storage->tx_errors); -+ if (err) -+ goto error; -+ -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_ING_ALL_FRAME, &storage->rx_packets); -+ if (err) -+ goto error; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_ING_MCAST_FRAME, &storage->multicast); -+ if (err) -+ goto error; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_ING_FRAME_DISCARD, -+ &storage->rx_dropped); -+ if (err) -+ goto error; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_ING_ALIGN_ERR, &storage->rx_errors); -+ if (err) -+ goto error; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_ING_OVERSIZED, &tmp); -+ if (err) -+ goto error; -+ storage->rx_errors += tmp; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_ING_BYTE, &storage->rx_bytes); -+ if (err) -+ goto error; -+ -+ return storage; -+ -+error: -+ netdev_err(netdev, "dpmac_get_counter err %d\n", err); -+ return storage; -+} -+ -+static struct { -+ enum dpmac_counter id; -+ 
char name[ETH_GSTRING_LEN]; -+} dpaa2_mac_counters[] = { -+ {DPMAC_CNT_ING_ALL_FRAME, "rx all frames"}, -+ {DPMAC_CNT_ING_GOOD_FRAME, "rx frames ok"}, -+ {DPMAC_CNT_ING_ERR_FRAME, "rx frame errors"}, -+ {DPMAC_CNT_ING_FRAME_DISCARD, "rx frame discards"}, -+ {DPMAC_CNT_ING_UCAST_FRAME, "rx u-cast"}, -+ {DPMAC_CNT_ING_BCAST_FRAME, "rx b-cast"}, -+ {DPMAC_CNT_ING_MCAST_FRAME, "rx m-cast"}, -+ {DPMAC_CNT_ING_FRAME_64, "rx 64 bytes"}, -+ {DPMAC_CNT_ING_FRAME_127, "rx 65-127 bytes"}, -+ {DPMAC_CNT_ING_FRAME_255, "rx 128-255 bytes"}, -+ {DPMAC_CNT_ING_FRAME_511, "rx 256-511 bytes"}, -+ {DPMAC_CNT_ING_FRAME_1023, "rx 512-1023 bytes"}, -+ {DPMAC_CNT_ING_FRAME_1518, "rx 1024-1518 bytes"}, -+ {DPMAC_CNT_ING_FRAME_1519_MAX, "rx 1519-max bytes"}, -+ {DPMAC_CNT_ING_FRAG, "rx frags"}, -+ {DPMAC_CNT_ING_JABBER, "rx jabber"}, -+ {DPMAC_CNT_ING_ALIGN_ERR, "rx align errors"}, -+ {DPMAC_CNT_ING_OVERSIZED, "rx oversized"}, -+ {DPMAC_CNT_ING_VALID_PAUSE_FRAME, "rx pause"}, -+ {DPMAC_CNT_ING_BYTE, "rx bytes"}, -+ {DPMAC_CNT_ENG_GOOD_FRAME, "tx frames ok"}, -+ {DPMAC_CNT_EGR_UCAST_FRAME, "tx u-cast"}, -+ {DPMAC_CNT_EGR_MCAST_FRAME, "tx m-cast"}, -+ {DPMAC_CNT_EGR_BCAST_FRAME, "tx b-cast"}, -+ {DPMAC_CNT_EGR_ERR_FRAME, "tx frame errors"}, -+ {DPMAC_CNT_EGR_UNDERSIZED, "tx undersized"}, -+ {DPMAC_CNT_EGR_VALID_PAUSE_FRAME, "tx b-pause"}, -+ {DPMAC_CNT_EGR_BYTE, "tx bytes"}, -+ -+}; -+ -+static void dpaa2_mac_get_strings(struct net_device *netdev, -+ u32 stringset, u8 *data) -+{ -+ int i; -+ -+ switch (stringset) { -+ case ETH_SS_STATS: -+ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) -+ memcpy(data + i * ETH_GSTRING_LEN, -+ dpaa2_mac_counters[i].name, -+ ETH_GSTRING_LEN); -+ break; -+ } -+} -+ -+static void dpaa2_mac_get_ethtool_stats(struct net_device *netdev, -+ struct ethtool_stats *stats, -+ u64 *data) -+{ -+ struct dpaa2_mac_priv *priv = netdev_priv(netdev); -+ int i; -+ int err; -+ -+ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) { -+ err = dpmac_get_counter(priv->mc_dev->mc_io, -+ 0, -+ priv->mc_dev->mc_handle, -+ dpaa2_mac_counters[i].id, &data[i]); -+ if (err) -+ netdev_err(netdev, "dpmac_get_counter[%s] err %d\n", -+ dpaa2_mac_counters[i].name, err); -+ } -+} -+ -+static int dpaa2_mac_get_sset_count(struct net_device *dev, int sset) -+{ -+ switch (sset) { -+ case ETH_SS_STATS: -+ return ARRAY_SIZE(dpaa2_mac_counters); -+ default: -+ return -EOPNOTSUPP; -+ } -+} -+ -+static const struct net_device_ops dpaa2_mac_ndo_ops = { -+ .ndo_start_xmit = &dpaa2_mac_drop_frame, -+ .ndo_open = &dpaa2_mac_open, -+ .ndo_stop = &dpaa2_mac_stop, -+ .ndo_get_stats64 = &dpaa2_mac_get_stats, -+}; -+ -+static const struct ethtool_ops dpaa2_mac_ethtool_ops = { -+ .get_settings = &dpaa2_mac_get_settings, -+ .set_settings = &dpaa2_mac_set_settings, -+ .get_strings = &dpaa2_mac_get_strings, -+ .get_ethtool_stats = &dpaa2_mac_get_ethtool_stats, -+ .get_sset_count = &dpaa2_mac_get_sset_count, -+}; -+#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ -+ -+static int configure_link(struct dpaa2_mac_priv *priv, -+ struct dpmac_link_cfg *cfg) -+{ -+ struct phy_device *phydev = priv->netdev->phydev; -+ -+ if (!phydev) { -+ dev_warn(priv->netdev->dev.parent, -+ "asked to change PHY settings but PHY ref is NULL, ignoring\n"); -+ return 0; -+ } -+ -+ phydev->speed = cfg->rate; -+ phydev->duplex = !!(cfg->options & DPMAC_LINK_OPT_HALF_DUPLEX); -+ -+ if (cfg->options & DPMAC_LINK_OPT_AUTONEG) { -+ phydev->autoneg = 1; -+ phydev->advertising |= ADVERTISED_Autoneg; -+ } else { -+ phydev->autoneg = 0; -+ phydev->advertising &= 
~ADVERTISED_Autoneg; -+ } -+ -+ phy_start_aneg(phydev); -+ -+ return 0; -+} -+ -+static irqreturn_t dpaa2_mac_irq_handler(int irq_num, void *arg) -+{ -+ struct device *dev = (struct device *)arg; -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev); -+ struct dpmac_link_cfg link_cfg; -+ u8 irq_index = DPMAC_IRQ_INDEX; -+ u32 status, clear = 0; -+ int err; -+ -+ if (mc_dev->irqs[0]->msi_desc->irq != irq_num) { -+ dev_err(dev, "received unexpected interrupt %d!\n", irq_num); -+ goto err; -+ } -+ -+ err = dpmac_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ irq_index, &status); -+ if (err) { -+ dev_err(dev, "dpmac_get_irq_status err %d\n", err); -+ clear = ~0x0u; -+ goto out; -+ } -+ -+ /* DPNI-initiated link configuration; 'ifconfig up' also calls this */ -+ if (status & DPMAC_IRQ_EVENT_LINK_CFG_REQ) { -+ dev_dbg(dev, "DPMAC IRQ %d - LINK_CFG_REQ\n", irq_num); -+ clear |= DPMAC_IRQ_EVENT_LINK_CFG_REQ; -+ -+ err = dpmac_get_link_cfg(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ &link_cfg); -+ if (err) { -+ dev_err(dev, "dpmac_get_link_cfg err %d\n", err); -+ goto out; -+ } -+ -+ err = configure_link(priv, &link_cfg); -+ if (err) { -+ dev_err(dev, "cannot configure link\n"); -+ goto out; -+ } -+ } -+ -+out: -+ err = dpmac_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ irq_index, clear); -+ if (err < 0) -+ dev_err(&mc_dev->dev, "dpmac_clear_irq_status() err %d\n", err); -+ -+ return IRQ_HANDLED; -+ -+err: -+ dev_warn(dev, "DPMAC IRQ %d was not handled!\n", irq_num); -+ return IRQ_NONE; -+} -+ -+static int setup_irqs(struct fsl_mc_device *mc_dev) -+{ -+ int err; -+ -+ err = fsl_mc_allocate_irqs(mc_dev); -+ if (err) { -+ dev_err(&mc_dev->dev, "fsl_mc_allocate_irqs err %d\n", err); -+ return err; -+ } -+ -+ err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ DPMAC_IRQ_INDEX, dpmac_irq_mask); -+ if (err < 0) { -+ dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err); -+ goto free_irq; -+ } -+ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ DPMAC_IRQ_INDEX, 0); -+ if (err) { -+ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err); -+ goto free_irq; -+ } -+ -+ err = devm_request_threaded_irq(&mc_dev->dev, -+ mc_dev->irqs[0]->msi_desc->irq, -+ NULL, &dpaa2_mac_irq_handler, -+ IRQF_NO_SUSPEND | IRQF_ONESHOT, -+ dev_name(&mc_dev->dev), &mc_dev->dev); -+ if (err) { -+ dev_err(&mc_dev->dev, "devm_request_threaded_irq err %d\n", -+ err); -+ goto free_irq; -+ } -+ -+ err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ DPMAC_IRQ_INDEX, dpmac_irq_mask); -+ if (err < 0) { -+ dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err); -+ goto free_irq; -+ } -+ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ DPMAC_IRQ_INDEX, 1); -+ if (err) { -+ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err); -+ goto unregister_irq; -+ } -+ -+ return 0; -+ -+unregister_irq: -+ devm_free_irq(&mc_dev->dev, mc_dev->irqs[0]->msi_desc->irq, &mc_dev->dev); -+free_irq: -+ fsl_mc_free_irqs(mc_dev); -+ -+ return err; -+} -+ -+static void teardown_irqs(struct fsl_mc_device *mc_dev) -+{ -+ int err; -+ -+ err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ DPMAC_IRQ_INDEX, dpmac_irq_mask); -+ if (err < 0) -+ dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err); -+ -+ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ DPMAC_IRQ_INDEX, 0); -+ if (err < 0) -+ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err); -+ -+ devm_free_irq(&mc_dev->dev, 
mc_dev->irqs[0]->msi_desc->irq, &mc_dev->dev); -+ fsl_mc_free_irqs(mc_dev); -+} -+ -+static struct device_node *lookup_node(struct device *dev, int dpmac_id) -+{ -+ struct device_node *dpmacs, *dpmac = NULL; -+ struct device_node *mc_node = dev->of_node; -+ const void *id; -+ int lenp; -+ int dpmac_id_be32 = cpu_to_be32(dpmac_id); -+ -+ dpmacs = of_find_node_by_name(mc_node, "dpmacs"); -+ if (!dpmacs) { -+ dev_err(dev, "No dpmacs subnode in device-tree\n"); -+ return NULL; -+ } -+ -+ while ((dpmac = of_get_next_child(dpmacs, dpmac))) { -+ id = of_get_property(dpmac, "reg", &lenp); -+ if (!id || lenp != sizeof(int)) { -+ dev_warn(dev, "Unsuitable reg property in dpmac node\n"); -+ continue; -+ } -+ if (*(int *)id == dpmac_id_be32) -+ return dpmac; -+ } -+ -+ return NULL; -+} -+ -+static int check_dpmac_version(struct dpaa2_mac_priv *priv) -+{ -+ struct device *dev = &priv->mc_dev->dev; -+ int mc_version = priv->attr.version.major; -+ -+ /* Check that the FLIB-defined version matches the one reported by MC */ -+ if (mc_version != DPMAC_VER_MAJOR) { -+ dev_err(dev, "DPMAC FLIB version mismatch: MC says %d, we have %d\n", -+ mc_version, DPMAC_VER_MAJOR); -+ return -EINVAL; -+ } -+ -+ /* ... and that we actually support it */ -+ if (mc_version < DPAA2_SUPPORTED_DPMAC_VERSION) { -+ dev_err(dev, "Unsupported DPMAC FLIB version (%d)\n", -+ mc_version); -+ return -EINVAL; -+ } -+ -+ dev_dbg(dev, "Using DPMAC FLIB version %d\n", mc_version); -+ -+ return 0; -+} -+ -+static int dpaa2_mac_probe(struct fsl_mc_device *mc_dev) -+{ -+ struct device *dev; -+ struct dpaa2_mac_priv *priv = NULL; -+ struct device_node *phy_node, *dpmac_node; -+ struct net_device *netdev; -+ phy_interface_t if_mode; -+ int err = 0; -+ -+ /* just being completely paranoid */ -+ if (!mc_dev) -+ return -EFAULT; -+ dev = &mc_dev->dev; -+ -+ /* prepare a net_dev structure to make the phy lib API happy */ -+ netdev = alloc_etherdev(sizeof(*priv)); -+ if (!netdev) { -+ dev_err(dev, "alloc_etherdev error\n"); -+ err = -ENOMEM; -+ goto err_exit; -+ } -+ priv = netdev_priv(netdev); -+ priv->mc_dev = mc_dev; -+ priv->netdev = netdev; -+ -+ SET_NETDEV_DEV(netdev, dev); -+ snprintf(netdev->name, IFNAMSIZ, "mac%d", mc_dev->obj_desc.id); -+ -+ dev_set_drvdata(dev, priv); -+ -+ err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io); -+ if (err || !mc_dev->mc_io) { -+ dev_err(dev, "fsl_mc_portal_allocate error: %d\n", err); -+ err = -ENODEV; -+ goto err_free_netdev; -+ } -+ -+ err = dpmac_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id, -+ &mc_dev->mc_handle); -+ if (err || !mc_dev->mc_handle) { -+ dev_err(dev, "dpmac_open error: %d\n", err); -+ err = -ENODEV; -+ goto err_free_mcp; -+ } -+ -+ err = dpmac_get_attributes(mc_dev->mc_io, 0, -+ mc_dev->mc_handle, &priv->attr); -+ if (err) { -+ dev_err(dev, "dpmac_get_attributes err %d\n", err); -+ err = -EINVAL; -+ goto err_close; -+ } -+ -+ err = check_dpmac_version(priv); -+ if (err) -+ goto err_close; -+ -+ /* Look up the DPMAC node in the device-tree. 
*/ -+ dpmac_node = lookup_node(dev, priv->attr.id); -+ if (!dpmac_node) { -+ dev_err(dev, "No dpmac@%d subnode found.\n", priv->attr.id); -+ err = -ENODEV; -+ goto err_close; -+ } -+ -+ err = setup_irqs(mc_dev); -+ if (err) { -+ err = -EFAULT; -+ goto err_close; -+ } -+ -+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS -+ /* OPTIONAL, register netdev just to make it visible to the user */ -+ netdev->netdev_ops = &dpaa2_mac_ndo_ops; -+ netdev->ethtool_ops = &dpaa2_mac_ethtool_ops; -+ -+ /* phy starts up enabled so netdev should be up too */ -+ netdev->flags |= IFF_UP; -+ -+ err = register_netdev(priv->netdev); -+ if (err < 0) { -+ dev_err(dev, "register_netdev error %d\n", err); -+ err = -ENODEV; -+ goto err_free_irq; -+ } -+#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ -+ -+ /* probe the PHY as a fixed-link if the link type declared in DPC -+ * explicitly mandates this -+ */ -+ if (priv->attr.link_type == DPMAC_LINK_TYPE_FIXED) -+ goto probe_fixed_link; -+ -+ if (priv->attr.eth_if < ARRAY_SIZE(dpaa2_mac_iface_mode)) { -+ if_mode = dpaa2_mac_iface_mode[priv->attr.eth_if]; -+ dev_dbg(dev, "\tusing if mode %s for eth_if %d\n", -+ phy_modes(if_mode), priv->attr.eth_if); -+ } else { -+ dev_warn(dev, "Unexpected interface mode %d, will probe as fixed link\n", -+ priv->attr.eth_if); -+ goto probe_fixed_link; -+ } -+ -+ /* try to connect to the PHY */ -+ phy_node = of_parse_phandle(dpmac_node, "phy-handle", 0); -+ if (!phy_node) { -+ dev_err(dev, "dpmac node has no phy-handle property\n"); -+ err = -ENODEV; -+ goto err_no_phy; -+ } -+ netdev->phydev = of_phy_connect(netdev, phy_node, -+ &dpaa2_mac_link_changed, 0, if_mode); -+ if (!netdev->phydev) { -+ /* No need for dev_err(); the kernel's loud enough as it is. */ -+ dev_dbg(dev, "Can't of_phy_connect() now.\n"); -+ /* We might be waiting for the MDIO MUX to probe, so defer -+ * our own probing.
-+ */ -+ err = -EPROBE_DEFER; -+ goto err_defer; -+ } -+ dev_info(dev, "Connected to %s PHY.\n", phy_modes(if_mode)); -+ -+probe_fixed_link: -+ if (!netdev->phydev) { -+ struct fixed_phy_status status = { -+ .link = 1, -+ /* fixed-phys don't support 10Gbps speed for now */ -+ .speed = 1000, -+ .duplex = 1, -+ }; -+ -+ /* try to register a fixed link phy */ -+ netdev->phydev = fixed_phy_register(PHY_POLL, &status, -1, NULL); -+ if (!netdev->phydev || IS_ERR(netdev->phydev)) { -+ dev_err(dev, "error trying to register fixed PHY\n"); -+ /* So we don't crash unregister_netdev() later on */ -+ netdev->phydev = NULL; -+ err = -EFAULT; -+ goto err_no_phy; -+ } -+ dev_info(dev, "Registered fixed PHY.\n"); -+ } -+ -+ /* start PHY state machine */ -+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS -+ dpaa2_mac_open(netdev); -+#else /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ -+ phy_start(netdev->phydev); -+#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ -+ return 0; -+ -+err_defer: -+err_no_phy: -+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS -+ unregister_netdev(netdev); -+err_free_irq: -+#endif -+ teardown_irqs(mc_dev); -+err_close: -+ dpmac_close(mc_dev->mc_io, 0, mc_dev->mc_handle); -+err_free_mcp: -+ fsl_mc_portal_free(mc_dev->mc_io); -+err_free_netdev: -+ free_netdev(netdev); -+err_exit: -+ return err; -+} -+ -+static int dpaa2_mac_remove(struct fsl_mc_device *mc_dev) -+{ -+ struct device *dev = &mc_dev->dev; -+ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev); -+ -+ unregister_netdev(priv->netdev); -+ teardown_irqs(priv->mc_dev); -+ dpmac_close(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle); -+ fsl_mc_portal_free(priv->mc_dev->mc_io); -+ free_netdev(priv->netdev); -+ -+ dev_set_drvdata(dev, NULL); -+ kfree(priv); -+ -+ return 0; -+} -+ -+static const struct fsl_mc_device_match_id dpaa2_mac_match_id_table[] = { -+ { -+ .vendor = FSL_MC_VENDOR_FREESCALE, -+ .obj_type = "dpmac", -+ .ver_major = DPMAC_VER_MAJOR, -+ .ver_minor = DPMAC_VER_MINOR, -+ }, -+ {} -+}; -+ -+static struct fsl_mc_driver dpaa2_mac_drv = { -+ .driver = { -+ .name = KBUILD_MODNAME, -+ .owner = THIS_MODULE, -+ }, -+ .probe = dpaa2_mac_probe, -+ .remove = dpaa2_mac_remove, -+ .match_id_table = dpaa2_mac_match_id_table, -+}; -+ -+module_fsl_mc_driver(dpaa2_mac_drv); -+ -+MODULE_LICENSE("GPL"); -+MODULE_DESCRIPTION("DPAA2 PHY proxy interface driver"); diff --git a/target/linux/layerscape/patches-4.4/7209-staging-fsl-dpaa2-mac-Interrupt-code-cleanup.patch b/target/linux/layerscape/patches-4.4/7209-staging-fsl-dpaa2-mac-Interrupt-code-cleanup.patch deleted file mode 100644 index 9ed721f7f..000000000 --- a/target/linux/layerscape/patches-4.4/7209-staging-fsl-dpaa2-mac-Interrupt-code-cleanup.patch +++ /dev/null @@ -1,182 +0,0 @@ -From bb42890533f9592e8d30654b4e0b19c3cf7caaec Mon Sep 17 00:00:00 2001 -From: Ioana Radulescu -Date: Fri, 1 Apr 2016 18:38:18 +0300 -Subject: [PATCH 209/226] staging: fsl-dpaa2/mac: Interrupt code cleanup - -Cleanup and a couple of minor fixes for the interrupt -handling code: -* Removed a few unnecessary checks, unify format for others -* Don't print error/debug messages in interrupt handler -* No need to explicitly disable DPMAC interrupts before -configuring them -* Use unlikely in interrupt handler routine error checks -* if status register is zero or we're unable to read its value, -return IRQ_NONE instead of IRQ_HANDLED -* always clear the entire status register, not just the bit(s) -that were treated - -Signed-off-by: Ioana Radulescu -(cherry picked from commit 4b46eec16c56e4f453ca1558af9aceaf6ffe831a) -(Stuart:resolved merge 
conflict) -Signed-off-by: Stuart Yoder ---- - drivers/staging/fsl-dpaa2/mac/mac.c | 77 ++++++++--------------------------- - 1 file changed, 16 insertions(+), 61 deletions(-) - ---- a/drivers/staging/fsl-dpaa2/mac/mac.c -+++ b/drivers/staging/fsl-dpaa2/mac/mac.c -@@ -132,7 +132,7 @@ static void dpaa2_mac_link_changed(struc - } - - /* IRQ bits that we handle */ --static const u32 dpmac_irq_mask = DPMAC_IRQ_EVENT_LINK_CFG_REQ; -+static const u32 dpmac_irq_mask = DPMAC_IRQ_EVENT_LINK_CFG_REQ; - - #ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS - static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb, -@@ -345,16 +345,13 @@ static const struct ethtool_ops dpaa2_ma - }; - #endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ - --static int configure_link(struct dpaa2_mac_priv *priv, -- struct dpmac_link_cfg *cfg) -+static void configure_link(struct dpaa2_mac_priv *priv, -+ struct dpmac_link_cfg *cfg) - { - struct phy_device *phydev = priv->netdev->phydev; - -- if (!phydev) { -- dev_warn(priv->netdev->dev.parent, -- "asked to change PHY settings but PHY ref is NULL, ignoring\n"); -- return 0; -- } -+ if (unlikely(!phydev)) -+ return; - - phydev->speed = cfg->rate; - phydev->duplex = !!(cfg->options & DPMAC_LINK_OPT_HALF_DUPLEX); -@@ -368,8 +365,6 @@ static int configure_link(struct dpaa2_m - } - - phy_start_aneg(phydev); -- -- return 0; - } - - static irqreturn_t dpaa2_mac_irq_handler(int irq_num, void *arg) -@@ -378,53 +373,29 @@ static irqreturn_t dpaa2_mac_irq_handler - struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); - struct dpaa2_mac_priv *priv = dev_get_drvdata(dev); - struct dpmac_link_cfg link_cfg; -- u8 irq_index = DPMAC_IRQ_INDEX; -- u32 status, clear = 0; -+ u32 status; - int err; - -- if (mc_dev->irqs[0]->msi_desc->irq != irq_num) { -- dev_err(dev, "received unexpected interrupt %d!\n", irq_num); -- goto err; -- } -- - err = dpmac_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle, -- irq_index, &status); -- if (err) { -- dev_err(dev, "dpmac_get_irq_status err %d\n", err); -- clear = ~0x0u; -- goto out; -- } -+ DPMAC_IRQ_INDEX, &status); -+ if (unlikely(err || !status)) -+ return IRQ_NONE; - - /* DPNI-initiated link configuration; 'ifconfig up' also calls this */ - if (status & DPMAC_IRQ_EVENT_LINK_CFG_REQ) { -- dev_dbg(dev, "DPMAC IRQ %d - LINK_CFG_REQ\n", irq_num); -- clear |= DPMAC_IRQ_EVENT_LINK_CFG_REQ; -- - err = dpmac_get_link_cfg(mc_dev->mc_io, 0, mc_dev->mc_handle, - &link_cfg); -- if (err) { -- dev_err(dev, "dpmac_get_link_cfg err %d\n", err); -+ if (unlikely(err)) - goto out; -- } - -- err = configure_link(priv, &link_cfg); -- if (err) { -- dev_err(dev, "cannot configure link\n"); -- goto out; -- } -+ configure_link(priv, &link_cfg); - } - - out: -- err = dpmac_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle, -- irq_index, clear); -- if (err < 0) -- dev_err(&mc_dev->dev, "dpmac_clear_irq_status() err %d\n", err); -+ dpmac_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ DPMAC_IRQ_INDEX, status); - - return IRQ_HANDLED; -- --err: -- dev_warn(dev, "DPMAC IRQ %d was not handled!\n", irq_num); -- return IRQ_NONE; - } - - static int setup_irqs(struct fsl_mc_device *mc_dev) -@@ -437,19 +408,6 @@ static int setup_irqs(struct fsl_mc_devi - return err; - } - -- err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, -- DPMAC_IRQ_INDEX, dpmac_irq_mask); -- if (err < 0) { -- dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err); -- goto free_irq; -- } -- err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, -- DPMAC_IRQ_INDEX, 0); -- if (err) { -- 
dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err); -- goto free_irq; -- } -- - err = devm_request_threaded_irq(&mc_dev->dev, - mc_dev->irqs[0]->msi_desc->irq, - NULL, &dpaa2_mac_irq_handler, -@@ -463,7 +421,7 @@ static int setup_irqs(struct fsl_mc_devi - - err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, - DPMAC_IRQ_INDEX, dpmac_irq_mask); -- if (err < 0) { -+ if (err) { - dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err); - goto free_irq; - } -@@ -490,12 +448,12 @@ static void teardown_irqs(struct fsl_mc_ - - err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, - DPMAC_IRQ_INDEX, dpmac_irq_mask); -- if (err < 0) -+ if (err) - dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err); - - err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, - DPMAC_IRQ_INDEX, 0); -- if (err < 0) -+ if (err) - dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err); - - devm_free_irq(&mc_dev->dev, mc_dev->irqs[0]->msi_desc->irq, &mc_dev->dev); -@@ -562,9 +520,6 @@ static int dpaa2_mac_probe(struct fsl_mc - phy_interface_t if_mode; - int err = 0; - -- /* just being completely paranoid */ -- if (!mc_dev) -- return -EFAULT; - dev = &mc_dev->dev; - - /* prepare a net_dev structure to make the phy lib API happy */ diff --git a/target/linux/layerscape/patches-4.4/7210-staging-fsl-dpaa2-mac-Fix-unregister_netdev-issue.patch b/target/linux/layerscape/patches-4.4/7210-staging-fsl-dpaa2-mac-Fix-unregister_netdev-issue.patch deleted file mode 100644 index ea582835c..000000000 --- a/target/linux/layerscape/patches-4.4/7210-staging-fsl-dpaa2-mac-Fix-unregister_netdev-issue.patch +++ /dev/null @@ -1,42 +0,0 @@ -From e74b6010eca026625ba4e39c80620320ca777deb Mon Sep 17 00:00:00 2001 -From: Ioana Radulescu -Date: Tue, 5 Apr 2016 13:35:14 +0300 -Subject: [PATCH 210/226] staging: fsl-dpaa2/mac: Fix unregister_netdev issue - -We only register the netdevice associated with a mac object if -ONFIG_FSL_DPAA2_MAC_NETDEV is set, but we always unregister it -during device remove(). Fix this by ifdef-ing the unregister -operation. - -Also ifdef the change in netdevice name as it only makes sense -under this option. 
- -Signed-off-by: Ioana Radulescu -(cherry picked from commit dd6a5313e194168d46fef495a6e3bc5207801473) ---- - drivers/staging/fsl-dpaa2/mac/mac.c | 5 +++++ - 1 file changed, 5 insertions(+) - ---- a/drivers/staging/fsl-dpaa2/mac/mac.c -+++ b/drivers/staging/fsl-dpaa2/mac/mac.c -@@ -534,7 +534,10 @@ static int dpaa2_mac_probe(struct fsl_mc - priv->netdev = netdev; - - SET_NETDEV_DEV(netdev, dev); -+ -+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS - snprintf(netdev->name, IFNAMSIZ, "mac%d", mc_dev->obj_desc.id); -+#endif - - dev_set_drvdata(dev, priv); - -@@ -684,7 +687,9 @@ static int dpaa2_mac_remove(struct fsl_m - struct device *dev = &mc_dev->dev; - struct dpaa2_mac_priv *priv = dev_get_drvdata(dev); - -+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS - unregister_netdev(priv->netdev); -+#endif - teardown_irqs(priv->mc_dev); - dpmac_close(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle); - fsl_mc_portal_free(priv->mc_dev->mc_io); diff --git a/target/linux/layerscape/patches-4.4/7211-staging-fsl-dpaa2-mac-Don-t-call-devm_free_irq.patch b/target/linux/layerscape/patches-4.4/7211-staging-fsl-dpaa2-mac-Don-t-call-devm_free_irq.patch deleted file mode 100644 index 46ecaea1e..000000000 --- a/target/linux/layerscape/patches-4.4/7211-staging-fsl-dpaa2-mac-Don-t-call-devm_free_irq.patch +++ /dev/null @@ -1,42 +0,0 @@ -From b4d01330c66cbab3563c58f66f73f55726c09aec Mon Sep 17 00:00:00 2001 -From: Ioana Radulescu -Date: Tue, 5 Apr 2016 17:54:14 +0300 -Subject: [PATCH 211/226] staging: fsl-dpaa2/mac: Don't call devm_free_irq - -MAC interrupts are registered with devm_request_threaded_irq(), so -there's no need to explicitly unregister them in case of a probe -error or at device remove, as the kernel will take care of that for us. - -Signed-off-by: Ioana Radulescu -(cherry picked from commit 58e0fd23ade4b13e0a3c7e5f201802013e12df1c) -(Stuart: resolved merge conflict) -Signed-off-by: Stuart Yoder ---- - drivers/staging/fsl-dpaa2/mac/mac.c | 5 +---- - 1 file changed, 1 insertion(+), 4 deletions(-) - ---- a/drivers/staging/fsl-dpaa2/mac/mac.c -+++ b/drivers/staging/fsl-dpaa2/mac/mac.c -@@ -429,13 +429,11 @@ static int setup_irqs(struct fsl_mc_devi - DPMAC_IRQ_INDEX, 1); - if (err) { - dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err); -- goto unregister_irq; -+ goto free_irq; - } - - return 0; - --unregister_irq: -- devm_free_irq(&mc_dev->dev, mc_dev->irqs[0]->msi_desc->irq, &mc_dev->dev); - free_irq: - fsl_mc_free_irqs(mc_dev); - -@@ -456,7 +454,6 @@ static void teardown_irqs(struct fsl_mc_ - if (err) - dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err); - -- devm_free_irq(&mc_dev->dev, mc_dev->irqs[0]->msi_desc->irq, &mc_dev->dev); - fsl_mc_free_irqs(mc_dev); - } - diff --git a/target/linux/layerscape/patches-4.4/7212-staging-fsl-dpaa2-mac-Use-of_property_read_32.patch b/target/linux/layerscape/patches-4.4/7212-staging-fsl-dpaa2-mac-Use-of_property_read_32.patch deleted file mode 100644 index 8ab6de9bf..000000000 --- a/target/linux/layerscape/patches-4.4/7212-staging-fsl-dpaa2-mac-Use-of_property_read_32.patch +++ /dev/null @@ -1,43 +0,0 @@ -From e554a03fe11719db373be3c54ce8f230a98dd5e4 Mon Sep 17 00:00:00 2001 -From: Ioana Radulescu -Date: Wed, 6 Apr 2016 15:05:47 +0300 -Subject: [PATCH 212/226] staging: fsl-dpaa2/mac: Use of_property_read_u32() - -Simplify reading of the dpmac id from device tree.
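-
-For context, a minimal sketch of the simplified device-tree read (the
-helper name is an assumption, not part of the patch):
-
-	#include <linux/of.h>
-
-	/* of_property_read_u32() validates presence and length and returns
-	 * the value in CPU endianness, replacing the open-coded
-	 * of_get_property() + cpu_to_be32() comparison removed below. */
-	static int dpmac_node_id(const struct device_node *dpmac, u32 *id)
-	{
-		return of_property_read_u32(dpmac, "reg", id);
-	}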
- -Signed-off-by: Ioana Radulescu -(cherry picked from commit b0562bda063f95923bcd8b78dea84a6e0587d3da) ---- - drivers/staging/fsl-dpaa2/mac/mac.c | 13 +++++-------- - 1 file changed, 5 insertions(+), 8 deletions(-) - ---- a/drivers/staging/fsl-dpaa2/mac/mac.c -+++ b/drivers/staging/fsl-dpaa2/mac/mac.c -@@ -461,9 +461,8 @@ static struct device_node *lookup_node(s - { - struct device_node *dpmacs, *dpmac = NULL; - struct device_node *mc_node = dev->of_node; -- const void *id; -- int lenp; -- int dpmac_id_be32 = cpu_to_be32(dpmac_id); -+ u32 id; -+ int err; - - dpmacs = of_find_node_by_name(mc_node, "dpmacs"); - if (!dpmacs) { -@@ -472,12 +471,10 @@ static struct device_node *lookup_node(s - } - - while ((dpmac = of_get_next_child(dpmacs, dpmac))) { -- id = of_get_property(dpmac, "reg", &lenp); -- if (!id || lenp != sizeof(int)) { -- dev_warn(dev, "Unsuitable reg property in dpmac node\n"); -+ err = of_property_read_u32(dpmac, "reg", &id); -+ if (err) - continue; -- } -- if (*(int *)id == dpmac_id_be32) -+ if (id == dpmac_id) - return dpmac; - } - diff --git a/target/linux/layerscape/patches-4.4/7213-staging-fsl-dpaa2-mac-Remove-version-checks.patch b/target/linux/layerscape/patches-4.4/7213-staging-fsl-dpaa2-mac-Remove-version-checks.patch deleted file mode 100644 index 2c7bb88cc..000000000 --- a/target/linux/layerscape/patches-4.4/7213-staging-fsl-dpaa2-mac-Remove-version-checks.patch +++ /dev/null @@ -1,61 +0,0 @@ -From 3e4dc755337ca86d29c9f21f5225a77595aee032 Mon Sep 17 00:00:00 2001 -From: Ioana Radulescu -Date: Wed, 6 Apr 2016 12:12:06 +0300 -Subject: [PATCH 213/226] staging: fsl-dpaa2/mac: Remove version checks - -We intend to ensure backward compatibility with all MC versions -going forward, so we don't require an exact version match anymore -between MAC driver, DPMAC API version and DPMAC object version in -MC firmware. - -Signed-off-by: Ioana Radulescu -(cherry picked from commit eafc210ef421fb0dca67b67bf1a2fe98cd060c31) ---- - drivers/staging/fsl-dpaa2/mac/mac.c | 29 ++--------------------------- - 1 file changed, 2 insertions(+), 27 deletions(-) - ---- a/drivers/staging/fsl-dpaa2/mac/mac.c -+++ b/drivers/staging/fsl-dpaa2/mac/mac.c -@@ -481,30 +481,6 @@ static struct device_node *lookup_node(s - return NULL; - } - --static int check_dpmac_version(struct dpaa2_mac_priv *priv) --{ -- struct device *dev = &priv->mc_dev->dev; -- int mc_version = priv->attr.version.major; -- -- /* Check that the FLIB-defined version matches the one reported by MC */ -- if (mc_version != DPMAC_VER_MAJOR) { -- dev_err(dev, "DPMAC FLIB version mismatch: MC says %d, we have %d\n", -- mc_version, DPMAC_VER_MAJOR); -- return -EINVAL; -- } -- -- /* ... and that we actually support it */ -- if (mc_version < DPAA2_SUPPORTED_DPMAC_VERSION) { -- dev_err(dev, "Unsupported DPMAC FLIB version (%d)\n", -- mc_version); -- return -EINVAL; -- } -- -- dev_dbg(dev, "Using DPMAC FLIB version %d\n", mc_version); -- -- return 0; --} -- - static int dpaa2_mac_probe(struct fsl_mc_device *mc_dev) - { - struct device *dev; -@@ -558,9 +534,8 @@ static int dpaa2_mac_probe(struct fsl_mc - goto err_close; - } - -- err = check_dpmac_version(priv); -- if (err) -- goto err_close; -+ dev_info_once(dev, "Using DPMAC API %d.%d\n", -+ priv->attr.version.major, priv->attr.version.minor); - - /* Look up the DPMAC node in the device-tree. 
*/ -+ dpmac_node = lookup_node(dev, priv->attr.id); diff --git a/target/linux/layerscape/patches-4.4/7214-staging-fsl-dpaa2-mac-match-id-cleanup.patch b/target/linux/layerscape/patches-4.4/7214-staging-fsl-dpaa2-mac-match-id-cleanup.patch deleted file mode 100644 index 850d0e630..000000000 --- a/target/linux/layerscape/patches-4.4/7214-staging-fsl-dpaa2-mac-match-id-cleanup.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 137f5f17bad655024d18123b1be696ad6b9ec729 Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Wed, 15 Jun 2016 14:04:32 -0500 -Subject: [PATCH 214/226] staging: fsl-dpaa2/mac: match id cleanup - -Signed-off-by: Stuart Yoder ---- - drivers/staging/fsl-dpaa2/mac/mac.c | 4 +--- - 1 file changed, 1 insertion(+), 3 deletions(-) - ---- a/drivers/staging/fsl-dpaa2/mac/mac.c -+++ b/drivers/staging/fsl-dpaa2/mac/mac.c -@@ -670,12 +670,10 @@ static int dpaa2_mac_remove(struct fsl_m - return 0; - } - --static const struct fsl_mc_device_match_id dpaa2_mac_match_id_table[] = { -+static const struct fsl_mc_device_id dpaa2_mac_match_id_table[] = { - { - .vendor = FSL_MC_VENDOR_FREESCALE, - .obj_type = "dpmac", -- .ver_major = DPMAC_VER_MAJOR, -- .ver_minor = DPMAC_VER_MINOR, - }, - {} - }; diff --git a/target/linux/layerscape/patches-4.4/7215-dpaa2-evb-Added-Edge-Virtual-Bridge-driver.patch b/target/linux/layerscape/patches-4.4/7215-dpaa2-evb-Added-Edge-Virtual-Bridge-driver.patch deleted file mode 100644 index 344c2aec3..000000000 --- a/target/linux/layerscape/patches-4.4/7215-dpaa2-evb-Added-Edge-Virtual-Bridge-driver.patch +++ /dev/null @@ -1,2918 +0,0 @@ -From 54bcaca10728c1a1c8adfa48124ea79cce4ef929 Mon Sep 17 00:00:00 2001 -From: Razvan Stefanescu -Date: Tue, 22 Sep 2015 08:43:08 +0300 -Subject: [PATCH 215/226] dpaa2-evb: Added Edge Virtual Bridge driver - -This is a commit of the cumulative, squashed dpaa2-evb patches. -All the commit logs are preserved below. - -Signed-off-by: Stuart Yoder - ----------------------------------------------------------------- - -dpaa2-evb: Added Edge Virtual Bridge driver - -This contains the following patches migrated from linux-v4.0: -staging: fsl-dpaa2: evb: Added Edge Virtual Bridge driver -staging: fsl-dpaa2: evb: Added ethtool port counters -staging: fsl-dpaa2: evb: Include by default in configuration -staging: fsl-dpaa2: evb: Rebasing onto kernel 4.0 -staging: fsl-dpaa2: evb: Port to MC-0.7 FLibs -dpaa2-evb: Set carrier state on port open -dpaa2-evb: Add support for link state update -dpaa2-evb: Update flib to MC 8.0.1 -staging: fsl-mc: migrated remaining flibs for MC fw 8.0.0 (split) - -Initial patches have been signed-off by: -Alex Marginean -J. German Rivera -Bogdan Hamciuc -Razvan Stefanescu - -And reviewed by: -Stuart Yoder - -Porting to linux-v4.1 requires changes related to iflink usage and -ndo_bridge_getlink() parameters list. - -Signed-off-by: Razvan Stefanescu - -dpaa2-evb: Port to linux-v4.1 - -Update iflink usage. -Update evb_getlink() parameter list to match ndo_bridge_getlink(). - -Signed-off-by: Razvan Stefanescu - -dpaa2-evb: Add VLAN_8021Q dependency - -EVB traffic steering methods related to VLAN require VLAN support in kernel. - -Signed-off-by: Razvan Stefanescu - -dpaa2-evb: Update dpdmux binary interface to 5.0 - -This corresponds to MC release 0.8.0. - -Signed-off-by: Razvan Stefanescu - -dpaa2-evb: Add support to set max frame length. - -All the packets bigger than max_frame_length will be dropped.
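-
-For illustration, a sketch of how a caller might apply that limit through
-the dpdmux API added below (the wrapper name and the 1518-byte value are
-assumptions, not part of the patch; mc_io and token come from dpdmux_open()):
-
-	#include "dpdmux.h"
-
-	/* Frames longer than the configured maximum are dropped on the uplink. */
-	static int evb_cap_uplink_mfl(struct fsl_mc_io *mc_io, uint16_t token)
-	{
-		return dpdmux_ul_set_max_frame_length(mc_io, 0, token, 1518);
-	}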
- -Signed-off-by: Mihaela Panescu - -dpaa2-evb: resolve compile issues on uprev to 4.5 - --irq_number field no longer exists in fsl-mc interrupt - struct --netdev_master_upper_dev_link() has 2 new parameters, which - are set to NULL for now - -Signed-off-by: Stuart Yoder ---- - MAINTAINERS | 6 + - drivers/staging/fsl-dpaa2/Kconfig | 1 + - drivers/staging/fsl-dpaa2/Makefile | 1 + - drivers/staging/fsl-dpaa2/evb/Kconfig | 8 + - drivers/staging/fsl-dpaa2/evb/Makefile | 10 + - drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h | 256 ++++++ - drivers/staging/fsl-dpaa2/evb/dpdmux.c | 567 +++++++++++++ - drivers/staging/fsl-dpaa2/evb/dpdmux.h | 724 +++++++++++++++++ - drivers/staging/fsl-dpaa2/evb/evb.c | 1216 ++++++++++++++++++++++++++++ - 9 files changed, 2789 insertions(+) - create mode 100644 drivers/staging/fsl-dpaa2/evb/Kconfig - create mode 100644 drivers/staging/fsl-dpaa2/evb/Makefile - create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h - create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.c - create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.h - create mode 100644 drivers/staging/fsl-dpaa2/evb/evb.c - ---- a/MAINTAINERS -+++ b/MAINTAINERS -@@ -4560,6 +4560,12 @@ L: linux-kernel@vger.kernel.org - S: Maintained - F: drivers/staging/fsl-dpaa2/mac/ - -+FREESCALE DPAA2 EDGE VIRTUAL BRIDGE DRIVER -+M: Alex Marginean -+L: linux-kernel@vger.kernel.org -+S: Maintained -+F: drivers/staging/fsl-dpaa2/evb/ -+ - FREEVXFS FILESYSTEM -M: Christoph Hellwig -W: ftp://ftp.openlinux.org/pub/people/hch/vxfs ---- a/drivers/staging/fsl-dpaa2/Kconfig -+++ b/drivers/staging/fsl-dpaa2/Kconfig -@@ -10,3 +10,4 @@ config FSL_DPAA2 - # TODO move DPIO driver in-here? - source "drivers/staging/fsl-dpaa2/ethernet/Kconfig" - source "drivers/staging/fsl-dpaa2/mac/Kconfig" -+source "drivers/staging/fsl-dpaa2/evb/Kconfig" ---- a/drivers/staging/fsl-dpaa2/Makefile -+++ b/drivers/staging/fsl-dpaa2/Makefile -@@ -4,3 +4,4 @@ - - obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/ - obj-$(CONFIG_FSL_DPAA2_MAC) += mac/ -+obj-$(CONFIG_FSL_DPAA2_EVB) += evb/ ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/evb/Kconfig -@@ -0,0 +1,8 @@ -+config FSL_DPAA2_EVB -+ tristate "DPAA2 Edge Virtual Bridge" -+ depends on FSL_MC_BUS && FSL_DPAA2 && FSL_DPAA2_ETH -+ select FSL_DPAA2_MAC -+ select VLAN_8021Q -+ default y -+ ---help--- -+ Prototype driver for DPAA2 Edge Virtual Bridge. ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/evb/Makefile -@@ -0,0 +1,10 @@ -+ -+obj-$(CONFIG_FSL_DPAA2_EVB) += dpaa2-evb.o -+ -+dpaa2-evb-objs := evb.o dpdmux.o -+ -+all: -+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules -+ -+clean: -+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h -@@ -0,0 +1,256 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPDMUX_CMD_H -+#define _FSL_DPDMUX_CMD_H -+ -+/* DPDMUX Version */ -+#define DPDMUX_VER_MAJOR 5 -+#define DPDMUX_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPDMUX_CMDID_CLOSE 0x800 -+#define DPDMUX_CMDID_OPEN 0x806 -+#define DPDMUX_CMDID_CREATE 0x906 -+#define DPDMUX_CMDID_DESTROY 0x900 -+ -+#define DPDMUX_CMDID_ENABLE 0x002 -+#define DPDMUX_CMDID_DISABLE 0x003 -+#define DPDMUX_CMDID_GET_ATTR 0x004 -+#define DPDMUX_CMDID_RESET 0x005 -+#define DPDMUX_CMDID_IS_ENABLED 0x006 -+ -+#define DPDMUX_CMDID_SET_IRQ 0x010 -+#define DPDMUX_CMDID_GET_IRQ 0x011 -+#define DPDMUX_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPDMUX_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPDMUX_CMDID_SET_IRQ_MASK 0x014 -+#define DPDMUX_CMDID_GET_IRQ_MASK 0x015 -+#define DPDMUX_CMDID_GET_IRQ_STATUS 0x016 -+#define DPDMUX_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPDMUX_CMDID_UL_SET_MAX_FRAME_LENGTH 0x0a1 -+ -+#define DPDMUX_CMDID_UL_RESET_COUNTERS 0x0a3 -+ -+#define DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES 0x0a7 -+#define DPDMUX_CMDID_IF_GET_ATTR 0x0a8 -+ -+#define DPDMUX_CMDID_IF_ADD_L2_RULE 0x0b0 -+#define DPDMUX_CMDID_IF_REMOVE_L2_RULE 0x0b1 -+#define DPDMUX_CMDID_IF_GET_COUNTER 0x0b2 -+#define DPDMUX_CMDID_IF_SET_LINK_CFG 0x0b3 -+#define DPDMUX_CMDID_IF_GET_LINK_STATE 0x0b4 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_OPEN(cmd, dpdmux_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpdmux_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, enum dpdmux_method, cfg->method);\ -+ MC_CMD_OP(cmd, 0, 8, 8, enum dpdmux_manip, cfg->manip);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs);\ -+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->adv.max_dmat_entries);\ -+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, cfg->adv.max_mc_groups);\ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->adv.max_vlan_ids);\ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->adv.options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ 
MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) \ -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+#define DPDMUX_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 8, enum dpdmux_method, attr->method);\ -+ MC_RSP_OP(cmd, 0, 8, 8, enum dpdmux_manip, attr->manip);\ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, attr->num_ifs);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->mem_size);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, attr->options);\ -+ MC_RSP_OP(cmd, 4, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 4, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_UL_SET_MAX_FRAME_LENGTH(cmd, max_frame_length) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, max_frame_length) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 4, enum dpdmux_accepted_frames_type, cfg->type);\ -+ MC_CMD_OP(cmd, 0, 20, 4, enum dpdmux_unaccepted_frames_action, \ -+ cfg->unaccept_act);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_IF_GET_ATTR(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, 
param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_IF_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 56, 4, enum dpdmux_accepted_frames_type, \ -+ attr->accept_frame_type);\ -+ MC_RSP_OP(cmd, 0, 24, 1, int, attr->enabled);\ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->rate);\ -+} while (0) -+ -+#define DPDMUX_CMD_IF_REMOVE_L2_RULE(cmd, if_id, l2_rule) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, l2_rule->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, l2_rule->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, l2_rule->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, l2_rule->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, l2_rule->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, l2_rule->mac_addr[0]);\ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, l2_rule->vlan_id);\ -+} while (0) -+ -+#define DPDMUX_CMD_IF_ADD_L2_RULE(cmd, if_id, l2_rule) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, l2_rule->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, l2_rule->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, l2_rule->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, l2_rule->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, l2_rule->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, l2_rule->mac_addr[0]);\ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, l2_rule->vlan_id);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_IF_GET_COUNTER(cmd, if_id, counter_type) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, enum dpdmux_counter_type, counter_type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_IF_GET_COUNTER(cmd, counter) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_IF_GET_LINK_STATE(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_IF_GET_LINK_STATE(cmd, state) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\ -+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\ -+} while (0) -+ -+#endif /* _FSL_DPDMUX_CMD_H */ ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.c -@@ -0,0 +1,567 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include "../../fsl-mc/include/mc-sys.h" -+#include "../../fsl-mc/include/mc-cmd.h" -+#include "dpdmux.h" -+#include "dpdmux-cmd.h" -+ -+int dpdmux_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpdmux_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPDMUX_CMD_OPEN(cmd, dpdmux_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpdmux_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpdmux_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPDMUX_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpdmux_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t 
cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IS_ENABLED, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpdmux_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpdmux_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpdmux_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters 
*/ -+ DPDMUX_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpdmux_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdmux_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpdmux_ul_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t max_frame_length) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_SET_MAX_FRAME_LENGTH, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_UL_SET_MAX_FRAME_LENGTH(cmd, max_frame_length); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpdmux_accepted_frames *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpdmux_if_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_ATTR, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_IF_GET_ATTR(cmd, if_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_IF_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpdmux_l2_rule *rule) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command 
*/ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_REMOVE_L2_RULE, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_IF_REMOVE_L2_RULE(cmd, if_id, rule); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpdmux_l2_rule *rule) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ADD_L2_RULE, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_IF_ADD_L2_RULE(cmd, if_id, rule); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_if_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ enum dpdmux_counter_type counter_type, -+ uint64_t *counter) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_COUNTER, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_IF_GET_COUNTER(cmd, if_id, counter_type); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_IF_GET_COUNTER(cmd, *counter); -+ -+ return 0; -+} -+ -+int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpdmux_link_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_LINK_CFG, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpdmux_link_state *state) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_LINK_STATE, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_IF_GET_LINK_STATE(cmd, if_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_IF_GET_LINK_STATE(cmd, state); -+ -+ return 0; -+} ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.h -@@ -0,0 +1,724 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPDMUX_H -+#define __FSL_DPDMUX_H -+ -+#include "../../fsl-mc/include/net.h" -+ -+struct fsl_mc_io; -+ -+/* Data Path Demux API -+ * Contains API for handling DPDMUX topology and functionality -+ */ -+ -+/** -+ * dpdmux_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpdmux_id: DPDMUX unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpdmux_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpdmux_id, -+ uint16_t *token); -+ -+/** -+ * dpdmux_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpdmux_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * DPDMUX general options -+ */ -+ -+/** -+ * Enable bridging between internal interfaces -+ */ -+#define DPDMUX_OPT_BRIDGE_EN 0x0000000000000002ULL -+ -+#define DPDMUX_IRQ_INDEX_IF 0x0000 -+#define DPDMUX_IRQ_INDEX 0x0001 -+ -+/** -+ * IRQ event - Indicates that the link state changed -+ */ -+#define DPDMUX_IRQ_EVENT_LINK_CHANGED 0x0001 -+ -+/** -+ * enum dpdmux_manip - DPDMUX manipulation operations -+ * @DPDMUX_MANIP_NONE: No manipulation on frames -+ * @DPDMUX_MANIP_ADD_REMOVE_S_VLAN: Add S-VLAN on egress, remove it on ingress -+ */ -+enum dpdmux_manip { -+ DPDMUX_MANIP_NONE = 0x0, -+ DPDMUX_MANIP_ADD_REMOVE_S_VLAN = 0x1 -+}; -+ -+/** -+ * enum dpdmux_method - DPDMUX method options -+ * @DPDMUX_METHOD_NONE: no DPDMUX method -+ * @DPDMUX_METHOD_C_VLAN_MAC: DPDMUX based on C-VLAN and MAC address -+ * @DPDMUX_METHOD_MAC: DPDMUX based on MAC address -+ * @DPDMUX_METHOD_C_VLAN: DPDMUX based on C-VLAN -+ * @DPDMUX_METHOD_S_VLAN: DPDMUX based on S-VLAN -+ */ -+enum dpdmux_method { -+ DPDMUX_METHOD_NONE = 0x0, -+ DPDMUX_METHOD_C_VLAN_MAC = 0x1, -+ DPDMUX_METHOD_MAC = 0x2, -+ DPDMUX_METHOD_C_VLAN = 0x3, -+ DPDMUX_METHOD_S_VLAN = 0x4 -+}; -+ -+/** -+ * struct dpdmux_cfg - DPDMUX configuration parameters -+ * @method: Defines the operation method for the DPDMUX address table -+ * @manip: Required manipulation operation -+ * @num_ifs: Number of interfaces (excluding the uplink interface) -+ * @adv: Advanced parameters; default is all zeros; -+ * use this structure to change default settings -+ */ -+struct dpdmux_cfg { -+ enum dpdmux_method method; -+ enum dpdmux_manip manip; -+ uint16_t num_ifs; -+ /** -+ * struct adv - Advanced parameters -+ * @options: DPDMUX options - combination of 'DPDMUX_OPT_' flags -+ * @max_dmat_entries: Maximum entries in DPDMUX address table -+ * 0 - indicates default: 64 entries per interface. -+ * @max_mc_groups: Number of multicast groups in DPDMUX table -+ * 0 - indicates default: 32 multicast groups -+ * @max_vlan_ids: max vlan ids allowed in the system - -+ * relevant only in case of working in mac+vlan method. -+ * 0 - indicates default 16 vlan ids. -+ */ -+ struct { -+ uint64_t options; -+ uint16_t max_dmat_entries; -+ uint16_t max_mc_groups; -+ uint16_t max_vlan_ids; -+ } adv; -+}; -+ -+/** -+ * dpdmux_create() - Create the DPDMUX object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPDMUX object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpdmux_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpdmux_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpdmux_destroy() - Destroy the DPDMUX object and release all its resources.
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpdmux_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdmux_enable() - Enable DPDMUX functionality -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdmux_disable() - Disable DPDMUX functionality -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdmux_is_enabled() - Check if the DPDMUX is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpdmux_reset() - Reset the DPDMUX, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpdmux_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpdmux_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpdmux_set_irq() - Set IRQ information for the DPDMUX to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpdmux_irq_cfg *irq_cfg); -+ -+/** -+ * dpdmux_get_irq() - Get IRQ information from the DPDMUX. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpdmux_irq_cfg *irq_cfg); -+ -+/** -+ * dpdmux_set_irq_enable() - Set overall interrupt state. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable controls the -+ * overall interrupt state. If the interrupt is disabled, no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpdmux_get_irq_enable() - Get overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpdmux_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpdmux_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpdmux_get_irq_status() - Get the current status of any pending interrupts. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise.
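Taken together, these calls form the usual configure-then-arm sequence. A minimal sketch, illustrative only, with mc_io and token as returned by dpdmux_open(); it mirrors what evb_setup_irqs() does in evb.c further below:

	/* keep the interrupt quiet while (re)configuring it */
	dpdmux_set_irq_enable(mc_io, 0, token, DPDMUX_IRQ_INDEX_IF, 0);

	/* unmask only the causes we actually handle */
	dpdmux_set_irq_mask(mc_io, 0, token, DPDMUX_IRQ_INDEX_IF,
			    DPDMUX_IRQ_EVENT_LINK_CHANGED);

	/* arm it; unmasked causes may now assert the IRQ */
	dpdmux_set_irq_enable(mc_io, 0, token, DPDMUX_IRQ_INDEX_IF, 1);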
-+ */ -+int dpdmux_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpdmux_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpdmux_attr - Structure representing DPDMUX attributes -+ * @id: DPDMUX object ID -+ * @version: DPDMUX version -+ * @options: Configuration options (bitmap) -+ * @method: DPDMUX address table method -+ * @manip: DPDMUX manipulation type -+ * @num_ifs: Number of interfaces (excluding the uplink interface) -+ * @mem_size: DPDMUX frame storage memory size -+ */ -+struct dpdmux_attr { -+ int id; -+ /** -+ * struct version - DPDMUX version -+ * @major: DPDMUX major version -+ * @minor: DPDMUX minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint64_t options; -+ enum dpdmux_method method; -+ enum dpdmux_manip manip; -+ uint16_t num_ifs; -+ uint16_t mem_size; -+}; -+ -+/** -+ * dpdmux_get_attributes() - Retrieve DPDMUX attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdmux_attr *attr); -+ -+/** -+ * dpdmux_ul_set_max_frame_length() - Set the maximum frame length in DPDMUX -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @max_frame_length: The required maximum frame length -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpdmux_ul_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t max_frame_length); -+ -+/** -+ * enum dpdmux_counter_type - Counter types -+ * @DPDMUX_CNT_ING_FRAME: Counts ingress frames -+ * @DPDMUX_CNT_ING_BYTE: Counts ingress bytes -+ * @DPDMUX_CNT_ING_FLTR_FRAME: Counts filtered ingress frames -+ * @DPDMUX_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames -+ * @DPDMUX_CNT_ING_MCAST_FRAME: Counts ingress multicast frames -+ * @DPDMUX_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes -+ * @DPDMUX_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames -+ * @DPDMUX_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes -+ * @DPDMUX_CNT_EGR_FRAME: Counts egress frames -+ * @DPDMUX_CNT_EGR_BYTE: Counts egress bytes -+ * @DPDMUX_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames -+ */ -+enum dpdmux_counter_type { -+ DPDMUX_CNT_ING_FRAME = 0x0, -+ DPDMUX_CNT_ING_BYTE = 0x1, -+ DPDMUX_CNT_ING_FLTR_FRAME = 0x2, -+ DPDMUX_CNT_ING_FRAME_DISCARD = 0x3, -+ DPDMUX_CNT_ING_MCAST_FRAME = 0x4, -+ DPDMUX_CNT_ING_MCAST_BYTE = 0x5, -+ DPDMUX_CNT_ING_BCAST_FRAME = 0x6, -+ DPDMUX_CNT_ING_BCAST_BYTES = 0x7, -+ DPDMUX_CNT_EGR_FRAME = 0x8, -+ DPDMUX_CNT_EGR_BYTE = 0x9, -+ DPDMUX_CNT_EGR_FRAME_DISCARD = 0xa -+}; -+ -+/** -+ * enum dpdmux_accepted_frames_type - DPDMUX frame types -+ * @DPDMUX_ADMIT_ALL: The device accepts VLAN tagged, untagged and -+ * priority-tagged frames -+ * @DPDMUX_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or -+ * priority-tagged frames that are received on this -+ * interface -+ * @DPDMUX_ADMIT_ONLY_UNTAGGED: Untagged frames or priority-tagged frames -+ * received on this interface are accepted -+ */ -+enum dpdmux_accepted_frames_type { -+ DPDMUX_ADMIT_ALL = 0, -+ DPDMUX_ADMIT_ONLY_VLAN_TAGGED = 1, -+ DPDMUX_ADMIT_ONLY_UNTAGGED = 2 -+}; -+ -+/** -+ * enum dpdmux_action - DPDMUX action for un-accepted frames -+ * @DPDMUX_ACTION_DROP: Drop un-accepted frames -+ * @DPDMUX_ACTION_REDIRECT_TO_CTRL: Redirect un-accepted frames to the -+ * control interface -+ */ -+enum dpdmux_action { -+ DPDMUX_ACTION_DROP = 0, -+ DPDMUX_ACTION_REDIRECT_TO_CTRL = 1 -+}; -+ -+/** -+ * struct dpdmux_accepted_frames - Frame types configuration -+ * @type: Defines ingress accepted frames -+ * @unaccept_act: Defines action on frames not accepted -+ */ -+struct dpdmux_accepted_frames { -+ enum dpdmux_accepted_frames_type type; -+ enum dpdmux_action unaccept_act; -+}; -+ -+/** -+ * dpdmux_if_set_accepted_frames() - Set the accepted frame types -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @if_id: Interface ID (0 for uplink, or 1-num_ifs); -+ * @cfg: Frame types configuration -+ * -+ * If 'DPDMUX_ADMIT_ONLY_VLAN_TAGGED' is set - untagged frames or -+ * priority-tagged frames are discarded. -+ * If 'DPDMUX_ADMIT_ONLY_UNTAGGED' is set - untagged frames or -+ * priority-tagged frames are accepted. -+ * If 'DPDMUX_ADMIT_ALL' is set (default mode) - all VLAN tagged, -+ * untagged and priority-tagged frames are accepted. -+ * -+ * Return: '0' on Success; Error code otherwise.
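For example, restricting the first downlink to VLAN-tagged traffic and dropping everything else could look like the following sketch (illustrative only; mc_io and token as returned by dpdmux_open(), with interface 1 being the first downlink):

	struct dpdmux_accepted_frames cfg = {
		.type		= DPDMUX_ADMIT_ONLY_VLAN_TAGGED,
		.unaccept_act	= DPDMUX_ACTION_DROP,	/* drop untagged/priority-tagged */
	};
	int err = dpdmux_if_set_accepted_frames(mc_io, 0, token, 1, &cfg);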
-+ */ -+int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpdmux_accepted_frames *cfg); -+ -+/** -+ * struct dpdmux_if_attr - Structure representing frame types configuration -+ * @rate: Configured interface rate (in bits per second) -+ * @enabled: Indicates if interface is enabled -+ * @accept_frame_type: Indicates type of accepted frames for the interface -+ */ -+struct dpdmux_if_attr { -+ uint32_t rate; -+ int enabled; -+ enum dpdmux_accepted_frames_type accept_frame_type; -+}; -+ -+/** -+ * dpdmux_if_get_attributes() - Obtain DPDMUX interface attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @if_id: Interface ID (0 for uplink, or 1-num_ifs); -+ * @attr: Interface attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpdmux_if_attr *attr); -+ -+/** -+ * struct dpdmux_l2_rule - Structure representing L2 rule -+ * @mac_addr: MAC address -+ * @vlan_id: VLAN ID -+ */ -+struct dpdmux_l2_rule { -+ uint8_t mac_addr[6]; -+ uint16_t vlan_id; -+}; -+ -+/** -+ * dpdmux_if_remove_l2_rule() - Remove L2 rule from DPDMUX table -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @if_id: Destination interface ID -+ * @rule: L2 rule -+ * -+ * Function removes an L2 rule from the DPDMUX table -+ * or removes an interface from an existing multicast address -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpdmux_l2_rule *rule); -+ -+/** -+ * dpdmux_if_add_l2_rule() - Add L2 rule into DPDMUX table -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @if_id: Destination interface ID -+ * @rule: L2 rule -+ * -+ * Function adds an L2 rule into the DPDMUX table -+ * or adds an interface to an existing multicast address -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpdmux_l2_rule *rule); -+ -+/** -+* dpdmux_if_get_counter() - Function obtains a specific counter of an interface -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPDMUX object -+* @if_id: Interface ID -+* @counter_type: Counter type -+* @counter: Returned specific counter information -+* -+* Return: '0' on Success; Error code otherwise. -+*/ -+int dpdmux_if_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ enum dpdmux_counter_type counter_type, -+ uint64_t *counter); -+ -+/** -+* dpdmux_ul_reset_counters() - Function resets the uplink counter -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPDMUX object -+* -+* Return: '0' on Success; Error code otherwise.
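In sketch form, reading a pair of ingress counters for an interface looks like this (illustrative only; mc_io, token and if_id as above). evb_port_get_stats() in evb.c further below builds its struct rtnl_link_stats64 from exactly these calls:

	uint64_t rx_frames = 0, rx_bytes = 0;

	dpdmux_if_get_counter(mc_io, 0, token, if_id,
			      DPDMUX_CNT_ING_FRAME, &rx_frames);
	dpdmux_if_get_counter(mc_io, 0, token, if_id,
			      DPDMUX_CNT_ING_BYTE, &rx_bytes);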
-+*/ -+int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * Enable auto-negotiation -+ */ -+#define DPDMUX_LINK_OPT_AUTONEG 0x0000000000000001ULL -+/** -+ * Enable half-duplex mode -+ */ -+#define DPDMUX_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -+/** -+ * Enable pause frames -+ */ -+#define DPDMUX_LINK_OPT_PAUSE 0x0000000000000004ULL -+/** -+ * Enable asymmetric pause frames -+ */ -+#define DPDMUX_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL -+ -+/** -+ * struct dpdmux_link_cfg - Structure representing DPDMUX link configuration -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPDMUX_LINK_OPT_' values -+ */ -+struct dpdmux_link_cfg { -+ uint32_t rate; -+ uint64_t options; -+}; -+ -+/** -+ * dpdmux_if_set_link_cfg() - Set the link configuration. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @if_id: Interface ID -+ * @cfg: Link configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpdmux_link_cfg *cfg); -+/** -+ * struct dpdmux_link_state - Structure representing DPDMUX link state -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPDMUX_LINK_OPT_' values -+ * @up: 0 - down, 1 - up -+ */ -+struct dpdmux_link_state { -+ uint32_t rate; -+ uint64_t options; -+ int up; -+}; -+ -+/** -+ * dpdmux_if_get_link_state() - Return the link state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @if_id: Interface ID -+ * @state: Link state -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpdmux_link_state *state); -+ -+#endif /* __FSL_DPDMUX_H */ ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/evb/evb.c -@@ -0,0 +1,1216 @@ -+/* Copyright 2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED.
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include <linux/module.h> -+#include <linux/msi.h> -+#include <linux/netdevice.h> -+#include <linux/etherdevice.h> -+#include <linux/rtnetlink.h> -+#include <linux/if_vlan.h> -+#include <linux/interrupt.h> -+ -+#include <uapi/linux/if_bridge.h> -+#include <net/netlink.h> -+ -+#include "../../fsl-mc/include/mc.h" -+ -+#include "dpdmux.h" -+#include "dpdmux-cmd.h" -+ -+/* IRQ index */ -+#define DPDMUX_MAX_IRQ_NUM 2 -+ -+/* MAX FRAME LENGTH (currently 10k) */ -+#define EVB_MAX_FRAME_LENGTH (10 * 1024) -+/* MIN FRAME LENGTH (64 bytes + 4 bytes CRC) */ -+#define EVB_MIN_FRAME_LENGTH 68 -+ -+struct evb_port_priv { -+ struct net_device *netdev; -+ struct list_head list; -+ u16 port_index; -+ struct evb_priv *evb_priv; -+ u8 vlans[VLAN_VID_MASK+1]; -+}; -+ -+struct evb_priv { -+ /* keep first */ -+ struct evb_port_priv uplink; -+ -+ struct fsl_mc_io *mc_io; -+ struct list_head port_list; -+ struct dpdmux_attr attr; -+ uint16_t mux_handle; -+ int dev_id; -+}; -+ -+static int _evb_port_carrier_state_sync(struct net_device *netdev) -+{ -+ struct evb_port_priv *port_priv = netdev_priv(netdev); -+ struct dpdmux_link_state state; -+ int err; -+ -+ err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0, -+ port_priv->evb_priv->mux_handle, -+ port_priv->port_index, &state); -+ if (unlikely(err)) { -+ netdev_err(netdev, "dpdmux_if_get_link_state() err %d\n", err); -+ return err; -+ } -+ -+ WARN_ONCE(state.up > 1, "Garbage read into link_state"); -+ -+ if (state.up) -+ netif_carrier_on(port_priv->netdev); -+ else -+ netif_carrier_off(port_priv->netdev); -+ -+ return 0; -+} -+ -+static int evb_port_open(struct net_device *netdev) -+{ -+ int err; -+ -+ /* FIXME: enable port when support added */ -+ -+ err = _evb_port_carrier_state_sync(netdev); -+ if (err) { -+ netdev_err(netdev, "_evb_port_carrier_state_sync err %d\n", -+ err); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static netdev_tx_t evb_dropframe(struct sk_buff *skb, struct net_device *dev) -+{ -+ /* we don't support I/O for now, drop the frame */ -+ dev_kfree_skb_any(skb); -+ return NETDEV_TX_OK; -+} -+ -+static int evb_links_state_update(struct evb_priv *priv) -+{ -+ struct evb_port_priv *port_priv; -+ struct list_head *pos; -+ int err; -+ -+ list_for_each(pos, &priv->port_list) { -+ port_priv = list_entry(pos, struct evb_port_priv, list); -+ -+ err = _evb_port_carrier_state_sync(port_priv->netdev); -+ if (err) -+ netdev_err(port_priv->netdev, -+ "_evb_port_carrier_state_sync err %d\n", -+ err); -+ } -+ -+ return 0; -+} -+ -+static irqreturn_t evb_irq0_handler(int irq_num, void *arg) -+{ -+ return IRQ_WAKE_THREAD; -+} -+ -+static irqreturn_t _evb_irq0_handler_thread(int irq_num, void *arg) -+{ -+ struct device *dev = (struct device *)arg; -+ struct fsl_mc_device *evb_dev = to_fsl_mc_device(dev); -+ struct net_device *netdev = dev_get_drvdata(dev); -+ struct evb_priv *priv = netdev_priv(netdev); -+ struct fsl_mc_io *io = priv->mc_io; -+ uint16_t token = priv->mux_handle; -+ int irq_index = DPDMUX_IRQ_INDEX_IF; -+ uint32_t status = 0, clear = 0; -+ int err; -+ -+ /* Sanity check */ -+ if (WARN_ON(!evb_dev || !evb_dev->irqs || !evb_dev->irqs[irq_index])) -+ goto out; -+ if
(WARN_ON(evb_dev->irqs[irq_index]->msi_desc->irq != irq_num)) -+ goto out; -+ -+ err = dpdmux_get_irq_status(io, 0, token, irq_index, &status); -+ if (unlikely(err)) { -+ netdev_err(netdev, "Can't get irq status (err %d)", err); -+ clear = 0xffffffff; -+ goto out; -+ } -+ -+ /* FIXME clear irq status */ -+ -+ if (status & DPDMUX_IRQ_EVENT_LINK_CHANGED) { -+ clear |= DPDMUX_IRQ_EVENT_LINK_CHANGED; -+ -+ err = evb_links_state_update(priv); -+ if (unlikely(err)) -+ goto out; -+ } -+out: -+ err = dpdmux_clear_irq_status(io, 0, token, irq_index, clear); -+ if (unlikely(err)) -+ netdev_err(netdev, "Can't clear irq status (err %d)", err); -+ return IRQ_HANDLED; -+} -+ -+static int evb_setup_irqs(struct fsl_mc_device *evb_dev) -+{ -+ struct device *dev = &evb_dev->dev; -+ struct net_device *netdev = dev_get_drvdata(dev); -+ struct evb_priv *priv = netdev_priv(netdev); -+ int err = 0; -+ struct fsl_mc_device_irq *irq; -+ const int irq_index = DPDMUX_IRQ_INDEX_IF; -+ uint32_t mask = ~0x0u; /* FIXME: unmask handled irqs */ -+ -+ err = fsl_mc_allocate_irqs(evb_dev); -+ if (unlikely(err)) { -+ dev_err(dev, "MC irqs allocation failed\n"); -+ return err; -+ } -+ -+ if (WARN_ON(evb_dev->obj_desc.irq_count != DPDMUX_MAX_IRQ_NUM)) { -+ err = -EINVAL; -+ goto free_irq; -+ } -+ -+ err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle, -+ irq_index, 0); -+ if (unlikely(err)) { -+ dev_err(dev, "dpdmux_set_irq_enable err %d\n", err); -+ goto free_irq; -+ } -+ -+ irq = evb_dev->irqs[irq_index]; -+ -+ err = devm_request_threaded_irq(dev, irq->msi_desc->irq, -+ evb_irq0_handler, -+ _evb_irq0_handler_thread, -+ IRQF_NO_SUSPEND | IRQF_ONESHOT, -+ dev_name(dev), dev); -+ if (unlikely(err)) { -+ dev_err(dev, "devm_request_threaded_irq(): %d", err); -+ goto free_irq; -+ } -+ -+ err = dpdmux_set_irq_mask(priv->mc_io, 0, priv->mux_handle, -+ irq_index, mask); -+ if (unlikely(err)) { -+ dev_err(dev, "dpdmux_set_irq_mask(): %d", err); -+ goto free_devm_irq; -+ } -+ -+ err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle, -+ irq_index, 1); -+ if (unlikely(err)) { -+ dev_err(dev, "dpdmux_set_irq_enable(): %d", err); -+ goto free_devm_irq; -+ } -+ -+ return 0; -+ -+free_devm_irq: -+ devm_free_irq(dev, irq->msi_desc->irq, dev); -+free_irq: -+ fsl_mc_free_irqs(evb_dev); -+ return err; -+} -+ -+static void evb_teardown_irqs(struct fsl_mc_device *evb_dev) -+{ -+ struct device *dev = &evb_dev->dev; -+ struct net_device *netdev = dev_get_drvdata(dev); -+ struct evb_priv *priv = netdev_priv(netdev); -+ -+ dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle, -+ DPDMUX_IRQ_INDEX_IF, 0); -+ -+ devm_free_irq(dev, -+ evb_dev->irqs[DPDMUX_IRQ_INDEX_IF]->msi_desc->irq, -+ dev); -+ fsl_mc_free_irqs(evb_dev); -+} -+ -+static int evb_port_add_rule(struct net_device *netdev, -+ const unsigned char *addr, u16 vid) -+{ -+ struct evb_port_priv *port_priv = netdev_priv(netdev); -+ struct dpdmux_l2_rule rule = { .vlan_id = vid }; -+ int err; -+ -+ if (addr) -+ ether_addr_copy(rule.mac_addr, addr); -+ -+ err = dpdmux_if_add_l2_rule(port_priv->evb_priv->mc_io, -+ 0, -+ port_priv->evb_priv->mux_handle, -+ port_priv->port_index, &rule); -+ if (unlikely(err)) -+ netdev_err(netdev, "dpdmux_if_add_l2_rule err %d\n", err); -+ return err; -+} -+ -+static int evb_port_del_rule(struct net_device *netdev, -+ const unsigned char *addr, u16 vid) -+{ -+ struct evb_port_priv *port_priv = netdev_priv(netdev); -+ struct dpdmux_l2_rule rule = { .vlan_id = vid }; -+ int err; -+ -+ if (addr) -+ ether_addr_copy(rule.mac_addr, addr); -+ -+ err = 
dpdmux_if_remove_l2_rule(port_priv->evb_priv->mc_io, -+ 0, -+ port_priv->evb_priv->mux_handle, -+ port_priv->port_index, &rule); -+ if (unlikely(err)) -+ netdev_err(netdev, "dpdmux_if_remove_l2_rule err %d\n", err); -+ return err; -+} -+ -+static bool _lookup_address(struct net_device *netdev, -+ const unsigned char *addr) -+{ -+ struct netdev_hw_addr *ha; -+ struct netdev_hw_addr_list *list = (is_unicast_ether_addr(addr)) ? -+ &netdev->uc : &netdev->mc; -+ -+ netif_addr_lock_bh(netdev); -+ list_for_each_entry(ha, &list->list, list) { -+ if (ether_addr_equal(ha->addr, addr)) { -+ netif_addr_unlock_bh(netdev); -+ return true; -+ } -+ } -+ netif_addr_unlock_bh(netdev); -+ return false; -+} -+ -+static inline int evb_port_fdb_prep(struct nlattr *tb[], -+ struct net_device *netdev, -+ const unsigned char *addr, u16 *vid, -+ bool del) -+{ -+ struct evb_port_priv *port_priv = netdev_priv(netdev); -+ struct evb_priv *evb_priv = port_priv->evb_priv; -+ -+ *vid = 0; -+ -+ if (evb_priv->attr.method != DPDMUX_METHOD_MAC && -+ evb_priv->attr.method != DPDMUX_METHOD_C_VLAN_MAC) { -+ netdev_err(netdev, -+ "EVB mode does not support MAC classification\n"); -+ return -EOPNOTSUPP; -+ } -+ -+ /* check if the address is configured on this port */ -+ if (_lookup_address(netdev, addr)) { -+ if (!del) -+ return -EEXIST; -+ } else { -+ if (del) -+ return -ENOENT; -+ } -+ -+ if (tb[NDA_VLAN] && evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) { -+ if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) { -+ netdev_err(netdev, "invalid vlan size %d\n", -+ nla_len(tb[NDA_VLAN])); -+ return -EINVAL; -+ } -+ -+ *vid = nla_get_u16(tb[NDA_VLAN]); -+ -+ if (!*vid || *vid >= VLAN_VID_MASK) { -+ netdev_err(netdev, "invalid vid value 0x%04x\n", *vid); -+ return -EINVAL; -+ } -+ } else if (evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) { -+ netdev_err(netdev, -+ "EVB mode requires explicit VLAN configuration\n"); -+ return -EINVAL; -+ } else if (tb[NDA_VLAN]) { -+ netdev_warn(netdev, "VLAN not supported, argument ignored\n"); -+ } -+ -+ return 0; -+} -+ -+static int evb_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], -+ struct net_device *netdev, -+ const unsigned char *addr, u16 vid, u16 flags) -+{ -+ u16 _vid; -+ int err; -+ -+ /* TODO: add replace support when added to iproute bridge */ -+ if (!(flags & NLM_F_REQUEST)) { -+ netdev_err(netdev, -+ "evb_port_fdb_add unexpected flags value %08x\n", -+ flags); -+ return -EINVAL; -+ } -+ -+ err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 0); -+ if (unlikely(err)) -+ return err; -+ -+ -+ err = evb_port_add_rule(netdev, addr, _vid); -+ if (unlikely(err)) -+ return err; -+ -+ if (is_unicast_ether_addr(addr)) { -+ err = dev_uc_add(netdev, addr); -+ if (unlikely(err)) { -+ netdev_err(netdev, "dev_uc_add err %d\n", err); -+ return err; -+ } -+ } else { -+ err = dev_mc_add(netdev, addr); -+ if (unlikely(err)) { -+ netdev_err(netdev, "dev_mc_add err %d\n", err); -+ return err; -+ } -+ } -+ -+ return 0; -+} -+ -+static int evb_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], -+ struct net_device *netdev, -+ const unsigned char *addr, u16 vid) -+{ -+ u16 _vid; -+ int err; -+ -+ err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 1); -+ if (unlikely(err)) -+ return err; -+ -+ err = evb_port_del_rule(netdev, addr, _vid); -+ if (unlikely(err)) -+ return err; -+ -+ if (is_unicast_ether_addr(addr)) { -+ err = dev_uc_del(netdev, addr); -+ if (unlikely(err)) { -+ netdev_err(netdev, "dev_uc_del err %d\n", err); -+ return err; -+ } -+ } else { -+ err = dev_mc_del(netdev, addr); -+ if 
(unlikely(err)) { -+ netdev_err(netdev, "dev_mc_del err %d\n", err); -+ return err; -+ } -+ } -+ -+ return 0; -+} -+ -+static int evb_change_mtu(struct net_device *netdev, -+ int mtu) -+{ -+ struct evb_port_priv *port_priv = netdev_priv(netdev); -+ struct evb_priv *evb_priv = port_priv->evb_priv; -+ struct list_head *pos; -+ int err = 0; -+ -+ /* This operation is not permitted on downlinks */ -+ if (port_priv->port_index > 0) -+ return -EPERM; -+ -+ if (mtu < EVB_MIN_FRAME_LENGTH || mtu > EVB_MAX_FRAME_LENGTH) { -+ netdev_err(netdev, "Invalid MTU %d. Valid range is: %d..%d\n", -+ mtu, EVB_MIN_FRAME_LENGTH, EVB_MAX_FRAME_LENGTH); -+ return -EINVAL; -+ } -+ -+ err = dpdmux_ul_set_max_frame_length(evb_priv->mc_io, -+ 0, -+ evb_priv->mux_handle, -+ (uint16_t)mtu); -+ -+ if (unlikely(err)) { -+ netdev_err(netdev, "dpdmux_ul_set_max_frame_length err %d\n", -+ err); -+ return err; -+ } -+ -+ /* Update the max frame length for downlinks */ -+ list_for_each(pos, &evb_priv->port_list) { -+ port_priv = list_entry(pos, struct evb_port_priv, list); -+ port_priv->netdev->mtu = mtu; -+ } -+ -+ netdev->mtu = mtu; -+ return 0; -+} -+ -+static const struct nla_policy ifla_br_policy[IFLA_MAX+1] = { -+ [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 }, -+ [IFLA_BRIDGE_MODE] = { .type = NLA_U16 }, -+ [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY, -+ .len = sizeof(struct bridge_vlan_info), }, -+}; -+ -+static int evb_setlink_af_spec(struct net_device *netdev, -+ struct nlattr **tb) -+{ -+ struct bridge_vlan_info *vinfo; -+ struct evb_port_priv *port_priv = netdev_priv(netdev); -+ int err = 0; -+ -+ if (!tb[IFLA_BRIDGE_VLAN_INFO]) { -+ netdev_err(netdev, "no VLAN INFO in nlmsg\n"); -+ return -EOPNOTSUPP; -+ } -+ -+ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]); -+ -+ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK) -+ return -EINVAL; -+ -+ err = evb_port_add_rule(netdev, NULL, vinfo->vid); -+ if (unlikely(err)) -+ return err; -+ -+ port_priv->vlans[vinfo->vid] = 1; -+ -+ return 0; -+} -+ -+static int evb_setlink(struct net_device *netdev, -+ struct nlmsghdr *nlh, -+ u16 flags) -+{ -+ struct evb_port_priv *port_priv = netdev_priv(netdev); -+ struct evb_priv *evb_priv = port_priv->evb_priv; -+ struct nlattr *attr; -+ struct nlattr *tb[(IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX) ? -+ IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX+1]; -+ int err = 0; -+ -+ if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN && -+ evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) { -+ netdev_err(netdev, -+ "EVB mode does not support VLAN only classification\n"); -+ return -EOPNOTSUPP; -+ } -+ -+ attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); -+ if (attr) { -+ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr, -+ ifla_br_policy); -+ if (unlikely(err)) { -+ netdev_err(netdev, -+ "nla_parse_nested for br_policy err %d\n", -+ err); -+ return err; -+ } -+ -+ err = evb_setlink_af_spec(netdev, tb); -+ return err; -+ } -+ -+ netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC\n"); -+ return -EOPNOTSUPP; -+} -+ -+static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev) -+{ -+ struct evb_port_priv *port_priv = netdev_priv(netdev); -+ struct evb_priv *evb_priv = port_priv->evb_priv; -+ u8 operstate = netif_running(netdev) ? 
-+ netdev->operstate : IF_OPER_DOWN; -+ int iflink; -+ int err; -+ -+ err = nla_put_string(skb, IFLA_IFNAME, netdev->name); -+ if (unlikely(err)) -+ goto nla_put_err; -+ err = nla_put_u32(skb, IFLA_MASTER, evb_priv->uplink.netdev->ifindex); -+ if (unlikely(err)) -+ goto nla_put_err; -+ err = nla_put_u32(skb, IFLA_MTU, netdev->mtu); -+ if (unlikely(err)) -+ goto nla_put_err; -+ err = nla_put_u8(skb, IFLA_OPERSTATE, operstate); -+ if (unlikely(err)) -+ goto nla_put_err; -+ if (netdev->addr_len) { -+ err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len, -+ netdev->dev_addr); -+ if (unlikely(err)) -+ goto nla_put_err; -+ } -+ -+ iflink = dev_get_iflink(netdev); -+ if (netdev->ifindex != iflink) { -+ err = nla_put_u32(skb, IFLA_LINK, iflink); -+ if (unlikely(err)) -+ goto nla_put_err; -+ } -+ -+ return 0; -+ -+nla_put_err: -+ netdev_err(netdev, "nla_put_ err %d\n", err); -+ return err; -+} -+ -+static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev) -+{ -+ struct nlattr *nest; -+ int err; -+ -+ nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED); -+ if (!nest) { -+ netdev_err(netdev, "nla_nest_start failed\n"); -+ return -ENOMEM; -+ } -+ -+ err = nla_put_u8(skb, IFLA_BRPORT_STATE, BR_STATE_FORWARDING); -+ if (unlikely(err)) -+ goto nla_put_err; -+ err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0); -+ if (unlikely(err)) -+ goto nla_put_err; -+ err = nla_put_u32(skb, IFLA_BRPORT_COST, 0); -+ if (unlikely(err)) -+ goto nla_put_err; -+ err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0); -+ if (unlikely(err)) -+ goto nla_put_err; -+ err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0); -+ if (unlikely(err)) -+ goto nla_put_err; -+ err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0); -+ if (unlikely(err)) -+ goto nla_put_err; -+ err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0); -+ if (unlikely(err)) -+ goto nla_put_err; -+ err = nla_put_u8(skb, IFLA_BRPORT_LEARNING, 0); -+ if (unlikely(err)) -+ goto nla_put_err; -+ err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, 1); -+ if (unlikely(err)) -+ goto nla_put_err; -+ nla_nest_end(skb, nest); -+ -+ return 0; -+ -+nla_put_err: -+ netdev_err(netdev, "nla_put_ err %d\n", err); -+ nla_nest_cancel(skb, nest); -+ return err; -+} -+ -+static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev) -+{ -+ struct evb_port_priv *port_priv = netdev_priv(netdev); -+ struct nlattr *nest; -+ struct bridge_vlan_info vinfo; -+ const u8 *vlans = port_priv->vlans; -+ u16 i; -+ int err; -+ -+ nest = nla_nest_start(skb, IFLA_AF_SPEC); -+ if (!nest) { -+ netdev_err(netdev, "nla_nest_start failed"); -+ return -ENOMEM; -+ } -+ -+ for (i = 0; i < VLAN_VID_MASK+1; i++) { -+ if (!vlans[i]) -+ continue; -+ -+ vinfo.flags = 0; -+ vinfo.vid = i; -+ -+ err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO, -+ sizeof(vinfo), &vinfo); -+ if (unlikely(err)) -+ goto nla_put_err; -+ } -+ -+ nla_nest_end(skb, nest); -+ -+ return 0; -+ -+nla_put_err: -+ netdev_err(netdev, "nla_put_ err %d\n", err); -+ nla_nest_cancel(skb, nest); -+ return err; -+} -+ -+static int evb_getlink(struct sk_buff *skb, u32 pid, u32 seq, -+ struct net_device *netdev, u32 filter_mask, int nlflags) -+{ -+ struct evb_port_priv *port_priv = netdev_priv(netdev); -+ struct evb_priv *evb_priv = port_priv->evb_priv; -+ struct ifinfomsg *hdr; -+ struct nlmsghdr *nlh; -+ int err; -+ -+ if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN && -+ evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) { -+ return 0; -+ } -+ -+ nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI); -+ if (!nlh) -+ return -EMSGSIZE; -+ -+ hdr = 
nlmsg_data(nlh); -+ memset(hdr, 0, sizeof(*hdr)); -+ hdr->ifi_family = AF_BRIDGE; -+ hdr->ifi_type = netdev->type; -+ hdr->ifi_index = netdev->ifindex; -+ hdr->ifi_flags = dev_get_flags(netdev); -+ -+ err = __nla_put_netdev(skb, netdev); -+ if (unlikely(err)) -+ goto nla_put_err; -+ -+ err = __nla_put_port(skb, netdev); -+ if (unlikely(err)) -+ goto nla_put_err; -+ -+ /* Check if the VID information is requested */ -+ if (filter_mask & RTEXT_FILTER_BRVLAN) { -+ err = __nla_put_vlan(skb, netdev); -+ if (unlikely(err)) -+ goto nla_put_err; -+ } -+ -+ nlmsg_end(skb, nlh); -+ return skb->len; -+ -+nla_put_err: -+ nlmsg_cancel(skb, nlh); -+ return -EMSGSIZE; -+} -+ -+static int evb_dellink(struct net_device *netdev, -+ struct nlmsghdr *nlh, -+ u16 flags) -+{ -+ struct nlattr *tb[IFLA_BRIDGE_MAX+1]; -+ struct nlattr *spec; -+ struct bridge_vlan_info *vinfo; -+ struct evb_port_priv *port_priv = netdev_priv(netdev); -+ int err = 0; -+ -+ spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); -+ if (!spec) -+ return 0; -+ -+ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy); -+ if (unlikely(err)) -+ return err; -+ -+ if (!tb[IFLA_BRIDGE_VLAN_INFO]) -+ return -EOPNOTSUPP; -+ -+ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]); -+ -+ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK) -+ return -EINVAL; -+ -+ err = evb_port_del_rule(netdev, NULL, vinfo->vid); -+ if (unlikely(err)) { -+ netdev_err(netdev, "evb_port_del_rule err %d\n", err); -+ return err; -+ } -+ port_priv->vlans[vinfo->vid] = 0; -+ -+ return 0; -+} -+ -+static struct rtnl_link_stats64 * -+evb_port_get_stats(struct net_device *netdev, -+ struct rtnl_link_stats64 *storage) -+{ -+ struct evb_port_priv *port_priv = netdev_priv(netdev); -+ u64 tmp; -+ int err; -+ -+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io, -+ 0, -+ port_priv->evb_priv->mux_handle, -+ port_priv->port_index, -+ DPDMUX_CNT_ING_FRAME, &storage->rx_packets); -+ if (unlikely(err)) -+ goto error; -+ -+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io, -+ 0, -+ port_priv->evb_priv->mux_handle, -+ port_priv->port_index, -+ DPDMUX_CNT_ING_BYTE, &storage->rx_bytes); -+ if (unlikely(err)) -+ goto error; -+ -+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io, -+ 0, -+ port_priv->evb_priv->mux_handle, -+ port_priv->port_index, -+ DPDMUX_CNT_ING_FLTR_FRAME, &tmp); -+ if (unlikely(err)) -+ goto error; -+ -+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io, -+ 0, -+ port_priv->evb_priv->mux_handle, -+ port_priv->port_index, -+ DPDMUX_CNT_ING_FRAME_DISCARD, -+ &storage->rx_dropped); -+ if (unlikely(err)) { -+ storage->rx_dropped = tmp; -+ goto error; -+ } -+ storage->rx_dropped += tmp; -+ -+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io, -+ 0, -+ port_priv->evb_priv->mux_handle, -+ port_priv->port_index, -+ DPDMUX_CNT_ING_MCAST_FRAME, -+ &storage->multicast); -+ if (unlikely(err)) -+ goto error; -+ -+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io, -+ 0, -+ port_priv->evb_priv->mux_handle, -+ port_priv->port_index, -+ DPDMUX_CNT_EGR_FRAME, &storage->tx_packets); -+ if (unlikely(err)) -+ goto error; -+ -+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io, -+ 0, -+ port_priv->evb_priv->mux_handle, -+ port_priv->port_index, -+ DPDMUX_CNT_EGR_BYTE, &storage->tx_bytes); -+ if (unlikely(err)) -+ goto error; -+ -+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io, -+ 0, -+ port_priv->evb_priv->mux_handle, -+ port_priv->port_index, -+ DPDMUX_CNT_EGR_FRAME_DISCARD, -+ &storage->tx_dropped); -+ if (unlikely(err)) -+ goto 
error; -+ -+ return storage; -+ -+error: -+ netdev_err(netdev, "dpdmux_if_get_counter err %d\n", err); -+ return storage; -+} -+ -+static const struct net_device_ops evb_port_ops = { -+ .ndo_open = &evb_port_open, -+ -+ .ndo_start_xmit = &evb_dropframe, -+ -+ .ndo_fdb_add = &evb_port_fdb_add, -+ .ndo_fdb_del = &evb_port_fdb_del, -+ -+ .ndo_get_stats64 = &evb_port_get_stats, -+ .ndo_change_mtu = &evb_change_mtu, -+}; -+ -+static struct { -+ enum dpdmux_counter_type id; -+ char name[ETH_GSTRING_LEN]; -+} evb_ethtool_counters[] = { -+ {DPDMUX_CNT_ING_FRAME, "rx frames"}, -+ {DPDMUX_CNT_ING_BYTE, "rx bytes"}, -+ {DPDMUX_CNT_ING_FLTR_FRAME, "rx filtered frames"}, -+ {DPDMUX_CNT_ING_FRAME_DISCARD, "rx discarded frames"}, -+ {DPDMUX_CNT_ING_BCAST_FRAME, "rx b-cast frames"}, -+ {DPDMUX_CNT_ING_BCAST_BYTES, "rx b-cast bytes"}, -+ {DPDMUX_CNT_ING_MCAST_FRAME, "rx m-cast frames"}, -+ {DPDMUX_CNT_ING_MCAST_BYTE, "rx m-cast bytes"}, -+ {DPDMUX_CNT_EGR_FRAME, "tx frames"}, -+ {DPDMUX_CNT_EGR_BYTE, "tx bytes"}, -+ {DPDMUX_CNT_EGR_FRAME_DISCARD, "tx discarded frames"}, -+}; -+ -+static int evb_ethtool_get_sset_count(struct net_device *dev, int sset) -+{ -+ switch (sset) { -+ case ETH_SS_STATS: -+ return ARRAY_SIZE(evb_ethtool_counters); -+ default: -+ return -EOPNOTSUPP; -+ } -+} -+ -+static void evb_ethtool_get_strings(struct net_device *netdev, -+ u32 stringset, u8 *data) -+{ -+ int i; -+ -+ switch (stringset) { -+ case ETH_SS_STATS: -+ for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++) -+ memcpy(data + i * ETH_GSTRING_LEN, -+ evb_ethtool_counters[i].name, ETH_GSTRING_LEN); -+ break; -+ } -+} -+ -+static void evb_ethtool_get_stats(struct net_device *netdev, -+ struct ethtool_stats *stats, -+ u64 *data) -+{ -+ struct evb_port_priv *port_priv = netdev_priv(netdev); -+ int i; -+ int err; -+ -+ for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++) { -+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io, -+ 0, -+ port_priv->evb_priv->mux_handle, -+ port_priv->port_index, -+ evb_ethtool_counters[i].id, -+ &data[i]); -+ if (err) -+ netdev_err(netdev, "dpdmux_if_get_counter[%s] err %d\n", -+ evb_ethtool_counters[i].name, err); -+ } -+} -+ -+static const struct ethtool_ops evb_port_ethtool_ops = { -+ .get_strings = &evb_ethtool_get_strings, -+ .get_ethtool_stats = &evb_ethtool_get_stats, -+ .get_sset_count = &evb_ethtool_get_sset_count, -+}; -+ -+static int evb_open(struct net_device *netdev) -+{ -+ struct evb_priv *priv = netdev_priv(netdev); -+ int err = 0; -+ -+ err = dpdmux_enable(priv->mc_io, 0, priv->mux_handle); -+ if (unlikely(err)) -+ netdev_err(netdev, "dpdmux_enable err %d\n", err); -+ -+ return err; -+} -+ -+static int evb_close(struct net_device *netdev) -+{ -+ struct evb_priv *priv = netdev_priv(netdev); -+ int err = 0; -+ -+ err = dpdmux_disable(priv->mc_io, 0, priv->mux_handle); -+ if (unlikely(err)) -+ netdev_err(netdev, "dpdmux_disable err %d\n", err); -+ -+ return err; -+} -+ -+static const struct net_device_ops evb_ops = { -+ .ndo_start_xmit = &evb_dropframe, -+ .ndo_open = &evb_open, -+ .ndo_stop = &evb_close, -+ -+ .ndo_bridge_setlink = &evb_setlink, -+ .ndo_bridge_getlink = &evb_getlink, -+ .ndo_bridge_dellink = &evb_dellink, -+ -+ .ndo_get_stats64 = &evb_port_get_stats, -+ .ndo_change_mtu = &evb_change_mtu, -+}; -+ -+static int evb_takedown(struct fsl_mc_device *evb_dev) -+{ -+ struct device *dev = &evb_dev->dev; -+ struct net_device *netdev = dev_get_drvdata(dev); -+ struct evb_priv *priv = netdev_priv(netdev); -+ int err; -+ -+ err = dpdmux_close(priv->mc_io, 0, 
priv->mux_handle); -+ if (unlikely(err)) -+ dev_warn(dev, "dpdmux_close err %d\n", err); -+ -+ return 0; -+} -+ -+static int evb_init(struct fsl_mc_device *evb_dev) -+{ -+ struct device *dev = &evb_dev->dev; -+ struct net_device *netdev = dev_get_drvdata(dev); -+ struct evb_priv *priv = netdev_priv(netdev); -+ int err = 0; -+ -+ priv->dev_id = evb_dev->obj_desc.id; -+ -+ err = dpdmux_open(priv->mc_io, 0, priv->dev_id, &priv->mux_handle); -+ if (unlikely(err)) { -+ dev_err(dev, "dpdmux_open err %d\n", err); -+ goto err_exit; -+ } -+ if (!priv->mux_handle) { -+ dev_err(dev, "dpdmux_open returned null handle but no error\n"); -+ err = -EFAULT; -+ goto err_exit; -+ } -+ -+ err = dpdmux_get_attributes(priv->mc_io, 0, priv->mux_handle, -+ &priv->attr); -+ if (unlikely(err)) { -+ dev_err(dev, "dpdmux_get_attributes err %d\n", err); -+ goto err_close; -+ } -+ -+ err = dpdmux_reset(priv->mc_io, 0, priv->mux_handle); -+ if (unlikely(err)) { -+ dev_err(dev, "dpdmux_reset err %d\n", err); -+ goto err_close; -+ } -+ -+ return 0; -+ -+err_close: -+ dpdmux_close(priv->mc_io, 0, priv->mux_handle); -+err_exit: -+ return err; -+} -+ -+static int evb_remove(struct fsl_mc_device *evb_dev) -+{ -+ struct device *dev = &evb_dev->dev; -+ struct net_device *netdev = dev_get_drvdata(dev); -+ struct evb_priv *priv = netdev_priv(netdev); -+ struct evb_port_priv *port_priv; -+ struct list_head *pos; -+ -+ list_for_each(pos, &priv->port_list) { -+ port_priv = list_entry(pos, struct evb_port_priv, list); -+ -+ rtnl_lock(); -+ netdev_upper_dev_unlink(port_priv->netdev, netdev); -+ rtnl_unlock(); -+ -+ unregister_netdev(port_priv->netdev); -+ free_netdev(port_priv->netdev); -+ } -+ -+ evb_teardown_irqs(evb_dev); -+ -+ unregister_netdev(netdev); -+ -+ evb_takedown(evb_dev); -+ fsl_mc_portal_free(priv->mc_io); -+ -+ dev_set_drvdata(dev, NULL); -+ free_netdev(netdev); -+ -+ return 0; -+} -+ -+static int evb_probe(struct fsl_mc_device *evb_dev) -+{ -+ struct device *dev; -+ struct evb_priv *priv = NULL; -+ struct net_device *netdev = NULL; -+ char port_name[IFNAMSIZ]; -+ int i; -+ int err = 0; -+ -+ dev = &evb_dev->dev; -+ -+ /* register switch device, it's for management only - no I/O */ -+ netdev = alloc_etherdev(sizeof(*priv)); -+ if (!netdev) { -+ dev_err(dev, "alloc_etherdev error\n"); -+ return -ENOMEM; -+ } -+ netdev->netdev_ops = &evb_ops; -+ -+ dev_set_drvdata(dev, netdev); -+ -+ priv = netdev_priv(netdev); -+ -+ err = fsl_mc_portal_allocate(evb_dev, 0, &priv->mc_io); -+ if (unlikely(err)) { -+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); -+ goto err_free_netdev; -+ } -+ if (!priv->mc_io) { -+ dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n"); -+ err = -EFAULT; -+ goto err_free_netdev; -+ } -+ -+ err = evb_init(evb_dev); -+ if (unlikely(err)) { -+ dev_err(dev, "evb init err %d\n", err); -+ goto err_free_cmdport; -+ } -+ -+ INIT_LIST_HEAD(&priv->port_list); -+ netdev->flags |= IFF_PROMISC | IFF_MASTER; -+ -+ dev_alloc_name(netdev, "evb%d"); -+ -+ /* register switch ports */ -+ snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name); -+ -+ /* only register downlinks? 
*/ -+ for (i = 0; i < priv->attr.num_ifs + 1; i++) { -+ struct net_device *port_netdev; -+ struct evb_port_priv *port_priv; -+ -+ if (i) { -+ port_netdev = -+ alloc_etherdev(sizeof(struct evb_port_priv)); -+ if (!port_netdev) { -+ dev_err(dev, "alloc_etherdev error\n"); -+ goto err_takedown; -+ } -+ -+ port_priv = netdev_priv(port_netdev); -+ -+ port_netdev->flags |= IFF_PROMISC | IFF_SLAVE; -+ -+ dev_alloc_name(port_netdev, port_name); -+ } else { -+ port_netdev = netdev; -+ port_priv = &priv->uplink; -+ } -+ -+ port_priv->netdev = port_netdev; -+ port_priv->evb_priv = priv; -+ port_priv->port_index = i; -+ -+ SET_NETDEV_DEV(port_netdev, dev); -+ -+ if (i) { -+ port_netdev->netdev_ops = &evb_port_ops; -+ -+ err = register_netdev(port_netdev); -+ if (err < 0) { -+ dev_err(dev, "register_netdev err %d\n", err); -+ free_netdev(port_netdev); -+ goto err_takedown; -+ } -+ -+ rtnl_lock(); -+ err = netdev_master_upper_dev_link(port_netdev, netdev, NULL, NULL); -+ if (unlikely(err)) { -+ dev_err(dev, "netdev_master_upper_dev_link err %d\n", -+ err); -+ unregister_netdev(port_netdev); -+ free_netdev(port_netdev); -+ rtnl_unlock(); -+ goto err_takedown; -+ } -+ rtmsg_ifinfo(RTM_NEWLINK, port_netdev, -+ IFF_SLAVE, GFP_KERNEL); -+ rtnl_unlock(); -+ -+ list_add(&(port_priv->list), &(priv->port_list)); -+ } else { -+ err = register_netdev(netdev); -+ -+ if (err < 0) { -+ dev_err(dev, "register_netdev error %d\n", err); -+ goto err_takedown; -+ } -+ } -+ -+ port_netdev->ethtool_ops = &evb_port_ethtool_ops; -+ -+ /* ports are up from init */ -+ rtnl_lock(); -+ err = dev_open(port_netdev); -+ rtnl_unlock(); -+ if (unlikely(err)) -+ dev_warn(dev, "dev_open err %d\n", err); -+ } -+ -+ /* setup irqs */ -+ err = evb_setup_irqs(evb_dev); -+ if (unlikely(err)) { -+ dev_warn(dev, "evb_setup_irqs err %d\n", err); -+ goto err_takedown; -+ } -+ -+ dev_info(dev, "probed evb device with %d ports\n", -+ priv->attr.num_ifs); -+ return 0; -+ -+err_takedown: -+ evb_remove(evb_dev); -+err_free_cmdport: -+ fsl_mc_portal_free(priv->mc_io); -+err_free_netdev: -+ return err; -+} -+ -+static const struct fsl_mc_device_match_id evb_match_id_table[] = { -+ { -+ .vendor = FSL_MC_VENDOR_FREESCALE, -+ .obj_type = "dpdmux", -+ .ver_major = DPDMUX_VER_MAJOR, -+ .ver_minor = DPDMUX_VER_MINOR, -+ }, -+ {} -+}; -+ -+static struct fsl_mc_driver evb_drv = { -+ .driver = { -+ .name = KBUILD_MODNAME, -+ .owner = THIS_MODULE, -+ }, -+ .probe = evb_probe, -+ .remove = evb_remove, -+ .match_id_table = evb_match_id_table, -+}; -+ -+module_fsl_mc_driver(evb_drv); -+ -+MODULE_LICENSE("GPL"); -+MODULE_DESCRIPTION("Layerscape DPAA Edge Virtual Bridge driver (prototype)"); diff --git a/target/linux/layerscape/patches-4.4/7216-dpaa2-evb-Fix-interrupt-handling.patch b/target/linux/layerscape/patches-4.4/7216-dpaa2-evb-Fix-interrupt-handling.patch deleted file mode 100644 index d3d976af9..000000000 --- a/target/linux/layerscape/patches-4.4/7216-dpaa2-evb-Fix-interrupt-handling.patch +++ /dev/null @@ -1,69 +0,0 @@ -From 4efb592d8a931669df5df04bedcae8cbc85c3700 Mon Sep 17 00:00:00 2001 -From: Razvan Stefanescu -Date: Wed, 17 Feb 2016 16:31:01 +0200 -Subject: [PATCH 216/226] dpaa2-evb: Fix interrupt handling - -Mask only the events handled by the driver - DPDMUX_IRQ_EVENT_LINK_CHANGED. - -Use clear-on-read mechanism for the interrupt status and avoid calling -dpdmux_clear_irq_status(). 
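In sketch form, the clear-on-read idiom the patch switches to is: pre-load the status word with the bits that should be cleared while reading, then query (variables as in the patched _evb_irq0_handler_thread(); illustrative only):

	/* events we handle, plus the if_id bits kept in 31..16 */
	uint32_t status = DPDMUX_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;

	err = dpdmux_get_irq_status(io, 0, token, irq_index, &status);
	/* the MC clears the preset bits on read; no separate clear call needed */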
Status contains the events handled (only link -state change for the moment) and masks the first 16-bits, as they are used -to store the interface ID that generated the event. - -Signed-off-by: Razvan Stefanescu ---- - drivers/staging/fsl-dpaa2/evb/evb.c | 20 ++++++++++---------- - 1 file changed, 10 insertions(+), 10 deletions(-) - ---- a/drivers/staging/fsl-dpaa2/evb/evb.c -+++ b/drivers/staging/fsl-dpaa2/evb/evb.c -@@ -151,7 +151,9 @@ static irqreturn_t _evb_irq0_handler_thr - struct fsl_mc_io *io = priv->mc_io; - uint16_t token = priv->mux_handle; - int irq_index = DPDMUX_IRQ_INDEX_IF; -- uint32_t status = 0, clear = 0; -+ -+ /* Mask the events and the if_id reserved bits to be cleared on read */ -+ uint32_t status = DPDMUX_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000; - int err; - - /* Sanity check */ -@@ -163,23 +165,21 @@ static irqreturn_t _evb_irq0_handler_thr - err = dpdmux_get_irq_status(io, 0, token, irq_index, &status); - if (unlikely(err)) { - netdev_err(netdev, "Can't get irq status (err %d)", err); -- clear = 0xffffffff; -+ err = dpdmux_clear_irq_status(io, 0, token, irq_index, -+ 0xFFFFFFFF); -+ if (unlikely(err)) -+ netdev_err(netdev, "Can't clear irq status (err %d)", -+ err); - goto out; - } - -- /* FIXME clear irq status */ -- - if (status & DPDMUX_IRQ_EVENT_LINK_CHANGED) { -- clear |= DPDMUX_IRQ_EVENT_LINK_CHANGED; -- - err = evb_links_state_update(priv); - if (unlikely(err)) - goto out; - } -+ - out: -- err = dpdmux_clear_irq_status(io, 0, token, irq_index, clear); -- if (unlikely(err)) -- netdev_err(netdev, "Can't clear irq status (err %d)", err); - return IRQ_HANDLED; - } - -@@ -191,7 +191,7 @@ static int evb_setup_irqs(struct fsl_mc_ - int err = 0; - struct fsl_mc_device_irq *irq; - const int irq_index = DPDMUX_IRQ_INDEX_IF; -- uint32_t mask = ~0x0u; /* FIXME: unmask handled irqs */ -+ uint32_t mask = DPDMUX_IRQ_EVENT_LINK_CHANGED; - - err = fsl_mc_allocate_irqs(evb_dev); - if (unlikely(err)) { diff --git a/target/linux/layerscape/patches-4.4/7217-dpaa2-evb-Add-object-version-check.patch b/target/linux/layerscape/patches-4.4/7217-dpaa2-evb-Add-object-version-check.patch deleted file mode 100644 index 13d61cfb5..000000000 --- a/target/linux/layerscape/patches-4.4/7217-dpaa2-evb-Add-object-version-check.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 213c59501bbd6da8c56e95f90f8a8c6af2682002 Mon Sep 17 00:00:00 2001 -From: Razvan Stefanescu -Date: Thu, 18 Feb 2016 10:54:40 +0200 -Subject: [PATCH 217/226] dpaa2-evb: Add object version check - -Abort probing if DPDMUX object version is smaller than required. - -Signed-off-by: Razvan Stefanescu ---- - drivers/staging/fsl-dpaa2/evb/evb.c | 15 +++++++++++++++ - 1 file changed, 15 insertions(+) - ---- a/drivers/staging/fsl-dpaa2/evb/evb.c -+++ b/drivers/staging/fsl-dpaa2/evb/evb.c -@@ -44,6 +44,10 @@ - #include "dpdmux.h" - #include "dpdmux-cmd.h" - -+/* Minimal supported DPDMUX version */ -+#define DPDMUX_MIN_VER_MAJOR 5 -+#define DPDMUX_MIN_VER_MINOR 0 -+ - /* IRQ index */ - #define DPDMUX_MAX_IRQ_NUM 2 - -@@ -1004,6 +1008,17 @@ static int evb_init(struct fsl_mc_device - goto err_close; - } - -+ /* Minimum supported DPDMUX version check */ -+ if (priv->attr.version.major < DPDMUX_MIN_VER_MAJOR || -+ (priv->attr.version.major == DPDMUX_MIN_VER_MAJOR && -+ priv->attr.version.minor < DPDMUX_MIN_VER_MINOR)) { -+ dev_err(dev, "DPDMUX version %d.%d not supported. 
Use %d.%d or greater.\n", -+ priv->attr.version.major, priv->attr.version.minor, -+ DPDMUX_MIN_VER_MAJOR, DPDMUX_MIN_VER_MINOR); -+ err = -ENOTSUPP; -+ goto err_close; -+ } -+ - err = dpdmux_reset(priv->mc_io, 0, priv->mux_handle); - if (unlikely(err)) { - dev_err(dev, "dpdmux_reset err %d\n", err); diff --git a/target/linux/layerscape/patches-4.4/7218-dpaa2-evb-Cosmetic-cleanup.patch b/target/linux/layerscape/patches-4.4/7218-dpaa2-evb-Cosmetic-cleanup.patch deleted file mode 100644 index 54f77ab65..000000000 --- a/target/linux/layerscape/patches-4.4/7218-dpaa2-evb-Cosmetic-cleanup.patch +++ /dev/null @@ -1,20 +0,0 @@ -From 54d026dafa1f7d17758615736123917cc4f3f203 Mon Sep 17 00:00:00 2001 -From: Mihai Caraman -Date: Tue, 5 Apr 2016 14:12:10 +0000 -Subject: [PATCH 218/226] dpaa2-evb: Cosmetic cleanup - -Replace obsolete terms. - -Signed-off-by: Stuart Yoder ---- - drivers/staging/fsl-dpaa2/evb/evb.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/staging/fsl-dpaa2/evb/evb.c -+++ b/drivers/staging/fsl-dpaa2/evb/evb.c -@@ -1228,4 +1228,4 @@ static struct fsl_mc_driver evb_drv = { - module_fsl_mc_driver(evb_drv); - - MODULE_LICENSE("GPL"); --MODULE_DESCRIPTION("Layerscape DPAA Edge Virtual Bridge driver (prototype)"); -+MODULE_DESCRIPTION("DPAA2 Edge Virtual Bridge driver (prototype)"); diff --git a/target/linux/layerscape/patches-4.4/7219-dpaa2-evb-match-id-cleanup.patch b/target/linux/layerscape/patches-4.4/7219-dpaa2-evb-match-id-cleanup.patch deleted file mode 100644 index be1ab1baf..000000000 --- a/target/linux/layerscape/patches-4.4/7219-dpaa2-evb-match-id-cleanup.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 744bd6494a51443c2a7d32ed76e94e4fc5bd2404 Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Thu, 14 Jul 2016 17:32:23 -0500 -Subject: [PATCH 219/226] dpaa2-evb: match id cleanup - -Signed-off-by: Stuart Yoder ---- - drivers/staging/fsl-dpaa2/evb/evb.c | 4 +--- - 1 file changed, 1 insertion(+), 3 deletions(-) - ---- a/drivers/staging/fsl-dpaa2/evb/evb.c -+++ b/drivers/staging/fsl-dpaa2/evb/evb.c -@@ -1205,12 +1205,10 @@ err_free_netdev: - return err; - } - --static const struct fsl_mc_device_match_id evb_match_id_table[] = { -+static const struct fsl_mc_device_id evb_match_id_table[] = { - { - .vendor = FSL_MC_VENDOR_FREESCALE, - .obj_type = "dpdmux", -- .ver_major = DPDMUX_VER_MAJOR, -- .ver_minor = DPDMUX_VER_MINOR, - }, - {} - }; diff --git a/target/linux/layerscape/patches-4.4/7220-dpaa2-ethsw-Ethernet-Switch-driver.patch b/target/linux/layerscape/patches-4.4/7220-dpaa2-ethsw-Ethernet-Switch-driver.patch deleted file mode 100644 index 067eb59a2..000000000 --- a/target/linux/layerscape/patches-4.4/7220-dpaa2-ethsw-Ethernet-Switch-driver.patch +++ /dev/null @@ -1,6605 +0,0 @@ -From 8df017d70c54ceafc99b7904785603c678a2e5c1 Mon Sep 17 00:00:00 2001 -From: Razvan Stefanescu -Date: Tue, 22 Sep 2015 11:36:34 +0300 -Subject: [PATCH 220/226] dpaa2-ethsw: Ethernet Switch driver - -This is a commit of the cumulative, squashed dpaa2-l2switch patches. -All the commit logs are preserved below. - -Signed-off-by: Stuart Yoder - ---------------------------------------------------------------------- - -dpaa2-ethsw: Ethernet Switch driver - -Initial support for DPAA2 L2 switch. The switch and all ports are -presented as network interfaces in Linux (swX and swXpY). I/O -functionality is not available on these interfaces; they are exclusively -for management. - -Configuration is done using the bridge tool.
Supported commands are: -- fdb operations with unicast/multicast addresses -- vlan configuration -- setting STP state of ports -- flooding, learning control - -Offers support for retrieving port statistics via ethtool (or similar -applications). - -This patch contains the following patches squashed together: -staging: fsl-dpaa2: ethsw: ethernet switch driver -dpaa2-ethsw: Include by default in configuration -staging: fsl-dpaa2: ethsw: Rebasing onto kernel 4.0 -staging: fsl-mc: migrated remaining flibs for MC fw 8.0.0 -dpaa2-ethsw: Prefix driver name with dpaa2- -dpaa2-ethsw: Set carrier state on probe -dpaa2-ethsw: Add support for link state update - -These patches were initially submitted by: -Alex Marginean -J. German Rivera -Razvan Stefanescu - -and reviewed by Stuart Yoder - -Ported to linux-v4.1 by updating iflink usage and the ndo_bridge_getlink() -parameter list. - -Signed-off-by: Razvan Stefanescu -[Stuart: resolved minor merge conflicts] -Signed-off-by: Stuart Yoder - -dpaa2-ethsw: Update dpsw binary interface to 7.0 - -This corresponds to MC release 0.8.0. - -Signed-off-by: Razvan Stefanescu - -dpaa2-ethsw: Add object version check - -Abort probing if DPSW object version is smaller than required. - -Signed-off-by: Razvan Stefanescu - -dpaa2-ethsw: Fix interrupt handling - -Mask only the events handled by the driver - DPSW_IRQ_EVENT_LINK_CHANGED. - -Use clear-on-read mechanism for the interrupt status and avoid calling -dpsw_clear_irq_status(). Status contains the events handled (only link -state change for the moment) and masks the first 16-bits, as they are used -to store the interface ID that generated the event. - -Signed-off-by: Razvan Stefanescu - -dpaa2-ethsw: resolve compile issues on uprev to 4.5 - --irq_number field no longer exists in fsl-mc interrupt - struct --netdev_master_upper_dev_link() has 2 new parameters, which - are set to NULL for now - -Signed-off-by: Stuart Yoder ---- - MAINTAINERS | 6 + - drivers/staging/fsl-dpaa2/Kconfig | 1 + - drivers/staging/fsl-dpaa2/Makefile | 1 + - drivers/staging/fsl-dpaa2/ethsw/Kconfig | 7 + - drivers/staging/fsl-dpaa2/ethsw/Makefile | 10 + - drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 916 ++++++++++++ - drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 1639 +++++++++++++++++++++ - drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 2164 ++++++++++++++++++++++++++++ - drivers/staging/fsl-dpaa2/ethsw/switch.c | 1711 ++++++++++++++++++++++ - drivers/staging/fsl-mc/include/net.h | 1 - - 10 files changed, 6455 insertions(+), 1 deletion(-) - create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Kconfig - create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Makefile - create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h - create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.c - create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.h - create mode 100644 drivers/staging/fsl-dpaa2/ethsw/switch.c - ---- a/MAINTAINERS -+++ b/MAINTAINERS -@@ -4554,6 +4554,12 @@ S: Maintained - F: drivers/staging/fsl-mc/bus/mc-ioctl.h - F: drivers/staging/fsl-mc/bus/mc-restool.c - -+FREESCALE DPAA2 ETHERNET SWITCH DRIVER -+M: Alex Marginean -+L: linux-kernel@vger.kernel.org -+S: Maintained -+F: drivers/staging/fsl-dpaa2/ethsw/ -+ - FREESCALE DPAA2 MAC/PHY INTERFACE DRIVER - M: Alex Marginean - L: linux-kernel@vger.kernel.org ---- a/drivers/staging/fsl-dpaa2/Kconfig -+++ b/drivers/staging/fsl-dpaa2/Kconfig -@@ -11,3 +11,4 @@ config FSL_DPAA2 - source "drivers/staging/fsl-dpaa2/ethernet/Kconfig" - source "drivers/staging/fsl-dpaa2/mac/Kconfig" - source
"drivers/staging/fsl-dpaa2/evb/Kconfig" -+source "drivers/staging/fsl-dpaa2/ethsw/Kconfig" ---- a/drivers/staging/fsl-dpaa2/Makefile -+++ b/drivers/staging/fsl-dpaa2/Makefile -@@ -5,3 +5,4 @@ - obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/ - obj-$(CONFIG_FSL_DPAA2_MAC) += mac/ - obj-$(CONFIG_FSL_DPAA2_EVB) += evb/ -+obj-$(CONFIG_FSL_DPAA2_ETHSW) += ethsw/ ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethsw/Kconfig -@@ -0,0 +1,7 @@ -+config FSL_DPAA2_ETHSW -+ tristate "DPAA2 Ethernet Switch" -+ depends on FSL_MC_BUS && FSL_DPAA2 && FSL_DPAA2_ETH -+ select FSL_DPAA2_MAC -+ default y -+ ---help--- -+ Prototype driver for DPAA2 Ethernet Switch. ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethsw/Makefile -@@ -0,0 +1,10 @@ -+ -+obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o -+ -+dpaa2-ethsw-objs := switch.o dpsw.o -+ -+all: -+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules -+ -+clean: -+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h -@@ -0,0 +1,916 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef __FSL_DPSW_CMD_H -+#define __FSL_DPSW_CMD_H -+ -+/* DPSW Version */ -+#define DPSW_VER_MAJOR 7 -+#define DPSW_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPSW_CMDID_CLOSE 0x800 -+#define DPSW_CMDID_OPEN 0x802 -+#define DPSW_CMDID_CREATE 0x902 -+#define DPSW_CMDID_DESTROY 0x900 -+ -+#define DPSW_CMDID_ENABLE 0x002 -+#define DPSW_CMDID_DISABLE 0x003 -+#define DPSW_CMDID_GET_ATTR 0x004 -+#define DPSW_CMDID_RESET 0x005 -+#define DPSW_CMDID_IS_ENABLED 0x006 -+ -+#define DPSW_CMDID_SET_IRQ 0x010 -+#define DPSW_CMDID_GET_IRQ 0x011 -+#define DPSW_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPSW_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPSW_CMDID_SET_IRQ_MASK 0x014 -+#define DPSW_CMDID_GET_IRQ_MASK 0x015 -+#define DPSW_CMDID_GET_IRQ_STATUS 0x016 -+#define DPSW_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPSW_CMDID_SET_REFLECTION_IF 0x022 -+ -+#define DPSW_CMDID_ADD_CUSTOM_TPID 0x024 -+ -+#define DPSW_CMDID_REMOVE_CUSTOM_TPID 0x026 -+ -+#define DPSW_CMDID_IF_SET_TCI 0x030 -+#define DPSW_CMDID_IF_SET_STP 0x031 -+#define DPSW_CMDID_IF_SET_ACCEPTED_FRAMES 0x032 -+#define DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN 0x033 -+#define DPSW_CMDID_IF_GET_COUNTER 0x034 -+#define DPSW_CMDID_IF_SET_COUNTER 0x035 -+#define DPSW_CMDID_IF_SET_TX_SELECTION 0x036 -+#define DPSW_CMDID_IF_ADD_REFLECTION 0x037 -+#define DPSW_CMDID_IF_REMOVE_REFLECTION 0x038 -+#define DPSW_CMDID_IF_SET_FLOODING_METERING 0x039 -+#define DPSW_CMDID_IF_SET_METERING 0x03A -+#define DPSW_CMDID_IF_SET_EARLY_DROP 0x03B -+ -+#define DPSW_CMDID_IF_ENABLE 0x03D -+#define DPSW_CMDID_IF_DISABLE 0x03E -+ -+#define DPSW_CMDID_IF_GET_ATTR 0x042 -+ -+#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH 0x044 -+#define DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH 0x045 -+#define DPSW_CMDID_IF_GET_LINK_STATE 0x046 -+#define DPSW_CMDID_IF_SET_FLOODING 0x047 -+#define DPSW_CMDID_IF_SET_BROADCAST 0x048 -+#define DPSW_CMDID_IF_SET_MULTICAST 0x049 -+#define DPSW_CMDID_IF_GET_TCI 0x04A -+ -+#define DPSW_CMDID_IF_SET_LINK_CFG 0x04C -+ -+#define DPSW_CMDID_VLAN_ADD 0x060 -+#define DPSW_CMDID_VLAN_ADD_IF 0x061 -+#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED 0x062 -+#define DPSW_CMDID_VLAN_ADD_IF_FLOODING 0x063 -+#define DPSW_CMDID_VLAN_REMOVE_IF 0x064 -+#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED 0x065 -+#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING 0x066 -+#define DPSW_CMDID_VLAN_REMOVE 0x067 -+#define DPSW_CMDID_VLAN_GET_IF 0x068 -+#define DPSW_CMDID_VLAN_GET_IF_FLOODING 0x069 -+#define DPSW_CMDID_VLAN_GET_IF_UNTAGGED 0x06A -+#define DPSW_CMDID_VLAN_GET_ATTRIBUTES 0x06B -+ -+#define DPSW_CMDID_FDB_GET_MULTICAST 0x080 -+#define DPSW_CMDID_FDB_GET_UNICAST 0x081 -+#define DPSW_CMDID_FDB_ADD 0x082 -+#define DPSW_CMDID_FDB_REMOVE 0x083 -+#define DPSW_CMDID_FDB_ADD_UNICAST 0x084 -+#define DPSW_CMDID_FDB_REMOVE_UNICAST 0x085 -+#define DPSW_CMDID_FDB_ADD_MULTICAST 0x086 -+#define DPSW_CMDID_FDB_REMOVE_MULTICAST 0x087 -+#define DPSW_CMDID_FDB_SET_LEARNING_MODE 0x088 -+#define DPSW_CMDID_FDB_GET_ATTR 0x089 -+ -+#define DPSW_CMDID_ACL_ADD 0x090 -+#define DPSW_CMDID_ACL_REMOVE 0x091 -+#define DPSW_CMDID_ACL_ADD_ENTRY 0x092 -+#define DPSW_CMDID_ACL_REMOVE_ENTRY 0x093 -+#define DPSW_CMDID_ACL_ADD_IF 0x094 -+#define DPSW_CMDID_ACL_REMOVE_IF 0x095 -+#define DPSW_CMDID_ACL_GET_ATTR 0x096 -+ -+#define DPSW_CMDID_CTRL_IF_GET_ATTR 0x0A0 -+#define DPSW_CMDID_CTRL_IF_SET_POOLS 0x0A1 -+#define DPSW_CMDID_CTRL_IF_ENABLE 0x0A2 -+#define DPSW_CMDID_CTRL_IF_DISABLE 0x0A3 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_OPEN(cmd, dpsw_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpsw_id) -+ -+/* cmd, param, 
offset, width, type, arg_name */ -+#define DPSW_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->num_ifs);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->adv.max_fdbs);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->adv.max_meters_per_if);\ -+ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_component_type, \ -+ cfg->adv.component_type);\ -+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->adv.max_vlans);\ -+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, cfg->adv.max_fdb_entries);\ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->adv.fdb_aging_time);\ -+ MC_CMD_OP(cmd, 1, 48, 16, uint16_t, cfg->adv.max_fdb_mc_groups);\ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->adv.options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_GET_IRQ_ENABLE(cmd, enable_state) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, attr->num_ifs);\ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, attr->max_fdbs);\ -+ 
MC_RSP_OP(cmd, 0, 24, 8, uint8_t, attr->num_fdbs);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->max_vlans);\ -+ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, attr->num_vlans);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, attr->max_fdb_entries);\ -+ MC_RSP_OP(cmd, 1, 48, 16, uint16_t, attr->fdb_aging_time);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 2, 32, 16, uint16_t, attr->mem_size);\ -+ MC_RSP_OP(cmd, 2, 48, 16, uint16_t, attr->max_fdb_mc_groups);\ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, attr->options);\ -+ MC_RSP_OP(cmd, 4, 0, 8, uint8_t, attr->max_meters_per_if);\ -+ MC_RSP_OP(cmd, 4, 8, 4, enum dpsw_component_type, \ -+ attr->component_type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_SET_REFLECTION_IF(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_FLOODING(cmd, if_id, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 1, int, en);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_BROADCAST(cmd, if_id, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 1, int, en);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_MULTICAST(cmd, if_id, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 1, int, en);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_TCI(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 12, uint16_t, cfg->vlan_id);\ -+ MC_CMD_OP(cmd, 0, 28, 1, uint8_t, cfg->dei);\ -+ MC_CMD_OP(cmd, 0, 29, 3, uint8_t, cfg->pcp);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_GET_TCI(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_IF_GET_TCI(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, cfg->dei);\ -+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, cfg->pcp);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_STP(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\ -+ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_stp_state, cfg->state);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 4, enum dpsw_accepted_frames, cfg->type);\ -+ MC_CMD_OP(cmd, 0, 20, 4, enum dpsw_action, cfg->unaccept_act);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_ACCEPT_ALL_VLAN(cmd, if_id, accept_all) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 1, int, accept_all);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_GET_COUNTER(cmd, if_id, type) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 5, enum dpsw_counter, type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_IF_GET_COUNTER(cmd, counter) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, 
counter) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_COUNTER(cmd, if_id, type, counter) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 5, enum dpsw_counter, type);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, counter);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_TX_SELECTION(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 3, enum dpsw_priority_selector, \ -+ cfg->priority_selector);\ -+ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->tc_id[0]);\ -+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->tc_id[1]);\ -+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->tc_id[2]);\ -+ MC_CMD_OP(cmd, 1, 24, 8, uint8_t, cfg->tc_id[3]);\ -+ MC_CMD_OP(cmd, 1, 32, 8, uint8_t, cfg->tc_id[4]);\ -+ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, cfg->tc_id[5]);\ -+ MC_CMD_OP(cmd, 1, 48, 8, uint8_t, cfg->tc_id[6]);\ -+ MC_CMD_OP(cmd, 1, 56, 8, uint8_t, cfg->tc_id[7]);\ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->tc_sched[0].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 2, 16, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[0].mode);\ -+ MC_CMD_OP(cmd, 2, 32, 16, uint16_t, cfg->tc_sched[1].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 2, 48, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[1].mode);\ -+ MC_CMD_OP(cmd, 3, 0, 16, uint16_t, cfg->tc_sched[2].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 3, 16, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[2].mode);\ -+ MC_CMD_OP(cmd, 3, 32, 16, uint16_t, cfg->tc_sched[3].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 3, 48, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[3].mode);\ -+ MC_CMD_OP(cmd, 4, 0, 16, uint16_t, cfg->tc_sched[4].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 4, 16, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[4].mode);\ -+ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->tc_sched[5].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 4, 48, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[5].mode);\ -+ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->tc_sched[6].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 5, 16, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[6].mode);\ -+ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->tc_sched[7].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 5, 48, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[7].mode);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_ADD_REFLECTION(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\ -+ MC_CMD_OP(cmd, 0, 32, 2, enum dpsw_reflection_filter, cfg->filter);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_REMOVE_REFLECTION(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\ -+ MC_CMD_OP(cmd, 0, 32, 2, enum dpsw_reflection_filter, cfg->filter);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_FLOODING_METERING(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 24, 4, enum dpsw_metering_mode, cfg->mode);\ -+ MC_CMD_OP(cmd, 0, 28, 4, enum dpsw_metering_unit, cfg->units);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->cir);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->eir);\ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs);\ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->ebs);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_METERING(cmd, if_id, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, 
if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id);\ -+ MC_CMD_OP(cmd, 0, 24, 4, enum dpsw_metering_mode, cfg->mode);\ -+ MC_CMD_OP(cmd, 0, 28, 4, enum dpsw_metering_unit, cfg->units);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->cir);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->eir);\ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs);\ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->ebs);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_PREP_EARLY_DROP(ext, cfg) \ -+do { \ -+ MC_PREP_OP(ext, 0, 0, 2, enum dpsw_early_drop_mode, cfg->drop_mode); \ -+ MC_PREP_OP(ext, 0, 2, 2, \ -+ enum dpsw_early_drop_unit, cfg->units); \ -+ MC_PREP_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \ -+ MC_PREP_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \ -+ MC_PREP_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \ -+ MC_PREP_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \ -+ MC_PREP_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\ -+ MC_PREP_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \ -+ MC_PREP_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_EXT_EARLY_DROP(ext, cfg) \ -+do { \ -+ MC_EXT_OP(ext, 0, 0, 2, enum dpsw_early_drop_mode, cfg->drop_mode); \ -+ MC_EXT_OP(ext, 0, 2, 2, \ -+ enum dpsw_early_drop_unit, cfg->units); \ -+ MC_EXT_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \ -+ MC_EXT_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \ -+ MC_EXT_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \ -+ MC_EXT_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \ -+ MC_EXT_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\ -+ MC_EXT_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \ -+ MC_EXT_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_EARLY_DROP(cmd, if_id, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, if_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ADD_CUSTOM_TPID(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->tpid) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_REMOVE_CUSTOM_TPID(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->tpid) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_ENABLE(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_DISABLE(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_GET_ATTR(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_IF_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 4, enum dpsw_accepted_frames, \ -+ attr->admit_untagged);\ -+ MC_RSP_OP(cmd, 0, 5, 1, int, attr->enabled);\ -+ MC_RSP_OP(cmd, 0, 6, 1, int, attr->accept_all_vlan);\ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, attr->num_tcs);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qdid);\ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->options);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->rate);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_MAX_FRAME_LENGTH(cmd, 
if_id, frame_length) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, frame_length);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_GET_MAX_FRAME_LENGTH(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_IF_GET_MAX_FRAME_LENGTH(cmd, frame_length) \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, frame_length) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_GET_LINK_STATE(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_IF_GET_LINK_STATE(cmd, state) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\ -+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_ADD(cmd, vlan_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_ADD_IF(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_ADD_IF_UNTAGGED(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_ADD_IF_FLOODING(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id) -+ -+#define DPSW_CMD_VLAN_REMOVE_IF(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_REMOVE_IF_UNTAGGED(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_REMOVE_IF_FLOODING(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_REMOVE(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_GET_ATTR(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_VLAN_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->fdb_id); \ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->num_ifs); \ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, attr->num_untagged_ifs); \ -+ MC_RSP_OP(cmd, 1, 48, 16, uint16_t, attr->num_flooding_ifs); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_GET_IF(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_VLAN_GET_IF(cmd, cfg) \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_GET_IF_FLOODING(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_VLAN_GET_IF_FLOODING(cmd, cfg) \ -+ MC_RSP_OP(cmd, 0, 16, 16, 
uint16_t, cfg->num_ifs) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_GET_IF_UNTAGGED(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_VLAN_GET_IF_UNTAGGED(cmd, cfg) \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs) -+ -+/* param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_ADD(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, cfg->fdb_aging_time);\ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->num_fdb_entries);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_FDB_ADD(cmd, fdb_id) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, fdb_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_REMOVE(cmd, fdb_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_ADD_UNICAST(cmd, fdb_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\ -+ MC_CMD_OP(cmd, 1, 0, 8, uint16_t, cfg->if_egress);\ -+ MC_CMD_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_GET_UNICAST(cmd, fdb_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_FDB_GET_UNICAST(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, cfg->if_egress);\ -+ MC_RSP_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_REMOVE_UNICAST(cmd, fdb_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\ -+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->if_egress);\ -+ MC_CMD_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_ADD_MULTICAST(cmd, fdb_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs);\ -+ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_fdb_entry_type, cfg->type);\ -+ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 1, 24, 8, uint8_t, cfg->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 1, 32, 8, uint8_t, cfg->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, cfg->mac_addr[0]);\ -+} while (0) 
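For readers tracing these macros: each MC_CMD_OP()/MC_RSP_OP()/MC_PREP_OP() line packs or unpacks one field of a Management Complex command, addressed by (64-bit parameter word, bit offset, bit width). A minimal standalone sketch of the same packing scheme follows; mc_enc_field() and encode_if_set_flooding() are illustrative stand-ins, not part of the fsl-mc API, and field widths under 64 bits are assumed.

#include <stdint.h>

/* Pack `val` into a field `len` bits wide starting at bit `lsoffset` of a
 * 64-bit parameter word -- the operation each MC_CMD_OP() line performs
 * (illustrative stand-in; assumes len < 64). */
static uint64_t mc_enc_field(int lsoffset, int len, uint64_t val)
{
	return (val & (((uint64_t)1 << len) - 1)) << lsoffset;
}

/* Equivalent of DPSW_CMD_IF_SET_FLOODING() above: if_id occupies bits
 * 0..15 of parameter word 0 and the enable flag occupies bit 16. */
static void encode_if_set_flooding(uint64_t params[], uint16_t if_id, int en)
{
	params[0] |= mc_enc_field(0, 16, if_id);
	params[0] |= mc_enc_field(16, 1, en);
}

The DPSW_RSP_* macros apply the same addressing in reverse to extract fields from the response words.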
-+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_GET_MULTICAST(cmd, fdb_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_FDB_GET_MULTICAST(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, cfg->num_ifs);\ -+ MC_RSP_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_REMOVE_MULTICAST(cmd, fdb_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs);\ -+ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_fdb_entry_type, cfg->type);\ -+ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 1, 24, 8, uint8_t, cfg->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 1, 32, 8, uint8_t, cfg->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, cfg->mac_addr[0]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_SET_LEARNING_MODE(cmd, fdb_id, mode) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 4, enum dpsw_fdb_learning_mode, mode);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_GET_ATTR(cmd, fdb_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_FDB_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, attr->max_fdb_entries);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->fdb_aging_time);\ -+ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, attr->num_fdb_mc_groups);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->max_fdb_mc_groups);\ -+ MC_RSP_OP(cmd, 1, 16, 4, enum dpsw_fdb_learning_mode, \ -+ attr->learning_mode);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ACL_ADD(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->max_entries) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_ACL_ADD(cmd, acl_id) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, acl_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ACL_REMOVE(cmd, acl_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_PREP_ACL_ENTRY(ext, key) \ -+do { \ -+ MC_PREP_OP(ext, 0, 0, 8, uint8_t, key->match.l2_dest_mac[5]);\ -+ MC_PREP_OP(ext, 0, 8, 8, uint8_t, key->match.l2_dest_mac[4]);\ -+ MC_PREP_OP(ext, 0, 16, 8, uint8_t, key->match.l2_dest_mac[3]);\ -+ MC_PREP_OP(ext, 0, 24, 8, uint8_t, key->match.l2_dest_mac[2]);\ -+ MC_PREP_OP(ext, 0, 32, 8, uint8_t, key->match.l2_dest_mac[1]);\ -+ MC_PREP_OP(ext, 0, 40, 8, uint8_t, key->match.l2_dest_mac[0]);\ -+ MC_PREP_OP(ext, 0, 48, 16, uint16_t, key->match.l2_tpid);\ -+ MC_PREP_OP(ext, 1, 0, 8, uint8_t, key->match.l2_source_mac[5]);\ -+ MC_PREP_OP(ext, 1, 8, 8, uint8_t, key->match.l2_source_mac[4]);\ -+ MC_PREP_OP(ext, 1, 16, 8, uint8_t, key->match.l2_source_mac[3]);\ -+ MC_PREP_OP(ext, 1, 24, 8, uint8_t, key->match.l2_source_mac[2]);\ -+ 
MC_PREP_OP(ext, 1, 32, 8, uint8_t, key->match.l2_source_mac[1]);\ -+ MC_PREP_OP(ext, 1, 40, 8, uint8_t, key->match.l2_source_mac[0]);\ -+ MC_PREP_OP(ext, 1, 48, 16, uint16_t, key->match.l2_vlan_id);\ -+ MC_PREP_OP(ext, 2, 0, 32, uint32_t, key->match.l3_dest_ip);\ -+ MC_PREP_OP(ext, 2, 32, 32, uint32_t, key->match.l3_source_ip);\ -+ MC_PREP_OP(ext, 3, 0, 16, uint16_t, key->match.l4_dest_port);\ -+ MC_PREP_OP(ext, 3, 16, 16, uint16_t, key->match.l4_source_port);\ -+ MC_PREP_OP(ext, 3, 32, 16, uint16_t, key->match.l2_ether_type);\ -+ MC_PREP_OP(ext, 3, 48, 8, uint8_t, key->match.l2_pcp_dei);\ -+ MC_PREP_OP(ext, 3, 56, 8, uint8_t, key->match.l3_dscp);\ -+ MC_PREP_OP(ext, 4, 0, 8, uint8_t, key->mask.l2_dest_mac[5]);\ -+ MC_PREP_OP(ext, 4, 8, 8, uint8_t, key->mask.l2_dest_mac[4]);\ -+ MC_PREP_OP(ext, 4, 16, 8, uint8_t, key->mask.l2_dest_mac[3]);\ -+ MC_PREP_OP(ext, 4, 24, 8, uint8_t, key->mask.l2_dest_mac[2]);\ -+ MC_PREP_OP(ext, 4, 32, 8, uint8_t, key->mask.l2_dest_mac[1]);\ -+ MC_PREP_OP(ext, 4, 40, 8, uint8_t, key->mask.l2_dest_mac[0]);\ -+ MC_PREP_OP(ext, 4, 48, 16, uint16_t, key->mask.l2_tpid);\ -+ MC_PREP_OP(ext, 5, 0, 8, uint8_t, key->mask.l2_source_mac[5]);\ -+ MC_PREP_OP(ext, 5, 8, 8, uint8_t, key->mask.l2_source_mac[4]);\ -+ MC_PREP_OP(ext, 5, 16, 8, uint8_t, key->mask.l2_source_mac[3]);\ -+ MC_PREP_OP(ext, 5, 24, 8, uint8_t, key->mask.l2_source_mac[2]);\ -+ MC_PREP_OP(ext, 5, 32, 8, uint8_t, key->mask.l2_source_mac[1]);\ -+ MC_PREP_OP(ext, 5, 40, 8, uint8_t, key->mask.l2_source_mac[0]);\ -+ MC_PREP_OP(ext, 5, 48, 16, uint16_t, key->mask.l2_vlan_id);\ -+ MC_PREP_OP(ext, 6, 0, 32, uint32_t, key->mask.l3_dest_ip);\ -+ MC_PREP_OP(ext, 6, 32, 32, uint32_t, key->mask.l3_source_ip);\ -+ MC_PREP_OP(ext, 7, 0, 16, uint16_t, key->mask.l4_dest_port);\ -+ MC_PREP_OP(ext, 7, 16, 16, uint16_t, key->mask.l4_source_port);\ -+ MC_PREP_OP(ext, 7, 32, 16, uint16_t, key->mask.l2_ether_type);\ -+ MC_PREP_OP(ext, 7, 48, 8, uint8_t, key->mask.l2_pcp_dei);\ -+ MC_PREP_OP(ext, 7, 56, 8, uint8_t, key->mask.l3_dscp);\ -+ MC_PREP_OP(ext, 8, 0, 8, uint8_t, key->match.l3_protocol);\ -+ MC_PREP_OP(ext, 8, 8, 8, uint8_t, key->mask.l3_protocol);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_EXT_ACL_ENTRY(ext, key) \ -+do { \ -+ MC_EXT_OP(ext, 0, 0, 8, uint8_t, key->match.l2_dest_mac[5]);\ -+ MC_EXT_OP(ext, 0, 8, 8, uint8_t, key->match.l2_dest_mac[4]);\ -+ MC_EXT_OP(ext, 0, 16, 8, uint8_t, key->match.l2_dest_mac[3]);\ -+ MC_EXT_OP(ext, 0, 24, 8, uint8_t, key->match.l2_dest_mac[2]);\ -+ MC_EXT_OP(ext, 0, 32, 8, uint8_t, key->match.l2_dest_mac[1]);\ -+ MC_EXT_OP(ext, 0, 40, 8, uint8_t, key->match.l2_dest_mac[0]);\ -+ MC_EXT_OP(ext, 0, 48, 16, uint16_t, key->match.l2_tpid);\ -+ MC_EXT_OP(ext, 1, 0, 8, uint8_t, key->match.l2_source_mac[5]);\ -+ MC_EXT_OP(ext, 1, 8, 8, uint8_t, key->match.l2_source_mac[4]);\ -+ MC_EXT_OP(ext, 1, 16, 8, uint8_t, key->match.l2_source_mac[3]);\ -+ MC_EXT_OP(ext, 1, 24, 8, uint8_t, key->match.l2_source_mac[2]);\ -+ MC_EXT_OP(ext, 1, 32, 8, uint8_t, key->match.l2_source_mac[1]);\ -+ MC_EXT_OP(ext, 1, 40, 8, uint8_t, key->match.l2_source_mac[0]);\ -+ MC_EXT_OP(ext, 1, 48, 16, uint16_t, key->match.l2_vlan_id);\ -+ MC_EXT_OP(ext, 2, 0, 32, uint32_t, key->match.l3_dest_ip);\ -+ MC_EXT_OP(ext, 2, 32, 32, uint32_t, key->match.l3_source_ip);\ -+ MC_EXT_OP(ext, 3, 0, 16, uint16_t, key->match.l4_dest_port);\ -+ MC_EXT_OP(ext, 3, 16, 16, uint16_t, key->match.l4_source_port);\ -+ MC_EXT_OP(ext, 3, 32, 16, uint16_t, key->match.l2_ether_type);\ -+ MC_EXT_OP(ext, 3, 48, 
8, uint8_t, key->match.l2_pcp_dei);\ -+ MC_EXT_OP(ext, 3, 56, 8, uint8_t, key->match.l3_dscp);\ -+ MC_EXT_OP(ext, 4, 0, 8, uint8_t, key->mask.l2_dest_mac[5]);\ -+ MC_EXT_OP(ext, 4, 8, 8, uint8_t, key->mask.l2_dest_mac[4]);\ -+ MC_EXT_OP(ext, 4, 16, 8, uint8_t, key->mask.l2_dest_mac[3]);\ -+ MC_EXT_OP(ext, 4, 24, 8, uint8_t, key->mask.l2_dest_mac[2]);\ -+ MC_EXT_OP(ext, 4, 32, 8, uint8_t, key->mask.l2_dest_mac[1]);\ -+ MC_EXT_OP(ext, 4, 40, 8, uint8_t, key->mask.l2_dest_mac[0]);\ -+ MC_EXT_OP(ext, 4, 48, 16, uint16_t, key->mask.l2_tpid);\ -+ MC_EXT_OP(ext, 5, 0, 8, uint8_t, key->mask.l2_source_mac[5]);\ -+ MC_EXT_OP(ext, 5, 8, 8, uint8_t, key->mask.l2_source_mac[4]);\ -+ MC_EXT_OP(ext, 5, 16, 8, uint8_t, key->mask.l2_source_mac[3]);\ -+ MC_EXT_OP(ext, 5, 24, 8, uint8_t, key->mask.l2_source_mac[2]);\ -+ MC_EXT_OP(ext, 5, 32, 8, uint8_t, key->mask.l2_source_mac[1]);\ -+ MC_EXT_OP(ext, 5, 40, 8, uint8_t, key->mask.l2_source_mac[0]);\ -+ MC_EXT_OP(ext, 5, 48, 16, uint16_t, key->mask.l2_vlan_id);\ -+ MC_EXT_OP(ext, 6, 0, 32, uint32_t, key->mask.l3_dest_ip);\ -+ MC_EXT_OP(ext, 6, 32, 32, uint32_t, key->mask.l3_source_ip);\ -+ MC_EXT_OP(ext, 7, 0, 16, uint16_t, key->mask.l4_dest_port);\ -+ MC_EXT_OP(ext, 7, 16, 16, uint16_t, key->mask.l4_source_port);\ -+ MC_EXT_OP(ext, 7, 32, 16, uint16_t, key->mask.l2_ether_type);\ -+ MC_EXT_OP(ext, 7, 48, 8, uint8_t, key->mask.l2_pcp_dei);\ -+ MC_EXT_OP(ext, 7, 56, 8, uint8_t, key->mask.l3_dscp);\ -+ MC_EXT_OP(ext, 8, 0, 8, uint8_t, key->match.l3_protocol);\ -+ MC_EXT_OP(ext, 8, 8, 8, uint8_t, key->mask.l3_protocol);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ACL_ADD_ENTRY(cmd, acl_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->result.if_id);\ -+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->precedence);\ -+ MC_CMD_OP(cmd, 1, 0, 4, enum dpsw_acl_action, cfg->result.action);\ -+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ACL_REMOVE_ENTRY(cmd, acl_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->result.if_id);\ -+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->precedence);\ -+ MC_CMD_OP(cmd, 1, 0, 4, enum dpsw_acl_action, cfg->result.action);\ -+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ACL_ADD_IF(cmd, acl_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ACL_REMOVE_IF(cmd, acl_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ACL_GET_ATTR(cmd, acl_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_ACL_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->max_entries);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->num_entries);\ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, attr->num_ifs);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_CTRL_IF_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->rx_fqid);\ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, attr->rx_err_fqid);\ 
-+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tx_err_conf_fqid);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_CTRL_IF_SET_POOLS(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_dpbp); \ -+ MC_CMD_OP(cmd, 0, 8, 1, int, cfg->pools[0].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 9, 1, int, cfg->pools[1].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 10, 1, int, cfg->pools[2].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 11, 1, int, cfg->pools[3].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 12, 1, int, cfg->pools[4].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 13, 1, int, cfg->pools[5].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 14, 1, int, cfg->pools[6].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 15, 1, int, cfg->pools[7].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->pools[0].dpbp_id); \ -+ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->pools[0].buffer_size);\ -+ MC_CMD_OP(cmd, 1, 0, 32, int, cfg->pools[1].dpbp_id); \ -+ MC_CMD_OP(cmd, 4, 48, 16, uint16_t, cfg->pools[1].buffer_size);\ -+ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->pools[2].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->pools[2].buffer_size);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, cfg->pools[3].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 16, 16, uint16_t, cfg->pools[3].buffer_size);\ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->pools[4].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->pools[4].buffer_size);\ -+ MC_CMD_OP(cmd, 3, 0, 32, int, cfg->pools[5].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 48, 16, uint16_t, cfg->pools[5].buffer_size);\ -+ MC_CMD_OP(cmd, 3, 32, 32, int, cfg->pools[6].dpbp_id); \ -+ MC_CMD_OP(cmd, 6, 0, 16, uint16_t, cfg->pools[6].buffer_size);\ -+ MC_CMD_OP(cmd, 4, 0, 32, int, cfg->pools[7].dpbp_id); \ -+ MC_CMD_OP(cmd, 6, 16, 16, uint16_t, cfg->pools[7].buffer_size);\ -+} while (0) -+ -+#endif /* __FSL_DPSW_CMD_H */ ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c -@@ -0,0 +1,1639 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include "../../fsl-mc/include/mc-sys.h" -+#include "../../fsl-mc/include/mc-cmd.h" -+#include "dpsw.h" -+#include "dpsw-cmd.h" -+ -+/* internal functions */ -+static void build_if_id_bitmap(const uint16_t *if_id, -+ const uint16_t num_ifs, -+ struct mc_command *cmd, -+ int start_param) -+{ -+ int i; -+ -+ for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++) -+ cmd->params[start_param + (if_id[i] / 64)] |= mc_enc( -+ (if_id[i] % 64), 1, 1); -+} -+ -+static int read_if_id_bitmap(uint16_t *if_id, -+ uint16_t *num_ifs, -+ struct mc_command *cmd, -+ int start_param) -+{ -+ int bitmap[DPSW_MAX_IF] = { 0 }; -+ int i, j = 0; -+ int count = 0; -+ -+ for (i = 0; i < DPSW_MAX_IF; i++) { -+ bitmap[i] = (int)mc_dec(cmd->params[start_param + i / 64], -+ i % 64, 1); -+ count += bitmap[i]; -+ } -+ -+ *num_ifs = (uint16_t)count; -+ -+ for (i = 0; (i < DPSW_MAX_IF) && (j < count); i++) { -+ if (bitmap[i]) { -+ if_id[j] = (uint16_t)i; -+ j++; -+ } -+ } -+ -+ return 0; -+} -+ -+/* DPSW APIs */ -+int dpsw_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpsw_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPSW_CMD_OPEN(cmd, dpsw_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpsw_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpsw_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPSW_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpsw_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ 
uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IS_ENABLED, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpsw_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpsw_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPSW_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpsw_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPSW_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPSW_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPSW_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPSW_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = 
mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPSW_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpsw_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPSW_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPSW_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpsw_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF, -+ cmd_flags, -+ token); -+ DPSW_CMD_SET_REFLECTION_IF(cmd, if_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_link_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_link_state *state) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_GET_LINK_STATE(cmd, if_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_IF_GET_LINK_STATE(cmd, state); -+ -+ return 0; -+} -+ -+int dpsw_if_set_flooding(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_FLOODING(cmd, if_id, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ 
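Every wrapper in this file follows the same round trip: encode a header with mc_encode_cmd_header(), pack the parameters with the matching DPSW_CMD_* macro, post the command with mc_send_command(), and, for get-style calls, unpack the reply with the DPSW_RSP_* macro. A sketch of that flow built only from calls defined in this file; it assumes kernel context (pr_info), a struct fsl_mc_io handle already obtained from the fsl-mc bus, DPSW object id 0 and interface id 1, and ethsw_query_link() is an illustrative name:

static int ethsw_query_link(struct fsl_mc_io *mc_io)
{
	struct dpsw_link_state state = { 0 };
	uint16_t token;
	int err;

	/* Bind to DPSW object 0 (hypothetical id) and obtain a token. */
	err = dpsw_open(mc_io, 0, 0, &token);
	if (err)
		return err;

	/* Query interface 1; the RSP macro fills in state.up/.rate. */
	err = dpsw_if_get_link_state(mc_io, 0, token, 1, &state);
	if (!err)
		pr_info("if 1: link %s, rate %u\n",
			state.up ? "up" : "down", state.rate);

	/* Always release the token, even on error. */
	dpsw_close(mc_io, 0, token);
	return err;
}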
-+int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_BROADCAST(cmd, if_id, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MULTICAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_MULTICAST(cmd, if_id, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_tci(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_tci_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_TCI(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_get_tci(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_tci_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err = 0; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_GET_TCI(cmd, if_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_IF_GET_TCI(cmd, cfg); -+ -+ return 0; -+} -+ -+int dpsw_if_set_stp(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_stp_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_STP(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_accepted_frames_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_ACCEPTED_FRAMES, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int accept_all) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_ACCEPT_ALL_VLAN(cmd, if_id, accept_all); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ enum dpsw_counter type, -+ uint64_t *counter) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_GET_COUNTER(cmd, if_id, type); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve
response parameters */ -+ DPSW_RSP_IF_GET_COUNTER(cmd, *counter); -+ -+ return 0; -+} -+ -+int dpsw_if_set_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ enum dpsw_counter type, -+ uint64_t counter) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_COUNTER, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_COUNTER(cmd, if_id, type, counter); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_tx_selection_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TX_SELECTION, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_TX_SELECTION(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_reflection_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_ADD_REFLECTION(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_reflection_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_REMOVE_REFLECTION(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_metering_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING_METERING, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_FLOODING_METERING(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_metering(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint8_t tc_id, -+ const struct dpsw_metering_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_METERING, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_METERING(cmd, if_id, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg, -+ uint8_t *early_drop_buf) -+{ -+ uint64_t *ext_params = (uint64_t *)early_drop_buf; -+ -+ DPSW_PREP_EARLY_DROP(ext_params, cfg); -+} -+ -+int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_EARLY_DROP(cmd, if_id, tc_id, early_drop_iova); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, 
-+ const struct dpsw_custom_tpid_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ADD_CUSTOM_TPID, -+ cmd_flags, -+ token); -+ DPSW_CMD_ADD_CUSTOM_TPID(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpsw_custom_tpid_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_REMOVE_CUSTOM_TPID, -+ cmd_flags, -+ token); -+ DPSW_CMD_REMOVE_CUSTOM_TPID(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_ENABLE(cmd, if_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_DISABLE(cmd, if_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_if_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_GET_ATTR(cmd, if_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_IF_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint16_t frame_length) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_MAX_FRAME_LENGTH(cmd, if_id, frame_length); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint16_t *frame_length) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_GET_MAX_FRAME_LENGTH(cmd, if_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPSW_RSP_IF_GET_MAX_FRAME_LENGTH(cmd, *frame_length); -+ -+ return 0; -+} -+ -+int dpsw_vlan_add(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_ADD(cmd, vlan_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const 
struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_ADD_IF(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_ADD_IF_UNTAGGED(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_FLOODING, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_ADD_IF_FLOODING(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_REMOVE_IF(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_REMOVE_IF_UNTAGGED(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_FLOODING, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_REMOVE_IF_FLOODING(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_remove(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_REMOVE(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ struct dpsw_vlan_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_ATTRIBUTES, -+ 
cmd_flags, -+ token); -+ DPSW_CMD_VLAN_GET_ATTR(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_VLAN_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpsw_vlan_get_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_GET_IF(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_VLAN_GET_IF(cmd, cfg); -+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 1); -+ -+ return 0; -+} -+ -+int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_FLOODING, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_GET_IF_FLOODING(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_VLAN_GET_IF_FLOODING(cmd, cfg); -+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 1); -+ -+ return 0; -+} -+ -+int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_UNTAGGED, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_GET_IF_UNTAGGED(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_VLAN_GET_IF(cmd, cfg); -+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 1); -+ -+ return 0; -+} -+ -+int dpsw_fdb_add(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *fdb_id, -+ const struct dpsw_fdb_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_ADD(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_FDB_ADD(cmd, *fdb_id); -+ -+ return 0; -+} -+ -+int dpsw_fdb_remove(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_REMOVE(cmd, fdb_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_unicast_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_ADD_UNICAST(cmd, fdb_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t 
token, -+ uint16_t fdb_id, -+ struct dpsw_fdb_unicast_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_UNICAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_GET_UNICAST(cmd, fdb_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_FDB_GET_UNICAST(cmd, cfg); -+ -+ return 0; -+} -+ -+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_unicast_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_REMOVE_UNICAST(cmd, fdb_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_multicast_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 2); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_ADD_MULTICAST(cmd, fdb_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ struct dpsw_fdb_multicast_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_MULTICAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_GET_MULTICAST(cmd, fdb_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_FDB_GET_MULTICAST(cmd, cfg); -+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 2); -+ -+ return 0; -+} -+ -+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_multicast_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 2); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_REMOVE_MULTICAST(cmd, fdb_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ enum dpsw_fdb_learning_mode mode) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_SET_LEARNING_MODE(cmd, fdb_id, mode); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ struct dpsw_fdb_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_ATTR, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_GET_ATTR(cmd, fdb_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_FDB_GET_ATTR(cmd, attr); -+ -+ return 0; 
-+} -+ -+int dpsw_acl_add(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *acl_id, -+ const struct dpsw_acl_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD, -+ cmd_flags, -+ token); -+ DPSW_CMD_ACL_ADD(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_ACL_ADD(cmd, *acl_id); -+ -+ return 0; -+} -+ -+int dpsw_acl_remove(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE, -+ cmd_flags, -+ token); -+ DPSW_CMD_ACL_REMOVE(cmd, acl_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key, -+ uint8_t *entry_cfg_buf) -+{ -+ uint64_t *ext_params = (uint64_t *)entry_cfg_buf; -+ -+ DPSW_PREP_ACL_ENTRY(ext_params, key); -+} -+ -+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ const struct dpsw_acl_entry_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_ENTRY, -+ cmd_flags, -+ token); -+ DPSW_CMD_ACL_ADD_ENTRY(cmd, acl_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ const struct dpsw_acl_entry_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY, -+ cmd_flags, -+ token); -+ DPSW_CMD_ACL_REMOVE_ENTRY(cmd, acl_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_acl_add_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ const struct dpsw_acl_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_IF, -+ cmd_flags, -+ token); -+ DPSW_CMD_ACL_ADD_IF(cmd, acl_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ const struct dpsw_acl_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_IF, -+ cmd_flags, -+ token); -+ DPSW_CMD_ACL_REMOVE_IF(cmd, acl_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ struct dpsw_acl_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_GET_ATTR, -+ cmd_flags, -+ token); -+ DPSW_CMD_ACL_GET_ATTR(cmd, acl_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_ACL_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io, -+ 
uint32_t cmd_flags, -+ uint16_t token, -+ struct dpsw_ctrl_if_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_CTRL_IF_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpsw_ctrl_if_pools_cfg *pools) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS, -+ cmd_flags, -+ token); -+ DPSW_CMD_CTRL_IF_SET_POOLS(cmd, pools); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+/** -+* @brief Function disables control interface -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPSW object -+* -+* Return: '0' on Success; Error code otherwise. -+*/ -+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h -@@ -0,0 +1,2164 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPSW_H -+#define __FSL_DPSW_H -+ -+#include "../../fsl-mc/include/net.h" -+ -+/* Data Path L2-Switch API -+ * Contains API for handling DPSW topology and functionality -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * DPSW general definitions -+ */ -+ -+/** -+ * Maximum number of traffic class priorities -+ */ -+#define DPSW_MAX_PRIORITIES 8 -+/** -+ * Maximum number of interfaces -+ */ -+#define DPSW_MAX_IF 64 -+ -+/** -+ * dpsw_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpsw_id: DPSW unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpsw_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpsw_id, -+ uint16_t *token); -+ -+/** -+ * dpsw_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpsw_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * DPSW options -+ */ -+ -+/** -+ * Disable flooding -+ */ -+#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL -+/** -+ * Disable Multicast -+ */ -+#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL -+/** -+ * Support control interface -+ */ -+#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL -+/** -+ * Disable flooding metering -+ */ -+#define DPSW_OPT_FLOODING_METERING_DIS 0x0000000000000020ULL -+/** -+ * Enable metering -+ */ -+#define DPSW_OPT_METERING_EN 0x0000000000000040ULL -+ -+/** -+ * enum dpsw_component_type - component type of a bridge -+ * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an -+ * enterprise VLAN bridge or of a Provider Bridge used -+ * to process C-tagged frames -+ * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a -+ * Provider Bridge -+ * -+ */ -+enum dpsw_component_type { -+ DPSW_COMPONENT_TYPE_C_VLAN = 0, -+ DPSW_COMPONENT_TYPE_S_VLAN -+}; -+ -+/** -+ * struct dpsw_cfg - DPSW configuration -+ * @num_ifs: Number of external and internal interfaces -+ * @adv: Advanced parameters; default is all zeros; -+ * use this structure to change default settings -+ */ -+struct dpsw_cfg { -+ uint16_t num_ifs; -+ /** -+ * struct adv - Advanced parameters -+ * @options: Enable/Disable DPSW features (bitmap) -+ * @max_vlans: Maximum Number of VLAN's; 0 - indicates default 16 -+ * @max_meters_per_if: Number of meters per interface -+ * @max_fdbs: Maximum Number of FDB's; 0 - indicates default 16 -+ * @max_fdb_entries: Number of FDB entries for default FDB table; -+ * 0 - indicates default 1024 entries. -+ * @fdb_aging_time: Default FDB aging time for default FDB table; -+ * 0 - indicates default 300 seconds -+ * @max_fdb_mc_groups: Number of multicast groups in each FDB table; -+ * 0 - indicates default 32 -+ * @component_type: Indicates the component type of this bridge -+ */ -+ struct { -+ uint64_t options; -+ uint16_t max_vlans; -+ uint8_t max_meters_per_if; -+ uint8_t max_fdbs; -+ uint16_t max_fdb_entries; -+ uint16_t fdb_aging_time; -+ uint16_t max_fdb_mc_groups; -+ enum dpsw_component_type component_type; -+ } adv; -+}; -+ -+/** -+ * dpsw_create() - Create the DPSW object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPSW object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpsw_open() function to get an authentication -+ * token first -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpsw_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpsw_destroy() - Destroy the DPSW object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpsw_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpsw_enable() - Enable DPSW functionality -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpsw_disable() - Disable DPSW functionality -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpsw_is_enabled() - Check if the DPSW is enabled -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpsw_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpsw_reset() - Reset the DPSW, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * DPSW IRQ Index and Events -+ */ -+ -+#define DPSW_IRQ_INDEX_IF 0x0000 -+#define DPSW_IRQ_INDEX_L2SW 0x0001 -+ -+/** -+ * IRQ event - Indicates that the link state changed -+ */ -+#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001 -+ -+/** -+ * struct dpsw_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpsw_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpsw_set_irq() - Set IRQ information for the DPSW to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpsw_irq_cfg *irq_cfg); -+ -+/** -+ * dpsw_get_irq() - Get IRQ information from the DPSW -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpsw_irq_cfg *irq_cfg); -+ -+/** -+ * dpsw_set_irq_enable() - Set overall interrupt state. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable controls the -+ * overall interrupt state. If the interrupt is disabled, no cause will assert -+ * an interrupt -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpsw_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned Interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpsw_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpsw_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpsw_get_irq_status() - Get the current status of any pending interrupts -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpsw_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise.
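The IRQ calls above are normally used together: point the DPSW at a message-based interrupt address/data pair, mask in only the causes of interest, then enable the index. A minimal sketch built only from the declarations in this header; the portal pointer, token and MSI values are hypothetical placeholders that a real driver would obtain from the fsl-mc bus:

	/* Hypothetical helper; mc_io and token come from dpsw_open() elsewhere. */
	static int example_arm_link_irq(struct fsl_mc_io *mc_io, uint16_t token)
	{
		struct dpsw_irq_cfg irq_cfg = {
			.addr = 0x80000000ULL,	/* placeholder MSI address */
			.val = 0x1,		/* placeholder MSI data */
			.irq_num = 0,
		};
		int err;

		err = dpsw_set_irq(mc_io, 0, token, DPSW_IRQ_INDEX_IF, &irq_cfg);
		if (err)
			return err;

		/* Assert the IRQ only on link-state changes. */
		err = dpsw_set_irq_mask(mc_io, 0, token, DPSW_IRQ_INDEX_IF,
					DPSW_IRQ_EVENT_LINK_CHANGED);
		if (err)
			return err;

		return dpsw_set_irq_enable(mc_io, 0, token, DPSW_IRQ_INDEX_IF, 1);
	}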
-+ */ -+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+/** -+ * struct dpsw_attr - Structure representing DPSW attributes -+ * @id: DPSW object ID -+ * @version: DPSW version -+ * @options: Enable/Disable DPSW features -+ * @max_vlans: Maximum Number of VLANs -+ * @max_meters_per_if: Number of meters per interface -+ * @max_fdbs: Maximum Number of FDBs -+ * @max_fdb_entries: Number of FDB entries for default FDB table; -+ * 0 - indicates default 1024 entries. -+ * @fdb_aging_time: Default FDB aging time for default FDB table; -+ * 0 - indicates default 300 seconds -+ * @max_fdb_mc_groups: Number of multicast groups in each FDB table; -+ * 0 - indicates default 32 -+ * @mem_size: DPSW frame storage memory size -+ * @num_ifs: Number of interfaces -+ * @num_vlans: Current number of VLANs -+ * @num_fdbs: Current number of FDBs -+ * @component_type: Component type of this bridge -+ */ -+struct dpsw_attr { -+ int id; -+ /** -+ * struct version - DPSW version -+ * @major: DPSW major version -+ * @minor: DPSW minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint64_t options; -+ uint16_t max_vlans; -+ uint8_t max_meters_per_if; -+ uint8_t max_fdbs; -+ uint16_t max_fdb_entries; -+ uint16_t fdb_aging_time; -+ uint16_t max_fdb_mc_groups; -+ uint16_t num_ifs; -+ uint16_t mem_size; -+ uint16_t num_vlans; -+ uint8_t num_fdbs; -+ enum dpsw_component_type component_type; -+}; -+ -+/** -+ * dpsw_get_attributes() - Retrieve DPSW attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @attr: Returned DPSW attributes -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpsw_attr *attr); -+ -+/** -+ * dpsw_set_reflection_if() - Set target interface for reflected interfaces. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Id -+ * -+ * Only one reflection receive interface is allowed per switch -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id); -+ -+/** -+ * enum dpsw_action - Action selection for special/control frames -+ * @DPSW_ACTION_DROP: Drop frame -+ * @DPSW_ACTION_REDIRECT: Redirect frame to control port -+ */ -+enum dpsw_action { -+ DPSW_ACTION_DROP = 0, -+ DPSW_ACTION_REDIRECT = 1 -+}; -+ -+/** -+ * Enable auto-negotiation -+ */ -+#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL -+/** -+ * Enable half-duplex mode -+ */ -+#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -+/** -+ * Enable pause frames -+ */ -+#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL -+/** -+ * Enable a-symmetric pause frames -+ */ -+#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL -+ -+/** -+ * struct dpsw_link_cfg - Structure representing DPSW link configuration -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPSW_LINK_OPT_' values -+ */ -+struct dpsw_link_cfg { -+ uint32_t rate; -+ uint64_t options; -+}; -+ -+/** -+ * dpsw_if_set_link_cfg() - set the link configuration. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: interface id -+ * @cfg: Link configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_link_cfg *cfg); -+/** -+ * struct dpsw_link_state - Structure representing DPSW link state -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPSW_LINK_OPT_' values -+ * @up: 0 - covers two cases: down and disconnected, 1 - up -+ */ -+struct dpsw_link_state { -+ uint32_t rate; -+ uint64_t options; -+ int up; -+}; -+ -+/** -+ * dpsw_if_get_link_state() - Return the link state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: interface id -+ * @state: link state 1 - linkup, 0 - link down or disconnected -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_link_state *state); -+ -+/** -+ * dpsw_if_set_flooding() - Enable/disable flooding for particular interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @en: 1 - enable, 0 - disable -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_flooding(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int en); -+ -+/** -+ * dpsw_if_set_broadcast() - Enable/disable broadcast for particular interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @en: 1 - enable, 0 - disable -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int en); -+ -+/** -+ * dpsw_if_set_multicast() - Enable/disable multicast for particular interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @en: 1 - enable, 0 - disable -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int en); -+ -+/** -+ * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration -+ * @pcp: Priority Code Point (PCP): a 3-bit field which refers -+ * to the IEEE 802.1p priority -+ * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used -+ * separately or in conjunction with PCP to indicate frames -+ * eligible to be dropped in the presence of congestion -+ * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN -+ * to which the frame belongs.
The hexadecimal values -+ * of 0x000 and 0xFFF are reserved; -+ * all other values may be used as VLAN identifiers, -+ * allowing up to 4,094 VLANs -+ */ -+struct dpsw_tci_cfg { -+ uint8_t pcp; -+ uint8_t dei; -+ uint16_t vlan_id; -+}; -+ -+/** -+ * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: Tag Control Information Configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_tci(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_tci_cfg *cfg); -+ -+/** -+ * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: Tag Control Information Configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_get_tci(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_tci_cfg *cfg); -+ -+/** -+ * enum dpsw_stp_state - Spanning Tree Protocol (STP) states -+ * @DPSW_STP_STATE_BLOCKING: Blocking state -+ * @DPSW_STP_STATE_LISTENING: Listening state -+ * @DPSW_STP_STATE_LEARNING: Learning state -+ * @DPSW_STP_STATE_FORWARDING: Forwarding state -+ * -+ */ -+enum dpsw_stp_state { -+ DPSW_STP_STATE_BLOCKING = 0, -+ DPSW_STP_STATE_LISTENING = 1, -+ DPSW_STP_STATE_LEARNING = 2, -+ DPSW_STP_STATE_FORWARDING = 3 -+}; -+ -+/** -+ * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration -+ * @vlan_id: VLAN ID STP state -+ * @state: STP state -+ */ -+struct dpsw_stp_cfg { -+ uint16_t vlan_id; -+ enum dpsw_stp_state state; -+}; -+ -+/** -+ * dpsw_if_set_stp() - Function sets Spanning Tree Protocol (STP) state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: STP State configuration parameters -+ * -+ * The following STP states are supported - -+ * blocking, listening, learning, forwarding and disabled. -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_stp(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_stp_cfg *cfg); -+ -+/** -+ * enum dpsw_accepted_frames - Types of frames to accept -+ * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and -+ * priority tagged frames -+ * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or -+ * Priority-Tagged frames received on this interface. 
-+ * -+ */ -+enum dpsw_accepted_frames { -+ DPSW_ADMIT_ALL = 1, -+ DPSW_ADMIT_ONLY_VLAN_TAGGED = 3 -+}; -+ -+/** -+ * struct dpsw_accepted_frames_cfg - Types of frames to accept configuration -+ * @type: Defines ingress accepted frames -+ * @unaccept_act: When a frame is not accepted, it may be discarded or -+ * redirected to control interface depending on this mode -+ */ -+struct dpsw_accepted_frames_cfg { -+ enum dpsw_accepted_frames type; -+ enum dpsw_action unaccept_act; -+}; -+ -+/** -+ * dpsw_if_set_accepted_frames() -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: Frame types configuration -+ * -+ * When admit_only_vlan_tagged is set, the device discards untagged -+ * frames or Priority-Tagged frames received on this interface. -+ * When admit_only_untagged is set, untagged frames or Priority-Tagged -+ * frames received on this interface are accepted and assigned -+ * to a VID based on the PVID and VID Set for this interface. -+ * When admit_all is set, the device accepts VLAN tagged, untagged -+ * and priority tagged frames. -+ * The default is admit_all. -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_accepted_frames_cfg *cfg); -+ -+/** -+ * dpsw_if_set_accept_all_vlan() -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @accept_all: Accept or drop frames having different VLAN -+ * -+ * When accept_all is FALSE, the device discards incoming -+ * frames for VLANs that do not include this interface in their -+ * member set. When accept_all is TRUE, the interface accepts all incoming frames -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise.
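As a concrete reading of the semantics above, the sketch below admits only VLAN-tagged frames on one interface and redirects everything else to the control interface; mc_io, token and if_id are assumed to be already set up, and the cmd_flags value of 0 is a placeholder:

	struct dpsw_accepted_frames_cfg frames_cfg = {
		.type = DPSW_ADMIT_ONLY_VLAN_TAGGED,
		.unaccept_act = DPSW_ACTION_REDIRECT,	/* or DPSW_ACTION_DROP */
	};
	int err = dpsw_if_set_accepted_frames(mc_io, 0, token, if_id, &frames_cfg);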
-+ */ -+int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int accept_all); -+ -+/** -+ * enum dpsw_counter - Counter types -+ * @DPSW_CNT_ING_FRAME: Counts ingress frames -+ * @DPSW_CNT_ING_BYTE: Counts ingress bytes -+ * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames -+ * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames -+ * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames -+ * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes -+ * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames -+ * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes -+ * @DPSW_CNT_EGR_FRAME: Counts egress frames -+ * @DPSW_CNT_EGR_BYTE: Counts egress bytes -+ * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames -+ * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames -+ */ -+enum dpsw_counter { -+ DPSW_CNT_ING_FRAME = 0x0, -+ DPSW_CNT_ING_BYTE = 0x1, -+ DPSW_CNT_ING_FLTR_FRAME = 0x2, -+ DPSW_CNT_ING_FRAME_DISCARD = 0x3, -+ DPSW_CNT_ING_MCAST_FRAME = 0x4, -+ DPSW_CNT_ING_MCAST_BYTE = 0x5, -+ DPSW_CNT_ING_BCAST_FRAME = 0x6, -+ DPSW_CNT_ING_BCAST_BYTES = 0x7, -+ DPSW_CNT_EGR_FRAME = 0x8, -+ DPSW_CNT_EGR_BYTE = 0x9, -+ DPSW_CNT_EGR_FRAME_DISCARD = 0xa, -+ DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb -+}; -+ -+/** -+ * dpsw_if_get_counter() - Get specific counter of particular interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @type: Counter type -+ * @counter: return value -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ enum dpsw_counter type, -+ uint64_t *counter); -+ -+/** -+ * dpsw_if_set_counter() - Set specific counter of particular interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @type: Counter type -+ * @counter: New counter value -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ enum dpsw_counter type, -+ uint64_t counter); -+ -+/** -+ * Maximum number of TCs -+ */ -+#define DPSW_MAX_TC 8 -+ -+/** -+ * enum dpsw_priority_selector - User priority -+ * @DPSW_UP_PCP: Priority Code Point (PCP): a 3-bit field which -+ * refers to the IEEE 802.1p priority.
-+ * @DPSW_UP_DSCP: Differentiated services Code Point (DSCP): 6 bit -+ * field from IP header -+ * -+ */ -+enum dpsw_priority_selector { -+ DPSW_UP_PCP = 0, -+ DPSW_UP_DSCP = 1 -+}; -+ -+/** -+ * enum dpsw_schedule_mode - Traffic classes scheduling -+ * @DPSW_SCHED_STRICT_PRIORITY: schedule strict priority -+ * @DPSW_SCHED_WEIGHTED: schedule based on token bucket created algorithm -+ */ -+enum dpsw_schedule_mode { -+ DPSW_SCHED_STRICT_PRIORITY, -+ DPSW_SCHED_WEIGHTED -+}; -+ -+/** -+ * struct dpsw_tx_schedule_cfg - traffic class configuration -+ * @mode: Strict or weight-based scheduling -+ * @delta_bandwidth: weighted Bandwidth in range from 100 to 10000 -+ */ -+struct dpsw_tx_schedule_cfg { -+ enum dpsw_schedule_mode mode; -+ uint16_t delta_bandwidth; -+}; -+ -+/** -+ * struct dpsw_tx_selection_cfg - Mapping user priority into traffic -+ * class configuration -+ * @priority_selector: Source for user priority regeneration -+ * @tc_id: The Regenerated User priority that the incoming -+ * User Priority is mapped to for this interface -+ * @tc_sched: Traffic classes configuration -+ */ -+struct dpsw_tx_selection_cfg { -+ enum dpsw_priority_selector priority_selector; -+ uint8_t tc_id[DPSW_MAX_PRIORITIES]; -+ struct dpsw_tx_schedule_cfg tc_sched[DPSW_MAX_TC]; -+}; -+ -+/** -+ * dpsw_if_set_tx_selection() - Function is used for mapping variety -+ * of frame fields -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: Traffic class mapping configuration -+ * -+ * Function is used for mapping variety of frame fields (DSCP, PCP) -+ * to Traffic Class. Traffic class is a number -+ * in the range from 0 to 7 -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_tx_selection_cfg *cfg); -+ -+/** -+ * enum dpsw_reflection_filter - Filter type for frames to reflect -+ * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames -+ * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames belong to -+ * particular VLAN defined by vid parameter -+ * -+ */ -+enum dpsw_reflection_filter { -+ DPSW_REFLECTION_FILTER_INGRESS_ALL = 0, -+ DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1 -+}; -+ -+/** -+ * struct dpsw_reflection_cfg - Structure representing reflection information -+ * @filter: Filter type for frames to reflect -+ * @vlan_id: Vlan Id to reflect; valid only when filter type is -+ * DPSW_INGRESS_VLAN -+ */ -+struct dpsw_reflection_cfg { -+ enum dpsw_reflection_filter filter; -+ uint16_t vlan_id; -+}; -+ -+/** -+ * dpsw_if_add_reflection() - Identify interface to be reflected or mirrored -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: Reflection configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. 
-+ */ -+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_reflection_cfg *cfg); -+ -+/** -+ * dpsw_if_remove_reflection() - Remove an interface from reflection or mirroring -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: Reflection configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_reflection_cfg *cfg); -+ -+/** -+ * enum dpsw_metering_mode - Metering modes -+ * @DPSW_METERING_MODE_NONE: metering disabled -+ * @DPSW_METERING_MODE_RFC2698: RFC 2698 -+ * @DPSW_METERING_MODE_RFC4115: RFC 4115 -+ */ -+enum dpsw_metering_mode { -+ DPSW_METERING_MODE_NONE = 0, -+ DPSW_METERING_MODE_RFC2698, -+ DPSW_METERING_MODE_RFC4115 -+}; -+ -+/** -+ * enum dpsw_metering_unit - Metering count -+ * @DPSW_METERING_UNIT_BYTES: count bytes -+ * @DPSW_METERING_UNIT_FRAMES: count frames -+ */ -+enum dpsw_metering_unit { -+ DPSW_METERING_UNIT_BYTES = 0, -+ DPSW_METERING_UNIT_FRAMES -+}; -+ -+/** -+ * struct dpsw_metering_cfg - Metering configuration -+ * @mode: metering modes -+ * @units: Bytes or frame units -+ * @cir: Committed information rate (CIR) in Kbit/s -+ * @eir: Peak information rate (PIR) Kbit/s rfc2698 -+ * Excess information rate (EIR) Kbit/s rfc4115 -+ * @cbs: Committed burst size (CBS) in bytes -+ * @ebs: Peak burst size (PBS) in bytes for rfc2698 -+ * Excess burst size (EBS) in bytes rfc4115 -+ * -+ */ -+struct dpsw_metering_cfg { -+ enum dpsw_metering_mode mode; -+ enum dpsw_metering_unit units; -+ uint32_t cir; -+ uint32_t eir; -+ uint32_t cbs; -+ uint32_t ebs; -+}; -+ -+/** -+ * dpsw_if_set_flooding_metering() - Set flooding metering -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: Metering parameters -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_metering_cfg *cfg); -+ -+/** -+ * dpsw_if_set_metering() - Set interface metering for flooding -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @tc_id: Traffic class ID -+ * @cfg: Metering parameters -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise.
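For illustration, an RFC 2698 byte-based meter for flooded traffic might be filled in as below; the rates and burst sizes are arbitrary example values, not recommendations, and mc_io, token and if_id are assumed to exist:

	struct dpsw_metering_cfg meter = {
		.mode = DPSW_METERING_MODE_RFC2698,
		.units = DPSW_METERING_UNIT_BYTES,
		.cir = 100000,	/* committed rate, Kbit/s */
		.eir = 200000,	/* peak rate (PIR) under RFC 2698, Kbit/s */
		.cbs = 8192,	/* committed burst size, bytes */
		.ebs = 16384,	/* peak burst size (PBS), bytes */
	};
	int err = dpsw_if_set_flooding_metering(mc_io, 0, token, if_id, &meter);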
-+ */ -+int dpsw_if_set_metering(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint8_t tc_id, -+ const struct dpsw_metering_cfg *cfg); -+ -+/** -+ * enum dpsw_early_drop_unit - DPSW early drop unit -+ * @DPSW_EARLY_DROP_UNIT_BYTE: count bytes -+ * @DPSW_EARLY_DROP_UNIT_FRAMES: count frames -+ */ -+enum dpsw_early_drop_unit { -+ DPSW_EARLY_DROP_UNIT_BYTE = 0, -+ DPSW_EARLY_DROP_UNIT_FRAMES -+}; -+ -+/** -+ * enum dpsw_early_drop_mode - DPSW early drop mode -+ * @DPSW_EARLY_DROP_MODE_NONE: early drop is disabled -+ * @DPSW_EARLY_DROP_MODE_TAIL: early drop in taildrop mode -+ * @DPSW_EARLY_DROP_MODE_WRED: early drop in WRED mode -+ */ -+enum dpsw_early_drop_mode { -+ DPSW_EARLY_DROP_MODE_NONE = 0, -+ DPSW_EARLY_DROP_MODE_TAIL, -+ DPSW_EARLY_DROP_MODE_WRED -+}; -+ -+/** -+ * struct dpsw_wred_cfg - WRED configuration -+ * @max_threshold: maximum threshold that packets may be discarded. Above this -+ * threshold all packets are discarded; must be less than 2^39; -+ * approximated to be expressed as (x+256)*2^(y-1) due to HW -+ * implementation. -+ * @min_threshold: minimum threshold that packets may be discarded at -+ * @drop_probability: probability that a packet will be discarded (1-100, -+ * associated with the maximum threshold) -+ */ -+struct dpsw_wred_cfg { -+ uint64_t min_threshold; -+ uint64_t max_threshold; -+ uint8_t drop_probability; -+}; -+ -+/** -+ * struct dpsw_early_drop_cfg - early-drop configuration -+ * @drop_mode: drop mode -+ * @units: count units -+ * @yellow: WRED - 'yellow' configuration -+ * @green: WRED - 'green' configuration -+ * @tail_drop_threshold: tail drop threshold -+ */ -+struct dpsw_early_drop_cfg { -+ enum dpsw_early_drop_mode drop_mode; -+ enum dpsw_early_drop_unit units; -+ struct dpsw_wred_cfg yellow; -+ struct dpsw_wred_cfg green; -+ uint32_t tail_drop_threshold; -+}; -+ -+/** -+ * dpsw_prepare_early_drop() - Prepare an early-drop configuration for setting on an interface -+ * @cfg: Early-drop configuration -+ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called before dpsw_if_set_early_drop() -+ * -+ */ -+void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg, -+ uint8_t *early_drop_buf); -+ -+/** -+ * dpsw_if_set_early_drop() - Set interface traffic class early-drop -+ * configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 64 bytes; -+ * Must be cacheline-aligned and DMA-able memory -+ * -+ * warning: Before calling this function, call dpsw_prepare_early_drop() -+ * to prepare the early_drop_iova parameter -+ * -+ * Return: '0' on Success; error code otherwise.
-+ */ -+int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * struct dpsw_custom_tpid_cfg - Structure representing tag Protocol identifier -+ * @tpid: An additional tag protocol identifier -+ */ -+struct dpsw_custom_tpid_cfg { -+ uint16_t tpid; -+}; -+ -+/** -+ * dpsw_add_custom_tpid() - API Configures a distinct Ethernet type value -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @cfg: Tag Protocol identifier -+ * -+ * API Configures a distinct Ethernet type value (or TPID value) -+ * to indicate a VLAN tag in addition to the common -+ * TPID values 0x8100 and 0x88A8. -+ * Two additional TPID's are supported -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpsw_custom_tpid_cfg *cfg); -+ -+/** -+ * dpsw_remove_custom_tpid - API removes a distinct Ethernet type value -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @cfg: Tag Protocol identifier -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpsw_custom_tpid_cfg *cfg); -+ -+/** -+ * dpsw_if_enable() - Enable Interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id); -+ -+/** -+ * dpsw_if_disable() - Disable Interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. 
-+ */ -+int dpsw_if_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id); -+ -+/** -+ * struct dpsw_if_attr - Structure representing DPSW interface attributes -+ * @num_tcs: Number of traffic classes -+ * @rate: Transmit rate in bits per second -+ * @options: Interface configuration options (bitmap) -+ * @enabled: Indicates if interface is enabled -+ * @accept_all_vlan: The device discards/accepts incoming frames -+ * for VLANs that do not include this interface -+ * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device -+ * discards untagged frames or priority-tagged frames received on -+ * this interface; -+ * When set to 'DPSW_ADMIT_ALL', untagged frames or priority- -+ * tagged frames received on this interface are accepted -+ * @qdid: control frames transmit qdid -+ */ -+struct dpsw_if_attr { -+ uint8_t num_tcs; -+ uint32_t rate; -+ uint32_t options; -+ int enabled; -+ int accept_all_vlan; -+ enum dpsw_accepted_frames admit_untagged; -+ uint16_t qdid; -+}; -+ -+/** -+ * dpsw_if_get_attributes() - Function obtains attributes of interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @attr: Returned interface attributes -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_if_attr *attr); -+ -+/** -+ * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @frame_length: Maximum Frame Length -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint16_t frame_length); -+ -+/** -+ * dpsw_if_get_max_frame_length() - Get Maximum Receive frame length. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @frame_length: Returned maximum Frame Length -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint16_t *frame_length); -+ -+/** -+ * struct dpsw_vlan_cfg - VLAN Configuration -+ * @fdb_id: Forwarding Data Base -+ */ -+struct dpsw_vlan_cfg { -+ uint16_t fdb_id; -+}; -+ -+/** -+ * dpsw_vlan_add() - Adding new VLAN to DPSW. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @cfg: VLAN configuration -+ * -+ * Only VLAN ID and FDB ID are required parameters here. -+ * 12 bit VLAN ID is defined in IEEE802.1Q. -+ * Adding a duplicate VLAN ID is not allowed. -+ * FDB ID can be shared across multiple VLANs. Shared learning -+ * is obtained by calling dpsw_vlan_add for multiple VLAN IDs -+ * with same fdb_id -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. 
-+ */
-+int dpsw_vlan_add(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t vlan_id,
-+	const struct dpsw_vlan_cfg *cfg);
-+
-+/**
-+ * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces
-+ * @num_ifs: The number of interfaces that are assigned to the egress
-+ *		list for this VLAN
-+ * @if_id: The set of interfaces that are
-+ *		assigned to the egress list for this VLAN
-+ */
-+struct dpsw_vlan_if_cfg {
-+	uint16_t num_ifs;
-+	uint16_t if_id[DPSW_MAX_IF];
-+};
-+
-+/**
-+ * dpsw_vlan_add_if() - Adding a set of interfaces to an existing VLAN.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces to add
-+ *
-+ * Only interfaces that do not yet belong to this VLAN are added;
-+ * otherwise an error is generated and the entire command is ignored.
-+ * This function can be called numerous times, each time providing
-+ * only the required delta of interfaces.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t vlan_id,
-+	const struct dpsw_vlan_if_cfg *cfg);
-+
-+/**
-+ * dpsw_vlan_add_if_untagged() - Defining a set of interfaces that should be
-+ *		transmitted as untagged.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces that should be transmitted as untagged
-+ *
-+ * These interfaces should already belong to this VLAN.
-+ * By default, all interfaces are transmitted as tagged.
-+ * Providing a non-existent interface, or an interface that is already
-+ * configured as untagged, generates an error and the entire command
-+ * is ignored.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t vlan_id,
-+	const struct dpsw_vlan_if_cfg *cfg);
-+
-+/**
-+ * dpsw_vlan_add_if_flooding() - Define a set of interfaces that should be
-+ *		included in flooding when a frame with an unknown unicast
-+ *		destination MAC arrives.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces that should be used for flooding
-+ *
-+ * These interfaces should belong to this VLAN. By default, all
-+ * interfaces are included in the flooding list. Providing a
-+ * non-existent interface, or an interface that is already in the
-+ * flooding list, generates an error and the entire command is
-+ * ignored.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t vlan_id,
-+	const struct dpsw_vlan_if_cfg *cfg);
-+
-+/**
-+ * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces that should be removed
-+ *
-+ * Interfaces must belong to this VLAN; otherwise an error
-+ * is returned and the command is ignored
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
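-+ *
-+ * For instance (an illustrative sketch only), removing interface 2
-+ * from VLAN 10:
-+ *
-+ *	struct dpsw_vlan_if_cfg vcfg = { .num_ifs = 1, .if_id[0] = 2 };
-+ *
-+ *	err = dpsw_vlan_remove_if(mc_io, 0, token, 10, &vcfg);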
-+ */
-+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t vlan_id,
-+	const struct dpsw_vlan_if_cfg *cfg);
-+
-+/**
-+ * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should be
-+ *		converted from transmitting untagged to transmitting tagged
-+ *		frames.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces that should be removed from the untagged set
-+ *
-+ * Interfaces provided to this API have to belong to this VLAN and be
-+ * configured as untagged; otherwise an error is returned and the
-+ * command is ignored
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t vlan_id,
-+	const struct dpsw_vlan_if_cfg *cfg);
-+
-+/**
-+ * dpsw_vlan_remove_if_flooding() - Define a set of interfaces that should be
-+ *		removed from the flooding list.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces used for flooding
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t vlan_id,
-+	const struct dpsw_vlan_if_cfg *cfg);
-+
-+/**
-+ * dpsw_vlan_remove() - Remove an entire VLAN
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t vlan_id);
-+
-+/**
-+ * struct dpsw_vlan_attr - VLAN attributes
-+ * @fdb_id: Associated FDB ID
-+ * @num_ifs: Number of interfaces
-+ * @num_untagged_ifs: Number of untagged interfaces
-+ * @num_flooding_ifs: Number of flooding interfaces
-+ */
-+struct dpsw_vlan_attr {
-+	uint16_t fdb_id;
-+	uint16_t num_ifs;
-+	uint16_t num_untagged_ifs;
-+	uint16_t num_flooding_ifs;
-+};
-+
-+/**
-+ * dpsw_vlan_get_attributes() - Get VLAN attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @attr: Returned DPSW attributes
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t vlan_id,
-+	struct dpsw_vlan_attr *attr);
-+
-+/**
-+ * dpsw_vlan_get_if() - Get interfaces belonging to this VLAN
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Returned set of interfaces belonging to this VLAN
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
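-+ *
-+ * A read-back sketch (illustrative only):
-+ *
-+ *	struct dpsw_vlan_if_cfg vcfg;
-+ *	int i;
-+ *
-+ *	err = dpsw_vlan_get_if(mc_io, 0, token, 10, &vcfg);
-+ *	for (i = 0; !err && i < vcfg.num_ifs; i++)
-+ *		pr_info("VLAN 10 member if %u\n", vcfg.if_id[i]);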
-+ */
-+int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t vlan_id,
-+	struct dpsw_vlan_if_cfg *cfg);
-+
-+/**
-+ * dpsw_vlan_get_if_flooding() - Get interfaces used in flooding for this VLAN
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Returned set of flooding interfaces
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t vlan_id,
-+	struct dpsw_vlan_if_cfg *cfg);
-+
-+/**
-+ * dpsw_vlan_get_if_untagged() - Get interfaces that should be transmitted as
-+ *		untagged
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Returned set of untagged interfaces
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t vlan_id,
-+	struct dpsw_vlan_if_cfg *cfg);
-+
-+/**
-+ * struct dpsw_fdb_cfg - FDB Configuration
-+ * @num_fdb_entries: Number of FDB entries
-+ * @fdb_aging_time: Aging time in seconds
-+ */
-+struct dpsw_fdb_cfg {
-+	uint16_t num_fdb_entries;
-+	uint16_t fdb_aging_time;
-+};
-+
-+/**
-+ * dpsw_fdb_add() - Adds an FDB to the switch and returns a handle to the
-+ *		FDB table for future reference
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Returned Forwarding Database Identifier
-+ * @cfg: FDB Configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_add(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t *fdb_id,
-+	const struct dpsw_fdb_cfg *cfg);
-+
-+/**
-+ * dpsw_fdb_remove() - Remove FDB from switch
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t fdb_id);
-+
-+/**
-+ * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic
-+ * @DPSW_FDB_ENTRY_STATIC: Static entry
-+ * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry
-+ */
-+enum dpsw_fdb_entry_type {
-+	DPSW_FDB_ENTRY_STATIC = 0,
-+	DPSW_FDB_ENTRY_DINAMIC = 1
-+};
-+
-+/**
-+ * struct dpsw_fdb_unicast_cfg - Unicast entry configuration
-+ * @type: Select static or dynamic entry
-+ * @mac_addr: MAC address
-+ * @if_egress: Egress interface ID
-+ */
-+struct dpsw_fdb_unicast_cfg {
-+	enum dpsw_fdb_entry_type type;
-+	uint8_t mac_addr[6];
-+	uint16_t if_egress;
-+};
-+
-+/**
-+ * dpsw_fdb_add_unicast() - Adds a unicast entry into the MAC lookup table
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Unicast entry configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
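-+ *
-+ * A sketch of adding a static entry (illustrative only; fdb_id 0 is the
-+ * default FDB, as used by the driver code below; the MAC address is a
-+ * placeholder):
-+ *
-+ *	struct dpsw_fdb_unicast_cfg entry = {
-+ *		.type = DPSW_FDB_ENTRY_STATIC,
-+ *		.mac_addr = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x01 },
-+ *		.if_egress = 1,
-+ *	};
-+ *
-+ *	err = dpsw_fdb_add_unicast(mc_io, 0, token, 0, &entry);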
-+ */ -+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_unicast_cfg *cfg); -+ -+/** -+ * dpsw_fdb_get_unicast() - Get unicast entry from MAC lookup table by -+ * unicast Ethernet address -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * @cfg: Returned unicast entry configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ struct dpsw_fdb_unicast_cfg *cfg); -+ -+/** -+ * dpsw_fdb_remove_unicast() - removes an entry from MAC lookup table -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * @cfg: Unicast entry configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_unicast_cfg *cfg); -+ -+/** -+ * struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration -+ * @type: Select static or dynamic entry -+ * @mac_addr: MAC address -+ * @num_ifs: Number of external and internal interfaces -+ * @if_id: Egress interface IDs -+ */ -+struct dpsw_fdb_multicast_cfg { -+ enum dpsw_fdb_entry_type type; -+ uint8_t mac_addr[6]; -+ uint16_t num_ifs; -+ uint16_t if_id[DPSW_MAX_IF]; -+}; -+ -+/** -+ * dpsw_fdb_add_multicast() - Add a set of egress interfaces to multi-cast group -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * @cfg: Multicast entry configuration -+ * -+ * If group doesn't exist, it will be created. -+ * It adds only interfaces not belonging to this multicast group -+ * yet, otherwise error will be generated and the command is -+ * ignored. -+ * This function may be called numerous times always providing -+ * required interfaces delta. -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_multicast_cfg *cfg); -+ -+/** -+ * dpsw_fdb_get_multicast() - Reading multi-cast group by multi-cast Ethernet -+ * address. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * @cfg: Returned multicast entry configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ struct dpsw_fdb_multicast_cfg *cfg); -+ -+/** -+ * dpsw_fdb_remove_multicast() - Removing interfaces from an existing multicast -+ * group. 
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Multicast entry configuration
-+ *
-+ * Interfaces provided by this API have to exist in the group;
-+ * otherwise an error is returned and the entire command is
-+ * ignored. If no interface is left in the group,
-+ * the entire group is deleted
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t fdb_id,
-+	const struct dpsw_fdb_multicast_cfg *cfg);
-+
-+/**
-+ * enum dpsw_fdb_learning_mode - Auto-learning modes
-+ * @DPSW_FDB_LEARNING_MODE_DIS: Disable Auto-learning
-+ * @DPSW_FDB_LEARNING_MODE_HW: Enable HW auto-Learning
-+ * @DPSW_FDB_LEARNING_MODE_NON_SECURE: Enable non-secure learning by CPU
-+ * @DPSW_FDB_LEARNING_MODE_SECURE: Enable secure learning by CPU
-+ *
-+ * NON-SECURE LEARNING
-+ * SMAC found	DMAC found	CTLU Action
-+ * v		v		Forward frame to
-+ *				1. DMAC destination
-+ * -		v		Forward frame to
-+ *				1. DMAC destination
-+ *				2. Control interface
-+ * v		-		Forward frame to
-+ *				1. Flooding list of interfaces
-+ * -		-		Forward frame to
-+ *				1. Flooding list of interfaces
-+ *				2. Control interface
-+ *
-+ * SECURE LEARNING
-+ * SMAC found	DMAC found	CTLU Action
-+ * v		v		Forward frame to
-+ *				1. DMAC destination
-+ * -		v		Forward frame to
-+ *				1. Control interface
-+ * v		-		Forward frame to
-+ *				1. Flooding list of interfaces
-+ * -		-		Forward frame to
-+ *				1. Control interface
-+ */
-+enum dpsw_fdb_learning_mode {
-+	DPSW_FDB_LEARNING_MODE_DIS = 0,
-+	DPSW_FDB_LEARNING_MODE_HW = 1,
-+	DPSW_FDB_LEARNING_MODE_NON_SECURE = 2,
-+	DPSW_FDB_LEARNING_MODE_SECURE = 3
-+};
-+
-+/**
-+ * dpsw_fdb_set_learning_mode() - Define FDB learning mode
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @mode: Learning mode
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t fdb_id,
-+	enum dpsw_fdb_learning_mode mode);
-+
-+/**
-+ * struct dpsw_fdb_attr - FDB Attributes
-+ * @max_fdb_entries: Maximum number of FDB entries
-+ * @fdb_aging_time: Aging time in seconds
-+ * @learning_mode: Learning mode
-+ * @num_fdb_mc_groups: Current number of multicast groups
-+ * @max_fdb_mc_groups: Maximum number of multicast groups
-+ */
-+struct dpsw_fdb_attr {
-+	uint16_t max_fdb_entries;
-+	uint16_t fdb_aging_time;
-+	enum dpsw_fdb_learning_mode learning_mode;
-+	uint16_t num_fdb_mc_groups;
-+	uint16_t max_fdb_mc_groups;
-+};
-+
-+/**
-+ * dpsw_fdb_get_attributes() - Get FDB attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @attr: Returned FDB attributes
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
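-+ *
-+ * For example (illustrative sketch only), checking the configured
-+ * learning mode:
-+ *
-+ *	struct dpsw_fdb_attr attr;
-+ *
-+ *	err = dpsw_fdb_get_attributes(mc_io, 0, token, 0, &attr);
-+ *	if (!err && attr.learning_mode == DPSW_FDB_LEARNING_MODE_HW)
-+ *		pr_info("HW learning enabled\n");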
-+ */
-+int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t fdb_id,
-+	struct dpsw_fdb_attr *attr);
-+
-+/**
-+ * struct dpsw_acl_cfg - ACL Configuration
-+ * @max_entries: Maximum number of ACL entries
-+ */
-+struct dpsw_acl_cfg {
-+	uint16_t max_entries;
-+};
-+
-+/**
-+ * struct dpsw_acl_fields - ACL fields.
-+ * @l2_dest_mac: Destination MAC address: BPDU, Multicast, Broadcast, Unicast,
-+ *		slow protocols, MVRP, STP
-+ * @l2_source_mac: Source MAC address
-+ * @l2_tpid: Layer 2 (Ethernet) protocol type, used to identify the following
-+ *		protocols: MPLS, PTP, PFC, ARP, Jumbo frames, LLDP, IEEE802.1ae,
-+ *		Q-in-Q, IPv4, IPv6, PPPoE
-+ * @l2_pcp_dei: Layer 2 priority code point (PCP) and drop eligible
-+ *		indicator (DEI)
-+ * @l2_vlan_id: Layer 2 VLAN ID
-+ * @l2_ether_type: Layer 2 Ethernet type
-+ * @l3_dscp: Layer 3 differentiated services code point
-+ * @l3_protocol: Tells the network layer at the destination host which
-+ *		protocol this packet belongs to. The following protocols are
-+ *		supported: ICMP, IGMP, IPv4 (encapsulation), TCP, IPv6
-+ *		(encapsulation), GRE, PTP
-+ * @l3_source_ip: Source IPv4 address
-+ * @l3_dest_ip: Destination IPv4 address
-+ * @l4_source_port: Source TCP/UDP Port
-+ * @l4_dest_port: Destination TCP/UDP Port
-+ */
-+struct dpsw_acl_fields {
-+	uint8_t l2_dest_mac[6];
-+	uint8_t l2_source_mac[6];
-+	uint16_t l2_tpid;
-+	uint8_t l2_pcp_dei;
-+	uint16_t l2_vlan_id;
-+	uint16_t l2_ether_type;
-+	uint8_t l3_dscp;
-+	uint8_t l3_protocol;
-+	uint32_t l3_source_ip;
-+	uint32_t l3_dest_ip;
-+	uint16_t l4_source_port;
-+	uint16_t l4_dest_port;
-+};
-+
-+/**
-+ * struct dpsw_acl_key - ACL key
-+ * @match: Match fields
-+ * @mask: Mask: b'1 - valid, b'0 - don't care
-+ */
-+struct dpsw_acl_key {
-+	struct dpsw_acl_fields match;
-+	struct dpsw_acl_fields mask;
-+};
-+
-+/**
-+ * enum dpsw_acl_action - ACL actions
-+ * @DPSW_ACL_ACTION_DROP: Drop frame
-+ * @DPSW_ACL_ACTION_REDIRECT: Redirect to certain port
-+ * @DPSW_ACL_ACTION_ACCEPT: Accept frame
-+ * @DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF: Redirect to control interface
-+ */
-+enum dpsw_acl_action {
-+	DPSW_ACL_ACTION_DROP,
-+	DPSW_ACL_ACTION_REDIRECT,
-+	DPSW_ACL_ACTION_ACCEPT,
-+	DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF
-+};
-+
-+/**
-+ * struct dpsw_acl_result - ACL action
-+ * @action: Action to be taken when an ACL entry is hit
-+ * @if_id: Interface ID to redirect the frame to; valid only if a redirect
-+ *		action is selected
-+ */
-+struct dpsw_acl_result {
-+	enum dpsw_acl_action action;
-+	uint16_t if_id;
-+};
-+
-+/**
-+ * struct dpsw_acl_entry_cfg - ACL entry
-+ * @key_iova: I/O virtual address of DMA-able memory filled with key after call
-+ *		to dpsw_acl_prepare_entry_cfg()
-+ * @result: Required action when entry hit occurs
-+ * @precedence: Precedence inside the ACL; 0 is lowest. This priority cannot
-+ *		change during the lifetime of a policy. It is the user's
-+ *		responsibility to space the priorities to allow for subsequent
-+ *		rule additions.
-+ */
-+struct dpsw_acl_entry_cfg {
-+	uint64_t key_iova;
-+	struct dpsw_acl_result result;
-+	int precedence;
-+};
-+
-+/**
-+ * dpsw_acl_add() - Adds ACL to L2 switch.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @acl_id: Returned ACL ID, for future reference
-+ * @cfg: ACL configuration
-+ *
-+ * Creates an Access Control List. Multiple ACLs can be created and
-+ * co-exist in the L2 switch
-+ *
-+ * Return: '0' on Success; Error code otherwise.
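-+ *
-+ * A typical flow, sketched for illustration (DMA mapping of the key
-+ * buffer is caller-specific and omitted):
-+ *
-+ *	struct dpsw_acl_cfg acfg = { .max_entries = 16 };
-+ *	uint16_t acl_id;
-+ *
-+ *	err = dpsw_acl_add(mc_io, 0, token, &acl_id, &acfg);
-+ *
-+ * Entries are then prepared with dpsw_acl_prepare_entry_cfg(), installed
-+ * with dpsw_acl_add_entry(), and bound to ports with dpsw_acl_add_if().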
-+ */
-+int dpsw_acl_add(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t *acl_id,
-+	const struct dpsw_acl_cfg *cfg);
-+
-+/**
-+ * dpsw_acl_remove() - Removes ACL from L2 switch.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_acl_remove(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t acl_id);
-+
-+/**
-+ * dpsw_acl_prepare_entry_cfg() - Prepare an ACL entry configuration.
-+ * @key: Key
-+ * @entry_cfg_buf: Zeroed 256-byte buffer, to be mapped for DMA afterwards
-+ *
-+ * This function has to be called before adding or removing an ACL entry
-+ *
-+ */
-+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
-+	uint8_t *entry_cfg_buf);
-+
-+/**
-+ * dpsw_acl_add_entry() - Adds an entry to ACL.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ * @cfg: Entry configuration
-+ *
-+ * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t acl_id,
-+	const struct dpsw_acl_entry_cfg *cfg);
-+
-+/**
-+ * dpsw_acl_remove_entry() - Removes an entry from ACL.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ * @cfg: Entry configuration
-+ *
-+ * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t acl_id,
-+	const struct dpsw_acl_entry_cfg *cfg);
-+
-+/**
-+ * struct dpsw_acl_if_cfg - List of interfaces to associate with ACL
-+ * @num_ifs: Number of interfaces
-+ * @if_id: List of interfaces
-+ */
-+struct dpsw_acl_if_cfg {
-+	uint16_t num_ifs;
-+	uint16_t if_id[DPSW_MAX_IF];
-+};
-+
-+/**
-+ * dpsw_acl_add_if() - Associate interface(s) with ACL.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ * @cfg: Interfaces list
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t acl_id,
-+	const struct dpsw_acl_if_cfg *cfg);
-+
-+/**
-+ * dpsw_acl_remove_if() - Dissociate interface(s) from ACL.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ * @cfg: Interfaces list
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t acl_id,
-+	const struct dpsw_acl_if_cfg *cfg);
-+
-+/**
-+ * struct dpsw_acl_attr - ACL Attributes
-+ * @max_entries: Max number of ACL entries
-+ * @num_entries: Number of used ACL entries
-+ * @num_ifs: Number of interfaces associated with ACL
-+ */
-+struct dpsw_acl_attr {
-+	uint16_t max_entries;
-+	uint16_t num_entries;
-+	uint16_t num_ifs;
-+};
-+
-+/**
-+ * dpsw_acl_get_attributes() - Get ACL attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @acl_id: ACL Identifier
-+ * @attr: Returned ACL attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	uint16_t acl_id,
-+	struct dpsw_acl_attr *attr);
-+
-+/**
-+ * struct dpsw_ctrl_if_attr - Control interface attributes
-+ * @rx_fqid: Receive FQID
-+ * @rx_err_fqid: Receive error FQID
-+ * @tx_err_conf_fqid: Transmit error and confirmation FQID
-+ */
-+struct dpsw_ctrl_if_attr {
-+	uint32_t rx_fqid;
-+	uint32_t rx_err_fqid;
-+	uint32_t tx_err_conf_fqid;
-+};
-+
-+/**
-+ * dpsw_ctrl_if_get_attributes() - Obtain control interface attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @attr: Returned control interface attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	struct dpsw_ctrl_if_attr *attr);
-+
-+/**
-+ * Maximum number of DPBP
-+ */
-+#define DPSW_MAX_DPBP 8
-+
-+/**
-+ * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration
-+ * @num_dpbp: Number of DPBPs
-+ * @pools: Array of buffer pools parameters; The number of valid entries
-+ *		must match 'num_dpbp' value
-+ */
-+struct dpsw_ctrl_if_pools_cfg {
-+	uint8_t num_dpbp;
-+	/**
-+	 * struct pools - Buffer pools parameters
-+	 * @dpbp_id: DPBP object ID
-+	 * @buffer_size: Buffer size
-+	 * @backup_pool: Backup pool
-+	 */
-+	struct {
-+		int dpbp_id;
-+		uint16_t buffer_size;
-+		int backup_pool;
-+	} pools[DPSW_MAX_DPBP];
-+};
-+
-+/**
-+ * dpsw_ctrl_if_set_pools() - Set control interface buffer pools
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @cfg: Buffer pools configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token,
-+	const struct dpsw_ctrl_if_pools_cfg *cfg);
-+
-+/**
-+ * dpsw_ctrl_if_enable() - Enable control interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
-+	uint32_t cmd_flags,
-+	uint16_t token);
-+
-+/**
-+ * dpsw_ctrl_if_disable() - Disable control interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
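-+ *
-+ * A bring-up sketch (illustrative only; the DPBP object ID is a
-+ * placeholder):
-+ *
-+ *	struct dpsw_ctrl_if_pools_cfg pcfg = {
-+ *		.num_dpbp = 1,
-+ *		.pools[0] = { .dpbp_id = 3, .buffer_size = 2048 },
-+ *	};
-+ *
-+ *	err = dpsw_ctrl_if_set_pools(mc_io, 0, token, &pcfg);
-+ *	if (!err)
-+ *		err = dpsw_ctrl_if_enable(mc_io, 0, token);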
-+*/ -+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+#endif /* __FSL_DPSW_H */ ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethsw/switch.c -@@ -0,0 +1,1711 @@ -+/* Copyright 2014-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */
-+
-+#include
-+
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+
-+#include
-+#include
-+
-+#include "../../fsl-mc/include/mc.h"
-+#include "dpsw.h"
-+#include "dpsw-cmd.h"
-+
-+/* Minimal supported DPSW version */
-+#define DPSW_MIN_VER_MAJOR 7
-+#define DPSW_MIN_VER_MINOR 0
-+
-+/* IRQ index */
-+#define DPSW_MAX_IRQ_NUM 2
-+
-+#define ETHSW_VLAN_MEMBER 1
-+#define ETHSW_VLAN_UNTAGGED 2
-+#define ETHSW_VLAN_PVID 4
-+#define ETHSW_VLAN_GLOBAL 8
-+
-+struct ethsw_port_priv {
-+	struct net_device *netdev;
-+	struct list_head list;
-+	u16 port_index;
-+	struct ethsw_dev_priv *ethsw_priv;
-+	u8 stp_state;
-+
-+	char vlans[VLAN_VID_MASK+1];
-+
-+};
-+
-+struct ethsw_dev_priv {
-+	struct net_device *netdev;
-+	struct fsl_mc_io *mc_io;
-+	uint16_t dpsw_handle;
-+	struct dpsw_attr sw_attr;
-+	int dev_id;
-+	/* TODO: redundant, we can use the slave dev list */
-+	struct list_head port_list;
-+
-+	bool flood;
-+	bool learning;
-+
-+	char vlans[VLAN_VID_MASK+1];
-+};
-+
-+static int ethsw_port_stop(struct net_device *netdev);
-+static int ethsw_port_open(struct net_device *netdev);
-+
-+static inline void __get_priv(struct net_device *netdev,
-+	struct ethsw_dev_priv **priv,
-+	struct ethsw_port_priv **port_priv)
-+{
-+	struct ethsw_dev_priv *_priv = NULL;
-+	struct ethsw_port_priv *_port_priv = NULL;
-+
-+	if (netdev->flags & IFF_MASTER) {
-+		_priv = netdev_priv(netdev);
-+	} else {
-+		_port_priv = netdev_priv(netdev);
-+		_priv = _port_priv->ethsw_priv;
-+	}
-+
-+	if (priv)
-+		*priv = _priv;
-+	if (port_priv)
-+		*port_priv = _port_priv;
-+}
-+
-+/* -------------------------------------------------------------------------- */
-+/* ethsw netdevice ops */
-+
-+static netdev_tx_t ethsw_dropframe(struct sk_buff *skb, struct net_device *dev)
-+{
-+	/* we don't support I/O for now, drop the frame */
-+	dev_kfree_skb_any(skb);
-+	return NETDEV_TX_OK;
-+}
-+
-+static int ethsw_open(struct net_device *netdev)
-+{
-+	struct ethsw_dev_priv *priv = netdev_priv(netdev);
-+	struct list_head *pos;
-+	struct ethsw_port_priv *port_priv = NULL;
-+	int err;
-+
-+	err = dpsw_enable(priv->mc_io, 0, priv->dpsw_handle);
-+	if (err) {
-+		netdev_err(netdev, "dpsw_enable err %d\n", err);
-+		return err;
-+	}
-+
-+	list_for_each(pos, &priv->port_list) {
-+		port_priv = list_entry(pos, struct ethsw_port_priv, list);
-+		err = dev_open(port_priv->netdev);
-+		if (err)
-+			netdev_err(port_priv->netdev, "dev_open err %d\n", err);
-+	}
-+
-+	return 0;
-+}
-+
-+static int ethsw_stop(struct net_device *netdev)
-+{
-+	struct ethsw_dev_priv *priv = netdev_priv(netdev);
-+	struct list_head *pos;
-+	struct ethsw_port_priv *port_priv = NULL;
-+	int err;
-+
-+	err = dpsw_disable(priv->mc_io, 0, priv->dpsw_handle);
-+	if (err) {
-+		netdev_err(netdev, "dpsw_disable err %d\n", err);
-+		return err;
-+	}
-+
-+	list_for_each(pos, &priv->port_list) {
-+		port_priv = list_entry(pos, struct ethsw_port_priv, list);
-+		err = dev_close(port_priv->netdev);
-+		if (err)
-+			netdev_err(port_priv->netdev,
-+				   "dev_close err %d\n", err);
-+	}
-+
-+	return 0;
-+}
-+
-+static int ethsw_add_vlan(struct net_device *netdev, u16 vid)
-+{
-+	struct ethsw_dev_priv *priv = netdev_priv(netdev);
-+	int err;
-+
-+	struct dpsw_vlan_cfg vcfg = {
-+		/* TODO: add support for VLAN private FDBs */
-+		.fdb_id = 0,
-+	};
-+	if (priv->vlans[vid]) {
-+		netdev_err(netdev, "VLAN already configured\n");
-+		return -EEXIST;
-+	}
-+
-+	err = dpsw_vlan_add(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg);
-+	if (err) {
-+		netdev_err(netdev, "dpsw_vlan_add err %d\n",
err); -+ return err; -+ } -+ priv->vlans[vid] = ETHSW_VLAN_MEMBER; -+ -+ return 0; -+} -+ -+static int ethsw_port_add_vlan(struct net_device *netdev, u16 vid, u16 flags) -+{ -+ struct ethsw_port_priv *port_priv = netdev_priv(netdev); -+ struct ethsw_dev_priv *priv = port_priv->ethsw_priv; -+ int err; -+ -+ struct dpsw_vlan_if_cfg vcfg = { -+ .num_ifs = 1, -+ .if_id[0] = port_priv->port_index, -+ }; -+ -+ if (port_priv->vlans[vid]) { -+ netdev_err(netdev, "VLAN already configured\n"); -+ return -EEXIST; -+ } -+ -+ if (flags & BRIDGE_VLAN_INFO_PVID && netif_oper_up(netdev)) { -+ netdev_err(netdev, "interface must be down to change PVID!\n"); -+ return -EBUSY; -+ } -+ -+ err = dpsw_vlan_add_if(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg); -+ if (err) { -+ netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err); -+ return err; -+ } -+ port_priv->vlans[vid] = ETHSW_VLAN_MEMBER; -+ -+ if (flags & BRIDGE_VLAN_INFO_UNTAGGED) { -+ err = dpsw_vlan_add_if_untagged(priv->mc_io, 0, -+ priv->dpsw_handle, vid, &vcfg); -+ if (err) { -+ netdev_err(netdev, "dpsw_vlan_add_if_untagged err %d\n", -+ err); -+ return err; -+ } -+ port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED; -+ } -+ -+ if (flags & BRIDGE_VLAN_INFO_PVID) { -+ struct dpsw_tci_cfg tci_cfg = { -+ /* TODO: at least add better defaults if these cannot -+ * be configured -+ */ -+ .pcp = 0, -+ .dei = 0, -+ .vlan_id = vid, -+ }; -+ -+ err = dpsw_if_set_tci(priv->mc_io, 0, priv->dpsw_handle, -+ port_priv->port_index, &tci_cfg); -+ if (err) { -+ netdev_err(netdev, "dpsw_if_set_tci err %d\n", err); -+ return err; -+ } -+ port_priv->vlans[vid] |= ETHSW_VLAN_PVID; -+ } -+ -+ return 0; -+} -+ -+static const struct nla_policy ifla_br_policy[IFLA_MAX+1] = { -+ [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 }, -+ [IFLA_BRIDGE_MODE] = { .type = NLA_U16 }, -+ [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY, -+ .len = sizeof(struct bridge_vlan_info), }, -+}; -+ -+static int ethsw_setlink_af_spec(struct net_device *netdev, -+ struct nlattr **tb) -+{ -+ struct bridge_vlan_info *vinfo; -+ struct ethsw_dev_priv *priv = NULL; -+ struct ethsw_port_priv *port_priv = NULL; -+ int err = 0; -+ -+ if (!tb[IFLA_BRIDGE_VLAN_INFO]) { -+ netdev_err(netdev, "no VLAN INFO in nlmsg\n"); -+ return -EOPNOTSUPP; -+ } -+ -+ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]); -+ -+ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK) -+ return -EINVAL; -+ -+ __get_priv(netdev, &priv, &port_priv); -+ -+ if (!port_priv || !priv->vlans[vinfo->vid]) { -+ /* command targets switch device or this is a new VLAN */ -+ err = ethsw_add_vlan(priv->netdev, vinfo->vid); -+ if (err) -+ return err; -+ -+ /* command targets switch device; mark it*/ -+ if (!port_priv) -+ priv->vlans[vinfo->vid] |= ETHSW_VLAN_GLOBAL; -+ } -+ -+ if (port_priv) { -+ /* command targets switch port */ -+ err = ethsw_port_add_vlan(netdev, vinfo->vid, vinfo->flags); -+ if (err) -+ return err; -+ } -+ -+ return 0; -+} -+ -+static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = { -+ [IFLA_BRPORT_STATE] = { .type = NLA_U8 }, -+ [IFLA_BRPORT_COST] = { .type = NLA_U32 }, -+ [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 }, -+ [IFLA_BRPORT_MODE] = { .type = NLA_U8 }, -+ [IFLA_BRPORT_GUARD] = { .type = NLA_U8 }, -+ [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 }, -+ [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, -+ [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, -+}; -+ -+static int ethsw_set_learning(struct net_device *netdev, u8 flag) -+{ -+ struct ethsw_dev_priv *priv = netdev_priv(netdev); -+ enum dpsw_fdb_learning_mode learn_mode; -+ int err; 
-+
-+	if (flag)
-+		learn_mode = DPSW_FDB_LEARNING_MODE_HW;
-+	else
-+		learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
-+
-+	err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle,
-+		0, learn_mode);
-+	if (err) {
-+		netdev_err(netdev, "dpsw_fdb_set_learning_mode err %d\n", err);
-+		return err;
-+	}
-+	priv->learning = !!flag;
-+
-+	return 0;
-+}
-+
-+static int ethsw_port_set_flood(struct net_device *netdev, u8 flag)
-+{
-+	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+	struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
-+	int err;
-+
-+	err = dpsw_if_set_flooding(priv->mc_io, 0, priv->dpsw_handle,
-+		port_priv->port_index, (int)flag);
-+	if (err) {
-+		netdev_err(netdev, "dpsw_if_set_flooding err %d\n", err);
-+		return err;
-+	}
-+	priv->flood = !!flag;
-+
-+	return 0;
-+}
-+
-+static int ethsw_port_set_state(struct net_device *netdev, u8 state)
-+{
-+	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+	struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
-+	u8 old_state = port_priv->stp_state;
-+	int err;
-+
-+	struct dpsw_stp_cfg stp_cfg = {
-+		.vlan_id = 1,
-+		.state = state,
-+	};
-+	/* TODO: check port state, interface may be down */
-+
-+	if (state > BR_STATE_BLOCKING)
-+		return -EINVAL;
-+
-+	if (state == port_priv->stp_state)
-+		return 0;
-+
-+	if (state == BR_STATE_DISABLED) {
-+		port_priv->stp_state = state;
-+
-+		err = ethsw_port_stop(netdev);
-+		if (err)
-+			goto error;
-+	} else {
-+		err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle,
-+			port_priv->port_index, &stp_cfg);
-+		if (err) {
-+			netdev_err(netdev, "dpsw_if_set_stp err %d\n", err);
-+			return err;
-+		}
-+
-+		port_priv->stp_state = state;
-+
-+		if (old_state == BR_STATE_DISABLED) {
-+			err = ethsw_port_open(netdev);
-+			if (err)
-+				goto error;
-+		}
-+	}
-+
-+	return 0;
-+error:
-+	port_priv->stp_state = old_state;
-+	return err;
-+}
-+
-+static int ethsw_setlink_protinfo(struct net_device *netdev,
-+	struct nlattr **tb)
-+{
-+	struct ethsw_dev_priv *priv;
-+	struct ethsw_port_priv *port_priv = NULL;
-+	int err = 0;
-+
-+	__get_priv(netdev, &priv, &port_priv);
-+
-+	if (tb[IFLA_BRPORT_LEARNING]) {
-+		u8 flag = nla_get_u8(tb[IFLA_BRPORT_LEARNING]);
-+
-+		if (port_priv)
-+			netdev_warn(netdev,
-+				    "learning set on whole switch dev\n");
-+
-+		err = ethsw_set_learning(priv->netdev, flag);
-+		if (err)
-+			return err;
-+
-+	} else if (tb[IFLA_BRPORT_UNICAST_FLOOD] && port_priv) {
-+		u8 flag = nla_get_u8(tb[IFLA_BRPORT_UNICAST_FLOOD]);
-+
-+		err = ethsw_port_set_flood(port_priv->netdev, flag);
-+		if (err)
-+			return err;
-+
-+	} else if (tb[IFLA_BRPORT_STATE] && port_priv) {
-+		u8 state = nla_get_u8(tb[IFLA_BRPORT_STATE]);
-+
-+		err = ethsw_port_set_state(port_priv->netdev, state);
-+		if (err)
-+			return err;
-+
-+	} else {
-+		return -EOPNOTSUPP;
-+	}
-+
-+	return 0;
-+}
-+
-+static int ethsw_setlink(struct net_device *netdev,
-+	struct nlmsghdr *nlh,
-+	u16 flags)
-+{
-+	struct nlattr *attr;
-+	struct nlattr *tb[(IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX) ?
-+ IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX+1]; -+ int err = 0; -+ -+ attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); -+ if (attr) { -+ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr, -+ ifla_br_policy); -+ if (err) { -+ netdev_err(netdev, -+ "nla_parse_nested for br_policy err %d\n", -+ err); -+ return err; -+ } -+ -+ err = ethsw_setlink_af_spec(netdev, tb); -+ return err; -+ } -+ -+ attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO); -+ if (attr) { -+ err = nla_parse_nested(tb, IFLA_BRPORT_MAX, attr, -+ ifla_brport_policy); -+ if (err) { -+ netdev_err(netdev, -+ "nla_parse_nested for brport_policy err %d\n", -+ err); -+ return err; -+ } -+ -+ err = ethsw_setlink_protinfo(netdev, tb); -+ return err; -+ } -+ -+ netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC/PROTINFO\n"); -+ return -EOPNOTSUPP; -+} -+ -+static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev, -+ struct ethsw_dev_priv *priv) -+{ -+ u8 operstate = netif_running(netdev) ? netdev->operstate : IF_OPER_DOWN; -+ int iflink; -+ int err; -+ -+ err = nla_put_string(skb, IFLA_IFNAME, netdev->name); -+ if (err) -+ goto nla_put_err; -+ err = nla_put_u32(skb, IFLA_MASTER, priv->netdev->ifindex); -+ if (err) -+ goto nla_put_err; -+ err = nla_put_u32(skb, IFLA_MTU, netdev->mtu); -+ if (err) -+ goto nla_put_err; -+ err = nla_put_u8(skb, IFLA_OPERSTATE, operstate); -+ if (err) -+ goto nla_put_err; -+ if (netdev->addr_len) { -+ err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len, -+ netdev->dev_addr); -+ if (err) -+ goto nla_put_err; -+ } -+ -+ iflink = dev_get_iflink(netdev); -+ if (netdev->ifindex != iflink) { -+ err = nla_put_u32(skb, IFLA_LINK, iflink); -+ if (err) -+ goto nla_put_err; -+ } -+ -+ return 0; -+ -+nla_put_err: -+ netdev_err(netdev, "nla_put_ err %d\n", err); -+ return err; -+} -+ -+static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev, -+ struct ethsw_port_priv *port_priv) -+{ -+ struct nlattr *nest; -+ int err; -+ -+ u8 stp_state = port_priv->stp_state; -+ -+ if (port_priv->stp_state == DPSW_STP_STATE_BLOCKING) -+ stp_state = BR_STATE_BLOCKING; -+ -+ nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED); -+ if (!nest) { -+ netdev_err(netdev, "nla_nest_start failed\n"); -+ return -ENOMEM; -+ } -+ -+ err = nla_put_u8(skb, IFLA_BRPORT_STATE, stp_state); -+ if (err) -+ goto nla_put_err; -+ err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0); -+ if (err) -+ goto nla_put_err; -+ err = nla_put_u32(skb, IFLA_BRPORT_COST, 0); -+ if (err) -+ goto nla_put_err; -+ err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0); -+ if (err) -+ goto nla_put_err; -+ err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0); -+ if (err) -+ goto nla_put_err; -+ err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0); -+ if (err) -+ goto nla_put_err; -+ err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0); -+ if (err) -+ goto nla_put_err; -+ err = nla_put_u8(skb, IFLA_BRPORT_LEARNING, -+ port_priv->ethsw_priv->learning); -+ if (err) -+ goto nla_put_err; -+ err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, -+ port_priv->ethsw_priv->flood); -+ if (err) -+ goto nla_put_err; -+ nla_nest_end(skb, nest); -+ -+ return 0; -+ -+nla_put_err: -+ netdev_err(netdev, "nla_put_ err %d\n", err); -+ nla_nest_cancel(skb, nest); -+ return err; -+} -+ -+static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev, -+ struct ethsw_dev_priv *priv, -+ struct ethsw_port_priv *port_priv) -+{ -+ struct nlattr *nest; -+ struct bridge_vlan_info vinfo; -+ const char *vlans; -+ u16 i; -+ int err; -+ -+ nest = nla_nest_start(skb, 
IFLA_AF_SPEC); -+ if (!nest) { -+ netdev_err(netdev, "nla_nest_start failed"); -+ return -ENOMEM; -+ } -+ -+ if (port_priv) -+ vlans = port_priv->vlans; -+ else -+ vlans = priv->vlans; -+ -+ for (i = 0; i < VLAN_VID_MASK+1; i++) { -+ vinfo.flags = 0; -+ vinfo.vid = i; -+ -+ if (vlans[i] & ETHSW_VLAN_UNTAGGED) -+ vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED; -+ -+ if (vlans[i] & ETHSW_VLAN_PVID) -+ vinfo.flags |= BRIDGE_VLAN_INFO_PVID; -+ -+ if (vlans[i] & ETHSW_VLAN_MEMBER) { -+ err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO, -+ sizeof(vinfo), &vinfo); -+ if (err) -+ goto nla_put_err; -+ } -+ } -+ -+ nla_nest_end(skb, nest); -+ -+ return 0; -+nla_put_err: -+ netdev_err(netdev, "nla_put_ err %d\n", err); -+ nla_nest_cancel(skb, nest); -+ return err; -+} -+ -+static int ethsw_getlink(struct sk_buff *skb, u32 pid, u32 seq, -+ struct net_device *netdev, u32 filter_mask, -+ int nlflags) -+{ -+ struct ethsw_dev_priv *priv; -+ struct ethsw_port_priv *port_priv = NULL; -+ struct ifinfomsg *hdr; -+ struct nlmsghdr *nlh; -+ int err; -+ -+ __get_priv(netdev, &priv, &port_priv); -+ -+ nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI); -+ if (!nlh) -+ return -EMSGSIZE; -+ -+ hdr = nlmsg_data(nlh); -+ memset(hdr, 0, sizeof(*hdr)); -+ hdr->ifi_family = AF_BRIDGE; -+ hdr->ifi_type = netdev->type; -+ hdr->ifi_index = netdev->ifindex; -+ hdr->ifi_flags = dev_get_flags(netdev); -+ -+ err = __nla_put_netdev(skb, netdev, priv); -+ if (err) -+ goto nla_put_err; -+ -+ if (port_priv) { -+ err = __nla_put_port(skb, netdev, port_priv); -+ if (err) -+ goto nla_put_err; -+ } -+ -+ /* Check if the VID information is requested */ -+ if (filter_mask & RTEXT_FILTER_BRVLAN) { -+ err = __nla_put_vlan(skb, netdev, priv, port_priv); -+ if (err) -+ goto nla_put_err; -+ } -+ -+ nlmsg_end(skb, nlh); -+ return skb->len; -+ -+nla_put_err: -+ nlmsg_cancel(skb, nlh); -+ return -EMSGSIZE; -+} -+ -+static int ethsw_dellink_switch(struct ethsw_dev_priv *priv, u16 vid) -+{ -+ struct list_head *pos; -+ struct ethsw_port_priv *ppriv_local = NULL; -+ int err = 0; -+ -+ if (!priv->vlans[vid]) -+ return -ENOENT; -+ -+ err = dpsw_vlan_remove(priv->mc_io, 0, priv->dpsw_handle, vid); -+ if (err) { -+ netdev_err(priv->netdev, "dpsw_vlan_remove err %d\n", err); -+ return err; -+ } -+ priv->vlans[vid] = 0; -+ -+ list_for_each(pos, &priv->port_list) { -+ ppriv_local = list_entry(pos, struct ethsw_port_priv, -+ list); -+ ppriv_local->vlans[vid] = 0; -+ } -+ -+ return 0; -+} -+ -+static int ethsw_dellink_port(struct ethsw_dev_priv *priv, -+ struct ethsw_port_priv *port_priv, -+ u16 vid) -+{ -+ struct list_head *pos; -+ struct ethsw_port_priv *ppriv_local = NULL; -+ struct dpsw_vlan_if_cfg vcfg = { -+ .num_ifs = 1, -+ .if_id[0] = port_priv->port_index, -+ }; -+ unsigned int count = 0; -+ int err = 0; -+ -+ if (!port_priv->vlans[vid]) -+ return -ENOENT; -+ -+ /* VLAN will be deleted from switch if global flag is not set -+ * and is configured on only one port -+ */ -+ if (!(priv->vlans[vid] & ETHSW_VLAN_GLOBAL)) { -+ list_for_each(pos, &priv->port_list) { -+ ppriv_local = list_entry(pos, struct ethsw_port_priv, -+ list); -+ if (ppriv_local->vlans[vid] & ETHSW_VLAN_MEMBER) -+ count++; -+ } -+ -+ if (count == 1) -+ return ethsw_dellink_switch(priv, vid); -+ } -+ -+ err = dpsw_vlan_remove_if(priv->mc_io, 0, priv->dpsw_handle, -+ vid, &vcfg); -+ if (err) { -+ netdev_err(priv->netdev, "dpsw_vlan_remove_if err %d\n", err); -+ return err; -+ } -+ port_priv->vlans[vid] = 0; -+ return 0; -+} -+ -+static int ethsw_dellink(struct net_device *netdev, 
-+	struct nlmsghdr *nlh,
-+	u16 flags)
-+{
-+	struct nlattr *tb[IFLA_BRIDGE_MAX+1];
-+	struct nlattr *spec;
-+	struct bridge_vlan_info *vinfo;
-+	struct ethsw_dev_priv *priv;
-+	struct ethsw_port_priv *port_priv = NULL;
-+	int err = 0;
-+
-+	spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
-+	if (!spec)
-+		return 0;
-+
-+	err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy);
-+	if (err)
-+		return err;
-+
-+	if (!tb[IFLA_BRIDGE_VLAN_INFO])
-+		return -EOPNOTSUPP;
-+
-+	vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
-+
-+	if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
-+		return -EINVAL;
-+
-+	__get_priv(netdev, &priv, &port_priv);
-+
-+	/* decide if command targets switch device or port */
-+	if (!port_priv)
-+		err = ethsw_dellink_switch(priv, vinfo->vid);
-+	else
-+		err = ethsw_dellink_port(priv, port_priv, vinfo->vid);
-+
-+	return err;
-+}
-+
-+static const struct net_device_ops ethsw_ops = {
-+	.ndo_open		= &ethsw_open,
-+	.ndo_stop		= &ethsw_stop,
-+
-+	.ndo_bridge_setlink	= &ethsw_setlink,
-+	.ndo_bridge_getlink	= &ethsw_getlink,
-+	.ndo_bridge_dellink	= &ethsw_dellink,
-+
-+	.ndo_start_xmit		= &ethsw_dropframe,
-+};
-+
-+/*--------------------------------------------------------------------------- */
-+/* switch port netdevice ops */
-+
-+static int _ethsw_port_carrier_state_sync(struct net_device *netdev)
-+{
-+	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+	struct dpsw_link_state state;
-+	int err;
-+
-+	err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
-+		port_priv->ethsw_priv->dpsw_handle,
-+		port_priv->port_index, &state);
-+	if (unlikely(err)) {
-+		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
-+		return err;
-+	}
-+
-+	WARN_ONCE(state.up > 1, "Garbage read into link_state");
-+
-+	if (state.up)
-+		netif_carrier_on(port_priv->netdev);
-+	else
-+		netif_carrier_off(port_priv->netdev);
-+
-+	return 0;
-+}
-+
-+static int ethsw_port_open(struct net_device *netdev)
-+{
-+	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+	int err;
-+
-+	if (!netif_oper_up(netdev) ||
-+	    port_priv->stp_state == BR_STATE_DISABLED)
-+		return 0;
-+
-+	err = dpsw_if_enable(port_priv->ethsw_priv->mc_io, 0,
-+		port_priv->ethsw_priv->dpsw_handle,
-+		port_priv->port_index);
-+	if (err) {
-+		netdev_err(netdev, "dpsw_if_enable err %d\n", err);
-+		return err;
-+	}
-+
-+	return 0;
-+}
-+
-+static int ethsw_port_stop(struct net_device *netdev)
-+{
-+	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+	int err;
-+
-+	err = dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0,
-+		port_priv->ethsw_priv->dpsw_handle,
-+		port_priv->port_index);
-+	if (err) {
-+		netdev_err(netdev, "dpsw_if_disable err %d\n", err);
-+		return err;
-+	}
-+
-+	return 0;
-+}
-+
-+static int ethsw_port_fdb_add_uc(struct net_device *netdev,
-+	const unsigned char *addr)
-+{
-+	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+	struct dpsw_fdb_unicast_cfg entry = {0};
-+	int err;
-+
-+	entry.if_egress = port_priv->port_index;
-+	entry.type = DPSW_FDB_ENTRY_STATIC;
-+	ether_addr_copy(entry.mac_addr, addr);
-+
-+	err = dpsw_fdb_add_unicast(port_priv->ethsw_priv->mc_io, 0,
-+		port_priv->ethsw_priv->dpsw_handle,
-+		0, &entry);
-+	if (err)
-+		netdev_err(netdev, "dpsw_fdb_add_unicast err %d\n", err);
-+	return err;
-+}
-+
-+static int ethsw_port_fdb_del_uc(struct net_device *netdev,
-+	const unsigned char *addr)
-+{
-+	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+	struct dpsw_fdb_unicast_cfg entry = {0};
-+	int err;
-+
-+	entry.if_egress = port_priv->port_index;
-+
entry.type = DPSW_FDB_ENTRY_STATIC; -+ ether_addr_copy(entry.mac_addr, addr); -+ -+ err = dpsw_fdb_remove_unicast(port_priv->ethsw_priv->mc_io, 0, -+ port_priv->ethsw_priv->dpsw_handle, -+ 0, &entry); -+ if (err) -+ netdev_err(netdev, "dpsw_fdb_remove_unicast err %d\n", err); -+ return err; -+} -+ -+static int ethsw_port_fdb_add_mc(struct net_device *netdev, -+ const unsigned char *addr) -+{ -+ struct ethsw_port_priv *port_priv = netdev_priv(netdev); -+ struct dpsw_fdb_multicast_cfg entry = {0}; -+ int err; -+ -+ ether_addr_copy(entry.mac_addr, addr); -+ entry.type = DPSW_FDB_ENTRY_STATIC; -+ entry.num_ifs = 1; -+ entry.if_id[0] = port_priv->port_index; -+ -+ err = dpsw_fdb_add_multicast(port_priv->ethsw_priv->mc_io, 0, -+ port_priv->ethsw_priv->dpsw_handle, -+ 0, &entry); -+ if (err) -+ netdev_err(netdev, "dpsw_fdb_add_multicast err %d\n", err); -+ return err; -+} -+ -+static int ethsw_port_fdb_del_mc(struct net_device *netdev, -+ const unsigned char *addr) -+{ -+ struct ethsw_port_priv *port_priv = netdev_priv(netdev); -+ struct dpsw_fdb_multicast_cfg entry = {0}; -+ int err; -+ -+ ether_addr_copy(entry.mac_addr, addr); -+ entry.type = DPSW_FDB_ENTRY_STATIC; -+ entry.num_ifs = 1; -+ entry.if_id[0] = port_priv->port_index; -+ -+ err = dpsw_fdb_remove_multicast(port_priv->ethsw_priv->mc_io, 0, -+ port_priv->ethsw_priv->dpsw_handle, -+ 0, &entry); -+ if (err) -+ netdev_err(netdev, "dpsw_fdb_remove_multicast err %d\n", err); -+ return err; -+} -+ -+static int _lookup_address(struct net_device *netdev, int is_uc, -+ const unsigned char *addr) -+{ -+ struct netdev_hw_addr *ha; -+ struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc; -+ -+ netif_addr_lock_bh(netdev); -+ list_for_each_entry(ha, &list->list, list) { -+ if (ether_addr_equal(ha->addr, addr)) { -+ netif_addr_unlock_bh(netdev); -+ return 1; -+ } -+ } -+ netif_addr_unlock_bh(netdev); -+ return 0; -+} -+ -+static int ethsw_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], -+ struct net_device *netdev, -+ const unsigned char *addr, u16 vid, -+ u16 flags) -+{ -+ struct list_head *pos; -+ struct ethsw_port_priv *port_priv = netdev_priv(netdev); -+ struct ethsw_dev_priv *priv = port_priv->ethsw_priv; -+ int err; -+ -+ /* TODO: add replace support when added to iproute bridge */ -+ if (!(flags & NLM_F_REQUEST)) { -+ netdev_err(netdev, -+ "ethsw_port_fdb_add unexpected flags value %08x\n", -+ flags); -+ return -EINVAL; -+ } -+ -+ if (is_unicast_ether_addr(addr)) { -+ /* if entry cannot be replaced, return error if exists */ -+ if (flags & NLM_F_EXCL || flags & NLM_F_APPEND) { -+ list_for_each(pos, &priv->port_list) { -+ port_priv = list_entry(pos, -+ struct ethsw_port_priv, -+ list); -+ if (_lookup_address(port_priv->netdev, -+ 1, addr)) -+ return -EEXIST; -+ } -+ } -+ -+ err = ethsw_port_fdb_add_uc(netdev, addr); -+ if (err) { -+ netdev_err(netdev, "ethsw_port_fdb_add_uc err %d\n", -+ err); -+ return err; -+ } -+ -+ /* we might have replaced an existing entry for a different -+ * switch port, make sure the address doesn't linger in any -+ * port address list -+ */ -+ list_for_each(pos, &priv->port_list) { -+ port_priv = list_entry(pos, struct ethsw_port_priv, -+ list); -+ dev_uc_del(port_priv->netdev, addr); -+ } -+ -+ err = dev_uc_add(netdev, addr); -+ if (err) { -+ netdev_err(netdev, "dev_uc_add err %d\n", err); -+ return err; -+ } -+ } else { -+ struct dpsw_fdb_multicast_cfg entry = { -+ .type = DPSW_FDB_ENTRY_STATIC, -+ .num_ifs = 0, -+ }; -+ -+ /* check if address is already set on this port */ -+ if 
(_lookup_address(netdev, 0, addr))
-+			return -EEXIST;
-+
-+		/* check if the address exists on other port */
-+		ether_addr_copy(entry.mac_addr, addr);
-+		err = dpsw_fdb_get_multicast(priv->mc_io, 0, priv->dpsw_handle,
-+			0, &entry);
-+		if (!err) {
-+			/* entry exists, can we replace it? */
-+			if (flags & NLM_F_EXCL)
-+				return -EEXIST;
-+		} else if (err != -ENAVAIL) {
-+			netdev_err(netdev, "dpsw_fdb_get_multicast err %d\n",
-+				   err);
-+			return err;
-+		}
-+
-+		err = ethsw_port_fdb_add_mc(netdev, addr);
-+		if (err) {
-+			netdev_err(netdev, "ethsw_port_fdb_add_mc err %d\n",
-+				   err);
-+			return err;
-+		}
-+
-+		err = dev_mc_add(netdev, addr);
-+		if (err) {
-+			netdev_err(netdev, "dev_mc_add err %d\n", err);
-+			return err;
-+		}
-+	}
-+
-+	return 0;
-+}
-+
-+static int ethsw_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
-+	struct net_device *netdev,
-+	const unsigned char *addr, u16 vid)
-+{
-+	int err;
-+
-+	if (is_unicast_ether_addr(addr)) {
-+		err = ethsw_port_fdb_del_uc(netdev, addr);
-+		if (err) {
-+			netdev_err(netdev, "ethsw_port_fdb_del_uc err %d\n",
-+				   err);
-+			return err;
-+		}
-+
-+		/* also delete if configured on port */
-+		err = dev_uc_del(netdev, addr);
-+		if (err && err != -ENOENT) {
-+			netdev_err(netdev, "dev_uc_del err %d\n", err);
-+			return err;
-+		}
-+	} else {
-+		if (!_lookup_address(netdev, 0, addr))
-+			return -ENOENT;
-+
-+		err = dev_mc_del(netdev, addr);
-+		if (err) {
-+			netdev_err(netdev, "dev_mc_del err %d\n", err);
-+			return err;
-+		}
-+
-+		err = ethsw_port_fdb_del_mc(netdev, addr);
-+		if (err) {
-+			netdev_err(netdev, "ethsw_port_fdb_del_mc err %d\n",
-+				   err);
-+			return err;
-+		}
-+	}
-+
-+	return 0;
-+}
-+
-+static struct rtnl_link_stats64 *
-+ethsw_port_get_stats(struct net_device *netdev,
-+	struct rtnl_link_stats64 *storage)
-+{
-+	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+	u64 tmp;
-+	int err;
-+
-+	err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+		port_priv->ethsw_priv->dpsw_handle,
-+		port_priv->port_index,
-+		DPSW_CNT_ING_FRAME, &storage->rx_packets);
-+	if (err)
-+		goto error;
-+
-+	err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+		port_priv->ethsw_priv->dpsw_handle,
-+		port_priv->port_index,
-+		DPSW_CNT_EGR_FRAME, &storage->tx_packets);
-+	if (err)
-+		goto error;
-+
-+	err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+		port_priv->ethsw_priv->dpsw_handle,
-+		port_priv->port_index,
-+		DPSW_CNT_ING_BYTE, &storage->rx_bytes);
-+	if (err)
-+		goto error;
-+
-+	err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+		port_priv->ethsw_priv->dpsw_handle,
-+		port_priv->port_index,
-+		DPSW_CNT_EGR_BYTE, &storage->tx_bytes);
-+	if (err)
-+		goto error;
-+
-+	err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+		port_priv->ethsw_priv->dpsw_handle,
-+		port_priv->port_index,
-+		DPSW_CNT_ING_FRAME_DISCARD,
-+		&storage->rx_dropped);
-+	if (err)
-+		goto error;
-+
-+	err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+		port_priv->ethsw_priv->dpsw_handle,
-+		port_priv->port_index,
-+		DPSW_CNT_ING_FLTR_FRAME,
-+		&tmp);
-+	if (err)
-+		goto error;
-+	storage->rx_dropped += tmp;
-+
-+	err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+		port_priv->ethsw_priv->dpsw_handle,
-+		port_priv->port_index,
-+		DPSW_CNT_EGR_FRAME_DISCARD,
-+		&storage->tx_dropped);
-+	if (err)
-+		goto error;
-+
-+	return storage;
-+
-+error:
-+	netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
-+	return storage;
-+}
-+
-+static const struct net_device_ops ethsw_port_ops = {
-+	.ndo_open		= &ethsw_port_open,
-+	.ndo_stop		=
&ethsw_port_stop,
-+
-+	.ndo_fdb_add		= &ethsw_port_fdb_add,
-+	.ndo_fdb_del		= &ethsw_port_fdb_del,
-+	.ndo_fdb_dump		= &ndo_dflt_fdb_dump,
-+
-+	.ndo_get_stats64	= &ethsw_port_get_stats,
-+
-+	.ndo_start_xmit		= &ethsw_dropframe,
-+};
-+
-+static struct {
-+	enum dpsw_counter id;
-+	char name[ETH_GSTRING_LEN];
-+} ethsw_ethtool_counters[] = {
-+	{DPSW_CNT_ING_FRAME,		"rx frames"},
-+	{DPSW_CNT_ING_BYTE,		"rx bytes"},
-+	{DPSW_CNT_ING_FLTR_FRAME,	"rx filtered frames"},
-+	{DPSW_CNT_ING_FRAME_DISCARD,	"rx discarded frames"},
-+	{DPSW_CNT_ING_BCAST_FRAME,	"rx b-cast frames"},
-+	{DPSW_CNT_ING_BCAST_BYTES,	"rx b-cast bytes"},
-+	{DPSW_CNT_ING_MCAST_FRAME,	"rx m-cast frames"},
-+	{DPSW_CNT_ING_MCAST_BYTE,	"rx m-cast bytes"},
-+	{DPSW_CNT_EGR_FRAME,		"tx frames"},
-+	{DPSW_CNT_EGR_BYTE,		"tx bytes"},
-+	{DPSW_CNT_EGR_FRAME_DISCARD,	"tx discarded frames"},
-+};
-+
-+static int ethsw_ethtool_get_sset_count(struct net_device *dev, int sset)
-+{
-+	switch (sset) {
-+	case ETH_SS_STATS:
-+		return ARRAY_SIZE(ethsw_ethtool_counters);
-+	default:
-+		return -EOPNOTSUPP;
-+	}
-+}
-+
-+static void ethsw_ethtool_get_strings(struct net_device *netdev,
-+	u32 stringset, u8 *data)
-+{
-+	int i;
-+
-+	switch (stringset) {
-+	case ETH_SS_STATS:
-+		for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++)
-+			memcpy(data + i * ETH_GSTRING_LEN,
-+			       ethsw_ethtool_counters[i].name, ETH_GSTRING_LEN);
-+		break;
-+	}
-+}
-+
-+static void ethsw_ethtool_get_stats(struct net_device *netdev,
-+	struct ethtool_stats *stats,
-+	u64 *data)
-+{
-+	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+	int i;
-+	int err;
-+
-+	for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++) {
-+		err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+			port_priv->ethsw_priv->dpsw_handle,
-+			port_priv->port_index,
-+			ethsw_ethtool_counters[i].id,
-+			&data[i]);
-+		if (err)
-+			netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n",
-+				   ethsw_ethtool_counters[i].name, err);
-+	}
-+}
-+
-+static const struct ethtool_ops ethsw_port_ethtool_ops = {
-+	.get_strings		= &ethsw_ethtool_get_strings,
-+	.get_ethtool_stats	= &ethsw_ethtool_get_stats,
-+	.get_sset_count		= &ethsw_ethtool_get_sset_count,
-+};
-+
-+/* -------------------------------------------------------------------------- */
-+/* ethsw driver functions */
-+
-+static int ethsw_links_state_update(struct ethsw_dev_priv *priv)
-+{
-+	struct list_head *pos;
-+	struct ethsw_port_priv *port_priv;
-+	int err;
-+
-+	list_for_each(pos, &priv->port_list) {
-+		port_priv = list_entry(pos, struct ethsw_port_priv,
-+				       list);
-+
-+		err = _ethsw_port_carrier_state_sync(port_priv->netdev);
-+		if (err)
-+			netdev_err(port_priv->netdev,
-+				   "_ethsw_port_carrier_state_sync err %d\n",
-+				   err);
-+	}
-+
-+	return 0;
-+}
-+
-+static irqreturn_t ethsw_irq0_handler(int irq_num, void *arg)
-+{
-+	return IRQ_WAKE_THREAD;
-+}
-+
-+static irqreturn_t _ethsw_irq0_handler_thread(int irq_num, void *arg)
-+{
-+	struct device *dev = (struct device *)arg;
-+	struct fsl_mc_device *sw_dev = to_fsl_mc_device(dev);
-+	struct net_device *netdev = dev_get_drvdata(dev);
-+	struct ethsw_dev_priv *priv = netdev_priv(netdev);
-+
-+	struct fsl_mc_io *io = priv->mc_io;
-+	uint16_t token = priv->dpsw_handle;
-+	int irq_index = DPSW_IRQ_INDEX_IF;
-+
-+	/* Mask the events and the if_id reserved bits to be cleared on read */
-+	uint32_t status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
-+	int err;
-+
-+	/* Sanity check */
-+	if (WARN_ON(!sw_dev || !sw_dev->irqs || !sw_dev->irqs[irq_index]))
-+		goto out;
-+	if (WARN_ON(sw_dev->irqs[irq_index]->msi_desc->irq !=
irq_num)) -+ goto out; -+ -+ err = dpsw_get_irq_status(io, 0, token, irq_index, &status); -+ if (unlikely(err)) { -+ netdev_err(netdev, "Can't get irq status (err %d)", err); -+ -+ err = dpsw_clear_irq_status(io, 0, token, irq_index, -+ 0xFFFFFFFF); -+ if (unlikely(err)) -+ netdev_err(netdev, "Can't clear irq status (err %d)", -+ err); -+ goto out; -+ } -+ -+ if (status & DPSW_IRQ_EVENT_LINK_CHANGED) { -+ err = ethsw_links_state_update(priv); -+ if (unlikely(err)) -+ goto out; -+ } -+ -+out: -+ return IRQ_HANDLED; -+} -+ -+static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev) -+{ -+ struct device *dev = &sw_dev->dev; -+ struct net_device *netdev = dev_get_drvdata(dev); -+ struct ethsw_dev_priv *priv = netdev_priv(netdev); -+ int err = 0; -+ struct fsl_mc_device_irq *irq; -+ const int irq_index = DPSW_IRQ_INDEX_IF; -+ uint32_t mask = DPSW_IRQ_EVENT_LINK_CHANGED; -+ -+ err = fsl_mc_allocate_irqs(sw_dev); -+ if (unlikely(err)) { -+ dev_err(dev, "MC irqs allocation failed\n"); -+ return err; -+ } -+ -+ if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_MAX_IRQ_NUM)) { -+ err = -EINVAL; -+ goto free_irq; -+ } -+ -+ err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle, -+ irq_index, 0); -+ if (unlikely(err)) { -+ dev_err(dev, "dpsw_set_irq_enable err %d\n", err); -+ goto free_irq; -+ } -+ -+ irq = sw_dev->irqs[irq_index]; -+ -+ err = devm_request_threaded_irq(dev, irq->msi_desc->irq, -+ ethsw_irq0_handler, -+ _ethsw_irq0_handler_thread, -+ IRQF_NO_SUSPEND | IRQF_ONESHOT, -+ dev_name(dev), dev); -+ if (unlikely(err)) { -+ dev_err(dev, "devm_request_threaded_irq(): %d", err); -+ goto free_irq; -+ } -+ -+ err = dpsw_set_irq_mask(priv->mc_io, 0, priv->dpsw_handle, -+ irq_index, mask); -+ if (unlikely(err)) { -+ dev_err(dev, "dpsw_set_irq_mask(): %d", err); -+ goto free_devm_irq; -+ } -+ -+ err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle, -+ irq_index, 1); -+ if (unlikely(err)) { -+ dev_err(dev, "dpsw_set_irq_enable(): %d", err); -+ goto free_devm_irq; -+ } -+ -+ return 0; -+ -+free_devm_irq: -+ devm_free_irq(dev, irq->msi_desc->irq, dev); -+free_irq: -+ fsl_mc_free_irqs(sw_dev); -+ return err; -+} -+ -+static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev) -+{ -+ struct device *dev = &sw_dev->dev; -+ struct net_device *netdev = dev_get_drvdata(dev); -+ struct ethsw_dev_priv *priv = netdev_priv(netdev); -+ -+ dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle, -+ DPSW_IRQ_INDEX_IF, 0); -+ devm_free_irq(dev, -+ sw_dev->irqs[DPSW_IRQ_INDEX_IF]->msi_desc->irq, -+ dev); -+ fsl_mc_free_irqs(sw_dev); -+} -+ -+static int __cold -+ethsw_init(struct fsl_mc_device *sw_dev) -+{ -+ struct device *dev = &sw_dev->dev; -+ struct ethsw_dev_priv *priv; -+ struct net_device *netdev; -+ int err = 0; -+ u16 i; -+ const struct dpsw_stp_cfg stp_cfg = { -+ .vlan_id = 1, -+ .state = DPSW_STP_STATE_FORWARDING, -+ }; -+ -+ netdev = dev_get_drvdata(dev); -+ priv = netdev_priv(netdev); -+ -+ priv->dev_id = sw_dev->obj_desc.id; -+ -+ err = dpsw_open(priv->mc_io, 0, priv->dev_id, &priv->dpsw_handle); -+ if (err) { -+ dev_err(dev, "dpsw_open err %d\n", err); -+ goto err_exit; -+ } -+ if (!priv->dpsw_handle) { -+ dev_err(dev, "dpsw_open returned null handle but no error\n"); -+ err = -EFAULT; -+ goto err_exit; -+ } -+ -+ err = dpsw_get_attributes(priv->mc_io, 0, priv->dpsw_handle, -+ &priv->sw_attr); -+ if (err) { -+ dev_err(dev, "dpsw_get_attributes err %d\n", err); -+ goto err_close; -+ } -+ -+ /* Minimum supported DPSW version check */ -+ if (priv->sw_attr.version.major < DPSW_MIN_VER_MAJOR || -+ 
(priv->sw_attr.version.major == DPSW_MIN_VER_MAJOR && -+ priv->sw_attr.version.minor < DPSW_MIN_VER_MINOR)) { -+ dev_err(dev, "DPSW version %d:%d not supported. Use %d.%d or greater.\n", -+ priv->sw_attr.version.major, -+ priv->sw_attr.version.minor, -+ DPSW_MIN_VER_MAJOR, DPSW_MIN_VER_MINOR); -+ err = -ENOTSUPP; -+ goto err_close; -+ } -+ -+ err = dpsw_reset(priv->mc_io, 0, priv->dpsw_handle); -+ if (err) { -+ dev_err(dev, "dpsw_reset err %d\n", err); -+ goto err_close; -+ } -+ -+ err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle, 0, -+ DPSW_FDB_LEARNING_MODE_HW); -+ if (err) { -+ dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err); -+ goto err_close; -+ } -+ -+ for (i = 0; i < priv->sw_attr.num_ifs; i++) { -+ err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle, i, -+ &stp_cfg); -+ if (err) { -+ dev_err(dev, "dpsw_if_set_stp err %d for port %d\n", -+ err, i); -+ goto err_close; -+ } -+ -+ err = dpsw_if_set_broadcast(priv->mc_io, 0, -+ priv->dpsw_handle, i, 1); -+ if (err) { -+ dev_err(dev, -+ "dpsw_if_set_broadcast err %d for port %d\n", -+ err, i); -+ goto err_close; -+ } -+ } -+ -+ return 0; -+ -+err_close: -+ dpsw_close(priv->mc_io, 0, priv->dpsw_handle); -+err_exit: -+ return err; -+} -+ -+static int __cold -+ethsw_takedown(struct fsl_mc_device *sw_dev) -+{ -+ struct device *dev = &sw_dev->dev; -+ struct net_device *netdev; -+ struct ethsw_dev_priv *priv; -+ int err; -+ -+ netdev = dev_get_drvdata(dev); -+ priv = netdev_priv(netdev); -+ -+ err = dpsw_close(priv->mc_io, 0, priv->dpsw_handle); -+ if (err) -+ dev_warn(dev, "dpsw_close err %d\n", err); -+ -+ return 0; -+} -+ -+static int __cold -+ethsw_remove(struct fsl_mc_device *sw_dev) -+{ -+ struct device *dev; -+ struct net_device *netdev; -+ struct ethsw_dev_priv *priv; -+ struct ethsw_port_priv *port_priv; -+ struct list_head *pos; -+ -+ dev = &sw_dev->dev; -+ netdev = dev_get_drvdata(dev); -+ priv = netdev_priv(netdev); -+ -+ list_for_each(pos, &priv->port_list) { -+ port_priv = list_entry(pos, struct ethsw_port_priv, list); -+ -+ rtnl_lock(); -+ netdev_upper_dev_unlink(port_priv->netdev, netdev); -+ rtnl_unlock(); -+ -+ unregister_netdev(port_priv->netdev); -+ free_netdev(port_priv->netdev); -+ } -+ -+ ethsw_teardown_irqs(sw_dev); -+ -+ unregister_netdev(netdev); -+ -+ ethsw_takedown(sw_dev); -+ fsl_mc_portal_free(priv->mc_io); -+ -+ dev_set_drvdata(dev, NULL); -+ free_netdev(netdev); -+ -+ return 0; -+} -+ -+static int __cold -+ethsw_probe(struct fsl_mc_device *sw_dev) -+{ -+ struct device *dev; -+ struct net_device *netdev = NULL; -+ struct ethsw_dev_priv *priv = NULL; -+ int err = 0; -+ u16 i; -+ const char def_mcast[ETH_ALEN] = { -+ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01, -+ }; -+ char port_name[IFNAMSIZ]; -+ -+ dev = &sw_dev->dev; -+ -+ /* register switch device, it's for management only - no I/O */ -+ netdev = alloc_etherdev(sizeof(*priv)); -+ if (!netdev) { -+ dev_err(dev, "alloc_etherdev error\n"); -+ return -ENOMEM; -+ } -+ netdev->netdev_ops = ðsw_ops; -+ -+ SET_NETDEV_DEV(netdev, dev); -+ dev_set_drvdata(dev, netdev); -+ -+ priv = netdev_priv(netdev); -+ priv->netdev = netdev; -+ -+ err = fsl_mc_portal_allocate(sw_dev, 0, &priv->mc_io); -+ if (err) { -+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); -+ goto err_free_netdev; -+ } -+ if (!priv->mc_io) { -+ dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n"); -+ err = -EFAULT; -+ goto err_free_netdev; -+ } -+ -+ err = ethsw_init(sw_dev); -+ if (err) { -+ dev_err(dev, "switch init err %d\n", err); -+ goto 
err_free_cmdport; -+ } -+ -+ netdev->flags = netdev->flags | IFF_PROMISC | IFF_MASTER; -+ -+ /* TODO: should we hold rtnl_lock here? We can't register_netdev under -+ * lock -+ */ -+ dev_alloc_name(netdev, "sw%d"); -+ err = register_netdev(netdev); -+ if (err < 0) { -+ dev_err(dev, "register_netdev error %d\n", err); -+ goto err_takedown; -+ } -+ if (err) -+ dev_info(dev, "register_netdev res %d\n", err); -+ -+ /* VLAN 1 is implicitly configured on the switch */ -+ priv->vlans[1] = ETHSW_VLAN_MEMBER; -+ /* Flooding, learning are implicitly enabled */ -+ priv->learning = true; -+ priv->flood = true; -+ -+ /* register switch ports */ -+ snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name); -+ -+ INIT_LIST_HEAD(&priv->port_list); -+ for (i = 0; i < priv->sw_attr.num_ifs; i++) { -+ struct net_device *port_netdev; -+ struct ethsw_port_priv *port_priv; -+ -+ port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv)); -+ if (!port_netdev) { -+ dev_err(dev, "alloc_etherdev error\n"); -+ goto err_takedown; -+ } -+ -+ port_priv = netdev_priv(port_netdev); -+ port_priv->netdev = port_netdev; -+ port_priv->ethsw_priv = priv; -+ -+ port_priv->port_index = i; -+ port_priv->stp_state = BR_STATE_FORWARDING; -+ /* VLAN 1 is configured by default on all switch ports */ -+ port_priv->vlans[1] = ETHSW_VLAN_MEMBER | ETHSW_VLAN_UNTAGGED | -+ ETHSW_VLAN_PVID; -+ -+ SET_NETDEV_DEV(port_netdev, dev); -+ port_netdev->netdev_ops = &ethsw_port_ops; -+ port_netdev->ethtool_ops = &ethsw_port_ethtool_ops; -+ -+ port_netdev->flags = port_netdev->flags | -+ IFF_PROMISC | IFF_SLAVE; -+ -+ dev_alloc_name(port_netdev, port_name); -+ err = register_netdev(port_netdev); -+ if (err < 0) { -+ dev_err(dev, "register_netdev error %d\n", err); -+ free_netdev(port_netdev); -+ goto err_takedown; -+ } -+ -+ rtnl_lock(); -+ -+ err = netdev_master_upper_dev_link(port_netdev, netdev, NULL, NULL); -+ if (err) { -+ dev_err(dev, "netdev_master_upper_dev_link error %d\n", -+ err); -+ unregister_netdev(port_netdev); -+ free_netdev(port_netdev); -+ rtnl_unlock(); -+ goto err_takedown; -+ } -+ -+ rtmsg_ifinfo(RTM_NEWLINK, port_netdev, IFF_SLAVE, GFP_KERNEL); -+ -+ rtnl_unlock(); -+ -+ list_add(&port_priv->list, &priv->port_list); -+ -+ /* TODO: implmenet set_rm_mode instead of this */ -+ err = ethsw_port_fdb_add_mc(port_netdev, def_mcast); -+ if (err) -+ dev_warn(&netdev->dev, -+ "ethsw_port_fdb_add_mc err %d\n", err); -+ -+ -+ /* sync carrier state */ -+ err = _ethsw_port_carrier_state_sync(port_netdev); -+ if (err) -+ netdev_err(netdev, -+ "_ethsw_port_carrier_state_sync err %d\n", -+ err); -+ } -+ -+ /* the switch starts up enabled */ -+ rtnl_lock(); -+ err = dev_open(netdev); -+ rtnl_unlock(); -+ if (err) -+ dev_warn(dev, "dev_open err %d\n", err); -+ -+ /* setup irqs */ -+ err = ethsw_setup_irqs(sw_dev); -+ if (unlikely(err)) { -+ dev_warn(dev, "ethsw_setup_irqs err %d\n", err); -+ goto err_takedown; -+ } -+ -+ dev_info(&netdev->dev, -+ "probed %d port switch\n", priv->sw_attr.num_ifs); -+ return 0; -+ -+err_takedown: -+ ethsw_remove(sw_dev); -+err_free_cmdport: -+ fsl_mc_portal_free(priv->mc_io); -+err_free_netdev: -+ dev_set_drvdata(dev, NULL); -+ free_netdev(netdev); -+ -+ return err; -+} -+ -+static const struct fsl_mc_device_match_id ethsw_match_id_table[] = { -+ { -+ .vendor = FSL_MC_VENDOR_FREESCALE, -+ .obj_type = "dpsw", -+ .ver_major = DPSW_VER_MAJOR, -+ .ver_minor = DPSW_VER_MINOR, -+ }, -+ {} -+}; -+ -+static struct fsl_mc_driver eth_sw_drv = { -+ .driver = { -+ .name = KBUILD_MODNAME, -+ .owner = THIS_MODULE, -+ }, -+ .probe =
ethsw_probe, -+ .remove = ethsw_remove, -+ .match_id_table = ethsw_match_id_table, -+}; -+ -+module_fsl_mc_driver(eth_sw_drv); -+ -+MODULE_LICENSE("GPL"); -+MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver (prototype)"); ---- a/drivers/staging/fsl-mc/include/net.h -+++ b/drivers/staging/fsl-mc/include/net.h -@@ -367,7 +367,6 @@ - /*************************** GTP fields ************************************/ - #define NH_FLD_GTP_TEID (1) - -- - /* Protocol options */ - - /* Ethernet options */ diff --git a/target/linux/layerscape/patches-4.4/7221-dpaa2-ethsw-match-id-cleanup.patch b/target/linux/layerscape/patches-4.4/7221-dpaa2-ethsw-match-id-cleanup.patch deleted file mode 100644 index eac134026..000000000 --- a/target/linux/layerscape/patches-4.4/7221-dpaa2-ethsw-match-id-cleanup.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 535826c8b725f752e5da17ea576d6d96e7d53f13 Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Fri, 15 Jul 2016 13:13:41 -0500 -Subject: [PATCH 221/226] dpaa2-ethsw: match id cleanup - -Signed-off-by: Stuart Yoder ---- - drivers/staging/fsl-dpaa2/ethsw/switch.c | 4 +--- - 1 file changed, 1 insertion(+), 3 deletions(-) - ---- a/drivers/staging/fsl-dpaa2/ethsw/switch.c -+++ b/drivers/staging/fsl-dpaa2/ethsw/switch.c -@@ -1685,12 +1685,10 @@ err_free_netdev: - return err; - } - --static const struct fsl_mc_device_match_id ethsw_match_id_table[] = { -+static const struct fsl_mc_device_id ethsw_match_id_table[] = { - { - .vendor = FSL_MC_VENDOR_FREESCALE, - .obj_type = "dpsw", -- .ver_major = DPSW_VER_MAJOR, -- .ver_minor = DPSW_VER_MINOR, - }, - {} - }; diff --git a/target/linux/layerscape/patches-4.4/7222-dpaa2-ethsw-fix-compile-error-on-backport-to-4.4.patch b/target/linux/layerscape/patches-4.4/7222-dpaa2-ethsw-fix-compile-error-on-backport-to-4.4.patch deleted file mode 100644 index 16499218a..000000000 --- a/target/linux/layerscape/patches-4.4/7222-dpaa2-ethsw-fix-compile-error-on-backport-to-4.4.patch +++ /dev/null @@ -1,21 +0,0 @@ -From c51ed10a001d3fd5b80b7bb92f2d5182f1d9fa5a Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Thu, 25 Aug 2016 16:10:12 -0500 -Subject: [PATCH 222/226] dpaa2-ethsw: fix compile error on backport to 4.4 - -Signed-off-by: Stuart Yoder ---- - drivers/staging/fsl-dpaa2/ethsw/switch.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/staging/fsl-dpaa2/ethsw/switch.c -+++ b/drivers/staging/fsl-dpaa2/ethsw/switch.c -@@ -1625,7 +1625,7 @@ ethsw_probe(struct fsl_mc_device *sw_dev - - rtnl_lock(); - -- err = netdev_master_upper_dev_link(port_netdev, netdev, NULL, NULL); -+ err = netdev_master_upper_dev_link(port_netdev, netdev); - if (err) { - dev_err(dev, "netdev_master_upper_dev_link error %d\n", - err); diff --git a/target/linux/layerscape/patches-4.4/7223-irqdomain-Added-domain-bus-token-DOMAIN_BUS_FSL_MC_M.patch b/target/linux/layerscape/patches-4.4/7223-irqdomain-Added-domain-bus-token-DOMAIN_BUS_FSL_MC_M.patch deleted file mode 100644 index 1d719e088..000000000 --- a/target/linux/layerscape/patches-4.4/7223-irqdomain-Added-domain-bus-token-DOMAIN_BUS_FSL_MC_M.patch +++ /dev/null @@ -1,26 +0,0 @@ -From b565bd9a6011819ff66bd4fa0a50f7e54dff2753 Mon Sep 17 00:00:00 2001 -From: "J. German Rivera" -Date: Wed, 6 Jan 2016 16:03:19 -0600 -Subject: [PATCH 223/226] irqdomain: Added domain bus token - DOMAIN_BUS_FSL_MC_MSI - -Since an FSL-MC bus is a new bus type that is neither PCI nor -PLATFORM, we need a new domain bus token to disambiguate the -IRQ domain for FSL-MC MSIs. - -Signed-off-by: J. 
German Rivera -Signed-off-by: Greg Kroah-Hartman ---- - include/linux/irqdomain.h | 1 + - 1 file changed, 1 insertion(+) - ---- a/include/linux/irqdomain.h -+++ b/include/linux/irqdomain.h -@@ -73,6 +73,7 @@ enum irq_domain_bus_token { - DOMAIN_BUS_PCI_MSI, - DOMAIN_BUS_PLATFORM_MSI, - DOMAIN_BUS_NEXUS, -+ DOMAIN_BUS_FSL_MC_MSI, - }; - - /** diff --git a/target/linux/layerscape/patches-4.4/7224-fsl-mc-msi-Added-FSL-MC-specific-member-to-the-msi_d.patch b/target/linux/layerscape/patches-4.4/7224-fsl-mc-msi-Added-FSL-MC-specific-member-to-the-msi_d.patch deleted file mode 100644 index 8dd11f416..000000000 --- a/target/linux/layerscape/patches-4.4/7224-fsl-mc-msi-Added-FSL-MC-specific-member-to-the-msi_d.patch +++ /dev/null @@ -1,40 +0,0 @@ -From 359c7977e003781024154da61e55181b92b12bdf Mon Sep 17 00:00:00 2001 -From: "J. German Rivera" -Date: Wed, 6 Jan 2016 16:03:20 -0600 -Subject: [PATCH 224/226] fsl-mc: msi: Added FSL-MC-specific member to the - msi_desc's union - -FSL-MC is a bus type different from PCI and platform, so it needs -its own member in the msi_desc's union. - -Signed-off-by: J. German Rivera -Signed-off-by: Greg Kroah-Hartman ---- - include/linux/msi.h | 9 +++++++++ - 1 file changed, 9 insertions(+) - ---- a/include/linux/msi.h -+++ b/include/linux/msi.h -@@ -33,6 +33,14 @@ struct platform_msi_desc { - }; - - /** -+ * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data -+ * @msi_index: The index of the MSI descriptor -+ */ -+struct fsl_mc_msi_desc { -+ u16 msi_index; -+}; -+ -+/** - * struct msi_desc - Descriptor structure for MSI based interrupts - * @list: List head for management - * @irq: The base interrupt number -@@ -87,6 +95,7 @@ struct msi_desc { - * tree wide cleanup. - */ - struct platform_msi_desc platform; -+ struct fsl_mc_msi_desc fsl_mc; - }; - }; - diff --git a/target/linux/layerscape/patches-4.4/7225-dpaa2-evb-fix-4.4-backport-compile-error.patch b/target/linux/layerscape/patches-4.4/7225-dpaa2-evb-fix-4.4-backport-compile-error.patch deleted file mode 100644 index 2815d46e3..000000000 --- a/target/linux/layerscape/patches-4.4/7225-dpaa2-evb-fix-4.4-backport-compile-error.patch +++ /dev/null @@ -1,21 +0,0 @@ -From dbdf9b1fe83f88090d88bce980885df4fac46162 Mon Sep 17 00:00:00 2001 -From: Stuart Yoder -Date: Thu, 25 Aug 2016 11:17:52 -0500 -Subject: [PATCH 225/226] dpaa2-evb: fix 4.4 backport compile error - -Signed-off-by: Stuart Yoder ---- - drivers/staging/fsl-dpaa2/evb/evb.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/staging/fsl-dpaa2/evb/evb.c -+++ b/drivers/staging/fsl-dpaa2/evb/evb.c -@@ -1153,7 +1153,7 @@ static int evb_probe(struct fsl_mc_devic - } - - rtnl_lock(); -- err = netdev_master_upper_dev_link(port_netdev, netdev, NULL, NULL); -+ err = netdev_master_upper_dev_link(port_netdev, netdev); - if (unlikely(err)) { - dev_err(dev, "netdev_master_upper_dev_link err %d\n", - err); diff --git a/target/linux/layerscape/patches-4.4/7226-dpaa_eth-fix-adjust_link-for-10G-2.5G.patch b/target/linux/layerscape/patches-4.4/7226-dpaa_eth-fix-adjust_link-for-10G-2.5G.patch deleted file mode 100644 index f5a2ed73c..000000000 --- a/target/linux/layerscape/patches-4.4/7226-dpaa_eth-fix-adjust_link-for-10G-2.5G.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 09a3bd3bb7e59703bfa5fa580436b4a85b15cfb0 Mon Sep 17 00:00:00 2001 -From: Shaohui Xie -Date: Fri, 22 Apr 2016 11:26:14 +0800 -Subject: [PATCH] dpaa_eth: fix adjust_link for 10G & 2.5G - -We don't support adjust link for 10G & 2.5G. 
- -Signed-off-by: Shaohui Xie ---- - drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c | 8 ++++++-- - 1 file changed, 6 insertions(+), 2 deletions(-) - ---- a/drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c -@@ -409,6 +409,10 @@ void get_pause_cfg(struct mac_device *ma - } - EXPORT_SYMBOL(get_pause_cfg); - -+static void adjust_link_void(struct net_device *net_dev) -+{ -+} -+ - static void adjust_link(struct net_device *net_dev) - { - struct dpa_priv_s *priv = netdev_priv(net_dev); -@@ -473,7 +477,7 @@ static int xgmac_init_phy(struct net_dev - mac_dev->phy_if); - else - phy_dev = of_phy_connect(net_dev, mac_dev->phy_node, -- &adjust_link, 0, mac_dev->phy_if); -+ &adjust_link_void, 0, mac_dev->phy_if); - if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) { - netdev_err(net_dev, "Could not attach to PHY %s\n", - mac_dev->phy_node ? -@@ -506,7 +510,7 @@ static int memac_init_phy(struct net_dev - return 0; - } else - phy_dev = of_phy_connect(net_dev, mac_dev->phy_node, -- &adjust_link, 0, -+ &adjust_link_void, 0, - mac_dev->phy_if); - } else { - if (!mac_dev->phy_node) diff --git a/target/linux/layerscape/patches-4.4/8036-ls2085a-Add-support-for-reset.patch b/target/linux/layerscape/patches-4.4/8036-ls2085a-Add-support-for-reset.patch deleted file mode 100644 index 62420191b..000000000 --- a/target/linux/layerscape/patches-4.4/8036-ls2085a-Add-support-for-reset.patch +++ /dev/null @@ -1,135 +0,0 @@ -From 4b9227ba510562a51e87487905895aea97e17e77 Mon Sep 17 00:00:00 2001 -From: pankaj chauhan -Date: Tue, 3 Feb 2015 13:08:52 +0530 -Subject: [PATCH 36/70] ls2085a: Add support for reset - -Add support for layerscape reset driver. -Reset is implemented by raising RESET_REQ_B. - -Signed-off-by: pankaj chauhan -Signed-off-by: Stuart Yoder -(cherry picked from commit f248be3aea58ca32b7d77413d742996249b293e9) ---- - drivers/power/reset/Kconfig | 7 ++- - drivers/power/reset/Makefile | 1 + - drivers/power/reset/ls-reboot.c | 93 +++++++++++++++++++++++++++++++++++++++ - 3 files changed, 100 insertions(+), 1 deletion(-) - create mode 100644 drivers/power/reset/ls-reboot.c - ---- a/drivers/power/reset/Kconfig -+++ b/drivers/power/reset/Kconfig -@@ -173,5 +173,10 @@ config POWER_RESET_ZX - help - Reboot support for ZTE SoCs. - --endif -+config POWER_RESET_LAYERSCAPE -+ bool "Freescale LayerScape reset driver" -+ depends on ARCH_FSL_LS2085A -+ help -+ Reboot support for the Freescale LayerScape SoCs. - -+endif ---- a/drivers/power/reset/Makefile -+++ b/drivers/power/reset/Makefile -@@ -20,3 +20,4 @@ obj-$(CONFIG_POWER_RESET_SYSCON) += sysc - obj-$(CONFIG_POWER_RESET_SYSCON_POWEROFF) += syscon-poweroff.o - obj-$(CONFIG_POWER_RESET_RMOBILE) += rmobile-reset.o - obj-$(CONFIG_POWER_RESET_ZX) += zx-reboot.o -+obj-$(CONFIG_POWER_RESET_LAYERSCAPE) += ls-reboot.o ---- /dev/null -+++ b/drivers/power/reset/ls-reboot.c -@@ -0,0 +1,93 @@ -+/* -+ * Freescale LayerScape reboot driver -+ * -+ * Copyright (c) 2015, Freescale Semiconductor. -+ * Author: Pankaj Chauhan -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+struct ls_reboot_priv { -+ struct device *dev; -+ u32 *rstcr; -+}; -+ -+static struct ls_reboot_priv *ls_reboot_priv; -+ -+static void ls_reboot(enum reboot_mode reboot_mode, const char *cmd) -+{ -+ struct ls_reboot_priv *priv = ls_reboot_priv; -+ u32 val; -+ unsigned long timeout; -+ -+ if (ls_reboot_priv) { -+ val = readl(priv->rstcr); -+ val |= 0x02; -+ writel(val, priv->rstcr); -+ } -+ -+ timeout = jiffies + HZ; -+ while (time_before(jiffies, timeout)) -+ cpu_relax(); -+ -+} -+ -+static int ls_reboot_probe(struct platform_device *pdev) -+{ -+ ls_reboot_priv = devm_kzalloc(&pdev->dev, -+ sizeof(*ls_reboot_priv), GFP_KERNEL); -+ if (!ls_reboot_priv) { -+ dev_err(&pdev->dev, "out of memory for context\n"); -+ return -ENODEV; -+ } -+ -+ ls_reboot_priv->rstcr = of_iomap(pdev->dev.of_node, 0); -+ if (!ls_reboot_priv->rstcr) { -+ devm_kfree(&pdev->dev, ls_reboot_priv); -+ dev_err(&pdev->dev, "can not map resource\n"); -+ return -ENODEV; -+ } -+ -+ ls_reboot_priv->dev = &pdev->dev; -+ -+ arm_pm_restart = ls_reboot; -+ -+ return 0; -+} -+ -+static struct of_device_id ls_reboot_of_match[] = { -+ { .compatible = "fsl,ls-reset" }, -+ {} -+}; -+ -+static struct platform_driver ls_reboot_driver = { -+ .probe = ls_reboot_probe, -+ .driver = { -+ .name = "ls-reset", -+ .of_match_table = ls_reboot_of_match, -+ }, -+}; -+ -+static int __init ls_reboot_init(void) -+{ -+ return platform_driver_register(&ls_reboot_driver); -+} -+device_initcall(ls_reboot_init); diff --git a/target/linux/layerscape/patches-4.4/8037-ls1043a-Add-support-for-reset.patch b/target/linux/layerscape/patches-4.4/8037-ls1043a-Add-support-for-reset.patch deleted file mode 100644 index bd2d90e9b..000000000 --- a/target/linux/layerscape/patches-4.4/8037-ls1043a-Add-support-for-reset.patch +++ /dev/null @@ -1,56 +0,0 @@ -From 5d7f8473e3472c16703bf5692d0b13f47d08c70a Mon Sep 17 00:00:00 2001 -From: Shaohui Xie -Date: Fri, 22 Jan 2016 12:01:28 +0800 -Subject: [PATCH 37/70] ls1043a: Add support for reset - -The reset in ls1043a is similar to ls2085a, but accessed in big endian, -so we modify the existing driver to handle endianness, if driver probes -property 'big-endian' in DTS, driver works in big endian mode, -otherwise, driver works in little endian by default. 
- -Signed-off-by: Shaohui Xie ---- - drivers/power/reset/ls-reboot.c | 18 +++++++++++++++--- - 1 file changed, 15 insertions(+), 3 deletions(-) - ---- a/drivers/power/reset/ls-reboot.c -+++ b/drivers/power/reset/ls-reboot.c -@@ -28,6 +28,7 @@ - struct ls_reboot_priv { - struct device *dev; - u32 *rstcr; -+ bool is_big_endian; - }; - - static struct ls_reboot_priv *ls_reboot_priv; -@@ -39,9 +40,15 @@ static void ls_reboot(enum reboot_mode r - unsigned long timeout; - - if (ls_reboot_priv) { -- val = readl(priv->rstcr); -- val |= 0x02; -- writel(val, priv->rstcr); -+ if (priv->is_big_endian) { -+ val = ioread32be(priv->rstcr); -+ val |= 0x02; -+ iowrite32be(val, priv->rstcr); -+ } else { -+ val = readl(priv->rstcr); -+ val |= 0x02; -+ writel(val, priv->rstcr); -+ } - } - - timeout = jiffies + HZ; -@@ -66,6 +73,11 @@ static int ls_reboot_probe(struct platfo - return -ENODEV; - } - -+ if (of_get_property(pdev->dev.of_node, "big-endian", NULL)) -+ ls_reboot_priv->is_big_endian = true; -+ else -+ ls_reboot_priv->is_big_endian = false; -+ - ls_reboot_priv->dev = &pdev->dev; - - arm_pm_restart = ls_reboot; diff --git a/target/linux/layerscape/patches-4.4/8038-reset-driver-Kconfig-Change-define-to-ARCH_LAYERSCAP.patch b/target/linux/layerscape/patches-4.4/8038-reset-driver-Kconfig-Change-define-to-ARCH_LAYERSCAP.patch deleted file mode 100644 index 793b3700d..000000000 --- a/target/linux/layerscape/patches-4.4/8038-reset-driver-Kconfig-Change-define-to-ARCH_LAYERSCAP.patch +++ /dev/null @@ -1,22 +0,0 @@ -From 6569dc5ba472db76aefe65ef552c3f448348fbec Mon Sep 17 00:00:00 2001 -From: Rai Harninder-B01044 -Date: Wed, 28 Oct 2015 13:24:44 +0530 -Subject: [PATCH 38/70] reset driver: Kconfig: Change define to - ARCH_LAYERSCAPE - -Signed-off-by: Rai Harninder-B01044 ---- - drivers/power/reset/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/power/reset/Kconfig -+++ b/drivers/power/reset/Kconfig -@@ -175,7 +175,7 @@ config POWER_RESET_ZX - - config POWER_RESET_LAYERSCAPE - bool "Freescale LayerScape reset driver" -- depends on ARCH_FSL_LS2085A -+ depends on ARCH_LAYERSCAPE - help - Reboot support for the Freescale LayerScape SoCs. - diff --git a/target/linux/layerscape/patches-4.4/8042-drivers-gpio-Port-gpio-driver-to-support-layerscape-.patch b/target/linux/layerscape/patches-4.4/8042-drivers-gpio-Port-gpio-driver-to-support-layerscape-.patch deleted file mode 100644 index c857dbe77..000000000 --- a/target/linux/layerscape/patches-4.4/8042-drivers-gpio-Port-gpio-driver-to-support-layerscape-.patch +++ /dev/null @@ -1,289 +0,0 @@ -From c2d0a12b5cfa61e43494483f5d1ee466b4998830 Mon Sep 17 00:00:00 2001 -From: Liu Gang -Date: Thu, 14 Jan 2016 19:48:09 +0800 -Subject: [PATCH 42/70] drivers/gpio: Port gpio driver to support layerscape - platform - -Layerscape has the same ip block/controller as -GPIO on powerpc platform(MPC8XXX). - -So use portable i/o accessors, as in_be32/out_be32 -accessors are Power architecture specific whereas -ioread32/iowrite32 and ioread32be/iowrite32be are -available in other architectures. - -Layerscape GPIO controller's registers may be big -or little endian, so the code needs to get the -endian property from DTB, then make additional -functions to fit right register read/write -operations. - -Currently the code can support ls2080a GPIO with -little endian registers. And it can also work well -on other layerscape platform with big endian GPIO -registers. 
- -Signed-off-by: Liu Gang ---- - drivers/gpio/Kconfig | 7 ++-- - drivers/gpio/gpio-mpc8xxx.c | 87 +++++++++++++++++++++++++++++++------------ - 2 files changed, 68 insertions(+), 26 deletions(-) - ---- a/drivers/gpio/Kconfig -+++ b/drivers/gpio/Kconfig -@@ -282,12 +282,13 @@ config GPIO_MPC5200 - depends on PPC_MPC52xx - - config GPIO_MPC8XXX -- bool "MPC512x/MPC8xxx GPIO support" -+ bool "MPC512x/MPC8xxx/QorIQ GPIO support" - depends on PPC_MPC512x || PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || \ -- FSL_SOC_BOOKE || PPC_86xx -+ FSL_SOC_BOOKE || PPC_86xx || ARCH_LAYERSCAPE || ARM || \ -+ COMPILE_TEST - help - Say Y here if you're going to use hardware that connects to the -- MPC512x/831x/834x/837x/8572/8610 GPIOs. -+ MPC512x/831x/834x/837x/8572/8610/QorIQ GPIOs. - - config GPIO_MVEBU - def_bool y ---- a/drivers/gpio/gpio-mpc8xxx.c -+++ b/drivers/gpio/gpio-mpc8xxx.c -@@ -1,5 +1,5 @@ - /* -- * GPIOs on MPC512x/8349/8572/8610 and compatible -+ * GPIOs on MPC512x/8349/8572/8610/QorIQ and compatible - * - * Copyright (C) 2008 Peter Korsgaard - * -@@ -19,6 +19,7 @@ - #include - #include - #include -+#include - - #define MPC8XXX_GPIO_PINS 32 - -@@ -44,6 +45,27 @@ struct mpc8xxx_gpio_chip { - const void *of_dev_id_data; - }; - -+static bool gpio_little_endian; -+static inline u32 gpio_in32(void __iomem *addr) -+{ -+ u32 val; -+ -+ if (gpio_little_endian) -+ val = ioread32(addr); -+ else -+ val = ioread32be(addr); -+ -+ return val; -+} -+ -+static inline void gpio_out32(u32 val, void __iomem *addr) -+{ -+ if (gpio_little_endian) -+ iowrite32(val, addr); -+ else -+ iowrite32be(val, addr); -+} -+ - static inline u32 mpc8xxx_gpio2mask(unsigned int gpio) - { - return 1u << (MPC8XXX_GPIO_PINS - 1 - gpio); -@@ -59,9 +81,17 @@ static void mpc8xxx_gpio_save_regs(struc - { - struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); - -- mpc8xxx_gc->data = in_be32(mm->regs + GPIO_DAT); -+ mpc8xxx_gc->data = gpio_in32(mm->regs + GPIO_DAT); - } - -+/* Generic set and clear bits accessor ports */ -+#define bgpio_setbits32(_addr, _v) \ -+ gpio_out32(gpio_in32(_addr) | (_v), (_addr)) -+#define bgpio_clrbits32(_addr, _v) \ -+ gpio_out32(gpio_in32(_addr) & ~(_v), (_addr)) -+#define bgpio_clrsetbits32(addr, clear, set) \ -+ gpio_out32((gpio_in32(addr) & ~(clear)) | (set), (addr)) -+ - /* Workaround GPIO 1 errata on MPC8572/MPC8536. The status of GPIOs - * defined as output cannot be determined by reading GPDAT register, - * so we use shadow data register instead. 
The status of input pins -@@ -74,9 +104,9 @@ static int mpc8572_gpio_get(struct gpio_ - struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); - u32 out_mask, out_shadow; - -- out_mask = in_be32(mm->regs + GPIO_DIR); -+ out_mask = gpio_in32(mm->regs + GPIO_DIR); - -- val = in_be32(mm->regs + GPIO_DAT) & ~out_mask; -+ val = gpio_in32(mm->regs + GPIO_DAT) & ~out_mask; - out_shadow = mpc8xxx_gc->data & out_mask; - - return (val | out_shadow) & mpc8xxx_gpio2mask(gpio); -@@ -86,7 +116,7 @@ static int mpc8xxx_gpio_get(struct gpio_ - { - struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); - -- return in_be32(mm->regs + GPIO_DAT) & mpc8xxx_gpio2mask(gpio); -+ return gpio_in32(mm->regs + GPIO_DAT) & mpc8xxx_gpio2mask(gpio); - } - - static void mpc8xxx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) -@@ -102,7 +132,7 @@ static void mpc8xxx_gpio_set(struct gpio - else - mpc8xxx_gc->data &= ~mpc8xxx_gpio2mask(gpio); - -- out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data); -+ gpio_out32(mpc8xxx_gc->data, mm->regs + GPIO_DAT); - - raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); - } -@@ -128,7 +158,7 @@ static void mpc8xxx_gpio_set_multiple(st - } - } - -- out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data); -+ gpio_out32(mpc8xxx_gc->data, mm->regs + GPIO_DAT); - - raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); - } -@@ -141,7 +171,7 @@ static int mpc8xxx_gpio_dir_in(struct gp - - raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); - -- clrbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio)); -+ bgpio_clrbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio)); - - raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); - -@@ -158,7 +188,7 @@ static int mpc8xxx_gpio_dir_out(struct g - - raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); - -- setbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio)); -+ bgpio_setbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio)); - - raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); - -@@ -201,7 +231,8 @@ static void mpc8xxx_gpio_irq_cascade(str - struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; - unsigned int mask; - -- mask = in_be32(mm->regs + GPIO_IER) & in_be32(mm->regs + GPIO_IMR); -+ mask = gpio_in32(mm->regs + GPIO_IER) -+ & gpio_in32(mm->regs + GPIO_IMR); - if (mask) - generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq, - 32 - ffs(mask))); -@@ -217,7 +248,8 @@ static void mpc8xxx_irq_unmask(struct ir - - raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); - -- setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); -+ bgpio_setbits32(mm->regs + GPIO_IMR, -+ mpc8xxx_gpio2mask(irqd_to_hwirq(d))); - - raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); - } -@@ -230,7 +262,8 @@ static void mpc8xxx_irq_mask(struct irq_ - - raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); - -- clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); -+ bgpio_clrbits32(mm->regs + GPIO_IMR, -+ mpc8xxx_gpio2mask(irqd_to_hwirq(d))); - - raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); - } -@@ -240,7 +273,7 @@ static void mpc8xxx_irq_ack(struct irq_d - struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); - struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; - -- out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); -+ gpio_out32(mpc8xxx_gpio2mask(irqd_to_hwirq(d)), mm->regs + GPIO_IER); - } - - static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type) -@@ -252,15 +285,15 @@ static int mpc8xxx_irq_set_type(struct i - switch (flow_type) { - case IRQ_TYPE_EDGE_FALLING: - 
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); -- setbits32(mm->regs + GPIO_ICR, -- mpc8xxx_gpio2mask(irqd_to_hwirq(d))); -+ bgpio_setbits32(mm->regs + GPIO_ICR, -+ mpc8xxx_gpio2mask(irqd_to_hwirq(d))); - raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); - break; - - case IRQ_TYPE_EDGE_BOTH: - raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); -- clrbits32(mm->regs + GPIO_ICR, -- mpc8xxx_gpio2mask(irqd_to_hwirq(d))); -+ bgpio_clrbits32(mm->regs + GPIO_ICR, -+ mpc8xxx_gpio2mask(irqd_to_hwirq(d))); - raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); - break; - -@@ -292,20 +325,20 @@ static int mpc512x_irq_set_type(struct i - case IRQ_TYPE_EDGE_FALLING: - case IRQ_TYPE_LEVEL_LOW: - raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); -- clrsetbits_be32(reg, 3 << shift, 2 << shift); -+ bgpio_clrsetbits32(reg, 3 << shift, 2 << shift); - raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); - break; - - case IRQ_TYPE_EDGE_RISING: - case IRQ_TYPE_LEVEL_HIGH: - raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); -- clrsetbits_be32(reg, 3 << shift, 1 << shift); -+ bgpio_clrsetbits32(reg, 3 << shift, 1 << shift); - raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); - break; - - case IRQ_TYPE_EDGE_BOTH: - raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); -- clrbits32(reg, 3 << shift); -+ bgpio_clrbits32(reg, 3 << shift); - raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); - break; - -@@ -398,6 +431,14 @@ static int mpc8xxx_probe(struct platform - mm_gc = &mpc8xxx_gc->mm_gc; - gc = &mm_gc->gc; - -+ if (of_property_read_bool(np, "little-endian")) { -+ gpio_little_endian = true; -+ dev_dbg(&pdev->dev, "GPIO REGISTERS are LITTLE endian\n"); -+ } else { -+ gpio_little_endian = false; -+ dev_dbg(&pdev->dev, "GPIO REGISTERS are BIG endian\n"); -+ } -+ - mm_gc->save_regs = mpc8xxx_gpio_save_regs; - gc->ngpio = MPC8XXX_GPIO_PINS; - gc->direction_input = mpc8xxx_gpio_dir_in; -@@ -422,7 +463,7 @@ static int mpc8xxx_probe(struct platform - return ret; - - mpc8xxx_gc->irqn = irq_of_parse_and_map(np, 0); -- if (mpc8xxx_gc->irqn == NO_IRQ) -+ if (mpc8xxx_gc->irqn == 0) - return 0; - - mpc8xxx_gc->irq = irq_domain_add_linear(np, MPC8XXX_GPIO_PINS, -@@ -435,8 +476,8 @@ static int mpc8xxx_probe(struct platform - mpc8xxx_gc->of_dev_id_data = id->data; - - /* ack and mask all irqs */ -- out_be32(mm_gc->regs + GPIO_IER, 0xffffffff); -- out_be32(mm_gc->regs + GPIO_IMR, 0); -+ gpio_out32(0xffffffff, mm_gc->regs + GPIO_IER); -+ gpio_out32(0, mm_gc->regs + GPIO_IMR); - - irq_set_chained_handler_and_data(mpc8xxx_gc->irqn, - mpc8xxx_gpio_irq_cascade, mpc8xxx_gc); diff --git a/target/linux/layerscape/patches-4.4/8048-mmc-sdhci-of-esdhc-add-remove-some-quirks-according-.patch b/target/linux/layerscape/patches-4.4/8048-mmc-sdhci-of-esdhc-add-remove-some-quirks-according-.patch deleted file mode 100644 index 09afa5e2a..000000000 --- a/target/linux/layerscape/patches-4.4/8048-mmc-sdhci-of-esdhc-add-remove-some-quirks-according-.patch +++ /dev/null @@ -1,60 +0,0 @@ -From dfc4661c2499aeb0a43b8a13b55213d1c190f640 Mon Sep 17 00:00:00 2001 -From: yangbo lu -Date: Tue, 19 Apr 2016 09:47:15 +0800 -Subject: [PATCH 48/70] mmc: sdhci-of-esdhc: add/remove some quirks according - to vendor version - -commit 6c42cb309fee2e126beed6b96a986dc7d85a033d -[context adjustment] - -A previous patch had removed esdhc_of_platform_init() by mistake. 
-static void esdhc_of_platform_init(struct sdhci_host *host) -{ - u32 vvn; - - vvn = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); - vvn = (vvn & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; - if (vvn == VENDOR_V_22) - host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; - - if (vvn > VENDOR_V_22) - host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; -} - -This patch is used to fix it by add/remove some quirks according to -verdor version in probe. - -Signed-off-by: Yangbo Lu -Fixes: f4932cfd22f1 ("mmc: sdhci-of-esdhc: support both BE and LE host controller") -Signed-off-by: Ulf Hansson -Integrated-by: Zhao Qiang ---- - drivers/mmc/host/sdhci-of-esdhc.c | 10 ++++++++++ - 1 file changed, 10 insertions(+) - ---- a/drivers/mmc/host/sdhci-of-esdhc.c -+++ b/drivers/mmc/host/sdhci-of-esdhc.c -@@ -584,6 +584,8 @@ static int sdhci_esdhc_probe(struct plat - { - struct sdhci_host *host; - struct device_node *np; -+ struct sdhci_pltfm_host *pltfm_host; -+ struct sdhci_esdhc *esdhc; - int ret; - - np = pdev->dev.of_node; -@@ -611,6 +613,14 @@ static int sdhci_esdhc_probe(struct plat - if (of_device_is_compatible(np, "fsl,ls1021a-esdhc")) - host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; - -+ pltfm_host = sdhci_priv(host); -+ esdhc = pltfm_host->priv; -+ if (esdhc->vendor_ver == VENDOR_V_22) -+ host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; -+ -+ if (esdhc->vendor_ver > VENDOR_V_22) -+ host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; -+ - if (of_device_is_compatible(np, "fsl,p2020-esdhc")) { - /* - * Freescale messed up with P2020 as it has a non-standard diff --git a/target/linux/layerscape/patches-4.4/8049-PCI-layerscape-Add-fsl-ls2085a-pcie-compatible-ID.patch b/target/linux/layerscape/patches-4.4/8049-PCI-layerscape-Add-fsl-ls2085a-pcie-compatible-ID.patch deleted file mode 100644 index 678da2fc1..000000000 --- a/target/linux/layerscape/patches-4.4/8049-PCI-layerscape-Add-fsl-ls2085a-pcie-compatible-ID.patch +++ /dev/null @@ -1,25 +0,0 @@ -From f2d357f86d79141969e29ec1dc1669da5120e022 Mon Sep 17 00:00:00 2001 -From: Yang Shi -Date: Wed, 27 Jan 2016 09:32:05 -0800 -Subject: [PATCH 49/70] PCI: layerscape: Add "fsl,ls2085a-pcie" compatible ID - -The Layerscape PCI host driver must recognize ls2085a compatible when using -firmware with ls2085a compatible property, otherwise the PCI bus won't be -detected even though ls2085a compatible is included by the dts. 
- -Signed-off-by: Yang Shi -Signed-off-by: Bjorn Helgaas ---- - drivers/pci/host/pci-layerscape.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/pci/host/pci-layerscape.c -+++ b/drivers/pci/host/pci-layerscape.c -@@ -203,6 +203,7 @@ static const struct of_device_id ls_pcie - { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata }, - { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata }, - { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, -+ { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata }, - { }, - }; - MODULE_DEVICE_TABLE(of, ls_pcie_of_match); diff --git a/target/linux/layerscape/patches-4.4/8050-PCI-layerscape-Fix-MSG-TLP-drop-setting.patch b/target/linux/layerscape/patches-4.4/8050-PCI-layerscape-Fix-MSG-TLP-drop-setting.patch deleted file mode 100644 index 1aa23ab07..000000000 --- a/target/linux/layerscape/patches-4.4/8050-PCI-layerscape-Fix-MSG-TLP-drop-setting.patch +++ /dev/null @@ -1,66 +0,0 @@ -From 61959c53020fff0584d88e28d6dae9806184f1a8 Mon Sep 17 00:00:00 2001 -From: Minghuan Lian -Date: Mon, 29 Feb 2016 17:24:15 -0600 -Subject: [PATCH 50/70] PCI: layerscape: Fix MSG TLP drop setting - -Some kinds of Layerscape PCIe controllers will forward the received message -TLPs to system application address space, which could corrupt system memory -or lead to a system hang. Enable MSG_DROP to fix this issue. - -Signed-off-by: Minghuan Lian -Signed-off-by: Bjorn Helgaas ---- - drivers/pci/host/pci-layerscape.c | 21 +++++++++++++-------- - 1 file changed, 13 insertions(+), 8 deletions(-) - ---- a/drivers/pci/host/pci-layerscape.c -+++ b/drivers/pci/host/pci-layerscape.c -@@ -77,6 +77,16 @@ static void ls_pcie_fix_class(struct ls_ - iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE); - } - -+/* Drop MSG TLP except for Vendor MSG */ -+static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie) -+{ -+ u32 val; -+ -+ val = ioread32(pcie->dbi + PCIE_STRFMR1); -+ val &= 0xDFFFFFFF; -+ iowrite32(val, pcie->dbi + PCIE_STRFMR1); -+} -+ - static int ls1021_pcie_link_up(struct pcie_port *pp) - { - u32 state; -@@ -97,7 +107,7 @@ static int ls1021_pcie_link_up(struct pc - static void ls1021_pcie_host_init(struct pcie_port *pp) - { - struct ls_pcie *pcie = to_ls_pcie(pp); -- u32 val, index[2]; -+ u32 index[2]; - - pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node, - "fsl,pcie-scfg"); -@@ -116,13 +126,7 @@ static void ls1021_pcie_host_init(struct - - dw_pcie_setup_rc(pp); - -- /* -- * LS1021A Workaround for internal TKT228622 -- * to fix the INTx hang issue -- */ -- val = ioread32(pcie->dbi + PCIE_STRFMR1); -- val &= 0xffff; -- iowrite32(val, pcie->dbi + PCIE_STRFMR1); -+ ls_pcie_drop_msg_tlp(pcie); - } - - static int ls_pcie_link_up(struct pcie_port *pp) -@@ -147,6 +151,7 @@ static void ls_pcie_host_init(struct pci - iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN); - ls_pcie_fix_class(pcie); - ls_pcie_clear_multifunction(pcie); -+ ls_pcie_drop_msg_tlp(pcie); - iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN); - } - diff --git a/target/linux/layerscape/patches-4.4/8060-irqchip-Add-Layerscape-SCFG-MSI-controller-support.patch b/target/linux/layerscape/patches-4.4/8060-irqchip-Add-Layerscape-SCFG-MSI-controller-support.patch deleted file mode 100644 index bfbce21bf..000000000 --- a/target/linux/layerscape/patches-4.4/8060-irqchip-Add-Layerscape-SCFG-MSI-controller-support.patch +++ /dev/null @@ -1,285 +0,0 @@ -From 83ec4322b33e8d7908a3df0343246882e4e6b83a Mon Sep 17 00:00:00 2001 -From: Minghuan Lian -Date: Wed, 23 Mar 2016 19:08:20 +0800 -Subject: 
[PATCH 60/70] irqchip: Add Layerscape SCFG MSI controller support - -upstream b8f3ebe630a4f1b4ff9340103d3b565ad5d78d43 commit -[context adjustment] - -Some kind of Freescale Layerscape SoC provides a MSI -implementation which uses two SCFG registers MSIIR and -MSIR to support 32 MSI interrupts for each PCIe controller. -The patch is to support it. - -Signed-off-by: Minghuan Lian -Tested-by: Alexander Stein -Acked-by: Marc Zyngier -Signed-off-by: Marc Zyngier ---- - drivers/irqchip/Kconfig | 5 + - drivers/irqchip/Makefile | 1 + - drivers/irqchip/irq-ls-scfg-msi.c | 240 +++++++++++++++++++++++++++++++++++++ - 3 files changed, 246 insertions(+) - create mode 100644 drivers/irqchip/irq-ls-scfg-msi.c - ---- a/drivers/irqchip/Kconfig -+++ b/drivers/irqchip/Kconfig -@@ -193,3 +193,8 @@ config IRQ_MXS - def_bool y if MACH_ASM9260 || ARCH_MXS - select IRQ_DOMAIN - select STMP_DEVICE -+ -+config LS_SCFG_MSI -+ def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE -+ depends on PCI && PCI_MSI -+ select PCI_MSI_IRQ_DOMAIN ---- a/drivers/irqchip/Makefile -+++ b/drivers/irqchip/Makefile -@@ -55,3 +55,4 @@ obj-$(CONFIG_RENESAS_H8S_INTC) += irq-r - obj-$(CONFIG_ARCH_SA1100) += irq-sa11x0.o - obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o - obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o -+obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o ---- /dev/null -+++ b/drivers/irqchip/irq-ls-scfg-msi.c -@@ -0,0 +1,240 @@ -+/* -+ * Freescale SCFG MSI(-X) support -+ * -+ * Copyright (C) 2016 Freescale Semiconductor. -+ * -+ * Author: Minghuan Lian -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define MSI_MAX_IRQS 32 -+#define MSI_IBS_SHIFT 3 -+#define MSIR 4 -+ -+struct ls_scfg_msi { -+ spinlock_t lock; -+ struct platform_device *pdev; -+ struct irq_domain *parent; -+ struct irq_domain *msi_domain; -+ void __iomem *regs; -+ phys_addr_t msiir_addr; -+ int irq; -+ DECLARE_BITMAP(used, MSI_MAX_IRQS); -+}; -+ -+static struct irq_chip ls_scfg_msi_irq_chip = { -+ .name = "MSI", -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, -+}; -+ -+static struct msi_domain_info ls_scfg_msi_domain_info = { -+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | -+ MSI_FLAG_USE_DEF_CHIP_OPS | -+ MSI_FLAG_PCI_MSIX), -+ .chip = &ls_scfg_msi_irq_chip, -+}; -+ -+static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) -+{ -+ struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data); -+ -+ msg->address_hi = upper_32_bits(msi_data->msiir_addr); -+ msg->address_lo = lower_32_bits(msi_data->msiir_addr); -+ msg->data = data->hwirq << MSI_IBS_SHIFT; -+} -+ -+static int ls_scfg_msi_set_affinity(struct irq_data *irq_data, -+ const struct cpumask *mask, bool force) -+{ -+ return -EINVAL; -+} -+ -+static struct irq_chip ls_scfg_msi_parent_chip = { -+ .name = "SCFG", -+ .irq_compose_msi_msg = ls_scfg_msi_compose_msg, -+ .irq_set_affinity = ls_scfg_msi_set_affinity, -+}; -+ -+static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain, -+ unsigned int virq, -+ unsigned int nr_irqs, -+ void *args) -+{ -+ struct ls_scfg_msi *msi_data = domain->host_data; -+ int pos, err = 0; -+ -+ WARN_ON(nr_irqs != 1); -+ -+ spin_lock(&msi_data->lock); -+ pos = find_first_zero_bit(msi_data->used, MSI_MAX_IRQS); -+ if (pos < MSI_MAX_IRQS) -+ __set_bit(pos, msi_data->used); -+ else 
-+ err = -ENOSPC; -+ spin_unlock(&msi_data->lock); -+ -+ if (err) -+ return err; -+ -+ irq_domain_set_info(domain, virq, pos, -+ &ls_scfg_msi_parent_chip, msi_data, -+ handle_simple_irq, NULL, NULL); -+ -+ return 0; -+} -+ -+static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain, -+ unsigned int virq, unsigned int nr_irqs) -+{ -+ struct irq_data *d = irq_domain_get_irq_data(domain, virq); -+ struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(d); -+ int pos; -+ -+ pos = d->hwirq; -+ if (pos < 0 || pos >= MSI_MAX_IRQS) { -+ pr_err("failed to teardown msi. Invalid hwirq %d\n", pos); -+ return; -+ } -+ -+ spin_lock(&msi_data->lock); -+ __clear_bit(pos, msi_data->used); -+ spin_unlock(&msi_data->lock); -+} -+ -+static const struct irq_domain_ops ls_scfg_msi_domain_ops = { -+ .alloc = ls_scfg_msi_domain_irq_alloc, -+ .free = ls_scfg_msi_domain_irq_free, -+}; -+ -+static void ls_scfg_msi_irq_handler(struct irq_desc *desc) -+{ -+ struct ls_scfg_msi *msi_data = irq_desc_get_handler_data(desc); -+ unsigned long val; -+ int pos, virq; -+ -+ chained_irq_enter(irq_desc_get_chip(desc), desc); -+ -+ val = ioread32be(msi_data->regs + MSIR); -+ for_each_set_bit(pos, &val, MSI_MAX_IRQS) { -+ virq = irq_find_mapping(msi_data->parent, (31 - pos)); -+ if (virq) -+ generic_handle_irq(virq); -+ } -+ -+ chained_irq_exit(irq_desc_get_chip(desc), desc); -+} -+ -+static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data) -+{ -+ /* Initialize MSI domain parent */ -+ msi_data->parent = irq_domain_add_linear(NULL, -+ MSI_MAX_IRQS, -+ &ls_scfg_msi_domain_ops, -+ msi_data); -+ if (!msi_data->parent) { -+ dev_err(&msi_data->pdev->dev, "failed to create IRQ domain\n"); -+ return -ENOMEM; -+ } -+ -+ msi_data->msi_domain = pci_msi_create_irq_domain( -+ of_node_to_fwnode(msi_data->pdev->dev.of_node), -+ &ls_scfg_msi_domain_info, -+ msi_data->parent); -+ if (!msi_data->msi_domain) { -+ dev_err(&msi_data->pdev->dev, "failed to create MSI domain\n"); -+ irq_domain_remove(msi_data->parent); -+ return -ENOMEM; -+ } -+ -+ return 0; -+} -+ -+static int ls_scfg_msi_probe(struct platform_device *pdev) -+{ -+ struct ls_scfg_msi *msi_data; -+ struct resource *res; -+ int ret; -+ -+ msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL); -+ if (!msi_data) -+ return -ENOMEM; -+ -+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ msi_data->regs = devm_ioremap_resource(&pdev->dev, res); -+ if (IS_ERR(msi_data->regs)) { -+ dev_err(&pdev->dev, "failed to initialize 'regs'\n"); -+ return PTR_ERR(msi_data->regs); -+ } -+ msi_data->msiir_addr = res->start; -+ -+ msi_data->irq = platform_get_irq(pdev, 0); -+ if (msi_data->irq <= 0) { -+ dev_err(&pdev->dev, "failed to get MSI irq\n"); -+ return -ENODEV; -+ } -+ -+ msi_data->pdev = pdev; -+ spin_lock_init(&msi_data->lock); -+ -+ ret = ls_scfg_msi_domains_init(msi_data); -+ if (ret) -+ return ret; -+ -+ irq_set_chained_handler_and_data(msi_data->irq, -+ ls_scfg_msi_irq_handler, -+ msi_data); -+ -+ platform_set_drvdata(pdev, msi_data); -+ -+ return 0; -+} -+ -+static int ls_scfg_msi_remove(struct platform_device *pdev) -+{ -+ struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev); -+ -+ irq_set_chained_handler_and_data(msi_data->irq, NULL, NULL); -+ -+ irq_domain_remove(msi_data->msi_domain); -+ irq_domain_remove(msi_data->parent); -+ -+ platform_set_drvdata(pdev, NULL); -+ -+ return 0; -+} -+ -+static const struct of_device_id ls_scfg_msi_id[] = { -+ { .compatible = "fsl,1s1021a-msi", }, -+ { .compatible = "fsl,1s1043a-msi", }, -+ {}, -+}; -+ -+static 
struct platform_driver ls_scfg_msi_driver = { -+ .driver = { -+ .name = "ls-scfg-msi", -+ .of_match_table = ls_scfg_msi_id, -+ }, -+ .probe = ls_scfg_msi_probe, -+ .remove = ls_scfg_msi_remove, -+}; -+ -+module_platform_driver(ls_scfg_msi_driver); -+ -+MODULE_AUTHOR("Minghuan Lian "); -+MODULE_DESCRIPTION("Freescale Layerscape SCFG MSI controller driver"); -+MODULE_LICENSE("GPL v2"); diff --git a/target/linux/layerscape/patches-4.4/8061-arm64-layerscape-Enable-PCIe-for-Layerscape.patch b/target/linux/layerscape/patches-4.4/8061-arm64-layerscape-Enable-PCIe-for-Layerscape.patch deleted file mode 100644 index 49a07beb5..000000000 --- a/target/linux/layerscape/patches-4.4/8061-arm64-layerscape-Enable-PCIe-for-Layerscape.patch +++ /dev/null @@ -1,22 +0,0 @@ -From 99b0e55881b217e9de4efd4d812dd3bd4f8c9380 Mon Sep 17 00:00:00 2001 -From: Mingkai Hu -Date: Thu, 19 Nov 2015 12:49:50 +0800 -Subject: [PATCH 61/70] arm64/layerscape: Enable PCIe for Layerscape - -Signed-off-by: Mingkai Hu -Integrated-by: Jiang Yutang ---- - arch/arm64/Kconfig.platforms | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/arch/arm64/Kconfig.platforms -+++ b/arch/arm64/Kconfig.platforms -@@ -31,6 +31,8 @@ config ARCH_EXYNOS7 - - config ARCH_LAYERSCAPE - bool "ARMv8 based Freescale Layerscape SoC family" -+ select PCI_LAYERSCAPE if PCI -+ select LS_SCFG_MSI if PCI_MSI - help - This enables support for the Freescale Layerscape SoC family. - diff --git a/target/linux/layerscape/patches-4.4/8062-armv8-aarch32-enable-pci_domains-for-armv8-32bit.patch b/target/linux/layerscape/patches-4.4/8062-armv8-aarch32-enable-pci_domains-for-armv8-32bit.patch deleted file mode 100644 index 3c80e9c29..000000000 --- a/target/linux/layerscape/patches-4.4/8062-armv8-aarch32-enable-pci_domains-for-armv8-32bit.patch +++ /dev/null @@ -1,20 +0,0 @@ -From 53ff2bc222c446dfb6ffa9b708a23e7b8b82abb3 Mon Sep 17 00:00:00 2001 -From: Pan Jiafei -Date: Fri, 3 Jun 2016 06:45:57 +0000 -Subject: [PATCH 62/70] armv8: aarch32: enable pci_domains for armv8-32bit - -Signed-off-by: Pan Jiafei ---- - arch/arm/mach-imx/Kconfig | 1 + - 1 file changed, 1 insertion(+) - ---- a/arch/arm/mach-imx/Kconfig -+++ b/arch/arm/mach-imx/Kconfig -@@ -617,6 +617,7 @@ config ARCH_LAYERSCAPE - select ARM_GIC - select HAVE_ARM_ARCH_TIMER - select PCI_LAYERSCAPE if PCI -+ select PCI_DOMAINS if PCI - select LS1_MSI if PCI_MSI - - help diff --git a/target/linux/layerscape/patches-4.4/8073-ls1012a-added-clock-configuration.patch b/target/linux/layerscape/patches-4.4/8073-ls1012a-added-clock-configuration.patch deleted file mode 100644 index bee2824da..000000000 --- a/target/linux/layerscape/patches-4.4/8073-ls1012a-added-clock-configuration.patch +++ /dev/null @@ -1,61 +0,0 @@ -From 9112596c3c7b7b8b1eded3323765fa711dc58e74 Mon Sep 17 00:00:00 2001 -From: Tang Yuantian -Date: Thu, 25 Aug 2016 10:38:28 +0800 -Subject: [PATCH 073/113] ls1012a: added clock configuration - -commit c9c11181191938b77bfd61e5094a63955cf711fd -[context adjustment] -[don't apply fsl-ls1012a.dtsi] - -Currently ls1012a used the clock configuration of ls1043a's. -But there is a little different between them. This patch added -ls1012a its own clock configuration. 
- -Signed-off-by: Tang Yuantian -Integrated-by: Zhao Qiang ---- - drivers/clk/clk-qoriq.c | 19 +++++++++++++++++++ - 1 file changed, 19 insertions(+) - ---- a/drivers/clk/clk-qoriq.c -+++ b/drivers/clk/clk-qoriq.c -@@ -195,6 +195,14 @@ static const struct clockgen_muxinfo t10 - } - }; - -+static const struct clockgen_muxinfo ls1012a_cmux = { -+ { -+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ {}, -+ [2] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ } -+}; -+ - static const struct clockgen_muxinfo t1040_cmux = { - { - [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -@@ -475,6 +483,16 @@ static const struct clockgen_chipinfo ch - .pll_mask = 0x03, - }, - { -+ .compat = "fsl,ls1012a-clockgen", -+ .cmux_groups = { -+ &ls1012a_cmux -+ }, -+ .cmux_to_group = { -+ 0, -1 -+ }, -+ .pll_mask = 0x03, -+ }, -+ { - .compat = "fsl,ls1043a-clockgen", - .init_periph = t2080_init_periph, - .cmux_groups = { -@@ -1275,6 +1293,7 @@ CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qo - CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init); - CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init); - CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init); -+CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init); - - /* Legacy nodes */ - CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init); diff --git a/target/linux/layerscape/patches-4.4/8114-drivers-PCIE-enable-for-Linux.patch b/target/linux/layerscape/patches-4.4/8114-drivers-PCIE-enable-for-Linux.patch deleted file mode 100644 index 61c85a1f3..000000000 --- a/target/linux/layerscape/patches-4.4/8114-drivers-PCIE-enable-for-Linux.patch +++ /dev/null @@ -1,49 +0,0 @@ -From cfe7a6abd3d7e9ffeed8230847bbe2f680757305 Mon Sep 17 00:00:00 2001 -From: Pratiyush Mohan Srivastava -Date: Sun, 24 Apr 2016 23:43:19 +0530 -Subject: [PATCH 114/123] drivers: PCIE enable for Linux - -[This patch from sdk release, just context adjustment] -Add support for PCIE for LS1012A in kernel - -Signed-off-by: Pratiyush Mohan Srivastava -Signed-off-by: Prabhakar Kushwaha -Integrated-by: Jiang Yutang ---- - drivers/irqchip/irq-ls-scfg-msi.c | 1 + - drivers/pci/host/pci-layerscape.c | 7 +++++++ - 2 files changed, 8 insertions(+) - ---- a/drivers/irqchip/irq-ls-scfg-msi.c -+++ b/drivers/irqchip/irq-ls-scfg-msi.c -@@ -219,6 +219,7 @@ static int ls_scfg_msi_remove(struct pla - } - - static const struct of_device_id ls_scfg_msi_id[] = { -+ { .compatible = "fsl,ls1012a-msi", }, - { .compatible = "fsl,1s1021a-msi", }, - { .compatible = "fsl,1s1043a-msi", }, - {}, ---- a/drivers/pci/host/pci-layerscape.c -+++ b/drivers/pci/host/pci-layerscape.c -@@ -192,6 +192,12 @@ static struct ls_pcie_drvdata ls1021_drv - .ops = &ls1021_pcie_host_ops, - }; - -+static struct ls_pcie_drvdata ls1012_drvdata = { -+ .lut_offset = 0xC0000, -+ .ltssm_shift = 24, -+ .ops = &ls_pcie_host_ops, -+}; -+ - static struct ls_pcie_drvdata ls1043_drvdata = { - .lut_offset = 0x10000, - .ltssm_shift = 24, -@@ -205,6 +211,7 @@ static struct ls_pcie_drvdata ls2080_drv - }; - - static const struct of_device_id ls_pcie_of_match[] = { -+ { .compatible = "fsl,ls1012a-pcie", .data = &ls1012_drvdata }, - { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata }, - { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata }, - { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, diff --git a/target/linux/layerscape/patches-4.4/8115-PCI-layerscape-call-dw_pcie_setup_rc-in-host-initial.patch 
b/target/linux/layerscape/patches-4.4/8115-PCI-layerscape-call-dw_pcie_setup_rc-in-host-initial.patch deleted file mode 100644 index cf67e5947..000000000 --- a/target/linux/layerscape/patches-4.4/8115-PCI-layerscape-call-dw_pcie_setup_rc-in-host-initial.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 5d661761fd2354e4f976c83143a1bf7c1ecad766 Mon Sep 17 00:00:00 2001 -From: Yangbo Lu -Date: Tue, 20 Sep 2016 16:07:10 +0800 -Subject: [PATCH 115/115] PCI: layerscape: call dw_pcie_setup_rc() in host - initialization - -A previous patch moved some Root Complex programming from -dw_pcie_host_init() to dw_pcie_setup_rc() where it belongs, -while the pci-layerscape driver didn't call dw_pcie_setup_rc() -anywhere after that. This patch is to add dw_pcie_setup_rc() -calling in layerscape host initialization to fix ls1012a pci -issue caused by that patch. - -Fixes: c49b76f3c613("PCI: designware: Move Root Complex setup - code to dw_pcie_setup_rc()") -Signed-off-by: Yangbo Lu ---- - drivers/pci/host/pci-layerscape.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/pci/host/pci-layerscape.c -+++ b/drivers/pci/host/pci-layerscape.c -@@ -148,6 +148,7 @@ static void ls_pcie_host_init(struct pci - { - struct ls_pcie *pcie = to_ls_pcie(pp); - -+ dw_pcie_setup_rc(pp); - iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN); - ls_pcie_fix_class(pcie); - ls_pcie_clear_multifunction(pcie); diff --git a/target/linux/layerscape/patches-4.4/8125-rtc-pcf2127-add-pcf2129-device-id.patch b/target/linux/layerscape/patches-4.4/8125-rtc-pcf2127-add-pcf2129-device-id.patch deleted file mode 100644 index 6ece59771..000000000 --- a/target/linux/layerscape/patches-4.4/8125-rtc-pcf2127-add-pcf2129-device-id.patch +++ /dev/null @@ -1,63 +0,0 @@ -From 228d15f0c525d1d62540539ebb8d311feddb79f5 Mon Sep 17 00:00:00 2001 -From: Akinobu Mita -Date: Mon, 26 Sep 2016 11:26:11 +0800 -Subject: [PATCH 125/141] rtc: pcf2127: add pcf2129 device id - -commit e8bf83a7f9454ed1026f100139ebd10eace0e280 -[context adjustment] - -There are only a few differences between PCF2127 and PCF2129 (PCF2127 -has 512 bytes of general purpose SRAM and count-down timer). - -The rtc-pcf2127 driver currently doesn't use the PCF2127 specific -functionality and Kconfig help text already says this driver supports -PCF2127/29, so we can simply add pcf2129 to device id list. - -Signed-off-by: Akinobu Mita -Signed-off-by: Alexandre Belloni -Signed-off-by: Shaohui Xie -Integrated-by: Zhao Qiang ---- - drivers/rtc/rtc-pcf2127.c | 8 +++++--- - 1 file changed, 5 insertions(+), 3 deletions(-) - ---- a/drivers/rtc/rtc-pcf2127.c -+++ b/drivers/rtc/rtc-pcf2127.c -@@ -1,12 +1,12 @@ - /* -- * An I2C driver for the NXP PCF2127 RTC -+ * An I2C and SPI driver for the NXP PCF2127/29 RTC - * Copyright 2013 Til-Technologies - * - * Author: Renaud Cerrato - * - * based on the other drivers in this same directory. 
- * -- * http://www.nxp.com/documents/data_sheet/PCF2127AT.pdf -+ * Datasheet: http://cache.nxp.com/documents/data_sheet/PCF2127.pdf - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as -@@ -216,6 +216,7 @@ static int pcf2127_probe(struct i2c_clie - - static const struct i2c_device_id pcf2127_id[] = { - { "pcf2127", 0 }, -+ { "pcf2129", 0 }, - { } - }; - MODULE_DEVICE_TABLE(i2c, pcf2127_id); -@@ -223,6 +224,7 @@ MODULE_DEVICE_TABLE(i2c, pcf2127_id); - #ifdef CONFIG_OF - static const struct of_device_id pcf2127_of_match[] = { - { .compatible = "nxp,pcf2127" }, -+ { .compatible = "nxp,pcf2129" }, - {} - }; - MODULE_DEVICE_TABLE(of, pcf2127_of_match); -@@ -240,5 +242,5 @@ static struct i2c_driver pcf2127_driver - module_i2c_driver(pcf2127_driver); - - MODULE_AUTHOR("Renaud Cerrato "); --MODULE_DESCRIPTION("NXP PCF2127 RTC driver"); -+MODULE_DESCRIPTION("NXP PCF2127/29 RTC driver"); - MODULE_LICENSE("GPL v2"); diff --git a/target/linux/layerscape/patches-4.4/8127-ls1046a-msi-Add-LS1046A-MSI-support.patch b/target/linux/layerscape/patches-4.4/8127-ls1046a-msi-Add-LS1046A-MSI-support.patch deleted file mode 100644 index a3850c706..000000000 --- a/target/linux/layerscape/patches-4.4/8127-ls1046a-msi-Add-LS1046A-MSI-support.patch +++ /dev/null @@ -1,25 +0,0 @@ -From 51ab89c787184d47dbff37c3c69ea2c82cc4749c Mon Sep 17 00:00:00 2001 -From: Mingkai Hu -Date: Mon, 26 Sep 2016 12:13:56 +0800 -Subject: [PATCH 127/141] ls1046a/msi: Add LS1046A MSI support - -commit 2b5a00d46a8f8762b9f78b727eab6fc0e12f3fd7 -[context adjustment] - -Signed-off-by: Mingkai Hu -Integated-by: Zhao Qiang -Integated-by: Yutang Jiang ---- - drivers/irqchip/irq-ls-scfg-msi.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/irqchip/irq-ls-scfg-msi.c -+++ b/drivers/irqchip/irq-ls-scfg-msi.c -@@ -222,6 +222,7 @@ static const struct of_device_id ls_scfg - { .compatible = "fsl,ls1012a-msi", }, - { .compatible = "fsl,1s1021a-msi", }, - { .compatible = "fsl,1s1043a-msi", }, -+ { .compatible = "fsl,ls1046a-msi", }, - {}, - }; - diff --git a/target/linux/layerscape/patches-4.4/8128-pci-layerscape-add-LS1046A-support.patch b/target/linux/layerscape/patches-4.4/8128-pci-layerscape-add-LS1046A-support.patch deleted file mode 100644 index 0a9f29655..000000000 --- a/target/linux/layerscape/patches-4.4/8128-pci-layerscape-add-LS1046A-support.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 5a95e58e58da417ca8a8f4ae11347ad9a66803cd Mon Sep 17 00:00:00 2001 -From: Mingkai Hu -Date: Mon, 26 Sep 2016 12:17:36 +0800 -Subject: [PATCH 128/141] pci/layerscape: add LS1046A support - -commit a52797fe443a79c53d0b63c505c99f65264de8b7 -[context adjustment] - -Signed-off-by: Mingkai Hu -Integrated-by: Zhao Qiang -Integrated-by: Yutang Jiang ---- - drivers/pci/host/pci-layerscape.c | 7 +++++++ - 1 file changed, 7 insertions(+) - ---- a/drivers/pci/host/pci-layerscape.c -+++ b/drivers/pci/host/pci-layerscape.c -@@ -205,6 +205,12 @@ static struct ls_pcie_drvdata ls1043_drv - .ops = &ls_pcie_host_ops, - }; - -+static struct ls_pcie_drvdata ls1046_drvdata = { -+ .lut_offset = 0x10000, -+ .ltssm_shift = 24, -+ .ops = &ls_pcie_host_ops, -+}; -+ - static struct ls_pcie_drvdata ls2080_drvdata = { - .lut_offset = 0x80000, - .ltssm_shift = 0, -@@ -215,6 +221,7 @@ static const struct of_device_id ls_pcie - { .compatible = "fsl,ls1012a-pcie", .data = &ls1012_drvdata }, - { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata }, - { .compatible = "fsl,ls1043a-pcie", .data = 
&ls1043_drvdata }, -+ { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata }, - { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, - { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata }, - { }, diff --git a/target/linux/layerscape/patches-4.4/8129-clk-qoriq-add-ls1046a-support.patch b/target/linux/layerscape/patches-4.4/8129-clk-qoriq-add-ls1046a-support.patch deleted file mode 100644 index ed2f565c3..000000000 --- a/target/linux/layerscape/patches-4.4/8129-clk-qoriq-add-ls1046a-support.patch +++ /dev/null @@ -1,75 +0,0 @@ -From 4fe33d4f4dc608fc5013390db58df06723282d01 Mon Sep 17 00:00:00 2001 -From: Mingkai Hu -Date: Thu, 2 Jun 2016 11:15:58 +0800 -Subject: [PATCH 129/141] clk: qoriq: add ls1046a support - -Signed-off-by: Mingkai Hu -Integated-by: Yutang Jiang ---- - drivers/clk/clk-qoriq.c | 41 +++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 41 insertions(+) - ---- a/drivers/clk/clk-qoriq.c -+++ b/drivers/clk/clk-qoriq.c -@@ -275,6 +275,31 @@ static const struct clockgen_muxinfo ls1 - }, - }; - -+static const struct clockgen_muxinfo ls1046a_hwa1 = { -+ { -+ {}, -+ {}, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, -+ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, -+ }, -+}; -+ -+static const struct clockgen_muxinfo ls1046a_hwa2 = { -+ { -+ {}, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, -+ {}, -+ {}, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ }, -+}; -+ - static const struct clockgen_muxinfo t1023_hwa1 = { - { - {}, -@@ -508,6 +533,21 @@ static const struct clockgen_chipinfo ch - .flags = CG_PLL_8BIT, - }, - { -+ .compat = "fsl,ls1046a-clockgen", -+ .init_periph = t2080_init_periph, -+ .cmux_groups = { -+ &t1040_cmux -+ }, -+ .hwaccel = { -+ &ls1046a_hwa1, &ls1046a_hwa2 -+ }, -+ .cmux_to_group = { -+ 0, -1 -+ }, -+ .pll_mask = 0x07, -+ .flags = CG_PLL_8BIT, -+ }, -+ { - .compat = "fsl,ls2080a-clockgen", - .cmux_groups = { - &clockgen2_cmux_cga12, &clockgen2_cmux_cgb -@@ -1292,6 +1332,7 @@ CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qo - CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init); - CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init); - CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init); -+CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init); - CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init); - CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init); - diff --git a/target/linux/layerscape/patches-4.4/8130-ls1046a-sata-Add-LS1046A-sata-support.patch b/target/linux/layerscape/patches-4.4/8130-ls1046a-sata-Add-LS1046A-sata-support.patch deleted file mode 100644 index ea384e6bd..000000000 --- a/target/linux/layerscape/patches-4.4/8130-ls1046a-sata-Add-LS1046A-sata-support.patch +++ /dev/null @@ -1,53 +0,0 @@ -From 5cd461cd17c3e27e5501e499d5d865b60ee58257 Mon Sep 17 00:00:00 2001 -From: Gong Qianyu -Date: Mon, 26 Sep 2016 12:29:24 +0800 -Subject: [PATCH 130/141] ls1046a/sata: Add LS1046A sata support - -Signed-off-by: Gong Qianyu -Integrated-by: Zhao Qiang -Integated-by: Yutang Jiang ---- - drivers/ata/ahci_qoriq.c | 13 +++++++++++++ - 1 file changed, 13 insertions(+) - ---- a/drivers/ata/ahci_qoriq.c -+++ b/drivers/ata/ahci_qoriq.c -@@ -40,11 +40,16 @@ - #define 
AHCI_PORT_PHY_5_CFG 0x192c96a4 - #define AHCI_PORT_TRANS_CFG 0x08000025 - -+/* for ls1046a */ -+#define LS1046A_PORT_PHY2 0x28184d1f -+#define LS1046A_PORT_PHY3 0x0e081509 -+ - #define SATA_ECC_DISABLE 0x00020000 - - enum ahci_qoriq_type { - AHCI_LS1021A, - AHCI_LS1043A, -+ AHCI_LS1046A, - AHCI_LS2080A, - }; - -@@ -57,6 +62,7 @@ struct ahci_qoriq_priv { - static const struct of_device_id ahci_qoriq_of_match[] = { - { .compatible = "fsl,ls1021a-ahci", .data = (void *)AHCI_LS1021A}, - { .compatible = "fsl,ls1043a-ahci", .data = (void *)AHCI_LS1043A}, -+ { .compatible = "fsl,ls1046a-ahci", .data = (void *)AHCI_LS1046A}, - { .compatible = "fsl,ls2080a-ahci", .data = (void *)AHCI_LS2080A}, - {}, - }; -@@ -158,6 +164,13 @@ static int ahci_qoriq_phy_init(struct ah - writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); - break; - -+ case AHCI_LS1046A: -+ writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); -+ writel(LS1046A_PORT_PHY2, reg_base + PORT_PHY2); -+ writel(LS1046A_PORT_PHY3, reg_base + PORT_PHY3); -+ writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); -+ break; -+ - case AHCI_LS1043A: - case AHCI_LS2080A: - writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); diff --git a/target/linux/layerscape/patches-4.4/8134-pci-layerscape-add-LUT-DBG-reigster-offset-member.patch b/target/linux/layerscape/patches-4.4/8134-pci-layerscape-add-LUT-DBG-reigster-offset-member.patch deleted file mode 100644 index c3256e983..000000000 --- a/target/linux/layerscape/patches-4.4/8134-pci-layerscape-add-LUT-DBG-reigster-offset-member.patch +++ /dev/null @@ -1,67 +0,0 @@ -From 57d147c02fdcbae5e61ba322d51c5734f9511fd7 Mon Sep 17 00:00:00 2001 -From: Mingkai Hu -Date: Mon, 26 Sep 2016 14:19:32 +0800 -Subject: [PATCH 134/141] pci/layerscape: add LUT DBG register offset member - -commit 59ab37d6f46356a5b9755fcec74b23616dfdd62f -[doesn't apply pm part] - -Different chips have different LUT debug register offsets, -so add a member to avoid macro redefinition.
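The effect of the new member is easiest to see in the resulting link-up test: the LUT debug offset now comes from the per-SoC drvdata rather than a shared PCIE_LUT_DBG macro. A condensed sketch of the driver code after this patch, abridged from the hunks below rather than a standalone build:

	struct ls_pcie_drvdata {
		u32 lut_offset;
		u32 ltssm_shift;
		u32 lut_dbg;	/* SoC-specific LUT debug register offset */
		struct pcie_host_ops *ops;
	};

	static int ls_pcie_link_up(struct pcie_port *pp)
	{
		struct ls_pcie *pcie = to_ls_pcie(pp);
		u32 state;

		/* Read the LTSSM state through the drvdata offset. */
		state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >>
			 pcie->drvdata->ltssm_shift) & LTSSM_STATE_MASK;

		/* The link is up once the LTSSM has reached L0. */
		return state >= LTSSM_PCIE_L0;
	}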
- -Signed-off-by: Mingkai Hu -Integrated-by: Zhao Qiang -Integrated-by: Yutang Jiang ---- - drivers/pci/host/pci-layerscape.c | 8 ++++++-- - 1 file changed, 6 insertions(+), 2 deletions(-) - ---- a/drivers/pci/host/pci-layerscape.c -+++ b/drivers/pci/host/pci-layerscape.c -@@ -41,6 +41,7 @@ - struct ls_pcie_drvdata { - u32 lut_offset; - u32 ltssm_shift; -+ u32 lut_dbg; - struct pcie_host_ops *ops; - }; - -@@ -134,7 +135,7 @@ static int ls_pcie_link_up(struct pcie_p - struct ls_pcie *pcie = to_ls_pcie(pp); - u32 state; - -- state = (ioread32(pcie->lut + PCIE_LUT_DBG) >> -+ state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >> - pcie->drvdata->ltssm_shift) & - LTSSM_STATE_MASK; - -@@ -196,24 +197,28 @@ static struct ls_pcie_drvdata ls1021_drv - static struct ls_pcie_drvdata ls1012_drvdata = { - .lut_offset = 0xC0000, - .ltssm_shift = 24, -+ .lut_dbg = 0x7fc, - .ops = &ls_pcie_host_ops, - }; - - static struct ls_pcie_drvdata ls1043_drvdata = { - .lut_offset = 0x10000, - .ltssm_shift = 24, -+ .lut_dbg = 0x7fc, - .ops = &ls_pcie_host_ops, - }; - - static struct ls_pcie_drvdata ls1046_drvdata = { -- .lut_offset = 0x10000, -+ .lut_offset = 0x80000, - .ltssm_shift = 24, -+ .lut_dbg = 0x407fc, - .ops = &ls_pcie_host_ops, - }; - - static struct ls_pcie_drvdata ls2080_drvdata = { - .lut_offset = 0x80000, - .ltssm_shift = 0, -+ .lut_dbg = 0x7fc, - .ops = &ls_pcie_host_ops, - }; - diff --git a/target/linux/layerscape/patches-4.4/8136-drivers-mmc-Add-compatible-string-for-LS1088A.patch b/target/linux/layerscape/patches-4.4/8136-drivers-mmc-Add-compatible-string-for-LS1088A.patch deleted file mode 100644 index cf0cecf78..000000000 --- a/target/linux/layerscape/patches-4.4/8136-drivers-mmc-Add-compatible-string-for-LS1088A.patch +++ /dev/null @@ -1,24 +0,0 @@ -From f6f7c6ecdecfb75412a17205d9ac4905f6bc2851 Mon Sep 17 00:00:00 2001 -From: Rai Harninder -Date: Thu, 18 Feb 2016 16:35:35 +0530 -Subject: [PATCH 136/141] drivers/mmc: Add compatible string for LS1088A - -Signed-off-by: Rai Harninder -Signed-off-by: Pratiyush Mohan Srivastava -Signed-off-by: Raghav Dogra ---- - drivers/mmc/host/sdhci-pltfm.c | 3 +++ - 1 file changed, 3 insertions(+) - ---- a/drivers/mmc/host/sdhci-pltfm.c -+++ b/drivers/mmc/host/sdhci-pltfm.c -@@ -93,6 +93,9 @@ void sdhci_get_of_property(struct platfo - if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc")) - host->quirks |= SDHCI_QUIRK_BROKEN_DMA; - -+ if (of_device_is_compatible(np, "fsl,ls1088a-esdhc")) -+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; -+ - if (of_device_is_compatible(np, "fsl,p2020-esdhc") || - of_device_is_compatible(np, "fsl,p1010-esdhc") || - of_device_is_compatible(np, "fsl,t4240-esdhc") || diff --git a/target/linux/layerscape/patches-4.4/8137-armv8-ls1088a-Add-PCIe-compatible.patch b/target/linux/layerscape/patches-4.4/8137-armv8-ls1088a-Add-PCIe-compatible.patch deleted file mode 100644 index d6137a8af..000000000 --- a/target/linux/layerscape/patches-4.4/8137-armv8-ls1088a-Add-PCIe-compatible.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 1aeb63c52ade1219032161fcdb923aa4c62b3796 Mon Sep 17 00:00:00 2001 -From: Prabhakar Kushwaha -Date: Sun, 9 Oct 2016 14:52:49 +0800 -Subject: [PATCH 137/141] armv8: ls1088a: Add PCIe compatible - -commit: 1a089a382b187c80390f022d1e3f3749b2adcc64 -[don't apply dtsi] - -Signed-off-by: Prabhakar Kushwaha -Integrated-by: Zhao Qiang ---- - drivers/pci/host/pci-layerscape.c | 8 ++++++++ - 1 file changed, 8 insertions(+) - ---- a/drivers/pci/host/pci-layerscape.c -+++ b/drivers/pci/host/pci-layerscape.c -@@ -215,6 +215,13 
@@ static struct ls_pcie_drvdata ls1046_drv - .ops = &ls_pcie_host_ops, - }; - -+static struct ls_pcie_drvdata ls1088_drvdata = { -+ .lut_offset = 0x80000, -+ .ltssm_shift = 0, -+ .lut_dbg = 0x407fc, -+ .ops = &ls_pcie_host_ops, -+}; -+ - static struct ls_pcie_drvdata ls2080_drvdata = { - .lut_offset = 0x80000, - .ltssm_shift = 0, -@@ -227,6 +234,7 @@ static const struct of_device_id ls_pcie - { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata }, - { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata }, - { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata }, -+ { .compatible = "fsl,ls1088a-pcie", .data = &ls1088_drvdata }, - { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, - { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata }, - { }, diff --git a/target/linux/layerscape/patches-4.4/8138-pci-layerscape-add-MSI-interrupt-support.patch b/target/linux/layerscape/patches-4.4/8138-pci-layerscape-add-MSI-interrupt-support.patch deleted file mode 100644 index cbfa86c1e..000000000 --- a/target/linux/layerscape/patches-4.4/8138-pci-layerscape-add-MSI-interrupt-support.patch +++ /dev/null @@ -1,259 +0,0 @@ -From b0e74277164b17bb0d207ffe16056e13e558f6ba Mon Sep 17 00:00:00 2001 -From: Zhao Qiang -Date: Tue, 11 Oct 2016 16:25:07 +0800 -Subject: [PATCH 138/141] pci-layerscape: add MSI interrupt support - -Signed-off-by: Zhao Qiang ---- - drivers/iommu/amd_iommu.c | 5 +++-- - drivers/iommu/arm-smmu.c | 21 ++++++++++++++++++ - drivers/iommu/iommu.c | 8 +++---- - drivers/pci/host/pci-layerscape.c | 43 +++++++++++++++++++++++++++++++++++++ - drivers/pci/host/pci-layerscape.h | 17 +++++++++++++++ - drivers/pci/quirks.c | 19 +++++++++------- - drivers/pci/search.c | 5 ++--- - include/linux/pci.h | 6 +++--- - 8 files changed, 104 insertions(+), 20 deletions(-) - create mode 100644 drivers/pci/host/pci-layerscape.h - ---- a/drivers/iommu/amd_iommu.c -+++ b/drivers/iommu/amd_iommu.c -@@ -222,8 +222,9 @@ static u16 get_alias(struct device *dev) - */ - if (pci_alias == devid && - PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) { -- pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; -- pdev->dma_alias_devfn = ivrs_alias & 0xff; -+ pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVID; -+ pdev->dma_alias_devid = PCI_DEVID(pdev->bus->number, -+ ivrs_alias & 0xff); - pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n", - PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias), - dev_name(dev)); ---- a/drivers/iommu/arm-smmu.c -+++ b/drivers/iommu/arm-smmu.c -@@ -45,6 +45,10 @@ - - #include - -+#ifdef CONFIG_PCI_LAYERSCAPE -+#include <../drivers/pci/host/pci-layerscape.h> -+#endif -+ - #include "io-pgtable.h" - - /* Maximum number of stream IDs assigned to a single device */ -@@ -1352,6 +1356,23 @@ static int arm_smmu_init_platform_device - static int arm_smmu_add_device(struct device *dev) - { - struct iommu_group *group; -+#ifdef CONFIG_PCI_LAYERSCAPE -+ u16 sid; -+ u32 streamid; -+ struct pci_dev *pdev; -+ if (dev_is_pci(dev)) { -+ pdev = to_pci_dev(dev); -+ -+ pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid); -+ streamid = set_pcie_streamid_translation(pdev, sid); -+ if (~streamid == 0) { -+ return -ENODEV; -+ } -+ -+ pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVID; -+ pdev->dma_alias_devid = streamid; -+ } -+#endif - - group = iommu_group_get_for_dev(dev); - if (IS_ERR(group)) ---- a/drivers/iommu/iommu.c -+++ b/drivers/iommu/iommu.c -@@ -697,10 +697,10 @@ static struct iommu_group *get_pci_alias - continue; - - /* We alias them or they alias us */ -- if (((pdev->dev_flags 
& PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) && -- pdev->dma_alias_devfn == tmp->devfn) || -- ((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) && -- tmp->dma_alias_devfn == pdev->devfn)) { -+ if (((pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVID) && -+ (pdev->dma_alias_devid & 0xff) == tmp->devfn) || -+ ((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVID) && -+ (tmp->dma_alias_devid & 0xff) == pdev->devfn)) { - - group = get_pci_alias_group(tmp, devfns); - if (group) { ---- a/drivers/pci/host/pci-layerscape.c -+++ b/drivers/pci/host/pci-layerscape.c -@@ -37,6 +37,11 @@ - - /* PEX LUT registers */ - #define PCIE_LUT_DBG 0x7FC /* PEX LUT Debug Register */ -+#define PCIE_LUT_UDR(n) (0x800 + (n) * 8) -+#define PCIE_LUT_LDR(n) (0x804 + (n) * 8) -+#define PCIE_LUT_MASK_ALL 0xffff -+#define PCIE_LUT_DR_NUM 32 -+#define PCIE_LUT_ENABLE (1 << 31) - - struct ls_pcie_drvdata { - u32 lut_offset; -@@ -52,10 +57,30 @@ struct ls_pcie { - struct pcie_port pp; - const struct ls_pcie_drvdata *drvdata; - int index; -+ const u32 *avail_streamids; -+ int streamid_index; - }; - - #define to_ls_pcie(x) container_of(x, struct ls_pcie, pp) - -+u32 set_pcie_streamid_translation(struct pci_dev *pdev, u32 devid) -+{ -+ u32 index, streamid; -+ struct pcie_port *pp = pdev->bus->sysdata; -+ struct ls_pcie *pcie = to_ls_pcie(pp); -+ -+ if (!pcie->avail_streamids || !pcie->streamid_index) -+ return ~(u32)0; -+ -+ index = --pcie->streamid_index; -+ /* mask is set as all zeroes, want to match all bits */ -+ iowrite32((devid << 16), pcie->lut + PCIE_LUT_UDR(index)); -+ streamid = be32_to_cpup(&pcie->avail_streamids[index]); -+ iowrite32(streamid | PCIE_LUT_ENABLE, pcie->lut + PCIE_LUT_LDR(index)); -+ -+ return streamid; -+} -+ - static bool ls_pcie_is_bridge(struct ls_pcie *pcie) - { - u32 header_type; -@@ -284,10 +309,28 @@ static int __init ls_pcie_probe(struct p - - pcie->drvdata = match->data; - pcie->lut = pcie->dbi + pcie->drvdata->lut_offset; -+ /* Disable LDR zero */ -+ iowrite32(0, pcie->lut + PCIE_LUT_LDR(0)); - - if (!ls_pcie_is_bridge(pcie)) - return -ENODEV; - -+ if (of_device_is_compatible(pdev->dev.of_node, "fsl,ls2085a-pcie") || -+ of_device_is_compatible(pdev->dev.of_node, "fsl,ls2080a-pcie") || -+ of_device_is_compatible(pdev->dev.of_node, "fsl,ls1088a-pcie")) { -+ int len; -+ const u32 *prop; -+ struct device_node *np; -+ -+ np = pdev->dev.of_node; -+ prop = (u32 *)of_get_property(np, "available-stream-ids", &len); -+ if (prop) { -+ pcie->avail_streamids = prop; -+ pcie->streamid_index = len/sizeof(u32); -+ } else -+ dev_err(&pdev->dev, "PCIe endpoint partitioning not possible\n"); -+ } -+ - ret = ls_add_pcie_port(&pcie->pp, pdev); - if (ret < 0) - return ret; ---- /dev/null -+++ b/drivers/pci/host/pci-layerscape.h -@@ -0,0 +1,17 @@ -+/* -+ * Copyright (C) 2015 Freescale Semiconductor. -+ * -+ * Author: Varun Sethi -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. 
-+ */ -+ -+#ifndef _PCI_LAYERSCAPE_H -+#define _PCI_LAYERSCAPE_H -+ -+/* function for setting up stream id to device id translation */ -+u32 set_pcie_streamid_translation(struct pci_dev *pdev, u32 devid); -+ -+#endif /* _PCI_LAYERSCAPE_H */ ---- a/drivers/pci/quirks.c -+++ b/drivers/pci/quirks.c -@@ -3589,8 +3589,9 @@ int pci_dev_specific_reset(struct pci_de - static void quirk_dma_func0_alias(struct pci_dev *dev) - { - if (PCI_FUNC(dev->devfn) != 0) { -- dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0); -- dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; -+ dev->dma_alias_devid = PCI_DEVID(dev->bus->number, -+ PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); -+ dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVID; - } - } - -@@ -3605,8 +3606,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_R - static void quirk_dma_func1_alias(struct pci_dev *dev) - { - if (PCI_FUNC(dev->devfn) != 1) { -- dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 1); -- dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; -+ dev->dma_alias_devid = PCI_DEVID(dev->bus->number, -+ PCI_DEVFN(PCI_SLOT(dev->devfn), 1)); -+ dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVID; - } - } - -@@ -3670,11 +3672,12 @@ static void quirk_fixed_dma_alias(struct - - id = pci_match_id(fixed_dma_alias_tbl, dev); - if (id) { -- dev->dma_alias_devfn = id->driver_data; -- dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; -+ dev->dma_alias_devid = PCI_DEVID(dev->bus->number, -+ id->driver_data); -+ dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVID; - dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n", -- PCI_SLOT(dev->dma_alias_devfn), -- PCI_FUNC(dev->dma_alias_devfn)); -+ PCI_SLOT(dev->dma_alias_devid), -+ PCI_FUNC(dev->dma_alias_devid)); - } - } - ---- a/drivers/pci/search.c -+++ b/drivers/pci/search.c -@@ -40,9 +40,8 @@ int pci_for_each_dma_alias(struct pci_de - * If the device is broken and uses an alias requester ID for - * DMA, iterate over that too. 
- */ -- if (unlikely(pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN)) { -- ret = fn(pdev, PCI_DEVID(pdev->bus->number, -- pdev->dma_alias_devfn), data); -+ if (unlikely(pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVID)) { -+ ret = fn(pdev, pdev->dma_alias_devid, data); - if (ret) - return ret; - } ---- a/include/linux/pci.h -+++ b/include/linux/pci.h -@@ -172,8 +172,8 @@ enum pci_dev_flags { - PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2), - /* Flag for quirk use to store if quirk-specific ACS is enabled */ - PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3), -- /* Flag to indicate the device uses dma_alias_devfn */ -- PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4), -+ /* Flag to indicate the device uses dma_alias_devid */ -+ PCI_DEV_FLAGS_DMA_ALIAS_DEVID = (__force pci_dev_flags_t) (1 << 4), - /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */ - PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5), - /* Do not use bus resets for device */ -@@ -279,7 +279,7 @@ struct pci_dev { - u8 rom_base_reg; /* which config register controls the ROM */ - u8 pin; /* which interrupt pin this device uses */ - u16 pcie_flags_reg; /* cached PCIe Capabilities Register */ -- u8 dma_alias_devfn;/* devfn of DMA alias, if any */ -+ u32 dma_alias_devid;/* devid of DMA alias */ - - struct pci_driver *driver; /* which driver has allocated this device */ - u64 dma_mask; /* Mask of the bits of bus address this diff --git a/target/linux/layerscape/patches-4.4/8142-drivers-mmc-Add-compatible-string-for-LS1046A.patch b/target/linux/layerscape/patches-4.4/8142-drivers-mmc-Add-compatible-string-for-LS1046A.patch deleted file mode 100644 index 34add83db..000000000 --- a/target/linux/layerscape/patches-4.4/8142-drivers-mmc-Add-compatible-string-for-LS1046A.patch +++ /dev/null @@ -1,20 +0,0 @@ -From 81c67bb120106c4fbd9b7191ab057b48e13e3e5e Mon Sep 17 00:00:00 2001 -From: Yutang Jiang -Date: Tue, 8 Nov 2016 01:27:50 +0800 -Subject: [PATCH 142/142] drivers/mmc: Add compatible string for LS1046A - -Signed-off-by: Yutang Jiang ---- - drivers/mmc/host/sdhci-of-esdhc.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/mmc/host/sdhci-of-esdhc.c -+++ b/drivers/mmc/host/sdhci-of-esdhc.c -@@ -607,6 +607,7 @@ static int sdhci_esdhc_probe(struct plat - of_device_is_compatible(np, "fsl,p4080-esdhc") || - of_device_is_compatible(np, "fsl,p1020-esdhc") || - of_device_is_compatible(np, "fsl,t1040-esdhc") || -+ of_device_is_compatible(np, "fsl,ls1046a-esdhc") || - of_device_is_compatible(np, "fsl,ls1021a-esdhc")) - host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; - diff --git a/target/linux/layerscape/patches-4.4/8229-drivers-clk-qoriq-Add-ls2088a-key-to-chipinfo-table.patch b/target/linux/layerscape/patches-4.4/8229-drivers-clk-qoriq-Add-ls2088a-key-to-chipinfo-table.patch deleted file mode 100644 index efc6467b0..000000000 --- a/target/linux/layerscape/patches-4.4/8229-drivers-clk-qoriq-Add-ls2088a-key-to-chipinfo-table.patch +++ /dev/null @@ -1,30 +0,0 @@ -From cb8a47d43caa2b07a62d81ee0b65c0d16560c276 Mon Sep 17 00:00:00 2001 -From: Abhimanyu Saini -Date: Fri, 3 Jun 2016 13:15:28 +0530 -Subject: [PATCH 229/238] drivers: clk: qoriq: Add ls2088a key to chipinfo - table - ---- - drivers/clk/clk-qoriq.c | 11 +++++++++++ - 1 file changed, 11 insertions(+) - ---- a/drivers/clk/clk-qoriq.c -+++ b/drivers/clk/clk-qoriq.c -@@ -559,6 +559,17 @@ static const struct clockgen_chipinfo ch - .flags = CG_VER3 | CG_LITTLE_ENDIAN, - }, - { -+ .compat = 
"fsl,ls2088a-clockgen", -+ .cmux_groups = { -+ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb -+ }, -+ .cmux_to_group = { -+ 0, 0, 1, 1, -1 -+ }, -+ .pll_mask = 0x37, -+ .flags = CG_VER3 | CG_LITTLE_ENDIAN, -+ }, -+ { - .compat = "fsl,p2041-clockgen", - .guts_compat = "fsl,qoriq-device-config-1.0", - .init_periph = p2041_init_periph, diff --git a/target/linux/layerscape/patches-4.4/8230-layerscape-pci-fix-linkup-issue.patch b/target/linux/layerscape/patches-4.4/8230-layerscape-pci-fix-linkup-issue.patch deleted file mode 100644 index f28776cba..000000000 --- a/target/linux/layerscape/patches-4.4/8230-layerscape-pci-fix-linkup-issue.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 1b23a4e0f03063f823ea38065c1106f62a56b408 Mon Sep 17 00:00:00 2001 -From: Mingkai Hu -Date: Mon, 7 Nov 2016 15:03:51 +0800 -Subject: [PATCH 230/238] layerscape/pci: fix linkup issue - -commit e6612d785198abbb39142e2acb63f9bff26ab718 -[context adjustment] - -Signed-off-by: Mingkai Hu -Integrated-by: Zhao Qiang ---- - drivers/pci/host/pci-layerscape.c | 13 +++++++++---- - 1 file changed, 9 insertions(+), 4 deletions(-) - ---- a/drivers/pci/host/pci-layerscape.c -+++ b/drivers/pci/host/pci-layerscape.c -@@ -158,11 +158,16 @@ static void ls1021_pcie_host_init(struct - static int ls_pcie_link_up(struct pcie_port *pp) - { - struct ls_pcie *pcie = to_ls_pcie(pp); -- u32 state; -+ u32 state, offset; - -- state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >> -- pcie->drvdata->ltssm_shift) & -- LTSSM_STATE_MASK; -+ if (of_get_property(pp->dev->of_node, "fsl,lut_diff", NULL)) -+ offset = 0x407fc; -+ else -+ offset = PCIE_LUT_DBG; -+ -+ state = (ioread32(pcie->lut + offset) >> -+ pcie->drvdata->ltssm_shift) & -+ LTSSM_STATE_MASK; - - if (state < LTSSM_PCIE_L0) - return 0; diff --git a/target/linux/layerscape/patches-4.4/8231-driver-clk-qoriq-Add-ls2088a-clk-support.patch b/target/linux/layerscape/patches-4.4/8231-driver-clk-qoriq-Add-ls2088a-clk-support.patch deleted file mode 100644 index 6c95cd025..000000000 --- a/target/linux/layerscape/patches-4.4/8231-driver-clk-qoriq-Add-ls2088a-clk-support.patch +++ /dev/null @@ -1,20 +0,0 @@ -From c62b4977614e133acc95c61237bcc8fe30581d13 Mon Sep 17 00:00:00 2001 -From: "ying.zhang" -Date: Thu, 22 Dec 2016 23:29:39 +0800 -Subject: [PATCH 231/238] driver: clk: qoriq: Add ls2088a clk support - -Signed-off-by: Zhao Qiang wq ---- - drivers/clk/clk-qoriq.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/clk/clk-qoriq.c -+++ b/drivers/clk/clk-qoriq.c -@@ -1346,6 +1346,7 @@ CLK_OF_DECLARE(qoriq_clockgen_ls1043a, " - CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init); - CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init); - CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init); -+CLK_OF_DECLARE(qoriq_clockgen_ls2088a, "fsl,ls2088a-clockgen", clockgen_init); - - /* Legacy nodes */ - CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init); diff --git a/target/linux/layerscape/patches-4.4/8233-i2c-pca954x-Add-option-to-skip-disabling-PCA954x-Mux.patch b/target/linux/layerscape/patches-4.4/8233-i2c-pca954x-Add-option-to-skip-disabling-PCA954x-Mux.patch deleted file mode 100644 index 3cb39764f..000000000 --- a/target/linux/layerscape/patches-4.4/8233-i2c-pca954x-Add-option-to-skip-disabling-PCA954x-Mux.patch +++ /dev/null @@ -1,105 +0,0 @@ -From a4be9046c3a3fc39a06089553df8cc19a2abd814 Mon Sep 17 00:00:00 2001 -From: Priyanka Jain -Date: Tue, 3 Nov 2015 11:25:24 +0530 -Subject: [PATCH 233/238] i2c: pca954x: Add 
option to skip disabling PCA954x - Mux device - -On some Layerscape boards like LS2085ARDB/LS2080ARDB, -input pull-up resistors on PCA954x Mux device are -missing on board. So, if the mux is disabled after power-on, -input lines will float, leading to incorrect functionality. - -Hence, PCA954x Mux device should never be turned off after -power-on. - -Add option to skip disabling PCA954x Mux device -if the device tree contains the "i2c-mux-never-disable" property -for the pca954x device node. - -Signed-off-by: Priyanka Jain ---- - drivers/i2c/muxes/i2c-mux-pca954x.c | 38 +++++++++++++++++++++++++++++++++++ - 1 file changed, 38 insertions(+) - ---- a/drivers/i2c/muxes/i2c-mux-pca954x.c -+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c -@@ -63,6 +63,7 @@ struct pca954x { - struct i2c_adapter *virt_adaps[PCA954X_MAX_NCHANS]; - - u8 last_chan; /* last register value */ -+ u8 disable_mux; /* do not disable mux if val not 0 */ - }; - - struct chip_desc { -@@ -174,6 +175,13 @@ static int pca954x_deselect_mux(struct i - { - struct pca954x *data = i2c_get_clientdata(client); - -+#ifdef CONFIG_ARCH_LAYERSCAPE -+ if (data->disable_mux != 0) -+ data->last_chan = chips[data->type].nchans; -+ else -+ data->last_chan = 0; -+ return pca954x_reg_write(adap, client, data->disable_mux); -+#endif - /* Deselect active channel */ - data->last_chan = 0; - return pca954x_reg_write(adap, client, data->last_chan); -@@ -201,6 +209,23 @@ static int pca954x_probe(struct i2c_clie - if (!data) - return -ENOMEM; - -+#ifdef CONFIG_ARCH_LAYERSCAPE -+ /* The point here is that you must not disable a mux if there -+ * are no pullups on the input or you mess up the I2C. This -+ * needs to be put into the DTS really as the kernel cannot -+ * know this otherwise. -+ */ -+ data->type = id->driver_data; -+ data->disable_mux = of_node && -+ of_property_read_bool(of_node, "i2c-mux-never-disable") && -+ chips[data->type].muxtype == pca954x_ismux ? -+ chips[data->type].enable : 0; -+ /* force the first selection */ -+ if (data->disable_mux != 0) -+ data->last_chan = chips[data->type].nchans; -+ else -+ data->last_chan = 0; -+#endif - i2c_set_clientdata(client, data); - - /* Get the mux out of reset if a reset GPIO is specified. */ -@@ -212,13 +237,19 @@ static int pca954x_probe(struct i2c_clie - * that the mux is in fact present. This also - * initializes the mux to disconnected state.
- */ -+#ifdef CONFIG_ARCH_LAYERSCAPE -+ if (i2c_smbus_write_byte(client, data->disable_mux) < 0) { -+#else - if (i2c_smbus_write_byte(client, 0) < 0) { -+#endif - dev_warn(&client->dev, "probe failed\n"); - return -ENODEV; - } - -+#ifndef CONFIG_ARCH_LAYERSCAPE - data->type = id->driver_data; - data->last_chan = 0; /* force the first selection */ -+#endif - - idle_disconnect_dt = of_node && - of_property_read_bool(of_node, "i2c-mux-idle-disconnect"); -@@ -289,6 +320,13 @@ static int pca954x_resume(struct device - struct i2c_client *client = to_i2c_client(dev); - struct pca954x *data = i2c_get_clientdata(client); - -+#ifdef CONFIG_ARCH_LAYERSCAPE -+ if (data->disable_mux != 0) -+ data->last_chan = chips[data->type].nchans; -+ else -+ data->last_chan = 0; -+ return i2c_smbus_write_byte(client, data->disable_mux); -+#endif - data->last_chan = 0; - return i2c_smbus_write_byte(client, 0); - } diff --git a/target/linux/layerscape/patches-4.4/8235-pci-layerscape-fix-pci-lut-offset-issue.patch b/target/linux/layerscape/patches-4.4/8235-pci-layerscape-fix-pci-lut-offset-issue.patch deleted file mode 100644 index def79412d..000000000 --- a/target/linux/layerscape/patches-4.4/8235-pci-layerscape-fix-pci-lut-offset-issue.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 2f3ea65dc8909cbf4116bd74b3dea8d25749508f Mon Sep 17 00:00:00 2001 -From: Zhao Qiang -Date: Wed, 23 Nov 2016 11:29:45 +0800 -Subject: [PATCH 235/238] pci/layerscape: fix pci lut offset issue - -Signed-off-by: Zhao Qiang ---- - drivers/pci/host/pci-layerscape.c | 13 ++++--------- - 1 file changed, 4 insertions(+), 9 deletions(-) - ---- a/drivers/pci/host/pci-layerscape.c -+++ b/drivers/pci/host/pci-layerscape.c -@@ -158,16 +158,11 @@ static void ls1021_pcie_host_init(struct - static int ls_pcie_link_up(struct pcie_port *pp) - { - struct ls_pcie *pcie = to_ls_pcie(pp); -- u32 state, offset; -+ u32 state; - -- if (of_get_property(pp->dev->of_node, "fsl,lut_diff", NULL)) -- offset = 0x407fc; -- else -- offset = PCIE_LUT_DBG; -- -- state = (ioread32(pcie->lut + offset) >> -- pcie->drvdata->ltssm_shift) & -- LTSSM_STATE_MASK; -+ state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >> -+ pcie->drvdata->ltssm_shift) & -+ LTSSM_STATE_MASK; - - if (state < LTSSM_PCIE_L0) - return 0; diff --git a/target/linux/layerscape/patches-4.4/8236-clk-add-API-of-clks.patch b/target/linux/layerscape/patches-4.4/8236-clk-add-API-of-clks.patch deleted file mode 100644 index ce8b51830..000000000 --- a/target/linux/layerscape/patches-4.4/8236-clk-add-API-of-clks.patch +++ /dev/null @@ -1,75 +0,0 @@ -From df2373ca941741f3f66750241a048ad4e2ff2c91 Mon Sep 17 00:00:00 2001 -From: Zhao Qiang -Date: Thu, 24 Nov 2016 11:47:45 +0800 -Subject: [PATCH 236/238] clk: add API of clks - -Signed-off-by: Zhao Qiang ---- - drivers/clk/clk.c | 19 +++++++++++++++++++ - include/linux/clk-provider.h | 1 + - include/linux/clk.h | 9 +++++++++ - 3 files changed, 29 insertions(+) - ---- a/drivers/clk/clk.c -+++ b/drivers/clk/clk.c -@@ -359,6 +359,19 @@ static struct clk_core *clk_core_get_par - return core->parents[index]; - } - -+struct clk *clk_get_parent_by_index(struct clk *clk, u8 index) -+{ -+ struct clk_core *parent; -+ -+ if (!clk) -+ return NULL; -+ -+ parent = clk_core_get_parent_by_index(clk->core, index); -+ -+ return !parent ? 
NULL : parent->hw->clk; -+} -+EXPORT_SYMBOL_GPL(clk_get_parent_by_index); -+ - struct clk_hw * - clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index) - { -@@ -2033,6 +2046,12 @@ static const struct file_operations clk_ - .release = single_release, - }; - -+unsigned int clk_get_num_parents(struct clk *clk) -+{ -+ return !clk ? 0 : clk->core->num_parents; -+} -+EXPORT_SYMBOL_GPL(clk_get_num_parents); -+ - static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) - { - if (!c) ---- a/include/linux/clk-provider.h -+++ b/include/linux/clk-provider.h -@@ -656,6 +656,7 @@ unsigned int clk_hw_get_num_parents(cons - struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw); - struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw, - unsigned int index); -+struct clk *clk_get_parent_by_index(struct clk *clk, u8 index); - unsigned int __clk_get_enable_count(struct clk *clk); - unsigned long clk_hw_get_rate(const struct clk_hw *hw); - unsigned long __clk_get_flags(struct clk *clk); ---- a/include/linux/clk.h -+++ b/include/linux/clk.h -@@ -392,6 +392,15 @@ int clk_set_parent(struct clk *clk, stru - struct clk *clk_get_parent(struct clk *clk); - - /** -+ * clk_get_num_parents - get number of possible parents -+ * @clk: clock source -+ * -+ * Returns the number of possible parents of this clock, -+ * which can then be enumerated using clk_get_parent_by_index(). -+ */ -+unsigned int clk_get_num_parents(struct clk *clk); -+ -+/** - * clk_get_sys - get a clock based upon the device name - * @dev_id: device name - * @con_id: connection ID diff --git a/target/linux/layerscape/patches-4.4/8237-pcie-ls208x-use-unified-compatible-fsl-ls2080a-pcie-.patch b/target/linux/layerscape/patches-4.4/8237-pcie-ls208x-use-unified-compatible-fsl-ls2080a-pcie-.patch deleted file mode 100644 index 6162e2b25..000000000 --- a/target/linux/layerscape/patches-4.4/8237-pcie-ls208x-use-unified-compatible-fsl-ls2080a-pcie-.patch +++ /dev/null @@ -1,96 +0,0 @@ -From 562f1311b529d81662ed41786b8d240db2e2ff51 Mon Sep 17 00:00:00 2001 -From: Shengzhou Liu -Date: Tue, 6 Dec 2016 15:30:39 +0800 -Subject: [PATCH 237/238] pcie/ls208x: use unified compatible - "fsl,ls2080a-pcie" for ls208x - -To avoid unnecessary duplication, let's use the unified compatible string -"fsl,ls2080a-pcie" for ls2080a, ls2085a, ls2088a. - -This patch fixes the issue of PCIe not working on ls2088a.
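For reference, the match table that results from this change keeps one entry per register layout; ls2085a and ls2088a parts match through the "fsl,ls2080a-pcie" string that their device trees now list directly. This is a reconstruction from the surrounding hunks, not a verbatim quote of the driver:

	static const struct of_device_id ls_pcie_of_match[] = {
		{ .compatible = "fsl,ls1012a-pcie", .data = &ls1012_drvdata },
		{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
		{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
		{ .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
		{ .compatible = "fsl,ls1088a-pcie", .data = &ls1088_drvdata },
		{ .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
		{ },
	};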
- -Signed-off-by: Shengzhou Liu ---- - arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi | 12 ++++-------- - drivers/pci/host/pci-layerscape.c | 13 ++++++++----- - 2 files changed, 12 insertions(+), 13 deletions(-) - ---- a/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi -+++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi -@@ -513,8 +513,7 @@ - }; - - pcie1: pcie@3400000 { -- compatible = "fsl,ls2088a-pcie", "fsl,ls2080a-pcie", -- "fsl,ls2085a-pcie", "snps,dw-pcie"; -+ compatible = "fsl,ls2080a-pcie", "snps,dw-pcie"; - reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */ - 0x20 0x00000000 0x0 0x00002000>; /* configuration space */ - reg-names = "regs", "config"; -@@ -539,8 +538,7 @@ - }; - - pcie2: pcie@3500000 { -- compatible = "fsl,ls2080a-pcie", "fsl,ls2080a-pcie", -- "fsl,ls2085a-pcie", "snps,dw-pcie"; -+ compatible = "fsl,ls2080a-pcie", "snps,dw-pcie"; - reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */ - 0x28 0x00000000 0x0 0x00002000>; /* configuration space */ - reg-names = "regs", "config"; -@@ -565,8 +563,7 @@ - }; - - pcie3: pcie@3600000 { -- compatible = "fsl,ls2088a-pcie", "fsl,ls2080a-pcie", -- "fsl,ls2085a-pcie", "snps,dw-pcie"; -+ compatible = "fsl,ls2080a-pcie", "snps,dw-pcie"; - reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */ - 0x30 0x00000000 0x0 0x00002000>; /* configuration space */ - reg-names = "regs", "config"; -@@ -591,8 +588,7 @@ - }; - - pcie4: pcie@3700000 { -- compatible = "fsl,ls2080a-pcie", "fsl,ls2080a-pcie", -- "fsl,ls2085a-pcie", "snps,dw-pcie"; -+ compatible = "fsl,ls2080a-pcie", "snps,dw-pcie"; - reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */ - 0x38 0x00000000 0x0 0x00002000>; /* configuration space */ - reg-names = "regs", "config"; ---- a/drivers/pci/host/pci-layerscape.c -+++ b/drivers/pci/host/pci-layerscape.c -@@ -158,9 +158,14 @@ static void ls1021_pcie_host_init(struct - static int ls_pcie_link_up(struct pcie_port *pp) - { - struct ls_pcie *pcie = to_ls_pcie(pp); -- u32 state; -+ u32 state, offset; - -- state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >> -+ if (of_get_property(pp->dev->of_node, "fsl,lut_diff", NULL)) -+ offset = 0x407fc; -+ else -+ offset = pcie->drvdata->lut_dbg; -+ -+ state = (ioread32(pcie->lut + offset) >> - pcie->drvdata->ltssm_shift) & - LTSSM_STATE_MASK; - -@@ -261,7 +266,6 @@ static const struct of_device_id ls_pcie - { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata }, - { .compatible = "fsl,ls1088a-pcie", .data = &ls1088_drvdata }, - { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, -- { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata }, - { }, - }; - MODULE_DEVICE_TABLE(of, ls_pcie_of_match); -@@ -315,8 +319,7 @@ static int __init ls_pcie_probe(struct p - if (!ls_pcie_is_bridge(pcie)) - return -ENODEV; - -- if (of_device_is_compatible(pdev->dev.of_node, "fsl,ls2085a-pcie") || -- of_device_is_compatible(pdev->dev.of_node, "fsl,ls2080a-pcie") || -+ if (of_device_is_compatible(pdev->dev.of_node, "fsl,ls2080a-pcie") || - of_device_is_compatible(pdev->dev.of_node, "fsl,ls1088a-pcie")) { - int len; - const u32 *prop; diff --git a/target/linux/layerscape/patches-4.4/8238-irqchip-ls-scfg-msi-fix-typo-of-MSI-compatible-strin.patch b/target/linux/layerscape/patches-4.4/8238-irqchip-ls-scfg-msi-fix-typo-of-MSI-compatible-strin.patch deleted file mode 100644 index 8f2c3831a..000000000 --- a/target/linux/layerscape/patches-4.4/8238-irqchip-ls-scfg-msi-fix-typo-of-MSI-compatible-strin.patch +++ /dev/null @@ -1,55 +0,0 @@ -From 
5cb8ea9dfcea4092fd3710cce3980a44433dc58f Mon Sep 17 00:00:00 2001 -From: Minghuan Lian -Date: Tue, 17 Jan 2017 17:32:35 +0800 -Subject: [PATCH 04/13] irqchip/ls-scfg-msi: fix typo of MSI compatible strings - -Cherry-pick patchwork patch with context adjustment. - -The patch fixes a typo in the Layerscape SCFG MSI dts compatible -strings: "1" is replaced by "l". - -Signed-off-by: Minghuan Lian -Acked-by: Rob Herring -Signed-off-by: Yangbo Lu --- - .../devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt | 6 +++--- - drivers/irqchip/irq-ls-scfg-msi.c | 6 ++++-- - 2 files changed, 7 insertions(+), 5 deletions(-) - ---- a/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt -+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt -@@ -4,8 +4,8 @@ Required properties: - - - compatible: should be "fsl,-msi" to identify - Layerscape PCIe MSI controller block such as: -- "fsl,1s1021a-msi" -- "fsl,1s1043a-msi" -+ "fsl,ls1021a-msi" -+ "fsl,ls1043a-msi" - - msi-controller: indicates that this is a PCIe MSI controller node - - reg: physical base address of the controller and length of memory mapped. - - interrupts: an interrupt to the parent interrupt controller. -@@ -23,7 +23,7 @@ MSI controller node - Examples: - - msi1: msi-controller@1571000 { -- compatible = "fsl,1s1043a-msi"; -+ compatible = "fsl,ls1043a-msi"; - reg = <0x0 0x1571000 0x0 0x8>, - msi-controller; - interrupts = <0 116 0x4>; ---- a/drivers/irqchip/irq-ls-scfg-msi.c -+++ b/drivers/irqchip/irq-ls-scfg-msi.c -@@ -220,8 +220,10 @@ static int ls_scfg_msi_remove(struct pla - - static const struct of_device_id ls_scfg_msi_id[] = { - { .compatible = "fsl,ls1012a-msi", }, -- { .compatible = "fsl,1s1021a-msi", }, -- { .compatible = "fsl,1s1043a-msi", }, -+ { .compatible = "fsl,1s1021a-msi", }, /* a typo */ -+ { .compatible = "fsl,1s1043a-msi", }, /* a typo */ -+ { .compatible = "fsl,ls1021a-msi", }, -+ { .compatible = "fsl,ls1043a-msi", }, - { .compatible = "fsl,ls1046a-msi", }, - {}, - }; diff --git a/target/linux/layerscape/patches-4.4/8239-irqchip-ls-scfg-msi-add-LS1046a-MSI-support.patch b/target/linux/layerscape/patches-4.4/8239-irqchip-ls-scfg-msi-add-LS1046a-MSI-support.patch deleted file mode 100644 index bea806bf1..000000000 --- a/target/linux/layerscape/patches-4.4/8239-irqchip-ls-scfg-msi-add-LS1046a-MSI-support.patch +++ /dev/null @@ -1,293 +0,0 @@ -From 20fd0e76257005cb46a2ce1a30018a45d96199bf Mon Sep 17 00:00:00 2001 -From: Minghuan Lian -Date: Tue, 17 Jan 2017 17:32:41 +0800 -Subject: [PATCH 10/13] irqchip/ls-scfg-msi: add LS1046a MSI support - -Cherry-pick patchwork patch with context adjustment. - -LS1046a includes 4 MSIRs; each MSIR is assigned a dedicated GIC -SPI interrupt and provides 32 MSI interrupts. Compared to the previous -MSI, LS1046a's IBS (interrupt bit select) shift is changed to 2 and -the total MSI interrupt number is changed to 128. - -The patch adds the structure 'ls_scfg_msir' to describe the MSIR setting and -'ibs_shift' to store the value that differs between the SoCs.
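The packing is plain arithmetic: a hwirq is (bit << ibs_shift) | msir_index, so an ibs_shift of 2 with 4 MSIRs of 32 bits each yields 4 * 32 = 128 distinct hwirqs. A small standalone illustration of the encoding (plain C mirroring the hunks below; not driver code):

	#include <stdio.h>

	/* hwirq layout: interrupt-bit-select (IBS) above the MSIR index. */
	static unsigned int ls_scfg_hwirq(unsigned int bit, unsigned int msir,
					  unsigned int ibs_shift)
	{
		return (bit << ibs_shift) | msir;
	}

	int main(void)
	{
		/* LS1046a: ibs_shift = 2, 4 MSIRs x 32 bits = 128 hwirqs. */
		printf("%u\n", ls_scfg_hwirq(31, 3, 2));	/* prints 127 */
		return 0;
	}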
- -Signed-off-by: Minghuan Lian -Signed-off-by: Yangbo Lu ---- - drivers/irqchip/irq-ls-scfg-msi.c | 168 +++++++++++++++++++++++++++++--------- - 1 file changed, 131 insertions(+), 37 deletions(-) - ---- a/drivers/irqchip/irq-ls-scfg-msi.c -+++ b/drivers/irqchip/irq-ls-scfg-msi.c -@@ -17,13 +17,24 @@ - #include - #include - #include -+#include - #include - #include - #include - --#define MSI_MAX_IRQS 32 --#define MSI_IBS_SHIFT 3 --#define MSIR 4 -+#define MSI_IRQS_PER_MSIR 32 -+#define MSI_MSIR_OFFSET 4 -+ -+struct ls_scfg_msi_cfg { -+ u32 ibs_shift; /* Shift of interrupt bit select */ -+}; -+ -+struct ls_scfg_msir { -+ struct ls_scfg_msi *msi_data; -+ unsigned int index; -+ unsigned int gic_irq; -+ void __iomem *reg; -+}; - - struct ls_scfg_msi { - spinlock_t lock; -@@ -32,8 +43,11 @@ struct ls_scfg_msi { - struct irq_domain *msi_domain; - void __iomem *regs; - phys_addr_t msiir_addr; -- int irq; -- DECLARE_BITMAP(used, MSI_MAX_IRQS); -+ struct ls_scfg_msi_cfg *cfg; -+ u32 msir_num; -+ struct ls_scfg_msir *msir; -+ u32 irqs_num; -+ unsigned long *used; - }; - - static struct irq_chip ls_scfg_msi_irq_chip = { -@@ -55,7 +69,7 @@ static void ls_scfg_msi_compose_msg(stru - - msg->address_hi = upper_32_bits(msi_data->msiir_addr); - msg->address_lo = lower_32_bits(msi_data->msiir_addr); -- msg->data = data->hwirq << MSI_IBS_SHIFT; -+ msg->data = data->hwirq; - } - - static int ls_scfg_msi_set_affinity(struct irq_data *irq_data, -@@ -81,8 +95,8 @@ static int ls_scfg_msi_domain_irq_alloc( - WARN_ON(nr_irqs != 1); - - spin_lock(&msi_data->lock); -- pos = find_first_zero_bit(msi_data->used, MSI_MAX_IRQS); -- if (pos < MSI_MAX_IRQS) -+ pos = find_first_zero_bit(msi_data->used, msi_data->irqs_num); -+ if (pos < msi_data->irqs_num) - __set_bit(pos, msi_data->used); - else - err = -ENOSPC; -@@ -106,7 +120,7 @@ static void ls_scfg_msi_domain_irq_free( - int pos; - - pos = d->hwirq; -- if (pos < 0 || pos >= MSI_MAX_IRQS) { -+ if (pos < 0 || pos >= msi_data->irqs_num) { - pr_err("failed to teardown msi. 
Invalid hwirq %d\n", pos); - return; - } -@@ -123,15 +137,17 @@ static const struct irq_domain_ops ls_sc - - static void ls_scfg_msi_irq_handler(struct irq_desc *desc) - { -- struct ls_scfg_msi *msi_data = irq_desc_get_handler_data(desc); -+ struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc); -+ struct ls_scfg_msi *msi_data = msir->msi_data; - unsigned long val; -- int pos, virq; -+ int pos, virq, hwirq; - - chained_irq_enter(irq_desc_get_chip(desc), desc); - -- val = ioread32be(msi_data->regs + MSIR); -- for_each_set_bit(pos, &val, MSI_MAX_IRQS) { -- virq = irq_find_mapping(msi_data->parent, (31 - pos)); -+ val = ioread32be(msir->reg); -+ for_each_set_bit(pos, &val, MSI_IRQS_PER_MSIR) { -+ hwirq = ((31 - pos) << msi_data->cfg->ibs_shift) | msir->index; -+ virq = irq_find_mapping(msi_data->parent, hwirq); - if (virq) - generic_handle_irq(virq); - } -@@ -143,7 +159,7 @@ static int ls_scfg_msi_domains_init(stru - { - /* Initialize MSI domain parent */ - msi_data->parent = irq_domain_add_linear(NULL, -- MSI_MAX_IRQS, -+ msi_data->irqs_num, - &ls_scfg_msi_domain_ops, - msi_data); - if (!msi_data->parent) { -@@ -164,16 +180,88 @@ static int ls_scfg_msi_domains_init(stru - return 0; - } - -+static int ls_scfg_msi_setup_hwirq(struct ls_scfg_msi *msi_data, int index) -+{ -+ struct ls_scfg_msir *msir; -+ int virq, i, hwirq; -+ -+ virq = platform_get_irq(msi_data->pdev, index); -+ if (virq <= 0) -+ return -ENODEV; -+ -+ msir = &msi_data->msir[index]; -+ msir->index = index; -+ msir->msi_data = msi_data; -+ msir->gic_irq = virq; -+ msir->reg = msi_data->regs + MSI_MSIR_OFFSET + 4 * index; -+ -+ irq_set_chained_handler_and_data(msir->gic_irq, -+ ls_scfg_msi_irq_handler, -+ msir); -+ -+ /* Release the hwirqs corresponding to this MSIR */ -+ for (i = 0; i < MSI_IRQS_PER_MSIR; i++) { -+ hwirq = i << msi_data->cfg->ibs_shift | msir->index; -+ bitmap_clear(msi_data->used, hwirq, 1); -+ } -+ -+ return 0; -+} -+ -+static int ls_scfg_msi_teardown_hwirq(struct ls_scfg_msir *msir) -+{ -+ struct ls_scfg_msi *msi_data = msir->msi_data; -+ int i, hwirq; -+ -+ if (msir->gic_irq > 0) -+ irq_set_chained_handler_and_data(msir->gic_irq, NULL, NULL); -+ -+ for (i = 0; i < MSI_IRQS_PER_MSIR; i++) { -+ hwirq = i << msi_data->cfg->ibs_shift | msir->index; -+ bitmap_set(msi_data->used, hwirq, 1); -+ } -+ -+ return 0; -+} -+ -+static struct ls_scfg_msi_cfg ls1021_msi_cfg = { -+ .ibs_shift = 3, -+}; -+ -+static struct ls_scfg_msi_cfg ls1046_msi_cfg = { -+ .ibs_shift = 2, -+}; -+ -+static const struct of_device_id ls_scfg_msi_id[] = { -+ /* The following two misspelled compatibles are obsolete */ -+ { .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg}, -+ { .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg}, -+ -+ { .compatible = "fsl,ls1012a-msi", .data = &ls1021_msi_cfg }, -+ { .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg }, -+ { .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg }, -+ { .compatible = "fsl,ls1046a-msi", .data = &ls1046_msi_cfg }, -+ {}, -+}; -+MODULE_DEVICE_TABLE(of, ls_scfg_msi_id); -+ - static int ls_scfg_msi_probe(struct platform_device *pdev) - { -+ const struct of_device_id *match; - struct ls_scfg_msi *msi_data; - struct resource *res; -- int ret; -+ int i, ret; -+ -+ match = of_match_device(ls_scfg_msi_id, &pdev->dev); -+ if (!match) -+ return -ENODEV; - - msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL); - if (!msi_data) - return -ENOMEM; - -+ msi_data->cfg = (struct ls_scfg_msi_cfg *) match->data; -+ - res = platform_get_resource(pdev, 
IORESOURCE_MEM, 0); - msi_data->regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(msi_data->regs)) { -@@ -182,23 +270,37 @@ static int ls_scfg_msi_probe(struct plat - } - msi_data->msiir_addr = res->start; - -- msi_data->irq = platform_get_irq(pdev, 0); -- if (msi_data->irq <= 0) { -- dev_err(&pdev->dev, "failed to get MSI irq\n"); -- return -ENODEV; -- } -- - msi_data->pdev = pdev; - spin_lock_init(&msi_data->lock); - -+ msi_data->irqs_num = MSI_IRQS_PER_MSIR * -+ (1 << msi_data->cfg->ibs_shift); -+ msi_data->used = devm_kcalloc(&pdev->dev, -+ BITS_TO_LONGS(msi_data->irqs_num), -+ sizeof(*msi_data->used), -+ GFP_KERNEL); -+ if (!msi_data->used) -+ return -ENOMEM; -+ /* -+ * Reserve all the hwirqs -+ * The available hwirqs will be released in ls1_msi_setup_hwirq() -+ */ -+ bitmap_set(msi_data->used, 0, msi_data->irqs_num); -+ -+ msi_data->msir_num = of_irq_count(pdev->dev.of_node); -+ msi_data->msir = devm_kcalloc(&pdev->dev, msi_data->msir_num, -+ sizeof(*msi_data->msir), -+ GFP_KERNEL); -+ if (!msi_data->msir) -+ return -ENOMEM; -+ -+ for (i = 0; i < msi_data->msir_num; i++) -+ ls_scfg_msi_setup_hwirq(msi_data, i); -+ - ret = ls_scfg_msi_domains_init(msi_data); - if (ret) - return ret; - -- irq_set_chained_handler_and_data(msi_data->irq, -- ls_scfg_msi_irq_handler, -- msi_data); -- - platform_set_drvdata(pdev, msi_data); - - return 0; -@@ -207,8 +309,10 @@ static int ls_scfg_msi_probe(struct plat - static int ls_scfg_msi_remove(struct platform_device *pdev) - { - struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev); -+ int i; - -- irq_set_chained_handler_and_data(msi_data->irq, NULL, NULL); -+ for (i = 0; i < msi_data->msir_num; i++) -+ ls_scfg_msi_teardown_hwirq(&msi_data->msir[i]); - - irq_domain_remove(msi_data->msi_domain); - irq_domain_remove(msi_data->parent); -@@ -218,16 +322,6 @@ static int ls_scfg_msi_remove(struct pla - return 0; - } - --static const struct of_device_id ls_scfg_msi_id[] = { -- { .compatible = "fsl,ls1012a-msi", }, -- { .compatible = "fsl,1s1021a-msi", }, /* a typo */ -- { .compatible = "fsl,1s1043a-msi", }, /* a typo */ -- { .compatible = "fsl,ls1021a-msi", }, -- { .compatible = "fsl,ls1043a-msi", }, -- { .compatible = "fsl,ls1046a-msi", }, -- {}, --}; -- - static struct platform_driver ls_scfg_msi_driver = { - .driver = { - .name = "ls-scfg-msi", diff --git a/target/linux/layerscape/patches-4.4/8240-irqchip-ls-scfg-msi-add-LS1043a-v1.1-MSI-support.patch b/target/linux/layerscape/patches-4.4/8240-irqchip-ls-scfg-msi-add-LS1043a-v1.1-MSI-support.patch deleted file mode 100644 index d1266a23a..000000000 --- a/target/linux/layerscape/patches-4.4/8240-irqchip-ls-scfg-msi-add-LS1043a-v1.1-MSI-support.patch +++ /dev/null @@ -1,134 +0,0 @@ -From ab9d5c5c767c17bf9526f84beb5667f2a50e1a4d Mon Sep 17 00:00:00 2001 -From: Minghuan Lian -Date: Tue, 17 Jan 2017 17:32:42 +0800 -Subject: [PATCH] irqchip/ls-scfg-msi: add LS1043a v1.1 MSI support - -Cherry-pick patchwork patch with context adjustment. - -A MSI controller of LS1043a v1.0 only includes one MSIR and -is assigned one GIC interrupt. In order to support affinity, -LS1043a v1.1 MSI is assigned 4 MSIRs and 4 GIC interrupts. -But the MSIR has the different offset and only supports 8 MSIs. -The bits between variable bit_start and bit_end in structure -ls_scfg_msir are used to show 8 MSI interrupts. msir_irqs and -msir_base are added to describe the difference of MSI between -LS1043a v1.1 and other SoCs. 
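Concretely, each of the four MSIRs on LS1043a v1.1 owns an 8-bit window of the 32-bit register: MSIR0 covers bits 24-31, MSIR1 bits 16-23, MSIR2 bits 8-15 and MSIR3 bits 0-7. A standalone restatement of the bit_start/bit_end arithmetic from the hunk below:

	#include <stdio.h>

	#define MSI_LS1043V1_1_IRQS_PER_MSIR 8

	int main(void)
	{
		unsigned int index, bit_start, bit_end;

		/* Same arithmetic as ls_scfg_msi_setup_hwirq() below. */
		for (index = 0; index < 4; index++) {
			bit_start = 32 -
				((index + 1) * MSI_LS1043V1_1_IRQS_PER_MSIR);
			bit_end = bit_start + MSI_LS1043V1_1_IRQS_PER_MSIR - 1;
			printf("MSIR%u: bits %u-%u\n", index, bit_start, bit_end);
		}
		return 0;
	}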
- -Signed-off-by: Minghuan Lian -Acked-by: Rob Herring -Signed-off-by: Yangbo Lu ---- - drivers/irqchip/irq-ls-scfg-msi.c | 45 +++++++++++++++++++++++++++++++++------ - 1 file changed, 39 insertions(+), 6 deletions(-) - ---- a/drivers/irqchip/irq-ls-scfg-msi.c -+++ b/drivers/irqchip/irq-ls-scfg-msi.c -@@ -25,14 +25,21 @@ - #define MSI_IRQS_PER_MSIR 32 - #define MSI_MSIR_OFFSET 4 - -+#define MSI_LS1043V1_1_IRQS_PER_MSIR 8 -+#define MSI_LS1043V1_1_MSIR_OFFSET 0x10 -+ - struct ls_scfg_msi_cfg { - u32 ibs_shift; /* Shift of interrupt bit select */ -+ u32 msir_irqs; /* The irq number per MSIR */ -+ u32 msir_base; /* The base address of MSIR */ - }; - - struct ls_scfg_msir { - struct ls_scfg_msi *msi_data; - unsigned int index; - unsigned int gic_irq; -+ unsigned int bit_start; -+ unsigned int bit_end; - void __iomem *reg; - }; - -@@ -140,13 +147,18 @@ static void ls_scfg_msi_irq_handler(stru - struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc); - struct ls_scfg_msi *msi_data = msir->msi_data; - unsigned long val; -- int pos, virq, hwirq; -+ int pos, size, virq, hwirq; - - chained_irq_enter(irq_desc_get_chip(desc), desc); - - val = ioread32be(msir->reg); -- for_each_set_bit(pos, &val, MSI_IRQS_PER_MSIR) { -- hwirq = ((31 - pos) << msi_data->cfg->ibs_shift) | msir->index; -+ -+ pos = msir->bit_start; -+ size = msir->bit_end + 1; -+ -+ for_each_set_bit_from(pos, &val, size) { -+ hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) | -+ msir->index; - virq = irq_find_mapping(msi_data->parent, hwirq); - if (virq) - generic_handle_irq(virq); -@@ -193,14 +205,24 @@ static int ls_scfg_msi_setup_hwirq(struc - msir->index = index; - msir->msi_data = msi_data; - msir->gic_irq = virq; -- msir->reg = msi_data->regs + MSI_MSIR_OFFSET + 4 * index; -+ msir->reg = msi_data->regs + msi_data->cfg->msir_base + 4 * index; -+ -+ if (msi_data->cfg->msir_irqs == MSI_LS1043V1_1_IRQS_PER_MSIR) { -+ msir->bit_start = 32 - ((msir->index + 1) * -+ MSI_LS1043V1_1_IRQS_PER_MSIR); -+ msir->bit_end = msir->bit_start + -+ MSI_LS1043V1_1_IRQS_PER_MSIR - 1; -+ } else { -+ msir->bit_start = 0; -+ msir->bit_end = msi_data->cfg->msir_irqs - 1; -+ } - - irq_set_chained_handler_and_data(msir->gic_irq, - ls_scfg_msi_irq_handler, - msir); - - /* Release the hwirqs corresponding to this MSIR */ -- for (i = 0; i < MSI_IRQS_PER_MSIR; i++) { -+ for (i = 0; i < msi_data->cfg->msir_irqs; i++) { - hwirq = i << msi_data->cfg->ibs_shift | msir->index; - bitmap_clear(msi_data->used, hwirq, 1); - } -@@ -216,7 +238,7 @@ static int ls_scfg_msi_teardown_hwirq(st - if (msir->gic_irq > 0) - irq_set_chained_handler_and_data(msir->gic_irq, NULL, NULL); - -- for (i = 0; i < MSI_IRQS_PER_MSIR; i++) { -+ for (i = 0; i < msi_data->cfg->msir_irqs; i++) { - hwirq = i << msi_data->cfg->ibs_shift | msir->index; - bitmap_set(msi_data->used, hwirq, 1); - } -@@ -226,10 +248,20 @@ static int ls_scfg_msi_teardown_hwirq(st - - static struct ls_scfg_msi_cfg ls1021_msi_cfg = { - .ibs_shift = 3, -+ .msir_irqs = MSI_IRQS_PER_MSIR, -+ .msir_base = MSI_MSIR_OFFSET, - }; - - static struct ls_scfg_msi_cfg ls1046_msi_cfg = { - .ibs_shift = 2, -+ .msir_irqs = MSI_IRQS_PER_MSIR, -+ .msir_base = MSI_MSIR_OFFSET, -+}; -+ -+static struct ls_scfg_msi_cfg ls1043_v1_1_msi_cfg = { -+ .ibs_shift = 2, -+ .msir_irqs = MSI_LS1043V1_1_IRQS_PER_MSIR, -+ .msir_base = MSI_LS1043V1_1_MSIR_OFFSET, - }; - - static const struct of_device_id ls_scfg_msi_id[] = { -@@ -240,6 +272,7 @@ static const struct of_device_id ls_scfg - { .compatible = "fsl,ls1012a-msi", .data = &ls1021_msi_cfg 
}, - { .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg }, -+ { .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg }, - { .compatible = "fsl,ls1046a-msi", .data = &ls1046_msi_cfg }, - {}, - }; diff --git a/target/linux/layerscape/patches-4.4/8241-irqchip-ls-scfg-msi-add-MSI-affinity-support.patch b/target/linux/layerscape/patches-4.4/8241-irqchip-ls-scfg-msi-add-MSI-affinity-support.patch deleted file mode 100644 index b3b3795b6..000000000 --- a/target/linux/layerscape/patches-4.4/8241-irqchip-ls-scfg-msi-add-MSI-affinity-support.patch +++ /dev/null @@ -1,152 +0,0 @@ -From a761ae710d6395af0d8d17a0b4b8f93a816ead46 Mon Sep 17 00:00:00 2001 -From: Minghuan Lian -Date: Tue, 17 Jan 2017 17:32:43 +0800 -Subject: [PATCH 12/13] irqchip/ls-scfg-msi: add MSI affinity support - -Cherry-pick patchwork patch. - -For LS1046a and LS1043a v1.1, the MSI controller has 4 MSIRs and 4 GIC -SPI interrupts which can be associated with different cores, -so we can support affinity to improve performance. -The MSI message data is a byte for Layerscape MSI: - 7 6 5 4 3 2 1 0 -| - | IBS | SRS | -SRS (bits 0-1) selects an MSIR, which is associated with a CPU. -IBS (bits 2-6 on ls1046, bits 2-4 on ls1043a v1.1) selects a bit of the -MSIR. With affinity, only bits of MSIR0 (srs=0, cpu0) are available. -All other bits of MSIR1-3 (cpu1-3) are reserved. The MSI hwirq -always equals the bit index of MSIR0. When changing affinity, the -corresponding SRS is appended to the MSI message data and the MSI is -moved to the corresponding core. -But in affinity mode, there are only 8 MSI interrupts for a controller -of LS1043a v1.1. That cannot meet the requirement of some PCIe -devices such as a 4-port Ethernet card. In contrast, without affinity, -all MSIRs can be used for core 0 and the MSI interrupt count can reach 32. -So a parameter is added to control affinity mode: -"lsmsi=no-affinity" will disable affinity and increase the MSI -interrupt number.
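In other words, a hwirq always encodes SRS = 0, and steering an MSI is just a matter of OR-ing the target CPU into the low bits of the message data, as ls_scfg_msi_compose_msg() does in the hunk below. A standalone illustration, assuming the LS1046a layout (ibs_shift = 2):

	#include <stdio.h>

	/* Message data layout: | - | IBS | SRS |; the SRS bits pick the
	 * MSIR and therefore the core that receives the interrupt.
	 */
	static unsigned int compose_msg_data(unsigned int hwirq,
					     unsigned int cpu,
					     int affinity_enabled)
	{
		return affinity_enabled ? (hwirq | cpu) : hwirq;
	}

	int main(void)
	{
		/* MSI bit 5 (IBS = 5 << 2) steered to CPU 2. */
		printf("0x%x\n", compose_msg_data(5 << 2, 2, 1));	/* 0x16 */
		return 0;
	}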
- -Signed-off-by: Minghuan Lian -Signed-off-by: Yangbo Lu ---- - drivers/irqchip/irq-ls-scfg-msi.c | 68 ++++++++++++++++++++++++++++++++++++--- - 1 file changed, 63 insertions(+), 5 deletions(-) - ---- a/drivers/irqchip/irq-ls-scfg-msi.c -+++ b/drivers/irqchip/irq-ls-scfg-msi.c -@@ -40,6 +40,7 @@ struct ls_scfg_msir { - unsigned int gic_irq; - unsigned int bit_start; - unsigned int bit_end; -+ unsigned int srs; /* Shared interrupt register select */ - void __iomem *reg; - }; - -@@ -70,6 +71,19 @@ static struct msi_domain_info ls_scfg_ms - .chip = &ls_scfg_msi_irq_chip, - }; - -+static int msi_affinity_flag = 1; -+ -+static int __init early_parse_ls_scfg_msi(char *p) -+{ -+ if (p && strncmp(p, "no-affinity", 11) == 0) -+ msi_affinity_flag = 0; -+ else -+ msi_affinity_flag = 1; -+ -+ return 0; -+} -+early_param("lsmsi", early_parse_ls_scfg_msi); -+ - static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) - { - struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data); -@@ -77,12 +91,36 @@ static void ls_scfg_msi_compose_msg(stru - msg->address_hi = upper_32_bits(msi_data->msiir_addr); - msg->address_lo = lower_32_bits(msi_data->msiir_addr); - msg->data = data->hwirq; -+ -+ if (msi_affinity_flag) -+ msg->data |= cpumask_first(data->common->affinity); - } - - static int ls_scfg_msi_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) - { -- return -EINVAL; -+ struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(irq_data); -+ u32 cpu; -+ -+ if (!msi_affinity_flag) -+ return -EINVAL; -+ -+ if (!force) -+ cpu = cpumask_any_and(mask, cpu_online_mask); -+ else -+ cpu = cpumask_first(mask); -+ -+ if (cpu >= msi_data->msir_num) -+ return -EINVAL; -+ -+ if (msi_data->msir[cpu].gic_irq <= 0) { -+ pr_warn("cannot bind the irq to cpu%d\n", cpu); -+ return -EINVAL; -+ } -+ -+ cpumask_copy(irq_data->common->affinity, mask); -+ -+ return IRQ_SET_MASK_OK; - } - - static struct irq_chip ls_scfg_msi_parent_chip = { -@@ -158,7 +196,7 @@ static void ls_scfg_msi_irq_handler(stru - - for_each_set_bit_from(pos, &val, size) { - hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) | -- msir->index; -+ msir->srs; - virq = irq_find_mapping(msi_data->parent, hwirq); - if (virq) - generic_handle_irq(virq); -@@ -221,10 +259,19 @@ static int ls_scfg_msi_setup_hwirq(struc - ls_scfg_msi_irq_handler, - msir); - -+ if (msi_affinity_flag) { -+ /* Associate MSIR interrupt to the cpu */ -+ irq_set_affinity(msir->gic_irq, get_cpu_mask(index)); -+ msir->srs = 0; /* This value is determined by the CPU */ -+ } else -+ msir->srs = index; -+ - /* Release the hwirqs corresponding to this MSIR */ -- for (i = 0; i < msi_data->cfg->msir_irqs; i++) { -- hwirq = i << msi_data->cfg->ibs_shift | msir->index; -- bitmap_clear(msi_data->used, hwirq, 1); -+ if (!msi_affinity_flag || msir->index == 0) { -+ for (i = 0; i < msi_data->cfg->msir_irqs; i++) { -+ hwirq = i << msi_data->cfg->ibs_shift | msir->index; -+ bitmap_clear(msi_data->used, hwirq, 1); -+ } - } - - return 0; -@@ -321,6 +368,17 @@ static int ls_scfg_msi_probe(struct plat - bitmap_set(msi_data->used, 0, msi_data->irqs_num); - - msi_data->msir_num = of_irq_count(pdev->dev.of_node); -+ -+ if (msi_affinity_flag) { -+ u32 cpu_num; -+ -+ cpu_num = num_possible_cpus(); -+ if (msi_data->msir_num >= cpu_num) -+ msi_data->msir_num = cpu_num; -+ else -+ msi_affinity_flag = 0; -+ } -+ - msi_data->msir = devm_kcalloc(&pdev->dev, msi_data->msir_num, - sizeof(*msi_data->msir), - GFP_KERNEL); diff --git 
a/target/linux/layerscape/patches-4.4/9069-Revert-arm64-simplify-dma_get_ops.patch b/target/linux/layerscape/patches-4.4/9069-Revert-arm64-simplify-dma_get_ops.patch deleted file mode 100644 index e2c356a7d..000000000 --- a/target/linux/layerscape/patches-4.4/9069-Revert-arm64-simplify-dma_get_ops.patch +++ /dev/null @@ -1,93 +0,0 @@ -From 4885eb650b27f5639c8c72b8d4daa37f533b0b4d Mon Sep 17 00:00:00 2001 -From: Yutang Jiang -Date: Fri, 22 Jul 2016 01:03:29 +0800 -Subject: [PATCH 69/70] Revert "arm64: simplify dma_get_ops" - -This reverts commit 1dccb598df549d892b6450c261da54cdd7af44b4. ---- - arch/arm64/include/asm/dma-mapping.h | 13 ++++++++++--- - arch/arm64/mm/dma-mapping.c | 16 ++++++++++++---- - 2 files changed, 22 insertions(+), 7 deletions(-) - ---- a/arch/arm64/include/asm/dma-mapping.h -+++ b/arch/arm64/include/asm/dma-mapping.h -@@ -18,6 +18,7 @@ - - #ifdef __KERNEL__ - -+#include - #include - #include - -@@ -25,16 +26,22 @@ - #include - - #define DMA_ERROR_CODE (~(dma_addr_t)0) -+extern struct dma_map_ops *dma_ops; - extern struct dma_map_ops dummy_dma_ops; - - static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) - { -- if (dev && dev->archdata.dma_ops) -+ if (unlikely(!dev)) -+ return dma_ops; -+ else if (dev->archdata.dma_ops) - return dev->archdata.dma_ops; -+ else if (acpi_disabled) -+ return dma_ops; - - /* -- * We expect no ISA devices, and all other DMA masters are expected to -- * have someone call arch_setup_dma_ops at device creation time. -+ * When ACPI is enabled, if arch_set_dma_ops is not called, -+ * we will disable device DMA capability by setting it -+ * to dummy_dma_ops. - */ - return &dummy_dma_ops; - } ---- a/arch/arm64/mm/dma-mapping.c -+++ b/arch/arm64/mm/dma-mapping.c -@@ -18,7 +18,6 @@ - */ - - #include --#include - #include - #include - #include -@@ -29,6 +28,9 @@ - - #include - -+struct dma_map_ops *dma_ops; -+EXPORT_SYMBOL(dma_ops); -+ - static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot, - bool coherent) - { -@@ -513,7 +515,13 @@ EXPORT_SYMBOL(dummy_dma_ops); - - static int __init arm64_dma_init(void) - { -- return atomic_pool_init(); -+ int ret; -+ -+ dma_ops = &swiotlb_dma_ops; -+ -+ ret = atomic_pool_init(); -+ -+ return ret; - } - arch_initcall(arm64_dma_init); - -@@ -987,8 +995,8 @@ static void __iommu_setup_dma_ops(struct - void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - struct iommu_ops *iommu, bool coherent) - { -- if (!dev->archdata.dma_ops) -- dev->archdata.dma_ops = &swiotlb_dma_ops; -+ if (!acpi_disabled && !dev->archdata.dma_ops) -+ dev->archdata.dma_ops = dma_ops; - - dev->archdata.dma_coherent = coherent; - __iommu_setup_dma_ops(dev, dma_base, size, iommu); diff --git a/target/linux/layerscape/patches-4.4/9070-Revert-arm64-use-fixmap-region-for-permanent-FDT-map.patch b/target/linux/layerscape/patches-4.4/9070-Revert-arm64-use-fixmap-region-for-permanent-FDT-map.patch deleted file mode 100644 index c247db474..000000000 --- a/target/linux/layerscape/patches-4.4/9070-Revert-arm64-use-fixmap-region-for-permanent-FDT-map.patch +++ /dev/null @@ -1,304 +0,0 @@ -From 6f7a129e59721f6d97a0f06f7078d06f19ade69e Mon Sep 17 00:00:00 2001 -From: Yutang Jiang -Date: Thu, 21 Jul 2016 19:37:42 +0800 -Subject: [PATCH 70/70] Revert "arm64: use fixmap region for permanent FDT - mapping" - -Signed-off-by: Yutang Jiang ---- - Documentation/arm64/booting.txt | 10 +++---- - arch/arm64/include/asm/boot.h | 14 ---------- - arch/arm64/include/asm/fixmap.h | 15 ----------- - arch/arm64/include/asm/mmu.h 
| 1 - - arch/arm64/kernel/head.S | 39 ++++++++++++++++++++++++++- - arch/arm64/kernel/setup.c | 29 +++++++++++++------- - arch/arm64/mm/init.c | 1 + - arch/arm64/mm/mmu.c | 57 --------------------------------------- - 8 files changed, 62 insertions(+), 104 deletions(-) - delete mode 100644 arch/arm64/include/asm/boot.h - ---- a/Documentation/arm64/booting.txt -+++ b/Documentation/arm64/booting.txt -@@ -45,13 +45,11 @@ sees fit.) - - Requirement: MANDATORY - --The device tree blob (dtb) must be placed on an 8-byte boundary and must --not exceed 2 megabytes in size. Since the dtb will be mapped cacheable --using blocks of up to 2 megabytes in size, it must not be placed within --any 2M region which must be mapped with any specific attributes. -+The device tree blob (dtb) must be placed on an 8-byte boundary within -+the first 512 megabytes from the start of the kernel image and must not -+cross a 2-megabyte boundary. This is to allow the kernel to map the -+blob using a single section mapping in the initial page tables. - --NOTE: versions prior to v4.2 also require that the DTB be placed within --the 512 MB region starting at text_offset bytes below the kernel Image. - - 3. Decompress the kernel image - ------------------------------ ---- a/arch/arm64/include/asm/boot.h -+++ /dev/null -@@ -1,14 +0,0 @@ -- --#ifndef __ASM_BOOT_H --#define __ASM_BOOT_H -- --#include -- --/* -- * arm64 requires the DTB to be 8 byte aligned and -- * not exceed 2MB in size. -- */ --#define MIN_FDT_ALIGN 8 --#define MAX_FDT_SIZE SZ_2M -- --#endif ---- a/arch/arm64/include/asm/fixmap.h -+++ b/arch/arm64/include/asm/fixmap.h -@@ -18,7 +18,6 @@ - #ifndef __ASSEMBLY__ - #include - #include --#include - #include - - /* -@@ -34,20 +33,6 @@ - */ - enum fixed_addresses { - FIX_HOLE, -- -- /* -- * Reserve a virtual window for the FDT that is 2 MB larger than the -- * maximum supported size, and put it at the top of the fixmap region. -- * The additional space ensures that any FDT that does not exceed -- * MAX_FDT_SIZE can be mapped regardless of whether it crosses any -- * 2 MB alignment boundaries. -- * -- * Keep this at the top so it remains 2 MB aligned. -- */ --#define FIX_FDT_SIZE (MAX_FDT_SIZE + SZ_2M) -- FIX_FDT_END, -- FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1, -- - FIX_EARLYCON_MEM_BASE, - FIX_TEXT_POKE0, - __end_of_permanent_fixed_addresses, ---- a/arch/arm64/include/asm/mmu.h -+++ b/arch/arm64/include/asm/mmu.h -@@ -34,6 +34,5 @@ extern void init_mem_pgprot(void); - extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, - unsigned long virt, phys_addr_t size, - pgprot_t prot); --extern void *fixmap_remap_fdt(phys_addr_t dt_phys); - - #endif ---- a/arch/arm64/kernel/head.S -+++ b/arch/arm64/kernel/head.S -@@ -212,6 +212,8 @@ ENTRY(stext) - bl el2_setup // Drop to EL1, w20=cpu_boot_mode - adrp x24, __PHYS_OFFSET - bl set_cpu_boot_mode_flag -+ -+ bl __vet_fdt - bl __create_page_tables // x25=TTBR0, x26=TTBR1 - /* - * The following calls CPU setup code, see arch/arm64/mm/proc.S for -@@ -243,6 +245,24 @@ preserve_boot_args: - ENDPROC(preserve_boot_args) - - /* -+ * Determine validity of the x21 FDT pointer. -+ * The dtb must be 8-byte aligned and live in the first 512M of memory. -+ */ -+__vet_fdt: -+ tst x21, #0x7 -+ b.ne 1f -+ cmp x21, x24 -+ b.lt 1f -+ mov x0, #(1 << 29) -+ add x0, x0, x24 -+ cmp x21, x0 -+ b.ge 1f -+ ret -+1: -+ mov x21, #0 -+ ret -+ENDPROC(__vet_fdt) -+/* - * Macro to create a table entry to the next page. 
- * - * tbl: page table address -@@ -306,7 +326,8 @@ ENDPROC(preserve_boot_args) - * required to get the kernel running. The following sections are required: - * - identity mapping to enable the MMU (low address, TTBR0) - * - first few MB of the kernel linear mapping to jump to once the MMU has -- * been enabled -+ * been enabled, including the FDT blob (TTBR1) -+ * - pgd entry for fixed mappings (TTBR1) - */ - __create_page_tables: - adrp x25, idmap_pg_dir -@@ -396,6 +417,22 @@ __create_page_tables: - create_block_map x0, x7, x3, x5, x6 - - /* -+ * Map the FDT blob (maximum 2MB; must be within 512MB of -+ * PHYS_OFFSET). -+ */ -+ mov x3, x21 // FDT phys address -+ and x3, x3, #~((1 << 21) - 1) // 2MB aligned -+ mov x6, #PAGE_OFFSET -+ sub x5, x3, x24 // subtract PHYS_OFFSET -+ tst x5, #~((1 << 29) - 1) // within 512MB? -+ csel x21, xzr, x21, ne // zero the FDT pointer -+ b.ne 1f -+ add x5, x5, x6 // __va(FDT blob) -+ add x6, x5, #1 << 21 // 2MB for the FDT blob -+ sub x6, x6, #1 // inclusive range -+ create_block_map x0, x7, x3, x5, x6 -+1: -+ /* - * Since the page tables have been populated with non-cacheable - * accesses (MMU disabled), invalidate the idmap and swapper page - * tables again to remove any speculatively loaded cache lines. ---- a/arch/arm64/kernel/setup.c -+++ b/arch/arm64/kernel/setup.c -@@ -87,6 +87,18 @@ static struct resource mem_res[] = { - #define kernel_code mem_res[0] - #define kernel_data mem_res[1] - -+void __init early_print(const char *str, ...) -+{ -+ char buf[256]; -+ va_list ap; -+ -+ va_start(ap, str); -+ vsnprintf(buf, sizeof(buf), str, ap); -+ va_end(ap); -+ -+ printk("%s", buf); -+} -+ - /* - * The recorded values of x0 .. x3 upon kernel entry. - */ -@@ -180,14 +192,12 @@ static void __init smp_build_mpidr_hash( - - static void __init setup_machine_fdt(phys_addr_t dt_phys) - { -- void *dt_virt = fixmap_remap_fdt(dt_phys); -- -- if (!dt_virt || !early_init_dt_scan(dt_virt)) { -- pr_crit("\n" -- "Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n" -- "The dtb must be 8-byte aligned and must not exceed 2 MB in size\n" -- "\nPlease check your bootloader.", -- &dt_phys, dt_virt); -+ if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) { -+ early_print("\n" -+ "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n" -+ "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n" -+ "\nPlease check your bootloader.\n", -+ dt_phys, phys_to_virt(dt_phys)); - - while (true) - cpu_relax(); -@@ -294,6 +304,7 @@ void __init setup_arch(char **cmdline_p) - pr_info("Boot CPU: AArch64 Processor [%08x]\n", read_cpuid_id()); - - sprintf(init_utsname()->machine, ELF_PLATFORM); -+ setup_machine_fdt(__fdt_pointer); - init_mm.start_code = (unsigned long) _text; - init_mm.end_code = (unsigned long) _etext; - init_mm.end_data = (unsigned long) _edata; -@@ -304,8 +315,6 @@ void __init setup_arch(char **cmdline_p) - early_fixmap_init(); - early_ioremap_init(); - -- setup_machine_fdt(__fdt_pointer); -- - parse_early_param(); - - /* ---- a/arch/arm64/mm/init.c -+++ b/arch/arm64/mm/init.c -@@ -171,6 +171,7 @@ void __init arm64_memblock_init(void) - memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start); - #endif - -+ early_init_fdt_reserve_self(); - early_init_fdt_scan_reserved_mem(); - - /* 4GB maximum for 32-bit only capable devices */ ---- a/arch/arm64/mm/mmu.c -+++ b/arch/arm64/mm/mmu.c -@@ -21,7 +21,6 @@ - #include - #include - #include --#include - #include - #include - #include 
-@@ -641,59 +640,3 @@ void __set_fixmap(enum fixed_addresses i - flush_tlb_kernel_range(addr, addr+PAGE_SIZE); - } - } -- --void *__init fixmap_remap_fdt(phys_addr_t dt_phys) --{ -- const u64 dt_virt_base = __fix_to_virt(FIX_FDT); -- pgprot_t prot = PAGE_KERNEL_RO; -- int size, offset; -- void *dt_virt; -- -- /* -- * Check whether the physical FDT address is set and meets the minimum -- * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be -- * at least 8 bytes so that we can always access the magic and size -- * fields of the FDT header after mapping the first chunk, double check -- * here if that is indeed the case. -- */ -- BUILD_BUG_ON(MIN_FDT_ALIGN < 8); -- if (!dt_phys || dt_phys % MIN_FDT_ALIGN) -- return NULL; -- -- /* -- * Make sure that the FDT region can be mapped without the need to -- * allocate additional translation table pages, so that it is safe -- * to call create_mapping() this early. -- * -- * On 64k pages, the FDT will be mapped using PTEs, so we need to -- * be in the same PMD as the rest of the fixmap. -- * On 4k pages, we'll use section mappings for the FDT so we only -- * have to be in the same PUD. -- */ -- BUILD_BUG_ON(dt_virt_base % SZ_2M); -- -- BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT != -- __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT); -- -- offset = dt_phys % SWAPPER_BLOCK_SIZE; -- dt_virt = (void *)dt_virt_base + offset; -- -- /* map the first chunk so we can read the size from the header */ -- create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base, -- SWAPPER_BLOCK_SIZE, prot); -- -- if (fdt_magic(dt_virt) != FDT_MAGIC) -- return NULL; -- -- size = fdt_totalsize(dt_virt); -- if (size > MAX_FDT_SIZE) -- return NULL; -- -- if (offset + size > SWAPPER_BLOCK_SIZE) -- create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base, -- round_up(offset + size, SWAPPER_BLOCK_SIZE), prot); -- -- memblock_reserve(dt_phys, size); -- -- return dt_virt; --} diff --git a/target/linux/layerscape/patches-4.9/201-config-support-layerscape.patch b/target/linux/layerscape/patches-4.9/201-config-support-layerscape.patch new file mode 100644 index 000000000..939880fd1 --- /dev/null +++ b/target/linux/layerscape/patches-4.9/201-config-support-layerscape.patch @@ -0,0 +1,486 @@ +From 11edf9c88acea13d1a02901289060263b4027a77 Mon Sep 17 00:00:00 2001 +From: Yangbo Lu +Date: Mon, 25 Sep 2017 09:52:26 +0800 +Subject: [PATCH] config: support layerscape +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This is a integrated patch for layerscape config/makefile support. 
+ +Signed-off-by: Yuantian Tang +Signed-off-by: Zhang Ying-22455 +Signed-off-by: Zhao Qiang +Signed-off-by: Bogdan Purcareata +Signed-off-by: Zhao Qiang +Signed-off-by: Horia Geantă +Signed-off-by: Yangbo Lu +--- + drivers/base/Kconfig | 1 + + drivers/crypto/Makefile | 2 +- + drivers/net/ethernet/freescale/Kconfig | 4 +- + drivers/net/ethernet/freescale/Makefile | 2 + + drivers/ptp/Kconfig | 29 ++++++ + drivers/rtc/Kconfig | 8 ++ + drivers/rtc/Makefile | 1 + + drivers/soc/Kconfig | 3 +- + drivers/soc/fsl/Kconfig | 22 +++++ + drivers/soc/fsl/Kconfig.arm | 16 ++++ + drivers/soc/fsl/Makefile | 4 + + drivers/soc/fsl/layerscape/Kconfig | 10 +++ + drivers/soc/fsl/layerscape/Makefile | 1 + + drivers/soc/fsl/rcpm.c | 154 ++++++++++++++++++++++++++++++++ + drivers/staging/Kconfig | 4 + + drivers/staging/Makefile | 2 + + drivers/staging/fsl-dpaa2/Kconfig | 41 +++++++++ + drivers/staging/fsl-dpaa2/Makefile | 9 ++ + 18 files changed, 309 insertions(+), 4 deletions(-) + create mode 100644 drivers/soc/fsl/Kconfig + create mode 100644 drivers/soc/fsl/Kconfig.arm + create mode 100644 drivers/soc/fsl/layerscape/Kconfig + create mode 100644 drivers/soc/fsl/layerscape/Makefile + create mode 100644 drivers/soc/fsl/rcpm.c + create mode 100644 drivers/staging/fsl-dpaa2/Kconfig + create mode 100644 drivers/staging/fsl-dpaa2/Makefile + +--- a/drivers/base/Kconfig ++++ b/drivers/base/Kconfig +@@ -237,6 +237,7 @@ config GENERIC_CPU_AUTOPROBE + + config SOC_BUS + bool ++ select GLOB + + source "drivers/base/regmap/Kconfig" + +--- a/drivers/crypto/Makefile ++++ b/drivers/crypto/Makefile +@@ -3,7 +3,7 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += at + obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o + obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o + obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/ +-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/ ++obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/ + obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o + obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o + obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o +--- a/drivers/net/ethernet/freescale/Kconfig ++++ b/drivers/net/ethernet/freescale/Kconfig +@@ -5,7 +5,7 @@ + config NET_VENDOR_FREESCALE + bool "Freescale devices" + default y +- depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \ ++ depends on FSL_SOC || (QUICC_ENGINE && PPC32) || CPM1 || CPM2 || PPC_MPC512x || \ + M523x || M527x || M5272 || M528x || M520x || M532x || \ + ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \ + ARCH_LAYERSCAPE +@@ -93,4 +93,6 @@ config GIANFAR + and MPC86xx family of chips, the eTSEC on LS1021A and the FEC + on the 8540. + ++source "drivers/net/ethernet/freescale/sdk_fman/Kconfig" ++source "drivers/net/ethernet/freescale/sdk_dpaa/Kconfig" + endif # NET_VENDOR_FREESCALE +--- a/drivers/net/ethernet/freescale/Makefile ++++ b/drivers/net/ethernet/freescale/Makefile +@@ -21,4 +21,6 @@ gianfar_driver-objs := gianfar.o \ + obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o + ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o + ++obj-$(if $(CONFIG_FSL_SDK_FMAN),y) += sdk_fman/ ++obj-$(if $(CONFIG_FSL_SDK_DPAA_ETH),y) += sdk_dpaa/ + obj-$(CONFIG_FSL_FMAN) += fman/ +--- a/drivers/ptp/Kconfig ++++ b/drivers/ptp/Kconfig +@@ -39,6 +39,35 @@ config PTP_1588_CLOCK_GIANFAR + To compile this driver as a module, choose M here: the module + will be called gianfar_ptp. 
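+
++# Illustration only (not part of this patch): the SO_TIMESTAMPING API that
++# the PTP help texts here refer to is requested from user space roughly as
++# follows, where "sock" is an assumed, already-open socket on the DPAA
++# interface and the flag constants come from <linux/net_tstamp.h>:
++#
++#	int flags = SOF_TIMESTAMPING_RX_HARDWARE |
++#		    SOF_TIMESTAMPING_TX_HARDWARE |
++#		    SOF_TIMESTAMPING_RAW_HARDWARE;
++#	setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
++#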
+ ++config PTP_1588_CLOCK_DPAA ++ tristate "Freescale DPAA as PTP clock" ++ depends on FSL_SDK_DPAA_ETH ++ select PTP_1588_CLOCK ++ select FSL_DPAA_TS ++ default n ++ help ++ This driver adds support for using the DPAA 1588 timer module ++ as a PTP clock. This clock is only useful if your PTP programs are ++ getting hardware time stamps on the PTP Ethernet packets ++ using the SO_TIMESTAMPING API. ++ ++ To compile this driver as a module, choose M here: the module ++ will be called dpaa_ptp. ++ ++config PTP_1588_CLOCK_DPAA2 ++ tristate "Freescale DPAA2 as PTP clock" ++ depends on FSL_DPAA2_ETH ++ select PTP_1588_CLOCK ++ default y ++ help ++ This driver adds support for using the DPAA2 1588 timer module ++ as a PTP clock. This clock is only useful if your PTP programs are ++ getting hardware time stamps on the PTP Ethernet packets ++ using the SO_TIMESTAMPING API. ++ ++ To compile this driver as a module, choose M here: the module ++ will be called dpaa2-rtc. ++ + config PTP_1588_CLOCK_IXP46X + tristate "Intel IXP46x as PTP clock" + depends on IXP4XX_ETH +--- a/drivers/rtc/Kconfig ++++ b/drivers/rtc/Kconfig +@@ -414,6 +414,14 @@ config RTC_DRV_PCF85063 + This driver can also be built as a module. If so, the module + will be called rtc-pcf85063. + ++config RTC_DRV_PCF85263 ++ tristate "NXP PCF85263" ++ help ++ If you say yes here you get support for the PCF85263 RTC chip ++ ++ This driver can also be built as a module. If so, the module ++ will be called rtc-pcf85263. ++ + config RTC_DRV_PCF8563 + tristate "Philips PCF8563/Epson RTC8564" + help +--- a/drivers/rtc/Makefile ++++ b/drivers/rtc/Makefile +@@ -111,6 +111,7 @@ obj-$(CONFIG_RTC_DRV_PCF2127) += rtc-pcf + obj-$(CONFIG_RTC_DRV_PCF50633) += rtc-pcf50633.o + obj-$(CONFIG_RTC_DRV_PCF85063) += rtc-pcf85063.o + obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o ++obj-$(CONFIG_RTC_DRV_PCF85263) += rtc-pcf85263.o + obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o + obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o + obj-$(CONFIG_RTC_DRV_PIC32) += rtc-pic32.o +--- a/drivers/soc/Kconfig ++++ b/drivers/soc/Kconfig +@@ -1,8 +1,7 @@ + menu "SOC (System On Chip) specific Drivers" + + source "drivers/soc/bcm/Kconfig" +-source "drivers/soc/fsl/qbman/Kconfig" +-source "drivers/soc/fsl/qe/Kconfig" ++source "drivers/soc/fsl/Kconfig" + source "drivers/soc/mediatek/Kconfig" + source "drivers/soc/qcom/Kconfig" + source "drivers/soc/rockchip/Kconfig" +--- /dev/null ++++ b/drivers/soc/fsl/Kconfig +@@ -0,0 +1,22 @@ ++# ++# Freescale SOC drivers ++# ++ ++source "drivers/soc/fsl/qbman/Kconfig" ++source "drivers/soc/fsl/qe/Kconfig" ++source "drivers/soc/fsl/ls2-console/Kconfig" ++ ++config FSL_GUTS ++ bool ++ select SOC_BUS ++ help ++ The global utilities block controls power management, I/O device ++ enabling, power-onreset(POR) configuration monitoring, alternate ++ function selection for multiplexed signals,and clock control. ++ This driver is to manage and access global utilities block. ++ Initially only reading SVR and registering soc device are supported. ++ Other guts accesses, such as reading RCW, should eventually be moved ++ into this driver as well. ++if ARM || ARM64 ++source "drivers/soc/fsl/Kconfig.arm" ++endif +--- /dev/null ++++ b/drivers/soc/fsl/Kconfig.arm +@@ -0,0 +1,16 @@ ++# ++# Freescale ARM SOC Drivers ++# ++ ++config LS_SOC_DRIVERS ++ bool "Layerscape Soc Drivers" ++ depends on ARCH_LAYERSCAPE || SOC_LS1021A ++ default n ++ help ++ Say y here to enable Freescale Layerscape Soc Device Drivers support. 
++ The Soc Drivers provides the device driver that is a specific block ++ or feature on Layerscape platform. ++ ++if LS_SOC_DRIVERS ++ source "drivers/soc/fsl/layerscape/Kconfig" ++endif +--- a/drivers/soc/fsl/Makefile ++++ b/drivers/soc/fsl/Makefile +@@ -5,3 +5,7 @@ + obj-$(CONFIG_FSL_DPAA) += qbman/ + obj-$(CONFIG_QUICC_ENGINE) += qe/ + obj-$(CONFIG_CPM) += qe/ ++obj-$(CONFIG_FSL_GUTS) += guts.o ++obj-$(CONFIG_FSL_LS2_CONSOLE) += ls2-console/ ++obj-$(CONFIG_SUSPEND) += rcpm.o ++obj-$(CONFIG_LS_SOC_DRIVERS) += layerscape/ +--- /dev/null ++++ b/drivers/soc/fsl/layerscape/Kconfig +@@ -0,0 +1,10 @@ ++# ++# Layerscape Soc drivers ++# ++config FTM_ALARM ++ bool "FTM alarm driver" ++ default n ++ help ++ Say y here to enable FTM alarm support. The FTM alarm provides ++ alarm functions for wakeup system from deep sleep. There is only ++ one FTM can be used in ALARM(FTM 0). +--- /dev/null ++++ b/drivers/soc/fsl/layerscape/Makefile +@@ -0,0 +1 @@ ++obj-$(CONFIG_FTM_ALARM) += ftm_alarm.o +--- /dev/null ++++ b/drivers/soc/fsl/rcpm.c +@@ -0,0 +1,154 @@ ++/* ++ * Run Control and Power Management (RCPM) driver ++ * ++ * Copyright 2016 NXP ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++#define pr_fmt(fmt) "RCPM: %s: " fmt, __func__ ++ ++#include ++#include ++#include ++#include ++#include ++ ++/* RCPM register offset */ ++#define RCPM_IPPDEXPCR0 0x140 ++ ++#define RCPM_WAKEUP_CELL_SIZE 2 ++ ++struct rcpm_config { ++ int ipp_num; ++ int ippdexpcr_offset; ++ u32 ippdexpcr[2]; ++ void *rcpm_reg_base; ++}; ++ ++static struct rcpm_config *rcpm; ++ ++static inline void rcpm_reg_write(u32 offset, u32 value) ++{ ++ iowrite32be(value, rcpm->rcpm_reg_base + offset); ++} ++ ++static inline u32 rcpm_reg_read(u32 offset) ++{ ++ return ioread32be(rcpm->rcpm_reg_base + offset); ++} ++ ++static void rcpm_wakeup_fixup(struct device *dev, void *data) ++{ ++ struct device_node *node = dev ? dev->of_node : NULL; ++ u32 value[RCPM_WAKEUP_CELL_SIZE]; ++ int ret, i; ++ ++ if (!dev || !node || !device_may_wakeup(dev)) ++ return; ++ ++ /* ++ * Get the values in the "rcpm-wakeup" property. ++ * Three values are: ++ * The first is a pointer to the RCPM node. ++ * The second is the value of the ippdexpcr0 register. ++ * The third is the value of the ippdexpcr1 register. 
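++	 *
++	 * For example, a (hypothetical) wakeup-capable device carrying
++	 *	fsl,rcpm-wakeup = <&rcpm 0x00020000 0x0>;
++	 * asks for 0x00020000 to be OR-ed into the saved ippdexpcr0 value
++	 * and nothing into ippdexpcr1; the actual bit masks are
++	 * SoC-specific.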
++ */ ++ ret = of_property_read_u32_array(node, "fsl,rcpm-wakeup", ++ value, RCPM_WAKEUP_CELL_SIZE); ++ if (ret) ++ return; ++ ++ pr_debug("wakeup source: the device %s\n", node->full_name); ++ ++ for (i = 0; i < rcpm->ipp_num; i++) ++ rcpm->ippdexpcr[i] |= value[i + 1]; ++} ++ ++static int rcpm_suspend_prepare(void) ++{ ++ int i; ++ ++ BUG_ON(!rcpm); ++ ++ for (i = 0; i < rcpm->ipp_num; i++) ++ rcpm->ippdexpcr[i] = 0; ++ ++ dpm_for_each_dev(NULL, rcpm_wakeup_fixup); ++ ++ for (i = 0; i < rcpm->ipp_num; i++) { ++ rcpm_reg_write(rcpm->ippdexpcr_offset + 4 * i, ++ rcpm->ippdexpcr[i]); ++ pr_debug("ippdexpcr%d = 0x%x\n", i, rcpm->ippdexpcr[i]); ++ } ++ ++ return 0; ++} ++ ++static int rcpm_suspend_notifier_call(struct notifier_block *bl, ++ unsigned long state, ++ void *unused) ++{ ++ switch (state) { ++ case PM_SUSPEND_PREPARE: ++ rcpm_suspend_prepare(); ++ break; ++ } ++ ++ return NOTIFY_DONE; ++} ++ ++static struct rcpm_config rcpm_default_config = { ++ .ipp_num = 1, ++ .ippdexpcr_offset = RCPM_IPPDEXPCR0, ++}; ++ ++static const struct of_device_id rcpm_matches[] = { ++ { ++ .compatible = "fsl,qoriq-rcpm-2.1", ++ .data = &rcpm_default_config, ++ }, ++ {} ++}; ++ ++static struct notifier_block rcpm_suspend_notifier = { ++ .notifier_call = rcpm_suspend_notifier_call, ++}; ++ ++static int __init layerscape_rcpm_init(void) ++{ ++ const struct of_device_id *match; ++ struct device_node *np; ++ ++ np = of_find_matching_node_and_match(NULL, rcpm_matches, &match); ++ if (!np) { ++ pr_err("Can't find the RCPM node.\n"); ++ return -EINVAL; ++ } ++ ++ if (match->data) ++ rcpm = (struct rcpm_config *)match->data; ++ else ++ return -EINVAL; ++ ++ rcpm->rcpm_reg_base = of_iomap(np, 0); ++ of_node_put(np); ++ if (!rcpm->rcpm_reg_base) ++ return -ENOMEM; ++ ++ register_pm_notifier(&rcpm_suspend_notifier); ++ ++ pr_info("The RCPM driver initialized.\n"); ++ ++ return 0; ++} ++ ++subsys_initcall(layerscape_rcpm_init); +--- a/drivers/staging/Kconfig ++++ b/drivers/staging/Kconfig +@@ -94,6 +94,8 @@ source "drivers/staging/fbtft/Kconfig" + + source "drivers/staging/fsl-mc/Kconfig" + ++source "drivers/staging/fsl-dpaa2/Kconfig" ++ + source "drivers/staging/wilc1000/Kconfig" + + source "drivers/staging/most/Kconfig" +@@ -106,4 +108,6 @@ source "drivers/staging/greybus/Kconfig" + + source "drivers/staging/vc04_services/Kconfig" + ++source "drivers/staging/fsl_qbman/Kconfig" ++ + endif # STAGING +--- a/drivers/staging/Makefile ++++ b/drivers/staging/Makefile +@@ -36,9 +36,11 @@ obj-$(CONFIG_UNISYSSPAR) += unisys/ + obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/ + obj-$(CONFIG_FB_TFT) += fbtft/ + obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/ ++obj-$(CONFIG_FSL_DPAA2) += fsl-dpaa2/ + obj-$(CONFIG_WILC1000) += wilc1000/ + obj-$(CONFIG_MOST) += most/ + obj-$(CONFIG_ISDN_I4L) += i4l/ + obj-$(CONFIG_KS7010) += ks7010/ + obj-$(CONFIG_GREYBUS) += greybus/ + obj-$(CONFIG_BCM2708_VCHIQ) += vc04_services/ ++obj-$(CONFIG_FSL_SDK_DPA) += fsl_qbman/ +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/Kconfig +@@ -0,0 +1,41 @@ ++# ++# Freescale DataPath Acceleration Architecture Gen2 (DPAA2) drivers ++# ++ ++config FSL_DPAA2 ++ bool "Freescale DPAA2 devices" ++ depends on FSL_MC_BUS ++ ---help--- ++ Build drivers for Freescale DataPath Acceleration ++ Architecture (DPAA2) family of SoCs. 
++ ++config FSL_DPAA2_ETH ++ tristate "Freescale DPAA2 Ethernet" ++ depends on FSL_DPAA2 && FSL_MC_DPIO ++ ---help--- ++ Ethernet driver for Freescale DPAA2 SoCs, using the ++ Freescale MC bus driver ++ ++if FSL_DPAA2_ETH ++config FSL_DPAA2_ETH_USE_ERR_QUEUE ++ bool "Enable Rx error queue" ++ default n ++ ---help--- ++ Allow Rx error frames to be enqueued on an error queue ++ and processed by the driver (by default they are dropped ++ in hardware). ++ This may impact performance, recommended for debugging ++ purposes only. ++ ++# QBMAN_DEBUG requires some additional DPIO APIs ++config FSL_DPAA2_ETH_DEBUGFS ++ depends on DEBUG_FS && FSL_QBMAN_DEBUG ++ bool "Enable debugfs support" ++ default n ++ ---help--- ++ Enable advanced statistics through debugfs interface. ++endif ++ ++source "drivers/staging/fsl-dpaa2/mac/Kconfig" ++source "drivers/staging/fsl-dpaa2/evb/Kconfig" ++source "drivers/staging/fsl-dpaa2/ethsw/Kconfig" +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/Makefile +@@ -0,0 +1,9 @@ ++# ++# Freescale DataPath Acceleration Architecture Gen2 (DPAA2) drivers ++# ++ ++obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/ ++obj-$(CONFIG_FSL_DPAA2_MAC) += mac/ ++obj-$(CONFIG_FSL_DPAA2_EVB) += evb/ ++obj-$(CONFIG_FSL_DPAA2_ETHSW) += ethsw/ ++obj-$(CONFIG_PTP_1588_CLOCK_DPAA2) += rtc/ diff --git a/target/linux/layerscape/patches-4.9/301-arch-support-layerscape.patch b/target/linux/layerscape/patches-4.9/301-arch-support-layerscape.patch new file mode 100644 index 000000000..3a2e9c64b --- /dev/null +++ b/target/linux/layerscape/patches-4.9/301-arch-support-layerscape.patch @@ -0,0 +1,428 @@ +From 7edaf7ed8fbd5fb50950a4fc8067a9c14557d010 Mon Sep 17 00:00:00 2001 +From: Yangbo Lu +Date: Mon, 25 Sep 2017 10:03:52 +0800 +Subject: [PATCH] arch: support layerscape +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This is a integrated patch for layerscape arch support. + +Signed-off-by: Madalin Bucur +Signed-off-by: Nipun Gupta +Signed-off-by: Zhao Qiang +Signed-off-by: Camelia Groza +Signed-off-by: Haiying Wang +Signed-off-by: Pan Jiafei +Signed-off-by: Po Liu +Signed-off-by: Bharat Bhushan +Signed-off-by: Jianhua Xie +Signed-off-by: Horia Geantă +Signed-off-by: Yangbo Lu +--- + arch/arm/include/asm/delay.h | 16 +++++++++ + arch/arm/include/asm/io.h | 31 ++++++++++++++++++ + arch/arm/include/asm/mach/map.h | 4 +-- + arch/arm/include/asm/pgtable.h | 7 ++++ + arch/arm/kernel/bios32.c | 43 ++++++++++++++++++++++++ + arch/arm/mm/dma-mapping.c | 1 + + arch/arm/mm/ioremap.c | 7 ++++ + arch/arm/mm/mmu.c | 9 +++++ + arch/arm64/include/asm/cache.h | 2 +- + arch/arm64/include/asm/io.h | 2 ++ + arch/arm64/include/asm/pci.h | 4 +++ + arch/arm64/include/asm/pgtable-prot.h | 1 + + arch/arm64/include/asm/pgtable.h | 5 +++ + arch/arm64/kernel/pci.c | 62 +++++++++++++++++++++++++++++++++++ + arch/arm64/mm/dma-mapping.c | 6 ++++ + 15 files changed, 197 insertions(+), 3 deletions(-) + +--- a/arch/arm/include/asm/delay.h ++++ b/arch/arm/include/asm/delay.h +@@ -57,6 +57,22 @@ extern void __bad_udelay(void); + __const_udelay((n) * UDELAY_MULT)) : \ + __udelay(n)) + ++#define spin_event_timeout(condition, timeout, delay) \ ++({ \ ++ typeof(condition) __ret; \ ++ int i = 0; \ ++ while (!(__ret = (condition)) && (i++ < timeout)) { \ ++ if (delay) \ ++ udelay(delay); \ ++ else \ ++ cpu_relax(); \ ++ udelay(1); \ ++ } \ ++ if (!__ret) \ ++ __ret = (condition); \ ++ __ret; \ ++}) ++ + /* Loop-based definitions for assembly code. 
*/ + extern void __loop_delay(unsigned long loops); + extern void __loop_udelay(unsigned long usecs); +--- a/arch/arm/include/asm/io.h ++++ b/arch/arm/include/asm/io.h +@@ -129,6 +129,7 @@ static inline u32 __raw_readl(const vola + #define MT_DEVICE_NONSHARED 1 + #define MT_DEVICE_CACHED 2 + #define MT_DEVICE_WC 3 ++#define MT_MEMORY_RW_NS 4 + /* + * types 4 onwards can be found in asm/mach/map.h and are undefined + * for ioremap +@@ -220,6 +221,34 @@ extern int pci_ioremap_io(unsigned int o + #endif + #endif + ++/* access ports */ ++#define setbits32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr)) ++#define clrbits32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr)) ++ ++#define setbits16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr)) ++#define clrbits16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr)) ++ ++#define setbits8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr)) ++#define clrbits8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr)) ++ ++/* Clear and set bits in one shot. These macros can be used to clear and ++ * set multiple bits in a register using a single read-modify-write. These ++ * macros can also be used to set a multiple-bit bit pattern using a mask, ++ * by specifying the mask in the 'clear' parameter and the new bit pattern ++ * in the 'set' parameter. ++ */ ++ ++#define clrsetbits_be32(addr, clear, set) \ ++ iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr)) ++#define clrsetbits_le32(addr, clear, set) \ ++ iowrite32le((ioread32le(addr) & ~(clear)) | (set), (addr)) ++#define clrsetbits_be16(addr, clear, set) \ ++ iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr)) ++#define clrsetbits_le16(addr, clear, set) \ ++ iowrite16le((ioread16le(addr) & ~(clear)) | (set), (addr)) ++#define clrsetbits_8(addr, clear, set) \ ++ iowrite8((ioread8(addr) & ~(clear)) | (set), (addr)) ++ + /* + * IO port access primitives + * ------------------------- +@@ -408,6 +437,8 @@ void __iomem *ioremap_wc(resource_size_t + #define ioremap_wc ioremap_wc + #define ioremap_wt ioremap_wc + ++void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size); ++ + void iounmap(volatile void __iomem *iomem_cookie); + #define iounmap iounmap + +--- a/arch/arm/include/asm/mach/map.h ++++ b/arch/arm/include/asm/mach/map.h +@@ -21,9 +21,9 @@ struct map_desc { + unsigned int type; + }; + +-/* types 0-3 are defined in asm/io.h */ ++/* types 0-4 are defined in asm/io.h */ + enum { +- MT_UNCACHED = 4, ++ MT_UNCACHED = 5, + MT_CACHECLEAN, + MT_MINICLEAN, + MT_LOW_VECTORS, +--- a/arch/arm/include/asm/pgtable.h ++++ b/arch/arm/include/asm/pgtable.h +@@ -118,6 +118,13 @@ extern pgprot_t pgprot_s2_device; + #define pgprot_noncached(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED) + ++#define pgprot_cached(prot) \ ++ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_CACHED) ++ ++#define pgprot_cached_ns(prot) \ ++ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_CACHED | \ ++ L_PTE_MT_DEV_NONSHARED) ++ + #define pgprot_writecombine(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE) + +--- a/arch/arm/kernel/bios32.c ++++ b/arch/arm/kernel/bios32.c +@@ -11,6 +11,8 @@ + #include + #include + #include ++#include ++#include + + #include + #include +@@ -64,6 +66,47 @@ void pcibios_report_status(u_int status_ + } + + /* ++ * Check device tree if the service interrupts are there ++ */ ++int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask) ++{ ++ int ret, count = 0; ++ struct device_node *np = NULL; ++ ++ 
if (dev->bus->dev.of_node) ++ np = dev->bus->dev.of_node; ++ ++ if (np == NULL) ++ return 0; ++ ++ if (!IS_ENABLED(CONFIG_OF_IRQ)) ++ return 0; ++ ++ /* If root port doesn't support MSI/MSI-X/INTx in RC mode, ++ * request irq for aer ++ */ ++ if (mask & PCIE_PORT_SERVICE_AER) { ++ ret = of_irq_get_byname(np, "aer"); ++ if (ret > 0) { ++ irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret; ++ count++; ++ } ++ } ++ ++ if (mask & PCIE_PORT_SERVICE_PME) { ++ ret = of_irq_get_byname(np, "pme"); ++ if (ret > 0) { ++ irqs[PCIE_PORT_SERVICE_PME_SHIFT] = ret; ++ count++; ++ } ++ } ++ ++ /* TODO: add more service interrupts if there it is in the device tree*/ ++ ++ return count; ++} ++ ++/* + * We don't use this to fix the device, but initialisation of it. + * It's not the correct use for this, but it works. + * Note that the arbiter/ISA bridge appears to be buggy, specifically in +--- a/arch/arm/mm/dma-mapping.c ++++ b/arch/arm/mm/dma-mapping.c +@@ -2392,6 +2392,7 @@ void arch_setup_dma_ops(struct device *d + + set_dma_ops(dev, dma_ops); + } ++EXPORT_SYMBOL(arch_setup_dma_ops); + + void arch_teardown_dma_ops(struct device *dev) + { +--- a/arch/arm/mm/ioremap.c ++++ b/arch/arm/mm/ioremap.c +@@ -398,6 +398,13 @@ void __iomem *ioremap_wc(resource_size_t + } + EXPORT_SYMBOL(ioremap_wc); + ++void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size) ++{ ++ return arch_ioremap_caller(res_cookie, size, MT_MEMORY_RW_NS, ++ __builtin_return_address(0)); ++} ++EXPORT_SYMBOL(ioremap_cache_ns); ++ + /* + * Remap an arbitrary physical address space into the kernel virtual + * address space as memory. Needed when the kernel wants to execute +--- a/arch/arm/mm/mmu.c ++++ b/arch/arm/mm/mmu.c +@@ -313,6 +313,13 @@ static struct mem_type mem_types[] __ro_ + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, + .domain = DOMAIN_KERNEL, + }, ++ [MT_MEMORY_RW_NS] = { ++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | ++ L_PTE_XN, ++ .prot_l1 = PMD_TYPE_TABLE, ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN, ++ .domain = DOMAIN_KERNEL, ++ }, + [MT_ROM] = { + .prot_sect = PMD_TYPE_SECT, + .domain = DOMAIN_KERNEL, +@@ -644,6 +651,7 @@ static void __init build_mem_type_table( + } + kern_pgprot |= PTE_EXT_AF; + vecs_pgprot |= PTE_EXT_AF; ++ mem_types[MT_MEMORY_RW_NS].prot_pte |= PTE_EXT_AF | cp->pte; + + /* + * Set PXN for user mappings +@@ -672,6 +680,7 @@ static void __init build_mem_type_table( + mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot; + mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd; + mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot; ++ mem_types[MT_MEMORY_RW_NS].prot_sect |= ecc_mask | cp->pmd; + mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot; + mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask; + mem_types[MT_ROM].prot_sect |= cp->pmd; +--- a/arch/arm64/include/asm/cache.h ++++ b/arch/arm64/include/asm/cache.h +@@ -18,7 +18,7 @@ + + #include + +-#define L1_CACHE_SHIFT 7 ++#define L1_CACHE_SHIFT 6 + #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + + /* +--- a/arch/arm64/include/asm/io.h ++++ b/arch/arm64/include/asm/io.h +@@ -171,6 +171,8 @@ extern void __iomem *ioremap_cache(phys_ + #define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) + #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) + #define ioremap_wt(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) ++#define ioremap_cache_ns(addr, size) __ioremap((addr), (size), \ ++ __pgprot(PROT_NORMAL_NS)) + #define iounmap __iounmap + 
+ /* +--- a/arch/arm64/include/asm/pci.h ++++ b/arch/arm64/include/asm/pci.h +@@ -31,6 +31,10 @@ static inline int pci_get_legacy_ide_irq + return -ENODEV; + } + ++#define HAVE_PCI_MMAP ++extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, ++ enum pci_mmap_state mmap_state, ++ int write_combine); + static inline int pci_proc_domain(struct pci_bus *bus) + { + return 1; +--- a/arch/arm64/include/asm/pgtable-prot.h ++++ b/arch/arm64/include/asm/pgtable-prot.h +@@ -42,6 +42,7 @@ + #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC)) + #define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT)) + #define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL)) ++#define PROT_NORMAL_NS (PTE_TYPE_PAGE | PTE_AF | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL)) + + #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE)) + #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) +--- a/arch/arm64/include/asm/pgtable.h ++++ b/arch/arm64/include/asm/pgtable.h +@@ -356,6 +356,11 @@ static inline int pmd_protnone(pmd_t pmd + __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN) + #define pgprot_writecombine(prot) \ + __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) ++#define pgprot_cached(prot) \ ++ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL) | \ ++ PTE_PXN | PTE_UXN) ++#define pgprot_cached_ns(prot) \ ++ __pgprot(pgprot_val(pgprot_cached(prot)) ^ PTE_SHARED) + #define pgprot_device(prot) \ + __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN) + #define __HAVE_PHYS_MEM_ACCESS_PROT +--- a/arch/arm64/kernel/pci.c ++++ b/arch/arm64/kernel/pci.c +@@ -17,6 +17,8 @@ + #include + #include + #include ++#include ++#include + #include + #include + #include +@@ -53,6 +55,66 @@ int pcibios_alloc_irq(struct pci_dev *de + + return 0; + } ++ ++/* ++ * Check device tree if the service interrupts are there ++ */ ++int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask) ++{ ++ int ret, count = 0; ++ struct device_node *np = NULL; ++ ++ if (dev->bus->dev.of_node) ++ np = dev->bus->dev.of_node; ++ ++ if (np == NULL) ++ return 0; ++ ++ if (!IS_ENABLED(CONFIG_OF_IRQ)) ++ return 0; ++ ++ /* If root port doesn't support MSI/MSI-X/INTx in RC mode, ++ * request irq for aer ++ */ ++ if (mask & PCIE_PORT_SERVICE_AER) { ++ ret = of_irq_get_byname(np, "aer"); ++ if (ret > 0) { ++ irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret; ++ count++; ++ } ++ } ++ ++ if (mask & PCIE_PORT_SERVICE_PME) { ++ ret = of_irq_get_byname(np, "pme"); ++ if (ret > 0) { ++ irqs[PCIE_PORT_SERVICE_PME_SHIFT] = ret; ++ count++; ++ } ++ } ++ ++ /* TODO: add more service interrupts if there it is in the device tree*/ ++ ++ return count; ++} ++ ++int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, ++ enum pci_mmap_state mmap_state, int write_combine) ++{ ++ if (mmap_state == pci_mmap_io) ++ return -EINVAL; ++ ++ /* ++ * Mark this as IO ++ */ ++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); ++ ++ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, ++ vma->vm_end - vma->vm_start, ++ vma->vm_page_prot)) ++ return -EAGAIN; ++ ++ return 0; ++} + + /* + * raw_pci_read/write - Platform-specific 
PCI config space access. +--- a/arch/arm64/mm/dma-mapping.c ++++ b/arch/arm64/mm/dma-mapping.c +@@ -30,6 +30,7 @@ + #include + + #include ++#include <../../../drivers/staging/fsl-mc/include/mc-bus.h> + + static int swiotlb __ro_after_init; + +@@ -918,6 +919,10 @@ static int __init __iommu_dma_init(void) + if (!ret) + ret = register_iommu_dma_ops_notifier(&pci_bus_type); + #endif ++#ifdef CONFIG_FSL_MC_BUS ++ if (!ret) ++ ret = register_iommu_dma_ops_notifier(&fsl_mc_bus_type); ++#endif + return ret; + } + arch_initcall(__iommu_dma_init); +@@ -971,3 +976,4 @@ void arch_setup_dma_ops(struct device *d + dev->archdata.dma_coherent = coherent; + __iommu_setup_dma_ops(dev, dma_base, size, iommu); + } ++EXPORT_SYMBOL(arch_setup_dma_ops); diff --git a/target/linux/layerscape/patches-4.9/302-dts-support-layercape.patch b/target/linux/layerscape/patches-4.9/302-dts-support-layercape.patch new file mode 100644 index 000000000..a7a3a48e6 --- /dev/null +++ b/target/linux/layerscape/patches-4.9/302-dts-support-layercape.patch @@ -0,0 +1,9944 @@ +From 2b2e3b9a0d2abf276b40843f75d97b623e4ee109 Mon Sep 17 00:00:00 2001 +From: Yangbo Lu +Date: Mon, 25 Sep 2017 10:02:10 +0800 +Subject: [PATCH] dts: support layercape + +This is a integrated patch for layerscape dts support. + +Signed-off-by: Amrita Kumari +Signed-off-by: Alison Wang +Signed-off-by: Li Yang +Signed-off-by: Ashish Kumar +Signed-off-by: Zhao Qiang +Signed-off-by: Rajesh Bhagat +Signed-off-by: Zhang Ying-22455 +Signed-off-by: Madalin Bucur +Signed-off-by: Minghuan Lian +Signed-off-by: Suresh Gupta +Signed-off-by: Chenhui Zhao +Signed-off-by: Priyanka Jain +Signed-off-by: Hou Zhiqiang +Signed-off-by: Changming Huang +Signed-off-by: Bharat Bhushan +Signed-off-by: Meng Yi +Signed-off-by: Shaohui Xie +Signed-off-by: Marc Zyngier +Signed-off-by: Prabhakar Kushwaha +Signed-off-by: Ran Wang +Signed-off-by: Yangbo Lu +--- + arch/arm/boot/dts/alpine.dtsi | 2 +- + arch/arm/boot/dts/axm55xx.dtsi | 2 +- + arch/arm/boot/dts/ecx-2000.dts | 2 +- + arch/arm/boot/dts/imx6ul.dtsi | 4 +- + arch/arm/boot/dts/keystone.dtsi | 4 +- + arch/arm/boot/dts/ls1021a-qds.dts | 13 + + arch/arm/boot/dts/ls1021a-twr.dts | 13 + + arch/arm/boot/dts/ls1021a.dtsi | 155 ++-- + arch/arm/boot/dts/mt6580.dtsi | 2 +- + arch/arm/boot/dts/mt6589.dtsi | 2 +- + arch/arm/boot/dts/mt8127.dtsi | 2 +- + arch/arm/boot/dts/mt8135.dtsi | 2 +- + arch/arm/boot/dts/rk3288.dtsi | 2 +- + arch/arm/boot/dts/sun6i-a31.dtsi | 2 +- + arch/arm/boot/dts/sun7i-a20.dtsi | 4 +- + arch/arm/boot/dts/sun8i-a23-a33.dtsi | 2 +- + arch/arm/boot/dts/sun9i-a80.dtsi | 2 +- + arch/arm64/boot/dts/freescale/Makefile | 16 + + arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts | 134 +++ + arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts | 155 ++++ + arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts | 91 +++ + arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi | 517 ++++++++++++ + arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi | 45 + + .../boot/dts/freescale/fsl-ls1043a-qds-sdk.dts | 69 ++ + arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts | 171 +++- + .../boot/dts/freescale/fsl-ls1043a-rdb-sdk.dts | 69 ++ + .../boot/dts/freescale/fsl-ls1043a-rdb-usdpaa.dts | 117 +++ + arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts | 113 ++- + arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 302 ++++++- + arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi | 48 ++ + .../boot/dts/freescale/fsl-ls1046a-qds-sdk.dts | 109 +++ + arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts | 363 ++++++++ + .../boot/dts/freescale/fsl-ls1046a-rdb-sdk.dts | 76 
++ + .../boot/dts/freescale/fsl-ls1046a-rdb-usdpaa.dts | 110 +++ + arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts | 218 +++++ + arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi | 793 ++++++++++++++++++ + arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts | 173 ++++ + arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts | 236 ++++++ + arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi | 816 ++++++++++++++++++ + arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts | 191 ++--- + arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts | 169 ++-- + arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts | 9 +- + arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi | 763 +++-------------- + arch/arm64/boot/dts/freescale/fsl-ls2081a-rdb.dts | 161 ++++ + arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts | 162 ++++ + arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts | 140 ++++ + arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi | 195 +++++ + arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi | 198 +++++ + arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi | 161 ++++ + arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi | 910 +++++++++++++++++++++ + .../boot/dts/freescale/qoriq-bman1-portals.dtsi | 81 ++ + arch/arm64/boot/dts/freescale/qoriq-dpaa-eth.dtsi | 66 ++ + .../boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi | 43 + + .../boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi | 43 + + .../boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi | 42 + + .../boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi | 42 + + .../boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi | 42 + + .../boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi | 42 + + .../boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi | 42 + + .../boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi | 42 + + .../boot/dts/freescale/qoriq-fman3-0-6oh.dtsi | 47 ++ + arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi | 130 +++ + .../boot/dts/freescale/qoriq-qman1-portals.dtsi | 104 +++ + arch/powerpc/boot/dts/fsl/qoriq-bman1-portals.dtsi | 10 + + arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi | 4 +- + arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi | 4 +- + 66 files changed, 7778 insertions(+), 1021 deletions(-) + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1043a-qds-sdk.dts + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb-sdk.dts + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb-usdpaa.dts + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1046a-qds-sdk.dts + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-sdk.dts + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-usdpaa.dts + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls2081a-rdb.dts + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts + 
create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi + create mode 100644 arch/arm64/boot/dts/freescale/qoriq-bman1-portals.dtsi + create mode 100644 arch/arm64/boot/dts/freescale/qoriq-dpaa-eth.dtsi + create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi + create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi + create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi + create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi + create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi + create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi + create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi + create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi + create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-6oh.dtsi + create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi + create mode 100644 arch/arm64/boot/dts/freescale/qoriq-qman1-portals.dtsi + +--- a/arch/arm/boot/dts/alpine.dtsi ++++ b/arch/arm/boot/dts/alpine.dtsi +@@ -93,7 +93,7 @@ + interrupt-controller; + reg = <0x0 0xfb001000 0x0 0x1000>, + <0x0 0xfb002000 0x0 0x2000>, +- <0x0 0xfb004000 0x0 0x1000>, ++ <0x0 0xfb004000 0x0 0x2000>, + <0x0 0xfb006000 0x0 0x2000>; + interrupts = + ; +--- a/arch/arm/boot/dts/axm55xx.dtsi ++++ b/arch/arm/boot/dts/axm55xx.dtsi +@@ -62,7 +62,7 @@ + #address-cells = <0>; + interrupt-controller; + reg = <0x20 0x01001000 0 0x1000>, +- <0x20 0x01002000 0 0x1000>, ++ <0x20 0x01002000 0 0x2000>, + <0x20 0x01004000 0 0x2000>, + <0x20 0x01006000 0 0x2000>; + interrupts = ; + reg = <0xfff11000 0x1000>, +- <0xfff12000 0x1000>, ++ <0xfff12000 0x2000>, + <0xfff14000 0x2000>, + <0xfff16000 0x2000>; + }; +--- a/arch/arm/boot/dts/imx6ul.dtsi ++++ b/arch/arm/boot/dts/imx6ul.dtsi +@@ -89,11 +89,11 @@ + }; + + intc: interrupt-controller@00a01000 { +- compatible = "arm,cortex-a7-gic"; ++ compatible = "arm,gic-400", "arm,cortex-a7-gic"; + #interrupt-cells = <3>; + interrupt-controller; + reg = <0x00a01000 0x1000>, +- <0x00a02000 0x1000>, ++ <0x00a02000 0x2000>, + <0x00a04000 0x2000>, + <0x00a06000 0x2000>; + }; +--- a/arch/arm/boot/dts/keystone.dtsi ++++ b/arch/arm/boot/dts/keystone.dtsi +@@ -30,12 +30,12 @@ + }; + + gic: interrupt-controller { +- compatible = "arm,cortex-a15-gic"; ++ compatible = "arm,gic-400", "arm,cortex-a15-gic"; + #interrupt-cells = <3>; + interrupt-controller; + reg = <0x0 0x02561000 0x0 0x1000>, + <0x0 0x02562000 0x0 0x2000>, +- <0x0 0x02564000 0x0 0x1000>, ++ <0x0 0x02564000 0x0 0x2000>, + <0x0 0x02566000 0x0 0x2000>; + interrupts = ; +--- a/arch/arm/boot/dts/ls1021a-qds.dts ++++ b/arch/arm/boot/dts/ls1021a-qds.dts +@@ -124,6 +124,19 @@ + }; + }; + ++&qspi { ++ num-cs = <2>; ++ status = "okay"; ++ ++ qflash0: s25fl128s@0 { ++ compatible = "spansion,m25p80"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ spi-max-frequency = <20000000>; ++ reg = <0>; ++ }; ++}; ++ + &enet0 { + tbi-handle = <&tbi0>; + phy-handle = <&sgmii_phy1c>; +--- a/arch/arm/boot/dts/ls1021a-twr.dts ++++ b/arch/arm/boot/dts/ls1021a-twr.dts +@@ -142,6 +142,19 @@ + }; + }; + ++&qspi { ++ num-cs = <2>; ++ status = "okay"; ++ ++ qflash0: n25q128a13@0 { ++ compatible = 
"n25q128a13", "jedec,spi-nor"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ spi-max-frequency = <20000000>; ++ reg = <0>; ++ }; ++}; ++ + &enet0 { + tbi-handle = <&tbi1>; + phy-handle = <&sgmii_phy2>; +--- a/arch/arm/boot/dts/ls1021a.dtsi ++++ b/arch/arm/boot/dts/ls1021a.dtsi +@@ -74,17 +74,24 @@ + compatible = "arm,cortex-a7"; + device_type = "cpu"; + reg = <0xf00>; +- clocks = <&cluster1_clk>; ++ clocks = <&clockgen 1 0>; + }; + + cpu@f01 { + compatible = "arm,cortex-a7"; + device_type = "cpu"; + reg = <0xf01>; +- clocks = <&cluster1_clk>; ++ clocks = <&clockgen 1 0>; + }; + }; + ++ sysclk: sysclk { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <100000000>; ++ clock-output-names = "sysclk"; ++ }; ++ + timer { + compatible = "arm,armv7-timer"; + interrupts = , +@@ -108,11 +115,11 @@ + ranges; + + gic: interrupt-controller@1400000 { +- compatible = "arm,cortex-a7-gic"; ++ compatible = "arm,gic-400", "arm,cortex-a7-gic"; + #interrupt-cells = <3>; + interrupt-controller; + reg = <0x0 0x1401000 0x0 0x1000>, +- <0x0 0x1402000 0x0 0x1000>, ++ <0x0 0x1402000 0x0 0x2000>, + <0x0 0x1404000 0x0 0x2000>, + <0x0 0x1406000 0x0 0x2000>; + interrupts = ; +@@ -120,14 +127,14 @@ + }; + + msi1: msi-controller@1570e00 { +- compatible = "fsl,1s1021a-msi"; ++ compatible = "fsl,ls1021a-msi"; + reg = <0x0 0x1570e00 0x0 0x8>; + msi-controller; + interrupts = ; + }; + + msi2: msi-controller@1570e08 { +- compatible = "fsl,1s1021a-msi"; ++ compatible = "fsl,ls1021a-msi"; + reg = <0x0 0x1570e08 0x0 0x8>; + msi-controller; + interrupts = ; +@@ -137,11 +144,12 @@ + compatible = "fsl,ifc", "simple-bus"; + reg = <0x0 0x1530000 0x0 0x10000>; + interrupts = ; ++ big-endian; + }; + + dcfg: dcfg@1ee0000 { + compatible = "fsl,ls1021a-dcfg", "syscon"; +- reg = <0x0 0x1ee0000 0x0 0x10000>; ++ reg = <0x0 0x1ee0000 0x0 0x1000>; + big-endian; + }; + +@@ -163,7 +171,7 @@ + <0x0 0x20220520 0x0 0x4>; + reg-names = "ahci", "sata-ecc"; + interrupts = ; +- clocks = <&platform_clk 1>; ++ clocks = <&clockgen 4 1>; + dma-coherent; + status = "disabled"; + }; +@@ -214,41 +222,10 @@ + }; + + clockgen: clocking@1ee1000 { +- #address-cells = <1>; +- #size-cells = <1>; +- ranges = <0x0 0x0 0x1ee1000 0x10000>; +- +- sysclk: sysclk { +- compatible = "fixed-clock"; +- #clock-cells = <0>; +- clock-output-names = "sysclk"; +- }; +- +- cga_pll1: pll@800 { +- compatible = "fsl,qoriq-core-pll-2.0"; +- #clock-cells = <1>; +- reg = <0x800 0x10>; +- clocks = <&sysclk>; +- clock-output-names = "cga-pll1", "cga-pll1-div2", +- "cga-pll1-div4"; +- }; +- +- platform_clk: pll@c00 { +- compatible = "fsl,qoriq-core-pll-2.0"; +- #clock-cells = <1>; +- reg = <0xc00 0x10>; +- clocks = <&sysclk>; +- clock-output-names = "platform-clk", "platform-clk-div2"; +- }; +- +- cluster1_clk: clk0c0@0 { +- compatible = "fsl,qoriq-core-mux-2.0"; +- #clock-cells = <0>; +- reg = <0x0 0x10>; +- clock-names = "pll1cga", "pll1cga-div2", "pll1cga-div4"; +- clocks = <&cga_pll1 0>, <&cga_pll1 1>, <&cga_pll1 2>; +- clock-output-names = "cluster1-clk"; +- }; ++ compatible = "fsl,ls1021a-clockgen"; ++ reg = <0x0 0x1ee1000 0x0 0x1000>; ++ #clock-cells = <2>; ++ clocks = <&sysclk>; + }; + + dspi0: dspi@2100000 { +@@ -258,7 +235,7 @@ + reg = <0x0 0x2100000 0x0 0x10000>; + interrupts = ; + clock-names = "dspi"; +- clocks = <&platform_clk 1>; ++ clocks = <&clockgen 4 1>; + spi-num-chipselects = <6>; + big-endian; + status = "disabled"; +@@ -271,12 +248,27 @@ + reg = <0x0 0x2110000 0x0 0x10000>; + interrupts = ; + clock-names = "dspi"; +- clocks = 
<&platform_clk 1>; ++ clocks = <&clockgen 4 1>; + spi-num-chipselects = <6>; + big-endian; + status = "disabled"; + }; + ++ qspi: quadspi@1550000 { ++ compatible = "fsl,ls1021a-qspi"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x1550000 0x0 0x10000>, ++ <0x0 0x40000000 0x0 0x4000000>; ++ reg-names = "QuadSPI", "QuadSPI-memory"; ++ interrupts = ; ++ clock-names = "qspi_en", "qspi"; ++ clocks = <&clockgen 4 1>, <&clockgen 4 1>; ++ big-endian; ++ amba-base = <0x40000000>; ++ status = "disabled"; ++ }; ++ + i2c0: i2c@2180000 { + compatible = "fsl,vf610-i2c"; + #address-cells = <1>; +@@ -284,7 +276,7 @@ + reg = <0x0 0x2180000 0x0 0x10000>; + interrupts = ; + clock-names = "i2c"; +- clocks = <&platform_clk 1>; ++ clocks = <&clockgen 4 1>; + status = "disabled"; + }; + +@@ -295,7 +287,7 @@ + reg = <0x0 0x2190000 0x0 0x10000>; + interrupts = ; + clock-names = "i2c"; +- clocks = <&platform_clk 1>; ++ clocks = <&clockgen 4 1>; + status = "disabled"; + }; + +@@ -306,7 +298,7 @@ + reg = <0x0 0x21a0000 0x0 0x10000>; + interrupts = ; + clock-names = "i2c"; +- clocks = <&platform_clk 1>; ++ clocks = <&clockgen 4 1>; + status = "disabled"; + }; + +@@ -399,7 +391,7 @@ + compatible = "fsl,ls1021a-lpuart"; + reg = <0x0 0x2960000 0x0 0x1000>; + interrupts = ; +- clocks = <&platform_clk 1>; ++ clocks = <&clockgen 4 1>; + clock-names = "ipg"; + status = "disabled"; + }; +@@ -408,7 +400,7 @@ + compatible = "fsl,ls1021a-lpuart"; + reg = <0x0 0x2970000 0x0 0x1000>; + interrupts = ; +- clocks = <&platform_clk 1>; ++ clocks = <&clockgen 4 1>; + clock-names = "ipg"; + status = "disabled"; + }; +@@ -417,7 +409,7 @@ + compatible = "fsl,ls1021a-lpuart"; + reg = <0x0 0x2980000 0x0 0x1000>; + interrupts = ; +- clocks = <&platform_clk 1>; ++ clocks = <&clockgen 4 1>; + clock-names = "ipg"; + status = "disabled"; + }; +@@ -426,7 +418,7 @@ + compatible = "fsl,ls1021a-lpuart"; + reg = <0x0 0x2990000 0x0 0x1000>; + interrupts = ; +- clocks = <&platform_clk 1>; ++ clocks = <&clockgen 4 1>; + clock-names = "ipg"; + status = "disabled"; + }; +@@ -435,16 +427,26 @@ + compatible = "fsl,ls1021a-lpuart"; + reg = <0x0 0x29a0000 0x0 0x1000>; + interrupts = ; +- clocks = <&platform_clk 1>; ++ clocks = <&clockgen 4 1>; + clock-names = "ipg"; + status = "disabled"; + }; + ++ ftm0: ftm0@29d0000 { ++ compatible = "fsl,ftm-alarm"; ++ reg = <0x0 0x29d0000 0x0 0x10000>, ++ <0x0 0x1ee2140 0x0 0x4>; ++ reg-names = "ftm", "FlexTimer1"; ++ interrupts = ; ++ big-endian; ++ status = "okay"; ++ }; ++ + wdog0: watchdog@2ad0000 { + compatible = "fsl,imx21-wdt"; + reg = <0x0 0x2ad0000 0x0 0x10000>; + interrupts = ; +- clocks = <&platform_clk 1>; ++ clocks = <&clockgen 4 1>; + clock-names = "wdog-en"; + big-endian; + }; +@@ -454,8 +456,8 @@ + compatible = "fsl,vf610-sai"; + reg = <0x0 0x2b50000 0x0 0x10000>; + interrupts = ; +- clocks = <&platform_clk 1>, <&platform_clk 1>, +- <&platform_clk 1>, <&platform_clk 1>; ++ clocks = <&clockgen 4 1>, <&clockgen 4 1>, ++ <&clockgen 4 1>, <&clockgen 4 1>; + clock-names = "bus", "mclk1", "mclk2", "mclk3"; + dma-names = "tx", "rx"; + dmas = <&edma0 1 47>, +@@ -468,8 +470,8 @@ + compatible = "fsl,vf610-sai"; + reg = <0x0 0x2b60000 0x0 0x10000>; + interrupts = ; +- clocks = <&platform_clk 1>, <&platform_clk 1>, +- <&platform_clk 1>, <&platform_clk 1>; ++ clocks = <&clockgen 4 1>, <&clockgen 4 1>, ++ <&clockgen 4 1>, <&clockgen 4 1>; + clock-names = "bus", "mclk1", "mclk2", "mclk3"; + dma-names = "tx", "rx"; + dmas = <&edma0 1 45>, +@@ -489,16 +491,31 @@ + dma-channels = <32>; + big-endian; + 
clock-names = "dmamux0", "dmamux1"; +- clocks = <&platform_clk 1>, +- <&platform_clk 1>; ++ clocks = <&clockgen 4 1>, ++ <&clockgen 4 1>; ++ }; ++ ++ qdma: qdma@8390000 { ++ compatible = "fsl,ls1021a-qdma"; ++ reg = <0x0 0x8388000 0x0 0x1000>, /* Controller regs */ ++ <0x0 0x8389000 0x0 0x1000>, /* Status regs */ ++ <0x0 0x838a000 0x0 0x2000>; /* Block regs */ ++ interrupts = , ++ ; ++ interrupt-names = "qdma-error", "qdma-queue"; ++ channels = <8>; ++ queues = <2>; ++ status-sizes = <64>; ++ queue-sizes = <64 64>; ++ big-endian; + }; + + dcu: dcu@2ce0000 { + compatible = "fsl,ls1021a-dcu"; + reg = <0x0 0x2ce0000 0x0 0x10000>; + interrupts = ; +- clocks = <&platform_clk 0>, +- <&platform_clk 0>; ++ clocks = <&clockgen 4 0>, ++ <&clockgen 4 0>; + clock-names = "dcu", "pix"; + big-endian; + status = "disabled"; +@@ -626,6 +643,8 @@ + interrupts = ; + dr_mode = "host"; + snps,quirk-frame-length-adjustment = <0x20>; ++ configure-gfladj; ++ dma-coherent; + snps,dis_rxdet_inp3_quirk; + }; + +@@ -634,7 +653,9 @@ + reg = <0x00 0x03400000 0x0 0x00010000 /* controller registers */ + 0x40 0x00000000 0x0 0x00002000>; /* configuration space */ + reg-names = "regs", "config"; +- interrupts = ; /* controller interrupt */ ++ interrupts = , ++ ; /* aer interrupt */ ++ interrupt-names = "pme", "aer"; + fsl,pcie-scfg = <&scfg 0>; + #address-cells = <3>; + #size-cells = <2>; +@@ -643,7 +664,7 @@ + bus-range = <0x0 0xff>; + ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ + 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ +- msi-parent = <&msi1>; ++ msi-parent = <&msi1>, <&msi2>; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0000 0 0 1 &gic GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>, +@@ -657,7 +678,9 @@ + reg = <0x00 0x03500000 0x0 0x00010000 /* controller registers */ + 0x48 0x00000000 0x0 0x00002000>; /* configuration space */ + reg-names = "regs", "config"; +- interrupts = ; ++ interrupts = , ++ ; /* aer interrupt */ ++ interrupt-names = "pme", "aer"; + fsl,pcie-scfg = <&scfg 1>; + #address-cells = <3>; + #size-cells = <2>; +@@ -666,7 +689,7 @@ + bus-range = <0x0 0xff>; + ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ + 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ +- msi-parent = <&msi2>; ++ msi-parent = <&msi1>, <&msi2>; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0000 0 0 1 &gic GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>, +--- a/arch/arm/boot/dts/mt6580.dtsi ++++ b/arch/arm/boot/dts/mt6580.dtsi +@@ -91,7 +91,7 @@ + #interrupt-cells = <3>; + interrupt-parent = <&gic>; + reg = <0x10211000 0x1000>, +- <0x10212000 0x1000>, ++ <0x10212000 0x2000>, + <0x10214000 0x2000>, + <0x10216000 0x2000>; + }; +--- a/arch/arm/boot/dts/mt6589.dtsi ++++ b/arch/arm/boot/dts/mt6589.dtsi +@@ -102,7 +102,7 @@ + #interrupt-cells = <3>; + interrupt-parent = <&gic>; + reg = <0x10211000 0x1000>, +- <0x10212000 0x1000>, ++ <0x10212000 0x2000>, + <0x10214000 0x2000>, + <0x10216000 0x2000>; + }; +--- a/arch/arm/boot/dts/mt8127.dtsi ++++ b/arch/arm/boot/dts/mt8127.dtsi +@@ -129,7 +129,7 @@ + #interrupt-cells = <3>; + interrupt-parent = <&gic>; + reg = <0 0x10211000 0 0x1000>, +- <0 0x10212000 0 0x1000>, ++ <0 0x10212000 0 0x2000>, + <0 0x10214000 0 0x2000>, + <0 0x10216000 0 0x2000>; + }; +--- a/arch/arm/boot/dts/mt8135.dtsi ++++ b/arch/arm/boot/dts/mt8135.dtsi +@@ -221,7 +221,7 @@ + #interrupt-cells = <3>; + interrupt-parent = 
<&gic>; + reg = <0 0x10211000 0 0x1000>, +- <0 0x10212000 0 0x1000>, ++ <0 0x10212000 0 0x2000>, + <0 0x10214000 0 0x2000>, + <0 0x10216000 0 0x2000>; + }; +--- a/arch/arm/boot/dts/rk3288.dtsi ++++ b/arch/arm/boot/dts/rk3288.dtsi +@@ -1109,7 +1109,7 @@ + #address-cells = <0>; + + reg = <0xffc01000 0x1000>, +- <0xffc02000 0x1000>, ++ <0xffc02000 0x2000>, + <0xffc04000 0x2000>, + <0xffc06000 0x2000>; + interrupts = ; +--- a/arch/arm/boot/dts/sun6i-a31.dtsi ++++ b/arch/arm/boot/dts/sun6i-a31.dtsi +@@ -791,7 +791,7 @@ + gic: interrupt-controller@01c81000 { + compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic"; + reg = <0x01c81000 0x1000>, +- <0x01c82000 0x1000>, ++ <0x01c82000 0x2000>, + <0x01c84000 0x2000>, + <0x01c86000 0x2000>; + interrupt-controller; +--- a/arch/arm/boot/dts/sun7i-a20.dtsi ++++ b/arch/arm/boot/dts/sun7i-a20.dtsi +@@ -1685,9 +1685,9 @@ + }; + + gic: interrupt-controller@01c81000 { +- compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic"; ++ compatible = "arm,gic-400", "arm,cortex-a7-gic", "arm,cortex-a15-gic"; + reg = <0x01c81000 0x1000>, +- <0x01c82000 0x1000>, ++ <0x01c82000 0x2000>, + <0x01c84000 0x2000>, + <0x01c86000 0x2000>; + interrupt-controller; +--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi ++++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi +@@ -488,7 +488,7 @@ + gic: interrupt-controller@01c81000 { + compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic"; + reg = <0x01c81000 0x1000>, +- <0x01c82000 0x1000>, ++ <0x01c82000 0x2000>, + <0x01c84000 0x2000>, + <0x01c86000 0x2000>; + interrupt-controller; +--- a/arch/arm/boot/dts/sun9i-a80.dtsi ++++ b/arch/arm/boot/dts/sun9i-a80.dtsi +@@ -613,7 +613,7 @@ + gic: interrupt-controller@01c41000 { + compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic"; + reg = <0x01c41000 0x1000>, +- <0x01c42000 0x1000>, ++ <0x01c42000 0x2000>, + <0x01c44000 0x2000>, + <0x01c46000 0x2000>; + interrupt-controller; +--- a/arch/arm64/boot/dts/freescale/Makefile ++++ b/arch/arm64/boot/dts/freescale/Makefile +@@ -1,8 +1,24 @@ ++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-frdm.dtb ++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-qds.dtb ++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-rdb.dtb + dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-qds.dtb ++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-qds-sdk.dtb + dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-rdb.dtb ++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-rdb-sdk.dtb ++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-rdb-usdpaa.dtb ++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-qds.dtb ++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-qds-sdk.dtb ++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-rdb.dtb ++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-rdb-sdk.dtb ++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-rdb-usdpaa.dtb ++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1088a-qds.dtb ++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1088a-rdb.dtb + dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-qds.dtb + dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-rdb.dtb ++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2081a-rdb.dtb + dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-simu.dtb ++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-qds.dtb ++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-rdb.dtb + + always := $(dtb-y) + subdir-y := $(dts-dirs) +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts +@@ -0,0 +1,134 @@ ++/* ++ * Device Tree file for Freescale LS1012A Freedom Board. ++ * ++ * Copyright 2016 Freescale Semiconductor, Inc. 
++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++/dts-v1/; ++ ++#include "fsl-ls1012a.dtsi" ++ ++/ { ++ model = "LS1012A Freedom Board"; ++ compatible = "fsl,ls1012a-frdm", "fsl,ls1012a"; ++ ++ sys_mclk: clock-mclk { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <25000000>; ++ }; ++ ++ reg_1p8v: regulator-1p8v { ++ compatible = "regulator-fixed"; ++ regulator-name = "1P8V"; ++ regulator-min-microvolt = <1800000>; ++ regulator-max-microvolt = <1800000>; ++ regulator-always-on; ++ }; ++ ++ sound { ++ compatible = "simple-audio-card"; ++ simple-audio-card,format = "i2s"; ++ simple-audio-card,widgets = ++ "Microphone", "Microphone Jack", ++ "Headphone", "Headphone Jack", ++ "Speaker", "Speaker Ext", ++ "Line", "Line In Jack"; ++ simple-audio-card,routing = ++ "MIC_IN", "Microphone Jack", ++ "Microphone Jack", "Mic Bias", ++ "LINE_IN", "Line In Jack", ++ "Headphone Jack", "HP_OUT", ++ "Speaker Ext", "LINE_OUT"; ++ ++ simple-audio-card,cpu { ++ sound-dai = <&sai2>; ++ frame-master; ++ bitclock-master; ++ }; ++ ++ simple-audio-card,codec { ++ sound-dai = <&codec>; ++ frame-master; ++ bitclock-master; ++ system-clock-frequency = <25000000>; ++ }; ++ }; ++}; ++ ++&duart0 { ++ status = "okay"; ++}; ++ ++&i2c0 { ++ status = "okay"; ++ ++ codec: sgtl5000@a { ++ #sound-dai-cells = <0>; ++ compatible = "fsl,sgtl5000"; ++ reg = <0xa>; ++ VDDA-supply = <®_1p8v>; ++ VDDIO-supply = <®_1p8v>; ++ clocks = <&sys_mclk>; ++ }; ++}; ++ ++&qspi { ++ num-cs = <1>; ++ bus-num = <0>; ++ status = "okay"; ++ ++ qflash0: s25fs512s@0 { ++ compatible = "spansion,m25p80"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ m25p,fast-read; ++ spi-max-frequency = <20000000>; ++ reg = <0>; ++ }; ++}; ++ ++&sai2 { ++ status = "okay"; ++}; ++ ++&sata { ++ status = "okay"; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts +@@ -0,0 +1,155 @@ ++/* ++ * Device Tree file for Freescale LS1012A QDS Board. ++ * ++ * Copyright 2016 Freescale Semiconductor, Inc. ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++/dts-v1/; ++ ++#include "fsl-ls1012a.dtsi" ++ ++/ { ++ model = "LS1012A QDS Board"; ++ compatible = "fsl,ls1012a-qds", "fsl,ls1012a"; ++ ++ sys_mclk: clock-mclk { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <24576000>; ++ }; ++ ++ reg_3p3v: regulator-3p3v { ++ compatible = "regulator-fixed"; ++ regulator-name = "3P3V"; ++ regulator-min-microvolt = <3300000>; ++ regulator-max-microvolt = <3300000>; ++ regulator-always-on; ++ }; ++ ++ sound { ++ compatible = "simple-audio-card"; ++ simple-audio-card,format = "i2s"; ++ simple-audio-card,widgets = ++ "Microphone", "Microphone Jack", ++ "Headphone", "Headphone Jack", ++ "Speaker", "Speaker Ext", ++ "Line", "Line In Jack"; ++ simple-audio-card,routing = ++ "MIC_IN", "Microphone Jack", ++ "Microphone Jack", "Mic Bias", ++ "LINE_IN", "Line In Jack", ++ "Headphone Jack", "HP_OUT", ++ "Speaker Ext", "LINE_OUT"; ++ ++ simple-audio-card,cpu { ++ sound-dai = <&sai2>; ++ frame-master; ++ bitclock-master; ++ }; ++ ++ simple-audio-card,codec { ++ sound-dai = <&codec>; ++ frame-master; ++ bitclock-master; ++ system-clock-frequency = <24576000>; ++ }; ++ }; ++}; ++ ++&duart0 { ++ status = "okay"; ++}; ++ ++&i2c0 { ++ status = "okay"; ++ ++ pca9547@77 { ++ compatible = "nxp,pca9547"; ++ reg = <0x77>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ i2c@4 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x4>; ++ ++ codec: sgtl5000@a { ++ #sound-dai-cells = <0>; ++ compatible = "fsl,sgtl5000"; ++ reg = <0xa>; ++ VDDA-supply = <®_3p3v>; ++ VDDIO-supply = <®_3p3v>; ++ clocks = <&sys_mclk>; ++ }; ++ }; ++ }; ++}; ++ ++&qspi { ++ num-cs = <2>; ++ bus-num = <0>; ++ status = "okay"; ++ ++ qflash0: s25fs512s@0 { ++ compatible = "spansion,m25p80"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ spi-max-frequency = <20000000>; ++ m25p,fast-read; ++ reg = <0>; ++ }; ++}; ++ ++&sai2 { ++ status = "okay"; ++}; ++ ++&sata { ++ status = "okay"; ++}; ++ ++&esdhc0 { ++ status = "okay"; ++}; ++ ++&esdhc1 { ++ status = "okay"; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts +@@ -0,0 +1,91 @@ ++/* ++ * Device Tree file for Freescale LS1012A RDB Board. ++ * ++ * Copyright 2016 Freescale Semiconductor, Inc. ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++/dts-v1/; ++ ++#include "fsl-ls1012a.dtsi" ++ ++/ { ++ model = "LS1012A RDB Board"; ++ compatible = "fsl,ls1012a-rdb", "fsl,ls1012a"; ++}; ++ ++&duart0 { ++ status = "okay"; ++}; ++ ++&i2c0 { ++ status = "okay"; ++}; ++ ++&qspi { ++ num-cs = <2>; ++ bus-num = <0>; ++ status = "okay"; ++ ++ qflash0: s25fs512s@0 { ++ compatible = "spansion,m25p80"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ spi-max-frequency = <20000000>; ++ m25p,fast-read; ++ reg = <0>; ++ }; ++}; ++ ++&sata { ++ status = "okay"; ++}; ++ ++&esdhc0 { ++ sd-uhs-sdr104; ++ sd-uhs-sdr50; ++ sd-uhs-sdr25; ++ sd-uhs-sdr12; ++ status = "okay"; ++}; ++ ++&esdhc1 { ++ mmc-hs200-1_8v; ++ status = "okay"; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi +@@ -0,0 +1,517 @@ ++/* ++ * Device Tree Include file for Freescale Layerscape-1012A family SoC. ++ * ++ * Copyright 2016 Freescale Semiconductor, Inc. ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <dt-bindings/interrupt-controller/arm-gic.h>
++#include <dt-bindings/thermal/thermal.h>
++
++/ {
++	compatible = "fsl,ls1012a";
++	interrupt-parent = <&gic>;
++	#address-cells = <2>;
++	#size-cells = <2>;
++
++	aliases {
++		crypto = &crypto;
++		rtic_a = &rtic_a;
++		rtic_b = &rtic_b;
++		rtic_c = &rtic_c;
++		rtic_d = &rtic_d;
++		sec_mon = &sec_mon;
++	};
++
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		cpu0: cpu@0 {
++			device_type = "cpu";
++			compatible = "arm,cortex-a53";
++			reg = <0x0>;
++			clocks = <&clockgen 1 0>;
++			#cooling-cells = <2>;
++			cpu-idle-states = <&CPU_PH20>;
++		};
++	};
++
++	idle-states {
++		/*
++		 * PSCI node is not added default, U-boot will add missing
++		 * parts if it determines to use PSCI.
++		 */
++		entry-method = "arm,psci";
++
++		CPU_PH20: cpu-ph20 {
++			compatible = "arm,idle-state";
++			idle-state-name = "PH20";
++			arm,psci-suspend-param = <0x0>;
++			entry-latency-us = <1000>;
++			exit-latency-us = <1000>;
++			min-residency-us = <3000>;
++		};
++	};
++
++	sysclk: sysclk {
++		compatible = "fixed-clock";
++		#clock-cells = <0>;
++		clock-frequency = <125000000>;
++		clock-output-names = "sysclk";
++	};
++
++	coreclk: coreclk {
++		compatible = "fixed-clock";
++		#clock-cells = <0>;
++		clock-frequency = <100000000>;
++		clock-output-names = "coreclk";
++	};
++
++	timer {
++		compatible = "arm,armv8-timer";
++		interrupts = <1 13 IRQ_TYPE_LEVEL_LOW>,/* Physical Secure PPI */
++			     <1 14 IRQ_TYPE_LEVEL_LOW>,/* Physical Non-Secure PPI */
++			     <1 11 IRQ_TYPE_LEVEL_LOW>,/* Virtual PPI */
++			     <1 10 IRQ_TYPE_LEVEL_LOW>;/* Hypervisor PPI */
++	};
++
++	pmu {
++		compatible = "arm,armv8-pmuv3";
++		interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
++	};
++
++	gic: interrupt-controller@1400000 {
++		compatible = "arm,gic-400";
++		#interrupt-cells = <3>;
++		interrupt-controller;
++		reg = <0x0 0x1401000 0 0x1000>, /* GICD */
++		      <0x0 0x1402000 0 0x2000>, /* GICC */
++		      <0x0 0x1404000 0 0x2000>, /* GICH */
++		      <0x0 0x1406000 0 0x2000>; /* GICV */
++		interrupts = <1 9 IRQ_TYPE_LEVEL_LOW>;
++	};
++
++	reboot {
++		compatible = "syscon-reboot";
++		regmap = <&dcfg>;
++		offset = <0xb0>;
++		mask = <0x02>;
++	};
++
++	soc {
++		compatible = "simple-bus";
++		#address-cells = <2>;
++		#size-cells = <2>;
++		ranges;
++
++		scfg: scfg@1570000 {
++			compatible = "fsl,ls1012a-scfg", "syscon";
++			reg = <0x0 0x1570000 0x0 0x10000>;
++			big-endian;
++		};
++
++		crypto: crypto@1700000 {
++			compatible = "fsl,sec-v5.4", "fsl,sec-v5.0",
++				     "fsl,sec-v4.0";
++			fsl,sec-era = <8>;
++			#address-cells = <1>;
++			#size-cells = <1>;
++			ranges = <0x0 0x00 0x1700000 0x100000>;
++			reg = <0x00 0x1700000 0x0 0x100000>;
++			interrupts = ;
++
++			sec_jr0: jr@10000 {
++				compatible = "fsl,sec-v5.4-job-ring",
++					     "fsl,sec-v5.0-job-ring",
++					     "fsl,sec-v4.0-job-ring";
++				reg = <0x10000 0x10000>;
++				interrupts = ;
++			};
++
++			sec_jr1: jr@20000 {
++				compatible = "fsl,sec-v5.4-job-ring",
++					     "fsl,sec-v5.0-job-ring",
++					     "fsl,sec-v4.0-job-ring";
++				reg = <0x20000 0x10000>;
++				interrupts = ;
++			};
++
++			sec_jr2: jr@30000 {
++				compatible = "fsl,sec-v5.4-job-ring",
++					     "fsl,sec-v5.0-job-ring",
++					     "fsl,sec-v4.0-job-ring";
++ reg = <0x30000 0x10000>; ++ interrupts = ; ++ }; ++ ++ sec_jr3: jr@40000 { ++ compatible = "fsl,sec-v5.4-job-ring", ++ "fsl,sec-v5.0-job-ring", ++ "fsl,sec-v4.0-job-ring"; ++ reg = <0x40000 0x10000>; ++ interrupts = ; ++ }; ++ ++ rtic@60000 { ++ compatible = "fsl,sec-v5.4-rtic", ++ "fsl,sec-v5.0-rtic", ++ "fsl,sec-v4.0-rtic"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ reg = <0x60000 0x100 0x60e00 0x18>; ++ ranges = <0x0 0x60100 0x500>; ++ ++ rtic_a: rtic-a@0 { ++ compatible = "fsl,sec-v5.4-rtic-memory", ++ "fsl,sec-v5.0-rtic-memory", ++ "fsl,sec-v4.0-rtic-memory"; ++ reg = <0x00 0x20 0x100 0x100>; ++ }; ++ ++ rtic_b: rtic-b@20 { ++ compatible = "fsl,sec-v5.4-rtic-memory", ++ "fsl,sec-v5.0-rtic-memory", ++ "fsl,sec-v4.0-rtic-memory"; ++ reg = <0x20 0x20 0x200 0x100>; ++ }; ++ ++ rtic_c: rtic-c@40 { ++ compatible = "fsl,sec-v5.4-rtic-memory", ++ "fsl,sec-v5.0-rtic-memory", ++ "fsl,sec-v4.0-rtic-memory"; ++ reg = <0x40 0x20 0x300 0x100>; ++ }; ++ ++ rtic_d: rtic-d@60 { ++ compatible = "fsl,sec-v5.4-rtic-memory", ++ "fsl,sec-v5.0-rtic-memory", ++ "fsl,sec-v4.0-rtic-memory"; ++ reg = <0x60 0x20 0x400 0x100>; ++ }; ++ }; ++ }; ++ ++ sec_mon: sec_mon@1e90000 { ++ compatible = "fsl,sec-v5.4-mon", "fsl,sec-v5.0-mon", ++ "fsl,sec-v4.0-mon"; ++ reg = <0x0 0x1e90000 0x0 0x10000>; ++ interrupts = , ++ ; ++ }; ++ ++ dcfg: dcfg@1ee0000 { ++ compatible = "fsl,ls1012a-dcfg", ++ "syscon"; ++ reg = <0x0 0x1ee0000 0x0 0x10000>; ++ big-endian; ++ }; ++ ++ clockgen: clocking@1ee1000 { ++ compatible = "fsl,ls1012a-clockgen"; ++ reg = <0x0 0x1ee1000 0x0 0x1000>; ++ #clock-cells = <2>; ++ clocks = <&sysclk &coreclk>; ++ clock-names = "sysclk", "coreclk"; ++ }; ++ ++ tmu: tmu@1f00000 { ++ compatible = "fsl,qoriq-tmu"; ++ reg = <0x0 0x1f00000 0x0 0x10000>; ++ interrupts = <0 33 0x4>; ++ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>; ++ fsl,tmu-calibration = <0x00000000 0x00000026 ++ 0x00000001 0x0000002d ++ 0x00000002 0x00000032 ++ 0x00000003 0x00000039 ++ 0x00000004 0x0000003f ++ 0x00000005 0x00000046 ++ 0x00000006 0x0000004d ++ 0x00000007 0x00000054 ++ 0x00000008 0x0000005a ++ 0x00000009 0x00000061 ++ 0x0000000a 0x0000006a ++ 0x0000000b 0x00000071 ++ ++ 0x00010000 0x00000025 ++ 0x00010001 0x0000002c ++ 0x00010002 0x00000035 ++ 0x00010003 0x0000003d ++ 0x00010004 0x00000045 ++ 0x00010005 0x0000004e ++ 0x00010006 0x00000057 ++ 0x00010007 0x00000061 ++ 0x00010008 0x0000006b ++ 0x00010009 0x00000076 ++ ++ 0x00020000 0x00000029 ++ 0x00020001 0x00000033 ++ 0x00020002 0x0000003d ++ 0x00020003 0x00000049 ++ 0x00020004 0x00000056 ++ 0x00020005 0x00000061 ++ 0x00020006 0x0000006d ++ ++ 0x00030000 0x00000021 ++ 0x00030001 0x0000002a ++ 0x00030002 0x0000003c ++ 0x00030003 0x0000004e>; ++ big-endian; ++ #thermal-sensor-cells = <1>; ++ }; ++ ++ thermal-zones { ++ cpu_thermal: cpu-thermal { ++ polling-delay-passive = <1000>; ++ polling-delay = <5000>; ++ thermal-sensors = <&tmu 0>; ++ ++ trips { ++ cpu_alert: cpu-alert { ++ temperature = <85000>; ++ hysteresis = <2000>; ++ type = "passive"; ++ }; ++ ++ cpu_crit: cpu-crit { ++ temperature = <95000>; ++ hysteresis = <2000>; ++ type = "critical"; ++ }; ++ }; ++ ++ cooling-maps { ++ map0 { ++ trip = <&cpu_alert>; ++ cooling-device = ++ <&cpu0 THERMAL_NO_LIMIT ++ THERMAL_NO_LIMIT>; ++ }; ++ }; ++ }; ++ }; ++ ++ esdhc0: esdhc@1560000 { ++ compatible = "fsl,ls1012a-esdhc", "fsl,esdhc"; ++ reg = <0x0 0x1560000 0x0 0x10000>; ++ interrupts = <0 62 0x4>; ++ clocks = <&clockgen 4 0>; ++ voltage-ranges = <1800 1800 3300 3300>; ++ sdhci,auto-cmd12; ++ big-endian; ++ bus-width = 
<4>; ++ status = "disabled"; ++ }; ++ ++ esdhc1: esdhc@1580000 { ++ compatible = "fsl,ls1012a-esdhc", "fsl,esdhc"; ++ reg = <0x0 0x1580000 0x0 0x10000>; ++ interrupts = <0 65 0x4>; ++ clocks = <&clockgen 4 0>; ++ voltage-ranges = <1800 1800 3300 3300>; ++ sdhci,auto-cmd12; ++ big-endian; ++ broken-cd; ++ bus-width = <4>; ++ status = "disabled"; ++ }; ++ ++ ftm0: ftm0@29d0000 { ++ compatible = "fsl,ftm-alarm"; ++ reg = <0x0 0x29d0000 0x0 0x10000>, ++ <0x0 0x1ee2140 0x0 0x4>; ++ reg-names = "ftm", "FlexTimer1"; ++ interrupts = <0 86 0x4>; ++ big-endian; ++ }; ++ ++ i2c0: i2c@2180000 { ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2180000 0x0 0x10000>; ++ interrupts = <0 56 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clockgen 4 0>; ++ status = "disabled"; ++ }; ++ ++ i2c1: i2c@2190000 { ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2190000 0x0 0x10000>; ++ interrupts = <0 57 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clockgen 4 0>; ++ status = "disabled"; ++ }; ++ ++ duart0: serial@21c0500 { ++ compatible = "fsl,ns16550", "ns16550a"; ++ reg = <0x00 0x21c0500 0x0 0x100>; ++ interrupts = <0 54 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clockgen 4 0>; ++ status = "disabled"; ++ }; ++ ++ duart1: serial@21c0600 { ++ compatible = "fsl,ns16550", "ns16550a"; ++ reg = <0x00 0x21c0600 0x0 0x100>; ++ interrupts = <0 54 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clockgen 4 0>; ++ status = "disabled"; ++ }; ++ ++ gpio0: gpio@2300000 { ++ compatible = "fsl,qoriq-gpio"; ++ reg = <0x0 0x2300000 0x0 0x10000>; ++ interrupts = <0 66 IRQ_TYPE_LEVEL_HIGH>; ++ gpio-controller; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ gpio1: gpio@2310000 { ++ compatible = "fsl,qoriq-gpio"; ++ reg = <0x0 0x2310000 0x0 0x10000>; ++ interrupts = <0 67 IRQ_TYPE_LEVEL_HIGH>; ++ gpio-controller; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ qspi: quadspi@1550000 { ++ compatible = "fsl,ls1012a-qspi", "fsl,ls1021a-qspi"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x1550000 0x0 0x10000>, ++ <0x0 0x40000000 0x0 0x10000000>; ++ reg-names = "QuadSPI", "QuadSPI-memory"; ++ interrupts = <0 99 IRQ_TYPE_LEVEL_HIGH>; ++ clock-names = "qspi_en", "qspi"; ++ clocks = <&clockgen 4 0>, <&clockgen 4 0>; ++ big-endian; ++ fsl,qspi-has-second-chip; ++ status = "disabled"; ++ }; ++ ++ wdog0: wdog@2ad0000 { ++ compatible = "fsl,ls1012a-wdt", ++ "fsl,imx21-wdt"; ++ reg = <0x0 0x2ad0000 0x0 0x10000>; ++ interrupts = <0 83 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clockgen 4 0>; ++ big-endian; ++ }; ++ ++ sai1: sai@2b50000 { ++ #sound-dai-cells = <0>; ++ compatible = "fsl,vf610-sai"; ++ reg = <0x0 0x2b50000 0x0 0x10000>; ++ interrupts = <0 148 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>, ++ <&clockgen 4 3>, <&clockgen 4 3>; ++ clock-names = "bus", "mclk1", "mclk2", "mclk3"; ++ dma-names = "tx", "rx"; ++ dmas = <&edma0 1 47>, ++ <&edma0 1 46>; ++ status = "disabled"; ++ }; ++ ++ sai2: sai@2b60000 { ++ #sound-dai-cells = <0>; ++ compatible = "fsl,vf610-sai"; ++ reg = <0x0 0x2b60000 0x0 0x10000>; ++ interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>, ++ <&clockgen 4 3>, <&clockgen 4 3>; ++ clock-names = "bus", "mclk1", "mclk2", "mclk3"; ++ dma-names = "tx", "rx"; ++ dmas = <&edma0 1 45>, ++ <&edma0 1 44>; ++ status = "disabled"; ++ }; ++ ++ edma0: edma@2c00000 { ++ #dma-cells = <2>; ++ compatible = "fsl,vf610-edma"; ++ reg = <0x0 0x2c00000 
0x0 0x10000>, ++ <0x0 0x2c10000 0x0 0x10000>, ++ <0x0 0x2c20000 0x0 0x10000>; ++ interrupts = <0 103 IRQ_TYPE_LEVEL_HIGH>, ++ <0 103 IRQ_TYPE_LEVEL_HIGH>; ++ interrupt-names = "edma-tx", "edma-err"; ++ dma-channels = <32>; ++ big-endian; ++ clock-names = "dmamux0", "dmamux1"; ++ clocks = <&clockgen 4 3>, ++ <&clockgen 4 3>; ++ }; ++ ++ usb0: usb3@2f00000 { ++ compatible = "snps,dwc3"; ++ reg = <0x0 0x2f00000 0x0 0x10000>; ++ interrupts = <0 60 0x4>; ++ dr_mode = "host"; ++ snps,quirk-frame-length-adjustment = <0x20>; ++ snps,dis_rxdet_inp3_quirk; ++ }; ++ ++ usb1: usb2@8600000 { ++ compatible = "fsl-usb2-dr-v2.5", "fsl-usb2-dr"; ++ reg = <0x0 0x8600000 0x0 0x1000>; ++ interrupts = <0 139 0x4>; ++ dr_mode = "host"; ++ phy_type = "ulpi"; ++ }; ++ ++ sata: sata@3200000 { ++ compatible = "fsl,ls1012a-ahci", "fsl,ls1043a-ahci"; ++ reg = <0x0 0x3200000 0x0 0x10000>, ++ <0x0 0x20140520 0x0 0x4>; ++ reg-names = "ahci", "sata-ecc"; ++ interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clockgen 4 0>; ++ dma-coherent; ++ status = "disabled"; ++ }; ++ }; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi +@@ -0,0 +1,45 @@ ++/* ++ * QorIQ FMan v3 device tree nodes for ls1043 ++ * ++ * Copyright 2015-2016 Freescale Semiconductor Inc. ++ * ++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) ++ */ ++ ++&soc { ++ ++/* include used FMan blocks */ ++#include "qoriq-fman3-0.dtsi" ++#include "qoriq-fman3-0-1g-0.dtsi" ++#include "qoriq-fman3-0-1g-1.dtsi" ++#include "qoriq-fman3-0-1g-2.dtsi" ++#include "qoriq-fman3-0-1g-3.dtsi" ++#include "qoriq-fman3-0-1g-4.dtsi" ++#include "qoriq-fman3-0-1g-5.dtsi" ++#include "qoriq-fman3-0-10g-0.dtsi" ++ ++}; ++ ++&fman0 { ++ /* these aliases provide the FMan ports mapping */ ++ enet0: ethernet@e0000 { ++ }; ++ ++ enet1: ethernet@e2000 { ++ }; ++ ++ enet2: ethernet@e4000 { ++ }; ++ ++ enet3: ethernet@e6000 { ++ }; ++ ++ enet4: ethernet@e8000 { ++ }; ++ ++ enet5: ethernet@ea000 { ++ }; ++ ++ enet6: ethernet@f0000 { ++ }; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds-sdk.dts +@@ -0,0 +1,69 @@ ++/* ++ * Device Tree Include file for Freescale Layerscape-1043A family SoC. ++ * ++ * Copyright 2014-2015 Freescale Semiconductor, Inc. ++ * ++ * Mingkai Hu ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "fsl-ls1043a-qds.dts" ++ ++&bman_fbpr { ++ compatible = "fsl,bman-fbpr"; ++ alloc-ranges = <0 0 0x10000 0>; ++}; ++&qman_fqd { ++ compatible = "fsl,qman-fqd"; ++ alloc-ranges = <0 0 0x10000 0>; ++}; ++&qman_pfdr { ++ compatible = "fsl,qman-pfdr"; ++ alloc-ranges = <0 0 0x10000 0>; ++}; ++ ++&soc { ++#include "qoriq-dpaa-eth.dtsi" ++#include "qoriq-fman3-0-6oh.dtsi" ++}; ++ ++&fman0 { ++ compatible = "fsl,fman", "simple-bus"; ++}; +--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts +@@ -1,7 +1,7 @@ + /* + * Device Tree Include file for Freescale Layerscape-1043A family SoC. + * +- * Copyright 2014-2015, Freescale Semiconductor ++ * Copyright 2014-2015 Freescale Semiconductor, Inc. 
+ * + * Mingkai Hu + * +@@ -45,7 +45,7 @@ + */ + + /dts-v1/; +-/include/ "fsl-ls1043a.dtsi" ++#include "fsl-ls1043a.dtsi" + + / { + model = "LS1043A QDS Board"; +@@ -60,6 +60,22 @@ + serial1 = &duart1; + serial2 = &duart2; + serial3 = &duart3; ++ sgmii_riser_s1_p1 = &sgmii_phy_s1_p1; ++ sgmii_riser_s2_p1 = &sgmii_phy_s2_p1; ++ sgmii_riser_s3_p1 = &sgmii_phy_s3_p1; ++ sgmii_riser_s4_p1 = &sgmii_phy_s4_p1; ++ qsgmii_s1_p1 = &qsgmii_phy_s1_p1; ++ qsgmii_s1_p2 = &qsgmii_phy_s1_p2; ++ qsgmii_s1_p3 = &qsgmii_phy_s1_p3; ++ qsgmii_s1_p4 = &qsgmii_phy_s1_p4; ++ qsgmii_s2_p1 = &qsgmii_phy_s2_p1; ++ qsgmii_s2_p2 = &qsgmii_phy_s2_p2; ++ qsgmii_s2_p3 = &qsgmii_phy_s2_p3; ++ qsgmii_s2_p4 = &qsgmii_phy_s2_p4; ++ emi1_slot1 = &ls1043mdio_s1; ++ emi1_slot2 = &ls1043mdio_s2; ++ emi1_slot3 = &ls1043mdio_s3; ++ emi1_slot4 = &ls1043mdio_s4; + }; + + chosen { +@@ -97,8 +113,11 @@ + }; + + fpga: board-control@2,0 { +- compatible = "fsl,ls1043aqds-fpga", "fsl,fpga-qixis"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "fsl,ls1043aqds-fpga", "fsl,fpga-qixis", "simple-bus"; + reg = <0x2 0x0 0x0000100>; ++ ranges = <0 2 0 0x100>; + }; + }; + +@@ -181,3 +200,149 @@ + reg = <0>; + }; + }; ++ ++#include "fsl-ls1043-post.dtsi" ++ ++&fman0 { ++ ethernet@e0000 { ++ phy-handle = <&qsgmii_phy_s2_p1>; ++ phy-connection-type = "sgmii"; ++ }; ++ ++ ethernet@e2000 { ++ phy-handle = <&qsgmii_phy_s2_p2>; ++ phy-connection-type = "sgmii"; ++ }; ++ ++ ethernet@e4000 { ++ phy-handle = <&rgmii_phy1>; ++ phy-connection-type = "rgmii"; ++ }; ++ ++ ethernet@e6000 { ++ phy-handle = <&rgmii_phy2>; ++ phy-connection-type = "rgmii"; ++ }; ++ ++ ethernet@e8000 { ++ phy-handle = <&qsgmii_phy_s2_p3>; ++ phy-connection-type = "sgmii"; ++ }; ++ ++ ethernet@ea000 { ++ phy-handle = <&qsgmii_phy_s2_p4>; ++ phy-connection-type = "sgmii"; ++ }; ++ ++ ethernet@f0000 { /* DTSEC9/10GEC1 */ ++ fixed-link = <1 1 10000 0 0>; ++ phy-connection-type = "xgmii"; ++ }; ++}; ++ ++&fpga { ++ mdio-mux-emi1 { ++ compatible = "mdio-mux-mmioreg", "mdio-mux"; ++ mdio-parent-bus = <&mdio0>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x54 1>; /* BRDCFG4 */ ++ mux-mask = <0xe0>; /* EMI1 */ ++ ++ /* On-board RGMII1 PHY */ ++ ls1043mdio0: mdio@0 { ++ reg = <0>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ rgmii_phy1: ethernet-phy@1 { /* MAC3 */ ++ reg = <0x1>; ++ }; ++ }; ++ ++ /* On-board RGMII2 PHY */ ++ ls1043mdio1: mdio@1 { ++ reg = <0x20>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ rgmii_phy2: ethernet-phy@2 { /* MAC4 */ ++ reg = <0x2>; ++ }; ++ }; ++ ++ /* Slot 1 */ ++ ls1043mdio_s1: mdio@2 { ++ reg = <0x40>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ status = "disabled"; ++ ++ qsgmii_phy_s1_p1: ethernet-phy@4 { ++ reg = <0x4>; ++ }; ++ qsgmii_phy_s1_p2: ethernet-phy@5 { ++ reg = <0x5>; ++ }; ++ qsgmii_phy_s1_p3: ethernet-phy@6 { ++ reg = <0x6>; ++ }; ++ qsgmii_phy_s1_p4: ethernet-phy@7 { ++ reg = <0x7>; ++ }; ++ ++ sgmii_phy_s1_p1: ethernet-phy@1c { ++ reg = <0x1c>; ++ }; ++ }; ++ ++ /* Slot 2 */ ++ ls1043mdio_s2: mdio@3 { ++ reg = <0x60>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ status = "disabled"; ++ ++ qsgmii_phy_s2_p1: ethernet-phy@8 { ++ reg = <0x8>; ++ }; ++ qsgmii_phy_s2_p2: ethernet-phy@9 { ++ reg = <0x9>; ++ }; ++ qsgmii_phy_s2_p3: ethernet-phy@a { ++ reg = <0xa>; ++ }; ++ qsgmii_phy_s2_p4: ethernet-phy@b { ++ reg = <0xb>; ++ }; ++ ++ sgmii_phy_s2_p1: ethernet-phy@1c { ++ reg = <0x1c>; ++ }; ++ }; ++ ++ /* Slot 3 */ ++ ls1043mdio_s3: mdio@4 { ++ reg = <0x80>; ++ #address-cells = <1>; ++ 
#size-cells = <0>; ++ status = "disabled"; ++ ++ sgmii_phy_s3_p1: ethernet-phy@1c { ++ reg = <0x1c>; ++ }; ++ }; ++ ++ /* Slot 4 */ ++ ls1043mdio_s4: mdio@5 { ++ reg = <0xa0>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ status = "disabled"; ++ ++ sgmii_phy_s4_p1: ethernet-phy@1c { ++ reg = <0x1c>; ++ }; ++ }; ++ }; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb-sdk.dts +@@ -0,0 +1,69 @@ ++/* ++ * Device Tree Include file for Freescale Layerscape-1043A family SoC. ++ * ++ * Copyright 2014-2015 Freescale Semiconductor, Inc. ++ * ++ * Mingkai Hu ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "fsl-ls1043a-rdb.dts" ++ ++&bman_fbpr { ++ compatible = "fsl,bman-fbpr"; ++ alloc-ranges = <0 0 0x10000 0>; ++}; ++&qman_fqd { ++ compatible = "fsl,qman-fqd"; ++ alloc-ranges = <0 0 0x10000 0>; ++}; ++&qman_pfdr { ++ compatible = "fsl,qman-pfdr"; ++ alloc-ranges = <0 0 0x10000 0>; ++}; ++ ++&soc { ++#include "qoriq-dpaa-eth.dtsi" ++#include "qoriq-fman3-0-6oh.dtsi" ++}; ++ ++&fman0 { ++ compatible = "fsl,fman", "simple-bus"; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb-usdpaa.dts +@@ -0,0 +1,117 @@ ++/* ++ * Device Tree Include file for Freescale Layerscape-1043A family SoC. ++ * ++ * Copyright 2014-2015 Freescale Semiconductor, Inc. ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. 
++ */ ++ ++#include "fsl-ls1043a-rdb-sdk.dts" ++ ++&soc { ++ bp7: buffer-pool@7 { ++ compatible = "fsl,p4080-bpool", "fsl,bpool"; ++ fsl,bpid = <7>; ++ fsl,bpool-ethernet-cfg = <0 0 0 192 0 0xdeadbeef>; ++ fsl,bpool-thresholds = <0x400 0xc00 0x0 0x0>; ++ }; ++ ++ bp8: buffer-pool@8 { ++ compatible = "fsl,p4080-bpool", "fsl,bpool"; ++ fsl,bpid = <8>; ++ fsl,bpool-ethernet-cfg = <0 0 0 576 0 0xabbaf00d>; ++ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; ++ }; ++ ++ bp9: buffer-pool@9 { ++ compatible = "fsl,p4080-bpool", "fsl,bpool"; ++ fsl,bpid = <9>; ++ fsl,bpool-ethernet-cfg = <0 0 0 2048 0 0xfeedabba>; ++ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; ++ }; ++ ++ fsl,dpaa { ++ compatible = "fsl,ls1043a", "fsl,dpaa", "simple-bus"; ++ ++ ethernet@0 { ++ compatible = "fsl,dpa-ethernet-init"; ++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; ++ fsl,qman-frame-queues-rx = <0x50 1 0x51 1>; ++ fsl,qman-frame-queues-tx = <0x70 1 0x71 1>; ++ }; ++ ++ ethernet@1 { ++ compatible = "fsl,dpa-ethernet-init"; ++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; ++ fsl,qman-frame-queues-rx = <0x52 1 0x53 1>; ++ fsl,qman-frame-queues-tx = <0x72 1 0x73 1>; ++ }; ++ ++ ethernet@2 { ++ compatible = "fsl,dpa-ethernet-init"; ++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; ++ fsl,qman-frame-queues-rx = <0x54 1 0x55 1>; ++ fsl,qman-frame-queues-tx = <0x74 1 0x75 1>; ++ }; ++ ++ ethernet@3 { ++ compatible = "fsl,dpa-ethernet-init"; ++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; ++ fsl,qman-frame-queues-rx = <0x56 1 0x57 1>; ++ fsl,qman-frame-queues-tx = <0x76 1 0x77 1>; ++ }; ++ ++ ethernet@4 { ++ compatible = "fsl,dpa-ethernet-init"; ++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; ++ fsl,qman-frame-queues-rx = <0x58 1 0x59 1>; ++ fsl,qman-frame-queues-tx = <0x78 1 0x79 1>; ++ }; ++ ++ ethernet@5 { ++ compatible = "fsl,dpa-ethernet-init"; ++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; ++ fsl,qman-frame-queues-rx = <0x60 1 0x61 1>; ++ fsl,qman-frame-queues-tx = <0x80 1 0x81 1>; ++ }; ++ ++ ethernet@8 { ++ compatible = "fsl,dpa-ethernet-init"; ++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; ++ fsl,qman-frame-queues-rx = <0x5c 1 0x5d 1>; ++ fsl,qman-frame-queues-tx = <0x7c 1 0x7d 1>; ++ ++ }; ++ dpa-fman0-oh@2 { ++ compatible = "fsl,dpa-oh"; ++ /* Define frame queues for the OH port*/ ++ /* */ ++ fsl,qman-frame-queues-oh = <0x5a 1 0x5b 1>; ++ fsl,fman-oh-port = <&fman0_oh2>; ++ }; ++ }; ++}; ++/ { ++ reserved-memory { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ usdpaa_mem: usdpaa_mem { ++ compatible = "fsl,usdpaa-mem"; ++ alloc-ranges = <0 0 0x10000 0>; ++ size = <0 0x10000000>; ++ alignment = <0 0x10000000>; ++ }; ++ }; ++}; ++ ++&fman0 { ++ fman0_oh2: port@83000 { ++ cell-index = <1>; ++ compatible = "fsl,fman-port-oh"; ++ reg = <0x83000 0x1000>; ++ }; ++}; +--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts +@@ -1,7 +1,7 @@ + /* + * Device Tree Include file for Freescale Layerscape-1043A family SoC. + * +- * Copyright 2014-2015, Freescale Semiconductor ++ * Copyright 2014-2015 Freescale Semiconductor, Inc. 
+ * + * Mingkai Hu + * +@@ -45,7 +45,7 @@ + */ + + /dts-v1/; +-/include/ "fsl-ls1043a.dtsi" ++#include "fsl-ls1043a.dtsi" + + / { + model = "LS1043A RDB Board"; +@@ -86,6 +86,10 @@ + compatible = "pericom,pt7c4338"; + reg = <0x68>; + }; ++ rtc@51 { ++ compatible = "nxp,pcf85263"; ++ reg = <0x51>; ++ }; + }; + + &ifc { +@@ -130,6 +134,38 @@ + reg = <0>; + spi-max-frequency = <1000000>; /* input clock */ + }; ++ ++ slic@2 { ++ compatible = "maxim,ds26522"; ++ reg = <2>; ++ spi-max-frequency = <2000000>; ++ fsl,spi-cs-sck-delay = <100>; ++ fsl,spi-sck-cs-delay = <50>; ++ }; ++ ++ slic@3 { ++ compatible = "maxim,ds26522"; ++ reg = <3>; ++ spi-max-frequency = <2000000>; ++ fsl,spi-cs-sck-delay = <100>; ++ fsl,spi-sck-cs-delay = <50>; ++ }; ++}; ++ ++&uqe { ++ ucc_hdlc: ucc@2000 { ++ compatible = "fsl,ucc-hdlc"; ++ rx-clock-name = "clk8"; ++ tx-clock-name = "clk9"; ++ fsl,rx-sync-clock = "rsync_pin"; ++ fsl,tx-sync-clock = "tsync_pin"; ++ fsl,tx-timeslot-mask = <0xfffffffe>; ++ fsl,rx-timeslot-mask = <0xfffffffe>; ++ fsl,tdm-framer-type = "e1"; ++ fsl,tdm-id = <0>; ++ fsl,siram-entry-id = <0>; ++ fsl,tdm-interface; ++ }; + }; + + &duart0 { +@@ -139,3 +175,76 @@ + &duart1 { + status = "okay"; + }; ++ ++#include "fsl-ls1043-post.dtsi" ++ ++&fman0 { ++ ethernet@e0000 { ++ phy-handle = <&qsgmii_phy1>; ++ phy-connection-type = "qsgmii"; ++ }; ++ ++ ethernet@e2000 { ++ phy-handle = <&qsgmii_phy2>; ++ phy-connection-type = "qsgmii"; ++ }; ++ ++ ethernet@e4000 { ++ phy-handle = <&rgmii_phy1>; ++ phy-connection-type = "rgmii-txid"; ++ }; ++ ++ ethernet@e6000 { ++ phy-handle = <&rgmii_phy2>; ++ phy-connection-type = "rgmii-txid"; ++ }; ++ ++ ethernet@e8000 { ++ phy-handle = <&qsgmii_phy3>; ++ phy-connection-type = "qsgmii"; ++ }; ++ ++ ethernet@ea000 { ++ phy-handle = <&qsgmii_phy4>; ++ phy-connection-type = "qsgmii"; ++ }; ++ ++ ethernet@f0000 { /* 10GEC1 */ ++ phy-handle = <&aqr105_phy>; ++ phy-connection-type = "xgmii"; ++ }; ++ ++ mdio@fc000 { ++ rgmii_phy1: ethernet-phy@1 { ++ reg = <0x1>; ++ }; ++ ++ rgmii_phy2: ethernet-phy@2 { ++ reg = <0x2>; ++ }; ++ ++ qsgmii_phy1: ethernet-phy@4 { ++ reg = <0x4>; ++ }; ++ ++ qsgmii_phy2: ethernet-phy@5 { ++ reg = <0x5>; ++ }; ++ ++ qsgmii_phy3: ethernet-phy@6 { ++ reg = <0x6>; ++ }; ++ ++ qsgmii_phy4: ethernet-phy@7 { ++ reg = <0x7>; ++ }; ++ }; ++ ++ mdio@fd000 { ++ aqr105_phy: ethernet-phy@1 { ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ interrupts = <0 132 4>; ++ reg = <0x1>; ++ }; ++ }; ++}; +--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi +@@ -1,7 +1,7 @@ + /* + * Device Tree Include file for Freescale Layerscape-1043A family SoC. + * +- * Copyright 2014-2015, Freescale Semiconductor ++ * Copyright 2014-2015 Freescale Semiconductor, Inc. + * + * Mingkai Hu + * +@@ -44,12 +44,25 @@ + * OTHER DEALINGS IN THE SOFTWARE. 
+ */
+
++#include <dt-bindings/thermal/thermal.h>
++
+ / {
+	compatible = "fsl,ls1043a";
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
++	aliases {
++		fman0 = &fman0;
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		ethernet2 = &enet2;
++		ethernet3 = &enet3;
++		ethernet4 = &enet4;
++		ethernet5 = &enet5;
++		ethernet6 = &enet6;
++	};
++
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+@@ -66,6 +79,8 @@
+			reg = <0x0>;
+			clocks = <&clockgen 1 0>;
+			next-level-cache = <&l2>;
++			#cooling-cells = <2>;
++			cpu-idle-states = <&CPU_PH20>;
+		};
+
+		cpu1: cpu@1 {
+@@ -74,6 +89,7 @@
+			reg = <0x1>;
+			clocks = <&clockgen 1 0>;
+			next-level-cache = <&l2>;
++			cpu-idle-states = <&CPU_PH20>;
+		};
+
+		cpu2: cpu@2 {
+@@ -82,6 +98,7 @@
+			reg = <0x2>;
+			clocks = <&clockgen 1 0>;
+			next-level-cache = <&l2>;
++			cpu-idle-states = <&CPU_PH20>;
+		};
+
+		cpu3: cpu@3 {
+@@ -90,6 +107,7 @@
+			reg = <0x3>;
+			clocks = <&clockgen 1 0>;
+			next-level-cache = <&l2>;
++			cpu-idle-states = <&CPU_PH20>;
+		};
+
+		l2: l2-cache {
+@@ -97,12 +115,56 @@
+		};
+	};
+
++	idle-states {
++		/*
++		 * PSCI node is not added default, U-boot will add missing
++		 * parts if it determines to use PSCI.
++		 */
++		entry-method = "arm,psci";
++
++		CPU_PH20: cpu-ph20 {
++			compatible = "arm,idle-state";
++			idle-state-name = "PH20";
++			arm,psci-suspend-param = <0x0>;
++			entry-latency-us = <1000>;
++			exit-latency-us = <1000>;
++			min-residency-us = <3000>;
++		};
++	};
++
+	memory@80000000 {
+		device_type = "memory";
+		reg = <0x0 0x80000000 0 0x80000000>;
+		/* DRAM space 1, size: 2GiB DRAM */
+	};
+
++	reserved-memory {
++		#address-cells = <2>;
++		#size-cells = <2>;
++		ranges;
++
++		bman_fbpr: bman-fbpr {
++			compatible = "shared-dma-pool";
++			size = <0 0x1000000>;
++			alignment = <0 0x1000000>;
++			no-map;
++		};
++
++		qman_fqd: qman-fqd {
++			compatible = "shared-dma-pool";
++			size = <0 0x400000>;
++			alignment = <0 0x400000>;
++			no-map;
++		};
++
++		qman_pfdr: qman-pfdr {
++			compatible = "shared-dma-pool";
++			size = <0 0x2000000>;
++			alignment = <0 0x2000000>;
++			no-map;
++		};
++	};
++
+	sysclk: sysclk {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+@@ -149,7 +211,7 @@
+		interrupts = <1 9 0xf08>;
+	};
+
+-	soc {
++	soc: soc {
+		compatible = "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <2>;
+@@ -213,13 +275,14 @@
+
+		dcfg: dcfg@1ee0000 {
+			compatible = "fsl,ls1043a-dcfg", "syscon";
+-			reg = <0x0 0x1ee0000 0x0 0x10000>;
++			reg = <0x0 0x1ee0000 0x0 0x1000>;
+			big-endian;
+		};
+
+		ifc: ifc@1530000 {
+			compatible = "fsl,ifc", "simple-bus";
+			reg = <0x0 0x1530000 0x0 0x10000>;
++			big-endian;
+			interrupts = <0 43 0x4>;
+		};
+
+@@ -255,6 +318,103 @@
+			big-endian;
+		};
+
++		tmu: tmu@1f00000 {
++			compatible = "fsl,qoriq-tmu";
++			reg = <0x0 0x1f00000 0x0 0x10000>;
++			interrupts = <0 33 0x4>;
++			fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>;
++			fsl,tmu-calibration = <0x00000000 0x00000026
++					       0x00000001 0x0000002d
++					       0x00000002 0x00000032
++					       0x00000003 0x00000039
++					       0x00000004 0x0000003f
++					       0x00000005 0x00000046
++					       0x00000006 0x0000004d
++					       0x00000007 0x00000054
++					       0x00000008 0x0000005a
++					       0x00000009 0x00000061
++					       0x0000000a 0x0000006a
++					       0x0000000b 0x00000071
++
++					       0x00010000 0x00000025
++					       0x00010001 0x0000002c
++					       0x00010002 0x00000035
++					       0x00010003 0x0000003d
++					       0x00010004 0x00000045
++					       0x00010005 0x0000004e
++					       0x00010006 0x00000057
++					       0x00010007 0x00000061
++					       0x00010008 0x0000006b
++					       0x00010009 0x00000076
++
++					       0x00020000 0x00000029
++					       0x00020001 0x00000033
++					       0x00020002 0x0000003d
++					       0x00020003 0x00000049
++					       0x00020004 0x00000056
++					       0x00020005
0x00000061 ++ 0x00020006 0x0000006d ++ ++ 0x00030000 0x00000021 ++ 0x00030001 0x0000002a ++ 0x00030002 0x0000003c ++ 0x00030003 0x0000004e>; ++ #thermal-sensor-cells = <1>; ++ }; ++ ++ thermal-zones { ++ cpu_thermal: cpu-thermal { ++ polling-delay-passive = <1000>; ++ polling-delay = <5000>; ++ ++ thermal-sensors = <&tmu 3>; ++ ++ trips { ++ cpu_alert: cpu-alert { ++ temperature = <85000>; ++ hysteresis = <2000>; ++ type = "passive"; ++ }; ++ cpu_crit: cpu-crit { ++ temperature = <95000>; ++ hysteresis = <2000>; ++ type = "critical"; ++ }; ++ }; ++ ++ cooling-maps { ++ map0 { ++ trip = <&cpu_alert>; ++ cooling-device = ++ <&cpu0 THERMAL_NO_LIMIT ++ THERMAL_NO_LIMIT>; ++ }; ++ }; ++ }; ++ }; ++ ++ qman: qman@1880000 { ++ compatible = "fsl,qman"; ++ reg = <0x00 0x1880000 0x0 0x10000>; ++ interrupts = <0 45 0x4>; ++ memory-region = <&qman_fqd &qman_pfdr>; ++ }; ++ ++ bman: bman@1890000 { ++ compatible = "fsl,bman"; ++ reg = <0x00 0x1890000 0x0 0x10000>; ++ interrupts = <0 45 0x4>; ++ memory-region = <&bman_fbpr>; ++ }; ++ ++ bportals: bman-portals@508000000 { ++ ranges = <0x0 0x5 0x08000000 0x8000000>; ++ }; ++ ++ qportals: qman-portals@500000000 { ++ ranges = <0x0 0x5 0x00000000 0x8000000>; ++ }; ++ + dspi0: dspi@2100000 { + compatible = "fsl,ls1043a-dspi", "fsl,ls1021a-v1.0-dspi"; + #address-cells = <1>; +@@ -396,6 +556,72 @@ + #interrupt-cells = <2>; + }; + ++ uqe: uqe@2400000 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ device_type = "qe"; ++ compatible = "fsl,qe", "simple-bus"; ++ ranges = <0x0 0x0 0x2400000 0x40000>; ++ reg = <0x0 0x2400000 0x0 0x480>; ++ brg-frequency = <100000000>; ++ bus-frequency = <200000000>; ++ ++ fsl,qe-num-riscs = <1>; ++ fsl,qe-num-snums = <28>; ++ ++ qeic: qeic@80 { ++ compatible = "fsl,qe-ic"; ++ reg = <0x80 0x80>; ++ #address-cells = <0>; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ interrupts = <0 77 0x04 0 77 0x04>; ++ }; ++ ++ si1: si@700 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "fsl,ls1043-qe-si", ++ "fsl,t1040-qe-si"; ++ reg = <0x700 0x80>; ++ }; ++ ++ siram1: siram@1000 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "fsl,ls1043-qe-siram", ++ "fsl,t1040-qe-siram"; ++ reg = <0x1000 0x800>; ++ }; ++ ++ ucc@2000 { ++ cell-index = <1>; ++ reg = <0x2000 0x200>; ++ interrupts = <32>; ++ interrupt-parent = <&qeic>; ++ }; ++ ++ ucc@2200 { ++ cell-index = <3>; ++ reg = <0x2200 0x200>; ++ interrupts = <34>; ++ interrupt-parent = <&qeic>; ++ }; ++ ++ muram@10000 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "fsl,qe-muram", "fsl,cpm-muram"; ++ ranges = <0x0 0x10000 0x6000>; ++ ++ data-only@0 { ++ compatible = "fsl,qe-muram-data", ++ "fsl,cpm-muram-data"; ++ reg = <0x0 0x6000>; ++ }; ++ }; ++ }; ++ + lpuart0: serial@2950000 { + compatible = "fsl,ls1021a-lpuart"; + reg = <0x0 0x2950000 0x0 0x1000>; +@@ -450,6 +676,16 @@ + status = "disabled"; + }; + ++ ftm0: ftm0@29d0000 { ++ compatible = "fsl,ftm-alarm"; ++ reg = <0x0 0x29d0000 0x0 0x10000>, ++ <0x0 0x1ee2140 0x0 0x4>; ++ reg-names = "ftm", "FlexTimer1"; ++ interrupts = <0 86 0x4>; ++ big-endian; ++ status = "okay"; ++ }; ++ + wdog0: wdog@2ad0000 { + compatible = "fsl,ls1043a-wdt", "fsl,imx21-wdt"; + reg = <0x0 0x2ad0000 0x0 0x10000>; +@@ -482,6 +718,8 @@ + dr_mode = "host"; + snps,quirk-frame-length-adjustment = <0x20>; + snps,dis_rxdet_inp3_quirk; ++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>; ++ snps,dma-snooping; + }; + + usb1: usb3@3000000 { +@@ -491,6 +729,9 @@ + dr_mode = "host"; + snps,quirk-frame-length-adjustment = 
<0x20>; + snps,dis_rxdet_inp3_quirk; ++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>; ++ snps,dma-snooping; ++ configure-gfladj; + }; + + usb2: usb3@3100000 { +@@ -500,32 +741,52 @@ + dr_mode = "host"; + snps,quirk-frame-length-adjustment = <0x20>; + snps,dis_rxdet_inp3_quirk; ++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>; ++ snps,dma-snooping; ++ configure-gfladj; + }; + + sata: sata@3200000 { + compatible = "fsl,ls1043a-ahci"; +- reg = <0x0 0x3200000 0x0 0x10000>; ++ reg = <0x0 0x3200000 0x0 0x10000>, ++ <0x0 0x20140520 0x0 0x4>; ++ reg-names = "ahci", "sata-ecc"; + interrupts = <0 69 0x4>; + clocks = <&clockgen 4 0>; + dma-coherent; + }; + ++ qdma: qdma@8380000 { ++ compatible = "fsl,ls1021a-qdma", "fsl,ls1043a-qdma"; ++ reg = <0x0 0x8380000 0x0 0x1000>, /* Controller regs */ ++ <0x0 0x8390000 0x0 0x10000>, /* Status regs */ ++ <0x0 0x83a0000 0x0 0x40000>; /* Block regs */ ++ interrupts = <0 152 0x4>, ++ <0 39 0x4>; ++ interrupt-names = "qdma-error", "qdma-queue"; ++ channels = <8>; ++ queues = <2>; ++ status-sizes = <64>; ++ queue-sizes = <64 64>; ++ big-endian; ++ }; ++ + msi1: msi-controller1@1571000 { +- compatible = "fsl,1s1043a-msi"; ++ compatible = "fsl,ls1043a-msi"; + reg = <0x0 0x1571000 0x0 0x8>; + msi-controller; + interrupts = <0 116 0x4>; + }; + + msi2: msi-controller2@1572000 { +- compatible = "fsl,1s1043a-msi"; ++ compatible = "fsl,ls1043a-msi"; + reg = <0x0 0x1572000 0x0 0x8>; + msi-controller; + interrupts = <0 126 0x4>; + }; + + msi3: msi-controller3@1573000 { +- compatible = "fsl,1s1043a-msi"; ++ compatible = "fsl,ls1043a-msi"; + reg = <0x0 0x1573000 0x0 0x8>; + msi-controller; + interrupts = <0 160 0x4>; +@@ -536,9 +797,9 @@ + reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */ + 0x40 0x00000000 0x0 0x00002000>; /* configuration space */ + reg-names = "regs", "config"; +- interrupts = <0 118 0x4>, /* controller interrupt */ +- <0 117 0x4>; /* PME interrupt */ +- interrupt-names = "intr", "pme"; ++ interrupts = <0 117 0x4>, /* PME interrupt */ ++ <0 118 0x4>; /* aer interrupt */ ++ interrupt-names = "pme", "aer"; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; +@@ -547,7 +808,7 @@ + bus-range = <0x0 0xff>; + ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ + 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ +- msi-parent = <&msi1>; ++ msi-parent = <&msi1>, <&msi2>, <&msi3>; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0000 0 0 1 &gic 0 110 0x4>, +@@ -561,9 +822,9 @@ + reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */ + 0x48 0x00000000 0x0 0x00002000>; /* configuration space */ + reg-names = "regs", "config"; +- interrupts = <0 128 0x4>, +- <0 127 0x4>; +- interrupt-names = "intr", "pme"; ++ interrupts = <0 127 0x4>, ++ <0 128 0x4>; ++ interrupt-names = "pme", "aer"; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; +@@ -572,7 +833,7 @@ + bus-range = <0x0 0xff>; + ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ + 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ +- msi-parent = <&msi2>; ++ msi-parent = <&msi1>, <&msi2>, <&msi3>; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0000 0 0 1 &gic 0 120 0x4>, +@@ -586,9 +847,9 @@ + reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */ + 0x50 0x00000000 0x0 0x00002000>; /* configuration space */ + reg-names = 
"regs", "config"; +- interrupts = <0 162 0x4>, +- <0 161 0x4>; +- interrupt-names = "intr", "pme"; ++ interrupts = <0 161 0x4>, ++ <0 162 0x4>; ++ interrupt-names = "pme", "aer"; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; +@@ -597,7 +858,7 @@ + bus-range = <0x0 0xff>; + ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */ + 0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ +- msi-parent = <&msi3>; ++ msi-parent = <&msi1>, <&msi2>, <&msi3>; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0000 0 0 1 &gic 0 154 0x4>, +@@ -608,3 +869,6 @@ + }; + + }; ++ ++#include "qoriq-qman1-portals.dtsi" ++#include "qoriq-bman1-portals.dtsi" +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi +@@ -0,0 +1,48 @@ ++/* ++ * QorIQ FMan v3 device tree nodes for ls1046 ++ * ++ * Copyright 2015-2016 Freescale Semiconductor Inc. ++ * ++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) ++ */ ++ ++&soc { ++ ++/* include used FMan blocks */ ++#include "qoriq-fman3-0.dtsi" ++#include "qoriq-fman3-0-1g-0.dtsi" ++#include "qoriq-fman3-0-1g-1.dtsi" ++#include "qoriq-fman3-0-1g-2.dtsi" ++#include "qoriq-fman3-0-1g-3.dtsi" ++#include "qoriq-fman3-0-1g-4.dtsi" ++#include "qoriq-fman3-0-1g-5.dtsi" ++#include "qoriq-fman3-0-10g-0.dtsi" ++#include "qoriq-fman3-0-10g-1.dtsi" ++}; ++ ++&fman0 { ++ /* these aliases provide the FMan ports mapping */ ++ enet0: ethernet@e0000 { ++ }; ++ ++ enet1: ethernet@e2000 { ++ }; ++ ++ enet2: ethernet@e4000 { ++ }; ++ ++ enet3: ethernet@e6000 { ++ }; ++ ++ enet4: ethernet@e8000 { ++ }; ++ ++ enet5: ethernet@ea000 { ++ }; ++ ++ enet6: ethernet@f0000 { ++ }; ++ ++ enet7: ethernet@f2000 { ++ }; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds-sdk.dts +@@ -0,0 +1,109 @@ ++/* ++ * Device Tree Include file for Freescale Layerscape-1046A family SoC. ++ * ++ * Copyright 2014-2015 Freescale Semiconductor, Inc. ++ * ++ * Mingkai Hu ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "fsl-ls1046a-qds.dts" ++ ++&bman_fbpr { ++ compatible = "fsl,bman-fbpr"; ++ alloc-ranges = <0 0 0x10000 0>; ++}; ++&qman_fqd { ++ compatible = "fsl,qman-fqd"; ++ alloc-ranges = <0 0 0x10000 0>; ++}; ++&qman_pfdr { ++ compatible = "fsl,qman-pfdr"; ++ alloc-ranges = <0 0 0x10000 0>; ++}; ++ ++&soc { ++#include "qoriq-dpaa-eth.dtsi" ++#include "qoriq-fman3-0-6oh.dtsi" ++}; ++ ++&fsldpaa { ++ ethernet@9 { ++ compatible = "fsl,dpa-ethernet"; ++ fsl,fman-mac = <&enet7>; ++ }; ++}; ++ ++&fman0 { ++ compatible = "fsl,fman", "simple-bus"; ++}; ++ ++&dspi { ++ bus-num = <0>; ++ status = "okay"; ++ ++ flash@0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "n25q128a11", "jedec,spi-nor"; ++ reg = <0>; ++ spi-max-frequency = <10000000>; ++ }; ++ ++ flash@1 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "sst25wf040b", "jedec,spi-nor"; ++ spi-cpol; ++ spi-cpha; ++ reg = <1>; ++ spi-max-frequency = <10000000>; ++ }; ++ ++ flash@2 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "en25s64", "jedec,spi-nor"; ++ spi-cpol; ++ spi-cpha; ++ reg = <2>; ++ spi-max-frequency = <10000000>; ++ }; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts +@@ -0,0 +1,363 @@ ++/* ++ * Device Tree Include file for Freescale Layerscape-1046A family SoC. ++ * ++ * Copyright 2016 Freescale Semiconductor, Inc. ++ * ++ * Shaohui Xie ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++/dts-v1/; ++ ++#include "fsl-ls1046a.dtsi" ++ ++/ { ++ model = "LS1046A QDS Board"; ++ compatible = "fsl,ls1046a-qds", "fsl,ls1046a"; ++ ++ aliases { ++ gpio0 = &gpio0; ++ gpio1 = &gpio1; ++ gpio2 = &gpio2; ++ gpio3 = &gpio3; ++ serial0 = &duart0; ++ serial1 = &duart1; ++ serial2 = &duart2; ++ serial3 = &duart3; ++ ++ emi1_slot1 = &ls1046mdio_s1; ++ emi1_slot2 = &ls1046mdio_s2; ++ emi1_slot4 = &ls1046mdio_s4; ++ ++ sgmii_s1_p1 = &sgmii_phy_s1_p1; ++ sgmii_s1_p2 = &sgmii_phy_s1_p2; ++ sgmii_s1_p3 = &sgmii_phy_s1_p3; ++ sgmii_s1_p4 = &sgmii_phy_s1_p4; ++ sgmii_s4_p1 = &sgmii_phy_s4_p1; ++ qsgmii_s2_p1 = &qsgmii_phy_s2_p1; ++ qsgmii_s2_p2 = &qsgmii_phy_s2_p2; ++ qsgmii_s2_p3 = &qsgmii_phy_s2_p3; ++ qsgmii_s2_p4 = &qsgmii_phy_s2_p4; ++ }; ++ ++ chosen { ++ stdout-path = "serial0:115200n8"; ++ }; ++}; ++ ++&dspi { ++ bus-num = <0>; ++ status = "okay"; ++ ++ flash@0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "n25q128a11", "jedec,spi-nor"; ++ reg = <0>; ++ spi-max-frequency = <10000000>; ++ }; ++ ++ flash@1 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "sst25wf040b", "jedec,spi-nor"; ++ spi-cpol; ++ spi-cpha; ++ reg = <1>; ++ spi-max-frequency = <10000000>; ++ }; ++ ++ flash@2 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "en25s64", "jedec,spi-nor"; ++ spi-cpol; ++ spi-cpha; ++ reg = <2>; ++ spi-max-frequency = <10000000>; ++ }; ++}; ++ ++&duart0 { ++ status = "okay"; ++}; ++ ++&duart1 { ++ status = "okay"; ++}; ++ ++&i2c0 { ++ status = "okay"; ++ ++ pca9547@77 { ++ compatible = "nxp,pca9547"; ++ reg = <0x77>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ i2c@2 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x2>; ++ ++ ina220@40 { ++ compatible = "ti,ina220"; ++ reg = <0x40>; ++ shunt-resistor = <1000>; ++ }; ++ ++ ina220@41 { ++ compatible = "ti,ina220"; ++ reg = <0x41>; ++ shunt-resistor = <1000>; ++ }; ++ }; ++ ++ i2c@3 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x3>; ++ ++ rtc@51 { ++ compatible = "nxp,pcf2129"; ++ reg = <0x51>; ++ /* IRQ10_B */ ++ interrupts = <0 150 0x4>; ++ }; ++ ++ eeprom@56 { ++ compatible = "atmel,24c512"; ++ reg = <0x56>; ++ }; ++ ++ eeprom@57 { ++ compatible = "atmel,24c512"; ++ reg = <0x57>; ++ }; ++ ++ temp-sensor@4c { ++ compatible = "adi,adt7461a"; ++ reg = <0x4c>; ++ }; ++ }; ++ }; ++}; ++ ++&ifc { ++ #address-cells = <2>; ++ #size-cells = <1>; ++ /* NOR, NAND Flashes and FPGA on board */ ++ ranges = <0x0 0x0 0x0 0x60000000 0x08000000 ++ 0x1 0x0 0x0 0x7e800000 0x00010000 ++ 0x2 0x0 0x0 0x7fb00000 0x00000100>; ++ status = "okay"; ++ ++ nor@0,0 { ++ compatible = "cfi-flash"; ++ reg = <0x0 0x0 0x8000000>; ++ bank-width = <2>; ++ device-width = <1>; ++ }; ++ ++ nand@1,0 { ++ compatible = "fsl,ifc-nand"; ++ reg = <0x1 0x0 0x10000>; ++ }; ++ ++ fpga: board-control@2,0 { ++ compatible = "fsl,ls1046aqds-fpga", "fsl,fpga-qixis", "simple-bus"; ++ reg = <0x2 0x0 0x0000100>; ++ ranges = <0 2 0 0x100>; ++ }; ++}; ++ ++&lpuart0 { ++ status = "okay"; ++}; ++ ++&qspi { ++ num-cs = <2>; ++ bus-num = <0>; ++ status = "okay"; ++ ++ qflash0: s25fl128s@0 { ++ compatible = "spansion,m25p80"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ spi-max-frequency = <20000000>; ++ reg = <0>; ++ }; ++}; ++ 
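++/*
++ * The fsl-ls1046-post.dtsi include below instantiates the FMan v3
++ * blocks under &soc and labels the MAC ports enet0..enet7; a board
++ * file then only has to bind each port to its PHY. A minimal sketch
++ * of the override pattern used below (labels as defined in this file):
++ *
++ *	&fman0 {
++ *		ethernet@e0000 {
++ *			phy-handle = <&qsgmii_phy_s2_p1>;
++ *			phy-connection-type = "sgmii";
++ *		};
++ *	};
++ */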
++#include "fsl-ls1046-post.dtsi" ++ ++&fman0 { ++ ethernet@e0000 { ++ phy-handle = <&qsgmii_phy_s2_p1>; ++ phy-connection-type = "sgmii"; ++ }; ++ ++ ethernet@e2000 { ++ phy-handle = <&sgmii_phy_s4_p1>; ++ phy-connection-type = "sgmii"; ++ }; ++ ++ ethernet@e4000 { ++ phy-handle = <&rgmii_phy1>; ++ phy-connection-type = "rgmii"; ++ }; ++ ++ ethernet@e6000 { ++ phy-handle = <&rgmii_phy2>; ++ phy-connection-type = "rgmii"; ++ }; ++ ++ ethernet@e8000 { ++ phy-handle = <&sgmii_phy_s1_p3>; ++ phy-connection-type = "sgmii"; ++ }; ++ ++ ethernet@ea000 { ++ phy-handle = <&sgmii_phy_s1_p4>; ++ phy-connection-type = "sgmii"; ++ }; ++ ++ ethernet@f0000 { /* DTSEC9/10GEC1 */ ++ phy-handle = <&sgmii_phy_s1_p1>; ++ phy-connection-type = "xgmii"; ++ }; ++ ++ ethernet@f2000 { /* DTSEC10/10GEC2 */ ++ phy-handle = <&sgmii_phy_s1_p2>; ++ phy-connection-type = "xgmii"; ++ }; ++}; ++ ++&fpga { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ mdio-mux-emi1 { ++ compatible = "mdio-mux-mmioreg", "mdio-mux"; ++ mdio-parent-bus = <&mdio0>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x54 1>; /* BRDCFG4 */ ++ mux-mask = <0xe0>; /* EMI1 */ ++ ++ /* On-board RGMII1 PHY */ ++ ls1046mdio0: mdio@0 { ++ reg = <0>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ rgmii_phy1: ethernet-phy@1 { /* MAC3 */ ++ reg = <0x1>; ++ }; ++ }; ++ ++ /* On-board RGMII2 PHY */ ++ ls1046mdio1: mdio@1 { ++ reg = <0x20>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ rgmii_phy2: ethernet-phy@2 { /* MAC4 */ ++ reg = <0x2>; ++ }; ++ }; ++ ++ /* Slot 1 */ ++ ls1046mdio_s1: mdio@2 { ++ reg = <0x40>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ status = "disabled"; ++ ++ sgmii_phy_s1_p1: ethernet-phy@1c { ++ reg = <0x1c>; ++ }; ++ ++ sgmii_phy_s1_p2: ethernet-phy@1d { ++ reg = <0x1d>; ++ }; ++ ++ sgmii_phy_s1_p3: ethernet-phy@1e { ++ reg = <0x1e>; ++ }; ++ ++ sgmii_phy_s1_p4: ethernet-phy@1f { ++ reg = <0x1f>; ++ }; ++ }; ++ ++ /* Slot 2 */ ++ ls1046mdio_s2: mdio@3 { ++ reg = <0x60>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ status = "disabled"; ++ ++ qsgmii_phy_s2_p1: ethernet-phy@8 { ++ reg = <0x8>; ++ }; ++ qsgmii_phy_s2_p2: ethernet-phy@9 { ++ reg = <0x9>; ++ }; ++ qsgmii_phy_s2_p3: ethernet-phy@a { ++ reg = <0xa>; ++ }; ++ qsgmii_phy_s2_p4: ethernet-phy@b { ++ reg = <0xb>; ++ }; ++ }; ++ ++ /* Slot 4 */ ++ ls1046mdio_s4: mdio@5 { ++ reg = <0x80>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ status = "disabled"; ++ ++ sgmii_phy_s4_p1: ethernet-phy@1c { ++ reg = <0x1c>; ++ }; ++ }; ++ }; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-sdk.dts +@@ -0,0 +1,76 @@ ++/* ++ * Device Tree Include file for Freescale Layerscape-1046A family SoC. ++ * ++ * Copyright 2014-2015 Freescale Semiconductor, Inc. ++ * ++ * Mingkai Hu ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "fsl-ls1046a-rdb.dts" ++ ++&bman_fbpr { ++ compatible = "fsl,bman-fbpr"; ++ alloc-ranges = <0 0 0x10000 0>; ++}; ++&qman_fqd { ++ compatible = "fsl,qman-fqd"; ++ alloc-ranges = <0 0 0x10000 0>; ++}; ++&qman_pfdr { ++ compatible = "fsl,qman-pfdr"; ++ alloc-ranges = <0 0 0x10000 0>; ++}; ++ ++&soc { ++#include "qoriq-dpaa-eth.dtsi" ++#include "qoriq-fman3-0-6oh.dtsi" ++}; ++ ++&fsldpaa { ++ ethernet@9 { ++ compatible = "fsl,dpa-ethernet"; ++ fsl,fman-mac = <&enet7>; ++ }; ++}; ++ ++&fman0 { ++ compatible = "fsl,fman", "simple-bus"; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-usdpaa.dts +@@ -0,0 +1,110 @@ ++/* ++ * Device Tree Include file for Freescale Layerscape-1046A family SoC. ++ * ++ * Copyright 2016 Freescale Semiconductor, Inc. ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. 
++ */ ++ ++#include "fsl-ls1046a-rdb-sdk.dts" ++ ++&soc { ++ bp7: buffer-pool@7 { ++ compatible = "fsl,ls1046a-bpool", "fsl,bpool"; ++ fsl,bpid = <7>; ++ fsl,bpool-ethernet-cfg = <0 0 0 192 0 0xdeadbeef>; ++ fsl,bpool-thresholds = <0x400 0xc00 0x0 0x0>; ++ }; ++ ++ bp8: buffer-pool@8 { ++ compatible = "fsl,ls1046a-bpool", "fsl,bpool"; ++ fsl,bpid = <8>; ++ fsl,bpool-ethernet-cfg = <0 0 0 576 0 0xabbaf00d>; ++ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; ++ }; ++ ++ bp9: buffer-pool@9 { ++ compatible = "fsl,ls1046a-bpool", "fsl,bpool"; ++ fsl,bpid = <9>; ++ fsl,bpool-ethernet-cfg = <0 0 0 2048 0 0xfeedabba>; ++ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; ++ }; ++ ++ fsl,dpaa { ++ compatible = "fsl,ls1046a", "fsl,dpaa", "simple-bus"; ++ ++ ethernet@2 { ++ compatible = "fsl,dpa-ethernet-init"; ++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; ++ fsl,qman-frame-queues-rx = <0x54 1 0x55 1>; ++ fsl,qman-frame-queues-tx = <0x74 1 0x75 1>; ++ }; ++ ++ ethernet@3 { ++ compatible = "fsl,dpa-ethernet-init"; ++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; ++ fsl,qman-frame-queues-rx = <0x56 1 0x57 1>; ++ fsl,qman-frame-queues-tx = <0x76 1 0x77 1>; ++ }; ++ ++ ethernet@4 { ++ compatible = "fsl,dpa-ethernet-init"; ++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; ++ fsl,qman-frame-queues-rx = <0x58 1 0x59 1>; ++ fsl,qman-frame-queues-tx = <0x78 1 0x79 1>; ++ }; ++ ++ ethernet@5 { ++ compatible = "fsl,dpa-ethernet-init"; ++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; ++ fsl,qman-frame-queues-rx = <0x5a 1 0x5b 1>; ++ fsl,qman-frame-queues-tx = <0x7a 1 0x7b 1>; ++ }; ++ ++ ethernet@8 { ++ compatible = "fsl,dpa-ethernet-init"; ++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; ++ fsl,qman-frame-queues-rx = <0x5c 1 0x5d 1>; ++ fsl,qman-frame-queues-tx = <0x7c 1 0x7d 1>; ++ }; ++ ++ ethernet@9 { ++ compatible = "fsl,dpa-ethernet-init"; ++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; ++ fsl,qman-frame-queues-rx = <0x5e 1 0x5f 1>; ++ fsl,qman-frame-queues-tx = <0x7e 1 0x7f 1>; ++ }; ++ ++ dpa-fman0-oh@2 { ++ compatible = "fsl,dpa-oh"; ++ /* Define frame queues for the OH port*/ ++ /* */ ++ fsl,qman-frame-queues-oh = <0x60 1 0x61 1>; ++ fsl,fman-oh-port = <&fman0_oh2>; ++ }; ++ }; ++}; ++/ { ++ reserved-memory { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ usdpaa_mem: usdpaa_mem { ++ compatible = "fsl,usdpaa-mem"; ++ alloc-ranges = <0 0 0x10000 0>; ++ size = <0 0x10000000>; ++ alignment = <0 0x10000000>; ++ }; ++ }; ++}; ++ ++&fman0 { ++ fman0_oh2: port@83000 { ++ cell-index = <1>; ++ compatible = "fsl,fman-port-oh"; ++ reg = <0x83000 0x1000>; ++ }; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts +@@ -0,0 +1,218 @@ ++/* ++ * Device Tree Include file for Freescale Layerscape-1046A family SoC. ++ * ++ * Copyright 2016 Freescale Semiconductor, Inc. ++ * ++ * Mingkai Hu ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/dts-v1/;
++
++#include "fsl-ls1046a.dtsi"
++
++/ {
++	model = "LS1046A RDB Board";
++	compatible = "fsl,ls1046a-rdb", "fsl,ls1046a";
++
++	aliases {
++		serial0 = &duart0;
++		serial1 = &duart1;
++		serial2 = &duart2;
++		serial3 = &duart3;
++	};
++
++	chosen {
++		stdout-path = "serial0:115200n8";
++	};
++};
++
++&esdhc {
++	mmc-hs200-1_8v;
++	sd-uhs-sdr104;
++	sd-uhs-sdr50;
++	sd-uhs-sdr25;
++	sd-uhs-sdr12;
++};
++
++&duart0 {
++	status = "okay";
++};
++
++&duart1 {
++	status = "okay";
++};
++
++&i2c0 {
++	status = "okay";
++
++	ina220@40 {
++		compatible = "ti,ina220";
++		reg = <0x40>;
++		shunt-resistor = <1000>;
++	};
++
++	temp-sensor@4c {
++		compatible = "adi,adt7461";
++		reg = <0x4c>;
++	};
++
++	eeprom@52 {
++		compatible = "atmel,24c512";
++		reg = <0x52>;
++	};
++
++	eeprom@53 {
++		compatible = "atmel,24c512";
++		reg = <0x53>;
++	};
++};
++
++&i2c3 {
++	status = "okay";
++
++	rtc@51 {
++		compatible = "nxp,pcf2129";
++		reg = <0x51>;
++	};
++};
++
++&ifc {
++	#address-cells = <2>;
++	#size-cells = <1>;
++	/* NAND Flash and CPLD on board */
++	ranges = <0x0 0x0 0x0 0x7e800000 0x00010000
++		  0x2 0x0 0x0 0x7fb00000 0x00000100>;
++	status = "okay";
++
++	nand@0,0 {
++		compatible = "fsl,ifc-nand";
++		#address-cells = <1>;
++		#size-cells = <1>;
++		reg = <0x0 0x0 0x10000>;
++	};
++
++	cpld: board-control@2,0 {
++		compatible = "fsl,ls1046ardb-cpld";
++		reg = <0x2 0x0 0x0000100>;
++	};
++};
++
++&qspi {
++	num-cs = <2>;
++	bus-num = <0>;
++	status = "okay";
++
++	qflash0: s25fs512s@0 {
++		compatible = "spansion,m25p80";
++		#address-cells = <1>;
++		#size-cells = <1>;
++		spi-max-frequency = <20000000>;
++		reg = <0>;
++	};
++
++	qflash1: s25fs512s@1 {
++		compatible = "spansion,m25p80";
++		#address-cells = <1>;
++		#size-cells = <1>;
++		spi-max-frequency = <20000000>;
++		reg = <1>;
++	};
++};
++
++#include "fsl-ls1046-post.dtsi"
++
++&fman0 {
++	ethernet@e4000 {
++		phy-handle = <&rgmii_phy1>;
++		phy-connection-type = "rgmii";
++	};
++
++	ethernet@e6000 {
++		phy-handle = <&rgmii_phy2>;
++		phy-connection-type = "rgmii";
++	};
++
++	ethernet@e8000 {
++		phy-handle = <&sgmii_phy1>;
++		phy-connection-type = "sgmii";
++	};
++
++	ethernet@ea000 {
++		phy-handle = <&sgmii_phy2>;
++		phy-connection-type = "sgmii";
++	};
++
++	ethernet@f0000 { /* 10GEC1 */
++		phy-handle = <&aqr106_phy>;
++		phy-connection-type =
"xgmii"; ++ }; ++ ++ ethernet@f2000 { /* 10GEC2 */ ++ fixed-link = <0 1 1000 0 0>; ++ phy-connection-type = "xgmii"; ++ }; ++ ++ mdio@fc000 { ++ rgmii_phy1: ethernet-phy@1 { ++ reg = <0x1>; ++ }; ++ ++ rgmii_phy2: ethernet-phy@2 { ++ reg = <0x2>; ++ }; ++ ++ sgmii_phy1: ethernet-phy@3 { ++ reg = <0x3>; ++ }; ++ ++ sgmii_phy2: ethernet-phy@4 { ++ reg = <0x4>; ++ }; ++ }; ++ ++ mdio@fd000 { ++ aqr106_phy: ethernet-phy@0 { ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ interrupts = <0 131 4>; ++ reg = <0x0>; ++ }; ++ }; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi +@@ -0,0 +1,793 @@ ++/* ++ * Device Tree Include file for Freescale Layerscape-1046A family SoC. ++ * ++ * Copyright 2016 Freescale Semiconductor, Inc. ++ * ++ * Mingkai Hu ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#include ++#include ++ ++/ { ++ compatible = "fsl,ls1046a"; ++ interrupt-parent = <&gic>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ aliases { ++ crypto = &crypto; ++ fman0 = &fman0; ++ ethernet0 = &enet0; ++ ethernet1 = &enet1; ++ ethernet2 = &enet2; ++ ethernet3 = &enet3; ++ ethernet4 = &enet4; ++ ethernet5 = &enet5; ++ ethernet6 = &enet6; ++ ethernet7 = &enet7; ++ }; ++ ++ cpus { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ cpu0: cpu@0 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a72"; ++ reg = <0x0>; ++ clocks = <&clockgen 1 0>; ++ next-level-cache = <&l2>; ++ cpu-idle-states = <&CPU_PH20>; ++ #cooling-cells = <2>; ++ }; ++ ++ cpu1: cpu@1 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a72"; ++ reg = <0x1>; ++ clocks = <&clockgen 1 0>; ++ next-level-cache = <&l2>; ++ cpu-idle-states = <&CPU_PH20>; ++ }; ++ ++ cpu2: cpu@2 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a72"; ++ reg = <0x2>; ++ clocks = <&clockgen 1 0>; ++ next-level-cache = <&l2>; ++ cpu-idle-states = <&CPU_PH20>; ++ }; ++ ++ cpu3: cpu@3 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a72"; ++ reg = <0x3>; ++ clocks = <&clockgen 1 0>; ++ next-level-cache = <&l2>; ++ cpu-idle-states = <&CPU_PH20>; ++ }; ++ ++ l2: l2-cache { ++ compatible = "cache"; ++ }; ++ }; ++ ++ idle-states { ++ /* ++ * PSCI node is not added default, U-boot will add missing ++ * parts if it determines to use PSCI. ++ */ ++ entry-method = "arm,psci"; ++ ++ CPU_PH20: cpu-ph20 { ++ compatible = "arm,idle-state"; ++ idle-state-name = "PH20"; ++ arm,psci-suspend-param = <0x0>; ++ entry-latency-us = <1000>; ++ exit-latency-us = <1000>; ++ min-residency-us = <3000>; ++ }; ++ }; ++ ++ memory@80000000 { ++ device_type = "memory"; ++ }; ++ ++ sysclk: sysclk { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <100000000>; ++ clock-output-names = "sysclk"; ++ }; ++ ++ reboot { ++ compatible ="syscon-reboot"; ++ regmap = <&dcfg>; ++ offset = <0xb0>; ++ mask = <0x02>; ++ }; ++ ++ timer { ++ compatible = "arm,armv8-timer"; ++ interrupts = , ++ , ++ , ++ ; ++ }; ++ ++ pmu { ++ compatible = "arm,cortex-a72-pmu"; ++ interrupts = , ++ , ++ , ++ ; ++ interrupt-affinity = <&cpu0>, ++ <&cpu1>, ++ <&cpu2>, ++ <&cpu3>; ++ }; ++ ++ gic: interrupt-controller@1400000 { ++ compatible = "arm,gic-400"; ++ #interrupt-cells = <3>; ++ interrupt-controller; ++ reg = <0x0 0x1410000 0 0x10000>, /* GICD */ ++ <0x0 0x1420000 0 0x20000>, /* GICC */ ++ <0x0 0x1440000 0 0x20000>, /* GICH */ ++ <0x0 0x1460000 0 0x20000>; /* GICV */ ++ interrupts = ; ++ }; ++ ++ soc: soc { ++ compatible = "simple-bus"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ ddr: memory-controller@1080000 { ++ compatible = "fsl,qoriq-memory-controller"; ++ reg = <0x0 0x1080000 0x0 0x1000>; ++ interrupts = ; ++ big-endian; ++ }; ++ ++ ifc: ifc@1530000 { ++ compatible = "fsl,ifc", "simple-bus"; ++ reg = <0x0 0x1530000 0x0 0x10000>; ++ big-endian; ++ interrupts = ; ++ }; ++ ++ qspi: quadspi@1550000 { ++ compatible = "fsl,ls1021a-qspi"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x1550000 0x0 0x10000>, ++ <0x0 0x40000000 0x0 0x10000000>; ++ reg-names = "QuadSPI", "QuadSPI-memory"; ++ interrupts = ; ++ clock-names = "qspi_en", "qspi"; ++ clocks = <&clockgen 4 1>, <&clockgen 4 1>; ++ big-endian; ++ fsl,qspi-has-second-chip; ++ status = "disabled"; ++ }; ++ ++ esdhc: esdhc@1560000 { ++ compatible = "fsl,ls1046a-esdhc", "fsl,esdhc"; ++ reg = <0x0 0x1560000 0x0 0x10000>; ++ interrupts = ; ++ clocks = <&clockgen 2 
1>; ++ voltage-ranges = <1800 1800 3300 3300>; ++ sdhci,auto-cmd12; ++ big-endian; ++ bus-width = <4>; ++ }; ++ ++ scfg: scfg@1570000 { ++ compatible = "fsl,ls1046a-scfg", "syscon"; ++ reg = <0x0 0x1570000 0x0 0x10000>; ++ big-endian; ++ }; ++ ++ crypto: crypto@1700000 { ++ compatible = "fsl,sec-v5.4", "fsl,sec-v5.0", ++ "fsl,sec-v4.0"; ++ fsl,sec-era = <8>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges = <0x0 0x00 0x1700000 0x100000>; ++ reg = <0x00 0x1700000 0x0 0x100000>; ++ interrupts = ; ++ ++ sec_jr0: jr@10000 { ++ compatible = "fsl,sec-v5.4-job-ring", ++ "fsl,sec-v5.0-job-ring", ++ "fsl,sec-v4.0-job-ring"; ++ reg = <0x10000 0x10000>; ++ interrupts = ; ++ }; ++ ++ sec_jr1: jr@20000 { ++ compatible = "fsl,sec-v5.4-job-ring", ++ "fsl,sec-v5.0-job-ring", ++ "fsl,sec-v4.0-job-ring"; ++ reg = <0x20000 0x10000>; ++ interrupts = ; ++ }; ++ ++ sec_jr2: jr@30000 { ++ compatible = "fsl,sec-v5.4-job-ring", ++ "fsl,sec-v5.0-job-ring", ++ "fsl,sec-v4.0-job-ring"; ++ reg = <0x30000 0x10000>; ++ interrupts = ; ++ }; ++ ++ sec_jr3: jr@40000 { ++ compatible = "fsl,sec-v5.4-job-ring", ++ "fsl,sec-v5.0-job-ring", ++ "fsl,sec-v4.0-job-ring"; ++ reg = <0x40000 0x10000>; ++ interrupts = ; ++ }; ++ }; ++ ++ qman: qman@1880000 { ++ compatible = "fsl,qman"; ++ reg = <0x00 0x1880000 0x0 0x10000>; ++ interrupts = <0 45 0x4>; ++ memory-region = <&qman_fqd &qman_pfdr>; ++ ++ }; ++ ++ bman: bman@1890000 { ++ compatible = "fsl,bman"; ++ reg = <0x00 0x1890000 0x0 0x10000>; ++ interrupts = <0 45 0x4>; ++ memory-region = <&bman_fbpr>; ++ ++ }; ++ ++ qportals: qman-portals@500000000 { ++ ranges = <0x0 0x5 0x00000000 0x8000000>; ++ }; ++ ++ bportals: bman-portals@508000000 { ++ ranges = <0x0 0x5 0x08000000 0x8000000>; ++ }; ++ ++ dcfg: dcfg@1ee0000 { ++ compatible = "fsl,ls1046a-dcfg", "syscon"; ++ reg = <0x0 0x1ee0000 0x0 0x1000>; ++ big-endian; ++ }; ++ ++ clockgen: clocking@1ee1000 { ++ compatible = "fsl,ls1046a-clockgen"; ++ reg = <0x0 0x1ee1000 0x0 0x1000>; ++ #clock-cells = <2>; ++ clocks = <&sysclk>; ++ }; ++ ++ tmu: tmu@1f00000 { ++ compatible = "fsl,qoriq-tmu"; ++ reg = <0x0 0x1f00000 0x0 0x10000>; ++ interrupts = <0 33 0x4>; ++ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>; ++ fsl,tmu-calibration = ++ /* Calibration data group 1 */ ++ <0x00000000 0x00000026 ++ 0x00000001 0x0000002d ++ 0x00000002 0x00000032 ++ 0x00000003 0x00000039 ++ 0x00000004 0x0000003f ++ 0x00000005 0x00000046 ++ 0x00000006 0x0000004d ++ 0x00000007 0x00000054 ++ 0x00000008 0x0000005a ++ 0x00000009 0x00000061 ++ 0x0000000a 0x0000006a ++ 0x0000000b 0x00000071 ++ /* Calibration data group 2 */ ++ 0x00010000 0x00000025 ++ 0x00010001 0x0000002c ++ 0x00010002 0x00000035 ++ 0x00010003 0x0000003d ++ 0x00010004 0x00000045 ++ 0x00010005 0x0000004e ++ 0x00010006 0x00000057 ++ 0x00010007 0x00000061 ++ 0x00010008 0x0000006b ++ 0x00010009 0x00000076 ++ /* Calibration data group 3 */ ++ 0x00020000 0x00000029 ++ 0x00020001 0x00000033 ++ 0x00020002 0x0000003d ++ 0x00020003 0x00000049 ++ 0x00020004 0x00000056 ++ 0x00020005 0x00000061 ++ 0x00020006 0x0000006d ++ /* Calibration data group 4 */ ++ 0x00030000 0x00000021 ++ 0x00030001 0x0000002a ++ 0x00030002 0x0000003c ++ 0x00030003 0x0000004e>; ++ big-endian; ++ #thermal-sensor-cells = <1>; ++ }; ++ ++ thermal-zones { ++ cpu_thermal: cpu-thermal { ++ polling-delay-passive = <1000>; ++ polling-delay = <5000>; ++ thermal-sensors = <&tmu 3>; ++ ++ trips { ++ cpu_alert: cpu-alert { ++ temperature = <85000>; ++ hysteresis = <2000>; ++ type = "passive"; ++ }; ++ ++ cpu_crit: cpu-crit { ++ temperature 
= <95000>; ++ hysteresis = <2000>; ++ type = "critical"; ++ }; ++ }; ++ ++ cooling-maps { ++ map0 { ++ trip = <&cpu_alert>; ++ cooling-device = ++ <&cpu0 THERMAL_NO_LIMIT ++ THERMAL_NO_LIMIT>; ++ }; ++ }; ++ }; ++ }; ++ ++ dspi: dspi@2100000 { ++ compatible = "fsl,ls1021a-v1.0-dspi"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2100000 0x0 0x10000>; ++ interrupts = ; ++ clock-names = "dspi"; ++ clocks = <&clockgen 4 1>; ++ spi-num-chipselects = <5>; ++ big-endian; ++ status = "disabled"; ++ }; ++ ++ i2c0: i2c@2180000 { ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2180000 0x0 0x10000>; ++ interrupts = ; ++ clocks = <&clockgen 4 1>; ++ dmas = <&edma0 1 39>, ++ <&edma0 1 38>; ++ dma-names = "tx", "rx"; ++ status = "disabled"; ++ }; ++ ++ i2c1: i2c@2190000 { ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2190000 0x0 0x10000>; ++ interrupts = ; ++ clocks = <&clockgen 4 1>; ++ status = "disabled"; ++ }; ++ ++ i2c2: i2c@21a0000 { ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x21a0000 0x0 0x10000>; ++ interrupts = ; ++ clocks = <&clockgen 4 1>; ++ status = "disabled"; ++ }; ++ ++ i2c3: i2c@21b0000 { ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x21b0000 0x0 0x10000>; ++ interrupts = ; ++ clocks = <&clockgen 4 1>; ++ status = "disabled"; ++ }; ++ ++ duart0: serial@21c0500 { ++ compatible = "fsl,ns16550", "ns16550a"; ++ reg = <0x00 0x21c0500 0x0 0x100>; ++ interrupts = ; ++ clocks = <&clockgen 4 1>; ++ }; ++ ++ duart1: serial@21c0600 { ++ compatible = "fsl,ns16550", "ns16550a"; ++ reg = <0x00 0x21c0600 0x0 0x100>; ++ interrupts = ; ++ clocks = <&clockgen 4 1>; ++ }; ++ ++ duart2: serial@21d0500 { ++ compatible = "fsl,ns16550", "ns16550a"; ++ reg = <0x0 0x21d0500 0x0 0x100>; ++ interrupts = ; ++ clocks = <&clockgen 4 1>; ++ }; ++ ++ duart3: serial@21d0600 { ++ compatible = "fsl,ns16550", "ns16550a"; ++ reg = <0x0 0x21d0600 0x0 0x100>; ++ interrupts = ; ++ clocks = <&clockgen 4 1>; ++ }; ++ ++ gpio0: gpio@2300000 { ++ compatible = "fsl,qoriq-gpio"; ++ reg = <0x0 0x2300000 0x0 0x10000>; ++ interrupts = ; ++ gpio-controller; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ gpio1: gpio@2310000 { ++ compatible = "fsl,qoriq-gpio"; ++ reg = <0x0 0x2310000 0x0 0x10000>; ++ interrupts = ; ++ gpio-controller; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ gpio2: gpio@2320000 { ++ compatible = "fsl,qoriq-gpio"; ++ reg = <0x0 0x2320000 0x0 0x10000>; ++ interrupts = ; ++ gpio-controller; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ gpio3: gpio@2330000 { ++ compatible = "fsl,qoriq-gpio"; ++ reg = <0x0 0x2330000 0x0 0x10000>; ++ interrupts = ; ++ gpio-controller; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ lpuart0: serial@2950000 { ++ compatible = "fsl,ls1021a-lpuart"; ++ reg = <0x0 0x2950000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&clockgen 4 0>; ++ clock-names = "ipg"; ++ status = "disabled"; ++ }; ++ ++ lpuart1: serial@2960000 { ++ compatible = "fsl,ls1021a-lpuart"; ++ reg = <0x0 0x2960000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&clockgen 4 1>; ++ clock-names = "ipg"; ++ status = "disabled"; ++ }; ++ ++ lpuart2: serial@2970000 { ++ compatible = "fsl,ls1021a-lpuart"; ++ reg = <0x0 0x2970000 0x0 0x1000>; ++ interrupts = ; ++ clocks = 
<&clockgen 4 1>; ++ clock-names = "ipg"; ++ status = "disabled"; ++ }; ++ ++ lpuart3: serial@2980000 { ++ compatible = "fsl,ls1021a-lpuart"; ++ reg = <0x0 0x2980000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&clockgen 4 1>; ++ clock-names = "ipg"; ++ status = "disabled"; ++ }; ++ ++ lpuart4: serial@2990000 { ++ compatible = "fsl,ls1021a-lpuart"; ++ reg = <0x0 0x2990000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&clockgen 4 1>; ++ clock-names = "ipg"; ++ status = "disabled"; ++ }; ++ ++ lpuart5: serial@29a0000 { ++ compatible = "fsl,ls1021a-lpuart"; ++ reg = <0x0 0x29a0000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&clockgen 4 1>; ++ clock-names = "ipg"; ++ status = "disabled"; ++ }; ++ ++ ftm0: ftm0@29d0000 { ++ compatible = "fsl,ftm-alarm"; ++ reg = <0x0 0x29d0000 0x0 0x10000>, ++ <0x0 0x1ee2140 0x0 0x4>; ++ reg-names = "ftm", "FlexTimer1"; ++ interrupts = ; ++ big-endian; ++ }; ++ ++ wdog0: watchdog@2ad0000 { ++ compatible = "fsl,imx21-wdt"; ++ reg = <0x0 0x2ad0000 0x0 0x10000>; ++ interrupts = ; ++ clocks = <&clockgen 4 1>; ++ big-endian; ++ }; ++ ++ edma0: edma@2c00000 { ++ #dma-cells = <2>; ++ compatible = "fsl,vf610-edma"; ++ reg = <0x0 0x2c00000 0x0 0x10000>, ++ <0x0 0x2c10000 0x0 0x10000>, ++ <0x0 0x2c20000 0x0 0x10000>; ++ interrupts = , ++ ; ++ interrupt-names = "edma-tx", "edma-err"; ++ dma-channels = <32>; ++ big-endian; ++ clock-names = "dmamux0", "dmamux1"; ++ clocks = <&clockgen 4 1>, ++ <&clockgen 4 1>; ++ }; ++ ++ usb0: usb@2f00000 { ++ compatible = "snps,dwc3"; ++ reg = <0x0 0x2f00000 0x0 0x10000>; ++ interrupts = ; ++ dr_mode = "host"; ++ snps,quirk-frame-length-adjustment = <0x20>; ++ snps,dis_rxdet_inp3_quirk; ++ }; ++ ++ usb1: usb@3000000 { ++ compatible = "snps,dwc3"; ++ reg = <0x0 0x3000000 0x0 0x10000>; ++ interrupts = ; ++ dr_mode = "host"; ++ snps,quirk-frame-length-adjustment = <0x20>; ++ snps,dis_rxdet_inp3_quirk; ++ }; ++ ++ usb2: usb@3100000 { ++ compatible = "snps,dwc3"; ++ reg = <0x0 0x3100000 0x0 0x10000>; ++ interrupts = ; ++ dr_mode = "host"; ++ snps,quirk-frame-length-adjustment = <0x20>; ++ snps,dis_rxdet_inp3_quirk; ++ }; ++ ++ sata: sata@3200000 { ++ compatible = "fsl,ls1046a-ahci"; ++ reg = <0x0 0x3200000 0x0 0x10000>, ++ <0x0 0x20140520 0x0 0x4>; ++ reg-names = "ahci", "sata-ecc"; ++ interrupts = ; ++ clocks = <&clockgen 4 1>; ++ dma-coherent; ++ }; ++ ++ qdma: qdma@8380000 { ++ compatible = "fsl,ls1046a-qdma", "fsl,ls1021a-qdma"; ++ reg = <0x0 0x8380000 0x0 0x1000>, /* Controller regs */ ++ <0x0 0x8390000 0x0 0x10000>, /* Status regs */ ++ <0x0 0x83a0000 0x0 0x40000>; /* Block regs */ ++ interrupts = <0 153 0x4>, ++ <0 39 0x4>; ++ interrupt-names = "qdma-error", "qdma-queue"; ++ channels = <8>; ++ queues = <2>; ++ status-sizes = <64>; ++ queue-sizes = <64 64>; ++ big-endian; ++ }; ++ ++ msi1: msi-controller@1580000 { ++ compatible = "fsl,ls1046a-msi"; ++ msi-controller; ++ reg = <0x0 0x1580000 0x0 0x10000>; ++ interrupts = , ++ , ++ , ++ ; ++ }; ++ ++ msi2: msi-controller@1590000 { ++ compatible = "fsl,ls1046a-msi"; ++ msi-controller; ++ reg = <0x0 0x1590000 0x0 0x10000>; ++ interrupts = , ++ , ++ , ++ ; ++ }; ++ ++ msi3: msi-controller@15a0000 { ++ compatible = "fsl,ls1046a-msi"; ++ msi-controller; ++ reg = <0x0 0x15a0000 0x0 0x10000>; ++ interrupts = , ++ , ++ , ++ ; ++ }; ++ ++ pcie@3400000 { ++ compatible = "fsl,ls1046a-pcie", "snps,dw-pcie"; ++ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */ ++ 0x40 0x00000000 0x0 0x00002000>; /* configuration space */ ++ reg-names = "regs", "config"; ++ interrupts = , /* PME interrupt */ 
++ ; /* aer interrupt */ ++ interrupt-names = "pme", "aer"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ dma-coherent; ++ num-lanes = <4>; ++ bus-range = <0x0 0xff>; ++ ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ ++ 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ ++ msi-parent = <&msi1>, <&msi2>, <&msi3>; ++ #interrupt-cells = <1>; ++ interrupt-map-mask = <0 0 0 7>; ++ interrupt-map = <0000 0 0 1 &gic GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>, ++ <0000 0 0 2 &gic GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>, ++ <0000 0 0 3 &gic GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>, ++ <0000 0 0 4 &gic GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>; ++ }; ++ ++ pcie@3500000 { ++ compatible = "fsl,ls1046a-pcie", "snps,dw-pcie"; ++ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */ ++ 0x48 0x00000000 0x0 0x00002000>; /* configuration space */ ++ reg-names = "regs", "config"; ++ interrupts = , ++ ; ++ interrupt-names = "pme", "aer"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ dma-coherent; ++ num-lanes = <2>; ++ bus-range = <0x0 0xff>; ++ ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ ++ 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ ++ msi-parent = <&msi1>, <&msi2>, <&msi3>; ++ #interrupt-cells = <1>; ++ interrupt-map-mask = <0 0 0 7>; ++ interrupt-map = <0000 0 0 1 &gic GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>, ++ <0000 0 0 2 &gic GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>, ++ <0000 0 0 3 &gic GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>, ++ <0000 0 0 4 &gic GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>; ++ }; ++ ++ pcie@3600000 { ++ compatible = "fsl,ls1046a-pcie", "snps,dw-pcie"; ++ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */ ++ 0x50 0x00000000 0x0 0x00002000>; /* configuration space */ ++ reg-names = "regs", "config"; ++ interrupts = , ++ ; ++ interrupt-names = "pme", "aer"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ dma-coherent; ++ num-lanes = <2>; ++ bus-range = <0x0 0xff>; ++ ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */ ++ 0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ ++ msi-parent = <&msi1>, <&msi2>, <&msi3>; ++ #interrupt-cells = <1>; ++ interrupt-map-mask = <0 0 0 7>; ++ interrupt-map = <0000 0 0 1 &gic GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>, ++ <0000 0 0 2 &gic GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>, ++ <0000 0 0 3 &gic GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>, ++ <0000 0 0 4 &gic GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>; ++ }; ++ ++ }; ++ ++ reserved-memory { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ bman_fbpr: bman-fbpr { ++ compatible = "shared-dma-pool"; ++ size = <0 0x1000000>; ++ alignment = <0 0x1000000>; ++ no-map; ++ }; ++ qman_fqd: qman-fqd { ++ compatible = "shared-dma-pool"; ++ size = <0 0x800000>; ++ alignment = <0 0x800000>; ++ no-map; ++ }; ++ qman_pfdr: qman-pfdr { ++ compatible = "shared-dma-pool"; ++ size = <0 0x2000000>; ++ alignment = <0 0x2000000>; ++ no-map; ++ }; ++ }; ++}; ++ ++#include "qoriq-qman1-portals.dtsi" ++#include "qoriq-bman1-portals.dtsi" +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts +@@ -0,0 +1,173 @@ ++/* ++ * Device Tree file for NXP LS1088A QDS Board. ++ * ++ * Copyright 2017 NXP ++ * ++ * Harninder Rai ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. 
Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++/dts-v1/; ++ ++#include "fsl-ls1088a.dtsi" ++ ++/ { ++ model = "LS1088A QDS Board"; ++ compatible = "fsl,ls1088a-qds", "fsl,ls1088a"; ++}; ++ ++&i2c0 { ++ status = "okay"; ++ ++ i2c-switch@77 { ++ compatible = "nxp,pca9547"; ++ reg = <0x77>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ i2c@2 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x2>; ++ ++ ina220@40 { ++ compatible = "ti,ina220"; ++ reg = <0x40>; ++ shunt-resistor = <1000>; ++ }; ++ ++ ina220@41 { ++ compatible = "ti,ina220"; ++ reg = <0x41>; ++ shunt-resistor = <1000>; ++ }; ++ }; ++ ++ i2c@3 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x3>; ++ ++ temp-sensor@4c { ++ compatible = "adi,adt7461a"; ++ reg = <0x4c>; ++ }; ++ ++ rtc@51 { ++ compatible = "nxp,pcf2129"; ++ reg = <0x51>; ++ /* IRQ10_B */ ++ interrupts = <0 150 IRQ_TYPE_LEVEL_HIGH>; ++ }; ++ ++ eeprom@56 { ++ compatible = "atmel,24c512"; ++ reg = <0x56>; ++ }; ++ ++ eeprom@57 { ++ compatible = "atmel,24c512"; ++ reg = <0x57>; ++ }; ++ }; ++ }; ++}; ++ ++&qspi { ++ status = "okay"; ++ qflash0: s25fs512s@0 { ++ compatible = "spansion,m25p80"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ spi-max-frequency = <20000000>; ++ m25p,fast-read; ++ reg = <0>; ++ }; ++ ++ qflash1: s25fs512s@1 { ++ compatible = "spansion,m25p80"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ spi-max-frequency = <20000000>; ++ m25p,fast-read; ++ reg = <1>; ++ }; ++}; ++ ++&ifc { ++ status = "okay"; ++ ++ ranges = <0 0 0x5 0x80000000 0x08000000 ++ 2 0 0x5 0x30000000 0x00010000 ++ 3 0 0x5 0x20000000 0x00010000>; ++ ++ nor@0,0 { ++ compatible = "cfi-flash"; ++ reg = <0x0 0x0 0x8000000>; ++ bank-width = <2>; ++ device-width = <1>; ++ }; ++ ++ nand@2,0 { ++ compatible = "fsl,ifc-nand"; ++ reg = <0x2 0x0 0x10000>; ++ }; ++ ++ fpga: board-control@3,0 { ++ compatible = "fsl,ls1088aqds-fpga", "fsl,fpga-qixis"; ++ reg = <0x3 0x0 0x0000100>; ++ }; ++}; ++ ++&duart0 { ++ status = "okay"; ++}; ++ ++&duart1 { ++ status = "okay"; ++}; ++ ++&esdhc { ++ status = "okay"; ++}; ++ ++&sata { ++ status = "okay"; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts +@@ -0,0 +1,236 @@ ++/* ++ * Device Tree file for NXP LS1088A RDB Board. ++ * ++ * Copyright 2017 NXP ++ * ++ * Harninder Rai ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/dts-v1/;
++
++#include "fsl-ls1088a.dtsi"
++
++/ {
++	model = "LS1088A RDB Board";
++	compatible = "fsl,ls1088a-rdb", "fsl,ls1088a";
++};
++
++&i2c0 {
++	status = "okay";
++
++	i2c-switch@77 {
++		compatible = "nxp,pca9547";
++		reg = <0x77>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		i2c@2 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			reg = <0x2>;
++
++			ina220@40 {
++				compatible = "ti,ina220";
++				reg = <0x40>;
++				shunt-resistor = <1000>;
++			};
++		};
++
++		i2c@3 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			reg = <0x3>;
++
++			temp-sensor@4c {
++				compatible = "adi,adt7461a";
++				reg = <0x4c>;
++			};
++
++			rtc@51 {
++				compatible = "nxp,pcf2129";
++				reg = <0x51>;
++				/* IRQ10_B */
++				interrupts = <0 150 IRQ_TYPE_LEVEL_HIGH>;
++			};
++		};
++	};
++};
++
++&qspi {
++	status = "okay";
++	qflash0: s25fs512s@0 {
++		compatible = "spansion,m25p80";
++		#address-cells = <1>;
++		#size-cells = <1>;
++		m25p,fast-read;
++		spi-max-frequency = <20000000>;
++		reg = <0>;
++	};
++
++	qflash1: s25fs512s@1 {
++		compatible = "spansion,m25p80";
++		#address-cells = <1>;
++		#size-cells = <1>;
++		m25p,fast-read;
++		spi-max-frequency = <20000000>;
++		reg = <1>;
++	};
++};
++
++&ifc {
++	status = "okay";
++
++	ranges = <0 0 0x5 0x30000000 0x00010000
++		  2 0 0x5 0x20000000 0x00010000>;
++
++	nand@0,0 {
++		compatible = "fsl,ifc-nand";
++		reg = <0x0 0x0 0x10000>;
++	};
++
++	fpga: board-control@2,0 {
++		compatible = "fsl,ls1088ardb-fpga", "fsl,fpga-qixis";
++		reg = <0x2 0x0 0x0000100>;
++	};
++};
++
++&duart0 {
++	status = "okay";
++};
++
++&duart1 {
++	status = "okay";
++};
++
++&usb0 {
++	status = "okay";
++};
++
++&usb1 {
++	status = "okay";
++};
++
++&esdhc {
++	status = "okay";
++};
++
++&sata {
++	status = "okay";
++};
++
++&emdio1 {
++	/* Freescale F104 PHY1 */
++	mdio1_phy1: emdio1_phy@1 {
++		reg = <0x1c>;
++		phy-connection-type = "qsgmii";
++	};
++	mdio1_phy2: emdio1_phy@2 {
++		reg = <0x1d>;
++		phy-connection-type = "qsgmii";
++	};
++	mdio1_phy3: emdio1_phy@3 {
++		reg = <0x1e>;
++		phy-connection-type = "qsgmii";
++	};
++	mdio1_phy4: emdio1_phy@4 {
++		reg = <0x1f>;
++		phy-connection-type = "qsgmii";
++	};
++	/* F104 PHY2 */
++	mdio1_phy5: emdio1_phy@5 {
++		reg = <0x0c>;
++		phy-connection-type = "qsgmii";
++	};
++	mdio1_phy6: emdio1_phy@6 {
++		reg = <0x0d>;
++		phy-connection-type = "qsgmii";
++	};
++	mdio1_phy7: emdio1_phy@7 {
++		reg = <0x0e>;
++
phy-connection-type = "qsgmii"; ++ }; ++ mdio1_phy8: emdio1_phy@8 { ++ reg = <0x0f>; ++ phy-connection-type = "qsgmii"; ++ }; ++}; ++ ++&emdio2 { ++ /* Aquantia AQR105 10G PHY */ ++ mdio2_phy1: emdio2_phy@1 { ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ interrupts = <0 2 0x4>; ++ reg = <0x0>; ++ phy-connection-type = "xfi"; ++ }; ++}; ++ ++/* DPMAC connections to external PHYs ++ * based on LS1088A RM RevC - $24.1.2 SerDes Options ++ */ ++/* DPMAC1 is 10G SFP+, fixed link */ ++&dpmac2 { ++ phy-handle = <&mdio2_phy1>; ++}; ++&dpmac3 { ++ phy-handle = <&mdio1_phy5>; ++}; ++&dpmac4 { ++ phy-handle = <&mdio1_phy6>; ++}; ++&dpmac5 { ++ phy-handle = <&mdio1_phy7>; ++}; ++&dpmac6 { ++ phy-handle = <&mdio1_phy8>; ++}; ++&dpmac7 { ++ phy-handle = <&mdio1_phy1>; ++}; ++&dpmac8 { ++ phy-handle = <&mdio1_phy2>; ++}; ++&dpmac9 { ++ phy-handle = <&mdio1_phy3>; ++}; ++&dpmac10 { ++ phy-handle = <&mdio1_phy4>; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi +@@ -0,0 +1,816 @@ ++/* ++ * Device Tree Include file for NXP Layerscape-1088A family SoC. ++ * ++ * Copyright 2017 NXP ++ * ++ * Harninder Rai ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */
++#include <dt-bindings/interrupt-controller/arm-gic.h>
++#include <dt-bindings/thermal/thermal.h>
++
++/ {
++	compatible = "fsl,ls1088a";
++	interrupt-parent = <&gic>;
++	#address-cells = <2>;
++	#size-cells = <2>;
++
++	aliases {
++		crypto = &crypto;
++	};
++
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		/* We have 2 clusters having 4 Cortex-A53 cores each */
++		cpu0: cpu@0 {
++			device_type = "cpu";
++			compatible = "arm,cortex-a53";
++			reg = <0x0>;
++			clocks = <&clockgen 1 0>;
++			#cooling-cells = <2>;
++			cpu-idle-states = <&CPU_PH20>;
++		};
++
++		cpu1: cpu@1 {
++			device_type = "cpu";
++			compatible = "arm,cortex-a53";
++			reg = <0x1>;
++			clocks = <&clockgen 1 0>;
++			cpu-idle-states = <&CPU_PH20>;
++		};
++
++		cpu2: cpu@2 {
++			device_type = "cpu";
++			compatible = "arm,cortex-a53";
++			reg = <0x2>;
++			clocks = <&clockgen 1 0>;
++			cpu-idle-states = <&CPU_PH20>;
++		};
++
++		cpu3: cpu@3 {
++			device_type = "cpu";
++			compatible = "arm,cortex-a53";
++			reg = <0x3>;
++			clocks = <&clockgen 1 0>;
++			cpu-idle-states = <&CPU_PH20>;
++		};
++
++		cpu4: cpu@100 {
++			device_type = "cpu";
++			compatible = "arm,cortex-a53";
++			reg = <0x100>;
++			clocks = <&clockgen 1 1>;
++			#cooling-cells = <2>;
++			cpu-idle-states = <&CPU_PH20>;
++		};
++
++		cpu5: cpu@101 {
++			device_type = "cpu";
++			compatible = "arm,cortex-a53";
++			reg = <0x101>;
++			clocks = <&clockgen 1 1>;
++			cpu-idle-states = <&CPU_PH20>;
++		};
++
++		cpu6: cpu@102 {
++			device_type = "cpu";
++			compatible = "arm,cortex-a53";
++			reg = <0x102>;
++			clocks = <&clockgen 1 1>;
++			cpu-idle-states = <&CPU_PH20>;
++		};
++
++		cpu7: cpu@103 {
++			device_type = "cpu";
++			compatible = "arm,cortex-a53";
++			reg = <0x103>;
++			clocks = <&clockgen 1 1>;
++			cpu-idle-states = <&CPU_PH20>;
++		};
++	};
++
++	idle-states {
++		/*
++		 * The PSCI node is not added by default; U-Boot will add the
++		 * missing parts if it determines that PSCI is to be used.
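++		 * When firmware does provide PSCI, the cpu-idle-states
++		 * entries above are entered through the PSCI CPU_SUSPEND
++		 * call, with the arm,psci-suspend-param value given below.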
++ */ ++ entry-method = "arm,psci"; ++ ++ CPU_PH20: cpu-ph20 { ++ compatible = "arm,idle-state"; ++ idle-state-name = "PH20"; ++ arm,psci-suspend-param = <0x0>; ++ entry-latency-us = <1000>; ++ exit-latency-us = <1000>; ++ min-residency-us = <3000>; ++ }; ++ }; ++ ++ gic: interrupt-controller@6000000 { ++ compatible = "arm,gic-v3"; ++ #interrupt-cells = <3>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ interrupt-controller; ++ reg = <0x0 0x06000000 0 0x10000>, /* GIC Dist */ ++ <0x0 0x06100000 0 0x100000>, /* GICR(RD_base+SGI_base)*/ ++ <0x0 0x0c0c0000 0 0x2000>, /* GICC */ ++ <0x0 0x0c0d0000 0 0x1000>, /* GICH */ ++ <0x0 0x0c0e0000 0 0x20000>; /* GICV */ ++ interrupts = <1 9 IRQ_TYPE_LEVEL_HIGH>; ++ ++ its: gic-its@6020000 { ++ compatible = "arm,gic-v3-its"; ++ msi-controller; ++ reg = <0x0 0x6020000 0 0x20000>; ++ }; ++ }; ++ ++ timer { ++ compatible = "arm,armv8-timer"; ++ interrupts = <1 13 IRQ_TYPE_LEVEL_LOW>,/* Physical Secure PPI */ ++ <1 14 IRQ_TYPE_LEVEL_LOW>,/* Physical Non-Secure PPI */ ++ <1 11 IRQ_TYPE_LEVEL_LOW>,/* Virtual PPI */ ++ <1 10 IRQ_TYPE_LEVEL_LOW>;/* Hypervisor PPI */ ++ }; ++ ++ fsl_mc: fsl-mc@80c000000 { ++ compatible = "fsl,qoriq-mc"; ++ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */ ++ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */ ++ msi-parent = <&its>; ++ iommu-map = <0 &smmu 0 0>; /* This is fixed-up by u-boot */ ++ #address-cells = <3>; ++ #size-cells = <1>; ++ ++ /* ++ * Region type 0x0 - MC portals ++ * Region type 0x1 - QBMAN portals ++ */ ++ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000 ++ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>; ++ ++ dpmacs { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ dpmac1: dpmac@1 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <1>; ++ }; ++ dpmac2: dpmac@2 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <2>; ++ }; ++ dpmac3: dpmac@3 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <3>; ++ }; ++ dpmac4: dpmac@4 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <4>; ++ }; ++ dpmac5: dpmac@5 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <5>; ++ }; ++ dpmac6: dpmac@6 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <6>; ++ }; ++ dpmac7: dpmac@7 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <7>; ++ }; ++ dpmac8: dpmac@8 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <8>; ++ }; ++ dpmac9: dpmac@9 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <9>; ++ }; ++ dpmac10: dpmac@10 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <0xa>; ++ }; ++ }; ++ ++ }; ++ ++ sysclk: sysclk { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <100000000>; ++ clock-output-names = "sysclk"; ++ }; ++ ++ dcfg: dcfg@1e00000 { ++ compatible = "fsl,ls1088a-dcfg", "syscon"; ++ reg = <0x0 0x1e00000 0x0 0x10000>; ++ little-endian; ++ }; ++ ++ rstcr: syscon@1e60000 { ++ compatible = "fsl,ls1088a-rstcr", "syscon"; ++ reg = <0x0 0x1e60000 0x0 0x4>; ++ }; ++ ++ reboot { ++ compatible = "syscon-reboot"; ++ regmap = <&rstcr>; ++ offset = <0x0>; ++ mask = <0x02>; ++ }; ++ ++ ++ soc { ++ compatible = "simple-bus"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ clockgen: clocking@1300000 { ++ compatible = "fsl,ls1088a-clockgen"; ++ reg = <0 0x1300000 0 0xa0000>; ++ #clock-cells = <2>; ++ clocks = <&sysclk>; ++ }; ++ ++ tmu: tmu@1f80000 { ++ compatible = "fsl,qoriq-tmu"; ++ reg = <0x0 0x1f80000 0x0 0x10000>; ++ interrupts = <0 23 0x4>; ++ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>; ++ fsl,tmu-calibration = ++ /* Calibration data group 1 */ ++ <0x00000000 0x00000026 ++ 
0x00000001 0x0000002d ++ 0x00000002 0x00000032 ++ 0x00000003 0x00000039 ++ 0x00000004 0x0000003f ++ 0x00000005 0x00000046 ++ 0x00000006 0x0000004d ++ 0x00000007 0x00000054 ++ 0x00000008 0x0000005a ++ 0x00000009 0x00000061 ++ 0x0000000a 0x0000006a ++ 0x0000000b 0x00000071 ++ /* Calibration data group 2 */ ++ 0x00010000 0x00000025 ++ 0x00010001 0x0000002c ++ 0x00010002 0x00000035 ++ 0x00010003 0x0000003d ++ 0x00010004 0x00000045 ++ 0x00010005 0x0000004e ++ 0x00010006 0x00000057 ++ 0x00010007 0x00000061 ++ 0x00010008 0x0000006b ++ 0x00010009 0x00000076 ++ /* Calibration data group 3 */ ++ 0x00020000 0x00000029 ++ 0x00020001 0x00000033 ++ 0x00020002 0x0000003d ++ 0x00020003 0x00000049 ++ 0x00020004 0x00000056 ++ 0x00020005 0x00000061 ++ 0x00020006 0x0000006d ++ /* Calibration data group 4 */ ++ 0x00030000 0x00000021 ++ 0x00030001 0x0000002a ++ 0x00030002 0x0000003c ++ 0x00030003 0x0000004e>; ++ little-endian; ++ #thermal-sensor-cells = <1>; ++ }; ++ ++ thermal-zones { ++ cpu_thermal: cpu-thermal { ++ polling-delay-passive = <1000>; ++ polling-delay = <5000>; ++ thermal-sensors = <&tmu 0>; ++ ++ trips { ++ cpu_alert: cpu-alert { ++ temperature = <85000>; ++ hysteresis = <2000>; ++ type = "passive"; ++ }; ++ ++ cpu_crit: cpu-crit { ++ temperature = <95000>; ++ hysteresis = <2000>; ++ type = "critical"; ++ }; ++ }; ++ ++ cooling-maps { ++ map0 { ++ trip = <&cpu_alert>; ++ cooling-device = ++ <&cpu0 THERMAL_NO_LIMIT ++ THERMAL_NO_LIMIT>; ++ }; ++ map1 { ++ trip = <&cpu_alert>; ++ cooling-device = ++ <&cpu4 THERMAL_NO_LIMIT ++ THERMAL_NO_LIMIT>; ++ }; ++ }; ++ }; ++ }; ++ ++ duart0: serial@21c0500 { ++ compatible = "fsl,ns16550", "ns16550a"; ++ reg = <0x0 0x21c0500 0x0 0x100>; ++ clocks = <&clockgen 4 3>; ++ interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>; ++ status = "disabled"; ++ }; ++ ++ duart1: serial@21c0600 { ++ compatible = "fsl,ns16550", "ns16550a"; ++ reg = <0x0 0x21c0600 0x0 0x100>; ++ clocks = <&clockgen 4 3>; ++ interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>; ++ status = "disabled"; ++ }; ++ ++ cluster1_core0_watchdog: wdt@c000000 { ++ compatible = "arm,sp805-wdt", "arm,primecell"; ++ reg = <0x0 0xc000000 0x0 0x1000>; ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>; ++ clock-names = "apb_pclk", "wdog_clk"; ++ }; ++ ++ cluster1_core1_watchdog: wdt@c010000 { ++ compatible = "arm,sp805-wdt", "arm,primecell"; ++ reg = <0x0 0xc010000 0x0 0x1000>; ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>; ++ clock-names = "apb_pclk", "wdog_clk"; ++ }; ++ ++ cluster1_core2_watchdog: wdt@c020000 { ++ compatible = "arm,sp805-wdt", "arm,primecell"; ++ reg = <0x0 0xc020000 0x0 0x1000>; ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>; ++ clock-names = "apb_pclk", "wdog_clk"; ++ }; ++ ++ cluster1_core3_watchdog: wdt@c030000 { ++ compatible = "arm,sp805-wdt", "arm,primecell"; ++ reg = <0x0 0xc030000 0x0 0x1000>; ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>; ++ clock-names = "apb_pclk", "wdog_clk"; ++ }; ++ ++ cluster2_core0_watchdog: wdt@c100000 { ++ compatible = "arm,sp805-wdt", "arm,primecell"; ++ reg = <0x0 0xc100000 0x0 0x1000>; ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>; ++ clock-names = "apb_pclk", "wdog_clk"; ++ }; ++ ++ cluster2_core1_watchdog: wdt@c110000 { ++ compatible = "arm,sp805-wdt", "arm,primecell"; ++ reg = <0x0 0xc110000 0x0 0x1000>; ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>; ++ clock-names = "apb_pclk", "wdog_clk"; ++ }; ++ ++ cluster2_core2_watchdog: wdt@c120000 { ++ compatible = "arm,sp805-wdt", "arm,primecell"; ++ reg = <0x0 0xc120000 0x0 0x1000>; ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>; ++ 
clock-names = "apb_pclk", "wdog_clk"; ++ }; ++ ++ cluster2_core3_watchdog: wdt@c130000 { ++ compatible = "arm,sp805-wdt", "arm,primecell"; ++ reg = <0x0 0xc130000 0x0 0x1000>; ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>; ++ clock-names = "apb_pclk", "wdog_clk"; ++ }; ++ ++ gpio0: gpio@2300000 { ++ compatible = "fsl,qoriq-gpio"; ++ reg = <0x0 0x2300000 0x0 0x10000>; ++ interrupts = <0 36 IRQ_TYPE_LEVEL_HIGH>; ++ gpio-controller; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ gpio1: gpio@2310000 { ++ compatible = "fsl,qoriq-gpio"; ++ reg = <0x0 0x2310000 0x0 0x10000>; ++ interrupts = <0 36 IRQ_TYPE_LEVEL_HIGH>; ++ gpio-controller; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ gpio2: gpio@2320000 { ++ compatible = "fsl,qoriq-gpio"; ++ reg = <0x0 0x2320000 0x0 0x10000>; ++ interrupts = <0 37 IRQ_TYPE_LEVEL_HIGH>; ++ gpio-controller; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ gpio3: gpio@2330000 { ++ compatible = "fsl,qoriq-gpio"; ++ reg = <0x0 0x2330000 0x0 0x10000>; ++ interrupts = <0 37 IRQ_TYPE_LEVEL_HIGH>; ++ gpio-controller; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ /* TODO: WRIOP (CCSR?) */ ++ emdio1: mdio@0x8B96000 { /* WRIOP0: 0x8B8_0000, ++ * E-MDIO1: 0x1_6000 ++ */ ++ compatible = "fsl,fman-memac-mdio"; ++ reg = <0x0 0x8B96000 0x0 0x1000>; ++ device_type = "mdio"; ++ little-endian; /* force the driver in LE mode */ ++ ++ /* Not necessary on the QDS, but needed on the RDB */ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ emdio2: mdio@0x8B97000 { /* WRIOP0: 0x8B8_0000, ++ * E-MDIO2: 0x1_7000 ++ */ ++ compatible = "fsl,fman-memac-mdio"; ++ reg = <0x0 0x8B97000 0x0 0x1000>; ++ device_type = "mdio"; ++ little-endian; /* force the driver in LE mode */ ++ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ ifc: ifc@2240000 { ++ compatible = "fsl,ifc", "simple-bus"; ++ reg = <0x0 0x2240000 0x0 0x20000>; ++ interrupts = <0 21 IRQ_TYPE_LEVEL_HIGH>; ++ little-endian; ++ #address-cells = <2>; ++ #size-cells = <1>; ++ ++ }; ++ ++ ftm0: ftm0@2800000 { ++ compatible = "fsl,ftm-alarm"; ++ reg = <0x0 0x2800000 0x0 0x10000>; ++ interrupts = <0 44 4>; ++ }; ++ ++ i2c0: i2c@2000000 { ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2000000 0x0 0x10000>; ++ interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clockgen 4 3>; ++ status = "disabled"; ++ }; ++ ++ i2c1: i2c@2010000 { ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2010000 0x0 0x10000>; ++ interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clockgen 4 3>; ++ status = "disabled"; ++ }; ++ ++ i2c2: i2c@2020000 { ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2020000 0x0 0x10000>; ++ interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clockgen 4 3>; ++ status = "disabled"; ++ }; ++ ++ i2c3: i2c@2030000 { ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2030000 0x0 0x10000>; ++ interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clockgen 4 3>; ++ status = "disabled"; ++ }; ++ ++ qspi: quadspi@20c0000 { ++ compatible = "fsl,ls2080a-qspi", "fsl,ls1088a-qspi"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x20c0000 0x0 0x10000>, ++ <0x0 0x20000000 0x0 0x10000000>; ++ reg-names = "QuadSPI", "QuadSPI-memory"; ++ interrupts = <0 25 0x4>; /* Level high type 
*/ ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>; ++ clock-names = "qspi_en", "qspi"; ++ fsl,qspi-has-second-chip; ++ }; ++ ++ esdhc: esdhc@2140000 { ++ compatible = "fsl,ls1088a-esdhc", "fsl,esdhc"; ++ reg = <0x0 0x2140000 0x0 0x10000>; ++ interrupts = <0 28 0x4>; /* Level high type */ ++ clock-frequency = <0>; ++ voltage-ranges = <1800 1800 3300 3300>; ++ sdhci,auto-cmd12; ++ little-endian; ++ bus-width = <4>; ++ status = "disabled"; ++ }; ++ ++ usb0: usb3@3100000 { ++ compatible = "snps,dwc3"; ++ reg = <0x0 0x3100000 0x0 0x10000>; ++ interrupts = <0 80 0x4>; /* Level high type */ ++ dr_mode = "host"; ++ configure-gfladj; ++ snps,dis_rxdet_inp3_quirk; ++ }; ++ ++ usb1: usb3@3110000 { ++ compatible = "snps,dwc3"; ++ reg = <0x0 0x3110000 0x0 0x10000>; ++ interrupts = <0 81 0x4>; /* Level high type */ ++ dr_mode = "host"; ++ configure-gfladj; ++ snps,dis_rxdet_inp3_quirk; ++ }; ++ ++ sata: sata@3200000 { ++ compatible = "fsl,ls1088a-ahci"; ++ reg = <0x0 0x3200000 0x0 0x10000>, ++ <0x7 0x100520 0x0 0x4>; ++ reg-names = "ahci", "sata-ecc"; ++ interrupts = <0 133 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clockgen 4 3>; ++ dma-coherent; ++ status = "disabled"; ++ }; ++ ++ pcie@3400000 { ++ compatible = "fsl,ls2088a-pcie", "fsl,ls1088a-pcie", ++ "snps,dw-pcie"; ++ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */ ++ 0x20 0x00000000 0x0 0x00002000>; /* configuration space */ ++ reg-names = "regs", "config"; ++ interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */ ++ interrupt-names = "aer"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ dma-coherent; ++ num-lanes = <4>; ++ bus-range = <0x0 0xff>; ++ ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000 /* downstream I/O */ ++ 0x82000000 0x0 0x40000000 0x20 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ ++ msi-parent = <&its>; ++ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */ ++ #interrupt-cells = <1>; ++ interrupt-map-mask = <0 0 0 7>; ++ interrupt-map = <0000 0 0 1 &gic 0 0 0 109 IRQ_TYPE_LEVEL_HIGH>, ++ <0000 0 0 2 &gic 0 0 0 110 IRQ_TYPE_LEVEL_HIGH>, ++ <0000 0 0 3 &gic 0 0 0 111 IRQ_TYPE_LEVEL_HIGH>, ++ <0000 0 0 4 &gic 0 0 0 112 IRQ_TYPE_LEVEL_HIGH>; ++ }; ++ ++ pcie@3500000 { ++ compatible = "fsl,ls2088a-pcie", "fsl,ls1088a-pcie", ++ "snps,dw-pcie"; ++ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */ ++ 0x28 0x00000000 0x0 0x00002000>; /* configuration space */ ++ reg-names = "regs", "config"; ++ interrupts = <0 113 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */ ++ interrupt-names = "aer"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ dma-coherent; ++ num-lanes = <4>; ++ bus-range = <0x0 0xff>; ++ ranges = <0x81000000 0x0 0x00000000 0x28 0x00010000 0x0 0x00010000 /* downstream I/O */ ++ 0x82000000 0x0 0x40000000 0x28 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ ++ msi-parent = <&its>; ++ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */ ++ #interrupt-cells = <1>; ++ interrupt-map-mask = <0 0 0 7>; ++ interrupt-map = <0000 0 0 1 &gic 0 0 0 114 IRQ_TYPE_LEVEL_HIGH>, ++ <0000 0 0 2 &gic 0 0 0 115 IRQ_TYPE_LEVEL_HIGH>, ++ <0000 0 0 3 &gic 0 0 0 116 IRQ_TYPE_LEVEL_HIGH>, ++ <0000 0 0 4 &gic 0 0 0 117 IRQ_TYPE_LEVEL_HIGH>; ++ }; ++ ++ pcie@3600000 { ++ compatible = "fsl,ls2088a-pcie", "fsl,ls1088a-pcie", ++ "snps,dw-pcie"; ++ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */ ++ 0x30 0x00000000 0x0 0x00002000>; /* configuration space */ ++ reg-names = "regs", "config"; ++ interrupts = <0 118 
IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
++			interrupt-names = "aer";
++			#address-cells = <3>;
++			#size-cells = <2>;
++			device_type = "pci";
++			dma-coherent;
++			num-lanes = <8>;
++			bus-range = <0x0 0xff>;
++			ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000   /* downstream I/O */
++				  0x82000000 0x0 0x40000000 0x30 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++			msi-parent = <&its>;
++			iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */
++			#interrupt-cells = <1>;
++			interrupt-map-mask = <0 0 0 7>;
++			interrupt-map = <0000 0 0 1 &gic 0 0 0 119 IRQ_TYPE_LEVEL_HIGH>,
++					<0000 0 0 2 &gic 0 0 0 120 IRQ_TYPE_LEVEL_HIGH>,
++					<0000 0 0 3 &gic 0 0 0 121 IRQ_TYPE_LEVEL_HIGH>,
++					<0000 0 0 4 &gic 0 0 0 122 IRQ_TYPE_LEVEL_HIGH>;
++		};
++
++		smmu: iommu@5000000 {
++			compatible = "arm,mmu-500";
++			reg = <0 0x5000000 0 0x800000>;
++			#global-interrupts = <12>;
++			#iommu-cells = <1>;
++			stream-match-mask = <0x7C00>;
++			interrupts = <0 13 4>, /* global secure fault */
++				     <0 14 4>, /* combined secure interrupt */
++				     <0 15 4>, /* global non-secure fault */
++				     <0 16 4>, /* combined non-secure interrupt */
++				     /* performance counter interrupts 0-7 */
++				     <0 211 4>,
++				     <0 212 4>,
++				     <0 213 4>,
++				     <0 214 4>,
++				     <0 215 4>,
++				     <0 216 4>,
++				     <0 217 4>,
++				     <0 218 4>,
++				     /* per context interrupt, 64 interrupts */
++				     <0 146 4>,
++				     <0 147 4>,
++				     <0 148 4>,
++				     <0 149 4>,
++				     <0 150 4>,
++				     <0 151 4>,
++				     <0 152 4>,
++				     <0 153 4>,
++				     <0 154 4>,
++				     <0 155 4>,
++				     <0 156 4>,
++				     <0 157 4>,
++				     <0 158 4>,
++				     <0 159 4>,
++				     <0 160 4>,
++				     <0 161 4>,
++				     <0 162 4>,
++				     <0 163 4>,
++				     <0 164 4>,
++				     <0 165 4>,
++				     <0 166 4>,
++				     <0 167 4>,
++				     <0 168 4>,
++				     <0 169 4>,
++				     <0 170 4>,
++				     <0 171 4>,
++				     <0 172 4>,
++				     <0 173 4>,
++				     <0 174 4>,
++				     <0 175 4>,
++				     <0 176 4>,
++				     <0 177 4>,
++				     <0 178 4>,
++				     <0 179 4>,
++				     <0 180 4>,
++				     <0 181 4>,
++				     <0 182 4>,
++				     <0 183 4>,
++				     <0 184 4>,
++				     <0 185 4>,
++				     <0 186 4>,
++				     <0 187 4>,
++				     <0 188 4>,
++				     <0 189 4>,
++				     <0 190 4>,
++				     <0 191 4>,
++				     <0 192 4>,
++				     <0 193 4>,
++				     <0 194 4>,
++				     <0 195 4>,
++				     <0 196 4>,
++				     <0 197 4>,
++				     <0 198 4>,
++				     <0 199 4>,
++				     <0 200 4>,
++				     <0 201 4>,
++				     <0 202 4>,
++				     <0 203 4>,
++				     <0 204 4>,
++				     <0 205 4>,
++				     <0 206 4>,
++				     <0 207 4>,
++				     <0 208 4>,
++				     <0 209 4>;
++		};
++
++		crypto: crypto@8000000 {
++			compatible = "fsl,sec-v5.0", "fsl,sec-v4.0";
++			fsl,sec-era = <8>;
++			#address-cells = <1>;
++			#size-cells = <1>;
++			ranges = <0x0 0x00 0x8000000 0x100000>;
++			reg = <0x00 0x8000000 0x0 0x100000>;
++			interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
++			dma-coherent;
++
++			sec_jr0: jr@10000 {
++				compatible = "fsl,sec-v5.0-job-ring",
++					     "fsl,sec-v4.0-job-ring";
++				reg = <0x10000 0x10000>;
++				interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
++			};
++
++			sec_jr1: jr@20000 {
++				compatible = "fsl,sec-v5.0-job-ring",
++					     "fsl,sec-v4.0-job-ring";
++				reg = <0x20000 0x10000>;
++				interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
++			};
++
++			sec_jr2: jr@30000 {
++				compatible = "fsl,sec-v5.0-job-ring",
++					     "fsl,sec-v4.0-job-ring";
++				reg = <0x30000 0x10000>;
++				interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
++			};
++
++			sec_jr3: jr@40000 {
++				compatible = "fsl,sec-v5.0-job-ring",
++					     "fsl,sec-v4.0-job-ring";
++				reg = <0x40000 0x10000>;
++				interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
++			};
++		};
++	};
++
++};
+--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
+@@ -1,8 +1,10 @@
+ /*
+  * Device Tree file for Freescale LS2080a QDS Board.
+  *
+- * Copyright (C) 2015, Freescale Semiconductor
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP + * ++ * Abhimanyu Saini + * Bhupesh Sharma + * + * This file is dual-licensed: you can use it either under the terms +@@ -46,169 +48,76 @@ + + /dts-v1/; + +-/include/ "fsl-ls2080a.dtsi" ++#include "fsl-ls2080a.dtsi" ++#include "fsl-ls208xa-qds.dtsi" + + / { + model = "Freescale Layerscape 2080a QDS Board"; + compatible = "fsl,ls2080a-qds", "fsl,ls2080a"; + +- aliases { +- serial0 = &serial0; +- serial1 = &serial1; +- }; +- + chosen { + stdout-path = "serial0:115200n8"; + }; + }; + +-&esdhc { +- status = "okay"; +-}; +- + &ifc { +- status = "okay"; +- #address-cells = <2>; +- #size-cells = <1>; +- ranges = <0x0 0x0 0x5 0x80000000 0x08000000 +- 0x2 0x0 0x5 0x30000000 0x00010000 +- 0x3 0x0 0x5 0x20000000 0x00010000>; +- +- nor@0,0 { ++ boardctrl: board-control@3,0 { + #address-cells = <1>; + #size-cells = <1>; +- compatible = "cfi-flash"; +- reg = <0x0 0x0 0x8000000>; +- bank-width = <2>; +- device-width = <1>; +- }; ++ compatible = "fsl,tetra-fpga", "fsl,fpga-qixis", "simple-bus"; ++ reg = <3 0 0x300>; /* TODO check address */ ++ ranges = <0 3 0 0x300>; + +- nand@2,0 { +- compatible = "fsl,ifc-nand"; +- reg = <0x2 0x0 0x10000>; +- }; ++ mdio_mux_emi1 { ++ compatible = "mdio-mux-mmioreg", "mdio-mux"; ++ mdio-parent-bus = <&emdio1>; ++ reg = <0x54 1>; /* BRDCFG4 */ ++ mux-mask = <0xe0>; /* EMI1_MDIO */ + +- cpld@3,0 { +- reg = <0x3 0x0 0x10000>; +- compatible = "fsl,ls2080aqds-fpga", "fsl,fpga-qixis"; +- }; +-}; +- +-&i2c0 { +- status = "okay"; +- pca9547@77 { +- compatible = "nxp,pca9547"; +- reg = <0x77>; +- #address-cells = <1>; +- #size-cells = <0>; +- i2c@0 { +- #address-cells = <1>; ++ #address-cells=<1>; + #size-cells = <0>; +- reg = <0x00>; +- rtc@68 { +- compatible = "dallas,ds3232"; +- reg = <0x68>; +- }; +- }; + +- i2c@2 { +- #address-cells = <1>; +- #size-cells = <0>; +- reg = <0x02>; +- +- ina220@40 { +- compatible = "ti,ina220"; +- reg = <0x40>; +- shunt-resistor = <500>; +- }; +- +- ina220@41 { +- compatible = "ti,ina220"; +- reg = <0x41>; +- shunt-resistor = <1000>; +- }; +- }; +- +- i2c@3 { +- #address-cells = <1>; +- #size-cells = <0>; +- reg = <0x3>; +- +- adt7481@4c { +- compatible = "adi,adt7461"; +- reg = <0x4c>; ++ /* Child MDIO buses, one for each riser card: ++ * reg = 0x0, 0x20, 0x40, 0x60, 0x80, 0xa0. ++ * VSC8234 PHYs on the riser cards. 
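++			 * Per the mdio-mux-mmioreg binding, a child bus is
++			 * selected when its reg value equals BRDCFG4 masked
++			 * with mux-mask; mdio@60 below is therefore active
++			 * while bits [7:5] of BRDCFG4 read 0b011.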
++ */ ++ ++ mdio_mux3: mdio@60 { ++ reg = <0x60>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ mdio0_phy12: mdio_phy0@1c { ++ reg = <0x1c>; ++ phy-connection-type = "sgmii"; ++ }; ++ mdio0_phy13: mdio_phy1@1d { ++ reg = <0x1d>; ++ phy-connection-type = "sgmii"; ++ }; ++ mdio0_phy14: mdio_phy2@1e { ++ reg = <0x1e>; ++ phy-connection-type = "sgmii"; ++ }; ++ mdio0_phy15: mdio_phy3@1f { ++ reg = <0x1f>; ++ phy-connection-type = "sgmii"; ++ }; + }; + }; + }; + }; + +-&i2c1 { +- status = "disabled"; +-}; +- +-&i2c2 { +- status = "disabled"; +-}; +- +-&i2c3 { +- status = "disabled"; +-}; +- +-&dspi { +- status = "okay"; +- dflash0: n25q128a { +- #address-cells = <1>; +- #size-cells = <1>; +- compatible = "st,m25p80"; +- spi-max-frequency = <3000000>; +- reg = <0>; +- }; +- dflash1: sst25wf040b { +- #address-cells = <1>; +- #size-cells = <1>; +- compatible = "st,m25p80"; +- spi-max-frequency = <3000000>; +- reg = <1>; +- }; +- dflash2: en25s64 { +- #address-cells = <1>; +- #size-cells = <1>; +- compatible = "st,m25p80"; +- spi-max-frequency = <3000000>; +- reg = <2>; +- }; +-}; +- +-&qspi { +- status = "okay"; +- flash0: s25fl256s1@0 { +- #address-cells = <1>; +- #size-cells = <1>; +- compatible = "st,m25p80"; +- spi-max-frequency = <20000000>; +- reg = <0>; +- }; +- flash2: s25fl256s1@2 { +- #address-cells = <1>; +- #size-cells = <1>; +- compatible = "st,m25p80"; +- spi-max-frequency = <20000000>; +- reg = <0>; +- }; +-}; +- +-&sata0 { +- status = "okay"; ++/* Update DPMAC connections to external PHYs, under SerDes 0x2a_0x49. */ ++&dpmac9 { ++ phy-handle = <&mdio0_phy12>; + }; +- +-&sata1 { +- status = "okay"; ++&dpmac10 { ++ phy-handle = <&mdio0_phy13>; + }; +- +-&usb0 { +- status = "okay"; ++&dpmac11 { ++ phy-handle = <&mdio0_phy14>; + }; +- +-&usb1 { +- status = "okay"; ++&dpmac12 { ++ phy-handle = <&mdio0_phy15>; + }; +--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts ++++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts +@@ -1,8 +1,10 @@ + /* + * Device Tree file for Freescale LS2080a RDB Board. + * +- * Copyright (C) 2015, Freescale Semiconductor ++ * Copyright 2016 Freescale Semiconductor, Inc. 
++ * Copyright 2017 NXP + * ++ * Abhimanyu Saini + * Bhupesh Sharma + * + * This file is dual-licensed: you can use it either under the terms +@@ -46,125 +48,94 @@ + + /dts-v1/; + +-/include/ "fsl-ls2080a.dtsi" ++#include "fsl-ls2080a.dtsi" ++#include "fsl-ls208xa-rdb.dtsi" + + / { + model = "Freescale Layerscape 2080a RDB Board"; + compatible = "fsl,ls2080a-rdb", "fsl,ls2080a"; + +- aliases { +- serial0 = &serial0; +- serial1 = &serial1; +- }; +- + chosen { + stdout-path = "serial1:115200n8"; + }; + }; + +-&esdhc { +- status = "okay"; +-}; +- +-&ifc { +- status = "okay"; +- #address-cells = <2>; +- #size-cells = <1>; +- ranges = <0x0 0x0 0x5 0x80000000 0x08000000 +- 0x2 0x0 0x5 0x30000000 0x00010000 +- 0x3 0x0 0x5 0x20000000 0x00010000>; +- +- nor@0,0 { +- #address-cells = <1>; +- #size-cells = <1>; +- compatible = "cfi-flash"; +- reg = <0x0 0x0 0x8000000>; +- bank-width = <2>; +- device-width = <1>; +- }; +- +- nand@2,0 { +- compatible = "fsl,ifc-nand"; +- reg = <0x2 0x0 0x10000>; +- }; +- +- cpld@3,0 { +- reg = <0x3 0x0 0x10000>; +- compatible = "fsl,ls2080aqds-fpga", "fsl,fpga-qixis"; +- }; +- +-}; +- +-&i2c0 { +- status = "okay"; +- pca9547@75 { +- compatible = "nxp,pca9547"; +- reg = <0x75>; +- #address-cells = <1>; +- #size-cells = <0>; +- status = "disabled"; +- i2c@1 { +- #address-cells = <1>; +- #size-cells = <0>; +- reg = <0x01>; +- rtc@68 { +- compatible = "dallas,ds3232"; +- reg = <0x68>; +- }; +- }; +- +- i2c@3 { +- #address-cells = <1>; +- #size-cells = <0>; +- reg = <0x3>; +- +- adt7481@4c { +- compatible = "adi,adt7461"; +- reg = <0x4c>; +- }; +- }; +- }; +-}; +- +-&i2c1 { +- status = "disabled"; +-}; +- +-&i2c2 { +- status = "disabled"; +-}; +- +-&i2c3 { ++&emdio1 { + status = "disabled"; ++ /* CS4340 PHYs */ ++ mdio1_phy1: emdio1_phy@1 { ++ reg = <0x10>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio1_phy2: emdio1_phy@2 { ++ reg = <0x11>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio1_phy3: emdio1_phy@3 { ++ reg = <0x12>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio1_phy4: emdio1_phy@4 { ++ reg = <0x13>; ++ phy-connection-type = "xfi"; ++ }; + }; + +-&dspi { +- status = "okay"; +- dflash0: n25q512a { +- #address-cells = <1>; +- #size-cells = <1>; +- compatible = "st,m25p80"; +- spi-max-frequency = <3000000>; +- reg = <0>; ++&emdio2 { ++ /* AQR405 PHYs */ ++ mdio2_phy1: emdio2_phy@1 { ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ interrupts = <0 1 0x4>; /* Level high type */ ++ reg = <0x0>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio2_phy2: emdio2_phy@2 { ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ interrupts = <0 2 0x4>; /* Level high type */ ++ reg = <0x1>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio2_phy3: emdio2_phy@3 { ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ interrupts = <0 4 0x4>; /* Level high type */ ++ reg = <0x2>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio2_phy4: emdio2_phy@4 { ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ interrupts = <0 5 0x4>; /* Level high type */ ++ reg = <0x3>; ++ phy-connection-type = "xfi"; + }; + }; + +-&qspi { +- status = "disabled"; +-}; ++/* Update DPMAC connections to external PHYs, under the assumption of ++ * SerDes 0x2a_0x41. This is currently the only SerDes supported on the board. 
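++ * (The 0x2a_0x41 notation presumably refers to the RCW SerDes protocol
++ * values, i.e. SerDes1 = 0x2a and SerDes2 = 0x41.)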
++ */ ++/* Leave Cortina nodes commented out until driver is integrated ++ *&dpmac1 { ++ * phy-handle = <&mdio1_phy1>; ++ *}; ++ *&dpmac2 { ++ * phy-handle = <&mdio1_phy2>; ++ *}; ++ *&dpmac3 { ++ * phy-handle = <&mdio1_phy3>; ++ *}; ++ *&dpmac4 { ++ * phy-handle = <&mdio1_phy4>; ++ *}; ++ */ + +-&sata0 { +- status = "okay"; ++&dpmac5 { ++ phy-handle = <&mdio2_phy1>; + }; +- +-&sata1 { +- status = "okay"; ++&dpmac6 { ++ phy-handle = <&mdio2_phy2>; + }; +- +-&usb0 { +- status = "okay"; ++&dpmac7 { ++ phy-handle = <&mdio2_phy3>; + }; +- +-&usb1 { +- status = "okay"; ++&dpmac8 { ++ phy-handle = <&mdio2_phy4>; + }; +--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts ++++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts +@@ -1,7 +1,7 @@ + /* + * Device Tree file for Freescale LS2080a software Simulator model + * +- * Copyright (C) 2014-2015, Freescale Semiconductor ++ * Copyright 2014-2015 Freescale Semiconductor, Inc. + * + * Bhupesh Sharma + * +@@ -46,17 +46,12 @@ + + /dts-v1/; + +-/include/ "fsl-ls2080a.dtsi" ++#include "fsl-ls2080a.dtsi" + + / { + model = "Freescale Layerscape 2080a software Simulator model"; + compatible = "fsl,ls2080a-simu", "fsl,ls2080a"; + +- aliases { +- serial0 = &serial0; +- serial1 = &serial1; +- }; +- + ethernet@2210000 { + compatible = "smsc,lan91c111"; + reg = <0x0 0x2210000 0x0 0x100>; +--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi ++++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi +@@ -1,8 +1,9 @@ + /* + * Device Tree Include file for Freescale Layerscape-2080A family SoC. + * +- * Copyright (C) 2014-2015, Freescale Semiconductor ++ * Copyright 2014-2016 Freescale Semiconductor, Inc. + * ++ * Abhimanyu Saini + * Bhupesh Sharma + * + * This file is dual-licensed: you can use it either under the terms +@@ -44,696 +45,132 @@ + * OTHER DEALINGS IN THE SOFTWARE. + */ + +-/ { +- compatible = "fsl,ls2080a"; +- interrupt-parent = <&gic>; +- #address-cells = <2>; +- #size-cells = <2>; +- +- cpus { +- #address-cells = <1>; +- #size-cells = <0>; +- +- /* +- * We expect the enable-method for cpu's to be "psci", but this +- * is dependent on the SoC FW, which will fill this in. 
+- * +- * Currently supported enable-method is psci v0.2 +- */ +- +- /* We have 4 clusters having 2 Cortex-A57 cores each */ +- cpu@0 { +- device_type = "cpu"; +- compatible = "arm,cortex-a57"; +- reg = <0x0>; +- clocks = <&clockgen 1 0>; +- next-level-cache = <&cluster0_l2>; +- }; +- +- cpu@1 { +- device_type = "cpu"; +- compatible = "arm,cortex-a57"; +- reg = <0x1>; +- clocks = <&clockgen 1 0>; +- next-level-cache = <&cluster0_l2>; +- }; +- +- cpu@100 { +- device_type = "cpu"; +- compatible = "arm,cortex-a57"; +- reg = <0x100>; +- clocks = <&clockgen 1 1>; +- next-level-cache = <&cluster1_l2>; +- }; +- +- cpu@101 { +- device_type = "cpu"; +- compatible = "arm,cortex-a57"; +- reg = <0x101>; +- clocks = <&clockgen 1 1>; +- next-level-cache = <&cluster1_l2>; +- }; +- +- cpu@200 { +- device_type = "cpu"; +- compatible = "arm,cortex-a57"; +- reg = <0x200>; +- clocks = <&clockgen 1 2>; +- next-level-cache = <&cluster2_l2>; +- }; +- +- cpu@201 { +- device_type = "cpu"; +- compatible = "arm,cortex-a57"; +- reg = <0x201>; +- clocks = <&clockgen 1 2>; +- next-level-cache = <&cluster2_l2>; +- }; +- +- cpu@300 { +- device_type = "cpu"; +- compatible = "arm,cortex-a57"; +- reg = <0x300>; +- clocks = <&clockgen 1 3>; +- next-level-cache = <&cluster3_l2>; +- }; +- +- cpu@301 { +- device_type = "cpu"; +- compatible = "arm,cortex-a57"; +- reg = <0x301>; +- clocks = <&clockgen 1 3>; +- next-level-cache = <&cluster3_l2>; +- }; +- +- cluster0_l2: l2-cache0 { +- compatible = "cache"; +- }; +- +- cluster1_l2: l2-cache1 { +- compatible = "cache"; +- }; +- +- cluster2_l2: l2-cache2 { +- compatible = "cache"; +- }; +- +- cluster3_l2: l2-cache3 { +- compatible = "cache"; +- }; +- }; +- +- memory@80000000 { +- device_type = "memory"; +- reg = <0x00000000 0x80000000 0 0x80000000>; +- /* DRAM space - 1, size : 2 GB DRAM */ +- }; +- +- sysclk: sysclk { +- compatible = "fixed-clock"; +- #clock-cells = <0>; +- clock-frequency = <100000000>; +- clock-output-names = "sysclk"; +- }; +- +- gic: interrupt-controller@6000000 { +- compatible = "arm,gic-v3"; +- reg = <0x0 0x06000000 0 0x10000>, /* GIC Dist */ +- <0x0 0x06100000 0 0x100000>, /* GICR (RD_base + SGI_base) */ +- <0x0 0x0c0c0000 0 0x2000>, /* GICC */ +- <0x0 0x0c0d0000 0 0x1000>, /* GICH */ +- <0x0 0x0c0e0000 0 0x20000>; /* GICV */ +- #interrupt-cells = <3>; +- #address-cells = <2>; +- #size-cells = <2>; +- ranges; +- interrupt-controller; +- interrupts = <1 9 0x4>; +- +- its: gic-its@6020000 { +- compatible = "arm,gic-v3-its"; +- msi-controller; +- reg = <0x0 0x6020000 0 0x20000>; +- }; +- }; +- +- rstcr: syscon@1e60000 { +- compatible = "fsl,ls2080a-rstcr", "syscon"; +- reg = <0x0 0x1e60000 0x0 0x4>; +- }; +- +- reboot { +- compatible ="syscon-reboot"; +- regmap = <&rstcr>; +- offset = <0x0>; +- mask = <0x2>; +- }; +- +- timer { +- compatible = "arm,armv8-timer"; +- interrupts = <1 13 4>, /* Physical Secure PPI, active-low */ +- <1 14 4>, /* Physical Non-Secure PPI, active-low */ +- <1 11 4>, /* Virtual PPI, active-low */ +- <1 10 4>; /* Hypervisor PPI, active-low */ +- fsl,erratum-a008585; +- }; +- +- pmu { +- compatible = "arm,armv8-pmuv3"; +- interrupts = <1 7 0x8>; /* PMU PPI, Level low type */ +- }; +- +- soc { +- compatible = "simple-bus"; +- #address-cells = <2>; +- #size-cells = <2>; +- ranges; +- +- clockgen: clocking@1300000 { +- compatible = "fsl,ls2080a-clockgen"; +- reg = <0 0x1300000 0 0xa0000>; +- #clock-cells = <2>; +- clocks = <&sysclk>; +- }; +- +- serial0: serial@21c0500 { +- compatible = "fsl,ns16550", "ns16550a"; +- reg = <0x0 0x21c0500 0x0 
0x100>; +- clocks = <&clockgen 4 3>; +- interrupts = <0 32 0x4>; /* Level high type */ +- }; +- +- serial1: serial@21c0600 { +- compatible = "fsl,ns16550", "ns16550a"; +- reg = <0x0 0x21c0600 0x0 0x100>; +- clocks = <&clockgen 4 3>; +- interrupts = <0 32 0x4>; /* Level high type */ +- }; +- +- cluster1_core0_watchdog: wdt@c000000 { +- compatible = "arm,sp805-wdt", "arm,primecell"; +- reg = <0x0 0xc000000 0x0 0x1000>; +- clocks = <&clockgen 4 3>, <&clockgen 4 3>; +- clock-names = "apb_pclk", "wdog_clk"; +- }; +- +- cluster1_core1_watchdog: wdt@c010000 { +- compatible = "arm,sp805-wdt", "arm,primecell"; +- reg = <0x0 0xc010000 0x0 0x1000>; +- clocks = <&clockgen 4 3>, <&clockgen 4 3>; +- clock-names = "apb_pclk", "wdog_clk"; +- }; +- +- cluster2_core0_watchdog: wdt@c100000 { +- compatible = "arm,sp805-wdt", "arm,primecell"; +- reg = <0x0 0xc100000 0x0 0x1000>; +- clocks = <&clockgen 4 3>, <&clockgen 4 3>; +- clock-names = "apb_pclk", "wdog_clk"; +- }; +- +- cluster2_core1_watchdog: wdt@c110000 { +- compatible = "arm,sp805-wdt", "arm,primecell"; +- reg = <0x0 0xc110000 0x0 0x1000>; +- clocks = <&clockgen 4 3>, <&clockgen 4 3>; +- clock-names = "apb_pclk", "wdog_clk"; +- }; +- +- cluster3_core0_watchdog: wdt@c200000 { +- compatible = "arm,sp805-wdt", "arm,primecell"; +- reg = <0x0 0xc200000 0x0 0x1000>; +- clocks = <&clockgen 4 3>, <&clockgen 4 3>; +- clock-names = "apb_pclk", "wdog_clk"; +- }; +- +- cluster3_core1_watchdog: wdt@c210000 { +- compatible = "arm,sp805-wdt", "arm,primecell"; +- reg = <0x0 0xc210000 0x0 0x1000>; +- clocks = <&clockgen 4 3>, <&clockgen 4 3>; +- clock-names = "apb_pclk", "wdog_clk"; +- }; +- +- cluster4_core0_watchdog: wdt@c300000 { +- compatible = "arm,sp805-wdt", "arm,primecell"; +- reg = <0x0 0xc300000 0x0 0x1000>; +- clocks = <&clockgen 4 3>, <&clockgen 4 3>; +- clock-names = "apb_pclk", "wdog_clk"; +- }; +- +- cluster4_core1_watchdog: wdt@c310000 { +- compatible = "arm,sp805-wdt", "arm,primecell"; +- reg = <0x0 0xc310000 0x0 0x1000>; +- clocks = <&clockgen 4 3>, <&clockgen 4 3>; +- clock-names = "apb_pclk", "wdog_clk"; +- }; +- +- fsl_mc: fsl-mc@80c000000 { +- compatible = "fsl,qoriq-mc"; +- reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */ +- <0x00000000 0x08340000 0 0x40000>; /* MC control reg */ +- msi-parent = <&its>; +- #address-cells = <3>; +- #size-cells = <1>; +- +- /* +- * Region type 0x0 - MC portals +- * Region type 0x1 - QBMAN portals +- */ +- ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000 +- 0x1 0x0 0x0 0x8 0x18000000 0x8000000>; +- +- /* +- * Define the maximum number of MACs present on the SoC. 
+- */ +- dpmacs { +- #address-cells = <1>; +- #size-cells = <0>; +- +- dpmac1: dpmac@1 { +- compatible = "fsl,qoriq-mc-dpmac"; +- reg = <0x1>; +- }; +- +- dpmac2: dpmac@2 { +- compatible = "fsl,qoriq-mc-dpmac"; +- reg = <0x2>; +- }; +- +- dpmac3: dpmac@3 { +- compatible = "fsl,qoriq-mc-dpmac"; +- reg = <0x3>; +- }; +- +- dpmac4: dpmac@4 { +- compatible = "fsl,qoriq-mc-dpmac"; +- reg = <0x4>; +- }; +- +- dpmac5: dpmac@5 { +- compatible = "fsl,qoriq-mc-dpmac"; +- reg = <0x5>; +- }; +- +- dpmac6: dpmac@6 { +- compatible = "fsl,qoriq-mc-dpmac"; +- reg = <0x6>; +- }; +- +- dpmac7: dpmac@7 { +- compatible = "fsl,qoriq-mc-dpmac"; +- reg = <0x7>; +- }; +- +- dpmac8: dpmac@8 { +- compatible = "fsl,qoriq-mc-dpmac"; +- reg = <0x8>; +- }; +- +- dpmac9: dpmac@9 { +- compatible = "fsl,qoriq-mc-dpmac"; +- reg = <0x9>; +- }; +- +- dpmac10: dpmac@a { +- compatible = "fsl,qoriq-mc-dpmac"; +- reg = <0xa>; +- }; +- +- dpmac11: dpmac@b { +- compatible = "fsl,qoriq-mc-dpmac"; +- reg = <0xb>; +- }; +- +- dpmac12: dpmac@c { +- compatible = "fsl,qoriq-mc-dpmac"; +- reg = <0xc>; +- }; +- +- dpmac13: dpmac@d { +- compatible = "fsl,qoriq-mc-dpmac"; +- reg = <0xd>; +- }; +- +- dpmac14: dpmac@e { +- compatible = "fsl,qoriq-mc-dpmac"; +- reg = <0xe>; +- }; +- +- dpmac15: dpmac@f { +- compatible = "fsl,qoriq-mc-dpmac"; +- reg = <0xf>; +- }; +- +- dpmac16: dpmac@10 { +- compatible = "fsl,qoriq-mc-dpmac"; +- reg = <0x10>; +- }; +- }; +- }; +- +- smmu: iommu@5000000 { +- compatible = "arm,mmu-500"; +- reg = <0 0x5000000 0 0x800000>; +- #global-interrupts = <12>; +- interrupts = <0 13 4>, /* global secure fault */ +- <0 14 4>, /* combined secure interrupt */ +- <0 15 4>, /* global non-secure fault */ +- <0 16 4>, /* combined non-secure interrupt */ +- /* performance counter interrupts 0-7 */ +- <0 211 4>, <0 212 4>, +- <0 213 4>, <0 214 4>, +- <0 215 4>, <0 216 4>, +- <0 217 4>, <0 218 4>, +- /* per context interrupt, 64 interrupts */ +- <0 146 4>, <0 147 4>, +- <0 148 4>, <0 149 4>, +- <0 150 4>, <0 151 4>, +- <0 152 4>, <0 153 4>, +- <0 154 4>, <0 155 4>, +- <0 156 4>, <0 157 4>, +- <0 158 4>, <0 159 4>, +- <0 160 4>, <0 161 4>, +- <0 162 4>, <0 163 4>, +- <0 164 4>, <0 165 4>, +- <0 166 4>, <0 167 4>, +- <0 168 4>, <0 169 4>, +- <0 170 4>, <0 171 4>, +- <0 172 4>, <0 173 4>, +- <0 174 4>, <0 175 4>, +- <0 176 4>, <0 177 4>, +- <0 178 4>, <0 179 4>, +- <0 180 4>, <0 181 4>, +- <0 182 4>, <0 183 4>, +- <0 184 4>, <0 185 4>, +- <0 186 4>, <0 187 4>, +- <0 188 4>, <0 189 4>, +- <0 190 4>, <0 191 4>, +- <0 192 4>, <0 193 4>, +- <0 194 4>, <0 195 4>, +- <0 196 4>, <0 197 4>, +- <0 198 4>, <0 199 4>, +- <0 200 4>, <0 201 4>, +- <0 202 4>, <0 203 4>, +- <0 204 4>, <0 205 4>, +- <0 206 4>, <0 207 4>, +- <0 208 4>, <0 209 4>; +- mmu-masters = <&fsl_mc 0x300 0>; +- }; +- +- dspi: dspi@2100000 { +- status = "disabled"; +- compatible = "fsl,ls2080a-dspi", "fsl,ls2085a-dspi"; +- #address-cells = <1>; +- #size-cells = <0>; +- reg = <0x0 0x2100000 0x0 0x10000>; +- interrupts = <0 26 0x4>; /* Level high type */ +- clocks = <&clockgen 4 3>; +- clock-names = "dspi"; +- spi-num-chipselects = <5>; +- bus-num = <0>; +- }; +- +- esdhc: esdhc@2140000 { +- status = "disabled"; +- compatible = "fsl,ls2080a-esdhc", "fsl,esdhc"; +- reg = <0x0 0x2140000 0x0 0x10000>; +- interrupts = <0 28 0x4>; /* Level high type */ +- clock-frequency = <0>; /* Updated by bootloader */ +- voltage-ranges = <1800 1800 3300 3300>; +- sdhci,auto-cmd12; +- little-endian; +- bus-width = <4>; +- }; +- +- gpio0: gpio@2300000 { +- compatible = "fsl,ls2080a-gpio", 
"fsl,qoriq-gpio"; +- reg = <0x0 0x2300000 0x0 0x10000>; +- interrupts = <0 36 0x4>; /* Level high type */ +- gpio-controller; +- little-endian; +- #gpio-cells = <2>; +- interrupt-controller; +- #interrupt-cells = <2>; +- }; +- +- gpio1: gpio@2310000 { +- compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio"; +- reg = <0x0 0x2310000 0x0 0x10000>; +- interrupts = <0 36 0x4>; /* Level high type */ +- gpio-controller; +- little-endian; +- #gpio-cells = <2>; +- interrupt-controller; +- #interrupt-cells = <2>; +- }; +- +- gpio2: gpio@2320000 { +- compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio"; +- reg = <0x0 0x2320000 0x0 0x10000>; +- interrupts = <0 37 0x4>; /* Level high type */ +- gpio-controller; +- little-endian; +- #gpio-cells = <2>; +- interrupt-controller; +- #interrupt-cells = <2>; +- }; +- +- gpio3: gpio@2330000 { +- compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio"; +- reg = <0x0 0x2330000 0x0 0x10000>; +- interrupts = <0 37 0x4>; /* Level high type */ +- gpio-controller; +- little-endian; +- #gpio-cells = <2>; +- interrupt-controller; +- #interrupt-cells = <2>; +- }; +- +- i2c0: i2c@2000000 { +- status = "disabled"; +- compatible = "fsl,vf610-i2c"; +- #address-cells = <1>; +- #size-cells = <0>; +- reg = <0x0 0x2000000 0x0 0x10000>; +- interrupts = <0 34 0x4>; /* Level high type */ +- clock-names = "i2c"; +- clocks = <&clockgen 4 3>; +- }; +- +- i2c1: i2c@2010000 { +- status = "disabled"; +- compatible = "fsl,vf610-i2c"; +- #address-cells = <1>; +- #size-cells = <0>; +- reg = <0x0 0x2010000 0x0 0x10000>; +- interrupts = <0 34 0x4>; /* Level high type */ +- clock-names = "i2c"; +- clocks = <&clockgen 4 3>; +- }; +- +- i2c2: i2c@2020000 { +- status = "disabled"; +- compatible = "fsl,vf610-i2c"; +- #address-cells = <1>; +- #size-cells = <0>; +- reg = <0x0 0x2020000 0x0 0x10000>; +- interrupts = <0 35 0x4>; /* Level high type */ +- clock-names = "i2c"; +- clocks = <&clockgen 4 3>; +- }; +- +- i2c3: i2c@2030000 { +- status = "disabled"; +- compatible = "fsl,vf610-i2c"; +- #address-cells = <1>; +- #size-cells = <0>; +- reg = <0x0 0x2030000 0x0 0x10000>; +- interrupts = <0 35 0x4>; /* Level high type */ +- clock-names = "i2c"; +- clocks = <&clockgen 4 3>; +- }; +- +- ifc: ifc@2240000 { +- compatible = "fsl,ifc", "simple-bus"; +- reg = <0x0 0x2240000 0x0 0x20000>; +- interrupts = <0 21 0x4>; /* Level high type */ +- little-endian; +- #address-cells = <2>; +- #size-cells = <1>; +- +- ranges = <0 0 0x5 0x80000000 0x08000000 +- 2 0 0x5 0x30000000 0x00010000 +- 3 0 0x5 0x20000000 0x00010000>; +- }; +- +- qspi: quadspi@20c0000 { +- status = "disabled"; +- compatible = "fsl,ls2080a-qspi", "fsl,ls1021a-qspi"; +- #address-cells = <1>; +- #size-cells = <0>; +- reg = <0x0 0x20c0000 0x0 0x10000>, +- <0x0 0x20000000 0x0 0x10000000>; +- reg-names = "QuadSPI", "QuadSPI-memory"; +- interrupts = <0 25 0x4>; /* Level high type */ +- clocks = <&clockgen 4 3>, <&clockgen 4 3>; +- clock-names = "qspi_en", "qspi"; +- }; +- +- pcie@3400000 { +- compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie", +- "snps,dw-pcie"; +- reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */ +- 0x10 0x00000000 0x0 0x00002000>; /* configuration space */ +- reg-names = "regs", "config"; +- interrupts = <0 108 0x4>; /* Level high type */ +- interrupt-names = "intr"; +- #address-cells = <3>; +- #size-cells = <2>; +- device_type = "pci"; +- dma-coherent; +- num-lanes = <4>; +- bus-range = <0x0 0xff>; +- ranges = <0x81000000 0x0 0x00000000 0x10 0x00010000 0x0 0x00010000 /* downstream I/O */ +- 0x82000000 0x0 0x40000000 0x10 
0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ +- msi-parent = <&its>; +- #interrupt-cells = <1>; +- interrupt-map-mask = <0 0 0 7>; +- interrupt-map = <0000 0 0 1 &gic 0 0 0 109 4>, +- <0000 0 0 2 &gic 0 0 0 110 4>, +- <0000 0 0 3 &gic 0 0 0 111 4>, +- <0000 0 0 4 &gic 0 0 0 112 4>; +- }; +- +- pcie@3500000 { +- compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie", +- "snps,dw-pcie"; +- reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */ +- 0x12 0x00000000 0x0 0x00002000>; /* configuration space */ +- reg-names = "regs", "config"; +- interrupts = <0 113 0x4>; /* Level high type */ +- interrupt-names = "intr"; +- #address-cells = <3>; +- #size-cells = <2>; +- device_type = "pci"; +- dma-coherent; +- num-lanes = <4>; +- bus-range = <0x0 0xff>; +- ranges = <0x81000000 0x0 0x00000000 0x12 0x00010000 0x0 0x00010000 /* downstream I/O */ +- 0x82000000 0x0 0x40000000 0x12 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ +- msi-parent = <&its>; +- #interrupt-cells = <1>; +- interrupt-map-mask = <0 0 0 7>; +- interrupt-map = <0000 0 0 1 &gic 0 0 0 114 4>, +- <0000 0 0 2 &gic 0 0 0 115 4>, +- <0000 0 0 3 &gic 0 0 0 116 4>, +- <0000 0 0 4 &gic 0 0 0 117 4>; +- }; +- +- pcie@3600000 { +- compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie", +- "snps,dw-pcie"; +- reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */ +- 0x14 0x00000000 0x0 0x00002000>; /* configuration space */ +- reg-names = "regs", "config"; +- interrupts = <0 118 0x4>; /* Level high type */ +- interrupt-names = "intr"; +- #address-cells = <3>; +- #size-cells = <2>; +- device_type = "pci"; +- dma-coherent; +- num-lanes = <8>; +- bus-range = <0x0 0xff>; +- ranges = <0x81000000 0x0 0x00000000 0x14 0x00010000 0x0 0x00010000 /* downstream I/O */ +- 0x82000000 0x0 0x40000000 0x14 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ +- msi-parent = <&its>; +- #interrupt-cells = <1>; +- interrupt-map-mask = <0 0 0 7>; +- interrupt-map = <0000 0 0 1 &gic 0 0 0 119 4>, +- <0000 0 0 2 &gic 0 0 0 120 4>, +- <0000 0 0 3 &gic 0 0 0 121 4>, +- <0000 0 0 4 &gic 0 0 0 122 4>; +- }; +- +- pcie@3700000 { +- compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie", +- "snps,dw-pcie"; +- reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */ +- 0x16 0x00000000 0x0 0x00002000>; /* configuration space */ +- reg-names = "regs", "config"; +- interrupts = <0 123 0x4>; /* Level high type */ +- interrupt-names = "intr"; +- #address-cells = <3>; +- #size-cells = <2>; +- device_type = "pci"; +- dma-coherent; +- num-lanes = <4>; +- bus-range = <0x0 0xff>; +- ranges = <0x81000000 0x0 0x00000000 0x16 0x00010000 0x0 0x00010000 /* downstream I/O */ +- 0x82000000 0x0 0x40000000 0x16 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ +- msi-parent = <&its>; +- #interrupt-cells = <1>; +- interrupt-map-mask = <0 0 0 7>; +- interrupt-map = <0000 0 0 1 &gic 0 0 0 124 4>, +- <0000 0 0 2 &gic 0 0 0 125 4>, +- <0000 0 0 3 &gic 0 0 0 126 4>, +- <0000 0 0 4 &gic 0 0 0 127 4>; +- }; +- +- sata0: sata@3200000 { +- status = "disabled"; +- compatible = "fsl,ls2080a-ahci"; +- reg = <0x0 0x3200000 0x0 0x10000>; +- interrupts = <0 133 0x4>; /* Level high type */ +- clocks = <&clockgen 4 3>; +- dma-coherent; +- }; +- +- sata1: sata@3210000 { +- status = "disabled"; +- compatible = "fsl,ls2080a-ahci"; +- reg = <0x0 0x3210000 0x0 0x10000>; +- interrupts = <0 136 0x4>; /* Level high type */ +- clocks = <&clockgen 4 3>; +- dma-coherent; +- }; +- +- usb0: usb3@3100000 { +- status = "disabled"; +- compatible = "snps,dwc3"; +- reg = 
<0x0 0x3100000 0x0 0x10000>; +- interrupts = <0 80 0x4>; /* Level high type */ +- dr_mode = "host"; +- snps,quirk-frame-length-adjustment = <0x20>; +- snps,dis_rxdet_inp3_quirk; +- }; +- +- usb1: usb3@3110000 { +- status = "disabled"; +- compatible = "snps,dwc3"; +- reg = <0x0 0x3110000 0x0 0x10000>; +- interrupts = <0 81 0x4>; /* Level high type */ +- dr_mode = "host"; +- snps,quirk-frame-length-adjustment = <0x20>; +- snps,dis_rxdet_inp3_quirk; +- }; +- +- ccn@4000000 { +- compatible = "arm,ccn-504"; +- reg = <0x0 0x04000000 0x0 0x01000000>; +- interrupts = <0 12 4>; +- }; +- }; +- +- ddr1: memory-controller@1080000 { +- compatible = "fsl,qoriq-memory-controller"; +- reg = <0x0 0x1080000 0x0 0x1000>; +- interrupts = <0 17 0x4>; +- little-endian; +- }; +- +- ddr2: memory-controller@1090000 { +- compatible = "fsl,qoriq-memory-controller"; +- reg = <0x0 0x1090000 0x0 0x1000>; +- interrupts = <0 18 0x4>; +- little-endian; ++#include "fsl-ls208xa.dtsi" ++ ++&cpu { ++ cpu0: cpu@0 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a57"; ++ reg = <0x0>; ++ clocks = <&clockgen 1 0>; ++ next-level-cache = <&cluster0_l2>; ++ #cooling-cells = <2>; ++ }; ++ ++ cpu1: cpu@1 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a57"; ++ reg = <0x1>; ++ clocks = <&clockgen 1 0>; ++ next-level-cache = <&cluster0_l2>; ++ }; ++ ++ cpu2: cpu@100 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a57"; ++ reg = <0x100>; ++ clocks = <&clockgen 1 1>; ++ next-level-cache = <&cluster1_l2>; ++ #cooling-cells = <2>; ++ }; ++ ++ cpu3: cpu@101 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a57"; ++ reg = <0x101>; ++ clocks = <&clockgen 1 1>; ++ next-level-cache = <&cluster1_l2>; ++ }; ++ ++ cpu4: cpu@200 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a57"; ++ reg = <0x200>; ++ clocks = <&clockgen 1 2>; ++ next-level-cache = <&cluster2_l2>; ++ #cooling-cells = <2>; ++ }; ++ ++ cpu5: cpu@201 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a57"; ++ reg = <0x201>; ++ clocks = <&clockgen 1 2>; ++ next-level-cache = <&cluster2_l2>; ++ }; ++ ++ cpu6: cpu@300 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a57"; ++ reg = <0x300>; ++ clocks = <&clockgen 1 3>; ++ next-level-cache = <&cluster3_l2>; ++ #cooling-cells = <2>; ++ }; ++ ++ cpu7: cpu@301 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a57"; ++ reg = <0x301>; ++ clocks = <&clockgen 1 3>; ++ next-level-cache = <&cluster3_l2>; + }; ++ ++ cluster0_l2: l2-cache0 { ++ compatible = "cache"; ++ }; ++ ++ cluster1_l2: l2-cache1 { ++ compatible = "cache"; ++ }; ++ ++ cluster2_l2: l2-cache2 { ++ compatible = "cache"; ++ }; ++ ++ cluster3_l2: l2-cache3 { ++ compatible = "cache"; ++ }; ++}; ++ ++&usb0 { ++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>; ++ snps,dma-snooping; ++}; ++ ++&usb1 { ++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>; ++ snps,dma-snooping; ++}; ++ ++&pcie1 { ++ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */ ++ 0x10 0x00000000 0x0 0x00002000>; /* configuration space */ ++ ++ ranges = <0x81000000 0x0 0x00000000 0x10 0x00010000 0x0 0x00010000 /* downstream I/O */ ++ 0x82000000 0x0 0x40000000 0x10 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ ++}; ++ ++&pcie2 { ++ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */ ++ 0x12 0x00000000 0x0 0x00002000>; /* configuration space */ ++ ++ ranges = <0x81000000 0x0 0x00000000 0x12 0x00010000 0x0 0x00010000 /* downstream I/O */ ++ 0x82000000 0x0 0x40000000 0x12 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ ++}; ++ 
++&pcie3 { ++ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */ ++ 0x14 0x00000000 0x0 0x00002000>; /* configuration space */ ++ ++ ranges = <0x81000000 0x0 0x00000000 0x14 0x00010000 0x0 0x00010000 /* downstream I/O */ ++ 0x82000000 0x0 0x40000000 0x14 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ ++}; ++ ++&pcie4 { ++ reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */ ++ 0x16 0x00000000 0x0 0x00002000>; /* configuration space */ ++ ++ ranges = <0x81000000 0x0 0x00000000 0x16 0x00010000 0x0 0x00010000 /* downstream I/O */ ++ 0x82000000 0x0 0x40000000 0x16 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ + }; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls2081a-rdb.dts +@@ -0,0 +1,161 @@ ++/* ++ * Device Tree file for NXP LS2081A RDB Board. ++ * ++ * Copyright 2017 NXP ++ * ++ * Priyanka Jain ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++/dts-v1/; ++ ++#include "fsl-ls2088a.dtsi" ++ ++/ { ++ model = "NXP Layerscape 2081A RDB Board"; ++ compatible = "fsl,ls2081a-rdb", "fsl,ls2081a"; ++ ++ aliases { ++ serial0 = &serial0; ++ serial1 = &serial1; ++ }; ++ ++ chosen { ++ stdout-path = "serial1:115200n8"; ++ }; ++}; ++ ++&esdhc { ++ status = "okay"; ++}; ++ ++&ifc { ++ status = "disabled"; ++}; ++ ++&i2c0 { ++ status = "okay"; ++ pca9547@75 { ++ compatible = "nxp,pca9547"; ++ reg = <0x75>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ i2c@1 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x01>; ++ rtc@51 { ++ compatible = "nxp,pcf2129"; ++ reg = <0x51>; ++ }; ++ }; ++ ++ i2c@2 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x02>; ++ ++ ina220@40 { ++ compatible = "ti,ina220"; ++ reg = <0x40>; ++ shunt-resistor = <500>; ++ }; ++ }; ++ ++ i2c@3 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x3>; ++ ++ adt7481@4c { ++ compatible = "adi,adt7461"; ++ reg = <0x4c>; ++ }; ++ }; ++ }; ++}; ++ ++&dspi { ++ status = "okay"; ++ dflash0: n25q512a { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "st,m25p80"; ++ spi-max-frequency = <3000000>; ++ reg = <0>; ++ }; ++}; ++ ++&qspi { ++ status = "okay"; ++ fsl,qspi-has-second-chip; ++ flash0: s25fs512s@0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "spansion,m25p80"; ++ m25p,fast-read; ++ spi-max-frequency = <20000000>; ++ reg = <0>; ++ }; ++ flash1: s25fs512s@1 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "spansion,m25p80"; ++ m25p,fast-read; ++ spi-max-frequency = <20000000>; ++ reg = <1>; ++ }; ++}; ++ ++&sata0 { ++ status = "okay"; ++}; ++ ++&sata1 { ++ status = "okay"; ++}; ++ ++&usb0 { ++ status = "okay"; ++}; ++ ++&usb1 { ++ status = "okay"; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts +@@ -0,0 +1,162 @@ ++/* ++ * Device Tree file for Freescale LS2088A QDS Board. ++ * ++ * Copyright 2016 Freescale Semiconductor, Inc. ++ * Copyright 2017 NXP ++ * ++ * Abhimanyu Saini ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++/dts-v1/; ++ ++#include "fsl-ls2088a.dtsi" ++#include "fsl-ls208xa-qds.dtsi" ++ ++/ { ++ model = "Freescale Layerscape 2088A QDS Board"; ++ compatible = "fsl,ls2088a-qds", "fsl,ls2088a"; ++ ++ chosen { ++ stdout-path = "serial0:115200n8"; ++ }; ++}; ++ ++&ifc { ++ boardctrl: board-control@3,0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "fsl,tetra-fpga", "fsl,fpga-qixis", "simple-bus"; ++ reg = <3 0 0x300>; /* TODO check address */ ++ ranges = <0 3 0 0x300>; ++ ++ mdio_mux_emi1 { ++ compatible = "mdio-mux-mmioreg", "mdio-mux"; ++ mdio-parent-bus = <&emdio1>; ++ reg = <0x54 1>; /* BRDCFG4 */ ++ mux-mask = <0xe0>; /* EMI1_MDIO */ ++ ++ #address-cells=<1>; ++ #size-cells = <0>; ++ ++ /* Child MDIO buses, one for each riser card: ++ * reg = 0x0, 0x20, 0x40, 0x60, 0x80, 0xa0. ++ * VSC8234 PHYs on the riser cards. ++ */ ++ ++ mdio_mux3: mdio@60 { ++ reg = <0x60>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ mdio0_phy12: mdio_phy0@1c { ++ reg = <0x1c>; ++ phy-connection-type = "sgmii"; ++ }; ++ mdio0_phy13: mdio_phy1@1d { ++ reg = <0x1d>; ++ phy-connection-type = "sgmii"; ++ }; ++ mdio0_phy14: mdio_phy2@1e { ++ reg = <0x1e>; ++ phy-connection-type = "sgmii"; ++ }; ++ mdio0_phy15: mdio_phy3@1f { ++ reg = <0x1f>; ++ phy-connection-type = "sgmii"; ++ }; ++ }; ++ }; ++ }; ++}; ++ ++&pcs_mdio1 { ++ pcs_phy1: ethernet-phy@0 { ++ backplane-mode = "10gbase-kr"; ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ reg = <0x0>; ++ fsl,lane-handle = <&serdes1>; ++ fsl,lane-reg = <0x9C0 0x40>;/* lane H */ ++ }; ++}; ++ ++&pcs_mdio2 { ++ pcs_phy2: ethernet-phy@0 { ++ backplane-mode = "10gbase-kr"; ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ reg = <0x0>; ++ fsl,lane-handle = <&serdes1>; ++ fsl,lane-reg = <0x980 0x40>;/* lane G */ ++ }; ++}; ++ ++&pcs_mdio3 { ++ pcs_phy3: ethernet-phy@0 { ++ backplane-mode = "10gbase-kr"; ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ reg = <0x0>; ++ fsl,lane-handle = <&serdes1>; ++ fsl,lane-reg = <0x940 0x40>;/* lane F */ ++ }; ++}; ++ ++&pcs_mdio4 { ++ pcs_phy4: ethernet-phy@0 { ++ backplane-mode = "10gbase-kr"; ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ reg = <0x0>; ++ fsl,lane-handle = <&serdes1>; ++ fsl,lane-reg = <0x900 0x40>;/* lane E */ ++ }; ++}; ++ ++/* Update DPMAC connections to external PHYs, under SerDes 0x2a_0x49. */ ++&dpmac9 { ++ phy-handle = <&mdio0_phy12>; ++}; ++&dpmac10 { ++ phy-handle = <&mdio0_phy13>; ++}; ++&dpmac11 { ++ phy-handle = <&mdio0_phy14>; ++}; ++&dpmac12 { ++ phy-handle = <&mdio0_phy15>; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts +@@ -0,0 +1,140 @@ ++/* ++ * Device Tree file for Freescale LS2088A RDB Board. ++ * ++ * Copyright 2016 Freescale Semiconductor, Inc. ++ * Copyright 2017 NXP ++ * ++ * Abhimanyu Saini ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. 
++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++/dts-v1/; ++ ++#include "fsl-ls2088a.dtsi" ++#include "fsl-ls208xa-rdb.dtsi" ++ ++/ { ++ model = "Freescale Layerscape 2088A RDB Board"; ++ compatible = "fsl,ls2088a-rdb", "fsl,ls2088a"; ++ ++ chosen { ++ stdout-path = "serial1:115200n8"; ++ }; ++}; ++ ++&emdio1 { ++ status = "disabled"; ++ /* CS4340 PHYs */ ++ mdio1_phy1: emdio1_phy@1 { ++ reg = <0x10>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio1_phy2: emdio1_phy@2 { ++ reg = <0x11>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio1_phy3: emdio1_phy@3 { ++ reg = <0x12>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio1_phy4: emdio1_phy@4 { ++ reg = <0x13>; ++ phy-connection-type = "xfi"; ++ }; ++}; ++ ++&emdio2 { ++ /* AQR405 PHYs */ ++ mdio2_phy1: emdio2_phy@1 { ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ interrupts = <0 1 0x4>; /* Level high type */ ++ reg = <0x0>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio2_phy2: emdio2_phy@2 { ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ interrupts = <0 2 0x4>; /* Level high type */ ++ reg = <0x1>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio2_phy3: emdio2_phy@3 { ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ interrupts = <0 4 0x4>; /* Level high type */ ++ reg = <0x2>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio2_phy4: emdio2_phy@4 { ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ interrupts = <0 5 0x4>; /* Level high type */ ++ reg = <0x3>; ++ phy-connection-type = "xfi"; ++ }; ++}; ++ ++/* Update DPMAC connections to external PHYs, under the assumption of ++ * SerDes 0x2a_0x41. This is currently the only SerDes supported on the board. 
++ */ ++/* Leave Cortina PHYs commented out until proper driver is integrated ++ *&dpmac1 { ++ * phy-handle = <&mdio1_phy1>; ++ *}; ++ *&dpmac2 { ++ * phy-handle = <&mdio1_phy2>; ++ *}; ++ *&dpmac3 { ++ * phy-handle = <&mdio1_phy3>; ++ *}; ++ *&dpmac4 { ++ * phy-handle = <&mdio1_phy4>; ++ *}; ++ */ ++ ++&dpmac5 { ++ phy-handle = <&mdio2_phy1>; ++}; ++&dpmac6 { ++ phy-handle = <&mdio2_phy2>; ++}; ++&dpmac7 { ++ phy-handle = <&mdio2_phy3>; ++}; ++&dpmac8 { ++ phy-handle = <&mdio2_phy4>; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi +@@ -0,0 +1,195 @@ ++/* ++ * Device Tree Include file for Freescale Layerscape-2088A family SoC. ++ * ++ * Copyright 2016 Freescale Semiconductor, Inc. ++ * Copyright 2017 NXP ++ * ++ * Abhimanyu Saini ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */
++
++#include "fsl-ls208xa.dtsi"
++
++&cpu {
++ cpu0: cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x0>;
++ clocks = <&clockgen 1 0>;
++ next-level-cache = <&cluster0_l2>;
++ #cooling-cells = <2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu1: cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x1>;
++ clocks = <&clockgen 1 0>;
++ next-level-cache = <&cluster0_l2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu2: cpu@100 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x100>;
++ clocks = <&clockgen 1 1>;
++ next-level-cache = <&cluster1_l2>;
++ #cooling-cells = <2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu3: cpu@101 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x101>;
++ clocks = <&clockgen 1 1>;
++ next-level-cache = <&cluster1_l2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu4: cpu@200 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x200>;
++ clocks = <&clockgen 1 2>;
++ next-level-cache = <&cluster2_l2>;
++ #cooling-cells = <2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu5: cpu@201 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x201>;
++ clocks = <&clockgen 1 2>;
++ next-level-cache = <&cluster2_l2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu6: cpu@300 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x300>;
++ clocks = <&clockgen 1 3>;
++ next-level-cache = <&cluster3_l2>;
++ #cooling-cells = <2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu7: cpu@301 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x301>;
++ clocks = <&clockgen 1 3>;
++ next-level-cache = <&cluster3_l2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ idle-states {
++ /*
++ * The PSCI node is not added by default; U-Boot will add the
++ * missing parts if it determines to use PSCI.
++ */ ++ entry-method = "arm,psci"; ++ ++ CPU_PH20: cpu-ph20 { ++ compatible = "arm,idle-state"; ++ idle-state-name = "PH20"; ++ arm,psci-suspend-param = <0x0>; ++ entry-latency-us = <1000>; ++ exit-latency-us = <1000>; ++ min-residency-us = <3000>; ++ }; ++ }; ++ ++ cluster0_l2: l2-cache0 { ++ compatible = "cache"; ++ }; ++ ++ cluster1_l2: l2-cache1 { ++ compatible = "cache"; ++ }; ++ ++ cluster2_l2: l2-cache2 { ++ compatible = "cache"; ++ }; ++ ++ cluster3_l2: l2-cache3 { ++ compatible = "cache"; ++ }; ++}; ++ ++&pcie1 { ++ compatible = "fsl,ls2088a-pcie", "snps,dw-pcie"; ++ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */ ++ 0x20 0x00000000 0x0 0x00002000>; /* configuration space */ ++ ++ ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000 ++ 0x82000000 0x0 0x40000000 0x20 0x40000000 0x0 0x40000000>; ++}; ++ ++&pcie2 { ++ compatible = "fsl,ls2088a-pcie", "snps,dw-pcie"; ++ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */ ++ 0x28 0x00000000 0x0 0x00002000>; /* configuration space */ ++ ++ ranges = <0x81000000 0x0 0x00000000 0x28 0x00010000 0x0 0x00010000 ++ 0x82000000 0x0 0x40000000 0x28 0x40000000 0x0 0x40000000>; ++}; ++ ++&pcie3 { ++ compatible = "fsl,ls2088a-pcie", "snps,dw-pcie"; ++ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */ ++ 0x30 0x00000000 0x0 0x00002000>; /* configuration space */ ++ ++ ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000 ++ 0x82000000 0x0 0x40000000 0x30 0x40000000 0x0 0x40000000>; ++}; ++ ++&pcie4 { ++ compatible = "fsl,ls2088a-pcie", "snps,dw-pcie"; ++ reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */ ++ 0x38 0x00000000 0x0 0x00002000>; /* configuration space */ ++ ++ ranges = <0x81000000 0x0 0x00000000 0x38 0x00010000 0x0 0x00010000 ++ 0x82000000 0x0 0x40000000 0x38 0x40000000 0x0 0x40000000>; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi +@@ -0,0 +1,198 @@ ++/* ++ * Device Tree file for Freescale LS2080A QDS Board. ++ * ++ * Copyright 2016 Freescale Semiconductor, Inc. ++ * Copyright 2017 NXP ++ * ++ * Abhimanyu Saini ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++&esdhc { ++ mmc-hs200-1_8v; ++ status = "okay"; ++}; ++ ++&ifc { ++ status = "okay"; ++ #address-cells = <2>; ++ #size-cells = <1>; ++ ranges = <0x0 0x0 0x5 0x80000000 0x08000000 ++ 0x2 0x0 0x5 0x30000000 0x00010000 ++ 0x3 0x0 0x5 0x20000000 0x00010000>; ++ ++ nor@0,0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "cfi-flash"; ++ reg = <0x0 0x0 0x8000000>; ++ bank-width = <2>; ++ device-width = <1>; ++ }; ++ ++ nand@2,0 { ++ compatible = "fsl,ifc-nand"; ++ reg = <0x2 0x0 0x10000>; ++ }; ++ ++ cpld@3,0 { ++ reg = <0x3 0x0 0x10000>; ++ compatible = "fsl,ls2080aqds-fpga", "fsl,fpga-qixis"; ++ }; ++}; ++ ++&i2c0 { ++ status = "okay"; ++ pca9547@77 { ++ compatible = "nxp,pca9547"; ++ reg = <0x77>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ i2c@0 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x00>; ++ rtc@68 { ++ compatible = "dallas,ds3232"; ++ reg = <0x68>; ++ }; ++ }; ++ ++ i2c@2 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x02>; ++ ++ ina220@40 { ++ compatible = "ti,ina220"; ++ reg = <0x40>; ++ shunt-resistor = <500>; ++ }; ++ ++ ina220@41 { ++ compatible = "ti,ina220"; ++ reg = <0x41>; ++ shunt-resistor = <1000>; ++ }; ++ }; ++ ++ i2c@3 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x3>; ++ ++ adt7481@4c { ++ compatible = "adi,adt7461"; ++ reg = <0x4c>; ++ }; ++ }; ++ }; ++}; ++ ++&i2c1 { ++ status = "disabled"; ++}; ++ ++&i2c2 { ++ status = "disabled"; ++}; ++ ++&i2c3 { ++ status = "disabled"; ++}; ++ ++&dspi { ++ status = "okay"; ++ dflash0: n25q128a { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "st,m25p80"; ++ spi-max-frequency = <3000000>; ++ reg = <0>; ++ }; ++ dflash1: sst25wf040b { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "st,m25p80"; ++ spi-max-frequency = <3000000>; ++ reg = <1>; ++ }; ++ dflash2: en25s64 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "st,m25p80"; ++ spi-max-frequency = <3000000>; ++ reg = <2>; ++ }; ++}; ++ ++&qspi { ++ status = "okay"; ++ flash0: s25fl256s1@0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "st,m25p80"; ++ spi-max-frequency = <20000000>; ++ reg = <0>; ++ }; ++ flash2: s25fl256s1@2 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "st,m25p80"; ++ spi-max-frequency = <20000000>; ++ reg = <0>; ++ }; ++}; ++ ++&sata0 { ++ status = "okay"; ++}; ++ ++&sata1 { ++ status = "okay"; ++}; ++ ++&usb0 { ++ status = "okay"; ++}; ++ ++&usb1 { ++ status = "okay"; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi +@@ -0,0 +1,161 @@ ++/* ++ * Device Tree file for Freescale LS2080A RDB Board. ++ * ++ * Copyright 2016 Freescale Semiconductor, Inc. ++ * Copyright 2017 NXP ++ * ++ * Abhimanyu Saini ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. 
++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++&esdhc { ++ status = "okay"; ++}; ++ ++&ifc { ++ status = "okay"; ++ #address-cells = <2>; ++ #size-cells = <1>; ++ ranges = <0x0 0x0 0x5 0x80000000 0x08000000 ++ 0x2 0x0 0x5 0x30000000 0x00010000 ++ 0x3 0x0 0x5 0x20000000 0x00010000>; ++ ++ nor@0,0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "cfi-flash"; ++ reg = <0x0 0x0 0x8000000>; ++ bank-width = <2>; ++ device-width = <1>; ++ }; ++ ++ nand@2,0 { ++ compatible = "fsl,ifc-nand"; ++ reg = <0x2 0x0 0x10000>; ++ }; ++ ++ cpld@3,0 { ++ reg = <0x3 0x0 0x10000>; ++ compatible = "fsl,ls2080aqds-fpga", "fsl,fpga-qixis"; ++ }; ++ ++}; ++ ++&i2c0 { ++ status = "okay"; ++ pca9547@75 { ++ compatible = "nxp,pca9547"; ++ reg = <0x75>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ i2c-mux-never-disable; ++ i2c@1 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x01>; ++ rtc@68 { ++ compatible = "dallas,ds3232"; ++ reg = <0x68>; ++ }; ++ }; ++ ++ i2c@3 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x3>; ++ ++ adt7481@4c { ++ compatible = "adi,adt7461"; ++ reg = <0x4c>; ++ }; ++ }; ++ }; ++}; ++ ++&i2c1 { ++ status = "disabled"; ++}; ++ ++&i2c2 { ++ status = "disabled"; ++}; ++ ++&i2c3 { ++ status = "disabled"; ++}; ++ ++&dspi { ++ status = "okay"; ++ dflash0: n25q512a { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "st,m25p80"; ++ spi-max-frequency = <3000000>; ++ reg = <0>; ++ }; ++}; ++ ++&qspi { ++ status = "okay"; ++ flash0: s25fs512s@0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "spansion,m25p80"; ++ m25p,fast-read; ++ spi-max-frequency = <20000000>; ++ reg = <0>; ++ }; ++}; ++ ++&sata0 { ++ status = "okay"; ++}; ++ ++&sata1 { ++ status = "okay"; ++}; ++ ++&usb0 { ++ status = "okay"; ++}; ++ ++&usb1 { ++ status = "okay"; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi +@@ 
-0,0 +1,910 @@ ++/* ++ * Device Tree Include file for Freescale Layerscape-2080A family SoC. ++ * ++ * Copyright 2016 Freescale Semiconductor, Inc. ++ * Copyright 2017 NXP ++ * ++ * Abhimanyu Saini ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */
++
++#include <dt-bindings/interrupt-controller/arm-gic.h>
++#include <dt-bindings/thermal/thermal.h>
++
++/ {
++ compatible = "fsl,ls2080a";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ aliases {
++ crypto = &crypto;
++ serial0 = &serial0;
++ serial1 = &serial1;
++ };
++
++ cpu: cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0x00000000 0x80000000 0 0x80000000>;
++ /* DRAM space - 1, size : 2 GB DRAM */
++ };
++
++ sysclk: sysclk {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <100000000>;
++ clock-output-names = "sysclk";
++ };
++
++ gic: interrupt-controller@6000000 {
++ compatible = "arm,gic-v3";
++ reg = <0x0 0x06000000 0 0x10000>, /* GIC Dist */
++ <0x0 0x06100000 0 0x100000>, /* GICR (RD_base + SGI_base) */
++ <0x0 0x0c0c0000 0 0x2000>, /* GICC */
++ <0x0 0x0c0d0000 0 0x1000>, /* GICH */
++ <0x0 0x0c0e0000 0 0x20000>; /* GICV */
++ #interrupt-cells = <3>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++ interrupt-controller;
++ interrupts = <1 9 0x4>;
++
++ its: gic-its@6020000 {
++ compatible = "arm,gic-v3-its";
++ msi-controller;
++ reg = <0x0 0x6020000 0 0x20000>;
++ };
++ };
++
++ rstcr: syscon@1e60000 {
++ compatible = "fsl,ls2080a-rstcr", "syscon";
++ reg = <0x0 0x1e60000 0x0 0x4>;
++ };
++
++ reboot {
++ compatible = "syscon-reboot";
++ regmap = <&rstcr>;
++ offset = <0x0>;
++ mask = <0x2>;
++ };
++
++ timer {
++ compatible = "arm,armv8-timer";
++ interrupts = <1 13 4>, /* Physical Secure PPI, active-low */
++ <1 14 4>, /* Physical Non-Secure PPI, active-low */
++ <1 11 4>, /* Virtual PPI, active-low */
++ <1 10 4>; /* Hypervisor PPI, active-low */
++ fsl,erratum-a008585;
++ };
++
++ pmu {
++ compatible = "arm,armv8-pmuv3";
++ interrupts = <1 7 0x8>; /* PMU PPI, Level low type */
++ };
++
++ soc {
++ compatible = "simple-bus";
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++
++ clockgen: clocking@1300000 {
++ compatible = "fsl,ls2080a-clockgen";
++ reg = <0 0x1300000 0 0xa0000>;
++ #clock-cells = <2>;
++ clocks = <&sysclk>;
++ };
++
++ dcfg: dcfg@1e00000 {
++ compatible = "fsl,ls2080a-dcfg", "syscon";
++ reg = <0x0 0x1e00000 0x0 0x10000>;
++ little-endian;
++ };
++
++ tmu: tmu@1f80000 {
++ compatible = "fsl,qoriq-tmu";
++ reg = <0x0 0x1f80000 0x0 0x10000>;
++ interrupts = <0 23 0x4>;
++ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>;
++ fsl,tmu-calibration = <0x00000000 0x00000026
++ 0x00000001 0x0000002d
++ 0x00000002 0x00000032
++ 0x00000003 0x00000039
++ 0x00000004 0x0000003f
++ 0x00000005 0x00000046
++ 0x00000006 0x0000004d
++ 0x00000007 0x00000054
++ 0x00000008 0x0000005a
++ 0x00000009 0x00000061
++ 0x0000000a 0x0000006a
++ 0x0000000b 0x00000071
++
++ 0x00010000 0x00000025
++ 0x00010001 0x0000002c
++ 0x00010002 0x00000035
++ 0x00010003 0x0000003d
++ 0x00010004 0x00000045
++ 0x00010005 0x0000004e
++ 0x00010006 0x00000057
++ 0x00010007 0x00000061
++ 0x00010008 0x0000006b
++ 0x00010009 0x00000076
++
++ 0x00020000 0x00000029
++ 0x00020001 0x00000033
++ 0x00020002 0x0000003d
++ 0x00020003 0x00000049
++ 0x00020004 0x00000056
++ 0x00020005 0x00000061
++ 0x00020006 0x0000006d
++
++ 0x00030000 0x00000021
++ 0x00030001 0x0000002a
++ 0x00030002 0x0000003c
++ 0x00030003 0x0000004e>;
++ little-endian;
++ #thermal-sensor-cells = <1>;
++ };
++
++ thermal-zones {
++ cpu_thermal: cpu-thermal {
++ polling-delay-passive = <1000>;
++ polling-delay = <5000>;
++
++ thermal-sensors = <&tmu 4>;
++
++ trips {
++ cpu_alert: cpu-alert {
++ temperature = <75000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++ cpu_crit: cpu-crit {
++ temperature = <85000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++
++ cooling-maps {
++ map0 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu0 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ map1 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu2 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ map2 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu4 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ map3 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu6 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++ };
++
++ serial0: serial@21c0500 {
++ compatible = "fsl,ns16550", "ns16550a";
++ reg = <0x0 0x21c0500 0x0 0x100>;
++ clocks = <&clockgen 4 3>;
++ interrupts = <0 32 0x4>; /* Level high type */
++ };
++
++ serial1: serial@21c0600 {
++ compatible = "fsl,ns16550", "ns16550a";
++ reg = <0x0 0x21c0600 0x0 0x100>;
++ clocks = <&clockgen 4 3>;
++ interrupts = <0 32 0x4>; /* Level high type */
++ };
++
++ cluster1_core0_watchdog: wdt@c000000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc000000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster1_core1_watchdog: wdt@c010000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc010000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster2_core0_watchdog: wdt@c100000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc100000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster2_core1_watchdog: wdt@c110000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc110000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster3_core0_watchdog: wdt@c200000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc200000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster3_core1_watchdog: wdt@c210000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc210000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster4_core0_watchdog: wdt@c300000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc300000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster4_core1_watchdog: wdt@c310000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc310000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ crypto: crypto@8000000 {
++ compatible = "fsl,sec-v5.0", "fsl,sec-v4.0";
++ fsl,sec-era = <8>;
++ #address-cells = <1>;
++ #size-cells = <1>;
++ ranges = <0x0 0x00 0x8000000 0x100000>;
++ reg = <0x00 0x8000000 0x0 0x100000>;
++ interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
++ dma-coherent;
++
++ sec_jr0: jr@10000 {
++ compatible = "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x10000 0x10000>;
++ interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ sec_jr1: jr@20000 {
++ compatible = "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x20000 0x10000>;
++ interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ sec_jr2: jr@30000 {
++ compatible = "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x30000 0x10000>;
++ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ sec_jr3: jr@40000 {
++ compatible = "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x40000 0x10000>;
++ interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
++ };
++ };
++
++ fsl_mc: fsl-mc@80c000000 {
++ compatible = "fsl,qoriq-mc";
++ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
++ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */
++ msi-parent = <&its>;
++ iommu-map = <0 &smmu 0 0>; /* This is fixed-up by u-boot */
++ #address-cells = <3>;
++ #size-cells = <1>;
++
++ /*
++ * Region type 0x0 - MC portals
++ * Region type 0x1 - QBMAN portals
++ */
++ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000
++ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>;
++
++ /*
++ * Define the maximum number of MACs present on the SoC.
++ */
++ dpmacs {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ dpmac1: dpmac@1 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x1>;
++ };
++
++ dpmac2: dpmac@2 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x2>;
++ };
++
++ dpmac3: dpmac@3 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x3>;
++ };
++
++ dpmac4: dpmac@4 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x4>;
++ };
++
++ dpmac5: dpmac@5 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x5>;
++ };
++
++ dpmac6: dpmac@6 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x6>;
++ };
++
++ dpmac7: dpmac@7 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x7>;
++ };
++
++ dpmac8: dpmac@8 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x8>;
++ };
++
++ dpmac9: dpmac@9 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x9>;
++ };
++
++ dpmac10: dpmac@a {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xa>;
++ };
++
++ dpmac11: dpmac@b {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xb>;
++ };
++
++ dpmac12: dpmac@c {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xc>;
++ };
++
++ dpmac13: dpmac@d {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xd>;
++ };
++
++ dpmac14: dpmac@e {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xe>;
++ };
++
++ dpmac15: dpmac@f {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xf>;
++ };
++
++ dpmac16: dpmac@10 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x10>;
++ };
++ };
++ };
++
++ smmu: iommu@5000000 {
++ compatible = "arm,mmu-500";
++ reg = <0 0x5000000 0 0x800000>;
++ #global-interrupts = <12>;
++ #iommu-cells = <1>;
++ stream-match-mask = <0x7C00>;
++ interrupts = <0 13 4>, /* global secure fault */
++ <0 14 4>, /* combined secure interrupt */
++ <0 15 4>, /* global non-secure fault */
++ <0 16 4>, /* combined non-secure interrupt */
++ /* performance counter interrupts 0-7 */
++ <0 211 4>, <0 212 4>,
++ <0 213 4>, <0 214 4>,
++ <0 215 4>, <0 216 4>,
++ <0 217 4>, <0 218 4>,
++ /* per context interrupt, 64 interrupts */
++ <0 146 4>, <0 147 4>,
++ <0 148 4>, <0 149 4>,
++ <0 150 4>, <0 151 4>,
++ <0 152 4>, <0 153 4>,
++ <0 154 4>, <0 155 4>,
++ <0 156 4>, <0 157 4>,
++ <0 158 4>, <0 159 4>,
++ <0 160 4>, <0 161 4>,
++ <0 162 4>, <0 163 4>,
++ <0 164 4>, <0 165 4>,
++ <0 166 4>, <0 167 4>,
++ <0 168 4>, <0 169 4>,
++ <0 170 4>, <0 171 4>,
++ <0 172 4>, <0 173 4>,
++ <0 174 4>, <0 175 4>,
++ <0 176 4>, <0 177 4>,
++ <0 178 4>, <0 179 4>,
++ <0 180 4>, <0 181 4>,
++ <0 182 4>, <0 183 4>,
++ <0 184 4>, <0 185 4>,
++ <0 186 4>, <0 187 4>,
++ <0 188 4>, <0 189 4>,
++ <0 190 4>, <0 191 4>,
++ <0 192 4>, <0 193 4>,
++ <0 194 4>, <0 195 4>,
++ <0 196 4>, <0 197 4>,
++ <0 198 4>, <0 199 4>,
++ <0 200 4>, <0 201 4>,
++ <0 202 4>, <0 203 4>,
++ <0 204 4>, <0 205 4>,
++ <0 206 4>, <0 207 4>,
++ <0 208 4>, <0 209 4>;
++ };
++
++ dspi: dspi@2100000 {
++ status = "disabled";
++ compatible =
"fsl,ls2080a-dspi", "fsl,ls2085a-dspi"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2100000 0x0 0x10000>; ++ interrupts = <0 26 0x4>; /* Level high type */ ++ clocks = <&clockgen 4 3>; ++ clock-names = "dspi"; ++ spi-num-chipselects = <5>; ++ bus-num = <0>; ++ }; ++ ++ esdhc: esdhc@2140000 { ++ status = "disabled"; ++ compatible = "fsl,ls2080a-esdhc", "fsl,esdhc"; ++ reg = <0x0 0x2140000 0x0 0x10000>; ++ interrupts = <0 28 0x4>; /* Level high type */ ++ clocks = <&clockgen 4 1>; ++ voltage-ranges = <1800 1800 3300 3300>; ++ sdhci,auto-cmd12; ++ little-endian; ++ bus-width = <4>; ++ }; ++ ++ gpio0: gpio@2300000 { ++ compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio"; ++ reg = <0x0 0x2300000 0x0 0x10000>; ++ interrupts = <0 36 0x4>; /* Level high type */ ++ gpio-controller; ++ little-endian; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ gpio1: gpio@2310000 { ++ compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio"; ++ reg = <0x0 0x2310000 0x0 0x10000>; ++ interrupts = <0 36 0x4>; /* Level high type */ ++ gpio-controller; ++ little-endian; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ gpio2: gpio@2320000 { ++ compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio"; ++ reg = <0x0 0x2320000 0x0 0x10000>; ++ interrupts = <0 37 0x4>; /* Level high type */ ++ gpio-controller; ++ little-endian; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ gpio3: gpio@2330000 { ++ compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio"; ++ reg = <0x0 0x2330000 0x0 0x10000>; ++ interrupts = <0 37 0x4>; /* Level high type */ ++ gpio-controller; ++ little-endian; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ /* TODO: WRIOP (CCSR?) */ ++ emdio1: mdio@0x8B96000 { /* WRIOP0: 0x8B8_0000, ++ * E-MDIO1: 0x1_6000 ++ */ ++ compatible = "fsl,fman-memac-mdio"; ++ reg = <0x0 0x8B96000 0x0 0x1000>; ++ device_type = "mdio"; /* TODO: is this necessary? */ ++ little-endian; /* force the driver in LE mode */ ++ ++ /* Not necessary on the QDS, but needed on the RDB */ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ emdio2: mdio@0x8B97000 { /* WRIOP0: 0x8B8_0000, ++ * E-MDIO2: 0x1_7000 ++ */ ++ compatible = "fsl,fman-memac-mdio"; ++ reg = <0x0 0x8B97000 0x0 0x1000>; ++ device_type = "mdio"; /* TODO: is this necessary? 
*/ ++ little-endian; /* force the driver in LE mode */ ++ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ pcs_mdio1: mdio@0x8c07000 { ++ compatible = "fsl,fman-memac-mdio"; ++ reg = <0x0 0x8c07000 0x0 0x1000>; ++ device_type = "mdio"; ++ little-endian; ++ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ pcs_mdio2: mdio@0x8c0b000 { ++ compatible = "fsl,fman-memac-mdio"; ++ reg = <0x0 0x8c0b000 0x0 0x1000>; ++ device_type = "mdio"; ++ little-endian; ++ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ pcs_mdio3: mdio@0x8c0f000 { ++ compatible = "fsl,fman-memac-mdio"; ++ reg = <0x0 0x8c0f000 0x0 0x1000>; ++ device_type = "mdio"; ++ little-endian; ++ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ pcs_mdio4: mdio@0x8c13000 { ++ compatible = "fsl,fman-memac-mdio"; ++ reg = <0x0 0x8c13000 0x0 0x1000>; ++ device_type = "mdio"; ++ little-endian; ++ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ pcs_mdio5: mdio@0x8c17000 { ++ status = "disabled"; ++ compatible = "fsl,fman-memac-mdio"; ++ reg = <0x0 0x8c17000 0x0 0x1000>; ++ device_type = "mdio"; ++ little-endian; ++ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ pcs_mdio6: mdio@0x8c1b000 { ++ status = "disabled"; ++ compatible = "fsl,fman-memac-mdio"; ++ reg = <0x0 0x8c1b000 0x0 0x1000>; ++ device_type = "mdio"; ++ little-endian; ++ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ pcs_mdio7: mdio@0x8c1f000 { ++ status = "disabled"; ++ compatible = "fsl,fman-memac-mdio"; ++ reg = <0x0 0x8c1f000 0x0 0x1000>; ++ device_type = "mdio"; ++ little-endian; ++ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ pcs_mdio8: mdio@0x8c23000 { ++ status = "disabled"; ++ compatible = "fsl,fman-memac-mdio"; ++ reg = <0x0 0x8c23000 0x0 0x1000>; ++ device_type = "mdio"; ++ little-endian; ++ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ i2c0: i2c@2000000 { ++ status = "disabled"; ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2000000 0x0 0x10000>; ++ interrupts = <0 34 0x4>; /* Level high type */ ++ clock-names = "i2c"; ++ clocks = <&clockgen 4 3>; ++ }; ++ ++ i2c1: i2c@2010000 { ++ status = "disabled"; ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2010000 0x0 0x10000>; ++ interrupts = <0 34 0x4>; /* Level high type */ ++ clock-names = "i2c"; ++ clocks = <&clockgen 4 3>; ++ }; ++ ++ i2c2: i2c@2020000 { ++ status = "disabled"; ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2020000 0x0 0x10000>; ++ interrupts = <0 35 0x4>; /* Level high type */ ++ clock-names = "i2c"; ++ clocks = <&clockgen 4 3>; ++ }; ++ ++ i2c3: i2c@2030000 { ++ status = "disabled"; ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2030000 0x0 0x10000>; ++ interrupts = <0 35 0x4>; /* Level high type */ ++ clock-names = "i2c"; ++ clocks = <&clockgen 4 3>; ++ }; ++ ++ ifc: ifc@2240000 { ++ compatible = "fsl,ifc", "simple-bus"; ++ reg = <0x0 0x2240000 0x0 0x20000>; ++ interrupts = <0 21 0x4>; /* Level high type */ ++ little-endian; ++ #address-cells = <2>; ++ #size-cells = <1>; ++ ++ ranges = <0 0 0x5 0x80000000 0x08000000 ++ 2 0 0x5 0x30000000 0x00010000 ++ 3 0 0x5 0x20000000 0x00010000>; ++ }; ++ ++ qspi: quadspi@20c0000 { ++ status = "disabled"; ++ compatible = "fsl,ls2080a-qspi", "fsl,ls1021a-qspi"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x20c0000 0x0 0x10000>, ++ <0x0 0x20000000 0x0 
0x10000000>; ++ reg-names = "QuadSPI", "QuadSPI-memory"; ++ interrupts = <0 25 0x4>; /* Level high type */ ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>; ++ clock-names = "qspi_en", "qspi"; ++ }; ++ ++ pcie1: pcie@3400000 { ++ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie", ++ "snps,dw-pcie"; ++ reg-names = "regs", "config"; ++ interrupts = <0 108 0x4>; /* aer interrupt */ ++ interrupt-names = "aer"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ dma-coherent; ++ num-lanes = <4>; ++ bus-range = <0x0 0xff>; ++ msi-parent = <&its>; ++ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */ ++ #interrupt-cells = <1>; ++ interrupt-map-mask = <0 0 0 7>; ++ interrupt-map = <0000 0 0 1 &gic 0 0 0 109 4>, ++ <0000 0 0 2 &gic 0 0 0 110 4>, ++ <0000 0 0 3 &gic 0 0 0 111 4>, ++ <0000 0 0 4 &gic 0 0 0 112 4>; ++ }; ++ ++ pcie2: pcie@3500000 { ++ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie", ++ "snps,dw-pcie"; ++ reg-names = "regs", "config"; ++ interrupts = <0 113 0x4>; /* aer interrupt */ ++ interrupt-names = "aer"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ dma-coherent; ++ num-lanes = <4>; ++ bus-range = <0x0 0xff>; ++ msi-parent = <&its>; ++ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */ ++ #interrupt-cells = <1>; ++ interrupt-map-mask = <0 0 0 7>; ++ interrupt-map = <0000 0 0 1 &gic 0 0 0 114 4>, ++ <0000 0 0 2 &gic 0 0 0 115 4>, ++ <0000 0 0 3 &gic 0 0 0 116 4>, ++ <0000 0 0 4 &gic 0 0 0 117 4>; ++ }; ++ ++ pcie3: pcie@3600000 { ++ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie", ++ "snps,dw-pcie"; ++ reg-names = "regs", "config"; ++ interrupts = <0 118 0x4>; /* aer interrupt */ ++ interrupt-names = "aer"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ dma-coherent; ++ num-lanes = <8>; ++ bus-range = <0x0 0xff>; ++ msi-parent = <&its>; ++ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */ ++ #interrupt-cells = <1>; ++ interrupt-map-mask = <0 0 0 7>; ++ interrupt-map = <0000 0 0 1 &gic 0 0 0 119 4>, ++ <0000 0 0 2 &gic 0 0 0 120 4>, ++ <0000 0 0 3 &gic 0 0 0 121 4>, ++ <0000 0 0 4 &gic 0 0 0 122 4>; ++ }; ++ ++ pcie4: pcie@3700000 { ++ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie", ++ "snps,dw-pcie"; ++ reg-names = "regs", "config"; ++ interrupts = <0 123 0x4>; /* aer interrupt */ ++ interrupt-names = "aer"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ dma-coherent; ++ num-lanes = <4>; ++ bus-range = <0x0 0xff>; ++ msi-parent = <&its>; ++ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */ ++ #interrupt-cells = <1>; ++ interrupt-map-mask = <0 0 0 7>; ++ interrupt-map = <0000 0 0 1 &gic 0 0 0 124 4>, ++ <0000 0 0 2 &gic 0 0 0 125 4>, ++ <0000 0 0 3 &gic 0 0 0 126 4>, ++ <0000 0 0 4 &gic 0 0 0 127 4>; ++ }; ++ ++ sata0: sata@3200000 { ++ status = "disabled"; ++ compatible = "fsl,ls2080a-ahci"; ++ reg = <0x0 0x3200000 0x0 0x10000>; ++ interrupts = <0 133 0x4>; /* Level high type */ ++ clocks = <&clockgen 4 3>; ++ dma-coherent; ++ }; ++ ++ sata1: sata@3210000 { ++ status = "disabled"; ++ compatible = "fsl,ls2080a-ahci"; ++ reg = <0x0 0x3210000 0x0 0x10000>; ++ interrupts = <0 136 0x4>; /* Level high type */ ++ clocks = <&clockgen 4 3>; ++ dma-coherent; ++ }; ++ ++ usb0: usb3@3100000 { ++ status = "disabled"; ++ compatible = "snps,dwc3"; ++ reg = <0x0 0x3100000 0x0 0x10000>; ++ interrupts = <0 80 0x4>; /* Level high type */ ++ dr_mode = "host"; ++ snps,quirk-frame-length-adjustment = <0x20>; ++ snps,dis_rxdet_inp3_quirk; ++ }; ++ ++ 
usb1: usb3@3110000 { ++ status = "disabled"; ++ compatible = "snps,dwc3"; ++ reg = <0x0 0x3110000 0x0 0x10000>; ++ interrupts = <0 81 0x4>; /* Level high type */ ++ dr_mode = "host"; ++ snps,quirk-frame-length-adjustment = <0x20>; ++ snps,dis_rxdet_inp3_quirk; ++ }; ++ ++ serdes1: serdes@1ea0000 { ++ reg = <0x0 0x1ea0000 0 0x00002000>; ++ }; ++ ++ ccn@4000000 { ++ compatible = "arm,ccn-504"; ++ reg = <0x0 0x04000000 0x0 0x01000000>; ++ interrupts = <0 12 4>; ++ }; ++ ++ ftm0: ftm0@2800000 { ++ compatible = "fsl,ftm-alarm"; ++ reg = <0x0 0x2800000 0x0 0x10000>; ++ interrupts = <0 44 4>; ++ }; ++ }; ++ ++ ddr1: memory-controller@1080000 { ++ compatible = "fsl,qoriq-memory-controller"; ++ reg = <0x0 0x1080000 0x0 0x1000>; ++ interrupts = <0 17 0x4>; ++ little-endian; ++ }; ++ ++ ddr2: memory-controller@1090000 { ++ compatible = "fsl,qoriq-memory-controller"; ++ reg = <0x0 0x1090000 0x0 0x1000>; ++ interrupts = <0 18 0x4>; ++ little-endian; ++ }; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/qoriq-bman1-portals.dtsi +@@ -0,0 +1,81 @@ ++/* ++ * QorIQ BMan Portals device tree ++ * ++ * Copyright 2011-2016 Freescale Semiconductor Inc. ++ * ++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) ++ */ ++ ++&bportals { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "simple-bus"; ++ ++ bman-portal@0 { ++ cell-index = <0>; ++ compatible = "fsl,bman-portal"; ++ reg = <0x0 0x4000 0x4000000 0x4000>; ++ interrupts = <0 173 0x4>; ++ }; ++ ++ bman-portal@10000 { ++ cell-index = <1>; ++ compatible = "fsl,bman-portal"; ++ reg = <0x10000 0x4000 0x4010000 0x4000>; ++ interrupts = <0 175 0x4>; ++ }; ++ ++ bman-portal@20000 { ++ cell-index = <2>; ++ compatible = "fsl,bman-portal"; ++ reg = <0x20000 0x4000 0x4020000 0x4000>; ++ interrupts = <0 177 0x4>; ++ }; ++ ++ bman-portal@30000 { ++ cell-index = <3>; ++ compatible = "fsl,bman-portal"; ++ reg = <0x30000 0x4000 0x4030000 0x4000>; ++ interrupts = <0 179 0x4>; ++ }; ++ ++ bman-portal@40000 { ++ cell-index = <4>; ++ compatible = "fsl,bman-portal"; ++ reg = <0x40000 0x4000 0x4040000 0x4000>; ++ interrupts = <0 181 0x4>; ++ }; ++ ++ bman-portal@50000 { ++ cell-index = <5>; ++ compatible = "fsl,bman-portal"; ++ reg = <0x50000 0x4000 0x4050000 0x4000>; ++ interrupts = <0 183 0x4>; ++ }; ++ ++ bman-portal@60000 { ++ cell-index = <6>; ++ compatible = "fsl,bman-portal"; ++ reg = <0x60000 0x4000 0x4060000 0x4000>; ++ interrupts = <0 185 0x4>; ++ }; ++ ++ bman-portal@70000 { ++ cell-index = <7>; ++ compatible = "fsl,bman-portal"; ++ reg = <0x70000 0x4000 0x4070000 0x4000>; ++ interrupts = <0 187 0x4>; ++ }; ++ ++ bman-portal@80000 { ++ cell-index = <8>; ++ compatible = "fsl,bman-portal"; ++ reg = <0x80000 0x4000 0x4080000 0x4000>; ++ interrupts = <0 189 0x4>; ++ }; ++ ++ bman-bpids@0 { ++ compatible = "fsl,bpid-range"; ++ fsl,bpid-range = <32 32>; ++ }; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/qoriq-dpaa-eth.dtsi +@@ -0,0 +1,66 @@ ++/* ++ * QorIQ FMan v3 10g port #1 device tree stub [ controller @ offset 0x400000 ] ++ * ++ * Copyright 2012 - 2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++fsldpaa: fsl,dpaa { ++ compatible = "fsl,ls1043a-dpaa", "simple-bus", "fsl,dpaa"; ++ ethernet@0 { ++ compatible = "fsl,dpa-ethernet"; ++ fsl,fman-mac = <&enet0>; ++ }; ++ ethernet@1 { ++ compatible = "fsl,dpa-ethernet"; ++ fsl,fman-mac = <&enet1>; ++ }; ++ ethernet@2 { ++ compatible = "fsl,dpa-ethernet"; ++ fsl,fman-mac = <&enet2>; ++ }; ++ ethernet@3 { ++ compatible = "fsl,dpa-ethernet"; ++ fsl,fman-mac = <&enet3>; ++ }; ++ ethernet@4 { ++ compatible = "fsl,dpa-ethernet"; ++ fsl,fman-mac = <&enet4>; ++ }; ++ ethernet@5 { ++ compatible = "fsl,dpa-ethernet"; ++ fsl,fman-mac = <&enet5>; ++ }; ++ ethernet@8 { ++ compatible = "fsl,dpa-ethernet"; ++ fsl,fman-mac = <&enet6>; ++ }; ++}; ++ +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi +@@ -0,0 +1,43 @@ ++/* ++ * QorIQ FMan v3 10g port #0 device tree ++ * ++ * Copyright 2012-2015 Freescale Semiconductor Inc. ++ * ++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) ++ */ ++ ++fman@1a00000 { ++ fman0_rx_0x10: port@90000 { ++ cell-index = <0x10>; ++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-10g-rx"; ++ reg = <0x90000 0x1000>; ++ fsl,fman-10g-port; ++ }; ++ ++ fman0_tx_0x30: port@b0000 { ++ cell-index = <0x30>; ++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-10g-tx"; ++ reg = <0xb0000 0x1000>; ++ fsl,fman-10g-port; ++ fsl,qman-channel-id = <0x800>; ++ }; ++ ++ ethernet@f0000 { ++ cell-index = <0x8>; ++ compatible = "fsl,fman-memac"; ++ reg = <0xf0000 0x1000>; ++ fsl,fman-ports = <&fman0_rx_0x10 &fman0_tx_0x30>; ++ pcsphy-handle = <&pcsphy6>; ++ }; ++ ++ mdio@f1000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; ++ reg = <0xf1000 0x1000>; ++ ++ pcsphy6: ethernet-phy@0 { ++ reg = <0x0>; ++ }; ++ }; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi +@@ -0,0 +1,43 @@ ++/* ++ * QorIQ FMan v3 10g port #1 device tree ++ * ++ * Copyright 2012-2015 Freescale Semiconductor Inc. 
++ * ++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) ++ */ ++ ++fman@1a00000 { ++ fman0_rx_0x11: port@91000 { ++ cell-index = <0x11>; ++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-10g-rx"; ++ reg = <0x91000 0x1000>; ++ fsl,fman-10g-port; ++ }; ++ ++ fman0_tx_0x31: port@b1000 { ++ cell-index = <0x31>; ++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-10g-tx"; ++ reg = <0xb1000 0x1000>; ++ fsl,fman-10g-port; ++ fsl,qman-channel-id = <0x801>; ++ }; ++ ++ ethernet@f2000 { ++ cell-index = <0x9>; ++ compatible = "fsl,fman-memac"; ++ reg = <0xf2000 0x1000>; ++ fsl,fman-ports = <&fman0_rx_0x11 &fman0_tx_0x31>; ++ pcsphy-handle = <&pcsphy7>; ++ }; ++ ++ mdio@f3000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; ++ reg = <0xf3000 0x1000>; ++ ++ pcsphy7: ethernet-phy@0 { ++ reg = <0x0>; ++ }; ++ }; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi +@@ -0,0 +1,42 @@ ++/* ++ * QorIQ FMan v3 1g port #0 device tree ++ * ++ * Copyright 2012-2015 Freescale Semiconductor Inc. ++ * ++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) ++ */ ++ ++fman@1a00000 { ++ fman0_rx_0x08: port@88000 { ++ cell-index = <0x8>; ++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx"; ++ reg = <0x88000 0x1000>; ++ }; ++ ++ fman0_tx_0x28: port@a8000 { ++ cell-index = <0x28>; ++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx"; ++ reg = <0xa8000 0x1000>; ++ fsl,qman-channel-id = <0x802>; ++ }; ++ ++ ethernet@e0000 { ++ cell-index = <0>; ++ compatible = "fsl,fman-memac"; ++ reg = <0xe0000 0x1000>; ++ fsl,fman-ports = <&fman0_rx_0x08 &fman0_tx_0x28>; ++ ptp-timer = <&ptp_timer0>; ++ pcsphy-handle = <&pcsphy0>; ++ }; ++ ++ mdio@e1000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; ++ reg = <0xe1000 0x1000>; ++ ++ pcsphy0: ethernet-phy@0 { ++ reg = <0x0>; ++ }; ++ }; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi +@@ -0,0 +1,42 @@ ++/* ++ * QorIQ FMan v3 1g port #1 device tree ++ * ++ * Copyright 2012-2015 Freescale Semiconductor Inc. ++ * ++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) ++ */ ++ ++fman@1a00000 { ++ fman0_rx_0x09: port@89000 { ++ cell-index = <0x9>; ++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx"; ++ reg = <0x89000 0x1000>; ++ }; ++ ++ fman0_tx_0x29: port@a9000 { ++ cell-index = <0x29>; ++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx"; ++ reg = <0xa9000 0x1000>; ++ fsl,qman-channel-id = <0x803>; ++ }; ++ ++ ethernet@e2000 { ++ cell-index = <1>; ++ compatible = "fsl,fman-memac"; ++ reg = <0xe2000 0x1000>; ++ fsl,fman-ports = <&fman0_rx_0x09 &fman0_tx_0x29>; ++ ptp-timer = <&ptp_timer0>; ++ pcsphy-handle = <&pcsphy1>; ++ }; ++ ++ mdio@e3000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; ++ reg = <0xe3000 0x1000>; ++ ++ pcsphy1: ethernet-phy@0 { ++ reg = <0x0>; ++ }; ++ }; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi +@@ -0,0 +1,42 @@ ++/* ++ * QorIQ FMan v3 1g port #2 device tree ++ * ++ * Copyright 2012-2015 Freescale Semiconductor Inc. 
++ * ++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) ++ */ ++ ++fman@1a00000 { ++ fman0_rx_0x0a: port@8a000 { ++ cell-index = <0xa>; ++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx"; ++ reg = <0x8a000 0x1000>; ++ }; ++ ++ fman0_tx_0x2a: port@aa000 { ++ cell-index = <0x2a>; ++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx"; ++ reg = <0xaa000 0x1000>; ++ fsl,qman-channel-id = <0x804>; ++ }; ++ ++ ethernet@e4000 { ++ cell-index = <2>; ++ compatible = "fsl,fman-memac"; ++ reg = <0xe4000 0x1000>; ++ fsl,fman-ports = <&fman0_rx_0x0a &fman0_tx_0x2a>; ++ ptp-timer = <&ptp_timer0>; ++ pcsphy-handle = <&pcsphy2>; ++ }; ++ ++ mdio@e5000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; ++ reg = <0xe5000 0x1000>; ++ ++ pcsphy2: ethernet-phy@0 { ++ reg = <0x0>; ++ }; ++ }; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi +@@ -0,0 +1,42 @@ ++/* ++ * QorIQ FMan v3 1g port #3 device tree ++ * ++ * Copyright 2012-2015 Freescale Semiconductor Inc. ++ * ++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) ++ */ ++ ++fman@1a00000 { ++ fman0_rx_0x0b: port@8b000 { ++ cell-index = <0xb>; ++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx"; ++ reg = <0x8b000 0x1000>; ++ }; ++ ++ fman0_tx_0x2b: port@ab000 { ++ cell-index = <0x2b>; ++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx"; ++ reg = <0xab000 0x1000>; ++ fsl,qman-channel-id = <0x805>; ++ }; ++ ++ ethernet@e6000 { ++ cell-index = <3>; ++ compatible = "fsl,fman-memac"; ++ reg = <0xe6000 0x1000>; ++ fsl,fman-ports = <&fman0_rx_0x0b &fman0_tx_0x2b>; ++ ptp-timer = <&ptp_timer0>; ++ pcsphy-handle = <&pcsphy3>; ++ }; ++ ++ mdio@e7000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; ++ reg = <0xe7000 0x1000>; ++ ++ pcsphy3: ethernet-phy@0 { ++ reg = <0x0>; ++ }; ++ }; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi +@@ -0,0 +1,42 @@ ++/* ++ * QorIQ FMan v3 1g port #4 device tree ++ * ++ * Copyright 2012-2015 Freescale Semiconductor Inc. ++ * ++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) ++ */ ++ ++fman@1a00000 { ++ fman0_rx_0x0c: port@8c000 { ++ cell-index = <0xc>; ++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx"; ++ reg = <0x8c000 0x1000>; ++ }; ++ ++ fman0_tx_0x2c: port@ac000 { ++ cell-index = <0x2c>; ++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx"; ++ reg = <0xac000 0x1000>; ++ fsl,qman-channel-id = <0x806>; ++ }; ++ ++ ethernet@e8000 { ++ cell-index = <4>; ++ compatible = "fsl,fman-memac"; ++ reg = <0xe8000 0x1000>; ++ fsl,fman-ports = <&fman0_rx_0x0c &fman0_tx_0x2c>; ++ ptp-timer = <&ptp_timer0>; ++ pcsphy-handle = <&pcsphy4>; ++ }; ++ ++ mdio@e9000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; ++ reg = <0xe9000 0x1000>; ++ ++ pcsphy4: ethernet-phy@0 { ++ reg = <0x0>; ++ }; ++ }; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi +@@ -0,0 +1,42 @@ ++/* ++ * QorIQ FMan v3 1g port #5 device tree ++ * ++ * Copyright 2012-2015 Freescale Semiconductor Inc. 
++ * ++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) ++ */ ++ ++fman@1a00000 { ++ fman0_rx_0x0d: port@8d000 { ++ cell-index = <0xd>; ++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx"; ++ reg = <0x8d000 0x1000>; ++ }; ++ ++ fman0_tx_0x2d: port@ad000 { ++ cell-index = <0x2d>; ++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx"; ++ reg = <0xad000 0x1000>; ++ fsl,qman-channel-id = <0x807>; ++ }; ++ ++ ethernet@ea000 { ++ cell-index = <5>; ++ compatible = "fsl,fman-memac"; ++ reg = <0xea000 0x1000>; ++ fsl,fman-ports = <&fman0_rx_0x0d &fman0_tx_0x2d>; ++ ptp-timer = <&ptp_timer0>; ++ pcsphy-handle = <&pcsphy5>; ++ }; ++ ++ mdio@eb000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; ++ reg = <0xeb000 0x1000>; ++ ++ pcsphy5: ethernet-phy@0 { ++ reg = <0x0>; ++ }; ++ }; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-6oh.dtsi +@@ -0,0 +1,47 @@ ++/* ++ * QorIQ FMan v3 OH ports device tree ++ * ++ * Copyright 2012-2015 Freescale Semiconductor Inc. ++ * ++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) ++ */ ++ ++fman@1a00000 { ++ ++ fman0_oh1: port@82000 { ++ cell-index = <0>; ++ compatible = "fsl,fman-port-oh"; ++ reg = <0x82000 0x1000>; ++ }; ++ ++ fman0_oh2: port@83000 { ++ cell-index = <1>; ++ compatible = "fsl,fman-port-oh"; ++ reg = <0x83000 0x1000>; ++ }; ++ ++ fman0_oh3: port@84000 { ++ cell-index = <2>; ++ compatible = "fsl,fman-port-oh"; ++ reg = <0x84000 0x1000>; ++ }; ++ ++ fman0_oh4: port@85000 { ++ cell-index = <3>; ++ compatible = "fsl,fman-port-oh"; ++ reg = <0x85000 0x1000>; ++ }; ++ ++ fman0_oh5: port@86000 { ++ cell-index = <4>; ++ compatible = "fsl,fman-port-oh"; ++ reg = <0x86000 0x1000>; ++ }; ++ ++ fman0_oh6: port@87000 { ++ cell-index = <5>; ++ compatible = "fsl,fman-port-oh"; ++ reg = <0x87000 0x1000>; ++ }; ++ ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi +@@ -0,0 +1,130 @@ ++/* ++ * QorIQ FMan v3 device tree ++ * ++ * Copyright 2012-2015 Freescale Semiconductor Inc. 
++ * ++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) ++ */ ++ ++fman0: fman@1a00000 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ cell-index = <0>; ++ compatible = "fsl,fman"; ++ ranges = <0x0 0x00 0x1a00000 0x100000>; ++ reg = <0x0 0x1a00000 0x0 0x100000>; ++ interrupts = <0 44 0x4>, <0 45 0x4>; ++ clocks = <&clockgen 3 0>; ++ clock-names = "fmanclk"; ++ fsl,qman-channel-range = <0x800 0x10>; ++ ++ cc { ++ compatible = "fsl,fman-cc"; ++ }; ++ ++ muram@0 { ++ compatible = "fsl,fman-muram"; ++ reg = <0x0 0x60000>; ++ }; ++ ++ bmi@80000 { ++ compatible = "fsl,fman-bmi"; ++ reg = <0x80000 0x400>; ++ }; ++ ++ qmi@80400 { ++ compatible = "fsl,fman-qmi"; ++ reg = <0x80400 0x400>; ++ }; ++ ++ fman0_oh_0x2: port@82000 { ++ cell-index = <0x2>; ++ compatible = "fsl,fman-v3-port-oh"; ++ reg = <0x82000 0x1000>; ++ fsl,qman-channel-id = <0x809>; ++ }; ++ ++ fman0_oh_0x3: port@83000 { ++ cell-index = <0x3>; ++ compatible = "fsl,fman-v3-port-oh"; ++ reg = <0x83000 0x1000>; ++ fsl,qman-channel-id = <0x80a>; ++ }; ++ ++ fman0_oh_0x4: port@84000 { ++ cell-index = <0x4>; ++ compatible = "fsl,fman-v3-port-oh"; ++ reg = <0x84000 0x1000>; ++ fsl,qman-channel-id = <0x80b>; ++ }; ++ ++ fman0_oh_0x5: port@85000 { ++ cell-index = <0x5>; ++ compatible = "fsl,fman-v3-port-oh"; ++ reg = <0x85000 0x1000>; ++ fsl,qman-channel-id = <0x80c>; ++ }; ++ ++ fman0_oh_0x6: port@86000 { ++ cell-index = <0x6>; ++ compatible = "fsl,fman-v3-port-oh"; ++ reg = <0x86000 0x1000>; ++ fsl,qman-channel-id = <0x80d>; ++ }; ++ ++ fman0_oh_0x7: port@87000 { ++ cell-index = <0x7>; ++ compatible = "fsl,fman-v3-port-oh"; ++ reg = <0x87000 0x1000>; ++ fsl,qman-channel-id = <0x80e>; ++ }; ++ ++ policer@c0000 { ++ compatible = "fsl,fman-policer"; ++ reg = <0xc0000 0x1000>; ++ }; ++ ++ keygen@c1000 { ++ compatible = "fsl,fman-keygen"; ++ reg = <0xc1000 0x1000>; ++ }; ++ ++ dma@c2000 { ++ compatible = "fsl,fman-dma"; ++ reg = <0xc2000 0x1000>; ++ }; ++ ++ fpm@c3000 { ++ compatible = "fsl,fman-fpm"; ++ reg = <0xc3000 0x1000>; ++ }; ++ ++ parser@c7000 { ++ compatible = "fsl,fman-parser"; ++ reg = <0xc7000 0x1000>; ++ }; ++ ++ vsps@dc000 { ++ compatible = "fsl,fman-vsps"; ++ reg = <0xdc000 0x1000>; ++ }; ++ ++ mdio0: mdio@fc000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; ++ reg = <0xfc000 0x1000>; ++ }; ++ ++ xmdio0: mdio@fd000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; ++ reg = <0xfd000 0x1000>; ++ }; ++ ++ ptp_timer0: ptp-timer@fe000 { ++ compatible = "fsl,fman-ptp-timer", "fsl,fman-rtc"; ++ reg = <0xfe000 0x1000>; ++ }; ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/freescale/qoriq-qman1-portals.dtsi +@@ -0,0 +1,104 @@ ++/* ++ * QorIQ QMan Portals device tree ++ * ++ * Copyright 2011-2016 Freescale Semiconductor Inc. 
++ * ++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) ++ */ ++ ++&qportals { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "simple-bus"; ++ ++ qportal0: qman-portal@0 { ++ compatible = "fsl,qman-portal"; ++ reg = <0x0 0x4000 0x4000000 0x4000>; ++ interrupts = <0 172 0x4>; ++ cell-index = <0>; ++ }; ++ ++ qportal1: qman-portal@10000 { ++ compatible = "fsl,qman-portal"; ++ reg = <0x10000 0x4000 0x4010000 0x4000>; ++ interrupts = <0 174 0x4>; ++ cell-index = <1>; ++ }; ++ ++ qportal2: qman-portal@20000 { ++ compatible = "fsl,qman-portal"; ++ reg = <0x20000 0x4000 0x4020000 0x4000>; ++ interrupts = <0 176 0x4>; ++ cell-index = <2>; ++ }; ++ ++ qportal3: qman-portal@30000 { ++ compatible = "fsl,qman-portal"; ++ reg = <0x30000 0x4000 0x4030000 0x4000>; ++ interrupts = <0 178 0x4>; ++ cell-index = <3>; ++ }; ++ ++ qportal4: qman-portal@40000 { ++ compatible = "fsl,qman-portal"; ++ reg = <0x40000 0x4000 0x4040000 0x4000>; ++ interrupts = <0 180 0x4>; ++ cell-index = <4>; ++ }; ++ ++ qportal5: qman-portal@50000 { ++ compatible = "fsl,qman-portal"; ++ reg = <0x50000 0x4000 0x4050000 0x4000>; ++ interrupts = <0 182 0x4>; ++ cell-index = <5>; ++ }; ++ ++ qportal6: qman-portal@60000 { ++ compatible = "fsl,qman-portal"; ++ reg = <0x60000 0x4000 0x4060000 0x4000>; ++ interrupts = <0 184 0x4>; ++ cell-index = <6>; ++ }; ++ ++ qportal7: qman-portal@70000 { ++ compatible = "fsl,qman-portal"; ++ reg = <0x70000 0x4000 0x4070000 0x4000>; ++ interrupts = <0 186 0x4>; ++ cell-index = <7>; ++ }; ++ ++ qportal8: qman-portal@80000 { ++ compatible = "fsl,qman-portal"; ++ reg = <0x80000 0x4000 0x4080000 0x4000>; ++ interrupts = <0 188 0x4>; ++ cell-index = <8>; ++ }; ++ ++ qman-fqids@0 { ++ compatible = "fsl,fqid-range"; ++ fsl,fqid-range = <256 256>; ++ }; ++ ++ qman-fqids@1 { ++ compatible = "fsl,fqid-range"; ++ fsl,fqid-range = <32768 32768>; ++ }; ++ ++ qman-pools@0 { ++ compatible = "fsl,pool-channel-range"; ++ fsl,pool-channel-range = <0x401 0xf>; ++ }; ++ ++ qman-cgrids@0 { ++ compatible = "fsl,cgrid-range"; ++ fsl,cgrid-range = <0 256>; ++ }; ++ ++ qman-ceetm@0 { ++ compatible = "fsl,qman-ceetm"; ++ fsl,ceetm-lfqid-range = <0xf00000 0x1000>; ++ fsl,ceetm-sp-range = <0 12>; ++ fsl,ceetm-lni-range = <0 8>; ++ fsl,ceetm-channel-range = <0 32>; ++ }; ++}; +--- a/arch/powerpc/boot/dts/fsl/qoriq-bman1-portals.dtsi ++++ b/arch/powerpc/boot/dts/fsl/qoriq-bman1-portals.dtsi +@@ -38,51 +38,61 @@ + compatible = "simple-bus"; + + bman-portal@0 { ++ cell-index = <0>; + compatible = "fsl,bman-portal"; + reg = <0x0 0x4000>, <0x100000 0x1000>; + interrupts = <105 2 0 0>; + }; + bman-portal@4000 { ++ cell-index = <1>; + compatible = "fsl,bman-portal"; + reg = <0x4000 0x4000>, <0x101000 0x1000>; + interrupts = <107 2 0 0>; + }; + bman-portal@8000 { ++ cell-index = <2>; + compatible = "fsl,bman-portal"; + reg = <0x8000 0x4000>, <0x102000 0x1000>; + interrupts = <109 2 0 0>; + }; + bman-portal@c000 { ++ cell-index = <3>; + compatible = "fsl,bman-portal"; + reg = <0xc000 0x4000>, <0x103000 0x1000>; + interrupts = <111 2 0 0>; + }; + bman-portal@10000 { ++ cell-index = <4>; + compatible = "fsl,bman-portal"; + reg = <0x10000 0x4000>, <0x104000 0x1000>; + interrupts = <113 2 0 0>; + }; + bman-portal@14000 { ++ cell-index = <5>; + compatible = "fsl,bman-portal"; + reg = <0x14000 0x4000>, <0x105000 0x1000>; + interrupts = <115 2 0 0>; + }; + bman-portal@18000 { ++ cell-index = <6>; + compatible = "fsl,bman-portal"; + reg = <0x18000 0x4000>, <0x106000 0x1000>; + interrupts = <117 2 0 0>; + }; + 
bman-portal@1c000 { ++ cell-index = <7>; + compatible = "fsl,bman-portal"; + reg = <0x1c000 0x4000>, <0x107000 0x1000>; + interrupts = <119 2 0 0>; + }; + bman-portal@20000 { ++ cell-index = <8>; + compatible = "fsl,bman-portal"; + reg = <0x20000 0x4000>, <0x108000 0x1000>; + interrupts = <121 2 0 0>; + }; + bman-portal@24000 { ++ cell-index = <9>; + compatible = "fsl,bman-portal"; + reg = <0x24000 0x4000>, <0x109000 0x1000>; + interrupts = <123 2 0 0>; +--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi ++++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi +@@ -35,14 +35,14 @@ + fman@400000 { + fman0_rx_0x10: port@90000 { + cell-index = <0x10>; +- compatible = "fsl,fman-v3-port-rx"; ++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-10g-rx"; + reg = <0x90000 0x1000>; + fsl,fman-10g-port; + }; + + fman0_tx_0x30: port@b0000 { + cell-index = <0x30>; +- compatible = "fsl,fman-v3-port-tx"; ++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-10g-tx"; + reg = <0xb0000 0x1000>; + fsl,fman-10g-port; + }; +--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi ++++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi +@@ -35,14 +35,14 @@ + fman@400000 { + fman0_rx_0x11: port@91000 { + cell-index = <0x11>; +- compatible = "fsl,fman-v3-port-rx"; ++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-10g-rx"; + reg = <0x91000 0x1000>; + fsl,fman-10g-port; + }; + + fman0_tx_0x31: port@b1000 { + cell-index = <0x31>; +- compatible = "fsl,fman-v3-port-tx"; ++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-10g-tx"; + reg = <0xb1000 0x1000>; + fsl,fman-10g-port; + }; diff --git a/target/linux/layerscape/patches-4.9/401-mtd-spi-nor-support-layerscape.patch b/target/linux/layerscape/patches-4.9/401-mtd-spi-nor-support-layerscape.patch new file mode 100644 index 000000000..445a6fa02 --- /dev/null +++ b/target/linux/layerscape/patches-4.9/401-mtd-spi-nor-support-layerscape.patch @@ -0,0 +1,1030 @@ +From 120fa458ffe2250ea58578ccfc85e674005463dc Mon Sep 17 00:00:00 2001 +From: Yangbo Lu +Date: Mon, 25 Sep 2017 10:53:50 +0800 +Subject: [PATCH] mtd: spi-nor: support layerscape + +This is an integrated patch for layerscape qspi support. + +Signed-off-by: Suresh Gupta +Signed-off-by: Yunhui Cui +Signed-off-by: mar.krzeminski +Signed-off-by: Alison Wang +Signed-off-by: Nobuhiro Iwamatsu +Signed-off-by: LABBE Corentin +Signed-off-by: Yuan Yao +Signed-off-by: Alexander Kurz +Signed-off-by: L. D. Pinney +Signed-off-by: Ash Benz +Signed-off-by: Yangbo Lu +--- + drivers/mtd/mtdchar.c | 2 +- + drivers/mtd/spi-nor/fsl-quadspi.c | 356 +++++++++++++++++++++++++++++++------- + drivers/mtd/spi-nor/spi-nor.c | 136 +++++++++++++-- + include/linux/mtd/spi-nor.h | 14 +- + 4 files changed, 432 insertions(+), 76 deletions(-) + +--- a/drivers/mtd/mtdchar.c ++++ b/drivers/mtd/mtdchar.c +@@ -451,7 +451,7 @@ static int mtdchar_readoob(struct file * + * data. For our userspace tools it is important to dump areas + * with ECC errors! + * For kernel internal usage it also might return -EUCLEAN +- * to signal the caller that a bitflip has occured and has ++ * to signal the caller that a bitflip has occurred and has + * been corrected by the ECC algorithm.
+ * + * Note: currently the standard NAND function, nand_read_oob_std, +--- a/drivers/mtd/spi-nor/fsl-quadspi.c ++++ b/drivers/mtd/spi-nor/fsl-quadspi.c +@@ -41,6 +41,8 @@ + #define QUADSPI_QUIRK_TKT253890 (1 << 2) + /* Controller cannot wake up from wait mode, TKT245618 */ + #define QUADSPI_QUIRK_TKT245618 (1 << 3) ++/* QSPI_AMBA_BASE is internally added by SOC design */ ++#define QUADSPI_AMBA_BASE_INTERNAL (0x10000) + + /* The registers */ + #define QUADSPI_MCR 0x00 +@@ -193,7 +195,7 @@ + #define QUADSPI_LUT_NUM 64 + + /* SEQID -- we can have 16 seqids at most. */ +-#define SEQID_QUAD_READ 0 ++#define SEQID_READ 0 + #define SEQID_WREN 1 + #define SEQID_WRDI 2 + #define SEQID_RDSR 3 +@@ -205,15 +207,22 @@ + #define SEQID_RDCR 9 + #define SEQID_EN4B 10 + #define SEQID_BRWR 11 ++#define SEQID_RDAR_OR_RD_EVCR 12 ++#define SEQID_WRAR 13 ++#define SEQID_WD_EVCR 14 + + #define QUADSPI_MIN_IOMAP SZ_4M + ++#define FLASH_VENDOR_SPANSION_FS "s25fs" ++#define SPANSION_S25FS_FAMILY (1 << 1) ++ + enum fsl_qspi_devtype { + FSL_QUADSPI_VYBRID, + FSL_QUADSPI_IMX6SX, + FSL_QUADSPI_IMX7D, + FSL_QUADSPI_IMX6UL, + FSL_QUADSPI_LS1021A, ++ FSL_QUADSPI_LS2080A, + }; + + struct fsl_qspi_devtype_data { +@@ -224,7 +233,7 @@ struct fsl_qspi_devtype_data { + int driver_data; + }; + +-static struct fsl_qspi_devtype_data vybrid_data = { ++static const struct fsl_qspi_devtype_data vybrid_data = { + .devtype = FSL_QUADSPI_VYBRID, + .rxfifo = 128, + .txfifo = 64, +@@ -232,7 +241,7 @@ static struct fsl_qspi_devtype_data vybr + .driver_data = QUADSPI_QUIRK_SWAP_ENDIAN, + }; + +-static struct fsl_qspi_devtype_data imx6sx_data = { ++static const struct fsl_qspi_devtype_data imx6sx_data = { + .devtype = FSL_QUADSPI_IMX6SX, + .rxfifo = 128, + .txfifo = 512, +@@ -241,7 +250,7 @@ static struct fsl_qspi_devtype_data imx6 + | QUADSPI_QUIRK_TKT245618, + }; + +-static struct fsl_qspi_devtype_data imx7d_data = { ++static const struct fsl_qspi_devtype_data imx7d_data = { + .devtype = FSL_QUADSPI_IMX7D, + .rxfifo = 512, + .txfifo = 512, +@@ -250,7 +259,7 @@ static struct fsl_qspi_devtype_data imx7 + | QUADSPI_QUIRK_4X_INT_CLK, + }; + +-static struct fsl_qspi_devtype_data imx6ul_data = { ++static const struct fsl_qspi_devtype_data imx6ul_data = { + .devtype = FSL_QUADSPI_IMX6UL, + .rxfifo = 128, + .txfifo = 512, +@@ -267,6 +276,14 @@ static struct fsl_qspi_devtype_data ls10 + .driver_data = 0, + }; + ++static struct fsl_qspi_devtype_data ls2080a_data = { ++ .devtype = FSL_QUADSPI_LS2080A, ++ .rxfifo = 128, ++ .txfifo = 64, ++ .ahb_buf_size = 1024, ++ .driver_data = QUADSPI_AMBA_BASE_INTERNAL | QUADSPI_QUIRK_TKT253890, ++}; ++ + #define FSL_QSPI_MAX_CHIP 4 + struct fsl_qspi { + struct spi_nor nor[FSL_QSPI_MAX_CHIP]; +@@ -282,6 +299,7 @@ struct fsl_qspi { + u32 nor_size; + u32 nor_num; + u32 clk_rate; ++ u32 ddr_smp; + unsigned int chip_base_addr; /* We may support two chips. 
*/ + bool has_second_chip; + bool big_endian; +@@ -309,6 +327,23 @@ static inline int needs_wakeup_wait_mode + return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT245618; + } + ++static inline int has_added_amba_base_internal(struct fsl_qspi *q) ++{ ++ return q->devtype_data->driver_data & QUADSPI_AMBA_BASE_INTERNAL; ++} ++ ++static u32 fsl_get_nor_vendor(struct spi_nor *nor) ++{ ++ u32 vendor_id = 0; ++ ++ if (nor->vendor) { ++ if (!memcmp(nor->vendor, FLASH_VENDOR_SPANSION_FS, ++ sizeof(FLASH_VENDOR_SPANSION_FS) - 1)) ++ vendor_id = SPANSION_S25FS_FAMILY; ++ } ++ return vendor_id; ++} ++ + /* + * R/W functions for big- or little-endian registers: + * The qSPI controller's endian is independent of the CPU core's endian. +@@ -331,6 +366,31 @@ static u32 qspi_readl(struct fsl_qspi *q + return ioread32(addr); + } + ++static inline u32 *u8tou32(u32 *dest, const u8 *src, size_t n) ++{ ++ size_t i; ++ *dest = 0; ++ ++ n = n > 4 ? 4 : n; ++ for (i = 0; i < n; i++) ++ *dest |= *src++ << i * 8; ++ ++ return dest; ++ ++} ++ ++static inline u8 *u32tou8(u8 *dest, const u32 *src, size_t n) ++{ ++ size_t i; ++ u8 *xdest = dest; ++ ++ n = n > 4 ? 4 : n; ++ for (i = 0; i < n; i++) ++ *xdest++ = *src >> i * 8; ++ ++ return dest; ++} ++ + /* + * An IC bug makes us to re-arrange the 32-bit data. + * The following chips, such as IMX6SLX, have fixed this bug. +@@ -373,8 +433,15 @@ static void fsl_qspi_init_lut(struct fsl + void __iomem *base = q->iobase; + int rxfifo = q->devtype_data->rxfifo; + u32 lut_base; +- u8 cmd, addrlen, dummy; + int i; ++ u32 vendor; ++ ++ struct spi_nor *nor = &q->nor[0]; ++ u8 addrlen = (nor->addr_width == 3) ? ADDR24BIT : ADDR32BIT; ++ u8 read_op = nor->read_opcode; ++ u8 read_dm = nor->read_dummy; ++ ++ vendor = fsl_get_nor_vendor(nor); + + fsl_qspi_unlock_lut(q); + +@@ -382,25 +449,51 @@ static void fsl_qspi_init_lut(struct fsl + for (i = 0; i < QUADSPI_LUT_NUM; i++) + qspi_writel(q, 0, base + QUADSPI_LUT_BASE + i * 4); + +- /* Quad Read */ +- lut_base = SEQID_QUAD_READ * 4; +- +- if (q->nor_size <= SZ_16M) { +- cmd = SPINOR_OP_READ_1_1_4; +- addrlen = ADDR24BIT; +- dummy = 8; +- } else { +- /* use the 4-byte address */ +- cmd = SPINOR_OP_READ_1_1_4; +- addrlen = ADDR32BIT; +- dummy = 8; +- } ++ /* Read */ ++ lut_base = SEQID_READ * 4; + +- qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen), ++ if (nor->flash_read == SPI_NOR_FAST) { ++ qspi_writel(q, LUT0(CMD, PAD1, read_op) | ++ LUT1(ADDR, PAD1, addrlen), ++ base + QUADSPI_LUT(lut_base)); ++ qspi_writel(q, LUT0(DUMMY, PAD1, read_dm) | ++ LUT1(FSL_READ, PAD1, rxfifo), ++ base + QUADSPI_LUT(lut_base + 1)); ++ } else if (nor->flash_read == SPI_NOR_QUAD) { ++ if (q->nor_size == 0x4000000) { ++ read_op = 0xEC; ++ qspi_writel(q, ++ LUT0(CMD, PAD1, read_op) | LUT1(ADDR, PAD4, addrlen), ++ base + QUADSPI_LUT(lut_base)); ++ qspi_writel(q, ++ LUT0(MODE, PAD4, 0xff) | LUT1(DUMMY, PAD4, read_dm), ++ base + QUADSPI_LUT(lut_base + 1)); ++ qspi_writel(q, ++ LUT0(FSL_READ, PAD4, rxfifo), ++ base + QUADSPI_LUT(lut_base + 2)); ++ } else { ++ qspi_writel(q, LUT0(CMD, PAD1, read_op) | ++ LUT1(ADDR, PAD1, addrlen), ++ base + QUADSPI_LUT(lut_base)); ++ qspi_writel(q, LUT0(DUMMY, PAD1, read_dm) | ++ LUT1(FSL_READ, PAD4, rxfifo), ++ base + QUADSPI_LUT(lut_base + 1)); ++ } ++ } else if (nor->flash_read == SPI_NOR_DDR_QUAD) { ++ /* read mode: 1-4-4, such as Spansion s25fl128s.
*/ ++ qspi_writel(q, LUT0(CMD, PAD1, read_op) ++ | LUT1(ADDR_DDR, PAD4, addrlen), + base + QUADSPI_LUT(lut_base)); ++ ++ qspi_writel(q, LUT0(MODE_DDR, PAD4, 0xff) ++ | LUT1(DUMMY, PAD1, read_dm), + base + QUADSPI_LUT(lut_base + 1)); ++ ++ qspi_writel(q, LUT0(FSL_READ_DDR, PAD4, rxfifo) ++ | LUT1(JMP_ON_CS, PAD1, 0), ++ base + QUADSPI_LUT(lut_base + 2)); ++ } ++ + /* Write enable */ + lut_base = SEQID_WREN * 4; + qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WREN), +@@ -409,16 +502,8 @@ static void fsl_qspi_init_lut(struct fsl + /* Page Program */ + lut_base = SEQID_PP * 4; + +- if (q->nor_size <= SZ_16M) { +- cmd = SPINOR_OP_PP; +- addrlen = ADDR24BIT; +- } else { +- /* use the 4-byte address */ +- cmd = SPINOR_OP_PP; +- addrlen = ADDR32BIT; +- } +- +- qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen), ++ qspi_writel(q, LUT0(CMD, PAD1, nor->program_opcode) | ++ LUT1(ADDR, PAD1, addrlen), + base + QUADSPI_LUT(lut_base)); + qspi_writel(q, LUT0(FSL_WRITE, PAD1, 0), + base + QUADSPI_LUT(lut_base + 1)); +@@ -432,10 +517,8 @@ static void fsl_qspi_init_lut(struct fsl + /* Erase a sector */ + lut_base = SEQID_SE * 4; + +- cmd = q->nor[0].erase_opcode; +- addrlen = q->nor_size <= SZ_16M ? ADDR24BIT : ADDR32BIT; +- +- qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen), ++ qspi_writel(q, LUT0(CMD, PAD1, nor->erase_opcode) | ++ LUT1(ADDR, PAD1, addrlen), + base + QUADSPI_LUT(lut_base)); + + /* Erase the whole chip */ +@@ -476,6 +559,44 @@ static void fsl_qspi_init_lut(struct fsl + qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_BRWR), + base + QUADSPI_LUT(lut_base)); + ++ ++ /* ++ * Micron and Spansion flash commands conflict: both ++ * use the same value 0x65 but with different meanings. ++ */ ++ lut_base = SEQID_RDAR_OR_RD_EVCR * 4; ++ ++ if (vendor == SPANSION_S25FS_FAMILY) { ++ /* ++ * Read any device register. ++ * Used for Spansion S25FS-S family flash only. ++ */ ++ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_SPANSION_RDAR) | ++ LUT1(ADDR, PAD1, ADDR24BIT), ++ base + QUADSPI_LUT(lut_base)); ++ qspi_writel(q, LUT0(DUMMY, PAD1, 8) | LUT1(FSL_READ, PAD1, 1), ++ base + QUADSPI_LUT(lut_base + 1)); ++ } else { ++ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RD_EVCR), ++ base + QUADSPI_LUT(lut_base)); ++ } ++ ++ /* ++ * Write any device register. ++ * Used for Spansion S25FS-S family flash only. ++ */ ++ lut_base = SEQID_WRAR * 4; ++ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_SPANSION_WRAR) | ++ LUT1(ADDR, PAD1, ADDR24BIT), ++ base + QUADSPI_LUT(lut_base)); ++ qspi_writel(q, LUT0(FSL_WRITE, PAD1, 1), ++ base + QUADSPI_LUT(lut_base + 1)); ++ ++ /* Write EVCR register */ ++ lut_base = SEQID_WD_EVCR * 4; ++ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WD_EVCR), ++ base + QUADSPI_LUT(lut_base)); ++ + fsl_qspi_lock_lut(q); + } + +@@ -483,8 +604,24 @@ static void fsl_qspi_init_lut(struct fsl + static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd) + { + switch (cmd) { ++ case SPINOR_OP_READ_1_4_4_D: ++ case SPINOR_OP_READ4_1_4_4_D: ++ case SPINOR_OP_READ4_1_1_4: + case SPINOR_OP_READ_1_1_4: +- return SEQID_QUAD_READ; ++ case SPINOR_OP_READ_FAST: ++ case SPINOR_OP_READ4_FAST: ++ return SEQID_READ; ++ /* ++ * Spansion & Micron use the same command value 0x65 ++ * Spansion: SPINOR_OP_SPANSION_RDAR, read any register. ++ * Micron: SPINOR_OP_RD_EVCR, ++ * read enhanced volatile configuration register.
++ * case SPINOR_OP_RD_EVCR: ++ */ ++ case SPINOR_OP_SPANSION_RDAR: ++ return SEQID_RDAR_OR_RD_EVCR; ++ case SPINOR_OP_SPANSION_WRAR: ++ return SEQID_WRAR; + case SPINOR_OP_WREN: + return SEQID_WREN; + case SPINOR_OP_WRDI: +@@ -496,6 +633,7 @@ static int fsl_qspi_get_seqid(struct fsl + case SPINOR_OP_CHIP_ERASE: + return SEQID_CHIP_ERASE; + case SPINOR_OP_PP: ++ case SPINOR_OP_PP_4B: + return SEQID_PP; + case SPINOR_OP_RDID: + return SEQID_RDID; +@@ -507,6 +645,8 @@ static int fsl_qspi_get_seqid(struct fsl + return SEQID_EN4B; + case SPINOR_OP_BRWR: + return SEQID_BRWR; ++ case SPINOR_OP_WD_EVCR: ++ return SEQID_WD_EVCR; + default: + if (cmd == q->nor[0].erase_opcode) + return SEQID_SE; +@@ -531,8 +671,11 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 c + /* save the reg */ + reg = qspi_readl(q, base + QUADSPI_MCR); + +- qspi_writel(q, q->memmap_phy + q->chip_base_addr + addr, +- base + QUADSPI_SFAR); ++ if (has_added_amba_base_internal(q)) ++ qspi_writel(q, q->chip_base_addr + addr, base + QUADSPI_SFAR); ++ else ++ qspi_writel(q, q->memmap_phy + q->chip_base_addr + addr, ++ base + QUADSPI_SFAR); + qspi_writel(q, QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS, + base + QUADSPI_RBCT); + qspi_writel(q, reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR); +@@ -582,10 +725,10 @@ static void fsl_qspi_read_data(struct fs + q->chip_base_addr, tmp); + + if (len >= 4) { +- *((u32 *)rxbuf) = tmp; ++ u32tou8(rxbuf, &tmp, 4); + rxbuf += 4; + } else { +- memcpy(rxbuf, &tmp, len); ++ u32tou8(rxbuf, &tmp, len); + break; + } + +@@ -619,11 +762,12 @@ static inline void fsl_qspi_invalid(stru + } + + static ssize_t fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor, +- u8 opcode, unsigned int to, u32 *txbuf, ++ u8 opcode, unsigned int to, u8 *txbuf, + unsigned count) + { + int ret, i, j; + u32 tmp; ++ unsigned int byts; + + dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len : %d\n", + q->chip_base_addr, to, count); +@@ -633,10 +777,13 @@ static ssize_t fsl_qspi_nor_write(struct + qspi_writel(q, tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR); + + /* fill the TX data to the FIFO */ ++ byts = count; + for (j = 0, i = ((count + 3) / 4); j < i; j++) { +- tmp = fsl_qspi_endian_xchg(q, *txbuf); ++ u8tou32(&tmp, txbuf, byts); ++ tmp = fsl_qspi_endian_xchg(q, tmp); + qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR); +- txbuf++; ++ txbuf += 4; ++ byts -= 4; + } + + /* fill the TXFIFO upto 16 bytes for i.MX7d */ +@@ -657,11 +804,43 @@ static void fsl_qspi_set_map_addr(struct + { + int nor_size = q->nor_size; + void __iomem *base = q->iobase; ++ u32 mem_base; ++ ++ if (has_added_amba_base_internal(q)) ++ mem_base = 0x0; ++ else ++ mem_base = q->memmap_phy; ++ ++ qspi_writel(q, nor_size + mem_base, base + QUADSPI_SFA1AD); ++ qspi_writel(q, nor_size * 2 + mem_base, base + QUADSPI_SFA2AD); ++ qspi_writel(q, nor_size * 3 + mem_base, base + QUADSPI_SFB1AD); ++ qspi_writel(q, nor_size * 4 + mem_base, base + QUADSPI_SFB2AD); ++} ++ ++/* ++ * enable controller ddr quad mode to support different ++ * vendor flashes ddr quad mode.
++ */ ++static void set_ddr_quad_mode(struct fsl_qspi *q) ++{ ++ u32 reg, reg2; ++ ++ reg = qspi_readl(q, q->iobase + QUADSPI_MCR); ++ ++ /* Firstly, disable the module */ ++ qspi_writel(q, reg | QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); ++ ++ /* Set the Sampling Register for DDR */ ++ reg2 = qspi_readl(q, q->iobase + QUADSPI_SMPR); ++ reg2 &= ~QUADSPI_SMPR_DDRSMP_MASK; ++ reg2 |= (((q->ddr_smp) << QUADSPI_SMPR_DDRSMP_SHIFT) & ++ QUADSPI_SMPR_DDRSMP_MASK); ++ qspi_writel(q, reg2, q->iobase + QUADSPI_SMPR); ++ ++ /* Enable the module again (enable the DDR too) */ ++ reg |= QUADSPI_MCR_DDR_EN_MASK; ++ qspi_writel(q, reg, q->iobase + QUADSPI_MCR); + +- qspi_writel(q, nor_size + q->memmap_phy, base + QUADSPI_SFA1AD); +- qspi_writel(q, nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD); +- qspi_writel(q, nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD); +- qspi_writel(q, nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD); + } + + /* +@@ -681,19 +860,36 @@ static void fsl_qspi_init_abh_read(struc + { + void __iomem *base = q->iobase; + int seqid; ++ const struct fsl_qspi_devtype_data *devtype_data = q->devtype_data; + + /* AHB configuration for access buffer 0/1/2 .*/ + qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR); + qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR); + qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR); ++ + /* +- * Set ADATSZ with the maximum AHB buffer size to improve the +- * read performance. ++ * Errata: A-009282: QuadSPI data prefetch may result in incorrect data ++ * Workaround: Keep the read data size to 64 bits (8 bytes). ++ * This disables the prefetch on the AHB buffer and ++ * prevents this issue from occurring. + */ +- qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK | +- ((q->devtype_data->ahb_buf_size / 8) +- << QUADSPI_BUF3CR_ADATSZ_SHIFT), +- base + QUADSPI_BUF3CR); ++ if (devtype_data->devtype == FSL_QUADSPI_LS2080A || ++ devtype_data->devtype == FSL_QUADSPI_LS1021A) { ++ ++ qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK | ++ (1 << QUADSPI_BUF3CR_ADATSZ_SHIFT), ++ base + QUADSPI_BUF3CR); ++ ++ } else { ++ /* ++ * Set ADATSZ with the maximum AHB buffer size to improve the ++ * read performance. 
++ */ ++ qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK | ++ ((q->devtype_data->ahb_buf_size / 8) ++ << QUADSPI_BUF3CR_ADATSZ_SHIFT), ++ base + QUADSPI_BUF3CR); ++ } + + /* We only use the buffer3 */ + qspi_writel(q, 0, base + QUADSPI_BUF0IND); +@@ -704,6 +900,11 @@ static void fsl_qspi_init_abh_read(struc + seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode); + qspi_writel(q, seqid << QUADSPI_BFGENCR_SEQID_SHIFT, + q->iobase + QUADSPI_BFGENCR); ++ ++ /* enable the DDR quad read */ ++ if (q->nor->flash_read == SPI_NOR_DDR_QUAD) ++ set_ddr_quad_mode(q); ++ + } + + /* This function was used to prepare and enable QSPI clock */ +@@ -822,6 +1023,7 @@ static const struct of_device_id fsl_qsp + { .compatible = "fsl,imx7d-qspi", .data = (void *)&imx7d_data, }, + { .compatible = "fsl,imx6ul-qspi", .data = (void *)&imx6ul_data, }, + { .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, }, ++ { .compatible = "fsl,ls2080a-qspi", .data = (void *)&ls2080a_data, }, + { /* sentinel */ } + }; + MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids); +@@ -835,8 +1037,12 @@ static int fsl_qspi_read_reg(struct spi_ + { + int ret; + struct fsl_qspi *q = nor->priv; ++ u32 to = 0; ++ ++ if (opcode == SPINOR_OP_SPANSION_RDAR) ++ u8tou32(&to, nor->cmd_buf, 4); + +- ret = fsl_qspi_runcmd(q, opcode, 0, len); ++ ret = fsl_qspi_runcmd(q, opcode, to, len); + if (ret) + return ret; + +@@ -848,9 +1054,13 @@ static int fsl_qspi_write_reg(struct spi + struct fsl_qspi *q = nor->priv; + int ret; ++ u32 to = 0; ++ ++ if (opcode == SPINOR_OP_SPANSION_WRAR) ++ u8tou32(&to, nor->cmd_buf, 4); + + if (!buf) { +- ret = fsl_qspi_runcmd(q, opcode, 0, 1); ++ ret = fsl_qspi_runcmd(q, opcode, to, 1); + if (ret) + return ret; + +@@ -859,7 +1069,7 @@ static int fsl_qspi_write_reg(struct spi + + } else if (len > 0) { + ret = fsl_qspi_nor_write(q, nor, opcode, 0, +- (u32 *)buf, len); ++ buf, len); + if (ret > 0) + return 0; + } else { +@@ -875,7 +1085,7 @@ static ssize_t fsl_qspi_write(struct spi + { + struct fsl_qspi *q = nor->priv; + ssize_t ret = fsl_qspi_nor_write(q, nor, nor->program_opcode, to, +- (u32 *)buf, len); ++ (u8 *)buf, len); + + /* invalid the data in the AHB buffer. */ + fsl_qspi_invalid(q); +@@ -922,7 +1132,7 @@ static ssize_t fsl_qspi_read(struct spi_ + len); + + /* Read out the data directly from the AHB buffer.*/ +- memcpy(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs, ++ memcpy_fromio(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs, + len); + + return len; +@@ -980,6 +1190,8 @@ static int fsl_qspi_probe(struct platfor + struct spi_nor *nor; + struct mtd_info *mtd; + int ret, i = 0; ++ int find_node; ++ enum read_mode mode = SPI_NOR_QUAD; + + q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL); + if (!q) +@@ -1027,6 +1239,12 @@ static int fsl_qspi_probe(struct platfor + goto clk_failed; + } + ++ /* find ddrsmp value */ ++ ret = of_property_read_u32(dev->of_node, "fsl,ddr-sampling-point", ++ &q->ddr_smp); ++ if (ret) ++ q->ddr_smp = 0; ++ + /* find the irq */ + ret = platform_get_irq(pdev, 0); + if (ret < 0) { +@@ -1050,6 +1268,7 @@ static int fsl_qspi_probe(struct platfor + + mutex_init(&q->lock); + ++ find_node = 0; + /* iterate the subnodes.
*/ + for_each_available_child_of_node(dev->of_node, np) { + /* skip the holes */ +@@ -1076,18 +1295,25 @@ static int fsl_qspi_probe(struct platfor + ret = of_property_read_u32(np, "spi-max-frequency", + &q->clk_rate); + if (ret < 0) +- goto mutex_failed; ++ continue; + + /* set the chip address for READID */ + fsl_qspi_set_base_addr(q, nor); + +- ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD); ++ ret = of_property_read_bool(np, "m25p,fast-read"); ++ mode = (ret) ? SPI_NOR_FAST : SPI_NOR_QUAD; ++ /* Can we enable the DDR Quad Read? */ ++ ret = of_property_read_bool(np, "ddr-quad-read"); + if (ret) +- goto mutex_failed; ++ mode = SPI_NOR_DDR_QUAD; ++ ++ ret = spi_nor_scan(nor, NULL, mode); ++ if (ret) ++ continue; + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) +- goto mutex_failed; ++ continue; + + /* Set the correct NOR size now. */ + if (q->nor_size == 0) { +@@ -1110,8 +1336,12 @@ static int fsl_qspi_probe(struct platfor + nor->page_size = q->devtype_data->txfifo; + + i++; ++ find_node++; + } + ++ if (find_node == 0) ++ goto mutex_failed; ++ + /* finish the rest init. */ + ret = fsl_qspi_nor_setup_last(q); + if (ret) +--- a/drivers/mtd/spi-nor/spi-nor.c ++++ b/drivers/mtd/spi-nor/spi-nor.c +@@ -40,6 +40,13 @@ + #define SPI_NOR_MAX_ID_LEN 6 + #define SPI_NOR_MAX_ADDR_WIDTH 4 + ++#define SPI_NOR_MICRON_WRITE_ENABLE 0x7f ++/* Added for S25FS-S family flash */ ++#define SPINOR_CONFIG_REG3_OFFSET 0x800004 ++#define CR3V_4KB_ERASE_UNABLE 0x8 ++#define SPINOR_S25FS_FAMILY_ID 0x81 ++ ++ + struct flash_info { + char *name; + +@@ -68,7 +75,8 @@ struct flash_info { + #define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */ + #define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */ + #define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */ +-#define USE_FSR BIT(7) /* use flag status register */ ++#define USE_FSR BIT(13) /* use flag status register */ ++#define SPI_NOR_DDR_QUAD_READ BIT(7) /* Flash supports DDR Quad Read */ + #define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */ + #define SPI_NOR_HAS_TB BIT(9) /* + * Flash SR has Top/Bottom (TB) protect +@@ -85,9 +93,11 @@ struct flash_info { + * Use dedicated 4byte address op codes + * to support memory size above 128Mib. + */ ++#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */ + }; + + #define JEDEC_MFR(info) ((info)->id[0]) ++#define EXT_ID(info) ((info)->id[5]) + + static const struct flash_info *spi_nor_match_id(const char *name); + +@@ -132,7 +142,7 @@ static int read_fsr(struct spi_nor *nor) + /* + * Read configuration register, returning its value in the + * location. Return the configuration register value. +- * Returns negative if error occured. ++ * Returns negative if error occurred. 
+ */ + static int read_cr(struct spi_nor *nor) + { +@@ -160,6 +170,8 @@ static inline int spi_nor_read_dummy_cyc + case SPI_NOR_DUAL: + case SPI_NOR_QUAD: + return 8; ++ case SPI_NOR_DDR_QUAD: ++ return 6; + case SPI_NOR_NORMAL: + return 0; + } +@@ -961,6 +973,8 @@ static const struct flash_info spi_nor_i + + /* ESMT */ + { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) }, ++ { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) }, ++ { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) }, + + /* Everspin */ + { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, +@@ -1014,12 +1028,15 @@ static const struct flash_info spi_nor_i + { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) }, + { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) }, + { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) }, ++ { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) }, ++ { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) }, ++ { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) }, + { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64, 0) }, + { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) }, + { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) }, + { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, + { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) }, +- { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) }, ++ { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K) }, + { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, + { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) }, + { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) }, +@@ -1033,10 +1050,11 @@ static const struct flash_info spi_nor_i + { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) }, + { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) }, + { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) }, ++ { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) }, + { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, + { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, +- { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, +- { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, ++ { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, ++ { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, + + /* PMC */ + { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) }, +@@ -1054,8 +1072,11 @@ static const struct flash_info spi_nor_i + { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) }, + { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) }, + { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) }, +- { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, ++ { "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512, 0)}, ++ { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_QUAD_READ ++ | SPI_NOR_DDR_QUAD_READ) }, + { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, ++ { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | 
SPI_NOR_QUAD_READ)}, + { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) }, + { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) }, +@@ -1130,6 +1151,9 @@ static const struct flash_info spi_nor_i + { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) }, + { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) }, + { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, ++ { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) }, ++ { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) }, ++ { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) }, + { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) }, + { + "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, +@@ -1192,6 +1216,53 @@ static const struct flash_info *spi_nor_ + id[0], id[1], id[2]); + return ERR_PTR(-ENODEV); + } ++/* ++ * The S25FS-S family physical sectors may be configured as a ++ * hybrid combination of eight 4-kB parameter sectors ++ * at the top or bottom of the address space with all ++ * but one of the remaining sectors being uniform size. ++ * The Parameter Sector Erase commands (20h or 21h) must ++ * be used to erase the 4-kB parameter sectors individually. ++ * The Sector (uniform sector) Erase commands (D8h or DCh) ++ * must be used to erase any of the remaining ++ * sectors, including the portion of highest or lowest address ++ * sector that is not overlaid by the parameter sectors. ++ * The uniform sector erase command has no effect on parameter sectors. ++ */ ++static int spansion_s25fs_disable_4kb_erase(struct spi_nor *nor) ++{ ++ struct fsl_qspi *q; ++ u32 cr3v_addr = SPINOR_CONFIG_REG3_OFFSET; ++ u8 cr3v = 0x0; ++ int ret = 0x0; ++ ++ q = nor->priv; ++ ++ nor->cmd_buf[2] = cr3v_addr >> 16; ++ nor->cmd_buf[1] = cr3v_addr >> 8; ++ nor->cmd_buf[0] = cr3v_addr >> 0; ++ ++ ret = nor->read_reg(nor, SPINOR_OP_SPANSION_RDAR, &cr3v, 1); ++ if (ret) ++ return ret; ++ if (cr3v & CR3V_4KB_ERASE_UNABLE) ++ return 0; ++ ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0); ++ if (ret) ++ return ret; ++ cr3v = CR3V_4KB_ERASE_UNABLE; ++ nor->program_opcode = SPINOR_OP_SPANSION_WRAR; ++ nor->write(nor, cr3v_addr, 1, &cr3v); ++ ++ ret = nor->read_reg(nor, SPINOR_OP_SPANSION_RDAR, &cr3v, 1); ++ if (ret) ++ return ret; ++ if (!(cr3v & CR3V_4KB_ERASE_UNABLE)) ++ return -EPERM; ++ ++ return 0; ++} ++ + + static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf) +@@ -1411,7 +1482,7 @@ static int macronix_quad_enable(struct s + * Write status Register and configuration register with 2 bytes + * The first byte will be written to the status register, while the + * second byte will be written to the configuration register. +- * Return negative if error occured. ++ * Return negative if error occurred. 
+ */ + static int write_sr_cr(struct spi_nor *nor, u16 val) + { +@@ -1459,6 +1530,24 @@ static int spansion_quad_enable(struct s + return 0; + } + ++static int set_ddr_quad_mode(struct spi_nor *nor, const struct flash_info *info) ++{ ++ int status; ++ ++ switch (JEDEC_MFR(info)) { ++ case SNOR_MFR_SPANSION: ++ status = spansion_quad_enable(nor); ++ if (status) { ++ dev_err(nor->dev, "Spansion DDR quad-read not enabled\n"); ++ return status; ++ } ++ return status; ++ default: ++ return -EINVAL; ++ } ++} ++ ++ + static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info) + { + int status; +@@ -1604,9 +1693,25 @@ int spi_nor_scan(struct spi_nor *nor, co + write_sr(nor, 0); + spi_nor_wait_till_ready(nor); + } ++ if (JEDEC_MFR(info) == SNOR_MFR_MICRON) { ++ ret = read_sr(nor); ++ ret &= SPI_NOR_MICRON_WRITE_ENABLE; ++ ++ write_enable(nor); ++ write_sr(nor, ret); ++ } ++ ++ if (EXT_ID(info) == SPINOR_S25FS_FAMILY_ID) { ++ ret = spansion_s25fs_disable_4kb_erase(nor); ++ if (ret) ++ return ret; ++ } ++ + + if (!mtd->name) + mtd->name = dev_name(dev); ++ if (info->name) ++ nor->vendor = info->name; + mtd->priv = nor; + mtd->type = MTD_NORFLASH; + mtd->writesize = 1; +@@ -1639,6 +1744,8 @@ int spi_nor_scan(struct spi_nor *nor, co + nor->flags |= SNOR_F_USE_FSR; + if (info->flags & SPI_NOR_HAS_TB) + nor->flags |= SNOR_F_HAS_SR_TB; ++ if (info->flags & NO_CHIP_ERASE) ++ nor->flags |= SNOR_F_NO_OP_CHIP_ERASE; + + #ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS + /* prefer "small sector" erase if possible */ +@@ -1676,9 +1783,15 @@ int spi_nor_scan(struct spi_nor *nor, co + /* Some devices cannot do fast-read, no matter what DT tells us */ + if (info->flags & SPI_NOR_NO_FR) + nor->flash_read = SPI_NOR_NORMAL; +- +- /* Quad/Dual-read mode takes precedence over fast/normal */ +- if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) { ++ /* DDR Quad/Quad/Dual-read mode takes precedence over fast/normal */ ++ if (mode == SPI_NOR_DDR_QUAD && info->flags & SPI_NOR_DDR_QUAD_READ) { ++ ret = set_ddr_quad_mode(nor, info); ++ if (ret) { ++ dev_err(dev, "DDR quad mode not supported\n"); ++ return ret; ++ } ++ nor->flash_read = SPI_NOR_DDR_QUAD; ++ } else if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) { + ret = set_quad_mode(nor, info); + if (ret) { + dev_err(dev, "quad mode not supported\n"); +@@ -1691,6 +1804,9 @@ int spi_nor_scan(struct spi_nor *nor, co + + /* Default commands */ + switch (nor->flash_read) { ++ case SPI_NOR_DDR_QUAD: ++ nor->read_opcode = SPINOR_OP_READ4_1_4_4_D; ++ break; + case SPI_NOR_QUAD: + nor->read_opcode = SPINOR_OP_READ_1_1_4; + break; +--- a/include/linux/mtd/spi-nor.h ++++ b/include/linux/mtd/spi-nor.h +@@ -31,10 +31,10 @@ + + /* + * Note on opcode nomenclature: some opcodes have a format like +- * SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number ++ * SPINOR_OP_FUNCTION{4,}_x_y_z{_D}. The numbers x, y, and z stand for the number + * of I/O lines used for the opcode, address, and data (respectively). The + * FUNCTION has an optional suffix of '4', to represent an opcode which +- * requires a 4-byte (32-bit) address. ++ * requires a 4-byte (32-bit) address. The suffix of 'D' stands for the DDR mode. + */ + + /* Flash opcodes.
*/ +@@ -46,7 +46,9 @@ + #define SPINOR_OP_READ_1_1_2 0x3b /* Read data bytes (Dual Output SPI) */ + #define SPINOR_OP_READ_1_2_2 0xbb /* Read data bytes (Dual I/O SPI) */ + #define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad Output SPI) */ ++#define SPINOR_OP_READ_1_4_4_D 0xed /* Read data bytes (DDR Quad SPI) */ + #define SPINOR_OP_READ_1_4_4 0xeb /* Read data bytes (Quad I/O SPI) */ ++#define SPINOR_OP_READ4_1_4_4_D 0xee /* Read data bytes (DDR Quad SPI) */ + #define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */ + #define SPINOR_OP_PP_1_1_4 0x32 /* Quad page program */ + #define SPINOR_OP_PP_1_4_4 0x38 /* Quad page program */ +@@ -62,9 +64,11 @@ + /* 4-byte address opcodes - used on Spansion and some Macronix flashes. */ + #define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */ + #define SPINOR_OP_READ_FAST_4B 0x0c /* Read data bytes (high frequency) */ ++#define SPINOR_OP_READ4_FAST 0x0c /* Read data bytes (high frequency) */ + #define SPINOR_OP_READ_1_1_2_4B 0x3c /* Read data bytes (Dual Output SPI) */ + #define SPINOR_OP_READ_1_2_2_4B 0xbc /* Read data bytes (Dual I/O SPI) */ + #define SPINOR_OP_READ_1_1_4_4B 0x6c /* Read data bytes (Quad Output SPI) */ ++#define SPINOR_OP_READ4_1_1_4 0x6c /* Read data bytes (Quad SPI) */ + #define SPINOR_OP_READ_1_4_4_4B 0xec /* Read data bytes (Quad I/O SPI) */ + #define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */ + #define SPINOR_OP_PP_1_1_4_4B 0x34 /* Quad page program */ +@@ -94,6 +98,10 @@ + /* Used for Spansion flashes only. */ + #define SPINOR_OP_BRWR 0x17 /* Bank register write */ + ++/* Used for Spansion S25FS-S family flash only. */ ++#define SPINOR_OP_SPANSION_RDAR 0x65 /* Read any device register */ ++#define SPINOR_OP_SPANSION_WRAR 0x71 /* Write any device register */ ++ + /* Used for Micron flashes only. */ + #define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */ + #define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */ +@@ -124,6 +132,7 @@ enum read_mode { + SPI_NOR_FAST, + SPI_NOR_DUAL, + SPI_NOR_QUAD, ++ SPI_NOR_DDR_QUAD, + }; + + #define SPI_NOR_MAX_CMD_SIZE 8 +@@ -189,6 +198,7 @@ struct spi_nor { + bool sst_write_second; + u32 flags; + u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE]; ++ char *vendor; + + int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops); + void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops); diff --git a/target/linux/layerscape/patches-4.9/402-mtd-support-layerscape.patch b/target/linux/layerscape/patches-4.9/402-mtd-support-layerscape.patch new file mode 100644 index 000000000..c4f15111c --- /dev/null +++ b/target/linux/layerscape/patches-4.9/402-mtd-support-layerscape.patch @@ -0,0 +1,397 @@ +From c0e4767d3b26f21e5043fe2d15a24a1958de766e Mon Sep 17 00:00:00 2001 +From: Yangbo Lu +Date: Mon, 25 Sep 2017 10:17:28 +0800 +Subject: [PATCH] mtd: support layerscape + +This is an integrated patch for layerscape ifc-nor-nand support.
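+
+The fsl_ifc.c changes below add a full register save on suspend and a
+restore on resume, wired up through the standard dev_pm_ops mechanism.
+As an illustrative sketch only (this condenses what the hunks below
+actually add), the wiring amounts to:
+
+	static const struct dev_pm_ops ifc_pm_ops = {
+		SET_SYSTEM_SLEEP_PM_OPS(fsl_ifc_suspend, fsl_ifc_resume)
+	};
+
+On IFC v1.3.0 and later the resume path additionally sets
+IFC_NAND_SRAM_INIT_EN and polls for the bit to clear (with a timeout)
+before the controller is used again.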
+ +Signed-off-by: Alison Wang +Signed-off-by: Prabhakar Kushwaha +Signed-off-by: Yangbo Lu +--- + drivers/memory/Kconfig | 2 +- + drivers/memory/fsl_ifc.c | 263 ++++++++++++++++++++++++++++++++++++++++ + drivers/mtd/maps/physmap_of.c | 4 + + drivers/mtd/nand/Kconfig | 2 +- + drivers/mtd/nand/fsl_ifc_nand.c | 5 +- + include/linux/fsl_ifc.h | 7 ++ + 6 files changed, 280 insertions(+), 3 deletions(-) + +--- a/drivers/memory/Kconfig ++++ b/drivers/memory/Kconfig +@@ -115,7 +115,7 @@ config FSL_CORENET_CF + + config FSL_IFC + bool +- depends on FSL_SOC || ARCH_LAYERSCAPE ++ depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A + + config JZ4780_NEMC + bool "Ingenic JZ4780 SoC NEMC driver" +--- a/drivers/memory/fsl_ifc.c ++++ b/drivers/memory/fsl_ifc.c +@@ -24,6 +24,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -37,6 +38,8 @@ + + struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; + EXPORT_SYMBOL(fsl_ifc_ctrl_dev); ++#define FSL_IFC_V1_3_0 0x01030000 ++#define IFC_TIMEOUT_MSECS 1000 /* 1000ms */ + + /* + * convert_ifc_address - convert the base address +@@ -311,6 +314,261 @@ err: + return ret; + } + ++#ifdef CONFIG_PM_SLEEP ++/* save ifc registers */ ++static int fsl_ifc_suspend(struct device *dev) ++{ ++ struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(dev); ++ struct fsl_ifc_global __iomem *fcm = ctrl->gregs; ++ struct fsl_ifc_runtime __iomem *runtime = ctrl->rregs; ++ __be32 nand_evter_intr_en, cm_evter_intr_en, nor_evter_intr_en, ++ gpcm_evter_intr_en; ++ uint32_t ifc_bank, i; ++ ++ ctrl->saved_gregs = kzalloc(sizeof(struct fsl_ifc_global), GFP_KERNEL); ++ if (!ctrl->saved_gregs) ++ return -ENOMEM; ++ ctrl->saved_rregs = kzalloc(sizeof(struct fsl_ifc_runtime), GFP_KERNEL); ++ if (!ctrl->saved_rregs) ++ return -ENOMEM; ++ ++ cm_evter_intr_en = ifc_in32(&fcm->cm_evter_intr_en); ++ nand_evter_intr_en = ifc_in32(&runtime->ifc_nand.nand_evter_intr_en); ++ nor_evter_intr_en = ifc_in32(&runtime->ifc_nor.nor_evter_intr_en); ++ gpcm_evter_intr_en = ifc_in32(&runtime->ifc_gpcm.gpcm_evter_intr_en); ++ ++/* IFC interrupts disabled */ ++ ++ ifc_out32(0x0, &fcm->cm_evter_intr_en); ++ ifc_out32(0x0, &runtime->ifc_nand.nand_evter_intr_en); ++ ifc_out32(0x0, &runtime->ifc_nor.nor_evter_intr_en); ++ ifc_out32(0x0, &runtime->ifc_gpcm.gpcm_evter_intr_en); ++ ++ if (ctrl->saved_gregs) { ++ for (ifc_bank = 0; ifc_bank < FSL_IFC_BANK_COUNT; ifc_bank++) { ++ ctrl->saved_gregs->cspr_cs[ifc_bank].cspr_ext = ++ ifc_in32(&fcm->cspr_cs[ifc_bank].cspr_ext); ++ ctrl->saved_gregs->cspr_cs[ifc_bank].cspr = ++ ifc_in32(&fcm->cspr_cs[ifc_bank].cspr); ++ ctrl->saved_gregs->amask_cs[ifc_bank].amask = ++ ifc_in32(&fcm->amask_cs[ifc_bank].amask); ++ ctrl->saved_gregs->csor_cs[ifc_bank].csor_ext = ++ ifc_in32(&fcm->csor_cs[ifc_bank].csor_ext); ++ ctrl->saved_gregs->csor_cs[ifc_bank].csor = ++ ifc_in32(&fcm->csor_cs[ifc_bank].csor); ++ for (i = 0; i < 4; i++) { ++ ctrl->saved_gregs->ftim_cs[ifc_bank].ftim[i] = ++ ifc_in32( ++ &fcm->ftim_cs[ifc_bank].ftim[i]); ++ } ++ } ++ ++ ctrl->saved_gregs->rb_map = ifc_in32(&fcm->rb_map); ++ ctrl->saved_gregs->wb_map = ifc_in32(&fcm->wb_map); ++ ctrl->saved_gregs->ifc_gcr = ifc_in32(&fcm->ifc_gcr); ++ ctrl->saved_gregs->ddr_ccr_low = ifc_in32(&fcm->ddr_ccr_low); ++ ctrl->saved_gregs->cm_evter_en = ifc_in32(&fcm->cm_evter_en); ++ } ++ ++ if (ctrl->saved_rregs) { ++ /* IFC controller NAND machine registers */ ++ ctrl->saved_rregs->ifc_nand.ncfgr = ++ ifc_in32(&runtime->ifc_nand.ncfgr); ++ ctrl->saved_rregs->ifc_nand.nand_fcr0 = ++ ifc_in32(&runtime->ifc_nand.nand_fcr0); ++ 
ctrl->saved_rregs->ifc_nand.nand_fcr1 = ++ ifc_in32(&runtime->ifc_nand.nand_fcr1); ++ ctrl->saved_rregs->ifc_nand.row0 = ++ ifc_in32(&runtime->ifc_nand.row0); ++ ctrl->saved_rregs->ifc_nand.row1 = ++ ifc_in32(&runtime->ifc_nand.row1); ++ ctrl->saved_rregs->ifc_nand.col0 = ++ ifc_in32(&runtime->ifc_nand.col0); ++ ctrl->saved_rregs->ifc_nand.col1 = ++ ifc_in32(&runtime->ifc_nand.col1); ++ ctrl->saved_rregs->ifc_nand.row2 = ++ ifc_in32(&runtime->ifc_nand.row2); ++ ctrl->saved_rregs->ifc_nand.col2 = ++ ifc_in32(&runtime->ifc_nand.col2); ++ ctrl->saved_rregs->ifc_nand.row3 = ++ ifc_in32(&runtime->ifc_nand.row3); ++ ctrl->saved_rregs->ifc_nand.col3 = ++ ifc_in32(&runtime->ifc_nand.col3); ++ ++ ctrl->saved_rregs->ifc_nand.nand_fbcr = ++ ifc_in32(&runtime->ifc_nand.nand_fbcr); ++ ctrl->saved_rregs->ifc_nand.nand_fir0 = ++ ifc_in32(&runtime->ifc_nand.nand_fir0); ++ ctrl->saved_rregs->ifc_nand.nand_fir1 = ++ ifc_in32(&runtime->ifc_nand.nand_fir1); ++ ctrl->saved_rregs->ifc_nand.nand_fir2 = ++ ifc_in32(&runtime->ifc_nand.nand_fir2); ++ ctrl->saved_rregs->ifc_nand.nand_csel = ++ ifc_in32(&runtime->ifc_nand.nand_csel); ++ ctrl->saved_rregs->ifc_nand.nandseq_strt = ++ ifc_in32( ++ &runtime->ifc_nand.nandseq_strt); ++ ctrl->saved_rregs->ifc_nand.nand_evter_en = ++ ifc_in32( ++ &runtime->ifc_nand.nand_evter_en); ++ ctrl->saved_rregs->ifc_nand.nanndcr = ++ ifc_in32(&runtime->ifc_nand.nanndcr); ++ ctrl->saved_rregs->ifc_nand.nand_dll_lowcfg0 = ++ ifc_in32( ++ &runtime->ifc_nand.nand_dll_lowcfg0); ++ ctrl->saved_rregs->ifc_nand.nand_dll_lowcfg1 = ++ ifc_in32( ++ &runtime->ifc_nand.nand_dll_lowcfg1); ++ ++ /* IFC controller NOR machine registers */ ++ ctrl->saved_rregs->ifc_nor.nor_evter_en = ++ ifc_in32( ++ &runtime->ifc_nor.nor_evter_en); ++ ctrl->saved_rregs->ifc_nor.norcr = ++ ifc_in32(&runtime->ifc_nor.norcr); ++ ++ /* IFC controller GPCM Machine registers */ ++ ctrl->saved_rregs->ifc_gpcm.gpcm_evter_en = ++ ifc_in32( ++ &runtime->ifc_gpcm.gpcm_evter_en); ++ } ++ ++/* save the interrupt values */ ++ ctrl->saved_gregs->cm_evter_intr_en = cm_evter_intr_en; ++ ctrl->saved_rregs->ifc_nand.nand_evter_intr_en = nand_evter_intr_en; ++ ctrl->saved_rregs->ifc_nor.nor_evter_intr_en = nor_evter_intr_en; ++ ctrl->saved_rregs->ifc_gpcm.gpcm_evter_intr_en = gpcm_evter_intr_en; ++ ++ return 0; ++} ++ ++/* restore ifc registers */ ++static int fsl_ifc_resume(struct device *dev) ++{ ++ struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(dev); ++ struct fsl_ifc_global __iomem *fcm = ctrl->gregs; ++ struct fsl_ifc_runtime __iomem *runtime = ctrl->rregs; ++ struct fsl_ifc_global *savd_gregs = ctrl->saved_gregs; ++ struct fsl_ifc_runtime *savd_rregs = ctrl->saved_rregs; ++ uint32_t ver = 0, ncfgr, timeout, ifc_bank, i; ++ ++/* ++ * IFC interrupts disabled ++ */ ++ ifc_out32(0x0, &fcm->cm_evter_intr_en); ++ ifc_out32(0x0, &runtime->ifc_nand.nand_evter_intr_en); ++ ifc_out32(0x0, &runtime->ifc_nor.nor_evter_intr_en); ++ ifc_out32(0x0, &runtime->ifc_gpcm.gpcm_evter_intr_en); ++ ++ ++ if (ctrl->saved_gregs) { ++ for (ifc_bank = 0; ifc_bank < FSL_IFC_BANK_COUNT; ifc_bank++) { ++ ifc_out32(savd_gregs->cspr_cs[ifc_bank].cspr_ext, ++ &fcm->cspr_cs[ifc_bank].cspr_ext); ++ ifc_out32(savd_gregs->cspr_cs[ifc_bank].cspr, ++ &fcm->cspr_cs[ifc_bank].cspr); ++ ifc_out32(savd_gregs->amask_cs[ifc_bank].amask, ++ &fcm->amask_cs[ifc_bank].amask); ++ ifc_out32(savd_gregs->csor_cs[ifc_bank].csor_ext, ++ &fcm->csor_cs[ifc_bank].csor_ext); ++ ifc_out32(savd_gregs->csor_cs[ifc_bank].csor, ++ &fcm->csor_cs[ifc_bank].csor); ++ for (i = 0; i < 4; i++) { ++ 
ifc_out32(savd_gregs->ftim_cs[ifc_bank].ftim[i], ++ &fcm->ftim_cs[ifc_bank].ftim[i]); ++ } ++ } ++ ifc_out32(savd_gregs->rb_map, &fcm->rb_map); ++ ifc_out32(savd_gregs->wb_map, &fcm->wb_map); ++ ifc_out32(savd_gregs->ifc_gcr, &fcm->ifc_gcr); ++ ifc_out32(savd_gregs->ddr_ccr_low, &fcm->ddr_ccr_low); ++ ifc_out32(savd_gregs->cm_evter_en, &fcm->cm_evter_en); ++ } ++ ++ if (ctrl->saved_rregs) { ++ /* IFC controller NAND machine registers */ ++ ifc_out32(savd_rregs->ifc_nand.ncfgr, ++ &runtime->ifc_nand.ncfgr); ++ ifc_out32(savd_rregs->ifc_nand.nand_fcr0, ++ &runtime->ifc_nand.nand_fcr0); ++ ifc_out32(savd_rregs->ifc_nand.nand_fcr1, ++ &runtime->ifc_nand.nand_fcr1); ++ ifc_out32(savd_rregs->ifc_nand.row0, &runtime->ifc_nand.row0); ++ ifc_out32(savd_rregs->ifc_nand.row1, &runtime->ifc_nand.row1); ++ ifc_out32(savd_rregs->ifc_nand.col0, &runtime->ifc_nand.col0); ++ ifc_out32(savd_rregs->ifc_nand.col1, &runtime->ifc_nand.col1); ++ ifc_out32(savd_rregs->ifc_nand.row2, &runtime->ifc_nand.row2); ++ ifc_out32(savd_rregs->ifc_nand.col2, &runtime->ifc_nand.col2); ++ ifc_out32(savd_rregs->ifc_nand.row3, &runtime->ifc_nand.row3); ++ ifc_out32(savd_rregs->ifc_nand.col3, &runtime->ifc_nand.col3); ++ ifc_out32(savd_rregs->ifc_nand.nand_fbcr, ++ &runtime->ifc_nand.nand_fbcr); ++ ifc_out32(savd_rregs->ifc_nand.nand_fir0, ++ &runtime->ifc_nand.nand_fir0); ++ ifc_out32(savd_rregs->ifc_nand.nand_fir1, ++ &runtime->ifc_nand.nand_fir1); ++ ifc_out32(savd_rregs->ifc_nand.nand_fir2, ++ &runtime->ifc_nand.nand_fir2); ++ ifc_out32(savd_rregs->ifc_nand.nand_csel, ++ &runtime->ifc_nand.nand_csel); ++ ifc_out32(savd_rregs->ifc_nand.nandseq_strt, ++ &runtime->ifc_nand.nandseq_strt); ++ ifc_out32(savd_rregs->ifc_nand.nand_evter_en, ++ &runtime->ifc_nand.nand_evter_en); ++ ifc_out32(savd_rregs->ifc_nand.nanndcr, ++ &runtime->ifc_nand.nanndcr); ++ ifc_out32(savd_rregs->ifc_nand.nand_dll_lowcfg0, ++ &runtime->ifc_nand.nand_dll_lowcfg0); ++ ifc_out32(savd_rregs->ifc_nand.nand_dll_lowcfg1, ++ &runtime->ifc_nand.nand_dll_lowcfg1); ++ ++ /* IFC controller NOR machine registers */ ++ ifc_out32(savd_rregs->ifc_nor.nor_evter_en, ++ &runtime->ifc_nor.nor_evter_en); ++ ifc_out32(savd_rregs->ifc_nor.norcr, &runtime->ifc_nor.norcr); ++ ++ /* IFC controller GPCM Machine registers */ ++ ifc_out32(savd_rregs->ifc_gpcm.gpcm_evter_en, ++ &runtime->ifc_gpcm.gpcm_evter_en); ++ ++ /* IFC interrupts enabled */ ++ ifc_out32(ctrl->saved_gregs->cm_evter_intr_en, ++ &fcm->cm_evter_intr_en); ++ ifc_out32(ctrl->saved_rregs->ifc_nand.nand_evter_intr_en, ++ &runtime->ifc_nand.nand_evter_intr_en); ++ ifc_out32(ctrl->saved_rregs->ifc_nor.nor_evter_intr_en, ++ &runtime->ifc_nor.nor_evter_intr_en); ++ ifc_out32(ctrl->saved_rregs->ifc_gpcm.gpcm_evter_intr_en, ++ &runtime->ifc_gpcm.gpcm_evter_intr_en); ++ ++ kfree(ctrl->saved_gregs); ++ kfree(ctrl->saved_rregs); ++ ctrl->saved_gregs = NULL; ++ ctrl->saved_rregs = NULL; ++ } ++ ++ ver = ifc_in32(&fcm->ifc_rev); ++ ncfgr = ifc_in32(&runtime->ifc_nand.ncfgr); ++ if (ver >= FSL_IFC_V1_3_0) { ++ ++ ifc_out32(ncfgr | IFC_NAND_SRAM_INIT_EN, ++ &runtime->ifc_nand.ncfgr); ++ /* wait for SRAM_INIT bit to be clear or timeout */ ++ timeout = 10; ++ while ((ifc_in32(&runtime->ifc_nand.ncfgr) & ++ IFC_NAND_SRAM_INIT_EN) && timeout) { ++ mdelay(IFC_TIMEOUT_MSECS); ++ timeout--; ++ } ++ ++ if (!timeout) ++ dev_err(ctrl->dev, "Timeout waiting for IFC SRAM INIT"); ++ } ++ ++ return 0; ++} ++#endif /* CONFIG_PM_SLEEP */ ++ + static const struct of_device_id fsl_ifc_match[] = { + { + .compatible = "fsl,ifc", +@@ -318,10 +576,15 
@@ static const struct of_device_id fsl_ifc + {}, + }; + ++static const struct dev_pm_ops ifc_pm_ops = { ++ SET_SYSTEM_SLEEP_PM_OPS(fsl_ifc_suspend, fsl_ifc_resume) ++}; ++ + static struct platform_driver fsl_ifc_ctrl_driver = { + .driver = { + .name = "fsl-ifc", + .of_match_table = fsl_ifc_match, ++ .pm = &ifc_pm_ops, + }, + .probe = fsl_ifc_ctrl_probe, + .remove = fsl_ifc_ctrl_remove, +--- a/drivers/mtd/maps/physmap_of.c ++++ b/drivers/mtd/maps/physmap_of.c +@@ -20,6 +20,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -209,6 +210,9 @@ static int of_flash_probe(struct platfor + return err; + } + ++ if (of_property_read_bool(dp->parent, "big-endian")) ++ info->list[i].map.swap = CFI_BIG_ENDIAN; ++ + err = -ENOMEM; + info->list[i].map.virt = ioremap(info->list[i].map.phys, + info->list[i].map.size); +--- a/drivers/mtd/nand/Kconfig ++++ b/drivers/mtd/nand/Kconfig +@@ -438,7 +438,7 @@ config MTD_NAND_FSL_ELBC + + config MTD_NAND_FSL_IFC + tristate "NAND support for Freescale IFC controller" +- depends on FSL_SOC || ARCH_LAYERSCAPE ++ depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A + select FSL_IFC + select MEMORY + help +--- a/drivers/mtd/nand/fsl_ifc_nand.c ++++ b/drivers/mtd/nand/fsl_ifc_nand.c +@@ -904,9 +904,12 @@ static int fsl_ifc_chip_init(struct fsl_ + chip->ecc.algo = NAND_ECC_HAMMING; + } + +- if (ctrl->version == FSL_IFC_VERSION_1_1_0) ++ if (ctrl->version >= FSL_IFC_VERSION_1_1_0) + fsl_ifc_sram_init(priv); + ++ if (ctrl->version >= FSL_IFC_VERSION_2_0_0) ++ priv->bufnum_mask = (priv->bufnum_mask * 2) + 1; ++ + return 0; + } + +--- a/include/linux/fsl_ifc.h ++++ b/include/linux/fsl_ifc.h +@@ -274,6 +274,8 @@ + */ + /* Auto Boot Mode */ + #define IFC_NAND_NCFGR_BOOT 0x80000000 ++/* SRAM INIT EN */ ++#define IFC_NAND_SRAM_INIT_EN 0x20000000 + /* Addressing Mode-ROW0+n/COL0 */ + #define IFC_NAND_NCFGR_ADDR_MODE_RC0 0x00000000 + /* Addressing Mode-ROW0+n/COL0+n */ +@@ -861,6 +863,11 @@ struct fsl_ifc_ctrl { + u32 nand_stat; + wait_queue_head_t nand_wait; + bool little_endian; ++#ifdef CONFIG_PM_SLEEP ++ /* save regs when system goes to deep sleep */ ++ struct fsl_ifc_global *saved_gregs; ++ struct fsl_ifc_runtime *saved_rregs; ++#endif + }; + + extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; diff --git a/target/linux/layerscape/patches-4.9/601-net-support-layerscape.patch b/target/linux/layerscape/patches-4.9/601-net-support-layerscape.patch new file mode 100644 index 000000000..7f9477674 --- /dev/null +++ b/target/linux/layerscape/patches-4.9/601-net-support-layerscape.patch @@ -0,0 +1,2365 @@ +From 2ed7bff3d1f2fa6c5f6eff0b2bd98deaa3dc18b0 Mon Sep 17 00:00:00 2001 +From: Yangbo Lu +Date: Mon, 25 Sep 2017 10:57:14 +0800 +Subject: [PATCH] net: support layerscape + +This is an integrated patch for layerscape net support.
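+
+Besides the driver changes in the diffstat below, this pulls in two
+small core helpers: resource-managed per-CPU allocation
+(devm_alloc_percpu() and friends in drivers/base/devres.c and
+include/linux/device.h) and soc_device_match() in drivers/base/soc.c.
+A minimal usage sketch for the former -- the driver and structure
+names here are invented for illustration and are not part of this
+patch:
+
+	#include <linux/device.h>
+	#include <linux/platform_device.h>
+
+	struct demo_stats {
+		u64 rx_packets;
+		u64 rx_bytes;
+	};
+
+	static int demo_probe(struct platform_device *pdev)
+	{
+		struct demo_stats __percpu *stats;
+
+		/* per-CPU memory, freed automatically on driver detach */
+		stats = devm_alloc_percpu(&pdev->dev, struct demo_stats);
+		if (!stats)
+			return -ENOMEM;
+
+		platform_set_drvdata(pdev, stats);
+		return 0;
+	}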
+ +Signed-off-by: Madalin Bucur +Signed-off-by: Zhao Qiang +Signed-off-by: Camelia Groza +Signed-off-by: Madalin Bucur +Signed-off-by: Zhang Ying-22455 +Signed-off-by: Ramneek Mehresh +Signed-off-by: Jarod Wilson +Signed-off-by: Nikhil Badola +Signed-off-by: stephen hemminger +Signed-off-by: Arnd Bergmann +Signed-off-by: Yangbo Lu +--- + drivers/base/devres.c | 66 +++++++++++++++ + drivers/base/soc.c | 66 +++++++++++++++ + drivers/net/bonding/bond_main.c | 10 +-- + drivers/net/dummy.c | 5 +- + drivers/net/ethernet/amazon/ena/ena_netdev.c | 10 +-- + drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 6 +- + drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 4 +- + drivers/net/ethernet/atheros/alx/main.c | 6 +- + drivers/net/ethernet/broadcom/b44.c | 5 +- + drivers/net/ethernet/broadcom/bnx2.c | 5 +- + drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6 +- + drivers/net/ethernet/broadcom/tg3.c | 8 +- + drivers/net/ethernet/brocade/bna/bnad.c | 6 +- + drivers/net/ethernet/calxeda/xgmac.c | 5 +- + drivers/net/ethernet/cavium/thunder/nicvf_main.c | 5 +- + drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 7 +- + drivers/net/ethernet/cisco/enic/enic_main.c | 8 +- + drivers/net/ethernet/ec_bhf.c | 4 +- + drivers/net/ethernet/emulex/benet/be_main.c | 5 +- + drivers/net/ethernet/hisilicon/hns/hns_enet.c | 6 +- + drivers/net/ethernet/ibm/ehea/ehea_main.c | 5 +- + drivers/net/ethernet/intel/e1000e/e1000.h | 4 +- + drivers/net/ethernet/intel/e1000e/netdev.c | 5 +- + drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 6 +- + drivers/net/ethernet/intel/i40e/i40e.h | 5 +- + drivers/net/ethernet/intel/i40e/i40e_main.c | 18 ++-- + drivers/net/ethernet/intel/igb/igb_main.c | 10 +-- + drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 7 +- + drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 6 +- + drivers/net/ethernet/marvell/mvneta.c | 4 +- + drivers/net/ethernet/marvell/mvpp2.c | 4 +- + drivers/net/ethernet/marvell/sky2.c | 6 +- + drivers/net/ethernet/mediatek/mtk_eth_soc.c | 6 +- + drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 4 +- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3 +- + drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 4 +- + drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 3 +- + drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 9 +- + drivers/net/ethernet/neterion/vxge/vxge-main.c | 4 +- + .../net/ethernet/netronome/nfp/nfp_net_common.c | 6 +- + drivers/net/ethernet/nvidia/forcedeth.c | 4 +- + .../net/ethernet/qlogic/netxen/netxen_nic_main.c | 10 +-- + drivers/net/ethernet/qlogic/qede/qede_main.c | 7 +- + drivers/net/ethernet/qualcomm/emac/emac.c | 6 +- + drivers/net/ethernet/realtek/8139too.c | 9 +- + drivers/net/ethernet/realtek/r8169.c | 4 +- + drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 8 +- + drivers/net/ethernet/sfc/efx.c | 6 +- + drivers/net/ethernet/sun/niu.c | 6 +- + drivers/net/ethernet/synopsys/dwc_eth_qos.c | 4 +- + drivers/net/ethernet/tile/tilepro.c | 4 +- + drivers/net/ethernet/via/via-rhine.c | 8 +- + drivers/net/fjes/fjes_main.c | 7 +- + drivers/net/hyperv/netvsc_drv.c | 6 +- + drivers/net/ifb.c | 6 +- + drivers/net/ipvlan/ipvlan_main.c | 5 +- + drivers/net/loopback.c | 5 +- + drivers/net/macsec.c | 8 +- + drivers/net/macvlan.c | 5 +- + drivers/net/nlmon.c | 4 +- + drivers/net/ppp/ppp_generic.c | 4 +- + drivers/net/slip/slip.c | 3 +- + drivers/net/team/team.c | 3 +- + drivers/net/tun.c | 3 +- + drivers/net/veth.c | 6 +- + drivers/net/virtio_net.c | 6 +- + drivers/net/vmxnet3/vmxnet3_ethtool.c | 4 +- + drivers/net/vmxnet3/vmxnet3_int.h | 4 +- + drivers/net/vrf.c | 5 +- + 
drivers/net/xen-netfront.c | 6 +- + drivers/staging/netlogic/xlr_net.c | 10 +-- + include/linux/device.h | 19 +++++ + include/linux/fsl/svr.h | 97 ++++++++++++++++++++++ + include/linux/fsl_devices.h | 3 + + include/linux/netdev_features.h | 2 + + include/linux/netdevice.h | 12 ++- + include/linux/skbuff.h | 2 + + include/linux/sys_soc.h | 3 + + include/net/ip_tunnels.h | 4 +- + include/uapi/linux/if_ether.h | 1 + + net/8021q/vlan_dev.c | 5 +- + net/bridge/br_device.c | 6 +- + net/core/dev.c | 13 ++- + net/core/skbuff.c | 29 ++++++- + net/ipv4/ip_tunnel_core.c | 6 +- + net/l2tp/l2tp_eth.c | 6 +- + net/mac80211/iface.c | 4 +- + net/openvswitch/vport-internal_dev.c | 4 +- + net/sched/sch_generic.c | 7 ++ + net/sched/sch_teql.c | 5 +- + 90 files changed, 468 insertions(+), 298 deletions(-) + create mode 100644 include/linux/fsl/svr.h + +--- a/drivers/base/devres.c ++++ b/drivers/base/devres.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + + #include "base.h" + +@@ -985,3 +986,68 @@ void devm_free_pages(struct device *dev, + &devres)); + } + EXPORT_SYMBOL_GPL(devm_free_pages); ++ ++static void devm_percpu_release(struct device *dev, void *pdata) ++{ ++ void __percpu *p; ++ ++ p = *(void __percpu **)pdata; ++ free_percpu(p); ++} ++ ++static int devm_percpu_match(struct device *dev, void *data, void *p) ++{ ++ struct devres *devr = container_of(data, struct devres, data); ++ ++ return *(void **)devr->data == p; ++} ++ ++/** ++ * __devm_alloc_percpu - Resource-managed alloc_percpu ++ * @dev: Device to allocate per-cpu memory for ++ * @size: Size of per-cpu memory to allocate ++ * @align: Alignment of per-cpu memory to allocate ++ * ++ * Managed alloc_percpu. Per-cpu memory allocated with this function is ++ * automatically freed on driver detach. ++ * ++ * RETURNS: ++ * Pointer to allocated memory on success, NULL on failure. ++ */ ++void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, ++ size_t align) ++{ ++ void *p; ++ void __percpu *pcpu; ++ ++ pcpu = __alloc_percpu(size, align); ++ if (!pcpu) ++ return NULL; ++ ++ p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL); ++ if (!p) { ++ free_percpu(pcpu); ++ return NULL; ++ } ++ ++ *(void __percpu **)p = pcpu; ++ ++ devres_add(dev, p); ++ ++ return pcpu; ++} ++EXPORT_SYMBOL_GPL(__devm_alloc_percpu); ++ ++/** ++ * devm_free_percpu - Resource-managed free_percpu ++ * @dev: Device this memory belongs to ++ * @pdata: Per-cpu memory to free ++ * ++ * Free memory allocated with devm_alloc_percpu(). 
++ */ ++void devm_free_percpu(struct device *dev, void __percpu *pdata) ++{ ++ WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match, ++ (void *)pdata)); ++} ++EXPORT_SYMBOL_GPL(devm_free_percpu); +--- a/drivers/base/soc.c ++++ b/drivers/base/soc.c +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + + static DEFINE_IDA(soc_ida); + +@@ -159,3 +160,68 @@ static int __init soc_bus_register(void) + return bus_register(&soc_bus_type); + } + core_initcall(soc_bus_register); ++ ++static int soc_device_match_one(struct device *dev, void *arg) ++{ ++ struct soc_device *soc_dev = container_of(dev, struct soc_device, dev); ++ const struct soc_device_attribute *match = arg; ++ ++ if (match->machine && ++ !glob_match(match->machine, soc_dev->attr->machine)) ++ return 0; ++ ++ if (match->family && ++ !glob_match(match->family, soc_dev->attr->family)) ++ return 0; ++ ++ if (match->revision && ++ !glob_match(match->revision, soc_dev->attr->revision)) ++ return 0; ++ ++ if (match->soc_id && ++ !glob_match(match->soc_id, soc_dev->attr->soc_id)) ++ return 0; ++ ++ return 1; ++} ++ ++/* ++ * soc_device_match - identify the SoC in the machine ++ * @matches: zero-terminated array of possible matches ++ * ++ * returns the first matching entry of the argument array, or NULL ++ * if none of them match. ++ * ++ * This function is meant as a helper in place of of_match_node() ++ * in cases where either no device tree is available or the information ++ * in a device node is insufficient to identify a particular variant ++ * by its compatible strings or other properties. For new devices, ++ * the DT binding should always provide unique compatible strings ++ * that allow the use of of_match_node() instead. ++ * ++ * The calling function can use the .data entry of the ++ * soc_device_attribute to pass a structure or function pointer for ++ * each entry. 
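++ *
++ * Example (an illustrative sketch only; the attribute values below are
++ * invented, and the array must end with an empty, zero-initialized
++ * entry):
++ *
++ *	static const struct soc_device_attribute my_soc_quirks[] = {
++ *		{ .family = "QorIQ LS1021A", .revision = "1.0" },
++ *		{ }
++ *	};
++ *
++ *	if (soc_device_match(my_soc_quirks))
++ *		pr_info("applying SoC-specific quirk\n");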
++ */ ++const struct soc_device_attribute *soc_device_match( ++ const struct soc_device_attribute *matches) ++{ ++ int ret = 0; ++ ++ if (!matches) ++ return NULL; ++ ++ while (!ret) { ++ if (!(matches->machine || matches->family || ++ matches->revision || matches->soc_id)) ++ break; ++ ret = bus_for_each_dev(&soc_bus_type, NULL, (void *)matches, ++ soc_device_match_one); ++ if (!ret) ++ matches++; ++ else ++ return matches; ++ } ++ return NULL; ++} ++EXPORT_SYMBOL_GPL(soc_device_match); +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -211,8 +211,8 @@ static int lacp_fast; + + static int bond_init(struct net_device *bond_dev); + static void bond_uninit(struct net_device *bond_dev); +-static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, +- struct rtnl_link_stats64 *stats); ++static void bond_get_stats(struct net_device *bond_dev, ++ struct rtnl_link_stats64 *stats); + static void bond_slave_arr_handler(struct work_struct *work); + static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, + int mod); +@@ -3336,8 +3336,8 @@ static void bond_fold_stats(struct rtnl_ + } + } + +-static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, +- struct rtnl_link_stats64 *stats) ++static void bond_get_stats(struct net_device *bond_dev, ++ struct rtnl_link_stats64 *stats) + { + struct bonding *bond = netdev_priv(bond_dev); + struct rtnl_link_stats64 temp; +@@ -3361,8 +3361,6 @@ static struct rtnl_link_stats64 *bond_ge + + memcpy(&bond->bond_stats, stats, sizeof(*stats)); + spin_unlock(&bond->stats_lock); +- +- return stats; + } + + static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd) +--- a/drivers/net/dummy.c ++++ b/drivers/net/dummy.c +@@ -54,8 +54,8 @@ struct pcpu_dstats { + struct u64_stats_sync syncp; + }; + +-static struct rtnl_link_stats64 *dummy_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *stats) ++static void dummy_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *stats) + { + int i; + +@@ -73,7 +73,6 @@ static struct rtnl_link_stats64 *dummy_g + stats->tx_bytes += tbytes; + stats->tx_packets += tpackets; + } +- return stats; + } + + static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev) +--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c ++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c +@@ -2172,19 +2172,19 @@ err: + ena_com_delete_debug_area(adapter->ena_dev); + } + +-static struct rtnl_link_stats64 *ena_get_stats64(struct net_device *netdev, +- struct rtnl_link_stats64 *stats) ++static void ena_get_stats64(struct net_device *netdev, ++ struct rtnl_link_stats64 *stats) + { + struct ena_adapter *adapter = netdev_priv(netdev); + struct ena_admin_basic_stats ena_stats; + int rc; + + if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) +- return NULL; ++ return; + + rc = ena_com_get_dev_basic_stats(adapter->ena_dev, &ena_stats); + if (rc) +- return NULL; ++ return; + + stats->tx_bytes = ((u64)ena_stats.tx_bytes_high << 32) | + ena_stats.tx_bytes_low; +@@ -2211,8 +2211,6 @@ static struct rtnl_link_stats64 *ena_get + + stats->rx_errors = 0; + stats->tx_errors = 0; +- +- return stats; + } + + static const struct net_device_ops ena_netdev_ops = { +--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +@@ -1542,8 +1542,8 @@ static void xgbe_tx_timeout(struct net_d + schedule_work(&pdata->restart_work); + } + +-static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device 
*netdev, +- struct rtnl_link_stats64 *s) ++static void xgbe_get_stats64(struct net_device *netdev, ++ struct rtnl_link_stats64 *s) + { + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_mmc_stats *pstats = &pdata->mmc_stats; +@@ -1569,8 +1569,6 @@ static struct rtnl_link_stats64 *xgbe_ge + s->tx_dropped = netdev->stats.tx_dropped; + + DBGPR("<--%s\n", __func__); +- +- return s; + } + + static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, +--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c ++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +@@ -1199,7 +1199,7 @@ err: + return ret; + } + +-static struct rtnl_link_stats64 *xgene_enet_get_stats64( ++static void xgene_enet_get_stats64( + struct net_device *ndev, + struct rtnl_link_stats64 *storage) + { +@@ -1230,8 +1230,6 @@ static struct rtnl_link_stats64 *xgene_e + } + } + memcpy(storage, stats, sizeof(struct rtnl_link_stats64)); +- +- return storage; + } + + static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr) +--- a/drivers/net/ethernet/atheros/alx/main.c ++++ b/drivers/net/ethernet/atheros/alx/main.c +@@ -1424,8 +1424,8 @@ static void alx_poll_controller(struct n + } + #endif + +-static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *net_stats) ++static void alx_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *net_stats) + { + struct alx_priv *alx = netdev_priv(dev); + struct alx_hw_stats *hw_stats = &alx->hw.stats; +@@ -1469,8 +1469,6 @@ static struct rtnl_link_stats64 *alx_get + net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors; + + spin_unlock(&alx->stats_lock); +- +- return net_stats; + } + + static const struct net_device_ops alx_netdev_ops = { +--- a/drivers/net/ethernet/broadcom/b44.c ++++ b/drivers/net/ethernet/broadcom/b44.c +@@ -1677,8 +1677,8 @@ static int b44_close(struct net_device * + return 0; + } + +-static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *nstat) ++static void b44_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *nstat) + { + struct b44 *bp = netdev_priv(dev); + struct b44_hw_stats *hwstat = &bp->hw_stats; +@@ -1721,7 +1721,6 @@ static struct rtnl_link_stats64 *b44_get + #endif + } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start)); + +- return nstat; + } + + static int __b44_load_mcast(struct b44 *bp, struct net_device *dev) +--- a/drivers/net/ethernet/broadcom/bnx2.c ++++ b/drivers/net/ethernet/broadcom/bnx2.c +@@ -6828,13 +6828,13 @@ bnx2_save_stats(struct bnx2 *bp) + (unsigned long) (bp->stats_blk->ctr + \ + bp->temp_stats_blk->ctr) + +-static struct rtnl_link_stats64 * ++static void + bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) + { + struct bnx2 *bp = netdev_priv(dev); + + if (bp->stats_blk == NULL) +- return net_stats; ++ return; + + net_stats->rx_packets = + GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) + +@@ -6898,7 +6898,6 @@ bnx2_get_stats64(struct net_device *dev, + GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) + + GET_32BIT_NET_STATS(stat_FwRxDrop); + +- return net_stats; + } + + /* All ethtool functions called with rtnl_lock */ +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -5664,7 +5664,7 @@ static int bnxt_ioctl(struct net_device + return -EOPNOTSUPP; + } + +-static struct rtnl_link_stats64 * ++static void + bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) + { + u32 i; +@@ -5673,7 
+5673,7 @@ bnxt_get_stats64(struct net_device *dev, + memset(stats, 0, sizeof(struct rtnl_link_stats64)); + + if (!bp->bnapi) +- return stats; ++ return; + + /* TODO check if we need to synchronize with bnxt_close path */ + for (i = 0; i < bp->cp_nr_rings; i++) { +@@ -5720,8 +5720,6 @@ bnxt_get_stats64(struct net_device *dev, + stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); + stats->tx_errors = le64_to_cpu(tx->tx_err); + } +- +- return stats; + } + + static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) +--- a/drivers/net/ethernet/broadcom/tg3.c ++++ b/drivers/net/ethernet/broadcom/tg3.c +@@ -14145,8 +14145,8 @@ static const struct ethtool_ops tg3_etht + .set_link_ksettings = tg3_set_link_ksettings, + }; + +-static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *stats) ++static void tg3_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *stats) + { + struct tg3 *tp = netdev_priv(dev); + +@@ -14154,13 +14154,11 @@ static struct rtnl_link_stats64 *tg3_get + if (!tp->hw_stats) { + *stats = tp->net_stats_prev; + spin_unlock_bh(&tp->lock); +- return stats; ++ return; + } + + tg3_get_nstats(tp, stats); + spin_unlock_bh(&tp->lock); +- +- return stats; + } + + static void tg3_set_rx_mode(struct net_device *dev) +--- a/drivers/net/ethernet/brocade/bna/bnad.c ++++ b/drivers/net/ethernet/brocade/bna/bnad.c +@@ -3111,7 +3111,7 @@ bnad_start_xmit(struct sk_buff *skb, str + * Used spin_lock to synchronize reading of stats structures, which + * is written by BNA under the same lock. + */ +-static struct rtnl_link_stats64 * ++static void + bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) + { + struct bnad *bnad = netdev_priv(netdev); +@@ -3123,8 +3123,6 @@ bnad_get_stats64(struct net_device *netd + bnad_netdev_hwstats_fill(bnad, stats); + + spin_unlock_irqrestore(&bnad->bna_lock, flags); +- +- return stats; + } + + static void +@@ -3430,7 +3428,7 @@ static const struct net_device_ops bnad_ + .ndo_open = bnad_open, + .ndo_stop = bnad_stop, + .ndo_start_xmit = bnad_start_xmit, +- .ndo_get_stats64 = bnad_get_stats64, ++ .ndo_get_stats64 = bnad_get_stats64, + .ndo_set_rx_mode = bnad_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = bnad_set_mac_address, +--- a/drivers/net/ethernet/calxeda/xgmac.c ++++ b/drivers/net/ethernet/calxeda/xgmac.c +@@ -1460,9 +1460,9 @@ static void xgmac_poll_controller(struct + } + #endif + +-static struct rtnl_link_stats64 * ++static void + xgmac_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *storage) ++ struct rtnl_link_stats64 *storage) + { + struct xgmac_priv *priv = netdev_priv(dev); + void __iomem *base = priv->base; +@@ -1490,7 +1490,6 @@ xgmac_get_stats64(struct net_device *dev + + writel(0, base + XGMAC_MMC_CTRL); + spin_unlock_bh(&priv->stats_lock); +- return storage; + } + + static int xgmac_set_mac_address(struct net_device *dev, void *p) +--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c ++++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c +@@ -1423,8 +1423,8 @@ void nicvf_update_stats(struct nicvf *ni + nicvf_update_sq_stats(nic, qidx); + } + +-static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev, +- struct rtnl_link_stats64 *stats) ++static void nicvf_get_stats64(struct net_device *netdev, ++ struct rtnl_link_stats64 *stats) + { + struct nicvf *nic = netdev_priv(netdev); + struct nicvf_hw_stats *hw_stats = &nic->hw_stats; +@@ -1440,7 +1440,6 @@ static struct rtnl_link_stats64 
*nicvf_g + stats->tx_packets = hw_stats->tx_frames; + stats->tx_dropped = hw_stats->tx_drops; + +- return stats; + } + + static void nicvf_tx_timeout(struct net_device *dev) +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +@@ -2383,8 +2383,8 @@ int cxgb4_remove_server_filter(const str + } + EXPORT_SYMBOL(cxgb4_remove_server_filter); + +-static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, +- struct rtnl_link_stats64 *ns) ++static void cxgb_get_stats(struct net_device *dev, ++ struct rtnl_link_stats64 *ns) + { + struct port_stats stats; + struct port_info *p = netdev_priv(dev); +@@ -2397,7 +2397,7 @@ static struct rtnl_link_stats64 *cxgb_ge + spin_lock(&adapter->stats_lock); + if (!netif_device_present(dev)) { + spin_unlock(&adapter->stats_lock); +- return ns; ++ return; + } + t4_get_port_stats_offset(adapter, p->tx_chan, &stats, + &p->stats_base); +@@ -2431,7 +2431,6 @@ static struct rtnl_link_stats64 *cxgb_ge + ns->tx_errors = stats.tx_error_frames; + ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err + + ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; +- return ns; + } + + static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) +--- a/drivers/net/ethernet/cisco/enic/enic_main.c ++++ b/drivers/net/ethernet/cisco/enic/enic_main.c +@@ -680,8 +680,8 @@ static netdev_tx_t enic_hard_start_xmit( + } + + /* dev_base_lock rwlock held, nominally process context */ +-static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev, +- struct rtnl_link_stats64 *net_stats) ++static void enic_get_stats(struct net_device *netdev, ++ struct rtnl_link_stats64 *net_stats) + { + struct enic *enic = netdev_priv(netdev); + struct vnic_stats *stats; +@@ -693,7 +693,7 @@ static struct rtnl_link_stats64 *enic_ge + * recorded stats. 
+ */ + if (err == -ENOMEM) +- return net_stats; ++ return; + + net_stats->tx_packets = stats->tx.tx_frames_ok; + net_stats->tx_bytes = stats->tx.tx_bytes_ok; +@@ -707,8 +707,6 @@ static struct rtnl_link_stats64 *enic_ge + net_stats->rx_over_errors = enic->rq_truncated_pkts; + net_stats->rx_crc_errors = enic->rq_bad_fcs; + net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop; +- +- return net_stats; + } + + static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr) +--- a/drivers/net/ethernet/ec_bhf.c ++++ b/drivers/net/ethernet/ec_bhf.c +@@ -458,7 +458,7 @@ static int ec_bhf_stop(struct net_device + return 0; + } + +-static struct rtnl_link_stats64 * ++static void + ec_bhf_get_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats) + { +@@ -473,8 +473,6 @@ ec_bhf_get_stats(struct net_device *net_ + + stats->tx_bytes = priv->stat_tx_bytes; + stats->rx_bytes = priv->stat_rx_bytes; +- +- return stats; + } + + static const struct net_device_ops ec_bhf_netdev_ops = { +--- a/drivers/net/ethernet/emulex/benet/be_main.c ++++ b/drivers/net/ethernet/emulex/benet/be_main.c +@@ -646,8 +646,8 @@ void be_parse_stats(struct be_adapter *a + } + } + +-static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, +- struct rtnl_link_stats64 *stats) ++static void be_get_stats64(struct net_device *netdev, ++ struct rtnl_link_stats64 *stats) + { + struct be_adapter *adapter = netdev_priv(netdev); + struct be_drv_stats *drvs = &adapter->drv_stats; +@@ -711,7 +711,6 @@ static struct rtnl_link_stats64 *be_get_ + stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop + + drvs->rx_input_fifo_overflow_drop + + drvs->rx_drops_no_pbuf; +- return stats; + } + + void be_link_status_update(struct be_adapter *adapter, u8 link_status) +--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c ++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c +@@ -1536,8 +1536,8 @@ void hns_nic_set_rx_mode(struct net_devi + hns_set_multicast_list(ndev); + } + +-struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev, +- struct rtnl_link_stats64 *stats) ++static void hns_nic_get_stats64(struct net_device *ndev, ++ struct rtnl_link_stats64 *stats) + { + int idx = 0; + u64 tx_bytes = 0; +@@ -1579,8 +1579,6 @@ struct rtnl_link_stats64 *hns_nic_get_st + stats->tx_window_errors = ndev->stats.tx_window_errors; + stats->rx_compressed = ndev->stats.rx_compressed; + stats->tx_compressed = ndev->stats.tx_compressed; +- +- return stats; + } + + static u16 +--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c ++++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c +@@ -328,8 +328,8 @@ out: + spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags); + } + +-static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *stats) ++static void ehea_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *stats) + { + struct ehea_port *port = netdev_priv(dev); + u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0; +@@ -352,7 +352,6 @@ static struct rtnl_link_stats64 *ehea_ge + + stats->multicast = port->stats.multicast; + stats->rx_errors = port->stats.rx_errors; +- return stats; + } + + static void ehea_update_stats(struct work_struct *work) +--- a/drivers/net/ethernet/intel/e1000e/e1000.h ++++ b/drivers/net/ethernet/intel/e1000e/e1000.h +@@ -493,8 +493,8 @@ int e1000e_setup_rx_resources(struct e10 + int e1000e_setup_tx_resources(struct e1000_ring *ring); + void e1000e_free_rx_resources(struct e1000_ring *ring); + void e1000e_free_tx_resources(struct 
e1000_ring *ring); +-struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, +- struct rtnl_link_stats64 *stats); ++void e1000e_get_stats64(struct net_device *netdev, ++ struct rtnl_link_stats64 *stats); + void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); + void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); + void e1000e_get_hw_control(struct e1000_adapter *adapter); +--- a/drivers/net/ethernet/intel/e1000e/netdev.c ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c +@@ -5939,8 +5939,8 @@ static void e1000_reset_task(struct work + * + * Returns the address of the device statistics structure. + **/ +-struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, +- struct rtnl_link_stats64 *stats) ++void e1000e_get_stats64(struct net_device *netdev, ++ struct rtnl_link_stats64 *stats) + { + struct e1000_adapter *adapter = netdev_priv(netdev); + +@@ -5977,7 +5977,6 @@ struct rtnl_link_stats64 *e1000e_get_sta + /* Tx Dropped needs to be maintained elsewhere */ + + spin_unlock(&adapter->stats64_lock); +- return stats; + } + + /** +--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c ++++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +@@ -1128,8 +1128,8 @@ void fm10k_reset_rx_state(struct fm10k_i + * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This + * function replaces fm10k_get_stats for kernels which support it. + */ +-static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev, +- struct rtnl_link_stats64 *stats) ++static void fm10k_get_stats64(struct net_device *netdev, ++ struct rtnl_link_stats64 *stats) + { + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_ring *ring; +@@ -1174,8 +1174,6 @@ static struct rtnl_link_stats64 *fm10k_g + + /* following stats updated by fm10k_service_task() */ + stats->rx_missed_errors = netdev->stats.rx_missed_errors; +- +- return stats; + } + + int fm10k_setup_tc(struct net_device *dev, u8 tc) +--- a/drivers/net/ethernet/intel/i40e/i40e.h ++++ b/drivers/net/ethernet/intel/i40e/i40e.h +@@ -797,9 +797,8 @@ static inline void i40e_irq_dynamic_enab + void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); + void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba); + #ifdef I40E_FCOE +-struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( +- struct net_device *netdev, +- struct rtnl_link_stats64 *storage); ++void i40e_get_netdev_stats_struct(struct net_device *netdev, ++ struct rtnl_link_stats64 *storage); + int i40e_set_mac(struct net_device *netdev, void *p); + void i40e_set_rx_mode(struct net_device *netdev); + #endif +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c +@@ -408,15 +408,11 @@ struct rtnl_link_stats64 *i40e_get_vsi_s + * Returns the address of the device statistics structure. + * The statistics are actually updated from the service task. 
+ **/ +-#ifdef I40E_FCOE +-struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( +- struct net_device *netdev, +- struct rtnl_link_stats64 *stats) +-#else +-static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( +- struct net_device *netdev, +- struct rtnl_link_stats64 *stats) ++#ifndef I40E_FCOE ++static + #endif ++void i40e_get_netdev_stats_struct(struct net_device *netdev, ++ struct rtnl_link_stats64 *stats) + { + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_ring *tx_ring, *rx_ring; +@@ -425,10 +421,10 @@ static struct rtnl_link_stats64 *i40e_ge + int i; + + if (test_bit(__I40E_DOWN, &vsi->state)) +- return stats; ++ return; + + if (!vsi->tx_rings) +- return stats; ++ return; + + rcu_read_lock(); + for (i = 0; i < vsi->num_queue_pairs; i++) { +@@ -468,8 +464,6 @@ static struct rtnl_link_stats64 *i40e_ge + stats->rx_dropped = vsi_stats->rx_dropped; + stats->rx_crc_errors = vsi_stats->rx_crc_errors; + stats->rx_length_errors = vsi_stats->rx_length_errors; +- +- return stats; + } + + /** +--- a/drivers/net/ethernet/intel/igb/igb_main.c ++++ b/drivers/net/ethernet/intel/igb/igb_main.c +@@ -137,8 +137,8 @@ static void igb_update_phy_info(unsigned + static void igb_watchdog(unsigned long); + static void igb_watchdog_task(struct work_struct *); + static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *); +-static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *stats); ++static void igb_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *stats); + static int igb_change_mtu(struct net_device *, int); + static int igb_set_mac(struct net_device *, void *); + static void igb_set_uta(struct igb_adapter *adapter, bool set); +@@ -5386,8 +5386,8 @@ static void igb_reset_task(struct work_s + * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer + **/ +-static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, +- struct rtnl_link_stats64 *stats) ++static void igb_get_stats64(struct net_device *netdev, ++ struct rtnl_link_stats64 *stats) + { + struct igb_adapter *adapter = netdev_priv(netdev); + +@@ -5395,8 +5395,6 @@ static struct rtnl_link_stats64 *igb_get + igb_update_stats(adapter, &adapter->stats64); + memcpy(stats, &adapter->stats64, sizeof(*stats)); + spin_unlock(&adapter->stats64_lock); +- +- return stats; + } + + /** +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +@@ -8085,8 +8085,9 @@ static void ixgbe_netpoll(struct net_dev + } + + #endif +-static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, +- struct rtnl_link_stats64 *stats) ++ ++static void ixgbe_get_stats64(struct net_device *netdev, ++ struct rtnl_link_stats64 *stats) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int i; +@@ -8124,13 +8125,13 @@ static struct rtnl_link_stats64 *ixgbe_g + } + } + rcu_read_unlock(); ++ + /* following stats updated by ixgbe_watchdog_task() */ + stats->multicast = netdev->stats.multicast; + stats->rx_errors = netdev->stats.rx_errors; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; +- return stats; + } + + #ifdef CONFIG_IXGBE_DCB +--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +@@ -3880,8 +3880,8 @@ static void ixgbevf_shutdown(struct pci_ + ixgbevf_suspend(pdev, 
PMSG_SUSPEND); + } + +-static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, +- struct rtnl_link_stats64 *stats) ++static void ixgbevf_get_stats(struct net_device *netdev, ++ struct rtnl_link_stats64 *stats) + { + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + unsigned int start; +@@ -3914,8 +3914,6 @@ static struct rtnl_link_stats64 *ixgbevf + stats->tx_bytes += bytes; + stats->tx_packets += packets; + } +- +- return stats; + } + + #define IXGBEVF_MAX_MAC_HDR_LEN 127 +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -636,7 +636,7 @@ static void mvneta_mib_counters_clear(st + } + + /* Get System Network Statistics */ +-static struct rtnl_link_stats64 * ++static void + mvneta_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) + { +@@ -670,8 +670,6 @@ mvneta_get_stats64(struct net_device *de + stats->rx_dropped = dev->stats.rx_dropped; + + stats->tx_dropped = dev->stats.tx_dropped; +- +- return stats; + } + + /* Rx descriptors helper methods */ +--- a/drivers/net/ethernet/marvell/mvpp2.c ++++ b/drivers/net/ethernet/marvell/mvpp2.c +@@ -5762,7 +5762,7 @@ error: + return err; + } + +-static struct rtnl_link_stats64 * ++static void + mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) + { + struct mvpp2_port *port = netdev_priv(dev); +@@ -5794,8 +5794,6 @@ mvpp2_get_stats64(struct net_device *dev + stats->rx_errors = dev->stats.rx_errors; + stats->rx_dropped = dev->stats.rx_dropped; + stats->tx_dropped = dev->stats.tx_dropped; +- +- return stats; + } + + static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +--- a/drivers/net/ethernet/marvell/sky2.c ++++ b/drivers/net/ethernet/marvell/sky2.c +@@ -3898,8 +3898,8 @@ static void sky2_set_multicast(struct ne + gma_write16(hw, port, GM_RX_CTRL, reg); + } + +-static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev, +- struct rtnl_link_stats64 *stats) ++static void sky2_get_stats(struct net_device *dev, ++ struct rtnl_link_stats64 *stats) + { + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; +@@ -3939,8 +3939,6 @@ static struct rtnl_link_stats64 *sky2_ge + stats->rx_dropped = dev->stats.rx_dropped; + stats->rx_fifo_errors = dev->stats.rx_fifo_errors; + stats->tx_fifo_errors = dev->stats.tx_fifo_errors; +- +- return stats; + } + + /* Can have one global because blinking is controlled by +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -462,8 +462,8 @@ static void mtk_stats_update(struct mtk_ + } + } + +-static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *storage) ++static void mtk_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *storage) + { + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_hw_stats *hw_stats = mac->hw_stats; +@@ -494,8 +494,6 @@ static struct rtnl_link_stats64 *mtk_get + storage->tx_errors = dev->stats.tx_errors; + storage->rx_dropped = dev->stats.rx_dropped; + storage->tx_dropped = dev->stats.tx_dropped; +- +- return storage; + } + + static inline int mtk_max_frag_size(int mtu) +--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +@@ -1316,7 +1316,7 @@ static void mlx4_en_tx_timeout(struct ne + } + + +-static struct rtnl_link_stats64 * ++static void + mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) + { + struct mlx4_en_priv *priv = 
netdev_priv(dev); +@@ -1324,8 +1324,6 @@ mlx4_en_get_stats64(struct net_device *d + spin_lock_bh(&priv->stats_lock); + netdev_stats_to_stats64(stats, &dev->stats); + spin_unlock_bh(&priv->stats_lock); +- +- return stats; + } + + static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -2647,7 +2647,7 @@ mqprio: + return mlx5e_setup_tc(dev, tc->tc); + } + +-struct rtnl_link_stats64 * ++static void + mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) + { + struct mlx5e_priv *priv = netdev_priv(dev); +@@ -2681,7 +2681,6 @@ mlx5e_get_stats(struct net_device *dev, + stats->multicast = + VPORT_COUNTER_GET(vstats, received_eth_multicast.packets); + +- return stats; + } + + static void mlx5e_set_rx_mode(struct net_device *dev) +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +@@ -949,15 +949,13 @@ out: + /* Return the stats from a cache that is updated periodically, + * as this function might get called in an atomic context. + */ +-static struct rtnl_link_stats64 * ++static void + mlxsw_sp_port_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) + { + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + + memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats)); +- +- return stats; + } + + int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, +--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +@@ -351,7 +351,7 @@ static int mlxsw_sx_port_change_mtu(stru + return 0; + } + +-static struct rtnl_link_stats64 * ++static void + mlxsw_sx_port_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) + { +@@ -380,7 +380,6 @@ mlxsw_sx_port_get_stats64(struct net_dev + tx_dropped += p->tx_dropped; + } + stats->tx_dropped = tx_dropped; +- return stats; + } + + static const struct net_device_ops mlxsw_sx_port_netdev_ops = { +--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c ++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +@@ -378,8 +378,8 @@ static inline void put_be32(__be32 val, + __raw_writel((__force __u32) val, (__force void __iomem *)p); + } + +-static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev, +- struct rtnl_link_stats64 *stats); ++static void myri10ge_get_stats(struct net_device *dev, ++ struct rtnl_link_stats64 *stats); + + static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated) + { +@@ -3119,8 +3119,8 @@ drop: + return NETDEV_TX_OK; + } + +-static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev, +- struct rtnl_link_stats64 *stats) ++static void myri10ge_get_stats(struct net_device *dev, ++ struct rtnl_link_stats64 *stats) + { + const struct myri10ge_priv *mgp = netdev_priv(dev); + const struct myri10ge_slice_netstats *slice_stats; +@@ -3135,7 +3135,6 @@ static struct rtnl_link_stats64 *myri10g + stats->rx_dropped += slice_stats->rx_dropped; + stats->tx_dropped += slice_stats->tx_dropped; + } +- return stats; + } + + static void myri10ge_set_multicast_list(struct net_device *dev) +--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c ++++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c +@@ -3116,7 +3116,7 @@ static int vxge_change_mtu(struct net_de + * @stats: pointer to struct rtnl_link_stats64 + * + */ +-static struct rtnl_link_stats64 * ++static void + vxge_get_stats64(struct net_device 
*dev, struct rtnl_link_stats64 *net_stats) + { + struct vxgedev *vdev = netdev_priv(dev); +@@ -3155,8 +3155,6 @@ vxge_get_stats64(struct net_device *dev, + net_stats->tx_bytes += bytes; + net_stats->tx_errors += txstats->tx_errors; + } +- +- return net_stats; + } + + static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh) +--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c ++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +@@ -2400,8 +2400,8 @@ int nfp_net_set_ring_size(struct nfp_net + return err; + } + +-static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev, +- struct rtnl_link_stats64 *stats) ++static void nfp_net_stat64(struct net_device *netdev, ++ struct rtnl_link_stats64 *stats) + { + struct nfp_net *nn = netdev_priv(netdev); + int r; +@@ -2431,8 +2431,6 @@ static struct rtnl_link_stats64 *nfp_net + stats->tx_bytes += data[1]; + stats->tx_errors += data[2]; + } +- +- return stats; + } + + static bool nfp_net_ebpf_capable(struct nfp_net *nn) +--- a/drivers/net/ethernet/nvidia/forcedeth.c ++++ b/drivers/net/ethernet/nvidia/forcedeth.c +@@ -1733,7 +1733,7 @@ static void nv_update_stats(struct net_d + * Called with read_lock(&dev_base_lock) held for read - + * only synchronized against unregister_netdevice. + */ +-static struct rtnl_link_stats64* ++static void + nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) + __acquires(&netdev_priv(dev)->hwstats_lock) + __releases(&netdev_priv(dev)->hwstats_lock) +@@ -1793,8 +1793,6 @@ nv_get_stats64(struct net_device *dev, s + + spin_unlock_bh(&np->hwstats_lock); + } +- +- return storage; + } + + /* +--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c ++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +@@ -90,8 +90,8 @@ static irqreturn_t netxen_msix_intr(int + + static void netxen_free_ip_list(struct netxen_adapter *, bool); + static void netxen_restore_indev_addr(struct net_device *dev, unsigned long); +-static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev, +- struct rtnl_link_stats64 *stats); ++static void netxen_nic_get_stats(struct net_device *dev, ++ struct rtnl_link_stats64 *stats); + static int netxen_nic_set_mac(struct net_device *netdev, void *p); + + /* PCI Device ID Table */ +@@ -2295,8 +2295,8 @@ request_reset: + clear_bit(__NX_RESETTING, &adapter->state); + } + +-static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev, +- struct rtnl_link_stats64 *stats) ++static void netxen_nic_get_stats(struct net_device *netdev, ++ struct rtnl_link_stats64 *stats) + { + struct netxen_adapter *adapter = netdev_priv(netdev); + +@@ -2306,8 +2306,6 @@ static struct rtnl_link_stats64 *netxen_ + stats->tx_bytes = adapter->stats.txbytes; + stats->rx_dropped = adapter->stats.rxdropped; + stats->tx_dropped = adapter->stats.txdropped; +- +- return stats; + } + + static irqreturn_t netxen_intr(int irq, void *data) +--- a/drivers/net/ethernet/qlogic/qede/qede_main.c ++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c +@@ -1803,9 +1803,8 @@ void qede_fill_by_demand_stats(struct qe + edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames; + } + +-static +-struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *stats) ++static void qede_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *stats) + { + struct qede_dev *edev = netdev_priv(dev); + +@@ -1835,8 +1834,6 @@ struct rtnl_link_stats64 *qede_get_stats + stats->collisions = edev->stats.tx_total_collisions; + 
stats->rx_crc_errors = edev->stats.rx_crc_errors; + stats->rx_frame_errors = edev->stats.rx_align_errors; +- +- return stats; + } + + #ifdef CONFIG_QED_SRIOV +--- a/drivers/net/ethernet/qualcomm/emac/emac.c ++++ b/drivers/net/ethernet/qualcomm/emac/emac.c +@@ -319,8 +319,8 @@ static int emac_ioctl(struct net_device + } + + /* Provide network statistics info for the interface */ +-static struct rtnl_link_stats64 *emac_get_stats64(struct net_device *netdev, +- struct rtnl_link_stats64 *net_stats) ++static void emac_get_stats64(struct net_device *netdev, ++ struct rtnl_link_stats64 *net_stats) + { + struct emac_adapter *adpt = netdev_priv(netdev); + unsigned int addr = REG_MAC_RX_STATUS_BIN; +@@ -384,8 +384,6 @@ static struct rtnl_link_stats64 *emac_ge + net_stats->tx_window_errors = stats->tx_late_col; + + spin_unlock(&stats->lock); +- +- return net_stats; + } + + static const struct net_device_ops emac_netdev_ops = { +--- a/drivers/net/ethernet/realtek/8139too.c ++++ b/drivers/net/ethernet/realtek/8139too.c +@@ -653,9 +653,8 @@ static int rtl8139_poll(struct napi_stru + static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance); + static int rtl8139_close (struct net_device *dev); + static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); +-static struct rtnl_link_stats64 *rtl8139_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 +- *stats); ++static void rtl8139_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *stats); + static void rtl8139_set_rx_mode (struct net_device *dev); + static void __set_rx_mode (struct net_device *dev); + static void rtl8139_hw_start (struct net_device *dev); +@@ -2521,7 +2520,7 @@ static int netdev_ioctl(struct net_devic + } + + +-static struct rtnl_link_stats64 * ++static void + rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) + { + struct rtl8139_private *tp = netdev_priv(dev); +@@ -2549,8 +2548,6 @@ rtl8139_get_stats64(struct net_device *d + stats->tx_packets = tp->tx_stats.packets; + stats->tx_bytes = tp->tx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start)); +- +- return stats; + } + + /* Set or clear the multicast filter for this adaptor. +--- a/drivers/net/ethernet/realtek/r8169.c ++++ b/drivers/net/ethernet/realtek/r8169.c +@@ -7751,7 +7751,7 @@ err_pm_runtime_put: + goto out; + } + +-static struct rtnl_link_stats64 * ++static void + rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) + { + struct rtl8169_private *tp = netdev_priv(dev); +@@ -7805,8 +7805,6 @@ rtl8169_get_stats64(struct net_device *d + le16_to_cpu(tp->tc_offset.tx_aborted); + + pm_runtime_put_noidle(&pdev->dev); +- +- return stats; + } + + static void rtl8169_net_suspend(struct net_device *dev) +--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c ++++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +@@ -1721,11 +1721,9 @@ static inline u64 sxgbe_get_stat64(void + * This function is a driver entry point whenever ifconfig command gets + * executed to see device statistics. Statistics are number of + * bytes sent or received, errors occurred etc. +- * Return value: +- * This function returns various statistical information of device. 
+ */ +-static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *stats) ++static void sxgbe_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *stats) + { + struct sxgbe_priv_data *priv = netdev_priv(dev); + void __iomem *ioaddr = priv->ioaddr; +@@ -1776,8 +1774,6 @@ static struct rtnl_link_stats64 *sxgbe_g + SXGBE_MMC_TXUFLWHI_GBCNT_REG); + writel(0, ioaddr + SXGBE_MMC_CTL_REG); + spin_unlock(&priv->stats_lock); +- +- return stats; + } + + /* sxgbe_set_features - entry point to set offload features of the device. +--- a/drivers/net/ethernet/sfc/efx.c ++++ b/drivers/net/ethernet/sfc/efx.c +@@ -2232,16 +2232,14 @@ int efx_net_stop(struct net_device *net_ + } + + /* Context: process, dev_base_lock or RTNL held, non-blocking. */ +-static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, +- struct rtnl_link_stats64 *stats) ++static void efx_net_stats(struct net_device *net_dev, ++ struct rtnl_link_stats64 *stats) + { + struct efx_nic *efx = netdev_priv(net_dev); + + spin_lock_bh(&efx->stats_lock); + efx->type->update_stats(efx, NULL, stats); + spin_unlock_bh(&efx->stats_lock); +- +- return stats; + } + + /* Context: netif_tx_lock held, BHs disabled. */ +--- a/drivers/net/ethernet/sun/niu.c ++++ b/drivers/net/ethernet/sun/niu.c +@@ -6294,8 +6294,8 @@ no_rings: + stats->tx_errors = errors; + } + +-static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev, +- struct rtnl_link_stats64 *stats) ++static void niu_get_stats(struct net_device *dev, ++ struct rtnl_link_stats64 *stats) + { + struct niu *np = netdev_priv(dev); + +@@ -6303,8 +6303,6 @@ static struct rtnl_link_stats64 *niu_get + niu_get_rx_stats(np, stats); + niu_get_tx_stats(np, stats); + } +- +- return stats; + } + + static void niu_load_hash_xmac(struct niu *np, u16 *hash) +--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c ++++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c +@@ -2490,7 +2490,7 @@ static void dwceqos_read_mmc_counters(st + dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB); + } + +-static struct rtnl_link_stats64* ++static void + dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s) + { + unsigned long flags; +@@ -2522,8 +2522,6 @@ dwceqos_get_stats64(struct net_device *n + else + s->tx_errors = hwstats->txunderflowerror + + hwstats->txcarriererror; +- +- return s; + } + + static void +--- a/drivers/net/ethernet/tile/tilepro.c ++++ b/drivers/net/ethernet/tile/tilepro.c +@@ -2047,8 +2047,8 @@ static int tile_net_ioctl(struct net_dev + * + * Returns the address of the device statistics structure. 
+ */ +-static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *stats) ++static void tile_net_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *stats) + { + struct tile_net_priv *priv = netdev_priv(dev); + u64 rx_packets = 0, tx_packets = 0; +--- a/drivers/net/ethernet/via/via-rhine.c ++++ b/drivers/net/ethernet/via/via-rhine.c +@@ -513,8 +513,8 @@ static irqreturn_t rhine_interrupt(int i + static void rhine_tx(struct net_device *dev); + static int rhine_rx(struct net_device *dev, int limit); + static void rhine_set_rx_mode(struct net_device *dev); +-static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *stats); ++static void rhine_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *stats); + static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); + static const struct ethtool_ops netdev_ethtool_ops; + static int rhine_close(struct net_device *dev); +@@ -2222,7 +2222,7 @@ out_unlock: + mutex_unlock(&rp->task_lock); + } + +-static struct rtnl_link_stats64 * ++static void + rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) + { + struct rhine_private *rp = netdev_priv(dev); +@@ -2245,8 +2245,6 @@ rhine_get_stats64(struct net_device *dev + stats->tx_packets = rp->tx_stats.packets; + stats->tx_bytes = rp->tx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start)); +- +- return stats; + } + + static void rhine_set_rx_mode(struct net_device *dev) +--- a/drivers/net/fjes/fjes_main.c ++++ b/drivers/net/fjes/fjes_main.c +@@ -56,8 +56,7 @@ static void fjes_raise_intr_rxdata_task( + static void fjes_tx_stall_task(struct work_struct *); + static void fjes_force_close_task(struct work_struct *); + static irqreturn_t fjes_intr(int, void*); +-static struct rtnl_link_stats64 * +-fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *); ++static void fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *); + static int fjes_change_mtu(struct net_device *, int); + static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16); + static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16); +@@ -762,14 +761,12 @@ static void fjes_tx_retry(struct net_dev + netif_tx_wake_queue(queue); + } + +-static struct rtnl_link_stats64 * ++static void + fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) + { + struct fjes_adapter *adapter = netdev_priv(netdev); + + memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64)); +- +- return stats; + } + + static int fjes_change_mtu(struct net_device *netdev, int new_mtu) +--- a/drivers/net/hyperv/netvsc_drv.c ++++ b/drivers/net/hyperv/netvsc_drv.c +@@ -918,8 +918,8 @@ out: + return ret; + } + +-static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net, +- struct rtnl_link_stats64 *t) ++static void netvsc_get_stats64(struct net_device *net, ++ struct rtnl_link_stats64 *t) + { + struct net_device_context *ndev_ctx = netdev_priv(net); + int cpu; +@@ -957,8 +957,6 @@ static struct rtnl_link_stats64 *netvsc_ + + t->rx_dropped = net->stats.rx_dropped; + t->rx_errors = net->stats.rx_errors; +- +- return t; + } + + static int netvsc_set_mac_addr(struct net_device *ndev, void *p) +--- a/drivers/net/ifb.c ++++ b/drivers/net/ifb.c +@@ -129,8 +129,8 @@ resched: + + } + +-static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *stats) ++static void ifb_stats64(struct 
net_device *dev, ++ struct rtnl_link_stats64 *stats) + { + struct ifb_dev_private *dp = netdev_priv(dev); + struct ifb_q_private *txp = dp->tx_private; +@@ -157,8 +157,6 @@ static struct rtnl_link_stats64 *ifb_sta + } + stats->rx_dropped = dev->stats.rx_dropped; + stats->tx_dropped = dev->stats.tx_dropped; +- +- return stats; + } + + static int ifb_dev_init(struct net_device *dev) +--- a/drivers/net/ipvlan/ipvlan_main.c ++++ b/drivers/net/ipvlan/ipvlan_main.c +@@ -296,8 +296,8 @@ static void ipvlan_set_multicast_mac_fil + dev_mc_sync(ipvlan->phy_dev, dev); + } + +-static struct rtnl_link_stats64 *ipvlan_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *s) ++static void ipvlan_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *s) + { + struct ipvl_dev *ipvlan = netdev_priv(dev); + +@@ -334,7 +334,6 @@ static struct rtnl_link_stats64 *ipvlan_ + s->rx_dropped = rx_errs; + s->tx_dropped = tx_drps; + } +- return s; + } + + static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +--- a/drivers/net/loopback.c ++++ b/drivers/net/loopback.c +@@ -97,8 +97,8 @@ static netdev_tx_t loopback_xmit(struct + return NETDEV_TX_OK; + } + +-static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *stats) ++static void loopback_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *stats) + { + u64 bytes = 0; + u64 packets = 0; +@@ -122,7 +122,6 @@ static struct rtnl_link_stats64 *loopbac + stats->tx_packets = packets; + stats->rx_bytes = bytes; + stats->tx_bytes = bytes; +- return stats; + } + + static u32 always_on(struct net_device *dev) +--- a/drivers/net/macsec.c ++++ b/drivers/net/macsec.c +@@ -2899,13 +2899,13 @@ static int macsec_change_mtu(struct net_ + return 0; + } + +-static struct rtnl_link_stats64 *macsec_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *s) ++static void macsec_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *s) + { + int cpu; + + if (!dev->tstats) +- return s; ++ return; + + for_each_possible_cpu(cpu) { + struct pcpu_sw_netstats *stats; +@@ -2929,8 +2929,6 @@ static struct rtnl_link_stats64 *macsec_ + + s->rx_dropped = dev->stats.rx_dropped; + s->tx_dropped = dev->stats.tx_dropped; +- +- return s; + } + + static int macsec_get_iflink(const struct net_device *dev) +--- a/drivers/net/macvlan.c ++++ b/drivers/net/macvlan.c +@@ -857,8 +857,8 @@ static void macvlan_uninit(struct net_de + macvlan_port_destroy(port->dev); + } + +-static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *stats) ++static void macvlan_dev_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *stats) + { + struct macvlan_dev *vlan = netdev_priv(dev); + +@@ -895,7 +895,6 @@ static struct rtnl_link_stats64 *macvlan + stats->rx_dropped = rx_errors; + stats->tx_dropped = tx_dropped; + } +- return stats; + } + + static int macvlan_vlan_rx_add_vid(struct net_device *dev, +--- a/drivers/net/nlmon.c ++++ b/drivers/net/nlmon.c +@@ -76,7 +76,7 @@ static int nlmon_close(struct net_device + return netlink_remove_tap(&nlmon->nt); + } + +-static struct rtnl_link_stats64 * ++static void + nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) + { + int i; +@@ -104,8 +104,6 @@ nlmon_get_stats64(struct net_device *dev + + stats->rx_bytes = bytes; + stats->tx_bytes = 0; +- +- return stats; + } + + static u32 always_on(struct net_device *dev) +--- a/drivers/net/ppp/ppp_generic.c ++++ 
b/drivers/net/ppp/ppp_generic.c +@@ -1312,7 +1312,7 @@ ppp_net_ioctl(struct net_device *dev, st + return err; + } + +-static struct rtnl_link_stats64* ++static void + ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) + { + struct ppp *ppp = netdev_priv(dev); +@@ -1332,8 +1332,6 @@ ppp_get_stats64(struct net_device *dev, + stats64->rx_dropped = dev->stats.rx_dropped; + stats64->tx_dropped = dev->stats.tx_dropped; + stats64->rx_length_errors = dev->stats.rx_length_errors; +- +- return stats64; + } + + static int ppp_dev_init(struct net_device *dev) +--- a/drivers/net/slip/slip.c ++++ b/drivers/net/slip/slip.c +@@ -571,7 +571,7 @@ static int sl_change_mtu(struct net_devi + + /* Netdevice get statistics request */ + +-static struct rtnl_link_stats64 * ++static void + sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) + { + struct net_device_stats *devstats = &dev->stats; +@@ -602,7 +602,6 @@ sl_get_stats64(struct net_device *dev, s + stats->collisions += comp->sls_o_misses; + } + #endif +- return stats; + } + + /* Netdevice register callback */ +--- a/drivers/net/team/team.c ++++ b/drivers/net/team/team.c +@@ -1798,7 +1798,7 @@ unwind: + return err; + } + +-static struct rtnl_link_stats64 * ++static void + team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) + { + struct team *team = netdev_priv(dev); +@@ -1835,7 +1835,6 @@ team_get_stats64(struct net_device *dev, + stats->rx_dropped = rx_dropped; + stats->tx_dropped = tx_dropped; + stats->rx_nohandler = rx_nohandler; +- return stats; + } + + static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +--- a/drivers/net/tun.c ++++ b/drivers/net/tun.c +@@ -983,7 +983,7 @@ static void tun_set_headroom(struct net_ + tun->align = new_hr; + } + +-static struct rtnl_link_stats64 * ++static void + tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) + { + u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0; +@@ -1017,7 +1017,6 @@ tun_net_get_stats64(struct net_device *d + stats->rx_dropped = rx_dropped; + stats->rx_frame_errors = rx_frame_errors; + stats->tx_dropped = tx_dropped; +- return stats; + } + + static const struct net_device_ops tun_netdev_ops = { +--- a/drivers/net/veth.c ++++ b/drivers/net/veth.c +@@ -161,8 +161,8 @@ static u64 veth_stats_one(struct pcpu_vs + return atomic64_read(&priv->dropped); + } + +-static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *tot) ++static void veth_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *tot) + { + struct veth_priv *priv = netdev_priv(dev); + struct net_device *peer; +@@ -180,8 +180,6 @@ static struct rtnl_link_stats64 *veth_ge + tot->rx_packets = one.packets; + } + rcu_read_unlock(); +- +- return tot; + } + + /* fake multicast ability */ +--- a/drivers/net/virtio_net.c ++++ b/drivers/net/virtio_net.c +@@ -1017,8 +1017,8 @@ out: + return ret; + } + +-static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, +- struct rtnl_link_stats64 *tot) ++static void virtnet_stats(struct net_device *dev, ++ struct rtnl_link_stats64 *tot) + { + struct virtnet_info *vi = netdev_priv(dev); + int cpu; +@@ -1051,8 +1051,6 @@ static struct rtnl_link_stats64 *virtnet + tot->rx_dropped = dev->stats.rx_dropped; + tot->rx_length_errors = dev->stats.rx_length_errors; + tot->rx_frame_errors = dev->stats.rx_frame_errors; +- +- return tot; + } + + #ifdef CONFIG_NET_POLL_CONTROLLER +--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c ++++ 
b/drivers/net/vmxnet3/vmxnet3_ethtool.c +@@ -113,7 +113,7 @@ vmxnet3_global_stats[] = { + }; + + +-struct rtnl_link_stats64 * ++void + vmxnet3_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) + { +@@ -160,8 +160,6 @@ vmxnet3_get_stats64(struct net_device *n + stats->rx_dropped += drvRxStats->drop_total; + stats->multicast += devRxStats->mcastPktsRxOK; + } +- +- return stats; + } + + static int +--- a/drivers/net/vmxnet3/vmxnet3_int.h ++++ b/drivers/net/vmxnet3/vmxnet3_int.h +@@ -466,8 +466,8 @@ vmxnet3_create_queues(struct vmxnet3_ada + + void vmxnet3_set_ethtool_ops(struct net_device *netdev); + +-struct rtnl_link_stats64 * +-vmxnet3_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats); ++void vmxnet3_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *stats); + + extern char vmxnet3_driver_name[]; + #endif +--- a/drivers/net/vrf.c ++++ b/drivers/net/vrf.c +@@ -79,8 +79,8 @@ static void vrf_tx_error(struct net_devi + kfree_skb(skb); + } + +-static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *stats) ++static void vrf_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *stats) + { + int i; + +@@ -104,7 +104,6 @@ static struct rtnl_link_stats64 *vrf_get + stats->rx_bytes += rbytes; + stats->rx_packets += rpkts; + } +- return stats; + } + + /* Local traffic destined to local address. Reinsert the packet to rx +--- a/drivers/net/xen-netfront.c ++++ b/drivers/net/xen-netfront.c +@@ -1081,8 +1081,8 @@ static int xennet_change_mtu(struct net_ + return 0; + } + +-static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *tot) ++static void xennet_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *tot) + { + struct netfront_info *np = netdev_priv(dev); + int cpu; +@@ -1113,8 +1113,6 @@ static struct rtnl_link_stats64 *xennet_ + + tot->rx_errors = dev->stats.rx_errors; + tot->tx_dropped = dev->stats.tx_dropped; +- +- return tot; + } + + static void xennet_release_tx_bufs(struct netfront_queue *queue) +--- a/drivers/staging/netlogic/xlr_net.c ++++ b/drivers/staging/netlogic/xlr_net.c +@@ -395,14 +395,6 @@ static void xlr_stats(struct net_device + TX_DROP_FRAME_COUNTER); + } + +-static struct rtnl_link_stats64 *xlr_get_stats64(struct net_device *ndev, +- struct rtnl_link_stats64 *stats +- ) +-{ +- xlr_stats(ndev, stats); +- return stats; +-} +- + static const struct net_device_ops xlr_netdev_ops = { + .ndo_open = xlr_net_open, + .ndo_stop = xlr_net_stop, +@@ -410,7 +402,7 @@ static const struct net_device_ops xlr_n + .ndo_select_queue = xlr_net_select_queue, + .ndo_set_mac_address = xlr_net_set_mac_addr, + .ndo_set_rx_mode = xlr_set_rx_mode, +- .ndo_get_stats64 = xlr_get_stats64, ++ .ndo_get_stats64 = xlr_stats, + }; + + /* +--- a/include/linux/device.h ++++ b/include/linux/device.h +@@ -688,6 +688,25 @@ void __iomem *devm_ioremap_resource(stru + int devm_add_action(struct device *dev, void (*action)(void *), void *data); + void devm_remove_action(struct device *dev, void (*action)(void *), void *data); + ++/** ++ * devm_alloc_percpu - Resource-managed alloc_percpu ++ * @dev: Device to allocate per-cpu memory for ++ * @type: Type to allocate per-cpu memory for ++ * ++ * Managed alloc_percpu. Per-cpu memory allocated with this function is ++ * automatically freed on driver detach. ++ * ++ * RETURNS: ++ * Pointer to allocated memory on success, NULL on failure. 
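++ *
++ * Example (an illustrative sketch; "struct foo_stats" and the device
++ * pointer "dev" are assumed to come from the calling driver):
++ *
++ *	struct foo_stats __percpu *stats;
++ *
++ *	stats = devm_alloc_percpu(dev, struct foo_stats);
++ *	if (!stats)
++ *		return -ENOMEM;
++ *
++ * The allocation is released automatically when the driver detaches,
++ * so no explicit devm_free_percpu() call is needed on the normal path.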
++ */
++#define devm_alloc_percpu(dev, type) \
++ ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
++ __alignof__(type)))
++
++void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
++ size_t align);
++void devm_free_percpu(struct device *dev, void __percpu *pdata);
++
+ static inline int devm_add_action_or_reset(struct device *dev,
+ void (*action)(void *), void *data)
+ {
+--- /dev/null
++++ b/include/linux/fsl/svr.h
+@@ -0,0 +1,97 @@
++/*
++ * MPC85xx cpu type detection
++ *
++ * Copyright 2011-2012 Freescale Semiconductor, Inc.
++ *
++ * This is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#ifndef FSL_SVR_H
++#define FSL_SVR_H
++
++#define SVR_REV(svr) ((svr) & 0xFF) /* SOC design revision */
++#define SVR_MAJ(svr) (((svr) >> 4) & 0xF) /* Major revision field*/
++#define SVR_MIN(svr) (((svr) >> 0) & 0xF) /* Minor revision field*/
++
++/* Some parts define SVR[0:23] as the SOC version */
++#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */
++
++#define SVR_8533 0x803400
++#define SVR_8535 0x803701
++#define SVR_8536 0x803700
++#define SVR_8540 0x803000
++#define SVR_8541 0x807200
++#define SVR_8543 0x803200
++#define SVR_8544 0x803401
++#define SVR_8545 0x803102
++#define SVR_8547 0x803101
++#define SVR_8548 0x803100
++#define SVR_8555 0x807100
++#define SVR_8560 0x807000
++#define SVR_8567 0x807501
++#define SVR_8568 0x807500
++#define SVR_8569 0x808000
++#define SVR_8572 0x80E000
++#define SVR_P1010 0x80F100
++#define SVR_P1011 0x80E500
++#define SVR_P1012 0x80E501
++#define SVR_P1013 0x80E700
++#define SVR_P1014 0x80F101
++#define SVR_P1017 0x80F700
++#define SVR_P1020 0x80E400
++#define SVR_P1021 0x80E401
++#define SVR_P1022 0x80E600
++#define SVR_P1023 0x80F600
++#define SVR_P1024 0x80E402
++#define SVR_P1025 0x80E403
++#define SVR_P2010 0x80E300
++#define SVR_P2020 0x80E200
++#define SVR_P2040 0x821000
++#define SVR_P2041 0x821001
++#define SVR_P3041 0x821103
++#define SVR_P4040 0x820100
++#define SVR_P4080 0x820000
++#define SVR_P5010 0x822100
++#define SVR_P5020 0x822000
++#define SVR_P5021 0x820500
++#define SVR_P5040 0x820400
++#define SVR_T4240 0x824000
++#define SVR_T4120 0x824001
++#define SVR_T4160 0x824100
++#define SVR_T4080 0x824102
++#define SVR_C291 0x850000
++#define SVR_C292 0x850020
++#define SVR_C293 0x850030
++#define SVR_B4860 0x868000
++#define SVR_G4860 0x868001
++#define SVR_G4060 0x868003
++#define SVR_B4440 0x868100
++#define SVR_G4440 0x868101
++#define SVR_B4420 0x868102
++#define SVR_B4220 0x868103
++#define SVR_T1040 0x852000
++#define SVR_T1041 0x852001
++#define SVR_T1042 0x852002
++#define SVR_T1020 0x852100
++#define SVR_T1021 0x852101
++#define SVR_T1022 0x852102
++#define SVR_T1023 0x854100
++#define SVR_T1024 0x854000
++#define SVR_T2080 0x853000
++#define SVR_T2081 0x853100
++
++#define SVR_8610 0x80A000
++#define SVR_8641 0x809000
++#define SVR_8641D 0x809001
++
++#define SVR_9130 0x860001
++#define SVR_9131 0x860000
++#define SVR_9132 0x861000
++#define SVR_9232 0x861400
++
++#define SVR_Unknown 0xFFFFFF
++
++#endif
+--- a/include/linux/fsl_devices.h
++++ b/include/linux/fsl_devices.h
+@@ -99,7 +99,10 @@ struct fsl_usb2_platform_data {
+ unsigned suspended:1;
+ unsigned already_suspended:1;
+ unsigned has_fsl_erratum_a007792:1;
++ unsigned has_fsl_erratum_14:1;
+ unsigned
has_fsl_erratum_a005275:1; ++ unsigned has_fsl_erratum_a006918:1; ++ unsigned has_fsl_erratum_a005697:1; + unsigned check_phy_clk_valid:1; + + /* register save area for suspend/resume */ +--- a/include/linux/netdev_features.h ++++ b/include/linux/netdev_features.h +@@ -74,6 +74,7 @@ enum { + NETIF_F_BUSY_POLL_BIT, /* Busy poll */ + + NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */ ++ NETIF_F_HW_ACCEL_MQ_BIT, /* Hardware-accelerated multiqueue */ + + /* + * Add your fresh new feature above and remember to update +@@ -136,6 +137,7 @@ enum { + #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) + #define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL) + #define NETIF_F_HW_TC __NETIF_F(HW_TC) ++#define NETIF_F_HW_ACCEL_MQ __NETIF_F(HW_ACCEL_MQ) + + #define for_each_netdev_feature(mask_addr, bit) \ + for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -916,8 +916,8 @@ struct netdev_xdp { + * Callback used when the transmitter has not made any progress + * for dev->watchdog ticks. + * +- * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, +- * struct rtnl_link_stats64 *storage); ++ * void (*ndo_get_stats64)(struct net_device *dev, ++ * struct rtnl_link_stats64 *storage); + * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); + * Called when a user wants to get the network device usage + * statistics. Drivers must do one of the following: +@@ -1165,8 +1165,8 @@ struct net_device_ops { + struct neigh_parms *); + void (*ndo_tx_timeout) (struct net_device *dev); + +- struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, +- struct rtnl_link_stats64 *storage); ++ void (*ndo_get_stats64)(struct net_device *dev, ++ struct rtnl_link_stats64 *storage); + bool (*ndo_has_offload_stats)(int attr_id); + int (*ndo_get_offload_stats)(int attr_id, + const struct net_device *dev, +@@ -1509,6 +1509,8 @@ enum netdev_priv_flags { + * @if_port: Selectable AUI, TP, ... + * @dma: DMA channel + * @mtu: Interface MTU value ++ * @min_mtu: Interface Minimum MTU value ++ * @max_mtu: Interface Maximum MTU value + * @type: Interface hardware type + * @hard_header_len: Maximum hardware header length. 
+ * @min_header_len: Minimum hardware header length +@@ -1735,6 +1737,8 @@ struct net_device { + unsigned char dma; + + unsigned int mtu; ++ unsigned int min_mtu; ++ unsigned int max_mtu; + unsigned short type; + unsigned short hard_header_len; + unsigned short min_header_len; +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -903,6 +903,7 @@ void kfree_skb(struct sk_buff *skb); + void kfree_skb_list(struct sk_buff *segs); + void skb_tx_error(struct sk_buff *skb); + void consume_skb(struct sk_buff *skb); ++void skb_recycle(struct sk_buff *skb); + void __kfree_skb(struct sk_buff *skb); + extern struct kmem_cache *skbuff_head_cache; + +@@ -3057,6 +3058,7 @@ static inline void skb_free_datagram_loc + } + int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); + int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); ++void copy_skb_header(struct sk_buff *new, const struct sk_buff *old); + int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); + __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, + int len, __wsum csum); +--- a/include/linux/sys_soc.h ++++ b/include/linux/sys_soc.h +@@ -13,6 +13,7 @@ struct soc_device_attribute { + const char *family; + const char *revision; + const char *soc_id; ++ const void *data; + }; + + /** +@@ -34,4 +35,6 @@ void soc_device_unregister(struct soc_de + */ + struct device *soc_device_to_device(struct soc_device *soc); + ++const struct soc_device_attribute *soc_device_match( ++ const struct soc_device_attribute *matches); + #endif /* __SOC_BUS_H */ +--- a/include/net/ip_tunnels.h ++++ b/include/net/ip_tunnels.h +@@ -261,8 +261,8 @@ int ip_tunnel_ioctl(struct net_device *d + int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict); + int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu); + +-struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *tot); ++void ip_tunnel_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *tot); + struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, + int link, __be16 flags, + __be32 remote, __be32 local, +--- a/include/uapi/linux/if_ether.h ++++ b/include/uapi/linux/if_ether.h +@@ -35,6 +35,7 @@ + #define ETH_DATA_LEN 1500 /* Max. octets in payload */ + #define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */ + #define ETH_FCS_LEN 4 /* Octets in the FCS */ ++#define ETH_MIN_MTU 68 /* Min IPv4 MTU per RFC791 */ + + /* + * These are the defined Ethernet Protocol ID's. 
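
The hunks above and below convert ndo_get_stats64 from returning a stats pointer to returning void. For orientation when porting an out-of-tree driver onto this kernel, a minimal sketch of the callback after the change follows; the foo_* names and counters are illustrative assumptions, not part of this patch:

	#include <linux/netdevice.h>

	struct foo_priv {
		u64 rx_packets; /* hypothetical software counters */
		u64 tx_packets;
	};

	/* The core now ignores any return value and simply reads the
	 * caller-supplied *stats after the callback has filled it in. */
	static void foo_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
	{
		struct foo_priv *priv = netdev_priv(dev);

		stats->rx_packets = priv->rx_packets;
		stats->tx_packets = priv->tx_packets;
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_get_stats64 = foo_get_stats64,
	};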
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -671,7 +671,8 @@ static int vlan_ethtool_get_ts_info(stru
+ return 0;
+ }
+ 
+-static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
++static void vlan_dev_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct vlan_pcpu_stats *p;
+ u32 rx_errors = 0, tx_dropped = 0;
+@@ -702,8 +703,6 @@ static struct rtnl_link_stats64 *vlan_de
+ }
+ stats->rx_errors = rx_errors;
+ stats->tx_dropped = tx_dropped;
+-
+- return stats;
+ }
+ 
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -156,8 +156,8 @@ static int br_dev_stop(struct net_device
+ return 0;
+ }
+ 
+-static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *stats)
++static void br_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct net_bridge *br = netdev_priv(dev);
+ struct pcpu_sw_netstats tmp, sum = { 0 };
+@@ -181,8 +181,6 @@ static struct rtnl_link_stats64 *br_get_
+ stats->tx_packets = sum.tx_packets;
+ stats->rx_bytes = sum.rx_bytes;
+ stats->rx_packets = sum.rx_packets;
+-
+- return stats;
+ }
+ 
+ static int br_change_mtu(struct net_device *dev, int new_mtu)
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -6603,9 +6603,18 @@ int dev_set_mtu(struct net_device *dev,
+ if (new_mtu == dev->mtu)
+ return 0;
+ 
+- /* MTU must be positive. */
+- if (new_mtu < 0)
++ /* MTU must be positive, and in range */
++ if (new_mtu < 0 || new_mtu < dev->min_mtu) {
++ net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
++ dev->name, new_mtu, dev->min_mtu);
+ return -EINVAL;
++ }
++
++ if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
++ net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
++ dev->name, new_mtu, dev->max_mtu);
++ return -EINVAL;
++ }
+ 
+ if (!netif_device_present(dev))
+ return -ENODEV;
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -842,6 +842,32 @@ void napi_consume_skb(struct sk_buff *sk
+ }
+ EXPORT_SYMBOL(napi_consume_skb);
+ 
++/**
++ * skb_recycle - clean up an skb for reuse
++ * @skb: buffer
++ *
++ * Recycles the skb to be reused as a receive buffer. This
++ * function does any necessary reference count dropping, and
++ * cleans up the skbuff as if it just came from __alloc_skb().
++ */ ++void skb_recycle(struct sk_buff *skb) ++{ ++ struct skb_shared_info *shinfo; ++ u8 head_frag = skb->head_frag; ++ ++ skb_release_head_state(skb); ++ ++ shinfo = skb_shinfo(skb); ++ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); ++ atomic_set(&shinfo->dataref, 1); ++ ++ memset(skb, 0, offsetof(struct sk_buff, tail)); ++ skb->data = skb->head + NET_SKB_PAD; ++ skb->head_frag = head_frag; ++ skb_reset_tail_pointer(skb); ++} ++EXPORT_SYMBOL(skb_recycle); ++ + /* Make sure a field is enclosed inside headers_start/headers_end section */ + #define CHECK_SKB_FIELD(field) \ + BUILD_BUG_ON(offsetof(struct sk_buff, field) < \ +@@ -1073,7 +1099,7 @@ static void skb_headers_offset_update(st + skb->inner_mac_header += off; + } + +-static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) ++void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) + { + __copy_skb_header(new, old); + +@@ -1081,6 +1107,7 @@ static void copy_skb_header(struct sk_bu + skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; + skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; + } ++EXPORT_SYMBOL(copy_skb_header); + + static inline int skb_alloc_rx_flag(const struct sk_buff *skb) + { +--- a/net/ipv4/ip_tunnel_core.c ++++ b/net/ipv4/ip_tunnel_core.c +@@ -188,8 +188,8 @@ int iptunnel_handle_offloads(struct sk_b + EXPORT_SYMBOL_GPL(iptunnel_handle_offloads); + + /* Often modified stats are per cpu, other are shared (netdev->stats) */ +-struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *tot) ++void ip_tunnel_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *tot) + { + int i; + +@@ -214,8 +214,6 @@ struct rtnl_link_stats64 *ip_tunnel_get_ + tot->rx_bytes += rx_bytes; + tot->tx_bytes += tx_bytes; + } +- +- return tot; + } + EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64); + +--- a/net/l2tp/l2tp_eth.c ++++ b/net/l2tp/l2tp_eth.c +@@ -106,8 +106,8 @@ static int l2tp_eth_dev_xmit(struct sk_b + return NETDEV_TX_OK; + } + +-static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *stats) ++static void l2tp_eth_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *stats) + { + struct l2tp_eth *priv = netdev_priv(dev); + +@@ -117,10 +117,8 @@ static struct rtnl_link_stats64 *l2tp_et + stats->rx_bytes = atomic_long_read(&priv->rx_bytes); + stats->rx_packets = atomic_long_read(&priv->rx_packets); + stats->rx_errors = atomic_long_read(&priv->rx_errors); +- return stats; + } + +- + static const struct net_device_ops l2tp_eth_netdev_ops = { + .ndo_init = l2tp_eth_dev_init, + .ndo_uninit = l2tp_eth_dev_uninit, +--- a/net/mac80211/iface.c ++++ b/net/mac80211/iface.c +@@ -1146,7 +1146,7 @@ static u16 ieee80211_netdev_select_queue + return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); + } + +-static struct rtnl_link_stats64 * ++static void + ieee80211_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) + { + int i; +@@ -1171,8 +1171,6 @@ ieee80211_get_stats64(struct net_device + stats->rx_bytes += rx_bytes; + stats->tx_bytes += tx_bytes; + } +- +- return stats; + } + + static const struct net_device_ops ieee80211_dataif_ops = { +--- a/net/openvswitch/vport-internal_dev.c ++++ b/net/openvswitch/vport-internal_dev.c +@@ -106,7 +106,7 @@ static void internal_dev_destructor(stru + free_netdev(dev); + } + +-static struct rtnl_link_stats64 * ++static void + internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) + { + int i; +@@ -134,8 
+134,6 @@ internal_get_stats(struct net_device *de
+ stats->tx_bytes += local_stats.tx_bytes;
+ stats->tx_packets += local_stats.tx_packets;
+ }
+-
+- return stats;
+ }
+ 
+ static void internal_set_rx_headroom(struct net_device *dev, int new_hr)
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -309,6 +309,13 @@ static void dev_watchdog(unsigned long a
+ txq->trans_timeout++;
+ break;
+ }
++
++ /* Devices with HW_ACCEL_MQ have multiple txqs
++ * but update only the first one's transmission
++ * timestamp, so avoid checking the rest.
++ */
++ if (dev->features & NETIF_F_HW_ACCEL_MQ)
++ break;
+ }
+ 
+ if (some_queue_timedout) {
+--- a/net/sched/sch_teql.c
++++ b/net/sched/sch_teql.c
+@@ -401,8 +401,8 @@ static int teql_master_close(struct net_
+ return 0;
+ }
+ 
+-static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *stats)
++static void teql_master_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct teql_master *m = netdev_priv(dev);
+ 
+@@ -410,7 +410,6 @@ static struct rtnl_link_stats64 *teql_ma
+ stats->tx_bytes = m->tx_bytes;
+ stats->tx_errors = m->tx_errors;
+ stats->tx_dropped = m->tx_dropped;
+- return stats;
+ }
+ 
+ static int teql_master_mtu(struct net_device *dev, int new_mtu)
diff --git a/target/linux/layerscape/patches-4.4/7015-fmd-add-fman-driver.patch b/target/linux/layerscape/patches-4.9/701-sdk_dpaa-support-layerscape.patch
similarity index 80%
rename from target/linux/layerscape/patches-4.4/7015-fmd-add-fman-driver.patch
rename to target/linux/layerscape/patches-4.9/701-sdk_dpaa-support-layerscape.patch
index 2d89ff999..56c07c5e6 100644
--- a/target/linux/layerscape/patches-4.4/7015-fmd-add-fman-driver.patch
+++ b/target/linux/layerscape/patches-4.9/701-sdk_dpaa-support-layerscape.patch
@@ -1,30 +1,52 @@
-From 9a69168a7ab58035571d9d19d531a40aa7f909dd Mon Sep 17 00:00:00 2001
-From: Zhao Qiang
-Date: Wed, 16 Dec 2015 21:46:52 +0200
-Subject: [PATCH 15/70] fmd: add fman driver
+From 6fe4518adbbbab0404958db4aa95673d60174881 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu
+Date: Mon, 25 Sep 2017 11:58:03 +0800
+Subject: [PATCH] sdk_dpaa: support layerscape
-Add fman driver code of dpaa, put it to drivers/net/ethernet/freescale/sdk_fman.
-fman is frame manager, combining ethernet MACs with packet parsing and
-classification logic, providing intelligent distribution and queuing
-decisions for incomming traffic.
+This is an integrated patch for layerscape dpaa1-sdk support.
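
For drivers wiring into the dev_watchdog() shortcut that sch_generic.c gains above: the device advertises the new feature bit once at probe time. A minimal illustrative fragment (the net_dev variable name is an assumption, not taken from this patch):

	/* Only txq 0's trans_start is refreshed by this hardware, so tell
	 * the watchdog not to inspect the remaining queues. */
	net_dev->features |= NETIF_F_HW_ACCEL_MQ;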
-Signed-off-by: Mandy Lavi -Signed-off-by: Madalin Bucur +Signed-off-by: Camelia Groza Signed-off-by: Zhao Qiang +Signed-off-by: Zhang Ying-22455 +Signed-off-by: Madalin Bucur +Signed-off-by: Yangbo Lu --- - drivers/net/ethernet/freescale/Kconfig | 1 + - drivers/net/ethernet/freescale/Makefile | 1 + - drivers/net/ethernet/freescale/sdk_fman/Kconfig | 151 + + drivers/net/ethernet/freescale/sdk_dpaa/Kconfig | 173 + + drivers/net/ethernet/freescale/sdk_dpaa/Makefile | 46 + + .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.c | 580 ++ + .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.h | 138 + + .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c | 180 + + .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h | 43 + + drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c | 1213 ++++ + drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h | 698 ++ + .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.c | 205 + + .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.h | 49 + + .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c | 1992 +++++ + .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h | 237 + + .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.c | 1811 +++++ + .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.h | 225 + + .../ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c | 381 + + .../net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c | 1179 +++ + .../ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c | 278 + + .../ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h | 144 + + .../net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c | 544 ++ + drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c | 291 + + drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c | 907 +++ + drivers/net/ethernet/freescale/sdk_dpaa/mac.c | 489 ++ + drivers/net/ethernet/freescale/sdk_dpaa/mac.h | 135 + + .../net/ethernet/freescale/sdk_dpaa/offline_port.c | 848 +++ + .../net/ethernet/freescale/sdk_dpaa/offline_port.h | 59 + + drivers/net/ethernet/freescale/sdk_fman/Kconfig | 153 + drivers/net/ethernet/freescale/sdk_fman/Makefile | 11 + .../freescale/sdk_fman/Peripherals/FM/HC/Makefile | 15 + .../freescale/sdk_fman/Peripherals/FM/HC/hc.c | 1232 ++++ .../freescale/sdk_fman/Peripherals/FM/MAC/Makefile | 28 + - .../freescale/sdk_fman/Peripherals/FM/MAC/dtsec.c | 1463 ++++ + .../freescale/sdk_fman/Peripherals/FM/MAC/dtsec.c | 1464 ++++ .../freescale/sdk_fman/Peripherals/FM/MAC/dtsec.h | 228 + .../sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.c | 97 + .../sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.h | 42 + - .../freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.c | 646 ++ - .../freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.h | 224 + + .../freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.c | 658 ++ + .../freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.h | 225 + .../sdk_fman/Peripherals/FM/MAC/fman_crc32.c | 119 + .../sdk_fman/Peripherals/FM/MAC/fman_crc32.h | 43 + .../sdk_fman/Peripherals/FM/MAC/fman_dtsec.c | 845 +++ @@ -32,11 +54,11 @@ Signed-off-by: Zhao Qiang .../sdk_fman/Peripherals/FM/MAC/fman_memac.c | 511 ++ .../Peripherals/FM/MAC/fman_memac_mii_acc.c | 213 + .../sdk_fman/Peripherals/FM/MAC/fman_tgec.c | 367 + - .../freescale/sdk_fman/Peripherals/FM/MAC/memac.c | 1088 +++ + .../freescale/sdk_fman/Peripherals/FM/MAC/memac.c | 1096 +++ .../freescale/sdk_fman/Peripherals/FM/MAC/memac.h | 110 + .../sdk_fman/Peripherals/FM/MAC/memac_mii_acc.c | 78 + .../sdk_fman/Peripherals/FM/MAC/memac_mii_acc.h | 73 + - .../freescale/sdk_fman/Peripherals/FM/MAC/tgec.c | 974 +++ + .../freescale/sdk_fman/Peripherals/FM/MAC/tgec.c | 975 +++ .../freescale/sdk_fman/Peripherals/FM/MAC/tgec.h | 151 + 
.../sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.c | 139 + .../sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.h | 80 + @@ -51,18 +73,18 @@ Signed-off-by: Zhao Qiang .../freescale/sdk_fman/Peripherals/FM/Makefile | 23 + .../freescale/sdk_fman/Peripherals/FM/Pcd/Makefile | 26 + .../freescale/sdk_fman/Peripherals/FM/Pcd/crc64.h | 360 + - .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.c | 7538 ++++++++++++++++++++ - .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.h | 399 ++ + .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.c | 7582 ++++++++++++++++++++ + .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.h | 399 + .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.c | 3242 +++++++++ .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.h | 206 + - .../sdk_fman/Peripherals/FM/Pcd/fm_manip.c | 5571 +++++++++++++++ + .../sdk_fman/Peripherals/FM/Pcd/fm_manip.c | 5571 ++++++++++++++ .../sdk_fman/Peripherals/FM/Pcd/fm_manip.h | 555 ++ - .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.c | 2094 ++++++ + .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.c | 2095 ++++++ .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.h | 543 ++ .../sdk_fman/Peripherals/FM/Pcd/fm_pcd_ipc.h | 280 + - .../sdk_fman/Peripherals/FM/Pcd/fm_plcr.c | 1846 +++++ + .../sdk_fman/Peripherals/FM/Pcd/fm_plcr.c | 1847 +++++ .../sdk_fman/Peripherals/FM/Pcd/fm_plcr.h | 165 + - .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.c | 422 ++ + .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.c | 423 ++ .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.h | 316 + .../sdk_fman/Peripherals/FM/Pcd/fm_replic.c | 984 +++ .../sdk_fman/Peripherals/FM/Pcd/fm_replic.h | 101 + @@ -82,12 +104,12 @@ Signed-off-by: Zhao Qiang .../freescale/sdk_fman/Peripherals/FM/SP/fm_sp.c | 757 ++ .../freescale/sdk_fman/Peripherals/FM/SP/fm_sp.h | 85 + .../freescale/sdk_fman/Peripherals/FM/SP/fman_sp.c | 197 + - .../freescale/sdk_fman/Peripherals/FM/fm.c | 5195 ++++++++++++++ - .../freescale/sdk_fman/Peripherals/FM/fm.h | 646 ++ + .../freescale/sdk_fman/Peripherals/FM/fm.c | 5216 ++++++++++++++ + .../freescale/sdk_fman/Peripherals/FM/fm.h | 648 ++ .../freescale/sdk_fman/Peripherals/FM/fm_ipc.h | 465 ++ .../freescale/sdk_fman/Peripherals/FM/fm_muram.c | 174 + - .../freescale/sdk_fman/Peripherals/FM/fman.c | 1399 ++++ - .../sdk_fman/Peripherals/FM/inc/fm_common.h | 1203 ++++ + .../freescale/sdk_fman/Peripherals/FM/fman.c | 1398 ++++ + .../sdk_fman/Peripherals/FM/inc/fm_common.h | 1214 ++++ .../freescale/sdk_fman/Peripherals/FM/inc/fm_hc.h | 93 + .../sdk_fman/Peripherals/FM/inc/fm_sp_common.h | 117 + .../net/ethernet/freescale/sdk_fman/etc/Makefile | 12 + @@ -100,12 +122,12 @@ Signed-off-by: Zhao Qiang .../ethernet/freescale/sdk_fman/fmanv3h_dflags.h | 57 + .../ethernet/freescale/sdk_fman/fmanv3l_dflags.h | 56 + .../sdk_fman/inc/Peripherals/crc_mac_addr_ext.h | 364 + - .../freescale/sdk_fman/inc/Peripherals/dpaa_ext.h | 207 + - .../freescale/sdk_fman/inc/Peripherals/fm_ext.h | 1705 +++++ - .../sdk_fman/inc/Peripherals/fm_mac_ext.h | 846 +++ + .../freescale/sdk_fman/inc/Peripherals/dpaa_ext.h | 210 + + .../freescale/sdk_fman/inc/Peripherals/fm_ext.h | 1731 +++++ + .../sdk_fman/inc/Peripherals/fm_mac_ext.h | 859 +++ .../sdk_fman/inc/Peripherals/fm_macsec_ext.h | 1271 ++++ .../sdk_fman/inc/Peripherals/fm_muram_ext.h | 170 + - .../sdk_fman/inc/Peripherals/fm_pcd_ext.h | 3974 +++++++++++ + .../sdk_fman/inc/Peripherals/fm_pcd_ext.h | 3974 ++++++++++ .../sdk_fman/inc/Peripherals/fm_port_ext.h | 2608 +++++++ .../sdk_fman/inc/Peripherals/fm_rtc_ext.h | 619 ++ .../sdk_fman/inc/Peripherals/fm_vsp_ext.h | 
411 ++ @@ -139,10 +161,10 @@ Signed-off-by: Zhao Qiang .../freescale/sdk_fman/inc/flib/fsl_fman_rtc.h | 449 ++ .../freescale/sdk_fman/inc/flib/fsl_fman_sp.h | 138 + .../freescale/sdk_fman/inc/flib/fsl_fman_tgec.h | 479 ++ - .../integrations/FMANV3H/dpaa_integration_ext.h | 290 + + .../integrations/FMANV3H/dpaa_integration_ext.h | 291 + .../sdk_fman/inc/integrations/FMANV3H/part_ext.h | 71 + .../integrations/FMANV3H/part_integration_ext.h | 304 + - .../integrations/FMANV3L/dpaa_integration_ext.h | 292 + + .../integrations/FMANV3L/dpaa_integration_ext.h | 293 + .../sdk_fman/inc/integrations/FMANV3L/part_ext.h | 59 + .../integrations/FMANV3L/part_integration_ext.h | 304 + .../inc/integrations/LS1043/dpaa_integration_ext.h | 291 + @@ -154,7 +176,7 @@ Signed-off-by: Zhao Qiang .../P3040_P4080_P5020/dpaa_integration_ext.h | 276 + .../inc/integrations/P3040_P4080_P5020/part_ext.h | 83 + .../P3040_P4080_P5020/part_integration_ext.h | 336 + - .../net/ethernet/freescale/sdk_fman/inc/math_ext.h | 99 + + .../net/ethernet/freescale/sdk_fman/inc/math_ext.h | 100 + .../net/ethernet/freescale/sdk_fman/inc/ncsw_ext.h | 435 ++ .../net/ethernet/freescale/sdk_fman/inc/net_ext.h | 430 ++ .../net/ethernet/freescale/sdk_fman/inc/std_ext.h | 48 + @@ -173,19 +195,19 @@ Signed-off-by: Zhao Qiang .../freescale/sdk_fman/src/inc/system/sys_io_ext.h | 46 + .../freescale/sdk_fman/src/inc/types_linux.h | 208 + .../sdk_fman/src/inc/wrapper/fsl_fman_test.h | 84 + - .../sdk_fman/src/inc/wrapper/lnxwrp_exp_sym.h | 127 + + .../sdk_fman/src/inc/wrapper/lnxwrp_exp_sym.h | 128 + .../sdk_fman/src/inc/wrapper/lnxwrp_fm_ext.h | 163 + - .../sdk_fman/src/inc/wrapper/lnxwrp_fsl_fman.h | 919 +++ + .../sdk_fman/src/inc/wrapper/lnxwrp_fsl_fman.h | 921 +++ .../ethernet/freescale/sdk_fman/src/inc/xx/xx.h | 50 + .../freescale/sdk_fman/src/system/Makefile | 10 + .../freescale/sdk_fman/src/system/sys_io.c | 171 + .../freescale/sdk_fman/src/wrapper/Makefile | 19 + .../freescale/sdk_fman/src/wrapper/fman_test.c | 1665 +++++ - .../freescale/sdk_fman/src/wrapper/lnxwrp_fm.c | 2795 ++++++++ + .../freescale/sdk_fman/src/wrapper/lnxwrp_fm.c | 2908 ++++++++ .../freescale/sdk_fman/src/wrapper/lnxwrp_fm.h | 294 + - .../sdk_fman/src/wrapper/lnxwrp_fm_port.c | 1507 ++++ + .../sdk_fman/src/wrapper/lnxwrp_fm_port.c | 1480 ++++ .../sdk_fman/src/wrapper/lnxwrp_ioctls_fm.c | 4813 +++++++++++++ - .../sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.c | 1300 ++++ + .../sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.c | 1297 ++++ .../sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.h | 755 ++ .../sdk_fman/src/wrapper/lnxwrp_resources.h | 121 + .../sdk_fman/src/wrapper/lnxwrp_resources_ut.c | 191 + @@ -195,13 +217,47 @@ Signed-off-by: Zhao Qiang .../freescale/sdk_fman/src/wrapper/lnxwrp_sysfs.h | 60 + .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.c | 1855 +++++ .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h | 136 + - .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.c | 1255 ++++ + .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.c | 1268 ++++ .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.h | 56 + .../ethernet/freescale/sdk_fman/src/xx/Makefile | 18 + - .../freescale/sdk_fman/src/xx/module_strings.c | 45 + - .../ethernet/freescale/sdk_fman/src/xx/udivdi3.c | 132 + + .../freescale/sdk_fman/src/xx/module_strings.c | 46 + .../freescale/sdk_fman/src/xx/xx_arm_linux.c | 905 +++ .../ethernet/freescale/sdk_fman/src/xx/xx_linux.c | 918 +++ + drivers/staging/fsl_qbman/Kconfig | 228 + + drivers/staging/fsl_qbman/Makefile | 28 + + drivers/staging/fsl_qbman/bman_config.c | 720 ++ + 
drivers/staging/fsl_qbman/bman_debugfs.c | 119 + + drivers/staging/fsl_qbman/bman_driver.c | 575 ++ + drivers/staging/fsl_qbman/bman_high.c | 1145 +++ + drivers/staging/fsl_qbman/bman_low.h | 565 ++ + drivers/staging/fsl_qbman/bman_private.h | 166 + + drivers/staging/fsl_qbman/bman_test.c | 56 + + drivers/staging/fsl_qbman/bman_test.h | 44 + + drivers/staging/fsl_qbman/bman_test_high.c | 183 + + drivers/staging/fsl_qbman/bman_test_thresh.c | 196 + + drivers/staging/fsl_qbman/dpa_alloc.c | 706 ++ + drivers/staging/fsl_qbman/dpa_sys.h | 259 + + drivers/staging/fsl_qbman/dpa_sys_arm.h | 95 + + drivers/staging/fsl_qbman/dpa_sys_arm64.h | 102 + + drivers/staging/fsl_qbman/dpa_sys_ppc32.h | 70 + + drivers/staging/fsl_qbman/dpa_sys_ppc64.h | 79 + + drivers/staging/fsl_qbman/fsl_usdpaa.c | 1983 +++++ + drivers/staging/fsl_qbman/fsl_usdpaa_irq.c | 289 + + drivers/staging/fsl_qbman/qbman_driver.c | 88 + + drivers/staging/fsl_qbman/qman_config.c | 1224 ++++ + drivers/staging/fsl_qbman/qman_debugfs.c | 1594 ++++ + drivers/staging/fsl_qbman/qman_driver.c | 977 +++ + drivers/staging/fsl_qbman/qman_high.c | 5669 +++++++++++++++ + drivers/staging/fsl_qbman/qman_low.h | 1427 ++++ + drivers/staging/fsl_qbman/qman_private.h | 398 + + drivers/staging/fsl_qbman/qman_test.c | 57 + + drivers/staging/fsl_qbman/qman_test.h | 45 + + drivers/staging/fsl_qbman/qman_test_high.c | 216 + + drivers/staging/fsl_qbman/qman_test_hotpotato.c | 502 ++ + drivers/staging/fsl_qbman/qman_utility.c | 129 + + include/linux/fsl_bman.h | 532 ++ + include/linux/fsl_qman.h | 3888 ++++++++++ + include/linux/fsl_usdpaa.h | 372 + include/uapi/linux/fmd/Kbuild | 5 + include/uapi/linux/fmd/Peripherals/Kbuild | 4 + include/uapi/linux/fmd/Peripherals/fm_ioctls.h | 628 ++ @@ -212,7 +268,32 @@ Signed-off-by: Zhao Qiang .../linux/fmd/integrations/integration_ioctls.h | 56 + include/uapi/linux/fmd/ioctls.h | 96 + include/uapi/linux/fmd/net_ioctls.h | 430 ++ - 200 files changed, 115244 insertions(+) + 257 files changed, 152931 insertions(+) + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Kconfig + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Makefile + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c + create mode 100644 
drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.c + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.h + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c + create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Kconfig create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Makefile create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/HC/Makefile @@ -398,9 +479,43 @@ Signed-off-by: Zhao Qiang create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.h create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/xx/Makefile create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/xx/module_strings.c - create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/xx/udivdi3.c create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/xx/xx_arm_linux.c create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/xx/xx_linux.c + create mode 100644 drivers/staging/fsl_qbman/Kconfig + create mode 100644 drivers/staging/fsl_qbman/Makefile + create mode 100644 drivers/staging/fsl_qbman/bman_config.c + create mode 100644 drivers/staging/fsl_qbman/bman_debugfs.c + create mode 100644 drivers/staging/fsl_qbman/bman_driver.c + create mode 100644 drivers/staging/fsl_qbman/bman_high.c + create mode 100644 drivers/staging/fsl_qbman/bman_low.h + create mode 100644 drivers/staging/fsl_qbman/bman_private.h + create mode 100644 drivers/staging/fsl_qbman/bman_test.c + create mode 100644 drivers/staging/fsl_qbman/bman_test.h + create mode 100644 drivers/staging/fsl_qbman/bman_test_high.c + create mode 100644 drivers/staging/fsl_qbman/bman_test_thresh.c + create mode 100644 drivers/staging/fsl_qbman/dpa_alloc.c + create mode 100644 drivers/staging/fsl_qbman/dpa_sys.h + create mode 100644 drivers/staging/fsl_qbman/dpa_sys_arm.h + create mode 100644 drivers/staging/fsl_qbman/dpa_sys_arm64.h + create mode 100644 drivers/staging/fsl_qbman/dpa_sys_ppc32.h + create mode 100644 drivers/staging/fsl_qbman/dpa_sys_ppc64.h + create mode 100644 drivers/staging/fsl_qbman/fsl_usdpaa.c + create mode 100644 drivers/staging/fsl_qbman/fsl_usdpaa_irq.c + create mode 100644 drivers/staging/fsl_qbman/qbman_driver.c + create mode 100644 drivers/staging/fsl_qbman/qman_config.c + create mode 100644 drivers/staging/fsl_qbman/qman_debugfs.c + create mode 100644 drivers/staging/fsl_qbman/qman_driver.c + create mode 100644 drivers/staging/fsl_qbman/qman_high.c + create mode 100644 drivers/staging/fsl_qbman/qman_low.h + create mode 100644 drivers/staging/fsl_qbman/qman_private.h + create mode 100644 drivers/staging/fsl_qbman/qman_test.c + create mode 100644 drivers/staging/fsl_qbman/qman_test.h + create mode 100644 drivers/staging/fsl_qbman/qman_test_high.c + create mode 100644 drivers/staging/fsl_qbman/qman_test_hotpotato.c + create mode 100644 drivers/staging/fsl_qbman/qman_utility.c + create mode 100644 include/linux/fsl_bman.h + create mode 100644 include/linux/fsl_qman.h + create mode 100644 include/linux/fsl_usdpaa.h create mode 100644 include/uapi/linux/fmd/Kbuild create mode 100644 include/uapi/linux/fmd/Peripherals/Kbuild create mode 100644 include/uapi/linux/fmd/Peripherals/fm_ioctls.h @@ -412,29 +527,12934 @@ Signed-off-by: Zhao Qiang create mode 100644 include/uapi/linux/fmd/ioctls.h create mode 100644 include/uapi/linux/fmd/net_ioctls.h ---- 
a/drivers/net/ethernet/freescale/Kconfig
-+++ b/drivers/net/ethernet/freescale/Kconfig
-@@ -92,4 +92,5 @@ config GIANFAR
- and MPC86xx family of chips, the eTSEC on LS1021A and the FEC
- on the 8540.
- 
-+source "drivers/net/ethernet/freescale/sdk_fman/Kconfig"
- endif # NET_VENDOR_FREESCALE
---- a/drivers/net/ethernet/freescale/Makefile
-+++ b/drivers/net/ethernet/freescale/Makefile
-@@ -17,3 +17,4 @@ gianfar_driver-objs := gianfar.o \
- gianfar_ethtool.o
- obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
- ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
-+obj-$(if $(CONFIG_FSL_SDK_FMAN),y) += sdk_fman/
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
+@@ -0,0 +1,173 @@
++menuconfig FSL_SDK_DPAA_ETH
++ tristate "DPAA Ethernet"
++ depends on (FSL_SOC || ARM64 || ARM) && FSL_SDK_BMAN && FSL_SDK_QMAN && FSL_SDK_FMAN && !FSL_DPAA_ETH
++ select PHYLIB
++ help
++ Data Path Acceleration Architecture Ethernet driver,
++ supporting the Freescale QorIQ chips.
++ Depends on Freescale Buffer Manager and Queue Manager
++ driver and Frame Manager Driver.
++
++if FSL_SDK_DPAA_ETH
++
++config FSL_DPAA_HOOKS
++ bool "DPAA Ethernet driver hooks"
++
++config FSL_DPAA_CEETM
++ bool "DPAA CEETM QoS"
++ depends on NET_SCHED
++ default n
++ help
++ Enable QoS offloading support through the CEETM hardware block.
++
++config FSL_DPAA_OFFLINE_PORTS
++ bool "Offline Ports support"
++ depends on FSL_SDK_DPAA_ETH
++ default y
++ help
++ The Offline Parsing / Host Command ports (short: OH ports, or Offline ports) provide
++ most of the functionality of the regular, online ports, except they receive their
++ frames from a core or an accelerator on the SoC, via QMan frame queues,
++ rather than directly from the network.
++ Offline ports are configured via PCD (Parse-Classify-Distribute) schemes, just like
++ any online FMan port. They deliver the processed frames to frame queues, according
++ to the applied PCD configurations.
++
++ Choosing this feature will not impact the functionality and/or performance of the system,
++ so it is safe to have it.
++
++config FSL_DPAA_ADVANCED_DRIVERS
++ bool "Advanced DPAA Ethernet drivers"
++ depends on FSL_SDK_DPAA_ETH
++ default y
++ help
++ Besides the standard DPAA Ethernet driver, the DPAA Proxy initialization driver
++ is needed to support advanced scenarios. Select this to also build the advanced
++ drivers.
++
++config FSL_DPAA_ETH_JUMBO_FRAME
++ bool "Optimize for jumbo frames"
++ default n
++ help
++ Optimize the DPAA Ethernet driver throughput for large frames
++ termination traffic (e.g. 4K and above).
++ NOTE: This option can only be used if FSL_FM_MAX_FRAME_SIZE
++ is set to 9600 bytes.
++ Using this option in combination with small frames significantly
++ increases the driver's memory footprint and may even deplete
++ the system memory. Also, the skb truesize is altered and messages
++ from the stack that warn against this are bypassed.
++ This option is not available on LS1043.
++
++config FSL_DPAA_TS
++ bool "Linux compliant timestamping"
++ depends on FSL_SDK_DPAA_ETH
++ default n
++ help
++ Enable Linux API compliant timestamping support.
++
++config FSL_DPAA_1588
++ bool "IEEE 1588-compliant timestamping"
++ depends on FSL_SDK_DPAA_ETH
++ select FSL_DPAA_TS
++ default n
++ help
++ Enable IEEE1588 support code.
++config FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
++ bool "Use driver's Tx queue selection mechanism"
++ default y
++ depends on FSL_SDK_DPAA_ETH
++ help
++ The DPAA-Ethernet driver defines a ndo_select_queue() callback for optimal selection
++ of the egress FQ. That will override the XPS support for this netdevice.
++ If for whatever reason you want to be in control of the egress FQ-to-CPU selection and mapping,
++ or simply don't want to use the driver's ndo_select_queue() callback, then unselect this
++ and use the standard XPS support instead.
++
++config FSL_DPAA_ETH_MAX_BUF_COUNT
++ int "Maximum number of buffers in private bpool"
++ depends on FSL_SDK_DPAA_ETH
++ range 64 2048
++ default "128"
++ help
++ The maximum number of buffers to be allocated by default in the DPAA-Ethernet private port's
++ buffer pool. One needn't normally modify this, as it has probably been tuned for performance
++ already. This cannot be lower than DPAA_ETH_REFILL_THRESHOLD.
++
++config FSL_DPAA_ETH_REFILL_THRESHOLD
++ int "Private bpool refill threshold"
++ depends on FSL_SDK_DPAA_ETH
++ range 32 FSL_DPAA_ETH_MAX_BUF_COUNT
++ default "80"
++ help
++ The DPAA-Ethernet driver will start replenishing buffer pools whose count
++ falls below this threshold. This must be related to DPAA_ETH_MAX_BUF_COUNT. One needn't normally
++ modify this value unless one has very specific performance reasons.
++
++config FSL_DPAA_CS_THRESHOLD_1G
++ hex "Egress congestion threshold on 1G ports"
++ depends on FSL_SDK_DPAA_ETH
++ range 0x1000 0x10000000
++ default "0x06000000"
++ help
++ The size in bytes of the egress Congestion State notification threshold on 1G ports.
++ The 1G dTSECs can quite easily be flooded by cores doing Tx in a tight loop
++ (e.g. by sending UDP datagrams at "while(1) speed"),
++ and the larger the frame size, the more acute the problem.
++ So we have to find a balance between these factors:
++ - avoiding the device staying congested for a prolonged time (risking
++ the netdev watchdog to fire - see also the tx_timeout module param);
++ - affecting performance of protocols such as TCP, which otherwise
++ behave well under the congestion notification mechanism;
++ - preventing the Tx cores from tightly-looping (as if the congestion
++ threshold was too low to be effective);
++ - running out of memory if the CS threshold is set too high.
++
++config FSL_DPAA_CS_THRESHOLD_10G
++ hex "Egress congestion threshold on 10G ports"
++ depends on FSL_SDK_DPAA_ETH
++ range 0x1000 0x20000000
++ default "0x10000000"
++ help
++ The size in bytes of the egress Congestion State notification threshold on 10G ports.
++
++config FSL_DPAA_INGRESS_CS_THRESHOLD
++ hex "Ingress congestion threshold on FMan ports"
++ depends on FSL_SDK_DPAA_ETH
++ default "0x10000000"
++ help
++ The size in bytes of the ingress tail-drop threshold on FMan ports.
++ Traffic piling up above this value will be rejected by QMan and discarded by FMan.
++
++config FSL_DPAA_ETH_DEBUGFS
++ bool "DPAA Ethernet debugfs interface"
++ depends on DEBUG_FS && FSL_SDK_DPAA_ETH
++ default y
++ help
++ This option compiles debugfs code for the DPAA Ethernet driver.
++
++config FSL_DPAA_ETH_DEBUG
++ bool "DPAA Ethernet Debug Support"
++ depends on FSL_SDK_DPAA_ETH
++ default n
++ help
++ This option compiles debug code for the DPAA Ethernet driver.
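++# Taken together, a board that wants this SDK data path typically ends up
++# with a selection along the following lines (illustrative only; the exact
++# set depends on the target and on the options above):
++#
++#   CONFIG_FSL_SDK_DPAA_ETH=y
++#   CONFIG_FSL_SDK_FMAN=y
++#   CONFIG_FSL_SDK_BMAN=y
++#   CONFIG_FSL_SDK_QMAN=y
++#   CONFIG_FSL_DPAA_OFFLINE_PORTS=y
++#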
++config FSL_DPAA_DBG_LOOP
++ bool "DPAA Ethernet Debug loopback"
++ depends on FSL_DPAA_ETH_DEBUGFS && FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
++ default n
++ help
++ This option allows diverting all received traffic on a certain interface A towards a
++ selected interface B. This option is used to benchmark the HW + Ethernet driver in
++ isolation from the Linux networking stack. The loops are controlled by debugfs entries,
++ one for each interface. By default all loops are disabled (target value is -1). I.e. to
++ change the loop setting for interface 4 and divert all received traffic to interface 5
++ write Tx interface number in the receive interface debugfs file:
++ # cat /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
++ 4->-1
++ # echo 5 > /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
++ # cat /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
++ 4->5
++endif # FSL_SDK_DPAA_ETH
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/Makefile
+@@ -0,0 +1,46 @@
++#
++# Makefile for the Freescale Ethernet controllers
++#
++ccflags-y += -DVERSION=\"\"
++#
++# Include netcomm SW specific definitions
++include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
++
++ccflags-y += -I$(NET_DPA)
++
++obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_mac.o fsl_dpa.o
++obj-$(CONFIG_PTP_1588_CLOCK_DPAA) += dpaa_ptp.o
++
++fsl_dpa-objs += dpaa_ethtool.o dpaa_eth_sysfs.o dpaa_eth.o dpaa_eth_sg.o dpaa_eth_common.o
++ifeq ($(CONFIG_FSL_DPAA_DBG_LOOP),y)
++fsl_dpa-objs += dpaa_debugfs.o
++endif
++ifeq ($(CONFIG_FSL_DPAA_1588),y)
++fsl_dpa-objs += dpaa_1588.o
++endif
++ifeq ($(CONFIG_FSL_DPAA_CEETM),y)
++ccflags-y += -Idrivers/net/ethernet/freescale/sdk_fman/src/wrapper
++fsl_dpa-objs += dpaa_eth_ceetm.o
++endif
++
++fsl_mac-objs += mac.o mac-api.o
++
++# Advanced drivers
++ifeq ($(CONFIG_FSL_DPAA_ADVANCED_DRIVERS),y)
++obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_advanced.o
++obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_proxy.o
++
++fsl_advanced-objs += dpaa_eth_base.o
++# support for multiple drivers per kernel module comes in kernel 3.14
++# so we are forced to generate several modules for the advanced drivers
++fsl_proxy-objs += dpaa_eth_proxy.o
++
++ifeq ($(CONFIG_FSL_DPAA_OFFLINE_PORTS),y)
++obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_oh.o
++
++fsl_oh-objs += offline_port.o
++endif
++endif
++
++# Needed by the tracing framework
++CFLAGS_dpaa_eth.o := -I$(src)
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
+@@ -0,0 +1,580 @@
++/* Copyright (C) 2011 Freescale Semiconductor, Inc.
++ * Copyright (C) 2009 IXXAT Automation, GmbH
++ *
++ * DPAA Ethernet Driver -- IEEE 1588 interface functionality
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include "dpaa_eth.h"
++#include "dpaa_eth_common.h"
++#include "dpaa_1588.h"
++#include "mac.h"
++
++static int dpa_ptp_init_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
++{
++ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
++
++ circ_buf->buf = vmalloc(sizeof(struct dpa_ptp_data) * size);
++ if (!circ_buf->buf)
++ return 1;
++
++ circ_buf->head = 0;
++ circ_buf->tail = 0;
++ ptp_buf->size = size;
++ spin_lock_init(&ptp_buf->ptp_lock);
++
++ return 0;
++}
++
++static void dpa_ptp_reset_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
++{
++ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
++
++ circ_buf->head = 0;
++ circ_buf->tail = 0;
++ ptp_buf->size = size;
++}
++
++static int dpa_ptp_insert(struct dpa_ptp_circ_buf *ptp_buf,
++ struct dpa_ptp_data *data)
++{
++ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
++ int size = ptp_buf->size;
++ struct dpa_ptp_data *tmp;
++ unsigned long flags;
++ int head, tail;
++
++ spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
++
++ head = circ_buf->head;
++ tail = circ_buf->tail;
++
++ if (CIRC_SPACE(head, tail, size) <= 0)
++ circ_buf->tail = (tail + 1) & (size - 1);
++
++ tmp = (struct dpa_ptp_data *)(circ_buf->buf) + head;
++ memcpy(tmp, data, sizeof(struct dpa_ptp_data));
++
++ circ_buf->head = (head + 1) & (size - 1);
++
++ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
++
++ return 0;
++}
++
++static int dpa_ptp_is_ident_match(struct dpa_ptp_ident *dst,
++ struct dpa_ptp_ident *src)
++{
++ int ret;
++
++ if ((dst->version != src->version) || (dst->msg_type != src->msg_type))
++ return 0;
++
++ if ((dst->netw_prot == src->netw_prot)
++ || src->netw_prot == DPA_PTP_PROT_DONTCARE) {
++ if (dst->seq_id != src->seq_id)
++ return 0;
++
++ ret = memcmp(dst->snd_port_id, src->snd_port_id,
++ DPA_PTP_SOURCE_PORT_LENGTH);
++ if (ret)
++ return 0;
++ else
++ return 1;
++ }
++
++ return 0;
++}
++
++static int dpa_ptp_find_and_remove(struct dpa_ptp_circ_buf *ptp_buf,
++ struct dpa_ptp_ident *ident,
++ struct dpa_ptp_time *ts)
++{
++ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
++ int size = ptp_buf->size;
++ int head, tail, idx;
++ unsigned long flags;
++ struct dpa_ptp_data *tmp, *tmp2;
++ struct dpa_ptp_ident *tmp_ident;
++
++ spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
++
++ head = circ_buf->head;
++ tail = idx = circ_buf->tail;
++
++ if (CIRC_CNT(head, tail, size) == 0) {
++ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
++ return 1;
++ }
++
++ while (idx != head) {
++ tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
++ tmp_ident = &tmp->ident;
++ if (dpa_ptp_is_ident_match(tmp_ident, ident))
++ break;
++ idx = (idx + 1) & (size - 1);
++ }
++
++ if (idx == head) {
++ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
++ return 1;
++ }
++
++ ts->sec = tmp->ts.sec;
++ ts->nsec = tmp->ts.nsec;
++
++ if (idx != tail) {
++ if (CIRC_CNT(idx, tail, size) > TS_ACCUMULATION_THRESHOLD) {
++ tail = circ_buf->tail =
++ (idx - TS_ACCUMULATION_THRESHOLD) & (size - 1);
++ }
++
++ while (CIRC_CNT(idx, tail, size) > 0) {
++ tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
++ idx = (idx - 1) & (size - 1);
++ tmp2 = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
++ *tmp = *tmp2;
++ }
++ }
++ circ_buf->tail = (tail + 1) & (size - 1);
++
++ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
++
++ return 0;
++}
++
++/* Parse the PTP packets
++ *
++ * The PTP header can be found in an IPv4 packet, IPv6 packet or in
++ * an IEEE802.3 ethernet
frame. This function returns the position of
++ * the PTP packet or NULL if no PTP found
++ */
++static u8 *dpa_ptp_parse_packet(struct sk_buff *skb, u16 *eth_type)
++{
++ u8 *pos = skb->data + ETH_ALEN + ETH_ALEN;
++ u8 *ptp_loc = NULL;
++ u8 msg_type;
++ u32 access_len = ETH_ALEN + ETH_ALEN + DPA_ETYPE_LEN;
++ struct iphdr *iph;
++ struct udphdr *udph;
++ struct ipv6hdr *ipv6h;
++
++ /* when we can receive S/G frames we need to check the data we want to
++ * access is in the linear skb buffer
++ */
++ if (!pskb_may_pull(skb, access_len))
++ return NULL;
++
++ *eth_type = *((u16 *)pos);
++
++ /* Check if inner tag is here */
++ if (*eth_type == ETH_P_8021Q) {
++ access_len += DPA_VLAN_TAG_LEN;
++
++ if (!pskb_may_pull(skb, access_len))
++ return NULL;
++
++ pos += DPA_VLAN_TAG_LEN;
++ *eth_type = *((u16 *)pos);
++ }
++
++ pos += DPA_ETYPE_LEN;
++
++ switch (*eth_type) {
++ /* Transport of PTP over Ethernet */
++ case ETH_P_1588:
++ ptp_loc = pos;
++
++ if (!pskb_may_pull(skb, access_len + PTP_OFFS_MSG_TYPE + 1))
++ return NULL;
++
++ msg_type = *((u8 *)(ptp_loc + PTP_OFFS_MSG_TYPE)) & 0xf;
++ if ((msg_type == PTP_MSGTYPE_SYNC)
++ || (msg_type == PTP_MSGTYPE_DELREQ)
++ || (msg_type == PTP_MSGTYPE_PDELREQ)
++ || (msg_type == PTP_MSGTYPE_PDELRESP))
++ return ptp_loc;
++ break;
++ /* Transport of PTP over IPv4 */
++ case ETH_P_IP:
++ iph = (struct iphdr *)pos;
++ access_len += sizeof(struct iphdr);
++
++ if (!pskb_may_pull(skb, access_len))
++ return NULL;
++
++ if (iph->protocol != IPPROTO_UDP)
++ return NULL;
++
++ access_len += iph->ihl * 4 - sizeof(struct iphdr) +
++ sizeof(struct udphdr);
++
++ if (!pskb_may_pull(skb, access_len))
++ return NULL;
++
++ pos += iph->ihl * 4;
++ udph = (struct udphdr *)pos;
++ if (ntohs(udph->dest) != 319)
++ return NULL;
++ ptp_loc = pos + sizeof(struct udphdr);
++ break;
++ /* Transport of PTP over IPv6 */
++ case ETH_P_IPV6:
++ ipv6h = (struct ipv6hdr *)pos;
++
++ access_len += sizeof(struct ipv6hdr) + sizeof(struct udphdr);
++
++ if (ipv6h->nexthdr != IPPROTO_UDP)
++ return NULL;
++
++ pos += sizeof(struct ipv6hdr);
++ udph = (struct udphdr *)pos;
++ if (ntohs(udph->dest) != 319)
++ return NULL;
++ ptp_loc = pos + sizeof(struct udphdr);
++ break;
++ default:
++ break;
++ }
++
++ return ptp_loc;
++}
++
++static int dpa_ptp_store_stamp(const struct dpa_priv_s *priv,
++ struct sk_buff *skb, void *data, enum port_type rx_tx,
++ struct dpa_ptp_data *ptp_data)
++{
++ u64 nsec;
++ u32 mod;
++ u8 *ptp_loc;
++ u16 eth_type;
++
++ ptp_loc = dpa_ptp_parse_packet(skb, &eth_type);
++ if (!ptp_loc)
++ return -EINVAL;
++
++ switch (eth_type) {
++ case ETH_P_IP:
++ ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV4;
++ break;
++ case ETH_P_IPV6:
++ ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV6;
++ break;
++ case ETH_P_1588:
++ ptp_data->ident.netw_prot = DPA_PTP_PROT_802_3;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ if (!pskb_may_pull(skb, ptp_loc - skb->data + PTP_OFFS_SEQ_ID + 2))
++ return -EINVAL;
++
++ ptp_data->ident.version = *(ptp_loc + PTP_OFFS_VER_PTP) & 0xf;
++ ptp_data->ident.msg_type = *(ptp_loc + PTP_OFFS_MSG_TYPE) & 0xf;
++ ptp_data->ident.seq_id = *((u16 *)(ptp_loc + PTP_OFFS_SEQ_ID));
++ memcpy(ptp_data->ident.snd_port_id, ptp_loc + PTP_OFFS_SRCPRTID,
++ DPA_PTP_SOURCE_PORT_LENGTH);
++
++ nsec = dpa_get_timestamp_ns(priv, rx_tx, data);
++ mod = do_div(nsec, NANOSEC_PER_SECOND);
++ ptp_data->ts.sec = nsec;
++ ptp_data->ts.nsec = mod;
++
++ return 0;
++}
++
++void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv,
++ struct sk_buff *skb,
void *data) ++{ ++ struct dpa_ptp_tsu *tsu = priv->tsu; ++ struct dpa_ptp_data ptp_tx_data; ++ ++ if (dpa_ptp_store_stamp(priv, skb, data, TX, &ptp_tx_data)) ++ return; ++ ++ dpa_ptp_insert(&tsu->tx_timestamps, &ptp_tx_data); ++} ++ ++void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv, ++ struct sk_buff *skb, void *data) ++{ ++ struct dpa_ptp_tsu *tsu = priv->tsu; ++ struct dpa_ptp_data ptp_rx_data; ++ ++ if (dpa_ptp_store_stamp(priv, skb, data, RX, &ptp_rx_data)) ++ return; ++ ++ dpa_ptp_insert(&tsu->rx_timestamps, &ptp_rx_data); ++} ++ ++static uint8_t dpa_get_tx_timestamp(struct dpa_ptp_tsu *ptp_tsu, ++ struct dpa_ptp_ident *ident, ++ struct dpa_ptp_time *ts) ++{ ++ struct dpa_ptp_tsu *tsu = ptp_tsu; ++ struct dpa_ptp_time tmp; ++ int flag; ++ ++ flag = dpa_ptp_find_and_remove(&tsu->tx_timestamps, ident, &tmp); ++ if (!flag) { ++ ts->sec = tmp.sec; ++ ts->nsec = tmp.nsec; ++ return 0; ++ } ++ ++ return -1; ++} ++ ++static uint8_t dpa_get_rx_timestamp(struct dpa_ptp_tsu *ptp_tsu, ++ struct dpa_ptp_ident *ident, ++ struct dpa_ptp_time *ts) ++{ ++ struct dpa_ptp_tsu *tsu = ptp_tsu; ++ struct dpa_ptp_time tmp; ++ int flag; ++ ++ flag = dpa_ptp_find_and_remove(&tsu->rx_timestamps, ident, &tmp); ++ if (!flag) { ++ ts->sec = tmp.sec; ++ ts->nsec = tmp.nsec; ++ return 0; ++ } ++ ++ return -1; ++} ++ ++static void dpa_set_fiper_alarm(struct dpa_ptp_tsu *tsu, ++ struct dpa_ptp_time *cnt_time) ++{ ++ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev; ++ u64 tmp, fiper; ++ ++ if (mac_dev->fm_rtc_disable) ++ mac_dev->fm_rtc_disable(get_fm_handle(tsu->dpa_priv->net_dev)); ++ ++ /* TMR_FIPER1 will pulse every second after ALARM1 expired */ ++ tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec; ++ fiper = NANOSEC_PER_SECOND - DPA_PTP_NOMINAL_FREQ_PERIOD_NS; ++ if (mac_dev->fm_rtc_set_alarm) ++ mac_dev->fm_rtc_set_alarm(get_fm_handle(tsu->dpa_priv->net_dev), ++ 0, tmp); ++ if (mac_dev->fm_rtc_set_fiper) ++ mac_dev->fm_rtc_set_fiper(get_fm_handle(tsu->dpa_priv->net_dev), ++ 0, fiper); ++ ++ if (mac_dev->fm_rtc_enable) ++ mac_dev->fm_rtc_enable(get_fm_handle(tsu->dpa_priv->net_dev)); ++} ++ ++static void dpa_get_curr_cnt(struct dpa_ptp_tsu *tsu, ++ struct dpa_ptp_time *curr_time) ++{ ++ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev; ++ u64 tmp; ++ u32 mod; ++ ++ if (mac_dev->fm_rtc_get_cnt) ++ mac_dev->fm_rtc_get_cnt(get_fm_handle(tsu->dpa_priv->net_dev), ++ &tmp); ++ ++ mod = do_div(tmp, NANOSEC_PER_SECOND); ++ curr_time->sec = (u32)tmp; ++ curr_time->nsec = mod; ++} ++ ++static void dpa_set_1588cnt(struct dpa_ptp_tsu *tsu, ++ struct dpa_ptp_time *cnt_time) ++{ ++ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev; ++ u64 tmp; ++ ++ tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec; ++ ++ if (mac_dev->fm_rtc_set_cnt) ++ mac_dev->fm_rtc_set_cnt(get_fm_handle(tsu->dpa_priv->net_dev), ++ tmp); ++ ++ /* Restart fiper two seconds later */ ++ cnt_time->sec += 2; ++ cnt_time->nsec = 0; ++ dpa_set_fiper_alarm(tsu, cnt_time); ++} ++ ++static void dpa_get_drift(struct dpa_ptp_tsu *tsu, u32 *addend) ++{ ++ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev; ++ u32 drift; ++ ++ if (mac_dev->fm_rtc_get_drift) ++ mac_dev->fm_rtc_get_drift(get_fm_handle(tsu->dpa_priv->net_dev), ++ &drift); ++ ++ *addend = drift; ++} ++ ++static void dpa_set_drift(struct dpa_ptp_tsu *tsu, u32 addend) ++{ ++ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev; ++ ++ if (mac_dev->fm_rtc_set_drift) ++ mac_dev->fm_rtc_set_drift(get_fm_handle(tsu->dpa_priv->net_dev), ++ addend); ++} ++ 
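++/* Drop any buffered timestamps by rewinding both rings to their default
++ * sizes; invoked from the PTP_CLEANUP_TS ioctl below. Only head/tail are
++ * reset, the vmalloc'ed ring storage itself is kept.
++ */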
++static void dpa_flush_timestamp(struct dpa_ptp_tsu *tsu) ++{ ++ dpa_ptp_reset_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ); ++ dpa_ptp_reset_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ); ++} ++ ++int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd) ++{ ++ struct dpa_priv_s *priv = netdev_priv(dev); ++ struct dpa_ptp_tsu *tsu = priv->tsu; ++ struct mac_device *mac_dev = priv->mac_dev; ++ struct dpa_ptp_data ptp_data; ++ struct dpa_ptp_data *ptp_data_user; ++ struct dpa_ptp_time act_time; ++ u32 addend; ++ int retval = 0; ++ ++ if (!tsu || !tsu->valid) ++ return -ENODEV; ++ ++ switch (cmd) { ++ case PTP_ENBL_TXTS_IOCTL: ++ tsu->hwts_tx_en_ioctl = 1; ++ if (mac_dev->fm_rtc_enable) ++ mac_dev->fm_rtc_enable(get_fm_handle(dev)); ++ if (mac_dev->ptp_enable) ++ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev)); ++ break; ++ case PTP_DSBL_TXTS_IOCTL: ++ tsu->hwts_tx_en_ioctl = 0; ++ if (mac_dev->fm_rtc_disable) ++ mac_dev->fm_rtc_disable(get_fm_handle(dev)); ++ if (mac_dev->ptp_disable) ++ mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev)); ++ break; ++ case PTP_ENBL_RXTS_IOCTL: ++ tsu->hwts_rx_en_ioctl = 1; ++ break; ++ case PTP_DSBL_RXTS_IOCTL: ++ tsu->hwts_rx_en_ioctl = 0; ++ break; ++ case PTP_GET_RX_TIMESTAMP: ++ ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data; ++ if (copy_from_user(&ptp_data.ident, ++ &ptp_data_user->ident, sizeof(ptp_data.ident))) ++ return -EINVAL; ++ ++ if (dpa_get_rx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts)) ++ return -EAGAIN; ++ ++ if (copy_to_user((void __user *)&ptp_data_user->ts, ++ &ptp_data.ts, sizeof(ptp_data.ts))) ++ return -EFAULT; ++ break; ++ case PTP_GET_TX_TIMESTAMP: ++ ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data; ++ if (copy_from_user(&ptp_data.ident, ++ &ptp_data_user->ident, sizeof(ptp_data.ident))) ++ return -EINVAL; ++ ++ if (dpa_get_tx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts)) ++ return -EAGAIN; ++ ++ if (copy_to_user((void __user *)&ptp_data_user->ts, ++ &ptp_data.ts, sizeof(ptp_data.ts))) ++ return -EFAULT; ++ break; ++ case PTP_GET_TIME: ++ dpa_get_curr_cnt(tsu, &act_time); ++ if (copy_to_user(ifr->ifr_data, &act_time, sizeof(act_time))) ++ return -EFAULT; ++ break; ++ case PTP_SET_TIME: ++ if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time))) ++ return -EINVAL; ++ dpa_set_1588cnt(tsu, &act_time); ++ break; ++ case PTP_GET_ADJ: ++ dpa_get_drift(tsu, &addend); ++ if (copy_to_user(ifr->ifr_data, &addend, sizeof(addend))) ++ return -EFAULT; ++ break; ++ case PTP_SET_ADJ: ++ if (copy_from_user(&addend, ifr->ifr_data, sizeof(addend))) ++ return -EINVAL; ++ dpa_set_drift(tsu, addend); ++ break; ++ case PTP_SET_FIPER_ALARM: ++ if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time))) ++ return -EINVAL; ++ dpa_set_fiper_alarm(tsu, &act_time); ++ break; ++ case PTP_CLEANUP_TS: ++ dpa_flush_timestamp(tsu); ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ return retval; ++} ++ ++int dpa_ptp_init(struct dpa_priv_s *priv) ++{ ++ struct dpa_ptp_tsu *tsu; ++ ++ /* Allocate memory for PTP structure */ ++ tsu = kzalloc(sizeof(struct dpa_ptp_tsu), GFP_KERNEL); ++ if (!tsu) ++ return -ENOMEM; ++ ++ tsu->valid = TRUE; ++ tsu->dpa_priv = priv; ++ ++ dpa_ptp_init_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ); ++ dpa_ptp_init_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ); ++ ++ priv->tsu = tsu; ++ ++ return 0; ++} ++EXPORT_SYMBOL(dpa_ptp_init); ++ ++void dpa_ptp_cleanup(struct dpa_priv_s *priv) ++{ ++ struct dpa_ptp_tsu *tsu = priv->tsu; ++ ++ tsu->valid = FALSE; ++ 
vfree(tsu->rx_timestamps.circ_buf.buf);
++	vfree(tsu->tx_timestamps.circ_buf.buf);
++
++	kfree(tsu);
++}
++EXPORT_SYMBOL(dpa_ptp_cleanup);
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h
+@@ -0,0 +1,138 @@
++/* Copyright (C) 2011 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++#ifndef __DPAA_1588_H__
++#define __DPAA_1588_H__
++
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/circ_buf.h>	/* struct circ_buf */
++#include <linux/fsl_qman.h>
++
++#define DEFAULT_PTP_RX_BUF_SZ	256
++#define DEFAULT_PTP_TX_BUF_SZ	256
++
++/* 1588 private ioctl calls */
++#define PTP_ENBL_TXTS_IOCTL	SIOCDEVPRIVATE
++#define PTP_DSBL_TXTS_IOCTL	(SIOCDEVPRIVATE + 1)
++#define PTP_ENBL_RXTS_IOCTL	(SIOCDEVPRIVATE + 2)
++#define PTP_DSBL_RXTS_IOCTL	(SIOCDEVPRIVATE + 3)
++#define PTP_GET_TX_TIMESTAMP	(SIOCDEVPRIVATE + 4)
++#define PTP_GET_RX_TIMESTAMP	(SIOCDEVPRIVATE + 5)
++#define PTP_SET_TIME		(SIOCDEVPRIVATE + 6)
++#define PTP_GET_TIME		(SIOCDEVPRIVATE + 7)
++#define PTP_SET_FIPER_ALARM	(SIOCDEVPRIVATE + 8)
++#define PTP_SET_ADJ		(SIOCDEVPRIVATE + 9)
++#define PTP_GET_ADJ		(SIOCDEVPRIVATE + 10)
++#define PTP_CLEANUP_TS		(SIOCDEVPRIVATE + 11)
++
++/* PTP V2 message type */
++enum {
++	PTP_MSGTYPE_SYNC		= 0x0,
++	PTP_MSGTYPE_DELREQ		= 0x1,
++	PTP_MSGTYPE_PDELREQ		= 0x2,
++	PTP_MSGTYPE_PDELRESP		= 0x3,
++	PTP_MSGTYPE_FLWUP		= 0x8,
++	PTP_MSGTYPE_DELRESP		= 0x9,
++	PTP_MSGTYPE_PDELRES_FLWUP	= 0xA,
++	PTP_MSGTYPE_ANNOUNCE		= 0xB,
++	PTP_MSGTYPE_SGNLNG		= 0xC,
++	PTP_MSGTYPE_MNGMNT		= 0xD,
++};
++
++/* Byte offset of data in the PTP V2 headers */
++#define PTP_OFFS_MSG_TYPE	0
++#define PTP_OFFS_VER_PTP	1
++#define PTP_OFFS_MSG_LEN	2
++#define PTP_OFFS_DOM_NMB	4
++#define PTP_OFFS_FLAGS		6
++#define PTP_OFFS_CORFIELD	8
++#define PTP_OFFS_SRCPRTID	20
++#define PTP_OFFS_SEQ_ID		30
++#define PTP_OFFS_CTRL		32
++#define PTP_OFFS_LOGMEAN	33
++
++#define PTP_IP_OFFS		14
++#define PTP_UDP_OFFS		34
++#define PTP_HEADER_OFFS		42
++#define PTP_MSG_TYPE_OFFS	(PTP_HEADER_OFFS + PTP_OFFS_MSG_TYPE)
++#define PTP_SPORT_ID_OFFS	(PTP_HEADER_OFFS + PTP_OFFS_SRCPRTID)
++#define PTP_SEQ_ID_OFFS		(PTP_HEADER_OFFS + PTP_OFFS_SEQ_ID)
++#define PTP_CTRL_OFFS		(PTP_HEADER_OFFS + PTP_OFFS_CTRL)
++
++/* 1588-2008 network protocol enumeration values */
++#define DPA_PTP_PROT_IPV4	1
++#define DPA_PTP_PROT_IPV6	2
++#define DPA_PTP_PROT_802_3	3
++#define DPA_PTP_PROT_DONTCARE	0xFFFF
++
++#define DPA_PTP_SOURCE_PORT_LENGTH	10
++#define DPA_PTP_HEADER_SZE		34
++#define DPA_ETYPE_LEN			2
++#define DPA_VLAN_TAG_LEN		4
++#define NANOSEC_PER_SECOND		1000000000
++
++/* The threshold between the current found one and the oldest one */
++#define TS_ACCUMULATION_THRESHOLD	50
++
++/* Struct needed to identify a timestamp */
++struct dpa_ptp_ident {
++	u8	version;
++	u8	msg_type;
++	u16	netw_prot;
++	u16	seq_id;
++	u8	snd_port_id[DPA_PTP_SOURCE_PORT_LENGTH];
++};
++
++/* Timestamp format in 1588-2008 */
++struct dpa_ptp_time {
++	u64	sec;	/* just 48 bits used */
++	u32	nsec;
++};
++
++/* needed for timestamp data over ioctl */
++struct dpa_ptp_data {
++	struct dpa_ptp_ident	ident;
++	struct dpa_ptp_time	ts;
++};
++
++struct dpa_ptp_circ_buf {
++	struct circ_buf	circ_buf;
++	u32		size;
++	spinlock_t	ptp_lock;
++};
++
++/* PTP TSU control structure */
++struct dpa_ptp_tsu {
++	struct dpa_priv_s	*dpa_priv;
++	bool			valid;
++	struct dpa_ptp_circ_buf	rx_timestamps;
++	struct dpa_ptp_circ_buf	tx_timestamps;
++
++	/* HW timestamping over ioctl enabled flag */
++	int	hwts_tx_en_ioctl;
++	int	hwts_rx_en_ioctl;
++};
++
++extern int dpa_ptp_init(struct dpa_priv_s *priv);
++extern void dpa_ptp_cleanup(struct dpa_priv_s *priv);
++extern void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv,
++				  struct sk_buff *skb, void *data);
++extern void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv,
++				  struct sk_buff *skb, void *data);
++extern int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd);
++#endif
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c
+@@ -0,0 +1,180 @@
++/* Copyright 2008-2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *     * Redistributions of source code must retain the above copyright
++ *       notice, this list of conditions and the following disclaimer.
++ *     * Redistributions in binary form must reproduce the above copyright
++ *       notice, this list of conditions and the following disclaimer in the
++ *       documentation and/or other materials provided with the distribution.
++ *     * Neither the name of Freescale Semiconductor nor the
++ *       names of its contributors may be used to endorse or promote products
++ *       derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/module.h>
++#include <linux/fsl_qman.h>	/* struct qm_mcr_querycgr */
++#include <linux/debugfs.h>
++#include "dpaa_debugfs.h"
++#include "dpaa_eth.h"	/* struct dpa_priv_s, dpa_percpu_priv_s, dpa_bp */
++
++#define DPA_DEBUGFS_DESCRIPTION "FSL DPAA Ethernet debugfs entries"
++#define DPA_ETH_DEBUGFS_ROOT "fsl_dpa"
++
++static struct dentry *dpa_debugfs_root;
++
++static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file);
++static ssize_t dpa_loop_write(struct file *f,
++			      const char __user *buf, size_t count, loff_t *off);
++
++static const struct file_operations dpa_debugfs_lp_fops = {
++	.open		= dpa_debugfs_loop_open,
++	.write		= dpa_loop_write,
++	.read		= seq_read,
++	.llseek		= seq_lseek,
++	.release	= single_release,
++};
++
++static int dpa_debugfs_loop_show(struct seq_file *file, void *offset)
++{
++	struct dpa_priv_s *priv;
++
++	BUG_ON(offset == NULL);
++
++	priv = netdev_priv((struct net_device *)file->private);
++	seq_printf(file, "%d->%d\n", priv->loop_id, priv->loop_to);
++
++	return 0;
++}
++
++static int user_input_convert(const char __user *user_buf, size_t count,
++			      long *val)
++{
++	char buf[12];
++
++	if (count > sizeof(buf) - 1)
++		return -EINVAL;
++	if (copy_from_user(buf, user_buf, count))
++		return -EFAULT;
++	buf[count] = '\0';
++	if (kstrtol(buf, 0, val))
++		return -EINVAL;
++	return 0;
++}
++
++static ssize_t dpa_loop_write(struct file *f,
++			      const char __user *buf, size_t count, loff_t *off)
++{
++	struct dpa_priv_s *priv;
++	struct net_device *netdev;
++	struct seq_file *sf;
++	int ret;
++	long val;
++
++	ret = user_input_convert(buf, count, &val);
++	if (ret)
++		return ret;
++
++	sf = (struct seq_file *)f->private_data;
++	netdev = (struct net_device *)sf->private;
++	priv = netdev_priv(netdev);
++
++	priv->loop_to = ((val < 0) || (val > 20)) ?
-1 : val; ++ ++ return count; ++} ++ ++static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file) ++{ ++ int _errno; ++ const struct net_device *net_dev; ++ ++ _errno = single_open(file, dpa_debugfs_loop_show, inode->i_private); ++ if (unlikely(_errno < 0)) { ++ net_dev = (struct net_device *)inode->i_private; ++ ++ if (netif_msg_drv((struct dpa_priv_s *)netdev_priv(net_dev))) ++ netdev_err(net_dev, "single_open() = %d\n", ++ _errno); ++ } ++ ++ return _errno; ++} ++ ++ ++int dpa_netdev_debugfs_create(struct net_device *net_dev) ++{ ++ struct dpa_priv_s *priv = netdev_priv(net_dev); ++ static int cnt; ++ char loop_file_name[100]; ++ ++ if (unlikely(dpa_debugfs_root == NULL)) { ++ pr_err(KBUILD_MODNAME ": %s:%hu:%s(): \t%s\n", ++ KBUILD_BASENAME".c", __LINE__, __func__, ++ "root debugfs missing, possible module ordering issue"); ++ return -ENOMEM; ++ } ++ ++ sprintf(loop_file_name, "eth%d_loop", ++cnt); ++ priv->debugfs_loop_file = debugfs_create_file(loop_file_name, ++ S_IRUGO, ++ dpa_debugfs_root, ++ net_dev, ++ &dpa_debugfs_lp_fops); ++ if (unlikely(priv->debugfs_loop_file == NULL)) { ++ netdev_err(net_dev, "debugfs_create_file(%s/%s)", ++ dpa_debugfs_root->d_iname, ++ loop_file_name); ++ ++ return -ENOMEM; ++ } ++ return 0; ++} ++ ++void dpa_netdev_debugfs_remove(struct net_device *net_dev) ++{ ++ struct dpa_priv_s *priv = netdev_priv(net_dev); ++ ++ debugfs_remove(priv->debugfs_loop_file); ++} ++ ++int __init dpa_debugfs_module_init(void) ++{ ++ int _errno = 0; ++ ++ pr_info(KBUILD_MODNAME ": " DPA_DEBUGFS_DESCRIPTION "\n"); ++ ++ dpa_debugfs_root = debugfs_create_dir(DPA_ETH_DEBUGFS_ROOT, NULL); ++ ++ if (unlikely(dpa_debugfs_root == NULL)) { ++ _errno = -ENOMEM; ++ pr_err(KBUILD_MODNAME ": %s:%hu:%s():\n", ++ KBUILD_BASENAME".c", __LINE__, __func__); ++ pr_err("\tdebugfs_create_dir(%s/"KBUILD_MODNAME") = %d\n", ++ DPA_ETH_DEBUGFS_ROOT, _errno); ++ } ++ ++ return _errno; ++} ++ ++void __exit dpa_debugfs_module_exit(void) ++{ ++ debugfs_remove(dpa_debugfs_root); ++} +--- /dev/null ++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h +@@ -0,0 +1,43 @@ ++/* Copyright 2008-2013 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef DPAA_DEBUGFS_H_
++#define DPAA_DEBUGFS_H_
++
++#include <linux/netdevice.h>
++#include <linux/dcache.h>	/* struct dentry needed in dpaa_eth.h */
++
++int dpa_netdev_debugfs_create(struct net_device *net_dev);
++void dpa_netdev_debugfs_remove(struct net_device *net_dev);
++int __init dpa_debugfs_module_init(void);
++void __exit dpa_debugfs_module_exit(void);
++
++#endif /* DPAA_DEBUGFS_H_ */
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
+@@ -0,0 +1,1213 @@
++/* Copyright 2008-2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *     * Redistributions of source code must retain the above copyright
++ *       notice, this list of conditions and the following disclaimer.
++ *     * Redistributions in binary form must reproduce the above copyright
++ *       notice, this list of conditions and the following disclaimer in the
++ *       documentation and/or other materials provided with the distribution.
++ *     * Neither the name of Freescale Semiconductor nor the
++ *       names of its contributors may be used to endorse or promote products
++ *       derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
++#define pr_fmt(fmt) \
++	KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
++	KBUILD_BASENAME".c", __LINE__, __func__
++#else
++#define pr_fmt(fmt) \
++	KBUILD_MODNAME ": " fmt
++#endif
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/of_platform.h>
++#include <linux/of_mdio.h>
++#include <linux/of_net.h>
++#include <linux/io.h>
++#include <linux/if_arp.h>	/* arp_hdr_len() */
++#include <linux/if_vlan.h>	/* VLAN_HLEN */
++#include <linux/icmp.h>		/* struct icmphdr */
++#include <linux/ip.h>		/* struct iphdr */
++#include <linux/ipv6.h>		/* struct ipv6hdr */
++#include <linux/udp.h>		/* struct udphdr */
++#include <linux/tcp.h>		/* struct tcphdr */
++#include <linux/net.h>		/* net_ratelimit() */
++#include <linux/if_ether.h>	/* ETH_P_IP and ETH_P_IPV6 */
++#include <linux/highmem.h>
++#include <linux/percpu.h>
++#include <linux/dma-mapping.h>
++#include <linux/fsl_bman.h>
++#ifdef CONFIG_SOC_BUS
++#include <linux/sys_soc.h>	/* soc_device_match */
++#endif
++
++#include "fsl_fman.h"
++#include "fm_ext.h"
++#include "fm_port_ext.h"
++
++#include "mac.h"
++#include "dpaa_eth.h"
++#include "dpaa_eth_common.h"
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++#include "dpaa_debugfs.h"
++#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
++
++/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
++ * using trace events only need to #include the trace header file.
++ */
++#define CREATE_TRACE_POINTS
++#include "dpaa_eth_trace.h"
++
++#define DPA_NAPI_WEIGHT	64
++
++/* Valid checksum indication */
++#define DPA_CSUM_VALID	0xFFFF
++
++#define DPA_DESCRIPTION "FSL DPAA Ethernet driver"
++
++MODULE_LICENSE("Dual BSD/GPL");
++
++MODULE_AUTHOR("Andy Fleming <afleming@freescale.com>");
++
++MODULE_DESCRIPTION(DPA_DESCRIPTION);
++
++static uint8_t debug = -1;
++module_param(debug, byte, S_IRUGO);
++MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
++
++/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
++static uint16_t tx_timeout = 1000;
++module_param(tx_timeout, ushort, S_IRUGO);
++MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
++
++static const char rtx[][3] = {
++	[RX] = "RX",
++	[TX] = "TX"
++};
++
++#ifndef CONFIG_PPC
++bool dpaa_errata_a010022;
++EXPORT_SYMBOL(dpaa_errata_a010022);
++#endif
++
++/* BM */
++
++#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8)
++
++static uint8_t dpa_priv_common_bpid;
++
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++struct net_device *dpa_loop_netdevs[20];
++#endif
++
++#ifdef CONFIG_PM
++
++static int dpaa_suspend(struct device *dev)
++{
++	struct net_device *net_dev;
++	struct dpa_priv_s *priv;
++	struct mac_device *mac_dev;
++	int err = 0;
++
++	net_dev = dev_get_drvdata(dev);
++
++	if (net_dev->flags & IFF_UP) {
++		priv = netdev_priv(net_dev);
++		mac_dev = priv->mac_dev;
++
++		if (priv->wol & DPAA_WOL_MAGIC) {
++			err = priv->mac_dev->set_wol(mac_dev->port_dev[RX],
++				priv->mac_dev->get_mac_handle(mac_dev), true);
++			if (err) {
++				netdev_err(net_dev, "set_wol() = %d\n", err);
++				goto set_wol_failed;
++			}
++		}
++
++		err = fm_port_suspend(mac_dev->port_dev[RX]);
++		if (err) {
++			netdev_err(net_dev, "fm_port_suspend(RX) = %d\n", err);
++			goto rx_port_suspend_failed;
++		}
++
++		err = fm_port_suspend(mac_dev->port_dev[TX]);
++		if (err) {
++			netdev_err(net_dev, "fm_port_suspend(TX) = %d\n", err);
++			goto tx_port_suspend_failed;
++		}
++	}
++
++	return 0;
++
++tx_port_suspend_failed:
++	fm_port_resume(mac_dev->port_dev[RX]);
++rx_port_suspend_failed:
++	if (priv->wol & DPAA_WOL_MAGIC) {
++		priv->mac_dev->set_wol(mac_dev->port_dev[RX],
++			priv->mac_dev->get_mac_handle(mac_dev), false);
++	}
++set_wol_failed:
++	return err;
++}
++
++static int dpaa_resume(struct device *dev)
++{
++	struct net_device *net_dev;
++	struct dpa_priv_s *priv;
++	struct mac_device *mac_dev;
++	int err = 0;
++
++	net_dev = dev_get_drvdata(dev);
++
++	if (net_dev->flags & IFF_UP) {
++		priv =
netdev_priv(net_dev); ++ mac_dev = priv->mac_dev; ++ ++ err = fm_mac_resume(mac_dev->get_mac_handle(mac_dev)); ++ if (err) { ++ netdev_err(net_dev, "fm_mac_resume = %d\n", err); ++ goto resume_failed; ++ } ++ ++ err = fm_port_resume(mac_dev->port_dev[TX]); ++ if (err) { ++ netdev_err(net_dev, "fm_port_resume(TX) = %d\n", err); ++ goto resume_failed; ++ } ++ ++ err = fm_port_resume(mac_dev->port_dev[RX]); ++ if (err) { ++ netdev_err(net_dev, "fm_port_resume(RX) = %d\n", err); ++ goto resume_failed; ++ } ++ ++ if (priv->wol & DPAA_WOL_MAGIC) { ++ err = priv->mac_dev->set_wol(mac_dev->port_dev[RX], ++ priv->mac_dev->get_mac_handle(mac_dev), false); ++ if (err) { ++ netdev_err(net_dev, "set_wol() = %d\n", err); ++ goto resume_failed; ++ } ++ } ++ } ++ ++ return 0; ++ ++resume_failed: ++ return err; ++} ++ ++static const struct dev_pm_ops dpaa_pm_ops = { ++ .suspend = dpaa_suspend, ++ .resume = dpaa_resume, ++}; ++ ++#define DPAA_PM_OPS (&dpaa_pm_ops) ++ ++#else /* CONFIG_PM */ ++ ++#define DPAA_PM_OPS NULL ++ ++#endif /* CONFIG_PM */ ++ ++/* Checks whether the checksum field in Parse Results array is valid ++ * (equals 0xFFFF) and increments the .cse counter otherwise ++ */ ++static inline void ++dpa_csum_validation(const struct dpa_priv_s *priv, ++ struct dpa_percpu_priv_s *percpu_priv, ++ const struct qm_fd *fd) ++{ ++ dma_addr_t addr = qm_fd_addr(fd); ++ struct dpa_bp *dpa_bp = priv->dpa_bp; ++ void *frm = phys_to_virt(addr); ++ fm_prs_result_t *parse_result; ++ ++ if (unlikely(!frm)) ++ return; ++ ++ dma_sync_single_for_cpu(dpa_bp->dev, addr, DPA_RX_PRIV_DATA_SIZE + ++ DPA_PARSE_RESULTS_SIZE, DMA_BIDIRECTIONAL); ++ ++ parse_result = (fm_prs_result_t *)(frm + DPA_RX_PRIV_DATA_SIZE); ++ ++ if (parse_result->cksum != DPA_CSUM_VALID) ++ percpu_priv->rx_errors.cse++; ++} ++ ++static void _dpa_rx_error(struct net_device *net_dev, ++ const struct dpa_priv_s *priv, ++ struct dpa_percpu_priv_s *percpu_priv, ++ const struct qm_fd *fd, ++ u32 fqid) ++{ ++ /* limit common, possibly innocuous Rx FIFO Overflow errors' ++ * interference with zero-loss convergence benchmark results. 
++ */ ++ if (likely(fd->status & FM_FD_STAT_ERR_PHYSICAL)) ++ pr_warn_once("fsl-dpa: non-zero error counters in fman statistics (sysfs)\n"); ++ else ++ if (netif_msg_hw(priv) && net_ratelimit()) ++ netdev_dbg(net_dev, "Err FD status = 0x%08x\n", ++ fd->status & FM_FD_STAT_RX_ERRORS); ++#ifdef CONFIG_FSL_DPAA_HOOKS ++ if (dpaa_eth_hooks.rx_error && ++ dpaa_eth_hooks.rx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN) ++ /* it's up to the hook to perform resource cleanup */ ++ return; ++#endif ++ percpu_priv->stats.rx_errors++; ++ ++ if (fd->status & FM_PORT_FRM_ERR_DMA) ++ percpu_priv->rx_errors.dme++; ++ if (fd->status & FM_PORT_FRM_ERR_PHYSICAL) ++ percpu_priv->rx_errors.fpe++; ++ if (fd->status & FM_PORT_FRM_ERR_SIZE) ++ percpu_priv->rx_errors.fse++; ++ if (fd->status & FM_PORT_FRM_ERR_PRS_HDR_ERR) ++ percpu_priv->rx_errors.phe++; ++ if (fd->status & FM_FD_STAT_L4CV) ++ dpa_csum_validation(priv, percpu_priv, fd); ++ ++ dpa_fd_release(net_dev, fd); ++} ++ ++static void _dpa_tx_error(struct net_device *net_dev, ++ const struct dpa_priv_s *priv, ++ struct dpa_percpu_priv_s *percpu_priv, ++ const struct qm_fd *fd, ++ u32 fqid) ++{ ++ struct sk_buff *skb; ++ ++ if (netif_msg_hw(priv) && net_ratelimit()) ++ netdev_warn(net_dev, "FD status = 0x%08x\n", ++ fd->status & FM_FD_STAT_TX_ERRORS); ++#ifdef CONFIG_FSL_DPAA_HOOKS ++ if (dpaa_eth_hooks.tx_error && ++ dpaa_eth_hooks.tx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN) ++ /* now the hook must ensure proper cleanup */ ++ return; ++#endif ++ percpu_priv->stats.tx_errors++; ++ ++ /* If we intended the buffers from this frame to go into the bpools ++ * when the FMan transmit was done, we need to put it in manually. ++ */ ++ if (fd->bpid != 0xff) { ++ dpa_fd_release(net_dev, fd); ++ return; ++ } ++ ++ skb = _dpa_cleanup_tx_fd(priv, fd); ++ dev_kfree_skb(skb); ++} ++ ++/* Helper function to factor out frame validation logic on all Rx paths. Its ++ * purpose is to extract from the Parse Results structure information about ++ * the integrity of the frame, its checksum, the length of the parsed headers ++ * and whether the frame is suitable for GRO. ++ * ++ * Assumes no parser errors, since any error frame is dropped before this ++ * function is called. ++ * ++ * @skb will have its ip_summed field overwritten; ++ * @use_gro will only be written with 0, if the frame is definitely not ++ * GRO-able; otherwise, it will be left unchanged; ++ * @hdr_size will be written with a safe value, at least the size of the ++ * headers' length. ++ */ ++void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results, ++ const struct qm_fd *fd, ++ struct sk_buff *skb, int *use_gro) ++{ ++ if (fd->status & FM_FD_STAT_L4CV) { ++ /* The parser has run and performed L4 checksum validation. ++ * We know there were no parser errors (and implicitly no ++ * L4 csum error), otherwise we wouldn't be here. ++ */ ++ skb->ip_summed = CHECKSUM_UNNECESSARY; ++ ++ /* Don't go through GRO for certain types of traffic that ++ * we know are not GRO-able, such as dgram-based protocols. ++ * In the worst-case scenarios, such as small-pkt terminating ++ * UDP, the extra GRO processing would be overkill. ++ * ++ * The only protocol the Parser supports that is also GRO-able ++ * is currently TCP. ++ */ ++ if (!fm_l4_frame_is_tcp(parse_results)) ++ *use_gro = 0; ++ ++ return; ++ } ++ ++ /* We're here because either the parser didn't run or the L4 checksum ++ * was not verified. 
This may include the case of a UDP frame with ++ * checksum zero or an L4 proto other than TCP/UDP ++ */ ++ skb->ip_summed = CHECKSUM_NONE; ++ ++ /* Bypass GRO for unknown traffic or if no PCDs are applied */ ++ *use_gro = 0; ++} ++ ++int dpaa_eth_poll(struct napi_struct *napi, int budget) ++{ ++ struct dpa_napi_portal *np = ++ container_of(napi, struct dpa_napi_portal, napi); ++ ++ int cleaned = qman_p_poll_dqrr(np->p, budget); ++ ++ if (cleaned < budget) { ++ int tmp; ++ napi_complete(napi); ++ tmp = qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); ++ DPA_BUG_ON(tmp); ++ } ++ ++ return cleaned; ++} ++EXPORT_SYMBOL(dpaa_eth_poll); ++ ++static void __hot _dpa_tx_conf(struct net_device *net_dev, ++ const struct dpa_priv_s *priv, ++ struct dpa_percpu_priv_s *percpu_priv, ++ const struct qm_fd *fd, ++ u32 fqid) ++{ ++ struct sk_buff *skb; ++ ++ /* do we need the timestamp for the error frames? */ ++ ++ if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS) != 0) { ++ if (netif_msg_hw(priv) && net_ratelimit()) ++ netdev_warn(net_dev, "FD status = 0x%08x\n", ++ fd->status & FM_FD_STAT_TX_ERRORS); ++ ++ percpu_priv->stats.tx_errors++; ++ } ++ ++ /* hopefully we need not get the timestamp before the hook */ ++#ifdef CONFIG_FSL_DPAA_HOOKS ++ if (dpaa_eth_hooks.tx_confirm && dpaa_eth_hooks.tx_confirm(net_dev, ++ fd, fqid) == DPAA_ETH_STOLEN) ++ /* it's the hook that must now perform cleanup */ ++ return; ++#endif ++ /* This might not perfectly reflect the reality, if the core dequeuing ++ * the Tx confirmation is different from the one that did the enqueue, ++ * but at least it'll show up in the total count. ++ */ ++ percpu_priv->tx_confirm++; ++ ++ skb = _dpa_cleanup_tx_fd(priv, fd); ++ ++ dev_kfree_skb(skb); ++} ++ ++enum qman_cb_dqrr_result ++priv_rx_error_dqrr(struct qman_portal *portal, ++ struct qman_fq *fq, ++ const struct qm_dqrr_entry *dq) ++{ ++ struct net_device *net_dev; ++ struct dpa_priv_s *priv; ++ struct dpa_percpu_priv_s *percpu_priv; ++ int *count_ptr; ++ ++ net_dev = ((struct dpa_fq *)fq)->net_dev; ++ priv = netdev_priv(net_dev); ++ ++ percpu_priv = raw_cpu_ptr(priv->percpu_priv); ++ count_ptr = raw_cpu_ptr(priv->dpa_bp->percpu_count); ++ ++ if (dpaa_eth_napi_schedule(percpu_priv, portal)) ++ return qman_cb_dqrr_stop; ++ ++ if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp, count_ptr))) ++ /* Unable to refill the buffer pool due to insufficient ++ * system memory. Just release the frame back into the pool, ++ * otherwise we'll soon end up with an empty buffer pool. 
++ */ ++ dpa_fd_release(net_dev, &dq->fd); ++ else ++ _dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); ++ ++ return qman_cb_dqrr_consume; ++} ++ ++ ++enum qman_cb_dqrr_result __hot ++priv_rx_default_dqrr(struct qman_portal *portal, ++ struct qman_fq *fq, ++ const struct qm_dqrr_entry *dq) ++{ ++ struct net_device *net_dev; ++ struct dpa_priv_s *priv; ++ struct dpa_percpu_priv_s *percpu_priv; ++ int *count_ptr; ++ struct dpa_bp *dpa_bp; ++ ++ net_dev = ((struct dpa_fq *)fq)->net_dev; ++ priv = netdev_priv(net_dev); ++ dpa_bp = priv->dpa_bp; ++ ++ /* Trace the Rx fd */ ++ trace_dpa_rx_fd(net_dev, fq, &dq->fd); ++ ++ /* IRQ handler, non-migratable; safe to use raw_cpu_ptr here */ ++ percpu_priv = raw_cpu_ptr(priv->percpu_priv); ++ count_ptr = raw_cpu_ptr(dpa_bp->percpu_count); ++ ++ if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal))) ++ return qman_cb_dqrr_stop; ++ ++ /* Vale of plenty: make sure we didn't run out of buffers */ ++ ++ if (unlikely(dpaa_eth_refill_bpools(dpa_bp, count_ptr))) ++ /* Unable to refill the buffer pool due to insufficient ++ * system memory. Just release the frame back into the pool, ++ * otherwise we'll soon end up with an empty buffer pool. ++ */ ++ dpa_fd_release(net_dev, &dq->fd); ++ else ++ _dpa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid, ++ count_ptr); ++ ++ return qman_cb_dqrr_consume; ++} ++ ++enum qman_cb_dqrr_result ++priv_tx_conf_error_dqrr(struct qman_portal *portal, ++ struct qman_fq *fq, ++ const struct qm_dqrr_entry *dq) ++{ ++ struct net_device *net_dev; ++ struct dpa_priv_s *priv; ++ struct dpa_percpu_priv_s *percpu_priv; ++ ++ net_dev = ((struct dpa_fq *)fq)->net_dev; ++ priv = netdev_priv(net_dev); ++ ++ percpu_priv = raw_cpu_ptr(priv->percpu_priv); ++ ++ if (dpaa_eth_napi_schedule(percpu_priv, portal)) ++ return qman_cb_dqrr_stop; ++ ++ _dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); ++ ++ return qman_cb_dqrr_consume; ++} ++ ++enum qman_cb_dqrr_result __hot ++priv_tx_conf_default_dqrr(struct qman_portal *portal, ++ struct qman_fq *fq, ++ const struct qm_dqrr_entry *dq) ++{ ++ struct net_device *net_dev; ++ struct dpa_priv_s *priv; ++ struct dpa_percpu_priv_s *percpu_priv; ++ ++ net_dev = ((struct dpa_fq *)fq)->net_dev; ++ priv = netdev_priv(net_dev); ++ ++ /* Trace the fd */ ++ trace_dpa_tx_conf_fd(net_dev, fq, &dq->fd); ++ ++ /* Non-migratable context, safe to use raw_cpu_ptr */ ++ percpu_priv = raw_cpu_ptr(priv->percpu_priv); ++ ++ if (dpaa_eth_napi_schedule(percpu_priv, portal)) ++ return qman_cb_dqrr_stop; ++ ++ _dpa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); ++ ++ return qman_cb_dqrr_consume; ++} ++ ++void priv_ern(struct qman_portal *portal, ++ struct qman_fq *fq, ++ const struct qm_mr_entry *msg) ++{ ++ struct net_device *net_dev; ++ const struct dpa_priv_s *priv; ++ struct sk_buff *skb; ++ struct dpa_percpu_priv_s *percpu_priv; ++ struct qm_fd fd = msg->ern.fd; ++ ++ net_dev = ((struct dpa_fq *)fq)->net_dev; ++ priv = netdev_priv(net_dev); ++ /* Non-migratable context, safe to use raw_cpu_ptr */ ++ percpu_priv = raw_cpu_ptr(priv->percpu_priv); ++ ++ percpu_priv->stats.tx_dropped++; ++ percpu_priv->stats.tx_fifo_errors++; ++ count_ern(percpu_priv, msg); ++ ++ /* If we intended this buffer to go into the pool ++ * when the FM was done, we need to put it in ++ * manually. 
++ */ ++ if (msg->ern.fd.bpid != 0xff) { ++ dpa_fd_release(net_dev, &fd); ++ return; ++ } ++ ++ skb = _dpa_cleanup_tx_fd(priv, &fd); ++ dev_kfree_skb_any(skb); ++} ++ ++const struct dpa_fq_cbs_t private_fq_cbs = { ++ .rx_defq = { .cb = { .dqrr = priv_rx_default_dqrr } }, ++ .tx_defq = { .cb = { .dqrr = priv_tx_conf_default_dqrr } }, ++ .rx_errq = { .cb = { .dqrr = priv_rx_error_dqrr } }, ++ .tx_errq = { .cb = { .dqrr = priv_tx_conf_error_dqrr } }, ++ .egress_ern = { .cb = { .ern = priv_ern } } ++}; ++EXPORT_SYMBOL(private_fq_cbs); ++ ++static void dpaa_eth_napi_enable(struct dpa_priv_s *priv) ++{ ++ struct dpa_percpu_priv_s *percpu_priv; ++ int i, j; ++ ++ for_each_possible_cpu(i) { ++ percpu_priv = per_cpu_ptr(priv->percpu_priv, i); ++ ++ for (j = 0; j < qman_portal_max; j++) ++ napi_enable(&percpu_priv->np[j].napi); ++ } ++} ++ ++static void dpaa_eth_napi_disable(struct dpa_priv_s *priv) ++{ ++ struct dpa_percpu_priv_s *percpu_priv; ++ int i, j; ++ ++ for_each_possible_cpu(i) { ++ percpu_priv = per_cpu_ptr(priv->percpu_priv, i); ++ ++ for (j = 0; j < qman_portal_max; j++) ++ napi_disable(&percpu_priv->np[j].napi); ++ } ++} ++ ++static int __cold dpa_eth_priv_start(struct net_device *net_dev) ++{ ++ int err; ++ struct dpa_priv_s *priv; ++ ++ priv = netdev_priv(net_dev); ++ ++ dpaa_eth_napi_enable(priv); ++ ++ err = dpa_start(net_dev); ++ if (err < 0) ++ dpaa_eth_napi_disable(priv); ++ ++ return err; ++} ++ ++ ++ ++static int __cold dpa_eth_priv_stop(struct net_device *net_dev) ++{ ++ int _errno; ++ struct dpa_priv_s *priv; ++ ++ _errno = dpa_stop(net_dev); ++ /* Allow NAPI to consume any frame still in the Rx/TxConfirm ++ * ingress queues. This is to avoid a race between the current ++ * context and ksoftirqd which could leave NAPI disabled while ++ * in fact there's still Rx traffic to be processed. 
++ */ ++ usleep_range(5000, 10000); ++ ++ priv = netdev_priv(net_dev); ++ dpaa_eth_napi_disable(priv); ++ ++ return _errno; ++} ++ ++#ifdef CONFIG_NET_POLL_CONTROLLER ++static void dpaa_eth_poll_controller(struct net_device *net_dev) ++{ ++ struct dpa_priv_s *priv = netdev_priv(net_dev); ++ struct dpa_percpu_priv_s *percpu_priv = ++ raw_cpu_ptr(priv->percpu_priv); ++ struct qman_portal *p; ++ const struct qman_portal_config *pc; ++ struct dpa_napi_portal *np; ++ ++ p = (struct qman_portal *)qman_get_affine_portal(smp_processor_id()); ++ pc = qman_p_get_portal_config(p); ++ np = &percpu_priv->np[pc->index]; ++ ++ qman_p_irqsource_remove(np->p, QM_PIRQ_DQRI); ++ qman_p_poll_dqrr(np->p, np->napi.weight); ++ qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); ++} ++#endif ++ ++static const struct net_device_ops dpa_private_ops = { ++ .ndo_open = dpa_eth_priv_start, ++ .ndo_start_xmit = dpa_tx, ++ .ndo_stop = dpa_eth_priv_stop, ++ .ndo_tx_timeout = dpa_timeout, ++ .ndo_get_stats64 = dpa_get_stats64, ++ .ndo_set_mac_address = dpa_set_mac_address, ++ .ndo_validate_addr = eth_validate_addr, ++#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE ++ .ndo_select_queue = dpa_select_queue, ++#endif ++ .ndo_change_mtu = dpa_change_mtu, ++ .ndo_set_rx_mode = dpa_set_rx_mode, ++ .ndo_init = dpa_ndo_init, ++ .ndo_set_features = dpa_set_features, ++ .ndo_fix_features = dpa_fix_features, ++ .ndo_do_ioctl = dpa_ioctl, ++#ifdef CONFIG_NET_POLL_CONTROLLER ++ .ndo_poll_controller = dpaa_eth_poll_controller, ++#endif ++}; ++ ++static int dpa_private_napi_add(struct net_device *net_dev) ++{ ++ struct dpa_priv_s *priv = netdev_priv(net_dev); ++ struct dpa_percpu_priv_s *percpu_priv; ++ int i, cpu; ++ ++ for_each_possible_cpu(cpu) { ++ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); ++ ++ percpu_priv->np = devm_kzalloc(net_dev->dev.parent, ++ qman_portal_max * sizeof(struct dpa_napi_portal), ++ GFP_KERNEL); ++ ++ if (unlikely(percpu_priv->np == NULL)) { ++ dev_err(net_dev->dev.parent, "devm_kzalloc() failed\n"); ++ return -ENOMEM; ++ } ++ ++ for (i = 0; i < qman_portal_max; i++) ++ netif_napi_add(net_dev, &percpu_priv->np[i].napi, ++ dpaa_eth_poll, DPA_NAPI_WEIGHT); ++ } ++ ++ return 0; ++} ++ ++void dpa_private_napi_del(struct net_device *net_dev) ++{ ++ struct dpa_priv_s *priv = netdev_priv(net_dev); ++ struct dpa_percpu_priv_s *percpu_priv; ++ int i, cpu; ++ ++ for_each_possible_cpu(cpu) { ++ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); ++ ++ if (percpu_priv->np) { ++ for (i = 0; i < qman_portal_max; i++) ++ netif_napi_del(&percpu_priv->np[i].napi); ++ ++ devm_kfree(net_dev->dev.parent, percpu_priv->np); ++ } ++ } ++} ++EXPORT_SYMBOL(dpa_private_napi_del); ++ ++static int dpa_private_netdev_init(struct net_device *net_dev) ++{ ++ int i; ++ struct dpa_priv_s *priv = netdev_priv(net_dev); ++ struct dpa_percpu_priv_s *percpu_priv; ++ const uint8_t *mac_addr; ++ ++ /* Although we access another CPU's private data here ++ * we do it at initialization so it is safe ++ */ ++ for_each_possible_cpu(i) { ++ percpu_priv = per_cpu_ptr(priv->percpu_priv, i); ++ percpu_priv->net_dev = net_dev; ++ } ++ ++ net_dev->netdev_ops = &dpa_private_ops; ++ mac_addr = priv->mac_dev->addr; ++ ++ net_dev->mem_start = priv->mac_dev->res->start; ++ net_dev->mem_end = priv->mac_dev->res->end; ++ ++ net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | ++ NETIF_F_LLTX); ++ ++ /* Advertise S/G and HIGHDMA support for private interfaces */ ++ net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA; ++ /* Recent kernels enable GSO automatically, 
if ++ * we declare NETIF_F_SG. For conformity, we'll ++ * still declare GSO explicitly. ++ */ ++ net_dev->features |= NETIF_F_GSO; ++ ++ /* Advertise GRO support */ ++ net_dev->features |= NETIF_F_GRO; ++ ++ /* Advertise NETIF_F_HW_ACCEL_MQ to avoid Tx timeout warnings */ ++ net_dev->features |= NETIF_F_HW_ACCEL_MQ; ++ ++ return dpa_netdev_init(net_dev, mac_addr, tx_timeout); ++} ++ ++static struct dpa_bp * __cold ++dpa_priv_bp_probe(struct device *dev) ++{ ++ struct dpa_bp *dpa_bp; ++ ++ dpa_bp = devm_kzalloc(dev, sizeof(*dpa_bp), GFP_KERNEL); ++ if (unlikely(dpa_bp == NULL)) { ++ dev_err(dev, "devm_kzalloc() failed\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ dpa_bp->percpu_count = devm_alloc_percpu(dev, *dpa_bp->percpu_count); ++ dpa_bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT; ++ ++ dpa_bp->seed_cb = dpa_bp_priv_seed; ++ dpa_bp->free_buf_cb = _dpa_bp_free_pf; ++ ++ return dpa_bp; ++} ++ ++/* Place all ingress FQs (Rx Default, Rx Error, PCD FQs) in a dedicated CGR. ++ * We won't be sending congestion notifications to FMan; for now, we just use ++ * this CGR to generate enqueue rejections to FMan in order to drop the frames ++ * before they reach our ingress queues and eat up memory. ++ */ ++static int dpaa_eth_priv_ingress_cgr_init(struct dpa_priv_s *priv) ++{ ++ struct qm_mcc_initcgr initcgr; ++ u32 cs_th; ++ int err; ++ ++ err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid); ++ if (err < 0) { ++ pr_err("Error %d allocating CGR ID\n", err); ++ goto out_error; ++ } ++ ++ /* Enable CS TD, but disable Congestion State Change Notifications. */ ++ initcgr.we_mask = QM_CGR_WE_CS_THRES; ++ initcgr.cgr.cscn_en = QM_CGR_EN; ++ cs_th = CONFIG_FSL_DPAA_INGRESS_CS_THRESHOLD; ++ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1); ++ ++ initcgr.we_mask |= QM_CGR_WE_CSTD_EN; ++ initcgr.cgr.cstd_en = QM_CGR_EN; ++ ++ /* This is actually a hack, because this CGR will be associated with ++ * our affine SWP. However, we'll place our ingress FQs in it. ++ */ ++ err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT, ++ &initcgr); ++ if (err < 0) { ++ pr_err("Error %d creating ingress CGR with ID %d\n", err, ++ priv->ingress_cgr.cgrid); ++ qman_release_cgrid(priv->ingress_cgr.cgrid); ++ goto out_error; ++ } ++ pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n", ++ priv->ingress_cgr.cgrid, priv->mac_dev->addr); ++ ++ /* struct qman_cgr allows special cgrid values (i.e. outside the 0..255 ++ * range), but we have no common initialization path between the ++ * different variants of the DPAA Eth driver, so we do it here rather ++ * than modifying every other variant than "private Eth". 
++ */ ++ priv->use_ingress_cgr = true; ++ ++out_error: ++ return err; ++} ++ ++static int dpa_priv_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp, ++ size_t count) ++{ ++ struct dpa_priv_s *priv = netdev_priv(net_dev); ++ int i; ++ ++ if (netif_msg_probe(priv)) ++ dev_dbg(net_dev->dev.parent, ++ "Using private BM buffer pools\n"); ++ ++ priv->bp_count = count; ++ ++ for (i = 0; i < count; i++) { ++ int err; ++ err = dpa_bp_alloc(&dpa_bp[i]); ++ if (err < 0) { ++ dpa_bp_free(priv); ++ priv->dpa_bp = NULL; ++ return err; ++ } ++ ++ priv->dpa_bp = &dpa_bp[i]; ++ } ++ ++ dpa_priv_common_bpid = priv->dpa_bp->bpid; ++ return 0; ++} ++ ++static const struct of_device_id dpa_match[]; ++ ++#ifdef CONFIG_FSL_DPAA_DBG_LOOP ++static int dpa_new_loop_id(void) ++{ ++ static int if_id; ++ ++ return if_id++; ++} ++#endif ++ ++static int ++dpaa_eth_priv_probe(struct platform_device *_of_dev) ++{ ++ int err = 0, i, channel; ++ struct device *dev; ++ struct device_node *dpa_node; ++ struct dpa_bp *dpa_bp; ++ size_t count = 1; ++ struct net_device *net_dev = NULL; ++ struct dpa_priv_s *priv = NULL; ++ struct dpa_percpu_priv_s *percpu_priv; ++ struct fm_port_fqs port_fqs; ++ struct dpa_buffer_layout_s *buf_layout = NULL; ++ struct mac_device *mac_dev; ++ ++ dev = &_of_dev->dev; ++ ++ dpa_node = dev->of_node; ++ ++ if (!of_device_is_available(dpa_node)) ++ return -ENODEV; ++ ++ /* Get the buffer pools assigned to this interface; ++ * run only once the default pool probing code ++ */ ++ dpa_bp = (dpa_bpid2pool(dpa_priv_common_bpid)) ? : ++ dpa_priv_bp_probe(dev); ++ if (IS_ERR(dpa_bp)) ++ return PTR_ERR(dpa_bp); ++ ++ /* Allocate this early, so we can store relevant information in ++ * the private area (needed by 1588 code in dpa_mac_probe) ++ */ ++ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES); ++ if (!net_dev) { ++ dev_err(dev, "alloc_etherdev_mq() failed\n"); ++ goto alloc_etherdev_mq_failed; ++ } ++ ++ /* Do this here, so we can be verbose early */ ++ SET_NETDEV_DEV(net_dev, dev); ++ dev_set_drvdata(dev, net_dev); ++ ++ priv = netdev_priv(net_dev); ++ priv->net_dev = net_dev; ++ strcpy(priv->if_type, "private"); ++ ++ priv->msg_enable = netif_msg_init(debug, -1); ++ ++#ifdef CONFIG_FSL_DPAA_DBG_LOOP ++ priv->loop_id = dpa_new_loop_id(); ++ priv->loop_to = -1; /* disabled by default */ ++ dpa_loop_netdevs[priv->loop_id] = net_dev; ++#endif ++ ++ mac_dev = dpa_mac_probe(_of_dev); ++ if (IS_ERR(mac_dev) || !mac_dev) { ++ err = PTR_ERR(mac_dev); ++ goto mac_probe_failed; ++ } ++ ++ /* We have physical ports, so we need to establish ++ * the buffer layout. ++ */ ++ buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout), ++ GFP_KERNEL); ++ if (!buf_layout) { ++ dev_err(dev, "devm_kzalloc() failed\n"); ++ goto alloc_failed; ++ } ++ dpa_set_buffers_layout(mac_dev, buf_layout); ++ ++ /* For private ports, need to compute the size of the default ++ * buffer pool, based on FMan port buffer layout;also update ++ * the maximum buffer size for private ports if necessary ++ */ ++ dpa_bp->size = dpa_bp_size(&buf_layout[RX]); ++ ++#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME ++ /* We only want to use jumbo frame optimization if we actually have ++ * L2 MAX FRM set for jumbo frames as well. 
++ */
++#ifndef CONFIG_PPC
++	if (likely(!dpaa_errata_a010022))
++#endif
++		if (fm_get_max_frm() < 9600)
++			dev_warn(dev,
++				 "Invalid configuration: if jumbo frames support is on, FSL_FM_MAX_FRAME_SIZE should be set to 9600\n");
++#endif
++
++	INIT_LIST_HEAD(&priv->dpa_fq_list);
++
++	memset(&port_fqs, 0, sizeof(port_fqs));
++
++	err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs, true, RX);
++	if (!err)
++		err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
++				       &port_fqs, true, TX);
++
++	if (err < 0)
++		goto fq_probe_failed;
++
++	/* bp init */
++
++	err = dpa_priv_bp_create(net_dev, dpa_bp, count);
++
++	if (err < 0)
++		goto bp_create_failed;
++
++	priv->mac_dev = mac_dev;
++
++	channel = dpa_get_channel();
++
++	if (channel < 0) {
++		err = channel;
++		goto get_channel_failed;
++	}
++
++	priv->channel = (uint16_t)channel;
++	dpaa_eth_add_channel(priv->channel);
++
++	dpa_fq_setup(priv, &private_fq_cbs, priv->mac_dev->port_dev[TX]);
++
++	/* Create a congestion group for this netdev, with
++	 * dynamically-allocated CGR ID.
++	 * Must be executed after probing the MAC, but before
++	 * assigning the egress FQs to the CGRs.
++	 */
++	err = dpaa_eth_cgr_init(priv);
++	if (err < 0) {
++		dev_err(dev, "Error initializing CGR\n");
++		goto tx_cgr_init_failed;
++	}
++	err = dpaa_eth_priv_ingress_cgr_init(priv);
++	if (err < 0) {
++		dev_err(dev, "Error initializing ingress CGR\n");
++		goto rx_cgr_init_failed;
++	}
++
++	/* Add the FQs to the interface, and make them active */
++	err = dpa_fqs_init(dev, &priv->dpa_fq_list, false);
++	if (err < 0)
++		goto fq_alloc_failed;
++
++	priv->buf_layout = buf_layout;
++	priv->tx_headroom = dpa_get_headroom(&priv->buf_layout[TX]);
++	priv->rx_headroom = dpa_get_headroom(&priv->buf_layout[RX]);
++
++	/* All real interfaces need their ports initialized */
++	dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
++			    buf_layout, dev);
++
++#ifdef CONFIG_FMAN_PFC
++	for (i = 0; i < CONFIG_FMAN_PFC_COS_COUNT; i++) {
++		err = fm_port_set_pfc_priorities_mapping_to_qman_wq(
++				mac_dev->port_dev[TX], i, i);
++		if (unlikely(err != 0)) {
++			dev_err(dev, "Error mapping PFC %u to WQ %u\n", i, i);
++			goto pfc_mapping_failed;
++		}
++	}
++#endif
++
++	priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
++
++	if (priv->percpu_priv == NULL) {
++		dev_err(dev, "devm_alloc_percpu() failed\n");
++		err = -ENOMEM;
++		goto alloc_percpu_failed;
++	}
++	for_each_possible_cpu(i) {
++		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
++		memset(percpu_priv, 0, sizeof(*percpu_priv));
++	}
++
++	/* Initialize NAPI */
++	err = dpa_private_napi_add(net_dev);
++
++	if (err < 0)
++		goto napi_add_failed;
++
++	err = dpa_private_netdev_init(net_dev);
++
++	if (err < 0)
++		goto netdev_init_failed;
++
++	dpaa_eth_sysfs_init(&net_dev->dev);
++
++#ifdef CONFIG_PM
++	device_set_wakeup_capable(dev, true);
++#endif
++
++	pr_info("fsl_dpa: Probed interface %s\n", net_dev->name);
++
++	return 0;
++
++netdev_init_failed:
++napi_add_failed:
++	dpa_private_napi_del(net_dev);
++alloc_percpu_failed:
++#ifdef CONFIG_FMAN_PFC
++pfc_mapping_failed:
++#endif
++	dpa_fq_free(dev, &priv->dpa_fq_list);
++fq_alloc_failed:
++	qman_delete_cgr_safe(&priv->ingress_cgr);
++	qman_release_cgrid(priv->ingress_cgr.cgrid);
++rx_cgr_init_failed:
++	qman_delete_cgr_safe(&priv->cgr_data.cgr);
++	qman_release_cgrid(priv->cgr_data.cgr.cgrid);
++tx_cgr_init_failed:
++get_channel_failed:
++	dpa_bp_free(priv);
++bp_create_failed:
++fq_probe_failed:
++alloc_failed:
++mac_probe_failed:
++	dev_set_drvdata(dev, NULL);
++	free_netdev(net_dev);
++alloc_etherdev_mq_failed:
++	if (atomic_read(&dpa_bp->refs) == 0)
++		devm_kfree(dev, dpa_bp);
++
++	return err;
++}
++
++static const struct of_device_id dpa_match[] = {
++	{
++		.compatible	= "fsl,dpa-ethernet"
++	},
++	{}
++};
++MODULE_DEVICE_TABLE(of, dpa_match);
++
++static struct platform_driver dpa_driver = {
++	.driver = {
++		.name		= KBUILD_MODNAME,
++		.of_match_table	= dpa_match,
++		.owner		= THIS_MODULE,
++		.pm		= DPAA_PM_OPS,
++	},
++	.probe		= dpaa_eth_priv_probe,
++	.remove		= dpa_remove
++};
++
++#ifndef CONFIG_PPC
++static bool __init __cold soc_has_errata_a010022(void)
++{
++#ifdef CONFIG_SOC_BUS
++	const struct soc_device_attribute soc_msi_matches[] = {
++		{ .family = "QorIQ LS1043A",
++		  .data = NULL },
++		{ },
++	};
++
++	if (soc_device_match(soc_msi_matches))
++		return true;
++
++	return false;
++#else
++	return true; /* cannot identify SoC */
++#endif
++}
++#endif
++
++static int __init __cold dpa_load(void)
++{
++	int _errno;
++
++	pr_info(DPA_DESCRIPTION "\n");
++
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++	dpa_debugfs_module_init();
++#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
++
++	/* initialise dpaa_eth mirror values */
++	dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
++	dpa_max_frm = fm_get_max_frm();
++	dpa_num_cpus = num_possible_cpus();
++
++#ifndef CONFIG_PPC
++	/* Detect if the current SoC requires the 4K alignment workaround */
++	dpaa_errata_a010022 = soc_has_errata_a010022();
++#endif
++
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++	memset(dpa_loop_netdevs, 0, sizeof(dpa_loop_netdevs));
++#endif
++
++	_errno = platform_driver_register(&dpa_driver);
++	if (unlikely(_errno < 0)) {
++		pr_err(KBUILD_MODNAME
++			": %s:%hu:%s(): platform_driver_register() = %d\n",
++			KBUILD_BASENAME".c", __LINE__, __func__, _errno);
++	}
++
++	pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
++		 KBUILD_BASENAME".c", __func__);
++
++	return _errno;
++}
++module_init(dpa_load);
++
++static void __exit __cold dpa_unload(void)
++{
++	pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
++		 KBUILD_BASENAME".c", __func__);
++
++	platform_driver_unregister(&dpa_driver);
++
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++	dpa_debugfs_module_exit();
++#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
++
++	/* Only one channel is used and needs to be released after all
++	 * interfaces are removed
++	 */
++	dpa_release_channel();
++
++	pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
++		 KBUILD_BASENAME".c", __func__);
++}
++module_exit(dpa_unload);
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
+@@ -0,0 +1,698 @@
++/* Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *     * Redistributions of source code must retain the above copyright
++ *       notice, this list of conditions and the following disclaimer.
++ *     * Redistributions in binary form must reproduce the above copyright
++ *       notice, this list of conditions and the following disclaimer in the
++ *       documentation and/or other materials provided with the distribution.
++ *     * Neither the name of Freescale Semiconductor nor the
++ *       names of its contributors may be used to endorse or promote products
++ *       derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __DPA_H
++#define __DPA_H
++
++#include <linux/netdevice.h>
++#include <linux/fsl_qman.h>	/* struct qman_fq */
++
++#include "fm_ext.h"
++#include "dpaa_eth_trace.h"
++
++extern int dpa_rx_extra_headroom;
++extern int dpa_max_frm;
++extern int dpa_num_cpus;
++
++#define dpa_get_rx_extra_headroom() dpa_rx_extra_headroom
++#define dpa_get_max_frm() dpa_max_frm
++
++#define dpa_get_max_mtu()	\
++	(dpa_get_max_frm() - (VLAN_ETH_HLEN + ETH_FCS_LEN))
++
++#define __hot
++
++/* Simple enum of FQ types - used for array indexing */
++enum port_type {RX, TX};
++
++/* TODO: This structure should be renamed & moved to the FMD wrapper */
++struct dpa_buffer_layout_s {
++	uint16_t	priv_data_size;
++	bool		parse_results;
++	bool		time_stamp;
++	bool		hash_results;
++	uint8_t		manip_extra_space;
++	uint16_t	data_align;
++};
++
++#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
++#define DPA_BUG_ON(cond)	BUG_ON(cond)
++#else
++#define DPA_BUG_ON(cond)
++#endif
++
++#define DPA_TX_PRIV_DATA_SIZE	16
++#define DPA_PARSE_RESULTS_SIZE	sizeof(fm_prs_result_t)
++#define DPA_TIME_STAMP_SIZE	8
++#define DPA_HASH_RESULTS_SIZE	8
++#define DPA_RX_PRIV_DATA_SIZE	(DPA_TX_PRIV_DATA_SIZE + \
++					dpa_get_rx_extra_headroom())
++
++#define FM_FD_STAT_RX_ERRORS						\
++	(FM_PORT_FRM_ERR_DMA | FM_PORT_FRM_ERR_PHYSICAL |		\
++	 FM_PORT_FRM_ERR_SIZE | FM_PORT_FRM_ERR_CLS_DISCARD |		\
++	 FM_PORT_FRM_ERR_EXTRACTION | FM_PORT_FRM_ERR_NO_SCHEME |	\
++	 FM_PORT_FRM_ERR_ILL_PLCR | FM_PORT_FRM_ERR_PRS_TIMEOUT |	\
++	 FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | FM_PORT_FRM_ERR_PRS_HDR_ERR)
++
++#define FM_FD_STAT_TX_ERRORS \
++	(FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT | \
++	 FM_PORT_FRM_ERR_LENGTH | FM_PORT_FRM_ERR_DMA)
++
++#ifndef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
++/* The raw buffer size must be cacheline aligned.
++ * Normally we use 2K buffers.
++ */
++#define DPA_BP_RAW_SIZE	2048
++#else
++/* For jumbo frame optimizations, use buffers large enough to accommodate
++ * 9.6K frames, FD maximum offset, skb sh_info overhead and some extra
++ * space to account for further alignments.
++ */
++#define DPA_MAX_FRM_SIZE	9600
++#ifdef CONFIG_PPC
++#define DPA_BP_RAW_SIZE \
++	((DPA_MAX_FRM_SIZE + DPA_MAX_FD_OFFSET + \
++	  sizeof(struct skb_shared_info) + 128) & ~(SMP_CACHE_BYTES - 1))
++#else /* CONFIG_PPC */
++#define DPA_BP_RAW_SIZE ((unlikely(dpaa_errata_a010022)) ?
2048 : \ ++ ((DPA_MAX_FRM_SIZE + DPA_MAX_FD_OFFSET + \ ++ sizeof(struct skb_shared_info) + 128) & ~(SMP_CACHE_BYTES - 1))) ++#endif /* CONFIG_PPC */ ++#endif /* CONFIG_FSL_DPAA_ETH_JUMBO_FRAME */ ++ ++/* This is what FMan is ever allowed to use. ++ * FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is ++ * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that, ++ * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us ++ * half-page-aligned buffers (can we?), so we reserve some more space ++ * for start-of-buffer alignment. ++ */ ++#define dpa_bp_size(buffer_layout) (SKB_WITH_OVERHEAD(DPA_BP_RAW_SIZE) - \ ++ SMP_CACHE_BYTES) ++/* We must ensure that skb_shinfo is always cacheline-aligned. */ ++#define DPA_SKB_SIZE(size) ((size) & ~(SMP_CACHE_BYTES - 1)) ++ ++/* Maximum size of a buffer for which recycling is allowed. ++ * We need an upper limit such that forwarded skbs that get reallocated on Tx ++ * aren't allowed to grow unboundedly. On the other hand, we need to make sure ++ * that skbs allocated by us will not fail to be recycled due to their size. ++ * ++ * For a requested size, the kernel allocator provides the next power of two ++ * sized block, which the stack will use as is, regardless of the actual size ++ * it required; since we must accommodate at most 9.6K buffers (L2 maximum ++ * supported frame size), set the recycling upper limit to 16K. ++ */ ++#define DPA_RECYCLE_MAX_SIZE 16384 ++ ++#if defined(CONFIG_FSL_SDK_FMAN_TEST) ++/*TODO: temporary for fman pcd testing */ ++#define FMAN_PCD_TESTS_MAX_NUM_RANGES 20 ++#endif ++ ++#define DPAA_ETH_FQ_DELTA 0x10000 ++ ++#define DPAA_ETH_PCD_FQ_BASE(device_addr) \ ++ (((device_addr) & 0x1fffff) >> 6) ++ ++#define DPAA_ETH_PCD_FQ_HI_PRIO_BASE(device_addr) \ ++ (DPAA_ETH_FQ_DELTA + DPAA_ETH_PCD_FQ_BASE(device_addr)) ++ ++/* Largest value that the FQD's OAL field can hold. ++ * This is DPAA-1.x specific. ++ * TODO: This rather belongs in fsl_qman.h ++ */ ++#define FSL_QMAN_MAX_OAL 127 ++ ++/* Maximum offset value for a contig or sg FD (represented on 9 bits) */ ++#define DPA_MAX_FD_OFFSET ((1 << 9) - 1) ++ ++/* Default alignment for start of data in an Rx FD */ ++#define DPA_FD_DATA_ALIGNMENT 16 ++ ++/* Values for the L3R field of the FM Parse Results ++ */ ++/* L3 Type field: First IP Present IPv4 */ ++#define FM_L3_PARSE_RESULT_IPV4 0x8000 ++/* L3 Type field: First IP Present IPv6 */ ++#define FM_L3_PARSE_RESULT_IPV6 0x4000 ++ ++/* Values for the L4R field of the FM Parse Results ++ * See $8.8.4.7.20 - L4 HXS - L4 Results from DPAA-Rev2 Reference Manual. ++ */ ++/* L4 Type field: UDP */ ++#define FM_L4_PARSE_RESULT_UDP 0x40 ++/* L4 Type field: TCP */ ++#define FM_L4_PARSE_RESULT_TCP 0x20 ++/* FD status field indicating whether the FM Parser has attempted to validate ++ * the L4 csum of the frame. ++ * Note that having this bit set doesn't necessarily imply that the checksum ++ * is valid. One would have to check the parse results to find that out. ++ */ ++#define FM_FD_STAT_L4CV 0x00000004 ++ ++ ++#define FM_FD_STAT_ERR_PHYSICAL FM_PORT_FRM_ERR_PHYSICAL ++ ++/* Check if the parsed frame was found to be a TCP segment. ++ * ++ * @parse_result_ptr must be of type (fm_prs_result_t *). 
++ */ ++#define fm_l4_frame_is_tcp(parse_result_ptr) \ ++ ((parse_result_ptr)->l4r & FM_L4_PARSE_RESULT_TCP) ++ ++/* number of Tx queues to FMan */ ++#ifdef CONFIG_FMAN_PFC ++#define DPAA_ETH_TX_QUEUES (NR_CPUS * CONFIG_FMAN_PFC_COS_COUNT) ++#else ++#define DPAA_ETH_TX_QUEUES NR_CPUS ++#endif ++ ++#define DPAA_ETH_RX_QUEUES 128 ++ ++/* Convenience macros for storing/retrieving the skb back-pointers. They must ++ * accommodate both recycling and confirmation paths - i.e. cases when the buf ++ * was allocated by ourselves, respectively by the stack. In the former case, ++ * we could store the skb at negative offset; in the latter case, we can't, ++ * so we'll use 0 as offset. ++ * ++ * NB: @off is an offset from a (struct sk_buff **) pointer! ++ */ ++#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off) \ ++{ \ ++ skbh = (struct sk_buff **)addr; \ ++ *(skbh + (off)) = skb; \ ++} ++#define DPA_READ_SKB_PTR(skb, skbh, addr, off) \ ++{ \ ++ skbh = (struct sk_buff **)addr; \ ++ skb = *(skbh + (off)); \ ++} ++ ++#ifdef CONFIG_PM ++/* Magic Packet wakeup */ ++#define DPAA_WOL_MAGIC 0x00000001 ++#endif ++ ++#if defined(CONFIG_FSL_SDK_FMAN_TEST) ++struct pcd_range { ++ uint32_t base; ++ uint32_t count; ++}; ++#endif ++ ++/* More detailed FQ types - used for fine-grained WQ assignments */ ++enum dpa_fq_type { ++ FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */ ++ FQ_TYPE_RX_ERROR, /* Rx Error FQs */ ++ FQ_TYPE_RX_PCD, /* User-defined PCDs */ ++ FQ_TYPE_TX, /* "Real" Tx FQs */ ++ FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */ ++ FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */ ++ FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */ ++ FQ_TYPE_RX_PCD_HI_PRIO, /* User-defined high-priority PCDs */ ++}; ++ ++struct dpa_fq { ++ struct qman_fq fq_base; ++ struct list_head list; ++ struct net_device *net_dev; ++ bool init; ++ uint32_t fqid; ++ uint32_t flags; ++ uint16_t channel; ++ uint8_t wq; ++ enum dpa_fq_type fq_type; ++}; ++ ++struct dpa_fq_cbs_t { ++ struct qman_fq rx_defq; ++ struct qman_fq tx_defq; ++ struct qman_fq rx_errq; ++ struct qman_fq tx_errq; ++ struct qman_fq egress_ern; ++}; ++ ++struct fqid_cell { ++ uint32_t start; ++ uint32_t count; ++}; ++ ++struct dpa_bp { ++ struct bman_pool *pool; ++ uint8_t bpid; ++ struct device *dev; ++ union { ++ /* The buffer pools used for the private ports are initialized ++ * with target_count buffers for each CPU; at runtime the ++ * number of buffers per CPU is constantly brought back to this ++ * level ++ */ ++ int target_count; ++ /* The configured value for the number of buffers in the pool, ++ * used for shared port buffer pools ++ */ ++ int config_count; ++ }; ++ size_t size; ++ bool seed_pool; ++ /* physical address of the contiguous memory used by the pool to store ++ * the buffers ++ */ ++ dma_addr_t paddr; ++ /* virtual address of the contiguous memory used by the pool to store ++ * the buffers ++ */ ++ void __iomem *vaddr; ++ /* current number of buffers in the bpool alloted to this CPU */ ++ int __percpu *percpu_count; ++ atomic_t refs; ++ /* some bpools need to be seeded before use by this cb */ ++ int (*seed_cb)(struct dpa_bp *); ++ /* some bpools need to be emptied before freeing; this cb is used ++ * for freeing of individual buffers taken from the pool ++ */ ++ void (*free_buf_cb)(void *addr); ++}; ++ ++struct dpa_rx_errors { ++ u64 dme; /* DMA Error */ ++ u64 fpe; /* Frame Physical Error */ ++ u64 fse; /* Frame Size Error */ ++ u64 phe; /* Header Error */ ++ u64 cse; /* Checksum Validation Error */ ++}; ++ 
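++/* Illustrative sketch, added for exposition and not part of the original
++ * driver sources: the counters above live in per-CPU private areas, so any
++ * reader (e.g. the sysfs/ethtool statistics code) is expected to fold them
++ * across all CPUs, along the lines of:
++ *
++ *	u64 dme_total = 0;
++ *	int cpu;
++ *
++ *	for_each_possible_cpu(cpu)
++ *		dme_total += per_cpu_ptr(priv->percpu_priv, cpu)->rx_errors.dme;
++ *
++ * where priv is the struct dpa_priv_s of the interface being queried.
++ */
++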
++/* Counters for QMan ERN frames - one counter per rejection code */ ++struct dpa_ern_cnt { ++ u64 cg_tdrop; /* Congestion group taildrop */ ++ u64 wred; /* WRED congestion */ ++ u64 err_cond; /* Error condition */ ++ u64 early_window; /* Order restoration, frame too early */ ++ u64 late_window; /* Order restoration, frame too late */ ++ u64 fq_tdrop; /* FQ taildrop */ ++ u64 fq_retired; /* FQ is retired */ ++ u64 orp_zero; /* ORP disabled */ ++}; ++ ++struct dpa_napi_portal { ++ struct napi_struct napi; ++ struct qman_portal *p; ++}; ++ ++struct dpa_percpu_priv_s { ++ struct net_device *net_dev; ++ struct dpa_napi_portal *np; ++ u64 in_interrupt; ++ u64 tx_returned; ++ u64 tx_confirm; ++ /* fragmented (non-linear) skbuffs received from the stack */ ++ u64 tx_frag_skbuffs; ++ /* number of S/G frames received */ ++ u64 rx_sg; ++ ++ struct rtnl_link_stats64 stats; ++ struct dpa_rx_errors rx_errors; ++ struct dpa_ern_cnt ern_cnt; ++}; ++ ++struct dpa_priv_s { ++ struct dpa_percpu_priv_s __percpu *percpu_priv; ++ struct dpa_bp *dpa_bp; ++ /* Store here the needed Tx headroom for convenience and speed ++ * (even though it can be computed based on the fields of buf_layout) ++ */ ++ uint16_t tx_headroom; ++ struct net_device *net_dev; ++ struct mac_device *mac_dev; ++ struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES]; ++ struct qman_fq *conf_fqs[DPAA_ETH_TX_QUEUES]; ++ ++ size_t bp_count; ++ ++ uint16_t channel; /* "fsl,qman-channel-id" */ ++ struct list_head dpa_fq_list; ++ ++#ifdef CONFIG_FSL_DPAA_DBG_LOOP ++ struct dentry *debugfs_loop_file; ++#endif ++ ++ uint32_t msg_enable; /* net_device message level */ ++#ifdef CONFIG_FSL_DPAA_1588 ++ struct dpa_ptp_tsu *tsu; ++#endif ++ ++#if defined(CONFIG_FSL_SDK_FMAN_TEST) ++/* TODO: this is temporary until pcd support is implemented in dpaa */ ++ int priv_pcd_num_ranges; ++ struct pcd_range priv_pcd_ranges[FMAN_PCD_TESTS_MAX_NUM_RANGES]; ++#endif ++ ++ struct { ++ /** ++ * All egress queues to a given net device belong to one ++ * (and the same) congestion group. ++ */ ++ struct qman_cgr cgr; ++ /* If congested, when it began. Used for performance stats. */ ++ u32 congestion_start_jiffies; ++ /* Number of jiffies the Tx port was congested. */ ++ u32 congested_jiffies; ++ /** ++ * Counter for the number of times the CGR ++ * entered congestion state ++ */ ++ u32 cgr_congested_count; ++ } cgr_data; ++ /* Use a per-port CGR for ingress traffic. 
++	 */
++	bool use_ingress_cgr;
++	struct qman_cgr ingress_cgr;
++
++#ifdef CONFIG_FSL_DPAA_TS
++	bool ts_tx_en; /* Tx timestamping enabled */
++	bool ts_rx_en; /* Rx timestamping enabled */
++#endif /* CONFIG_FSL_DPAA_TS */
++
++	struct dpa_buffer_layout_s *buf_layout;
++	uint16_t rx_headroom;
++	char if_type[30];
++
++	void *peer;
++#ifdef CONFIG_PM
++	u32 wol;
++#endif
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++	int loop_id;
++	int loop_to;
++#endif
++#ifdef CONFIG_FSL_DPAA_CEETM
++	bool ceetm_en; /* CEETM QoS enabled */
++#endif
++};
++
++struct fm_port_fqs {
++	struct dpa_fq *tx_defq;
++	struct dpa_fq *tx_errq;
++	struct dpa_fq *rx_defq;
++	struct dpa_fq *rx_errq;
++};
++
++
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++extern struct net_device *dpa_loop_netdevs[20];
++#endif
++
++/* functions with different implementation for SG and non-SG: */
++int dpa_bp_priv_seed(struct dpa_bp *dpa_bp);
++int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *count_ptr);
++void __hot _dpa_rx(struct net_device *net_dev,
++		   struct qman_portal *portal,
++		   const struct dpa_priv_s *priv,
++		   struct dpa_percpu_priv_s *percpu_priv,
++		   const struct qm_fd *fd,
++		   u32 fqid,
++		   int *count_ptr);
++int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev);
++int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev,
++			  struct qman_fq *egress_fq, struct qman_fq *conf_fq);
++struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
++				   const struct qm_fd *fd);
++void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results,
++				      const struct qm_fd *fd,
++				      struct sk_buff *skb,
++				      int *use_gro);
++#ifndef CONFIG_FSL_DPAA_TS
++bool dpa_skb_is_recyclable(struct sk_buff *skb);
++bool dpa_buf_is_recyclable(struct sk_buff *skb,
++			   uint32_t min_size,
++			   uint16_t min_offset,
++			   unsigned char **new_buf_start);
++#endif
++int __hot skb_to_contig_fd(struct dpa_priv_s *priv,
++			   struct sk_buff *skb, struct qm_fd *fd,
++			   int *count_ptr, int *offset);
++int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
++		       struct sk_buff *skb, struct qm_fd *fd);
++int __cold __attribute__((nonnull))
++	_dpa_fq_free(struct device *dev, struct qman_fq *fq);
++
++/* Turn on HW checksum computation for this outgoing frame.
++ * If the current protocol is not something we support in this regard
++ * (or if the stack has already computed the SW checksum), we do nothing.
++ *
++ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
++ * otherwise.
++ *
++ * Note that this function may modify the fd->cmd field and the skb data buffer
++ * (the Parse Results area).
++ */
++int dpa_enable_tx_csum(struct dpa_priv_s *priv,
++		       struct sk_buff *skb, struct qm_fd *fd,
++		       char *parse_results);
++
++static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv,
++					 struct qman_portal *portal)
++{
++	/* On an RT-enabled kernel with threaded ISRs, in_irq() does not
++	 * return an appropriate value, so use in_serving_softirq() to
++	 * distinguish between softirq and irq contexts.
++ */ ++ if (unlikely(in_irq() || !in_serving_softirq())) { ++ /* Disable QMan IRQ and invoke NAPI */ ++ int ret = qman_p_irqsource_remove(portal, QM_PIRQ_DQRI); ++ if (likely(!ret)) { ++ const struct qman_portal_config *pc = ++ qman_p_get_portal_config(portal); ++ struct dpa_napi_portal *np = ++ &percpu_priv->np[pc->index]; ++ ++ np->p = portal; ++ napi_schedule(&np->napi); ++ percpu_priv->in_interrupt++; ++ return 1; ++ } ++ } ++ return 0; ++} ++ ++static inline ssize_t __const __must_check __attribute__((nonnull)) ++dpa_fd_length(const struct qm_fd *fd) ++{ ++ return fd->length20; ++} ++ ++static inline ssize_t __const __must_check __attribute__((nonnull)) ++dpa_fd_offset(const struct qm_fd *fd) ++{ ++ return fd->offset; ++} ++ ++/* Verifies if the skb length is below the interface MTU */ ++static inline int dpa_check_rx_mtu(struct sk_buff *skb, int mtu) ++{ ++ if (unlikely(skb->len > mtu)) ++ if ((skb->protocol != htons(ETH_P_8021Q)) ++ || (skb->len > mtu + 4)) ++ return -1; ++ ++ return 0; ++} ++ ++static inline uint16_t dpa_get_headroom(struct dpa_buffer_layout_s *bl) ++{ ++ uint16_t headroom; ++ /* The frame headroom must accommodate: ++ * - the driver private data area ++ * - parse results, hash results, timestamp if selected ++ * - manip extra space ++ * If either hash results or time stamp are selected, both will ++ * be copied to/from the frame headroom, as TS is located between PR and ++ * HR in the IC and IC copy size has a granularity of 16bytes ++ * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM) ++ * ++ * Also make sure the headroom is a multiple of data_align bytes ++ */ ++ headroom = (uint16_t)(bl->priv_data_size + ++ (bl->parse_results ? DPA_PARSE_RESULTS_SIZE : 0) + ++ (bl->hash_results || bl->time_stamp ? ++ DPA_TIME_STAMP_SIZE + DPA_HASH_RESULTS_SIZE : 0) + ++ bl->manip_extra_space); ++ ++ return bl->data_align ? ALIGN(headroom, bl->data_align) : headroom; ++} ++ ++int fm_mac_dump_regs(struct mac_device *h_dev, char *buf, int n); ++int fm_mac_dump_rx_stats(struct mac_device *h_dev, char *buf, int n); ++int fm_mac_dump_tx_stats(struct mac_device *h_dev, char *buf, int n); ++ ++void dpaa_eth_sysfs_remove(struct device *dev); ++void dpaa_eth_sysfs_init(struct device *dev); ++int dpaa_eth_poll(struct napi_struct *napi, int budget); ++ ++void dpa_private_napi_del(struct net_device *net_dev); ++ ++/* Equivalent to a memset(0), but works faster */ ++static inline void clear_fd(struct qm_fd *fd) ++{ ++ fd->opaque_addr = 0; ++ fd->opaque = 0; ++ fd->cmd = 0; ++} ++ ++static inline int _dpa_tx_fq_to_id(const struct dpa_priv_s *priv, ++ struct qman_fq *tx_fq) ++{ ++ int i; ++ ++ for (i = 0; i < DPAA_ETH_TX_QUEUES; i++) ++ if (priv->egress_fqs[i] == tx_fq) ++ return i; ++ ++ return -EINVAL; ++} ++ ++static inline int __hot dpa_xmit(struct dpa_priv_s *priv, ++ struct rtnl_link_stats64 *percpu_stats, ++ struct qm_fd *fd, struct qman_fq *egress_fq, ++ struct qman_fq *conf_fq) ++{ ++ int err, i; ++ ++ if (fd->bpid == 0xff) ++ fd->cmd |= qman_fq_fqid(conf_fq); ++ ++ /* Trace this Tx fd */ ++ trace_dpa_tx_fd(priv->net_dev, egress_fq, fd); ++ ++ for (i = 0; i < 100000; i++) { ++ err = qman_enqueue(egress_fq, fd, 0); ++ if (err != -EBUSY) ++ break; ++ } ++ ++ if (unlikely(err < 0)) { ++ /* TODO differentiate b/w -EBUSY (EQCR full) and other codes? 
*/ ++ percpu_stats->tx_errors++; ++ percpu_stats->tx_fifo_errors++; ++ return err; ++ } ++ ++ percpu_stats->tx_packets++; ++ percpu_stats->tx_bytes += dpa_fd_length(fd); ++ ++ return 0; ++} ++ ++/* Use multiple WQs for FQ assignment: ++ * - Tx Confirmation queues go to WQ1. ++ * - Rx Default, Tx and PCD queues go to WQ3 (no differentiation between ++ * Rx and Tx traffic, or between Rx Default and Rx PCD frames). ++ * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance ++ * to be scheduled, in case there are many more FQs in WQ3). ++ * This ensures that Tx-confirmed buffers are timely released. In particular, ++ * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they ++ * are greatly outnumbered by other FQs in the system (usually PCDs), while ++ * dequeue scheduling is round-robin. ++ */ ++static inline void _dpa_assign_wq(struct dpa_fq *fq) ++{ ++ switch (fq->fq_type) { ++ case FQ_TYPE_TX_CONFIRM: ++ case FQ_TYPE_TX_CONF_MQ: ++ fq->wq = 1; ++ break; ++ case FQ_TYPE_RX_DEFAULT: ++ case FQ_TYPE_TX: ++ fq->wq = 3; ++ break; ++ case FQ_TYPE_RX_ERROR: ++ case FQ_TYPE_TX_ERROR: ++ case FQ_TYPE_RX_PCD_HI_PRIO: ++ fq->wq = 2; ++ break; ++ case FQ_TYPE_RX_PCD: ++ fq->wq = 5; ++ break; ++ default: ++ WARN(1, "Invalid FQ type %d for FQID %d!\n", ++ fq->fq_type, fq->fqid); ++ } ++} ++ ++#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE ++/* Use in lieu of skb_get_queue_mapping() */ ++#ifdef CONFIG_FMAN_PFC ++#define dpa_get_queue_mapping(skb) \ ++ (((skb)->priority < CONFIG_FMAN_PFC_COS_COUNT) ? \ ++ ((skb)->priority * dpa_num_cpus + smp_processor_id()) : \ ++ ((CONFIG_FMAN_PFC_COS_COUNT - 1) * \ ++ dpa_num_cpus + smp_processor_id())); ++ ++#else ++#define dpa_get_queue_mapping(skb) \ ++ raw_smp_processor_id() ++#endif ++#else ++/* Use the queue selected by XPS */ ++#define dpa_get_queue_mapping(skb) \ ++ skb_get_queue_mapping(skb) ++#endif ++ ++#ifdef CONFIG_PTP_1588_CLOCK_DPAA ++struct ptp_priv_s { ++ struct device_node *node; ++ struct platform_device *of_dev; ++ struct ptp_clock *clock; ++ struct mac_device *mac_dev; ++}; ++extern struct ptp_priv_s ptp_priv; ++#endif ++ ++static inline void _dpa_bp_free_pf(void *addr) ++{ ++ put_page(virt_to_head_page(addr)); ++} ++ ++/* LS1043A SoC has a HW issue regarding FMan DMA transactions; The issue ++ * manifests itself at high traffic rates when frames cross 4K memory ++ * boundaries or when they are not aligned to 16 bytes; For the moment, we ++ * use a SW workaround to avoid frames larger than 4K or that exceed 4K ++ * alignments and to realign the frames to 16 bytes. ++ */ ++ ++#ifndef CONFIG_PPC ++extern bool dpaa_errata_a010022; /* SoC affected by A010022 errata */ ++#define NONREC_MARK 0x01 ++#define HAS_DMA_ISSUE(start, size) \ ++ (((uintptr_t)(start) + (size)) > \ ++ (((uintptr_t)(start) + 0x1000) & ~0xFFF)) ++#endif /* !CONFIG_PPC */ ++ ++#endif /* __DPA_H */ +--- /dev/null ++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c +@@ -0,0 +1,205 @@ ++/* Copyright 2008-2013 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifdef CONFIG_FSL_DPAA_ETH_DEBUG ++#define pr_fmt(fmt) \ ++ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \ ++ KBUILD_BASENAME".c", __LINE__, __func__ ++#else ++#define pr_fmt(fmt) \ ++ KBUILD_MODNAME ": " fmt ++#endif ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "dpaa_eth.h" ++#include "dpaa_eth_common.h" ++#include "dpaa_eth_base.h" ++ ++#define DPA_DESCRIPTION "FSL DPAA Advanced drivers:" ++ ++MODULE_LICENSE("Dual BSD/GPL"); ++ ++uint8_t advanced_debug = -1; ++module_param(advanced_debug, byte, S_IRUGO); ++MODULE_PARM_DESC(advanced_debug, "Module/Driver verbosity level"); ++EXPORT_SYMBOL(advanced_debug); ++ ++static int dpa_bp_cmp(const void *dpa_bp0, const void *dpa_bp1) ++{ ++ return ((struct dpa_bp *)dpa_bp0)->size - ++ ((struct dpa_bp *)dpa_bp1)->size; ++} ++ ++struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */ ++dpa_bp_probe(struct platform_device *_of_dev, size_t *count) ++{ ++ int i, lenp, na, ns, err; ++ struct device *dev; ++ struct device_node *dev_node; ++ const __be32 *bpool_cfg; ++ struct dpa_bp *dpa_bp; ++ u32 bpid; ++ ++ dev = &_of_dev->dev; ++ ++ *count = of_count_phandle_with_args(dev->of_node, ++ "fsl,bman-buffer-pools", NULL); ++ if (*count < 1) { ++ dev_err(dev, "missing fsl,bman-buffer-pools device tree entry\n"); ++ return ERR_PTR(-EINVAL); ++ } ++ ++ dpa_bp = devm_kzalloc(dev, *count * sizeof(*dpa_bp), GFP_KERNEL); ++ if (dpa_bp == NULL) { ++ dev_err(dev, "devm_kzalloc() failed\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ dev_node = of_find_node_by_path("/"); ++ if (unlikely(dev_node == NULL)) { ++ dev_err(dev, "of_find_node_by_path(/) failed\n"); ++ return ERR_PTR(-EINVAL); ++ } ++ ++ na = of_n_addr_cells(dev_node); ++ ns = of_n_size_cells(dev_node); ++ ++ for (i = 0; i < *count; i++) { ++ of_node_put(dev_node); ++ ++ dev_node = of_parse_phandle(dev->of_node, ++ "fsl,bman-buffer-pools", i); ++ if (dev_node == NULL) { ++ dev_err(dev, "of_find_node_by_phandle() failed\n"); ++ return ERR_PTR(-EFAULT); ++ } ++ ++ if 
(unlikely(!of_device_is_compatible(dev_node, "fsl,bpool"))) { ++ dev_err(dev, ++ "!of_device_is_compatible(%s, fsl,bpool)\n", ++ dev_node->full_name); ++ dpa_bp = ERR_PTR(-EINVAL); ++ goto _return_of_node_put; ++ } ++ ++ err = of_property_read_u32(dev_node, "fsl,bpid", &bpid); ++ if (err) { ++ dev_err(dev, "Cannot find buffer pool ID in the device tree\n"); ++ dpa_bp = ERR_PTR(-EINVAL); ++ goto _return_of_node_put; ++ } ++ dpa_bp[i].bpid = (uint8_t)bpid; ++ ++ bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg", ++ &lenp); ++ if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) { ++ const uint32_t *seed_pool; ++ ++ dpa_bp[i].config_count = ++ (int)of_read_number(bpool_cfg, ns); ++ dpa_bp[i].size = ++ (size_t)of_read_number(bpool_cfg + ns, ns); ++ dpa_bp[i].paddr = ++ of_read_number(bpool_cfg + 2 * ns, na); ++ ++ seed_pool = of_get_property(dev_node, ++ "fsl,bpool-ethernet-seeds", &lenp); ++ dpa_bp[i].seed_pool = !!seed_pool; ++ ++ } else { ++ dev_err(dev, ++ "Missing/invalid fsl,bpool-ethernet-cfg device tree entry for node %s\n", ++ dev_node->full_name); ++ dpa_bp = ERR_PTR(-EINVAL); ++ goto _return_of_node_put; ++ } ++ } ++ ++ sort(dpa_bp, *count, sizeof(*dpa_bp), dpa_bp_cmp, NULL); ++ ++ return dpa_bp; ++ ++_return_of_node_put: ++ if (dev_node) ++ of_node_put(dev_node); ++ ++ return dpa_bp; ++} ++EXPORT_SYMBOL(dpa_bp_probe); ++ ++int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp, ++ size_t count) ++{ ++ struct dpa_priv_s *priv = netdev_priv(net_dev); ++ int i; ++ ++ priv->dpa_bp = dpa_bp; ++ priv->bp_count = count; ++ ++ for (i = 0; i < count; i++) { ++ int err; ++ err = dpa_bp_alloc(&dpa_bp[i]); ++ if (err < 0) { ++ dpa_bp_free(priv); ++ priv->dpa_bp = NULL; ++ return err; ++ } ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL(dpa_bp_create); ++ ++static int __init __cold dpa_advanced_load(void) ++{ ++ pr_info(DPA_DESCRIPTION "\n"); ++ ++ return 0; ++} ++module_init(dpa_advanced_load); ++ ++static void __exit __cold dpa_advanced_unload(void) ++{ ++ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n", ++ KBUILD_BASENAME".c", __func__); ++ ++} ++module_exit(dpa_advanced_unload); +--- /dev/null ++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h +@@ -0,0 +1,49 @@ ++/* Copyright 2008-2013 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __DPAA_ETH_BASE_H
++#define __DPAA_ETH_BASE_H
++
++#include <linux/netdevice.h>		/* struct net_device */
++#include <linux/fsl_bman.h>		/* struct bm_buffer */
++#include <linux/platform_device.h>	/* struct platform_device */
++#include <linux/net_tstamp.h>		/* struct hwtstamp_config */
++
++extern uint8_t advanced_debug;
++extern const struct dpa_fq_cbs_t shared_fq_cbs;
++extern int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev);
++
++struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
++dpa_bp_probe(struct platform_device *_of_dev, size_t *count);
++int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
++		  size_t count);
++
++#endif /* __DPAA_ETH_BASE_H */
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
+@@ -0,0 +1,1992 @@
++/* Copyright 2008-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *     * Redistributions of source code must retain the above copyright
++ *	 notice, this list of conditions and the following disclaimer.
++ *     * Redistributions in binary form must reproduce the above copyright
++ *	 notice, this list of conditions and the following disclaimer in the
++ *	 documentation and/or other materials provided with the distribution.
++ *     * Neither the name of Freescale Semiconductor nor the
++ *	 names of its contributors may be used to endorse or promote products
++ *	 derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */ ++ ++#include ++#include "dpaa_eth_ceetm.h" ++ ++#define DPA_CEETM_DESCRIPTION "FSL DPAA CEETM qdisc" ++ ++const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1] = { ++ [TCA_CEETM_COPT] = { .len = sizeof(struct tc_ceetm_copt) }, ++ [TCA_CEETM_QOPS] = { .len = sizeof(struct tc_ceetm_qopt) }, ++}; ++ ++struct Qdisc_ops ceetm_qdisc_ops; ++ ++/* Obtain the DCP and the SP ids from the FMan port */ ++static void get_dcp_and_sp(struct net_device *dev, enum qm_dc_portal *dcp_id, ++ unsigned int *sp_id) ++{ ++ uint32_t channel; ++ t_LnxWrpFmPortDev *port_dev; ++ struct dpa_priv_s *dpa_priv = netdev_priv(dev); ++ struct mac_device *mac_dev = dpa_priv->mac_dev; ++ ++ port_dev = (t_LnxWrpFmPortDev *)mac_dev->port_dev[TX]; ++ channel = port_dev->txCh; ++ ++ *sp_id = channel & CHANNEL_SP_MASK; ++ pr_debug(KBUILD_BASENAME " : FM sub-portal ID %d\n", *sp_id); ++ ++ if (channel < DCP0_MAX_CHANNEL) { ++ *dcp_id = qm_dc_portal_fman0; ++ pr_debug(KBUILD_BASENAME " : DCP ID 0\n"); ++ } else { ++ *dcp_id = qm_dc_portal_fman1; ++ pr_debug(KBUILD_BASENAME " : DCP ID 1\n"); ++ } ++} ++ ++/* Enqueue Rejection Notification callback */ ++static void ceetm_ern(struct qman_portal *portal, struct qman_fq *fq, ++ const struct qm_mr_entry *msg) ++{ ++ struct net_device *net_dev; ++ struct ceetm_class *cls; ++ struct ceetm_class_stats *cstats = NULL; ++ const struct dpa_priv_s *dpa_priv; ++ struct dpa_percpu_priv_s *dpa_percpu_priv; ++ struct sk_buff *skb; ++ struct qm_fd fd = msg->ern.fd; ++ ++ net_dev = ((struct ceetm_fq *)fq)->net_dev; ++ dpa_priv = netdev_priv(net_dev); ++ dpa_percpu_priv = raw_cpu_ptr(dpa_priv->percpu_priv); ++ ++ /* Increment DPA counters */ ++ dpa_percpu_priv->stats.tx_dropped++; ++ dpa_percpu_priv->stats.tx_fifo_errors++; ++ ++ /* Increment CEETM counters */ ++ cls = ((struct ceetm_fq *)fq)->ceetm_cls; ++ switch (cls->type) { ++ case CEETM_PRIO: ++ cstats = this_cpu_ptr(cls->prio.cstats); ++ break; ++ case CEETM_WBFS: ++ cstats = this_cpu_ptr(cls->wbfs.cstats); ++ break; ++ } ++ ++ if (cstats) ++ cstats->ern_drop_count++; ++ ++ if (fd.bpid != 0xff) { ++ dpa_fd_release(net_dev, &fd); ++ return; ++ } ++ ++ skb = _dpa_cleanup_tx_fd(dpa_priv, &fd); ++ dev_kfree_skb_any(skb); ++} ++ ++/* Congestion State Change Notification callback */ ++static void ceetm_cscn(struct qm_ceetm_ccg *ccg, void *cb_ctx, int congested) ++{ ++ struct ceetm_fq *ceetm_fq = (struct ceetm_fq *)cb_ctx; ++ struct dpa_priv_s *dpa_priv = netdev_priv(ceetm_fq->net_dev); ++ struct ceetm_class *cls = ceetm_fq->ceetm_cls; ++ struct ceetm_class_stats *cstats = NULL; ++ ++ switch (cls->type) { ++ case CEETM_PRIO: ++ cstats = this_cpu_ptr(cls->prio.cstats); ++ break; ++ case CEETM_WBFS: ++ cstats = this_cpu_ptr(cls->wbfs.cstats); ++ break; ++ } ++ ++ if (congested) { ++ dpa_priv->cgr_data.congestion_start_jiffies = jiffies; ++ netif_tx_stop_all_queues(dpa_priv->net_dev); ++ dpa_priv->cgr_data.cgr_congested_count++; ++ if (cstats) ++ cstats->congested_count++; ++ } else { ++ dpa_priv->cgr_data.congested_jiffies += ++ (jiffies - dpa_priv->cgr_data.congestion_start_jiffies); ++ netif_tx_wake_all_queues(dpa_priv->net_dev); ++ } ++} ++ ++/* Allocate a ceetm fq */ ++static int ceetm_alloc_fq(struct ceetm_fq **fq, struct net_device *dev, ++ struct ceetm_class *cls) ++{ ++ *fq = kzalloc(sizeof(**fq), GFP_KERNEL); ++ if (!*fq) ++ return -ENOMEM; ++ ++ (*fq)->net_dev = dev; ++ (*fq)->ceetm_cls = cls; ++ return 0; ++} ++ ++/* Configure a ceetm Class Congestion Group */ ++static int ceetm_config_ccg(struct qm_ceetm_ccg **ccg, ++ struct 
qm_ceetm_channel *channel, unsigned int id, ++ struct ceetm_fq *fq, struct dpa_priv_s *dpa_priv) ++{ ++ int err; ++ u32 cs_th; ++ u16 ccg_mask; ++ struct qm_ceetm_ccg_params ccg_params; ++ ++ err = qman_ceetm_ccg_claim(ccg, channel, id, ceetm_cscn, fq); ++ if (err) ++ return err; ++ ++ /* Configure the count mode (frames/bytes), enable congestion state ++ * notifications, configure the congestion entry and exit thresholds, ++ * enable tail-drop, configure the tail-drop mode, and set the ++ * overhead accounting limit ++ */ ++ ccg_mask = QM_CCGR_WE_MODE | ++ QM_CCGR_WE_CSCN_EN | ++ QM_CCGR_WE_CS_THRES_IN | QM_CCGR_WE_CS_THRES_OUT | ++ QM_CCGR_WE_TD_EN | QM_CCGR_WE_TD_MODE | ++ QM_CCGR_WE_OAL; ++ ++ ccg_params.mode = 0; /* count bytes */ ++ ccg_params.cscn_en = 1; /* generate notifications */ ++ ccg_params.td_en = 1; /* enable tail-drop */ ++ ccg_params.td_mode = 0; /* tail-drop on congestion state */ ++ ccg_params.oal = (signed char)(min(sizeof(struct sk_buff) + ++ dpa_priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL)); ++ ++ /* Set the congestion state thresholds according to the link speed */ ++ if (dpa_priv->mac_dev->if_support & SUPPORTED_10000baseT_Full) ++ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G; ++ else ++ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G; ++ ++ qm_cgr_cs_thres_set64(&ccg_params.cs_thres_in, cs_th, 1); ++ qm_cgr_cs_thres_set64(&ccg_params.cs_thres_out, ++ cs_th * CEETM_CCGR_RATIO, 1); ++ ++ err = qman_ceetm_ccg_set(*ccg, ccg_mask, &ccg_params); ++ if (err) ++ return err; ++ ++ return 0; ++} ++ ++/* Configure a ceetm Logical Frame Queue */ ++static int ceetm_config_lfq(struct qm_ceetm_cq *cq, struct ceetm_fq *fq, ++ struct qm_ceetm_lfq **lfq) ++{ ++ int err; ++ u64 context_a; ++ u32 context_b; ++ ++ err = qman_ceetm_lfq_claim(lfq, cq); ++ if (err) ++ return err; ++ ++ /* Get the former contexts in order to preserve context B */ ++ err = qman_ceetm_lfq_get_context(*lfq, &context_a, &context_b); ++ if (err) ++ return err; ++ ++ context_a = CEETM_CONTEXT_A; ++ err = qman_ceetm_lfq_set_context(*lfq, context_a, context_b); ++ if (err) ++ return err; ++ ++ (*lfq)->ern = ceetm_ern; ++ ++ err = qman_ceetm_create_fq(*lfq, &fq->fq); ++ if (err) ++ return err; ++ ++ return 0; ++} ++ ++/* Configure a prio ceetm class */ ++static int ceetm_config_prio_cls(struct ceetm_class *cls, ++ struct net_device *dev, ++ struct qm_ceetm_channel *channel, ++ unsigned int id) ++{ ++ int err; ++ struct dpa_priv_s *dpa_priv = netdev_priv(dev); ++ ++ err = ceetm_alloc_fq(&cls->prio.fq, dev, cls); ++ if (err) ++ return err; ++ ++ /* Claim and configure the CCG */ ++ err = ceetm_config_ccg(&cls->prio.ccg, channel, id, cls->prio.fq, ++ dpa_priv); ++ if (err) ++ return err; ++ ++ /* Claim and configure the CQ */ ++ err = qman_ceetm_cq_claim(&cls->prio.cq, channel, id, cls->prio.ccg); ++ if (err) ++ return err; ++ ++ if (cls->shaped) { ++ err = qman_ceetm_channel_set_cq_cr_eligibility(channel, id, 1); ++ if (err) ++ return err; ++ ++ err = qman_ceetm_channel_set_cq_er_eligibility(channel, id, 1); ++ if (err) ++ return err; ++ } ++ ++ /* Claim and configure a LFQ */ ++ err = ceetm_config_lfq(cls->prio.cq, cls->prio.fq, &cls->prio.lfq); ++ if (err) ++ return err; ++ ++ return 0; ++} ++ ++/* Configure a wbfs ceetm class */ ++static int ceetm_config_wbfs_cls(struct ceetm_class *cls, ++ struct net_device *dev, ++ struct qm_ceetm_channel *channel, ++ unsigned int id, int type) ++{ ++ int err; ++ struct dpa_priv_s *dpa_priv = netdev_priv(dev); ++ ++ err = ceetm_alloc_fq(&cls->wbfs.fq, dev, cls); ++ if (err) ++ return err; 
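++
++	/* Editorial note, mirroring the prio path above: the steps below
++	 * claim a CCG for congestion management, claim a CQ from group A or
++	 * group B, program the CQ weight in ratio form (multiplied by 100 to
++	 * drop the fraction, so an assumed user weight of 1.5 would be
++	 * programmed as 150), and finally bind an LFQ on top of the CQ.
++	 */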
++ ++ /* Claim and configure the CCG */ ++ err = ceetm_config_ccg(&cls->wbfs.ccg, channel, id, cls->wbfs.fq, ++ dpa_priv); ++ if (err) ++ return err; ++ ++ /* Claim and configure the CQ */ ++ if (type == WBFS_GRP_B) ++ err = qman_ceetm_cq_claim_B(&cls->wbfs.cq, channel, id, ++ cls->wbfs.ccg); ++ else ++ err = qman_ceetm_cq_claim_A(&cls->wbfs.cq, channel, id, ++ cls->wbfs.ccg); ++ if (err) ++ return err; ++ ++ /* Configure the CQ weight: real number multiplied by 100 to get rid ++ * of the fraction ++ */ ++ err = qman_ceetm_set_queue_weight_in_ratio(cls->wbfs.cq, ++ cls->wbfs.weight * 100); ++ if (err) ++ return err; ++ ++ /* Claim and configure a LFQ */ ++ err = ceetm_config_lfq(cls->wbfs.cq, cls->wbfs.fq, &cls->wbfs.lfq); ++ if (err) ++ return err; ++ ++ return 0; ++} ++ ++/* Find class in qdisc hash table using given handle */ ++static inline struct ceetm_class *ceetm_find(u32 handle, struct Qdisc *sch) ++{ ++ struct ceetm_qdisc *priv = qdisc_priv(sch); ++ struct Qdisc_class_common *clc; ++ ++ pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n", ++ __func__, handle, sch->handle); ++ ++ clc = qdisc_class_find(&priv->clhash, handle); ++ return clc ? container_of(clc, struct ceetm_class, common) : NULL; ++} ++ ++/* Insert a class in the qdisc's class hash */ ++static void ceetm_link_class(struct Qdisc *sch, ++ struct Qdisc_class_hash *clhash, ++ struct Qdisc_class_common *common) ++{ ++ sch_tree_lock(sch); ++ qdisc_class_hash_insert(clhash, common); ++ sch_tree_unlock(sch); ++ qdisc_class_hash_grow(sch, clhash); ++} ++ ++/* Destroy a ceetm class */ ++static void ceetm_cls_destroy(struct Qdisc *sch, struct ceetm_class *cl) ++{ ++ if (!cl) ++ return; ++ ++ pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n", ++ __func__, cl->common.classid, sch->handle); ++ ++ switch (cl->type) { ++ case CEETM_ROOT: ++ if (cl->root.child) { ++ qdisc_destroy(cl->root.child); ++ cl->root.child = NULL; ++ } ++ ++ if (cl->root.ch && qman_ceetm_channel_release(cl->root.ch)) ++ pr_err(KBUILD_BASENAME ++ " : %s : error releasing the channel %d\n", ++ __func__, cl->root.ch->idx); ++ ++ break; ++ ++ case CEETM_PRIO: ++ if (cl->prio.child) { ++ qdisc_destroy(cl->prio.child); ++ cl->prio.child = NULL; ++ } ++ ++ if (cl->prio.lfq && qman_ceetm_lfq_release(cl->prio.lfq)) ++ pr_err(KBUILD_BASENAME ++ " : %s : error releasing the LFQ %d\n", ++ __func__, cl->prio.lfq->idx); ++ ++ if (cl->prio.cq && qman_ceetm_cq_release(cl->prio.cq)) ++ pr_err(KBUILD_BASENAME ++ " : %s : error releasing the CQ %d\n", ++ __func__, cl->prio.cq->idx); ++ ++ if (cl->prio.ccg && qman_ceetm_ccg_release(cl->prio.ccg)) ++ pr_err(KBUILD_BASENAME ++ " : %s : error releasing the CCG %d\n", ++ __func__, cl->prio.ccg->idx); ++ ++ kfree(cl->prio.fq); ++ ++ if (cl->prio.cstats) ++ free_percpu(cl->prio.cstats); ++ ++ break; ++ ++ case CEETM_WBFS: ++ if (cl->wbfs.lfq && qman_ceetm_lfq_release(cl->wbfs.lfq)) ++ pr_err(KBUILD_BASENAME ++ " : %s : error releasing the LFQ %d\n", ++ __func__, cl->wbfs.lfq->idx); ++ ++ if (cl->wbfs.cq && qman_ceetm_cq_release(cl->wbfs.cq)) ++ pr_err(KBUILD_BASENAME ++ " : %s : error releasing the CQ %d\n", ++ __func__, cl->wbfs.cq->idx); ++ ++ if (cl->wbfs.ccg && qman_ceetm_ccg_release(cl->wbfs.ccg)) ++ pr_err(KBUILD_BASENAME ++ " : %s : error releasing the CCG %d\n", ++ __func__, cl->wbfs.ccg->idx); ++ ++ kfree(cl->wbfs.fq); ++ ++ if (cl->wbfs.cstats) ++ free_percpu(cl->wbfs.cstats); ++ } ++ ++ tcf_destroy_chain(&cl->filter_list); ++ kfree(cl); ++} ++ ++/* Destroy a ceetm qdisc */ ++static void 
ceetm_destroy(struct Qdisc *sch) ++{ ++ unsigned int ntx, i; ++ struct hlist_node *next; ++ struct ceetm_class *cl; ++ struct ceetm_qdisc *priv = qdisc_priv(sch); ++ struct net_device *dev = qdisc_dev(sch); ++ ++ pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n", ++ __func__, sch->handle); ++ ++ /* All filters need to be removed before destroying the classes */ ++ tcf_destroy_chain(&priv->filter_list); ++ ++ for (i = 0; i < priv->clhash.hashsize; i++) { ++ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) ++ tcf_destroy_chain(&cl->filter_list); ++ } ++ ++ for (i = 0; i < priv->clhash.hashsize; i++) { ++ hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i], ++ common.hnode) ++ ceetm_cls_destroy(sch, cl); ++ } ++ ++ qdisc_class_hash_destroy(&priv->clhash); ++ ++ switch (priv->type) { ++ case CEETM_ROOT: ++ dpa_disable_ceetm(dev); ++ ++ if (priv->root.lni && qman_ceetm_lni_release(priv->root.lni)) ++ pr_err(KBUILD_BASENAME ++ " : %s : error releasing the LNI %d\n", ++ __func__, priv->root.lni->idx); ++ ++ if (priv->root.sp && qman_ceetm_sp_release(priv->root.sp)) ++ pr_err(KBUILD_BASENAME ++ " : %s : error releasing the SP %d\n", ++ __func__, priv->root.sp->idx); ++ ++ if (priv->root.qstats) ++ free_percpu(priv->root.qstats); ++ ++ if (!priv->root.qdiscs) ++ break; ++ ++ /* Remove the pfifo qdiscs */ ++ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) ++ if (priv->root.qdiscs[ntx]) ++ qdisc_destroy(priv->root.qdiscs[ntx]); ++ ++ kfree(priv->root.qdiscs); ++ break; ++ ++ case CEETM_PRIO: ++ if (priv->prio.parent) ++ priv->prio.parent->root.child = NULL; ++ break; ++ ++ case CEETM_WBFS: ++ if (priv->wbfs.parent) ++ priv->wbfs.parent->prio.child = NULL; ++ break; ++ } ++} ++ ++static int ceetm_dump(struct Qdisc *sch, struct sk_buff *skb) ++{ ++ struct Qdisc *qdisc; ++ unsigned int ntx, i; ++ struct nlattr *nest; ++ struct tc_ceetm_qopt qopt; ++ struct ceetm_qdisc_stats *qstats; ++ struct net_device *dev = qdisc_dev(sch); ++ struct ceetm_qdisc *priv = qdisc_priv(sch); ++ ++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle); ++ ++ sch_tree_lock(sch); ++ memset(&qopt, 0, sizeof(qopt)); ++ qopt.type = priv->type; ++ qopt.shaped = priv->shaped; ++ ++ switch (priv->type) { ++ case CEETM_ROOT: ++ /* Gather statistics from the underlying pfifo qdiscs */ ++ sch->q.qlen = 0; ++ memset(&sch->bstats, 0, sizeof(sch->bstats)); ++ memset(&sch->qstats, 0, sizeof(sch->qstats)); ++ ++ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { ++ qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping; ++ sch->q.qlen += qdisc->q.qlen; ++ sch->bstats.bytes += qdisc->bstats.bytes; ++ sch->bstats.packets += qdisc->bstats.packets; ++ sch->qstats.qlen += qdisc->qstats.qlen; ++ sch->qstats.backlog += qdisc->qstats.backlog; ++ sch->qstats.drops += qdisc->qstats.drops; ++ sch->qstats.requeues += qdisc->qstats.requeues; ++ sch->qstats.overlimits += qdisc->qstats.overlimits; ++ } ++ ++ for_each_online_cpu(i) { ++ qstats = per_cpu_ptr(priv->root.qstats, i); ++ sch->qstats.drops += qstats->drops; ++ } ++ ++ qopt.rate = priv->root.rate; ++ qopt.ceil = priv->root.ceil; ++ qopt.overhead = priv->root.overhead; ++ break; ++ ++ case CEETM_PRIO: ++ qopt.qcount = priv->prio.qcount; ++ break; ++ ++ case CEETM_WBFS: ++ qopt.qcount = priv->wbfs.qcount; ++ qopt.cr = priv->wbfs.cr; ++ qopt.er = priv->wbfs.er; ++ break; ++ ++ default: ++ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__); ++ sch_tree_unlock(sch); ++ return -EINVAL; ++ } ++ ++ nest = nla_nest_start(skb, TCA_OPTIONS); ++ if (!nest) ++ goto 
nla_put_failure; ++ if (nla_put(skb, TCA_CEETM_QOPS, sizeof(qopt), &qopt)) ++ goto nla_put_failure; ++ nla_nest_end(skb, nest); ++ ++ sch_tree_unlock(sch); ++ return skb->len; ++ ++nla_put_failure: ++ sch_tree_unlock(sch); ++ nla_nest_cancel(skb, nest); ++ return -EMSGSIZE; ++} ++ ++/* Configure a root ceetm qdisc */ ++static int ceetm_init_root(struct Qdisc *sch, struct ceetm_qdisc *priv, ++ struct tc_ceetm_qopt *qopt) ++{ ++ struct netdev_queue *dev_queue; ++ struct Qdisc *qdisc; ++ enum qm_dc_portal dcp_id; ++ unsigned int i, sp_id, parent_id; ++ int err; ++ u64 bps; ++ struct qm_ceetm_sp *sp; ++ struct qm_ceetm_lni *lni; ++ struct net_device *dev = qdisc_dev(sch); ++ struct dpa_priv_s *dpa_priv = netdev_priv(dev); ++ struct mac_device *mac_dev = dpa_priv->mac_dev; ++ ++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle); ++ ++ /* Validate inputs */ ++ if (sch->parent != TC_H_ROOT) { ++ pr_err("CEETM: a root ceetm qdisc can not be attached to a class\n"); ++ tcf_destroy_chain(&priv->filter_list); ++ qdisc_class_hash_destroy(&priv->clhash); ++ return -EINVAL; ++ } ++ ++ if (!mac_dev) { ++ pr_err("CEETM: the interface is lacking a mac\n"); ++ err = -EINVAL; ++ goto err_init_root; ++ } ++ ++ /* pre-allocate underlying pfifo qdiscs */ ++ priv->root.qdiscs = kcalloc(dev->num_tx_queues, ++ sizeof(priv->root.qdiscs[0]), ++ GFP_KERNEL); ++ if (!priv->root.qdiscs) { ++ err = -ENOMEM; ++ goto err_init_root; ++ } ++ ++ for (i = 0; i < dev->num_tx_queues; i++) { ++ dev_queue = netdev_get_tx_queue(dev, i); ++ parent_id = TC_H_MAKE(TC_H_MAJ(sch->handle), ++ TC_H_MIN(i + PFIFO_MIN_OFFSET)); ++ ++ qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops, ++ parent_id); ++ if (!qdisc) { ++ err = -ENOMEM; ++ goto err_init_root; ++ } ++ ++ priv->root.qdiscs[i] = qdisc; ++ qdisc->flags |= TCQ_F_ONETXQUEUE; ++ } ++ ++ sch->flags |= TCQ_F_MQROOT; ++ ++ priv->root.qstats = alloc_percpu(struct ceetm_qdisc_stats); ++ if (!priv->root.qstats) { ++ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n", ++ __func__); ++ err = -ENOMEM; ++ goto err_init_root; ++ } ++ ++ priv->shaped = qopt->shaped; ++ priv->root.rate = qopt->rate; ++ priv->root.ceil = qopt->ceil; ++ priv->root.overhead = qopt->overhead; ++ ++ /* Claim the SP */ ++ get_dcp_and_sp(dev, &dcp_id, &sp_id); ++ err = qman_ceetm_sp_claim(&sp, dcp_id, sp_id); ++ if (err) { ++ pr_err(KBUILD_BASENAME " : %s : failed to claim the SP\n", ++ __func__); ++ goto err_init_root; ++ } ++ ++ priv->root.sp = sp; ++ ++ /* Claim the LNI - will use the same id as the SP id since SPs 0-7 ++ * are connected to the TX FMan ports ++ */ ++ err = qman_ceetm_lni_claim(&lni, dcp_id, sp_id); ++ if (err) { ++ pr_err(KBUILD_BASENAME " : %s : failed to claim the LNI\n", ++ __func__); ++ goto err_init_root; ++ } ++ ++ priv->root.lni = lni; ++ ++ err = qman_ceetm_sp_set_lni(sp, lni); ++ if (err) { ++ pr_err(KBUILD_BASENAME " : %s : failed to link the SP and LNI\n", ++ __func__); ++ goto err_init_root; ++ } ++ ++ lni->sp = sp; ++ ++ /* Configure the LNI shaper */ ++ if (priv->shaped) { ++ err = qman_ceetm_lni_enable_shaper(lni, 1, priv->root.overhead); ++ if (err) { ++ pr_err(KBUILD_BASENAME " : %s : failed to configure the LNI shaper\n", ++ __func__); ++ goto err_init_root; ++ } ++ ++ bps = priv->root.rate << 3; /* Bps -> bps */ ++ err = qman_ceetm_lni_set_commit_rate_bps(lni, bps, dev->mtu); ++ if (err) { ++ pr_err(KBUILD_BASENAME " : %s : failed to configure the LNI shaper\n", ++ __func__); ++ goto err_init_root; ++ } ++ ++ bps = priv->root.ceil << 3; /* Bps -> bps 
 */
++		err = qman_ceetm_lni_set_excess_rate_bps(lni, bps, dev->mtu);
++		if (err) {
++			pr_err(KBUILD_BASENAME " : %s : failed to configure the LNI shaper\n",
++			       __func__);
++			goto err_init_root;
++		}
++	}
++
++	/* TODO default configuration */
++
++	dpa_enable_ceetm(dev);
++	return 0;
++
++err_init_root:
++	ceetm_destroy(sch);
++	return err;
++}
++
++/* Configure a prio ceetm qdisc */
++static int ceetm_init_prio(struct Qdisc *sch, struct ceetm_qdisc *priv,
++			   struct tc_ceetm_qopt *qopt)
++{
++	int err;
++	unsigned int i;
++	struct ceetm_class *parent_cl, *child_cl;
++	struct Qdisc *parent_qdisc;
++	struct net_device *dev = qdisc_dev(sch);
++
++	pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++	if (sch->parent == TC_H_ROOT) {
++		pr_err("CEETM: a prio ceetm qdisc can not be root\n");
++		err = -EINVAL;
++		goto err_init_prio;
++	}
++
++	parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
++	if (strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
++		pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
++		err = -EINVAL;
++		goto err_init_prio;
++	}
++
++	/* Obtain the parent root ceetm_class */
++	parent_cl = ceetm_find(sch->parent, parent_qdisc);
++
++	if (!parent_cl || parent_cl->type != CEETM_ROOT) {
++		pr_err("CEETM: prio ceetm qdiscs can be added only under a root ceetm class\n");
++		err = -EINVAL;
++		goto err_init_prio;
++	}
++
++	priv->prio.parent = parent_cl;
++	parent_cl->root.child = sch;
++
++	priv->shaped = parent_cl->shaped;
++	priv->prio.qcount = qopt->qcount;
++
++	/* Create and configure qcount child classes */
++	for (i = 0; i < priv->prio.qcount; i++) {
++		child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL);
++		if (!child_cl) {
++			pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n",
++			       __func__);
++			err = -ENOMEM;
++			goto err_init_prio;
++		}
++
++		child_cl->prio.cstats = alloc_percpu(struct ceetm_class_stats);
++		if (!child_cl->prio.cstats) {
++			pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
++			       __func__);
++			err = -ENOMEM;
++			goto err_init_prio_cls;
++		}
++
++		child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1));
++		child_cl->refcnt = 1;
++		child_cl->parent = sch;
++		child_cl->type = CEETM_PRIO;
++		child_cl->shaped = priv->shaped;
++		child_cl->prio.child = NULL;
++
++		/* All shaped CQs have CR and ER enabled by default */
++		child_cl->prio.cr = child_cl->shaped;
++		child_cl->prio.er = child_cl->shaped;
++		child_cl->prio.fq = NULL;
++		child_cl->prio.cq = NULL;
++
++		/* Configure the corresponding hardware CQ */
++		err = ceetm_config_prio_cls(child_cl, dev,
++					    parent_cl->root.ch, i);
++		if (err) {
++			pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm prio class %X\n",
++			       __func__, child_cl->common.classid);
++			goto err_init_prio_cls;
++		}
++
++		/* Add class handle in Qdisc */
++		ceetm_link_class(sch, &priv->clhash, &child_cl->common);
++		pr_debug(KBUILD_BASENAME " : %s : added ceetm prio class %X associated with CQ %d and CCG %d\n",
++			 __func__, child_cl->common.classid,
++			 child_cl->prio.cq->idx, child_cl->prio.ccg->idx);
++	}
++
++	return 0;
++
++err_init_prio_cls:
++	ceetm_cls_destroy(sch, child_cl);
++err_init_prio:
++	ceetm_destroy(sch);
++	return err;
++}
++
++/* Configure a wbfs ceetm qdisc */
++static int ceetm_init_wbfs(struct Qdisc *sch, struct ceetm_qdisc *priv,
++			   struct tc_ceetm_qopt *qopt)
++{
++	int err, group_b, small_group;
++	unsigned int i, id, prio_a, prio_b;
++	struct ceetm_class *parent_cl, *child_cl, *root_cl;
++	struct Qdisc *parent_qdisc;
++	struct ceetm_qdisc
 *parent_priv;
++	struct qm_ceetm_channel *channel;
++	struct net_device *dev = qdisc_dev(sch);
++
++	pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++	/* Validate inputs */
++	if (sch->parent == TC_H_ROOT) {
++		pr_err("CEETM: a wbfs ceetm qdisc can not be root\n");
++		err = -EINVAL;
++		goto err_init_wbfs;
++	}
++
++	/* Obtain the parent prio ceetm qdisc */
++	parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
++	if (strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
++		pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
++		err = -EINVAL;
++		goto err_init_wbfs;
++	}
++
++	/* Obtain the parent prio ceetm class */
++	parent_cl = ceetm_find(sch->parent, parent_qdisc);
++	parent_priv = qdisc_priv(parent_qdisc);
++
++	if (!parent_cl || parent_cl->type != CEETM_PRIO) {
++		pr_err("CEETM: wbfs ceetm qdiscs can be added only under a prio ceetm class\n");
++		err = -EINVAL;
++		goto err_init_wbfs;
++	}
++
++	if (!qopt->qcount || !qopt->qweight[0]) {
++		pr_err("CEETM: qcount and qweight are mandatory for a wbfs ceetm qdisc\n");
++		err = -EINVAL;
++		goto err_init_wbfs;
++	}
++
++	priv->shaped = parent_cl->shaped;
++
++	if (!priv->shaped && (qopt->cr || qopt->er)) {
++		pr_err("CEETM: CR/ER can be enabled only for shaped wbfs ceetm qdiscs\n");
++		err = -EINVAL;
++		goto err_init_wbfs;
++	}
++
++	if (priv->shaped && !(qopt->cr || qopt->er)) {
++		pr_err("CEETM: either CR or ER must be enabled for shaped wbfs ceetm qdiscs\n");
++		err = -EINVAL;
++		goto err_init_wbfs;
++	}
++
++	/* Obtain the parent root ceetm class */
++	root_cl = parent_priv->prio.parent;
++	if ((root_cl->root.wbfs_grp_a && root_cl->root.wbfs_grp_b) ||
++	    root_cl->root.wbfs_grp_large) {
++		pr_err("CEETM: no more wbfs classes are available\n");
++		err = -EINVAL;
++		goto err_init_wbfs;
++	}
++
++	if ((root_cl->root.wbfs_grp_a || root_cl->root.wbfs_grp_b) &&
++	    qopt->qcount == CEETM_MAX_WBFS_QCOUNT) {
++		pr_err("CEETM: only %d wbfs classes are available\n",
++		       CEETM_MIN_WBFS_QCOUNT);
++		err = -EINVAL;
++		goto err_init_wbfs;
++	}
++
++	priv->wbfs.parent = parent_cl;
++	parent_cl->prio.child = sch;
++
++	priv->wbfs.qcount = qopt->qcount;
++	priv->wbfs.cr = qopt->cr;
++	priv->wbfs.er = qopt->er;
++
++	channel = root_cl->root.ch;
++
++	/* Configure the hardware wbfs channel groups */
++	if (priv->wbfs.qcount == CEETM_MAX_WBFS_QCOUNT) {
++		/* Configure the large group A */
++		priv->wbfs.group_type = WBFS_GRP_LARGE;
++		small_group = false;
++		group_b = false;
++		prio_a = TC_H_MIN(parent_cl->common.classid) - 1;
++		prio_b = prio_a;
++
++	} else if (root_cl->root.wbfs_grp_a) {
++		/* Configure the group B */
++		priv->wbfs.group_type = WBFS_GRP_B;
++
++		err = qman_ceetm_channel_get_group(channel, &small_group,
++						   &prio_a, &prio_b);
++		if (err) {
++			pr_err(KBUILD_BASENAME " : %s : failed to get group details\n",
++			       __func__);
++			goto err_init_wbfs;
++		}
++
++		small_group = true;
++		group_b = true;
++		prio_b = TC_H_MIN(parent_cl->common.classid) - 1;
++		/* If group A isn't configured, configure it as group B */
++		prio_a = prio_a ?
: prio_b; ++ ++ } else { ++ /* Configure the small group A */ ++ priv->wbfs.group_type = WBFS_GRP_A; ++ ++ err = qman_ceetm_channel_get_group(channel, &small_group, ++ &prio_a, &prio_b); ++ if (err) { ++ pr_err(KBUILD_BASENAME " : %s : failed to get group details\n", ++ __func__); ++ goto err_init_wbfs; ++ } ++ ++ small_group = true; ++ group_b = false; ++ prio_a = TC_H_MIN(parent_cl->common.classid) - 1; ++ /* If group B isn't configured, configure it as group A */ ++ prio_b = prio_b ? : prio_a; ++ } ++ ++ err = qman_ceetm_channel_set_group(channel, small_group, prio_a, ++ prio_b); ++ if (err) ++ goto err_init_wbfs; ++ ++ if (priv->shaped) { ++ err = qman_ceetm_channel_set_group_cr_eligibility(channel, ++ group_b, ++ priv->wbfs.cr); ++ if (err) { ++ pr_err(KBUILD_BASENAME " : %s : failed to set group CR eligibility\n", ++ __func__); ++ goto err_init_wbfs; ++ } ++ ++ err = qman_ceetm_channel_set_group_er_eligibility(channel, ++ group_b, ++ priv->wbfs.er); ++ if (err) { ++ pr_err(KBUILD_BASENAME " : %s : failed to set group ER eligibility\n", ++ __func__); ++ goto err_init_wbfs; ++ } ++ } ++ ++ /* Create qcount child classes */ ++ for (i = 0; i < priv->wbfs.qcount; i++) { ++ child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL); ++ if (!child_cl) { ++ pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n", ++ __func__); ++ err = -ENOMEM; ++ goto err_init_wbfs; ++ } ++ ++ child_cl->wbfs.cstats = alloc_percpu(struct ceetm_class_stats); ++ if (!child_cl->wbfs.cstats) { ++ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n", ++ __func__); ++ err = -ENOMEM; ++ goto err_init_wbfs_cls; ++ } ++ ++ child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1)); ++ child_cl->refcnt = 1; ++ child_cl->parent = sch; ++ child_cl->type = CEETM_WBFS; ++ child_cl->shaped = priv->shaped; ++ child_cl->wbfs.fq = NULL; ++ child_cl->wbfs.cq = NULL; ++ child_cl->wbfs.weight = qopt->qweight[i]; ++ ++ if (priv->wbfs.group_type == WBFS_GRP_B) ++ id = WBFS_GRP_B_OFFSET + i; ++ else ++ id = WBFS_GRP_A_OFFSET + i; ++ ++ err = ceetm_config_wbfs_cls(child_cl, dev, channel, id, ++ priv->wbfs.group_type); ++ if (err) { ++ pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm wbfs class %X\n", ++ __func__, child_cl->common.classid); ++ goto err_init_wbfs_cls; ++ } ++ ++ /* Add class handle in Qdisc */ ++ ceetm_link_class(sch, &priv->clhash, &child_cl->common); ++ pr_debug(KBUILD_BASENAME " : %s : added ceetm wbfs class %X associated with CQ %d and CCG %d\n", ++ __func__, child_cl->common.classid, ++ child_cl->wbfs.cq->idx, child_cl->wbfs.ccg->idx); ++ } ++ ++ /* Signal the root class that a group has been configured */ ++ switch (priv->wbfs.group_type) { ++ case WBFS_GRP_LARGE: ++ root_cl->root.wbfs_grp_large = true; ++ break; ++ case WBFS_GRP_A: ++ root_cl->root.wbfs_grp_a = true; ++ break; ++ case WBFS_GRP_B: ++ root_cl->root.wbfs_grp_b = true; ++ break; ++ } ++ ++ return 0; ++ ++err_init_wbfs_cls: ++ ceetm_cls_destroy(sch, child_cl); ++err_init_wbfs: ++ ceetm_destroy(sch); ++ return err; ++} ++ ++/* Configure a generic ceetm qdisc */ ++static int ceetm_init(struct Qdisc *sch, struct nlattr *opt) ++{ ++ struct tc_ceetm_qopt *qopt; ++ struct nlattr *tb[TCA_CEETM_QOPS + 1]; ++ int ret; ++ struct ceetm_qdisc *priv = qdisc_priv(sch); ++ struct net_device *dev = qdisc_dev(sch); ++ ++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle); ++ ++ if (!netif_is_multiqueue(dev)) ++ return -EOPNOTSUPP; ++ ++ if (!opt) { ++ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__); ++ return -EINVAL; ++ } ++ ++ ret = 
nla_parse_nested(tb, TCA_CEETM_QOPS, opt, ceetm_policy); ++ if (ret < 0) { ++ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__); ++ return ret; ++ } ++ ++ if (!tb[TCA_CEETM_QOPS]) { ++ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__); ++ return -EINVAL; ++ } ++ ++ if (TC_H_MIN(sch->handle)) { ++ pr_err("CEETM: a qdisc should not have a minor\n"); ++ return -EINVAL; ++ } ++ ++ qopt = nla_data(tb[TCA_CEETM_QOPS]); ++ ++ /* Initialize the class hash list. Each qdisc has its own class hash */ ++ ret = qdisc_class_hash_init(&priv->clhash); ++ if (ret < 0) { ++ pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init failed\n", ++ __func__); ++ return ret; ++ } ++ ++ priv->type = qopt->type; ++ ++ switch (priv->type) { ++ case CEETM_ROOT: ++ ret = ceetm_init_root(sch, priv, qopt); ++ break; ++ case CEETM_PRIO: ++ ret = ceetm_init_prio(sch, priv, qopt); ++ break; ++ case CEETM_WBFS: ++ ret = ceetm_init_wbfs(sch, priv, qopt); ++ break; ++ default: ++ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__); ++ ceetm_destroy(sch); ++ ret = -EINVAL; ++ } ++ ++ return ret; ++} ++ ++/* Edit a root ceetm qdisc */ ++static int ceetm_change_root(struct Qdisc *sch, struct ceetm_qdisc *priv, ++ struct net_device *dev, ++ struct tc_ceetm_qopt *qopt) ++{ ++ int err = 0; ++ u64 bps; ++ ++ if (priv->shaped != (bool)qopt->shaped) { ++ pr_err("CEETM: qdisc %X is %s\n", sch->handle, ++ priv->shaped ? "shaped" : "unshaped"); ++ return -EINVAL; ++ } ++ ++ /* Nothing to modify for unshaped qdiscs */ ++ if (!priv->shaped) ++ return 0; ++ ++ /* Configure the LNI shaper */ ++ if (priv->root.overhead != qopt->overhead) { ++ err = qman_ceetm_lni_enable_shaper(priv->root.lni, 1, ++ qopt->overhead); ++ if (err) ++ goto change_err; ++ priv->root.overhead = qopt->overhead; ++ } ++ ++ if (priv->root.rate != qopt->rate) { ++ bps = qopt->rate << 3; /* Bps -> bps */ ++ err = qman_ceetm_lni_set_commit_rate_bps(priv->root.lni, bps, ++ dev->mtu); ++ if (err) ++ goto change_err; ++ priv->root.rate = qopt->rate; ++ } ++ ++ if (priv->root.ceil != qopt->ceil) { ++ bps = qopt->ceil << 3; /* Bps -> bps */ ++ err = qman_ceetm_lni_set_excess_rate_bps(priv->root.lni, bps, ++ dev->mtu); ++ if (err) ++ goto change_err; ++ priv->root.ceil = qopt->ceil; ++ } ++ ++ return 0; ++ ++change_err: ++ pr_err(KBUILD_BASENAME " : %s : failed to configure the root ceetm qdisc %X\n", ++ __func__, sch->handle); ++ return err; ++} ++ ++/* Edit a wbfs ceetm qdisc */ ++static int ceetm_change_wbfs(struct Qdisc *sch, struct ceetm_qdisc *priv, ++ struct tc_ceetm_qopt *qopt) ++{ ++ int err; ++ bool group_b; ++ struct qm_ceetm_channel *channel; ++ struct ceetm_class *prio_class, *root_class; ++ struct ceetm_qdisc *prio_qdisc; ++ ++ if (qopt->qcount) { ++ pr_err("CEETM: the qcount can not be modified\n"); ++ return -EINVAL; ++ } ++ ++ if (qopt->qweight[0]) { ++ pr_err("CEETM: the qweight can be modified through the wbfs classes\n"); ++ return -EINVAL; ++ } ++ ++ if (!priv->shaped && (qopt->cr || qopt->er)) { ++ pr_err("CEETM: CR/ER can be enabled only for shaped wbfs ceetm qdiscs\n"); ++ return -EINVAL; ++ } ++ ++ if (priv->shaped && !(qopt->cr || qopt->er)) { ++ pr_err("CEETM: either CR or ER must be enabled for shaped wbfs ceetm qdiscs\n"); ++ return -EINVAL; ++ } ++ ++ /* Nothing to modify for unshaped qdiscs */ ++ if (!priv->shaped) ++ return 0; ++ ++ prio_class = priv->wbfs.parent; ++ prio_qdisc = qdisc_priv(prio_class->parent); ++ root_class = prio_qdisc->prio.parent; ++ channel = root_class->root.ch; ++ group_b = priv->wbfs.group_type == 
WBFS_GRP_B; ++ ++ if (qopt->cr != priv->wbfs.cr) { ++ err = qman_ceetm_channel_set_group_cr_eligibility(channel, ++ group_b, ++ qopt->cr); ++ if (err) ++ goto change_err; ++ priv->wbfs.cr = qopt->cr; ++ } ++ ++ if (qopt->er != priv->wbfs.er) { ++ err = qman_ceetm_channel_set_group_er_eligibility(channel, ++ group_b, ++ qopt->er); ++ if (err) ++ goto change_err; ++ priv->wbfs.er = qopt->er; ++ } ++ ++ return 0; ++ ++change_err: ++ pr_err(KBUILD_BASENAME " : %s : failed to configure the wbfs ceetm qdisc %X\n", ++ __func__, sch->handle); ++ return err; ++} ++ ++/* Edit a ceetm qdisc */ ++static int ceetm_change(struct Qdisc *sch, struct nlattr *opt) ++{ ++ struct tc_ceetm_qopt *qopt; ++ struct nlattr *tb[TCA_CEETM_QOPS + 1]; ++ int ret; ++ struct ceetm_qdisc *priv = qdisc_priv(sch); ++ struct net_device *dev = qdisc_dev(sch); ++ ++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle); ++ ++ ret = nla_parse_nested(tb, TCA_CEETM_QOPS, opt, ceetm_policy); ++ if (ret < 0) { ++ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__); ++ return ret; ++ } ++ ++ if (!tb[TCA_CEETM_QOPS]) { ++ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__); ++ return -EINVAL; ++ } ++ ++ if (TC_H_MIN(sch->handle)) { ++ pr_err("CEETM: a qdisc should not have a minor\n"); ++ return -EINVAL; ++ } ++ ++ qopt = nla_data(tb[TCA_CEETM_QOPS]); ++ ++ if (priv->type != qopt->type) { ++ pr_err("CEETM: qdisc %X is not of the provided type\n", ++ sch->handle); ++ return -EINVAL; ++ } ++ ++ switch (priv->type) { ++ case CEETM_ROOT: ++ ret = ceetm_change_root(sch, priv, dev, qopt); ++ break; ++ case CEETM_PRIO: ++ pr_err("CEETM: prio qdiscs can not be modified\n"); ++ ret = -EINVAL; ++ break; ++ case CEETM_WBFS: ++ ret = ceetm_change_wbfs(sch, priv, qopt); ++ break; ++ default: ++ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__); ++ ret = -EINVAL; ++ } ++ ++ return ret; ++} ++ ++/* Attach the underlying pfifo qdiscs */ ++static void ceetm_attach(struct Qdisc *sch) ++{ ++ struct net_device *dev = qdisc_dev(sch); ++ struct ceetm_qdisc *priv = qdisc_priv(sch); ++ struct Qdisc *qdisc, *old_qdisc; ++ unsigned int i; ++ ++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle); ++ ++ for (i = 0; i < dev->num_tx_queues; i++) { ++ qdisc = priv->root.qdiscs[i]; ++ old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc); ++ if (old_qdisc) ++ qdisc_destroy(old_qdisc); ++ } ++} ++ ++static unsigned long ceetm_cls_get(struct Qdisc *sch, u32 classid) ++{ ++ struct ceetm_class *cl; ++ ++ pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n", ++ __func__, classid, sch->handle); ++ cl = ceetm_find(classid, sch); ++ ++ if (cl) ++ cl->refcnt++; /* Will decrement in put() */ ++ return (unsigned long)cl; ++} ++ ++static void ceetm_cls_put(struct Qdisc *sch, unsigned long arg) ++{ ++ struct ceetm_class *cl = (struct ceetm_class *)arg; ++ ++ pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n", ++ __func__, cl->common.classid, sch->handle); ++ cl->refcnt--; ++ ++ if (cl->refcnt == 0) ++ ceetm_cls_destroy(sch, cl); ++} ++ ++static int ceetm_cls_change_root(struct ceetm_class *cl, ++ struct tc_ceetm_copt *copt, ++ struct net_device *dev) ++{ ++ int err; ++ u64 bps; ++ ++ if ((bool)copt->shaped != cl->shaped) { ++ pr_err("CEETM: class %X is %s\n", cl->common.classid, ++ cl->shaped ? 
"shaped" : "unshaped"); ++ return -EINVAL; ++ } ++ ++ if (cl->shaped && cl->root.rate != copt->rate) { ++ bps = copt->rate << 3; /* Bps -> bps */ ++ err = qman_ceetm_channel_set_commit_rate_bps(cl->root.ch, bps, ++ dev->mtu); ++ if (err) ++ goto change_cls_err; ++ cl->root.rate = copt->rate; ++ } ++ ++ if (cl->shaped && cl->root.ceil != copt->ceil) { ++ bps = copt->ceil << 3; /* Bps -> bps */ ++ err = qman_ceetm_channel_set_excess_rate_bps(cl->root.ch, bps, ++ dev->mtu); ++ if (err) ++ goto change_cls_err; ++ cl->root.ceil = copt->ceil; ++ } ++ ++ if (!cl->shaped && cl->root.tbl != copt->tbl) { ++ err = qman_ceetm_channel_set_weight(cl->root.ch, copt->tbl); ++ if (err) ++ goto change_cls_err; ++ cl->root.tbl = copt->tbl; ++ } ++ ++ return 0; ++ ++change_cls_err: ++ pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm root class %X\n", ++ __func__, cl->common.classid); ++ return err; ++} ++ ++static int ceetm_cls_change_prio(struct ceetm_class *cl, ++ struct tc_ceetm_copt *copt) ++{ ++ int err; ++ ++ if (!cl->shaped && (copt->cr || copt->er)) { ++ pr_err("CEETM: only shaped classes can have CR and ER enabled\n"); ++ return -EINVAL; ++ } ++ ++ if (cl->prio.cr != (bool)copt->cr) { ++ err = qman_ceetm_channel_set_cq_cr_eligibility( ++ cl->prio.cq->parent, ++ cl->prio.cq->idx, ++ copt->cr); ++ if (err) ++ goto change_cls_err; ++ cl->prio.cr = copt->cr; ++ } ++ ++ if (cl->prio.er != (bool)copt->er) { ++ err = qman_ceetm_channel_set_cq_er_eligibility( ++ cl->prio.cq->parent, ++ cl->prio.cq->idx, ++ copt->er); ++ if (err) ++ goto change_cls_err; ++ cl->prio.er = copt->er; ++ } ++ ++ return 0; ++ ++change_cls_err: ++ pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm prio class %X\n", ++ __func__, cl->common.classid); ++ return err; ++} ++ ++static int ceetm_cls_change_wbfs(struct ceetm_class *cl, ++ struct tc_ceetm_copt *copt) ++{ ++ int err; ++ ++ if (copt->weight != cl->wbfs.weight) { ++ /* Configure the CQ weight: real number multiplied by 100 to ++ * get rid of the fraction ++ */ ++ err = qman_ceetm_set_queue_weight_in_ratio(cl->wbfs.cq, ++ copt->weight * 100); ++ ++ if (err) { ++ pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm wbfs class %X\n", ++ __func__, cl->common.classid); ++ return err; ++ } ++ ++ cl->wbfs.weight = copt->weight; ++ } ++ ++ return 0; ++} ++ ++/* Add a ceetm root class or configure a ceetm root/prio/wbfs class */ ++static int ceetm_cls_change(struct Qdisc *sch, u32 classid, u32 parentid, ++ struct nlattr **tca, unsigned long *arg) ++{ ++ int err; ++ u64 bps; ++ struct ceetm_qdisc *priv; ++ struct ceetm_class *cl = (struct ceetm_class *)*arg; ++ struct nlattr *opt = tca[TCA_OPTIONS]; ++ struct nlattr *tb[__TCA_CEETM_MAX]; ++ struct tc_ceetm_copt *copt; ++ struct qm_ceetm_channel *channel; ++ struct net_device *dev = qdisc_dev(sch); ++ ++ pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n", ++ __func__, classid, sch->handle); ++ ++ if (strcmp(sch->ops->id, ceetm_qdisc_ops.id)) { ++ pr_err("CEETM: a ceetm class can not be attached to other qdisc/class types\n"); ++ return -EINVAL; ++ } ++ ++ priv = qdisc_priv(sch); ++ ++ if (!opt) { ++ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__); ++ return -EINVAL; ++ } ++ ++ if (!cl && sch->handle != parentid) { ++ pr_err("CEETM: classes can be attached to the root ceetm qdisc only\n"); ++ return -EINVAL; ++ } ++ ++ if (!cl && priv->type != CEETM_ROOT) { ++ pr_err("CEETM: only root ceetm classes can be attached to the root ceetm qdisc\n"); ++ return -EINVAL; ++ } ++ ++ err = 
nla_parse_nested(tb, TCA_CEETM_COPT, opt, ceetm_policy); ++ if (err < 0) { ++ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__); ++ return -EINVAL; ++ } ++ ++ if (!tb[TCA_CEETM_COPT]) { ++ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__); ++ return -EINVAL; ++ } ++ ++ if (TC_H_MIN(classid) >= PFIFO_MIN_OFFSET) { ++ pr_err("CEETM: only minors 0x01 to 0x20 can be used for ceetm root classes\n"); ++ return -EINVAL; ++ } ++ ++ copt = nla_data(tb[TCA_CEETM_COPT]); ++ ++ /* Configure an existing ceetm class */ ++ if (cl) { ++ if (copt->type != cl->type) { ++ pr_err("CEETM: class %X is not of the provided type\n", ++ cl->common.classid); ++ return -EINVAL; ++ } ++ ++ switch (copt->type) { ++ case CEETM_ROOT: ++ return ceetm_cls_change_root(cl, copt, dev); ++ ++ case CEETM_PRIO: ++ return ceetm_cls_change_prio(cl, copt); ++ ++ case CEETM_WBFS: ++ return ceetm_cls_change_wbfs(cl, copt); ++ ++ default: ++ pr_err(KBUILD_BASENAME " : %s : invalid class\n", ++ __func__); ++ return -EINVAL; ++ } ++ } ++ ++ /* Add a new root ceetm class */ ++ if (copt->type != CEETM_ROOT) { ++ pr_err("CEETM: only root ceetm classes can be attached to the root ceetm qdisc\n"); ++ return -EINVAL; ++ } ++ ++ if (copt->shaped && !priv->shaped) { ++ pr_err("CEETM: can not add a shaped ceetm root class under an unshaped ceetm root qdisc\n"); ++ return -EINVAL; ++ } ++ ++ cl = kzalloc(sizeof(*cl), GFP_KERNEL); ++ if (!cl) ++ return -ENOMEM; ++ ++ cl->type = copt->type; ++ cl->shaped = copt->shaped; ++ cl->root.rate = copt->rate; ++ cl->root.ceil = copt->ceil; ++ cl->root.tbl = copt->tbl; ++ ++ cl->common.classid = classid; ++ cl->refcnt = 1; ++ cl->parent = sch; ++ cl->root.child = NULL; ++ cl->root.wbfs_grp_a = false; ++ cl->root.wbfs_grp_b = false; ++ cl->root.wbfs_grp_large = false; ++ ++ /* Claim a CEETM channel */ ++ err = qman_ceetm_channel_claim(&channel, priv->root.lni); ++ if (err) { ++ pr_err(KBUILD_BASENAME " : %s : failed to claim a channel\n", ++ __func__); ++ goto claim_err; ++ } ++ ++ cl->root.ch = channel; ++ ++ if (cl->shaped) { ++ /* Configure the channel shaper */ ++ err = qman_ceetm_channel_enable_shaper(channel, 1); ++ if (err) ++ goto channel_err; ++ ++ bps = cl->root.rate << 3; /* Bps -> bps */ ++ err = qman_ceetm_channel_set_commit_rate_bps(channel, bps, ++ dev->mtu); ++ if (err) ++ goto channel_err; ++ ++ bps = cl->root.ceil << 3; /* Bps -> bps */ ++ err = qman_ceetm_channel_set_excess_rate_bps(channel, bps, ++ dev->mtu); ++ if (err) ++ goto channel_err; ++ ++ } else { ++ /* Configure the uFQ algorithm */ ++ err = qman_ceetm_channel_set_weight(channel, cl->root.tbl); ++ if (err) ++ goto channel_err; ++ } ++ ++ /* Add class handle in Qdisc */ ++ ceetm_link_class(sch, &priv->clhash, &cl->common); ++ ++ pr_debug(KBUILD_BASENAME " : %s : configured class %X associated with channel %d\n", ++ __func__, classid, channel->idx); ++ *arg = (unsigned long)cl; ++ return 0; ++ ++channel_err: ++ pr_err(KBUILD_BASENAME " : %s : failed to configure the channel %d\n", ++ __func__, channel->idx); ++ if (qman_ceetm_channel_release(channel)) ++ pr_err(KBUILD_BASENAME " : %s : failed to release the channel %d\n", ++ __func__, channel->idx); ++claim_err: ++ kfree(cl); ++ return err; ++} ++ ++static void ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg) ++{ ++ struct ceetm_qdisc *priv = qdisc_priv(sch); ++ struct ceetm_class *cl; ++ unsigned int i; ++ ++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle); ++ ++ if (arg->stop) ++ return; ++ ++ for (i = 0; i < priv->clhash.hashsize; i++) 
{ ++ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) { ++ if (arg->count < arg->skip) { ++ arg->count++; ++ continue; ++ } ++ if (arg->fn(sch, (unsigned long)cl, arg) < 0) { ++ arg->stop = 1; ++ return; ++ } ++ arg->count++; ++ } ++ } ++} ++ ++static int ceetm_cls_dump(struct Qdisc *sch, unsigned long arg, ++ struct sk_buff *skb, struct tcmsg *tcm) ++{ ++ struct ceetm_class *cl = (struct ceetm_class *)arg; ++ struct nlattr *nest; ++ struct tc_ceetm_copt copt; ++ ++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", ++ __func__, cl->common.classid, sch->handle); ++ ++ sch_tree_lock(sch); ++ ++ tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle; ++ tcm->tcm_handle = cl->common.classid; ++ ++ memset(&copt, 0, sizeof(copt)); ++ ++ copt.shaped = cl->shaped; ++ copt.type = cl->type; ++ ++ switch (cl->type) { ++ case CEETM_ROOT: ++ if (cl->root.child) ++ tcm->tcm_info = cl->root.child->handle; ++ ++ copt.rate = cl->root.rate; ++ copt.ceil = cl->root.ceil; ++ copt.tbl = cl->root.tbl; ++ break; ++ ++ case CEETM_PRIO: ++ if (cl->prio.child) ++ tcm->tcm_info = cl->prio.child->handle; ++ ++ copt.cr = cl->prio.cr; ++ copt.er = cl->prio.er; ++ break; ++ ++ case CEETM_WBFS: ++ copt.weight = cl->wbfs.weight; ++ break; ++ } ++ ++ nest = nla_nest_start(skb, TCA_OPTIONS); ++ if (!nest) ++ goto nla_put_failure; ++ if (nla_put(skb, TCA_CEETM_COPT, sizeof(copt), &copt)) ++ goto nla_put_failure; ++ nla_nest_end(skb, nest); ++ sch_tree_unlock(sch); ++ return skb->len; ++ ++nla_put_failure: ++ sch_tree_unlock(sch); ++ nla_nest_cancel(skb, nest); ++ return -EMSGSIZE; ++} ++ ++static int ceetm_cls_delete(struct Qdisc *sch, unsigned long arg) ++{ ++ struct ceetm_qdisc *priv = qdisc_priv(sch); ++ struct ceetm_class *cl = (struct ceetm_class *)arg; ++ ++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", ++ __func__, cl->common.classid, sch->handle); ++ ++ sch_tree_lock(sch); ++ qdisc_class_hash_remove(&priv->clhash, &cl->common); ++ cl->refcnt--; ++ ++ /* The refcnt should be at least 1 since we have incremented it in ++ * get(). Will decrement again in put() where we will call destroy() ++ * to actually free the memory if it reaches 0. 
++ */ ++ WARN_ON(cl->refcnt == 0); ++ ++ sch_tree_unlock(sch); ++ return 0; ++} ++ ++/* Get the class' child qdisc, if any */ ++static struct Qdisc *ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg) ++{ ++ struct ceetm_class *cl = (struct ceetm_class *)arg; ++ ++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", ++ __func__, cl->common.classid, sch->handle); ++ ++ switch (cl->type) { ++ case CEETM_ROOT: ++ return cl->root.child; ++ ++ case CEETM_PRIO: ++ return cl->prio.child; ++ } ++ ++ return NULL; ++} ++ ++static int ceetm_cls_graft(struct Qdisc *sch, unsigned long arg, ++ struct Qdisc *new, struct Qdisc **old) ++{ ++ if (new && strcmp(new->ops->id, ceetm_qdisc_ops.id)) { ++ pr_err("CEETM: only ceetm qdiscs can be attached to ceetm classes\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ return 0; ++} ++ ++static int ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg, ++ struct gnet_dump *d) ++{ ++ unsigned int i; ++ struct ceetm_class *cl = (struct ceetm_class *)arg; ++ struct gnet_stats_basic_packed tmp_bstats; ++ struct ceetm_class_stats *cstats = NULL; ++ struct qm_ceetm_cq *cq = NULL; ++ struct tc_ceetm_xstats xstats; ++ ++ memset(&xstats, 0, sizeof(xstats)); ++ memset(&tmp_bstats, 0, sizeof(tmp_bstats)); ++ ++ switch (cl->type) { ++ case CEETM_ROOT: ++ return 0; ++ case CEETM_PRIO: ++ cq = cl->prio.cq; ++ break; ++ case CEETM_WBFS: ++ cq = cl->wbfs.cq; ++ break; ++ } ++ ++ for_each_online_cpu(i) { ++ switch (cl->type) { ++ case CEETM_PRIO: ++ cstats = per_cpu_ptr(cl->prio.cstats, i); ++ break; ++ case CEETM_WBFS: ++ cstats = per_cpu_ptr(cl->wbfs.cstats, i); ++ break; ++ } ++ ++ if (cstats) { ++ xstats.ern_drop_count += cstats->ern_drop_count; ++ xstats.congested_count += cstats->congested_count; ++ tmp_bstats.bytes += cstats->bstats.bytes; ++ tmp_bstats.packets += cstats->bstats.packets; ++ } ++ } ++ ++ if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), ++ d, NULL, &tmp_bstats) < 0) ++ return -1; ++ ++ if (cq && qman_ceetm_cq_get_dequeue_statistics(cq, 0, ++ &xstats.frame_count, ++ &xstats.byte_count)) ++ return -1; ++ ++ return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); ++} ++ ++static struct tcf_proto **ceetm_tcf_chain(struct Qdisc *sch, unsigned long arg) ++{ ++ struct ceetm_qdisc *priv = qdisc_priv(sch); ++ struct ceetm_class *cl = (struct ceetm_class *)arg; ++ struct tcf_proto **fl = cl ? &cl->filter_list : &priv->filter_list; ++ ++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__, ++ cl ? cl->common.classid : 0, sch->handle); ++ return fl; ++} ++ ++static unsigned long ceetm_tcf_bind(struct Qdisc *sch, unsigned long parent, ++ u32 classid) ++{ ++ struct ceetm_class *cl = ceetm_find(classid, sch); ++ ++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__, ++ cl ? cl->common.classid : 0, sch->handle); ++ return (unsigned long)cl; ++} ++ ++static void ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg) ++{ ++ struct ceetm_class *cl = (struct ceetm_class *)arg; ++ ++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__, ++ cl ? 
cl->common.classid : 0, sch->handle); ++} ++ ++const struct Qdisc_class_ops ceetm_cls_ops = { ++ .graft = ceetm_cls_graft, ++ .leaf = ceetm_cls_leaf, ++ .get = ceetm_cls_get, ++ .put = ceetm_cls_put, ++ .change = ceetm_cls_change, ++ .delete = ceetm_cls_delete, ++ .walk = ceetm_cls_walk, ++ .tcf_chain = ceetm_tcf_chain, ++ .bind_tcf = ceetm_tcf_bind, ++ .unbind_tcf = ceetm_tcf_unbind, ++ .dump = ceetm_cls_dump, ++ .dump_stats = ceetm_cls_dump_stats, ++}; ++ ++struct Qdisc_ops ceetm_qdisc_ops __read_mostly = { ++ .id = "ceetm", ++ .priv_size = sizeof(struct ceetm_qdisc), ++ .cl_ops = &ceetm_cls_ops, ++ .init = ceetm_init, ++ .destroy = ceetm_destroy, ++ .change = ceetm_change, ++ .dump = ceetm_dump, ++ .attach = ceetm_attach, ++ .owner = THIS_MODULE, ++}; ++ ++/* Run the filters and classifiers attached to the qdisc on the provided skb */ ++static struct ceetm_class *ceetm_classify(struct sk_buff *skb, ++ struct Qdisc *sch, int *qerr, ++ bool *act_drop) ++{ ++ struct ceetm_qdisc *priv = qdisc_priv(sch); ++ struct ceetm_class *cl = NULL, *wbfs_cl; ++ struct tcf_result res; ++ struct tcf_proto *tcf; ++ int result; ++ ++ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; ++ tcf = priv->filter_list; ++ while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) { ++#ifdef CONFIG_NET_CLS_ACT ++ switch (result) { ++ case TC_ACT_QUEUED: ++ case TC_ACT_STOLEN: ++ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; ++ case TC_ACT_SHOT: ++ /* No valid class found due to action */ ++ *act_drop = true; ++ return NULL; ++ } ++#endif ++ cl = (void *)res.class; ++ if (!cl) { ++ if (res.classid == sch->handle) { ++ /* The filter leads to the qdisc */ ++ /* TODO default qdisc */ ++ return NULL; ++ } ++ ++ cl = ceetm_find(res.classid, sch); ++ if (!cl) ++ /* The filter leads to an invalid class */ ++ break; ++ } ++ ++ /* The class might have its own filters attached */ ++ tcf = cl->filter_list; ++ } ++ ++ if (!cl) { ++ /* No valid class found */ ++ /* TODO default qdisc */ ++ return NULL; ++ } ++ ++ switch (cl->type) { ++ case CEETM_ROOT: ++ if (cl->root.child) { ++ /* Run the prio qdisc classifiers */ ++ return ceetm_classify(skb, cl->root.child, qerr, ++ act_drop); ++ } else { ++ /* The root class does not have a child prio qdisc */ ++ /* TODO default qdisc */ ++ return NULL; ++ } ++ case CEETM_PRIO: ++ if (cl->prio.child) { ++ /* If filters lead to a wbfs class, return it. ++ * Otherwise, return the prio class ++ */ ++ wbfs_cl = ceetm_classify(skb, cl->prio.child, qerr, ++ act_drop); ++ /* A NULL result might indicate either an erroneous ++ * filter, or no filters at all. We will assume the ++ * latter ++ */ ++ return wbfs_cl ? 
: cl; ++ } ++ } ++ ++ /* For wbfs and childless prio classes, return the class directly */ ++ return cl; ++} ++ ++int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev) ++{ ++ int ret; ++ bool act_drop = false; ++ struct Qdisc *sch = net_dev->qdisc; ++ struct ceetm_class *cl; ++ struct dpa_priv_s *priv_dpa; ++ struct qman_fq *egress_fq, *conf_fq; ++ struct ceetm_qdisc *priv = qdisc_priv(sch); ++ struct ceetm_qdisc_stats *qstats = this_cpu_ptr(priv->root.qstats); ++ struct ceetm_class_stats *cstats; ++ const int queue_mapping = dpa_get_queue_mapping(skb); ++ spinlock_t *root_lock = qdisc_lock(sch); ++ ++ spin_lock(root_lock); ++ cl = ceetm_classify(skb, sch, &ret, &act_drop); ++ spin_unlock(root_lock); ++ ++#ifdef CONFIG_NET_CLS_ACT ++ if (act_drop) { ++ if (ret & __NET_XMIT_BYPASS) ++ qstats->drops++; ++ goto drop; ++ } ++#endif ++ /* TODO default class */ ++ if (unlikely(!cl)) { ++ qstats->drops++; ++ goto drop; ++ } ++ ++ priv_dpa = netdev_priv(net_dev); ++ conf_fq = priv_dpa->conf_fqs[queue_mapping]; ++ ++ /* Choose the proper tx fq and update the basic stats (bytes and ++ * packets sent by the class) ++ */ ++ switch (cl->type) { ++ case CEETM_PRIO: ++ egress_fq = &cl->prio.fq->fq; ++ cstats = this_cpu_ptr(cl->prio.cstats); ++ break; ++ case CEETM_WBFS: ++ egress_fq = &cl->wbfs.fq->fq; ++ cstats = this_cpu_ptr(cl->wbfs.cstats); ++ break; ++ default: ++ qstats->drops++; ++ goto drop; ++ } ++ ++ bstats_update(&cstats->bstats, skb); ++ return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq); ++ ++drop: ++ dev_kfree_skb_any(skb); ++ return NET_XMIT_SUCCESS; ++} ++ ++static int __init ceetm_register(void) ++{ ++ int _errno = 0; ++ ++ pr_info(KBUILD_MODNAME ": " DPA_CEETM_DESCRIPTION "\n"); ++ ++ _errno = register_qdisc(&ceetm_qdisc_ops); ++ if (unlikely(_errno)) ++ pr_err(KBUILD_MODNAME ++ ": %s:%hu:%s(): register_qdisc() = %d\n", ++ KBUILD_BASENAME ".c", __LINE__, __func__, _errno); ++ ++ return _errno; ++} ++ ++static void __exit ceetm_unregister(void) ++{ ++ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", ++ KBUILD_BASENAME ".c", __func__); ++ ++ unregister_qdisc(&ceetm_qdisc_ops); ++} ++ ++module_init(ceetm_register); ++module_exit(ceetm_unregister); +--- /dev/null ++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h +@@ -0,0 +1,237 @@ ++/* Copyright 2008-2016 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifndef __DPAA_ETH_CEETM_H ++#define __DPAA_ETH_CEETM_H ++ ++#include ++#include ++#include ++#include ++ ++#include "mac.h" ++#include "dpaa_eth_common.h" ++ ++/* Mask to determine the sub-portal id from a channel number */ ++#define CHANNEL_SP_MASK 0x1f ++/* The number of the last channel that services DCP0, connected to FMan 0. ++ * Value validated for B4 and T series platforms. ++ */ ++#define DCP0_MAX_CHANNEL 0x80f ++/* A2V=1 - field A2 is valid ++ * A0V=1 - field A0 is valid - enables frame confirmation ++ * OVOM=1 - override operation mode bits with values from A2 ++ * EBD=1 - external buffers are deallocated at the end of the FMan flow ++ * NL=0 - the BMI releases all the internal buffers ++ */ ++#define CEETM_CONTEXT_A 0x1a00000080000000 ++/* The ratio between the superior and inferior congestion state thresholds. The ++ * lower threshold is set to 7/8 of the superior one (as the default for WQ ++ * scheduling). ++ */ ++#define CEETM_CCGR_RATIO 0.875 ++/* For functional purposes, there are num_tx_queues pfifo qdiscs through which ++ * frames reach the driver. Their handles start from 1:21. Handles 1:1 to 1:20 ++ * are reserved for the maximum 32 CEETM channels (majors and minors are in ++ * hex). ++ */ ++#define PFIFO_MIN_OFFSET 0x21 ++ ++/* A maximum of 8 CQs can be linked to a CQ channel or to a WBFS scheduler. */ ++#define CEETM_MAX_PRIO_QCOUNT 8 ++#define CEETM_MAX_WBFS_QCOUNT 8 ++#define CEETM_MIN_WBFS_QCOUNT 4 ++ ++/* The id offsets of the CQs belonging to WBFS groups (ids 8-11/15 for group A ++ * and/or 12-15 for group B). 
++ */ ++#define WBFS_GRP_A_OFFSET 8 ++#define WBFS_GRP_B_OFFSET 12 ++ ++#define WBFS_GRP_A 1 ++#define WBFS_GRP_B 2 ++#define WBFS_GRP_LARGE 3 ++ ++enum { ++ TCA_CEETM_UNSPEC, ++ TCA_CEETM_COPT, ++ TCA_CEETM_QOPS, ++ __TCA_CEETM_MAX, ++}; ++ ++/* CEETM configuration types */ ++enum { ++ CEETM_ROOT = 1, ++ CEETM_PRIO, ++ CEETM_WBFS ++}; ++ ++#define TCA_CEETM_MAX (__TCA_CEETM_MAX - 1) ++extern const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1]; ++ ++struct ceetm_class; ++struct ceetm_qdisc_stats; ++struct ceetm_class_stats; ++ ++struct ceetm_fq { ++ struct qman_fq fq; ++ struct net_device *net_dev; ++ struct ceetm_class *ceetm_cls; ++}; ++ ++struct root_q { ++ struct Qdisc **qdiscs; ++ __u16 overhead; ++ __u32 rate; ++ __u32 ceil; ++ struct qm_ceetm_sp *sp; ++ struct qm_ceetm_lni *lni; ++ struct ceetm_qdisc_stats __percpu *qstats; ++}; ++ ++struct prio_q { ++ __u16 qcount; ++ struct ceetm_class *parent; ++}; ++ ++struct wbfs_q { ++ __u16 qcount; ++ int group_type; ++ struct ceetm_class *parent; ++ __u16 cr; ++ __u16 er; ++}; ++ ++struct ceetm_qdisc { ++ int type; /* LNI/CHNL/WBFS */ ++ bool shaped; ++ union { ++ struct root_q root; ++ struct prio_q prio; ++ struct wbfs_q wbfs; ++ }; ++ struct Qdisc_class_hash clhash; ++ struct tcf_proto *filter_list; /* qdisc attached filters */ ++}; ++ ++/* CEETM Qdisc configuration parameters */ ++struct tc_ceetm_qopt { ++ __u32 type; ++ __u16 shaped; ++ __u16 qcount; ++ __u16 overhead; ++ __u32 rate; ++ __u32 ceil; ++ __u16 cr; ++ __u16 er; ++ __u8 qweight[CEETM_MAX_WBFS_QCOUNT]; ++}; ++ ++struct root_c { ++ unsigned int rate; ++ unsigned int ceil; ++ unsigned int tbl; ++ bool wbfs_grp_a; ++ bool wbfs_grp_b; ++ bool wbfs_grp_large; ++ struct Qdisc *child; ++ struct qm_ceetm_channel *ch; ++}; ++ ++struct prio_c { ++ bool cr; ++ bool er; ++ struct ceetm_fq *fq; /* Hardware FQ instance Handle */ ++ struct qm_ceetm_lfq *lfq; ++ struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */ ++ struct qm_ceetm_ccg *ccg; ++ /* only one wbfs can be linked to one priority CQ */ ++ struct Qdisc *child; ++ struct ceetm_class_stats __percpu *cstats; ++}; ++ ++struct wbfs_c { ++ __u8 weight; /* The weight of the class between 1 and 248 */ ++ struct ceetm_fq *fq; /* Hardware FQ instance Handle */ ++ struct qm_ceetm_lfq *lfq; ++ struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */ ++ struct qm_ceetm_ccg *ccg; ++ struct ceetm_class_stats __percpu *cstats; ++}; ++ ++struct ceetm_class { ++ struct Qdisc_class_common common; ++ int refcnt; /* usage count of this class */ ++ struct tcf_proto *filter_list; /* class attached filters */ ++ struct Qdisc *parent; ++ bool shaped; ++ int type; /* ROOT/PRIO/WBFS */ ++ union { ++ struct root_c root; ++ struct prio_c prio; ++ struct wbfs_c wbfs; ++ }; ++}; ++ ++/* CEETM Class configuration parameters */ ++struct tc_ceetm_copt { ++ __u32 type; ++ __u16 shaped; ++ __u32 rate; ++ __u32 ceil; ++ __u16 tbl; ++ __u16 cr; ++ __u16 er; ++ __u8 weight; ++}; ++ ++/* CEETM stats */ ++struct ceetm_qdisc_stats { ++ __u32 drops; ++}; ++ ++struct ceetm_class_stats { ++ /* Software counters */ ++ struct gnet_stats_basic_packed bstats; ++ __u32 ern_drop_count; ++ __u32 congested_count; ++}; ++ ++struct tc_ceetm_xstats { ++ __u32 ern_drop_count; ++ __u32 congested_count; ++ /* Hardware counters */ ++ __u64 frame_count; ++ __u64 byte_count; ++}; ++ ++int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev); ++#endif +--- /dev/null ++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c +@@ -0,0 +1,1811 @@ ++/* 
Copyright 2008-2013 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include /* vlan_eth_hdr */ ++#include "dpaa_eth.h" ++#include "dpaa_eth_common.h" ++#ifdef CONFIG_FSL_DPAA_1588 ++#include "dpaa_1588.h" ++#endif ++#ifdef CONFIG_FSL_DPAA_DBG_LOOP ++#include "dpaa_debugfs.h" ++#endif /* CONFIG_FSL_DPAA_DBG_LOOP */ ++#include "mac.h" ++ ++/* Size in bytes of the FQ taildrop threshold */ ++#define DPA_FQ_TD 0x200000 ++ ++#ifdef CONFIG_PTP_1588_CLOCK_DPAA ++struct ptp_priv_s ptp_priv; ++#endif ++ ++static struct dpa_bp *dpa_bp_array[64]; ++ ++int dpa_max_frm; ++EXPORT_SYMBOL(dpa_max_frm); ++ ++int dpa_rx_extra_headroom; ++EXPORT_SYMBOL(dpa_rx_extra_headroom); ++ ++int dpa_num_cpus = NR_CPUS; ++ ++static const struct fqid_cell tx_confirm_fqids[] = { ++ {0, DPAA_ETH_TX_QUEUES} ++}; ++ ++static struct fqid_cell default_fqids[][3] = { ++ [RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} }, ++ [TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} } ++}; ++ ++static const char fsl_qman_frame_queues[][25] = { ++ [RX] = "fsl,qman-frame-queues-rx", ++ [TX] = "fsl,qman-frame-queues-tx" ++}; ++#ifdef CONFIG_FSL_DPAA_HOOKS ++/* A set of callbacks for hooking into the fastpath at different points. */ ++struct dpaa_eth_hooks_s dpaa_eth_hooks; ++EXPORT_SYMBOL(dpaa_eth_hooks); ++/* This function should only be called on the probe paths, since it makes no ++ * effort to guarantee consistency of the destination hooks structure. 
++ */ ++void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks) ++{ ++ if (hooks) ++ dpaa_eth_hooks = *hooks; ++ else ++ pr_err("NULL pointer to hooks!\n"); ++} ++EXPORT_SYMBOL(fsl_dpaa_eth_set_hooks); ++#endif ++ ++int dpa_netdev_init(struct net_device *net_dev, ++ const uint8_t *mac_addr, ++ uint16_t tx_timeout) ++{ ++ int err; ++ struct dpa_priv_s *priv = netdev_priv(net_dev); ++ struct device *dev = net_dev->dev.parent; ++ ++ net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; ++ ++ net_dev->features |= net_dev->hw_features; ++ net_dev->vlan_features = net_dev->features; ++ ++ memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len); ++ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); ++ ++ net_dev->ethtool_ops = &dpa_ethtool_ops; ++ ++ net_dev->needed_headroom = priv->tx_headroom; ++ net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout); ++ ++ err = register_netdev(net_dev); ++ if (err < 0) { ++ dev_err(dev, "register_netdev() = %d\n", err); ++ return err; ++ } ++ ++#ifdef CONFIG_FSL_DPAA_DBG_LOOP ++ /* create debugfs entry for this net_device */ ++ err = dpa_netdev_debugfs_create(net_dev); ++ if (err) { ++ unregister_netdev(net_dev); ++ return err; ++ } ++#endif /* CONFIG_FSL_DPAA_DBG_LOOP */ ++ ++ return 0; ++} ++EXPORT_SYMBOL(dpa_netdev_init); ++ ++int __cold dpa_start(struct net_device *net_dev) ++{ ++ int err, i; ++ struct dpa_priv_s *priv; ++ struct mac_device *mac_dev; ++ ++ priv = netdev_priv(net_dev); ++ mac_dev = priv->mac_dev; ++ ++ err = mac_dev->init_phy(net_dev, priv->mac_dev); ++ if (err < 0) { ++ if (netif_msg_ifup(priv)) ++ netdev_err(net_dev, "init_phy() = %d\n", err); ++ return err; ++ } ++ ++ for_each_port_device(i, mac_dev->port_dev) { ++ err = fm_port_enable(mac_dev->port_dev[i]); ++ if (err) ++ goto mac_start_failed; ++ } ++ ++ err = priv->mac_dev->start(mac_dev); ++ if (err < 0) { ++ if (netif_msg_ifup(priv)) ++ netdev_err(net_dev, "mac_dev->start() = %d\n", err); ++ goto mac_start_failed; ++ } ++ ++ netif_tx_start_all_queues(net_dev); ++ ++ return 0; ++ ++mac_start_failed: ++ for_each_port_device(i, mac_dev->port_dev) ++ fm_port_disable(mac_dev->port_dev[i]); ++ ++ return err; ++} ++EXPORT_SYMBOL(dpa_start); ++ ++int __cold dpa_stop(struct net_device *net_dev) ++{ ++ int _errno, i, err; ++ struct dpa_priv_s *priv; ++ struct mac_device *mac_dev; ++ ++ priv = netdev_priv(net_dev); ++ mac_dev = priv->mac_dev; ++ ++ netif_tx_stop_all_queues(net_dev); ++ /* Allow the Fman (Tx) port to process in-flight frames before we ++ * try switching it off. ++ */ ++ usleep_range(5000, 10000); ++ ++ _errno = mac_dev->stop(mac_dev); ++ if (unlikely(_errno < 0)) ++ if (netif_msg_ifdown(priv)) ++ netdev_err(net_dev, "mac_dev->stop() = %d\n", ++ _errno); ++ ++ for_each_port_device(i, mac_dev->port_dev) { ++ err = fm_port_disable(mac_dev->port_dev[i]); ++ _errno = err ? 
err : _errno; ++ } ++ ++ if (mac_dev->phy_dev) ++ phy_disconnect(mac_dev->phy_dev); ++ mac_dev->phy_dev = NULL; ++ ++ return _errno; ++} ++EXPORT_SYMBOL(dpa_stop); ++ ++void __cold dpa_timeout(struct net_device *net_dev) ++{ ++ const struct dpa_priv_s *priv; ++ struct dpa_percpu_priv_s *percpu_priv; ++ ++ priv = netdev_priv(net_dev); ++ percpu_priv = raw_cpu_ptr(priv->percpu_priv); ++ ++ if (netif_msg_timer(priv)) ++ netdev_crit(net_dev, "Transmit timeout!\n"); ++ ++ percpu_priv->stats.tx_errors++; ++} ++EXPORT_SYMBOL(dpa_timeout); ++ ++/* net_device */ ++ ++/** ++ * @param net_dev the device for which statistics are calculated ++ * @param stats the function fills this structure with the device's statistics ++ * @return the address of the structure containing the statistics ++ * ++ * Calculates the statistics for the given device by adding the statistics ++ * collected by each CPU. ++ */ ++void __cold ++dpa_get_stats64(struct net_device *net_dev, ++ struct rtnl_link_stats64 *stats) ++{ ++ struct dpa_priv_s *priv = netdev_priv(net_dev); ++ u64 *cpustats; ++ u64 *netstats = (u64 *)stats; ++ int i, j; ++ struct dpa_percpu_priv_s *percpu_priv; ++ int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64); ++ ++ for_each_possible_cpu(i) { ++ percpu_priv = per_cpu_ptr(priv->percpu_priv, i); ++ ++ cpustats = (u64 *)&percpu_priv->stats; ++ ++ for (j = 0; j < numstats; j++) ++ netstats[j] += cpustats[j]; ++ } ++} ++EXPORT_SYMBOL(dpa_get_stats64); ++ ++int dpa_change_mtu(struct net_device *net_dev, int new_mtu) ++{ ++ const int max_mtu = dpa_get_max_mtu(); ++ ++ /* Make sure we don't exceed the Ethernet controller's MAXFRM */ ++ if (new_mtu < 68 || new_mtu > max_mtu) { ++ netdev_err(net_dev, "Invalid L3 mtu %d (must be between %d and %d).\n", ++ new_mtu, 68, max_mtu); ++ return -EINVAL; ++ } ++ net_dev->mtu = new_mtu; ++ ++ return 0; ++} ++EXPORT_SYMBOL(dpa_change_mtu); ++ ++/* .ndo_init callback */ ++int dpa_ndo_init(struct net_device *net_dev) ++{ ++ /* If fsl_fm_max_frm is set to a higher value than the all-common 1500, ++ * we choose conservatively and let the user explicitly set a higher ++ * MTU via ifconfig. Otherwise, the user may end up with different MTUs ++ * in the same LAN. ++ * If on the other hand fsl_fm_max_frm has been chosen below 1500, ++ * start with the maximum allowed. ++ */ ++ int init_mtu = min(dpa_get_max_mtu(), ETH_DATA_LEN); ++ ++ pr_debug("Setting initial MTU on net device: %d\n", init_mtu); ++ net_dev->mtu = init_mtu; ++ ++ return 0; ++} ++EXPORT_SYMBOL(dpa_ndo_init); ++ ++int dpa_set_features(struct net_device *dev, netdev_features_t features) ++{ ++ /* Not much to do here for now */ ++ dev->features = features; ++ return 0; ++} ++EXPORT_SYMBOL(dpa_set_features); ++ ++netdev_features_t dpa_fix_features(struct net_device *dev, ++ netdev_features_t features) ++{ ++ netdev_features_t unsupported_features = 0; ++ ++ /* In theory we should never be requested to enable features that ++ * we didn't set in netdev->features and netdev->hw_features at probe ++ * time, but double check just to be on the safe side. 
++ * We don't support enabling Rx csum through ethtool yet ++ */ ++ unsupported_features |= NETIF_F_RXCSUM; ++ ++ features &= ~unsupported_features; ++ ++ return features; ++} ++EXPORT_SYMBOL(dpa_fix_features); ++ ++#ifdef CONFIG_FSL_DPAA_TS ++u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv, enum port_type rx_tx, ++ const void *data) ++{ ++ u64 *ts, ns; ++ ++ ts = fm_port_get_buffer_time_stamp(priv->mac_dev->port_dev[rx_tx], ++ data); ++ ++ if (!ts || *ts == 0) ++ return 0; ++ ++ be64_to_cpus(ts); ++ ++ /* multiple DPA_PTP_NOMINAL_FREQ_PERIOD_NS for case of non power of 2 */ ++ ns = *ts << DPA_PTP_NOMINAL_FREQ_PERIOD_SHIFT; ++ ++ return ns; ++} ++ ++int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx, ++ struct skb_shared_hwtstamps *shhwtstamps, const void *data) ++{ ++ u64 ns; ++ ++ ns = dpa_get_timestamp_ns(priv, rx_tx, data); ++ ++ if (ns == 0) ++ return -EINVAL; ++ ++ memset(shhwtstamps, 0, sizeof(*shhwtstamps)); ++ shhwtstamps->hwtstamp = ns_to_ktime(ns); ++ ++ return 0; ++} ++ ++static void dpa_ts_tx_enable(struct net_device *dev) ++{ ++ struct dpa_priv_s *priv = netdev_priv(dev); ++ struct mac_device *mac_dev = priv->mac_dev; ++ ++ if (mac_dev->fm_rtc_enable) ++ mac_dev->fm_rtc_enable(get_fm_handle(dev)); ++ if (mac_dev->ptp_enable) ++ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev)); ++ ++ priv->ts_tx_en = true; ++} ++ ++static void dpa_ts_tx_disable(struct net_device *dev) ++{ ++ struct dpa_priv_s *priv = netdev_priv(dev); ++ ++#if 0 ++/* the RTC might be needed by the Rx Ts, cannot disable here ++ * no separate ptp_disable API for Rx/Tx, cannot disable here ++ */ ++ struct mac_device *mac_dev = priv->mac_dev; ++ ++ if (mac_dev->fm_rtc_disable) ++ mac_dev->fm_rtc_disable(get_fm_handle(dev)); ++ ++ if (mac_dev->ptp_disable) ++ mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev)); ++#endif ++ ++ priv->ts_tx_en = false; ++} ++ ++static void dpa_ts_rx_enable(struct net_device *dev) ++{ ++ struct dpa_priv_s *priv = netdev_priv(dev); ++ struct mac_device *mac_dev = priv->mac_dev; ++ ++ if (mac_dev->fm_rtc_enable) ++ mac_dev->fm_rtc_enable(get_fm_handle(dev)); ++ if (mac_dev->ptp_enable) ++ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev)); ++ ++ priv->ts_rx_en = true; ++} ++ ++static void dpa_ts_rx_disable(struct net_device *dev) ++{ ++ struct dpa_priv_s *priv = netdev_priv(dev); ++ ++#if 0 ++/* the RTC might be needed by the Tx Ts, cannot disable here ++ * no separate ptp_disable API for Rx/Tx, cannot disable here ++ */ ++ struct mac_device *mac_dev = priv->mac_dev; ++ ++ if (mac_dev->fm_rtc_disable) ++ mac_dev->fm_rtc_disable(get_fm_handle(dev)); ++ ++ if (mac_dev->ptp_disable) ++ mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev)); ++#endif ++ ++ priv->ts_rx_en = false; ++} ++ ++static int dpa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) ++{ ++ struct hwtstamp_config config; ++ ++ if (copy_from_user(&config, rq->ifr_data, sizeof(config))) ++ return -EFAULT; ++ ++ switch (config.tx_type) { ++ case HWTSTAMP_TX_OFF: ++ dpa_ts_tx_disable(dev); ++ break; ++ case HWTSTAMP_TX_ON: ++ dpa_ts_tx_enable(dev); ++ break; ++ default: ++ return -ERANGE; ++ } ++ ++ if (config.rx_filter == HWTSTAMP_FILTER_NONE) ++ dpa_ts_rx_disable(dev); ++ else { ++ dpa_ts_rx_enable(dev); ++ /* TS is set for all frame types, not only those requested */ ++ config.rx_filter = HWTSTAMP_FILTER_ALL; ++ } ++ ++ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? 
++ -EFAULT : 0; ++} ++#endif /* CONFIG_FSL_DPAA_TS */ ++ ++int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) ++{ ++#ifdef CONFIG_FSL_DPAA_1588 ++ struct dpa_priv_s *priv = netdev_priv(dev); ++#endif ++ int ret = 0; ++ ++ /* at least one timestamping feature must be enabled */ ++#ifdef CONFIG_FSL_DPAA_TS ++ if (!netif_running(dev)) ++#endif ++ return -EINVAL; ++ ++#ifdef CONFIG_FSL_DPAA_TS ++ if (cmd == SIOCSHWTSTAMP) ++ return dpa_ts_ioctl(dev, rq, cmd); ++#endif /* CONFIG_FSL_DPAA_TS */ ++ ++#ifdef CONFIG_FSL_DPAA_1588 ++ if ((cmd >= PTP_ENBL_TXTS_IOCTL) && (cmd <= PTP_CLEANUP_TS)) { ++ if (priv->tsu && priv->tsu->valid) ++ ret = dpa_ioctl_1588(dev, rq, cmd); ++ else ++ ret = -ENODEV; ++ } ++#endif ++ ++ return ret; ++} ++EXPORT_SYMBOL(dpa_ioctl); ++ ++int __cold dpa_remove(struct platform_device *of_dev) ++{ ++ int err; ++ struct device *dev; ++ struct net_device *net_dev; ++ struct dpa_priv_s *priv; ++ ++ dev = &of_dev->dev; ++ net_dev = dev_get_drvdata(dev); ++ ++ priv = netdev_priv(net_dev); ++ ++ dpaa_eth_sysfs_remove(dev); ++ ++ dev_set_drvdata(dev, NULL); ++ unregister_netdev(net_dev); ++ ++ err = dpa_fq_free(dev, &priv->dpa_fq_list); ++ ++ qman_delete_cgr_safe(&priv->ingress_cgr); ++ qman_release_cgrid(priv->ingress_cgr.cgrid); ++ qman_delete_cgr_safe(&priv->cgr_data.cgr); ++ qman_release_cgrid(priv->cgr_data.cgr.cgrid); ++ ++ dpa_private_napi_del(net_dev); ++ ++ dpa_bp_free(priv); ++ ++ if (priv->buf_layout) ++ devm_kfree(dev, priv->buf_layout); ++ ++#ifdef CONFIG_FSL_DPAA_DBG_LOOP ++ /* remove debugfs entry for this net_device */ ++ dpa_netdev_debugfs_remove(net_dev); ++#endif /* CONFIG_FSL_DPAA_DBG_LOOP */ ++ ++#ifdef CONFIG_FSL_DPAA_1588 ++ if (priv->tsu && priv->tsu->valid) ++ dpa_ptp_cleanup(priv); ++#endif ++ ++ free_netdev(net_dev); ++ ++ return err; ++} ++EXPORT_SYMBOL(dpa_remove); ++ ++struct mac_device * __cold __must_check ++__attribute__((nonnull)) ++dpa_mac_probe(struct platform_device *_of_dev) ++{ ++ struct device *dpa_dev, *dev; ++ struct device_node *mac_node; ++ struct platform_device *of_dev; ++ struct mac_device *mac_dev; ++#ifdef CONFIG_FSL_DPAA_1588 ++ int lenp; ++ const phandle *phandle_prop; ++ struct net_device *net_dev = NULL; ++ struct dpa_priv_s *priv = NULL; ++ struct device_node *timer_node; ++#endif ++ dpa_dev = &_of_dev->dev; ++ ++ mac_node = of_parse_phandle(_of_dev->dev.of_node, "fsl,fman-mac", 0); ++ if (unlikely(mac_node == NULL)) { ++ dev_err(dpa_dev, "Cannot find MAC device device tree node\n"); ++ return ERR_PTR(-EFAULT); ++ } ++ ++ of_dev = of_find_device_by_node(mac_node); ++ if (unlikely(of_dev == NULL)) { ++ dev_err(dpa_dev, "of_find_device_by_node(%s) failed\n", ++ mac_node->full_name); ++ of_node_put(mac_node); ++ return ERR_PTR(-EINVAL); ++ } ++ of_node_put(mac_node); ++ ++ dev = &of_dev->dev; ++ ++ mac_dev = dev_get_drvdata(dev); ++ if (unlikely(mac_dev == NULL)) { ++ dev_err(dpa_dev, "dev_get_drvdata(%s) failed\n", ++ dev_name(dev)); ++ return ERR_PTR(-EINVAL); ++ } ++ ++#ifdef CONFIG_FSL_DPAA_1588 ++ phandle_prop = of_get_property(mac_node, "ptp-timer", &lenp); ++ if (phandle_prop && ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) || ++ ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) && ++ (mac_dev->speed == SPEED_1000)))) { ++ timer_node = of_find_node_by_phandle(*phandle_prop); ++ if (timer_node) ++ net_dev = dev_get_drvdata(dpa_dev); ++ if (timer_node && net_dev) { ++ priv = netdev_priv(net_dev); ++ if (!dpa_ptp_init(priv)) ++ dev_info(dev, "%s: ptp 1588 is initialized.\n", ++ mac_node->full_name); ++ } ++ } 
++#endif ++ ++#ifdef CONFIG_PTP_1588_CLOCK_DPAA ++ if ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) || ++ ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) && ++ (mac_dev->speed == SPEED_1000))) { ++ ptp_priv.node = of_parse_phandle(mac_node, "ptp-timer", 0); ++ if (ptp_priv.node) { ++ ptp_priv.of_dev = of_find_device_by_node(ptp_priv.node); ++ if (unlikely(ptp_priv.of_dev == NULL)) { ++ dev_err(dpa_dev, ++ "Cannot find device represented by timer_node\n"); ++ of_node_put(ptp_priv.node); ++ return ERR_PTR(-EINVAL); ++ } ++ ptp_priv.mac_dev = mac_dev; ++ } ++ } ++#endif ++ return mac_dev; ++} ++EXPORT_SYMBOL(dpa_mac_probe); ++ ++int dpa_set_mac_address(struct net_device *net_dev, void *addr) ++{ ++ const struct dpa_priv_s *priv; ++ int _errno; ++ struct mac_device *mac_dev; ++ ++ priv = netdev_priv(net_dev); ++ ++ _errno = eth_mac_addr(net_dev, addr); ++ if (_errno < 0) { ++ if (netif_msg_drv(priv)) ++ netdev_err(net_dev, ++ "eth_mac_addr() = %d\n", ++ _errno); ++ return _errno; ++ } ++ ++ mac_dev = priv->mac_dev; ++ ++ _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev), ++ net_dev->dev_addr); ++ if (_errno < 0) { ++ if (netif_msg_drv(priv)) ++ netdev_err(net_dev, ++ "mac_dev->change_addr() = %d\n", ++ _errno); ++ return _errno; ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL(dpa_set_mac_address); ++ ++void dpa_set_rx_mode(struct net_device *net_dev) ++{ ++ int _errno; ++ const struct dpa_priv_s *priv; ++ ++ priv = netdev_priv(net_dev); ++ ++ if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) { ++ priv->mac_dev->promisc = !priv->mac_dev->promisc; ++ _errno = priv->mac_dev->set_promisc( ++ priv->mac_dev->get_mac_handle(priv->mac_dev), ++ priv->mac_dev->promisc); ++ if (unlikely(_errno < 0) && netif_msg_drv(priv)) ++ netdev_err(net_dev, ++ "mac_dev->set_promisc() = %d\n", ++ _errno); ++ } ++ ++ _errno = priv->mac_dev->set_multi(net_dev, priv->mac_dev); ++ if (unlikely(_errno < 0) && netif_msg_drv(priv)) ++ netdev_err(net_dev, "mac_dev->set_multi() = %d\n", _errno); ++} ++EXPORT_SYMBOL(dpa_set_rx_mode); ++ ++void dpa_set_buffers_layout(struct mac_device *mac_dev, ++ struct dpa_buffer_layout_s *layout) ++{ ++ struct fm_port_params params; ++ ++ /* Rx */ ++ layout[RX].priv_data_size = (uint16_t)DPA_RX_PRIV_DATA_SIZE; ++ layout[RX].parse_results = true; ++ layout[RX].hash_results = true; ++#ifdef CONFIG_FSL_DPAA_TS ++ layout[RX].time_stamp = true; ++#endif ++ fm_port_get_buff_layout_ext_params(mac_dev->port_dev[RX], ¶ms); ++ layout[RX].manip_extra_space = params.manip_extra_space; ++ /* a value of zero for data alignment means "don't care", so align to ++ * a non-zero value to prevent FMD from using its own default ++ */ ++ layout[RX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT; ++ ++ /* Tx */ ++ layout[TX].priv_data_size = DPA_TX_PRIV_DATA_SIZE; ++ layout[TX].parse_results = true; ++ layout[TX].hash_results = true; ++#ifdef CONFIG_FSL_DPAA_TS ++ layout[TX].time_stamp = true; ++#endif ++ fm_port_get_buff_layout_ext_params(mac_dev->port_dev[TX], ¶ms); ++ layout[TX].manip_extra_space = params.manip_extra_space; ++ layout[TX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT; ++} ++EXPORT_SYMBOL(dpa_set_buffers_layout); ++ ++int __attribute__((nonnull)) ++dpa_bp_alloc(struct dpa_bp *dpa_bp) ++{ ++ int err; ++ struct bman_pool_params bp_params; ++ struct platform_device *pdev; ++ ++ if (dpa_bp->size == 0 || dpa_bp->config_count == 0) { ++ pr_err("Buffer pool is not properly initialized! 
Missing size or initial number of buffers"); ++ return -EINVAL; ++ } ++ ++ memset(&bp_params, 0, sizeof(struct bman_pool_params)); ++#ifdef CONFIG_FMAN_PFC ++ bp_params.flags = BMAN_POOL_FLAG_THRESH; ++ bp_params.thresholds[0] = bp_params.thresholds[2] = ++ CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD; ++ bp_params.thresholds[1] = bp_params.thresholds[3] = ++ CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT; ++#endif ++ ++ /* If the pool is already specified, we only create one per bpid */ ++ if (dpa_bpid2pool_use(dpa_bp->bpid)) ++ return 0; ++ ++ if (dpa_bp->bpid == 0) ++ bp_params.flags |= BMAN_POOL_FLAG_DYNAMIC_BPID; ++ else ++ bp_params.bpid = dpa_bp->bpid; ++ ++ dpa_bp->pool = bman_new_pool(&bp_params); ++ if (unlikely(dpa_bp->pool == NULL)) { ++ pr_err("bman_new_pool() failed\n"); ++ return -ENODEV; ++ } ++ ++ dpa_bp->bpid = (uint8_t)bman_get_params(dpa_bp->pool)->bpid; ++ ++ pdev = platform_device_register_simple("dpaa_eth_bpool", ++ dpa_bp->bpid, NULL, 0); ++ if (IS_ERR(pdev)) { ++ pr_err("platform_device_register_simple() failed\n"); ++ err = PTR_ERR(pdev); ++ goto pdev_register_failed; ++ } ++ { ++ struct dma_map_ops *ops = get_dma_ops(&pdev->dev); ++ ops->dma_supported = NULL; ++ } ++ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); ++ if (err) { ++ pr_err("dma_coerce_mask_and_coherent() failed\n"); ++ goto pdev_mask_failed; ++ } ++#ifdef CONFIG_FMAN_ARM ++ /* force coherency */ ++ pdev->dev.archdata.dma_coherent = true; ++ arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, true); ++#endif ++ ++ dpa_bp->dev = &pdev->dev; ++ ++ if (dpa_bp->seed_cb) { ++ err = dpa_bp->seed_cb(dpa_bp); ++ if (err) ++ goto pool_seed_failed; ++ } ++ ++ dpa_bpid2pool_map(dpa_bp->bpid, dpa_bp); ++ ++ return 0; ++ ++pool_seed_failed: ++pdev_mask_failed: ++ platform_device_unregister(pdev); ++pdev_register_failed: ++ bman_free_pool(dpa_bp->pool); ++ ++ return err; ++} ++EXPORT_SYMBOL(dpa_bp_alloc); ++ ++void dpa_bp_drain(struct dpa_bp *bp) ++{ ++ int ret, num = 8; ++ ++ do { ++ struct bm_buffer bmb[8]; ++ int i; ++ ++ ret = bman_acquire(bp->pool, bmb, num, 0); ++ if (ret < 0) { ++ if (num == 8) { ++ /* we have less than 8 buffers left; ++ * drain them one by one ++ */ ++ num = 1; ++ ret = 1; ++ continue; ++ } else { ++ /* Pool is fully drained */ ++ break; ++ } ++ } ++ ++ for (i = 0; i < num; i++) { ++ dma_addr_t addr = bm_buf_addr(&bmb[i]); ++ ++ dma_unmap_single(bp->dev, addr, bp->size, ++ DMA_BIDIRECTIONAL); ++ ++ bp->free_buf_cb(phys_to_virt(addr)); ++ } ++ } while (ret > 0); ++} ++EXPORT_SYMBOL(dpa_bp_drain); ++ ++static void __cold __attribute__((nonnull)) ++_dpa_bp_free(struct dpa_bp *dpa_bp) ++{ ++ struct dpa_bp *bp = dpa_bpid2pool(dpa_bp->bpid); ++ ++ /* the mapping between bpid and dpa_bp is done very late in the ++ * allocation procedure; if something failed before the mapping, the bp ++ * was not configured, therefore we don't need the below instructions ++ */ ++ if (!bp) ++ return; ++ ++ if (!atomic_dec_and_test(&bp->refs)) ++ return; ++ ++ if (bp->free_buf_cb) ++ dpa_bp_drain(bp); ++ ++ dpa_bp_array[bp->bpid] = NULL; ++ bman_free_pool(bp->pool); ++ ++ if (bp->dev) ++ platform_device_unregister(to_platform_device(bp->dev)); ++} ++ ++void __cold __attribute__((nonnull)) ++dpa_bp_free(struct dpa_priv_s *priv) ++{ ++ int i; ++ ++ if (priv->dpa_bp) ++ for (i = 0; i < priv->bp_count; i++) ++ _dpa_bp_free(&priv->dpa_bp[i]); ++} ++EXPORT_SYMBOL(dpa_bp_free); ++ ++struct dpa_bp *dpa_bpid2pool(int bpid) ++{ ++ return dpa_bp_array[bpid]; ++} ++EXPORT_SYMBOL(dpa_bpid2pool); ++ ++void dpa_bpid2pool_map(int bpid, 
struct dpa_bp *dpa_bp) ++{ ++ dpa_bp_array[bpid] = dpa_bp; ++ atomic_set(&dpa_bp->refs, 1); ++} ++ ++bool dpa_bpid2pool_use(int bpid) ++{ ++ if (dpa_bpid2pool(bpid)) { ++ atomic_inc(&dpa_bp_array[bpid]->refs); ++ return true; ++ } ++ ++ return false; ++} ++ ++#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE ++u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb, ++ void *accel_priv, select_queue_fallback_t fallback) ++{ ++ return dpa_get_queue_mapping(skb); ++} ++EXPORT_SYMBOL(dpa_select_queue); ++#endif ++ ++struct dpa_fq *dpa_fq_alloc(struct device *dev, ++ u32 fq_start, ++ u32 fq_count, ++ struct list_head *list, ++ enum dpa_fq_type fq_type) ++{ ++ int i; ++ struct dpa_fq *dpa_fq; ++ ++ dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fq_count, GFP_KERNEL); ++ if (dpa_fq == NULL) ++ return NULL; ++ ++ for (i = 0; i < fq_count; i++) { ++ dpa_fq[i].fq_type = fq_type; ++ if (fq_type == FQ_TYPE_RX_PCD_HI_PRIO) ++ dpa_fq[i].fqid = fq_start ? ++ DPAA_ETH_FQ_DELTA + fq_start + i : 0; ++ else ++ dpa_fq[i].fqid = fq_start ? fq_start + i : 0; ++ ++ list_add_tail(&dpa_fq[i].list, list); ++ } ++ ++#ifdef CONFIG_FMAN_PFC ++ if (fq_type == FQ_TYPE_TX) ++ for (i = 0; i < fq_count; i++) ++ dpa_fq[i].wq = i / dpa_num_cpus; ++ else ++#endif ++ for (i = 0; i < fq_count; i++) ++ _dpa_assign_wq(dpa_fq + i); ++ ++ return dpa_fq; ++} ++EXPORT_SYMBOL(dpa_fq_alloc); ++ ++/* Probing of FQs for MACful ports */ ++int dpa_fq_probe_mac(struct device *dev, struct list_head *list, ++ struct fm_port_fqs *port_fqs, ++ bool alloc_tx_conf_fqs, ++ enum port_type ptype) ++{ ++ struct fqid_cell *fqids = NULL; ++ const void *fqids_off = NULL; ++ struct dpa_fq *dpa_fq = NULL; ++ struct device_node *np = dev->of_node; ++ int num_ranges; ++ int i, lenp; ++ ++ if (ptype == TX && alloc_tx_conf_fqs) { ++ if (!dpa_fq_alloc(dev, tx_confirm_fqids->start, ++ tx_confirm_fqids->count, list, ++ FQ_TYPE_TX_CONF_MQ)) ++ goto fq_alloc_failed; ++ } ++ ++ fqids_off = of_get_property(np, fsl_qman_frame_queues[ptype], &lenp); ++ if (fqids_off == NULL) { ++ /* No dts definition, so use the defaults. */ ++ fqids = default_fqids[ptype]; ++ num_ranges = 3; ++ } else { ++ num_ranges = lenp / sizeof(*fqids); ++ ++ fqids = devm_kzalloc(dev, sizeof(*fqids) * num_ranges, ++ GFP_KERNEL); ++ if (fqids == NULL) ++ goto fqids_alloc_failed; ++ ++ /* convert to CPU endianess */ ++ for (i = 0; i < num_ranges; i++) { ++ fqids[i].start = be32_to_cpup(fqids_off + ++ i * sizeof(*fqids)); ++ fqids[i].count = be32_to_cpup(fqids_off + ++ i * sizeof(*fqids) + sizeof(__be32)); ++ } ++ } ++ ++ for (i = 0; i < num_ranges; i++) { ++ switch (i) { ++ case 0: ++ /* The first queue is the error queue */ ++ if (fqids[i].count != 1) ++ goto invalid_error_queue; ++ ++ dpa_fq = dpa_fq_alloc(dev, fqids[i].start, ++ fqids[i].count, list, ++ ptype == RX ? ++ FQ_TYPE_RX_ERROR : ++ FQ_TYPE_TX_ERROR); ++ if (dpa_fq == NULL) ++ goto fq_alloc_failed; ++ ++ if (ptype == RX) ++ port_fqs->rx_errq = &dpa_fq[0]; ++ else ++ port_fqs->tx_errq = &dpa_fq[0]; ++ break; ++ case 1: ++ /* the second queue is the default queue */ ++ if (fqids[i].count != 1) ++ goto invalid_default_queue; ++ ++ dpa_fq = dpa_fq_alloc(dev, fqids[i].start, ++ fqids[i].count, list, ++ ptype == RX ? 
++ FQ_TYPE_RX_DEFAULT : ++ FQ_TYPE_TX_CONFIRM); ++ if (dpa_fq == NULL) ++ goto fq_alloc_failed; ++ ++ if (ptype == RX) ++ port_fqs->rx_defq = &dpa_fq[0]; ++ else ++ port_fqs->tx_defq = &dpa_fq[0]; ++ break; ++ default: ++ /* all subsequent queues are either RX* PCD or Tx */ ++ if (ptype == RX) { ++ if (!dpa_fq_alloc(dev, fqids[i].start, ++ fqids[i].count, list, ++ FQ_TYPE_RX_PCD) || ++ !dpa_fq_alloc(dev, fqids[i].start, ++ fqids[i].count, list, ++ FQ_TYPE_RX_PCD_HI_PRIO)) ++ goto fq_alloc_failed; ++ } else { ++ if (!dpa_fq_alloc(dev, fqids[i].start, ++ fqids[i].count, list, ++ FQ_TYPE_TX)) ++ goto fq_alloc_failed; ++ } ++ break; ++ } ++ } ++ ++ return 0; ++ ++fq_alloc_failed: ++fqids_alloc_failed: ++ dev_err(dev, "Cannot allocate memory for frame queues\n"); ++ return -ENOMEM; ++ ++invalid_default_queue: ++invalid_error_queue: ++ dev_err(dev, "Too many default or error queues\n"); ++ return -EINVAL; ++} ++EXPORT_SYMBOL(dpa_fq_probe_mac); ++ ++static u32 rx_pool_channel; ++static DEFINE_SPINLOCK(rx_pool_channel_init); ++ ++int dpa_get_channel(void) ++{ ++ spin_lock(&rx_pool_channel_init); ++ if (!rx_pool_channel) { ++ u32 pool; ++ int ret = qman_alloc_pool(&pool); ++ if (!ret) ++ rx_pool_channel = pool; ++ } ++ spin_unlock(&rx_pool_channel_init); ++ if (!rx_pool_channel) ++ return -ENOMEM; ++ return rx_pool_channel; ++} ++EXPORT_SYMBOL(dpa_get_channel); ++ ++void dpa_release_channel(void) ++{ ++ qman_release_pool(rx_pool_channel); ++} ++EXPORT_SYMBOL(dpa_release_channel); ++ ++void dpaa_eth_add_channel(u16 channel) ++{ ++ const cpumask_t *cpus = qman_affine_cpus(); ++ u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel); ++ int cpu; ++ struct qman_portal *portal; ++ ++ for_each_cpu(cpu, cpus) { ++ portal = (struct qman_portal *)qman_get_affine_portal(cpu); ++ qman_p_static_dequeue_add(portal, pool); ++ } ++} ++EXPORT_SYMBOL(dpaa_eth_add_channel); ++ ++/** ++ * Congestion group state change notification callback. ++ * Stops the device's egress queues while they are congested and ++ * wakes them upon exiting congested state. ++ * Also updates some CGR-related stats. ++ */ ++static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr, ++ ++ int congested) ++{ ++ struct dpa_priv_s *priv = (struct dpa_priv_s *)container_of(cgr, ++ struct dpa_priv_s, cgr_data.cgr); ++ ++ if (congested) { ++ priv->cgr_data.congestion_start_jiffies = jiffies; ++ netif_tx_stop_all_queues(priv->net_dev); ++ priv->cgr_data.cgr_congested_count++; ++ } else { ++ priv->cgr_data.congested_jiffies += ++ (jiffies - priv->cgr_data.congestion_start_jiffies); ++ netif_tx_wake_all_queues(priv->net_dev); ++ } ++} ++ ++int dpaa_eth_cgr_init(struct dpa_priv_s *priv) ++{ ++ struct qm_mcc_initcgr initcgr; ++ u32 cs_th; ++ int err; ++ ++ err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid); ++ if (err < 0) { ++ pr_err("Error %d allocating CGR ID\n", err); ++ goto out_error; ++ } ++ priv->cgr_data.cgr.cb = dpaa_eth_cgscn; ++ ++ /* Enable Congestion State Change Notifications and CS taildrop */ ++ initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES; ++ initcgr.cgr.cscn_en = QM_CGR_EN; ++ ++ /* Set different thresholds based on the MAC speed. ++ * TODO: this may turn suboptimal if the MAC is reconfigured at a speed ++ * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link. ++ * In such cases, we ought to reconfigure the threshold, too. 
++ */ ++ if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full) ++ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G; ++ else ++ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G; ++ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1); ++ ++ initcgr.we_mask |= QM_CGR_WE_CSTD_EN; ++ initcgr.cgr.cstd_en = QM_CGR_EN; ++ ++ err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT, ++ &initcgr); ++ if (err < 0) { ++ pr_err("Error %d creating CGR with ID %d\n", err, ++ priv->cgr_data.cgr.cgrid); ++ qman_release_cgrid(priv->cgr_data.cgr.cgrid); ++ goto out_error; ++ } ++ pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n", ++ priv->cgr_data.cgr.cgrid, priv->mac_dev->addr, ++ priv->cgr_data.cgr.chan); ++ ++out_error: ++ return err; ++} ++EXPORT_SYMBOL(dpaa_eth_cgr_init); ++ ++static inline void dpa_setup_ingress(const struct dpa_priv_s *priv, ++ struct dpa_fq *fq, ++ const struct qman_fq *template) ++{ ++ fq->fq_base = *template; ++ fq->net_dev = priv->net_dev; ++ ++ fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE; ++ fq->channel = priv->channel; ++} ++ ++static inline void dpa_setup_egress(const struct dpa_priv_s *priv, ++ struct dpa_fq *fq, ++ struct fm_port *port, ++ const struct qman_fq *template) ++{ ++ fq->fq_base = *template; ++ fq->net_dev = priv->net_dev; ++ ++ if (port) { ++ fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL; ++ fq->channel = (uint16_t)fm_get_tx_port_channel(port); ++ } else { ++ fq->flags = QMAN_FQ_FLAG_NO_MODIFY; ++ } ++} ++ ++void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs, ++ struct fm_port *tx_port) ++{ ++ struct dpa_fq *fq; ++ uint16_t portals[NR_CPUS]; ++ int cpu, portal_cnt = 0, num_portals = 0; ++ uint32_t pcd_fqid, pcd_fqid_hi_prio; ++ const cpumask_t *affine_cpus = qman_affine_cpus(); ++ int egress_cnt = 0, conf_cnt = 0; ++ ++ /* Prepare for PCD FQs init */ ++ for_each_cpu(cpu, affine_cpus) ++ portals[num_portals++] = qman_affine_channel(cpu); ++ if (num_portals == 0) ++ dev_err(priv->net_dev->dev.parent, ++ "No Qman software (affine) channels found"); ++ ++ pcd_fqid = (priv->mac_dev) ? ++ DPAA_ETH_PCD_FQ_BASE(priv->mac_dev->res->start) : 0; ++ pcd_fqid_hi_prio = (priv->mac_dev) ? ++ DPAA_ETH_PCD_FQ_HI_PRIO_BASE(priv->mac_dev->res->start) : 0; ++ ++ /* Initialize each FQ in the list */ ++ list_for_each_entry(fq, &priv->dpa_fq_list, list) { ++ switch (fq->fq_type) { ++ case FQ_TYPE_RX_DEFAULT: ++ BUG_ON(!priv->mac_dev); ++ dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq); ++ break; ++ case FQ_TYPE_RX_ERROR: ++ BUG_ON(!priv->mac_dev); ++ dpa_setup_ingress(priv, fq, &fq_cbs->rx_errq); ++ break; ++ case FQ_TYPE_RX_PCD: ++ /* For MACless we can't have dynamic Rx queues */ ++ BUG_ON(!priv->mac_dev && !fq->fqid); ++ dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq); ++ if (!fq->fqid) ++ fq->fqid = pcd_fqid++; ++ fq->channel = portals[portal_cnt]; ++ portal_cnt = (portal_cnt + 1) % num_portals; ++ break; ++ case FQ_TYPE_RX_PCD_HI_PRIO: ++ /* For MACless we can't have dynamic Hi Pri Rx queues */ ++ BUG_ON(!priv->mac_dev && !fq->fqid); ++ dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq); ++ if (!fq->fqid) ++ fq->fqid = pcd_fqid_hi_prio++; ++ fq->channel = portals[portal_cnt]; ++ portal_cnt = (portal_cnt + 1) % num_portals; ++ break; ++ case FQ_TYPE_TX: ++ dpa_setup_egress(priv, fq, tx_port, ++ &fq_cbs->egress_ern); ++ /* If we have more Tx queues than the number of cores, ++ * just ignore the extra ones. 
++ */ ++ if (egress_cnt < DPAA_ETH_TX_QUEUES) ++ priv->egress_fqs[egress_cnt++] = &fq->fq_base; ++ break; ++ case FQ_TYPE_TX_CONFIRM: ++ BUG_ON(!priv->mac_dev); ++ dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq); ++ break; ++ case FQ_TYPE_TX_CONF_MQ: ++ BUG_ON(!priv->mac_dev); ++ dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq); ++ priv->conf_fqs[conf_cnt++] = &fq->fq_base; ++ break; ++ case FQ_TYPE_TX_ERROR: ++ BUG_ON(!priv->mac_dev); ++ dpa_setup_ingress(priv, fq, &fq_cbs->tx_errq); ++ break; ++ default: ++ dev_warn(priv->net_dev->dev.parent, ++ "Unknown FQ type detected!\n"); ++ break; ++ } ++ } ++ ++ /* The number of Tx queues may be smaller than the number of cores, if ++ * the Tx queue range is specified in the device tree instead of being ++ * dynamically allocated. ++ * Make sure all CPUs receive a corresponding Tx queue. ++ */ ++ while (egress_cnt < DPAA_ETH_TX_QUEUES) { ++ list_for_each_entry(fq, &priv->dpa_fq_list, list) { ++ if (fq->fq_type != FQ_TYPE_TX) ++ continue; ++ priv->egress_fqs[egress_cnt++] = &fq->fq_base; ++ if (egress_cnt == DPAA_ETH_TX_QUEUES) ++ break; ++ } ++ } ++} ++EXPORT_SYMBOL(dpa_fq_setup); ++ ++int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable) ++{ ++ int _errno; ++ const struct dpa_priv_s *priv; ++ struct device *dev; ++ struct qman_fq *fq; ++ struct qm_mcc_initfq initfq; ++ struct qman_fq *confq; ++ int queue_id; ++ ++ priv = netdev_priv(dpa_fq->net_dev); ++ dev = dpa_fq->net_dev->dev.parent; ++ ++ if (dpa_fq->fqid == 0) ++ dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID; ++ ++ dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY); ++ ++ _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base); ++ if (_errno) { ++ dev_err(dev, "qman_create_fq() failed\n"); ++ return _errno; ++ } ++ fq = &dpa_fq->fq_base; ++ ++ if (dpa_fq->init) { ++ memset(&initfq, 0, sizeof(initfq)); ++ ++ initfq.we_mask = QM_INITFQ_WE_FQCTRL; ++ /* FIXME: why would we want to keep an empty FQ in cache? */ ++ initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE; ++ ++ /* Try to reduce the number of portal interrupts for ++ * Tx Confirmation FQs. ++ */ ++ if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM) ++ initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE; ++ ++ /* FQ placement */ ++ initfq.we_mask |= QM_INITFQ_WE_DESTWQ; ++ ++ initfq.fqd.dest.channel = dpa_fq->channel; ++ initfq.fqd.dest.wq = dpa_fq->wq; ++ ++ /* Put all egress queues in a congestion group of their own. ++ * Sensu stricto, the Tx confirmation queues are Rx FQs, ++ * rather than Tx - but they nonetheless account for the ++ * memory footprint on behalf of egress traffic. We therefore ++ * place them in the netdev's CGR, along with the Tx FQs. ++ */ ++ if (dpa_fq->fq_type == FQ_TYPE_TX || ++ dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM || ++ dpa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) { ++ initfq.we_mask |= QM_INITFQ_WE_CGID; ++ initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE; ++ initfq.fqd.cgid = (uint8_t)priv->cgr_data.cgr.cgrid; ++ /* Set a fixed overhead accounting, in an attempt to ++ * reduce the impact of fixed-size skb shells and the ++ * driver's needed headroom on system memory. This is ++ * especially the case when the egress traffic is ++ * composed of small datagrams. ++ * Unfortunately, QMan's OAL value is capped to an ++ * insufficient value, but even that is better than ++ * no overhead accounting at all. 
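The Tx-queue padding loop above is easiest to see with concrete numbers (hypothetical: four device-tree Tx FQs, DPAA_ETH_TX_QUEUES == 8):

    /* Worked example of the padding loop: with Tx FQs fq0..fq3 from the
     * device tree and DPAA_ETH_TX_QUEUES == 8, the array becomes
     *   egress_fqs[] = { fq0, fq1, fq2, fq3, fq0, fq1, fq2, fq3 }
     * so every CPU still owns an entry; the four hardware queues are
     * simply shared between pairs of cores. */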
++ */ ++ initfq.we_mask |= QM_INITFQ_WE_OAC; ++ initfq.fqd.oac_init.oac = QM_OAC_CG; ++ initfq.fqd.oac_init.oal = ++ (signed char)(min(sizeof(struct sk_buff) + ++ priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL)); ++ } ++ ++ if (td_enable) { ++ initfq.we_mask |= QM_INITFQ_WE_TDTHRESH; ++ qm_fqd_taildrop_set(&initfq.fqd.td, ++ DPA_FQ_TD, 1); ++ initfq.fqd.fq_ctrl = QM_FQCTRL_TDE; ++ } ++ ++ /* Configure the Tx confirmation queue, now that we know ++ * which Tx queue it pairs with. ++ */ ++ if (dpa_fq->fq_type == FQ_TYPE_TX) { ++ queue_id = _dpa_tx_fq_to_id(priv, &dpa_fq->fq_base); ++ if (queue_id >= 0) { ++ confq = priv->conf_fqs[queue_id]; ++ if (confq) { ++ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; ++ /* ContextA: OVOM=1 (use contextA2 bits instead of ICAD) ++ * A2V=1 (contextA A2 field is valid) ++ * A0V=1 (contextA A0 field is valid) ++ * B0V=1 (contextB field is valid) ++ * ContextA A2: EBD=1 (deallocate buffers inside FMan) ++ * ContextB B0(ASPID): 0 (absolute Virtual Storage ID) ++ */ ++ initfq.fqd.context_a.hi = 0x1e000000; ++ initfq.fqd.context_a.lo = 0x80000000; ++ } ++ } ++ } ++ ++ /* Put all *private* ingress queues in our "ingress CGR". */ ++ if (priv->use_ingress_cgr && ++ (dpa_fq->fq_type == FQ_TYPE_RX_DEFAULT || ++ dpa_fq->fq_type == FQ_TYPE_RX_ERROR || ++ dpa_fq->fq_type == FQ_TYPE_RX_PCD || ++ dpa_fq->fq_type == FQ_TYPE_RX_PCD_HI_PRIO)) { ++ initfq.we_mask |= QM_INITFQ_WE_CGID; ++ initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE; ++ initfq.fqd.cgid = (uint8_t)priv->ingress_cgr.cgrid; ++ /* Set a fixed overhead accounting, just like for the ++ * egress CGR. ++ */ ++ initfq.we_mask |= QM_INITFQ_WE_OAC; ++ initfq.fqd.oac_init.oac = QM_OAC_CG; ++ initfq.fqd.oac_init.oal = ++ (signed char)(min(sizeof(struct sk_buff) + ++ priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL)); ++ } ++ ++ /* Initialization common to all ingress queues */ ++ if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) { ++ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; ++ initfq.fqd.fq_ctrl |= ++ QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK; ++ initfq.fqd.context_a.stashing.exclusive = ++ QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX | ++ QM_STASHING_EXCL_ANNOTATION; ++ initfq.fqd.context_a.stashing.data_cl = 2; ++ initfq.fqd.context_a.stashing.annotation_cl = 1; ++ initfq.fqd.context_a.stashing.context_cl = ++ DIV_ROUND_UP(sizeof(struct qman_fq), 64); ++ } ++ ++ _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq); ++ if (_errno < 0) { ++ if (DPA_RX_PCD_HI_PRIO_FQ_INIT_FAIL(dpa_fq, _errno)) { ++ dpa_fq->init = 0; ++ } else { ++ dev_err(dev, "qman_init_fq(%u) = %d\n", ++ qman_fq_fqid(fq), _errno); ++ qman_destroy_fq(fq, 0); ++ } ++ return _errno; ++ } ++ } ++ ++ dpa_fq->fqid = qman_fq_fqid(fq); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dpa_fq_init); ++ ++int __cold __attribute__((nonnull)) ++_dpa_fq_free(struct device *dev, struct qman_fq *fq) ++{ ++ int _errno, __errno; ++ struct dpa_fq *dpa_fq; ++ const struct dpa_priv_s *priv; ++ ++ _errno = 0; ++ ++ dpa_fq = container_of(fq, struct dpa_fq, fq_base); ++ priv = netdev_priv(dpa_fq->net_dev); ++ ++ if (dpa_fq->init) { ++ _errno = qman_retire_fq(fq, NULL); ++ if (unlikely(_errno < 0) && netif_msg_drv(priv)) ++ dev_err(dev, "qman_retire_fq(%u) = %d\n", ++ qman_fq_fqid(fq), _errno); ++ ++ __errno = qman_oos_fq(fq); ++ if (unlikely(__errno < 0) && netif_msg_drv(priv)) { ++ dev_err(dev, "qman_oos_fq(%u) = %d\n", ++ qman_fq_fqid(fq), __errno); ++ if (_errno >= 0) ++ _errno = __errno; ++ } ++ } ++ ++ qman_destroy_fq(fq, 0); ++ list_del(&dpa_fq->list); ++ ++ return _errno; ++} 
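The OAL clamp used for both the egress and ingress CGRs deserves a worked instance (sizes here are illustrative; sizeof(struct sk_buff) varies with config and arch, and FSL_QMAN_MAX_OAL comes from the QMan headers):

    /* Worked example of the OAL clamp, with made-up sizes. */
    size_t skb_shell   = 232;   /* hypothetical sizeof(struct sk_buff) */
    size_t tx_headroom = 64;    /* hypothetical priv->tx_headroom */
    signed char oal = (signed char)min(skb_shell + tx_headroom,
                                       (size_t)FSL_QMAN_MAX_OAL);
    /* 232 + 64 = 296 exceeds the cap, so oal == FSL_QMAN_MAX_OAL and QMan
     * under-accounts the real per-frame overhead - the "capped to an
     * insufficient value" case the driver comment describes. */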
++EXPORT_SYMBOL(_dpa_fq_free); ++ ++int __cold __attribute__((nonnull)) ++dpa_fq_free(struct device *dev, struct list_head *list) ++{ ++ int _errno, __errno; ++ struct dpa_fq *dpa_fq, *tmp; ++ ++ _errno = 0; ++ list_for_each_entry_safe(dpa_fq, tmp, list, list) { ++ __errno = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq); ++ if (unlikely(__errno < 0) && _errno >= 0) ++ _errno = __errno; ++ } ++ ++ return _errno; ++} ++EXPORT_SYMBOL(dpa_fq_free); ++ ++int dpa_fqs_init(struct device *dev, struct list_head *list, bool td_enable) ++{ ++ int _errno, __errno; ++ struct dpa_fq *dpa_fq, *tmp; ++ static bool print_msg __read_mostly; ++ ++ _errno = 0; ++ print_msg = true; ++ list_for_each_entry_safe(dpa_fq, tmp, list, list) { ++ __errno = dpa_fq_init(dpa_fq, td_enable); ++ if (unlikely(__errno < 0) && _errno >= 0) { ++ if (DPA_RX_PCD_HI_PRIO_FQ_INIT_FAIL(dpa_fq, __errno)) { ++ if (print_msg) { ++ dev_warn(dev, ++ "Skip RX PCD High Priority FQs initialization\n"); ++ print_msg = false; ++ } ++ if (_dpa_fq_free(dev, (struct qman_fq *)dpa_fq)) ++ dev_warn(dev, ++ "Error freeing frame queues\n"); ++ } else { ++ _errno = __errno; ++ break; ++ } ++ } ++ } ++ ++ return _errno; ++} ++EXPORT_SYMBOL(dpa_fqs_init); ++static void ++dpaa_eth_init_tx_port(struct fm_port *port, struct dpa_fq *errq, ++ struct dpa_fq *defq, struct dpa_buffer_layout_s *buf_layout) ++{ ++ struct fm_port_params tx_port_param; ++ bool frag_enabled = false; ++ ++ memset(&tx_port_param, 0, sizeof(tx_port_param)); ++ dpaa_eth_init_port(tx, port, tx_port_param, errq->fqid, defq->fqid, ++ buf_layout, frag_enabled); ++} ++ ++static void ++dpaa_eth_init_rx_port(struct fm_port *port, struct dpa_bp *bp, size_t count, ++ struct dpa_fq *errq, struct dpa_fq *defq, ++ struct dpa_buffer_layout_s *buf_layout) ++{ ++ struct fm_port_params rx_port_param; ++ int i; ++ bool frag_enabled = false; ++ ++ memset(&rx_port_param, 0, sizeof(rx_port_param)); ++ count = min(ARRAY_SIZE(rx_port_param.pool_param), count); ++ rx_port_param.num_pools = (uint8_t)count; ++ for (i = 0; i < count; i++) { ++ if (i >= rx_port_param.num_pools) ++ break; ++ rx_port_param.pool_param[i].id = bp[i].bpid; ++ rx_port_param.pool_param[i].size = (uint16_t)bp[i].size; ++ } ++ ++ dpaa_eth_init_port(rx, port, rx_port_param, errq->fqid, defq->fqid, ++ buf_layout, frag_enabled); ++} ++ ++#if defined(CONFIG_FSL_SDK_FMAN_TEST) ++/* Defined as weak, to be implemented by fman pcd tester. 
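The CONFIG_FSL_SDK_FMAN_TEST branch below declares the two PCD FQID callbacks weak so the fman PCD tester can override them. An illustrative strong override linked into the same image (the FQID value is made up):

    /* Illustrative only: with CONFIG_FSL_SDK_FMAN_TEST set, a tester
     * overrides the weak stubs simply by providing strong definitions
     * with the same signatures. */
    int dpa_alloc_pcd_fqids(struct device *dev, uint32_t num,
                            uint8_t alignment, uint32_t *base_fqid)
    {
            *base_fqid = 0x500;   /* hypothetical pre-reserved PCD range */
            return 0;
    }

    int dpa_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
    {
            return 0;             /* nothing to release in this sketch */
    }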
*/ ++int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *) ++__attribute__((weak)); ++ ++int dpa_free_pcd_fqids(struct device *, uint32_t) __attribute__((weak)); ++#else ++int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *); ++ ++int dpa_free_pcd_fqids(struct device *, uint32_t); ++ ++#endif /* CONFIG_FSL_SDK_FMAN_TEST */ ++ ++ ++int dpa_alloc_pcd_fqids(struct device *dev, uint32_t num, ++ uint8_t alignment, uint32_t *base_fqid) ++{ ++ dev_crit(dev, "callback not implemented!\n"); ++ ++ return 0; ++} ++ ++int dpa_free_pcd_fqids(struct device *dev, uint32_t base_fqid) ++{ ++ ++ dev_crit(dev, "callback not implemented!\n"); ++ ++ return 0; ++} ++ ++void dpaa_eth_init_ports(struct mac_device *mac_dev, ++ struct dpa_bp *bp, size_t count, ++ struct fm_port_fqs *port_fqs, ++ struct dpa_buffer_layout_s *buf_layout, ++ struct device *dev) ++{ ++ struct fm_port_pcd_param rx_port_pcd_param; ++ struct fm_port *rxport = mac_dev->port_dev[RX]; ++ struct fm_port *txport = mac_dev->port_dev[TX]; ++ ++ dpaa_eth_init_tx_port(txport, port_fqs->tx_errq, ++ port_fqs->tx_defq, &buf_layout[TX]); ++ dpaa_eth_init_rx_port(rxport, bp, count, port_fqs->rx_errq, ++ port_fqs->rx_defq, &buf_layout[RX]); ++ ++ rx_port_pcd_param.cba = dpa_alloc_pcd_fqids; ++ rx_port_pcd_param.cbf = dpa_free_pcd_fqids; ++ rx_port_pcd_param.dev = dev; ++ fm_port_pcd_bind(rxport, &rx_port_pcd_param); ++} ++EXPORT_SYMBOL(dpaa_eth_init_ports); ++ ++void dpa_release_sgt(struct qm_sg_entry *sgt) ++{ ++ struct dpa_bp *dpa_bp; ++ struct bm_buffer bmb[DPA_BUFF_RELEASE_MAX]; ++ uint8_t i = 0, j; ++ ++ memset(bmb, 0, DPA_BUFF_RELEASE_MAX * sizeof(struct bm_buffer)); ++ ++ do { ++ dpa_bp = dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i])); ++ DPA_BUG_ON(!dpa_bp); ++ ++ j = 0; ++ do { ++ DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i])); ++ bm_buffer_set64(&bmb[j], qm_sg_addr(&sgt[i])); ++ ++ j++; i++; ++ } while (j < ARRAY_SIZE(bmb) && ++ !qm_sg_entry_get_final(&sgt[i-1]) && ++ qm_sg_entry_get_bpid(&sgt[i-1]) == ++ qm_sg_entry_get_bpid(&sgt[i])); ++ ++ while (bman_release(dpa_bp->pool, bmb, j, 0)) ++ cpu_relax(); ++ } while (!qm_sg_entry_get_final(&sgt[i-1])); ++} ++EXPORT_SYMBOL(dpa_release_sgt); ++ ++void __attribute__((nonnull)) ++dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd) ++{ ++ struct qm_sg_entry *sgt; ++ struct dpa_bp *dpa_bp; ++ struct bm_buffer bmb; ++ dma_addr_t addr; ++ void *vaddr; ++ ++ bmb.opaque = 0; ++ bm_buffer_set64(&bmb, qm_fd_addr(fd)); ++ ++ dpa_bp = dpa_bpid2pool(fd->bpid); ++ DPA_BUG_ON(!dpa_bp); ++ ++ if (fd->format == qm_fd_sg) { ++ vaddr = phys_to_virt(qm_fd_addr(fd)); ++ sgt = vaddr + dpa_fd_offset(fd); ++ ++ dma_unmap_single(dpa_bp->dev, qm_fd_addr(fd), dpa_bp->size, ++ DMA_BIDIRECTIONAL); ++ ++ dpa_release_sgt(sgt); ++ addr = dma_map_single(dpa_bp->dev, vaddr, dpa_bp->size, ++ DMA_BIDIRECTIONAL); ++ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) { ++ dev_err(dpa_bp->dev, "DMA mapping failed"); ++ return; ++ } ++ bm_buffer_set64(&bmb, addr); ++ } ++ ++ while (bman_release(dpa_bp->pool, &bmb, 1, 0)) ++ cpu_relax(); ++} ++EXPORT_SYMBOL(dpa_fd_release); ++ ++void count_ern(struct dpa_percpu_priv_s *percpu_priv, ++ const struct qm_mr_entry *msg) ++{ ++ switch (msg->ern.rc & QM_MR_RC_MASK) { ++ case QM_MR_RC_CGR_TAILDROP: ++ percpu_priv->ern_cnt.cg_tdrop++; ++ break; ++ case QM_MR_RC_WRED: ++ percpu_priv->ern_cnt.wred++; ++ break; ++ case QM_MR_RC_ERROR: ++ percpu_priv->ern_cnt.err_cond++; ++ break; ++ case QM_MR_RC_ORPWINDOW_EARLY: ++ 
percpu_priv->ern_cnt.early_window++; ++ break; ++ case QM_MR_RC_ORPWINDOW_LATE: ++ percpu_priv->ern_cnt.late_window++; ++ break; ++ case QM_MR_RC_FQ_TAILDROP: ++ percpu_priv->ern_cnt.fq_tdrop++; ++ break; ++ case QM_MR_RC_ORPWINDOW_RETIRED: ++ percpu_priv->ern_cnt.fq_retired++; ++ break; ++ case QM_MR_RC_ORP_ZERO: ++ percpu_priv->ern_cnt.orp_zero++; ++ break; ++ } ++} ++EXPORT_SYMBOL(count_ern); ++ ++/** ++ * Turn on HW checksum computation for this outgoing frame. ++ * If the current protocol is not something we support in this regard ++ * (or if the stack has already computed the SW checksum), we do nothing. ++ * ++ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value ++ * otherwise. ++ * ++ * Note that this function may modify the fd->cmd field and the skb data buffer ++ * (the Parse Results area). ++ */ ++int dpa_enable_tx_csum(struct dpa_priv_s *priv, ++ struct sk_buff *skb, struct qm_fd *fd, char *parse_results) ++{ ++ fm_prs_result_t *parse_result; ++ struct iphdr *iph; ++ struct ipv6hdr *ipv6h = NULL; ++ u8 l4_proto; ++ u16 ethertype = ntohs(skb->protocol); ++ int retval = 0; ++ ++ if (skb->ip_summed != CHECKSUM_PARTIAL) ++ return 0; ++ ++ /* Note: L3 csum seems to be already computed in sw, but we can't choose ++ * L4 alone from the FM configuration anyway. ++ */ ++ ++ /* Fill in some fields of the Parse Results array, so the FMan ++ * can find them as if they came from the FMan Parser. ++ */ ++ parse_result = (fm_prs_result_t *)parse_results; ++ ++ /* If we're dealing with VLAN, get the real Ethernet type */ ++ if (ethertype == ETH_P_8021Q) { ++ /* We can't always assume the MAC header is set correctly ++ * by the stack, so reset to beginning of skb->data ++ */ ++ skb_reset_mac_header(skb); ++ ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto); ++ } ++ ++ /* Fill in the relevant L3 parse result fields ++ * and read the L4 protocol type ++ */ ++ switch (ethertype) { ++ case ETH_P_IP: ++ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4); ++ iph = ip_hdr(skb); ++ DPA_BUG_ON(iph == NULL); ++ l4_proto = iph->protocol; ++ break; ++ case ETH_P_IPV6: ++ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6); ++ ipv6h = ipv6_hdr(skb); ++ DPA_BUG_ON(ipv6h == NULL); ++ l4_proto = ipv6h->nexthdr; ++ break; ++ default: ++ /* We shouldn't even be here */ ++ if (netif_msg_tx_err(priv) && net_ratelimit()) ++ netdev_alert(priv->net_dev, ++ "Can't compute HW csum for L3 proto 0x%x\n", ++ ntohs(skb->protocol)); ++ retval = -EIO; ++ goto return_error; ++ } ++ ++ /* Fill in the relevant L4 parse result fields */ ++ switch (l4_proto) { ++ case IPPROTO_UDP: ++ parse_result->l4r = FM_L4_PARSE_RESULT_UDP; ++ break; ++ case IPPROTO_TCP: ++ parse_result->l4r = FM_L4_PARSE_RESULT_TCP; ++ break; ++ default: ++ /* This can as well be a BUG() */ ++ if (netif_msg_tx_err(priv) && net_ratelimit()) ++ netdev_alert(priv->net_dev, ++ "Can't compute HW csum for L4 proto 0x%x\n", ++ l4_proto); ++ retval = -EIO; ++ goto return_error; ++ } ++ ++ /* At index 0 is IPOffset_1 as defined in the Parse Results */ ++ parse_result->ip_off[0] = (uint8_t)skb_network_offset(skb); ++ parse_result->l4_off = (uint8_t)skb_transport_offset(skb); ++ ++ /* Enable L3 (and L4, if TCP or UDP) HW checksum. 
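For concreteness, the end state of the parse-results patching above for an untagged IPv4/UDP frame (offsets follow the standard 14-byte Ethernet and 20-byte option-less IPv4 headers):

    /* Concrete values dpa_enable_tx_csum() ends up writing for a plain
     * IPv4/UDP frame; the offsets are illustrative. */
    parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
    parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
    parse_result->ip_off[0] = 14;   /* skb_network_offset(): after ETH hdr */
    parse_result->l4_off    = 34;   /* skb_transport_offset(): 14 + 20 */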
*/ ++ fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC; ++ ++ /* On P1023 and similar platforms fd->cmd interpretation could ++ * be disabled by setting CONTEXT_A bit ICMD; currently this bit ++ * is not set so we do not need to check; in the future, if/when ++ * using context_a we need to check this bit ++ */ ++ ++return_error: ++ return retval; ++} ++EXPORT_SYMBOL(dpa_enable_tx_csum); ++ ++#ifdef CONFIG_FSL_DPAA_CEETM ++void dpa_enable_ceetm(struct net_device *dev) ++{ ++ struct dpa_priv_s *priv = netdev_priv(dev); ++ priv->ceetm_en = true; ++} ++EXPORT_SYMBOL(dpa_enable_ceetm); ++ ++void dpa_disable_ceetm(struct net_device *dev) ++{ ++ struct dpa_priv_s *priv = netdev_priv(dev); ++ priv->ceetm_en = false; ++} ++EXPORT_SYMBOL(dpa_disable_ceetm); ++#endif +--- /dev/null ++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h +@@ -0,0 +1,225 @@ ++/* Copyright 2008-2013 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */
++
++#ifndef __DPAA_ETH_COMMON_H
++#define __DPAA_ETH_COMMON_H
++
++#include <linux/netdevice.h>	/* struct net_device */
++#include <linux/fsl_bman.h>	/* struct bm_buffer */
++#include <linux/platform_device.h>	/* struct platform_device */
++#include <linux/net_tstamp.h>	/* struct hwtstamp_config */
++
++#include "dpaa_eth.h"
++#include "lnxwrp_fsl_fman.h"
++
++#define dpaa_eth_init_port(type, port, param, errq_id, defq_id, buf_layout,\
++		frag_enabled) \
++{ \
++	param.errq = errq_id; \
++	param.defq = defq_id; \
++	param.priv_data_size = buf_layout->priv_data_size; \
++	param.parse_results = buf_layout->parse_results; \
++	param.hash_results = buf_layout->hash_results; \
++	param.frag_enable = frag_enabled; \
++	param.time_stamp = buf_layout->time_stamp; \
++	param.manip_extra_space = buf_layout->manip_extra_space; \
++	param.data_align = buf_layout->data_align; \
++	fm_set_##type##_port_params(port, &param); \
++}
++
++#define DPA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
++
++#define DPA_SGT_ENTRIES_THRESHOLD	DPA_SGT_MAX_ENTRIES
++
++#define DPA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
++
++#define DPA_RX_PCD_HI_PRIO_FQ_INIT_FAIL(dpa_fq, _errno) \
++	(((dpa_fq)->fq_type == FQ_TYPE_RX_PCD_HI_PRIO) && \
++	  (_errno == -EIO))
++/* return codes for the dpaa-eth hooks */
++enum dpaa_eth_hook_result {
++	/* fd/skb was retained by the hook.
++	 *
++	 * On the Rx path, this means the Ethernet driver will _not_
++	 * deliver the skb to the stack. Instead, the hook implementation
++	 * is expected to properly dispose of the skb.
++	 *
++	 * On the Tx path, the Ethernet driver's dpa_tx() function will
++	 * immediately return NETDEV_TX_OK. The hook implementation is expected
++	 * to free the skb. *DO*NOT* release it to BMan, or enqueue it to FMan,
++	 * unless you know exactly what you're doing!
++	 *
++	 * On the confirmation/error paths, the Ethernet driver will _not_
++	 * perform any fd cleanup, nor update the interface statistics.
++	 */
++	DPAA_ETH_STOLEN,
++	/* fd/skb was returned to the Ethernet driver for regular processing.
++	 * The hook is not allowed to, for instance, reallocate the skb (as if
++	 * by linearizing, copying, cloning or reallocating the headroom).
++	 */
++	DPAA_ETH_CONTINUE
++};
++
++typedef enum dpaa_eth_hook_result (*dpaa_eth_ingress_hook_t)(
++	struct sk_buff *skb, struct net_device *net_dev, u32 fqid);
++typedef enum dpaa_eth_hook_result (*dpaa_eth_egress_hook_t)(
++	struct sk_buff *skb, struct net_device *net_dev);
++typedef enum dpaa_eth_hook_result (*dpaa_eth_confirm_hook_t)(
++	struct net_device *net_dev, const struct qm_fd *fd, u32 fqid);
++
++/* used in napi related functions */
++extern u16 qman_portal_max;
++
++/* from dpa_ethtool.c */
++extern const struct ethtool_ops dpa_ethtool_ops;
++
++#ifdef CONFIG_FSL_DPAA_HOOKS
++/* Various hooks used for unit-testing and/or fastpath optimizations.
++ * Currently only one set of such hooks is supported.
++ */
++struct dpaa_eth_hooks_s {
++	/* Invoked on the Tx private path, immediately after receiving the skb
++	 * from the stack.
++	 */
++	dpaa_eth_egress_hook_t tx;
++
++	/* Invoked on the Rx private path, right before passing the skb
++	 * up the stack. At that point, the packet's protocol id has already
++	 * been set. The skb's data pointer is now at the L3 header, and
++	 * skb->mac_header points to the L2 header. skb->len has been adjusted
++	 * to be the length of L3+payload (i.e., the length of the
++	 * original frame minus the L2 header len).
++	 * For more details on what the skb looks like, see eth_type_trans().
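A sketch of a consumer honoring the STOLEN/CONTINUE contract documented above; the hook, its filter, and the registration timing are hypothetical, with fsl_dpaa_eth_set_hooks() as declared further down in this header:

    /* Hypothetical Rx hook: steal IPv4 frames, let everything else
     * continue up the stack. "Stolen" frames must be disposed of by the
     * hook itself, per the enum documentation above. */
    static enum dpaa_eth_hook_result my_rx_hook(struct sk_buff *skb,
                                                struct net_device *net_dev,
                                                u32 fqid)
    {
            if (skb->protocol != htons(ETH_P_IP))
                    return DPAA_ETH_CONTINUE;  /* normal stack delivery */

            dev_kfree_skb(skb);                /* hook owns the skb now */
            return DPAA_ETH_STOLEN;
    }

    static struct dpaa_eth_hooks_s my_hooks = {
            .rx_default = my_rx_hook,
    };
    /* registered early, e.g. from the consumer's module init:
     *   fsl_dpaa_eth_set_hooks(&my_hooks); */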
++ */ ++ dpaa_eth_ingress_hook_t rx_default; ++ ++ /* Driver hook for the Rx error private path. */ ++ dpaa_eth_confirm_hook_t rx_error; ++ /* Driver hook for the Tx confirmation private path. */ ++ dpaa_eth_confirm_hook_t tx_confirm; ++ /* Driver hook for the Tx error private path. */ ++ dpaa_eth_confirm_hook_t tx_error; ++}; ++ ++void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks); ++ ++extern struct dpaa_eth_hooks_s dpaa_eth_hooks; ++#endif ++ ++int dpa_netdev_init(struct net_device *net_dev, ++ const uint8_t *mac_addr, ++ uint16_t tx_timeout); ++int __cold dpa_start(struct net_device *net_dev); ++int __cold dpa_stop(struct net_device *net_dev); ++void __cold dpa_timeout(struct net_device *net_dev); ++void __cold ++dpa_get_stats64(struct net_device *net_dev, ++ struct rtnl_link_stats64 *stats); ++int dpa_change_mtu(struct net_device *net_dev, int new_mtu); ++int dpa_ndo_init(struct net_device *net_dev); ++int dpa_set_features(struct net_device *dev, netdev_features_t features); ++netdev_features_t dpa_fix_features(struct net_device *dev, ++ netdev_features_t features); ++#ifdef CONFIG_FSL_DPAA_TS ++u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv, ++ enum port_type rx_tx, const void *data); ++/* Updates the skb shared hw timestamp from the hardware timestamp */ ++int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx, ++ struct skb_shared_hwtstamps *shhwtstamps, const void *data); ++#endif /* CONFIG_FSL_DPAA_TS */ ++int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); ++int __cold dpa_remove(struct platform_device *of_dev); ++struct mac_device * __cold __must_check ++__attribute__((nonnull)) dpa_mac_probe(struct platform_device *_of_dev); ++int dpa_set_mac_address(struct net_device *net_dev, void *addr); ++void dpa_set_rx_mode(struct net_device *net_dev); ++void dpa_set_buffers_layout(struct mac_device *mac_dev, ++ struct dpa_buffer_layout_s *layout); ++int __attribute__((nonnull)) ++dpa_bp_alloc(struct dpa_bp *dpa_bp); ++void __cold __attribute__((nonnull)) ++dpa_bp_free(struct dpa_priv_s *priv); ++struct dpa_bp *dpa_bpid2pool(int bpid); ++void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp); ++bool dpa_bpid2pool_use(int bpid); ++void dpa_bp_drain(struct dpa_bp *bp); ++#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE ++u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb, ++ void *accel_priv, select_queue_fallback_t fallback); ++#endif ++struct dpa_fq *dpa_fq_alloc(struct device *dev, ++ u32 fq_start, ++ u32 fq_count, ++ struct list_head *list, ++ enum dpa_fq_type fq_type); ++int dpa_fq_probe_mac(struct device *dev, struct list_head *list, ++ struct fm_port_fqs *port_fqs, ++ bool tx_conf_fqs_per_core, ++ enum port_type ptype); ++int dpa_get_channel(void); ++void dpa_release_channel(void); ++void dpaa_eth_add_channel(u16 channel); ++int dpaa_eth_cgr_init(struct dpa_priv_s *priv); ++void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs, ++ struct fm_port *tx_port); ++int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable); ++int dpa_fqs_init(struct device *dev, struct list_head *list, bool td_enable); ++int __cold __attribute__((nonnull)) ++dpa_fq_free(struct device *dev, struct list_head *list); ++void dpaa_eth_init_ports(struct mac_device *mac_dev, ++ struct dpa_bp *bp, size_t count, ++ struct fm_port_fqs *port_fqs, ++ struct dpa_buffer_layout_s *buf_layout, ++ struct device *dev); ++void dpa_release_sgt(struct qm_sg_entry *sgt); ++void __attribute__((nonnull)) ++dpa_fd_release(const struct net_device 
*net_dev, const struct qm_fd *fd); ++void count_ern(struct dpa_percpu_priv_s *percpu_priv, ++ const struct qm_mr_entry *msg); ++int dpa_enable_tx_csum(struct dpa_priv_s *priv, ++ struct sk_buff *skb, struct qm_fd *fd, char *parse_results); ++#ifdef CONFIG_FSL_DPAA_CEETM ++void dpa_enable_ceetm(struct net_device *dev); ++void dpa_disable_ceetm(struct net_device *dev); ++#endif ++struct proxy_device { ++ struct mac_device *mac_dev; ++}; ++ ++/* mac device control functions exposed by proxy interface*/ ++int dpa_proxy_start(struct net_device *net_dev); ++int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev); ++int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev, ++ struct net_device *net_dev); ++int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev, ++ struct net_device *net_dev); ++ ++#endif /* __DPAA_ETH_COMMON_H */ +--- /dev/null ++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c +@@ -0,0 +1,381 @@ ++/* Copyright 2008-2013 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */
++
++#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
++#define pr_fmt(fmt) \
++	KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
++	KBUILD_BASENAME".c", __LINE__, __func__
++#else
++#define pr_fmt(fmt) \
++	KBUILD_MODNAME ": " fmt
++#endif
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/of_platform.h>
++#include "dpaa_eth.h"
++#include "dpaa_eth_common.h"
++#include "dpaa_eth_base.h"
++#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
++#include "mac.h"
++
++#define DPA_DESCRIPTION "FSL DPAA Proxy initialization driver"
++
++MODULE_LICENSE("Dual BSD/GPL");
++
++MODULE_DESCRIPTION(DPA_DESCRIPTION);
++
++static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev);
++#ifdef CONFIG_PM
++
++static int proxy_suspend(struct device *dev)
++{
++	struct proxy_device *proxy_dev = dev_get_drvdata(dev);
++	struct mac_device *mac_dev = proxy_dev->mac_dev;
++	int err = 0;
++
++	err = fm_port_suspend(mac_dev->port_dev[RX]);
++	if (err)
++		goto port_suspend_failed;
++
++	err = fm_port_suspend(mac_dev->port_dev[TX]);
++	if (err)
++		err = fm_port_resume(mac_dev->port_dev[RX]);
++
++port_suspend_failed:
++	return err;
++}
++
++static int proxy_resume(struct device *dev)
++{
++	struct proxy_device *proxy_dev = dev_get_drvdata(dev);
++	struct mac_device *mac_dev = proxy_dev->mac_dev;
++	int err = 0;
++
++	err = fm_port_resume(mac_dev->port_dev[TX]);
++	if (err)
++		goto port_resume_failed;
++
++	err = fm_port_resume(mac_dev->port_dev[RX]);
++	if (err)
++		err = fm_port_suspend(mac_dev->port_dev[TX]);
++
++port_resume_failed:
++	return err;
++}
++
++static const struct dev_pm_ops proxy_pm_ops = {
++	.suspend = proxy_suspend,
++	.resume = proxy_resume,
++};
++
++#define PROXY_PM_OPS (&proxy_pm_ops)
++
++#else /* CONFIG_PM */
++
++#define PROXY_PM_OPS NULL
++
++#endif /* CONFIG_PM */
++
++static int dpaa_eth_proxy_probe(struct platform_device *_of_dev)
++{
++	int err = 0, i;
++	struct device *dev;
++	struct device_node *dpa_node;
++	struct dpa_bp *dpa_bp;
++	struct list_head proxy_fq_list;
++	size_t count;
++	struct fm_port_fqs port_fqs;
++	struct dpa_buffer_layout_s *buf_layout = NULL;
++	struct mac_device *mac_dev;
++	struct proxy_device *proxy_dev;
++
++	dev = &_of_dev->dev;
++
++	dpa_node = dev->of_node;
++
++	if (!of_device_is_available(dpa_node))
++		return -ENODEV;
++
++	/* Get the buffer pools assigned to this interface */
++	dpa_bp = dpa_bp_probe(_of_dev, &count);
++	if (IS_ERR(dpa_bp))
++		return PTR_ERR(dpa_bp);
++
++	mac_dev = dpa_mac_probe(_of_dev);
++	if (IS_ERR(mac_dev))
++		return PTR_ERR(mac_dev);
++
++	proxy_dev = devm_kzalloc(dev, sizeof(*proxy_dev), GFP_KERNEL);
++	if (!proxy_dev) {
++		dev_err(dev, "devm_kzalloc() failed\n");
++		return -ENOMEM;
++	}
++
++	proxy_dev->mac_dev = mac_dev;
++	dev_set_drvdata(dev, proxy_dev);
++
++	/* We have physical ports, so we need to establish
++	 * the buffer layout.
++	 */
++	buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
++				  GFP_KERNEL);
++	if (!buf_layout) {
++		dev_err(dev, "devm_kzalloc() failed\n");
++		return -ENOMEM;
++	}
++	dpa_set_buffers_layout(mac_dev, buf_layout);
++
++	INIT_LIST_HEAD(&proxy_fq_list);
++
++	memset(&port_fqs, 0, sizeof(port_fqs));
++
++	err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true, RX);
++	if (!err)
++		err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true,
++				       TX);
++	if (err < 0) {
++		devm_kfree(dev, buf_layout);
++		return err;
++	}
++
++	/* Proxy initializer - Just configures the MAC on behalf of
++	 * another partition.
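The suspend/resume pair above is deliberately asymmetric; a short recap of the rollback behavior:

    /* proxy_suspend(): suspend RX first, then TX; if TX fails, RX is
     *                  resumed again, leaving both ports running.
     * proxy_resume():  resume TX first, then RX; if RX fails, TX is
     *                  suspended again, leaving both ports stopped.
     * Either path therefore ends in a consistent all-on or all-off
     * state, never halfway. */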
++ */ ++ dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs, ++ buf_layout, dev); ++ ++ /* Proxy interfaces need to be started, and the allocated ++ * memory freed ++ */ ++ devm_kfree(dev, buf_layout); ++ devm_kfree(dev, dpa_bp); ++ ++ /* Free FQ structures */ ++ devm_kfree(dev, port_fqs.rx_defq); ++ devm_kfree(dev, port_fqs.rx_errq); ++ devm_kfree(dev, port_fqs.tx_defq); ++ devm_kfree(dev, port_fqs.tx_errq); ++ ++ for_each_port_device(i, mac_dev->port_dev) { ++ err = fm_port_enable(mac_dev->port_dev[i]); ++ if (err) ++ goto port_enable_fail; ++ } ++ ++ dev_info(dev, "probed MAC device with MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n", ++ mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2], ++ mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]); ++ ++ return 0; /* Proxy interface initialization ended */ ++ ++port_enable_fail: ++ for_each_port_device(i, mac_dev->port_dev) ++ fm_port_disable(mac_dev->port_dev[i]); ++ dpa_eth_proxy_remove(_of_dev); ++ ++ return err; ++} ++ ++int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev, ++ struct net_device *net_dev) ++{ ++ struct mac_device *mac_dev; ++ int _errno; ++ ++ mac_dev = proxy_dev->mac_dev; ++ ++ _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev), ++ net_dev->dev_addr); ++ if (_errno < 0) ++ return _errno; ++ ++ return 0; ++} ++EXPORT_SYMBOL(dpa_proxy_set_mac_address); ++ ++int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev, ++ struct net_device *net_dev) ++{ ++ struct mac_device *mac_dev = proxy_dev->mac_dev; ++ int _errno; ++ ++ if (!!(net_dev->flags & IFF_PROMISC) != mac_dev->promisc) { ++ mac_dev->promisc = !mac_dev->promisc; ++ _errno = mac_dev->set_promisc(mac_dev->get_mac_handle(mac_dev), ++ mac_dev->promisc); ++ if (unlikely(_errno < 0)) ++ netdev_err(net_dev, "mac_dev->set_promisc() = %d\n", ++ _errno); ++ } ++ ++ _errno = mac_dev->set_multi(net_dev, mac_dev); ++ if (unlikely(_errno < 0)) ++ return _errno; ++ ++ return 0; ++} ++EXPORT_SYMBOL(dpa_proxy_set_rx_mode); ++ ++int dpa_proxy_start(struct net_device *net_dev) ++{ ++ struct mac_device *mac_dev; ++ const struct dpa_priv_s *priv; ++ struct proxy_device *proxy_dev; ++ int _errno; ++ int i; ++ ++ priv = netdev_priv(net_dev); ++ proxy_dev = (struct proxy_device *)priv->peer; ++ mac_dev = proxy_dev->mac_dev; ++ ++ _errno = mac_dev->init_phy(net_dev, mac_dev); ++ if (_errno < 0) { ++ if (netif_msg_drv(priv)) ++ netdev_err(net_dev, "init_phy() = %d\n", ++ _errno); ++ return _errno; ++ } ++ ++ for_each_port_device(i, mac_dev->port_dev) { ++ _errno = fm_port_enable(mac_dev->port_dev[i]); ++ if (_errno) ++ goto port_enable_fail; ++ } ++ ++ _errno = mac_dev->start(mac_dev); ++ if (_errno < 0) { ++ if (netif_msg_drv(priv)) ++ netdev_err(net_dev, "mac_dev->start() = %d\n", ++ _errno); ++ goto port_enable_fail; ++ } ++ ++ return _errno; ++ ++port_enable_fail: ++ for_each_port_device(i, mac_dev->port_dev) ++ fm_port_disable(mac_dev->port_dev[i]); ++ ++ return _errno; ++} ++EXPORT_SYMBOL(dpa_proxy_start); ++ ++int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev) ++{ ++ struct mac_device *mac_dev = proxy_dev->mac_dev; ++ const struct dpa_priv_s *priv = netdev_priv(net_dev); ++ int _errno, i, err; ++ ++ _errno = mac_dev->stop(mac_dev); ++ if (_errno < 0) { ++ if (netif_msg_drv(priv)) ++ netdev_err(net_dev, "mac_dev->stop() = %d\n", ++ _errno); ++ return _errno; ++ } ++ ++ for_each_port_device(i, mac_dev->port_dev) { ++ err = fm_port_disable(mac_dev->port_dev[i]); ++ _errno = err ? 
err : _errno; ++ } ++ ++ if (mac_dev->phy_dev) ++ phy_disconnect(mac_dev->phy_dev); ++ mac_dev->phy_dev = NULL; ++ ++ return _errno; ++} ++EXPORT_SYMBOL(dpa_proxy_stop); ++ ++static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev) ++{ ++ struct device *dev = &of_dev->dev; ++ struct proxy_device *proxy_dev = dev_get_drvdata(dev); ++ ++ kfree(proxy_dev); ++ ++ dev_set_drvdata(dev, NULL); ++ ++ return 0; ++} ++ ++static const struct of_device_id dpa_proxy_match[] = { ++ { ++ .compatible = "fsl,dpa-ethernet-init" ++ }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, dpa_proxy_match); ++ ++static struct platform_driver dpa_proxy_driver = { ++ .driver = { ++ .name = KBUILD_MODNAME "-proxy", ++ .of_match_table = dpa_proxy_match, ++ .owner = THIS_MODULE, ++ .pm = PROXY_PM_OPS, ++ }, ++ .probe = dpaa_eth_proxy_probe, ++ .remove = dpa_eth_proxy_remove ++}; ++ ++static int __init __cold dpa_proxy_load(void) ++{ ++ int _errno; ++ ++ pr_info(DPA_DESCRIPTION "\n"); ++ ++ /* Initialize dpaa_eth mirror values */ ++ dpa_rx_extra_headroom = fm_get_rx_extra_headroom(); ++ dpa_max_frm = fm_get_max_frm(); ++ ++ _errno = platform_driver_register(&dpa_proxy_driver); ++ if (unlikely(_errno < 0)) { ++ pr_err(KBUILD_MODNAME ++ ": %s:%hu:%s(): platform_driver_register() = %d\n", ++ KBUILD_BASENAME".c", __LINE__, __func__, _errno); ++ } ++ ++ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", ++ KBUILD_BASENAME".c", __func__); ++ ++ return _errno; ++} ++module_init(dpa_proxy_load); ++ ++static void __exit __cold dpa_proxy_unload(void) ++{ ++ platform_driver_unregister(&dpa_proxy_driver); ++ ++ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", ++ KBUILD_BASENAME".c", __func__); ++} ++module_exit(dpa_proxy_unload); +--- /dev/null ++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c +@@ -0,0 +1,1179 @@ ++/* Copyright 2012 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifdef CONFIG_FSL_DPAA_ETH_DEBUG ++#define pr_fmt(fmt) \ ++ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \ ++ KBUILD_BASENAME".c", __LINE__, __func__ ++#else ++#define pr_fmt(fmt) \ ++ KBUILD_MODNAME ": " fmt ++#endif ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "dpaa_eth.h" ++#include "dpaa_eth_common.h" ++#ifdef CONFIG_FSL_DPAA_1588 ++#include "dpaa_1588.h" ++#endif ++#ifdef CONFIG_FSL_DPAA_CEETM ++#include "dpaa_eth_ceetm.h" ++#endif ++ ++/* DMA map and add a page frag back into the bpool. ++ * @vaddr fragment must have been allocated with netdev_alloc_frag(), ++ * specifically for fitting into @dpa_bp. ++ */ ++static void dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, unsigned long vaddr, ++ int *count_ptr) ++{ ++ struct bm_buffer bmb; ++ dma_addr_t addr; ++ ++ bmb.opaque = 0; ++ ++ addr = dma_map_single(dpa_bp->dev, (void *)vaddr, dpa_bp->size, ++ DMA_BIDIRECTIONAL); ++ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) { ++ dev_err(dpa_bp->dev, "DMA mapping failed"); ++ return; ++ } ++ ++ bm_buffer_set64(&bmb, addr); ++ ++ while (bman_release(dpa_bp->pool, &bmb, 1, 0)) ++ cpu_relax(); ++ ++ (*count_ptr)++; ++} ++ ++static int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp) ++{ ++ struct bm_buffer bmb[8]; ++ void *new_buf; ++ dma_addr_t addr; ++ uint8_t i; ++ struct device *dev = dpa_bp->dev; ++ struct sk_buff *skb, **skbh; ++ ++ memset(bmb, 0, sizeof(struct bm_buffer) * 8); ++ ++ for (i = 0; i < 8; i++) { ++ /* We'll prepend the skb back-pointer; can't use the DPA ++ * priv space, because FMan will overwrite it (from offset 0) ++ * if it ends up being the second, third, etc. fragment ++ * in a S/G frame. ++ * ++ * We only need enough space to store a pointer, but allocate ++ * an entire cacheline for performance reasons. ++ */ ++#ifndef CONFIG_PPC ++ if (unlikely(dpaa_errata_a010022)) { ++ struct page *new_page = alloc_page(GFP_ATOMIC); ++ if (unlikely(!new_page)) ++ goto netdev_alloc_failed; ++ new_buf = page_address(new_page); ++ } ++ else ++#endif ++ new_buf = netdev_alloc_frag(SMP_CACHE_BYTES + DPA_BP_RAW_SIZE); ++ ++ if (unlikely(!new_buf)) ++ goto netdev_alloc_failed; ++ new_buf = PTR_ALIGN(new_buf + SMP_CACHE_BYTES, SMP_CACHE_BYTES); ++ ++ skb = build_skb(new_buf, DPA_SKB_SIZE(dpa_bp->size) + ++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); ++ if (unlikely(!skb)) { ++ put_page(virt_to_head_page(new_buf)); ++ goto build_skb_failed; ++ } ++ DPA_WRITE_SKB_PTR(skb, skbh, new_buf, -1); ++ ++ addr = dma_map_single(dev, new_buf, ++ dpa_bp->size, DMA_BIDIRECTIONAL); ++ if (unlikely(dma_mapping_error(dev, addr))) ++ goto dma_map_failed; ++ ++ bm_buffer_set64(&bmb[i], addr); ++ } ++ ++release_bufs: ++ /* Release the buffers. In case bman is busy, keep trying ++ * until successful. 
bman_release() is guaranteed to succeed ++ * in a reasonable amount of time ++ */ ++ while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0))) ++ cpu_relax(); ++ return i; ++ ++dma_map_failed: ++ kfree_skb(skb); ++ ++build_skb_failed: ++netdev_alloc_failed: ++ net_err_ratelimited("dpa_bp_add_8_bufs() failed\n"); ++ WARN_ONCE(1, "Memory allocation failure on Rx\n"); ++ ++ bm_buffer_set64(&bmb[i], 0); ++ /* Avoid releasing a completely null buffer; bman_release() requires ++ * at least one buffer. ++ */ ++ if (likely(i)) ++ goto release_bufs; ++ ++ return 0; ++} ++ ++/* Cold path wrapper over _dpa_bp_add_8_bufs(). */ ++static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu) ++{ ++ int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu); ++ *count_ptr += _dpa_bp_add_8_bufs(dpa_bp); ++} ++ ++int dpa_bp_priv_seed(struct dpa_bp *dpa_bp) ++{ ++ int i; ++ ++ /* Give each CPU an allotment of "config_count" buffers */ ++ for_each_possible_cpu(i) { ++ int j; ++ ++ /* Although we access another CPU's counters here ++ * we do it at boot time so it is safe ++ */ ++ for (j = 0; j < dpa_bp->config_count; j += 8) ++ dpa_bp_add_8_bufs(dpa_bp, i); ++ } ++ return 0; ++} ++EXPORT_SYMBOL(dpa_bp_priv_seed); ++ ++/* Add buffers/(pages) for Rx processing whenever bpool count falls below ++ * REFILL_THRESHOLD. ++ */ ++int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *countptr) ++{ ++ int count = *countptr; ++ int new_bufs; ++ ++ if (unlikely(count < CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD)) { ++ do { ++ new_bufs = _dpa_bp_add_8_bufs(dpa_bp); ++ if (unlikely(!new_bufs)) { ++ /* Avoid looping forever if we've temporarily ++ * run out of memory. We'll try again at the ++ * next NAPI cycle. ++ */ ++ break; ++ } ++ count += new_bufs; ++ } while (count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT); ++ ++ *countptr = count; ++ if (unlikely(count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT)) ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL(dpaa_eth_refill_bpools); ++ ++/* Cleanup function for outgoing frame descriptors that were built on Tx path, ++ * either contiguous frames or scatter/gather ones. ++ * Skb freeing is not handled here. ++ * ++ * This function may be called on error paths in the Tx function, so guard ++ * against cases when not all fd relevant fields were filled in. ++ * ++ * Return the skb backpointer, since for S/G frames the buffer containing it ++ * gets freed here. ++ */ ++struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv, ++ const struct qm_fd *fd) ++{ ++ const struct qm_sg_entry *sgt; ++ int i; ++ struct dpa_bp *dpa_bp = priv->dpa_bp; ++ dma_addr_t addr = qm_fd_addr(fd); ++ dma_addr_t sg_addr; ++ struct sk_buff **skbh; ++ struct sk_buff *skb = NULL; ++ const enum dma_data_direction dma_dir = DMA_TO_DEVICE; ++ int nr_frags; ++ int sg_len; ++ ++ /* retrieve skb back pointer */ ++ DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), 0); ++ ++ if (unlikely(fd->format == qm_fd_sg)) { ++ nr_frags = skb_shinfo(skb)->nr_frags; ++ dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) + ++ sizeof(struct qm_sg_entry) * (1 + nr_frags), ++ dma_dir); ++ ++ /* The sgt buffer has been allocated with netdev_alloc_frag(), ++ * it's from lowmem. 
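A worked instance of the refill policy above, using hypothetical Kconfig values:

    /* Worked example, assuming REFILL_THRESHOLD = 80 and
     * MAX_BUF_COUNT = 128 (the real values come from Kconfig): a per-cpu
     * count that has dropped to 64 triggers (128 - 64) / 8 = 8 calls to
     * _dpa_bp_add_8_bufs(); a count of 80 or more leaves the pool
     * untouched. -ENOMEM is returned only when an allocation failure
     * stops the refill short of MAX_BUF_COUNT. */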
++ */ ++ sgt = phys_to_virt(addr + dpa_fd_offset(fd)); ++#ifdef CONFIG_FSL_DPAA_1588 ++ if (priv->tsu && priv->tsu->valid && ++ priv->tsu->hwts_tx_en_ioctl) ++ dpa_ptp_store_txstamp(priv, skb, (void *)skbh); ++#endif ++#ifdef CONFIG_FSL_DPAA_TS ++ if (unlikely(priv->ts_tx_en && ++ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { ++ struct skb_shared_hwtstamps shhwtstamps; ++ ++ dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh); ++ skb_tstamp_tx(skb, &shhwtstamps); ++ } ++#endif /* CONFIG_FSL_DPAA_TS */ ++ ++ /* sgt[0] is from lowmem, was dma_map_single()-ed */ ++ sg_addr = qm_sg_addr(&sgt[0]); ++ sg_len = qm_sg_entry_get_len(&sgt[0]); ++ dma_unmap_single(dpa_bp->dev, sg_addr, sg_len, dma_dir); ++ ++ /* remaining pages were mapped with dma_map_page() */ ++ for (i = 1; i <= nr_frags; i++) { ++ DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i])); ++ sg_addr = qm_sg_addr(&sgt[i]); ++ sg_len = qm_sg_entry_get_len(&sgt[i]); ++ dma_unmap_page(dpa_bp->dev, sg_addr, sg_len, dma_dir); ++ } ++ ++ /* Free the page frag that we allocated on Tx */ ++ put_page(virt_to_head_page(sgt)); ++ } else { ++ dma_unmap_single(dpa_bp->dev, addr, ++ skb_tail_pointer(skb) - (u8 *)skbh, dma_dir); ++#ifdef CONFIG_FSL_DPAA_TS ++ /* get the timestamp for non-SG frames */ ++#ifdef CONFIG_FSL_DPAA_1588 ++ if (priv->tsu && priv->tsu->valid && ++ priv->tsu->hwts_tx_en_ioctl) ++ dpa_ptp_store_txstamp(priv, skb, (void *)skbh); ++#endif ++ if (unlikely(priv->ts_tx_en && ++ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { ++ struct skb_shared_hwtstamps shhwtstamps; ++ ++ dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh); ++ skb_tstamp_tx(skb, &shhwtstamps); ++ } ++#endif ++ } ++ ++ return skb; ++} ++EXPORT_SYMBOL(_dpa_cleanup_tx_fd); ++ ++#ifndef CONFIG_FSL_DPAA_TS ++bool dpa_skb_is_recyclable(struct sk_buff *skb) ++{ ++#ifndef CONFIG_PPC ++ /* Do no recycle skbs realigned by the errata workaround */ ++ if (unlikely(dpaa_errata_a010022) && skb->mark == NONREC_MARK) ++ return false; ++#endif ++ ++ /* No recycling possible if skb buffer is kmalloc'ed */ ++ if (skb->head_frag == 0) ++ return false; ++ ++ /* or if it's an userspace buffer */ ++ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) ++ return false; ++ ++ /* or if it's cloned or shared */ ++ if (skb_shared(skb) || skb_cloned(skb) || ++ skb->fclone != SKB_FCLONE_UNAVAILABLE) ++ return false; ++ ++ return true; ++} ++EXPORT_SYMBOL(dpa_skb_is_recyclable); ++ ++bool dpa_buf_is_recyclable(struct sk_buff *skb, ++ uint32_t min_size, ++ uint16_t min_offset, ++ unsigned char **new_buf_start) ++{ ++ unsigned char *new; ++ ++ /* In order to recycle a buffer, the following conditions must be met: ++ * - buffer size no less than the buffer pool size ++ * - buffer size no higher than an upper limit (to avoid moving too much ++ * system memory to the buffer pools) ++ * - buffer address aligned to cacheline bytes ++ * - offset of data from start of buffer no lower than a minimum value ++ * - offset of data from start of buffer no higher than a maximum value ++ */ ++ new = min(skb_end_pointer(skb) - min_size, skb->data - min_offset); ++ ++ /* left align to the nearest cacheline */ ++ new = (unsigned char *)((unsigned long)new & ~(SMP_CACHE_BYTES - 1)); ++ ++ if (likely(new >= skb->head && ++ new >= (skb->data - DPA_MAX_FD_OFFSET) && ++ skb_end_pointer(skb) - new <= DPA_RECYCLE_MAX_SIZE)) { ++ *new_buf_start = new; ++ return true; ++ } ++ ++ return false; ++} ++EXPORT_SYMBOL(dpa_buf_is_recyclable); ++#endif ++ ++/* Build a linear skb around the received buffer. 
++ * We are guaranteed there is enough room at the end of the data buffer to ++ * accommodate the shared info area of the skb. ++ */ ++static struct sk_buff *__hot contig_fd_to_skb(const struct dpa_priv_s *priv, ++ const struct qm_fd *fd, int *use_gro) ++{ ++ dma_addr_t addr = qm_fd_addr(fd); ++ ssize_t fd_off = dpa_fd_offset(fd); ++ void *vaddr; ++ const fm_prs_result_t *parse_results; ++ struct sk_buff *skb = NULL, **skbh; ++ ++ vaddr = phys_to_virt(addr); ++ DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); ++ ++ /* Retrieve the skb and adjust data and tail pointers, to make sure ++ * forwarded skbs will have enough space on Tx if extra headers ++ * are added. ++ */ ++ DPA_READ_SKB_PTR(skb, skbh, vaddr, -1); ++ ++#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME ++ /* When using jumbo Rx buffers, we risk having frames dropped due to ++ * the socket backlog reaching its maximum allowed size. ++ * Use the frame length for the skb truesize instead of the buffer ++ * size, as this is the size of the data that actually gets copied to ++ * userspace. ++ * The stack may increase the payload. In this case, it will want to ++ * warn us that the frame length is larger than the truesize. We ++ * bypass the warning. ++ */ ++#ifndef CONFIG_PPC ++ /* We do not support Jumbo frames on LS1043 and thus we edit ++ * the skb truesize only when the 4k errata is not present. ++ */ ++ if (likely(!dpaa_errata_a010022)) ++#endif ++ skb->truesize = SKB_TRUESIZE(dpa_fd_length(fd)); ++#endif ++ ++ DPA_BUG_ON(fd_off != priv->rx_headroom); ++ skb_reserve(skb, fd_off); ++ skb_put(skb, dpa_fd_length(fd)); ++ ++ /* Peek at the parse results for csum validation */ ++ parse_results = (const fm_prs_result_t *)(vaddr + ++ DPA_RX_PRIV_DATA_SIZE); ++ _dpa_process_parse_results(parse_results, fd, skb, use_gro); ++ ++#ifdef CONFIG_FSL_DPAA_1588 ++ if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_rx_en_ioctl) ++ dpa_ptp_store_rxstamp(priv, skb, vaddr); ++#endif ++#ifdef CONFIG_FSL_DPAA_TS ++ if (priv->ts_rx_en) ++ dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr); ++#endif /* CONFIG_FSL_DPAA_TS */ ++ ++ return skb; ++} ++ ++ ++/* Build an skb with the data of the first S/G entry in the linear portion and ++ * the rest of the frame as skb fragments. ++ * ++ * The page fragment holding the S/G Table is recycled here. 
++ */ ++static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv, ++ const struct qm_fd *fd, int *use_gro, ++ int *count_ptr) ++{ ++ const struct qm_sg_entry *sgt; ++ dma_addr_t addr = qm_fd_addr(fd); ++ ssize_t fd_off = dpa_fd_offset(fd); ++ dma_addr_t sg_addr; ++ void *vaddr, *sg_vaddr; ++ struct dpa_bp *dpa_bp; ++ struct page *page, *head_page; ++ int frag_offset, frag_len; ++ int page_offset; ++ int i; ++ const fm_prs_result_t *parse_results; ++ struct sk_buff *skb = NULL, *skb_tmp, **skbh; ++ ++ vaddr = phys_to_virt(addr); ++ DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); ++ ++ dpa_bp = priv->dpa_bp; ++ /* Iterate through the SGT entries and add data buffers to the skb */ ++ sgt = vaddr + fd_off; ++ for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) { ++ /* Extension bit is not supported */ ++ DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i])); ++ ++ /* We use a single global Rx pool */ ++ DPA_BUG_ON(dpa_bp != ++ dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i]))); ++ ++ sg_addr = qm_sg_addr(&sgt[i]); ++ sg_vaddr = phys_to_virt(sg_addr); ++ DPA_BUG_ON(!IS_ALIGNED((unsigned long)sg_vaddr, ++ SMP_CACHE_BYTES)); ++ ++ dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size, ++ DMA_BIDIRECTIONAL); ++ if (i == 0) { ++ DPA_READ_SKB_PTR(skb, skbh, sg_vaddr, -1); ++ DPA_BUG_ON(skb->head != sg_vaddr); ++#ifdef CONFIG_FSL_DPAA_1588 ++ if (priv->tsu && priv->tsu->valid && ++ priv->tsu->hwts_rx_en_ioctl) ++ dpa_ptp_store_rxstamp(priv, skb, vaddr); ++#endif ++#ifdef CONFIG_FSL_DPAA_TS ++ if (priv->ts_rx_en) ++ dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr); ++#endif /* CONFIG_FSL_DPAA_TS */ ++ ++ /* In the case of a SG frame, FMan stores the Internal ++ * Context in the buffer containing the sgt. ++ * Inspect the parse results before anything else. ++ */ ++ parse_results = (const fm_prs_result_t *)(vaddr + ++ DPA_RX_PRIV_DATA_SIZE); ++ _dpa_process_parse_results(parse_results, fd, skb, ++ use_gro); ++ ++ /* Make sure forwarded skbs will have enough space ++ * on Tx, if extra headers are added. ++ */ ++ DPA_BUG_ON(fd_off != priv->rx_headroom); ++ skb_reserve(skb, fd_off); ++ skb_put(skb, qm_sg_entry_get_len(&sgt[i])); ++ } else { ++ /* Not the first S/G entry; all data from buffer will ++ * be added in an skb fragment; fragment index is offset ++ * by one since first S/G entry was incorporated in the ++ * linear part of the skb. ++ * ++ * Caution: 'page' may be a tail page. ++ */ ++ DPA_READ_SKB_PTR(skb_tmp, skbh, sg_vaddr, -1); ++ page = virt_to_page(sg_vaddr); ++ head_page = virt_to_head_page(sg_vaddr); ++ ++ /* Free (only) the skbuff shell because its data buffer ++ * is already a frag in the main skb. ++ */ ++ get_page(head_page); ++ dev_kfree_skb(skb_tmp); ++ ++ /* Compute offset in (possibly tail) page */ ++ page_offset = ((unsigned long)sg_vaddr & ++ (PAGE_SIZE - 1)) + ++ (page_address(page) - page_address(head_page)); ++ /* page_offset only refers to the beginning of sgt[i]; ++ * but the buffer itself may have an internal offset. ++ */ ++ frag_offset = qm_sg_entry_get_offset(&sgt[i]) + ++ page_offset; ++ frag_len = qm_sg_entry_get_len(&sgt[i]); ++ /* skb_add_rx_frag() does no checking on the page; if ++ * we pass it a tail page, we'll end up with ++ * bad page accounting and eventually with segafults. 
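The tail-page arithmetic above is subtle; here is a worked instance with made-up addresses (4 KiB pages, a compound source page):

    /* Worked example of the offset math, hypothetical addresses:
     *   sg_vaddr           = 0x12840 -> sg_vaddr & (PAGE_SIZE - 1) = 0x840
     *   page_address(page) = 0x12000 (a tail page of a compound page)
     *   page_address(head) = 0x10000
     * page_offset = 0x840 + (0x12000 - 0x10000) = 0x2840
     * frag_offset = qm_sg_entry_get_offset(&sgt[i]) + 0x2840
     * i.e. the fragment is addressed relative to the head page, which is
     * what skb_add_rx_frag() requires. */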
++ */ ++ skb_add_rx_frag(skb, i - 1, head_page, frag_offset, ++ frag_len, dpa_bp->size); ++ } ++ /* Update the pool count for the current {cpu x bpool} */ ++ (*count_ptr)--; ++ ++ if (qm_sg_entry_get_final(&sgt[i])) ++ break; ++ } ++ WARN_ONCE(i == DPA_SGT_MAX_ENTRIES, "No final bit on SGT\n"); ++ ++ /* recycle the SGT fragment */ ++ DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid)); ++ dpa_bp_recycle_frag(dpa_bp, (unsigned long)vaddr, count_ptr); ++ return skb; ++} ++ ++#ifdef CONFIG_FSL_DPAA_DBG_LOOP ++static inline int dpa_skb_loop(const struct dpa_priv_s *priv, ++ struct sk_buff *skb) ++{ ++ if (unlikely(priv->loop_to < 0)) ++ return 0; /* loop disabled by default */ ++ ++ skb_push(skb, ETH_HLEN); /* compensate for eth_type_trans */ ++ dpa_tx(skb, dpa_loop_netdevs[priv->loop_to]); ++ ++ return 1; /* Frame Tx on the selected interface */ ++} ++#endif ++ ++void __hot _dpa_rx(struct net_device *net_dev, ++ struct qman_portal *portal, ++ const struct dpa_priv_s *priv, ++ struct dpa_percpu_priv_s *percpu_priv, ++ const struct qm_fd *fd, ++ u32 fqid, ++ int *count_ptr) ++{ ++ struct dpa_bp *dpa_bp; ++ struct sk_buff *skb; ++ dma_addr_t addr = qm_fd_addr(fd); ++ u32 fd_status = fd->status; ++ unsigned int skb_len; ++ struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats; ++ int use_gro = net_dev->features & NETIF_F_GRO; ++ ++ if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) { ++ if (netif_msg_hw(priv) && net_ratelimit()) ++ netdev_warn(net_dev, "FD status = 0x%08x\n", ++ fd_status & FM_FD_STAT_RX_ERRORS); ++ ++ percpu_stats->rx_errors++; ++ goto _release_frame; ++ } ++ ++ dpa_bp = priv->dpa_bp; ++ DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid)); ++ ++ /* prefetch the first 64 bytes of the frame or the SGT start */ ++ dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL); ++ prefetch(phys_to_virt(addr) + dpa_fd_offset(fd)); ++ ++ /* The only FD types that we may receive are contig and S/G */ ++ DPA_BUG_ON((fd->format != qm_fd_contig) && (fd->format != qm_fd_sg)); ++ ++ if (likely(fd->format == qm_fd_contig)) { ++#ifdef CONFIG_FSL_DPAA_HOOKS ++ /* Execute the Rx processing hook, if it exists. */ ++ if (dpaa_eth_hooks.rx_default && ++ dpaa_eth_hooks.rx_default((void *)fd, net_dev, ++ fqid) == DPAA_ETH_STOLEN) { ++ /* won't count the rx bytes in */ ++ return; ++ } ++#endif ++ skb = contig_fd_to_skb(priv, fd, &use_gro); ++ } else { ++ skb = sg_fd_to_skb(priv, fd, &use_gro, count_ptr); ++ percpu_priv->rx_sg++; ++ } ++ ++ /* Account for either the contig buffer or the SGT buffer (depending on ++ * which case we were in) having been removed from the pool. 
++ */ ++ (*count_ptr)--; ++ skb->protocol = eth_type_trans(skb, net_dev); ++ ++ /* IP Reassembled frames are allowed to be larger than MTU */ ++ if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) && ++ !(fd_status & FM_FD_IPR))) { ++ percpu_stats->rx_dropped++; ++ goto drop_bad_frame; ++ } ++ ++ skb_len = skb->len; ++ ++#ifdef CONFIG_FSL_DPAA_DBG_LOOP ++ if (dpa_skb_loop(priv, skb)) { ++ percpu_stats->rx_packets++; ++ percpu_stats->rx_bytes += skb_len; ++ return; ++ } ++#endif ++ ++ if (use_gro) { ++ gro_result_t gro_result; ++ const struct qman_portal_config *pc = ++ qman_p_get_portal_config(portal); ++ struct dpa_napi_portal *np = &percpu_priv->np[pc->index]; ++ ++ np->p = portal; ++ gro_result = napi_gro_receive(&np->napi, skb); ++ /* If frame is dropped by the stack, rx_dropped counter is ++ * incremented automatically, so no need for us to update it ++ */ ++ if (unlikely(gro_result == GRO_DROP)) ++ goto packet_dropped; ++ } else if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) ++ goto packet_dropped; ++ ++ percpu_stats->rx_packets++; ++ percpu_stats->rx_bytes += skb_len; ++ ++packet_dropped: ++ return; ++ ++drop_bad_frame: ++ dev_kfree_skb(skb); ++ return; ++ ++_release_frame: ++ dpa_fd_release(net_dev, fd); ++} ++ ++int __hot skb_to_contig_fd(struct dpa_priv_s *priv, ++ struct sk_buff *skb, struct qm_fd *fd, ++ int *count_ptr, int *offset) ++{ ++ struct sk_buff **skbh; ++ dma_addr_t addr; ++ struct dpa_bp *dpa_bp = priv->dpa_bp; ++ struct net_device *net_dev = priv->net_dev; ++ int err; ++ enum dma_data_direction dma_dir; ++ unsigned char *buffer_start; ++ int dma_map_size; ++ ++#ifndef CONFIG_FSL_DPAA_TS ++ /* Check recycling conditions; only if timestamp support is not ++ * enabled, otherwise we need the fd back on tx confirmation ++ */ ++ ++ /* We can recycle the buffer if: ++ * - the pool is not full ++ * - the buffer meets the skb recycling conditions ++ * - the buffer meets our own (size, offset, align) conditions ++ */ ++ if (likely((*count_ptr < dpa_bp->target_count) && ++ dpa_skb_is_recyclable(skb) && ++ dpa_buf_is_recyclable(skb, dpa_bp->size, ++ priv->tx_headroom, &buffer_start))) { ++ /* Buffer is recyclable; use the new start address ++ * and set fd parameters and DMA mapping direction ++ */ ++ fd->bpid = dpa_bp->bpid; ++ DPA_BUG_ON(skb->data - buffer_start > DPA_MAX_FD_OFFSET); ++ fd->offset = (uint16_t)(skb->data - buffer_start); ++ dma_dir = DMA_BIDIRECTIONAL; ++ dma_map_size = dpa_bp->size; ++ ++ DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, -1); ++ *offset = skb_headroom(skb) - fd->offset; ++ } else ++#endif ++ { ++ /* Not recyclable. ++ * We are guaranteed to have at least tx_headroom bytes ++ * available, so just use that for offset. ++ */ ++ fd->bpid = 0xff; ++ buffer_start = skb->data - priv->tx_headroom; ++ fd->offset = priv->tx_headroom; ++ dma_dir = DMA_TO_DEVICE; ++ dma_map_size = skb_tail_pointer(skb) - buffer_start; ++ ++ /* The buffer will be Tx-confirmed, but the TxConf cb must ++ * necessarily look at our Tx private data to retrieve the ++ * skbuff. (In short: can't use DPA_WRITE_SKB_PTR() here.) ++ */ ++ DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0); ++ } ++ ++ /* Enable L3/L4 hardware checksum computation. ++ * ++ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may ++ * need to write into the skb. 
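The recycling branch above produces two distinct descriptor/mapping shapes; the recap below only restates what the code already does:

    /* Recap of the two outcomes in skb_to_contig_fd():
     *   recyclable:     fd->bpid = dpa_bp->bpid; the whole pool-sized
     *                   buffer is mapped DMA_BIDIRECTIONAL, so after Tx
     *                   it can return to the BMan pool instead of being
     *                   freed;
     *   not recyclable: fd->bpid = 0xff (not pool-backed); only
     *                   headroom + payload is mapped DMA_TO_DEVICE, and
     *                   the Tx confirmation callback recovers the skb
     *                   through the back-pointer written by
     *                   DPA_WRITE_SKB_PTR(). */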
++ */ ++ err = dpa_enable_tx_csum(priv, skb, fd, ++ ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE); ++ if (unlikely(err < 0)) { ++ if (netif_msg_tx_err(priv) && net_ratelimit()) ++ netdev_err(net_dev, "HW csum error: %d\n", err); ++ return err; ++ } ++ ++ /* Fill in the rest of the FD fields */ ++ fd->format = qm_fd_contig; ++ fd->length20 = skb->len; ++ fd->cmd |= FM_FD_CMD_FCO; ++ ++ /* Map the entire buffer size that may be seen by FMan, but no more */ ++ addr = dma_map_single(dpa_bp->dev, skbh, dma_map_size, dma_dir); ++ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) { ++ if (netif_msg_tx_err(priv) && net_ratelimit()) ++ netdev_err(net_dev, "dma_map_single() failed\n"); ++ return -EINVAL; ++ } ++ qm_fd_addr_set64(fd, addr); ++ ++ return 0; ++} ++EXPORT_SYMBOL(skb_to_contig_fd); ++ ++#ifndef CONFIG_PPC ++/* Verify the conditions that trigger the A010022 errata: data unaligned to ++ * 16 bytes and 4K memory address crossings. ++ */ ++static bool a010022_check_skb(struct sk_buff *skb, struct dpa_priv_s *priv) ++{ ++ int nr_frags, i = 0; ++ skb_frag_t *frag; ++ ++ /* Check if the headroom is aligned */ ++ if (((uintptr_t)skb->data - priv->tx_headroom) % ++ priv->buf_layout[TX].data_align != 0) ++ return true; ++ ++ /* Check if the headroom crosses a boundary */ ++ if (HAS_DMA_ISSUE(skb->head, skb_headroom(skb))) ++ return true; ++ ++ /* Check if the non-paged data crosses a boundary */ ++ if (HAS_DMA_ISSUE(skb->data, skb_headlen(skb))) ++ return true; ++ ++ /* Check if the entire linear skb crosses a boundary */ ++ if (HAS_DMA_ISSUE(skb->head, skb_end_offset(skb))) ++ return true; ++ ++ nr_frags = skb_shinfo(skb)->nr_frags; ++ ++ while (i < nr_frags) { ++ frag = &skb_shinfo(skb)->frags[i]; ++ ++ /* Check if a paged fragment crosses a boundary from its ++ * offset to its end. ++ */ ++ if (HAS_DMA_ISSUE(frag->page_offset, frag->size)) ++ return true; ++ ++ i++; ++ } ++ ++ return false; ++} ++ ++/* Realign the skb by copying its contents at the start of a newly allocated ++ * page. Build a new skb around the new buffer and release the old one. ++ * A performance drop should be expected. ++ */ ++static struct sk_buff *a010022_realign_skb(struct sk_buff *skb, ++ struct dpa_priv_s *priv) ++{ ++ int trans_offset = skb_transport_offset(skb); ++ int net_offset = skb_network_offset(skb); ++ struct sk_buff *nskb = NULL; ++ int nsize, headroom; ++ struct page *npage; ++ void *npage_addr; ++ ++ /* Guarantee the minimum required headroom */ ++ if (skb_headroom(skb) >= priv->tx_headroom) ++ headroom = skb_headroom(skb); ++ else ++ headroom = priv->tx_headroom; ++ ++ npage = alloc_page(GFP_ATOMIC); ++ if (unlikely(!npage)) { ++ WARN_ONCE(1, "Memory allocation failure\n"); ++ return NULL; ++ } ++ npage_addr = page_address(npage); ++ ++ /* For the new skb we only need the old one's data (both non-paged and ++ * paged) and a headroom large enough to fit our private info. We can ++ * skip the old tailroom. ++ * ++ * Make sure the new linearized buffer will not exceed a page's size. ++ */ ++ nsize = headroom + skb->len + ++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); ++ if (unlikely(nsize > 4096)) ++ goto err; ++ ++ nskb = build_skb(npage_addr, nsize); ++ if (unlikely(!nskb)) ++ goto err; ++ ++ /* Reserve only the needed headroom in order to guarantee the data's ++ * alignment. ++ * Code borrowed and adapted from skb_copy(). 
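++	 * (skb_reserve() advances both skb->data and skb->tail by the given
++	 * headroom; skb_put() then opens skb->len bytes for the copied data.)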
++ */ ++ skb_reserve(nskb, priv->tx_headroom); ++ skb_put(nskb, skb->len); ++ if (skb_copy_bits(skb, 0, nskb->data, skb->len)) { ++ WARN_ONCE(1, "skb parsing failure\n"); ++ goto err; ++ } ++ copy_skb_header(nskb, skb); ++ ++#ifdef CONFIG_FSL_DPAA_TS ++ /* Copy relevant timestamp info from the old skb to the new */ ++ if (priv->ts_tx_en) { ++ skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags; ++ skb_shinfo(nskb)->hwtstamps = skb_shinfo(skb)->hwtstamps; ++ skb_shinfo(nskb)->tskey = skb_shinfo(skb)->tskey; ++ if (skb->sk) ++ skb_set_owner_w(nskb, skb->sk); ++ } ++#endif ++ /* We move the headroom when we align it so we have to reset the ++ * network and transport header offsets relative to the new data ++ * pointer. The checksum offload relies on these offsets. ++ */ ++ skb_set_network_header(nskb, net_offset); ++ skb_set_transport_header(nskb, trans_offset); ++ ++ /* We don't want the buffer to be recycled so we mark it accordingly */ ++ nskb->mark = NONREC_MARK; ++ ++ dev_kfree_skb(skb); ++ return nskb; ++ ++err: ++ if (nskb) ++ dev_kfree_skb(nskb); ++ put_page(npage); ++ return NULL; ++} ++#endif ++ ++int __hot skb_to_sg_fd(struct dpa_priv_s *priv, ++ struct sk_buff *skb, struct qm_fd *fd) ++{ ++ struct dpa_bp *dpa_bp = priv->dpa_bp; ++ dma_addr_t addr; ++ dma_addr_t sg_addr; ++ struct sk_buff **skbh; ++ struct net_device *net_dev = priv->net_dev; ++ int sg_len, sgt_size; ++ int err; ++ ++ struct qm_sg_entry *sgt; ++ void *sgt_buf; ++ skb_frag_t *frag; ++ int i = 0, j = 0; ++ int nr_frags; ++ const enum dma_data_direction dma_dir = DMA_TO_DEVICE; ++ ++ nr_frags = skb_shinfo(skb)->nr_frags; ++ fd->format = qm_fd_sg; ++ ++ sgt_size = sizeof(struct qm_sg_entry) * (1 + nr_frags); ++ ++ /* Get a page frag to store the SGTable, or a full page if the errata ++ * is in place and we need to avoid crossing a 4k boundary. ++ */ ++#ifndef CONFIG_PPC ++ if (unlikely(dpaa_errata_a010022)) ++ sgt_buf = page_address(alloc_page(GFP_ATOMIC)); ++ else ++#endif ++ sgt_buf = netdev_alloc_frag(priv->tx_headroom + sgt_size); ++ if (unlikely(!sgt_buf)) { ++ dev_err(dpa_bp->dev, "netdev_alloc_frag() failed\n"); ++ return -ENOMEM; ++ } ++ ++ /* it seems that the memory allocator does not zero the allocated mem */ ++ memset(sgt_buf, 0, priv->tx_headroom + sgt_size); ++ ++ /* Enable L3/L4 hardware checksum computation. ++ * ++ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may ++ * need to write into the skb. 
++ */ ++ err = dpa_enable_tx_csum(priv, skb, fd, ++ sgt_buf + DPA_TX_PRIV_DATA_SIZE); ++ if (unlikely(err < 0)) { ++ if (netif_msg_tx_err(priv) && net_ratelimit()) ++ netdev_err(net_dev, "HW csum error: %d\n", err); ++ goto csum_failed; ++ } ++ ++ /* Assign the data from skb->data to the first SG list entry */ ++ sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom); ++ sg_len = skb_headlen(skb); ++ qm_sg_entry_set_bpid(&sgt[0], 0xff); ++ qm_sg_entry_set_offset(&sgt[0], 0); ++ qm_sg_entry_set_len(&sgt[0], sg_len); ++ qm_sg_entry_set_ext(&sgt[0], 0); ++ qm_sg_entry_set_final(&sgt[0], 0); ++ ++ addr = dma_map_single(dpa_bp->dev, skb->data, sg_len, dma_dir); ++ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) { ++ dev_err(dpa_bp->dev, "DMA mapping failed"); ++ err = -EINVAL; ++ goto sg0_map_failed; ++ } ++ ++ qm_sg_entry_set64(&sgt[0], addr); ++ ++ /* populate the rest of SGT entries */ ++ for (i = 1; i <= nr_frags; i++) { ++ frag = &skb_shinfo(skb)->frags[i - 1]; ++ qm_sg_entry_set_bpid(&sgt[i], 0xff); ++ qm_sg_entry_set_offset(&sgt[i], 0); ++ qm_sg_entry_set_len(&sgt[i], frag->size); ++ qm_sg_entry_set_ext(&sgt[i], 0); ++ ++ if (i == nr_frags) ++ qm_sg_entry_set_final(&sgt[i], 1); ++ else ++ qm_sg_entry_set_final(&sgt[i], 0); ++ ++ DPA_BUG_ON(!skb_frag_page(frag)); ++ addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, frag->size, ++ dma_dir); ++ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) { ++ dev_err(dpa_bp->dev, "DMA mapping failed"); ++ err = -EINVAL; ++ goto sg_map_failed; ++ } ++ ++ /* keep the offset in the address */ ++ qm_sg_entry_set64(&sgt[i], addr); ++ } ++ ++ fd->length20 = skb->len; ++ fd->offset = priv->tx_headroom; ++ ++ /* DMA map the SGT page */ ++ DPA_WRITE_SKB_PTR(skb, skbh, sgt_buf, 0); ++ addr = dma_map_single(dpa_bp->dev, sgt_buf, ++ priv->tx_headroom + sgt_size, ++ dma_dir); ++ ++ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) { ++ dev_err(dpa_bp->dev, "DMA mapping failed"); ++ err = -EINVAL; ++ goto sgt_map_failed; ++ } ++ ++ qm_fd_addr_set64(fd, addr); ++ fd->bpid = 0xff; ++ fd->cmd |= FM_FD_CMD_FCO; ++ ++ return 0; ++ ++sgt_map_failed: ++sg_map_failed: ++ for (j = 0; j < i; j++) { ++ sg_addr = qm_sg_addr(&sgt[j]); ++ dma_unmap_page(dpa_bp->dev, sg_addr, ++ qm_sg_entry_get_len(&sgt[j]), dma_dir); ++ } ++sg0_map_failed: ++csum_failed: ++ put_page(virt_to_head_page(sgt_buf)); ++ ++ return err; ++} ++EXPORT_SYMBOL(skb_to_sg_fd); ++ ++int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev) ++{ ++ struct dpa_priv_s *priv; ++ const int queue_mapping = dpa_get_queue_mapping(skb); ++ struct qman_fq *egress_fq, *conf_fq; ++ ++#ifdef CONFIG_FSL_DPAA_HOOKS ++ /* If there is a Tx hook, run it. 
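++	 * A hook that returns DPAA_ETH_STOLEN takes over the skb: the frame
++	 * is treated as handled and no Tx statistics are updated for it.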
*/ ++ if (dpaa_eth_hooks.tx && ++ dpaa_eth_hooks.tx(skb, net_dev) == DPAA_ETH_STOLEN) ++ /* won't update any Tx stats */ ++ return NETDEV_TX_OK; ++#endif ++ ++ priv = netdev_priv(net_dev); ++ ++#ifdef CONFIG_FSL_DPAA_CEETM ++ if (priv->ceetm_en) ++ return ceetm_tx(skb, net_dev); ++#endif ++ ++ egress_fq = priv->egress_fqs[queue_mapping]; ++ conf_fq = priv->conf_fqs[queue_mapping]; ++ ++ return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq); ++} ++ ++int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev, ++ struct qman_fq *egress_fq, struct qman_fq *conf_fq) ++{ ++ struct dpa_priv_s *priv; ++ struct qm_fd fd; ++ struct dpa_percpu_priv_s *percpu_priv; ++ struct rtnl_link_stats64 *percpu_stats; ++ int err = 0; ++ bool nonlinear; ++ int *countptr, offset = 0; ++ ++ priv = netdev_priv(net_dev); ++ /* Non-migratable context, safe to use raw_cpu_ptr */ ++ percpu_priv = raw_cpu_ptr(priv->percpu_priv); ++ percpu_stats = &percpu_priv->stats; ++ countptr = raw_cpu_ptr(priv->dpa_bp->percpu_count); ++ ++ clear_fd(&fd); ++ ++#ifndef CONFIG_PPC ++ if (unlikely(dpaa_errata_a010022) && a010022_check_skb(skb, priv)) { ++ skb = a010022_realign_skb(skb, priv); ++ if (!skb) ++ goto skb_to_fd_failed; ++ } ++#endif ++ ++ nonlinear = skb_is_nonlinear(skb); ++ ++#ifdef CONFIG_FSL_DPAA_1588 ++ if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_tx_en_ioctl) ++ fd.cmd |= FM_FD_CMD_UPD; ++#endif ++#ifdef CONFIG_FSL_DPAA_TS ++ if (unlikely(priv->ts_tx_en && ++ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) ++ fd.cmd |= FM_FD_CMD_UPD; ++ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; ++#endif /* CONFIG_FSL_DPAA_TS */ ++ ++ /* MAX_SKB_FRAGS is larger than our DPA_SGT_MAX_ENTRIES; make sure ++ * we don't feed FMan with more fragments than it supports. ++ * Btw, we're using the first sgt entry to store the linear part of ++ * the skb, so we're one extra frag short. ++ */ ++ if (nonlinear && ++ likely(skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES)) { ++ /* Just create a S/G fd based on the skb */ ++ err = skb_to_sg_fd(priv, skb, &fd); ++ percpu_priv->tx_frag_skbuffs++; ++ } else { ++ /* Make sure we have enough headroom to accommodate private ++ * data, parse results, etc. Normally this shouldn't happen if ++ * we're here via the standard kernel stack. ++ */ ++ if (unlikely(skb_headroom(skb) < priv->tx_headroom)) { ++ struct sk_buff *skb_new; ++ ++ skb_new = skb_realloc_headroom(skb, priv->tx_headroom); ++ if (unlikely(!skb_new)) { ++ dev_kfree_skb(skb); ++ percpu_stats->tx_errors++; ++ return NETDEV_TX_OK; ++ } ++ dev_kfree_skb(skb); ++ skb = skb_new; ++ } ++ ++ /* We're going to store the skb backpointer at the beginning ++ * of the data buffer, so we need a privately owned skb ++ */ ++ ++ /* Code borrowed from skb_unshare(). */ ++ if (skb_cloned(skb)) { ++ struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); ++ kfree_skb(skb); ++ skb = nskb; ++#ifndef CONFIG_PPC ++ if (unlikely(dpaa_errata_a010022) && ++ a010022_check_skb(skb, priv)) { ++ skb = a010022_realign_skb(skb, priv); ++ if (!skb) ++ goto skb_to_fd_failed; ++ } ++#endif ++ /* skb_copy() has now linearized the skbuff. */ ++ } else if (unlikely(nonlinear)) { ++ /* We are here because the egress skb contains ++ * more fragments than we support. In this case, ++ * we have no choice but to linearize it ourselves. 
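++			 * (__skb_linearize() pulls all paged fragments into
++			 * the skb's linear data area, so the frame can then
++			 * be sent as a single contiguous FD.)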
++ */ ++ err = __skb_linearize(skb); ++ } ++ if (unlikely(!skb || err < 0)) ++ /* Common out-of-memory error path */ ++ goto enomem; ++ ++ err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset); ++ } ++ if (unlikely(err < 0)) ++ goto skb_to_fd_failed; ++ ++ if (fd.bpid != 0xff) { ++ skb_recycle(skb); ++ /* skb_recycle() reserves NET_SKB_PAD as skb headroom, ++ * but we need the skb to look as if returned by build_skb(). ++ * We need to manually adjust the tailptr as well. ++ */ ++ skb->data = skb->head + offset; ++ skb_reset_tail_pointer(skb); ++ ++ (*countptr)++; ++ percpu_priv->tx_returned++; ++ } ++ ++ if (unlikely(dpa_xmit(priv, percpu_stats, &fd, egress_fq, conf_fq) < 0)) ++ goto xmit_failed; ++ ++ netif_trans_update(net_dev); ++ return NETDEV_TX_OK; ++ ++xmit_failed: ++ if (fd.bpid != 0xff) { ++ (*countptr)--; ++ percpu_priv->tx_returned--; ++ dpa_fd_release(net_dev, &fd); ++ percpu_stats->tx_errors++; ++ return NETDEV_TX_OK; ++ } ++ _dpa_cleanup_tx_fd(priv, &fd); ++skb_to_fd_failed: ++enomem: ++ percpu_stats->tx_errors++; ++ dev_kfree_skb(skb); ++ return NETDEV_TX_OK; ++} ++EXPORT_SYMBOL(dpa_tx_extended); +--- /dev/null ++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c +@@ -0,0 +1,278 @@ ++/* Copyright 2008-2012 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */
++
++#include
++#include
++#include
++#include
++#include
++#include "dpaa_eth.h"
++#include "mac.h" /* struct mac_device */
++#ifdef CONFIG_FSL_DPAA_1588
++#include "dpaa_1588.h"
++#endif
++
++static ssize_t dpaa_eth_show_addr(struct device *dev,
++		struct device_attribute *attr, char *buf)
++{
++	struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
++	struct mac_device *mac_dev = priv->mac_dev;
++
++	if (mac_dev)
++		return sprintf(buf, "%llx",
++				(unsigned long long)mac_dev->res->start);
++	else
++		return sprintf(buf, "none");
++}
++
++static ssize_t dpaa_eth_show_type(struct device *dev,
++		struct device_attribute *attr, char *buf)
++{
++	struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
++	ssize_t res = 0;
++
++	if (priv)
++		res = sprintf(buf, "%s", priv->if_type);
++
++	return res;
++}
++
++static ssize_t dpaa_eth_show_fqids(struct device *dev,
++		struct device_attribute *attr, char *buf)
++{
++	struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
++	ssize_t bytes = 0;
++	int i = 0;
++	char *str;
++	struct dpa_fq *fq;
++	struct dpa_fq *tmp;
++	struct dpa_fq *prev = NULL;
++	u32 first_fqid = 0;
++	u32 last_fqid = 0;
++	char *prevstr = NULL;
++
++	list_for_each_entry_safe(fq, tmp, &priv->dpa_fq_list, list) {
++		switch (fq->fq_type) {
++		case FQ_TYPE_RX_DEFAULT:
++			str = "Rx default";
++			break;
++		case FQ_TYPE_RX_ERROR:
++			str = "Rx error";
++			break;
++		case FQ_TYPE_RX_PCD:
++			str = "Rx PCD";
++			break;
++		case FQ_TYPE_TX_CONFIRM:
++			str = "Tx default confirmation";
++			break;
++		case FQ_TYPE_TX_CONF_MQ:
++			str = "Tx confirmation (mq)";
++			break;
++		case FQ_TYPE_TX_ERROR:
++			str = "Tx error";
++			break;
++		case FQ_TYPE_TX:
++			str = "Tx";
++			break;
++		case FQ_TYPE_RX_PCD_HI_PRIO:
++			str = "Rx PCD High Priority";
++			break;
++		default:
++			str = "Unknown";
++		}
++
++		if (prev && (abs(fq->fqid - prev->fqid) != 1 ||
++				str != prevstr)) {
++			if (last_fqid == first_fqid)
++				bytes += sprintf(buf + bytes,
++					"%s: %d\n", prevstr, prev->fqid);
++			else
++				bytes += sprintf(buf + bytes,
++					"%s: %d - %d\n", prevstr,
++					first_fqid, last_fqid);
++		}
++
++		if (prev && abs(fq->fqid - prev->fqid) == 1 && str == prevstr)
++			last_fqid = fq->fqid;
++		else
++			first_fqid = last_fqid = fq->fqid;
++
++		prev = fq;
++		prevstr = str;
++		i++;
++	}
++
++	if (prev) {
++		if (last_fqid == first_fqid)
++			bytes += sprintf(buf + bytes, "%s: %d\n", prevstr,
++					prev->fqid);
++		else
++			bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr,
++					first_fqid, last_fqid);
++	}
++
++	return bytes;
++}
++
++static ssize_t dpaa_eth_show_bpids(struct device *dev,
++		struct device_attribute *attr, char *buf)
++{
++	ssize_t bytes = 0;
++	struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
++	struct dpa_bp *dpa_bp = priv->dpa_bp;
++	int i = 0;
++
++	for (i = 0; i < priv->bp_count; i++)
++		bytes += snprintf(buf + bytes, PAGE_SIZE, "%u\n",
++				dpa_bp[i].bpid);
++
++	return bytes;
++}
++
++static ssize_t dpaa_eth_show_mac_regs(struct device *dev,
++		struct device_attribute *attr, char *buf)
++{
++	struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
++	struct mac_device *mac_dev = priv->mac_dev;
++	int n = 0;
++
++	if (mac_dev)
++		n = fm_mac_dump_regs(mac_dev, buf, n);
++	else
++		return sprintf(buf, "no mac registers\n");
++
++	return n;
++}
++
++static ssize_t dpaa_eth_show_mac_rx_stats(struct device *dev,
++		struct device_attribute *attr, char *buf)
++{
++	struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
++	struct mac_device *mac_dev = priv->mac_dev;
++	int n = 0;
++
++	if (mac_dev)
++		n =
fm_mac_dump_rx_stats(mac_dev, buf, n); ++ else ++ return sprintf(buf, "no mac rx stats\n"); ++ ++ return n; ++} ++ ++static ssize_t dpaa_eth_show_mac_tx_stats(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev)); ++ struct mac_device *mac_dev = priv->mac_dev; ++ int n = 0; ++ ++ if (mac_dev) ++ n = fm_mac_dump_tx_stats(mac_dev, buf, n); ++ else ++ return sprintf(buf, "no mac tx stats\n"); ++ ++ return n; ++} ++ ++#ifdef CONFIG_FSL_DPAA_1588 ++static ssize_t dpaa_eth_show_ptp_1588(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev)); ++ ++ if (priv->tsu && priv->tsu->valid) ++ return sprintf(buf, "1\n"); ++ else ++ return sprintf(buf, "0\n"); ++} ++ ++static ssize_t dpaa_eth_set_ptp_1588(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev)); ++ unsigned int num; ++ unsigned long flags; ++ ++ if (kstrtouint(buf, 0, &num) < 0) ++ return -EINVAL; ++ ++ local_irq_save(flags); ++ ++ if (num) { ++ if (priv->tsu) ++ priv->tsu->valid = TRUE; ++ } else { ++ if (priv->tsu) ++ priv->tsu->valid = FALSE; ++ } ++ ++ local_irq_restore(flags); ++ ++ return count; ++} ++#endif ++ ++static struct device_attribute dpaa_eth_attrs[] = { ++ __ATTR(device_addr, S_IRUGO, dpaa_eth_show_addr, NULL), ++ __ATTR(device_type, S_IRUGO, dpaa_eth_show_type, NULL), ++ __ATTR(fqids, S_IRUGO, dpaa_eth_show_fqids, NULL), ++ __ATTR(bpids, S_IRUGO, dpaa_eth_show_bpids, NULL), ++ __ATTR(mac_regs, S_IRUGO, dpaa_eth_show_mac_regs, NULL), ++ __ATTR(mac_rx_stats, S_IRUGO, dpaa_eth_show_mac_rx_stats, NULL), ++ __ATTR(mac_tx_stats, S_IRUGO, dpaa_eth_show_mac_tx_stats, NULL), ++#ifdef CONFIG_FSL_DPAA_1588 ++ __ATTR(ptp_1588, S_IRUGO | S_IWUSR, dpaa_eth_show_ptp_1588, ++ dpaa_eth_set_ptp_1588), ++#endif ++}; ++ ++void dpaa_eth_sysfs_init(struct device *dev) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++) ++ if (device_create_file(dev, &dpaa_eth_attrs[i])) { ++ dev_err(dev, "Error creating sysfs file\n"); ++ while (i > 0) ++ device_remove_file(dev, &dpaa_eth_attrs[--i]); ++ return; ++ } ++} ++EXPORT_SYMBOL(dpaa_eth_sysfs_init); ++ ++void dpaa_eth_sysfs_remove(struct device *dev) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++) ++ device_remove_file(dev, &dpaa_eth_attrs[i]); ++} +--- /dev/null ++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h +@@ -0,0 +1,144 @@ ++/* Copyright 2013 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. 
++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#undef TRACE_SYSTEM ++#define TRACE_SYSTEM dpaa_eth ++ ++#if !defined(_DPAA_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) ++#define _DPAA_ETH_TRACE_H ++ ++#include ++#include ++#include "dpaa_eth.h" ++#include ++ ++#define fd_format_name(format) { qm_fd_##format, #format } ++#define fd_format_list \ ++ fd_format_name(contig), \ ++ fd_format_name(sg) ++#define TR_FMT "[%s] fqid=%d, fd: addr=0x%llx, format=%s, off=%u, len=%u," \ ++ " status=0x%08x" ++ ++/* This is used to declare a class of events. ++ * individual events of this type will be defined below. ++ */ ++ ++/* Store details about a frame descriptor and the FQ on which it was ++ * transmitted/received. ++ */ ++DECLARE_EVENT_CLASS(dpaa_eth_fd, ++ /* Trace function prototype */ ++ TP_PROTO(struct net_device *netdev, ++ struct qman_fq *fq, ++ const struct qm_fd *fd), ++ ++ /* Repeat argument list here */ ++ TP_ARGS(netdev, fq, fd), ++ ++ /* A structure containing the relevant information we want to record. ++ * Declare name and type for each normal element, name, type and size ++ * for arrays. Use __string for variable length strings. ++ */ ++ TP_STRUCT__entry( ++ __field(u32, fqid) ++ __field(u64, fd_addr) ++ __field(u8, fd_format) ++ __field(u16, fd_offset) ++ __field(u32, fd_length) ++ __field(u32, fd_status) ++ __string(name, netdev->name) ++ ), ++ ++ /* The function that assigns values to the above declared fields */ ++ TP_fast_assign( ++ __entry->fqid = fq->fqid; ++ __entry->fd_addr = qm_fd_addr_get64(fd); ++ __entry->fd_format = fd->format; ++ __entry->fd_offset = dpa_fd_offset(fd); ++ __entry->fd_length = dpa_fd_length(fd); ++ __entry->fd_status = fd->status; ++ __assign_str(name, netdev->name); ++ ), ++ ++ /* This is what gets printed when the trace event is triggered */ ++ /* TODO: print the status using __print_flags() */ ++ TP_printk(TR_FMT, ++ __get_str(name), __entry->fqid, __entry->fd_addr, ++ __print_symbolic(__entry->fd_format, fd_format_list), ++ __entry->fd_offset, __entry->fd_length, __entry->fd_status) ++); ++ ++/* Now declare events of the above type. 
Format is:
++ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
++ */
++
++/* Tx (egress) fd */
++DEFINE_EVENT(dpaa_eth_fd, dpa_tx_fd,
++
++	TP_PROTO(struct net_device *netdev,
++		struct qman_fq *fq,
++		const struct qm_fd *fd),
++
++	TP_ARGS(netdev, fq, fd)
++);
++
++/* Rx fd */
++DEFINE_EVENT(dpaa_eth_fd, dpa_rx_fd,
++
++	TP_PROTO(struct net_device *netdev,
++		struct qman_fq *fq,
++		const struct qm_fd *fd),
++
++	TP_ARGS(netdev, fq, fd)
++);
++
++/* Tx confirmation fd */
++DEFINE_EVENT(dpaa_eth_fd, dpa_tx_conf_fd,
++
++	TP_PROTO(struct net_device *netdev,
++		struct qman_fq *fq,
++		const struct qm_fd *fd),
++
++	TP_ARGS(netdev, fq, fd)
++);
++
++/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
++ * The syntax is the same as for DECLARE_EVENT_CLASS().
++ */
++
++#endif /* _DPAA_ETH_TRACE_H */
++
++/* This must be outside ifdef _DPAA_ETH_TRACE_H */
++#undef TRACE_INCLUDE_PATH
++#define TRACE_INCLUDE_PATH .
++#undef TRACE_INCLUDE_FILE
++#define TRACE_INCLUDE_FILE dpaa_eth_trace
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
+@@ -0,0 +1,544 @@
++/* Copyright 2008-2012 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */ ++ ++#ifdef CONFIG_FSL_DPAA_ETH_DEBUG ++#define pr_fmt(fmt) \ ++ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \ ++ KBUILD_BASENAME".c", __LINE__, __func__ ++#else ++#define pr_fmt(fmt) \ ++ KBUILD_MODNAME ": " fmt ++#endif ++ ++#include ++ ++#include "dpaa_eth.h" ++#include "mac.h" /* struct mac_device */ ++#include "dpaa_eth_common.h" ++ ++static const char dpa_stats_percpu[][ETH_GSTRING_LEN] = { ++ "interrupts", ++ "rx packets", ++ "tx packets", ++ "tx recycled", ++ "tx confirm", ++ "tx S/G", ++ "rx S/G", ++ "tx error", ++ "rx error", ++ "bp count" ++}; ++ ++static char dpa_stats_global[][ETH_GSTRING_LEN] = { ++ /* dpa rx errors */ ++ "rx dma error", ++ "rx frame physical error", ++ "rx frame size error", ++ "rx header error", ++ "rx csum error", ++ ++ /* demultiplexing errors */ ++ "qman cg_tdrop", ++ "qman wred", ++ "qman error cond", ++ "qman early window", ++ "qman late window", ++ "qman fq tdrop", ++ "qman fq retired", ++ "qman orp disabled", ++ ++ /* congestion related stats */ ++ "congestion time (ms)", ++ "entered congestion", ++ "congested (0/1)" ++}; ++ ++#define DPA_STATS_PERCPU_LEN ARRAY_SIZE(dpa_stats_percpu) ++#define DPA_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_stats_global) ++ ++static int __cold dpa_get_settings(struct net_device *net_dev, ++ struct ethtool_cmd *et_cmd) ++{ ++ int _errno; ++ struct dpa_priv_s *priv; ++ ++ priv = netdev_priv(net_dev); ++ ++ if (priv->mac_dev == NULL) { ++ netdev_info(net_dev, "This is a MAC-less interface\n"); ++ return -ENODEV; ++ } ++ if (unlikely(priv->mac_dev->phy_dev == NULL)) { ++ netdev_dbg(net_dev, "phy device not initialized\n"); ++ return 0; ++ } ++ ++ _errno = phy_ethtool_gset(priv->mac_dev->phy_dev, et_cmd); ++ if (unlikely(_errno < 0)) ++ netdev_err(net_dev, "phy_ethtool_gset() = %d\n", _errno); ++ ++ return _errno; ++} ++ ++static int __cold dpa_set_settings(struct net_device *net_dev, ++ struct ethtool_cmd *et_cmd) ++{ ++ int _errno; ++ struct dpa_priv_s *priv; ++ ++ priv = netdev_priv(net_dev); ++ ++ if (priv->mac_dev == NULL) { ++ netdev_info(net_dev, "This is a MAC-less interface\n"); ++ return -ENODEV; ++ } ++ if (unlikely(priv->mac_dev->phy_dev == NULL)) { ++ netdev_err(net_dev, "phy device not initialized\n"); ++ return -ENODEV; ++ } ++ ++ _errno = phy_ethtool_sset(priv->mac_dev->phy_dev, et_cmd); ++ if (unlikely(_errno < 0)) ++ netdev_err(net_dev, "phy_ethtool_sset() = %d\n", _errno); ++ ++ return _errno; ++} ++ ++static void __cold dpa_get_drvinfo(struct net_device *net_dev, ++ struct ethtool_drvinfo *drvinfo) ++{ ++ int _errno; ++ ++ strncpy(drvinfo->driver, KBUILD_MODNAME, ++ sizeof(drvinfo->driver) - 1)[sizeof(drvinfo->driver)-1] = 0; ++ _errno = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), ++ "%X", 0); ++ ++ if (unlikely(_errno >= sizeof(drvinfo->fw_version))) { ++ /* Truncated output */ ++ netdev_notice(net_dev, "snprintf() = %d\n", _errno); ++ } else if (unlikely(_errno < 0)) { ++ netdev_warn(net_dev, "snprintf() = %d\n", _errno); ++ memset(drvinfo->fw_version, 0, sizeof(drvinfo->fw_version)); ++ } ++ strncpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), ++ sizeof(drvinfo->bus_info)-1)[sizeof(drvinfo->bus_info)-1] = 0; ++} ++ ++static uint32_t __cold dpa_get_msglevel(struct net_device *net_dev) ++{ ++ return ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable; ++} ++ ++static void __cold dpa_set_msglevel(struct net_device *net_dev, ++ uint32_t msg_enable) ++{ ++ ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable = msg_enable; ++} ++ ++static int __cold dpa_nway_reset(struct net_device 
*net_dev) ++{ ++ int _errno; ++ struct dpa_priv_s *priv; ++ ++ priv = netdev_priv(net_dev); ++ ++ if (priv->mac_dev == NULL) { ++ netdev_info(net_dev, "This is a MAC-less interface\n"); ++ return -ENODEV; ++ } ++ if (unlikely(priv->mac_dev->phy_dev == NULL)) { ++ netdev_err(net_dev, "phy device not initialized\n"); ++ return -ENODEV; ++ } ++ ++ _errno = 0; ++ if (priv->mac_dev->phy_dev->autoneg) { ++ _errno = phy_start_aneg(priv->mac_dev->phy_dev); ++ if (unlikely(_errno < 0)) ++ netdev_err(net_dev, "phy_start_aneg() = %d\n", ++ _errno); ++ } ++ ++ return _errno; ++} ++ ++static void __cold dpa_get_pauseparam(struct net_device *net_dev, ++ struct ethtool_pauseparam *epause) ++{ ++ struct dpa_priv_s *priv; ++ struct mac_device *mac_dev; ++ struct phy_device *phy_dev; ++ ++ priv = netdev_priv(net_dev); ++ mac_dev = priv->mac_dev; ++ ++ if (mac_dev == NULL) { ++ netdev_info(net_dev, "This is a MAC-less interface\n"); ++ return; ++ } ++ ++ phy_dev = mac_dev->phy_dev; ++ if (unlikely(phy_dev == NULL)) { ++ netdev_err(net_dev, "phy device not initialized\n"); ++ return; ++ } ++ ++ epause->autoneg = mac_dev->autoneg_pause; ++ epause->rx_pause = mac_dev->rx_pause_active; ++ epause->tx_pause = mac_dev->tx_pause_active; ++} ++ ++static int __cold dpa_set_pauseparam(struct net_device *net_dev, ++ struct ethtool_pauseparam *epause) ++{ ++ struct dpa_priv_s *priv; ++ struct mac_device *mac_dev; ++ struct phy_device *phy_dev; ++ int _errno; ++ u32 newadv, oldadv; ++ bool rx_pause, tx_pause; ++ ++ priv = netdev_priv(net_dev); ++ mac_dev = priv->mac_dev; ++ ++ if (mac_dev == NULL) { ++ netdev_info(net_dev, "This is a MAC-less interface\n"); ++ return -ENODEV; ++ } ++ ++ phy_dev = mac_dev->phy_dev; ++ if (unlikely(phy_dev == NULL)) { ++ netdev_err(net_dev, "phy device not initialized\n"); ++ return -ENODEV; ++ } ++ ++ if (!(phy_dev->supported & SUPPORTED_Pause) || ++ (!(phy_dev->supported & SUPPORTED_Asym_Pause) && ++ (epause->rx_pause != epause->tx_pause))) ++ return -EINVAL; ++ ++ /* The MAC should know how to handle PAUSE frame autonegotiation before ++ * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE ++ * settings. ++ */ ++ mac_dev->autoneg_pause = !!epause->autoneg; ++ mac_dev->rx_pause_req = !!epause->rx_pause; ++ mac_dev->tx_pause_req = !!epause->tx_pause; ++ ++ /* Determine the sym/asym advertised PAUSE capabilities from the desired ++ * rx/tx pause settings. ++ */ ++ newadv = 0; ++ if (epause->rx_pause) ++ newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause; ++ if (epause->tx_pause) ++ newadv |= ADVERTISED_Asym_Pause; ++ ++ oldadv = phy_dev->advertising & ++ (ADVERTISED_Pause | ADVERTISED_Asym_Pause); ++ ++ /* If there are differences between the old and the new advertised ++ * values, restart PHY autonegotiation and advertise the new values. 
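++	 *
++	 * For example: rx_pause on with tx_pause off advertises both
++	 * ADVERTISED_Pause and ADVERTISED_Asym_Pause, while tx_pause on with
++	 * rx_pause off advertises ADVERTISED_Asym_Pause alone; these are the
++	 * sym/asym combinations that mii_resolve_flowctrl_fdx() later
++	 * resolves into active rx/tx pause settings.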
++ */ ++ if (oldadv != newadv) { ++ phy_dev->advertising &= ~(ADVERTISED_Pause ++ | ADVERTISED_Asym_Pause); ++ phy_dev->advertising |= newadv; ++ if (phy_dev->autoneg) { ++ _errno = phy_start_aneg(phy_dev); ++ if (unlikely(_errno < 0)) ++ netdev_err(net_dev, "phy_start_aneg() = %d\n", ++ _errno); ++ } ++ } ++ ++ get_pause_cfg(mac_dev, &rx_pause, &tx_pause); ++ _errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause); ++ if (unlikely(_errno < 0)) ++ netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno); ++ ++ return _errno; ++} ++ ++#ifdef CONFIG_PM ++static void dpa_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) ++{ ++ struct dpa_priv_s *priv = netdev_priv(net_dev); ++ ++ wol->supported = 0; ++ wol->wolopts = 0; ++ ++ if (!priv->wol || !device_can_wakeup(net_dev->dev.parent)) ++ return; ++ ++ if (priv->wol & DPAA_WOL_MAGIC) { ++ wol->supported = WAKE_MAGIC; ++ wol->wolopts = WAKE_MAGIC; ++ } ++} ++ ++static int dpa_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) ++{ ++ struct dpa_priv_s *priv = netdev_priv(net_dev); ++ ++ if (priv->mac_dev == NULL) { ++ netdev_info(net_dev, "This is a MAC-less interface\n"); ++ return -ENODEV; ++ } ++ ++ if (unlikely(priv->mac_dev->phy_dev == NULL)) { ++ netdev_dbg(net_dev, "phy device not initialized\n"); ++ return -ENODEV; ++ } ++ ++ if (!device_can_wakeup(net_dev->dev.parent) || ++ (wol->wolopts & ~WAKE_MAGIC)) ++ return -EOPNOTSUPP; ++ ++ priv->wol = 0; ++ ++ if (wol->wolopts & WAKE_MAGIC) { ++ priv->wol = DPAA_WOL_MAGIC; ++ device_set_wakeup_enable(net_dev->dev.parent, 1); ++ } else { ++ device_set_wakeup_enable(net_dev->dev.parent, 0); ++ } ++ ++ return 0; ++} ++#endif ++ ++static int dpa_get_eee(struct net_device *net_dev, struct ethtool_eee *et_eee) ++{ ++ struct dpa_priv_s *priv; ++ ++ priv = netdev_priv(net_dev); ++ if (priv->mac_dev == NULL) { ++ netdev_info(net_dev, "This is a MAC-less interface\n"); ++ return -ENODEV; ++ } ++ ++ if (unlikely(priv->mac_dev->phy_dev == NULL)) { ++ netdev_err(net_dev, "phy device not initialized\n"); ++ return -ENODEV; ++ } ++ ++ return phy_ethtool_get_eee(priv->mac_dev->phy_dev, et_eee); ++} ++ ++static int dpa_set_eee(struct net_device *net_dev, struct ethtool_eee *et_eee) ++{ ++ struct dpa_priv_s *priv; ++ ++ priv = netdev_priv(net_dev); ++ if (priv->mac_dev == NULL) { ++ netdev_info(net_dev, "This is a MAC-less interface\n"); ++ return -ENODEV; ++ } ++ ++ if (unlikely(priv->mac_dev->phy_dev == NULL)) { ++ netdev_err(net_dev, "phy device not initialized\n"); ++ return -ENODEV; ++ } ++ ++ return phy_ethtool_set_eee(priv->mac_dev->phy_dev, et_eee); ++} ++ ++static int dpa_get_sset_count(struct net_device *net_dev, int type) ++{ ++ unsigned int total_stats, num_stats; ++ ++ num_stats = num_online_cpus() + 1; ++ total_stats = num_stats * DPA_STATS_PERCPU_LEN + DPA_STATS_GLOBAL_LEN; ++ ++ switch (type) { ++ case ETH_SS_STATS: ++ return total_stats; ++ default: ++ return -EOPNOTSUPP; ++ } ++} ++ ++static void copy_stats(struct dpa_percpu_priv_s *percpu_priv, int num_cpus, ++ int crr_cpu, u64 bp_count, u64 *data) ++{ ++ int num_stat_values = num_cpus + 1; ++ int crr_stat = 0; ++ ++ /* update current CPU's stats and also add them to the total values */ ++ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->in_interrupt; ++ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->in_interrupt; ++ ++ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_packets; ++ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_packets; ++ ++ 
data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_packets;
++	data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_packets;
++
++	data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_returned;
++	data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_returned;
++
++	data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_confirm;
++	data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_confirm;
++
++	data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
++	data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
++
++	data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->rx_sg;
++	data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->rx_sg;
++
++	data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_errors;
++	data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_errors;
++
++	data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_errors;
++	data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_errors;
++
++	data[crr_stat * num_stat_values + crr_cpu] = bp_count;
++	data[crr_stat++ * num_stat_values + num_cpus] += bp_count;
++}
++
++static void dpa_get_ethtool_stats(struct net_device *net_dev,
++		struct ethtool_stats *stats, u64 *data)
++{
++	u64 bp_count, cg_time, cg_num, cg_status;
++	struct dpa_percpu_priv_s *percpu_priv;
++	struct qm_mcr_querycgr query_cgr;
++	struct dpa_rx_errors rx_errors;
++	struct dpa_ern_cnt ern_cnt;
++	struct dpa_priv_s *priv;
++	unsigned int num_cpus, offset;
++	struct dpa_bp *dpa_bp;
++	int total_stats, i;
++
++	total_stats = dpa_get_sset_count(net_dev, ETH_SS_STATS);
++	priv = netdev_priv(net_dev);
++	dpa_bp = priv->dpa_bp;
++	num_cpus = num_online_cpus();
++	bp_count = 0;
++
++	memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
++	memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
++	memset(data, 0, total_stats * sizeof(u64));
++
++	for_each_online_cpu(i) {
++		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
++
++		if (dpa_bp->percpu_count)
++			bp_count = *(per_cpu_ptr(dpa_bp->percpu_count, i));
++
++		rx_errors.dme += percpu_priv->rx_errors.dme;
++		rx_errors.fpe += percpu_priv->rx_errors.fpe;
++		rx_errors.fse += percpu_priv->rx_errors.fse;
++		rx_errors.phe += percpu_priv->rx_errors.phe;
++		rx_errors.cse += percpu_priv->rx_errors.cse;
++
++		ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
++		ern_cnt.wred += percpu_priv->ern_cnt.wred;
++		ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
++		ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
++		ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
++		ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
++		ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
++		ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
++
++		copy_stats(percpu_priv, num_cpus, i, bp_count, data);
++	}
++
++	offset = (num_cpus + 1) * DPA_STATS_PERCPU_LEN;
++	memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));
++
++	offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
++	memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));
++
++	/* gather congestion related counters */
++	cg_num = 0;
++	cg_status = 0;
++	cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
++	if (qman_query_cgr(&priv->cgr_data.cgr, &query_cgr) == 0) {
++		cg_num = priv->cgr_data.cgr_congested_count;
++		cg_status = query_cgr.cgr.cs;
++
++		/* reset congestion stats (like QMan API does) */
++		priv->cgr_data.congested_jiffies = 0;
++
priv->cgr_data.cgr_congested_count = 0; ++ } ++ ++ offset += sizeof(struct dpa_ern_cnt) / sizeof(u64); ++ data[offset++] = cg_time; ++ data[offset++] = cg_num; ++ data[offset++] = cg_status; ++} ++ ++static void dpa_get_strings(struct net_device *net_dev, u32 stringset, u8 *data) ++{ ++ unsigned int i, j, num_cpus, size; ++ char stat_string_cpu[ETH_GSTRING_LEN]; ++ u8 *strings; ++ ++ strings = data; ++ num_cpus = num_online_cpus(); ++ size = DPA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN; ++ ++ for (i = 0; i < DPA_STATS_PERCPU_LEN; i++) { ++ for (j = 0; j < num_cpus; j++) { ++ snprintf(stat_string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]", dpa_stats_percpu[i], j); ++ memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN); ++ strings += ETH_GSTRING_LEN; ++ } ++ snprintf(stat_string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]", dpa_stats_percpu[i]); ++ memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN); ++ strings += ETH_GSTRING_LEN; ++ } ++ memcpy(strings, dpa_stats_global, size); ++} ++ ++const struct ethtool_ops dpa_ethtool_ops = { ++ .get_settings = dpa_get_settings, ++ .set_settings = dpa_set_settings, ++ .get_drvinfo = dpa_get_drvinfo, ++ .get_msglevel = dpa_get_msglevel, ++ .set_msglevel = dpa_set_msglevel, ++ .nway_reset = dpa_nway_reset, ++ .get_pauseparam = dpa_get_pauseparam, ++ .set_pauseparam = dpa_set_pauseparam, ++ .self_test = NULL, /* TODO invoke the cold-boot unit-test? */ ++ .get_link = ethtool_op_get_link, ++ .get_eee = dpa_get_eee, ++ .set_eee = dpa_set_eee, ++ .get_sset_count = dpa_get_sset_count, ++ .get_ethtool_stats = dpa_get_ethtool_stats, ++ .get_strings = dpa_get_strings, ++#ifdef CONFIG_PM ++ .get_wol = dpa_get_wol, ++ .set_wol = dpa_set_wol, ++#endif ++}; +--- /dev/null ++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c +@@ -0,0 +1,291 @@ ++/* ++ * DPAA Ethernet Driver -- PTP 1588 clock using the dTSEC ++ * ++ * Author: Yangbo Lu ++ * ++ * Copyright 2014 Freescale Semiconductor, Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++*/ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "dpaa_eth.h" ++#include "mac.h" ++ ++static struct mac_device *mac_dev; ++static u32 freqCompensation; ++ ++/* Bit definitions for the TMR_CTRL register */ ++#define ALM1P (1<<31) /* Alarm1 output polarity */ ++#define ALM2P (1<<30) /* Alarm2 output polarity */ ++#define FS (1<<28) /* FIPER start indication */ ++#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */ ++#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */ ++#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */ ++#define TCLK_PERIOD_MASK (0x3ff) ++#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */ ++#define FRD (1<<14) /* FIPER Realignment Disable */ ++#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */ ++#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */ ++#define ETEP2 (1<<9) /* External trigger 2 edge polarity */ ++#define ETEP1 (1<<8) /* External trigger 1 edge polarity */ ++#define COPH (1<<7) /* Generated clock output phase. */ ++#define CIPH (1<<6) /* External oscillator input clock phase */ ++#define TMSR (1<<5) /* Timer soft reset. */ ++#define BYP (1<<3) /* Bypass drift compensated clock */ ++#define TE (1<<2) /* 1588 timer enable. 
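++ *
++ * Illustration only (the driver programs the timer through the FMan
++ * RTC API rather than writing TMR_CTRL directly): a control word that
++ * enables the timer with a 10 ns reference-clock period could be
++ * composed from the bits above as
++ *
++ *   tmr_ctrl = ((10 & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT) | TE;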
*/ ++#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */ ++#define CKSEL_MASK (0x3) ++ ++/* Bit definitions for the TMR_TEVENT register */ ++#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */ ++#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */ ++#define ALM2 (1<<17) /* Current time = alarm time register 2 */ ++#define ALM1 (1<<16) /* Current time = alarm time register 1 */ ++#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */ ++#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */ ++#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */ ++ ++/* Bit definitions for the TMR_TEMASK register */ ++#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */ ++#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */ ++#define ALM2EN (1<<17) /* Timer ALM2 event enable */ ++#define ALM1EN (1<<16) /* Timer ALM1 event enable */ ++#define PP1EN (1<<7) /* Periodic pulse event 1 enable */ ++#define PP2EN (1<<6) /* Periodic pulse event 2 enable */ ++ ++/* Bit definitions for the TMR_PEVENT register */ ++#define TXP2 (1<<9) /* PTP transmitted timestamp im TXTS2 */ ++#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */ ++#define RXP (1<<0) /* PTP frame has been received */ ++ ++/* Bit definitions for the TMR_PEMASK register */ ++#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */ ++#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */ ++#define RXPEN (1<<0) /* Receive PTP packet event enable */ ++ ++/* Bit definitions for the TMR_STAT register */ ++#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */ ++#define STAT_VEC_MASK (0x3f) ++ ++/* Bit definitions for the TMR_PRSC register */ ++#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */ ++#define PRSC_OCK_MASK (0xffff) ++ ++ ++#define N_EXT_TS 2 ++ ++static void set_alarm(void) ++{ ++ u64 ns; ++ ++ if (mac_dev->fm_rtc_get_cnt) ++ mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns); ++ ns += 1500000000ULL; ++ ns = div_u64(ns, 1000000000UL) * 1000000000ULL; ++ ns -= DPA_PTP_NOMINAL_FREQ_PERIOD_NS; ++ if (mac_dev->fm_rtc_set_alarm) ++ mac_dev->fm_rtc_set_alarm(mac_dev->fm_dev, 0, ns); ++} ++ ++static void set_fipers(void) ++{ ++ u64 fiper; ++ ++ if (mac_dev->fm_rtc_disable) ++ mac_dev->fm_rtc_disable(mac_dev->fm_dev); ++ ++ set_alarm(); ++ fiper = 1000000000ULL - DPA_PTP_NOMINAL_FREQ_PERIOD_NS; ++ if (mac_dev->fm_rtc_set_fiper) ++ mac_dev->fm_rtc_set_fiper(mac_dev->fm_dev, 0, fiper); ++ ++ if (mac_dev->fm_rtc_enable) ++ mac_dev->fm_rtc_enable(mac_dev->fm_dev); ++} ++ ++/* PTP clock operations */ ++ ++static int ptp_dpa_adjfreq(struct ptp_clock_info *ptp, s32 ppb) ++{ ++ u64 adj; ++ u32 diff, tmr_add; ++ int neg_adj = 0; ++ ++ if (ppb < 0) { ++ neg_adj = 1; ++ ppb = -ppb; ++ } ++ ++ tmr_add = freqCompensation; ++ adj = tmr_add; ++ adj *= ppb; ++ diff = div_u64(adj, 1000000000ULL); ++ ++ tmr_add = neg_adj ? 
tmr_add - diff : tmr_add + diff; ++ ++ if (mac_dev->fm_rtc_set_drift) ++ mac_dev->fm_rtc_set_drift(mac_dev->fm_dev, tmr_add); ++ ++ return 0; ++} ++ ++static int ptp_dpa_adjtime(struct ptp_clock_info *ptp, s64 delta) ++{ ++ s64 now; ++ ++ if (mac_dev->fm_rtc_get_cnt) ++ mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &now); ++ ++ now += delta; ++ ++ if (mac_dev->fm_rtc_set_cnt) ++ mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, now); ++ set_fipers(); ++ ++ return 0; ++} ++ ++static int ptp_dpa_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) ++{ ++ u64 ns; ++ u32 remainder; ++ ++ if (mac_dev->fm_rtc_get_cnt) ++ mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns); ++ ++ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); ++ ts->tv_nsec = remainder; ++ return 0; ++} ++ ++static int ptp_dpa_settime(struct ptp_clock_info *ptp, ++ const struct timespec64 *ts) ++{ ++ u64 ns; ++ ++ ns = ts->tv_sec * 1000000000ULL; ++ ns += ts->tv_nsec; ++ ++ if (mac_dev->fm_rtc_set_cnt) ++ mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, ns); ++ set_fipers(); ++ return 0; ++} ++ ++static int ptp_dpa_enable(struct ptp_clock_info *ptp, ++ struct ptp_clock_request *rq, int on) ++{ ++ u32 bit; ++ ++ switch (rq->type) { ++ case PTP_CLK_REQ_EXTTS: ++ switch (rq->extts.index) { ++ case 0: ++ bit = ETS1EN; ++ break; ++ case 1: ++ bit = ETS2EN; ++ break; ++ default: ++ return -EINVAL; ++ } ++ if (on) { ++ if (mac_dev->fm_rtc_enable_interrupt) ++ mac_dev->fm_rtc_enable_interrupt( ++ mac_dev->fm_dev, bit); ++ } else { ++ if (mac_dev->fm_rtc_disable_interrupt) ++ mac_dev->fm_rtc_disable_interrupt( ++ mac_dev->fm_dev, bit); ++ } ++ return 0; ++ ++ case PTP_CLK_REQ_PPS: ++ if (on) { ++ if (mac_dev->fm_rtc_enable_interrupt) ++ mac_dev->fm_rtc_enable_interrupt( ++ mac_dev->fm_dev, PP1EN); ++ } else { ++ if (mac_dev->fm_rtc_disable_interrupt) ++ mac_dev->fm_rtc_disable_interrupt( ++ mac_dev->fm_dev, PP1EN); ++ } ++ return 0; ++ ++ default: ++ break; ++ } ++ ++ return -EOPNOTSUPP; ++} ++ ++static struct ptp_clock_info ptp_dpa_caps = { ++ .owner = THIS_MODULE, ++ .name = "dpaa clock", ++ .max_adj = 512000, ++ .n_alarm = 0, ++ .n_ext_ts = N_EXT_TS, ++ .n_per_out = 0, ++ .pps = 1, ++ .adjfreq = ptp_dpa_adjfreq, ++ .adjtime = ptp_dpa_adjtime, ++ .gettime64 = ptp_dpa_gettime, ++ .settime64 = ptp_dpa_settime, ++ .enable = ptp_dpa_enable, ++}; ++ ++static int __init __cold dpa_ptp_load(void) ++{ ++ struct device *ptp_dev; ++ struct timespec64 now; ++ struct ptp_clock *clock = ptp_priv.clock; ++ int dpa_phc_index; ++ int err; ++ ++ if (!(ptp_priv.of_dev && ptp_priv.mac_dev)) ++ return -ENODEV; ++ ++ ptp_dev = &ptp_priv.of_dev->dev; ++ mac_dev = ptp_priv.mac_dev; ++ ++ if (mac_dev->fm_rtc_get_drift) ++ mac_dev->fm_rtc_get_drift(mac_dev->fm_dev, &freqCompensation); ++ ++ getnstimeofday64(&now); ++ ptp_dpa_settime(&ptp_dpa_caps, &now); ++ ++ clock = ptp_clock_register(&ptp_dpa_caps, ptp_dev); ++ if (IS_ERR(clock)) { ++ err = PTR_ERR(clock); ++ return err; ++ } ++ dpa_phc_index = ptp_clock_index(clock); ++ return 0; ++} ++module_init(dpa_ptp_load); ++ ++static void __exit __cold dpa_ptp_unload(void) ++{ ++ struct ptp_clock *clock = ptp_priv.clock; ++ ++ if (mac_dev->fm_rtc_disable_interrupt) ++ mac_dev->fm_rtc_disable_interrupt(mac_dev->fm_dev, 0xffffffff); ++ ptp_clock_unregister(clock); ++} ++module_exit(dpa_ptp_unload); +--- /dev/null ++++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c +@@ -0,0 +1,907 @@ ++/* Copyright 2008-2012 Freescale Semiconductor, Inc. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifdef CONFIG_FSL_DPAA_ETH_DEBUG ++#define pr_fmt(fmt) \ ++ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \ ++ KBUILD_BASENAME".c", __LINE__, __func__ ++#else ++#define pr_fmt(fmt) \ ++ KBUILD_MODNAME ": " fmt ++#endif ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "dpaa_eth.h" ++#include "mac.h" ++#include "lnxwrp_fsl_fman.h" ++ ++#include "error_ext.h" /* GET_ERROR_TYPE, E_OK */ ++ ++#include "fsl_fman_dtsec.h" ++#include "fsl_fman_tgec.h" ++#include "fsl_fman_memac.h" ++#include "../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h" ++ ++#define MAC_DESCRIPTION "FSL FMan MAC API based driver" ++ ++MODULE_LICENSE("Dual BSD/GPL"); ++ ++MODULE_AUTHOR("Emil Medve "); ++ ++MODULE_DESCRIPTION(MAC_DESCRIPTION); ++ ++struct mac_priv_s { ++ struct fm_mac_dev *fm_mac; ++}; ++ ++const char *mac_driver_description __initconst = MAC_DESCRIPTION; ++const size_t mac_sizeof_priv[] = { ++ [DTSEC] = sizeof(struct mac_priv_s), ++ [XGMAC] = sizeof(struct mac_priv_s), ++ [MEMAC] = sizeof(struct mac_priv_s) ++}; ++ ++static const enet_mode_t _100[] = { ++ [PHY_INTERFACE_MODE_MII] = e_ENET_MODE_MII_100, ++ [PHY_INTERFACE_MODE_RMII] = e_ENET_MODE_RMII_100 ++}; ++ ++static const enet_mode_t _1000[] = { ++ [PHY_INTERFACE_MODE_GMII] = e_ENET_MODE_GMII_1000, ++ [PHY_INTERFACE_MODE_SGMII] = e_ENET_MODE_SGMII_1000, ++ [PHY_INTERFACE_MODE_QSGMII] = e_ENET_MODE_QSGMII_1000, ++ [PHY_INTERFACE_MODE_TBI] = e_ENET_MODE_TBI_1000, ++ [PHY_INTERFACE_MODE_RGMII] = e_ENET_MODE_RGMII_1000, ++ [PHY_INTERFACE_MODE_RGMII_ID] = e_ENET_MODE_RGMII_1000, ++ [PHY_INTERFACE_MODE_RGMII_RXID] = e_ENET_MODE_RGMII_1000, ++ [PHY_INTERFACE_MODE_RGMII_TXID] = e_ENET_MODE_RGMII_1000, ++ [PHY_INTERFACE_MODE_RTBI] = e_ENET_MODE_RTBI_1000 ++}; ++ ++static enet_mode_t __cold 
__attribute__((nonnull)) ++macdev2enetinterface(const struct mac_device *mac_dev) ++{ ++ switch (mac_dev->max_speed) { ++ case SPEED_100: ++ return _100[mac_dev->phy_if]; ++ case SPEED_1000: ++ return _1000[mac_dev->phy_if]; ++ case SPEED_2500: ++ return e_ENET_MODE_SGMII_2500; ++ case SPEED_10000: ++ return e_ENET_MODE_XGMII_10000; ++ default: ++ return e_ENET_MODE_MII_100; ++ } ++} ++ ++static void mac_exception(handle_t _mac_dev, e_FmMacExceptions exception) ++{ ++ struct mac_device *mac_dev; ++ ++ mac_dev = (struct mac_device *)_mac_dev; ++ ++ if (e_FM_MAC_EX_10G_RX_FIFO_OVFL == exception) { ++ /* don't flag RX FIFO after the first */ ++ fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev), ++ e_FM_MAC_EX_10G_RX_FIFO_OVFL, false); ++ dev_err(mac_dev->dev, "10G MAC got RX FIFO Error = %x\n", ++ exception); ++ } ++ ++ dev_dbg(mac_dev->dev, "%s:%s() -> %d\n", KBUILD_BASENAME".c", __func__, ++ exception); ++} ++ ++static int __cold init(struct mac_device *mac_dev) ++{ ++ int _errno; ++ struct mac_priv_s *priv; ++ t_FmMacParams param; ++ uint32_t version; ++ ++ priv = macdev_priv(mac_dev); ++ ++ param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap( ++ mac_dev->dev, mac_dev->res->start, 0x2000); ++ param.enetMode = macdev2enetinterface(mac_dev); ++ memcpy(¶m.addr, mac_dev->addr, min(sizeof(param.addr), ++ sizeof(mac_dev->addr))); ++ param.macId = mac_dev->cell_index; ++ param.h_Fm = (handle_t)mac_dev->fm; ++ param.mdioIrq = NO_IRQ; ++ param.f_Exception = mac_exception; ++ param.f_Event = mac_exception; ++ param.h_App = mac_dev; ++ ++ priv->fm_mac = fm_mac_config(¶m); ++ if (unlikely(priv->fm_mac == NULL)) { ++ _errno = -EINVAL; ++ goto _return; ++ } ++ ++ fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac, ++ (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ? ++ param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS); ++ ++ _errno = fm_mac_config_max_frame_length(priv->fm_mac, ++ fm_get_max_frm()); ++ if (unlikely(_errno < 0)) ++ goto _return_fm_mac_free; ++ ++ if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) { ++ /* 10G always works with pad and CRC */ ++ _errno = fm_mac_config_pad_and_crc(priv->fm_mac, true); ++ if (unlikely(_errno < 0)) ++ goto _return_fm_mac_free; ++ ++ _errno = fm_mac_config_half_duplex(priv->fm_mac, ++ mac_dev->half_duplex); ++ if (unlikely(_errno < 0)) ++ goto _return_fm_mac_free; ++ } else { ++ _errno = fm_mac_config_reset_on_init(priv->fm_mac, true); ++ if (unlikely(_errno < 0)) ++ goto _return_fm_mac_free; ++ } ++ ++ _errno = fm_mac_init(priv->fm_mac); ++ if (unlikely(_errno < 0)) ++ goto _return_fm_mac_free; ++ ++#ifndef CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN ++ /* For 1G MAC, disable by default the MIB counters overflow interrupt */ ++ if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) { ++ _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev), ++ e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL, FALSE); ++ if (unlikely(_errno < 0)) ++ goto _return_fm_mac_free; ++ } ++#endif /* !CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN */ ++ ++ /* For 10G MAC, disable Tx ECC exception */ ++ if (macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) { ++ _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev), ++ e_FM_MAC_EX_10G_1TX_ECC_ER, FALSE); ++ if (unlikely(_errno < 0)) ++ goto _return_fm_mac_free; ++ } ++ ++ _errno = fm_mac_get_version(priv->fm_mac, &version); ++ if (unlikely(_errno < 0)) ++ goto _return_fm_mac_free; ++ ++ dev_info(mac_dev->dev, "FMan %s version: 0x%08x\n", ++ ((macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ? 
++ "dTSEC" : "XGEC"), version); ++ ++ goto _return; ++ ++ ++_return_fm_mac_free: ++ fm_mac_free(mac_dev->get_mac_handle(mac_dev)); ++ ++_return: ++ return _errno; ++} ++ ++static int __cold memac_init(struct mac_device *mac_dev) ++{ ++ int _errno; ++ struct mac_priv_s *priv; ++ t_FmMacParams param; ++ ++ priv = macdev_priv(mac_dev); ++ ++ param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap( ++ mac_dev->dev, mac_dev->res->start, 0x2000); ++ param.enetMode = macdev2enetinterface(mac_dev); ++ memcpy(¶m.addr, mac_dev->addr, sizeof(mac_dev->addr)); ++ param.macId = mac_dev->cell_index; ++ param.h_Fm = (handle_t)mac_dev->fm; ++ param.mdioIrq = NO_IRQ; ++ param.f_Exception = mac_exception; ++ param.f_Event = mac_exception; ++ param.h_App = mac_dev; ++ ++ priv->fm_mac = fm_mac_config(¶m); ++ if (unlikely(priv->fm_mac == NULL)) { ++ _errno = -EINVAL; ++ goto _return; ++ } ++ ++ fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac, ++ (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ? ++ param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS); ++ ++ _errno = fm_mac_config_max_frame_length(priv->fm_mac, fm_get_max_frm()); ++ if (unlikely(_errno < 0)) ++ goto _return_fm_mac_free; ++ ++ _errno = fm_mac_config_reset_on_init(priv->fm_mac, true); ++ if (unlikely(_errno < 0)) ++ goto _return_fm_mac_free; ++ ++ _errno = fm_mac_init(priv->fm_mac); ++ if (unlikely(_errno < 0)) ++ goto _return_fm_mac_free; ++ ++ dev_info(mac_dev->dev, "FMan MEMAC\n"); ++ ++ goto _return; ++ ++_return_fm_mac_free: ++ fm_mac_free(priv->fm_mac); ++ ++_return: ++ return _errno; ++} ++ ++static int __cold start(struct mac_device *mac_dev) ++{ ++ int _errno; ++ struct phy_device *phy_dev = mac_dev->phy_dev; ++ ++ _errno = fm_mac_enable(mac_dev->get_mac_handle(mac_dev)); ++ ++ if (!_errno && phy_dev) ++ phy_start(phy_dev); ++ ++ return _errno; ++} ++ ++static int __cold stop(struct mac_device *mac_dev) ++{ ++ if (mac_dev->phy_dev) ++ phy_stop(mac_dev->phy_dev); ++ ++ return fm_mac_disable(mac_dev->get_mac_handle(mac_dev)); ++} ++ ++static int __cold set_multi(struct net_device *net_dev, ++ struct mac_device *mac_dev) ++{ ++ struct mac_priv_s *mac_priv; ++ struct mac_address *old_addr, *tmp; ++ struct netdev_hw_addr *ha; ++ int _errno; ++ ++ mac_priv = macdev_priv(mac_dev); ++ ++ /* Clear previous address list */ ++ list_for_each_entry_safe(old_addr, tmp, &mac_dev->mc_addr_list, list) { ++ _errno = fm_mac_remove_hash_mac_addr(mac_priv->fm_mac, ++ (t_EnetAddr *)old_addr->addr); ++ if (_errno < 0) ++ return _errno; ++ ++ list_del(&old_addr->list); ++ kfree(old_addr); ++ } ++ ++ /* Add all the addresses from the new list */ ++ netdev_for_each_mc_addr(ha, net_dev) { ++ _errno = fm_mac_add_hash_mac_addr(mac_priv->fm_mac, ++ (t_EnetAddr *)ha->addr); ++ if (_errno < 0) ++ return _errno; ++ ++ tmp = kmalloc(sizeof(struct mac_address), GFP_ATOMIC); ++ if (!tmp) { ++ dev_err(mac_dev->dev, "Out of memory\n"); ++ return -ENOMEM; ++ } ++ memcpy(tmp->addr, ha->addr, ETH_ALEN); ++ list_add(&tmp->list, &mac_dev->mc_addr_list); ++ } ++ return 0; ++} ++ ++/* Avoid redundant calls to FMD, if the MAC driver already contains the desired ++ * active PAUSE settings. Otherwise, the new active settings should be reflected ++ * in FMan. 
++ */
++int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
++{
++	struct fm_mac_dev *fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
++	int _errno = 0;
++
++	if (unlikely(rx != mac_dev->rx_pause_active)) {
++		_errno = fm_mac_set_rx_pause_frames(fm_mac_dev, rx);
++		if (likely(_errno == 0))
++			mac_dev->rx_pause_active = rx;
++	}
++
++	if (unlikely(tx != mac_dev->tx_pause_active)) {
++		_errno = fm_mac_set_tx_pause_frames(fm_mac_dev, tx);
++		if (likely(_errno == 0))
++			mac_dev->tx_pause_active = tx;
++	}
++
++	return _errno;
++}
++EXPORT_SYMBOL(set_mac_active_pause);
++
++/* Determine the MAC RX/TX PAUSE frames settings based on PHY
++ * autonegotiation or values set by ethtool.
++ */
++void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause)
++{
++	struct phy_device *phy_dev = mac_dev->phy_dev;
++	u16 lcl_adv, rmt_adv;
++	u8 flowctrl;
++
++	*rx_pause = *tx_pause = false;
++
++	if (!phy_dev->duplex)
++		return;
++
++	/* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
++	 * are those set by ethtool.
++	 */
++	if (!mac_dev->autoneg_pause) {
++		*rx_pause = mac_dev->rx_pause_req;
++		*tx_pause = mac_dev->tx_pause_req;
++		return;
++	}
++
++	/* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
++	 * settings depend on the result of the link negotiation.
++	 */
++
++	/* get local capabilities */
++	lcl_adv = 0;
++	if (phy_dev->advertising & ADVERTISED_Pause)
++		lcl_adv |= ADVERTISE_PAUSE_CAP;
++	if (phy_dev->advertising & ADVERTISED_Asym_Pause)
++		lcl_adv |= ADVERTISE_PAUSE_ASYM;
++
++	/* get link partner capabilities */
++	rmt_adv = 0;
++	if (phy_dev->pause)
++		rmt_adv |= LPA_PAUSE_CAP;
++	if (phy_dev->asym_pause)
++		rmt_adv |= LPA_PAUSE_ASYM;
++
++	/* Calculate TX/RX settings based on local and peer advertised
++	 * symmetric/asymmetric PAUSE capabilities.
++	 */
++	flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
++	if (flowctrl & FLOW_CTRL_RX)
++		*rx_pause = true;
++	if (flowctrl & FLOW_CTRL_TX)
++		*tx_pause = true;
++}
++EXPORT_SYMBOL(get_pause_cfg);
++
++static void adjust_link_void(struct net_device *net_dev)
++{
++}
++
++static void adjust_link(struct net_device *net_dev)
++{
++	struct dpa_priv_s *priv = netdev_priv(net_dev);
++	struct mac_device *mac_dev = priv->mac_dev;
++	struct phy_device *phy_dev = mac_dev->phy_dev;
++	struct fm_mac_dev *fm_mac_dev;
++	bool rx_pause, tx_pause;
++	int _errno;
++
++	fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
++	fm_mac_adjust_link(fm_mac_dev, phy_dev->link, phy_dev->speed,
++			   phy_dev->duplex);
++
++	get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
++	_errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause);
++	if (unlikely(_errno < 0))
++		netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno);
++}
++
++/* Initializes driver's PHY state, and attaches to the PHY.
++ * Returns 0 on success.
++ */
++static int dtsec_init_phy(struct net_device *net_dev,
++			  struct mac_device *mac_dev)
++{
++	struct phy_device *phy_dev;
++
++	if (of_phy_is_fixed_link(mac_dev->phy_node))
++		phy_dev = of_phy_attach(net_dev, mac_dev->phy_node,
++					0, mac_dev->phy_if);
++	else
++		phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
++					 &adjust_link, 0, mac_dev->phy_if);
++	if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
++		netdev_err(net_dev, "Could not connect to PHY %s\n",
++			   mac_dev->phy_node ?
++			   mac_dev->phy_node->full_name :
++			   mac_dev->fixed_bus_id);
++		return phy_dev == NULL ?
-ENODEV : PTR_ERR(phy_dev); ++ } ++ ++ /* Remove any features not supported by the controller */ ++ phy_dev->supported &= mac_dev->if_support; ++ /* Enable the symmetric and asymmetric PAUSE frame advertisements, ++ * as most of the PHY drivers do not enable them by default. ++ */ ++ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); ++ phy_dev->advertising = phy_dev->supported; ++ ++ mac_dev->phy_dev = phy_dev; ++ ++ return 0; ++} ++ ++static int xgmac_init_phy(struct net_device *net_dev, ++ struct mac_device *mac_dev) ++{ ++ struct phy_device *phy_dev; ++ ++ if (of_phy_is_fixed_link(mac_dev->phy_node)) ++ phy_dev = of_phy_attach(net_dev, mac_dev->phy_node, ++ 0, mac_dev->phy_if); ++ else ++ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node, ++ &adjust_link_void, 0, mac_dev->phy_if); ++ if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) { ++ netdev_err(net_dev, "Could not attach to PHY %s\n", ++ mac_dev->phy_node ? ++ mac_dev->phy_node->full_name : ++ mac_dev->fixed_bus_id); ++ return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev); ++ } ++ ++ phy_dev->supported &= mac_dev->if_support; ++ /* Enable the symmetric and asymmetric PAUSE frame advertisements, ++ * as most of the PHY drivers do not enable them by default. ++ */ ++ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); ++ phy_dev->advertising = phy_dev->supported; ++ ++ mac_dev->phy_dev = phy_dev; ++ ++ return 0; ++} ++ ++static int memac_init_phy(struct net_device *net_dev, ++ struct mac_device *mac_dev) ++{ ++ struct phy_device *phy_dev; ++ ++ if ((macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) || ++ (macdev2enetinterface(mac_dev) == e_ENET_MODE_SGMII_2500) || ++ of_phy_is_fixed_link(mac_dev->phy_node)) { ++ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node, ++ &adjust_link_void, 0, ++ mac_dev->phy_if); ++ } else { ++ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node, ++ &adjust_link, 0, mac_dev->phy_if); ++ } ++ ++ if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) { ++ netdev_err(net_dev, "Could not connect to PHY %s\n", ++ mac_dev->phy_node ? ++ mac_dev->phy_node->full_name : ++ mac_dev->fixed_bus_id); ++ return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev); ++ } ++ ++ /* Remove any features not supported by the controller */ ++ phy_dev->supported &= mac_dev->if_support; ++ /* Enable the symmetric and asymmetric PAUSE frame advertisements, ++ * as most of the PHY drivers do not enable them by default. 
++ */ ++ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); ++ phy_dev->advertising = phy_dev->supported; ++ ++ mac_dev->phy_dev = phy_dev; ++ ++ return 0; ++} ++ ++static int __cold uninit(struct fm_mac_dev *fm_mac_dev) ++{ ++ int _errno, __errno; ++ ++ _errno = fm_mac_disable(fm_mac_dev); ++ __errno = fm_mac_free(fm_mac_dev); ++ ++ if (unlikely(__errno < 0)) ++ _errno = __errno; ++ ++ return _errno; ++} ++ ++static struct fm_mac_dev *get_mac_handle(struct mac_device *mac_dev) ++{ ++ const struct mac_priv_s *priv; ++ priv = macdev_priv(mac_dev); ++ return priv->fm_mac; ++} ++ ++static int dtsec_dump_regs(struct mac_device *h_mac, char *buf, int nn) ++{ ++ struct dtsec_regs *p_mm = (struct dtsec_regs *) h_mac->vaddr; ++ int i = 0, n = nn; ++ ++ FM_DMP_SUBTITLE(buf, n, "\n"); ++ ++ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - DTSEC-%d", h_mac->cell_index); ++ ++ FM_DMP_V32(buf, n, p_mm, tsec_id); ++ FM_DMP_V32(buf, n, p_mm, tsec_id2); ++ FM_DMP_V32(buf, n, p_mm, ievent); ++ FM_DMP_V32(buf, n, p_mm, imask); ++ FM_DMP_V32(buf, n, p_mm, ecntrl); ++ FM_DMP_V32(buf, n, p_mm, ptv); ++ FM_DMP_V32(buf, n, p_mm, tmr_ctrl); ++ FM_DMP_V32(buf, n, p_mm, tmr_pevent); ++ FM_DMP_V32(buf, n, p_mm, tmr_pemask); ++ FM_DMP_V32(buf, n, p_mm, tctrl); ++ FM_DMP_V32(buf, n, p_mm, rctrl); ++ FM_DMP_V32(buf, n, p_mm, maccfg1); ++ FM_DMP_V32(buf, n, p_mm, maccfg2); ++ FM_DMP_V32(buf, n, p_mm, ipgifg); ++ FM_DMP_V32(buf, n, p_mm, hafdup); ++ FM_DMP_V32(buf, n, p_mm, maxfrm); ++ ++ FM_DMP_V32(buf, n, p_mm, macstnaddr1); ++ FM_DMP_V32(buf, n, p_mm, macstnaddr2); ++ ++ for (i = 0; i < 7; ++i) { ++ FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match1); ++ FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match2); ++ } ++ ++ FM_DMP_V32(buf, n, p_mm, car1); ++ FM_DMP_V32(buf, n, p_mm, car2); ++ ++ return n; ++} ++ ++static int xgmac_dump_regs(struct mac_device *h_mac, char *buf, int nn) ++{ ++ struct tgec_regs *p_mm = (struct tgec_regs *) h_mac->vaddr; ++ int n = nn; ++ ++ FM_DMP_SUBTITLE(buf, n, "\n"); ++ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - TGEC -%d", h_mac->cell_index); ++ ++ FM_DMP_V32(buf, n, p_mm, tgec_id); ++ FM_DMP_V32(buf, n, p_mm, command_config); ++ FM_DMP_V32(buf, n, p_mm, mac_addr_0); ++ FM_DMP_V32(buf, n, p_mm, mac_addr_1); ++ FM_DMP_V32(buf, n, p_mm, maxfrm); ++ FM_DMP_V32(buf, n, p_mm, pause_quant); ++ FM_DMP_V32(buf, n, p_mm, rx_fifo_sections); ++ FM_DMP_V32(buf, n, p_mm, tx_fifo_sections); ++ FM_DMP_V32(buf, n, p_mm, rx_fifo_almost_f_e); ++ FM_DMP_V32(buf, n, p_mm, tx_fifo_almost_f_e); ++ FM_DMP_V32(buf, n, p_mm, hashtable_ctrl); ++ FM_DMP_V32(buf, n, p_mm, mdio_cfg_status); ++ FM_DMP_V32(buf, n, p_mm, mdio_command); ++ FM_DMP_V32(buf, n, p_mm, mdio_data); ++ FM_DMP_V32(buf, n, p_mm, mdio_regaddr); ++ FM_DMP_V32(buf, n, p_mm, status); ++ FM_DMP_V32(buf, n, p_mm, tx_ipg_len); ++ FM_DMP_V32(buf, n, p_mm, mac_addr_2); ++ FM_DMP_V32(buf, n, p_mm, mac_addr_3); ++ FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_rd); ++ FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_wr); ++ FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_rd); ++ FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_wr); ++ FM_DMP_V32(buf, n, p_mm, imask); ++ FM_DMP_V32(buf, n, p_mm, ievent); ++ ++ return n; ++} ++ ++static int memac_dump_regs(struct mac_device *h_mac, char *buf, int nn) ++{ ++ struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr; ++ int i = 0, n = nn; ++ ++ FM_DMP_SUBTITLE(buf, n, "\n"); ++ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d", h_mac->cell_index); ++ ++ FM_DMP_V32(buf, n, p_mm, command_config); ++ FM_DMP_V32(buf, n, p_mm, mac_addr0.mac_addr_l); ++ FM_DMP_V32(buf, 
n, p_mm, mac_addr0.mac_addr_u); ++ FM_DMP_V32(buf, n, p_mm, maxfrm); ++ FM_DMP_V32(buf, n, p_mm, hashtable_ctrl); ++ FM_DMP_V32(buf, n, p_mm, ievent); ++ FM_DMP_V32(buf, n, p_mm, tx_ipg_length); ++ FM_DMP_V32(buf, n, p_mm, imask); ++ ++ for (i = 0; i < 4; ++i) ++ FM_DMP_V32(buf, n, p_mm, pause_quanta[i]); ++ ++ for (i = 0; i < 4; ++i) ++ FM_DMP_V32(buf, n, p_mm, pause_thresh[i]); ++ ++ FM_DMP_V32(buf, n, p_mm, rx_pause_status); ++ ++ for (i = 0; i < MEMAC_NUM_OF_PADDRS; ++i) { ++ FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_l); ++ FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_u); ++ } ++ ++ FM_DMP_V32(buf, n, p_mm, lpwake_timer); ++ FM_DMP_V32(buf, n, p_mm, sleep_timer); ++ FM_DMP_V32(buf, n, p_mm, statn_config); ++ FM_DMP_V32(buf, n, p_mm, if_mode); ++ FM_DMP_V32(buf, n, p_mm, if_status); ++ FM_DMP_V32(buf, n, p_mm, hg_config); ++ FM_DMP_V32(buf, n, p_mm, hg_pause_quanta); ++ FM_DMP_V32(buf, n, p_mm, hg_pause_thresh); ++ FM_DMP_V32(buf, n, p_mm, hgrx_pause_status); ++ FM_DMP_V32(buf, n, p_mm, hg_fifos_status); ++ FM_DMP_V32(buf, n, p_mm, rhm); ++ FM_DMP_V32(buf, n, p_mm, thm); ++ ++ return n; ++} ++ ++static int memac_dump_regs_rx(struct mac_device *h_mac, char *buf, int nn) ++{ ++ struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr; ++ int n = nn; ++ ++ FM_DMP_SUBTITLE(buf, n, "\n"); ++ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d Rx stats", h_mac->cell_index); ++ ++ /* Rx Statistics Counter */ ++ FM_DMP_V32(buf, n, p_mm, reoct_l); ++ FM_DMP_V32(buf, n, p_mm, reoct_u); ++ FM_DMP_V32(buf, n, p_mm, roct_l); ++ FM_DMP_V32(buf, n, p_mm, roct_u); ++ FM_DMP_V32(buf, n, p_mm, raln_l); ++ FM_DMP_V32(buf, n, p_mm, raln_u); ++ FM_DMP_V32(buf, n, p_mm, rxpf_l); ++ FM_DMP_V32(buf, n, p_mm, rxpf_u); ++ FM_DMP_V32(buf, n, p_mm, rfrm_l); ++ FM_DMP_V32(buf, n, p_mm, rfrm_u); ++ FM_DMP_V32(buf, n, p_mm, rfcs_l); ++ FM_DMP_V32(buf, n, p_mm, rfcs_u); ++ FM_DMP_V32(buf, n, p_mm, rvlan_l); ++ FM_DMP_V32(buf, n, p_mm, rvlan_u); ++ FM_DMP_V32(buf, n, p_mm, rerr_l); ++ FM_DMP_V32(buf, n, p_mm, rerr_u); ++ FM_DMP_V32(buf, n, p_mm, ruca_l); ++ FM_DMP_V32(buf, n, p_mm, ruca_u); ++ FM_DMP_V32(buf, n, p_mm, rmca_l); ++ FM_DMP_V32(buf, n, p_mm, rmca_u); ++ FM_DMP_V32(buf, n, p_mm, rbca_l); ++ FM_DMP_V32(buf, n, p_mm, rbca_u); ++ FM_DMP_V32(buf, n, p_mm, rdrp_l); ++ FM_DMP_V32(buf, n, p_mm, rdrp_u); ++ FM_DMP_V32(buf, n, p_mm, rpkt_l); ++ FM_DMP_V32(buf, n, p_mm, rpkt_u); ++ FM_DMP_V32(buf, n, p_mm, rund_l); ++ FM_DMP_V32(buf, n, p_mm, rund_u); ++ FM_DMP_V32(buf, n, p_mm, r64_l); ++ FM_DMP_V32(buf, n, p_mm, r64_u); ++ FM_DMP_V32(buf, n, p_mm, r127_l); ++ FM_DMP_V32(buf, n, p_mm, r127_u); ++ FM_DMP_V32(buf, n, p_mm, r255_l); ++ FM_DMP_V32(buf, n, p_mm, r255_u); ++ FM_DMP_V32(buf, n, p_mm, r511_l); ++ FM_DMP_V32(buf, n, p_mm, r511_u); ++ FM_DMP_V32(buf, n, p_mm, r1023_l); ++ FM_DMP_V32(buf, n, p_mm, r1023_u); ++ FM_DMP_V32(buf, n, p_mm, r1518_l); ++ FM_DMP_V32(buf, n, p_mm, r1518_u); ++ FM_DMP_V32(buf, n, p_mm, r1519x_l); ++ FM_DMP_V32(buf, n, p_mm, r1519x_u); ++ FM_DMP_V32(buf, n, p_mm, rovr_l); ++ FM_DMP_V32(buf, n, p_mm, rovr_u); ++ FM_DMP_V32(buf, n, p_mm, rjbr_l); ++ FM_DMP_V32(buf, n, p_mm, rjbr_u); ++ FM_DMP_V32(buf, n, p_mm, rfrg_l); ++ FM_DMP_V32(buf, n, p_mm, rfrg_u); ++ FM_DMP_V32(buf, n, p_mm, rcnp_l); ++ FM_DMP_V32(buf, n, p_mm, rcnp_u); ++ FM_DMP_V32(buf, n, p_mm, rdrntp_l); ++ FM_DMP_V32(buf, n, p_mm, rdrntp_u); ++ ++ return n; ++} ++ ++static int memac_dump_regs_tx(struct mac_device *h_mac, char *buf, int nn) ++{ ++ struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr; ++ int n = nn; 
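/* A note on the _l/_u register names dumped here and in
 * memac_dump_regs_rx() above: each MEMAC statistics counter is a
 * 64-bit value exposed as a lower/upper 32-bit register pair. A
 * consumer wanting the whole counter would combine the two words,
 * for example (hypothetical usage, assuming ioread32() accessors and
 * ignoring a possible carry between the two reads; not part of this
 * driver):
 *
 *	u64 teoct = ((u64)ioread32(&p_mm->teoct_u) << 32) |
 *		    ioread32(&p_mm->teoct_l);
 */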
++ ++ FM_DMP_SUBTITLE(buf, n, "\n"); ++ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d Tx stats", h_mac->cell_index); ++ ++ ++ /* Tx Statistics Counter */ ++ FM_DMP_V32(buf, n, p_mm, teoct_l); ++ FM_DMP_V32(buf, n, p_mm, teoct_u); ++ FM_DMP_V32(buf, n, p_mm, toct_l); ++ FM_DMP_V32(buf, n, p_mm, toct_u); ++ FM_DMP_V32(buf, n, p_mm, txpf_l); ++ FM_DMP_V32(buf, n, p_mm, txpf_u); ++ FM_DMP_V32(buf, n, p_mm, tfrm_l); ++ FM_DMP_V32(buf, n, p_mm, tfrm_u); ++ FM_DMP_V32(buf, n, p_mm, tfcs_l); ++ FM_DMP_V32(buf, n, p_mm, tfcs_u); ++ FM_DMP_V32(buf, n, p_mm, tvlan_l); ++ FM_DMP_V32(buf, n, p_mm, tvlan_u); ++ FM_DMP_V32(buf, n, p_mm, terr_l); ++ FM_DMP_V32(buf, n, p_mm, terr_u); ++ FM_DMP_V32(buf, n, p_mm, tuca_l); ++ FM_DMP_V32(buf, n, p_mm, tuca_u); ++ FM_DMP_V32(buf, n, p_mm, tmca_l); ++ FM_DMP_V32(buf, n, p_mm, tmca_u); ++ FM_DMP_V32(buf, n, p_mm, tbca_l); ++ FM_DMP_V32(buf, n, p_mm, tbca_u); ++ FM_DMP_V32(buf, n, p_mm, tpkt_l); ++ FM_DMP_V32(buf, n, p_mm, tpkt_u); ++ FM_DMP_V32(buf, n, p_mm, tund_l); ++ FM_DMP_V32(buf, n, p_mm, tund_u); ++ FM_DMP_V32(buf, n, p_mm, t64_l); ++ FM_DMP_V32(buf, n, p_mm, t64_u); ++ FM_DMP_V32(buf, n, p_mm, t127_l); ++ FM_DMP_V32(buf, n, p_mm, t127_u); ++ FM_DMP_V32(buf, n, p_mm, t255_l); ++ FM_DMP_V32(buf, n, p_mm, t255_u); ++ FM_DMP_V32(buf, n, p_mm, t511_l); ++ FM_DMP_V32(buf, n, p_mm, t511_u); ++ FM_DMP_V32(buf, n, p_mm, t1023_l); ++ FM_DMP_V32(buf, n, p_mm, t1023_u); ++ FM_DMP_V32(buf, n, p_mm, t1518_l); ++ FM_DMP_V32(buf, n, p_mm, t1518_u); ++ FM_DMP_V32(buf, n, p_mm, t1519x_l); ++ FM_DMP_V32(buf, n, p_mm, t1519x_u); ++ FM_DMP_V32(buf, n, p_mm, tcnp_l); ++ FM_DMP_V32(buf, n, p_mm, tcnp_u); ++ ++ return n; ++} ++ ++int fm_mac_dump_regs(struct mac_device *h_mac, char *buf, int nn) ++{ ++ int n = nn; ++ ++ n = h_mac->dump_mac_regs(h_mac, buf, n); ++ ++ return n; ++} ++EXPORT_SYMBOL(fm_mac_dump_regs); ++ ++int fm_mac_dump_rx_stats(struct mac_device *h_mac, char *buf, int nn) ++{ ++ int n = nn; ++ ++ if(h_mac->dump_mac_rx_stats) ++ n = h_mac->dump_mac_rx_stats(h_mac, buf, n); ++ ++ return n; ++} ++EXPORT_SYMBOL(fm_mac_dump_rx_stats); ++ ++int fm_mac_dump_tx_stats(struct mac_device *h_mac, char *buf, int nn) ++{ ++ int n = nn; ++ ++ if(h_mac->dump_mac_tx_stats) ++ n = h_mac->dump_mac_tx_stats(h_mac, buf, n); ++ ++ return n; ++} ++EXPORT_SYMBOL(fm_mac_dump_tx_stats); ++ ++static void __cold setup_dtsec(struct mac_device *mac_dev) ++{ ++ mac_dev->init_phy = dtsec_init_phy; ++ mac_dev->init = init; ++ mac_dev->start = start; ++ mac_dev->stop = stop; ++ mac_dev->set_promisc = fm_mac_set_promiscuous; ++ mac_dev->change_addr = fm_mac_modify_mac_addr; ++ mac_dev->set_multi = set_multi; ++ mac_dev->uninit = uninit; ++ mac_dev->ptp_enable = fm_mac_enable_1588_time_stamp; ++ mac_dev->ptp_disable = fm_mac_disable_1588_time_stamp; ++ mac_dev->get_mac_handle = get_mac_handle; ++ mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames; ++ mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames; ++ mac_dev->fm_rtc_enable = fm_rtc_enable; ++ mac_dev->fm_rtc_disable = fm_rtc_disable; ++ mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt; ++ mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt; ++ mac_dev->fm_rtc_get_drift = fm_rtc_get_drift; ++ mac_dev->fm_rtc_set_drift = fm_rtc_set_drift; ++ mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm; ++ mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper; ++ mac_dev->set_wol = fm_mac_set_wol; ++ mac_dev->dump_mac_regs = dtsec_dump_regs; ++} ++ ++static void __cold setup_xgmac(struct mac_device *mac_dev) ++{ ++ mac_dev->init_phy = xgmac_init_phy; ++ mac_dev->init = init; ++ 
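/* setup_dtsec(), setup_xgmac() and setup_memac() all populate the same
 * function-pointer table; mac_probe() in mac.c picks one of them via
 * mac_setup[] using the index of the matched of_device_id, so callers
 * only ever dispatch through the mac_dev pointers, e.g. (illustrative
 * caller sketch, not code from this driver):
 *
 *	err = mac_dev->init_phy(net_dev, mac_dev);
 *	if (!err)
 *		err = mac_dev->start(mac_dev);
 */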
mac_dev->start = start; ++ mac_dev->stop = stop; ++ mac_dev->set_promisc = fm_mac_set_promiscuous; ++ mac_dev->change_addr = fm_mac_modify_mac_addr; ++ mac_dev->set_multi = set_multi; ++ mac_dev->uninit = uninit; ++ mac_dev->get_mac_handle = get_mac_handle; ++ mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames; ++ mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames; ++ mac_dev->set_wol = fm_mac_set_wol; ++ mac_dev->dump_mac_regs = xgmac_dump_regs; ++} ++ ++static void __cold setup_memac(struct mac_device *mac_dev) ++{ ++ mac_dev->init_phy = memac_init_phy; ++ mac_dev->init = memac_init; ++ mac_dev->start = start; ++ mac_dev->stop = stop; ++ mac_dev->set_promisc = fm_mac_set_promiscuous; ++ mac_dev->change_addr = fm_mac_modify_mac_addr; ++ mac_dev->set_multi = set_multi; ++ mac_dev->uninit = uninit; ++ mac_dev->get_mac_handle = get_mac_handle; ++ mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames; ++ mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames; ++ mac_dev->fm_rtc_enable = fm_rtc_enable; ++ mac_dev->fm_rtc_disable = fm_rtc_disable; ++ mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt; ++ mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt; ++ mac_dev->fm_rtc_get_drift = fm_rtc_get_drift; ++ mac_dev->fm_rtc_set_drift = fm_rtc_set_drift; ++ mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm; ++ mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper; ++ mac_dev->set_wol = fm_mac_set_wol; ++ mac_dev->dump_mac_regs = memac_dump_regs; ++ mac_dev->dump_mac_rx_stats = memac_dump_regs_rx; ++ mac_dev->dump_mac_tx_stats = memac_dump_regs_tx; ++} ++ ++void (*const mac_setup[])(struct mac_device *mac_dev) = { ++ [DTSEC] = setup_dtsec, ++ [XGMAC] = setup_xgmac, ++ [MEMAC] = setup_memac ++}; +--- /dev/null ++++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.c +@@ -0,0 +1,489 @@ ++/* Copyright 2008-2012 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifdef CONFIG_FSL_DPAA_ETH_DEBUG ++#define pr_fmt(fmt) \ ++ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \ ++ KBUILD_BASENAME".c", __LINE__, __func__ ++#else ++#define pr_fmt(fmt) \ ++ KBUILD_MODNAME ": " fmt ++#endif ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "lnxwrp_fm_ext.h" ++ ++#include "mac.h" ++ ++#define DTSEC_SUPPORTED \ ++ (SUPPORTED_10baseT_Half \ ++ | SUPPORTED_10baseT_Full \ ++ | SUPPORTED_100baseT_Half \ ++ | SUPPORTED_100baseT_Full \ ++ | SUPPORTED_Autoneg \ ++ | SUPPORTED_Pause \ ++ | SUPPORTED_Asym_Pause \ ++ | SUPPORTED_MII) ++ ++static const char phy_str[][11] = { ++ [PHY_INTERFACE_MODE_MII] = "mii", ++ [PHY_INTERFACE_MODE_GMII] = "gmii", ++ [PHY_INTERFACE_MODE_SGMII] = "sgmii", ++ [PHY_INTERFACE_MODE_QSGMII] = "qsgmii", ++ [PHY_INTERFACE_MODE_TBI] = "tbi", ++ [PHY_INTERFACE_MODE_RMII] = "rmii", ++ [PHY_INTERFACE_MODE_RGMII] = "rgmii", ++ [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id", ++ [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid", ++ [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid", ++ [PHY_INTERFACE_MODE_RTBI] = "rtbi", ++ [PHY_INTERFACE_MODE_XGMII] = "xgmii", ++ [PHY_INTERFACE_MODE_SGMII_2500] = "sgmii-2500", ++}; ++ ++static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(phy_str); i++) ++ if (strcmp(str, phy_str[i]) == 0) ++ return (phy_interface_t)i; ++ ++ return PHY_INTERFACE_MODE_MII; ++} ++ ++static const uint16_t phy2speed[] = { ++ [PHY_INTERFACE_MODE_MII] = SPEED_100, ++ [PHY_INTERFACE_MODE_GMII] = SPEED_1000, ++ [PHY_INTERFACE_MODE_SGMII] = SPEED_1000, ++ [PHY_INTERFACE_MODE_QSGMII] = SPEED_1000, ++ [PHY_INTERFACE_MODE_TBI] = SPEED_1000, ++ [PHY_INTERFACE_MODE_RMII] = SPEED_100, ++ [PHY_INTERFACE_MODE_RGMII] = SPEED_1000, ++ [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000, ++ [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000, ++ [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000, ++ [PHY_INTERFACE_MODE_RTBI] = SPEED_1000, ++ [PHY_INTERFACE_MODE_XGMII] = SPEED_10000, ++ [PHY_INTERFACE_MODE_SGMII_2500] = SPEED_2500, ++}; ++ ++static struct mac_device * __cold ++alloc_macdev(struct device *dev, size_t sizeof_priv, ++ void (*setup)(struct mac_device *mac_dev)) ++{ ++ struct mac_device *mac_dev; ++ ++ mac_dev = devm_kzalloc(dev, sizeof(*mac_dev) + sizeof_priv, GFP_KERNEL); ++ if (unlikely(mac_dev == NULL)) ++ mac_dev = ERR_PTR(-ENOMEM); ++ else { ++ mac_dev->dev = dev; ++ dev_set_drvdata(dev, mac_dev); ++ setup(mac_dev); ++ } ++ ++ return mac_dev; ++} ++ ++static int __cold free_macdev(struct mac_device *mac_dev) ++{ ++ dev_set_drvdata(mac_dev->dev, NULL); ++ ++ return mac_dev->uninit(mac_dev->get_mac_handle(mac_dev)); ++} ++ ++static const struct of_device_id mac_match[] = { ++ [DTSEC] = { ++ .compatible = "fsl,fman-1g-mac" ++ }, ++ [XGMAC] = { ++ .compatible = "fsl,fman-10g-mac" ++ }, ++ [MEMAC] = { ++ .compatible = "fsl,fman-memac" ++ }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, mac_match); ++ ++static int 
__cold mac_probe(struct platform_device *_of_dev) ++{ ++ int _errno, i; ++ struct device *dev; ++ struct device_node *mac_node, *dev_node; ++ struct mac_device *mac_dev; ++ struct platform_device *of_dev; ++ struct resource res; ++ const uint8_t *mac_addr; ++ const char *char_prop; ++ int nph; ++ u32 cell_index; ++ const struct of_device_id *match; ++ ++ dev = &_of_dev->dev; ++ mac_node = dev->of_node; ++ ++ match = of_match_device(mac_match, dev); ++ if (!match) ++ return -EINVAL; ++ ++ for (i = 0; i < ARRAY_SIZE(mac_match) - 1 && match != mac_match + i; ++ i++) ++ ; ++ BUG_ON(i >= ARRAY_SIZE(mac_match) - 1); ++ ++ mac_dev = alloc_macdev(dev, mac_sizeof_priv[i], mac_setup[i]); ++ if (IS_ERR(mac_dev)) { ++ _errno = PTR_ERR(mac_dev); ++ dev_err(dev, "alloc_macdev() = %d\n", _errno); ++ goto _return; ++ } ++ ++ INIT_LIST_HEAD(&mac_dev->mc_addr_list); ++ ++ /* Get the FM node */ ++ dev_node = of_get_parent(mac_node); ++ if (unlikely(dev_node == NULL)) { ++ dev_err(dev, "of_get_parent(%s) failed\n", ++ mac_node->full_name); ++ _errno = -EINVAL; ++ goto _return_dev_set_drvdata; ++ } ++ ++ of_dev = of_find_device_by_node(dev_node); ++ if (unlikely(of_dev == NULL)) { ++ dev_err(dev, "of_find_device_by_node(%s) failed\n", ++ dev_node->full_name); ++ _errno = -EINVAL; ++ goto _return_of_node_put; ++ } ++ ++ mac_dev->fm_dev = fm_bind(&of_dev->dev); ++ if (unlikely(mac_dev->fm_dev == NULL)) { ++ dev_err(dev, "fm_bind(%s) failed\n", dev_node->full_name); ++ _errno = -ENODEV; ++ goto _return_of_node_put; ++ } ++ ++ mac_dev->fm = (void *)fm_get_handle(mac_dev->fm_dev); ++ of_node_put(dev_node); ++ ++ /* Get the address of the memory mapped registers */ ++ _errno = of_address_to_resource(mac_node, 0, &res); ++ if (unlikely(_errno < 0)) { ++ dev_err(dev, "of_address_to_resource(%s) = %d\n", ++ mac_node->full_name, _errno); ++ goto _return_dev_set_drvdata; ++ } ++ ++ mac_dev->res = __devm_request_region( ++ dev, ++ fm_get_mem_region(mac_dev->fm_dev), ++ res.start, res.end + 1 - res.start, "mac"); ++ if (unlikely(mac_dev->res == NULL)) { ++ dev_err(dev, "__devm_request_mem_region(mac) failed\n"); ++ _errno = -EBUSY; ++ goto _return_dev_set_drvdata; ++ } ++ ++ mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start, ++ mac_dev->res->end + 1 ++ - mac_dev->res->start); ++ if (unlikely(mac_dev->vaddr == NULL)) { ++ dev_err(dev, "devm_ioremap() failed\n"); ++ _errno = -EIO; ++ goto _return_dev_set_drvdata; ++ } ++ ++#define TBIPA_OFFSET 0x1c ++#define TBIPA_DEFAULT_ADDR 5 /* override if used as external PHY addr. 
*/ ++ mac_dev->tbi_node = of_parse_phandle(mac_node, "tbi-handle", 0); ++ if (mac_dev->tbi_node) { ++ u32 tbiaddr = TBIPA_DEFAULT_ADDR; ++ const __be32 *tbi_reg; ++ void __iomem *addr; ++ ++ tbi_reg = of_get_property(mac_dev->tbi_node, "reg", NULL); ++ if (tbi_reg) ++ tbiaddr = be32_to_cpup(tbi_reg); ++ addr = mac_dev->vaddr + TBIPA_OFFSET; ++ /* TODO: out_be32 does not exist on ARM */ ++ out_be32(addr, tbiaddr); ++ } ++ ++ if (!of_device_is_available(mac_node)) { ++ devm_iounmap(dev, mac_dev->vaddr); ++ __devm_release_region(dev, fm_get_mem_region(mac_dev->fm_dev), ++ res.start, res.end + 1 - res.start); ++ fm_unbind(mac_dev->fm_dev); ++ devm_kfree(dev, mac_dev); ++ dev_set_drvdata(dev, NULL); ++ return -ENODEV; ++ } ++ ++ /* Get the cell-index */ ++ _errno = of_property_read_u32(mac_node, "cell-index", &cell_index); ++ if (unlikely(_errno)) { ++ dev_err(dev, "Cannot read cell-index of mac node %s from device tree\n", ++ mac_node->full_name); ++ goto _return_dev_set_drvdata; ++ } ++ mac_dev->cell_index = (uint8_t)cell_index; ++ if (mac_dev->cell_index >= 8) ++ mac_dev->cell_index -= 8; ++ ++ /* Get the MAC address */ ++ mac_addr = of_get_mac_address(mac_node); ++ if (unlikely(mac_addr == NULL)) { ++ dev_err(dev, "of_get_mac_address(%s) failed\n", ++ mac_node->full_name); ++ _errno = -EINVAL; ++ goto _return_dev_set_drvdata; ++ } ++ memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr)); ++ ++ /* Verify the number of port handles */ ++ nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL); ++ if (unlikely(nph < 0)) { ++ dev_err(dev, "Cannot read port handles of mac node %s from device tree\n", ++ mac_node->full_name); ++ _errno = nph; ++ goto _return_dev_set_drvdata; ++ } ++ ++ if (nph != ARRAY_SIZE(mac_dev->port_dev)) { ++ dev_err(dev, "Not supported number of port handles of mac node %s from device tree\n", ++ mac_node->full_name); ++ _errno = -EINVAL; ++ goto _return_dev_set_drvdata; ++ } ++ ++ for_each_port_device(i, mac_dev->port_dev) { ++ dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i); ++ if (unlikely(dev_node == NULL)) { ++ dev_err(dev, "Cannot find port node referenced by mac node %s from device tree\n", ++ mac_node->full_name); ++ _errno = -EINVAL; ++ goto _return_of_node_put; ++ } ++ ++ of_dev = of_find_device_by_node(dev_node); ++ if (unlikely(of_dev == NULL)) { ++ dev_err(dev, "of_find_device_by_node(%s) failed\n", ++ dev_node->full_name); ++ _errno = -EINVAL; ++ goto _return_of_node_put; ++ } ++ ++ mac_dev->port_dev[i] = fm_port_bind(&of_dev->dev); ++ if (unlikely(mac_dev->port_dev[i] == NULL)) { ++ dev_err(dev, "dev_get_drvdata(%s) failed\n", ++ dev_node->full_name); ++ _errno = -EINVAL; ++ goto _return_of_node_put; ++ } ++ of_node_put(dev_node); ++ } ++ ++ /* Get the PHY connection type */ ++ _errno = of_property_read_string(mac_node, "phy-connection-type", ++ &char_prop); ++ if (unlikely(_errno)) { ++ dev_warn(dev, ++ "Cannot read PHY connection type of mac node %s from device tree. 
Defaulting to MII\n",
++			 mac_node->full_name);
++		mac_dev->phy_if = PHY_INTERFACE_MODE_MII;
++	} else
++		mac_dev->phy_if = str2phy(char_prop);
++
++	mac_dev->link = false;
++	mac_dev->half_duplex = false;
++	mac_dev->speed = phy2speed[mac_dev->phy_if];
++	mac_dev->max_speed = mac_dev->speed;
++	mac_dev->if_support = DTSEC_SUPPORTED;
++	/* We don't support half-duplex in SGMII mode */
++	if (strstr(char_prop, "sgmii") || strstr(char_prop, "qsgmii") ||
++	    strstr(char_prop, "sgmii-2500"))
++		mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
++					SUPPORTED_100baseT_Half);
++
++	/* Gigabit support (no half-duplex) */
++	if (mac_dev->max_speed == SPEED_1000 ||
++	    mac_dev->max_speed == SPEED_2500)
++		mac_dev->if_support |= SUPPORTED_1000baseT_Full;
++
++	/* The 10G interface only supports one mode */
++	if (strstr(char_prop, "xgmii"))
++		mac_dev->if_support = SUPPORTED_10000baseT_Full;
++
++	/* Get the rest of the PHY information */
++	mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
++	if (!mac_dev->phy_node) {
++		struct phy_device *phy;
++
++		if (!of_phy_is_fixed_link(mac_node)) {
++			dev_err(dev, "Wrong PHY information of mac node %s\n",
++				mac_node->full_name);
++			_errno = -EINVAL;
++			goto _return_dev_set_drvdata;
++		}
++
++		_errno = of_phy_register_fixed_link(mac_node);
++		if (_errno)
++			goto _return_dev_set_drvdata;
++
++		mac_dev->fixed_link = devm_kzalloc(mac_dev->dev,
++						   sizeof(*mac_dev->fixed_link),
++						   GFP_KERNEL);
++		if (!mac_dev->fixed_link) {
++			_errno = -ENOMEM;
++			goto _return_dev_set_drvdata;
++		}
++
++		mac_dev->phy_node = of_node_get(mac_node);
++		phy = of_phy_find_device(mac_dev->phy_node);
++		if (!phy) {
++			_errno = -ENODEV;
++			goto _return_dev_set_drvdata;
++		}
++
++		mac_dev->fixed_link->link = phy->link;
++		mac_dev->fixed_link->speed = phy->speed;
++		mac_dev->fixed_link->duplex = phy->duplex;
++		mac_dev->fixed_link->pause = phy->pause;
++		mac_dev->fixed_link->asym_pause = phy->asym_pause;
++	}
++
++	_errno = mac_dev->init(mac_dev);
++	if (unlikely(_errno < 0)) {
++		dev_err(dev, "mac_dev->init() = %d\n", _errno);
++		goto _return_dev_set_drvdata;
++	}
++
++	/* pause frame autonegotiation enabled */
++	mac_dev->autoneg_pause = true;
++
++	/* By initializing the values to false, force FMD to enable PAUSE frames
++	 * on RX and TX
++	 */
++	mac_dev->rx_pause_req = mac_dev->tx_pause_req = true;
++	mac_dev->rx_pause_active = mac_dev->tx_pause_active = false;
++	_errno = set_mac_active_pause(mac_dev, true, true);
++	if (unlikely(_errno < 0))
++		dev_err(dev, "set_mac_active_pause() = %d\n", _errno);
++
++	dev_info(dev,
++		 "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
++		 mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
++		 mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
++
++	goto _return;
++
++_return_of_node_put:
++	of_node_put(dev_node);
++_return_dev_set_drvdata:
++	dev_set_drvdata(dev, NULL);
++_return:
++	return _errno;
++}
++
++static int __cold mac_remove(struct platform_device *of_dev)
++{
++	int i, _errno;
++	struct device *dev;
++	struct mac_device *mac_dev;
++
++	dev = &of_dev->dev;
++	mac_dev = (struct mac_device *)dev_get_drvdata(dev);
++
++	for_each_port_device(i, mac_dev->port_dev)
++		fm_port_unbind(mac_dev->port_dev[i]);
++
++	fm_unbind(mac_dev->fm_dev);
++
++	_errno = free_macdev(mac_dev);
++
++	return _errno;
++}
++
++static struct platform_driver mac_driver = {
++	.driver = {
++		.name = KBUILD_MODNAME,
++		.of_match_table = mac_match,
++		.owner = THIS_MODULE,
++	},
++	.probe = mac_probe,
++	.remove = mac_remove
++};
++
++static int __init __cold mac_load(void)
++{
++	int _errno;
++
++	pr_debug(KBUILD_MODNAME ": 
-> %s:%s()\n", ++ KBUILD_BASENAME".c", __func__); ++ ++ pr_info(KBUILD_MODNAME ": %s\n", mac_driver_description); ++ ++ _errno = platform_driver_register(&mac_driver); ++ if (unlikely(_errno < 0)) { ++ pr_err(KBUILD_MODNAME ": %s:%hu:%s(): platform_driver_register() = %d\n", ++ KBUILD_BASENAME".c", __LINE__, __func__, _errno); ++ goto _return; ++ } ++ ++ goto _return; ++ ++_return: ++ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", ++ KBUILD_BASENAME".c", __func__); ++ ++ return _errno; ++} ++module_init(mac_load); ++ ++static void __exit __cold mac_unload(void) ++{ ++ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n", ++ KBUILD_BASENAME".c", __func__); ++ ++ platform_driver_unregister(&mac_driver); ++ ++ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", ++ KBUILD_BASENAME".c", __func__); ++} ++module_exit(mac_unload); +--- /dev/null ++++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.h +@@ -0,0 +1,135 @@ ++/* Copyright 2008-2011 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */
++
++#ifndef __MAC_H
++#define __MAC_H
++
++#include <linux/device.h>	/* struct device, BUS_ID_SIZE */
++#include <linux/if_ether.h>	/* ETH_ALEN */
++#include <linux/phy.h>		/* phy_interface_t, struct phy_device */
++#include <linux/list.h>
++
++#include "lnxwrp_fsl_fman.h"	/* struct port_device */
++
++enum {DTSEC, XGMAC, MEMAC};
++
++struct mac_device {
++	struct device *dev;
++	void *priv;
++	uint8_t cell_index;
++	struct resource *res;
++	void __iomem *vaddr;
++	uint8_t addr[ETH_ALEN];
++	bool promisc;
++
++	struct fm *fm_dev;
++	struct fm_port *port_dev[2];
++
++	phy_interface_t phy_if;
++	u32 if_support;
++	bool link;
++	bool half_duplex;
++	uint16_t speed;
++	uint16_t max_speed;
++	struct device_node *phy_node;
++	char fixed_bus_id[MII_BUS_ID_SIZE + 3];
++	struct device_node *tbi_node;
++	struct phy_device *phy_dev;
++	void *fm;
++	/* List of multicast addresses */
++	struct list_head mc_addr_list;
++	struct fixed_phy_status *fixed_link;
++
++	bool autoneg_pause;
++	bool rx_pause_req;
++	bool tx_pause_req;
++	bool rx_pause_active;
++	bool tx_pause_active;
++
++	struct fm_mac_dev *(*get_mac_handle)(struct mac_device *mac_dev);
++	int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev);
++	int (*init)(struct mac_device *mac_dev);
++	int (*start)(struct mac_device *mac_dev);
++	int (*stop)(struct mac_device *mac_dev);
++	int (*set_promisc)(struct fm_mac_dev *fm_mac_dev, bool enable);
++	int (*change_addr)(struct fm_mac_dev *fm_mac_dev, uint8_t *addr);
++	int (*set_multi)(struct net_device *net_dev,
++			 struct mac_device *mac_dev);
++	int (*uninit)(struct fm_mac_dev *fm_mac_dev);
++	int (*ptp_enable)(struct fm_mac_dev *fm_mac_dev);
++	int (*ptp_disable)(struct fm_mac_dev *fm_mac_dev);
++	int (*set_rx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
++	int (*set_tx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
++	int (*fm_rtc_enable)(struct fm *fm_dev);
++	int (*fm_rtc_disable)(struct fm *fm_dev);
++	int (*fm_rtc_get_cnt)(struct fm *fm_dev, uint64_t *ts);
++	int (*fm_rtc_set_cnt)(struct fm *fm_dev, uint64_t ts);
++	int (*fm_rtc_get_drift)(struct fm *fm_dev, uint32_t *drift);
++	int (*fm_rtc_set_drift)(struct fm *fm_dev, uint32_t drift);
++	int (*fm_rtc_set_alarm)(struct fm *fm_dev, uint32_t id, uint64_t time);
++	int (*fm_rtc_set_fiper)(struct fm *fm_dev, uint32_t id,
++				uint64_t fiper);
++#ifdef CONFIG_PTP_1588_CLOCK_DPAA
++	int (*fm_rtc_enable_interrupt)(struct fm *fm_dev, uint32_t events);
++	int (*fm_rtc_disable_interrupt)(struct fm *fm_dev, uint32_t events);
++#endif
++	int (*set_wol)(struct fm_port *port, struct fm_mac_dev *fm_mac_dev,
++		       bool en);
++	int (*dump_mac_regs)(struct mac_device *h_mac, char *buf, int nn);
++	int (*dump_mac_rx_stats)(struct mac_device *h_mac, char *buf, int nn);
++	int (*dump_mac_tx_stats)(struct mac_device *h_mac, char *buf, int nn);
++};
++
++struct mac_address {
++	uint8_t addr[ETH_ALEN];
++	struct list_head list;
++};
++
++#define get_fm_handle(net_dev) \
++	(((struct dpa_priv_s *)netdev_priv(net_dev))->mac_dev->fm_dev)
++
++#define for_each_port_device(i, port_dev) \
++	for (i = 0; i < ARRAY_SIZE(port_dev); i++)
++
++static inline __attribute((nonnull)) void *macdev_priv(
++		const struct mac_device *mac_dev)
++{
++	return (void *)mac_dev + sizeof(*mac_dev);
++}
++
++extern const char *mac_driver_description;
++extern const size_t mac_sizeof_priv[];
++extern void (*const mac_setup[])(struct mac_device *mac_dev);
++
++int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
++void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause);
++
++#endif /* __MAC_H */
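The macdev_priv() accessor above only works because alloc_macdev() in mac.c makes a single devm_kzalloc() of sizeof(*mac_dev) + sizeof_priv, so the private area sits directly behind the public struct in memory. A minimal sketch of an equivalent accessor (the demo_ name is illustrative, not part of the driver):

static inline struct mac_priv_s *demo_macdev_priv(struct mac_device *mac_dev)
{
	/* the private struct starts at the first byte past struct mac_device */
	return (struct mac_priv_s *)(mac_dev + 1);
}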
+--- /dev/null ++++ b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c +@@ -0,0 +1,848 @@ ++/* Copyright 2011-2012 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++/* Offline Parsing / Host Command port driver for FSL QorIQ FMan. ++ * Validates device-tree configuration and sets up the offline ports. 
++ */ ++ ++#ifdef CONFIG_FSL_DPAA_ETH_DEBUG ++#define pr_fmt(fmt) \ ++ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \ ++ KBUILD_BASENAME".c", __LINE__, __func__ ++#else ++#define pr_fmt(fmt) \ ++ KBUILD_MODNAME ": " fmt ++#endif ++ ++ ++#include ++#include ++#include ++#include ++ ++#include "offline_port.h" ++#include "dpaa_eth.h" ++#include "dpaa_eth_common.h" ++ ++#define OH_MOD_DESCRIPTION "FSL FMan Offline Parsing port driver" ++/* Manip extra space and data alignment for fragmentation */ ++#define FRAG_MANIP_SPACE 128 ++#define FRAG_DATA_ALIGN 64 ++ ++ ++MODULE_LICENSE("Dual BSD/GPL"); ++MODULE_AUTHOR("Bogdan Hamciuc "); ++MODULE_DESCRIPTION(OH_MOD_DESCRIPTION); ++ ++ ++static const struct of_device_id oh_port_match_table[] = { ++ { ++ .compatible = "fsl,dpa-oh" ++ }, ++ { ++ .compatible = "fsl,dpa-oh-shared" ++ }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, oh_port_match_table); ++ ++#ifdef CONFIG_PM ++ ++static int oh_suspend(struct device *dev) ++{ ++ struct dpa_oh_config_s *oh_config; ++ ++ oh_config = dev_get_drvdata(dev); ++ return fm_port_suspend(oh_config->oh_port); ++} ++ ++static int oh_resume(struct device *dev) ++{ ++ struct dpa_oh_config_s *oh_config; ++ ++ oh_config = dev_get_drvdata(dev); ++ return fm_port_resume(oh_config->oh_port); ++} ++ ++static const struct dev_pm_ops oh_pm_ops = { ++ .suspend = oh_suspend, ++ .resume = oh_resume, ++}; ++ ++#define OH_PM_OPS (&oh_pm_ops) ++ ++#else /* CONFIG_PM */ ++ ++#define OH_PM_OPS NULL ++ ++#endif /* CONFIG_PM */ ++ ++/* Creates Frame Queues */ ++static uint32_t oh_fq_create(struct qman_fq *fq, ++ uint32_t fq_id, uint16_t channel, ++ uint16_t wq_id) ++{ ++ struct qm_mcc_initfq fq_opts; ++ uint32_t create_flags, init_flags; ++ uint32_t ret = 0; ++ ++ if (fq == NULL) ++ return 1; ++ ++ /* Set flags for FQ create */ ++ create_flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_TO_DCPORTAL; ++ ++ /* Create frame queue */ ++ ret = qman_create_fq(fq_id, create_flags, fq); ++ if (ret != 0) ++ return 1; ++ ++ /* Set flags for FQ init */ ++ init_flags = QMAN_INITFQ_FLAG_SCHED; ++ ++ /* Set FQ init options. 
Specify destination WQ ID and channel */ ++ fq_opts.we_mask = QM_INITFQ_WE_DESTWQ; ++ fq_opts.fqd.dest.wq = wq_id; ++ fq_opts.fqd.dest.channel = channel; ++ ++ /* Initialize frame queue */ ++ ret = qman_init_fq(fq, init_flags, &fq_opts); ++ if (ret != 0) { ++ qman_destroy_fq(fq, 0); ++ return 1; ++ } ++ ++ return 0; ++} ++ ++static void dump_fq(struct device *dev, int fqid, uint16_t channel) ++{ ++ if (channel) { ++ /* display fqs with a valid (!= 0) destination channel */ ++ dev_info(dev, "FQ ID:%d Channel ID:%d\n", fqid, channel); ++ } ++} ++ ++static void dump_fq_duple(struct device *dev, struct qman_fq *fqs, ++ int fqs_count, uint16_t channel_id) ++{ ++ int i; ++ for (i = 0; i < fqs_count; i++) ++ dump_fq(dev, (fqs + i)->fqid, channel_id); ++} ++ ++static void dump_oh_config(struct device *dev, struct dpa_oh_config_s *conf) ++{ ++ struct list_head *fq_list; ++ struct fq_duple *fqd; ++ int i; ++ ++ dev_info(dev, "Default egress frame queue: %d\n", conf->default_fqid); ++ dev_info(dev, "Default error frame queue: %d\n", conf->error_fqid); ++ ++ /* TX queues (old initialization) */ ++ dev_info(dev, "Initialized queues:"); ++ for (i = 0; i < conf->egress_cnt; i++) ++ dump_fq_duple(dev, conf->egress_fqs, conf->egress_cnt, ++ conf->channel); ++ ++ /* initialized ingress queues */ ++ list_for_each(fq_list, &conf->fqs_ingress_list) { ++ fqd = list_entry(fq_list, struct fq_duple, fq_list); ++ dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id); ++ } ++ ++ /* initialized egress queues */ ++ list_for_each(fq_list, &conf->fqs_egress_list) { ++ fqd = list_entry(fq_list, struct fq_duple, fq_list); ++ dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id); ++ } ++} ++ ++/* Destroys Frame Queues */ ++static void oh_fq_destroy(struct qman_fq *fq) ++{ ++ int _errno = 0; ++ ++ _errno = qman_retire_fq(fq, NULL); ++ if (unlikely(_errno < 0)) ++ pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_retire_fq(%u)=%d\n", ++ KBUILD_BASENAME".c", __LINE__, __func__, ++ qman_fq_fqid(fq), _errno); ++ ++ _errno = qman_oos_fq(fq); ++ if (unlikely(_errno < 0)) { ++ pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_oos_fq(%u)=%d\n", ++ KBUILD_BASENAME".c", __LINE__, __func__, ++ qman_fq_fqid(fq), _errno); ++ } ++ ++ qman_destroy_fq(fq, 0); ++} ++ ++/* Allocation code for the OH port's PCD frame queues */ ++static int __cold oh_alloc_pcd_fqids(struct device *dev, ++ uint32_t num, ++ uint8_t alignment, ++ uint32_t *base_fqid) ++{ ++ dev_crit(dev, "callback not implemented!\n"); ++ BUG(); ++ ++ return 0; ++} ++ ++static int __cold oh_free_pcd_fqids(struct device *dev, uint32_t base_fqid) ++{ ++ dev_crit(dev, "callback not implemented!\n"); ++ BUG(); ++ ++ return 0; ++} ++ ++static void oh_set_buffer_layout(struct fm_port *port, ++ struct dpa_buffer_layout_s *layout) ++{ ++ struct fm_port_params params; ++ ++ layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE; ++ layout->parse_results = true; ++ layout->hash_results = true; ++ layout->time_stamp = false; ++ ++ fm_port_get_buff_layout_ext_params(port, ¶ms); ++ layout->manip_extra_space = params.manip_extra_space; ++ layout->data_align = params.data_align; ++} ++ ++static int ++oh_port_probe(struct platform_device *_of_dev) ++{ ++ struct device *dpa_oh_dev; ++ struct device_node *dpa_oh_node; ++ int lenp, _errno = 0, fq_idx, duple_idx; ++ int n_size, i, j, ret, duples_count; ++ struct platform_device *oh_of_dev; ++ struct device_node *oh_node, *bpool_node = NULL, *root_node; ++ struct device *oh_dev; ++ struct dpa_oh_config_s *oh_config = NULL; ++ const __be32 *oh_all_queues; ++ 
const __be32 *channel_ids; ++ const __be32 *oh_tx_queues; ++ uint32_t queues_count; ++ uint32_t crt_fqid_base; ++ uint32_t crt_fq_count; ++ bool frag_enabled = false; ++ struct fm_port_params oh_port_tx_params; ++ struct fm_port_pcd_param oh_port_pcd_params; ++ struct dpa_buffer_layout_s buf_layout; ++ ++ /* True if the current partition owns the OH port. */ ++ bool init_oh_port; ++ ++ const struct of_device_id *match; ++ int crt_ext_pools_count; ++ u32 ext_pool_size; ++ u32 port_id; ++ u32 channel_id; ++ ++ int channel_ids_count; ++ int channel_idx; ++ struct fq_duple *fqd; ++ struct list_head *fq_list, *fq_list_tmp; ++ ++ const __be32 *bpool_cfg; ++ uint32_t bpid; ++ ++ memset(&oh_port_tx_params, 0, sizeof(oh_port_tx_params)); ++ dpa_oh_dev = &_of_dev->dev; ++ dpa_oh_node = dpa_oh_dev->of_node; ++ BUG_ON(dpa_oh_node == NULL); ++ ++ match = of_match_device(oh_port_match_table, dpa_oh_dev); ++ if (!match) ++ return -EINVAL; ++ ++ dev_dbg(dpa_oh_dev, "Probing OH port...\n"); ++ ++ /* Find the referenced OH node */ ++ oh_node = of_parse_phandle(dpa_oh_node, "fsl,fman-oh-port", 0); ++ if (oh_node == NULL) { ++ dev_err(dpa_oh_dev, ++ "Can't find OH node referenced from node %s\n", ++ dpa_oh_node->full_name); ++ return -EINVAL; ++ } ++ dev_info(dpa_oh_dev, "Found OH node handle compatible with %s\n", ++ match->compatible); ++ ++ _errno = of_property_read_u32(oh_node, "cell-index", &port_id); ++ if (_errno) { ++ dev_err(dpa_oh_dev, "No port id found in node %s\n", ++ dpa_oh_node->full_name); ++ goto return_kfree; ++ } ++ ++ _errno = of_property_read_u32(oh_node, "fsl,qman-channel-id", ++ &channel_id); ++ if (_errno) { ++ dev_err(dpa_oh_dev, "No channel id found in node %s\n", ++ dpa_oh_node->full_name); ++ goto return_kfree; ++ } ++ ++ oh_of_dev = of_find_device_by_node(oh_node); ++ BUG_ON(oh_of_dev == NULL); ++ oh_dev = &oh_of_dev->dev; ++ ++ /* The OH port must be initialized exactly once. ++ * The following scenarios are of interest: ++ * - the node is Linux-private (will always initialize it); ++ * - the node is shared between two Linux partitions ++ * (only one of them will initialize it); ++ * - the node is shared between a Linux and a LWE partition ++ * (Linux will initialize it) - "fsl,dpa-oh-shared" ++ */ ++ ++ /* Check if the current partition owns the OH port ++ * and ought to initialize it. It may be the case that we leave this ++ * to another (also Linux) partition. ++ */ ++ init_oh_port = strcmp(match->compatible, "fsl,dpa-oh-shared"); ++ ++ /* If we aren't the "owner" of the OH node, we're done here. 
*/ ++ if (!init_oh_port) { ++ dev_dbg(dpa_oh_dev, ++ "Not owning the shared OH port %s, will not initialize it.\n", ++ oh_node->full_name); ++ of_node_put(oh_node); ++ return 0; ++ } ++ ++ /* Allocate OH dev private data */ ++ oh_config = devm_kzalloc(dpa_oh_dev, sizeof(*oh_config), GFP_KERNEL); ++ if (oh_config == NULL) { ++ dev_err(dpa_oh_dev, ++ "Can't allocate private data for OH node %s referenced from node %s!\n", ++ oh_node->full_name, dpa_oh_node->full_name); ++ _errno = -ENOMEM; ++ goto return_kfree; ++ } ++ ++ INIT_LIST_HEAD(&oh_config->fqs_ingress_list); ++ INIT_LIST_HEAD(&oh_config->fqs_egress_list); ++ ++ /* FQs that enter OH port */ ++ lenp = 0; ++ oh_all_queues = of_get_property(dpa_oh_node, ++ "fsl,qman-frame-queues-ingress", &lenp); ++ if (lenp % (2 * sizeof(*oh_all_queues))) { ++ dev_warn(dpa_oh_dev, ++ "Wrong ingress queues format for OH node %s referenced from node %s!\n", ++ oh_node->full_name, dpa_oh_node->full_name); ++ /* just ignore the last unpaired value */ ++ } ++ ++ duples_count = lenp / (2 * sizeof(*oh_all_queues)); ++ dev_err(dpa_oh_dev, "Allocating %d ingress frame queues duples\n", ++ duples_count); ++ for (duple_idx = 0; duple_idx < duples_count; duple_idx++) { ++ crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]); ++ crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]); ++ ++ fqd = devm_kzalloc(dpa_oh_dev, ++ sizeof(struct fq_duple), GFP_KERNEL); ++ if (!fqd) { ++ dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n", ++ oh_node->full_name, ++ dpa_oh_node->full_name); ++ _errno = -ENOMEM; ++ goto return_kfree; ++ } ++ ++ fqd->fqs = devm_kzalloc(dpa_oh_dev, ++ crt_fq_count * sizeof(struct qman_fq), ++ GFP_KERNEL); ++ if (!fqd->fqs) { ++ dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n", ++ oh_node->full_name, ++ dpa_oh_node->full_name); ++ _errno = -ENOMEM; ++ goto return_kfree; ++ } ++ ++ for (j = 0; j < crt_fq_count; j++) ++ (fqd->fqs + j)->fqid = crt_fqid_base + j; ++ fqd->fqs_count = crt_fq_count; ++ fqd->channel_id = (uint16_t)channel_id; ++ list_add(&fqd->fq_list, &oh_config->fqs_ingress_list); ++ } ++ ++ /* create the ingress queues */ ++ list_for_each(fq_list, &oh_config->fqs_ingress_list) { ++ fqd = list_entry(fq_list, struct fq_duple, fq_list); ++ ++ for (j = 0; j < fqd->fqs_count; j++) { ++ ret = oh_fq_create(fqd->fqs + j, ++ (fqd->fqs + j)->fqid, ++ fqd->channel_id, 3); ++ if (ret != 0) { ++ dev_err(dpa_oh_dev, "Unable to create ingress frame queue %d for OH node %s referenced from node %s!\n", ++ (fqd->fqs + j)->fqid, ++ oh_node->full_name, ++ dpa_oh_node->full_name); ++ _errno = -EINVAL; ++ goto return_kfree; ++ } ++ } ++ } ++ ++ /* FQs that exit OH port */ ++ lenp = 0; ++ oh_all_queues = of_get_property(dpa_oh_node, ++ "fsl,qman-frame-queues-egress", &lenp); ++ if (lenp % (2 * sizeof(*oh_all_queues))) { ++ dev_warn(dpa_oh_dev, ++ "Wrong egress queues format for OH node %s referenced from node %s!\n", ++ oh_node->full_name, dpa_oh_node->full_name); ++ /* just ignore the last unpaired value */ ++ } ++ ++ duples_count = lenp / (2 * sizeof(*oh_all_queues)); ++ dev_dbg(dpa_oh_dev, "Allocating %d egress frame queues duples\n", ++ duples_count); ++ for (duple_idx = 0; duple_idx < duples_count; duple_idx++) { ++ crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]); ++ crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]); ++ ++ fqd = devm_kzalloc(dpa_oh_dev, ++ sizeof(struct fq_duple), GFP_KERNEL); 
++ if (!fqd) { ++ dev_err(dpa_oh_dev, "Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n", ++ oh_node->full_name, ++ dpa_oh_node->full_name); ++ _errno = -ENOMEM; ++ goto return_kfree; ++ } ++ ++ fqd->fqs = devm_kzalloc(dpa_oh_dev, ++ crt_fq_count * sizeof(struct qman_fq), ++ GFP_KERNEL); ++ if (!fqd->fqs) { ++ dev_err(dpa_oh_dev, ++ "Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n", ++ oh_node->full_name, ++ dpa_oh_node->full_name); ++ _errno = -ENOMEM; ++ goto return_kfree; ++ } ++ ++ for (j = 0; j < crt_fq_count; j++) ++ (fqd->fqs + j)->fqid = crt_fqid_base + j; ++ fqd->fqs_count = crt_fq_count; ++ /* channel ID is specified in another attribute */ ++ fqd->channel_id = 0; ++ list_add_tail(&fqd->fq_list, &oh_config->fqs_egress_list); ++ ++ /* allocate the queue */ ++ ++ } ++ ++ /* channel_ids for FQs that exit OH port */ ++ lenp = 0; ++ channel_ids = of_get_property(dpa_oh_node, ++ "fsl,qman-channel-ids-egress", &lenp); ++ ++ channel_ids_count = lenp / (sizeof(*channel_ids)); ++ if (channel_ids_count != duples_count) { ++ dev_warn(dpa_oh_dev, ++ "Not all egress queues have a channel id for OH node %s referenced from node %s!\n", ++ oh_node->full_name, dpa_oh_node->full_name); ++ /* just ignore the queues that do not have a Channel ID */ ++ } ++ ++ channel_idx = 0; ++ list_for_each(fq_list, &oh_config->fqs_egress_list) { ++ if (channel_idx + 1 > channel_ids_count) ++ break; ++ fqd = list_entry(fq_list, struct fq_duple, fq_list); ++ fqd->channel_id = ++ (uint16_t)be32_to_cpu(channel_ids[channel_idx++]); ++ } ++ ++ /* create egress queues */ ++ list_for_each(fq_list, &oh_config->fqs_egress_list) { ++ fqd = list_entry(fq_list, struct fq_duple, fq_list); ++ ++ if (fqd->channel_id == 0) { ++ /* missing channel id in dts */ ++ continue; ++ } ++ ++ for (j = 0; j < fqd->fqs_count; j++) { ++ ret = oh_fq_create(fqd->fqs + j, ++ (fqd->fqs + j)->fqid, ++ fqd->channel_id, 3); ++ if (ret != 0) { ++ dev_err(dpa_oh_dev, "Unable to create egress frame queue %d for OH node %s referenced from node %s!\n", ++ (fqd->fqs + j)->fqid, ++ oh_node->full_name, ++ dpa_oh_node->full_name); ++ _errno = -EINVAL; ++ goto return_kfree; ++ } ++ } ++ } ++ ++ /* Read FQ ids/nums for the DPA OH node */ ++ oh_all_queues = of_get_property(dpa_oh_node, ++ "fsl,qman-frame-queues-oh", &lenp); ++ if (oh_all_queues == NULL) { ++ dev_err(dpa_oh_dev, ++ "No frame queues have been defined for OH node %s referenced from node %s\n", ++ oh_node->full_name, dpa_oh_node->full_name); ++ _errno = -EINVAL; ++ goto return_kfree; ++ } ++ ++ /* Check that the OH error and default FQs are there */ ++ BUG_ON(lenp % (2 * sizeof(*oh_all_queues))); ++ queues_count = lenp / (2 * sizeof(*oh_all_queues)); ++ if (queues_count != 2) { ++ dev_err(dpa_oh_dev, ++ "Error and Default queues must be defined for OH node %s referenced from node %s\n", ++ oh_node->full_name, dpa_oh_node->full_name); ++ _errno = -EINVAL; ++ goto return_kfree; ++ } ++ ++ /* Read the FQIDs defined for this OH port */ ++ dev_dbg(dpa_oh_dev, "Reading %d queues...\n", queues_count); ++ fq_idx = 0; ++ ++ /* Error FQID - must be present */ ++ crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]); ++ crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]); ++ if (crt_fq_count != 1) { ++ dev_err(dpa_oh_dev, ++ "Only 1 Error FQ allowed in OH node %s referenced from node %s (read: %d FQIDs).\n", ++ oh_node->full_name, dpa_oh_node->full_name, ++ crt_fq_count); ++ _errno = -EINVAL; ++ goto return_kfree; ++ } ++ 
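/* The fsl,qman-frame-queues-* properties are parsed as flat
 * <base count> duples. For illustration (hypothetical values), a
 * device-tree entry such as:
 *
 *	fsl,qman-frame-queues-oh = <0x6e 1 0x6f 1>;
 *
 * decodes to exactly the two single-FQ duples required below: the
 * error FQID 0x6e followed by the default FQID 0x6f.
 */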
oh_config->error_fqid = crt_fqid_base; ++ dev_dbg(dpa_oh_dev, "Read Error FQID 0x%x for OH port %s.\n", ++ oh_config->error_fqid, oh_node->full_name); ++ ++ /* Default FQID - must be present */ ++ crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]); ++ crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]); ++ if (crt_fq_count != 1) { ++ dev_err(dpa_oh_dev, ++ "Only 1 Default FQ allowed in OH node %s referenced from %s (read: %d FQIDs).\n", ++ oh_node->full_name, dpa_oh_node->full_name, ++ crt_fq_count); ++ _errno = -EINVAL; ++ goto return_kfree; ++ } ++ oh_config->default_fqid = crt_fqid_base; ++ dev_dbg(dpa_oh_dev, "Read Default FQID 0x%x for OH port %s.\n", ++ oh_config->default_fqid, oh_node->full_name); ++ ++ /* TX FQID - presence is optional */ ++ oh_tx_queues = of_get_property(dpa_oh_node, "fsl,qman-frame-queues-tx", ++ &lenp); ++ if (oh_tx_queues == NULL) { ++ dev_dbg(dpa_oh_dev, ++ "No tx queues have been defined for OH node %s referenced from node %s\n", ++ oh_node->full_name, dpa_oh_node->full_name); ++ goto config_port; ++ } ++ ++ /* Check that queues-tx has only a base and a count defined */ ++ BUG_ON(lenp % (2 * sizeof(*oh_tx_queues))); ++ queues_count = lenp / (2 * sizeof(*oh_tx_queues)); ++ if (queues_count != 1) { ++ dev_err(dpa_oh_dev, ++ "TX queues must be defined in only one tuple for OH node %s referenced from node %s\n", ++ oh_node->full_name, dpa_oh_node->full_name); ++ _errno = -EINVAL; ++ goto return_kfree; ++ } ++ ++ fq_idx = 0; ++ crt_fqid_base = be32_to_cpu(oh_tx_queues[fq_idx++]); ++ crt_fq_count = be32_to_cpu(oh_tx_queues[fq_idx++]); ++ oh_config->egress_cnt = crt_fq_count; ++ ++ /* Allocate TX queues */ ++ dev_dbg(dpa_oh_dev, "Allocating %d queues for TX...\n", crt_fq_count); ++ oh_config->egress_fqs = devm_kzalloc(dpa_oh_dev, ++ crt_fq_count * sizeof(struct qman_fq), GFP_KERNEL); ++ if (oh_config->egress_fqs == NULL) { ++ dev_err(dpa_oh_dev, ++ "Can't allocate private data for TX queues for OH node %s referenced from node %s!\n", ++ oh_node->full_name, dpa_oh_node->full_name); ++ _errno = -ENOMEM; ++ goto return_kfree; ++ } ++ ++ /* Create TX queues */ ++ for (i = 0; i < crt_fq_count; i++) { ++ ret = oh_fq_create(oh_config->egress_fqs + i, ++ crt_fqid_base + i, (uint16_t)channel_id, 3); ++ if (ret != 0) { ++ dev_err(dpa_oh_dev, ++ "Unable to create TX frame queue %d for OH node %s referenced from node %s!\n", ++ crt_fqid_base + i, oh_node->full_name, ++ dpa_oh_node->full_name); ++ _errno = -EINVAL; ++ goto return_kfree; ++ } ++ } ++ ++config_port: ++ /* Get a handle to the fm_port so we can set ++ * its configuration params ++ */ ++ oh_config->oh_port = fm_port_bind(oh_dev); ++ if (oh_config->oh_port == NULL) { ++ dev_err(dpa_oh_dev, "NULL drvdata from fm port dev %s!\n", ++ oh_node->full_name); ++ _errno = -EINVAL; ++ goto return_kfree; ++ } ++ ++ oh_set_buffer_layout(oh_config->oh_port, &buf_layout); ++ ++ /* read the pool handlers */ ++ crt_ext_pools_count = of_count_phandle_with_args(dpa_oh_node, ++ "fsl,bman-buffer-pools", NULL); ++ if (crt_ext_pools_count <= 0) { ++ dev_info(dpa_oh_dev, ++ "OH port %s has no buffer pool. 
Fragmentation will not be enabled\n", ++ oh_node->full_name); ++ goto init_port; ++ } ++ ++ /* used for reading ext_pool_size*/ ++ root_node = of_find_node_by_path("/"); ++ if (root_node == NULL) { ++ dev_err(dpa_oh_dev, "of_find_node_by_path(/) failed\n"); ++ _errno = -EINVAL; ++ goto return_kfree; ++ } ++ ++ n_size = of_n_size_cells(root_node); ++ of_node_put(root_node); ++ ++ dev_dbg(dpa_oh_dev, "OH port number of pools = %d\n", ++ crt_ext_pools_count); ++ ++ oh_port_tx_params.num_pools = (uint8_t)crt_ext_pools_count; ++ ++ for (i = 0; i < crt_ext_pools_count; i++) { ++ bpool_node = of_parse_phandle(dpa_oh_node, ++ "fsl,bman-buffer-pools", i); ++ if (bpool_node == NULL) { ++ dev_err(dpa_oh_dev, "Invalid Buffer pool node\n"); ++ _errno = -EINVAL; ++ goto return_kfree; ++ } ++ ++ _errno = of_property_read_u32(bpool_node, "fsl,bpid", &bpid); ++ if (_errno) { ++ dev_err(dpa_oh_dev, "Invalid Buffer Pool ID\n"); ++ _errno = -EINVAL; ++ goto return_kfree; ++ } ++ ++ oh_port_tx_params.pool_param[i].id = (uint8_t)bpid; ++ dev_dbg(dpa_oh_dev, "OH port bpool id = %u\n", bpid); ++ ++ bpool_cfg = of_get_property(bpool_node, ++ "fsl,bpool-ethernet-cfg", &lenp); ++ if (bpool_cfg == NULL) { ++ dev_err(dpa_oh_dev, "Invalid Buffer pool config params\n"); ++ _errno = -EINVAL; ++ goto return_kfree; ++ } ++ ++ ext_pool_size = of_read_number(bpool_cfg + n_size, n_size); ++ oh_port_tx_params.pool_param[i].size = (uint16_t)ext_pool_size; ++ dev_dbg(dpa_oh_dev, "OH port bpool size = %u\n", ++ ext_pool_size); ++ of_node_put(bpool_node); ++ ++ } ++ ++ if (buf_layout.data_align != FRAG_DATA_ALIGN || ++ buf_layout.manip_extra_space != FRAG_MANIP_SPACE) ++ goto init_port; ++ ++ frag_enabled = true; ++ dev_info(dpa_oh_dev, "IP Fragmentation enabled for OH port %d", ++ port_id); ++ ++init_port: ++ of_node_put(oh_node); ++ /* Set Tx params */ ++ dpaa_eth_init_port(tx, oh_config->oh_port, oh_port_tx_params, ++ oh_config->error_fqid, oh_config->default_fqid, (&buf_layout), ++ frag_enabled); ++ /* Set PCD params */ ++ oh_port_pcd_params.cba = oh_alloc_pcd_fqids; ++ oh_port_pcd_params.cbf = oh_free_pcd_fqids; ++ oh_port_pcd_params.dev = dpa_oh_dev; ++ fm_port_pcd_bind(oh_config->oh_port, &oh_port_pcd_params); ++ ++ dev_set_drvdata(dpa_oh_dev, oh_config); ++ ++ /* Enable the OH port */ ++ _errno = fm_port_enable(oh_config->oh_port); ++ if (_errno) ++ goto return_kfree; ++ ++ dev_info(dpa_oh_dev, "OH port %s enabled.\n", oh_node->full_name); ++ ++ /* print of all referenced & created queues */ ++ dump_oh_config(dpa_oh_dev, oh_config); ++ ++ return 0; ++ ++return_kfree: ++ if (bpool_node) ++ of_node_put(bpool_node); ++ if (oh_node) ++ of_node_put(oh_node); ++ if (oh_config && oh_config->egress_fqs) ++ devm_kfree(dpa_oh_dev, oh_config->egress_fqs); ++ ++ list_for_each_safe(fq_list, fq_list_tmp, &oh_config->fqs_ingress_list) { ++ fqd = list_entry(fq_list, struct fq_duple, fq_list); ++ list_del(fq_list); ++ devm_kfree(dpa_oh_dev, fqd->fqs); ++ devm_kfree(dpa_oh_dev, fqd); ++ } ++ ++ list_for_each_safe(fq_list, fq_list_tmp, &oh_config->fqs_egress_list) { ++ fqd = list_entry(fq_list, struct fq_duple, fq_list); ++ list_del(fq_list); ++ devm_kfree(dpa_oh_dev, fqd->fqs); ++ devm_kfree(dpa_oh_dev, fqd); ++ } ++ ++ devm_kfree(dpa_oh_dev, oh_config); ++ return _errno; ++} ++ ++static int __cold oh_port_remove(struct platform_device *_of_dev) ++{ ++ int _errno = 0, i; ++ struct dpa_oh_config_s *oh_config; ++ ++ pr_info("Removing OH port...\n"); ++ ++ oh_config = dev_get_drvdata(&_of_dev->dev); ++ if (oh_config == NULL) { ++ 
pr_err(KBUILD_MODNAME ++ ": %s:%hu:%s(): No OH config in device private data!\n", ++ KBUILD_BASENAME".c", __LINE__, __func__); ++ _errno = -ENODEV; ++ goto return_error; ++ } ++ ++ if (oh_config->egress_fqs) ++ for (i = 0; i < oh_config->egress_cnt; i++) ++ oh_fq_destroy(oh_config->egress_fqs + i); ++ ++ if (oh_config->oh_port == NULL) { ++ pr_err(KBUILD_MODNAME ++ ": %s:%hu:%s(): No fm port in device private data!\n", ++ KBUILD_BASENAME".c", __LINE__, __func__); ++ _errno = -EINVAL; ++ goto free_egress_fqs; ++ } ++ ++ _errno = fm_port_disable(oh_config->oh_port); ++ ++free_egress_fqs: ++ if (oh_config->egress_fqs) ++ devm_kfree(&_of_dev->dev, oh_config->egress_fqs); ++ devm_kfree(&_of_dev->dev, oh_config); ++ dev_set_drvdata(&_of_dev->dev, NULL); ++ ++return_error: ++ return _errno; ++} ++ ++static struct platform_driver oh_port_driver = { ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .of_match_table = oh_port_match_table, ++ .owner = THIS_MODULE, ++ .pm = OH_PM_OPS, ++ }, ++ .probe = oh_port_probe, ++ .remove = oh_port_remove ++}; ++ ++static int __init __cold oh_port_load(void) ++{ ++ int _errno; ++ ++ pr_info(OH_MOD_DESCRIPTION "\n"); ++ ++ _errno = platform_driver_register(&oh_port_driver); ++ if (_errno < 0) { ++ pr_err(KBUILD_MODNAME ++ ": %s:%hu:%s(): platform_driver_register() = %d\n", ++ KBUILD_BASENAME".c", __LINE__, __func__, _errno); ++ } ++ ++ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", ++ KBUILD_BASENAME".c", __func__); ++ return _errno; ++} ++module_init(oh_port_load); ++ ++static void __exit __cold oh_port_unload(void) ++{ ++ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n", ++ KBUILD_BASENAME".c", __func__); ++ ++ platform_driver_unregister(&oh_port_driver); ++ ++ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", ++ KBUILD_BASENAME".c", __func__); ++} ++module_exit(oh_port_unload); +--- /dev/null ++++ b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h +@@ -0,0 +1,59 @@ ++/* Copyright 2011 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifndef __OFFLINE_PORT_H ++#define __OFFLINE_PORT_H ++ ++struct fm_port; ++struct qman_fq; ++ ++/* fqs are defined in duples (base_fq, fq_count) */ ++struct fq_duple { ++ struct qman_fq *fqs; ++ int fqs_count; ++ uint16_t channel_id; ++ struct list_head fq_list; ++}; ++ ++/* OH port configuration */ ++struct dpa_oh_config_s { ++ uint32_t error_fqid; ++ uint32_t default_fqid; ++ struct fm_port *oh_port; ++ uint32_t egress_cnt; ++ struct qman_fq *egress_fqs; ++ uint16_t channel; ++ ++ struct list_head fqs_ingress_list; ++ struct list_head fqs_egress_list; ++}; ++ ++#endif /* __OFFLINE_PORT_H */ --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/Kconfig -@@ -0,0 +1,151 @@ +@@ -0,0 +1,153 @@ +menu "Frame Manager support" + +menuconfig FSL_SDK_FMAN + bool "Freescale Frame Manager (datapath) support - SDK driver" -+ depends on (FSL_SOC || ARM64 || ARM) && FSL_BMAN && FSL_QMAN ++ depends on (FSL_SOC || ARM64 || ARM) && FSL_SDK_BMAN && FSL_SDK_QMAN && !FSL_FMAN + default y + ---help--- + If unsure, say Y. @@ -541,10 +13561,10 @@ Signed-off-by: Zhao Qiang + bool "FMan PFC support (EXPERIMENTAL)" + depends on ( FMAN_V3H || FMAN_V3L || FMAN_ARM) && FSL_SDK_FMAN + default n -+ ---help--- -+ This option enables PFC support on FMan v3 ports. -+ Data Center Bridging defines Classes of Service that are -+ flow-controlled using PFC pause frames. ++ help ++ This option enables PFC support on FMan v3 ports. ++ Data Center Bridging defines Classes of Service that are ++ flow-controlled using PFC pause frames. + +if FMAN_PFC +config FMAN_PFC_COS_COUNT @@ -552,6 +13572,8 @@ Signed-off-by: Zhao Qiang + depends on FMAN_PFC && FSL_SDK_FMAN + range 1 4 + default "3" ++ help ++ The number of Classes of Service controlled by PFC. + +config FMAN_PFC_QUANTA_0 + int "The pause quanta for PFC CoS 0" @@ -1881,7 +14903,7 @@ Signed-off-by: Zhao Qiang + --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec.c -@@ -0,0 +1,1463 @@ +@@ -0,0 +1,1464 @@ +/* + * Copyright 2008-2013 Freescale Semiconductor Inc. + * @@ -3255,6 +16277,7 @@ Signed-off-by: Zhao Qiang + + p_FmMacControllerDriver->f_FM_MAC_Enable = DtsecEnable; + p_FmMacControllerDriver->f_FM_MAC_Disable = DtsecDisable; ++ p_FmMacControllerDriver->f_FM_MAC_Resume = NULL; + + p_FmMacControllerDriver->f_FM_MAC_SetException = DtsecSetException; + @@ -3723,7 +16746,7 @@ Signed-off-by: Zhao Qiang +#endif /* __DTSEC_MII_ACC_H */ --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.c -@@ -0,0 +1,646 @@ +@@ -0,0 +1,658 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. 
+ * @@ -4025,6 +17048,18 @@ Signed-off-by: Zhao Qiang + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + ++t_Error FM_MAC_Resume (t_Handle h_FmMac) ++{ ++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; ++ ++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); ++ ++ if (p_FmMacControllerDriver->f_FM_MAC_Resume) ++ return p_FmMacControllerDriver->f_FM_MAC_Resume(h_FmMac); ++ ++ return E_OK; ++} ++ +/* ......................................................................... */ + +t_Error FM_MAC_Enable1588TimeStamp (t_Handle h_FmMac) @@ -4372,7 +17407,7 @@ Signed-off-by: Zhao Qiang +#endif /* (defined(DEBUG_ERRORS) && ... */ --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.h -@@ -0,0 +1,224 @@ +@@ -0,0 +1,225 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * @@ -4466,6 +17501,7 @@ Signed-off-by: Zhao Qiang + + t_Error (*f_FM_MAC_Enable) (t_Handle h_FmMac, e_CommMode mode); + t_Error (*f_FM_MAC_Disable) (t_Handle h_FmMac, e_CommMode mode); ++ t_Error (*f_FM_MAC_Resume) (t_Handle h_FmMac); + t_Error (*f_FM_MAC_Enable1588TimeStamp) (t_Handle h_FmMac); + t_Error (*f_FM_MAC_Disable1588TimeStamp) (t_Handle h_FmMac); + t_Error (*f_FM_MAC_Reset) (t_Handle h_FmMac, bool wait); @@ -6881,7 +19917,7 @@ Signed-off-by: Zhao Qiang +} --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.c -@@ -0,0 +1,1088 @@ +@@ -0,0 +1,1096 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * @@ -7696,6 +20732,37 @@ Signed-off-by: Zhao Qiang + return fman_memac_get_max_frame_len(p_Memac->p_MemMap); +} + ++static t_Error MemacInitInternalPhy(t_Handle h_Memac) ++{ ++ t_Memac *p_Memac = (t_Memac *)h_Memac; ++ uint8_t i, phyAddr; ++ ++ if (ENET_INTERFACE_FROM_MODE(p_Memac->enetMode) == e_ENET_IF_SGMII) ++ { ++ /* Configure internal SGMII PHY */ ++ if (p_Memac->enetMode & ENET_IF_SGMII_BASEX) ++ SetupSgmiiInternalPhyBaseX(p_Memac, PHY_MDIO_ADDR); ++ else ++ SetupSgmiiInternalPhy(p_Memac, PHY_MDIO_ADDR); ++ } ++ else if (ENET_INTERFACE_FROM_MODE(p_Memac->enetMode) == e_ENET_IF_QSGMII) ++ { ++ /* Configure 4 internal SGMII PHYs */ ++ for (i = 0; i < 4; i++) ++ { ++ /* QSGMII PHY address occupies 3 upper bits of 5-bit ++ phyAddress; the lower 2 bits are used to extend ++ register address space and access each one of 4 ++ ports inside QSGMII. 
*/ ++ phyAddr = (uint8_t)((PHY_MDIO_ADDR << 2) | i); ++ if (p_Memac->enetMode & ENET_IF_SGMII_BASEX) ++ SetupSgmiiInternalPhyBaseX(p_Memac, phyAddr); ++ else ++ SetupSgmiiInternalPhy(p_Memac, phyAddr); ++ } ++ } ++ return E_OK; ++} + +/*****************************************************************************/ +/* mEMAC Init & Free API */ @@ -7709,7 +20776,6 @@ Signed-off-by: Zhao Qiang + struct memac_cfg *p_MemacDriverParam; + enum enet_interface enet_interface; + enum enet_speed enet_speed; -+ uint8_t i, phyAddr; + t_EnetAddr ethAddr; + e_FmMacType portType; + t_Error err; @@ -7771,30 +20837,7 @@ Signed-off-by: Zhao Qiang + } +#endif /* FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 */ + -+ if (ENET_INTERFACE_FROM_MODE(p_Memac->enetMode) == e_ENET_IF_SGMII) -+ { -+ /* Configure internal SGMII PHY */ -+ if (p_Memac->enetMode & ENET_IF_SGMII_BASEX) -+ SetupSgmiiInternalPhyBaseX(p_Memac, PHY_MDIO_ADDR); -+ else -+ SetupSgmiiInternalPhy(p_Memac, PHY_MDIO_ADDR); -+ } -+ else if (ENET_INTERFACE_FROM_MODE(p_Memac->enetMode) == e_ENET_IF_QSGMII) -+ { -+ /* Configure 4 internal SGMII PHYs */ -+ for (i = 0; i < 4; i++) -+ { -+ /* QSGMII PHY address occupies 3 upper bits of 5-bit -+ phyAddress; the lower 2 bits are used to extend -+ register address space and access each one of 4 -+ ports inside QSGMII. */ -+ phyAddr = (uint8_t)((PHY_MDIO_ADDR << 2) | i); -+ if (p_Memac->enetMode & ENET_IF_SGMII_BASEX) -+ SetupSgmiiInternalPhyBaseX(p_Memac, phyAddr); -+ else -+ SetupSgmiiInternalPhy(p_Memac, phyAddr); -+ } -+ } ++ MemacInitInternalPhy(h_Memac); + + /* Max Frame Length */ + err = FmSetMacMaxFrame(p_Memac->fmMacControllerDriver.h_Fm, @@ -7892,6 +20935,7 @@ Signed-off-by: Zhao Qiang + + p_FmMacControllerDriver->f_FM_MAC_Enable = MemacEnable; + p_FmMacControllerDriver->f_FM_MAC_Disable = MemacDisable; ++ p_FmMacControllerDriver->f_FM_MAC_Resume = MemacInitInternalPhy; + + p_FmMacControllerDriver->f_FM_MAC_SetTxAutoPauseFrames = MemacSetTxAutoPauseFrames; + p_FmMacControllerDriver->f_FM_MAC_SetTxPauseFrames = MemacSetTxPauseFrames; @@ -8242,7 +21286,7 @@ Signed-off-by: Zhao Qiang +#endif /* __MEMAC_MII_ACC_H */ --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec.c -@@ -0,0 +1,974 @@ +@@ -0,0 +1,975 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * @@ -9142,6 +22186,7 @@ Signed-off-by: Zhao Qiang + + p_FmMacControllerDriver->f_FM_MAC_Enable = TgecEnable; + p_FmMacControllerDriver->f_FM_MAC_Disable = TgecDisable; ++ p_FmMacControllerDriver->f_FM_MAC_Resume = NULL; + + p_FmMacControllerDriver->f_FM_MAC_SetTxAutoPauseFrames = TgecTxMacPause; + p_FmMacControllerDriver->f_FM_MAC_SetTxPauseFrames = TgecSetTxPauseFrames; @@ -13091,7 +26136,7 @@ Signed-off-by: Zhao Qiang +#endif /* __CRC64_H */ --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.c -@@ -0,0 +1,7538 @@ +@@ -0,0 +1,7582 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. 
+ * @@ -13130,6 +26175,7 @@ Signed-off-by: Zhao Qiang + + @Description FM Coarse Classifier implementation + *//***************************************************************************/ ++#include +#include "std_ext.h" +#include "error_ext.h" +#include "string_ext.h" @@ -13238,6 +26284,10 @@ Signed-off-by: Zhao Qiang + if (p_CcNode->maxNumOfKeys) + { + p_StatsObj = DequeueStatsObj(&p_CcNode->availableStatsLst); ++ ++ /* Clean statistics counters & ADs */ ++ MemSet8(p_StatsObj->h_StatsAd, 0, FM_PCD_CC_AD_ENTRY_SIZE); ++ MemSet8(p_StatsObj->h_StatsCounters, 0, p_CcNode->countersArraySize); + } + else + { @@ -13288,8 +26338,11 @@ Signed-off-by: Zhao Qiang + upon node initialization and now will be enqueued back to the list */ + if (p_CcNode->maxNumOfKeys) + { -+ /* Nullify counters */ -+ MemSet8(p_StatsObj->h_StatsCounters, 0, p_CcNode->countersArraySize); ++ /* Clean statistics counters */ ++ MemSet8(p_StatsObj->h_StatsCounters, 0, p_CcNode->countersArraySize); ++ ++ /* Clean statistics ADs */ ++ MemSet8(p_StatsObj->h_StatsAd, 0, FM_PCD_CC_AD_ENTRY_SIZE); + + EnqueueStatsObj(&p_CcNode->availableStatsLst, p_StatsObj); + } @@ -14392,6 +27445,7 @@ Signed-off-by: Zhao Qiang + + LIST_Del(&p_CcNode->availableStatsLst); + ++ ReleaseLst(&p_CcNode->availableStatsLst); + ReleaseLst(&p_CcNode->ccPrevNodesLst); + ReleaseLst(&p_CcNode->ccTreeIdLst); + ReleaseLst(&p_CcNode->ccTreesLst); @@ -15268,6 +28322,7 @@ Signed-off-by: Zhao Qiang + uint32_t tmp = 0, tmpNia = 0; + uint16_t profileId; + t_Handle p_AdNewPtr = NULL; ++ t_Error err = E_OK; + + /* There are 3 cases handled in this routine of building a "result" type AD. + * Case 1: No Manip. The action descriptor is built within the match table. @@ -15380,12 +28435,18 @@ Signed-off-by: Zhao Qiang + if (p_CcNextEngineParams->params.plcrParams.sharedProfile) + { + tmpNia |= NIA_PLCR_ABSOLUTE; -+ FmPcdPlcrGetAbsoluteIdByProfileParams( ++ err = FmPcdPlcrGetAbsoluteIdByProfileParams( + (t_Handle)p_FmPcd, + e_FM_PCD_PLCR_SHARED, + NULL, + p_CcNextEngineParams->params.plcrParams.newRelativeProfileId, + &profileId); ++ ++ if (err != E_OK) { ++ REPORT_ERROR(MAJOR, err, NO_MSG); ++ return; ++ } ++ + } + else + profileId = @@ -17207,6 +30268,9 @@ Signed-off-by: Zhao Qiang + if (p_CcNode->maxNumOfKeys) + RELEASE_LOCK(p_FmPcd->shadowLock); + ++ ReleaseLst(&h_OldPointersLst); ++ ReleaseLst(&h_NewPointersLst); ++ + return err; +} + @@ -18422,6 +31486,9 @@ Signed-off-by: Zhao Qiang + if (p_FmPcd->p_CcShadow) + RELEASE_LOCK(p_FmPcd->shadowLock); + ++ ReleaseLst(&h_OldPointersLst); ++ ReleaseLst(&h_NewPointersLst); ++ + return err; + +} @@ -18496,6 +31563,9 @@ Signed-off-by: Zhao Qiang + if (p_CcNode->maxNumOfKeys) + RELEASE_LOCK(p_FmPcd->shadowLock); + ++ ReleaseLst(&h_OldPointersLst); ++ ReleaseLst(&h_NewPointersLst); ++ + return err; +} + @@ -18584,6 +31654,9 @@ Signed-off-by: Zhao Qiang + if (p_CcNode->maxNumOfKeys) + RELEASE_LOCK(p_FmPcd->shadowLock); + ++ ReleaseLst(&h_OldPointersLst); ++ ReleaseLst(&h_NewPointersLst); ++ + return err; +} + @@ -18638,6 +31711,9 @@ Signed-off-by: Zhao Qiang + if (p_CcNode->maxNumOfKeys) + RELEASE_LOCK(p_FmPcd->shadowLock); + ++ ReleaseLst(&h_OldPointersLst); ++ ReleaseLst(&h_NewPointersLst); ++ + return err; +} + @@ -18743,6 +31819,9 @@ Signed-off-by: Zhao Qiang + if (p_CcNode->maxNumOfKeys) + RELEASE_LOCK(p_FmPcd->shadowLock); + ++ ReleaseLst(&h_OldPointersLst); ++ ReleaseLst(&h_NewPointersLst); ++ + return err; +} + @@ -18834,6 +31913,9 @@ Signed-off-by: Zhao Qiang + if (p_CcNode->maxNumOfKeys) + 
RELEASE_LOCK(p_FmPcd->shadowLock); + ++ ReleaseLst(&h_OldPointersLst); ++ ReleaseLst(&h_NewPointersLst); ++ + return err; +} + @@ -20225,6 +33307,13 @@ Signed-off-by: Zhao Qiang + return NULL; + } + ++ /*Fix: QorIQ SDK / QSDK-2131*/ ++ if (p_Param->ccNextEngineParamsForMiss.nextEngine == e_FM_PCD_INVALID) ++ { ++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Next PCD Engine for on-miss entry is invalid. On-miss entry is always required. You can use e_FM_PCD_DONE.")); ++ return NULL; ++ } ++ +#if (DPAA_VERSION >= 11) + if (p_Param->statisticsMode == e_FM_PCD_CC_STATS_MODE_RMON) + { @@ -21910,7 +34999,7 @@ Signed-off-by: Zhao Qiang +} + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) -+static uint32_t ReadClsPlanBlockActionReg(uint8_t grpId) ++static uint32_t __attribute__((unused)) ReadClsPlanBlockActionReg(uint8_t grpId) +{ + return (uint32_t)(FM_KG_KGAR_GO | + FM_KG_KGAR_READ | @@ -30620,7 +43709,7 @@ Signed-off-by: Zhao Qiang +#endif /* __FM_MANIP_H */ --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.c -@@ -0,0 +1,2094 @@ +@@ -0,0 +1,2095 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * @@ -30935,7 +44024,8 @@ Signed-off-by: Zhao Qiang + p_Lock = FM_PCD_LOCK_OBJ(p_FmPcd->freeLocksLst.p_Next); + LIST_DelAndInit(&p_Lock->node); + } -+ XX_UnlockIntrSpinlock(p_FmPcd->h_Spinlock, intFlags); ++ if (p_FmPcd->h_Spinlock) ++ XX_UnlockIntrSpinlock(p_FmPcd->h_Spinlock, intFlags); + + return p_Lock; +} @@ -33546,7 +46636,7 @@ Signed-off-by: Zhao Qiang +#endif /* __FM_PCD_IPC_H */ --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_plcr.c -@@ -0,0 +1,1846 @@ +@@ -0,0 +1,1847 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * @@ -33585,6 +46675,7 @@ Signed-off-by: Zhao Qiang + + @Description FM PCD POLICER... +*//***************************************************************************/ ++#include +#include "std_ext.h" +#include "error_ext.h" +#include "string_ext.h" @@ -33754,13 +46845,13 @@ Signed-off-by: Zhao Qiang + div = 1000000000; /* nano */ + div *= 10; /* 10 nano */ + } -+ *p_Integer = (tmp< +#endif /* __FM_PLCR_H */ --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.c -@@ -0,0 +1,422 @@ +@@ -0,0 +1,423 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * @@ -35602,6 +48693,7 @@ Signed-off-by: Zhao Qiang + + @Description FM PCD ... +*//***************************************************************************/ ++#include +#include "std_ext.h" +#include "error_ext.h" +#include "string_ext.h" @@ -46056,8 +59148,8 @@ Signed-off-by: Zhao Qiang + The 4 left-most bits (15:12) of the VlanId parameter are control flags. + Flags[3:1] (VlanId[15:13]): Reserved, should be cleared. + Flags[0] (VlanId[12]): Temporary address. -+ ? 0 - Assigned IP address. -+ ? 1- Temporary (tentative) IP address. ++ • 0 - Assigned IP address. ++ • 1- Temporary (tentative) IP address. + Refer to the FMan Controller spec for more details. 
+*//***************************************************************************/ +typedef _Packed struct @@ -48763,7 +61855,7 @@ Signed-off-by: Zhao Qiang + + @Cautions None +*//***************************************************************************/ -+ ++#include +#include "error_ext.h" +#include "debug_ext.h" +#include "string_ext.h" @@ -49195,11 +62287,11 @@ Signed-off-by: Zhao Qiang + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, + ("Alarm time must be equal or larger than RTC period - %d nanoseconds", + p_Rtc->clockPeriodNanoSec)); -+ if (p_FmRtcAlarmParams->alarmTime % (uint64_t)p_Rtc->clockPeriodNanoSec) ++ tmpAlarm = p_FmRtcAlarmParams->alarmTime; ++ if (do_div(tmpAlarm, p_Rtc->clockPeriodNanoSec)) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, + ("Alarm time must be a multiple of RTC period - %d nanoseconds", + p_Rtc->clockPeriodNanoSec)); -+ tmpAlarm = p_FmRtcAlarmParams->alarmTime/(uint64_t)p_Rtc->clockPeriodNanoSec; + + if (p_FmRtcAlarmParams->f_AlarmCallback) + { @@ -49233,11 +62325,11 @@ Signed-off-by: Zhao Qiang + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, + ("Periodic pulse must be equal or larger than RTC period - %d nanoseconds", + p_Rtc->clockPeriodNanoSec)); -+ if (p_FmRtcPeriodicPulseParams->periodicPulsePeriod % (uint64_t)p_Rtc->clockPeriodNanoSec) ++ tmpFiper = p_FmRtcPeriodicPulseParams->periodicPulsePeriod; ++ if (do_div(tmpFiper, p_Rtc->clockPeriodNanoSec)) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, + ("Periodic pulse must be a multiple of RTC period - %d nanoseconds", + p_Rtc->clockPeriodNanoSec)); -+ tmpFiper = p_FmRtcPeriodicPulseParams->periodicPulsePeriod/(uint64_t)p_Rtc->clockPeriodNanoSec; + if (tmpFiper & 0xffffffff00000000LL) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, + ("Periodic pulse/RTC Period must be smaller than 4294967296", @@ -49353,7 +62445,7 @@ Signed-off-by: Zhao Qiang + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + -+ ts = ts/p_Rtc->clockPeriodNanoSec; ++ do_div(ts, p_Rtc->clockPeriodNanoSec); + fman_rtc_set_timer(p_Rtc->p_MemMap, (int64_t)ts); + + return E_OK; @@ -50919,7 +64011,7 @@ Signed-off-by: Zhao Qiang +} --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm.c -@@ -0,0 +1,5195 @@ +@@ -0,0 +1,5216 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. 
+ * @@ -50965,6 +64057,7 @@ Signed-off-by: Zhao Qiang +#include "sprint_ext.h" +#include "debug_ext.h" +#include "fm_muram_ext.h" ++#include + +#include "fm_common.h" +#include "fm_ipc.h" @@ -52807,6 +65900,19 @@ Signed-off-by: Zhao Qiang + return p_Fm->p_FmStateStruct->fmId; +} + ++t_Error FmReset(t_Handle h_Fm) ++{ ++ t_Fm *p_Fm = (t_Fm*)h_Fm; ++ ++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); ++ ++ WRITE_UINT32(p_Fm->p_FmFpmRegs->fm_rstc, FPM_RSTC_FM_RESET); ++ CORE_MemoryBarrier(); ++ XX_UDelay(100); ++ ++ return E_OK; ++} ++ +t_Error FmSetNumOfRiscsPerPort(t_Handle h_Fm, + uint8_t hardwarePortId, + uint8_t numOfFmanCtrls, @@ -54316,6 +67422,7 @@ Signed-off-by: Zhao Qiang + p_Fm->p_FmStateStruct->extraFifoPoolSize = 0; + p_Fm->p_FmStateStruct->exceptions = DEFAULT_exceptions; + p_Fm->resetOnInit = DEFAULT_resetOnInit; ++ p_Fm->f_ResetOnInitOverride = DEFAULT_resetOnInitOverrideCallback; + p_Fm->fwVerify = DEFAULT_VerifyUcode; + p_Fm->firmware.size = p_FmParam->firmware.size; + if (p_Fm->firmware.size) @@ -54454,25 +67561,18 @@ Signed-off-by: Zhao Qiang + if ((err = FwNotResetErratumBugzilla6173WA(p_Fm)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); +#else /* not FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173 */ -+#ifndef CONFIG_FMAN_ARM -+ { -+ u32 svr = mfspr(SPRN_SVR); + -+ if (((SVR_SOC_VER(svr) == SVR_T4240 && SVR_REV(svr) > 0x10)) || -+ ((SVR_SOC_VER(svr) == SVR_T4160 && SVR_REV(svr) > 0x10)) || -+ ((SVR_SOC_VER(svr) == SVR_T4080 && SVR_REV(svr) > 0x10)) || -+ (SVR_SOC_VER(svr) == SVR_T1024) || -+ (SVR_SOC_VER(svr) == SVR_T1023) || -+ (SVR_SOC_VER(svr) == SVR_T2080) || -+ (SVR_SOC_VER(svr) == SVR_T2081)) { -+ DBG(WARNING, ("Hack: No FM reset!\n")); -+ } else { -+ WRITE_UINT32(p_Fm->p_FmFpmRegs->fm_rstc, FPM_RSTC_FM_RESET); -+ CORE_MemoryBarrier(); -+ XX_UDelay(100); -+ } ++ if (p_Fm->f_ResetOnInitOverride) ++ { ++ /* Perform user specific FMan reset */ ++ p_Fm->f_ResetOnInitOverride(h_Fm); + } -+#endif ++ else ++ { ++ /* Perform FMan reset */ ++ FmReset(h_Fm); ++ } ++ + if (fman_is_qmi_halt_not_busy_state(p_Fm->p_FmQmiRegs)) + { + fman_resume(p_Fm->p_FmFpmRegs); @@ -54735,6 +67835,19 @@ Signed-off-by: Zhao Qiang + return E_OK; +} + ++t_Error FM_ConfigResetOnInitOverrideCallback(t_Handle h_Fm, t_FmResetOnInitOverrideCallback *f_ResetOnInitOverride) ++{ ++ t_Fm *p_Fm = (t_Fm*)h_Fm; ++ ++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); ++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); ++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED); ++ ++ p_Fm->f_ResetOnInitOverride = f_ResetOnInitOverride; ++ ++ return E_OK; ++} ++ +t_Error FM_ConfigTotalFifoSize(t_Handle h_Fm, uint32_t totalFifoSize) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; @@ -56009,9 +69122,9 @@ Signed-off-by: Zhao Qiang + effValue = (uint64_t) + ((uint64_t)GET_UINT32(p_MonRegs->tpc2h) << 32 | GET_UINT32(p_MonRegs->tpc2l)); + -+ p_Mon->percentCnt[0] = (uint8_t)((clkCnt - utilValue) * 100 / clkCnt); ++ p_Mon->percentCnt[0] = (uint8_t)div64_u64((clkCnt - utilValue) * 100, clkCnt); + if (clkCnt != utilValue) -+ p_Mon->percentCnt[1] = (uint8_t)(((clkCnt - utilValue) - effValue) * 100 / (clkCnt - utilValue)); ++ p_Mon->percentCnt[1] = (uint8_t)div64_u64(((clkCnt - utilValue) - effValue) * 100, clkCnt - utilValue); + else + p_Mon->percentCnt[1] = 0; + @@ -56117,7 +69230,7 @@ Signed-off-by: Zhao Qiang +#endif --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm.h -@@ -0,0 +1,646 @@ +@@ -0,0 +1,648 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. 
+ * @@ -56416,6 +69529,7 @@ Signed-off-by: Zhao Qiang +#define DEFAULT_catastrophicErr e_FM_CATASTROPHIC_ERR_STALL_PORT +#define DEFAULT_dmaErr e_FM_DMA_ERR_CATASTROPHIC +#define DEFAULT_resetOnInit FALSE ++#define DEFAULT_resetOnInitOverrideCallback NULL +#define DEFAULT_haltOnExternalActivation FALSE /* do not change! if changed, must be disabled for rev1 ! */ +#define DEFAULT_haltOnUnrecoverableEccError FALSE /* do not change! if changed, must be disabled for rev1 ! */ +#define DEFAULT_externalEccRamsEnable FALSE @@ -56759,6 +69873,7 @@ Signed-off-by: Zhao Qiang + t_FmFirmwareParams firmware; + bool fwVerify; + bool resetOnInit; ++ t_FmResetOnInitOverrideCallback *f_ResetOnInitOverride; + uint32_t userSetExceptions; +} t_Fm; + @@ -57411,7 +70526,7 @@ Signed-off-by: Zhao Qiang +} --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fman.c -@@ -0,0 +1,1399 @@ +@@ -0,0 +1,1398 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * @@ -57445,7 +70560,7 @@ Signed-off-by: Zhao Qiang + */ + + -+ ++#include +#include "fsl_fman.h" +#include "dpaa_integration_ext.h" + @@ -57600,10 +70715,9 @@ Signed-off-by: Zhao Qiang + * we do not div back, since we write this value as a fraction + * see spec */ + -+ frac = (((uint64_t)ts_freq << 16) - ((uint64_t)intgr << 16) * fm_clk_freq) -+ / fm_clk_freq; ++ frac = ((uint64_t)ts_freq << 16) - ((uint64_t)intgr << 16) * fm_clk_freq; + /* we check remainder of the division in order to round up if not int */ -+ if (((ts_freq << 16) - (intgr << 16)*fm_clk_freq) % fm_clk_freq) ++ if (do_div(frac, fm_clk_freq)) + frac++; + + tmp = (intgr << FPM_TS_INT_SHIFT) | (uint16_t)frac; @@ -58813,7 +71927,7 @@ Signed-off-by: Zhao Qiang +} --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc/fm_common.h -@@ -0,0 +1,1203 @@ +@@ -0,0 +1,1214 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * @@ -59886,6 +73000,17 @@ Signed-off-by: Zhao Qiang +uint8_t FmGetId(t_Handle h_Fm); + +/**************************************************************************//** ++ @Function FmReset ++ ++ @Description Used to reset the FM ++ ++ @Param[in] h_Fm A handle to an FM Module. ++ ++ @Return E_OK on success; Error code otherwise. ++*//***************************************************************************/ ++t_Error FmReset(t_Handle h_Fm); ++ ++/**************************************************************************//** + @Function FmGetSetPortParams + + @Description Used by FM-PORT driver to pass and receive parameters between @@ -62881,7 +76006,7 @@ Signed-off-by: Zhao Qiang +#endif /* __crc_mac_addr_ext_h */ --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/dpaa_ext.h -@@ -0,0 +1,207 @@ +@@ -0,0 +1,210 @@ +/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc + * All rights reserved. 
+ * @@ -62943,8 +76068,19 @@ Signed-off-by: Zhao Qiang + @Description Frame descriptor +*//***************************************************************************/ +typedef _Packed struct t_DpaaFD { -+ volatile uint32_t id; /**< FD id */ -+ volatile uint32_t addrl; /**< Data Address */ ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ volatile uint8_t liodn; ++ volatile uint8_t bpid; ++ volatile uint8_t elion; ++ volatile uint8_t addrh; ++ volatile uint32_t addrl; ++#else ++ volatile uint32_t addrl; ++ volatile uint8_t addrh; ++ volatile uint8_t elion; ++ volatile uint8_t bpid; ++ volatile uint8_t liodn; ++ #endif + volatile uint32_t length; /**< Frame length */ + volatile uint32_t status; /**< FD status */ +} _PackedType t_DpaaFD; @@ -62979,11 +76115,7 @@ Signed-off-by: Zhao Qiang +#define DPAA_FD_OFFSET_MASK 0x1ff00000 /**< FD OFFSET field mask */ +#define DPAA_FD_LENGTH_MASK 0x000fffff /**< FD LENGTH field mask */ + -+#define DPAA_FD_GET_DD(fd) ((((t_DpaaFD *)fd)->id & DPAA_FD_DD_MASK) >> (31-1)) /**< Macro to get FD DD field */ -+#define DPAA_FD_GET_PID(fd) (((((t_DpaaFD *)fd)->id & DPAA_FD_PID_MASK) >> (31-7)) | \ -+ ((((t_DpaaFD *)fd)->id & DPAA_FD_ELIODN_MASK) >> (31-19-6))) /**< Macro to get FD PID field */ -+#define DPAA_FD_GET_BPID(fd) ((((t_DpaaFD *)fd)->id & DPAA_FD_BPID_MASK) >> (31-15)) /**< Macro to get FD BPID field */ -+#define DPAA_FD_GET_ADDRH(fd) (((t_DpaaFD *)fd)->id & DPAA_FD_ADDRH_MASK) /**< Macro to get FD ADDRH field */ ++#define DPAA_FD_GET_ADDRH(fd) ((t_DpaaFD *)fd)->addrh /**< Macro to get FD ADDRH field */ +#define DPAA_FD_GET_ADDRL(fd) ((t_DpaaFD *)fd)->addrl /**< Macro to get FD ADDRL field */ +#define DPAA_FD_GET_PHYS_ADDR(fd) ((physAddress_t)(((uint64_t)DPAA_FD_GET_ADDRH(fd) << 32) | (uint64_t)DPAA_FD_GET_ADDRL(fd))) /**< Macro to get FD ADDR field */ +#define DPAA_FD_GET_FORMAT(fd) ((((t_DpaaFD *)fd)->length & DPAA_FD_FORMAT_MASK) >> (31-2)) /**< Macro to get FD FORMAT field */ @@ -62992,11 +76124,7 @@ Signed-off-by: Zhao Qiang +#define DPAA_FD_GET_STATUS(fd) ((t_DpaaFD *)fd)->status /**< Macro to get FD STATUS field */ +#define DPAA_FD_GET_ADDR(fd) XX_PhysToVirt(DPAA_FD_GET_PHYS_ADDR(fd)) /**< Macro to get FD ADDR (virtual) */ + -+#define DPAA_FD_SET_DD(fd,val) (((t_DpaaFD *)fd)->id = ((((t_DpaaFD *)fd)->id & ~DPAA_FD_DD_MASK) | (((val) << (31-1)) & DPAA_FD_DD_MASK ))) /**< Macro to set FD DD field */ -+ /**< Macro to set FD PID field or LIODN offset*/ -+#define DPAA_FD_SET_PID(fd,val) (((t_DpaaFD *)fd)->id = ((((t_DpaaFD *)fd)->id & ~(DPAA_FD_PID_MASK|DPAA_FD_ELIODN_MASK)) | ((((val) << (31-7)) & DPAA_FD_PID_MASK) | ((((val)>>6) << (31-19)) & DPAA_FD_ELIODN_MASK)))) -+#define DPAA_FD_SET_BPID(fd,val) (((t_DpaaFD *)fd)->id = ((((t_DpaaFD *)fd)->id & ~DPAA_FD_BPID_MASK) | (((val) << (31-15)) & DPAA_FD_BPID_MASK))) /**< Macro to set FD BPID field */ -+#define DPAA_FD_SET_ADDRH(fd,val) (((t_DpaaFD *)fd)->id = ((((t_DpaaFD *)fd)->id & ~DPAA_FD_ADDRH_MASK) | ((val) & DPAA_FD_ADDRH_MASK))) /**< Macro to set FD ADDRH field */ ++#define DPAA_FD_SET_ADDRH(fd,val) ((t_DpaaFD *)fd)->addrh = (val) /**< Macro to set FD ADDRH field */ +#define DPAA_FD_SET_ADDRL(fd,val) ((t_DpaaFD *)fd)->addrl = (val) /**< Macro to set FD ADDRL field */ +#define DPAA_FD_SET_ADDR(fd,val) \ +do { \ @@ -63091,7 +76219,7 @@ Signed-off-by: Zhao Qiang +#endif /* __DPAA_EXT_H */ --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_ext.h -@@ -0,0 +1,1705 @@ +@@ -0,0 +1,1731 @@ +/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc + * All rights reserved. 
+ * @@ -63780,6 +76908,15 @@ Signed-off-by: Zhao Qiang + until this value is reached (Hystheresis) */ +} t_FmDmaThresholds; + ++/**************************************************************************//** ++ @Function t_FmResetOnInitOverrideCallback ++ ++ @Description FMan specific reset on init user callback routine, ++ will be used to override the standard FMan reset on init procedure ++ ++ @Param[in] h_Fm - FMan handler ++*//***************************************************************************/ ++typedef void (t_FmResetOnInitOverrideCallback)(t_Handle h_Fm); + +/**************************************************************************//** + @Function FM_ConfigResetOnInit @@ -63799,6 +76936,23 @@ Signed-off-by: Zhao Qiang +t_Error FM_ConfigResetOnInit(t_Handle h_Fm, bool enable); + +/**************************************************************************//** ++ @Function FM_ConfigResetOnInitOverrideCallback ++ ++ @Description Define a special reset of FM before initialization. ++ Change the default configuration [DEFAULT_resetOnInitOverrideCallback]. ++ ++ @Param[in] h_Fm A handle to an FM Module. ++ @Param[in] f_ResetOnInitOverride FM specific reset on init user callback routine. ++ ++ @Return E_OK on success; Error code otherwise. ++ ++ @Cautions Allowed only following FM_Config() and before FM_Init(). ++ This routine should NOT be called from guest-partition ++ (i.e. guestId != NCSW_MASTER_ID) ++*//***************************************************************************/ ++t_Error FM_ConfigResetOnInitOverrideCallback(t_Handle h_Fm, t_FmResetOnInitOverrideCallback *f_ResetOnInitOverride); ++ ++/**************************************************************************//** + @Function FM_ConfigTotalFifoSize + + @Description Define Total FIFO size for the whole FM. @@ -64799,7 +77953,7 @@ Signed-off-by: Zhao Qiang +#endif /* __FM_EXT */ --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_mac_ext.h -@@ -0,0 +1,846 @@ +@@ -0,0 +1,859 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * @@ -65269,6 +78423,19 @@ Signed-off-by: Zhao Qiang +t_Error FM_MAC_Disable(t_Handle h_FmMac, e_CommMode mode); + +/**************************************************************************//** ++ @Function FM_MAC_Resume ++ ++ @Description Re-init the MAC after suspend ++ ++ @Param[in] h_FmMac A handle to a FM MAC Module. ++ ++ @Return E_OK on success; Error code otherwise. ++ ++ @Cautions Allowed only following FM_MAC_Init(). ++*//***************************************************************************/ ++t_Error FM_MAC_Resume(t_Handle h_FmMac); ++ ++/**************************************************************************//** + @Function FM_MAC_Enable1588TimeStamp + + @Description Enables the TSU operation. @@ -83698,7 +96865,7 @@ Signed-off-by: Zhao Qiang +#endif /* __FSL_FMAN_TGEC_H */ --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3H/dpaa_integration_ext.h -@@ -0,0 +1,290 @@ +@@ -0,0 +1,291 @@ +/* + * Copyright 2012 Freescale Semiconductor Inc. 
+ * @@ -83964,6 +97131,7 @@ Signed-off-by: Zhao Qiang +#define FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 +#define FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675 +#define FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 ++#define FM_HANG_AT_RESET_MAC_CLK_DISABLED_ERRATA_FMAN_A007273 + +#define FM_BCB_ERRATA_BMI_SW001 +#define FM_LEN_CHECK_ERRATA_FMAN_SW002 @@ -84372,7 +97540,7 @@ Signed-off-by: Zhao Qiang +#endif /* __PART_INTEGRATION_EXT_H */ --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3L/dpaa_integration_ext.h -@@ -0,0 +1,292 @@ +@@ -0,0 +1,293 @@ +/* + * Copyright 2012 Freescale Semiconductor Inc. + * @@ -84640,6 +97808,7 @@ Signed-off-by: Zhao Qiang +#define FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 +#define FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675 +#define FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 ++#define FM_HANG_AT_RESET_MAC_CLK_DISABLED_ERRATA_FMAN_A007273 + +#define FM_BCB_ERRATA_BMI_SW001 +#define FM_LEN_CHECK_ERRATA_FMAN_SW002 @@ -87228,7 +100397,7 @@ Signed-off-by: Zhao Qiang +#endif /* __PART_INTEGRATION_EXT_H */ --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/inc/math_ext.h -@@ -0,0 +1,99 @@ +@@ -0,0 +1,100 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * @@ -87268,6 +100437,7 @@ Signed-off-by: Zhao Qiang + +#if defined(NCSW_LINUX) && defined(__KERNEL__) +#include ++#include + +#elif defined(__MWERKS__) +#define LOW(x) ( sizeof(x)==8 ? *(1+(int32_t*)&x) : (*(int32_t*)&x)) @@ -87475,7 +100645,7 @@ Signed-off-by: Zhao Qiang +} while (0) + +/* Ceiling division - not the fastest way, but safer in terms of overflow */ -+#define DIV_CEIL(x,y) (((x)/(y)) + ((((((x)/(y)))*(y)) == (x)) ? 0 : 1)) ++#define DIV_CEIL(x,y) (div64_u64((x),(y)) + (((div64_u64((x),(y))*(y)) == (x)) ? 0 : 1)) + +/* Round up a number to be a multiple of a second number */ +#define ROUND_UP(x,y) ((((x) + (y) - 1) / (y)) * (y)) @@ -90176,7 +103346,7 @@ Signed-off-by: Zhao Qiang +#endif /* __FSL_FMAN_TEST_H */ --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_exp_sym.h -@@ -0,0 +1,127 @@ +@@ -0,0 +1,128 @@ +/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc. + * All rights reserved. + * @@ -90226,6 +103396,7 @@ Signed-off-by: Zhao Qiang +EXPORT_SYMBOL(FM_PORT_Disable); +EXPORT_SYMBOL(FM_PORT_Enable); +EXPORT_SYMBOL(FM_PORT_SetPCD); ++EXPORT_SYMBOL(FM_PORT_DeletePCD); + +/* Runtime PCD exported routines */ +EXPORT_SYMBOL(FM_PCD_Enable); @@ -90472,7 +103643,7 @@ Signed-off-by: Zhao Qiang +#endif /* __LNXWRP_FM_EXT_H */ --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_fsl_fman.h -@@ -0,0 +1,919 @@ +@@ -0,0 +1,921 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * @@ -91240,6 +104411,8 @@ Signed-off-by: Zhao Qiang + +int fm_mac_disable(struct fm_mac_dev *fm_mac_dev); + ++int fm_mac_resume(struct fm_mac_dev *fm_mac_dev); ++ +int fm_mac_set_promiscuous(struct fm_mac_dev *fm_mac_dev, + bool enable); + @@ -93324,7 +106497,7 @@ Signed-off-by: Zhao Qiang +module_exit(fmt_unload); --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm.c -@@ -0,0 +1,2795 @@ +@@ -0,0 +1,2908 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. 
+ * @@ -93386,10 +106559,10 @@ Signed-off-by: Zhao Qiang +#include +#include +#include -+#include /* For struct qe_firmware */ +#ifndef CONFIG_FMAN_ARM +#include +#include ++#include +#endif +#include /* For file access mask */ +#include @@ -93778,6 +106951,47 @@ Signed-off-by: Zhao Qiang + return E_OK; +} + ++/* Structure that defines QE firmware binary files. ++ * ++ * See Documentation/powerpc/qe_firmware.txt for a description of these ++ * fields. ++ */ ++struct qe_firmware { ++ struct qe_header { ++ __be32 length; /* Length of the entire structure, in bytes */ ++ u8 magic[3]; /* Set to { 'Q', 'E', 'F' } */ ++ u8 version; /* Version of this layout. First ver is '1' */ ++ } header; ++ u8 id[62]; /* Null-terminated identifier string */ ++ u8 split; /* 0 = shared I-RAM, 1 = split I-RAM */ ++ u8 count; /* Number of microcode[] structures */ ++ struct { ++ __be16 model; /* The SOC model */ ++ u8 major; /* The SOC revision major */ ++ u8 minor; /* The SOC revision minor */ ++ } __attribute__ ((packed)) soc; ++ u8 padding[4]; /* Reserved, for alignment */ ++ __be64 extended_modes; /* Extended modes */ ++ __be32 vtraps[8]; /* Virtual trap addresses */ ++ u8 reserved[4]; /* Reserved, for future expansion */ ++ struct qe_microcode { ++ u8 id[32]; /* Null-terminated identifier */ ++ __be32 traps[16]; /* Trap addresses, 0 == ignore */ ++ __be32 eccr; /* The value for the ECCR register */ ++ __be32 iram_offset; /* Offset into I-RAM for the code */ ++ __be32 count; /* Number of 32-bit words of the code */ ++ __be32 code_offset; /* Offset of the actual microcode */ ++ u8 major; /* The microcode version major */ ++ u8 minor; /* The microcode version minor */ ++ u8 revision; /* The microcode version revision */ ++ u8 padding; /* Reserved, for alignment */ ++ u8 reserved[4]; /* Reserved, for future expansion */ ++ } __attribute__ ((packed)) microcode[1]; ++ /* All microcode binaries should be located here */ ++ /* CRC32 should be located here, after the microcode binaries */ ++} __attribute__ ((packed)); ++ ++ +/** + * FindFmanMicrocode - find the Fman microcode + * @@ -93826,11 +107040,35 @@ Signed-off-by: Zhao Qiang +#define SVR_T4_DEVICE_ID 0x82400000 +#define SVR_DEVICE_ID_MASK 0xFFF00000 + ++#define OF_DEV_ID_NUM 2 /* one used, another one zeroed */ ++ ++/* searches for a subnode with the given name/compatible */ ++static bool HasFmPcdOfNode(struct device_node *fm_node, ++ struct of_device_id *ids, ++ const char *name, ++ const char *compatible) ++{ ++ struct device_node *dev_node; ++ bool ret = false; ++ ++ memset(ids, 0, OF_DEV_ID_NUM*sizeof(struct of_device_id)); ++ if (WARN_ON(strlen(name) >= sizeof(ids[0].name))) ++ return false; ++ strcpy(ids[0].name, name); ++ if (WARN_ON(strlen(compatible) >= sizeof(ids[0].compatible))) ++ return false; ++ strcpy(ids[0].compatible, compatible); ++ for_each_child_of_node(fm_node, dev_node) ++ if (of_match_node(ids, dev_node) != NULL) ++ ret = true; ++ return ret; ++} ++ +static t_LnxWrpFmDev * ReadFmDevTreeNode (struct platform_device *of_dev) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev; + struct device_node *fm_node, *dev_node; -+ struct of_device_id name; ++ struct of_device_id ids[OF_DEV_ID_NUM]; + struct resource res; + struct clk *clk; + u32 clk_rate; @@ -93866,6 +107104,7 @@ Signed-off-by: Zhao Qiang + p_LnxWrpFmDev->irq = of_irq_to_resource(fm_node, 0, NULL); + if (unlikely(p_LnxWrpFmDev->irq == /*NO_IRQ*/0)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_irq_to_resource() = %d", NO_IRQ)); ++ DestroyFmDev(p_LnxWrpFmDev); + return NULL; + } + @@ -93874,6 
+107113,7 @@ Signed-off-by: Zhao Qiang + + if (unlikely(p_LnxWrpFmDev->err_irq == /*NO_IRQ*/0)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_irq_to_resource() = %d", NO_IRQ)); ++ DestroyFmDev(p_LnxWrpFmDev); + return NULL; + } + @@ -93881,6 +107121,7 @@ Signed-off-by: Zhao Qiang + _errno = of_address_to_resource(fm_node, 0, &res); + if (unlikely(_errno < 0)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_address_to_resource() = %d", _errno)); ++ DestroyFmDev(p_LnxWrpFmDev); + return NULL; + } + @@ -93894,7 +107135,8 @@ Signed-off-by: Zhao Qiang + dev_err(&of_dev->dev, "%s: Failed to get FM clock structure\n", + __func__); + of_node_put(fm_node); -+ return NULL; ++ DestroyFmDev(p_LnxWrpFmDev); ++ return NULL; + } + + clk_rate = clk_get_rate(clk); @@ -93902,23 +107144,25 @@ Signed-off-by: Zhao Qiang + dev_err(&of_dev->dev, "%s: Failed to determine FM clock rate\n", + __func__); + of_node_put(fm_node); ++ DestroyFmDev(p_LnxWrpFmDev); + return NULL; + } + + p_LnxWrpFmDev->fmDevSettings.param.fmClkFreq = DIV_ROUND_UP(clk_rate, 1000000); /* In MHz, rounded */ + /* Get the MURAM base address and size */ -+ memset(&name, 0, sizeof(struct of_device_id)); -+ if (WARN_ON(strlen("muram") >= sizeof(name.name))) ++ memset(ids, 0, sizeof(ids)); ++ if (WARN_ON(strlen("muram") >= sizeof(ids[0].name))) + return NULL; -+ strcpy(name.name, "muram"); -+ if (WARN_ON(strlen("fsl,fman-muram") >= sizeof(name.compatible))) ++ strcpy(ids[0].name, "muram"); ++ if (WARN_ON(strlen("fsl,fman-muram") >= sizeof(ids[0].compatible))) + return NULL; -+ strcpy(name.compatible, "fsl,fman-muram"); ++ strcpy(ids[0].compatible, "fsl,fman-muram"); + for_each_child_of_node(fm_node, dev_node) { -+ if (likely(of_match_node(&name, dev_node) != NULL)) { ++ if (likely(of_match_node(ids, dev_node) != NULL)) { + _errno = of_address_to_resource(dev_node, 0, &res); + if (unlikely(_errno < 0)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_address_to_resource() = %d", _errno)); ++ DestroyFmDev(p_LnxWrpFmDev); + return NULL; + } + @@ -93939,18 +107183,19 @@ Signed-off-by: Zhao Qiang + } + + /* Get the RTC base address and size */ -+ memset(&name, 0, sizeof(struct of_device_id)); -+ if (WARN_ON(strlen("rtc") >= sizeof(name.name))) ++ memset(ids, 0, sizeof(ids)); ++ if (WARN_ON(strlen("ptp-timer") >= sizeof(ids[0].name))) + return NULL; -+ strcpy(name.name, "rtc"); -+ if (WARN_ON(strlen("fsl,fman-rtc") >= sizeof(name.compatible))) ++ strcpy(ids[0].name, "ptp-timer"); ++ if (WARN_ON(strlen("fsl,fman-rtc") >= sizeof(ids[0].compatible))) + return NULL; -+ strcpy(name.compatible, "fsl,fman-rtc"); ++ strcpy(ids[0].compatible, "fsl,fman-rtc"); + for_each_child_of_node(fm_node, dev_node) { -+ if (likely(of_match_node(&name, dev_node) != NULL)) { ++ if (likely(of_match_node(ids, dev_node) != NULL)) { + _errno = of_address_to_resource(dev_node, 0, &res); + if (unlikely(_errno < 0)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_address_to_resource() = %d", _errno)); ++ DestroyFmDev(p_LnxWrpFmDev); + return NULL; + } + @@ -93967,6 +107212,7 @@ Signed-off-by: Zhao Qiang + _errno = of_address_to_resource(dev_node, 0, &res); + if (unlikely(_errno < 0)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_address_to_resource() = %d", _errno)); ++ DestroyFmDev(p_LnxWrpFmDev); + return NULL; + } + p_LnxWrpFmDev->fmVspBaseAddr = 0; @@ -93977,49 +107223,10 @@ Signed-off-by: Zhao Qiang +#endif + + /* Get all PCD nodes */ -+ memset(&name, 0, sizeof(struct of_device_id)); -+ if (WARN_ON(strlen("parser") >= sizeof(name.name))) -+ return NULL; -+ strcpy(name.name, "parser"); 
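
The removals above, together with the HasFmPcdOfNode() helper introduced earlier in this file, collapse four near-identical of_match_node() scans into one call per PCD engine. The same child scan can also be written with stock OF helpers; a sketch with an illustrative name:

#include <linux/of.h>

/* True if @parent has a child matching @compat; mirrors what
 * HasFmPcdOfNode() does for the parser/keygen/cc/policer nodes. */
static bool has_child_compatible(struct device_node *parent,
				 const char *compat)
{
	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (of_device_is_compatible(child, compat)) {
			of_node_put(child); /* drop ref held by the iterator */
			return true;
		}
	}
	return false;
}
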
-+ if (WARN_ON(strlen("fsl,fman-parser") >= sizeof(name.compatible))) -+ return NULL; -+ strcpy(name.compatible, "fsl,fman-parser"); -+ for_each_child_of_node(fm_node, dev_node) -+ if (likely(of_match_node(&name, dev_node) != NULL)) -+ p_LnxWrpFmDev->prsActive = TRUE; -+ -+ memset(&name, 0, sizeof(struct of_device_id)); -+ if (WARN_ON(strlen("keygen") >= sizeof(name.name))) -+ return NULL; -+ strcpy(name.name, "keygen"); -+ if (WARN_ON(strlen("fsl,fman-keygen") >= sizeof(name.compatible))) -+ return NULL; -+ strcpy(name.compatible, "fsl,fman-keygen"); -+ for_each_child_of_node(fm_node, dev_node) -+ if (likely(of_match_node(&name, dev_node) != NULL)) -+ p_LnxWrpFmDev->kgActive = TRUE; -+ -+ memset(&name, 0, sizeof(struct of_device_id)); -+ if (WARN_ON(strlen("cc") >= sizeof(name.name))) -+ return NULL; -+ strcpy(name.name, "cc"); -+ if (WARN_ON(strlen("fsl,fman-cc") >= sizeof(name.compatible))) -+ return NULL; -+ strcpy(name.compatible, "fsl,fman-cc"); -+ for_each_child_of_node(fm_node, dev_node) -+ if (likely(of_match_node(&name, dev_node) != NULL)) -+ p_LnxWrpFmDev->ccActive = TRUE; -+ -+ memset(&name, 0, sizeof(struct of_device_id)); -+ if (WARN_ON(strlen("policer") >= sizeof(name.name))) -+ return NULL; -+ strcpy(name.name, "policer"); -+ if (WARN_ON(strlen("fsl,fman-policer") >= sizeof(name.compatible))) -+ return NULL; -+ strcpy(name.compatible, "fsl,fman-policer"); -+ for_each_child_of_node(fm_node, dev_node) -+ if (likely(of_match_node(&name, dev_node) != NULL)) -+ p_LnxWrpFmDev->plcrActive = TRUE; ++ p_LnxWrpFmDev->prsActive = HasFmPcdOfNode(fm_node, ids, "parser", "fsl,fman-parser"); ++ p_LnxWrpFmDev->kgActive = HasFmPcdOfNode(fm_node, ids, "keygen", "fsl,fman-keygen"); ++ p_LnxWrpFmDev->ccActive = HasFmPcdOfNode(fm_node, ids, "cc", "fsl,fman-cc"); ++ p_LnxWrpFmDev->plcrActive = HasFmPcdOfNode(fm_node, ids, "policer", "fsl,fman-policer"); + + if (p_LnxWrpFmDev->prsActive || p_LnxWrpFmDev->kgActive || + p_LnxWrpFmDev->ccActive || p_LnxWrpFmDev->plcrActive) @@ -94281,6 +107488,60 @@ Signed-off-by: Zhao Qiang + + return ioread32be(&guts_regs->rcwsr[regnum]); +} ++ ++#define FMAN1_ALL_MACS_MASK 0xFCC00000 ++#define FMAN2_ALL_MACS_MASK 0x000FCC00 ++ ++/** ++ * @Function ResetOnInitErrata_A007273 ++ * ++ * @Description Workaround for Errata A-007273 ++ * This workaround is required to avoid a FMan hang during reset on initialization. ++ * Enable all MACs in guts.devdisr2 register, ++ * then perform a regular FMan reset and then restore MACs to their original state. ++ * ++ * @Param[in] h_Fm - FM module descriptor ++ * ++ * @Return None. 
++ */ ++void ResetOnInitErrata_A007273(t_Handle h_Fm) ++{ ++ struct ccsr_guts __iomem *guts_regs = NULL; ++ struct device_node *guts_node; ++ u32 devdisr2, enableMacs; ++ ++ /* Get guts registers */ ++ guts_node = of_find_matching_node(NULL, guts_device_ids); ++ if (!guts_node) { ++ pr_err("could not find GUTS node\n"); ++ return; ++ } ++ guts_regs = of_iomap(guts_node, 0); ++ of_node_put(guts_node); ++ if (!guts_regs) { ++ pr_err("ioremap of GUTS node failed\n"); ++ return; ++ } ++ ++ /* Read current state */ ++ devdisr2 = ioread32be(&guts_regs->devdisr2); ++ ++ if (FmGetId(h_Fm) == 0) ++ enableMacs = devdisr2 & ~FMAN1_ALL_MACS_MASK; ++ else ++ enableMacs = devdisr2 & ~FMAN2_ALL_MACS_MASK; ++ ++ /* Enable all MACs */ ++ iowrite32be(enableMacs, &guts_regs->devdisr2); ++ ++ /* Perform standard FMan reset */ ++ FmReset(h_Fm); ++ ++ /* Restore devdisr2 value */ ++ iowrite32be(devdisr2, &guts_regs->devdisr2); ++ ++ iounmap(guts_regs); ++} +#endif + +static t_Error InitFmDev(t_LnxWrpFmDev *p_LnxWrpFmDev) @@ -94316,9 +107577,13 @@ Signed-off-by: Zhao Qiang + int i; + int usz = p_LnxWrpFmDev->fmDevSettings.param.firmware.size; + void * p_Code = p_LnxWrpFmDev->fmDevSettings.param.firmware.p_Code; ++ u32 *dest = kzalloc(usz, GFP_KERNEL); + ++ if (p_Code && dest) + for(i=0; i < usz / 4; ++i) -+ ((u32 *)p_Code)[i] = be32_to_cpu(((u32 *)p_Code)[i]); ++ dest[i] = be32_to_cpu(((u32 *)p_Code)[i]); ++ ++ p_LnxWrpFmDev->fmDevSettings.param.firmware.p_Code = dest; + } +#endif + @@ -94359,6 +107624,13 @@ Signed-off-by: Zhao Qiang + if (FM_ConfigResetOnInit(p_LnxWrpFmDev->h_Dev, TRUE) != E_OK) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM")); + ++#ifndef CONFIG_FMAN_ARM ++#ifdef FM_HANG_AT_RESET_MAC_CLK_DISABLED_ERRATA_FMAN_A007273 ++ if (FM_ConfigResetOnInitOverrideCallback(p_LnxWrpFmDev->h_Dev, ResetOnInitErrata_A007273) != E_OK) ++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM")); ++#endif /* FM_HANG_AT_RESET_MAC_CLK_DISABLED_ERRATA_FMAN_A007273 */ ++#endif /* CONFIG_FMAN_ARM */ ++ +#ifdef CONFIG_FMAN_P1023 + if (FM_ConfigDmaAidOverride(p_LnxWrpFmDev->h_Dev, TRUE) != E_OK) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM")); @@ -95060,6 +108332,20 @@ Signed-off-by: Zhao Qiang +} +EXPORT_SYMBOL(fm_mac_disable); + ++int fm_mac_resume(struct fm_mac_dev *fm_mac_dev) ++{ ++ int _errno; ++ t_Error err; ++ ++ err = FM_MAC_Resume(fm_mac_dev); ++ _errno = -GET_ERROR_TYPE(err); ++ if (unlikely(_errno < 0)) ++ pr_err("FM_MAC_Resume() = 0x%08x\n", err); ++ ++ return _errno; ++} ++EXPORT_SYMBOL(fm_mac_resume); ++ +int fm_mac_set_promiscuous(struct fm_mac_dev *fm_mac_dev, + bool enable) +{ @@ -96167,7 +109453,7 @@ Signed-off-by: Zhao Qiang +#ifndef __LNXWRP_FM_H__ +#define __LNXWRP_FM_H__ + -+#include /* struct qman_fq */ ++#include /* struct qman_fq */ + +#include "std_ext.h" +#include "error_ext.h" @@ -96419,7 +109705,7 @@ Signed-off-by: Zhao Qiang +#endif /* __LNXWRP_FM_H__ */ --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm_port.c -@@ -0,0 +1,1507 @@ +@@ -0,0 +1,1480 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. 
+ * @@ -96526,31 +109812,15 @@ Signed-off-by: Zhao Qiang +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +{ + /* extract the HC frame address */ -+#ifdef CONFIG_ARM -+ uint32_t *hcf_va = XX_PhysToVirt(((struct qm_fd *)&dq->fd)->addr); -+#else -+ uint64_t hcf_va = (uint64_t)XX_PhysToVirt(((struct qm_fd *)&dq->fd)->addr); -+#endif ++ uint32_t *hcf_va = XX_PhysToVirt(qm_fd_addr((struct qm_fd *)&dq->fd)); + int hcf_l = ((struct qm_fd *)&dq->fd)->length20; + int i; + + /* 32b byteswap of all data in the HC Frame */ + for(i = 0; i < hcf_l / 4; ++i) -+ ((uint32_t *)(hcf_va))[i] = -+ ___constant_swab32(((uint32_t *)(hcf_va))[i]); ++ hcf_va[i] = ++ ___constant_swab32(hcf_va[i]); +} -+{ -+ /* byteswap FD's 40bit address field LE to BE*/ -+ uint8_t t; -+ -+ t = ((uint8_t*)&dq->fd)[6]; -+ ((uint8_t*)&dq->fd)[6] = ((uint8_t*)&dq->fd)[5]; -+ ((uint8_t*)&dq->fd)[5] = ((uint8_t*)&dq->fd)[4]; -+ ((uint8_t*)&dq->fd)[4] = ((uint8_t*)&dq->fd)[3]; -+ ((uint8_t*)&dq->fd)[3] = ((uint8_t*)&dq->fd)[7]; -+ ((uint8_t*)&dq->fd)[7] = t; -+} -+ +#endif + FM_PCD_HcTxConf(p_LnxWrpFmDev->h_PcdDev, (t_DpaaFD *)&dq->fd); + spin_lock_irqsave(&lock, flags); @@ -96665,30 +109935,15 @@ Signed-off-by: Zhao Qiang + +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +{ -+ /* byteswap FD's 40bit address field */ -+ uint8_t t; -+ -+ t = ((uint8_t*)p_Fd)[7]; -+ ((uint8_t*)p_Fd)[7] = ((uint8_t*)p_Fd)[3]; -+ ((uint8_t*)p_Fd)[3] = ((uint8_t*)p_Fd)[4]; -+ ((uint8_t*)p_Fd)[4] = ((uint8_t*)p_Fd)[5]; -+ ((uint8_t*)p_Fd)[5] = ((uint8_t*)p_Fd)[6]; -+ ((uint8_t*)p_Fd)[6] = t; -+} -+{ + /* extract the HC frame address */ -+#ifdef CONFIG_ARM -+ uint32_t *hcf_va = XX_PhysToVirt(((struct qm_fd *) p_Fd)->addr); -+#else -+ uint64_t hcf_va = (uint64_t)XX_PhysToVirt(((struct qm_fd *) p_Fd)->addr); -+#endif ++ uint32_t *hcf_va = XX_PhysToVirt(qm_fd_addr((struct qm_fd *) p_Fd)); + int hcf_l = ((struct qm_fd *)p_Fd)->length20; + int i; + + /* 32b byteswap of all data in the HC Frame */ + for(i = 0; i < hcf_l / 4; ++i) -+ ((uint32_t *)(hcf_va))[i] = -+ ___constant_swab32(((uint32_t *)(hcf_va))[i]); ++ hcf_va[i] = ++ ___constant_swab32(hcf_va[i]); +} +#endif + @@ -96812,6 +110067,7 @@ Signed-off-by: Zhao Qiang + p_LnxWrpFmPortDev->settings.param.specificParams.nonRxParams. + qmChannel = p_LnxWrpFmPortDev->txCh; + } else if (of_device_is_compatible(port_node, "fsl,fman-port-1g-tx")) { ++ tmp_prop -= 0x28; + if (unlikely(tmp_prop >= FM_MAX_NUM_OF_1G_TX_PORTS)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, + ("of_get_property(%s, cell-index) failed", @@ -96840,6 +110096,7 @@ Signed-off-by: Zhao Qiang + settings.param.specificParams.nonRxParams.qmChannel = + p_LnxWrpFmPortDev->txCh; + } else if (of_device_is_compatible(port_node, "fsl,fman-port-10g-tx")) { ++ tmp_prop -= 0x30; + if (unlikely(tmp_prop>= FM_MAX_NUM_OF_10G_TX_PORTS)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, + ("of_get_property(%s, cell-index) failed", @@ -96872,6 +110129,7 @@ Signed-off-by: Zhao Qiang + p_LnxWrpFmPortDev->settings.param.specificParams.nonRxParams. 
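
The tmp_prop adjustments in this hunk re-base the DT cell-index so each port class indexes its own array from zero, with fixed bases per class (0x08/0x10 for 1G/10G Rx, 0x28/0x30 for 1G/10G Tx). A sketch of the mapping; the enum and function names are illustrative only:

/* Relative port index = cell-index minus the class base offset,
 * matching the "tmp_prop -= ..." lines introduced above. */
enum oh_port_class { P_1G_RX, P_10G_RX, P_1G_TX, P_10G_TX };

static u32 relative_port_id(enum oh_port_class cls, u32 cell_index)
{
	switch (cls) {
	case P_1G_RX:	return cell_index - 0x08;
	case P_10G_RX:	return cell_index - 0x10;
	case P_1G_TX:	return cell_index - 0x28;
	case P_10G_TX:	return cell_index - 0x30;
	}
	return cell_index;
}
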
+ qmChannel = p_LnxWrpFmPortDev->txCh; + } else if (of_device_is_compatible(port_node, "fsl,fman-port-1g-rx")) { ++ tmp_prop -= 0x08; + if (unlikely(tmp_prop >= FM_MAX_NUM_OF_1G_RX_PORTS)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, + ("of_get_property(%s, cell-index) failed", @@ -96887,6 +110145,7 @@ Signed-off-by: Zhao Qiang + if (p_LnxWrpFmDev->pcdActive) + p_LnxWrpFmPortDev->defPcd = p_LnxWrpFmDev->defPcd; + } else if (of_device_is_compatible(port_node, "fsl,fman-port-10g-rx")) { ++ tmp_prop -= 0x10; + if (unlikely(tmp_prop >= FM_MAX_NUM_OF_10G_RX_PORTS)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, + ("of_get_property(%s, cell-index) failed", @@ -97623,7 +110882,7 @@ Signed-off-by: Zhao Qiang + FqAlloc(p_LnxWrpFmDev, + 0, + QMAN_FQ_FLAG_NO_ENQUEUE, -+ p_LnxWrpFmDev->hcCh, 7); ++ p_LnxWrpFmDev->hcCh, 1); + if (!p_LnxWrpFmDev->hc_tx_conf_fq) + RETURN_ERROR(MAJOR, E_NULL_POINTER, + ("Frame queue allocation failed...")); @@ -97632,7 +110891,7 @@ Signed-off-by: Zhao Qiang + FqAlloc(p_LnxWrpFmDev, + 0, + QMAN_FQ_FLAG_NO_ENQUEUE, -+ p_LnxWrpFmDev->hcCh, 7); ++ p_LnxWrpFmDev->hcCh, 2); + if (!p_LnxWrpFmDev->hc_tx_err_fq) + RETURN_ERROR(MAJOR, E_NULL_POINTER, + ("Frame queue allocation failed...")); @@ -97989,7 +111248,7 @@ Signed-off-by: Zhao Qiang +#include +#include +#include -+#include ++#include +#include +#ifndef CONFIG_FMAN_ARM +#include @@ -101130,10 +114389,10 @@ Signed-off-by: Zhao Qiang + RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG); + { + uint8_t portId = param.port_params.port_id; -+ param.p_fm = p_LnxWrpFmDev->h_Dev; + param.liodn_offset = + p_LnxWrpFmDev->rxPorts[portId].settings.param.specificParams.rxParams.liodnOffset; + } ++ param.p_fm = p_LnxWrpFmDev->h_Dev; + param.id = FM_VSP_Config((t_FmVspParams *)¶m); + +#if defined(CONFIG_COMPAT) @@ -102745,7 +116004,7 @@ Signed-off-by: Zhao Qiang +}; --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.c -@@ -0,0 +1,1300 @@ +@@ -0,0 +1,1297 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * @@ -103949,17 +117208,14 @@ Signed-off-by: Zhao Qiang + + if (compat == COMPAT_US_TO_K) + { -+ param->p_fm = compat_pcd_id2ptr(compat_param->p_fm); + memcpy(¶m->ext_buf_pools, &compat_param->ext_buf_pools, sizeof(ioc_fm_ext_pools)); + param->liodn_offset = compat_param->liodn_offset; + param->port_params.port_id = compat_param->port_params.port_id; + param->port_params.port_type = compat_param->port_params.port_type; + param->relative_profile_id = compat_param->relative_profile_id; -+ param->id = compat_pcd_id2ptr(compat_param->id); + } + else + { -+ compat_param->p_fm = compat_pcd_ptr2id(param->p_fm); + memcpy(&compat_param->ext_buf_pools, ¶m->ext_buf_pools, sizeof(ioc_fm_ext_pools)); + compat_param->liodn_offset = param->liodn_offset; + compat_param->port_params.port_id = param->port_params.port_id; @@ -107425,7 +120681,7 @@ Signed-off-by: Zhao Qiang +#endif /* LNXWRP_SYSFS_FM_H_ */ --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.c -@@ -0,0 +1,1255 @@ +@@ -0,0 +1,1268 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. 
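+ *
+ * lnxwrp_sysfs_fm_port.c: sysfs show() handlers for the FMan port
+ * counters. Note that each handler validates dev/attr/buf before
+ * touching drvdata, which is why the drvdata lookups sit below the
+ * NULL checks.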
+ * @@ -107674,13 +120930,16 @@ Signed-off-by: Zhao Qiang + unsigned long flags; + unsigned n = 0; +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) -+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = -+ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev); ++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev; +#endif + if (attr == NULL || buf == NULL || dev == NULL) + return -EINVAL; + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) ++ p_LnxWrpFmPortDev = ++ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev); ++ ++ + local_irq_save(flags); + + if (!p_LnxWrpFmPortDev->h_Dev) { @@ -108040,13 +121299,15 @@ Signed-off-by: Zhao Qiang + unsigned long flags; + unsigned n = 0; +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) -+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = -+ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev); ++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev; +#endif + if (attr == NULL || buf == NULL || dev == NULL) + return -EINVAL; + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) ++ p_LnxWrpFmPortDev = ++ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev); ++ + local_irq_save(flags); + + if (!p_LnxWrpFmPortDev->h_Dev) { @@ -108078,13 +121339,15 @@ Signed-off-by: Zhao Qiang + unsigned long flags; + unsigned n = 0; +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) -+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = -+ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev); ++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev; +#endif + if (attr == NULL || buf == NULL || dev == NULL) + return -EINVAL; + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) ++ p_LnxWrpFmPortDev = ++ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev); ++ + local_irq_save(flags); + + if (!p_LnxWrpFmPortDev->h_Dev) { @@ -108117,14 +121380,16 @@ Signed-off-by: Zhao Qiang + unsigned long flags; + unsigned n = 0; +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) -+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = -+ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev); ++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev; +#endif + + if (attr == NULL || buf == NULL || dev == NULL) + return -EINVAL; + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) ++ p_LnxWrpFmPortDev = ++ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev); ++ + local_irq_save(flags); + + if (!p_LnxWrpFmPortDev->h_Dev) { @@ -108163,14 +121428,16 @@ Signed-off-by: Zhao Qiang + unsigned long flags; + unsigned n = 0; +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) -+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = -+ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev); ++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev; +#endif + + if (attr == NULL || buf == NULL || dev == NULL) + return -EINVAL; + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) ++ p_LnxWrpFmPortDev = ++ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev); ++ + local_irq_save(flags); + + if (!p_LnxWrpFmPortDev->h_Dev) { @@ -108202,14 +121469,16 @@ Signed-off-by: Zhao Qiang + unsigned long flags; + unsigned n = 0; +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) -+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = -+ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev); ++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev; +#endif + + if (attr == NULL || buf == NULL || dev == NULL) + return -EINVAL; + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) ++ p_LnxWrpFmPortDev = ++ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev); ++ + local_irq_save(flags); + + if (!p_LnxWrpFmPortDev->h_Dev) { @@ -108754,16 +122023,16 @@ Signed-off-by: Zhao Qiang +obj-y += fsl-ncsw-xx.o + +ifneq ($(CONFIG_FMAN_ARM),y) -+fsl-ncsw-xx-objs := xx_linux.o udivdi3.o \ ++fsl-ncsw-xx-objs := xx_linux.o \ + module_strings.o +else -+fsl-ncsw-xx-objs := xx_arm_linux.o udivdi3.o \ ++fsl-ncsw-xx-objs := xx_arm_linux.o 
\ + module_strings.o +endif + --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/src/xx/module_strings.c -@@ -0,0 +1,45 @@ +@@ -0,0 +1,46 @@ +/* + * Copyright 2012 Freescale Semiconductor Inc. + * @@ -108807,144 +122076,10 @@ Signed-off-by: Zhao Qiang + "FM-MAC", /* MODULE_FM_MAC */ + "FM-Port", /* MODULE_FM_PORT */ + "MM", /* MODULE_MM */ -+ "FM-SP" /* MODULE_FM_SP */ ++ "FM-SP", /* MODULE_FM_SP */ ++ "FM-MACSEC" /* MODULE_FM_MACSEC */ +}; --- /dev/null -+++ b/drivers/net/ethernet/freescale/sdk_fman/src/xx/udivdi3.c -@@ -0,0 +1,132 @@ -+/* -+ * Copyright 2008-2012 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include -+ -+#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS) -+#define MODVERSIONS -+#endif -+#ifdef MODVERSIONS -+#include -+#endif /* MODVERSIONS */ -+ -+#include -+#include -+#include -+ -+ -+#define BITS_PER_UNIT 8 -+#define SI_TYPE_SIZE (sizeof (SItype) * BITS_PER_UNIT) -+ -+ -+typedef unsigned int UQItype __attribute__ ((mode (QI))); -+typedef int SItype __attribute__ ((mode (SI))); -+typedef unsigned int USItype __attribute__ ((mode (SI))); -+typedef int DItype __attribute__ ((mode (DI))); -+typedef int word_type __attribute__ ((mode (__word__))); -+typedef unsigned int UDItype __attribute__ ((mode (DI))); -+ -+struct DIstruct {SItype low, high;}; -+ -+typedef union -+{ -+ struct DIstruct s; -+ DItype ll; -+} DIunion; -+ -+ -+/* bit divisor, dividend and result. 
dynamic precision */ -+static __inline__ uint64_t _div64_64(uint64_t dividend, uint64_t divisor) -+{ -+ uint32_t d = divisor; -+ -+ if (divisor > 0xffffffffULL) -+ { -+ unsigned int shift = fls(divisor >> 32); -+ -+ d = divisor >> shift; -+ dividend >>= shift; -+ } -+ -+ /* avoid 64 bit division if possible */ -+ if (dividend >> 32) -+ do_div(dividend, d); -+ else -+ dividend = (uint32_t) dividend / d; -+ -+ return dividend; -+} -+ -+UDItype __udivdi3 (UDItype n, UDItype d) -+{ -+ return _div64_64(n, d); -+} -+ -+DItype __divdi3 (DItype n, DItype d) -+{ -+ DItype sign = 1; -+ if (n<0) -+ { -+ sign *= -1; -+ n *= -1; -+ } -+ if (d<0) -+ { -+ sign *= -1; -+ d *= -1; -+ } -+ return sign*_div64_64((UDItype)n, (UDItype)d); -+} -+ -+UDItype __umoddi3 (UDItype n, UDItype d) -+{ -+ return n-(_div64_64(n, d)*d); -+} -+ -+#ifdef MODULE -+word_type __ucmpdi2 (DItype a, DItype b) -+{ -+ DIunion au, bu; -+ -+ au.ll = a, bu.ll = b; -+ -+ if ((USItype) au.s.high < (USItype) bu.s.high) -+ return 0; -+ else if ((USItype) au.s.high > (USItype) bu.s.high) -+ return 2; -+ if ((USItype) au.s.low < (USItype) bu.s.low) -+ return 0; -+ else if ((USItype) au.s.low > (USItype) bu.s.low) -+ return 2; -+ return 1; -+} -+#endif /* MODULE */ ---- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_fman/src/xx/xx_arm_linux.c @@ -0,0 +1,905 @@ +/* @@ -110774,6 +123909,24837 @@ Signed-off-by: Zhao Qiang + xx_Free((void*)(*((uintptr_t *)(p) - 1))); +} --- /dev/null ++++ b/drivers/staging/fsl_qbman/Kconfig +@@ -0,0 +1,228 @@ ++config FSL_SDK_DPA ++ bool "Freescale Datapath Queue and Buffer management" ++ depends on !FSL_DPAA ++ select FSL_QMAN_FQ_LOOKUP if PPC64 ++ select FSL_QMAN_FQ_LOOKUP if ARM64 ++ ++ ++menu "Freescale Datapath QMan/BMan options" ++ depends on FSL_SDK_DPA ++ ++config FSL_DPA_CHECKING ++ bool "additional driver checking" ++ default n ++ ---help--- ++ Compiles in additional checks to sanity-check the drivers and any ++ use of it by other code. Not recommended for performance. ++ ++config FSL_DPA_CAN_WAIT ++ bool ++ default y ++ ++config FSL_DPA_CAN_WAIT_SYNC ++ bool ++ default y ++ ++config FSL_DPA_PIRQ_FAST ++ bool ++ default y ++ ++config FSL_DPA_PIRQ_SLOW ++ bool ++ default y ++ ++config FSL_DPA_PORTAL_SHARE ++ bool ++ default y ++ ++config FSL_SDK_BMAN ++ bool "Freescale Buffer Manager (BMan) support" ++ default y ++ ++if FSL_SDK_BMAN ++ ++config FSL_BMAN_CONFIG ++ bool "BMan device management" ++ default y ++ ---help--- ++ If this linux image is running natively, you need this option. If this ++ linux image is running as a guest OS under the hypervisor, only one ++ guest OS ("the control plane") needs this option. ++ ++config FSL_BMAN_TEST ++ tristate "BMan self-tests" ++ default n ++ ---help--- ++ This option compiles self-test code for BMan. ++ ++config FSL_BMAN_TEST_HIGH ++ bool "BMan high-level self-test" ++ depends on FSL_BMAN_TEST ++ default y ++ ---help--- ++ This requires the presence of cpu-affine portals, and performs ++ high-level API testing with them (whichever portal(s) are affine to ++ the cpu(s) the test executes on). ++ ++config FSL_BMAN_TEST_THRESH ++ bool "BMan threshold test" ++ depends on FSL_BMAN_TEST ++ default y ++ ---help--- ++ Multi-threaded (SMP) test of BMan pool depletion. A pool is seeded ++ before multiple threads (one per cpu) create pool objects to track ++ depletion state changes. The pool is then drained to empty by a ++ "drainer" thread, and the other threads that they observe exactly ++ the depletion state changes that are expected. 
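++# Note: the depletion thresholds exercised by this test are the
++# SWDET/SWDXT and HWDET/HWDXT per-pool registers programmed through
++# bm_pool_set(); see bm_set_pool() and __generate_thresh() in
++# bman_config.c for the coefficient/exponent encoding of the raw values.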
++ ++config FSL_BMAN_DEBUGFS ++ tristate "BMan debugfs interface" ++ depends on DEBUG_FS ++ default y ++ ---help--- ++ This option compiles debugfs code for BMan. ++ ++endif # FSL_SDK_BMAN ++ ++config FSL_SDK_QMAN ++ bool "Freescale Queue Manager (QMan) support" ++ default y ++ ++if FSL_SDK_QMAN ++ ++config FSL_QMAN_POLL_LIMIT ++ int ++ default 32 ++ ++config FSL_QMAN_CONFIG ++ bool "QMan device management" ++ default y ++ ---help--- ++ If this linux image is running natively, you need this option. If this ++ linux image is running as a guest OS under the hypervisor, only one ++ guest OS ("the control plane") needs this option. ++ ++config FSL_QMAN_TEST ++ tristate "QMan self-tests" ++ default n ++ ---help--- ++ This option compiles self-test code for QMan. ++ ++config FSL_QMAN_TEST_STASH_POTATO ++ bool "QMan 'hot potato' data-stashing self-test" ++ depends on FSL_QMAN_TEST ++ default y ++ ---help--- ++ This performs a "hot potato" style test enqueuing/dequeuing a frame ++ across a series of FQs scheduled to different portals (and cpus), with ++ DQRR, data and context stashing always on. ++ ++config FSL_QMAN_TEST_HIGH ++ bool "QMan high-level self-test" ++ depends on FSL_QMAN_TEST ++ default y ++ ---help--- ++ This requires the presence of cpu-affine portals, and performs ++ high-level API testing with them (whichever portal(s) are affine to ++ the cpu(s) the test executes on). ++ ++config FSL_QMAN_DEBUGFS ++ tristate "QMan debugfs interface" ++ depends on DEBUG_FS ++ default y ++ ---help--- ++ This option compiles debugfs code for QMan. ++ ++# H/w settings that can be hard-coded for now. ++config FSL_QMAN_FQD_SZ ++ int "size of Frame Queue Descriptor region" ++ default 10 ++ ---help--- ++ This is the size of the FQD region defined as: PAGE_SIZE * (2^value) ++ ex: 10 => PAGE_SIZE * (2^10) ++ Note: Default device-trees now require minimum Kconfig setting of 10. ++ ++config FSL_QMAN_PFDR_SZ ++ int "size of the PFDR pool" ++ default 13 ++ ---help--- ++ This is the size of the PFDR pool defined as: PAGE_SIZE * (2^value) ++ ex: 13 => PAGE_SIZE * (2^13) ++ ++# Corenet initiator settings. Stash request queues are 4-deep to match cores' ++# ability to snart. Stash priority is 3, other priorities are 2. ++config FSL_QMAN_CI_SCHED_CFG_SRCCIV ++ int ++ depends on FSL_QMAN_CONFIG ++ default 4 ++config FSL_QMAN_CI_SCHED_CFG_SRQ_W ++ int ++ depends on FSL_QMAN_CONFIG ++ default 3 ++config FSL_QMAN_CI_SCHED_CFG_RW_W ++ int ++ depends on FSL_QMAN_CONFIG ++ default 2 ++config FSL_QMAN_CI_SCHED_CFG_BMAN_W ++ int ++ depends on FSL_QMAN_CONFIG ++ default 2 ++ ++# portal interrupt settings ++config FSL_QMAN_PIRQ_DQRR_ITHRESH ++ int ++ default 12 ++config FSL_QMAN_PIRQ_MR_ITHRESH ++ int ++ default 4 ++config FSL_QMAN_PIRQ_IPERIOD ++ int ++ default 100 ++ ++# 64 bit kernel support ++config FSL_QMAN_FQ_LOOKUP ++ bool ++ default n ++ ++config QMAN_CEETM_UPDATE_PERIOD ++ int "Token update period for shaping, in nanoseconds" ++ default 1000 ++ ---help--- ++ Traffic shaping works by performing token calculations (using ++ credits) on shaper instances periodically. This update period ++ sets the granularity for how often those token rate credit ++ updates are performed, and thus determines the accuracy and ++ range of traffic rates that can be configured by users. The ++ reference manual recommends a 1 microsecond period as providing ++ a good balance between granularity and range. ++ ++ Unless you know what you are doing, leave this value at its default. 
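++# With the default of 1000 ns the shaper credits are recalculated once
++# per microsecond, i.e. exactly the period the reference manual
++# recommends.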
++ ++config FSL_QMAN_INIT_TIMEOUT ++ int "timeout for qman init stage, in seconds" ++ default 10 ++ ---help--- ++ The timeout setting to quit the initialization loop for non-control ++ partition in case the control partition fails to boot-up. ++ ++endif # FSL_SDK_QMAN ++ ++config FSL_USDPAA ++ bool "Freescale USDPAA process driver" ++ depends on FSL_SDK_DPA ++ default y ++ ---help--- ++ This driver provides user-space access to kernel-managed ++ resource interfaces for USDPAA applications, on the assumption ++ that each process will open this device once. Specifically, this ++ device exposes functionality that would be awkward if exposed ++ via the portal devices - ie. this device exposes functionality ++ that is inherently process-wide rather than portal-specific. ++ This device is necessary for obtaining access to DMA memory and ++ for allocation of Qman and Bman resources. In short, if you wish ++ to use USDPAA applications, you need this. ++ ++ If unsure, say Y. ++ ++ ++endmenu +--- /dev/null ++++ b/drivers/staging/fsl_qbman/Makefile +@@ -0,0 +1,28 @@ ++subdir-ccflags-y := -Werror ++ ++# Common ++obj-$(CONFIG_FSL_SDK_DPA) += dpa_alloc.o ++obj-$(CONFIG_FSL_SDK_DPA) += qbman_driver.o ++ ++# Bman ++obj-$(CONFIG_FSL_SDK_BMAN) += bman_high.o ++obj-$(CONFIG_FSL_BMAN_CONFIG) += bman_config.o bman_driver.o ++obj-$(CONFIG_FSL_BMAN_TEST) += bman_tester.o ++obj-$(CONFIG_FSL_BMAN_DEBUGFS) += bman_debugfs_interface.o ++bman_tester-y = bman_test.o ++bman_tester-$(CONFIG_FSL_BMAN_TEST_HIGH) += bman_test_high.o ++bman_tester-$(CONFIG_FSL_BMAN_TEST_THRESH) += bman_test_thresh.o ++bman_debugfs_interface-y = bman_debugfs.o ++ ++# Qman ++obj-$(CONFIG_FSL_SDK_QMAN) += qman_high.o qman_utility.o ++obj-$(CONFIG_FSL_QMAN_CONFIG) += qman_config.o qman_driver.o ++obj-$(CONFIG_FSL_QMAN_TEST) += qman_tester.o ++qman_tester-y = qman_test.o ++qman_tester-$(CONFIG_FSL_QMAN_TEST_STASH_POTATO) += qman_test_hotpotato.o ++qman_tester-$(CONFIG_FSL_QMAN_TEST_HIGH) += qman_test_high.o ++obj-$(CONFIG_FSL_QMAN_DEBUGFS) += qman_debugfs_interface.o ++qman_debugfs_interface-y = qman_debugfs.o ++ ++# USDPAA ++obj-$(CONFIG_FSL_USDPAA) += fsl_usdpaa.o fsl_usdpaa_irq.o +--- /dev/null ++++ b/drivers/staging/fsl_qbman/bman_config.c +@@ -0,0 +1,720 @@ ++/* Copyright (c) 2009-2012 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include ++#include "bman_private.h" ++#include ++ ++/* Last updated for v00.79 of the BG */ ++ ++struct bman; ++ ++/* Register offsets */ ++#define REG_POOL_SWDET(n) (0x0000 + ((n) * 0x04)) ++#define REG_POOL_HWDET(n) (0x0100 + ((n) * 0x04)) ++#define REG_POOL_SWDXT(n) (0x0200 + ((n) * 0x04)) ++#define REG_POOL_HWDXT(n) (0x0300 + ((n) * 0x04)) ++#define REG_POOL_CONTENT(n) (0x0600 + ((n) * 0x04)) ++#define REG_FBPR_FPC 0x0800 ++#define REG_STATE_IDLE 0x960 ++#define REG_STATE_STOP 0x964 ++#define REG_ECSR 0x0a00 ++#define REG_ECIR 0x0a04 ++#define REG_EADR 0x0a08 ++#define REG_EDATA(n) (0x0a10 + ((n) * 0x04)) ++#define REG_SBEC(n) (0x0a80 + ((n) * 0x04)) ++#define REG_IP_REV_1 0x0bf8 ++#define REG_IP_REV_2 0x0bfc ++#define REG_FBPR_BARE 0x0c00 ++#define REG_FBPR_BAR 0x0c04 ++#define REG_FBPR_AR 0x0c10 ++#define REG_SRCIDR 0x0d04 ++#define REG_LIODNR 0x0d08 ++#define REG_ERR_ISR 0x0e00 /* + "enum bm_isr_reg" */ ++ ++/* Used by all error interrupt registers except 'inhibit' */ ++#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */ ++#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */ ++#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */ ++#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */ ++#define BM_EIRQ_BSCN 0x00000001 /* pool State Change Notification */ ++ ++/* BMAN_ECIR valid error bit */ ++#define PORTAL_ECSR_ERR (BM_EIRQ_IVCI) ++ ++union bman_ecir { ++ u32 ecir_raw; ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u32 __reserved1:4; ++ u32 portal_num:4; ++ u32 __reserved2:12; ++ u32 numb:4; ++ u32 __reserved3:2; ++ u32 pid:6; ++#else ++ u32 pid:6; ++ u32 __reserved3:2; ++ u32 numb:4; ++ u32 __reserved2:12; ++ u32 portal_num:4; ++ u32 __reserved1:4; ++#endif ++ } __packed info; ++}; ++ ++union bman_eadr { ++ u32 eadr_raw; ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u32 __reserved1:5; ++ u32 memid:3; ++ u32 __reserved2:14; ++ u32 eadr:10; ++#else ++ u32 eadr:10; ++ u32 __reserved2:14; ++ u32 memid:3; ++ u32 __reserved1:5; ++#endif ++ } __packed info; ++}; ++ ++struct bman_hwerr_txt { ++ u32 mask; ++ const char *txt; ++}; ++ ++#define BMAN_HWE_TXT(a, b) { .mask = BM_EIRQ_##a, .txt = b } ++ ++static const struct bman_hwerr_txt bman_hwerr_txts[] = { ++ BMAN_HWE_TXT(IVCI, "Invalid Command Verb"), ++ BMAN_HWE_TXT(FLWI, "FBPR Low Watermark"), ++ BMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"), ++ BMAN_HWE_TXT(SBEI, "Single-bit ECC Error"), ++ BMAN_HWE_TXT(BSCN, "Pool State Change Notification"), ++}; ++#define BMAN_HWE_COUNT (sizeof(bman_hwerr_txts)/sizeof(struct bman_hwerr_txt)) ++ ++struct bman_error_info_mdata { ++ u16 addr_mask; ++ u16 bits; ++ const char *txt; ++}; ++ ++#define BMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c} ++static const struct bman_error_info_mdata error_mdata[] = { ++ BMAN_ERR_MDATA(0x03FF, 192, "Stockpile memory"), ++ BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 1"), ++ BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 2"), ++}; ++#define 
BMAN_ERR_MDATA_COUNT \ ++ (sizeof(error_mdata)/sizeof(struct bman_error_info_mdata)) ++ ++/* Add this in Kconfig */ ++#define BMAN_ERRS_TO_UNENABLE (BM_EIRQ_FLWI) ++ ++/** ++ * bm_err_isr__ - Manipulate global interrupt registers ++ * @v: for accessors that write values, this is the 32-bit value ++ * ++ * Manipulates BMAN_ERR_ISR, BMAN_ERR_IER, BMAN_ERR_ISDR, BMAN_ERR_IIR. All ++ * manipulations except bm_err_isr_[un]inhibit() use 32-bit masks composed of ++ * the BM_EIRQ_*** definitions. Note that "bm_err_isr_enable_write" means ++ * "write the enable register" rather than "enable the write register"! ++ */ ++#define bm_err_isr_status_read(bm) \ ++ __bm_err_isr_read(bm, bm_isr_status) ++#define bm_err_isr_status_clear(bm, m) \ ++ __bm_err_isr_write(bm, bm_isr_status, m) ++#define bm_err_isr_enable_read(bm) \ ++ __bm_err_isr_read(bm, bm_isr_enable) ++#define bm_err_isr_enable_write(bm, v) \ ++ __bm_err_isr_write(bm, bm_isr_enable, v) ++#define bm_err_isr_disable_read(bm) \ ++ __bm_err_isr_read(bm, bm_isr_disable) ++#define bm_err_isr_disable_write(bm, v) \ ++ __bm_err_isr_write(bm, bm_isr_disable, v) ++#define bm_err_isr_inhibit(bm) \ ++ __bm_err_isr_write(bm, bm_isr_inhibit, 1) ++#define bm_err_isr_uninhibit(bm) \ ++ __bm_err_isr_write(bm, bm_isr_inhibit, 0) ++ ++/* ++ * TODO: unimplemented registers ++ * ++ * BMAN_POOLk_SDCNT, BMAN_POOLk_HDCNT, BMAN_FULT, ++ * BMAN_VLDPL, BMAN_EECC, BMAN_SBET, BMAN_EINJ ++ */ ++ ++/* Encapsulate "struct bman *" as a cast of the register space address. */ ++ ++static struct bman *bm_create(void *regs) ++{ ++ return (struct bman *)regs; ++} ++ ++static inline u32 __bm_in(struct bman *bm, u32 offset) ++{ ++ return in_be32((void *)bm + offset); ++} ++static inline void __bm_out(struct bman *bm, u32 offset, u32 val) ++{ ++ out_be32((void *)bm + offset, val); ++} ++#define bm_in(reg) __bm_in(bm, REG_##reg) ++#define bm_out(reg, val) __bm_out(bm, REG_##reg, val) ++ ++static u32 __bm_err_isr_read(struct bman *bm, enum bm_isr_reg n) ++{ ++ return __bm_in(bm, REG_ERR_ISR + (n << 2)); ++} ++ ++static void __bm_err_isr_write(struct bman *bm, enum bm_isr_reg n, u32 val) ++{ ++ __bm_out(bm, REG_ERR_ISR + (n << 2), val); ++} ++ ++static void bm_get_version(struct bman *bm, u16 *id, u8 *major, u8 *minor) ++{ ++ u32 v = bm_in(IP_REV_1); ++ *id = (v >> 16); ++ *major = (v >> 8) & 0xff; ++ *minor = v & 0xff; ++} ++ ++static u32 __generate_thresh(u32 val, int roundup) ++{ ++ u32 e = 0; /* co-efficient, exponent */ ++ int oddbit = 0; ++ while (val > 0xff) { ++ oddbit = val & 1; ++ val >>= 1; ++ e++; ++ if (roundup && oddbit) ++ val++; ++ } ++ DPA_ASSERT(e < 0x10); ++ return val | (e << 8); ++} ++ ++static void bm_set_pool(struct bman *bm, u8 pool, u32 swdet, u32 swdxt, ++ u32 hwdet, u32 hwdxt) ++{ ++ DPA_ASSERT(pool < bman_pool_max); ++ bm_out(POOL_SWDET(pool), __generate_thresh(swdet, 0)); ++ bm_out(POOL_SWDXT(pool), __generate_thresh(swdxt, 1)); ++ bm_out(POOL_HWDET(pool), __generate_thresh(hwdet, 0)); ++ bm_out(POOL_HWDXT(pool), __generate_thresh(hwdxt, 1)); ++} ++ ++static void bm_set_memory(struct bman *bm, u64 ba, int prio, u32 size) ++{ ++ u32 exp = ilog2(size); ++ /* choke if size isn't within range */ ++ DPA_ASSERT((size >= 4096) && (size <= 1073741824) && ++ is_power_of_2(size)); ++ /* choke if '[e]ba' has lower-alignment than 'size' */ ++ DPA_ASSERT(!(ba & (size - 1))); ++ bm_out(FBPR_BARE, upper_32_bits(ba)); ++ bm_out(FBPR_BAR, lower_32_bits(ba)); ++ bm_out(FBPR_AR, (prio ? 
0x40000000 : 0) | (exp - 1)); ++} ++ ++/*****************/ ++/* Config driver */ ++/*****************/ ++ ++/* TODO: Kconfig these? */ ++#define DEFAULT_FBPR_SZ (PAGE_SIZE << 12) ++ ++/* We support only one of these. */ ++static struct bman *bm; ++static struct device_node *bm_node; ++ ++/* And this state belongs to 'bm'. It is set during fsl_bman_init(), but used ++ * during bman_init_ccsr(). */ ++static dma_addr_t fbpr_a; ++static size_t fbpr_sz = DEFAULT_FBPR_SZ; ++ ++static int bman_fbpr(struct reserved_mem *rmem) ++{ ++ fbpr_a = rmem->base; ++ fbpr_sz = rmem->size; ++ ++ WARN_ON(!(fbpr_a && fbpr_sz)); ++ ++ return 0; ++} ++RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr); ++ ++static int __init fsl_bman_init(struct device_node *node) ++{ ++ struct resource res; ++ u32 __iomem *regs; ++ const char *s; ++ int ret, standby = 0; ++ u16 id; ++ u8 major, minor; ++ ++ ret = of_address_to_resource(node, 0, &res); ++ if (ret) { ++ pr_err("Can't get %s property 'reg'\n", ++ node->full_name); ++ return ret; ++ } ++ s = of_get_property(node, "fsl,hv-claimable", &ret); ++ if (s && !strcmp(s, "standby")) ++ standby = 1; ++ /* Global configuration */ ++ regs = ioremap(res.start, res.end - res.start + 1); ++ bm = bm_create(regs); ++ BUG_ON(!bm); ++ bm_node = node; ++ bm_get_version(bm, &id, &major, &minor); ++ pr_info("Bman ver:%04x,%02x,%02x\n", id, major, minor); ++ if ((major == 1) && (minor == 0)) { ++ bman_ip_rev = BMAN_REV10; ++ bman_pool_max = 64; ++ } else if ((major == 2) && (minor == 0)) { ++ bman_ip_rev = BMAN_REV20; ++ bman_pool_max = 8; ++ } else if ((major == 2) && (minor == 1)) { ++ bman_ip_rev = BMAN_REV21; ++ bman_pool_max = 64; ++ } else { ++ pr_warn("unknown Bman version, default to rev1.0\n"); ++ } ++ ++ if (standby) { ++ pr_info(" -> in standby mode\n"); ++ return 0; ++ } ++ return 0; ++} ++ ++int bman_have_ccsr(void) ++{ ++ return bm ? 
1 : 0; ++} ++ ++int bm_pool_set(u32 bpid, const u32 *thresholds) ++{ ++ if (!bm) ++ return -ENODEV; ++ bm_set_pool(bm, bpid, thresholds[0], ++ thresholds[1], thresholds[2], ++ thresholds[3]); ++ return 0; ++} ++EXPORT_SYMBOL(bm_pool_set); ++ ++__init int bman_init_early(void) ++{ ++ struct device_node *dn; ++ int ret; ++ ++ for_each_compatible_node(dn, NULL, "fsl,bman") { ++ if (bm) ++ pr_err("%s: only one 'fsl,bman' allowed\n", ++ dn->full_name); ++ else { ++ if (!of_device_is_available(dn)) ++ continue; ++ ++ ret = fsl_bman_init(dn); ++ BUG_ON(ret); ++ } ++ } ++ return 0; ++} ++postcore_initcall_sync(bman_init_early); ++ ++ ++static void log_edata_bits(u32 bit_count) ++{ ++ u32 i, j, mask = 0xffffffff; ++ ++ pr_warn("Bman ErrInt, EDATA:\n"); ++ i = bit_count/32; ++ if (bit_count%32) { ++ i++; ++ mask = ~(mask << bit_count%32); ++ } ++ j = 16-i; ++ pr_warn(" 0x%08x\n", bm_in(EDATA(j)) & mask); ++ j++; ++ for (; j < 16; j++) ++ pr_warn(" 0x%08x\n", bm_in(EDATA(j))); ++} ++ ++static void log_additional_error_info(u32 isr_val, u32 ecsr_val) ++{ ++ union bman_ecir ecir_val; ++ union bman_eadr eadr_val; ++ ++ ecir_val.ecir_raw = bm_in(ECIR); ++ /* Is portal info valid */ ++ if (ecsr_val & PORTAL_ECSR_ERR) { ++ pr_warn("Bman ErrInt: SWP id %d, numb %d, pid %d\n", ++ ecir_val.info.portal_num, ecir_val.info.numb, ++ ecir_val.info.pid); ++ } ++ if (ecsr_val & (BM_EIRQ_SBEI|BM_EIRQ_MBEI)) { ++ eadr_val.eadr_raw = bm_in(EADR); ++ pr_warn("Bman ErrInt: EADR Memory: %s, 0x%x\n", ++ error_mdata[eadr_val.info.memid].txt, ++ error_mdata[eadr_val.info.memid].addr_mask ++ & eadr_val.info.eadr); ++ log_edata_bits(error_mdata[eadr_val.info.memid].bits); ++ } ++} ++ ++/* Bman interrupt handler */ ++static irqreturn_t bman_isr(int irq, void *ptr) ++{ ++ u32 isr_val, ier_val, ecsr_val, isr_mask, i; ++ ++ ier_val = bm_err_isr_enable_read(bm); ++ isr_val = bm_err_isr_status_read(bm); ++ ecsr_val = bm_in(ECSR); ++ isr_mask = isr_val & ier_val; ++ ++ if (!isr_mask) ++ return IRQ_NONE; ++ for (i = 0; i < BMAN_HWE_COUNT; i++) { ++ if (bman_hwerr_txts[i].mask & isr_mask) { ++ pr_warn("Bman ErrInt: %s\n", bman_hwerr_txts[i].txt); ++ if (bman_hwerr_txts[i].mask & ecsr_val) { ++ log_additional_error_info(isr_mask, ecsr_val); ++ /* Re-arm error capture registers */ ++ bm_out(ECSR, ecsr_val); ++ } ++ if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_UNENABLE) { ++ pr_devel("Bman un-enabling error 0x%x\n", ++ bman_hwerr_txts[i].mask); ++ ier_val &= ~bman_hwerr_txts[i].mask; ++ bm_err_isr_enable_write(bm, ier_val); ++ } ++ } ++ } ++ bm_err_isr_status_clear(bm, isr_val); ++ return IRQ_HANDLED; ++} ++ ++static int __bind_irq(void) ++{ ++ int ret, err_irq; ++ ++ err_irq = of_irq_to_resource(bm_node, 0, NULL); ++ if (err_irq == 0) { ++ pr_info("Can't get %s property '%s'\n", bm_node->full_name, ++ "interrupts"); ++ return -ENODEV; ++ } ++ ret = request_irq(err_irq, bman_isr, IRQF_SHARED, "bman-err", bm_node); ++ if (ret) { ++ pr_err("request_irq() failed %d for '%s'\n", ret, ++ bm_node->full_name); ++ return -ENODEV; ++ } ++ /* Disable Buffer Pool State Change */ ++ bm_err_isr_disable_write(bm, BM_EIRQ_BSCN); ++ /* Write-to-clear any stale bits, (eg. starvation being asserted prior ++ * to resource allocation during driver init). 
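++ * (The status register is write-1-to-clear, hence the 0xffffffff
++ * below; every error source is then armed via the enable register.)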
*/ ++ bm_err_isr_status_clear(bm, 0xffffffff); ++ /* Enable Error Interrupts */ ++ bm_err_isr_enable_write(bm, 0xffffffff); ++ return 0; ++} ++ ++int bman_init_ccsr(struct device_node *node) ++{ ++ int ret; ++ if (!bman_have_ccsr()) ++ return 0; ++ if (node != bm_node) ++ return -EINVAL; ++ /* FBPR memory */ ++ bm_set_memory(bm, fbpr_a, 0, fbpr_sz); ++ pr_info("bman-fbpr addr %pad size 0x%zx\n", &fbpr_a, fbpr_sz); ++ ++ ret = __bind_irq(); ++ if (ret) ++ return ret; ++ return 0; ++} ++ ++u32 bm_pool_free_buffers(u32 bpid) ++{ ++ return bm_in(POOL_CONTENT(bpid)); ++} ++ ++#ifdef CONFIG_SYSFS ++ ++#define DRV_NAME "fsl-bman" ++#define SBEC_MAX_ID 1 ++#define SBEC_MIN_ID 0 ++ ++static ssize_t show_fbpr_fpc(struct device *dev, ++ struct device_attribute *dev_attr, char *buf) ++{ ++ return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(FBPR_FPC)); ++}; ++ ++static ssize_t show_pool_count(struct device *dev, ++ struct device_attribute *dev_attr, char *buf) ++{ ++ u32 data; ++ int i; ++ ++ if (!sscanf(dev_attr->attr.name, "%d", &i) || (i >= bman_pool_max)) ++ return -EINVAL; ++ data = bm_in(POOL_CONTENT(i)); ++ return snprintf(buf, PAGE_SIZE, "%d\n", data); ++}; ++ ++static ssize_t show_err_isr(struct device *dev, ++ struct device_attribute *dev_attr, char *buf) ++{ ++ return snprintf(buf, PAGE_SIZE, "0x%08x\n", bm_in(ERR_ISR)); ++}; ++ ++static ssize_t show_sbec(struct device *dev, ++ struct device_attribute *dev_attr, char *buf) ++{ ++ int i; ++ ++ if (!sscanf(dev_attr->attr.name, "sbec_%d", &i)) ++ return -EINVAL; ++ if (i < SBEC_MIN_ID || i > SBEC_MAX_ID) ++ return -EINVAL; ++ return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(SBEC(i))); ++}; ++ ++static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL); ++static DEVICE_ATTR(fbpr_fpc, S_IRUSR, show_fbpr_fpc, NULL); ++ ++/* Didn't use DEVICE_ATTR as 64 of this would be required. ++ * Initialize them when needed. 
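++ * of_fsl_bman_probe() below allocates one device_attribute per buffer
++ * pool (bman_pool_max of them) and names each attribute after its pool
++ * index.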
*/ ++static char *name_attrs_pool_count; /* "xx" + null-terminator */ ++static struct device_attribute *dev_attr_buffer_pool_count; ++ ++static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL); ++static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL); ++ ++static struct attribute *bman_dev_attributes[] = { ++ &dev_attr_fbpr_fpc.attr, ++ &dev_attr_err_isr.attr, ++ NULL ++}; ++ ++static struct attribute *bman_dev_ecr_attributes[] = { ++ &dev_attr_sbec_0.attr, ++ &dev_attr_sbec_1.attr, ++ NULL ++}; ++ ++static struct attribute **bman_dev_pool_count_attributes; ++ ++ ++/* root level */ ++static const struct attribute_group bman_dev_attr_grp = { ++ .name = NULL, ++ .attrs = bman_dev_attributes ++}; ++static const struct attribute_group bman_dev_ecr_grp = { ++ .name = "error_capture", ++ .attrs = bman_dev_ecr_attributes ++}; ++static struct attribute_group bman_dev_pool_countent_grp = { ++ .name = "pool_count", ++}; ++ ++static int of_fsl_bman_remove(struct platform_device *ofdev) ++{ ++ sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp); ++ return 0; ++}; ++ ++static int of_fsl_bman_probe(struct platform_device *ofdev) ++{ ++ int ret, i; ++ ++ ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_attr_grp); ++ if (ret) ++ goto done; ++ ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_ecr_grp); ++ if (ret) ++ goto del_group_0; ++ ++ name_attrs_pool_count = kmalloc(sizeof(char) * bman_pool_max * 3, ++ GFP_KERNEL); ++ if (!name_attrs_pool_count) { ++ pr_err("Can't alloc name_attrs_pool_count\n"); ++ goto del_group_1; ++ } ++ ++ dev_attr_buffer_pool_count = kmalloc(sizeof(struct device_attribute) * ++ bman_pool_max, GFP_KERNEL); ++ if (!dev_attr_buffer_pool_count) { ++ pr_err("Can't alloc dev_attr-buffer_pool_count\n"); ++ goto del_group_2; ++ } ++ ++ bman_dev_pool_count_attributes = kmalloc(sizeof(struct attribute *) * ++ (bman_pool_max + 1), GFP_KERNEL); ++ if (!bman_dev_pool_count_attributes) { ++ pr_err("can't alloc bman_dev_pool_count_attributes\n"); ++ goto del_group_3; ++ } ++ ++ for (i = 0; i < bman_pool_max; i++) { ++ ret = scnprintf((name_attrs_pool_count + i * 3), 3, "%d", i); ++ if (!ret) ++ goto del_group_4; ++ dev_attr_buffer_pool_count[i].attr.name = ++ (name_attrs_pool_count + i * 3); ++ dev_attr_buffer_pool_count[i].attr.mode = S_IRUSR; ++ dev_attr_buffer_pool_count[i].show = show_pool_count; ++ bman_dev_pool_count_attributes[i] = ++ &dev_attr_buffer_pool_count[i].attr; ++ sysfs_attr_init(bman_dev_pool_count_attributes[i]); ++ } ++ bman_dev_pool_count_attributes[bman_pool_max] = NULL; ++ ++ bman_dev_pool_countent_grp.attrs = bman_dev_pool_count_attributes; ++ ++ ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_pool_countent_grp); ++ if (ret) ++ goto del_group_4; ++ ++ goto done; ++ ++del_group_4: ++ kfree(bman_dev_pool_count_attributes); ++del_group_3: ++ kfree(dev_attr_buffer_pool_count); ++del_group_2: ++ kfree(name_attrs_pool_count); ++del_group_1: ++ sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_ecr_grp); ++del_group_0: ++ sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp); ++done: ++ if (ret) ++ dev_err(&ofdev->dev, ++ "Cannot create dev attributes ret=%d\n", ret); ++ return ret; ++}; ++ ++static struct of_device_id of_fsl_bman_ids[] = { ++ { ++ .compatible = "fsl,bman", ++ }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, of_fsl_bman_ids); ++ ++#ifdef CONFIG_SUSPEND ++static u32 saved_isdr; ++ ++static int bman_pm_suspend_noirq(struct device *dev) ++{ ++ uint32_t idle_state; ++ ++ suspend_unused_bportal(); ++ /* save isdr, disable all, clear isr */ ++ saved_isdr = 
bm_err_isr_disable_read(bm); ++ bm_err_isr_disable_write(bm, 0xffffffff); ++ bm_err_isr_status_clear(bm, 0xffffffff); ++ ++ if (bman_ip_rev < BMAN_REV21) { ++#ifdef CONFIG_PM_DEBUG ++ pr_info("Bman version doesn't have STATE_IDLE\n"); ++#endif ++ return 0; ++ } ++ idle_state = bm_in(STATE_IDLE); ++ if (!(idle_state & 0x1)) { ++ pr_err("Bman not idle 0x%x aborting\n", idle_state); ++ bm_err_isr_disable_write(bm, saved_isdr); ++ resume_unused_bportal(); ++ return -EBUSY; ++ } ++#ifdef CONFIG_PM_DEBUG ++ pr_info("Bman suspend code, IDLE_STAT = 0x%x\n", idle_state); ++#endif ++ return 0; ++} ++ ++static int bman_pm_resume_noirq(struct device *dev) ++{ ++ /* restore isdr */ ++ bm_err_isr_disable_write(bm, saved_isdr); ++ resume_unused_bportal(); ++ return 0; ++} ++#else ++#define bman_pm_suspend_noirq NULL ++#define bman_pm_resume_noirq NULL ++#endif ++ ++static const struct dev_pm_ops bman_pm_ops = { ++ .suspend_noirq = bman_pm_suspend_noirq, ++ .resume_noirq = bman_pm_resume_noirq, ++}; ++ ++static struct platform_driver of_fsl_bman_driver = { ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = DRV_NAME, ++ .of_match_table = of_fsl_bman_ids, ++ .pm = &bman_pm_ops, ++ }, ++ .probe = of_fsl_bman_probe, ++ .remove = of_fsl_bman_remove, ++}; ++ ++static int bman_ctrl_init(void) ++{ ++ return platform_driver_register(&of_fsl_bman_driver); ++} ++ ++static void bman_ctrl_exit(void) ++{ ++ platform_driver_unregister(&of_fsl_bman_driver); ++} ++ ++module_init(bman_ctrl_init); ++module_exit(bman_ctrl_exit); ++ ++#endif /* CONFIG_SYSFS */ +--- /dev/null ++++ b/drivers/staging/fsl_qbman/bman_debugfs.c +@@ -0,0 +1,119 @@ ++/* Copyright 2010-2011 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
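++ *
++ * bman_debugfs.c: exposes the per-pool availability/depletion state
++ * through a single "bman/query_bp_state" debugfs file.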
++ */ ++#include ++#include ++#include ++#include ++#include ++ ++static struct dentry *dfs_root; /* debugfs root directory */ ++ ++/******************************************************************************* ++ * Query Buffer Pool State ++ ******************************************************************************/ ++static int query_bp_state_show(struct seq_file *file, void *offset) ++{ ++ int ret; ++ struct bm_pool_state state; ++ int i, j; ++ u32 mask; ++ ++ memset(&state, 0, sizeof(struct bm_pool_state)); ++ ret = bman_query_pools(&state); ++ if (ret) { ++ seq_printf(file, "Error %d\n", ret); ++ return 0; ++ } ++ seq_puts(file, "bp_id free_buffers_avail bp_depleted\n"); ++ for (i = 0; i < 2; i++) { ++ mask = 0x80000000; ++ for (j = 0; j < 32; j++) { ++ seq_printf(file, ++ " %-2u %-3s %-3s\n", ++ (i*32)+j, ++ (state.as.state.__state[i] & mask) ? "no" : "yes", ++ (state.ds.state.__state[i] & mask) ? "yes" : "no"); ++ mask >>= 1; ++ } ++ } ++ return 0; ++} ++ ++static int query_bp_state_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, query_bp_state_show, NULL); ++} ++ ++static const struct file_operations query_bp_state_fops = { ++ .owner = THIS_MODULE, ++ .open = query_bp_state_open, ++ .read = seq_read, ++ .release = single_release, ++}; ++ ++static int __init bman_debugfs_module_init(void) ++{ ++ int ret = 0; ++ struct dentry *d; ++ ++ dfs_root = debugfs_create_dir("bman", NULL); ++ ++ if (dfs_root == NULL) { ++ ret = -ENOMEM; ++ pr_err("Cannot create bman debugfs dir\n"); ++ goto _return; ++ } ++ d = debugfs_create_file("query_bp_state", ++ S_IRUGO, ++ dfs_root, ++ NULL, ++ &query_bp_state_fops); ++ if (d == NULL) { ++ ret = -ENOMEM; ++ pr_err("Cannot create query_bp_state\n"); ++ goto _return; ++ } ++ return 0; ++ ++_return: ++ debugfs_remove_recursive(dfs_root); ++ return ret; ++} ++ ++static void __exit bman_debugfs_module_exit(void) ++{ ++ debugfs_remove_recursive(dfs_root); ++} ++ ++ ++module_init(bman_debugfs_module_init); ++module_exit(bman_debugfs_module_exit); ++MODULE_LICENSE("Dual BSD/GPL"); +--- /dev/null ++++ b/drivers/staging/fsl_qbman/bman_driver.c +@@ -0,0 +1,575 @@ ++/* Copyright 2008-2012 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++#include "bman_low.h" ++#ifdef CONFIG_HOTPLUG_CPU ++#include ++#endif ++/* ++ * Global variables of the max portal/pool number this bman version supported ++ */ ++u16 bman_ip_rev; ++EXPORT_SYMBOL(bman_ip_rev); ++u16 bman_pool_max; ++EXPORT_SYMBOL(bman_pool_max); ++static u16 bman_portal_max; ++ ++/* After initialising cpus that own shared portal configs, we cache the ++ * resulting portals (ie. not just the configs) in this array. Then we ++ * initialise slave cpus that don't have their own portals, redirecting them to ++ * portals from this cache in a round-robin assignment. */ ++static struct bman_portal *shared_portals[NR_CPUS]; ++static int num_shared_portals; ++static int shared_portals_idx; ++static LIST_HEAD(unused_pcfgs); ++static DEFINE_SPINLOCK(unused_pcfgs_lock); ++static void *affine_bportals[NR_CPUS]; ++ ++static int __init fsl_bpool_init(struct device_node *node) ++{ ++ int ret; ++ u32 *thresh, *bpid = (u32 *)of_get_property(node, "fsl,bpid", &ret); ++ if (!bpid || (ret != 4)) { ++ pr_err("Can't get %s property 'fsl,bpid'\n", node->full_name); ++ return -ENODEV; ++ } ++ thresh = (u32 *)of_get_property(node, "fsl,bpool-thresholds", &ret); ++ if (thresh) { ++ if (ret != 16) { ++ pr_err("Invalid %s property '%s'\n", ++ node->full_name, "fsl,bpool-thresholds"); ++ return -ENODEV; ++ } ++ } ++ if (thresh) { ++#ifdef CONFIG_FSL_BMAN_CONFIG ++ ret = bm_pool_set(be32_to_cpu(*bpid), thresh); ++ if (ret) ++ pr_err("No CCSR node for %s property '%s'\n", ++ node->full_name, "fsl,bpool-thresholds"); ++ return ret; ++#else ++ pr_err("Ignoring %s property '%s', no CCSR support\n", ++ node->full_name, "fsl,bpool-thresholds"); ++#endif ++ } ++ return 0; ++} ++ ++static int __init fsl_bpid_range_init(struct device_node *node) ++{ ++ int ret; ++ u32 *range = (u32 *)of_get_property(node, "fsl,bpid-range", &ret); ++ if (!range) { ++ pr_err("No 'fsl,bpid-range' property in node %s\n", ++ node->full_name); ++ return -EINVAL; ++ } ++ if (ret != 8) { ++ pr_err("'fsl,bpid-range' is not a 2-cell range in node %s\n", ++ node->full_name); ++ return -EINVAL; ++ } ++ bman_seed_bpid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1])); ++ pr_info("Bman: BPID allocator includes range %d:%d\n", ++ be32_to_cpu(range[0]), be32_to_cpu(range[1])); ++ return 0; ++} ++ ++static struct bm_portal_config * __init parse_pcfg(struct device_node *node) ++{ ++ struct bm_portal_config *pcfg; ++ const u32 *index; ++ int irq, ret; ++ resource_size_t len; ++ ++ pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL); ++ if (!pcfg) { ++ pr_err("can't allocate portal config"); ++ return NULL; ++ } ++ ++ if (of_device_is_compatible(node, "fsl,bman-portal-1.0") || ++ of_device_is_compatible(node, "fsl,bman-portal-1.0.0")) { ++ bman_ip_rev = BMAN_REV10; ++ bman_pool_max = 64; ++ bman_portal_max = 10; ++ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.0") || ++ of_device_is_compatible(node, "fsl,bman-portal-2.0.8")) { ++ bman_ip_rev = BMAN_REV20; ++ bman_pool_max = 8; ++ bman_portal_max = 3; ++ } else if 
(of_device_is_compatible(node, "fsl,bman-portal-2.1.0")) { ++ bman_ip_rev = BMAN_REV21; ++ bman_pool_max = 64; ++ bman_portal_max = 50; ++ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.1")) { ++ bman_ip_rev = BMAN_REV21; ++ bman_pool_max = 64; ++ bman_portal_max = 25; ++ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.2")) { ++ bman_ip_rev = BMAN_REV21; ++ bman_pool_max = 64; ++ bman_portal_max = 18; ++ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.3")) { ++ bman_ip_rev = BMAN_REV21; ++ bman_pool_max = 64; ++ bman_portal_max = 10; ++ } else { ++ pr_warn("unknown BMan version in portal node," ++ "default to rev1.0\n"); ++ bman_ip_rev = BMAN_REV10; ++ bman_pool_max = 64; ++ bman_portal_max = 10; ++ } ++ ++ ret = of_address_to_resource(node, DPA_PORTAL_CE, ++ &pcfg->addr_phys[DPA_PORTAL_CE]); ++ if (ret) { ++ pr_err("Can't get %s property 'reg::CE'\n", node->full_name); ++ goto err; ++ } ++ ret = of_address_to_resource(node, DPA_PORTAL_CI, ++ &pcfg->addr_phys[DPA_PORTAL_CI]); ++ if (ret) { ++ pr_err("Can't get %s property 'reg::CI'\n", node->full_name); ++ goto err; ++ } ++ ++ index = of_get_property(node, "cell-index", &ret); ++ if (!index || (ret != 4)) { ++ pr_err("Can't get %s property '%s'\n", node->full_name, ++ "cell-index"); ++ goto err; ++ } ++ if (be32_to_cpu(*index) >= bman_portal_max) { ++ pr_err("BMan portal cell index %d out of range, max %d\n", ++ be32_to_cpu(*index), bman_portal_max); ++ goto err; ++ } ++ ++ pcfg->public_cfg.cpu = -1; ++ ++ irq = irq_of_parse_and_map(node, 0); ++ if (irq == 0) { ++ pr_err("Can't get %s property 'interrupts'\n", node->full_name); ++ goto err; ++ } ++ pcfg->public_cfg.irq = irq; ++ pcfg->public_cfg.index = be32_to_cpu(*index); ++ bman_depletion_fill(&pcfg->public_cfg.mask); ++ ++ len = resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]); ++ if (len != (unsigned long)len) ++ goto err; ++ ++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) ++ pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_cache_ns( ++ pcfg->addr_phys[DPA_PORTAL_CE].start, ++ resource_size(&pcfg->addr_phys[DPA_PORTAL_CE])); ++ pcfg->addr_virt[DPA_PORTAL_CI] = ioremap( ++ pcfg->addr_phys[DPA_PORTAL_CI].start, ++ resource_size(&pcfg->addr_phys[DPA_PORTAL_CI])); ++ ++#else ++ pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot( ++ pcfg->addr_phys[DPA_PORTAL_CE].start, ++ (unsigned long)len, ++ 0); ++ pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot( ++ pcfg->addr_phys[DPA_PORTAL_CI].start, ++ resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]), ++ _PAGE_GUARDED | _PAGE_NO_CACHE); ++#endif ++ /* disable bp depletion */ ++ __raw_writel(0x0, pcfg->addr_virt[DPA_PORTAL_CI] + BM_REG_SCN(0)); ++ __raw_writel(0x0, pcfg->addr_virt[DPA_PORTAL_CI] + BM_REG_SCN(1)); ++ return pcfg; ++err: ++ kfree(pcfg); ++ return NULL; ++} ++ ++static struct bm_portal_config *get_pcfg(struct list_head *list) ++{ ++ struct bm_portal_config *pcfg; ++ if (list_empty(list)) ++ return NULL; ++ pcfg = list_entry(list->prev, struct bm_portal_config, list); ++ list_del(&pcfg->list); ++ return pcfg; ++} ++ ++static struct bm_portal_config *get_pcfg_idx(struct list_head *list, ++ uint32_t idx) ++{ ++ struct bm_portal_config *pcfg; ++ if (list_empty(list)) ++ return NULL; ++ list_for_each_entry(pcfg, list, list) { ++ if (pcfg->public_cfg.index == idx) { ++ list_del(&pcfg->list); ++ return pcfg; ++ } ++ } ++ return NULL; ++} ++ ++struct bm_portal_config *bm_get_unused_portal(void) ++{ ++ return bm_get_unused_portal_idx(QBMAN_ANY_PORTAL_IDX); ++} ++ ++struct bm_portal_config 
*bm_get_unused_portal_idx(uint32_t idx) ++{ ++ struct bm_portal_config *ret; ++ spin_lock(&unused_pcfgs_lock); ++ if (idx == QBMAN_ANY_PORTAL_IDX) ++ ret = get_pcfg(&unused_pcfgs); ++ else ++ ret = get_pcfg_idx(&unused_pcfgs, idx); ++ spin_unlock(&unused_pcfgs_lock); ++ return ret; ++} ++ ++void bm_put_unused_portal(struct bm_portal_config *pcfg) ++{ ++ spin_lock(&unused_pcfgs_lock); ++ list_add(&pcfg->list, &unused_pcfgs); ++ spin_unlock(&unused_pcfgs_lock); ++} ++ ++static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg) ++{ ++ struct bman_portal *p; ++ p = bman_create_affine_portal(pcfg); ++ if (p) { ++#ifdef CONFIG_FSL_DPA_PIRQ_SLOW ++ bman_p_irqsource_add(p, BM_PIRQ_RCRI | BM_PIRQ_BSCN); ++#endif ++ pr_info("Bman portal %sinitialised, cpu %d\n", ++ pcfg->public_cfg.is_shared ? "(shared) " : "", ++ pcfg->public_cfg.cpu); ++ affine_bportals[pcfg->public_cfg.cpu] = p; ++ } else ++ pr_crit("Bman portal failure on cpu %d\n", ++ pcfg->public_cfg.cpu); ++ return p; ++} ++ ++static void init_slave(int cpu) ++{ ++ struct bman_portal *p; ++ p = bman_create_affine_slave(shared_portals[shared_portals_idx++], cpu); ++ if (!p) ++ pr_err("Bman slave portal failure on cpu %d\n", cpu); ++ else ++ pr_info("Bman portal %sinitialised, cpu %d\n", "(slave) ", cpu); ++ if (shared_portals_idx >= num_shared_portals) ++ shared_portals_idx = 0; ++ affine_bportals[cpu] = p; ++} ++ ++/* Bootarg "bportals=[...]" has the same syntax as "qportals=", and so the ++ * parsing is in dpa_sys.h. The syntax is a comma-separated list of indexes ++ * and/or ranges of indexes, with each being optionally prefixed by "s" to ++ * explicitly mark it or them for sharing. ++ * Eg; ++ * bportals=s0,1-3,s4 ++ * means that cpus 1,2,3 get "unshared" portals, cpus 0 and 4 get "shared" ++ * portals, and any remaining cpus share the portals that are assigned to cpus 0 ++ * or 4, selected in a round-robin fashion. (In this example, cpu 5 would share ++ * cpu 0's portal, cpu 6 would share cpu4's portal, and cpu 7 would share cpu ++ * 0's portal.) 
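++ * The "s" prefix may also be applied to a range, eg. "bportals=s0-3"
++ * marks cpus 0..3 for sharing; cpus left out of the list never get a
++ * portal of their own and are always slaved to a shared one.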
*/ ++static struct cpumask want_unshared __initdata; /* cpus requested without "s" */ ++static struct cpumask want_shared __initdata; /* cpus requested with "s" */ ++ ++static int __init parse_bportals(char *str) ++{ ++ return parse_portals_bootarg(str, &want_shared, &want_unshared, ++ "bportals"); ++} ++__setup("bportals=", parse_bportals); ++ ++static int bman_offline_cpu(unsigned int cpu) ++{ ++ struct bman_portal *p; ++ const struct bm_portal_config *pcfg; ++ p = (struct bman_portal *)affine_bportals[cpu]; ++ if (p) { ++ pcfg = bman_get_bm_portal_config(p); ++ if (pcfg) ++ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0)); ++ } ++ return 0; ++} ++ ++#ifdef CONFIG_HOTPLUG_CPU ++static int bman_online_cpu(unsigned int cpu) ++{ ++ struct bman_portal *p; ++ const struct bm_portal_config *pcfg; ++ p = (struct bman_portal *)affine_bportals[cpu]; ++ if (p) { ++ pcfg = bman_get_bm_portal_config(p); ++ if (pcfg) ++ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu)); ++ } ++ return 0; ++} ++static int bman_hotplug_cpu_callback(struct notifier_block *nfb, ++ unsigned long action, void *hcpu) ++{ ++ unsigned int cpu = (unsigned long)hcpu; ++ ++ switch (action) { ++ case CPU_ONLINE: ++ case CPU_ONLINE_FROZEN: ++ bman_online_cpu(cpu); ++ break; ++ case CPU_DOWN_PREPARE: ++ case CPU_DOWN_PREPARE_FROZEN: ++ bman_offline_cpu(cpu); ++ default: ++ break; ++ } ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block bman_hotplug_cpu_notifier = { ++ .notifier_call = bman_hotplug_cpu_callback, ++}; ++#endif /* CONFIG_HOTPLUG_CPU */ ++ ++/* Initialise the Bman driver. The meat of this function deals with portals. The ++ * following describes the flow of portal-handling, the code "steps" refer to ++ * this description; ++ * 1. Portal configs are parsed from the device-tree into 'unused_pcfgs', with ++ * ::cpu==-1. Regions and interrupts are mapped (but interrupts are not ++ * bound). ++ * 2. The "want_shared" and "want_unshared" lists (as filled by the ++ * "bportals=[...]" bootarg) are processed, allocating portals and assigning ++ * them to cpus, placing them in the relevant list and setting ::cpu as ++ * appropriate. If no "bportals" bootarg was present, the defaut is to try to ++ * assign portals to all online cpus at the time of driver initialisation. ++ * Any failure to allocate portals (when parsing the "want" lists or when ++ * using default behaviour) will be silently tolerated (the "fixup" logic in ++ * step 3 will determine what happens in this case). ++ * 3. Do fixups relative to cpu_online_mask(). If no portals are marked for ++ * sharing and sharing is required (because not all cpus have been assigned ++ * portals), then one portal will marked for sharing. Conversely if no ++ * sharing is required, any portals marked for sharing will not be shared. It ++ * may be that sharing occurs when it wasn't expected, if portal allocation ++ * failed to honour all the requested assignments (including the default ++ * assignments if no bootarg is present). ++ * 4. Unshared portals are initialised on their respective cpus. ++ * 5. Shared portals are initialised on their respective cpus. ++ * 6. Each remaining cpu is initialised to slave to one of the shared portals, ++ * which are selected in a round-robin fashion. ++ * Any portal configs left unused are available for USDPAA allocation. 
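++ *
++ * The net effect is that every online cpu should end up with an affine
++ * portal: exclusive (step 4), shared (step 5), or a round-robin slave
++ * reference to a shared portal (step 6). Whatever configs remain on
++ * 'unused_pcfgs' stay available for USDPAA.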
++ */ ++__init int bman_init(void) ++{ ++ struct cpumask slave_cpus; ++ struct cpumask unshared_cpus = *cpu_none_mask; ++ struct cpumask shared_cpus = *cpu_none_mask; ++ LIST_HEAD(unshared_pcfgs); ++ LIST_HEAD(shared_pcfgs); ++ struct device_node *dn; ++ struct bm_portal_config *pcfg; ++ struct bman_portal *p; ++ int cpu, ret; ++ struct cpumask offline_cpus; ++ ++ /* Initialise the Bman (CCSR) device */ ++ for_each_compatible_node(dn, NULL, "fsl,bman") { ++ if (!bman_init_ccsr(dn)) ++ pr_info("Bman err interrupt handler present\n"); ++ else ++ pr_err("Bman CCSR setup failed\n"); ++ } ++ /* Initialise any declared buffer pools */ ++ for_each_compatible_node(dn, NULL, "fsl,bpool") { ++ ret = fsl_bpool_init(dn); ++ if (ret) ++ return ret; ++ } ++ /* Step 1. See comments at the beginning of the file. */ ++ for_each_compatible_node(dn, NULL, "fsl,bman-portal") { ++ if (!of_device_is_available(dn)) ++ continue; ++ pcfg = parse_pcfg(dn); ++ if (pcfg) ++ list_add_tail(&pcfg->list, &unused_pcfgs); ++ } ++ /* Step 2. */ ++ for_each_possible_cpu(cpu) { ++ if (cpumask_test_cpu(cpu, &want_shared)) { ++ pcfg = get_pcfg(&unused_pcfgs); ++ if (!pcfg) ++ break; ++ pcfg->public_cfg.cpu = cpu; ++ list_add_tail(&pcfg->list, &shared_pcfgs); ++ cpumask_set_cpu(cpu, &shared_cpus); ++ } ++ if (cpumask_test_cpu(cpu, &want_unshared)) { ++ if (cpumask_test_cpu(cpu, &shared_cpus)) ++ continue; ++ pcfg = get_pcfg(&unused_pcfgs); ++ if (!pcfg) ++ break; ++ pcfg->public_cfg.cpu = cpu; ++ list_add_tail(&pcfg->list, &unshared_pcfgs); ++ cpumask_set_cpu(cpu, &unshared_cpus); ++ } ++ } ++ if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) { ++ /* Default, give an unshared portal to each online cpu */ ++ for_each_online_cpu(cpu) { ++ pcfg = get_pcfg(&unused_pcfgs); ++ if (!pcfg) ++ break; ++ pcfg->public_cfg.cpu = cpu; ++ list_add_tail(&pcfg->list, &unshared_pcfgs); ++ cpumask_set_cpu(cpu, &unshared_cpus); ++ } ++ } ++ /* Step 3. */ ++ cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus); ++ cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus); ++ if (cpumask_empty(&slave_cpus)) { ++ /* No sharing required */ ++ if (!list_empty(&shared_pcfgs)) { ++ /* Migrate "shared" to "unshared" */ ++ cpumask_or(&unshared_cpus, &unshared_cpus, ++ &shared_cpus); ++ cpumask_clear(&shared_cpus); ++ list_splice_tail(&shared_pcfgs, &unshared_pcfgs); ++ INIT_LIST_HEAD(&shared_pcfgs); ++ } ++ } else { ++ /* Sharing required */ ++ if (list_empty(&shared_pcfgs)) { ++ /* Migrate one "unshared" to "shared" */ ++ pcfg = get_pcfg(&unshared_pcfgs); ++ if (!pcfg) { ++ pr_crit("No BMan portals available!\n"); ++ return 0; ++ } ++ cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus); ++ cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus); ++ list_add_tail(&pcfg->list, &shared_pcfgs); ++ } ++ } ++ /* Step 4. */ ++ list_for_each_entry(pcfg, &unshared_pcfgs, list) { ++ pcfg->public_cfg.is_shared = 0; ++ p = init_pcfg(pcfg); ++ if (!p) { ++ pr_crit("Unable to initialize bman portal\n"); ++ return 0; ++ } ++ } ++ /* Step 5. */ ++ list_for_each_entry(pcfg, &shared_pcfgs, list) { ++ pcfg->public_cfg.is_shared = 1; ++ p = init_pcfg(pcfg); ++ if (p) ++ shared_portals[num_shared_portals++] = p; ++ } ++ /* Step 6. 
*/ ++ if (!cpumask_empty(&slave_cpus)) ++ for_each_cpu(cpu, &slave_cpus) ++ init_slave(cpu); ++ pr_info("Bman portals initialised\n"); ++ cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask); ++ for_each_cpu(cpu, &offline_cpus) ++ bman_offline_cpu(cpu); ++#ifdef CONFIG_HOTPLUG_CPU ++ register_hotcpu_notifier(&bman_hotplug_cpu_notifier); ++#endif ++ return 0; ++} ++ ++__init int bman_resource_init(void) ++{ ++ struct device_node *dn; ++ int ret; ++ ++ /* Initialise BPID allocation ranges */ ++ for_each_compatible_node(dn, NULL, "fsl,bpid-range") { ++ ret = fsl_bpid_range_init(dn); ++ if (ret) ++ return ret; ++ } ++ return 0; ++} ++ ++#ifdef CONFIG_SUSPEND ++void suspend_unused_bportal(void) ++{ ++ struct bm_portal_config *pcfg; ++ ++ if (list_empty(&unused_pcfgs)) ++ return; ++ ++ list_for_each_entry(pcfg, &unused_pcfgs, list) { ++#ifdef CONFIG_PM_DEBUG ++ pr_info("Need to save bportal %d\n", pcfg->public_cfg.index); ++#endif ++ /* save isdr, disable all via isdr, clear isr */ ++ pcfg->saved_isdr = ++ __raw_readl(pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08); ++ __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] + ++ 0xe08); ++ __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] + ++ 0xe00); ++ } ++ return; ++} ++ ++void resume_unused_bportal(void) ++{ ++ struct bm_portal_config *pcfg; ++ ++ if (list_empty(&unused_pcfgs)) ++ return; ++ ++ list_for_each_entry(pcfg, &unused_pcfgs, list) { ++#ifdef CONFIG_PM_DEBUG ++ pr_info("Need to resume bportal %d\n", pcfg->public_cfg.index); ++#endif ++ /* restore isdr */ ++ __raw_writel(pcfg->saved_isdr, ++ pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08); ++ } ++ return; ++} ++#endif +--- /dev/null ++++ b/drivers/staging/fsl_qbman/bman_high.c +@@ -0,0 +1,1145 @@ ++/* Copyright 2008-2012 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
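The suspend/resume pair above reduces to one register idiom: save the interrupt-disable register (ISDR), mask every source, write-1-to-clear anything still pending in the status register, and restore the saved mask on resume. A minimal model of just that sequence, with an ordinary struct standing in for the memory-mapped portal page (field names here are illustrative; the real code pokes the raw 0xe00/0xe08 offsets):

#include <stdio.h>

struct fake_portal {
	unsigned int isr;	/* status, write-1-to-clear */
	unsigned int isdr;	/* per-source disable mask */
	unsigned int saved_isdr;
};

static void isr_w1c(struct fake_portal *p, unsigned int v)
{
	p->isr &= ~v;		/* write-1-to-clear semantics */
}

static void portal_suspend(struct fake_portal *p)
{
	p->saved_isdr = p->isdr;
	p->isdr = 0xffffffff;	/* mask every interrupt source */
	isr_w1c(p, 0xffffffff);	/* drop anything already pending */
}

static void portal_resume(struct fake_portal *p)
{
	p->isdr = p->saved_isdr;
}

int main(void)
{
	struct fake_portal p = { .isr = 0x5, .isdr = 0x2 };

	portal_suspend(&p);
	printf("suspended: isr=%#x isdr=%#x\n", p.isr, p.isdr);
	portal_resume(&p);
	printf("resumed: isdr=%#x\n", p.isdr);
	return 0;
}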
++ */ ++ ++#include "bman_low.h" ++ ++/* Compilation constants */ ++#define RCR_THRESH 2 /* reread h/w CI when running out of space */ ++#define IRQNAME "BMan portal %d" ++#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */ ++ ++struct bman_portal { ++ struct bm_portal p; ++ /* 2-element array. pools[0] is mask, pools[1] is snapshot. */ ++ struct bman_depletion *pools; ++ int thresh_set; ++ unsigned long irq_sources; ++ u32 slowpoll; /* only used when interrupts are off */ ++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC ++ struct bman_pool *rcri_owned; /* only 1 release WAIT_SYNC at a time */ ++#endif ++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE ++ raw_spinlock_t sharing_lock; /* only used if is_shared */ ++ int is_shared; ++ struct bman_portal *sharing_redirect; ++#endif ++ /* When the cpu-affine portal is activated, this is non-NULL */ ++ const struct bm_portal_config *config; ++ /* This is needed for power management */ ++ struct platform_device *pdev; ++ /* 64-entry hash-table of pool objects that are tracking depletion ++ * entry/exit (ie. BMAN_POOL_FLAG_DEPLETION). This isn't fast-path, so ++ * we're not fussy about cache-misses and so forth - whereas the above ++ * members should all fit in one cacheline. ++ * BTW, with 64 entries in the hash table and 64 buffer pools to track, ++ * you'll never guess the hash-function ... */ ++ struct bman_pool *cb[64]; ++ char irqname[MAX_IRQNAME]; ++ /* Track if the portal was alloced by the driver */ ++ u8 alloced; ++ /* power management data */ ++ u32 save_isdr; ++}; ++ ++/* For an explanation of the locking, redirection, or affine-portal logic, ++ * please consult the Qman driver for details. This is the same, only simpler ++ * (no fiddly Qman-specific bits.) */ ++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE ++#define PORTAL_IRQ_LOCK(p, irqflags) \ ++ do { \ ++ if ((p)->is_shared) \ ++ raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \ ++ else \ ++ local_irq_save(irqflags); \ ++ } while (0) ++#define PORTAL_IRQ_UNLOCK(p, irqflags) \ ++ do { \ ++ if ((p)->is_shared) \ ++ raw_spin_unlock_irqrestore(&(p)->sharing_lock, \ ++ irqflags); \ ++ else \ ++ local_irq_restore(irqflags); \ ++ } while (0) ++#else ++#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags) ++#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags) ++#endif ++ ++static cpumask_t affine_mask; ++static DEFINE_SPINLOCK(affine_mask_lock); ++static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal); ++static inline struct bman_portal *get_raw_affine_portal(void) ++{ ++ return &get_cpu_var(bman_affine_portal); ++} ++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE ++static inline struct bman_portal *get_affine_portal(void) ++{ ++ struct bman_portal *p = get_raw_affine_portal(); ++ if (p->sharing_redirect) ++ return p->sharing_redirect; ++ return p; ++} ++#else ++#define get_affine_portal() get_raw_affine_portal() ++#endif ++static inline void put_affine_portal(void) ++{ ++ put_cpu_var(bman_affine_portal); ++} ++static inline struct bman_portal *get_poll_portal(void) ++{ ++ return &get_cpu_var(bman_affine_portal); ++} ++#define put_poll_portal() ++ ++/* GOTCHA: this object type refers to a pool, it isn't *the* pool. There may be ++ * more than one such object per Bman buffer pool, eg. if different users of the ++ * pool are operating via different portals. */ ++struct bman_pool { ++ struct bman_pool_params params; ++ /* Used for hash-table admin when using depletion notifications. 
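The PORTAL_IRQ_LOCK()/PORTAL_IRQ_UNLOCK() pair above pays for a real lock only when the portal is shared between cpus; a cpu-private portal just masks local interrupts. A user-space sketch of the same shape, assuming pthreads (the interrupt masking has no user-space analogue, so it degenerates to a comment):

#include <pthread.h>
#include <stdio.h>

struct toy_portal {
	int is_shared;
	pthread_mutex_t lock;		/* plays the raw spinlock's role */
	unsigned long rcr_writes;	/* state the lock protects */
};

static void portal_lock(struct toy_portal *p)
{
	if (p->is_shared)
		pthread_mutex_lock(&p->lock);
	/* else: the kernel would local_irq_save() here; cpu-private
	 * state needs no cross-cpu lock at all */
}

static void portal_unlock(struct toy_portal *p)
{
	if (p->is_shared)
		pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct toy_portal p = {
		.is_shared = 1,
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	portal_lock(&p);
	p.rcr_writes++;
	portal_unlock(&p);
	printf("rcr_writes=%lu (shared=%d)\n", p.rcr_writes, p.is_shared);
	return 0;
}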
*/ ++ struct bman_portal *portal; ++ struct bman_pool *next; ++ /* stockpile state - NULL unless BMAN_POOL_FLAG_STOCKPILE is set */ ++ struct bm_buffer *sp; ++ unsigned int sp_fill; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ atomic_t in_use; ++#endif ++}; ++ ++/* (De)Registration of depletion notification callbacks */ ++static void depletion_link(struct bman_portal *portal, struct bman_pool *pool) ++{ ++ __maybe_unused unsigned long irqflags; ++ pool->portal = portal; ++ PORTAL_IRQ_LOCK(portal, irqflags); ++ pool->next = portal->cb[pool->params.bpid]; ++ portal->cb[pool->params.bpid] = pool; ++ if (!pool->next) ++ /* First object for that bpid on this portal, enable the BSCN ++ * mask bit. */ ++ bm_isr_bscn_mask(&portal->p, pool->params.bpid, 1); ++ PORTAL_IRQ_UNLOCK(portal, irqflags); ++} ++static void depletion_unlink(struct bman_pool *pool) ++{ ++ struct bman_pool *it, *last = NULL; ++ struct bman_pool **base = &pool->portal->cb[pool->params.bpid]; ++ __maybe_unused unsigned long irqflags; ++ PORTAL_IRQ_LOCK(pool->portal, irqflags); ++ it = *base; /* <-- gotcha, don't do this prior to the irq_save */ ++ while (it != pool) { ++ last = it; ++ it = it->next; ++ } ++ if (!last) ++ *base = pool->next; ++ else ++ last->next = pool->next; ++ if (!last && !pool->next) { ++ /* Last object for that bpid on this portal, disable the BSCN ++ * mask bit. */ ++ bm_isr_bscn_mask(&pool->portal->p, pool->params.bpid, 0); ++ /* And "forget" that we last saw this pool as depleted */ ++ bman_depletion_unset(&pool->portal->pools[1], ++ pool->params.bpid); ++ } ++ PORTAL_IRQ_UNLOCK(pool->portal, irqflags); ++} ++ ++/* In the case that the application's core loop calls qman_poll() and ++ * bman_poll(), we ought to balance how often we incur the overheads of the ++ * slow-path poll. We'll use two decrementer sources. The idle decrementer ++ * constant is used when the last slow-poll detected no work to do, and the busy ++ * decrementer constant when the last slow-poll had work to do. 
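The cb[] table above is a 64-bucket chained hash keyed directly by bpid, which is the joke in the struct comment: the hash function is the identity. The invariant worth isolating is that the BSCN mask bit for a bpid goes on when its first tracker is linked and off when its last one is unlinked. A compact model, with a 64-bit word standing in for the bm_isr_bscn_mask() state (the driver walks the chain with an explicit 'last' pointer; the pointer-to-pointer walk here is equivalent):

#include <stdio.h>
#include <stddef.h>

#define NPOOLS 64

struct pool {
	int bpid;
	struct pool *next;
};

static struct pool *cb[NPOOLS];
static unsigned long long bscn_mask;

static void link_pool(struct pool *p)
{
	p->next = cb[p->bpid];
	cb[p->bpid] = p;
	if (!p->next)	/* first tracker for this bpid */
		bscn_mask |= 1ull << p->bpid;
}

static void unlink_pool(struct pool *p)
{
	struct pool **pp = &cb[p->bpid];

	while (*pp != p)
		pp = &(*pp)->next;
	*pp = p->next;
	if (!cb[p->bpid])	/* last tracker for this bpid */
		bscn_mask &= ~(1ull << p->bpid);
}

int main(void)
{
	struct pool a = { .bpid = 3 }, b = { .bpid = 3 };

	link_pool(&a);
	link_pool(&b);
	unlink_pool(&a);
	printf("mask after one unlink: %#llx\n", bscn_mask);	/* bit 3 set */
	unlink_pool(&b);
	printf("mask after both: %#llx\n", bscn_mask);		/* 0 */
	return 0;
}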
*/ ++#define SLOW_POLL_IDLE 1000 ++#define SLOW_POLL_BUSY 10 ++static u32 __poll_portal_slow(struct bman_portal *p, u32 is); ++ ++/* Portal interrupt handler */ ++static irqreturn_t portal_isr(__always_unused int irq, void *ptr) ++{ ++ struct bman_portal *p = ptr; ++ u32 clear = p->irq_sources; ++ u32 is = bm_isr_status_read(&p->p) & p->irq_sources; ++ clear |= __poll_portal_slow(p, is); ++ bm_isr_status_clear(&p->p, clear); ++ return IRQ_HANDLED; ++} ++ ++#ifdef CONFIG_SUSPEND ++static int _bman_portal_suspend_noirq(struct device *dev) ++{ ++ struct bman_portal *p = (struct bman_portal *)dev->platform_data; ++#ifdef CONFIG_PM_DEBUG ++ struct platform_device *pdev = to_platform_device(dev); ++#endif ++ p->save_isdr = bm_isr_disable_read(&p->p); ++ bm_isr_disable_write(&p->p, 0xffffffff); ++ bm_isr_status_clear(&p->p, 0xffffffff); ++#ifdef CONFIG_PM_DEBUG ++ pr_info("Suspend for %s\n", pdev->name); ++#endif ++ return 0; ++} ++ ++static int _bman_portal_resume_noirq(struct device *dev) ++{ ++ struct bman_portal *p = (struct bman_portal *)dev->platform_data; ++ ++ /* restore isdr */ ++ bm_isr_disable_write(&p->p, p->save_isdr); ++ return 0; ++} ++#else ++#define _bman_portal_suspend_noirq NULL ++#define _bman_portal_resume_noirq NULL ++#endif ++ ++struct dev_pm_domain bman_portal_device_pm_domain = { ++ .ops = { ++ USE_PLATFORM_PM_SLEEP_OPS ++ .suspend_noirq = _bman_portal_suspend_noirq, ++ .resume_noirq = _bman_portal_resume_noirq, ++ } ++}; ++ ++struct bman_portal *bman_create_portal( ++ struct bman_portal *portal, ++ const struct bm_portal_config *config) ++{ ++ struct bm_portal *__p; ++ const struct bman_depletion *pools = &config->public_cfg.mask; ++ int ret; ++ u8 bpid = 0; ++ char buf[16]; ++ ++ if (!portal) { ++ portal = kmalloc(sizeof(*portal), GFP_KERNEL); ++ if (!portal) ++ return portal; ++ portal->alloced = 1; ++ } else ++ portal->alloced = 0; ++ ++ __p = &portal->p; ++ ++ /* prep the low-level portal struct with the mapped addresses from the ++ * config, everything that follows depends on it and "config" is more ++ * for (de)reference... */ ++ __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE]; ++ __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI]; ++ if (bm_rcr_init(__p, bm_rcr_pvb, bm_rcr_cce)) { ++ pr_err("Bman RCR initialisation failed\n"); ++ goto fail_rcr; ++ } ++ if (bm_mc_init(__p)) { ++ pr_err("Bman MC initialisation failed\n"); ++ goto fail_mc; ++ } ++ if (bm_isr_init(__p)) { ++ pr_err("Bman ISR initialisation failed\n"); ++ goto fail_isr; ++ } ++ portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL); ++ if (!portal->pools) ++ goto fail_pools; ++ portal->pools[0] = *pools; ++ bman_depletion_init(portal->pools + 1); ++ while (bpid < bman_pool_max) { ++ /* Default to all BPIDs disabled, we enable as required at ++ * run-time. 
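The two SLOW_POLL_* constants defined above drive a simple adaptive decrementer, used by bman_poll() further down: every call counts down cheaply, and only when the counter hits zero does the slow path run, after which the counter is reloaded with the busy or idle constant depending on whether work was found. A runnable model (have_work() is an invented stand-in event source):

#include <stdio.h>

#define SLOW_POLL_IDLE	1000
#define SLOW_POLL_BUSY	10

/* pretend hardware has work waiting only on the third slow poll */
static int have_work(int nth_poll)
{
	return nth_poll == 2;
}

int main(void)
{
	unsigned int slowpoll = 0;
	int polls = 0, iter;

	for (iter = 0; iter < 2500; iter++) {
		if (!(slowpoll--)) {
			int active = have_work(polls);

			slowpoll = active ? SLOW_POLL_BUSY : SLOW_POLL_IDLE;
			printf("slow poll %d at iteration %d (%s)\n",
			       polls++, iter, active ? "busy" : "idle");
		}
		/* fast-path processing would happen here */
	}
	return 0;
}

The busy reload shows up in the output: after the poll that found work, the next slow poll happens 11 iterations later instead of 1001.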
*/ ++ bm_isr_bscn_mask(__p, bpid, 0); ++ bpid++; ++ } ++ portal->slowpoll = 0; ++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC ++ portal->rcri_owned = NULL; ++#endif ++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE ++ raw_spin_lock_init(&portal->sharing_lock); ++ portal->is_shared = config->public_cfg.is_shared; ++ portal->sharing_redirect = NULL; ++#endif ++ sprintf(buf, "bportal-%u", config->public_cfg.index); ++ portal->pdev = platform_device_alloc(buf, -1); ++ if (!portal->pdev) ++ goto fail_devalloc; ++ portal->pdev->dev.pm_domain = &bman_portal_device_pm_domain; ++ portal->pdev->dev.platform_data = portal; ++ ret = platform_device_add(portal->pdev); ++ if (ret) ++ goto fail_devadd; ++ memset(&portal->cb, 0, sizeof(portal->cb)); ++ /* Write-to-clear any stale interrupt status bits */ ++ bm_isr_disable_write(__p, 0xffffffff); ++ portal->irq_sources = 0; ++ bm_isr_enable_write(__p, portal->irq_sources); ++ bm_isr_status_clear(__p, 0xffffffff); ++ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu); ++ if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname, ++ portal)) { ++ pr_err("request_irq() failed\n"); ++ goto fail_irq; ++ } ++ if ((config->public_cfg.cpu != -1) && ++ irq_can_set_affinity(config->public_cfg.irq) && ++ irq_set_affinity(config->public_cfg.irq, ++ cpumask_of(config->public_cfg.cpu))) { ++ pr_err("irq_set_affinity() failed %s\n", portal->irqname); ++ goto fail_affinity; ++ } ++ ++ /* Need RCR to be empty before continuing */ ++ ret = bm_rcr_get_fill(__p); ++ if (ret) { ++ pr_err("Bman RCR unclean\n"); ++ goto fail_rcr_empty; ++ } ++ /* Success */ ++ portal->config = config; ++ ++ bm_isr_disable_write(__p, 0); ++ bm_isr_uninhibit(__p); ++ return portal; ++fail_rcr_empty: ++fail_affinity: ++ free_irq(config->public_cfg.irq, portal); ++fail_irq: ++ platform_device_del(portal->pdev); ++fail_devadd: ++ platform_device_put(portal->pdev); ++fail_devalloc: ++ kfree(portal->pools); ++fail_pools: ++ bm_isr_finish(__p); ++fail_isr: ++ bm_mc_finish(__p); ++fail_mc: ++ bm_rcr_finish(__p); ++fail_rcr: ++ if (portal->alloced) ++ kfree(portal); ++ return NULL; ++} ++ ++struct bman_portal *bman_create_affine_portal( ++ const struct bm_portal_config *config) ++{ ++ struct bman_portal *portal; ++ ++ portal = &per_cpu(bman_affine_portal, config->public_cfg.cpu); ++ portal = bman_create_portal(portal, config); ++ if (portal) { ++ spin_lock(&affine_mask_lock); ++ cpumask_set_cpu(config->public_cfg.cpu, &affine_mask); ++ spin_unlock(&affine_mask_lock); ++ } ++ return portal; ++} ++ ++ ++struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect, ++ int cpu) ++{ ++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE ++ struct bman_portal *p; ++ p = &per_cpu(bman_affine_portal, cpu); ++ BUG_ON(p->config); ++ BUG_ON(p->is_shared); ++ BUG_ON(!redirect->config->public_cfg.is_shared); ++ p->irq_sources = 0; ++ p->sharing_redirect = redirect; ++ return p; ++#else ++ BUG(); ++ return NULL; ++#endif ++} ++ ++void bman_destroy_portal(struct bman_portal *bm) ++{ ++ const struct bm_portal_config *pcfg; ++ pcfg = bm->config; ++ bm_rcr_cce_update(&bm->p); ++ bm_rcr_cce_update(&bm->p); ++ ++ free_irq(pcfg->public_cfg.irq, bm); ++ ++ kfree(bm->pools); ++ bm_isr_finish(&bm->p); ++ bm_mc_finish(&bm->p); ++ bm_rcr_finish(&bm->p); ++ bm->config = NULL; ++ if (bm->alloced) ++ kfree(bm); ++} ++ ++const struct bm_portal_config *bman_destroy_affine_portal(void) ++{ ++ struct bman_portal *bm = get_raw_affine_portal(); ++ const struct bm_portal_config *pcfg; ++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE ++ if 
(bm->sharing_redirect) { ++ bm->sharing_redirect = NULL; ++ put_affine_portal(); ++ return NULL; ++ } ++ bm->is_shared = 0; ++#endif ++ pcfg = bm->config; ++ bman_destroy_portal(bm); ++ spin_lock(&affine_mask_lock); ++ cpumask_clear_cpu(pcfg->public_cfg.cpu, &affine_mask); ++ spin_unlock(&affine_mask_lock); ++ put_affine_portal(); ++ return pcfg; ++} ++ ++/* When release logic waits on available RCR space, we need a global waitqueue ++ * in the case of "affine" use (as the waits wake on different cpus which means ++ * different portals - so we can't wait on any per-portal waitqueue). */ ++static DECLARE_WAIT_QUEUE_HEAD(affine_queue); ++ ++static u32 __poll_portal_slow(struct bman_portal *p, u32 is) ++{ ++ struct bman_depletion tmp; ++ u32 ret = is; ++ ++ /* There is a gotcha to be aware of. If we do the query before clearing ++ * the status register, we may miss state changes that occur between the ++ * two. If we write to clear the status register before the query, the ++ * cache-enabled query command may overtake the status register write ++ * unless we use a heavyweight sync (which we don't want). Instead, we ++ * write-to-clear the status register then *read it back* before doing ++ * the query, hence the odd while loop with the 'is' accumulation. */ ++ if (is & BM_PIRQ_BSCN) { ++ struct bm_mc_result *mcr; ++ __maybe_unused unsigned long irqflags; ++ unsigned int i, j; ++ u32 __is; ++ bm_isr_status_clear(&p->p, BM_PIRQ_BSCN); ++ while ((__is = bm_isr_status_read(&p->p)) & BM_PIRQ_BSCN) { ++ is |= __is; ++ bm_isr_status_clear(&p->p, BM_PIRQ_BSCN); ++ } ++ is &= ~BM_PIRQ_BSCN; ++ PORTAL_IRQ_LOCK(p, irqflags); ++ bm_mc_start(&p->p); ++ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY); ++ while (!(mcr = bm_mc_result(&p->p))) ++ cpu_relax(); ++ tmp = mcr->query.ds.state; ++ tmp.__state[0] = be32_to_cpu(tmp.__state[0]); ++ tmp.__state[1] = be32_to_cpu(tmp.__state[1]); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ for (i = 0; i < 2; i++) { ++ int idx = i * 32; ++ /* tmp is a mask of currently-depleted pools. ++ * pools[0] is mask of those we care about. ++ * pools[1] is our previous view (we only want to ++ * be told about changes). */ ++ tmp.__state[i] &= p->pools[0].__state[i]; ++ if (tmp.__state[i] == p->pools[1].__state[i]) ++ /* fast-path, nothing to see, move along */ ++ continue; ++ for (j = 0; j <= 31; j++, idx++) { ++ struct bman_pool *pool = p->cb[idx]; ++ int b4 = bman_depletion_get(&p->pools[1], idx); ++ int af = bman_depletion_get(&tmp, idx); ++ if (b4 == af) ++ continue; ++ while (pool) { ++ pool->params.cb(p, pool, ++ pool->params.cb_ctx, af); ++ pool = pool->next; ++ } ++ } ++ } ++ p->pools[1] = tmp; ++ } ++ ++ if (is & BM_PIRQ_RCRI) { ++ __maybe_unused unsigned long irqflags; ++ PORTAL_IRQ_LOCK(p, irqflags); ++ bm_rcr_cce_update(&p->p); ++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC ++ /* If waiting for sync, we only cancel the interrupt threshold ++ * when the ring utilisation hits zero. 
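The clear-then-reread loop above deserves isolating: clearing BSCN and then issuing the query could lose a depletion edge that lands between the two operations, so the code re-reads the status register until it stays clear, accumulating everything it saw into 'is' first. A toy version with the register as a plain variable and the race injected deliberately:

#include <stdio.h>

#define BSCN 0x1u

static unsigned int isr;	/* fake write-1-to-clear status register */

static unsigned int isr_read(void)
{
	return isr;
}

static void isr_clear(unsigned int bits)
{
	isr &= ~bits;
}

int main(void)
{
	unsigned int is, again;

	isr |= BSCN;			/* initial depletion state change */
	is = isr_read() & BSCN;
	isr_clear(BSCN);
	isr |= BSCN;			/* second edge racing with the clear */
	while ((again = isr_read()) & BSCN) {
		is |= again;
		isr_clear(BSCN);
	}
	printf("accumulated=%#x, register now=%#x\n", is, isr_read());
	return 0;
}

The racing edge is caught by the loop, so the (elided here) pool-state query that follows runs with the register quiescent and no event lost.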
*/ ++ if (p->rcri_owned) { ++ if (!bm_rcr_get_fill(&p->p)) { ++ p->rcri_owned = NULL; ++ bm_rcr_set_ithresh(&p->p, 0); ++ } ++ } else ++#endif ++ bm_rcr_set_ithresh(&p->p, 0); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ wake_up(&affine_queue); ++ bm_isr_status_clear(&p->p, BM_PIRQ_RCRI); ++ is &= ~BM_PIRQ_RCRI; ++ } ++ ++ /* There should be no status register bits left undefined */ ++ DPA_ASSERT(!is); ++ return ret; ++} ++ ++const struct bman_portal_config *bman_get_portal_config(void) ++{ ++ struct bman_portal *p = get_affine_portal(); ++ const struct bman_portal_config *ret = &p->config->public_cfg; ++ put_affine_portal(); ++ return ret; ++} ++EXPORT_SYMBOL(bman_get_portal_config); ++ ++u32 bman_irqsource_get(void) ++{ ++ struct bman_portal *p = get_raw_affine_portal(); ++ u32 ret = p->irq_sources & BM_PIRQ_VISIBLE; ++ put_affine_portal(); ++ return ret; ++} ++EXPORT_SYMBOL(bman_irqsource_get); ++ ++int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits) ++{ ++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE ++ if (p->sharing_redirect) ++ return -EINVAL; ++ else ++#endif ++ { ++ __maybe_unused unsigned long irqflags; ++ PORTAL_IRQ_LOCK(p, irqflags); ++ set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources); ++ bm_isr_enable_write(&p->p, p->irq_sources); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ } ++ return 0; ++} ++EXPORT_SYMBOL(bman_p_irqsource_add); ++ ++int bman_irqsource_add(__maybe_unused u32 bits) ++{ ++ struct bman_portal *p = get_raw_affine_portal(); ++ int ret = 0; ++ ret = bman_p_irqsource_add(p, bits); ++ put_affine_portal(); ++ return ret; ++} ++EXPORT_SYMBOL(bman_irqsource_add); ++ ++int bman_irqsource_remove(u32 bits) ++{ ++ struct bman_portal *p = get_raw_affine_portal(); ++ __maybe_unused unsigned long irqflags; ++ u32 ier; ++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE ++ if (p->sharing_redirect) { ++ put_affine_portal(); ++ return -EINVAL; ++ } ++#endif ++ /* Our interrupt handler only processes+clears status register bits that ++ * are in p->irq_sources. As we're trimming that mask, if one of them ++ * were to assert in the status register just before we remove it from ++ * the enable register, there would be an interrupt-storm when we ++ * release the IRQ lock. So we wait for the enable register update to ++ * take effect in h/w (by reading it back) and then clear all other bits ++ * in the status register. Ie. we clear them from ISR once it's certain ++ * IER won't allow them to reassert. */ ++ PORTAL_IRQ_LOCK(p, irqflags); ++ bits &= BM_PIRQ_VISIBLE; ++ clear_bits(bits, &p->irq_sources); ++ bm_isr_enable_write(&p->p, p->irq_sources); ++ ier = bm_isr_enable_read(&p->p); ++ /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a ++ * data-dependency, ie. to protect against re-ordering. 
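The teardown ordering in bman_irqsource_remove() above is a three-step idiom: drop the bits from the enable mask, read the enable register back so the write is known to have taken effect, then write-1-to-clear everything IER no longer allows, using ~ier rather than the removed bits so the read creates the data dependency the comment describes. As a sketch with the registers as plain variables:

#include <stdio.h>

static unsigned int ier = 0x3;	/* enabled sources */
static unsigned int isr = 0x2;	/* source 1 happens to be asserted */

static void isr_w1c(unsigned int bits)
{
	isr &= ~bits;
}

int main(void)
{
	unsigned int bits = 0x2;	/* source being removed */
	unsigned int readback;

	ier &= ~bits;		/* 1: stop the source raising interrupts */
	readback = ier;		/* 2: read back; the write has landed */
	isr_w1c(~readback);	/* 3: clear every bit IER no longer allows */
	printf("ier=%#x isr=%#x\n", ier, isr);
	return 0;
}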
*/ ++ bm_isr_status_clear(&p->p, ~ier); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ return 0; ++} ++EXPORT_SYMBOL(bman_irqsource_remove); ++ ++const cpumask_t *bman_affine_cpus(void) ++{ ++ return &affine_mask; ++} ++EXPORT_SYMBOL(bman_affine_cpus); ++ ++u32 bman_poll_slow(void) ++{ ++ struct bman_portal *p = get_poll_portal(); ++ u32 ret; ++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE ++ if (unlikely(p->sharing_redirect)) ++ ret = (u32)-1; ++ else ++#endif ++ { ++ u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources; ++ ret = __poll_portal_slow(p, is); ++ bm_isr_status_clear(&p->p, ret); ++ } ++ put_poll_portal(); ++ return ret; ++} ++EXPORT_SYMBOL(bman_poll_slow); ++ ++/* Legacy wrapper */ ++void bman_poll(void) ++{ ++ struct bman_portal *p = get_poll_portal(); ++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE ++ if (unlikely(p->sharing_redirect)) ++ goto done; ++#endif ++ if (!(p->slowpoll--)) { ++ u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources; ++ u32 active = __poll_portal_slow(p, is); ++ if (active) ++ p->slowpoll = SLOW_POLL_BUSY; ++ else ++ p->slowpoll = SLOW_POLL_IDLE; ++ } ++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE ++done: ++#endif ++ put_poll_portal(); ++} ++EXPORT_SYMBOL(bman_poll); ++ ++static const u32 zero_thresholds[4] = {0, 0, 0, 0}; ++ ++struct bman_pool *bman_new_pool(const struct bman_pool_params *params) ++{ ++ struct bman_pool *pool = NULL; ++ u32 bpid; ++ ++ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) { ++ int ret = bman_alloc_bpid(&bpid); ++ if (ret) ++ return NULL; ++ } else { ++ if (params->bpid >= bman_pool_max) ++ return NULL; ++ bpid = params->bpid; ++ } ++#ifdef CONFIG_FSL_BMAN_CONFIG ++ if (params->flags & BMAN_POOL_FLAG_THRESH) { ++ int ret = bm_pool_set(bpid, params->thresholds); ++ if (ret) ++ goto err; ++ } ++#else ++ if (params->flags & BMAN_POOL_FLAG_THRESH) ++ goto err; ++#endif ++ pool = kmalloc(sizeof(*pool), GFP_KERNEL); ++ if (!pool) ++ goto err; ++ pool->sp = NULL; ++ pool->sp_fill = 0; ++ pool->params = *params; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ atomic_set(&pool->in_use, 1); ++#endif ++ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) ++ pool->params.bpid = bpid; ++ if (params->flags & BMAN_POOL_FLAG_STOCKPILE) { ++ pool->sp = kmalloc(sizeof(struct bm_buffer) * BMAN_STOCKPILE_SZ, ++ GFP_KERNEL); ++ if (!pool->sp) ++ goto err; ++ } ++ if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION) { ++ struct bman_portal *p = get_affine_portal(); ++ if (!p->pools || !bman_depletion_get(&p->pools[0], bpid)) { ++ pr_err("Depletion events disabled for bpid %d\n", bpid); ++ goto err; ++ } ++ depletion_link(p, pool); ++ put_affine_portal(); ++ } ++ return pool; ++err: ++#ifdef CONFIG_FSL_BMAN_CONFIG ++ if (params->flags & BMAN_POOL_FLAG_THRESH) ++ bm_pool_set(bpid, zero_thresholds); ++#endif ++ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) ++ bman_release_bpid(bpid); ++ if (pool) { ++ kfree(pool->sp); ++ kfree(pool); ++ } ++ return NULL; ++} ++EXPORT_SYMBOL(bman_new_pool); ++ ++void bman_free_pool(struct bman_pool *pool) ++{ ++#ifdef CONFIG_FSL_BMAN_CONFIG ++ if (pool->params.flags & BMAN_POOL_FLAG_THRESH) ++ bm_pool_set(pool->params.bpid, zero_thresholds); ++#endif ++ if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION) ++ depletion_unlink(pool); ++ if (pool->params.flags & BMAN_POOL_FLAG_STOCKPILE) { ++ if (pool->sp_fill) ++ pr_err("Stockpile not flushed, has %u in bpid %u.\n", ++ pool->sp_fill, pool->params.bpid); ++ kfree(pool->sp); ++ pool->sp = NULL; ++ pool->params.flags ^= BMAN_POOL_FLAG_STOCKPILE; ++ } ++ if (pool->params.flags & 
BMAN_POOL_FLAG_DYNAMIC_BPID) ++ bman_release_bpid(pool->params.bpid); ++ kfree(pool); ++} ++EXPORT_SYMBOL(bman_free_pool); ++ ++const struct bman_pool_params *bman_get_params(const struct bman_pool *pool) ++{ ++ return &pool->params; ++} ++EXPORT_SYMBOL(bman_get_params); ++ ++static noinline void update_rcr_ci(struct bman_portal *p, u8 avail) ++{ ++ if (avail) ++ bm_rcr_cce_prefetch(&p->p); ++ else ++ bm_rcr_cce_update(&p->p); ++} ++ ++int bman_rcr_is_empty(void) ++{ ++ __maybe_unused unsigned long irqflags; ++ struct bman_portal *p = get_affine_portal(); ++ u8 avail; ++ ++ PORTAL_IRQ_LOCK(p, irqflags); ++ update_rcr_ci(p, 0); ++ avail = bm_rcr_get_fill(&p->p); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ return avail == 0; ++} ++EXPORT_SYMBOL(bman_rcr_is_empty); ++ ++static inline struct bm_rcr_entry *try_rel_start(struct bman_portal **p, ++#ifdef CONFIG_FSL_DPA_CAN_WAIT ++ __maybe_unused struct bman_pool *pool, ++#endif ++ __maybe_unused unsigned long *irqflags, ++ __maybe_unused u32 flags) ++{ ++ struct bm_rcr_entry *r; ++ u8 avail; ++ ++ *p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(*p, (*irqflags)); ++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC ++ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) && ++ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) { ++ if ((*p)->rcri_owned) { ++ PORTAL_IRQ_UNLOCK(*p, (*irqflags)); ++ put_affine_portal(); ++ return NULL; ++ } ++ (*p)->rcri_owned = pool; ++ } ++#endif ++ avail = bm_rcr_get_avail(&(*p)->p); ++ if (avail < 2) ++ update_rcr_ci(*p, avail); ++ r = bm_rcr_start(&(*p)->p); ++ if (unlikely(!r)) { ++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC ++ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) && ++ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) ++ (*p)->rcri_owned = NULL; ++#endif ++ PORTAL_IRQ_UNLOCK(*p, (*irqflags)); ++ put_affine_portal(); ++ } ++ return r; ++} ++ ++#ifdef CONFIG_FSL_DPA_CAN_WAIT ++static noinline struct bm_rcr_entry *__wait_rel_start(struct bman_portal **p, ++ struct bman_pool *pool, ++ __maybe_unused unsigned long *irqflags, ++ u32 flags) ++{ ++ struct bm_rcr_entry *rcr = try_rel_start(p, pool, irqflags, flags); ++ if (!rcr) ++ bm_rcr_set_ithresh(&(*p)->p, 1); ++ return rcr; ++} ++ ++static noinline struct bm_rcr_entry *wait_rel_start(struct bman_portal **p, ++ struct bman_pool *pool, ++ __maybe_unused unsigned long *irqflags, ++ u32 flags) ++{ ++ struct bm_rcr_entry *rcr; ++#ifndef CONFIG_FSL_DPA_CAN_WAIT_SYNC ++ pool = NULL; ++#endif ++ if (flags & BMAN_RELEASE_FLAG_WAIT_INT) ++ /* NB: return NULL if signal occurs before completion. Signal ++ * can occur during return. Caller must check for signal */ ++ wait_event_interruptible(affine_queue, ++ (rcr = __wait_rel_start(p, pool, irqflags, flags))); ++ else ++ wait_event(affine_queue, ++ (rcr = __wait_rel_start(p, pool, irqflags, flags))); ++ return rcr; ++} ++#endif ++ ++static inline int __bman_release(struct bman_pool *pool, ++ const struct bm_buffer *bufs, u8 num, u32 flags) ++{ ++ struct bman_portal *p; ++ struct bm_rcr_entry *r; ++ __maybe_unused unsigned long irqflags; ++ u32 i = num - 1; ++ ++#ifdef CONFIG_FSL_DPA_CAN_WAIT ++ if (flags & BMAN_RELEASE_FLAG_WAIT) ++ r = wait_rel_start(&p, pool, &irqflags, flags); ++ else ++ r = try_rel_start(&p, pool, &irqflags, flags); ++#else ++ r = try_rel_start(&p, &irqflags, flags); ++#endif ++ if (!r) ++ return -EBUSY; ++ /* We can copy all but the first entry, as this can trigger badness ++ * with the valid-bit. Use the overlay to mask the verb byte. 
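The overlay trick described above packs the bpid into the upper bits of the first buffer word while masking off the top (verb) byte, so the in-band valid-bit handshake is left to the commit; the driver then converts the whole word to big-endian. Just the packing, on a host-endian value (the address and bpid are invented):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t buf_addr = 0x12345678abcdULL;	/* 48-bit buffer address */
	uint64_t bpid = 42;
	uint64_t word = (buf_addr | (bpid << 48)) & 0x00ffffffffffffffULL;

	printf("entry word %#018" PRIx64 ", bpid back out: %" PRIu64 "\n",
	       word, (word >> 48) & 0xff);
	return 0;
}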
*/ ++ r->bufs[0].opaque = ++ ((cpu_to_be64((bufs[0].opaque | ++ ((u64)pool->params.bpid<<48)) ++ & 0x00ffffffffffffff))); ++ if (i) { ++ for (i = 1; i < num; i++) ++ r->bufs[i].opaque = ++ cpu_to_be64(bufs[i].opaque); ++ } ++ ++ bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE | ++ (num & BM_RCR_VERB_BUFCOUNT_MASK)); ++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC ++ /* if we wish to sync we need to set the threshold after h/w sees the ++ * new ring entry. As we're mixing cache-enabled and cache-inhibited ++ * accesses, this requires a heavy-weight sync. */ ++ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) && ++ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) { ++ hwsync(); ++ bm_rcr_set_ithresh(&p->p, 1); ++ } ++#endif ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC ++ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) && ++ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) { ++ if (flags & BMAN_RELEASE_FLAG_WAIT_INT) ++ /* NB: return success even if signal occurs before ++ * condition is true. pvb_commit guarantees success */ ++ wait_event_interruptible(affine_queue, ++ (p->rcri_owned != pool)); ++ else ++ wait_event(affine_queue, (p->rcri_owned != pool)); ++ } ++#endif ++ return 0; ++} ++ ++int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num, ++ u32 flags) ++{ ++ int ret; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ if (!num || (num > 8)) ++ return -EINVAL; ++ if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE) ++ return -EINVAL; ++#endif ++ /* Without stockpile, this API is a pass-through to the h/w operation */ ++ if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE)) ++ return __bman_release(pool, bufs, num, flags); ++#ifdef CONFIG_FSL_DPA_CHECKING ++ if (!atomic_dec_and_test(&pool->in_use)) { ++ pr_crit("Parallel attempts to enter bman_released() detected."); ++ panic("only one instance of bman_released/acquired allowed"); ++ } ++#endif ++ /* Two movements of buffers are possible, and can occur in either order. ++ * A: moving buffers from the caller to the stockpile. ++ * B: moving buffers from the stockpile to hardware. ++ * Order 1: if there is already enough space in the stockpile for A ++ * then we want to do A first, and only do B if we trigger the ++ * stockpile-high threshold. ++ * Order 2: if there is not enough space in the stockpile for A, then ++ * we want to do B first, then do A if B had succeeded. However in this ++ * case B is dependent on how many buffers the user needs to release, ++ * not the stockpile-high threshold. ++ * Due to the different handling of B between the two cases, putting A ++ * and B in a while() loop would require quite obscure logic, so handle ++ * the different sequences explicitly. 
*/ ++ if ((pool->sp_fill + num) <= BMAN_STOCKPILE_SZ) { ++ /* Order 1: do A */ ++ copy_words(pool->sp + pool->sp_fill, bufs, ++ sizeof(struct bm_buffer) * num); ++ pool->sp_fill += num; ++ /* do B relative to STOCKPILE_HIGH */ ++ while (pool->sp_fill >= BMAN_STOCKPILE_HIGH) { ++ ret = __bman_release(pool, ++ pool->sp + (pool->sp_fill - 8), 8, ++ flags); ++ if (ret >= 0) ++ pool->sp_fill -= 8; ++ } ++ } else { ++ /* Order 2: do B relative to 'num' */ ++ do { ++ ret = __bman_release(pool, ++ pool->sp + (pool->sp_fill - 8), 8, ++ flags); ++ if (ret < 0) ++ /* failure */ ++ goto release_done; ++ pool->sp_fill -= 8; ++ } while ((pool->sp_fill + num) > BMAN_STOCKPILE_SZ); ++ /* do A */ ++ copy_words(pool->sp + pool->sp_fill, bufs, ++ sizeof(struct bm_buffer) * num); ++ pool->sp_fill += num; ++ } ++ /* success */ ++ ret = 0; ++release_done: ++#ifdef CONFIG_FSL_DPA_CHECKING ++ atomic_inc(&pool->in_use); ++#endif ++ return ret; ++} ++EXPORT_SYMBOL(bman_release); ++ ++static inline int __bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, ++ u8 num) ++{ ++ struct bman_portal *p = get_affine_portal(); ++ struct bm_mc_command *mcc; ++ struct bm_mc_result *mcr; ++ __maybe_unused unsigned long irqflags; ++ int ret, i; ++ ++ PORTAL_IRQ_LOCK(p, irqflags); ++ mcc = bm_mc_start(&p->p); ++ mcc->acquire.bpid = pool->params.bpid; ++ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | ++ (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT)); ++ while (!(mcr = bm_mc_result(&p->p))) ++ cpu_relax(); ++ ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT; ++ if (bufs) { ++ for (i = 0; i < num; i++) ++ bufs[i].opaque = ++ be64_to_cpu(mcr->acquire.bufs[i].opaque); ++ } ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ if (ret != num) ++ ret = -ENOMEM; ++ return ret; ++} ++ ++int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num, ++ u32 flags) ++{ ++ int ret; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ if (!num || (num > 8)) ++ return -EINVAL; ++ if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE) ++ return -EINVAL; ++#endif ++ /* Without stockpile, this API is a pass-through to the h/w operation */ ++ if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE)) ++ return __bman_acquire(pool, bufs, num); ++#ifdef CONFIG_FSL_DPA_CHECKING ++ if (!atomic_dec_and_test(&pool->in_use)) { ++ pr_crit("Parallel attempts to enter bman_acquire() detected."); ++ panic("only one instance of bman_released/acquired allowed"); ++ } ++#endif ++ /* Two movements of buffers are possible, and can occur in either order. ++ * A: moving buffers from stockpile to the caller. ++ * B: moving buffers from hardware to the stockpile. ++ * Order 1: if there are already enough buffers in the stockpile for A ++ * then we want to do A first, and only do B if we trigger the ++ * stockpile-low threshold. ++ * Order 2: if there are not enough buffers in the stockpile for A, ++ * then we want to do B first, then do A if B had succeeded. However in ++ * this case B is dependent on how many buffers the user needs, not the ++ * stockpile-low threshold. ++ * Due to the different handling of B between the two cases, putting A ++ * and B in a while() loop would require quite obscure logic, so handle ++ * the different sequences explicitly. 
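Both the release path just above and the acquire path that follows share this Order 1/Order 2 shape, so one miniature covers the idea. The sketch below models the release side only: hw_release() stands in for the __bman_release() ring write and always succeeds (the real one can fail when the release ring is full), the watermarks match the BMAN_STOCKPILE_* constants, and failure handling is reduced to bailing out; the acquire side mirrors this around the LOW watermark.

#include <stdio.h>

#define SP_SZ	16	/* BMAN_STOCKPILE_SZ */
#define SP_HIGH	14	/* BMAN_STOCKPILE_HIGH */

static unsigned int sp_fill;	/* buffers currently stockpiled */
static unsigned int hw_held;	/* buffers handed to fake hardware */

static int hw_release(unsigned int n)
{
	hw_held += n;
	return 0;
}

static int release(unsigned int num)
{
	if (sp_fill + num <= SP_SZ) {
		/* Order 1: stash first, drain past the high watermark */
		sp_fill += num;
		while (sp_fill >= SP_HIGH) {
			if (hw_release(8))
				break;	/* tolerated; drain again later */
			sp_fill -= 8;
		}
	} else {
		/* Order 2: drain until the caller's buffers fit */
		do {
			if (hw_release(8))
				return -1;
			sp_fill -= 8;
		} while (sp_fill + num > SP_SZ);
		sp_fill += num;
	}
	return 0;
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++) {
		release(4);
		printf("after release(4): stockpile=%u hw=%u\n",
		       sp_fill, hw_held);
	}
	return 0;
}

The fourth call crosses the high watermark, so one batch of 8 goes to "hardware" and the stockpile drops back to 8.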
*/ ++ if (num <= pool->sp_fill) { ++ /* Order 1: do A */ ++ copy_words(bufs, pool->sp + (pool->sp_fill - num), ++ sizeof(struct bm_buffer) * num); ++ pool->sp_fill -= num; ++ /* do B relative to STOCKPILE_LOW */ ++ while (pool->sp_fill <= BMAN_STOCKPILE_LOW) { ++ ret = __bman_acquire(pool, pool->sp + pool->sp_fill, 8); ++ if (ret < 0) ++ ret = __bman_acquire(pool, ++ pool->sp + pool->sp_fill, 1); ++ if (ret < 0) ++ break; ++ pool->sp_fill += ret; ++ } ++ } else { ++ /* Order 2: do B relative to 'num' */ ++ do { ++ ret = __bman_acquire(pool, pool->sp + pool->sp_fill, 8); ++ if (ret < 0) ++ ret = __bman_acquire(pool, ++ pool->sp + pool->sp_fill, 1); ++ if (ret < 0) ++ /* failure */ ++ goto acquire_done; ++ pool->sp_fill += ret; ++ } while (pool->sp_fill < num); ++ /* do A */ ++ copy_words(bufs, pool->sp + (pool->sp_fill - num), ++ sizeof(struct bm_buffer) * num); ++ pool->sp_fill -= num; ++ } ++ /* success */ ++ ret = num; ++acquire_done: ++#ifdef CONFIG_FSL_DPA_CHECKING ++ atomic_inc(&pool->in_use); ++#endif ++ return ret; ++} ++EXPORT_SYMBOL(bman_acquire); ++ ++int bman_flush_stockpile(struct bman_pool *pool, u32 flags) ++{ ++ u8 num; ++ int ret; ++ ++ while (pool->sp_fill) { ++ num = ((pool->sp_fill > 8) ? 8 : pool->sp_fill); ++ ret = __bman_release(pool, pool->sp + (pool->sp_fill - num), ++ num, flags); ++ if (ret) ++ return ret; ++ pool->sp_fill -= num; ++ } ++ return 0; ++} ++EXPORT_SYMBOL(bman_flush_stockpile); ++ ++int bman_query_pools(struct bm_pool_state *state) ++{ ++ struct bman_portal *p = get_affine_portal(); ++ struct bm_mc_result *mcr; ++ __maybe_unused unsigned long irqflags; ++ ++ PORTAL_IRQ_LOCK(p, irqflags); ++ bm_mc_start(&p->p); ++ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY); ++ while (!(mcr = bm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) == BM_MCR_VERB_CMD_QUERY); ++ *state = mcr->query; ++ state->as.state.__state[0] = be32_to_cpu(state->as.state.__state[0]); ++ state->as.state.__state[1] = be32_to_cpu(state->as.state.__state[1]); ++ state->ds.state.__state[0] = be32_to_cpu(state->ds.state.__state[0]); ++ state->ds.state.__state[1] = be32_to_cpu(state->ds.state.__state[1]); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ return 0; ++} ++EXPORT_SYMBOL(bman_query_pools); ++ ++#ifdef CONFIG_FSL_BMAN_CONFIG ++u32 bman_query_free_buffers(struct bman_pool *pool) ++{ ++ return bm_pool_free_buffers(pool->params.bpid); ++} ++EXPORT_SYMBOL(bman_query_free_buffers); ++ ++int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds) ++{ ++ u32 bpid; ++ ++ bpid = bman_get_params(pool)->bpid; ++ ++ return bm_pool_set(bpid, thresholds); ++} ++EXPORT_SYMBOL(bman_update_pool_thresholds); ++#endif ++ ++int bman_shutdown_pool(u32 bpid) ++{ ++ struct bman_portal *p = get_affine_portal(); ++ __maybe_unused unsigned long irqflags; ++ int ret; ++ ++ PORTAL_IRQ_LOCK(p, irqflags); ++ ret = bm_shutdown_pool(&p->p, bpid); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ return ret; ++} ++EXPORT_SYMBOL(bman_shutdown_pool); ++ ++const struct bm_portal_config *bman_get_bm_portal_config( ++ struct bman_portal *portal) ++{ ++ return portal->sharing_redirect ? NULL : portal->config; ++} +--- /dev/null ++++ b/drivers/staging/fsl_qbman/bman_low.h +@@ -0,0 +1,565 @@ ++/* Copyright 2008-2011 Freescale Semiconductor, Inc. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "bman_private.h" ++ ++/***************************/ ++/* Portal register assists */ ++/***************************/ ++ ++#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64) ++ ++/* Cache-inhibited register offsets */ ++#define BM_REG_RCR_PI_CINH 0x0000 ++#define BM_REG_RCR_CI_CINH 0x0004 ++#define BM_REG_RCR_ITR 0x0008 ++#define BM_REG_CFG 0x0100 ++#define BM_REG_SCN(n) (0x0200 + ((n) << 2)) ++#define BM_REG_ISR 0x0e00 ++#define BM_REG_IIR 0x0e0c ++ ++/* Cache-enabled register offsets */ ++#define BM_CL_CR 0x0000 ++#define BM_CL_RR0 0x0100 ++#define BM_CL_RR1 0x0140 ++#define BM_CL_RCR 0x1000 ++#define BM_CL_RCR_PI_CENA 0x3000 ++#define BM_CL_RCR_CI_CENA 0x3100 ++ ++#endif ++ ++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) ++ ++/* Cache-inhibited register offsets */ ++#define BM_REG_RCR_PI_CINH 0x3000 ++#define BM_REG_RCR_CI_CINH 0x3100 ++#define BM_REG_RCR_ITR 0x3200 ++#define BM_REG_CFG 0x3300 ++#define BM_REG_SCN(n) (0x3400 + ((n) << 6)) ++#define BM_REG_ISR 0x3e00 ++#define BM_REG_IIR 0x3ec0 ++ ++/* Cache-enabled register offsets */ ++#define BM_CL_CR 0x0000 ++#define BM_CL_RR0 0x0100 ++#define BM_CL_RR1 0x0140 ++#define BM_CL_RCR 0x1000 ++#define BM_CL_RCR_PI_CENA 0x3000 ++#define BM_CL_RCR_CI_CENA 0x3100 ++ ++#endif ++ ++/* BTW, the drivers (and h/w programming model) already obtain the required ++ * synchronisation for portal accesses via lwsync(), hwsync(), and ++ * data-dependencies. Use of barrier()s or other order-preserving primitives ++ * simply degrade performance. Hence the use of the __raw_*() interfaces, which ++ * simply ensure that the compiler treats the portal registers as volatile (ie. ++ * non-coherent). */ ++ ++/* Cache-inhibited register access. 
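The accessor macros that follow combine two things: a volatile access through the mapped address (the __raw_*() point made above) and an explicit big-endian conversion. A user-space equivalent, assuming glibc's endian.h for htobe32()/be32toh() and a plain array standing in for the cache-inhibited register page:

#include <stdint.h>
#include <stdio.h>
#include <endian.h>

static uint32_t regs[0x1000 / 4];	/* fake register page */

static uint32_t reg_in(volatile void *base, unsigned int off)
{
	return be32toh(*(volatile uint32_t *)((volatile char *)base + off));
}

static void reg_out(volatile void *base, unsigned int off, uint32_t val)
{
	*(volatile uint32_t *)((volatile char *)base + off) = htobe32(val);
}

#define REG_CFG 0x0100

int main(void)
{
	reg_out(regs, REG_CFG, 0xdeadbeef);
	printf("CFG reads back as %#x\n", (unsigned int)reg_in(regs, REG_CFG));
	return 0;
}

The volatile qualifier does the same job as __raw_readl()/__raw_writel() in the driver: it stops the compiler caching or reordering the accesses, without adding the hardware barriers the portal protocol deliberately avoids.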
*/ ++#define __bm_in(bm, o) be32_to_cpu(__raw_readl((bm)->addr_ci + (o))) ++#define __bm_out(bm, o, val) __raw_writel(cpu_to_be32(val), \ ++ (bm)->addr_ci + (o)); ++#define bm_in(reg) __bm_in(&portal->addr, BM_REG_##reg) ++#define bm_out(reg, val) __bm_out(&portal->addr, BM_REG_##reg, val) ++ ++/* Cache-enabled (index) register access */ ++#define __bm_cl_touch_ro(bm, o) dcbt_ro((bm)->addr_ce + (o)) ++#define __bm_cl_touch_rw(bm, o) dcbt_rw((bm)->addr_ce + (o)) ++#define __bm_cl_in(bm, o) be32_to_cpu(__raw_readl((bm)->addr_ce + (o))) ++#define __bm_cl_out(bm, o, val) \ ++ do { \ ++ u32 *__tmpclout = (bm)->addr_ce + (o); \ ++ __raw_writel(cpu_to_be32(val), __tmpclout); \ ++ dcbf(__tmpclout); \ ++ } while (0) ++#define __bm_cl_invalidate(bm, o) dcbi((bm)->addr_ce + (o)) ++#define bm_cl_touch_ro(reg) __bm_cl_touch_ro(&portal->addr, BM_CL_##reg##_CENA) ++#define bm_cl_touch_rw(reg) __bm_cl_touch_rw(&portal->addr, BM_CL_##reg##_CENA) ++#define bm_cl_in(reg) __bm_cl_in(&portal->addr, BM_CL_##reg##_CENA) ++#define bm_cl_out(reg, val) __bm_cl_out(&portal->addr, BM_CL_##reg##_CENA, val) ++#define bm_cl_invalidate(reg)\ ++ __bm_cl_invalidate(&portal->addr, BM_CL_##reg##_CENA) ++ ++/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf ++ * analysis, look at using the "extra" bit in the ring index registers to avoid ++ * cyclic issues. */ ++static inline u8 bm_cyc_diff(u8 ringsize, u8 first, u8 last) ++{ ++ /* 'first' is included, 'last' is excluded */ ++ if (first <= last) ++ return last - first; ++ return ringsize + last - first; ++} ++ ++/* Portal modes. ++ * Enum types; ++ * pmode == production mode ++ * cmode == consumption mode, ++ * Enum values use 3 letter codes. First letter matches the portal mode, ++ * remaining two letters indicate; ++ * ci == cache-inhibited portal register ++ * ce == cache-enabled portal register ++ * vb == in-band valid-bit (cache-enabled) ++ */ ++enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */ ++ bm_rcr_pci = 0, /* PI index, cache-inhibited */ ++ bm_rcr_pce = 1, /* PI index, cache-enabled */ ++ bm_rcr_pvb = 2 /* valid-bit */ ++}; ++enum bm_rcr_cmode { /* s/w-only */ ++ bm_rcr_cci, /* CI index, cache-inhibited */ ++ bm_rcr_cce /* CI index, cache-enabled */ ++}; ++ ++ ++/* ------------------------- */ ++/* --- Portal structures --- */ ++ ++#define BM_RCR_SIZE 8 ++ ++struct bm_rcr { ++ struct bm_rcr_entry *ring, *cursor; ++ u8 ci, available, ithresh, vbit; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ u32 busy; ++ enum bm_rcr_pmode pmode; ++ enum bm_rcr_cmode cmode; ++#endif ++}; ++ ++struct bm_mc { ++ struct bm_mc_command *cr; ++ struct bm_mc_result *rr; ++ u8 rridx, vbit; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ enum { ++ /* Can only be _mc_start()ed */ ++ mc_idle, ++ /* Can only be _mc_commit()ed or _mc_abort()ed */ ++ mc_user, ++ /* Can only be _mc_retry()ed */ ++ mc_hw ++ } state; ++#endif ++}; ++ ++struct bm_addr { ++ void __iomem *addr_ce; /* cache-enabled */ ++ void __iomem *addr_ci; /* cache-inhibited */ ++}; ++ ++struct bm_portal { ++ struct bm_addr addr; ++ struct bm_rcr rcr; ++ struct bm_mc mc; ++ struct bm_portal_config config; ++} ____cacheline_aligned; ++ ++ ++/* --------------- */ ++/* --- RCR API --- */ ++ ++/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */ ++#define RCR_CARRYCLEAR(p) \ ++ (void *)((unsigned long)(p) & (~(unsigned long)(BM_RCR_SIZE << 6))) ++ ++/* Bit-wise logic to convert a ring pointer to a ring index */ ++static inline u8 RCR_PTR2IDX(struct bm_rcr_entry *e) ++{ ++ return ((uintptr_t)e >> 6) & (BM_RCR_SIZE - 
1); ++} ++ ++/* Increment the 'cursor' ring pointer, taking 'vbit' into account */ ++static inline void RCR_INC(struct bm_rcr *rcr) ++{ ++ /* NB: this is odd-looking, but experiments show that it generates ++ * fast code with essentially no branching overheads. We increment to ++ * the next RCR pointer and handle overflow and 'vbit'. */ ++ struct bm_rcr_entry *partial = rcr->cursor + 1; ++ rcr->cursor = RCR_CARRYCLEAR(partial); ++ if (partial != rcr->cursor) ++ rcr->vbit ^= BM_RCR_VERB_VBIT; ++} ++ ++static inline int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode, ++ __maybe_unused enum bm_rcr_cmode cmode) ++{ ++ /* This use of 'register', as well as all other occurrences, is because ++ * it has been observed to generate much faster code with gcc than is ++ * otherwise the case. */ ++ register struct bm_rcr *rcr = &portal->rcr; ++ u32 cfg; ++ u8 pi; ++ ++ rcr->ring = portal->addr.addr_ce + BM_CL_RCR; ++ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1); ++ ++ pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1); ++ rcr->cursor = rcr->ring + pi; ++ rcr->vbit = (bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ? BM_RCR_VERB_VBIT : 0; ++ rcr->available = BM_RCR_SIZE - 1 ++ - bm_cyc_diff(BM_RCR_SIZE, rcr->ci, pi); ++ rcr->ithresh = bm_in(RCR_ITR); ++#ifdef CONFIG_FSL_DPA_CHECKING ++ rcr->busy = 0; ++ rcr->pmode = pmode; ++ rcr->cmode = cmode; ++#endif ++ cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */ ++ bm_out(CFG, cfg); ++ return 0; ++} ++ ++static inline void bm_rcr_finish(struct bm_portal *portal) ++{ ++ register struct bm_rcr *rcr = &portal->rcr; ++ u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1); ++ u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1); ++ DPA_ASSERT(!rcr->busy); ++ if (pi != RCR_PTR2IDX(rcr->cursor)) ++ pr_crit("losing uncommited RCR entries\n"); ++ if (ci != rcr->ci) ++ pr_crit("missing existing RCR completions\n"); ++ if (rcr->ci != RCR_PTR2IDX(rcr->cursor)) ++ pr_crit("RCR destroyed unquiesced\n"); ++} ++ ++static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal) ++{ ++ register struct bm_rcr *rcr = &portal->rcr; ++ DPA_ASSERT(!rcr->busy); ++ if (!rcr->available) ++ return NULL; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ rcr->busy = 1; ++#endif ++#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64) ++ dcbz_64(rcr->cursor); ++#endif ++ return rcr->cursor; ++} ++ ++static inline void bm_rcr_abort(struct bm_portal *portal) ++{ ++ __maybe_unused register struct bm_rcr *rcr = &portal->rcr; ++ DPA_ASSERT(rcr->busy); ++#ifdef CONFIG_FSL_DPA_CHECKING ++ rcr->busy = 0; ++#endif ++} ++ ++static inline struct bm_rcr_entry *bm_rcr_pend_and_next( ++ struct bm_portal *portal, u8 myverb) ++{ ++ register struct bm_rcr *rcr = &portal->rcr; ++ DPA_ASSERT(rcr->busy); ++ DPA_ASSERT(rcr->pmode != bm_rcr_pvb); ++ if (rcr->available == 1) ++ return NULL; ++ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit; ++ dcbf_64(rcr->cursor); ++ RCR_INC(rcr); ++ rcr->available--; ++#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64) ++ dcbz_64(rcr->cursor); ++#endif ++ return rcr->cursor; ++} ++ ++static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb) ++{ ++ register struct bm_rcr *rcr = &portal->rcr; ++ DPA_ASSERT(rcr->busy); ++ DPA_ASSERT(rcr->pmode == bm_rcr_pci); ++ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit; ++ RCR_INC(rcr); ++ rcr->available--; ++ hwsync(); ++ bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor)); ++#ifdef CONFIG_FSL_DPA_CHECKING ++ rcr->busy = 0; ++#endif ++} ++ ++static inline void bm_rcr_pce_prefetch(struct bm_portal 
*portal) ++{ ++ __maybe_unused register struct bm_rcr *rcr = &portal->rcr; ++ DPA_ASSERT(rcr->pmode == bm_rcr_pce); ++ bm_cl_invalidate(RCR_PI); ++ bm_cl_touch_rw(RCR_PI); ++} ++ ++static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb) ++{ ++ register struct bm_rcr *rcr = &portal->rcr; ++ DPA_ASSERT(rcr->busy); ++ DPA_ASSERT(rcr->pmode == bm_rcr_pce); ++ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit; ++ RCR_INC(rcr); ++ rcr->available--; ++ lwsync(); ++ bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor)); ++#ifdef CONFIG_FSL_DPA_CHECKING ++ rcr->busy = 0; ++#endif ++} ++ ++static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb) ++{ ++ register struct bm_rcr *rcr = &portal->rcr; ++ struct bm_rcr_entry *rcursor; ++ DPA_ASSERT(rcr->busy); ++ DPA_ASSERT(rcr->pmode == bm_rcr_pvb); ++ lwsync(); ++ rcursor = rcr->cursor; ++ rcursor->__dont_write_directly__verb = myverb | rcr->vbit; ++ dcbf_64(rcursor); ++ RCR_INC(rcr); ++ rcr->available--; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ rcr->busy = 0; ++#endif ++} ++ ++static inline u8 bm_rcr_cci_update(struct bm_portal *portal) ++{ ++ register struct bm_rcr *rcr = &portal->rcr; ++ u8 diff, old_ci = rcr->ci; ++ DPA_ASSERT(rcr->cmode == bm_rcr_cci); ++ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1); ++ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci); ++ rcr->available += diff; ++ return diff; ++} ++ ++static inline void bm_rcr_cce_prefetch(struct bm_portal *portal) ++{ ++ __maybe_unused register struct bm_rcr *rcr = &portal->rcr; ++ DPA_ASSERT(rcr->cmode == bm_rcr_cce); ++ bm_cl_touch_ro(RCR_CI); ++} ++ ++static inline u8 bm_rcr_cce_update(struct bm_portal *portal) ++{ ++ register struct bm_rcr *rcr = &portal->rcr; ++ u8 diff, old_ci = rcr->ci; ++ DPA_ASSERT(rcr->cmode == bm_rcr_cce); ++ rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1); ++ bm_cl_invalidate(RCR_CI); ++ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci); ++ rcr->available += diff; ++ return diff; ++} ++ ++static inline u8 bm_rcr_get_ithresh(struct bm_portal *portal) ++{ ++ register struct bm_rcr *rcr = &portal->rcr; ++ return rcr->ithresh; ++} ++ ++static inline void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh) ++{ ++ register struct bm_rcr *rcr = &portal->rcr; ++ rcr->ithresh = ithresh; ++ bm_out(RCR_ITR, ithresh); ++} ++ ++static inline u8 bm_rcr_get_avail(struct bm_portal *portal) ++{ ++ register struct bm_rcr *rcr = &portal->rcr; ++ return rcr->available; ++} ++ ++static inline u8 bm_rcr_get_fill(struct bm_portal *portal) ++{ ++ register struct bm_rcr *rcr = &portal->rcr; ++ return BM_RCR_SIZE - 1 - rcr->available; ++} ++ ++ ++/* ------------------------------ */ ++/* --- Management command API --- */ ++ ++static inline int bm_mc_init(struct bm_portal *portal) ++{ ++ register struct bm_mc *mc = &portal->mc; ++ mc->cr = portal->addr.addr_ce + BM_CL_CR; ++ mc->rr = portal->addr.addr_ce + BM_CL_RR0; ++ mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) & ++ BM_MCC_VERB_VBIT) ? 0 : 1; ++ mc->vbit = mc->rridx ? 
BM_MCC_VERB_VBIT : 0; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ mc->state = mc_idle; ++#endif ++ return 0; ++} ++ ++static inline void bm_mc_finish(struct bm_portal *portal) ++{ ++ __maybe_unused register struct bm_mc *mc = &portal->mc; ++ DPA_ASSERT(mc->state == mc_idle); ++#ifdef CONFIG_FSL_DPA_CHECKING ++ if (mc->state != mc_idle) ++ pr_crit("Losing incomplete MC command\n"); ++#endif ++} ++ ++static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal) ++{ ++ register struct bm_mc *mc = &portal->mc; ++ DPA_ASSERT(mc->state == mc_idle); ++#ifdef CONFIG_FSL_DPA_CHECKING ++ mc->state = mc_user; ++#endif ++#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64) ++ dcbz_64(mc->cr); ++#endif ++ return mc->cr; ++} ++ ++static inline void bm_mc_abort(struct bm_portal *portal) ++{ ++ __maybe_unused register struct bm_mc *mc = &portal->mc; ++ DPA_ASSERT(mc->state == mc_user); ++#ifdef CONFIG_FSL_DPA_CHECKING ++ mc->state = mc_idle; ++#endif ++} ++ ++static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb) ++{ ++ register struct bm_mc *mc = &portal->mc; ++ struct bm_mc_result *rr = mc->rr + mc->rridx; ++ DPA_ASSERT(mc->state == mc_user); ++ lwsync(); ++ mc->cr->__dont_write_directly__verb = myverb | mc->vbit; ++ dcbf(mc->cr); ++ dcbit_ro(rr); ++#ifdef CONFIG_FSL_DPA_CHECKING ++ mc->state = mc_hw; ++#endif ++} ++ ++static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal) ++{ ++ register struct bm_mc *mc = &portal->mc; ++ struct bm_mc_result *rr = mc->rr + mc->rridx; ++ DPA_ASSERT(mc->state == mc_hw); ++ /* The inactive response register's verb byte always returns zero until ++ * its command is submitted and completed. This includes the valid-bit, ++ * in case you were wondering... */ ++ if (!__raw_readb(&rr->verb)) { ++ dcbit_ro(rr); ++ return NULL; ++ } ++ mc->rridx ^= 1; ++ mc->vbit ^= BM_MCC_VERB_VBIT; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ mc->state = mc_idle; ++#endif ++ return rr; ++} ++ ++ ++/* ------------------------------------- */ ++/* --- Portal interrupt register API --- */ ++ ++static inline int bm_isr_init(__always_unused struct bm_portal *portal) ++{ ++ return 0; ++} ++ ++static inline void bm_isr_finish(__always_unused struct bm_portal *portal) ++{ ++} ++ ++#define SCN_REG(bpid) BM_REG_SCN((bpid) / 32) ++#define SCN_BIT(bpid) (0x80000000 >> (bpid & 31)) ++static inline void bm_isr_bscn_mask(struct bm_portal *portal, u8 bpid, ++ int enable) ++{ ++ u32 val; ++ DPA_ASSERT(bpid < bman_pool_max); ++ /* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */ ++ val = __bm_in(&portal->addr, SCN_REG(bpid)); ++ if (enable) ++ val |= SCN_BIT(bpid); ++ else ++ val &= ~SCN_BIT(bpid); ++ __bm_out(&portal->addr, SCN_REG(bpid), val); ++} ++ ++static inline u32 __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n) ++{ ++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) ++ return __bm_in(&portal->addr, BM_REG_ISR + (n << 6)); ++#else ++ return __bm_in(&portal->addr, BM_REG_ISR + (n << 2)); ++#endif ++} ++ ++static inline void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n, ++ u32 val) ++{ ++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) ++ __bm_out(&portal->addr, BM_REG_ISR + (n << 6), val); ++#else ++ __bm_out(&portal->addr, BM_REG_ISR + (n << 2), val); ++#endif ++} ++ ++/* Buffer Pool Cleanup */ ++static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid) ++{ ++ struct bm_mc_command *bm_cmd; ++ struct bm_mc_result *bm_res; ++ ++ int aq_count = 0; ++ bool stop = false; ++ while (!stop) { ++ /* Acquire buffers until empty */ ++ bm_cmd = 
bm_mc_start(p);
++ bm_cmd->acquire.bpid = bpid;
++ bm_mc_commit(p, BM_MCC_VERB_CMD_ACQUIRE | 1);
++ while (!(bm_res = bm_mc_result(p)))
++ cpu_relax();
++ if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
++ /* Pool is empty */
++ /* TBD: should we do a few extra iterations in
++ case some other blocks keep buffers 'on deck',
++ which may also be problematic */
++ stop = true;
++ } else
++ ++aq_count;
++ }
++ return 0;
++}
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/bman_private.h
+@@ -0,0 +1,166 @@
++/* Copyright 2008-2012 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "dpa_sys.h"
++#include
++
++/* Revision info (for errata and feature handling) */
++#define BMAN_REV10 0x0100
++#define BMAN_REV20 0x0200
++#define BMAN_REV21 0x0201
++#define QBMAN_ANY_PORTAL_IDX 0xffffffff
++extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */
++
++/*
++ * Global variable: the maximum number of buffer pools supported by this
++ * Bman version
++ */
++extern u16 bman_pool_max;
++
++/* used by CCSR and portal interrupt code */
++enum bm_isr_reg {
++ bm_isr_status = 0,
++ bm_isr_enable = 1,
++ bm_isr_disable = 2,
++ bm_isr_inhibit = 3
++};
++
++struct bm_portal_config {
++ /* Corenet portal addresses;
++ * [0]==cache-enabled, [1]==cache-inhibited.
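++ * Broadly, the cache-enabled mapping backs the rings and the
++ * management command/response buffers (see bm_mc_init() above), while
++ * plain registers go through the cache-inhibited mapping.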
 */
++ __iomem void *addr_virt[2];
++ struct resource addr_phys[2];
++ /* Allow these to be joined in lists */
++ struct list_head list;
++ /* User-visible portal configuration settings */
++ struct bman_portal_config public_cfg;
++ /* power management saved data */
++ u32 saved_isdr;
++};
++
++#ifdef CONFIG_FSL_BMAN_CONFIG
++/* Hooks from bman_driver.c to bman_config.c */
++int bman_init_ccsr(struct device_node *node);
++#endif
++
++/* Hooks from bman_driver.c into bman_high.c */
++struct bman_portal *bman_create_portal(
++ struct bman_portal *portal,
++ const struct bm_portal_config *config);
++struct bman_portal *bman_create_affine_portal(
++ const struct bm_portal_config *config);
++struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
++ int cpu);
++void bman_destroy_portal(struct bman_portal *bm);
++
++const struct bm_portal_config *bman_destroy_affine_portal(void);
++
++/* Hooks from fsl_usdpaa.c to bman_driver.c */
++struct bm_portal_config *bm_get_unused_portal(void);
++struct bm_portal_config *bm_get_unused_portal_idx(uint32_t idx);
++void bm_put_unused_portal(struct bm_portal_config *pcfg);
++void bm_set_liodns(struct bm_portal_config *pcfg);
++
++/* Pool logic in the portal driver, during initialisation, needs to know if
++ * there's access to CCSR or not (if not, it'll cripple the pool allocator). */
++#ifdef CONFIG_FSL_BMAN_CONFIG
++int bman_have_ccsr(void);
++#else
++#define bman_have_ccsr() 0
++#endif
++
++/* Stockpile build constants. The _LOW value: when bman_acquire() is called and
++ * the stockpile fill-level is <= _LOW, an acquire is attempted from h/w but it
++ * might fail (if the buffer pool is depleted). So this value provides some
++ * "stagger" in that the bman_acquire() function will only fail if lots of bufs
++ * are requested at once or if h/w has been tested a couple of times without
++ * luck. The _HIGH value: when bman_release() is called and the stockpile
++ * fill-level is >= _HIGH, a release is attempted to h/w but it might fail (if
++ * the release ring is full). So this value provides some "stagger" so that
++ * ring-access is retried a couple of times prior to the API returning a
++ * failure. The following *must* be true:
++ * BMAN_STOCKPILE_HIGH-BMAN_STOCKPILE_LOW > 8
++ * (to avoid thrashing)
++ * BMAN_STOCKPILE_SZ >= 16
++ * (as the release logic expects to either send 8 buffers to hw prior to
++ * adding the given buffers to the stockpile or add the buffers to the
++ * stockpile before sending 8 to hw, as the API must be an all-or-nothing
++ * success/fail.)
++ */
++#define BMAN_STOCKPILE_SZ 16u /* number of bufs in per-pool cache */
++#define BMAN_STOCKPILE_LOW 2u /* when fill is <= this, acquire from hw */
++#define BMAN_STOCKPILE_HIGH 14u /* when fill is >= this, release to hw */
++
++/*************************************************/
++/* BMan s/w corenet portal, low-level i/face */
++/*************************************************/
++
++/* Used by all portal interrupt registers except 'inhibit'.
++ * This mask contains all the "irqsource" bits visible to API users.
++ */
++#define BM_PIRQ_VISIBLE (BM_PIRQ_RCRI | BM_PIRQ_BSCN)
++
++/* These are bm_<reg>_<verb>(). So for example, bm_disable_write() means "write
++ * the disable register" rather than "disable the ability to write".
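++ * Likewise bm_isr_status_clear() writes its mask to the status
++ * register, and bm_isr_inhibit()/bm_isr_uninhibit() write 1/0 to the
++ * inhibit register.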
 */
++#define bm_isr_status_read(bm) __bm_isr_read(bm, bm_isr_status)
++#define bm_isr_status_clear(bm, m) __bm_isr_write(bm, bm_isr_status, m)
++#define bm_isr_enable_read(bm) __bm_isr_read(bm, bm_isr_enable)
++#define bm_isr_enable_write(bm, v) __bm_isr_write(bm, bm_isr_enable, v)
++#define bm_isr_disable_read(bm) __bm_isr_read(bm, bm_isr_disable)
++#define bm_isr_disable_write(bm, v) __bm_isr_write(bm, bm_isr_disable, v)
++#define bm_isr_inhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 1)
++#define bm_isr_uninhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 0)
++
++#ifdef CONFIG_FSL_BMAN_CONFIG
++/* Set depletion thresholds associated with a buffer pool. Requires that the
++ * operating system have access to Bman CCSR (ie. compiled in support and
++ * run-time access courtesy of the device-tree). */
++int bm_pool_set(u32 bpid, const u32 *thresholds);
++#define BM_POOL_THRESH_SW_ENTER 0
++#define BM_POOL_THRESH_SW_EXIT 1
++#define BM_POOL_THRESH_HW_ENTER 2
++#define BM_POOL_THRESH_HW_EXIT 3
++
++/* Read the free buffer count for a given buffer pool */
++u32 bm_pool_free_buffers(u32 bpid);
++
++__init int bman_init(void);
++__init int bman_resource_init(void);
++
++const struct bm_portal_config *bman_get_bm_portal_config(
++ struct bman_portal *portal);
++
++/* power management */
++#ifdef CONFIG_SUSPEND
++void suspend_unused_bportal(void);
++void resume_unused_bportal(void);
++#endif
++
++#endif /* CONFIG_FSL_BMAN_CONFIG */
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/bman_test.c
+@@ -0,0 +1,56 @@
++/* Copyright 2008-2011 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */ ++ ++#include "bman_test.h" ++ ++MODULE_AUTHOR("Geoff Thorpe"); ++MODULE_LICENSE("Dual BSD/GPL"); ++MODULE_DESCRIPTION("Bman testing"); ++ ++static int test_init(void) ++{ ++#ifdef CONFIG_FSL_BMAN_TEST_HIGH ++ int loop = 1; ++ while (loop--) ++ bman_test_high(); ++#endif ++#ifdef CONFIG_FSL_BMAN_TEST_THRESH ++ bman_test_thresh(); ++#endif ++ return 0; ++} ++ ++static void test_exit(void) ++{ ++} ++ ++module_init(test_init); ++module_exit(test_exit); +--- /dev/null ++++ b/drivers/staging/fsl_qbman/bman_test.h +@@ -0,0 +1,44 @@ ++/* Copyright 2008-2011 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++void bman_test_high(void); ++void bman_test_thresh(void); +--- /dev/null ++++ b/drivers/staging/fsl_qbman/bman_test_high.c +@@ -0,0 +1,183 @@ ++/* Copyright 2008-2011 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. 
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "bman_test.h"
++#include "bman_private.h"
++
++/*************/
++/* constants */
++/*************/
++
++#define PORTAL_OPAQUE ((void *)0xf00dbeef)
++#define POOL_OPAQUE ((void *)0xdeadabba)
++#define NUM_BUFS 93
++#define LOOPS 3
++#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
++
++/***************/
++/* global vars */
++/***************/
++
++static struct bman_pool *pool;
++static int depleted;
++static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
++static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
++static int bufs_received;
++
++/* Predeclare the callback so we can instantiate pool parameters */
++static void depletion_cb(struct bman_portal *, struct bman_pool *, void *, int);
++
++/**********************/
++/* internal functions */
++/**********************/
++
++static void bufs_init(void)
++{
++ int i;
++ for (i = 0; i < NUM_BUFS; i++)
++ bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i);
++ bufs_received = 0;
++}
++
++static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
++{
++ if ((bman_ip_rev == BMAN_REV20) || (bman_ip_rev == BMAN_REV21)) {
++
++ /* On SoCs with Bman revision 2.0, Bman only respects the 40
++ * LS-bits of buffer addresses, masking off the upper 8-bits on
++ * release commands. The API provides for 48-bit addresses
++ * because some SoCs support all 48-bits. When generating
++ * garbage addresses for testing, we either need to zero the
++ * upper 8-bits when releasing to Bman (otherwise we'll be
++ * disappointed when the buffers we acquire back from Bman
++ * don't match), or we need to mask the upper 8-bits off when
++ * comparing. We do the latter.
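++ * E.g. a buffer released as 0xfedc01234567 reads back as
++ * 0xdc01234567 on rev 2.x parts, so both sides of the comparison are
++ * first ANDed with BMAN_TOKEN_MASK (the low 40 bits).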
++ */ ++ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) ++ < (bm_buffer_get64(b) & BMAN_TOKEN_MASK)) ++ return -1; ++ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) ++ > (bm_buffer_get64(b) & BMAN_TOKEN_MASK)) ++ return 1; ++ } else { ++ if (bm_buffer_get64(a) < bm_buffer_get64(b)) ++ return -1; ++ if (bm_buffer_get64(a) > bm_buffer_get64(b)) ++ return 1; ++ } ++ ++ return 0; ++} ++ ++static void bufs_confirm(void) ++{ ++ int i, j; ++ for (i = 0; i < NUM_BUFS; i++) { ++ int matches = 0; ++ for (j = 0; j < NUM_BUFS; j++) ++ if (!bufs_cmp(&bufs_in[i], &bufs_out[j])) ++ matches++; ++ BUG_ON(matches != 1); ++ } ++} ++ ++/********/ ++/* test */ ++/********/ ++ ++static void depletion_cb(struct bman_portal *__portal, struct bman_pool *__pool, ++ void *pool_ctx, int __depleted) ++{ ++ BUG_ON(__pool != pool); ++ BUG_ON(pool_ctx != POOL_OPAQUE); ++ depleted = __depleted; ++} ++ ++void bman_test_high(void) ++{ ++ struct bman_pool_params pparams = { ++ .flags = BMAN_POOL_FLAG_DEPLETION | BMAN_POOL_FLAG_DYNAMIC_BPID, ++ .cb = depletion_cb, ++ .cb_ctx = POOL_OPAQUE, ++ }; ++ int i, loops = LOOPS; ++ struct bm_buffer tmp_buf; ++ ++ bufs_init(); ++ ++ pr_info("BMAN: --- starting high-level test ---\n"); ++ ++ pool = bman_new_pool(&pparams); ++ BUG_ON(!pool); ++ ++ /*******************/ ++ /* Release buffers */ ++ /*******************/ ++do_loop: ++ i = 0; ++ while (i < NUM_BUFS) { ++ u32 flags = BMAN_RELEASE_FLAG_WAIT; ++ int num = 8; ++ if ((i + num) > NUM_BUFS) ++ num = NUM_BUFS - i; ++ if ((i + num) == NUM_BUFS) ++ flags |= BMAN_RELEASE_FLAG_WAIT_SYNC; ++ if (bman_release(pool, bufs_in + i, num, flags)) ++ panic("bman_release() failed\n"); ++ i += num; ++ } ++ ++ /*******************/ ++ /* Acquire buffers */ ++ /*******************/ ++ while (i > 0) { ++ int tmp, num = 8; ++ if (num > i) ++ num = i; ++ tmp = bman_acquire(pool, bufs_out + i - num, num, 0); ++ BUG_ON(tmp != num); ++ i -= num; ++ } ++ ++ i = bman_acquire(pool, &tmp_buf, 1, 0); ++ BUG_ON(i > 0); ++ ++ bufs_confirm(); ++ ++ if (--loops) ++ goto do_loop; ++ ++ /************/ ++ /* Clean up */ ++ /************/ ++ bman_free_pool(pool); ++ pr_info("BMAN: --- finished high-level test ---\n"); ++} +--- /dev/null ++++ b/drivers/staging/fsl_qbman/bman_test_thresh.c +@@ -0,0 +1,196 @@ ++/* Copyright 2010-2011 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "bman_test.h"
++
++/* Test constants */
++#define TEST_NUMBUFS 129728
++#define TEST_EXIT 129536
++#define TEST_ENTRY 129024
++
++struct affine_test_data {
++ struct task_struct *t;
++ int cpu;
++ int expect_affinity;
++ int drain;
++ int num_enter;
++ int num_exit;
++ struct list_head node;
++ struct completion wakethread;
++ struct completion wakeparent;
++};
++
++static void cb_depletion(struct bman_portal *portal,
++ struct bman_pool *pool,
++ void *opaque,
++ int depleted)
++{
++ struct affine_test_data *data = opaque;
++ int c = smp_processor_id();
++ pr_info("cb_depletion: bpid=%d, depleted=%d, cpu=%d, original=%d\n",
++ bman_get_params(pool)->bpid, !!depleted, c, data->cpu);
++ /* We should be executing on the CPU of the thread that owns the pool,
++ * provided that CPU has an affine portal (ie. it isn't slaved). */
++ BUG_ON((c != data->cpu) && data->expect_affinity);
++ BUG_ON((c == data->cpu) && !data->expect_affinity);
++ if (depleted)
++ data->num_enter++;
++ else
++ data->num_exit++;
++}
++
++/* Params used to set up a pool; this also dynamically allocates a BPID */
++static const struct bman_pool_params params_nocb = {
++ .flags = BMAN_POOL_FLAG_DYNAMIC_BPID | BMAN_POOL_FLAG_THRESH,
++ .thresholds = { TEST_ENTRY, TEST_EXIT, 0, 0 }
++};
++
++/* Params used to set up each cpu's pool with callbacks enabled */
++static struct bman_pool_params params_cb = {
++ .bpid = 0, /* will be replaced to match pool_nocb */
++ .flags = BMAN_POOL_FLAG_DEPLETION,
++ .cb = cb_depletion
++};
++
++static struct bman_pool *pool_nocb;
++static LIST_HEAD(threads);
++
++static int affine_test(void *__data)
++{
++ struct bman_pool *pool;
++ struct affine_test_data *data = __data;
++ struct bman_pool_params my_params = params_cb;
++
++ pr_info("thread %d: starting\n", data->cpu);
++ /* create the pool */
++ my_params.cb_ctx = data;
++ pool = bman_new_pool(&my_params);
++ BUG_ON(!pool);
++ complete(&data->wakeparent);
++ wait_for_completion(&data->wakethread);
++ init_completion(&data->wakethread);
++
++ /* if we're the drainer, we get signalled for that */
++ if (data->drain) {
++ struct bm_buffer buf;
++ int ret;
++ pr_info("thread %d: draining...\n", data->cpu);
++ do {
++ ret = bman_acquire(pool, &buf, 1, 0);
++ } while (ret > 0);
++ pr_info("thread %d: draining done.\n", data->cpu);
++ complete(&data->wakeparent);
++ wait_for_completion(&data->wakethread);
++ init_completion(&data->wakethread);
++ }
++
++ /* cleanup */
++ bman_free_pool(pool);
++ while (!kthread_should_stop())
++ cpu_relax();
++ pr_info("thread %d: exiting\n", data->cpu);
++ return 0;
++}
++
++static struct affine_test_data *start_affine_test(int cpu, int drain)
++{
++ struct affine_test_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
++
++ if (!data)
++ return NULL;
++ data->cpu = cpu;
++ data->expect_affinity = cpumask_test_cpu(cpu, bman_affine_cpus());
++ data->drain = drain;
++ data->num_enter = 0;
++ data->num_exit = 0;
++ init_completion(&data->wakethread);
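/* The parent and each thread ping-pong on this pair of completions:
++ * one side blocks on its own completion while the other signals it at
++ * each phase boundary, so the phases cannot overlap. */
++ 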
init_completion(&data->wakeparent); ++ list_add_tail(&data->node, &threads); ++ data->t = kthread_create(affine_test, data, "threshtest%d", cpu); ++ BUG_ON(IS_ERR(data->t)); ++ kthread_bind(data->t, cpu); ++ wake_up_process(data->t); ++ return data; ++} ++ ++void bman_test_thresh(void) ++{ ++ int loop = TEST_NUMBUFS; ++ int ret, num_cpus = 0; ++ struct affine_test_data *data, *drainer = NULL; ++ ++ pr_info("bman_test_thresh: start\n"); ++ ++ /* allocate a BPID and seed it */ ++ pool_nocb = bman_new_pool(¶ms_nocb); ++ BUG_ON(!pool_nocb); ++ while (loop--) { ++ struct bm_buffer buf; ++ bm_buffer_set64(&buf, 0x0badbeef + loop); ++ ret = bman_release(pool_nocb, &buf, 1, ++ BMAN_RELEASE_FLAG_WAIT); ++ BUG_ON(ret); ++ } ++ while (!bman_rcr_is_empty()) ++ cpu_relax(); ++ pr_info("bman_test_thresh: buffers are in\n"); ++ ++ /* create threads and wait for them to create pools */ ++ params_cb.bpid = bman_get_params(pool_nocb)->bpid; ++ for_each_cpu(loop, cpu_online_mask) { ++ data = start_affine_test(loop, drainer ? 0 : 1); ++ BUG_ON(!data); ++ if (!drainer) ++ drainer = data; ++ num_cpus++; ++ wait_for_completion(&data->wakeparent); ++ } ++ ++ /* signal the drainer to start draining */ ++ complete(&drainer->wakethread); ++ wait_for_completion(&drainer->wakeparent); ++ init_completion(&drainer->wakeparent); ++ ++ /* tear down */ ++ list_for_each_entry_safe(data, drainer, &threads, node) { ++ complete(&data->wakethread); ++ ret = kthread_stop(data->t); ++ BUG_ON(ret); ++ list_del(&data->node); ++ /* check that we get the expected callbacks (and no others) */ ++ BUG_ON(data->num_enter != 1); ++ BUG_ON(data->num_exit != 0); ++ kfree(data); ++ } ++ bman_free_pool(pool_nocb); ++ ++ pr_info("bman_test_thresh: done\n"); ++} +--- /dev/null ++++ b/drivers/staging/fsl_qbman/dpa_alloc.c +@@ -0,0 +1,706 @@ ++/* Copyright 2009-2012 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "dpa_sys.h"
++#include
++#include
++
++/* Qman and Bman APIs are front-ends to the common code. */
++
++static DECLARE_DPA_ALLOC(bpalloc); /* BPID allocator */
++static DECLARE_DPA_ALLOC(fqalloc); /* FQID allocator */
++static DECLARE_DPA_ALLOC(qpalloc); /* pool-channel allocator */
++static DECLARE_DPA_ALLOC(cgralloc); /* CGR ID allocator */
++static DECLARE_DPA_ALLOC(ceetm0_challoc); /* CEETM Channel ID allocator */
++static DECLARE_DPA_ALLOC(ceetm0_lfqidalloc); /* CEETM LFQID allocator */
++static DECLARE_DPA_ALLOC(ceetm1_challoc); /* CEETM Channel ID allocator */
++static DECLARE_DPA_ALLOC(ceetm1_lfqidalloc); /* CEETM LFQID allocator */
++
++/* This is a sort-of-conditional dpa_alloc_free() routine. Eg. when releasing
++ * FQIDs (probably from user-space), it can filter out those that aren't in the
++ * OOS state (better to leak a h/w resource than to crash). This function
++ * returns the number of invalid IDs that were not released. */
++static u32 release_id_range(struct dpa_alloc *alloc, u32 id, u32 count,
++ int (*is_valid)(u32 id))
++{
++ int valid_mode = 0;
++ u32 loop = id, total_invalid = 0;
++ while (loop < (id + count)) {
++ int isvalid = is_valid ? is_valid(loop) : 1;
++ if (!valid_mode) {
++ /* We're looking for a valid ID to terminate an invalid
++ * range */
++ if (isvalid) {
++ /* We finished a range of invalid IDs, a valid
++ * range is now underway */
++ valid_mode = 1;
++ count -= (loop - id);
++ id = loop;
++ } else
++ total_invalid++;
++ } else {
++ /* We're looking for an invalid ID to terminate a
++ * valid range */
++ if (!isvalid) {
++ /* Release the range of valid IDs, an invalid
++ * range is now underway */
++ if (loop > id)
++ dpa_alloc_free(alloc, id, loop - id);
++ valid_mode = 0;
++ }
++ }
++ loop++;
++ }
++ /* Release any unterminated range of valid IDs */
++ if (valid_mode && count)
++ dpa_alloc_free(alloc, id, count);
++ return total_invalid;
++}
++
++/* BPID allocator front-end */
++
++int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial)
++{
++ return dpa_alloc_new(&bpalloc, result, count, align, partial);
++}
++EXPORT_SYMBOL(bman_alloc_bpid_range);
++
++static int bp_cleanup(u32 bpid)
++{
++ return bman_shutdown_pool(bpid) == 0;
++}
++void bman_release_bpid_range(u32 bpid, u32 count)
++{
++ u32 total_invalid = release_id_range(&bpalloc, bpid, count, bp_cleanup);
++ if (total_invalid)
++ pr_err("BPID range [%d..%d] (%d) had %d leaks\n",
++ bpid, bpid + count - 1, count, total_invalid);
++}
++EXPORT_SYMBOL(bman_release_bpid_range);
++
++void bman_seed_bpid_range(u32 bpid, u32 count)
++{
++ dpa_alloc_seed(&bpalloc, bpid, count);
++}
++EXPORT_SYMBOL(bman_seed_bpid_range);
++
++int bman_reserve_bpid_range(u32 bpid, u32 count)
++{
++ return dpa_alloc_reserve(&bpalloc, bpid, count);
++}
++EXPORT_SYMBOL(bman_reserve_bpid_range);
++
++
++/* FQID allocator front-end */
++
++int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
++{
++ return dpa_alloc_new(&fqalloc, result,
count, align, partial); ++} ++EXPORT_SYMBOL(qman_alloc_fqid_range); ++ ++static int fq_cleanup(u32 fqid) ++{ ++ return qman_shutdown_fq(fqid) == 0; ++} ++void qman_release_fqid_range(u32 fqid, u32 count) ++{ ++ u32 total_invalid = release_id_range(&fqalloc, fqid, count, fq_cleanup); ++ if (total_invalid) ++ pr_err("FQID range [%d..%d] (%d) had %d leaks\n", ++ fqid, fqid + count - 1, count, total_invalid); ++} ++EXPORT_SYMBOL(qman_release_fqid_range); ++ ++int qman_reserve_fqid_range(u32 fqid, u32 count) ++{ ++ return dpa_alloc_reserve(&fqalloc, fqid, count); ++} ++EXPORT_SYMBOL(qman_reserve_fqid_range); ++ ++void qman_seed_fqid_range(u32 fqid, u32 count) ++{ ++ dpa_alloc_seed(&fqalloc, fqid, count); ++} ++EXPORT_SYMBOL(qman_seed_fqid_range); ++ ++/* Pool-channel allocator front-end */ ++ ++int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial) ++{ ++ return dpa_alloc_new(&qpalloc, result, count, align, partial); ++} ++EXPORT_SYMBOL(qman_alloc_pool_range); ++ ++static int qpool_cleanup(u32 qp) ++{ ++ /* We query all FQDs starting from ++ * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs ++ * whose destination channel is the pool-channel being released. ++ * When a non-OOS FQD is found we attempt to clean it up */ ++ struct qman_fq fq = { ++ .fqid = 1 ++ }; ++ int err; ++ do { ++ struct qm_mcr_queryfq_np np; ++ err = qman_query_fq_np(&fq, &np); ++ if (err) ++ /* FQID range exceeded, found no problems */ ++ return 1; ++ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { ++ struct qm_fqd fqd; ++ err = qman_query_fq(&fq, &fqd); ++ BUG_ON(err); ++ if (fqd.dest.channel == qp) { ++ /* The channel is the FQ's target, clean it */ ++ if (qman_shutdown_fq(fq.fqid) != 0) ++ /* Couldn't shut down the FQ ++ so the pool must be leaked */ ++ return 0; ++ } ++ } ++ /* Move to the next FQID */ ++ fq.fqid++; ++ } while (1); ++} ++void qman_release_pool_range(u32 qp, u32 count) ++{ ++ u32 total_invalid = release_id_range(&qpalloc, qp, ++ count, qpool_cleanup); ++ if (total_invalid) { ++ /* Pool channels are almost always used individually */ ++ if (count == 1) ++ pr_err("Pool channel 0x%x had %d leaks\n", ++ qp, total_invalid); ++ else ++ pr_err("Pool channels [%d..%d] (%d) had %d leaks\n", ++ qp, qp + count - 1, count, total_invalid); ++ } ++} ++EXPORT_SYMBOL(qman_release_pool_range); ++ ++ ++void qman_seed_pool_range(u32 poolid, u32 count) ++{ ++ dpa_alloc_seed(&qpalloc, poolid, count); ++ ++} ++EXPORT_SYMBOL(qman_seed_pool_range); ++ ++int qman_reserve_pool_range(u32 poolid, u32 count) ++{ ++ return dpa_alloc_reserve(&qpalloc, poolid, count); ++} ++EXPORT_SYMBOL(qman_reserve_pool_range); ++ ++ ++/* CGR ID allocator front-end */ ++ ++int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial) ++{ ++ return dpa_alloc_new(&cgralloc, result, count, align, partial); ++} ++EXPORT_SYMBOL(qman_alloc_cgrid_range); ++ ++static int cqr_cleanup(u32 cgrid) ++{ ++ /* We query all FQDs starting from ++ * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs ++ * whose CGR is the CGR being released. 
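++ * Such an in-use CGR cannot be forcibly reclaimed here, so the
++ * conflict is logged and the scan stops there.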
++ */ ++ struct qman_fq fq = { ++ .fqid = 1 ++ }; ++ int err; ++ do { ++ struct qm_mcr_queryfq_np np; ++ err = qman_query_fq_np(&fq, &np); ++ if (err) ++ /* FQID range exceeded, found no problems */ ++ return 1; ++ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { ++ struct qm_fqd fqd; ++ err = qman_query_fq(&fq, &fqd); ++ BUG_ON(err); ++ if ((fqd.fq_ctrl & QM_FQCTRL_CGE) && ++ (fqd.cgid == cgrid)) { ++ pr_err("CRGID 0x%x is being used by FQID 0x%x," ++ " CGR will be leaked\n", ++ cgrid, fq.fqid); ++ return 1; ++ } ++ } ++ /* Move to the next FQID */ ++ fq.fqid++; ++ } while (1); ++} ++ ++void qman_release_cgrid_range(u32 cgrid, u32 count) ++{ ++ u32 total_invalid = release_id_range(&cgralloc, cgrid, ++ count, cqr_cleanup); ++ if (total_invalid) ++ pr_err("CGRID range [%d..%d] (%d) had %d leaks\n", ++ cgrid, cgrid + count - 1, count, total_invalid); ++} ++EXPORT_SYMBOL(qman_release_cgrid_range); ++ ++void qman_seed_cgrid_range(u32 cgrid, u32 count) ++{ ++ dpa_alloc_seed(&cgralloc, cgrid, count); ++ ++} ++EXPORT_SYMBOL(qman_seed_cgrid_range); ++ ++/* CEETM CHANNEL ID allocator front-end */ ++int qman_alloc_ceetm0_channel_range(u32 *result, u32 count, u32 align, ++ int partial) ++{ ++ return dpa_alloc_new(&ceetm0_challoc, result, count, align, partial); ++} ++EXPORT_SYMBOL(qman_alloc_ceetm0_channel_range); ++ ++int qman_alloc_ceetm1_channel_range(u32 *result, u32 count, u32 align, ++ int partial) ++{ ++ return dpa_alloc_new(&ceetm1_challoc, result, count, align, partial); ++} ++EXPORT_SYMBOL(qman_alloc_ceetm1_channel_range); ++ ++void qman_release_ceetm0_channel_range(u32 channelid, u32 count) ++{ ++ u32 total_invalid; ++ ++ total_invalid = release_id_range(&ceetm0_challoc, channelid, count, ++ NULL); ++ if (total_invalid) ++ pr_err("CEETM channel range [%d..%d] (%d) had %d leaks\n", ++ channelid, channelid + count - 1, count, total_invalid); ++} ++EXPORT_SYMBOL(qman_release_ceetm0_channel_range); ++ ++void qman_seed_ceetm0_channel_range(u32 channelid, u32 count) ++{ ++ dpa_alloc_seed(&ceetm0_challoc, channelid, count); ++ ++} ++EXPORT_SYMBOL(qman_seed_ceetm0_channel_range); ++ ++void qman_release_ceetm1_channel_range(u32 channelid, u32 count) ++{ ++ u32 total_invalid; ++ total_invalid = release_id_range(&ceetm1_challoc, channelid, count, ++ NULL); ++ if (total_invalid) ++ pr_err("CEETM channel range [%d..%d] (%d) had %d leaks\n", ++ channelid, channelid + count - 1, count, total_invalid); ++} ++EXPORT_SYMBOL(qman_release_ceetm1_channel_range); ++ ++void qman_seed_ceetm1_channel_range(u32 channelid, u32 count) ++{ ++ dpa_alloc_seed(&ceetm1_challoc, channelid, count); ++ ++} ++EXPORT_SYMBOL(qman_seed_ceetm1_channel_range); ++ ++/* CEETM LFQID allocator front-end */ ++int qman_alloc_ceetm0_lfqid_range(u32 *result, u32 count, u32 align, ++ int partial) ++{ ++ return dpa_alloc_new(&ceetm0_lfqidalloc, result, count, align, partial); ++} ++EXPORT_SYMBOL(qman_alloc_ceetm0_lfqid_range); ++ ++int qman_alloc_ceetm1_lfqid_range(u32 *result, u32 count, u32 align, ++ int partial) ++{ ++ return dpa_alloc_new(&ceetm1_lfqidalloc, result, count, align, partial); ++} ++EXPORT_SYMBOL(qman_alloc_ceetm1_lfqid_range); ++ ++void qman_release_ceetm0_lfqid_range(u32 lfqid, u32 count) ++{ ++ u32 total_invalid; ++ ++ total_invalid = release_id_range(&ceetm0_lfqidalloc, lfqid, count, ++ NULL); ++ if (total_invalid) ++ pr_err("CEETM LFQID range [0x%x..0x%x] (%d) had %d leaks\n", ++ lfqid, lfqid + count - 1, count, total_invalid); ++} ++EXPORT_SYMBOL(qman_release_ceetm0_lfqid_range); ++ ++void 
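/* As with the other front-ends, "seed" donates IDs to the allocator's
++ * free list, "reserve" marks IDs in-use without drawing on the free
++ * list, and "release" returns previously allocated or reserved IDs. */
++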
qman_seed_ceetm0_lfqid_range(u32 lfqid, u32 count) ++{ ++ dpa_alloc_seed(&ceetm0_lfqidalloc, lfqid, count); ++ ++} ++EXPORT_SYMBOL(qman_seed_ceetm0_lfqid_range); ++ ++void qman_release_ceetm1_lfqid_range(u32 lfqid, u32 count) ++{ ++ u32 total_invalid; ++ ++ total_invalid = release_id_range(&ceetm1_lfqidalloc, lfqid, count, ++ NULL); ++ if (total_invalid) ++ pr_err("CEETM LFQID range [0x%x..0x%x] (%d) had %d leaks\n", ++ lfqid, lfqid + count - 1, count, total_invalid); ++} ++EXPORT_SYMBOL(qman_release_ceetm1_lfqid_range); ++ ++void qman_seed_ceetm1_lfqid_range(u32 lfqid, u32 count) ++{ ++ dpa_alloc_seed(&ceetm1_lfqidalloc, lfqid, count); ++ ++} ++EXPORT_SYMBOL(qman_seed_ceetm1_lfqid_range); ++ ++ ++/* Everything else is the common backend to all the allocators */ ++ ++/* The allocator is a (possibly-empty) list of these; */ ++struct alloc_node { ++ struct list_head list; ++ u32 base; ++ u32 num; ++ /* refcount and is_alloced are only set ++ when the node is in the used list */ ++ unsigned int refcount; ++ int is_alloced; ++}; ++ ++/* #define DPA_ALLOC_DEBUG */ ++ ++#ifdef DPA_ALLOC_DEBUG ++#define DPRINT pr_info ++static void DUMP(struct dpa_alloc *alloc) ++{ ++ int off = 0; ++ char buf[256]; ++ struct alloc_node *p; ++ pr_info("Free Nodes\n"); ++ list_for_each_entry(p, &alloc->free, list) { ++ if (off < 255) ++ off += snprintf(buf + off, 255-off, "{%d,%d}", ++ p->base, p->base + p->num - 1); ++ } ++ pr_info("%s\n", buf); ++ ++ off = 0; ++ pr_info("Used Nodes\n"); ++ list_for_each_entry(p, &alloc->used, list) { ++ if (off < 255) ++ off += snprintf(buf + off, 255-off, "{%d,%d}", ++ p->base, p->base + p->num - 1); ++ } ++ pr_info("%s\n", buf); ++ ++ ++ ++} ++#else ++#define DPRINT(x...) ++#define DUMP(a) ++#endif ++ ++int dpa_alloc_new(struct dpa_alloc *alloc, u32 *result, u32 count, u32 align, ++ int partial) ++{ ++ struct alloc_node *i = NULL, *next_best = NULL, *used_node = NULL; ++ u32 base, next_best_base = 0, num = 0, next_best_num = 0; ++ struct alloc_node *margin_left, *margin_right; ++ ++ *result = (u32)-1; ++ DPRINT("alloc_range(%d,%d,%d)\n", count, align, partial); ++ DUMP(alloc); ++ /* If 'align' is 0, it should behave as though it was 1 */ ++ if (!align) ++ align = 1; ++ margin_left = kmalloc(sizeof(*margin_left), GFP_KERNEL); ++ if (!margin_left) ++ goto err; ++ margin_right = kmalloc(sizeof(*margin_right), GFP_KERNEL); ++ if (!margin_right) { ++ kfree(margin_left); ++ goto err; ++ } ++ spin_lock_irq(&alloc->lock); ++ list_for_each_entry(i, &alloc->free, list) { ++ base = (i->base + align - 1) / align; ++ base *= align; ++ if ((base - i->base) >= i->num) ++ /* alignment is impossible, regardless of count */ ++ continue; ++ num = i->num - (base - i->base); ++ if (num >= count) { ++ /* this one will do nicely */ ++ num = count; ++ goto done; ++ } ++ if (num > next_best_num) { ++ next_best = i; ++ next_best_base = base; ++ next_best_num = num; ++ } ++ } ++ if (partial && next_best) { ++ i = next_best; ++ base = next_best_base; ++ num = next_best_num; ++ } else ++ i = NULL; ++done: ++ if (i) { ++ if (base != i->base) { ++ margin_left->base = i->base; ++ margin_left->num = base - i->base; ++ list_add_tail(&margin_left->list, &i->list); ++ } else ++ kfree(margin_left); ++ if ((base + num) < (i->base + i->num)) { ++ margin_right->base = base + num; ++ margin_right->num = (i->base + i->num) - ++ (base + num); ++ list_add(&margin_right->list, &i->list); ++ } else ++ kfree(margin_right); ++ list_del(&i->list); ++ kfree(i); ++ *result = base; ++ } else { ++ spin_unlock_irq(&alloc->lock); 
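++ /* Failure path: the two margin nodes were pre-allocated in case the
++ * chosen free node had to be split on either side; nothing was linked
++ * in, so simply free them. */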
++ kfree(margin_left); ++ kfree(margin_right); ++ } ++ ++err: ++ DPRINT("returning %d\n", i ? num : -ENOMEM); ++ DUMP(alloc); ++ if (!i) ++ return -ENOMEM; ++ ++ /* Add the allocation to the used list with a refcount of 1 */ ++ used_node = kmalloc(sizeof(*used_node), GFP_KERNEL); ++ if (!used_node) { ++ spin_unlock_irq(&alloc->lock); ++ return -ENOMEM; ++ } ++ used_node->base = *result; ++ used_node->num = num; ++ used_node->refcount = 1; ++ used_node->is_alloced = 1; ++ list_add_tail(&used_node->list, &alloc->used); ++ spin_unlock_irq(&alloc->lock); ++ return (int)num; ++} ++ ++/* Allocate the list node using GFP_ATOMIC, because we *really* want to avoid ++ * forcing error-handling on to users in the deallocation path. */ ++static void _dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count) ++{ ++ struct alloc_node *i, *node = kmalloc(sizeof(*node), GFP_ATOMIC); ++ BUG_ON(!node); ++ DPRINT("release_range(%d,%d)\n", base_id, count); ++ DUMP(alloc); ++ BUG_ON(!count); ++ spin_lock_irq(&alloc->lock); ++ ++ ++ node->base = base_id; ++ node->num = count; ++ list_for_each_entry(i, &alloc->free, list) { ++ if (i->base >= node->base) { ++ /* BUG_ON(any overlapping) */ ++ BUG_ON(i->base < (node->base + node->num)); ++ list_add_tail(&node->list, &i->list); ++ goto done; ++ } ++ } ++ list_add_tail(&node->list, &alloc->free); ++done: ++ /* Merge to the left */ ++ i = list_entry(node->list.prev, struct alloc_node, list); ++ if (node->list.prev != &alloc->free) { ++ BUG_ON((i->base + i->num) > node->base); ++ if ((i->base + i->num) == node->base) { ++ node->base = i->base; ++ node->num += i->num; ++ list_del(&i->list); ++ kfree(i); ++ } ++ } ++ /* Merge to the right */ ++ i = list_entry(node->list.next, struct alloc_node, list); ++ if (node->list.next != &alloc->free) { ++ BUG_ON((node->base + node->num) > i->base); ++ if ((node->base + node->num) == i->base) { ++ node->num += i->num; ++ list_del(&i->list); ++ kfree(i); ++ } ++ } ++ spin_unlock_irq(&alloc->lock); ++ DUMP(alloc); ++} ++ ++ ++void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count) ++{ ++ struct alloc_node *i = NULL; ++ spin_lock_irq(&alloc->lock); ++ ++ /* First find the node in the used list and decrement its ref count */ ++ list_for_each_entry(i, &alloc->used, list) { ++ if (i->base == base_id && i->num == count) { ++ --i->refcount; ++ if (i->refcount == 0) { ++ list_del(&i->list); ++ spin_unlock_irq(&alloc->lock); ++ if (i->is_alloced) ++ _dpa_alloc_free(alloc, base_id, count); ++ kfree(i); ++ return; ++ } ++ spin_unlock_irq(&alloc->lock); ++ return; ++ } ++ } ++ /* Couldn't find the allocation */ ++ pr_err("Attempt to free ID 0x%x COUNT %d that wasn't alloc'd or reserved\n", ++ base_id, count); ++ spin_unlock_irq(&alloc->lock); ++} ++ ++void dpa_alloc_seed(struct dpa_alloc *alloc, u32 base_id, u32 count) ++{ ++ /* Same as free but no previous allocation checking is needed */ ++ _dpa_alloc_free(alloc, base_id, count); ++} ++ ++ ++int dpa_alloc_reserve(struct dpa_alloc *alloc, u32 base, u32 num) ++{ ++ struct alloc_node *i = NULL, *used_node; ++ ++ DPRINT("alloc_reserve(%d,%d)\n", base, num); ++ DUMP(alloc); ++ ++ spin_lock_irq(&alloc->lock); ++ ++ /* Check for the node in the used list. 
++ If found, increase it's refcount */ ++ list_for_each_entry(i, &alloc->used, list) { ++ if ((i->base == base) && (i->num == num)) { ++ ++i->refcount; ++ spin_unlock_irq(&alloc->lock); ++ return 0; ++ } ++ if ((base >= i->base) && (base < (i->base + i->num))) { ++ /* This is an attempt to reserve a region that was ++ already reserved or alloced with a different ++ base or num */ ++ pr_err("Cannot reserve %d - %d, it overlaps with" ++ " existing reservation from %d - %d\n", ++ base, base + num - 1, i->base, ++ i->base + i->num - 1); ++ spin_unlock_irq(&alloc->lock); ++ return -1; ++ } ++ } ++ /* Check to make sure this ID isn't in the free list */ ++ list_for_each_entry(i, &alloc->free, list) { ++ if ((base >= i->base) && (base < (i->base + i->num))) { ++ /* yep, the reservation is within this node */ ++ pr_err("Cannot reserve %d - %d, it overlaps with" ++ " free range %d - %d and must be alloced\n", ++ base, base + num - 1, ++ i->base, i->base + i->num - 1); ++ spin_unlock_irq(&alloc->lock); ++ return -1; ++ } ++ } ++ /* Add the allocation to the used list with a refcount of 1 */ ++ used_node = kmalloc(sizeof(*used_node), GFP_KERNEL); ++ if (!used_node) { ++ spin_unlock_irq(&alloc->lock); ++ return -ENOMEM; ++ ++ } ++ used_node->base = base; ++ used_node->num = num; ++ used_node->refcount = 1; ++ used_node->is_alloced = 0; ++ list_add_tail(&used_node->list, &alloc->used); ++ spin_unlock_irq(&alloc->lock); ++ return 0; ++} ++ ++ ++int dpa_alloc_pop(struct dpa_alloc *alloc, u32 *result, u32 *count) ++{ ++ struct alloc_node *i = NULL; ++ DPRINT("alloc_pop()\n"); ++ DUMP(alloc); ++ spin_lock_irq(&alloc->lock); ++ if (!list_empty(&alloc->free)) { ++ i = list_entry(alloc->free.next, struct alloc_node, list); ++ list_del(&i->list); ++ } ++ spin_unlock_irq(&alloc->lock); ++ DPRINT("returning %d\n", i ? 0 : -ENOMEM); ++ DUMP(alloc); ++ if (!i) ++ return -ENOMEM; ++ *result = i->base; ++ *count = i->num; ++ kfree(i); ++ return 0; ++} ++ ++int dpa_alloc_check(struct dpa_alloc *list_head, u32 item) ++{ ++ struct alloc_node *i = NULL; ++ int res = 0; ++ DPRINT("alloc_check()\n"); ++ spin_lock_irq(&list_head->lock); ++ ++ list_for_each_entry(i, &list_head->free, list) { ++ if ((item >= i->base) && (item < (i->base + i->num))) { ++ res = 1; ++ break; ++ } ++ } ++ spin_unlock_irq(&list_head->lock); ++ return res; ++} +--- /dev/null ++++ b/drivers/staging/fsl_qbman/dpa_sys.h +@@ -0,0 +1,259 @@ ++/* Copyright 2008-2012 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifndef DPA_SYS_H ++#define DPA_SYS_H ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++/* When copying aligned words or shorts, try to avoid memcpy() */ ++#define CONFIG_TRY_BETTER_MEMCPY ++ ++/* For 2-element tables related to cache-inhibited and cache-enabled mappings */ ++#define DPA_PORTAL_CE 0 ++#define DPA_PORTAL_CI 1 ++ ++/***********************/ ++/* Misc inline assists */ ++/***********************/ ++ ++#if defined CONFIG_PPC32 ++#include "dpa_sys_ppc32.h" ++#elif defined CONFIG_PPC64 ++#include "dpa_sys_ppc64.h" ++#elif defined CONFIG_ARM ++#include "dpa_sys_arm.h" ++#elif defined CONFIG_ARM64 ++#include "dpa_sys_arm64.h" ++#endif ++ ++ ++#ifdef CONFIG_FSL_DPA_CHECKING ++#define DPA_ASSERT(x) \ ++ do { \ ++ if (!(x)) { \ ++ pr_crit("ASSERT: (%s:%d) %s\n", __FILE__, __LINE__, \ ++ __stringify_1(x)); \ ++ dump_stack(); \ ++ panic("assertion failure"); \ ++ } \ ++ } while (0) ++#else ++#define DPA_ASSERT(x) ++#endif ++ ++/* memcpy() stuff - when you know alignments in advance */ ++#ifdef CONFIG_TRY_BETTER_MEMCPY ++static inline void copy_words(void *dest, const void *src, size_t sz) ++{ ++ u32 *__dest = dest; ++ const u32 *__src = src; ++ size_t __sz = sz >> 2; ++ BUG_ON((unsigned long)dest & 0x3); ++ BUG_ON((unsigned long)src & 0x3); ++ BUG_ON(sz & 0x3); ++ while (__sz--) ++ *(__dest++) = *(__src++); ++} ++static inline void copy_shorts(void *dest, const void *src, size_t sz) ++{ ++ u16 *__dest = dest; ++ const u16 *__src = src; ++ size_t __sz = sz >> 1; ++ BUG_ON((unsigned long)dest & 0x1); ++ BUG_ON((unsigned long)src & 0x1); ++ BUG_ON(sz & 0x1); ++ while (__sz--) ++ *(__dest++) = *(__src++); ++} ++static inline void copy_bytes(void *dest, const void *src, size_t sz) ++{ ++ u8 *__dest = dest; ++ const u8 *__src = src; ++ while (sz--) ++ *(__dest++) = *(__src++); ++} ++#else ++#define copy_words memcpy ++#define copy_shorts memcpy ++#define copy_bytes memcpy ++#endif ++ ++/************/ ++/* RB-trees */ ++/************/ ++ ++/* We encapsulate RB-trees so that its easier to use non-linux forms in ++ * non-linux systems. This also encapsulates the extra plumbing that linux code ++ * usually provides when using RB-trees. This encapsulation assumes that the ++ * data type held by the tree is u32. 
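++ * A hypothetical user (names purely illustrative) would write
++ *
++ *   struct foo { struct rb_node node; u32 id; };
++ *   IMPLEMENT_DPA_RBTREE(footree, struct foo, node, id)
++ *
++ * to generate footree_push(), footree_del() and footree_find(), all
++ * operating on a struct dpa_rbtree keyed by 'id'.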
*/ ++ ++struct dpa_rbtree { ++ struct rb_root root; ++}; ++#define DPA_RBTREE { .root = RB_ROOT } ++ ++static inline void dpa_rbtree_init(struct dpa_rbtree *tree) ++{ ++ tree->root = RB_ROOT; ++} ++ ++#define IMPLEMENT_DPA_RBTREE(name, type, node_field, val_field) \ ++static inline int name##_push(struct dpa_rbtree *tree, type *obj) \ ++{ \ ++ struct rb_node *parent = NULL, **p = &tree->root.rb_node; \ ++ while (*p) { \ ++ u32 item; \ ++ parent = *p; \ ++ item = rb_entry(parent, type, node_field)->val_field; \ ++ if (obj->val_field < item) \ ++ p = &parent->rb_left; \ ++ else if (obj->val_field > item) \ ++ p = &parent->rb_right; \ ++ else \ ++ return -EBUSY; \ ++ } \ ++ rb_link_node(&obj->node_field, parent, p); \ ++ rb_insert_color(&obj->node_field, &tree->root); \ ++ return 0; \ ++} \ ++static inline void name##_del(struct dpa_rbtree *tree, type *obj) \ ++{ \ ++ rb_erase(&obj->node_field, &tree->root); \ ++} \ ++static inline type *name##_find(struct dpa_rbtree *tree, u32 val) \ ++{ \ ++ type *ret; \ ++ struct rb_node *p = tree->root.rb_node; \ ++ while (p) { \ ++ ret = rb_entry(p, type, node_field); \ ++ if (val < ret->val_field) \ ++ p = p->rb_left; \ ++ else if (val > ret->val_field) \ ++ p = p->rb_right; \ ++ else \ ++ return ret; \ ++ } \ ++ return NULL; \ ++} ++ ++/************/ ++/* Bootargs */ ++/************/ ++ ++/* Qman has "qportals=" and Bman has "bportals=", they use the same syntax ++ * though; a comma-separated list of items, each item being a cpu index and/or a ++ * range of cpu indices, and each item optionally be prefixed by "s" to indicate ++ * that the portal associated with that cpu should be shared. See bman_driver.c ++ * for more specifics. */ ++static int __parse_portals_cpu(const char **s, unsigned int *cpu) ++{ ++ *cpu = 0; ++ if (!isdigit(**s)) ++ return -EINVAL; ++ while (isdigit(**s)) ++ *cpu = *cpu * 10 + (*((*s)++) - '0'); ++ return 0; ++} ++static inline int parse_portals_bootarg(char *str, struct cpumask *want_shared, ++ struct cpumask *want_unshared, ++ const char *argname) ++{ ++ const char *s = str; ++ unsigned int shared, cpu1, cpu2, loop; ++ ++keep_going: ++ if (*s == 's') { ++ shared = 1; ++ s++; ++ } else ++ shared = 0; ++ if (__parse_portals_cpu(&s, &cpu1)) ++ goto err; ++ if (*s == '-') { ++ s++; ++ if (__parse_portals_cpu(&s, &cpu2)) ++ goto err; ++ if (cpu2 < cpu1) ++ goto err; ++ } else ++ cpu2 = cpu1; ++ for (loop = cpu1; loop <= cpu2; loop++) ++ cpumask_set_cpu(loop, shared ? want_shared : want_unshared); ++ if (*s == ',') { ++ s++; ++ goto keep_going; ++ } else if ((*s == '\0') || isspace(*s)) ++ return 0; ++err: ++ pr_crit("Malformed %s argument: %s, offset: %lu\n", argname, str, ++ (unsigned long)s - (unsigned long)str); ++ return -EINVAL; ++} ++#ifdef CONFIG_FSL_USDPAA ++/* Hooks from fsl_usdpaa_irq.c to fsl_usdpaa.c */ ++int usdpaa_get_portal_config(struct file *filp, void *cinh, ++ enum usdpaa_portal_type ptype, unsigned int *irq, ++ void **iir_reg); ++#endif ++#endif /* DPA_SYS_H */ +--- /dev/null ++++ b/drivers/staging/fsl_qbman/dpa_sys_arm.h +@@ -0,0 +1,95 @@ ++/* Copyright 2016 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifndef DPA_SYS_ARM_H ++#define DPA_SYS_ARM_H ++ ++#include ++#include ++ ++/* Implementation of ARM specific routines */ ++ ++/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler ++ * barriers and that dcb*() won't fall victim to compiler or execution ++ * reordering with respect to other code/instructions that manipulate the same ++ * cacheline. */ ++#define hwsync() { asm volatile("dmb st" : : : "memory"); } ++#define lwsync() { asm volatile("dmb st" : : : "memory"); } ++#define dcbf(p) { asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r" (p) : "memory"); } ++#define dcbt_ro(p) { asm volatile("pld [%0, #64];": : "r" (p)); } ++#define dcbt_rw(p) { asm volatile("pldw [%0, #64];": : "r" (p)); } ++#define dcbi(p) { asm volatile("mcr p15, 0, %0, c7, c6, 1" : : "r" (p) : "memory"); } ++ ++#define dcbz_64(p) { memset(p, 0, sizeof(*p)); } ++ ++#define dcbf_64(p) \ ++ do { \ ++ dcbf((u32)p); \ ++ } while (0) ++/* Commonly used combo */ ++#define dcbit_ro(p) \ ++ do { \ ++ dcbi((u32)p); \ ++ dcbt_ro((u32)p); \ ++ } while (0) ++ ++static inline u64 mfatb(void) ++{ ++ return get_cycles(); ++} ++ ++static inline u32 in_be32(volatile void *addr) ++{ ++ return be32_to_cpu(*((volatile u32 *) addr)); ++} ++ ++static inline void out_be32(void *addr, u32 val) ++{ ++ *((u32 *) addr) = cpu_to_be32(val); ++} ++ ++ ++static inline void set_bits(unsigned long mask, volatile unsigned long *p) ++{ ++ *p |= mask; ++} ++static inline void clear_bits(unsigned long mask, volatile unsigned long *p) ++{ ++ *p &= ~mask; ++} ++ ++static inline void flush_dcache_range(unsigned long start, unsigned long stop) ++{ ++ __cpuc_flush_dcache_area((void *) start, stop - start); ++} ++ ++#define hard_smp_processor_id() raw_smp_processor_id() ++#endif +--- /dev/null ++++ b/drivers/staging/fsl_qbman/dpa_sys_arm64.h +@@ -0,0 +1,102 @@ ++/* Copyright 2014 Freescale Semiconductor, Inc. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifndef DPA_SYS_ARM64_H ++#define DPA_SYS_ARM64_H ++ ++#include ++#include ++ ++/* Implementation of ARM 64 bit specific routines */ ++ ++/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler ++ * barriers and that dcb*() won't fall victim to compiler or execution ++ * reordering with respect to other code/instructions that manipulate the same ++ * cacheline. 
*/ ++#define hwsync() { asm volatile("dmb st" : : : "memory"); } ++#define lwsync() { asm volatile("dmb st" : : : "memory"); } ++#define dcbf(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); } ++#define dcbt_ro(p) { asm volatile("prfm pldl1keep, [%0, #0]" : : "r" (p)); } ++#define dcbt_rw(p) { asm volatile("prfm pstl1keep, [%0, #0]" : : "r" (p)); } ++#define dcbi(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); } ++#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); } ++ ++#define dcbz_64(p) \ ++ do { \ ++ dcbz(p); \ ++ } while (0) ++ ++#define dcbf_64(p) \ ++ do { \ ++ dcbf(p); \ ++ } while (0) ++/* Commonly used combo */ ++#define dcbit_ro(p) \ ++ do { \ ++ dcbi(p); \ ++ dcbt_ro(p); \ ++ } while (0) ++ ++static inline u64 mfatb(void) ++{ ++ return get_cycles(); ++} ++ ++static inline u32 in_be32(volatile void *addr) ++{ ++ return be32_to_cpu(*((volatile u32 *) addr)); ++} ++ ++static inline void out_be32(void *addr, u32 val) ++{ ++ *((u32 *) addr) = cpu_to_be32(val); ++} ++ ++ ++static inline void set_bits(unsigned long mask, volatile unsigned long *p) ++{ ++ *p |= mask; ++} ++static inline void clear_bits(unsigned long mask, volatile unsigned long *p) ++{ ++ *p &= ~mask; ++} ++ ++static inline void flush_dcache_range(unsigned long start, unsigned long stop) ++{ ++ __flush_dcache_area((void *) start, stop - start); ++} ++ ++#define hard_smp_processor_id() raw_smp_processor_id() ++ ++ ++ ++#endif +--- /dev/null ++++ b/drivers/staging/fsl_qbman/dpa_sys_ppc32.h +@@ -0,0 +1,70 @@ ++/* Copyright 2014 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#ifndef DPA_SYS_PPC32_H ++#define DPA_SYS_PPC32_H ++ ++/* Implementation of PowerPC 32 bit specific routines */ ++ ++/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler ++ * barriers and that dcb*() won't fall victim to compiler or execution ++ * reordering with respect to other code/instructions that manipulate the same ++ * cacheline. */ ++#define hwsync() __asm__ __volatile__ ("sync" : : : "memory") ++#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory") ++#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory") ++#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p)) ++#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p)) ++#define dcbi(p) dcbf(p) ++ ++#define dcbzl(p) __asm__ __volatile__ ("dcbzl 0,%0" : : "r" (p)) ++#define dcbz_64(p) dcbzl(p) ++#define dcbf_64(p) dcbf(p) ++ ++/* Commonly used combo */ ++#define dcbit_ro(p) \ ++ do { \ ++ dcbi(p); \ ++ dcbt_ro(p); \ ++ } while (0) ++ ++static inline u64 mfatb(void) ++{ ++ u32 hi, lo, chk; ++ do { ++ hi = mfspr(SPRN_ATBU); ++ lo = mfspr(SPRN_ATBL); ++ chk = mfspr(SPRN_ATBU); ++ } while (unlikely(hi != chk)); ++ return ((u64)hi << 32) | (u64)lo; ++} ++ ++#endif +--- /dev/null ++++ b/drivers/staging/fsl_qbman/dpa_sys_ppc64.h +@@ -0,0 +1,79 @@ ++/* Copyright 2014 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifndef DPA_SYS_PPC64_H ++#define DPA_SYS_PPC64_H ++ ++/* Implementation of PowerPC 64 bit specific routines */ ++ ++/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler ++ * barriers and that dcb*() won't fall victim to compiler or execution ++ * reordering with respect to other code/instructions that manipulate the same ++ * cacheline. 
*/
++#define hwsync() __asm__ __volatile__ ("sync" : : : "memory")
++#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory")
++#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory")
++#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p))
++#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p))
++#define dcbi(p) dcbf(p)
++
++#define dcbz(p) __asm__ __volatile__ ("dcbz 0,%0" : : "r" (p))
++#define dcbz_64(p) \
++	do { \
++		dcbz((void*)p + 32); \
++		dcbz(p); \
++	} while (0)
++#define dcbf_64(p) \
++	do { \
++		dcbf((void*)p + 32); \
++		dcbf(p); \
++	} while (0)
++/* Commonly used combo */
++#define dcbit_ro(p) \
++	do { \
++		dcbi(p); \
++		dcbi((void*)p + 32); \
++		dcbt_ro(p); \
++		dcbt_ro((void*)p + 32); \
++	} while (0)
++
++static inline u64 mfatb(void)
++{
++	u32 hi, lo, chk;
++	do {
++		hi = mfspr(SPRN_ATBU);
++		lo = mfspr(SPRN_ATBL);
++		chk = mfspr(SPRN_ATBU);
++	} while (unlikely(hi != chk));
++	return ((u64)hi << 32) | (u64)lo;
++}
++
++#endif
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/fsl_usdpaa.c
+@@ -0,0 +1,1983 @@
++/* Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
++ * Authors: Andy Fleming
++ *          Timur Tabi
++ *          Geoff Thorpe
++ *
++ * This file is licensed under the terms of the GNU General Public License
++ * version 2. This program is licensed "as is" without any warranty of any
++ * kind, whether express or implied.
++ */
++
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++#if !(defined(CONFIG_ARM) || defined(CONFIG_ARM64))
++#include
++#endif
++
++#include "dpa_sys.h"
++#include
++#include "bman_low.h"
++#include "qman_low.h"
++
++/* Physical address range of the memory reservation, exported for mm/mem.c */
++static u64 phys_start;
++static u64 phys_size;
++static u64 arg_phys_size;
++
++/* PFN versions of the above */
++static unsigned long pfn_start;
++static unsigned long pfn_size;
++
++/* Memory reservations are manipulated under this spinlock (which is why
++ * 'refs' isn't atomic_t). */
++static DEFINE_SPINLOCK(mem_lock);
++
++/* The range of TLB1 indices */
++static unsigned int first_tlb;
++static unsigned int num_tlb = 1;
++static unsigned int current_tlb; /* loops around for fault handling */
++
++/* Memory reservation is represented as a list of 'mem_fragment's, some of
++ * which may be mapped. Unmapped fragments are always merged where possible. */
++static LIST_HEAD(mem_list);
++
++struct mem_mapping;
++
++/* Memory fragments are in 'mem_list'. */
++struct mem_fragment {
++	u64 base;
++	u64 len;
++	unsigned long pfn_base; /* PFN version of 'base' */
++	unsigned long pfn_len; /* PFN version of 'len' */
++	unsigned int refs; /* zero if unmapped */
++	u64 root_len; /* Size of the original fragment */
++	unsigned long root_pfn; /* PFN of the original fragment */
++	struct list_head list;
++	/* if mapped, flags+name captured at creation time */
++	u32 flags;
++	char name[USDPAA_DMA_NAME_MAX];
++	u64 map_len;
++	/* support multi-process locks per-memory-fragment. */
++	int has_locking;
++	wait_queue_head_t wq;
++	struct mem_mapping *owner;
++};
++
++/* Mappings of memory fragments in 'struct ctx'. These are created from
++ * ioctl(USDPAA_IOCTL_DMA_MAP), though the actual mapping then happens via a
++ * mmap(). */
++struct mem_mapping {
++	struct mem_fragment *root_frag;
++	u32 frag_count;
++	u64 total_size;
++	struct list_head list;
++	int refs;
++	void *virt_addr;
++};
++
++struct portal_mapping {
++	struct usdpaa_ioctl_portal_map user;
++	union {
++		struct qm_portal_config *qportal;
++		struct bm_portal_config *bportal;
++	};
++	/* Declare space for the portals in case the process
++	   exits unexpectedly and needs to be cleaned up by the kernel */
++	union {
++		struct qm_portal qman_portal_low;
++		struct bm_portal bman_portal_low;
++	};
++	struct list_head list;
++	struct resource *phys;
++	struct iommu_domain *iommu_domain;
++};
++
++/* Track the DPAA resources the process is using */
++struct active_resource {
++	struct list_head list;
++	u32 id;
++	u32 num;
++	unsigned int refcount;
++};
++
++/* Per-FD state (which should also be per-process but we don't enforce that) */
++struct ctx {
++	/* Lock to protect the context */
++	spinlock_t lock;
++	/* Allocated resources get put here for accounting */
++	struct list_head resources[usdpaa_id_max];
++	/* list of DMA maps */
++	struct list_head maps;
++	/* list of portal maps */
++	struct list_head portals;
++};
++
++/* Different resource classes */
++static const struct alloc_backend {
++	enum usdpaa_id_type id_type;
++	int (*alloc)(u32 *, u32, u32, int);
++	void (*release)(u32 base, unsigned int count);
++	int (*reserve)(u32 base, unsigned int count);
++	const char *acronym;
++} alloc_backends[] = {
++	{
++		.id_type = usdpaa_id_fqid,
++		.alloc = qman_alloc_fqid_range,
++		.release = qman_release_fqid_range,
++		.reserve = qman_reserve_fqid_range,
++		.acronym = "FQID"
++	},
++	{
++		.id_type = usdpaa_id_bpid,
++		.alloc = bman_alloc_bpid_range,
++		.release = bman_release_bpid_range,
++		.reserve = bman_reserve_bpid_range,
++		.acronym = "BPID"
++	},
++	{
++		.id_type = usdpaa_id_qpool,
++		.alloc = qman_alloc_pool_range,
++		.release = qman_release_pool_range,
++		.reserve = qman_reserve_pool_range,
++		.acronym = "QPOOL"
++	},
++	{
++		.id_type = usdpaa_id_cgrid,
++		.alloc = qman_alloc_cgrid_range,
++		.release = qman_release_cgrid_range,
++		.acronym = "CGRID"
++	},
++	{
++		.id_type = usdpaa_id_ceetm0_lfqid,
++		.alloc = qman_alloc_ceetm0_lfqid_range,
++		.release = qman_release_ceetm0_lfqid_range,
++		.acronym = "CEETM0_LFQID"
++	},
++	{
++		.id_type = usdpaa_id_ceetm0_channelid,
++		.alloc = qman_alloc_ceetm0_channel_range,
++		.release = qman_release_ceetm0_channel_range,
++		.acronym = "CEETM0_CHANNELID"
++	},
++	{
++		.id_type = usdpaa_id_ceetm1_lfqid,
++		.alloc = qman_alloc_ceetm1_lfqid_range,
++		.release = qman_release_ceetm1_lfqid_range,
++		.acronym = "CEETM1_LFQID"
++	},
++	{
++		.id_type = usdpaa_id_ceetm1_channelid,
++		.alloc = qman_alloc_ceetm1_channel_range,
++		.release = qman_release_ceetm1_channel_range,
++		.acronym = "CEETM1_CHANNELID"
++	},
++	{
++		/* This terminates the array */
++		.id_type = usdpaa_id_max
++	}
++};
++
++/* Determines the largest acceptable page size for a given size.
++   The sizes are determined by what the TLB1 acceptable page sizes are */
++static u32 largest_page_size(u32 size)
++{
++	int shift = 30; /* Start at 1G size */
++	if (size < 4096)
++		return 0;
++	do {
++		if (size >= (1 << shift))
++			return 1 << shift;
++		shift -= 2; /* TLB1 page sizes are powers of 4 */
++	} while (shift >= 12); /* Up to 4k */
++	return 0;
++}
++
++/* Determine if value is power of 4 */
++static inline bool is_power_of_4(u64 x)
++{
++	if (x == 0 || ((x & (x - 1)) != 0))
++		return false;
++	return !!(x & 0x5555555555555555ull);
++}
++
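The power-of-4 invariant above drives all of the fragment carving and merging
logic that follows. As a quick illustration, here is a standalone userspace
sketch (not part of the patch; the demo_* names are invented for this example)
that mirrors how largest_page_size() and is_power_of_4() classify request
sizes against the 4 KB..1 GB TLB1 page-size ladder:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Mirrors largest_page_size(): TLB1 pages are powers of 4, 4 KB .. 1 GB */
static unsigned int demo_largest_page(unsigned int size)
{
	int shift = 30;			/* start at 1 GB */
	if (size < 4096)
		return 0;
	do {
		if (size >= (1u << shift))
			return 1u << shift;
		shift -= 2;		/* next power of 4 down */
	} while (shift >= 12);		/* stop at 4 KB */
	return 0;
}

/* Mirrors is_power_of_4(): a power of 2 whose set bit is at an even index */
static bool demo_is_power_of_4(uint64_t x)
{
	if (x == 0 || (x & (x - 1)))
		return false;
	return !!(x & 0x5555555555555555ull);
}

int main(void)
{
	unsigned int sizes[] = { 4096, 65536, 5u << 20, 1u << 30 };
	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned int pg = demo_largest_page(sizes[i]);
		printf("size 0x%x -> largest page 0x%x (power of 4? %d)\n",
		       sizes[i], pg, demo_is_power_of_4(pg));
	}
	return 0;
}

A 5 MB request, for instance, reports 4 MB as its largest usable page, so the
allocator below would assemble it from a 4 MB fragment plus smaller
neighbours.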
++/* Helper for ioctl_dma_map() when we have a larger fragment than we need. This
++ * splits the fragment into 4 and returns the upper-most. (The caller can loop
++ * until it has a suitable fragment size.) */
++static struct mem_fragment *split_frag(struct mem_fragment *frag)
++{
++	struct mem_fragment *x[3];
++
++	x[0] = kmalloc(sizeof(struct mem_fragment), GFP_ATOMIC);
++	x[1] = kmalloc(sizeof(struct mem_fragment), GFP_ATOMIC);
++	x[2] = kmalloc(sizeof(struct mem_fragment), GFP_ATOMIC);
++	if (!x[0] || !x[1] || !x[2]) {
++		kfree(x[0]);
++		kfree(x[1]);
++		kfree(x[2]);
++		return NULL;
++	}
++	BUG_ON(frag->refs);
++	frag->len >>= 2;
++	frag->pfn_len >>= 2;
++	x[0]->base = frag->base + frag->len;
++	x[1]->base = x[0]->base + frag->len;
++	x[2]->base = x[1]->base + frag->len;
++	x[0]->len = x[1]->len = x[2]->len = frag->len;
++	x[0]->pfn_base = frag->pfn_base + frag->pfn_len;
++	x[1]->pfn_base = x[0]->pfn_base + frag->pfn_len;
++	x[2]->pfn_base = x[1]->pfn_base + frag->pfn_len;
++	x[0]->pfn_len = x[1]->pfn_len = x[2]->pfn_len = frag->pfn_len;
++	x[0]->refs = x[1]->refs = x[2]->refs = 0;
++	x[0]->root_len = x[1]->root_len = x[2]->root_len = frag->root_len;
++	x[0]->root_pfn = x[1]->root_pfn = x[2]->root_pfn = frag->root_pfn;
++	x[0]->name[0] = x[1]->name[0] = x[2]->name[0] = 0;
++	list_add_tail(&x[0]->list, &frag->list);
++	list_add_tail(&x[1]->list, &x[0]->list);
++	list_add_tail(&x[2]->list, &x[1]->list);
++	return x[2];
++}
++
++static __maybe_unused void dump_frags(void)
++{
++	struct mem_fragment *frag;
++	int i = 0;
++	list_for_each_entry(frag, &mem_list, list) {
++		pr_info("FRAG %d: base 0x%llx pfn_base 0x%lx len 0x%llx root_len 0x%llx root_pfn 0x%lx refs %d name %s\n",
++			i, frag->base, frag->pfn_base,
++			frag->len, frag->root_len, frag->root_pfn,
++			frag->refs, frag->name);
++		++i;
++	}
++}
++
++/* Walk the list of fragments and adjoin neighbouring segments if possible */
++static void compress_frags(void)
++{
++	/* Walk the fragment list and combine fragments */
++	struct mem_fragment *frag, *nxtfrag;
++	u64 len = 0;
++
++	int i, numfrags;
++
++
++	frag = list_entry(mem_list.next, struct mem_fragment, list);
++
++	while (&frag->list != &mem_list) {
++		/* Must combine consecutive fragments with
++		   same root_pfn such that they are a power of 4 */
++		if (frag->refs != 0) {
++			frag = list_entry(frag->list.next,
++					  struct mem_fragment, list);
++			continue; /* Not this window */
++		}
++		len = frag->len;
++		numfrags = 0;
++		nxtfrag = list_entry(frag->list.next,
++				     struct mem_fragment, list);
++		while (true) {
++			if (&nxtfrag->list == &mem_list) {
++				numfrags = 0;
++				break; /* End of list */
++			}
++			if (nxtfrag->refs) {
++				numfrags = 0;
++				break; /* In use still */
++			}
++			if (nxtfrag->root_pfn != frag->root_pfn) {
++				numfrags = 0;
++				break; /* Crosses root fragment boundary */
++			}
++			len += nxtfrag->len;
++			numfrags++;
++			if (is_power_of_4(len)) {
++				/* These fragments can be combined */
++				break;
++			}
++			nxtfrag = list_entry(nxtfrag->list.next,
++					     struct mem_fragment, list);
++		}
++		if (numfrags == 0) {
++			frag = list_entry(frag->list.next,
++					  struct mem_fragment, list);
++			continue; /* try the next window */
++		}
++		for (i = 0; i < numfrags; i++) {
++			struct mem_fragment *todel =
++				list_entry(nxtfrag->list.prev,
++					   struct mem_fragment, list);
++			nxtfrag->len += todel->len;
++			nxtfrag->pfn_len += todel->pfn_len;
++			list_del(&todel->list);
++		}
++		/* Re-evaluate the list, things may merge now */
++		frag = list_entry(mem_list.next, struct mem_fragment, list);
++	}
++}
++
++/* Hook from arch/powerpc/mm/mem.c */
++int usdpaa_test_fault(unsigned long pfn, u64
*phys_addr, u64 *size) ++{ ++ struct mem_fragment *frag; ++ int idx = -1; ++ if ((pfn < pfn_start) || (pfn >= (pfn_start + pfn_size))) ++ return -1; ++ /* It's in-range, we need to find the fragment */ ++ spin_lock(&mem_lock); ++ list_for_each_entry(frag, &mem_list, list) { ++ if ((pfn >= frag->pfn_base) && (pfn < (frag->pfn_base + ++ frag->pfn_len))) { ++ *phys_addr = frag->base; ++ *size = frag->len; ++ idx = current_tlb++; ++ if (current_tlb >= (first_tlb + num_tlb)) ++ current_tlb = first_tlb; ++ break; ++ } ++ } ++ spin_unlock(&mem_lock); ++ return idx; ++} ++ ++static int usdpaa_open(struct inode *inode, struct file *filp) ++{ ++ const struct alloc_backend *backend = &alloc_backends[0]; ++ struct ctx *ctx = kmalloc(sizeof(struct ctx), GFP_KERNEL); ++ if (!ctx) ++ return -ENOMEM; ++ filp->private_data = ctx; ++ ++ while (backend->id_type != usdpaa_id_max) { ++ INIT_LIST_HEAD(&ctx->resources[backend->id_type]); ++ backend++; ++ } ++ ++ INIT_LIST_HEAD(&ctx->maps); ++ INIT_LIST_HEAD(&ctx->portals); ++ spin_lock_init(&ctx->lock); ++ ++ //filp->f_mapping->backing_dev_info = &directly_mappable_cdev_bdi; ++ ++ return 0; ++} ++ ++#define DQRR_MAXFILL 15 ++ ++/* Reset a QMan portal to its default state */ ++static int init_qm_portal(struct qm_portal_config *config, ++ struct qm_portal *portal) ++{ ++ const struct qm_dqrr_entry *dqrr = NULL; ++ int i; ++ ++ portal->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE]; ++ portal->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI]; ++ ++ /* Make sure interrupts are inhibited */ ++ qm_out(IIR, 1); ++ ++ /* Initialize the DQRR. This will stop any dequeue ++ commands that are in progress */ ++ if (qm_dqrr_init(portal, config, qm_dqrr_dpush, qm_dqrr_pvb, ++ qm_dqrr_cdc, DQRR_MAXFILL)) { ++ pr_err("qm_dqrr_init() failed when trying to" ++ " recover portal, portal will be leaked\n"); ++ return 1; ++ } ++ ++ /* Discard any entries on the DQRR */ ++ /* If we consume the ring twice something is wrong */ ++ for (i = 0; i < DQRR_MAXFILL * 2; i++) { ++ qm_dqrr_pvb_update(portal); ++ dqrr = qm_dqrr_current(portal); ++ if (!dqrr) ++ break; ++ qm_dqrr_cdc_consume_1ptr(portal, dqrr, 0); ++ qm_dqrr_pvb_update(portal); ++ qm_dqrr_next(portal); ++ } ++ /* Initialize the EQCR */ ++ if (qm_eqcr_init(portal, qm_eqcr_pvb, ++ qm_eqcr_get_ci_stashing(portal), 1)) { ++ pr_err("Qman EQCR initialisation failed\n"); ++ return 1; ++ } ++ /* initialize the MR */ ++ if (qm_mr_init(portal, qm_mr_pvb, qm_mr_cci)) { ++ pr_err("Qman MR initialisation failed\n"); ++ return 1; ++ } ++ qm_mr_pvb_update(portal); ++ while (qm_mr_current(portal)) { ++ qm_mr_next(portal); ++ qm_mr_cci_consume_to_current(portal); ++ qm_mr_pvb_update(portal); ++ } ++ ++ if (qm_mc_init(portal)) { ++ pr_err("Qman MC initialisation failed\n"); ++ return 1; ++ } ++ return 0; ++} ++ ++static int init_bm_portal(struct bm_portal_config *config, ++ struct bm_portal *portal) ++{ ++ portal->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE]; ++ portal->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI]; ++ ++ if (bm_rcr_init(portal, bm_rcr_pvb, bm_rcr_cce)) { ++ pr_err("Bman RCR initialisation failed\n"); ++ return 1; ++ } ++ if (bm_mc_init(portal)) { ++ pr_err("Bman MC initialisation failed\n"); ++ return 1; ++ } ++ return 0; ++} ++ ++/* Function that will scan all FQ's in the system. For each FQ that is not ++ OOS it will call the check_channel helper to determine if the FQ should ++ be torn down. 
If the check_channel helper returns true the FQ will be
++   transitioned to the OOS state */
++static int qm_check_and_destroy_fqs(struct qm_portal *portal, void *ctx,
++				    bool (*check_channel)(void*, u32))
++{
++	u32 fq_id = 0;
++	while (1) {
++		struct qm_mc_command *mcc;
++		struct qm_mc_result *mcr;
++		u8 state;
++		u32 channel;
++
++		/* Determine the channel for the FQID */
++		mcc = qm_mc_start(portal);
++		mcc->queryfq.fqid = fq_id;
++		qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ);
++		while (!(mcr = qm_mc_result(portal)))
++			cpu_relax();
++		DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK)
++			   == QM_MCR_VERB_QUERYFQ);
++		if (mcr->result != QM_MCR_RESULT_OK)
++			break; /* End of valid FQIDs */
++
++		channel = mcr->queryfq.fqd.dest.channel;
++		/* Determine the state of the FQID */
++		mcc = qm_mc_start(portal);
++		mcc->queryfq_np.fqid = fq_id;
++		qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ_NP);
++		while (!(mcr = qm_mc_result(portal)))
++			cpu_relax();
++		DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK)
++			   == QM_MCR_VERB_QUERYFQ_NP);
++		state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
++		if (state == QM_MCR_NP_STATE_OOS)
++			/* Already OOS, no need to do any more checks */
++			goto next;
++
++		if (check_channel(ctx, channel))
++			qm_shutdown_fq(&portal, 1, fq_id);
++next:
++		++fq_id;
++	}
++	return 0;
++}
++
++static bool check_channel_device(void *_ctx, u32 channel)
++{
++	struct ctx *ctx = _ctx;
++	struct portal_mapping *portal, *tmpportal;
++	struct active_resource *res;
++
++	/* See if the FQ is destined for one of the portals we're cleaning up */
++	list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) {
++		if (portal->user.type == usdpaa_portal_qman) {
++			if (portal->qportal->public_cfg.channel == channel) {
++				/* This FQ's destination is a portal
++				   we're cleaning, send a retire */
++				return true;
++			}
++		}
++	}
++
++	/* Check the pool channels that will be released as well */
++	list_for_each_entry(res, &ctx->resources[usdpaa_id_qpool], list) {
++		if ((res->id <= channel) &&
++		    ((res->id + res->num - 1) >= channel))
++			return true;
++	}
++	return false;
++}
++
++static bool check_portal_channel(void *ctx, u32 channel)
++{
++	u32 portal_channel = *(u32 *)ctx;
++	if (portal_channel == channel) {
++		/* This FQ's destination is a portal
++		   we're cleaning, send a retire */
++		return true;
++	}
++	return false;
++}
++
++
++
++
++static int usdpaa_release(struct inode *inode, struct file *filp)
++{
++	struct ctx *ctx = filp->private_data;
++	struct mem_mapping *map, *tmpmap;
++	struct portal_mapping *portal, *tmpportal;
++	const struct alloc_backend *backend = &alloc_backends[0];
++	struct active_resource *res;
++	struct qm_portal *qm_cleanup_portal = NULL;
++	struct bm_portal *bm_cleanup_portal = NULL;
++	struct qm_portal_config *qm_alloced_portal = NULL;
++	struct bm_portal_config *bm_alloced_portal = NULL;
++
++	struct qm_portal *portal_array[qman_portal_max];
++	int portal_count = 0;
++
++	/* Ensure the release operation cannot be migrated to another
++	   CPU as CPU specific variables may be needed during cleanup */
++#ifdef CONFIG_PREEMPT_RT_FULL
++	migrate_disable();
++#endif
++	/* The following logic is used to recover resources that were not
++	   correctly released by the process that is closing the FD.
++	   Step 1: synchronize the HW with the qm_portal/bm_portal structures
++	   in the kernel
++	*/
++
++	list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) {
++		/* Try to recover any portals that weren't shut down */
++		if (portal->user.type == usdpaa_portal_qman) {
++			portal_array[portal_count] = &portal->qman_portal_low;
++			++portal_count;
++			init_qm_portal(portal->qportal,
++				       &portal->qman_portal_low);
++			if (!qm_cleanup_portal) {
++				qm_cleanup_portal = &portal->qman_portal_low;
++			} else {
++				/* Clean FQs on the dedicated channel */
++				u32 chan = portal->qportal->public_cfg.channel;
++				qm_check_and_destroy_fqs(
++					&portal->qman_portal_low, &chan,
++					check_portal_channel);
++			}
++		} else {
++			/* BMAN */
++			init_bm_portal(portal->bportal,
++				       &portal->bman_portal_low);
++			if (!bm_cleanup_portal)
++				bm_cleanup_portal = &portal->bman_portal_low;
++		}
++	}
++	/* If no portal was found, allocate one for cleanup */
++	if (!qm_cleanup_portal) {
++		qm_alloced_portal = qm_get_unused_portal();
++		if (!qm_alloced_portal) {
++			pr_crit("No QMan portal available for cleanup\n");
++#ifdef CONFIG_PREEMPT_RT_FULL
++			migrate_enable();
++#endif
++			return -1;
++		}
++		qm_cleanup_portal = kmalloc(sizeof(struct qm_portal),
++					    GFP_KERNEL);
++		if (!qm_cleanup_portal) {
++#ifdef CONFIG_PREEMPT_RT_FULL
++			migrate_enable();
++#endif
++			return -ENOMEM;
++		}
++		init_qm_portal(qm_alloced_portal, qm_cleanup_portal);
++		portal_array[portal_count] = qm_cleanup_portal;
++		++portal_count;
++	}
++	if (!bm_cleanup_portal) {
++		bm_alloced_portal = bm_get_unused_portal();
++		if (!bm_alloced_portal) {
++			pr_crit("No BMan portal available for cleanup\n");
++#ifdef CONFIG_PREEMPT_RT_FULL
++			migrate_enable();
++#endif
++			return -1;
++		}
++		bm_cleanup_portal = kmalloc(sizeof(struct bm_portal),
++					    GFP_KERNEL);
++		if (!bm_cleanup_portal) {
++#ifdef CONFIG_PREEMPT_RT_FULL
++			migrate_enable();
++#endif
++			return -ENOMEM;
++		}
++		init_bm_portal(bm_alloced_portal, bm_cleanup_portal);
++	}
++
++	/* OOS the FQs associated with this process */
++	qm_check_and_destroy_fqs(qm_cleanup_portal, ctx, check_channel_device);
++
++	while (backend->id_type != usdpaa_id_max) {
++		int leaks = 0;
++		list_for_each_entry(res, &ctx->resources[backend->id_type],
++				    list) {
++			if (backend->id_type == usdpaa_id_fqid) {
++				int i = 0;
++				for (; i < res->num; i++) {
++					/* Clean FQs with the cleanup portal */
++					qm_shutdown_fq(portal_array,
++						       portal_count,
++						       res->id + i);
++				}
++			}
++			leaks += res->num;
++			backend->release(res->id, res->num);
++		}
++		if (leaks)
++			pr_crit("USDPAA process leaking %d %s%s\n", leaks,
++				backend->acronym, (leaks > 1) ?
"s" : ""); ++ backend++; ++ } ++ /* Release any DMA regions */ ++ spin_lock(&mem_lock); ++ list_for_each_entry_safe(map, tmpmap, &ctx->maps, list) { ++ struct mem_fragment *current_frag = map->root_frag; ++ int i; ++ if (map->root_frag->has_locking && ++ (map->root_frag->owner == map)) { ++ map->root_frag->owner = NULL; ++ wake_up(&map->root_frag->wq); ++ } ++ /* Check each fragment and merge if the ref count is 0 */ ++ for (i = 0; i < map->frag_count; i++) { ++ --current_frag->refs; ++ current_frag = list_entry(current_frag->list.prev, ++ struct mem_fragment, list); ++ } ++ ++ compress_frags(); ++ list_del(&map->list); ++ kfree(map); ++ } ++ spin_unlock(&mem_lock); ++ ++ /* Return portals */ ++ list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) { ++ if (portal->user.type == usdpaa_portal_qman) { ++ /* Give the portal back to the allocator */ ++ init_qm_portal(portal->qportal, ++ &portal->qman_portal_low); ++ qm_put_unused_portal(portal->qportal); ++ } else { ++ init_bm_portal(portal->bportal, ++ &portal->bman_portal_low); ++ bm_put_unused_portal(portal->bportal); ++ } ++ list_del(&portal->list); ++ kfree(portal); ++ } ++ if (qm_alloced_portal) { ++ qm_put_unused_portal(qm_alloced_portal); ++ kfree(qm_cleanup_portal); ++ } ++ if (bm_alloced_portal) { ++ bm_put_unused_portal(bm_alloced_portal); ++ kfree(bm_cleanup_portal); ++ } ++ ++ kfree(ctx); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ migrate_enable(); ++#endif ++ return 0; ++} ++ ++static int check_mmap_dma(struct ctx *ctx, struct vm_area_struct *vma, ++ int *match, unsigned long *pfn) ++{ ++ struct mem_mapping *map; ++ ++ list_for_each_entry(map, &ctx->maps, list) { ++ int i; ++ struct mem_fragment *frag = map->root_frag; ++ ++ for (i = 0; i < map->frag_count; i++) { ++ if (frag->pfn_base == vma->vm_pgoff) { ++ *match = 1; ++ *pfn = frag->pfn_base; ++ return 0; ++ } ++ frag = list_entry(frag->list.next, struct mem_fragment, ++ list); ++ } ++ } ++ *match = 0; ++ return 0; ++} ++ ++static int check_mmap_resource(struct resource *res, struct vm_area_struct *vma, ++ int *match, unsigned long *pfn) ++{ ++ *pfn = res->start >> PAGE_SHIFT; ++ if (*pfn == vma->vm_pgoff) { ++ *match = 1; ++ if ((vma->vm_end - vma->vm_start) != resource_size(res)) ++ return -EINVAL; ++ } else ++ *match = 0; ++ return 0; ++} ++ ++static int check_mmap_portal(struct ctx *ctx, struct vm_area_struct *vma, ++ int *match, unsigned long *pfn) ++{ ++ struct portal_mapping *portal; ++ int ret; ++ ++ list_for_each_entry(portal, &ctx->portals, list) { ++ ret = check_mmap_resource(&portal->phys[DPA_PORTAL_CE], vma, ++ match, pfn); ++ if (*match) { ++ vma->vm_page_prot = ++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) ++ pgprot_cached_ns(vma->vm_page_prot); ++#else ++ pgprot_cached_noncoherent(vma->vm_page_prot); ++#endif ++ return ret; ++ } ++ ret = check_mmap_resource(&portal->phys[DPA_PORTAL_CI], vma, ++ match, pfn); ++ if (*match) { ++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); ++ return ret; ++ } ++ } ++ *match = 0; ++ return 0; ++} ++ ++static int usdpaa_mmap(struct file *filp, struct vm_area_struct *vma) ++{ ++ struct ctx *ctx = filp->private_data; ++ unsigned long pfn = 0; ++ int match, ret; ++ ++ spin_lock(&mem_lock); ++ ret = check_mmap_dma(ctx, vma, &match, &pfn); ++ if (!match) ++ ret = check_mmap_portal(ctx, vma, &match, &pfn); ++ spin_unlock(&mem_lock); ++ if (!match) ++ return -EINVAL; ++ if (!ret) ++ ret = remap_pfn_range(vma, vma->vm_start, pfn, ++ vma->vm_end - vma->vm_start, ++ vma->vm_page_prot); ++ return ret; ++} ++ ++/* Return 
the nearest rounded-up address >= 'addr' that is 'sz'-aligned. 'sz' ++ * must be a power of 2, but both 'addr' and 'sz' can be expressions. */ ++#define USDPAA_MEM_ROUNDUP(addr, sz) \ ++ ({ \ ++ unsigned long foo_align = (sz) - 1; \ ++ ((addr) + foo_align) & ~foo_align; \ ++ }) ++/* Searching for a size-aligned virtual address range starting from 'addr' */ ++static unsigned long usdpaa_get_unmapped_area(struct file *file, ++ unsigned long addr, ++ unsigned long len, ++ unsigned long pgoff, ++ unsigned long flags) ++{ ++ struct vm_area_struct *vma; ++ ++ if (len % PAGE_SIZE) ++ return -EINVAL; ++ if (!len) ++ return -EINVAL; ++ ++ /* Need to align the address to the largest pagesize of the mapping ++ * because the MMU requires the virtual address to have the same ++ * alignment as the physical address */ ++ addr = USDPAA_MEM_ROUNDUP(addr, largest_page_size(len)); ++ vma = find_vma(current->mm, addr); ++ /* Keep searching until we reach the end of currently-used virtual ++ * address-space or we find a big enough gap. */ ++ while (vma) { ++ if ((addr + len) < vma->vm_start) ++ return addr; ++ ++ addr = USDPAA_MEM_ROUNDUP(vma->vm_end, largest_page_size(len)); ++ vma = vma->vm_next; ++ } ++ if ((TASK_SIZE - len) < addr) ++ return -ENOMEM; ++ return addr; ++} ++ ++static long ioctl_id_alloc(struct ctx *ctx, void __user *arg) ++{ ++ struct usdpaa_ioctl_id_alloc i; ++ const struct alloc_backend *backend; ++ struct active_resource *res; ++ int ret = copy_from_user(&i, arg, sizeof(i)); ++ if (ret) ++ return ret; ++ if ((i.id_type >= usdpaa_id_max) || !i.num) ++ return -EINVAL; ++ backend = &alloc_backends[i.id_type]; ++ /* Allocate the required resource type */ ++ ret = backend->alloc(&i.base, i.num, i.align, i.partial); ++ if (ret < 0) ++ return ret; ++ i.num = ret; ++ /* Copy the result to user-space */ ++ ret = copy_to_user(arg, &i, sizeof(i)); ++ if (ret) { ++ backend->release(i.base, i.num); ++ return ret; ++ } ++ /* Assign the allocated range to the FD accounting */ ++ res = kmalloc(sizeof(*res), GFP_KERNEL); ++ if (!res) { ++ backend->release(i.base, i.num); ++ return -ENOMEM; ++ } ++ spin_lock(&ctx->lock); ++ res->id = i.base; ++ res->num = i.num; ++ res->refcount = 1; ++ list_add(&res->list, &ctx->resources[i.id_type]); ++ spin_unlock(&ctx->lock); ++ return 0; ++} ++ ++static long ioctl_id_release(struct ctx *ctx, void __user *arg) ++{ ++ struct usdpaa_ioctl_id_release i; ++ const struct alloc_backend *backend; ++ struct active_resource *tmp, *pos; ++ ++ int ret = copy_from_user(&i, arg, sizeof(i)); ++ if (ret) ++ return ret; ++ if ((i.id_type >= usdpaa_id_max) || !i.num) ++ return -EINVAL; ++ backend = &alloc_backends[i.id_type]; ++ /* Pull the range out of the FD accounting - the range is valid iff this ++ * succeeds. 
*/
++	spin_lock(&ctx->lock);
++	list_for_each_entry_safe(pos, tmp, &ctx->resources[i.id_type], list) {
++		if (pos->id == i.base && pos->num == i.num) {
++			pos->refcount--;
++			if (pos->refcount) {
++				spin_unlock(&ctx->lock);
++				return 0; /* Still being used */
++			}
++			list_del(&pos->list);
++			kfree(pos);
++			spin_unlock(&ctx->lock);
++			goto found;
++		}
++	}
++	/* Failed to find the resource */
++	spin_unlock(&ctx->lock);
++	pr_err("Couldn't find resource type %d base 0x%x num %d\n",
++	       i.id_type, i.base, i.num);
++	return -EINVAL;
++found:
++	/* Release the resource to the backend */
++	backend->release(i.base, i.num);
++	return 0;
++}
++
++static long ioctl_id_reserve(struct ctx *ctx, void __user *arg)
++{
++	struct usdpaa_ioctl_id_reserve i;
++	const struct alloc_backend *backend;
++	struct active_resource *tmp, *pos;
++
++	int ret = copy_from_user(&i, arg, sizeof(i));
++	if (ret)
++		return ret;
++	if ((i.id_type >= usdpaa_id_max) || !i.num)
++		return -EINVAL;
++	backend = &alloc_backends[i.id_type];
++	if (!backend->reserve)
++		return -EINVAL;
++	/* Pull the range out of the FD accounting - the range is valid iff
++	 * this succeeds. */
++	spin_lock(&ctx->lock);
++	list_for_each_entry_safe(pos, tmp, &ctx->resources[i.id_type], list) {
++		if (pos->id == i.base && pos->num == i.num) {
++			pos->refcount++;
++			spin_unlock(&ctx->lock);
++			return 0;
++		}
++	}
++
++	/* Failed to find the resource */
++	spin_unlock(&ctx->lock);
++
++	/* Reserve the resource in the backend */
++	ret = backend->reserve(i.base, i.num);
++	if (ret)
++		return ret;
++	/* Assign the reserved range to the FD accounting */
++	pos = kmalloc(sizeof(*pos), GFP_KERNEL);
++	if (!pos) {
++		backend->release(i.base, i.num);
++		return -ENOMEM;
++	}
++	spin_lock(&ctx->lock);
++	pos->id = i.base;
++	pos->num = i.num;
++	pos->refcount = 1;
++	list_add(&pos->list, &ctx->resources[i.id_type]);
++	spin_unlock(&ctx->lock);
++	return 0;
++}
++
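Before the DMA-map handler, a brief usage sketch may help show how a process
drives the ID allocator ioctls above (illustrative only, not part of the
patch; it assumes a UAPI header <linux/fsl_usdpaa.h> exporting the
USDPAA_IOCTL_ID_ALLOC/USDPAA_IOCTL_ID_RELEASE macros and the
usdpaa_ioctl_id_alloc/_release layouts this driver copies from user space):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fsl_usdpaa.h>	/* assumed UAPI header */

int main(void)
{
	/* Ask for 8 frame-queue IDs, aligned to 8, all-or-nothing */
	struct usdpaa_ioctl_id_alloc alloc = {
		.id_type = usdpaa_id_fqid,
		.num = 8,
		.align = 8,
		.partial = 0,
	};
	int fd = open("/dev/fsl-usdpaa", O_RDWR);
	if (fd < 0 || ioctl(fd, USDPAA_IOCTL_ID_ALLOC, &alloc) < 0) {
		perror("usdpaa id alloc");
		return 1;
	}
	printf("got %u FQIDs starting at %u\n", alloc.num, alloc.base);

	/* Return the same range; the driver looks it up in the FD's
	 * accounting list before releasing it to the backend */
	struct usdpaa_ioctl_id_release rel = {
		.id_type = usdpaa_id_fqid,
		.base = alloc.base,
		.num = alloc.num,
	};
	ioctl(fd, USDPAA_IOCTL_ID_RELEASE, &rel);
	close(fd);
	return 0;
}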
++static long ioctl_dma_map(struct file *fp, struct ctx *ctx,
++			  struct usdpaa_ioctl_dma_map *i)
++{
++	struct mem_fragment *frag, *start_frag, *next_frag;
++	struct mem_mapping *map, *tmp;
++	int ret = 0;
++	u32 largest_page, so_far = 0;
++	int frag_count = 0;
++	unsigned long next_addr = PAGE_SIZE, populate;
++
++	/* error checking to ensure values copied from user space are valid */
++	if (i->len % PAGE_SIZE)
++		return -EINVAL;
++
++	map = kmalloc(sizeof(*map), GFP_KERNEL);
++	if (!map)
++		return -ENOMEM;
++
++	spin_lock(&mem_lock);
++	if (i->flags & USDPAA_DMA_FLAG_SHARE) {
++		list_for_each_entry(frag, &mem_list, list) {
++			if (frag->refs && (frag->flags &
++					   USDPAA_DMA_FLAG_SHARE) &&
++			    !strncmp(i->name, frag->name,
++				     USDPAA_DMA_NAME_MAX)) {
++				/* Matching entry */
++				if ((i->flags & USDPAA_DMA_FLAG_CREATE) &&
++				    !(i->flags & USDPAA_DMA_FLAG_LAZY)) {
++					ret = -EBUSY;
++					goto out;
++				}
++
++				/* Check to ensure size matches record */
++				if (i->len != frag->map_len && i->len) {
++					pr_err("ioctl_dma_map() Size requested does not match %s and is non-zero\n",
++					       frag->name);
++					ret = -EINVAL;
++					goto out;
++				}
++
++				/* Check if this has already been mapped
++				   to this process */
++				list_for_each_entry(tmp, &ctx->maps, list)
++					if (tmp->root_frag == frag) {
++						/* Already mapped, just need to
++						   inc ref count */
++						tmp->refs++;
++						kfree(map);
++						i->did_create = 0;
++						i->len = tmp->total_size;
++						i->phys_addr = frag->base;
++						i->ptr = tmp->virt_addr;
++						spin_unlock(&mem_lock);
++						return 0;
++					}
++				/* Matching entry - just need to map */
++				i->has_locking = frag->has_locking;
++				i->did_create = 0;
++				i->len = frag->map_len;
++				start_frag = frag;
++				goto do_map;
++			}
++		}
++		/* No matching entry */
++		if (!(i->flags & USDPAA_DMA_FLAG_CREATE)) {
++			pr_err("ioctl_dma_map() No matching entry\n");
++			ret = -ENOMEM;
++			goto out;
++		}
++	}
++	/* New fragment required, size must be provided. */
++	if (!i->len) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	/* Find one or more contiguous fragments that satisfy the total
++	   length, trying to minimize the number of fragments; compute the
++	   largest page size that the allocation could use */
++	largest_page = largest_page_size(i->len);
++	start_frag = NULL;
++	while (largest_page &&
++	       largest_page <= largest_page_size(phys_size) &&
++	       start_frag == NULL) {
++		/* Search the list for a frag of that size */
++		list_for_each_entry(frag, &mem_list, list) {
++			if (!frag->refs && (frag->len == largest_page)) {
++				/* See if the next x fragments are free
++				   and can accommodate the size */
++				u32 found_size = largest_page;
++				next_frag = list_entry(frag->list.prev,
++						       struct mem_fragment,
++						       list);
++				/* If the fragment is too small check
++				   if the neighbours can support it */
++				while (found_size < i->len) {
++					if (&mem_list == &next_frag->list)
++						break; /* End of list */
++					if (next_frag->refs != 0 ||
++					    next_frag->len == 0)
++						break; /* not enough space */
++					found_size += next_frag->len;
++					next_frag = list_entry(
++						next_frag->list.prev,
++						struct mem_fragment,
++						list);
++				}
++				if (found_size >= i->len) {
++					/* Success! there is enough contiguous
++					   free space */
++					start_frag = frag;
++					break;
++				}
++			}
++		} /* next frag loop */
++		/* Couldn't satisfy the request with this
++		   largest page size, try a smaller one */
++		largest_page >>= 2;
++	}
++	if (start_frag == NULL) {
++		/* Couldn't find proper amount of space */
++		ret = -ENOMEM;
++		goto out;
++	}
++	i->did_create = 1;
++do_map:
++	/* Verify there is sufficient space to do the mapping */
++	down_write(&current->mm->mmap_sem);
++	next_addr = usdpaa_get_unmapped_area(fp, next_addr, i->len, 0, 0);
++	up_write(&current->mm->mmap_sem);
++
++	if (next_addr & ~PAGE_MASK) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	/* We may need to divide the final fragment to accommodate the
++	   mapping */
++	next_frag = start_frag;
++	while (so_far != i->len) {
++		BUG_ON(next_frag->len == 0);
++		while ((next_frag->len + so_far) > i->len) {
++			/* Split frag until they match */
++			split_frag(next_frag);
++		}
++		so_far += next_frag->len;
++		next_frag->refs++;
++		++frag_count;
++		next_frag = list_entry(next_frag->list.prev,
++				       struct mem_fragment, list);
++	}
++	if (i->did_create) {
++		size_t name_len = 0;
++		start_frag->flags = i->flags;
++		strncpy(start_frag->name, i->name, USDPAA_DMA_NAME_MAX);
++		name_len = strnlen(start_frag->name, USDPAA_DMA_NAME_MAX);
++		if (name_len >= USDPAA_DMA_NAME_MAX) {
++			ret = -EFAULT;
++			goto out;
++		}
++		start_frag->map_len = i->len;
++		start_frag->has_locking = i->has_locking;
++		init_waitqueue_head(&start_frag->wq);
++		start_frag->owner = NULL;
++	}
++
++	/* Setup the map entry */
++	map->root_frag = start_frag;
++	map->total_size = i->len;
++	map->frag_count = frag_count;
++	map->refs = 1;
++	list_add(&map->list, &ctx->maps);
++	i->phys_addr = start_frag->base;
++out:
++	spin_unlock(&mem_lock);
++
++	if (!ret) {
++		unsigned long longret;
++		down_write(&current->mm->mmap_sem);
++		longret = do_mmap_pgoff(fp, next_addr, map->total_size,
++					PROT_READ |
++					(i->flags &
++					 USDPAA_DMA_FLAG_RDONLY ? 0
++					 : PROT_WRITE),
++					MAP_SHARED,
++					start_frag->pfn_base,
++					&populate);
++		up_write(&current->mm->mmap_sem);
++		if (longret & ~PAGE_MASK) {
++			ret = (int)longret;
++		} else {
++			i->ptr = (void *)longret;
++			map->virt_addr = i->ptr;
++		}
++	} else
++		kfree(map);
++	return ret;
++}
++
++static long ioctl_dma_unmap(struct ctx *ctx, void __user *arg)
++{
++	struct mem_mapping *map;
++	struct vm_area_struct *vma;
++	int ret, i;
++	struct mem_fragment *current_frag;
++	size_t sz;
++	unsigned long base;
++	unsigned long vaddr;
++
++	down_write(&current->mm->mmap_sem);
++	vma = find_vma(current->mm, (unsigned long)arg);
++	if (!vma || (vma->vm_start > (unsigned long)arg)) {
++		up_write(&current->mm->mmap_sem);
++		return -EFAULT;
++	}
++	spin_lock(&mem_lock);
++	list_for_each_entry(map, &ctx->maps, list) {
++		if (map->root_frag->pfn_base == vma->vm_pgoff) {
++			/* Drop the map lock if we hold it */
++			if (map->root_frag->has_locking &&
++			    (map->root_frag->owner == map)) {
++				map->root_frag->owner = NULL;
++				wake_up(&map->root_frag->wq);
++			}
++			goto map_match;
++		}
++	}
++	/* Failed to find a matching mapping for this process */
++	ret = -EFAULT;
++	spin_unlock(&mem_lock);
++	goto out;
++map_match:
++	map->refs--;
++	if (map->refs != 0) {
++		/* Another dma_map call is still referencing this mapping */
++		ret = 0;
++		spin_unlock(&mem_lock);
++		goto out;
++	}
++
++	current_frag = map->root_frag;
++	vaddr = (unsigned long) map->virt_addr;
++	for (i = 0; i < map->frag_count; i++) {
++		DPA_ASSERT(current_frag->refs > 0);
++		--current_frag->refs;
++#if !(defined(CONFIG_ARM) || defined(CONFIG_ARM64))
++		/*
++		 * Make sure we invalidate the TLB entry for
++		 * this fragment, otherwise a remap of a different
++		 * page to this vaddr would give access to an
++		 * incorrect piece of memory
++		 */
++		cleartlbcam(vaddr, mfspr(SPRN_PID));
++#endif
++		vaddr += current_frag->len;
++		current_frag = list_entry(current_frag->list.prev,
++					  struct mem_fragment, list);
++	}
++	map->root_frag->name[0] = 0;
++	list_del(&map->list);
++	compress_frags();
++	spin_unlock(&mem_lock);
++
++	base = vma->vm_start;
++	sz = vma->vm_end - vma->vm_start;
++	do_munmap(current->mm, base, sz);
++	ret = 0;
++ out:
++	up_write(&current->mm->mmap_sem);
++	return ret;
++}
++
++static long ioctl_dma_stats(struct ctx *ctx, void __user *arg)
++{
++	struct mem_fragment *frag;
++	struct usdpaa_ioctl_dma_used result;
++
++	result.free_bytes = 0;
++	result.total_bytes = phys_size;
++
++	list_for_each_entry(frag, &mem_list, list) {
++		if (frag->refs == 0)
++			result.free_bytes += frag->len;
++	}
++
++	return copy_to_user(arg, &result, sizeof(result));
++}
++
++static int test_lock(struct mem_mapping *map)
++{
++	int ret = 0;
++	spin_lock(&mem_lock);
++	if (!map->root_frag->owner) {
++		map->root_frag->owner = map;
++		ret = 1;
++	}
++	spin_unlock(&mem_lock);
++	return ret;
++}
++
++static long ioctl_dma_lock(struct ctx *ctx, void __user *arg)
++{
++	struct mem_mapping *map;
++	struct vm_area_struct *vma;
++
++	down_read(&current->mm->mmap_sem);
++	vma = find_vma(current->mm, (unsigned long)arg);
++	if (!vma || (vma->vm_start > (unsigned long)arg)) {
++		up_read(&current->mm->mmap_sem);
++		return -EFAULT;
++	}
++	spin_lock(&mem_lock);
++	list_for_each_entry(map, &ctx->maps, list) {
++		if (map->root_frag->pfn_base == vma->vm_pgoff)
++			goto map_match;
++	}
++	map = NULL;
++map_match:
++	spin_unlock(&mem_lock);
++	up_read(&current->mm->mmap_sem);
++
++	if (!map)
++		return -EFAULT;
++	if (!map->root_frag->has_locking)
++		return -ENODEV;
++	return wait_event_interruptible(map->root_frag->wq, test_lock(map));
++}
++
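To show how the map and lock ioctls above fit together from user space, here
is a minimal sketch (illustrative only, not part of the patch; the header
name is an assumption, while the flag and field names follow the
usdpaa_ioctl_dma_map usage in this driver). Note that USDPAA_IOCTL_DMA_LOCK
and _UNLOCK take the region's mapped virtual address as their argument,
matching the find_vma() lookups above:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fsl_usdpaa.h>	/* assumed UAPI header */

int main(void)
{
	/* Create (or attach to) a named, shareable, lockable DMA region */
	struct usdpaa_ioctl_dma_map map = {
		.len = 0x400000,	/* 4 MB: a power-of-4 size */
		.flags = USDPAA_DMA_FLAG_SHARE | USDPAA_DMA_FLAG_CREATE,
		.has_locking = 1,
		.name = "demo_region",	/* hypothetical region name */
	};
	int fd = open("/dev/fsl-usdpaa", O_RDWR);
	if (fd < 0 || ioctl(fd, USDPAA_IOCTL_DMA_MAP, &map) < 0) {
		perror("usdpaa dma map");
		return 1;
	}
	printf("mapped %s at %p (phys 0x%llx)\n", map.name, map.ptr,
	       (unsigned long long)map.phys_addr);

	/* Take the cross-process lock, do exclusive work, then release it */
	if (ioctl(fd, USDPAA_IOCTL_DMA_LOCK, map.ptr) == 0)
		ioctl(fd, USDPAA_IOCTL_DMA_UNLOCK, map.ptr);

	ioctl(fd, USDPAA_IOCTL_DMA_UNMAP, map.ptr);
	close(fd);
	return 0;
}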
++static long ioctl_dma_unlock(struct ctx *ctx, void __user *arg) ++{ ++ struct mem_mapping *map; ++ struct vm_area_struct *vma; ++ int ret; ++ ++ down_read(¤t->mm->mmap_sem); ++ vma = find_vma(current->mm, (unsigned long)arg); ++ if (!vma || (vma->vm_start > (unsigned long)arg)) ++ ret = -EFAULT; ++ else { ++ spin_lock(&mem_lock); ++ list_for_each_entry(map, &ctx->maps, list) { ++ if (map->root_frag->pfn_base == vma->vm_pgoff) { ++ if (!map->root_frag->has_locking) ++ ret = -ENODEV; ++ else if (map->root_frag->owner == map) { ++ map->root_frag->owner = NULL; ++ wake_up(&map->root_frag->wq); ++ ret = 0; ++ } else ++ ret = -EBUSY; ++ goto map_match; ++ } ++ } ++ ret = -EINVAL; ++map_match: ++ spin_unlock(&mem_lock); ++ } ++ up_read(¤t->mm->mmap_sem); ++ return ret; ++} ++ ++static int portal_mmap(struct file *fp, struct resource *res, void **ptr) ++{ ++ unsigned long longret = 0, populate; ++ resource_size_t len; ++ ++ down_write(¤t->mm->mmap_sem); ++ len = resource_size(res); ++ if (len != (unsigned long)len) ++ return -EINVAL; ++ longret = do_mmap_pgoff(fp, PAGE_SIZE, (unsigned long)len, ++ PROT_READ | PROT_WRITE, MAP_SHARED, ++ res->start >> PAGE_SHIFT, &populate); ++ up_write(¤t->mm->mmap_sem); ++ ++ if (longret & ~PAGE_MASK) ++ return (int)longret; ++ ++ *ptr = (void *) longret; ++ return 0; ++} ++ ++static void portal_munmap(struct resource *res, void *ptr) ++{ ++ down_write(¤t->mm->mmap_sem); ++ do_munmap(current->mm, (unsigned long)ptr, resource_size(res)); ++ up_write(¤t->mm->mmap_sem); ++} ++ ++static long ioctl_portal_map(struct file *fp, struct ctx *ctx, ++ struct usdpaa_ioctl_portal_map *arg) ++{ ++ struct portal_mapping *mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); ++ int ret; ++ ++ if (!mapping) ++ return -ENOMEM; ++ ++ mapping->user = *arg; ++ mapping->iommu_domain = NULL; ++ ++ if (mapping->user.type == usdpaa_portal_qman) { ++ mapping->qportal = ++ qm_get_unused_portal_idx(mapping->user.index); ++ if (!mapping->qportal) { ++ ret = -ENODEV; ++ goto err_get_portal; ++ } ++ mapping->phys = &mapping->qportal->addr_phys[0]; ++ mapping->user.channel = mapping->qportal->public_cfg.channel; ++ mapping->user.pools = mapping->qportal->public_cfg.pools; ++ mapping->user.index = mapping->qportal->public_cfg.index; ++ } else if (mapping->user.type == usdpaa_portal_bman) { ++ mapping->bportal = ++ bm_get_unused_portal_idx(mapping->user.index); ++ if (!mapping->bportal) { ++ ret = -ENODEV; ++ goto err_get_portal; ++ } ++ mapping->phys = &mapping->bportal->addr_phys[0]; ++ mapping->user.index = mapping->bportal->public_cfg.index; ++ } else { ++ ret = -EINVAL; ++ goto err_copy_from_user; ++ } ++ /* Need to put pcfg in ctx's list before the mmaps because the mmap ++ * handlers look it up. 
*/ ++ spin_lock(&mem_lock); ++ list_add(&mapping->list, &ctx->portals); ++ spin_unlock(&mem_lock); ++ ret = portal_mmap(fp, &mapping->phys[DPA_PORTAL_CE], ++ &mapping->user.addr.cena); ++ if (ret) ++ goto err_mmap_cena; ++ ret = portal_mmap(fp, &mapping->phys[DPA_PORTAL_CI], ++ &mapping->user.addr.cinh); ++ if (ret) ++ goto err_mmap_cinh; ++ *arg = mapping->user; ++ return ret; ++ ++err_mmap_cinh: ++ portal_munmap(&mapping->phys[DPA_PORTAL_CE], mapping->user.addr.cena); ++err_mmap_cena: ++ if ((mapping->user.type == usdpaa_portal_qman) && mapping->qportal) ++ qm_put_unused_portal(mapping->qportal); ++ else if ((mapping->user.type == usdpaa_portal_bman) && mapping->bportal) ++ bm_put_unused_portal(mapping->bportal); ++ spin_lock(&mem_lock); ++ list_del(&mapping->list); ++ spin_unlock(&mem_lock); ++err_get_portal: ++err_copy_from_user: ++ kfree(mapping); ++ return ret; ++} ++ ++static long ioctl_portal_unmap(struct ctx *ctx, struct usdpaa_portal_map *i) ++{ ++ struct portal_mapping *mapping; ++ struct vm_area_struct *vma; ++ unsigned long pfn; ++ u32 channel; ++ ++ /* Get the PFN corresponding to one of the virt addresses */ ++ down_read(¤t->mm->mmap_sem); ++ vma = find_vma(current->mm, (unsigned long)i->cinh); ++ if (!vma || (vma->vm_start > (unsigned long)i->cinh)) { ++ up_read(¤t->mm->mmap_sem); ++ return -EFAULT; ++ } ++ pfn = vma->vm_pgoff; ++ up_read(¤t->mm->mmap_sem); ++ ++ /* Find the corresponding portal */ ++ spin_lock(&mem_lock); ++ list_for_each_entry(mapping, &ctx->portals, list) { ++ if (pfn == (mapping->phys[DPA_PORTAL_CI].start >> PAGE_SHIFT)) ++ goto found; ++ } ++ mapping = NULL; ++found: ++ if (mapping) ++ list_del(&mapping->list); ++ spin_unlock(&mem_lock); ++ if (!mapping) ++ return -ENODEV; ++ portal_munmap(&mapping->phys[DPA_PORTAL_CI], mapping->user.addr.cinh); ++ portal_munmap(&mapping->phys[DPA_PORTAL_CE], mapping->user.addr.cena); ++ if (mapping->user.type == usdpaa_portal_qman) { ++ init_qm_portal(mapping->qportal, ++ &mapping->qman_portal_low); ++ ++ /* Tear down any FQs this portal is referencing */ ++ channel = mapping->qportal->public_cfg.channel; ++ qm_check_and_destroy_fqs(&mapping->qman_portal_low, ++ &channel, ++ check_portal_channel); ++ qm_put_unused_portal(mapping->qportal); ++ } else if (mapping->user.type == usdpaa_portal_bman) { ++ init_bm_portal(mapping->bportal, ++ &mapping->bman_portal_low); ++ bm_put_unused_portal(mapping->bportal); ++ } ++ kfree(mapping); ++ return 0; ++} ++ ++static void portal_config_pamu(struct qm_portal_config *pcfg, uint8_t sdest, ++ uint32_t cpu, uint32_t cache, uint32_t window) ++{ ++#ifdef CONFIG_FSL_PAMU ++ int ret; ++ int window_count = 1; ++ struct iommu_domain_geometry geom_attr; ++ struct pamu_stash_attribute stash_attr; ++ ++ pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type); ++ if (!pcfg->iommu_domain) { ++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_alloc() failed", ++ __func__); ++ goto _no_iommu; ++ } ++ geom_attr.aperture_start = 0; ++ geom_attr.aperture_end = ++ ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1; ++ geom_attr.force_aperture = true; ++ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY, ++ &geom_attr); ++ if (ret < 0) { ++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d", ++ __func__, ret); ++ goto _iommu_domain_free; ++ } ++ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS, ++ &window_count); ++ if (ret < 0) { ++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d", ++ __func__, ret); ++ goto _iommu_domain_free; ++ } 
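++	/* Steer portal-initiated stashing transactions to the chosen CPU's
++	 * cache via the PAMU stash attributes; subwindow 0 is the only PAMU
++	 * window enabled for this domain below. */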
++ stash_attr.cpu = cpu; ++ stash_attr.cache = cache; ++ /* set stash information for the window */ ++ stash_attr.window = 0; ++ ++ ret = iommu_domain_set_attr(pcfg->iommu_domain, ++ DOMAIN_ATTR_FSL_PAMU_STASH, ++ &stash_attr); ++ if (ret < 0) { ++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d", ++ __func__, ret); ++ goto _iommu_domain_free; ++ } ++ ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36, ++ IOMMU_READ | IOMMU_WRITE); ++ if (ret < 0) { ++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_window_enable() = %d", ++ __func__, ret); ++ goto _iommu_domain_free; ++ } ++ ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev); ++ if (ret < 0) { ++ pr_err(KBUILD_MODNAME ":%s(): iommu_device_attach() = %d", ++ __func__, ret); ++ goto _iommu_domain_free; ++ } ++ ret = iommu_domain_set_attr(pcfg->iommu_domain, ++ DOMAIN_ATTR_FSL_PAMU_ENABLE, ++ &window_count); ++ if (ret < 0) { ++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d", ++ __func__, ret); ++ goto _iommu_detach_device; ++ } ++_no_iommu: ++#endif ++ ++#ifdef CONFIG_FSL_QMAN_CONFIG ++ if (qman_set_sdest(pcfg->public_cfg.channel, sdest)) ++#endif ++ pr_warn("Failed to set QMan portal's stash request queue\n"); ++ ++ return; ++ ++#ifdef CONFIG_FSL_PAMU ++_iommu_detach_device: ++ iommu_detach_device(pcfg->iommu_domain, NULL); ++_iommu_domain_free: ++ iommu_domain_free(pcfg->iommu_domain); ++#endif ++} ++ ++static long ioctl_allocate_raw_portal(struct file *fp, struct ctx *ctx, ++ struct usdpaa_ioctl_raw_portal *arg) ++{ ++ struct portal_mapping *mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); ++ int ret; ++ ++ if (!mapping) ++ return -ENOMEM; ++ ++ mapping->user.type = arg->type; ++ mapping->iommu_domain = NULL; ++ if (arg->type == usdpaa_portal_qman) { ++ mapping->qportal = qm_get_unused_portal_idx(arg->index); ++ if (!mapping->qportal) { ++ ret = -ENODEV; ++ goto err; ++ } ++ mapping->phys = &mapping->qportal->addr_phys[0]; ++ arg->index = mapping->qportal->public_cfg.index; ++ arg->cinh = mapping->qportal->addr_phys[DPA_PORTAL_CI].start; ++ arg->cena = mapping->qportal->addr_phys[DPA_PORTAL_CE].start; ++ if (arg->enable_stash) { ++ /* Setup the PAMU with the supplied parameters */ ++ portal_config_pamu(mapping->qportal, arg->sdest, ++ arg->cpu, arg->cache, arg->window); ++ } ++ } else if (mapping->user.type == usdpaa_portal_bman) { ++ mapping->bportal = ++ bm_get_unused_portal_idx(arg->index); ++ if (!mapping->bportal) { ++ ret = -ENODEV; ++ goto err; ++ } ++ mapping->phys = &mapping->bportal->addr_phys[0]; ++ arg->index = mapping->bportal->public_cfg.index; ++ arg->cinh = mapping->bportal->addr_phys[DPA_PORTAL_CI].start; ++ arg->cena = mapping->bportal->addr_phys[DPA_PORTAL_CE].start; ++ } else { ++ ret = -EINVAL; ++ goto err; ++ } ++ /* Need to put pcfg in ctx's list before the mmaps because the mmap ++ * handlers look it up. 
*/ ++ spin_lock(&mem_lock); ++ list_add(&mapping->list, &ctx->portals); ++ spin_unlock(&mem_lock); ++ return 0; ++err: ++ kfree(mapping); ++ return ret; ++} ++ ++static long ioctl_free_raw_portal(struct file *fp, struct ctx *ctx, ++ struct usdpaa_ioctl_raw_portal *arg) ++{ ++ struct portal_mapping *mapping; ++ u32 channel; ++ ++ /* Find the corresponding portal */ ++ spin_lock(&mem_lock); ++ list_for_each_entry(mapping, &ctx->portals, list) { ++ if (mapping->phys[DPA_PORTAL_CI].start == arg->cinh) ++ goto found; ++ } ++ mapping = NULL; ++found: ++ if (mapping) ++ list_del(&mapping->list); ++ spin_unlock(&mem_lock); ++ if (!mapping) ++ return -ENODEV; ++ if (mapping->user.type == usdpaa_portal_qman) { ++ init_qm_portal(mapping->qportal, ++ &mapping->qman_portal_low); ++ ++ /* Tear down any FQs this portal is referencing */ ++ channel = mapping->qportal->public_cfg.channel; ++ qm_check_and_destroy_fqs(&mapping->qman_portal_low, ++ &channel, ++ check_portal_channel); ++ qm_put_unused_portal(mapping->qportal); ++ } else if (mapping->user.type == usdpaa_portal_bman) { ++ init_bm_portal(mapping->bportal, ++ &mapping->bman_portal_low); ++ bm_put_unused_portal(mapping->bportal); ++ } ++ kfree(mapping); ++ return 0; ++} ++ ++static long usdpaa_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) ++{ ++ struct ctx *ctx = fp->private_data; ++ void __user *a = (void __user *)arg; ++ switch (cmd) { ++ case USDPAA_IOCTL_ID_ALLOC: ++ return ioctl_id_alloc(ctx, a); ++ case USDPAA_IOCTL_ID_RELEASE: ++ return ioctl_id_release(ctx, a); ++ case USDPAA_IOCTL_ID_RESERVE: ++ return ioctl_id_reserve(ctx, a); ++ case USDPAA_IOCTL_DMA_MAP: ++ { ++ struct usdpaa_ioctl_dma_map input; ++ int ret; ++ if (copy_from_user(&input, a, sizeof(input))) ++ return -EFAULT; ++ ret = ioctl_dma_map(fp, ctx, &input); ++ if (copy_to_user(a, &input, sizeof(input))) ++ return -EFAULT; ++ return ret; ++ } ++ case USDPAA_IOCTL_DMA_UNMAP: ++ return ioctl_dma_unmap(ctx, a); ++ case USDPAA_IOCTL_DMA_LOCK: ++ return ioctl_dma_lock(ctx, a); ++ case USDPAA_IOCTL_DMA_UNLOCK: ++ return ioctl_dma_unlock(ctx, a); ++ case USDPAA_IOCTL_PORTAL_MAP: ++ { ++ struct usdpaa_ioctl_portal_map input; ++ int ret; ++ if (copy_from_user(&input, a, sizeof(input))) ++ return -EFAULT; ++ ret = ioctl_portal_map(fp, ctx, &input); ++ if (copy_to_user(a, &input, sizeof(input))) ++ return -EFAULT; ++ return ret; ++ } ++ case USDPAA_IOCTL_PORTAL_UNMAP: ++ { ++ struct usdpaa_portal_map input; ++ if (copy_from_user(&input, a, sizeof(input))) ++ return -EFAULT; ++ return ioctl_portal_unmap(ctx, &input); ++ } ++ case USDPAA_IOCTL_DMA_USED: ++ return ioctl_dma_stats(ctx, a); ++ case USDPAA_IOCTL_ALLOC_RAW_PORTAL: ++ { ++ struct usdpaa_ioctl_raw_portal input; ++ int ret; ++ if (copy_from_user(&input, a, sizeof(input))) ++ return -EFAULT; ++ ret = ioctl_allocate_raw_portal(fp, ctx, &input); ++ if (copy_to_user(a, &input, sizeof(input))) ++ return -EFAULT; ++ return ret; ++ } ++ case USDPAA_IOCTL_FREE_RAW_PORTAL: ++ { ++ struct usdpaa_ioctl_raw_portal input; ++ if (copy_from_user(&input, a, sizeof(input))) ++ return -EFAULT; ++ return ioctl_free_raw_portal(fp, ctx, &input); ++ } ++ } ++ return -EINVAL; ++} ++ ++static long usdpaa_ioctl_compat(struct file *fp, unsigned int cmd, ++ unsigned long arg) ++{ ++#ifdef CONFIG_COMPAT ++ struct ctx *ctx = fp->private_data; ++ void __user *a = (void __user *)arg; ++#endif ++ switch (cmd) { ++#ifdef CONFIG_COMPAT ++ case USDPAA_IOCTL_DMA_MAP_COMPAT: ++ { ++ int ret; ++ struct usdpaa_ioctl_dma_map_compat input; ++ struct 
usdpaa_ioctl_dma_map converted; ++ ++ if (copy_from_user(&input, a, sizeof(input))) ++ return -EFAULT; ++ ++ converted.ptr = compat_ptr(input.ptr); ++ converted.phys_addr = input.phys_addr; ++ converted.len = input.len; ++ converted.flags = input.flags; ++ strncpy(converted.name, input.name, USDPAA_DMA_NAME_MAX); ++ converted.has_locking = input.has_locking; ++ converted.did_create = input.did_create; ++ ++ ret = ioctl_dma_map(fp, ctx, &converted); ++ input.ptr = ptr_to_compat(converted.ptr); ++ input.phys_addr = converted.phys_addr; ++ input.len = converted.len; ++ input.flags = converted.flags; ++ strncpy(input.name, converted.name, USDPAA_DMA_NAME_MAX); ++ input.has_locking = converted.has_locking; ++ input.did_create = converted.did_create; ++ if (copy_to_user(a, &input, sizeof(input))) ++ return -EFAULT; ++ return ret; ++ } ++ case USDPAA_IOCTL_PORTAL_MAP_COMPAT: ++ { ++ int ret; ++ struct compat_usdpaa_ioctl_portal_map input; ++ struct usdpaa_ioctl_portal_map converted; ++ if (copy_from_user(&input, a, sizeof(input))) ++ return -EFAULT; ++ converted.type = input.type; ++ converted.index = input.index; ++ ret = ioctl_portal_map(fp, ctx, &converted); ++ input.addr.cinh = ptr_to_compat(converted.addr.cinh); ++ input.addr.cena = ptr_to_compat(converted.addr.cena); ++ input.channel = converted.channel; ++ input.pools = converted.pools; ++ input.index = converted.index; ++ if (copy_to_user(a, &input, sizeof(input))) ++ return -EFAULT; ++ return ret; ++ } ++ case USDPAA_IOCTL_PORTAL_UNMAP_COMPAT: ++ { ++ struct usdpaa_portal_map_compat input; ++ struct usdpaa_portal_map converted; ++ ++ if (copy_from_user(&input, a, sizeof(input))) ++ return -EFAULT; ++ converted.cinh = compat_ptr(input.cinh); ++ converted.cena = compat_ptr(input.cena); ++ return ioctl_portal_unmap(ctx, &converted); ++ } ++ case USDPAA_IOCTL_ALLOC_RAW_PORTAL_COMPAT: ++ { ++ int ret; ++ struct usdpaa_ioctl_raw_portal converted; ++ struct compat_ioctl_raw_portal input; ++ if (copy_from_user(&input, a, sizeof(input))) ++ return -EFAULT; ++ converted.type = input.type; ++ converted.index = input.index; ++ converted.enable_stash = input.enable_stash; ++ converted.cpu = input.cpu; ++ converted.cache = input.cache; ++ converted.window = input.window; ++ converted.sdest = input.sdest; ++ ret = ioctl_allocate_raw_portal(fp, ctx, &converted); ++ ++ input.cinh = converted.cinh; ++ input.cena = converted.cena; ++ input.index = converted.index; ++ ++ if (copy_to_user(a, &input, sizeof(input))) ++ return -EFAULT; ++ return ret; ++ } ++ case USDPAA_IOCTL_FREE_RAW_PORTAL_COMPAT: ++ { ++ struct usdpaa_ioctl_raw_portal converted; ++ struct compat_ioctl_raw_portal input; ++ if (copy_from_user(&input, a, sizeof(input))) ++ return -EFAULT; ++ converted.type = input.type; ++ converted.index = input.index; ++ converted.cinh = input.cinh; ++ converted.cena = input.cena; ++ return ioctl_free_raw_portal(fp, ctx, &converted); ++ } ++#endif ++ default: ++ return usdpaa_ioctl(fp, cmd, arg); ++ } ++ return -EINVAL; ++} ++ ++int usdpaa_get_portal_config(struct file *filp, void *cinh, ++ enum usdpaa_portal_type ptype, unsigned int *irq, ++ void **iir_reg) ++{ ++ /* Walk the list of portals for filp and return the config ++ for the portal that matches the hint */ ++ struct ctx *context; ++ struct portal_mapping *portal; ++ ++ /* First sanitize the filp */ ++ if (filp->f_op->open != usdpaa_open) ++ return -ENODEV; ++ context = filp->private_data; ++ spin_lock(&context->lock); ++ list_for_each_entry(portal, &context->portals, list) { ++ if (portal->user.type 
== ptype &&
++		    portal->user.addr.cinh == cinh) {
++			if (ptype == usdpaa_portal_qman) {
++				*irq = portal->qportal->public_cfg.irq;
++				*iir_reg = portal->qportal->addr_virt[1] +
++					   QM_REG_IIR;
++			} else {
++				*irq = portal->bportal->public_cfg.irq;
++				*iir_reg = portal->bportal->addr_virt[1] +
++					   BM_REG_IIR;
++			}
++			spin_unlock(&context->lock);
++			return 0;
++		}
++	}
++	spin_unlock(&context->lock);
++	return -EINVAL;
++}
++
++static const struct file_operations usdpaa_fops = {
++	.open		   = usdpaa_open,
++	.release	   = usdpaa_release,
++	.mmap		   = usdpaa_mmap,
++	.get_unmapped_area = usdpaa_get_unmapped_area,
++	.unlocked_ioctl	   = usdpaa_ioctl,
++	.compat_ioctl	   = usdpaa_ioctl_compat
++};
++
++static struct miscdevice usdpaa_miscdev = {
++	.name = "fsl-usdpaa",
++	.fops = &usdpaa_fops,
++	.minor = MISC_DYNAMIC_MINOR,
++};
++
++/* Early-boot memory allocation. The boot-arg "usdpaa_mem=<x>" is used to
++ * indicate how much memory (if any) to allocate during early boot. If the
++ * format "usdpaa_mem=<x>,<y>" is used, then <y> will be interpreted as the
++ * number of TLB1 entries to reserve (default is 1). If there are more mappings
++ * than there are TLB1 entries, fault-handling will occur. */
++
++static __init int usdpaa_mem(char *arg)
++{
++	pr_warn("usdpaa_mem argument is deprecated\n");
++	arg_phys_size = memparse(arg, &arg);
++	num_tlb = 1;
++	if (*arg == ',') {
++		unsigned long ul;
++		int err = kstrtoul(arg + 1, 0, &ul);
++		if (err < 0) {
++			num_tlb = 1;
++			pr_warn("ERROR, usdpaa_mem arg is invalid\n");
++		} else
++			num_tlb = (unsigned int)ul;
++	}
++	return 0;
++}
++early_param("usdpaa_mem", usdpaa_mem);
++
++static int usdpaa_mem_init(struct reserved_mem *rmem)
++{
++	phys_start = rmem->base;
++	phys_size = rmem->size;
++
++	WARN_ON(!(phys_start && phys_size));
++
++	return 0;
++}
++RESERVEDMEM_OF_DECLARE(usdpaa_mem_init, "fsl,usdpaa-mem", usdpaa_mem_init);
++
++__init int fsl_usdpaa_init_early(void)
++{
++	if (!phys_size || !phys_start) {
++		pr_info("No USDPAA memory, no 'fsl,usdpaa-mem' in device-tree\n");
++		return 0;
++	}
++	if (phys_size % PAGE_SIZE) {
++		pr_err("'fsl,usdpaa-mem' size must be a multiple of page size\n");
++		phys_size = 0;
++		return 0;
++	}
++	if (arg_phys_size && phys_size != arg_phys_size) {
++		pr_err("usdpaa_mem argument size (0x%llx) does not match device tree size (0x%llx)\n",
++		       arg_phys_size, phys_size);
++		phys_size = 0;
++		return 0;
++	}
++	pfn_start = phys_start >> PAGE_SHIFT;
++	pfn_size = phys_size >> PAGE_SHIFT;
++#ifdef CONFIG_PPC
++	first_tlb = current_tlb = tlbcam_index;
++	tlbcam_index += num_tlb;
++#endif
++	pr_info("USDPAA region at %llx:%llx(%lx:%lx), %d TLB1 entries\n",
++		phys_start, phys_size, pfn_start, pfn_size, num_tlb);
++	return 0;
++}
++subsys_initcall(fsl_usdpaa_init_early);
++
++
++static int __init usdpaa_init(void)
++{
++	struct mem_fragment *frag;
++	int ret;
++	u64 tmp_size = phys_size;
++	u64 tmp_start = phys_start;
++	u64 tmp_pfn_size = pfn_size;
++	u64 tmp_pfn_start = pfn_start;
++
++	pr_info("Freescale USDPAA process driver\n");
++	if (!phys_start) {
++		pr_warn("fsl-usdpaa: no region found\n");
++		return 0;
++	}
++
++	while (tmp_size != 0) {
++		u32 frag_size = largest_page_size(tmp_size);
++		frag = kmalloc(sizeof(*frag), GFP_KERNEL);
++		if (!frag) {
++			pr_err("Failed to setup USDPAA memory accounting\n");
++			return -ENOMEM;
++		}
++		frag->base = tmp_start;
++		frag->len = frag->root_len = frag_size;
++		frag->root_pfn = tmp_pfn_start;
++		frag->pfn_base = tmp_pfn_start;
++		frag->pfn_len = frag_size / PAGE_SIZE;
++		frag->refs = 0;
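++		/* Illustration of this carve-up with assumed sizes (not
++		 * actual board values): if largest_page_size() can return
++		 * 64MB, 16MB, 1MB, 64KB or 4KB, then a 96MB reserved region
++		 * is split into three fragments of 64MB + 16MB + 16MB, each
++		 * getting its own accounting entry on mem_list. */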
init_waitqueue_head(&frag->wq); ++ frag->owner = NULL; ++ list_add(&frag->list, &mem_list); ++ ++ /* Adjust for this frag */ ++ tmp_start += frag_size; ++ tmp_size -= frag_size; ++ tmp_pfn_start += frag_size / PAGE_SIZE; ++ tmp_pfn_size -= frag_size / PAGE_SIZE; ++ } ++ ret = misc_register(&usdpaa_miscdev); ++ if (ret) ++ pr_err("fsl-usdpaa: failed to register misc device\n"); ++ return ret; ++} ++ ++static void __exit usdpaa_exit(void) ++{ ++ misc_deregister(&usdpaa_miscdev); ++} ++ ++module_init(usdpaa_init); ++module_exit(usdpaa_exit); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Freescale Semiconductor"); ++MODULE_DESCRIPTION("Freescale USDPAA process driver"); +--- /dev/null ++++ b/drivers/staging/fsl_qbman/fsl_usdpaa_irq.c +@@ -0,0 +1,289 @@ ++/* Copyright (c) 2013 Freescale Semiconductor, Inc. ++ * All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */
++
++/* define a device that allows USDPAA processes to open a file
++   descriptor and specify which IRQ it wants to monitor using an ioctl()
++   When an IRQ is received, the device becomes readable so that a process
++   can use read() or select() type calls to monitor for IRQs */
++
++#include <linux/miscdevice.h>
++#include <linux/fs.h>
++#include <linux/cdev.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/poll.h>
++#include <linux/uaccess.h>
++#include <linux/fsl_usdpaa.h>
++#include <linux/module.h>
++#include <linux/fdtable.h>
++#include <linux/file.h>
++
++#include "qman_low.h"
++#include "bman_low.h"
++
++struct usdpaa_irq_ctx {
++	int irq_set; /* Set to true once the irq is set via ioctl */
++	unsigned int irq_num;
++	u32 last_irq_count; /* Last value returned from read */
++	u32 irq_count; /* Number of irqs since last read */
++	wait_queue_head_t wait_queue; /* Waiting processes */
++	spinlock_t lock;
++	void *inhibit_addr; /* inhibit register address */
++	struct file *usdpaa_filp;
++	char irq_name[128];
++};
++
++static int usdpaa_irq_open(struct inode *inode, struct file *filp)
++{
++	struct usdpaa_irq_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
++	if (!ctx)
++		return -ENOMEM;
++	ctx->irq_set = 0;
++	ctx->irq_count = 0;
++	ctx->last_irq_count = 0;
++	init_waitqueue_head(&ctx->wait_queue);
++	spin_lock_init(&ctx->lock);
++	filp->private_data = ctx;
++	return 0;
++}
++
++static int usdpaa_irq_release(struct inode *inode, struct file *filp)
++{
++	struct usdpaa_irq_ctx *ctx = filp->private_data;
++	if (ctx->irq_set) {
++		/* Inhibit the IRQ */
++		out_be32(ctx->inhibit_addr, 0x1);
++		irq_set_affinity_hint(ctx->irq_num, NULL);
++		free_irq(ctx->irq_num, ctx);
++		ctx->irq_set = 0;
++		fput(ctx->usdpaa_filp);
++	}
++	kfree(filp->private_data);
++	return 0;
++}
++
++static irqreturn_t usdpaa_irq_handler(int irq, void *_ctx)
++{
++	unsigned long flags;
++	struct usdpaa_irq_ctx *ctx = _ctx;
++	spin_lock_irqsave(&ctx->lock, flags);
++	++ctx->irq_count;
++	spin_unlock_irqrestore(&ctx->lock, flags);
++	wake_up_all(&ctx->wait_queue);
++	/* Set the inhibit register.  This will be re-enabled
++	   once the USDPAA code handles the IRQ */
++	out_be32(ctx->inhibit_addr, 0x1);
++	pr_info("Inhibit at %p count %d\n", ctx->inhibit_addr, ctx->irq_count);
++	return IRQ_HANDLED;
++}
++
++static int map_irq(struct file *fp, struct usdpaa_ioctl_irq_map *irq_map)
++{
++	struct usdpaa_irq_ctx *ctx = fp->private_data;
++	int ret;
++
++	if (ctx->irq_set) {
++		pr_debug("Setting USDPAA IRQ when it was already set!\n");
++		return -EBUSY;
++	}
++
++	ctx->usdpaa_filp = fget(irq_map->fd);
++	if (!ctx->usdpaa_filp) {
++		pr_debug("USDPAA fget(%d) returned NULL\n", irq_map->fd);
++		return -EINVAL;
++	}
++
++	ret = usdpaa_get_portal_config(ctx->usdpaa_filp, irq_map->portal_cinh,
++				       irq_map->type, &ctx->irq_num,
++				       &ctx->inhibit_addr);
++	if (ret) {
++		pr_debug("USDPAA IRQ couldn't identify portal\n");
++		fput(ctx->usdpaa_filp);
++		return ret;
++	}
++
++	ctx->irq_set = 1;
++
++	snprintf(ctx->irq_name, sizeof(ctx->irq_name),
++		 "usdpaa_irq %d", ctx->irq_num);
++
++	ret = request_irq(ctx->irq_num, usdpaa_irq_handler, 0,
++			  ctx->irq_name, ctx);
++	if (ret) {
++		pr_err("USDPAA request_irq(%d) failed, ret= %d\n",
++		       ctx->irq_num, ret);
++		ctx->irq_set = 0;
++		fput(ctx->usdpaa_filp);
++		return ret;
++	}
++	ret = irq_set_affinity(ctx->irq_num, &current->cpus_allowed);
++	if (ret)
++		pr_err("USDPAA irq_set_affinity() failed, ret= %d\n", ret);
++
++	ret = irq_set_affinity_hint(ctx->irq_num, &current->cpus_allowed);
++	if (ret)
++		pr_err("USDPAA irq_set_affinity_hint() failed, ret= %d\n", ret);
++
++	return 0;
++}
++
++static long usdpaa_irq_ioctl(struct file *fp, unsigned int cmd,
++			     unsigned long arg)
++{
++	int ret;
++	struct usdpaa_ioctl_irq_map irq_map;
++
++	if (cmd != USDPAA_IOCTL_PORTAL_IRQ_MAP) {
++		pr_debug("USDPAA IRQ unknown command 0x%x\n", cmd);
++		return -EINVAL;
++	}
++
++	ret = copy_from_user(&irq_map, (void __user *)arg,
++			     sizeof(irq_map));
++	if (ret)
++		return -EFAULT;
++	return map_irq(fp, &irq_map);
++}
++
++static ssize_t usdpaa_irq_read(struct file *filp, char __user *buff,
++			       size_t count, loff_t *offp)
++{
++	struct usdpaa_irq_ctx *ctx = filp->private_data;
++	int ret;
++
++	if (!ctx->irq_set) {
++		pr_debug("Reading USDPAA IRQ before it was set\n");
++		return -EINVAL;
++	}
++
++	if (count < sizeof(ctx->irq_count)) {
++		pr_debug("USDPAA IRQ Read too small\n");
++		return -EINVAL;
++	}
++	if (ctx->irq_count == ctx->last_irq_count) {
++		if (filp->f_flags & O_NONBLOCK)
++			return -EAGAIN;
++
++		ret = wait_event_interruptible(ctx->wait_queue,
++				ctx->irq_count != ctx->last_irq_count);
++		if (ret == -ERESTARTSYS)
++			return ret;
++	}
++
++	ctx->last_irq_count = ctx->irq_count;
++
++	if (copy_to_user(buff, &ctx->last_irq_count,
++			 sizeof(ctx->last_irq_count)))
++		return -EFAULT;
++	return sizeof(ctx->irq_count);
++}
++
++static unsigned int usdpaa_irq_poll(struct file *filp, poll_table *wait)
++{
++	struct usdpaa_irq_ctx *ctx = filp->private_data;
++	unsigned int ret = 0;
++	unsigned long flags;
++
++	if (!ctx->irq_set)
++		return POLLHUP;
++
++	poll_wait(filp, &ctx->wait_queue, wait);
++
++	spin_lock_irqsave(&ctx->lock, flags);
++	if (ctx->irq_count != ctx->last_irq_count)
++		ret |= POLLIN | POLLRDNORM;
++	spin_unlock_irqrestore(&ctx->lock, flags);
++	return ret;
++}
++
++static long usdpaa_irq_ioctl_compat(struct file *fp, unsigned int cmd,
++				    unsigned long arg)
++{
++#ifdef CONFIG_COMPAT
++	void __user *a = (void __user *)arg;
++#endif
++	switch (cmd) {
++#ifdef CONFIG_COMPAT
++	case USDPAA_IOCTL_PORTAL_IRQ_MAP_COMPAT:
++	{
++		struct compat_ioctl_irq_map input;
++		struct usdpaa_ioctl_irq_map
converted; ++ if (copy_from_user(&input, a, sizeof(input))) ++ return -EFAULT; ++ converted.type = input.type; ++ converted.fd = input.fd; ++ converted.portal_cinh = compat_ptr(input.portal_cinh); ++ return map_irq(fp, &converted); ++ } ++#endif ++ default: ++ return usdpaa_irq_ioctl(fp, cmd, arg); ++ } ++} ++ ++static const struct file_operations usdpaa_irq_fops = { ++ .open = usdpaa_irq_open, ++ .release = usdpaa_irq_release, ++ .unlocked_ioctl = usdpaa_irq_ioctl, ++ .compat_ioctl = usdpaa_irq_ioctl_compat, ++ .read = usdpaa_irq_read, ++ .poll = usdpaa_irq_poll ++}; ++ ++static struct miscdevice usdpaa_miscdev = { ++ .name = "fsl-usdpaa-irq", ++ .fops = &usdpaa_irq_fops, ++ .minor = MISC_DYNAMIC_MINOR, ++}; ++ ++static int __init usdpaa_irq_init(void) ++{ ++ int ret; ++ ++ pr_info("Freescale USDPAA process IRQ driver\n"); ++ ret = misc_register(&usdpaa_miscdev); ++ if (ret) ++ pr_err("fsl-usdpaa-irq: failed to register misc device\n"); ++ return ret; ++} ++ ++static void __exit usdpaa_irq_exit(void) ++{ ++ misc_deregister(&usdpaa_miscdev); ++} ++ ++module_init(usdpaa_irq_init); ++module_exit(usdpaa_irq_exit); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Freescale Semiconductor"); ++MODULE_DESCRIPTION("Freescale USDPAA process IRQ driver"); +--- /dev/null ++++ b/drivers/staging/fsl_qbman/qbman_driver.c +@@ -0,0 +1,88 @@ ++/* Copyright 2013 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */
++
++#include <linux/time.h>
++#include "qman_private.h"
++#include "bman_private.h"
++__init void qman_init_early(void);
++__init void bman_init_early(void);
++
++static __init int qbman_init(void)
++{
++	struct device_node *dn;
++	u32 is_portal_available;
++
++	bman_init();
++	qman_init();
++
++	is_portal_available = 0;
++	for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
++		if (!of_device_is_available(dn))
++			continue;
++		else
++			is_portal_available = 1;
++	}
++
++	if (!qman_have_ccsr() && is_portal_available) {
++		struct qman_fq fq = {
++			.fqid = 1
++		};
++		struct qm_mcr_queryfq_np np;
++		int err, retry = CONFIG_FSL_QMAN_INIT_TIMEOUT;
++		struct timespec nowts, diffts, startts = current_kernel_time();
++		/* Loop while querying given fqid succeeds or time out */
++		while (1) {
++			err = qman_query_fq_np(&fq, &np);
++			if (!err) {
++				/* success, control-plane has configured QMan */
++				break;
++			} else if (err != -ERANGE) {
++				pr_err("QMan: I/O error, continuing anyway\n");
++				break;
++			}
++			nowts = current_kernel_time();
++			diffts = timespec_sub(nowts, startts);
++			if (diffts.tv_sec > 0) {
++				if (!retry--) {
++					pr_err("QMan: time out, control-plane"
++					       " dead?\n");
++					break;
++				}
++				pr_warn("QMan: polling for the control-plane"
++					" (%d)\n", retry);
++			}
++		}
++	}
++	bman_resource_init();
++	qman_resource_init();
++	return 0;
++}
++subsys_initcall(qbman_init);
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/qman_config.c
+@@ -0,0 +1,1224 @@
++/* Copyright 2008-2012 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *     * Redistributions of source code must retain the above copyright
++ *	 notice, this list of conditions and the following disclaimer.
++ *     * Redistributions in binary form must reproduce the above copyright
++ *	 notice, this list of conditions and the following disclaimer in the
++ *	 documentation and/or other materials provided with the distribution.
++ *     * Neither the name of Freescale Semiconductor nor the
++ *	 names of its contributors may be used to endorse or promote products
++ *	 derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <asm/cacheflush.h>
++#include "qman_private.h"
++#include <linux/highmem.h>
++#include <linux/of_reserved_mem.h>
++
++/* Last updated for v00.800 of the BG */
++
++/* Register offsets */
++#define REG_QCSP_LIO_CFG(n)	(0x0000 + ((n) * 0x10))
++#define REG_QCSP_IO_CFG(n)	(0x0004 + ((n) * 0x10))
++#define REG_QCSP_DD_CFG(n)	(0x000c + ((n) * 0x10))
++#define REG_DD_CFG		0x0200
++#define REG_DCP_CFG(n)		(0x0300 + ((n) * 0x10))
++#define REG_DCP_DD_CFG(n)	(0x0304 + ((n) * 0x10))
++#define REG_DCP_DLM_AVG(n)	(0x030c + ((n) * 0x10))
++#define REG_PFDR_FPC		0x0400
++#define REG_PFDR_FP_HEAD	0x0404
++#define REG_PFDR_FP_TAIL	0x0408
++#define REG_PFDR_FP_LWIT	0x0410
++#define REG_PFDR_CFG		0x0414
++#define REG_SFDR_CFG		0x0500
++#define REG_SFDR_IN_USE		0x0504
++#define REG_WQ_CS_CFG(n)	(0x0600 + ((n) * 0x04))
++#define REG_WQ_DEF_ENC_WQID	0x0630
++#define REG_WQ_SC_DD_CFG(n)	(0x640 + ((n) * 0x04))
++#define REG_WQ_PC_DD_CFG(n)	(0x680 + ((n) * 0x04))
++#define REG_WQ_DC0_DD_CFG(n)	(0x6c0 + ((n) * 0x04))
++#define REG_WQ_DC1_DD_CFG(n)	(0x700 + ((n) * 0x04))
++#define REG_WQ_DCn_DD_CFG(n)	(0x6c0 + ((n) * 0x40)) /* n=2,3 */
++#define REG_CM_CFG		0x0800
++#define REG_ECSR		0x0a00
++#define REG_ECIR		0x0a04
++#define REG_EADR		0x0a08
++#define REG_ECIR2		0x0a0c
++#define REG_EDATA(n)		(0x0a10 + ((n) * 0x04))
++#define REG_SBEC(n)		(0x0a80 + ((n) * 0x04))
++#define REG_MCR			0x0b00
++#define REG_MCP(n)		(0x0b04 + ((n) * 0x04))
++#define REG_MISC_CFG		0x0be0
++#define REG_HID_CFG		0x0bf0
++#define REG_IDLE_STAT		0x0bf4
++#define REG_IP_REV_1		0x0bf8
++#define REG_IP_REV_2		0x0bfc
++#define REG_FQD_BARE		0x0c00
++#define REG_PFDR_BARE		0x0c20
++#define REG_offset_BAR		0x0004	/* relative to REG_[FQD|PFDR]_BARE */
++#define REG_offset_AR		0x0010	/* relative to REG_[FQD|PFDR]_BARE */
++#define REG_QCSP_BARE		0x0c80
++#define REG_QCSP_BAR		0x0c84
++#define REG_CI_SCHED_CFG	0x0d00
++#define REG_SRCIDR		0x0d04
++#define REG_LIODNR		0x0d08
++#define REG_CI_RLM_AVG		0x0d14
++#define REG_ERR_ISR		0x0e00	/* + "enum qm_isr_reg" */
++#define REG_REV3_QCSP_LIO_CFG(n)	(0x1000 + ((n) * 0x10))
++#define REG_REV3_QCSP_IO_CFG(n)	(0x1004 + ((n) * 0x10))
++#define REG_REV3_QCSP_DD_CFG(n)	(0x100c + ((n) * 0x10))
++#define REG_CEETM_CFG_IDX	0x900
++#define REG_CEETM_CFG_PRES	0x904
++#define REG_CEETM_XSFDR_IN_USE	0x908
++
++/* Assists for QMAN_MCR */
++#define MCR_INIT_PFDR		0x01000000
++#define MCR_get_rslt(v)		(u8)((v) >> 24)
++#define MCR_rslt_idle(r)	(!(r) || ((r) >= 0xf0))
++#define MCR_rslt_ok(r)		((r) == 0xf0)
++#define MCR_rslt_eaccess(r)	((r) == 0xf8)
++#define MCR_rslt_inval(r)	((r) == 0xff)
++
++struct qman;
++
++/* Follows WQ_CS_CFG0-5 */
++enum qm_wq_class {
++	qm_wq_portal = 0,
++	qm_wq_pool = 1,
++	qm_wq_fman0 = 2,
++	qm_wq_fman1 = 3,
++	qm_wq_caam = 4,
++	qm_wq_pme = 5,
++	qm_wq_first = qm_wq_portal,
++	qm_wq_last = qm_wq_pme
++};
++
++/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
++enum qm_memory {
++	qm_memory_fqd,
++	qm_memory_pfdr
++};
++
++/* Used by all error interrupt registers except 'inhibit' */
++#define QM_EIRQ_CIDE	0x20000000	/* Corenet Initiator Data Error */
++#define QM_EIRQ_CTDE	0x10000000	/* Corenet Target Data Error */
++#define QM_EIRQ_CITT	0x08000000	/* Corenet Invalid Target Transaction */
++#define QM_EIRQ_PLWI	0x04000000	/* PFDR Low Watermark */
++#define QM_EIRQ_MBEI	0x02000000	/* Multi-bit ECC Error */
++#define QM_EIRQ_SBEI	0x01000000	/* Single-bit ECC Error */
++#define QM_EIRQ_PEBI	0x00800000	/* PFDR Enqueues Blocked Interrupt */
++#define QM_EIRQ_IFSI	0x00020000	/* Invalid FQ Flow Control State */
++#define QM_EIRQ_ICVI	0x00010000	/* Invalid
Command Verb */ ++#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */ ++#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */ ++#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */ ++#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */ ++#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */ ++#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */ ++#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */ ++#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */ ++#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */ ++ ++/* QMAN_ECIR valid error bit */ ++#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \ ++ QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \ ++ QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI) ++#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \ ++ QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \ ++ QM_EIRQ_IFSI) ++ ++union qman_ecir { ++ u32 ecir_raw; ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u32 __reserved:2; ++ u32 portal_type:1; ++ u32 portal_num:5; ++ u32 fqid:24; ++#else ++ u32 fqid:24; ++ u32 portal_num:5; ++ u32 portal_type:1; ++ u32 __reserved:2; ++#endif ++ } __packed info; ++}; ++ ++union qman_ecir2 { ++ u32 ecir2_raw; ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u32 portal_type:1; ++ u32 __reserved:21; ++ u32 portal_num:10; ++#else ++ u32 portal_num:10; ++ u32 __reserved:21; ++ u32 portal_type:1; ++#endif ++ } __packed info; ++}; ++ ++union qman_eadr { ++ u32 eadr_raw; ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u32 __reserved1:4; ++ u32 memid:4; ++ u32 __reserved2:12; ++ u32 eadr:12; ++#else ++ u32 eadr:12; ++ u32 __reserved2:12; ++ u32 memid:4; ++ u32 __reserved1:4; ++#endif ++ } __packed info; ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u32 __reserved1:3; ++ u32 memid:5; ++ u32 __reserved:8; ++ u32 eadr:16; ++#else ++ u32 eadr:16; ++ u32 __reserved:8; ++ u32 memid:5; ++ u32 __reserved1:3; ++#endif ++ } __packed info_rev3; ++}; ++ ++struct qman_hwerr_txt { ++ u32 mask; ++ const char *txt; ++}; ++ ++#define QMAN_HWE_TXT(a, b) { .mask = QM_EIRQ_##a, .txt = b } ++ ++static const struct qman_hwerr_txt qman_hwerr_txts[] = { ++ QMAN_HWE_TXT(CIDE, "Corenet Initiator Data Error"), ++ QMAN_HWE_TXT(CTDE, "Corenet Target Data Error"), ++ QMAN_HWE_TXT(CITT, "Corenet Invalid Target Transaction"), ++ QMAN_HWE_TXT(PLWI, "PFDR Low Watermark"), ++ QMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"), ++ QMAN_HWE_TXT(SBEI, "Single-bit ECC Error"), ++ QMAN_HWE_TXT(PEBI, "PFDR Enqueues Blocked Interrupt"), ++ QMAN_HWE_TXT(ICVI, "Invalid Command Verb"), ++ QMAN_HWE_TXT(IFSI, "Invalid Flow Control State"), ++ QMAN_HWE_TXT(IDDI, "Invalid Dequeue (Direct-connect)"), ++ QMAN_HWE_TXT(IDFI, "Invalid Dequeue FQ"), ++ QMAN_HWE_TXT(IDSI, "Invalid Dequeue Source"), ++ QMAN_HWE_TXT(IDQI, "Invalid Dequeue Queue"), ++ QMAN_HWE_TXT(IECE, "Invalid Enqueue Configuration"), ++ QMAN_HWE_TXT(IEOI, "Invalid Enqueue Overflow"), ++ QMAN_HWE_TXT(IESI, "Invalid Enqueue State"), ++ QMAN_HWE_TXT(IECI, "Invalid Enqueue Channel"), ++ QMAN_HWE_TXT(IEQI, "Invalid Enqueue Queue") ++}; ++#define QMAN_HWE_COUNT (sizeof(qman_hwerr_txts)/sizeof(struct qman_hwerr_txt)) ++ ++struct qman_error_info_mdata { ++ u16 addr_mask; ++ u16 bits; ++ const char *txt; ++}; ++ ++#define QMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c} ++static const struct qman_error_info_mdata error_mdata[] = { ++ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 0"), ++ 
QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 1"), ++ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 2"), ++ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 3"), ++ QMAN_ERR_MDATA(0x0FFF, 512, "FQD cache memory"), ++ QMAN_ERR_MDATA(0x07FF, 128, "SFDR memory"), ++ QMAN_ERR_MDATA(0x01FF, 72, "WQ context memory"), ++ QMAN_ERR_MDATA(0x00FF, 240, "CGR memory"), ++ QMAN_ERR_MDATA(0x00FF, 302, "Internal Order Restoration List memory"), ++ QMAN_ERR_MDATA(0x01FF, 256, "SW portal ring memory"), ++ QMAN_ERR_MDATA(0x07FF, 181, "CEETM class queue descriptor memory"), ++ QMAN_ERR_MDATA(0x0FFF, 140, "CEETM extended SFDR memory"), ++ QMAN_ERR_MDATA(0x0FFF, 25, "CEETM logical FQ mapping memory"), ++ QMAN_ERR_MDATA(0x0FFF, 96, "CEETM dequeue context memory"), ++ QMAN_ERR_MDATA(0x07FF, 396, "CEETM ccgr memory"), ++ QMAN_ERR_MDATA(0x00FF, 146, "CEETM CQ channel shaping memory"), ++ QMAN_ERR_MDATA(0x007F, 256, "CEETM CQ channel scheduling memory"), ++ QMAN_ERR_MDATA(0x01FF, 88, "CEETM dequeue statistics memory"), ++}; ++#define QMAN_ERR_MDATA_COUNT \ ++ (sizeof(error_mdata)/sizeof(struct qman_error_info_mdata)) ++ ++/* Add this in Kconfig */ ++#define QMAN_ERRS_TO_UNENABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI) ++ ++/** ++ * qm_err_isr__ - Manipulate global interrupt registers ++ * @v: for accessors that write values, this is the 32-bit value ++ * ++ * Manipulates QMAN_ERR_ISR, QMAN_ERR_IER, QMAN_ERR_ISDR, QMAN_ERR_IIR. All ++ * manipulations except qm_err_isr_[un]inhibit() use 32-bit masks composed of ++ * the QM_EIRQ_*** definitions. Note that "qm_err_isr_enable_write" means ++ * "write the enable register" rather than "enable the write register"! ++ */ ++#define qm_err_isr_status_read(qm) \ ++ __qm_err_isr_read(qm, qm_isr_status) ++#define qm_err_isr_status_clear(qm, m) \ ++ __qm_err_isr_write(qm, qm_isr_status, m) ++#define qm_err_isr_enable_read(qm) \ ++ __qm_err_isr_read(qm, qm_isr_enable) ++#define qm_err_isr_enable_write(qm, v) \ ++ __qm_err_isr_write(qm, qm_isr_enable, v) ++#define qm_err_isr_disable_read(qm) \ ++ __qm_err_isr_read(qm, qm_isr_disable) ++#define qm_err_isr_disable_write(qm, v) \ ++ __qm_err_isr_write(qm, qm_isr_disable, v) ++#define qm_err_isr_inhibit(qm) \ ++ __qm_err_isr_write(qm, qm_isr_inhibit, 1) ++#define qm_err_isr_uninhibit(qm) \ ++ __qm_err_isr_write(qm, qm_isr_inhibit, 0) ++ ++/* ++ * TODO: unimplemented registers ++ * ++ * Keeping a list here of Qman registers I have not yet covered; ++ * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR, ++ * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG, ++ * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12 ++ */ ++ ++/* Encapsulate "struct qman *" as a cast of the register space address. 
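++ * For instance, with the qm_in()/qm_out() macros defined below, qm_in(MCR)
++ * expands to __qm_in(qm, REG_MCR), i.e. a big-endian 32-bit read at the
++ * register space base plus 0x0b00, and qm_out(MCR, MCR_INIT_PFDR) is the
++ * matching write; no per-register accessor functions are needed.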
*/ ++ ++static struct qman *qm_create(void *regs) ++{ ++ return (struct qman *)regs; ++} ++ ++static inline u32 __qm_in(struct qman *qm, u32 offset) ++{ ++ return in_be32((void *)qm + offset); ++} ++static inline void __qm_out(struct qman *qm, u32 offset, u32 val) ++{ ++ out_be32((void *)qm + offset, val); ++} ++#define qm_in(reg) __qm_in(qm, REG_##reg) ++#define qm_out(reg, val) __qm_out(qm, REG_##reg, val) ++ ++static u32 __qm_err_isr_read(struct qman *qm, enum qm_isr_reg n) ++{ ++ return __qm_in(qm, REG_ERR_ISR + (n << 2)); ++} ++ ++static void __qm_err_isr_write(struct qman *qm, enum qm_isr_reg n, u32 val) ++{ ++ __qm_out(qm, REG_ERR_ISR + (n << 2), val); ++} ++ ++static void qm_set_dc(struct qman *qm, enum qm_dc_portal portal, ++ int ed, u8 sernd) ++{ ++ DPA_ASSERT(!ed || (portal == qm_dc_portal_fman0) || ++ (portal == qm_dc_portal_fman1)); ++ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) ++ qm_out(DCP_CFG(portal), (ed ? 0x1000 : 0) | (sernd & 0x3ff)); ++ else ++ qm_out(DCP_CFG(portal), (ed ? 0x100 : 0) | (sernd & 0x1f)); ++} ++ ++static void qm_set_wq_scheduling(struct qman *qm, enum qm_wq_class wq_class, ++ u8 cs_elev, u8 csw2, u8 csw3, u8 csw4, u8 csw5, ++ u8 csw6, u8 csw7) ++{ ++ qm_out(WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) | ++ ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) | ++ ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) | ++ ((csw6 & 0x7) << 4) | (csw7 & 0x7)); ++} ++ ++static void qm_set_hid(struct qman *qm) ++{ ++ qm_out(HID_CFG, 0); ++} ++ ++static void qm_set_corenet_initiator(struct qman *qm) ++{ ++ qm_out(CI_SCHED_CFG, ++ 0x80000000 | /* write srcciv enable */ ++ (CONFIG_FSL_QMAN_CI_SCHED_CFG_SRCCIV << 24) | ++ (CONFIG_FSL_QMAN_CI_SCHED_CFG_SRQ_W << 8) | ++ (CONFIG_FSL_QMAN_CI_SCHED_CFG_RW_W << 4) | ++ CONFIG_FSL_QMAN_CI_SCHED_CFG_BMAN_W); ++} ++ ++static void qm_get_version(struct qman *qm, u16 *id, u8 *major, u8 *minor, ++ u8 *cfg) ++{ ++ u32 v = qm_in(IP_REV_1); ++ u32 v2 = qm_in(IP_REV_2); ++ *id = (v >> 16); ++ *major = (v >> 8) & 0xff; ++ *minor = v & 0xff; ++ *cfg = v2 & 0xff; ++} ++ ++static void qm_set_memory(struct qman *qm, enum qm_memory memory, u64 ba, ++ int enable, int prio, int stash, u32 size) ++{ ++ u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE; ++ u32 exp = ilog2(size); ++ /* choke if size isn't within range */ ++ DPA_ASSERT((size >= 4096) && (size <= 1073741824) && ++ is_power_of_2(size)); ++ /* choke if 'ba' has lower-alignment than 'size' */ ++ DPA_ASSERT(!(ba & (size - 1))); ++ __qm_out(qm, offset, upper_32_bits(ba)); ++ __qm_out(qm, offset + REG_offset_BAR, lower_32_bits(ba)); ++ __qm_out(qm, offset + REG_offset_AR, ++ (enable ? 0x80000000 : 0) | ++ (prio ? 0x40000000 : 0) | ++ (stash ? 0x20000000 : 0) | ++ (exp - 1)); ++} ++ ++static void qm_set_pfdr_threshold(struct qman *qm, u32 th, u8 k) ++{ ++ qm_out(PFDR_FP_LWIT, th & 0xffffff); ++ qm_out(PFDR_CFG, k); ++} ++ ++static void qm_set_sfdr_threshold(struct qman *qm, u16 th) ++{ ++ qm_out(SFDR_CFG, th & 0x3ff); ++} ++ ++static int qm_init_pfdr(struct qman *qm, u32 pfdr_start, u32 num) ++{ ++ u8 rslt = MCR_get_rslt(qm_in(MCR)); ++ ++ DPA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num); ++ /* Make sure the command interface is 'idle' */ ++ if (!MCR_rslt_idle(rslt)) ++ panic("QMAN_MCR isn't idle"); ++ ++ /* Write the MCR command params then the verb */ ++ qm_out(MCP(0), pfdr_start); ++ /* TODO: remove this - it's a workaround for a model bug that is ++ * corrected in more recent versions. We use the workaround until ++ * everyone has upgraded. 
++	 */
++	qm_out(MCP(1), (pfdr_start + num - 16));
++	lwsync();
++	qm_out(MCR, MCR_INIT_PFDR);
++	/* Poll for the result */
++	do {
++		rslt = MCR_get_rslt(qm_in(MCR));
++	} while (!MCR_rslt_idle(rslt));
++	if (MCR_rslt_ok(rslt))
++		return 0;
++	if (MCR_rslt_eaccess(rslt))
++		return -EACCES;
++	if (MCR_rslt_inval(rslt))
++		return -EINVAL;
++	pr_crit("Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
++	return -ENOSYS;
++}
++
++/*****************/
++/* Config driver */
++/*****************/
++
++#define DEFAULT_FQD_SZ	(PAGE_SIZE << CONFIG_FSL_QMAN_FQD_SZ)
++#define DEFAULT_PFDR_SZ	(PAGE_SIZE << CONFIG_FSL_QMAN_PFDR_SZ)
++
++/* We support only one of these */
++static struct qman *qm;
++static struct device_node *qm_node;
++
++/* And this state belongs to 'qm'. It is set during fsl_qman_init(), but used
++ * during qman_init_ccsr(). */
++static dma_addr_t fqd_a, pfdr_a;
++static size_t fqd_sz = DEFAULT_FQD_SZ, pfdr_sz = DEFAULT_PFDR_SZ;
++
++static int qman_fqd(struct reserved_mem *rmem)
++{
++	fqd_a = rmem->base;
++	fqd_sz = rmem->size;
++
++	WARN_ON(!(fqd_a && fqd_sz));
++
++	return 0;
++}
++RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
++
++static int qman_pfdr(struct reserved_mem *rmem)
++{
++	pfdr_a = rmem->base;
++	pfdr_sz = rmem->size;
++
++	WARN_ON(!(pfdr_a && pfdr_sz));
++
++	return 0;
++}
++RESERVEDMEM_OF_DECLARE(qman_fbpr, "fsl,qman-pfdr", qman_pfdr);
++
++size_t get_qman_fqd_size(void)
++{
++	return fqd_sz;
++}
++
++/* Parse the property to extract the memory location and size and
++ * memblock_reserve() it. If it isn't supplied, memblock_alloc() the default
++ * size. Also flush this memory range from data cache so that QMAN originated
++ * transactions for this memory region could be marked non-coherent.
++ */
++static __init int parse_mem_property(struct device_node *node, const char *name,
++				     dma_addr_t *addr, size_t *sz, int zero)
++{
++	int ret;
++
++	/* If using a "zero-pma", don't try to zero it, even if you asked */
++	if (zero && of_find_property(node, "zero-pma", &ret)) {
++		pr_info("  it's a 'zero-pma', not zeroing from s/w\n");
++		zero = 0;
++	}
++
++	if (zero) {
++		/* map as cacheable, non-guarded */
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++		void __iomem *tmpp = ioremap_cache(*addr, *sz);
++#else
++		void __iomem *tmpp = ioremap(*addr, *sz);
++#endif
++
++		if (!tmpp)
++			return -ENOMEM;
++		memset_io(tmpp, 0, *sz);
++		flush_dcache_range((unsigned long)tmpp,
++				   (unsigned long)tmpp + *sz);
++		iounmap(tmpp);
++	}
++
++	return 0;
++}
++
++/* TODO:
++ * - there is obviously no handling of errors,
++ * - the calls to qm_set_memory() hard-code the priority and CPC-stashing for
++ *   both memory resources to zero.
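++ *   (For scale: qm_set_memory() encodes the window size as a power of two,
++ *    so a 1MB FQD area gives exp = ilog2(0x100000) = 20 and, with enable=1
++ *    and prio/stash both zero, the AR register is written as
++ *    0x80000000 | (20 - 1) = 0x80000013.)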
++ */ ++static int __init fsl_qman_init(struct device_node *node) ++{ ++ struct resource res; ++ resource_size_t len; ++ u32 __iomem *regs; ++ const char *s; ++ int ret, standby = 0; ++ u16 id; ++ u8 major, minor, cfg; ++ ret = of_address_to_resource(node, 0, &res); ++ if (ret) { ++ pr_err("Can't get %s property '%s'\n", node->full_name, "reg"); ++ return ret; ++ } ++ s = of_get_property(node, "fsl,hv-claimable", &ret); ++ if (s && !strcmp(s, "standby")) ++ standby = 1; ++ if (!standby) { ++ ret = parse_mem_property(node, "fsl,qman-fqd", ++ &fqd_a, &fqd_sz, 1); ++ pr_info("qman-fqd addr %pad size 0x%zx\n", &fqd_a, fqd_sz); ++ BUG_ON(ret); ++ ret = parse_mem_property(node, "fsl,qman-pfdr", ++ &pfdr_a, &pfdr_sz, 0); ++ pr_info("qman-pfdr addr %pad size 0x%zx\n", &pfdr_a, pfdr_sz); ++ BUG_ON(ret); ++ } ++ /* Global configuration */ ++ len = resource_size(&res); ++ if (len != (unsigned long)len) ++ return -EINVAL; ++ regs = ioremap(res.start, (unsigned long)len); ++ qm = qm_create(regs); ++ qm_node = node; ++ qm_get_version(qm, &id, &major, &minor, &cfg); ++ pr_info("Qman ver:%04x,%02x,%02x,%02x\n", id, major, minor, cfg); ++ if (!qman_ip_rev) { ++ if ((major == 1) && (minor == 0)) { ++ pr_err("QMAN rev1.0 on P4080 rev1 is not supported!\n"); ++ iounmap(regs); ++ return -ENODEV; ++ } else if ((major == 1) && (minor == 1)) ++ qman_ip_rev = QMAN_REV11; ++ else if ((major == 1) && (minor == 2)) ++ qman_ip_rev = QMAN_REV12; ++ else if ((major == 2) && (minor == 0)) ++ qman_ip_rev = QMAN_REV20; ++ else if ((major == 3) && (minor == 0)) ++ qman_ip_rev = QMAN_REV30; ++ else if ((major == 3) && (minor == 1)) ++ qman_ip_rev = QMAN_REV31; ++ else if ((major == 3) && (minor == 2)) ++ qman_ip_rev = QMAN_REV32; ++ else { ++ pr_warn("unknown Qman version, default to rev1.1\n"); ++ qman_ip_rev = QMAN_REV11; ++ } ++ qman_ip_cfg = cfg; ++ } ++ ++ if (standby) { ++ pr_info(" -> in standby mode\n"); ++ return 0; ++ } ++ return 0; ++} ++ ++int qman_have_ccsr(void) ++{ ++ return qm ? 1 : 0; ++} ++ ++__init int qman_init_early(void) ++{ ++ struct device_node *dn; ++ int ret; ++ ++ for_each_compatible_node(dn, NULL, "fsl,qman") { ++ if (qm) ++ pr_err("%s: only one 'fsl,qman' allowed\n", ++ dn->full_name); ++ else { ++ if (!of_device_is_available(dn)) ++ continue; ++ ++ ret = fsl_qman_init(dn); ++ BUG_ON(ret); ++ } ++ } ++ return 0; ++} ++postcore_initcall_sync(qman_init_early); ++ ++static void log_edata_bits(u32 bit_count) ++{ ++ u32 i, j, mask = 0xffffffff; ++ ++ pr_warn("Qman ErrInt, EDATA:\n"); ++ i = bit_count/32; ++ if (bit_count%32) { ++ i++; ++ mask = ~(mask << bit_count%32); ++ } ++ j = 16-i; ++ pr_warn(" 0x%08x\n", qm_in(EDATA(j)) & mask); ++ j++; ++ for (; j < 16; j++) ++ pr_warn(" 0x%08x\n", qm_in(EDATA(j))); ++} ++ ++static void log_additional_error_info(u32 isr_val, u32 ecsr_val) ++{ ++ union qman_ecir ecir_val; ++ union qman_eadr eadr_val; ++ ++ ecir_val.ecir_raw = qm_in(ECIR); ++ /* Is portal info valid */ ++ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) { ++ union qman_ecir2 ecir2_val; ++ ecir2_val.ecir2_raw = qm_in(ECIR2); ++ if (ecsr_val & PORTAL_ECSR_ERR) { ++ pr_warn("Qman ErrInt: %s id %d\n", ++ (ecir2_val.info.portal_type) ? 
++ "DCP" : "SWP", ecir2_val.info.portal_num); ++ } ++ if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE)) { ++ pr_warn("Qman ErrInt: ecir.fqid 0x%x\n", ++ ecir_val.info.fqid); ++ } ++ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) { ++ eadr_val.eadr_raw = qm_in(EADR); ++ pr_warn("Qman ErrInt: EADR Memory: %s, 0x%x\n", ++ error_mdata[eadr_val.info_rev3.memid].txt, ++ error_mdata[eadr_val.info_rev3.memid].addr_mask ++ & eadr_val.info_rev3.eadr); ++ log_edata_bits( ++ error_mdata[eadr_val.info_rev3.memid].bits); ++ } ++ } else { ++ if (ecsr_val & PORTAL_ECSR_ERR) { ++ pr_warn("Qman ErrInt: %s id %d\n", ++ (ecir_val.info.portal_type) ? ++ "DCP" : "SWP", ecir_val.info.portal_num); ++ } ++ if (ecsr_val & FQID_ECSR_ERR) { ++ pr_warn("Qman ErrInt: ecir.fqid 0x%x\n", ++ ecir_val.info.fqid); ++ } ++ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) { ++ eadr_val.eadr_raw = qm_in(EADR); ++ pr_warn("Qman ErrInt: EADR Memory: %s, 0x%x\n", ++ error_mdata[eadr_val.info.memid].txt, ++ error_mdata[eadr_val.info.memid].addr_mask ++ & eadr_val.info.eadr); ++ log_edata_bits(error_mdata[eadr_val.info.memid].bits); ++ } ++ } ++} ++ ++/* Qman interrupt handler */ ++static irqreturn_t qman_isr(int irq, void *ptr) ++{ ++ u32 isr_val, ier_val, ecsr_val, isr_mask, i; ++ ++ ier_val = qm_err_isr_enable_read(qm); ++ isr_val = qm_err_isr_status_read(qm); ++ ecsr_val = qm_in(ECSR); ++ isr_mask = isr_val & ier_val; ++ ++ if (!isr_mask) ++ return IRQ_NONE; ++ for (i = 0; i < QMAN_HWE_COUNT; i++) { ++ if (qman_hwerr_txts[i].mask & isr_mask) { ++ pr_warn("Qman ErrInt: %s\n", qman_hwerr_txts[i].txt); ++ if (qman_hwerr_txts[i].mask & ecsr_val) { ++ log_additional_error_info(isr_mask, ecsr_val); ++ /* Re-arm error capture registers */ ++ qm_out(ECSR, ecsr_val); ++ } ++ if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_UNENABLE) { ++ pr_devel("Qman un-enabling error 0x%x\n", ++ qman_hwerr_txts[i].mask); ++ ier_val &= ~qman_hwerr_txts[i].mask; ++ qm_err_isr_enable_write(qm, ier_val); ++ } ++ } ++ } ++ qm_err_isr_status_clear(qm, isr_val); ++ return IRQ_HANDLED; ++} ++ ++static int __bind_irq(void) ++{ ++ int ret, err_irq; ++ ++ err_irq = of_irq_to_resource(qm_node, 0, NULL); ++ if (err_irq == 0) { ++ pr_info("Can't get %s property '%s'\n", qm_node->full_name, ++ "interrupts"); ++ return -ENODEV; ++ } ++ ret = request_irq(err_irq, qman_isr, IRQF_SHARED, "qman-err", qm_node); ++ if (ret) { ++ pr_err("request_irq() failed %d for '%s'\n", ret, ++ qm_node->full_name); ++ return -ENODEV; ++ } ++ /* Write-to-clear any stale bits, (eg. starvation being asserted prior ++ * to resource allocation during driver init). 
++	 */
++	qm_err_isr_status_clear(qm, 0xffffffff);
++	/* Enable Error Interrupts */
++	qm_err_isr_enable_write(qm, 0xffffffff);
++	return 0;
++}
++
++int qman_init_ccsr(struct device_node *node)
++{
++	int ret;
++	if (!qman_have_ccsr())
++		return 0;
++	if (node != qm_node)
++		return -EINVAL;
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++	/* TEMP for LS1043: should be done in u-boot */
++	qm_out(QCSP_BARE, 0x5);
++	qm_out(QCSP_BAR, 0x0);
++#endif
++	/* FQD memory */
++	qm_set_memory(qm, qm_memory_fqd, fqd_a, 1, 0, 0, fqd_sz);
++	/* PFDR memory */
++	qm_set_memory(qm, qm_memory_pfdr, pfdr_a, 1, 0, 0, pfdr_sz);
++	qm_init_pfdr(qm, 8, pfdr_sz / 64 - 8);
++	/* thresholds */
++	qm_set_pfdr_threshold(qm, 512, 64);
++	qm_set_sfdr_threshold(qm, 128);
++	/* clear stale PEBI bit from interrupt status register */
++	qm_err_isr_status_clear(qm, QM_EIRQ_PEBI);
++	/* corenet initiator settings */
++	qm_set_corenet_initiator(qm);
++	/* HID settings */
++	qm_set_hid(qm);
++	/* Set scheduling weights to defaults */
++	for (ret = qm_wq_first; ret <= qm_wq_last; ret++)
++		qm_set_wq_scheduling(qm, ret, 0, 0, 0, 0, 0, 0, 0);
++	/* We are not prepared to accept ERNs for hardware enqueues */
++	qm_set_dc(qm, qm_dc_portal_fman0, 1, 0);
++	qm_set_dc(qm, qm_dc_portal_fman1, 1, 0);
++	/* Initialise Error Interrupt Handler */
++	ret = __bind_irq();
++	if (ret)
++		return ret;
++	return 0;
++}
++
++#define LIO_CFG_LIODN_MASK 0x0fff0000
++void qman_liodn_fixup(u16 channel)
++{
++	static int done;
++	static u32 liodn_offset;
++	u32 before, after;
++	int idx = channel - QM_CHANNEL_SWPORTAL0;
++
++	if (!qman_have_ccsr())
++		return;
++	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
++		before = qm_in(REV3_QCSP_LIO_CFG(idx));
++	else
++		before = qm_in(QCSP_LIO_CFG(idx));
++	if (!done) {
++		liodn_offset = before & LIO_CFG_LIODN_MASK;
++		done = 1;
++		return;
++	}
++	after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
++	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
++		qm_out(REV3_QCSP_LIO_CFG(idx), after);
++	else
++		qm_out(QCSP_LIO_CFG(idx), after);
++}
++
++#define IO_CFG_SDEST_MASK 0x00ff0000
++int qman_set_sdest(u16 channel, unsigned int cpu_idx)
++{
++	int idx = channel - QM_CHANNEL_SWPORTAL0;
++	u32 before, after;
++
++	if (!qman_have_ccsr())
++		return -ENODEV;
++	if ((qman_ip_rev & 0xFF00) == QMAN_REV31) {
++		/* LS1043A - only one L2 cache */
++		cpu_idx = 0;
++	}
++
++	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
++		before = qm_in(REV3_QCSP_IO_CFG(idx));
++		/* Each pair of vcpus shares the same SRQ (SDEST) */
++		cpu_idx /= 2;
++		after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
++		qm_out(REV3_QCSP_IO_CFG(idx), after);
++	} else {
++		before = qm_in(QCSP_IO_CFG(idx));
++		after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
++		qm_out(QCSP_IO_CFG(idx), after);
++	}
++	return 0;
++}
++
++#define MISC_CFG_WPM_MASK 0x00000002
++int qm_set_wpm(int wpm)
++{
++	u32 before;
++	u32 after;
++
++	if (!qman_have_ccsr())
++		return -ENODEV;
++
++	before = qm_in(MISC_CFG);
++	after = (before & (~MISC_CFG_WPM_MASK)) | (wpm << 1);
++	qm_out(MISC_CFG, after);
++	return 0;
++}
++
++int qm_get_wpm(int *wpm)
++{
++	u32 before;
++
++	if (!qman_have_ccsr())
++		return -ENODEV;
++
++	before = qm_in(MISC_CFG);
++	*wpm = (before & MISC_CFG_WPM_MASK) >> 1;
++	return 0;
++}
++
++/* CEETM_CFG_PRES register has PRES field which is calculated by:
++ *    PRES = (2^22 / credit update reference period) * QMan clock period
++ *         = ((2^22 * 10^9) / CONFIG_QMAN_CEETM_UPDATE_PERIOD) / qman_clk
++ */
++
++int qman_ceetm_set_prescaler(enum qm_dc_portal portal)
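++/* Worked example with assumed (not board-specific) numbers: for
++ * CONFIG_QMAN_CEETM_UPDATE_PERIOD = 1000 (ns) and qman_clk = 600000000
++ * (600 MHz), pres = ((2^22 * 10^9) / 1000) / 600000000 ~= 6990 (0x1b4e),
++ * which is what the do_div() arithmetic below computes. */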
++{ ++ u64 temp; ++ u16 pres; ++ ++ if (!qman_have_ccsr()) ++ return -ENODEV; ++ ++ temp = 0x400000 * 100; ++ do_div(temp, CONFIG_QMAN_CEETM_UPDATE_PERIOD); ++ temp *= 10000000; ++ do_div(temp, qman_clk); ++ pres = (u16) temp; ++ qm_out(CEETM_CFG_IDX, portal); ++ qm_out(CEETM_CFG_PRES, pres); ++ return 0; ++} ++ ++int qman_ceetm_get_prescaler(u16 *pres) ++{ ++ if (!qman_have_ccsr()) ++ return -ENODEV; ++ *pres = (u16)qm_in(CEETM_CFG_PRES); ++ return 0; ++} ++ ++#define DCP_CFG_CEETME_MASK 0xFFFF0000 ++#define QM_SP_ENABLE_CEETM(n) (0x80000000 >> (n)) ++int qman_sp_enable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal) ++{ ++ u32 dcp_cfg; ++ ++ if (!qman_have_ccsr()) ++ return -ENODEV; ++ ++ dcp_cfg = qm_in(DCP_CFG(portal)); ++ dcp_cfg |= QM_SP_ENABLE_CEETM(sub_portal); ++ qm_out(DCP_CFG(portal), dcp_cfg); ++ return 0; ++} ++ ++int qman_sp_disable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal) ++{ ++ u32 dcp_cfg; ++ ++ if (!qman_have_ccsr()) ++ return -ENODEV; ++ dcp_cfg = qm_in(DCP_CFG(portal)); ++ dcp_cfg &= ~(QM_SP_ENABLE_CEETM(sub_portal)); ++ qm_out(DCP_CFG(portal), dcp_cfg); ++ return 0; ++} ++ ++int qman_ceetm_get_xsfdr(enum qm_dc_portal portal, unsigned int *num) ++{ ++ if (!qman_have_ccsr()) ++ return -ENODEV; ++ *num = qm_in(CEETM_XSFDR_IN_USE); ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_get_xsfdr); ++ ++#ifdef CONFIG_SYSFS ++ ++#define DRV_NAME "fsl-qman" ++#define DCP_MAX_ID 3 ++#define DCP_MIN_ID 0 ++ ++static ssize_t show_pfdr_fpc(struct device *dev, ++ struct device_attribute *dev_attr, char *buf) ++{ ++ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_FPC)); ++}; ++ ++static ssize_t show_dlm_avg(struct device *dev, ++ struct device_attribute *dev_attr, char *buf) ++{ ++ u32 data; ++ int i; ++ ++ if (!sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i)) ++ return -EINVAL; ++ if (i < DCP_MIN_ID || i > DCP_MAX_ID) ++ return -EINVAL; ++ data = qm_in(DCP_DLM_AVG(i)); ++ return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8, ++ (data & 0x000000ff)*390625); ++}; ++ ++static ssize_t set_dlm_avg(struct device *dev, ++ struct device_attribute *dev_attr, const char *buf, size_t count) ++{ ++ unsigned long val; ++ int i; ++ ++ if (!sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i)) ++ return -EINVAL; ++ if (i < DCP_MIN_ID || i > DCP_MAX_ID) ++ return -EINVAL; ++ if (kstrtoul(buf, 0, &val)) { ++ dev_dbg(dev, "invalid input %s\n", buf); ++ return -EINVAL; ++ } ++ qm_out(DCP_DLM_AVG(i), val); ++ return count; ++}; ++ ++static ssize_t show_pfdr_cfg(struct device *dev, ++ struct device_attribute *dev_attr, char *buf) ++{ ++ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_CFG)); ++}; ++ ++static ssize_t set_pfdr_cfg(struct device *dev, ++ struct device_attribute *dev_attr, const char *buf, size_t count) ++{ ++ unsigned long val; ++ ++ if (kstrtoul(buf, 0, &val)) { ++ dev_dbg(dev, "invalid input %s\n", buf); ++ return -EINVAL; ++ } ++ qm_out(PFDR_CFG, val); ++ return count; ++}; ++ ++static ssize_t show_sfdr_in_use(struct device *dev, ++ struct device_attribute *dev_attr, char *buf) ++{ ++ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SFDR_IN_USE)); ++}; ++ ++static ssize_t show_idle_stat(struct device *dev, ++ struct device_attribute *dev_attr, char *buf) ++{ ++ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(IDLE_STAT)); ++}; ++ ++static ssize_t show_ci_rlm_avg(struct device *dev, ++ struct device_attribute *dev_attr, char *buf) ++{ ++ u32 data = qm_in(CI_RLM_AVG); ++ return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8, ++ (data & 0x000000ff)*390625); ++}; ++ ++static ssize_t 
set_ci_rlm_avg(struct device *dev, ++ struct device_attribute *dev_attr, const char *buf, size_t count) ++{ ++ unsigned long val; ++ ++ if (kstrtoul(buf, 0, &val)) { ++ dev_dbg(dev, "invalid input %s\n", buf); ++ return -EINVAL; ++ } ++ qm_out(CI_RLM_AVG, val); ++ return count; ++}; ++ ++static ssize_t show_err_isr(struct device *dev, ++ struct device_attribute *dev_attr, char *buf) ++{ ++ return snprintf(buf, PAGE_SIZE, "0x%08x\n", qm_in(ERR_ISR)); ++}; ++ ++#define SBEC_MAX_ID 14 ++#define SBEC_MIN_ID 0 ++ ++static ssize_t show_sbec(struct device *dev, ++ struct device_attribute *dev_attr, char *buf) ++{ ++ int i; ++ ++ if (!sscanf(dev_attr->attr.name, "sbec_%d", &i)) ++ return -EINVAL; ++ if (i < SBEC_MIN_ID || i > SBEC_MAX_ID) ++ return -EINVAL; ++ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SBEC(i))); ++}; ++ ++static DEVICE_ATTR(pfdr_fpc, S_IRUSR, show_pfdr_fpc, NULL); ++static DEVICE_ATTR(pfdr_cfg, S_IRUSR, show_pfdr_cfg, set_pfdr_cfg); ++static DEVICE_ATTR(idle_stat, S_IRUSR, show_idle_stat, NULL); ++static DEVICE_ATTR(ci_rlm_avg, (S_IRUSR|S_IWUSR), ++ show_ci_rlm_avg, set_ci_rlm_avg); ++static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL); ++static DEVICE_ATTR(sfdr_in_use, S_IRUSR, show_sfdr_in_use, NULL); ++ ++static DEVICE_ATTR(dcp0_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg); ++static DEVICE_ATTR(dcp1_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg); ++static DEVICE_ATTR(dcp2_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg); ++static DEVICE_ATTR(dcp3_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg); ++ ++static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL); ++static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL); ++static DEVICE_ATTR(sbec_2, S_IRUSR, show_sbec, NULL); ++static DEVICE_ATTR(sbec_3, S_IRUSR, show_sbec, NULL); ++static DEVICE_ATTR(sbec_4, S_IRUSR, show_sbec, NULL); ++static DEVICE_ATTR(sbec_5, S_IRUSR, show_sbec, NULL); ++static DEVICE_ATTR(sbec_6, S_IRUSR, show_sbec, NULL); ++static DEVICE_ATTR(sbec_7, S_IRUSR, show_sbec, NULL); ++static DEVICE_ATTR(sbec_8, S_IRUSR, show_sbec, NULL); ++static DEVICE_ATTR(sbec_9, S_IRUSR, show_sbec, NULL); ++static DEVICE_ATTR(sbec_10, S_IRUSR, show_sbec, NULL); ++static DEVICE_ATTR(sbec_11, S_IRUSR, show_sbec, NULL); ++static DEVICE_ATTR(sbec_12, S_IRUSR, show_sbec, NULL); ++static DEVICE_ATTR(sbec_13, S_IRUSR, show_sbec, NULL); ++static DEVICE_ATTR(sbec_14, S_IRUSR, show_sbec, NULL); ++ ++static struct attribute *qman_dev_attributes[] = { ++ &dev_attr_pfdr_fpc.attr, ++ &dev_attr_pfdr_cfg.attr, ++ &dev_attr_idle_stat.attr, ++ &dev_attr_ci_rlm_avg.attr, ++ &dev_attr_err_isr.attr, ++ &dev_attr_dcp0_dlm_avg.attr, ++ &dev_attr_dcp1_dlm_avg.attr, ++ &dev_attr_dcp2_dlm_avg.attr, ++ &dev_attr_dcp3_dlm_avg.attr, ++ /* sfdr_in_use will be added if necessary */ ++ NULL ++}; ++ ++static struct attribute *qman_dev_ecr_attributes[] = { ++ &dev_attr_sbec_0.attr, ++ &dev_attr_sbec_1.attr, ++ &dev_attr_sbec_2.attr, ++ &dev_attr_sbec_3.attr, ++ &dev_attr_sbec_4.attr, ++ &dev_attr_sbec_5.attr, ++ &dev_attr_sbec_6.attr, ++ &dev_attr_sbec_7.attr, ++ &dev_attr_sbec_8.attr, ++ &dev_attr_sbec_9.attr, ++ &dev_attr_sbec_10.attr, ++ &dev_attr_sbec_11.attr, ++ &dev_attr_sbec_12.attr, ++ &dev_attr_sbec_13.attr, ++ &dev_attr_sbec_14.attr, ++ NULL ++}; ++ ++/* root level */ ++static const struct attribute_group qman_dev_attr_grp = { ++ .name = NULL, ++ .attrs = qman_dev_attributes ++}; ++static const struct attribute_group qman_dev_ecr_grp = { ++ .name = "error_capture", ++ .attrs = qman_dev_ecr_attributes ++}; ++ ++static int 
of_fsl_qman_remove(struct platform_device *ofdev) ++{ ++ sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp); ++ return 0; ++}; ++ ++static int of_fsl_qman_probe(struct platform_device *ofdev) ++{ ++ int ret; ++ ++ ret = sysfs_create_group(&ofdev->dev.kobj, &qman_dev_attr_grp); ++ if (ret) ++ goto done; ++ ret = sysfs_add_file_to_group(&ofdev->dev.kobj, ++ &dev_attr_sfdr_in_use.attr, qman_dev_attr_grp.name); ++ if (ret) ++ goto del_group_0; ++ ret = sysfs_create_group(&ofdev->dev.kobj, &qman_dev_ecr_grp); ++ if (ret) ++ goto del_group_0; ++ ++ goto done; ++ ++del_group_0: ++ sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp); ++done: ++ if (ret) ++ dev_err(&ofdev->dev, ++ "Cannot create dev attributes ret=%d\n", ret); ++ return ret; ++}; ++ ++static struct of_device_id of_fsl_qman_ids[] = { ++ { ++ .compatible = "fsl,qman", ++ }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, of_fsl_qman_ids); ++ ++#ifdef CONFIG_SUSPEND ++ ++static u32 saved_isdr; ++static int qman_pm_suspend_noirq(struct device *dev) ++{ ++ uint32_t idle_state; ++ ++ suspend_unused_qportal(); ++ /* save isdr, disable all, clear isr */ ++ saved_isdr = qm_err_isr_disable_read(qm); ++ qm_err_isr_disable_write(qm, 0xffffffff); ++ qm_err_isr_status_clear(qm, 0xffffffff); ++ idle_state = qm_in(IDLE_STAT); ++ if (!(idle_state & 0x1)) { ++ pr_err("Qman not idle 0x%x aborting\n", idle_state); ++ qm_err_isr_disable_write(qm, saved_isdr); ++ resume_unused_qportal(); ++ return -EBUSY; ++ } ++#ifdef CONFIG_PM_DEBUG ++ pr_info("Qman suspend code, IDLE_STAT = 0x%x\n", idle_state); ++#endif ++ return 0; ++} ++ ++static int qman_pm_resume_noirq(struct device *dev) ++{ ++ /* restore isdr */ ++ qm_err_isr_disable_write(qm, saved_isdr); ++ resume_unused_qportal(); ++ return 0; ++} ++#else ++#define qman_pm_suspend_noirq NULL ++#define qman_pm_resume_noirq NULL ++#endif ++ ++static const struct dev_pm_ops qman_pm_ops = { ++ .suspend_noirq = qman_pm_suspend_noirq, ++ .resume_noirq = qman_pm_resume_noirq, ++}; ++ ++static struct platform_driver of_fsl_qman_driver = { ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = DRV_NAME, ++ .of_match_table = of_fsl_qman_ids, ++ .pm = &qman_pm_ops, ++ }, ++ .probe = of_fsl_qman_probe, ++ .remove = of_fsl_qman_remove, ++}; ++ ++static int qman_ctrl_init(void) ++{ ++ return platform_driver_register(&of_fsl_qman_driver); ++} ++ ++static void qman_ctrl_exit(void) ++{ ++ platform_driver_unregister(&of_fsl_qman_driver); ++} ++ ++module_init(qman_ctrl_init); ++module_exit(qman_ctrl_exit); ++ ++#endif /* CONFIG_SYSFS */ +--- /dev/null ++++ b/drivers/staging/fsl_qbman/qman_debugfs.c +@@ -0,0 +1,1594 @@ ++/* Copyright 2010-2011 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. 
++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++#include "qman_private.h" ++ ++#define MAX_FQID (0x00ffffff) ++#define QM_FQD_BLOCK_SIZE 64 ++#define QM_FQD_AR (0xC10) ++ ++static u32 fqid_max; ++static u64 qman_ccsr_start; ++static u64 qman_ccsr_size; ++ ++static const char * const state_txt[] = { ++ "Out of Service", ++ "Retired", ++ "Tentatively Scheduled", ++ "Truly Scheduled", ++ "Parked", ++ "Active, Active Held or Held Suspended", ++ "Unknown State 6", ++ "Unknown State 7", ++ NULL, ++}; ++ ++static const u8 fqd_states[] = { ++ QM_MCR_NP_STATE_OOS, QM_MCR_NP_STATE_RETIRED, QM_MCR_NP_STATE_TEN_SCHED, ++ QM_MCR_NP_STATE_TRU_SCHED, QM_MCR_NP_STATE_PARKED, ++ QM_MCR_NP_STATE_ACTIVE}; ++ ++struct mask_to_text { ++ u16 mask; ++ const char *txt; ++}; ++ ++struct mask_filter_s { ++ u16 mask; ++ u8 filter; ++}; ++ ++static const struct mask_filter_s mask_filter[] = { ++ {QM_FQCTRL_PREFERINCACHE, 0}, ++ {QM_FQCTRL_PREFERINCACHE, 1}, ++ {QM_FQCTRL_HOLDACTIVE, 0}, ++ {QM_FQCTRL_HOLDACTIVE, 1}, ++ {QM_FQCTRL_AVOIDBLOCK, 0}, ++ {QM_FQCTRL_AVOIDBLOCK, 1}, ++ {QM_FQCTRL_FORCESFDR, 0}, ++ {QM_FQCTRL_FORCESFDR, 1}, ++ {QM_FQCTRL_CPCSTASH, 0}, ++ {QM_FQCTRL_CPCSTASH, 1}, ++ {QM_FQCTRL_CTXASTASHING, 0}, ++ {QM_FQCTRL_CTXASTASHING, 1}, ++ {QM_FQCTRL_ORP, 0}, ++ {QM_FQCTRL_ORP, 1}, ++ {QM_FQCTRL_TDE, 0}, ++ {QM_FQCTRL_TDE, 1}, ++ {QM_FQCTRL_CGE, 0}, ++ {QM_FQCTRL_CGE, 1} ++}; ++ ++static const struct mask_to_text fq_ctrl_text_list[] = { ++ { ++ .mask = QM_FQCTRL_PREFERINCACHE, ++ .txt = "Prefer in cache", ++ }, ++ { ++ .mask = QM_FQCTRL_HOLDACTIVE, ++ .txt = "Hold active in portal", ++ }, ++ { ++ .mask = QM_FQCTRL_AVOIDBLOCK, ++ .txt = "Avoid Blocking", ++ }, ++ { ++ .mask = QM_FQCTRL_FORCESFDR, ++ .txt = "High-priority SFDRs", ++ }, ++ { ++ .mask = QM_FQCTRL_CPCSTASH, ++ .txt = "CPC Stash Enable", ++ }, ++ { ++ .mask = QM_FQCTRL_CTXASTASHING, ++ .txt = "Context-A stashing", ++ }, ++ { ++ .mask = QM_FQCTRL_ORP, ++ .txt = "ORP Enable", ++ }, ++ { ++ .mask = QM_FQCTRL_TDE, ++ .txt = "Tail-Drop Enable", ++ }, ++ { ++ .mask = QM_FQCTRL_CGE, ++ .txt = "Congestion Group Enable", ++ }, ++ { ++ .mask = 0, ++ .txt = NULL, ++ } ++}; ++ ++static const char *get_fqd_ctrl_text(u16 mask) ++{ ++ int i = 0; ++ ++ while (fq_ctrl_text_list[i].txt != NULL) { ++ if (fq_ctrl_text_list[i].mask == mask) ++ return fq_ctrl_text_list[i].txt; ++ i++; ++ } ++ return NULL; ++} ++ ++static const struct mask_to_text stashing_text_list[] = { ++ { ++ .mask = QM_STASHING_EXCL_CTX, ++ .txt = "FQ Ctx Stash" ++ }, ++ { ++ .mask = QM_STASHING_EXCL_DATA, ++ .txt 
= "Frame Data Stash", ++ }, ++ { ++ .mask = QM_STASHING_EXCL_ANNOTATION, ++ .txt = "Frame Annotation Stash", ++ }, ++ { ++ .mask = 0, ++ .txt = NULL, ++ }, ++}; ++ ++static int user_input_convert(const char __user *user_buf, size_t count, ++ unsigned long *val) ++{ ++ char buf[12]; ++ ++ if (count > sizeof(buf) - 1) ++ return -EINVAL; ++ if (copy_from_user(buf, user_buf, count)) ++ return -EFAULT; ++ buf[count] = '\0'; ++ if (kstrtoul(buf, 0, val)) ++ return -EINVAL; ++ return 0; ++} ++ ++struct line_buffer_fq { ++ u32 buf[8]; ++ u32 buf_cnt; ++ int line_cnt; ++}; ++ ++static void add_to_line_buffer(struct line_buffer_fq *line_buf, u32 fqid, ++ struct seq_file *file) ++{ ++ line_buf->buf[line_buf->buf_cnt] = fqid; ++ line_buf->buf_cnt++; ++ if (line_buf->buf_cnt == 8) { ++ /* Buffer is full, flush it */ ++ if (line_buf->line_cnt != 0) ++ seq_puts(file, ",\n"); ++ seq_printf(file, "0x%06x,0x%06x,0x%06x,0x%06x,0x%06x," ++ "0x%06x,0x%06x,0x%06x", ++ line_buf->buf[0], line_buf->buf[1], line_buf->buf[2], ++ line_buf->buf[3], line_buf->buf[4], line_buf->buf[5], ++ line_buf->buf[6], line_buf->buf[7]); ++ line_buf->buf_cnt = 0; ++ line_buf->line_cnt++; ++ } ++} ++ ++static void flush_line_buffer(struct line_buffer_fq *line_buf, ++ struct seq_file *file) ++{ ++ if (line_buf->buf_cnt) { ++ int y = 0; ++ if (line_buf->line_cnt != 0) ++ seq_puts(file, ",\n"); ++ while (y != line_buf->buf_cnt) { ++ if (y+1 == line_buf->buf_cnt) ++ seq_printf(file, "0x%06x", line_buf->buf[y]); ++ else ++ seq_printf(file, "0x%06x,", line_buf->buf[y]); ++ y++; ++ } ++ line_buf->line_cnt++; ++ } ++ if (line_buf->line_cnt) ++ seq_putc(file, '\n'); ++} ++ ++static struct dentry *dfs_root; /* debugfs root directory */ ++ ++/******************************************************************************* ++ * Query Frame Queue Non Programmable Fields ++ ******************************************************************************/ ++struct query_fq_np_fields_data_s { ++ u32 fqid; ++}; ++static struct query_fq_np_fields_data_s query_fq_np_fields_data = { ++ .fqid = 1, ++}; ++ ++static int query_fq_np_fields_show(struct seq_file *file, void *offset) ++{ ++ int ret; ++ struct qm_mcr_queryfq_np np; ++ struct qman_fq fq; ++ ++ fq.fqid = query_fq_np_fields_data.fqid; ++ ret = qman_query_fq_np(&fq, &np); ++ if (ret) ++ return ret; ++ /* Print state */ ++ seq_printf(file, "Query FQ Non Programmable Fields Result fqid 0x%x\n", ++ fq.fqid); ++ seq_printf(file, " force eligible pending: %s\n", ++ (np.state & QM_MCR_NP_STATE_FE) ? "yes" : "no"); ++ seq_printf(file, " retirement pending: %s\n", ++ (np.state & QM_MCR_NP_STATE_R) ? "yes" : "no"); ++ seq_printf(file, " state: %s\n", ++ state_txt[np.state & QM_MCR_NP_STATE_MASK]); ++ seq_printf(file, " fq_link: 0x%x\n", np.fqd_link); ++ seq_printf(file, " odp_seq: %u\n", np.odp_seq); ++ seq_printf(file, " orp_nesn: %u\n", np.orp_nesn); ++ seq_printf(file, " orp_ea_hseq: %u\n", np.orp_ea_hseq); ++ seq_printf(file, " orp_ea_tseq: %u\n", np.orp_ea_tseq); ++ seq_printf(file, " orp_ea_hptr: 0x%x\n", np.orp_ea_hptr); ++ seq_printf(file, " orp_ea_tptr: 0x%x\n", np.orp_ea_tptr); ++ seq_printf(file, " pfdr_hptr: 0x%x\n", np.pfdr_hptr); ++ seq_printf(file, " pfdr_tptr: 0x%x\n", np.pfdr_tptr); ++ seq_printf(file, " is: ics_surp contains a %s\n", ++ (np.is) ? 
"deficit" : "surplus"); ++ seq_printf(file, " ics_surp: %u\n", np.ics_surp); ++ seq_printf(file, " byte_cnt: %u\n", np.byte_cnt); ++ seq_printf(file, " frm_cnt: %u\n", np.frm_cnt); ++ seq_printf(file, " ra1_sfdr: 0x%x\n", np.ra1_sfdr); ++ seq_printf(file, " ra2_sfdr: 0x%x\n", np.ra2_sfdr); ++ seq_printf(file, " od1_sfdr: 0x%x\n", np.od1_sfdr); ++ seq_printf(file, " od2_sfdr: 0x%x\n", np.od2_sfdr); ++ seq_printf(file, " od3_sfdr: 0x%x\n", np.od3_sfdr); ++ return 0; ++} ++ ++static int query_fq_np_fields_open(struct inode *inode, ++ struct file *file) ++{ ++ return single_open(file, query_fq_np_fields_show, NULL); ++} ++ ++static ssize_t query_fq_np_fields_write(struct file *f, ++ const char __user *buf, size_t count, loff_t *off) ++{ ++ int ret; ++ unsigned long val; ++ ++ ret = user_input_convert(buf, count, &val); ++ if (ret) ++ return ret; ++ if (val > MAX_FQID) ++ return -EINVAL; ++ query_fq_np_fields_data.fqid = (u32)val; ++ return count; ++} ++ ++static const struct file_operations query_fq_np_fields_fops = { ++ .owner = THIS_MODULE, ++ .open = query_fq_np_fields_open, ++ .read = seq_read, ++ .write = query_fq_np_fields_write, ++ .release = single_release, ++}; ++ ++/******************************************************************************* ++ * Frame Queue Programmable Fields ++ ******************************************************************************/ ++struct query_fq_fields_data_s { ++ u32 fqid; ++}; ++ ++static struct query_fq_fields_data_s query_fq_fields_data = { ++ .fqid = 1, ++}; ++ ++static int query_fq_fields_show(struct seq_file *file, void *offset) ++{ ++ int ret; ++ struct qm_fqd fqd; ++ struct qman_fq fq; ++ int i = 0; ++ ++ memset(&fqd, 0, sizeof(struct qm_fqd)); ++ fq.fqid = query_fq_fields_data.fqid; ++ ret = qman_query_fq(&fq, &fqd); ++ if (ret) ++ return ret; ++ seq_printf(file, "Query FQ Programmable Fields Result fqid 0x%x\n", ++ fq.fqid); ++ seq_printf(file, " orprws: %u\n", fqd.orprws); ++ seq_printf(file, " oa: %u\n", fqd.oa); ++ seq_printf(file, " olws: %u\n", fqd.olws); ++ ++ seq_printf(file, " cgid: %u\n", fqd.cgid); ++ ++ if ((fqd.fq_ctrl & QM_FQCTRL_MASK) == 0) ++ seq_puts(file, " fq_ctrl: None\n"); ++ else { ++ i = 0; ++ seq_puts(file, " fq_ctrl:\n"); ++ while (fq_ctrl_text_list[i].txt != NULL) { ++ if ((fqd.fq_ctrl & QM_FQCTRL_MASK) & ++ fq_ctrl_text_list[i].mask) ++ seq_printf(file, " %s\n", ++ fq_ctrl_text_list[i].txt); ++ i++; ++ } ++ } ++ seq_printf(file, " dest_channel: %u\n", fqd.dest.channel); ++ seq_printf(file, " dest_wq: %u\n", fqd.dest.wq); ++ seq_printf(file, " ics_cred: %u\n", fqd.ics_cred); ++ seq_printf(file, " td_mant: %u\n", fqd.td.mant); ++ seq_printf(file, " td_exp: %u\n", fqd.td.exp); ++ ++ seq_printf(file, " ctx_b: 0x%x\n", fqd.context_b); ++ ++ seq_printf(file, " ctx_a: 0x%llx\n", qm_fqd_stashing_get64(&fqd)); ++ /* Any stashing configured */ ++ if ((fqd.context_a.stashing.exclusive & 0x7) == 0) ++ seq_puts(file, " ctx_a_stash_exclusive: None\n"); ++ else { ++ seq_puts(file, " ctx_a_stash_exclusive:\n"); ++ i = 0; ++ while (stashing_text_list[i].txt != NULL) { ++ if ((fqd.fq_ctrl & 0x7) & stashing_text_list[i].mask) ++ seq_printf(file, " %s\n", ++ stashing_text_list[i].txt); ++ i++; ++ } ++ } ++ seq_printf(file, " ctx_a_stash_annotation_cl: %u\n", ++ fqd.context_a.stashing.annotation_cl); ++ seq_printf(file, " ctx_a_stash_data_cl: %u\n", ++ fqd.context_a.stashing.data_cl); ++ seq_printf(file, " ctx_a_stash_context_cl: %u\n", ++ fqd.context_a.stashing.context_cl); ++ return 0; ++} ++ ++static int 
query_fq_fields_open(struct inode *inode, ++ struct file *file) ++{ ++ return single_open(file, query_fq_fields_show, NULL); ++} ++ ++static ssize_t query_fq_fields_write(struct file *f, ++ const char __user *buf, size_t count, loff_t *off) ++{ ++ int ret; ++ unsigned long val; ++ ++ ret = user_input_convert(buf, count, &val); ++ if (ret) ++ return ret; ++ if (val > MAX_FQID) ++ return -EINVAL; ++ query_fq_fields_data.fqid = (u32)val; ++ return count; ++} ++ ++static const struct file_operations query_fq_fields_fops = { ++ .owner = THIS_MODULE, ++ .open = query_fq_fields_open, ++ .read = seq_read, ++ .write = query_fq_fields_write, ++ .release = single_release, ++}; ++ ++/******************************************************************************* ++ * Query WQ lengths ++ ******************************************************************************/ ++struct query_wq_lengths_data_s { ++ union { ++ u16 channel_wq; /* ignores wq (3 lsbits) */ ++ struct { ++ u16 id:13; /* qm_channel */ ++ u16 __reserved:3; ++ } __packed channel; ++ }; ++}; ++static struct query_wq_lengths_data_s query_wq_lengths_data; ++static int query_wq_lengths_show(struct seq_file *file, void *offset) ++{ ++ int ret; ++ struct qm_mcr_querywq wq; ++ int i; ++ ++ memset(&wq, 0, sizeof(struct qm_mcr_querywq)); ++ wq.channel.id = query_wq_lengths_data.channel.id; ++ ret = qman_query_wq(0, &wq); ++ if (ret) ++ return ret; ++ seq_printf(file, "Query Result For Channel: 0x%x\n", wq.channel.id); ++ for (i = 0; i < 8; i++) ++ /* mask out upper 4 bits since they are not part of length */ ++ seq_printf(file, " wq%d_len : %u\n", i, wq.wq_len[i] & 0x0fff); ++ return 0; ++} ++ ++static int query_wq_lengths_open(struct inode *inode, ++ struct file *file) ++{ ++ return single_open(file, query_wq_lengths_show, NULL); ++} ++ ++static ssize_t query_wq_lengths_write(struct file *f, ++ const char __user *buf, size_t count, loff_t *off) ++{ ++ int ret; ++ unsigned long val; ++ ++ ret = user_input_convert(buf, count, &val); ++ if (ret) ++ return ret; ++ if (val > 0xfff8) ++ return -EINVAL; ++ query_wq_lengths_data.channel.id = (u16)val; ++ return count; ++} ++ ++static const struct file_operations query_wq_lengths_fops = { ++ .owner = THIS_MODULE, ++ .open = query_wq_lengths_open, ++ .read = seq_read, ++ .write = query_wq_lengths_write, ++ .release = single_release, ++}; ++ ++/******************************************************************************* ++ * Query CGR ++ ******************************************************************************/ ++struct query_cgr_s { ++ u8 cgid; ++}; ++static struct query_cgr_s query_cgr_data; ++ ++static int query_cgr_show(struct seq_file *file, void *offset) ++{ ++ int ret; ++ struct qm_mcr_querycgr cgrd; ++ struct qman_cgr cgr; ++ int i, j; ++ u32 mask; ++ ++ memset(&cgr, 0, sizeof(cgr)); ++ memset(&cgrd, 0, sizeof(cgrd)); ++ cgr.cgrid = query_cgr_data.cgid; ++ ret = qman_query_cgr(&cgr, &cgrd); ++ if (ret) ++ return ret; ++ seq_printf(file, "Query CGR id 0x%x\n", cgr.cgrid); ++ seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", ++ cgrd.cgr.wr_parm_g.MA, cgrd.cgr.wr_parm_g.Mn, ++ cgrd.cgr.wr_parm_g.SA, cgrd.cgr.wr_parm_g.Sn, ++ cgrd.cgr.wr_parm_g.Pn); ++ ++ seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", ++ cgrd.cgr.wr_parm_y.MA, cgrd.cgr.wr_parm_y.Mn, ++ cgrd.cgr.wr_parm_y.SA, cgrd.cgr.wr_parm_y.Sn, ++ cgrd.cgr.wr_parm_y.Pn); ++ ++ seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", ++ cgrd.cgr.wr_parm_r.MA, cgrd.cgr.wr_parm_r.Mn, 
++ cgrd.cgr.wr_parm_r.SA, cgrd.cgr.wr_parm_r.Sn, ++ cgrd.cgr.wr_parm_r.Pn); ++ ++ seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n", ++ cgrd.cgr.wr_en_g, cgrd.cgr.wr_en_y, cgrd.cgr.wr_en_r); ++ ++ seq_printf(file, " cscn_en: %u\n", cgrd.cgr.cscn_en); ++ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) { ++ seq_puts(file, " cscn_targ_dcp:\n"); ++ mask = 0x80000000; ++ for (i = 0; i < 32; i++) { ++ if (cgrd.cgr.cscn_targ & mask) ++ seq_printf(file, " send CSCN to dcp %u\n", ++ (31 - i)); ++ mask >>= 1; ++ } ++ ++ seq_puts(file, " cscn_targ_swp:\n"); ++ for (i = 0; i < 4; i++) { ++ mask = 0x80000000; ++ for (j = 0; j < 32; j++) { ++ if (cgrd.cscn_targ_swp[i] & mask) ++ seq_printf(file, " send CSCN to swp" ++ " %u\n", (127 - (i * 32) - j)); ++ mask >>= 1; ++ } ++ } ++ } else { ++ seq_printf(file, " cscn_targ: %u\n", cgrd.cgr.cscn_targ); ++ } ++ seq_printf(file, " cstd_en: %u\n", cgrd.cgr.cstd_en); ++ seq_printf(file, " cs: %u\n", cgrd.cgr.cs); ++ ++ seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n", ++ cgrd.cgr.cs_thres.TA, cgrd.cgr.cs_thres.Tn); ++ ++ seq_printf(file, " mode: %s\n", ++ (cgrd.cgr.mode & QMAN_CGR_MODE_FRAME) ? ++ "frame count" : "byte count"); ++ seq_printf(file, " i_bcnt: %llu\n", qm_mcr_querycgr_i_get64(&cgrd)); ++ seq_printf(file, " a_bcnt: %llu\n", qm_mcr_querycgr_a_get64(&cgrd)); ++ ++ return 0; ++} ++ ++static int query_cgr_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, query_cgr_show, NULL); ++} ++ ++static ssize_t query_cgr_write(struct file *f, const char __user *buf, ++ size_t count, loff_t *off) ++{ ++ int ret; ++ unsigned long val; ++ ++ ret = user_input_convert(buf, count, &val); ++ if (ret) ++ return ret; ++ if (val > 0xff) ++ return -EINVAL; ++ query_cgr_data.cgid = (u8)val; ++ return count; ++} ++ ++static const struct file_operations query_cgr_fops = { ++ .owner = THIS_MODULE, ++ .open = query_cgr_open, ++ .read = seq_read, ++ .write = query_cgr_write, ++ .release = single_release, ++}; ++ ++/******************************************************************************* ++ * Test Write CGR ++ ******************************************************************************/ ++struct test_write_cgr_s { ++ u64 i_bcnt; ++ u8 cgid; ++}; ++static struct test_write_cgr_s test_write_cgr_data; ++ ++static int testwrite_cgr_show(struct seq_file *file, void *offset) ++{ ++ int ret; ++ struct qm_mcr_cgrtestwrite result; ++ struct qman_cgr cgr; ++ u64 i_bcnt; ++ ++ memset(&cgr, 0, sizeof(struct qman_cgr)); ++ memset(&result, 0, sizeof(struct qm_mcr_cgrtestwrite)); ++ cgr.cgrid = test_write_cgr_data.cgid; ++ i_bcnt = test_write_cgr_data.i_bcnt; ++ ret = qman_testwrite_cgr(&cgr, i_bcnt, &result); ++ if (ret) ++ return ret; ++ seq_printf(file, "CGR Test Write CGR id 0x%x\n", cgr.cgrid); ++ seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", ++ result.cgr.wr_parm_g.MA, result.cgr.wr_parm_g.Mn, ++ result.cgr.wr_parm_g.SA, result.cgr.wr_parm_g.Sn, ++ result.cgr.wr_parm_g.Pn); ++ seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", ++ result.cgr.wr_parm_y.MA, result.cgr.wr_parm_y.Mn, ++ result.cgr.wr_parm_y.SA, result.cgr.wr_parm_y.Sn, ++ result.cgr.wr_parm_y.Pn); ++ seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", ++ result.cgr.wr_parm_r.MA, result.cgr.wr_parm_r.Mn, ++ result.cgr.wr_parm_r.SA, result.cgr.wr_parm_r.Sn, ++ result.cgr.wr_parm_r.Pn); ++ seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n", ++ result.cgr.wr_en_g, result.cgr.wr_en_y, result.cgr.wr_en_r); ++ 
seq_printf(file, " cscn_en: %u\n", result.cgr.cscn_en); ++ seq_printf(file, " cscn_targ: %u\n", result.cgr.cscn_targ); ++ seq_printf(file, " cstd_en: %u\n", result.cgr.cstd_en); ++ seq_printf(file, " cs: %u\n", result.cgr.cs); ++ seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n", ++ result.cgr.cs_thres.TA, result.cgr.cs_thres.Tn); ++ ++ /* Add Mode for Si 2 */ ++ seq_printf(file, " mode: %s\n", ++ (result.cgr.mode & QMAN_CGR_MODE_FRAME) ? ++ "frame count" : "byte count"); ++ ++ seq_printf(file, " i_bcnt: %llu\n", ++ qm_mcr_cgrtestwrite_i_get64(&result)); ++ seq_printf(file, " a_bcnt: %llu\n", ++ qm_mcr_cgrtestwrite_a_get64(&result)); ++ seq_printf(file, " wr_prob_g: %u\n", result.wr_prob_g); ++ seq_printf(file, " wr_prob_y: %u\n", result.wr_prob_y); ++ seq_printf(file, " wr_prob_r: %u\n", result.wr_prob_r); ++ return 0; ++} ++ ++static int testwrite_cgr_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, testwrite_cgr_show, NULL); ++} ++ ++static const struct file_operations testwrite_cgr_fops = { ++ .owner = THIS_MODULE, ++ .open = testwrite_cgr_open, ++ .read = seq_read, ++ .release = single_release, ++}; ++ ++ ++static int testwrite_cgr_ibcnt_show(struct seq_file *file, void *offset) ++{ ++ seq_printf(file, "i_bcnt: %llu\n", test_write_cgr_data.i_bcnt); ++ return 0; ++} ++static int testwrite_cgr_ibcnt_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, testwrite_cgr_ibcnt_show, NULL); ++} ++ ++static ssize_t testwrite_cgr_ibcnt_write(struct file *f, const char __user *buf, ++ size_t count, loff_t *off) ++{ ++ int ret; ++ unsigned long val; ++ ++ ret = user_input_convert(buf, count, &val); ++ if (ret) ++ return ret; ++ test_write_cgr_data.i_bcnt = val; ++ return count; ++} ++ ++static const struct file_operations teswrite_cgr_ibcnt_fops = { ++ .owner = THIS_MODULE, ++ .open = testwrite_cgr_ibcnt_open, ++ .read = seq_read, ++ .write = testwrite_cgr_ibcnt_write, ++ .release = single_release, ++}; ++ ++static int testwrite_cgr_cgrid_show(struct seq_file *file, void *offset) ++{ ++ seq_printf(file, "cgrid: %u\n", (u32)test_write_cgr_data.cgid); ++ return 0; ++} ++static int testwrite_cgr_cgrid_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, testwrite_cgr_cgrid_show, NULL); ++} ++ ++static ssize_t testwrite_cgr_cgrid_write(struct file *f, const char __user *buf, ++ size_t count, loff_t *off) ++{ ++ int ret; ++ unsigned long val; ++ ++ ret = user_input_convert(buf, count, &val); ++ if (ret) ++ return ret; ++ if (val > 0xff) ++ return -EINVAL; ++ test_write_cgr_data.cgid = (u8)val; ++ return count; ++} ++ ++static const struct file_operations teswrite_cgr_cgrid_fops = { ++ .owner = THIS_MODULE, ++ .open = testwrite_cgr_cgrid_open, ++ .read = seq_read, ++ .write = testwrite_cgr_cgrid_write, ++ .release = single_release, ++}; ++ ++/******************************************************************************* ++ * Query Congestion State ++ ******************************************************************************/ ++static int query_congestion_show(struct seq_file *file, void *offset) ++{ ++ int ret; ++ struct qm_mcr_querycongestion cs; ++ int i, j, in_cong = 0; ++ u32 mask; ++ ++ memset(&cs, 0, sizeof(struct qm_mcr_querycongestion)); ++ ret = qman_query_congestion(&cs); ++ if (ret) ++ return ret; ++ seq_puts(file, "Query Congestion Result\n"); ++ for (i = 0; i < 8; i++) { ++ mask = 0x80000000; ++ for (j = 0; j < 32; j++) { ++ if (cs.state.__state[i] & mask) { ++ in_cong = 1; ++ seq_printf(file, " cg %u: 
%s\n", (i*32)+j, ++ "in congestion"); ++ } ++ mask >>= 1; ++ } ++ } ++ if (!in_cong) ++ seq_puts(file, " All congestion groups not congested.\n"); ++ return 0; ++} ++ ++static int query_congestion_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, query_congestion_show, NULL); ++} ++ ++static const struct file_operations query_congestion_fops = { ++ .owner = THIS_MODULE, ++ .open = query_congestion_open, ++ .read = seq_read, ++ .release = single_release, ++}; ++ ++/******************************************************************************* ++ * Query CCGR ++ ******************************************************************************/ ++struct query_ccgr_s { ++ u32 ccgid; ++}; ++static struct query_ccgr_s query_ccgr_data; ++ ++static int query_ccgr_show(struct seq_file *file, void *offset) ++{ ++ int ret; ++ struct qm_mcr_ceetm_ccgr_query ccgr_query; ++ struct qm_mcc_ceetm_ccgr_query query_opts; ++ int i, j; ++ u32 mask; ++ ++ memset(&ccgr_query, 0, sizeof(struct qm_mcr_ceetm_ccgr_query)); ++ memset(&query_opts, 0, sizeof(struct qm_mcc_ceetm_ccgr_query)); ++ ++ if ((qman_ip_rev & 0xFF00) < QMAN_REV30) ++ return -EINVAL; ++ ++ seq_printf(file, "Query CCGID %x\n", query_ccgr_data.ccgid); ++ query_opts.dcpid = ((query_ccgr_data.ccgid & 0xFF000000) >> 24); ++ query_opts.ccgrid = query_ccgr_data.ccgid & 0x000001FF; ++ ret = qman_ceetm_query_ccgr(&query_opts, &ccgr_query); ++ if (ret) ++ return ret; ++ seq_printf(file, "Query CCGR id %x in DCP %d\n", query_opts.ccgrid, ++ query_opts.dcpid); ++ seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", ++ ccgr_query.cm_query.wr_parm_g.MA, ++ ccgr_query.cm_query.wr_parm_g.Mn, ++ ccgr_query.cm_query.wr_parm_g.SA, ++ ccgr_query.cm_query.wr_parm_g.Sn, ++ ccgr_query.cm_query.wr_parm_g.Pn); ++ ++ seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", ++ ccgr_query.cm_query.wr_parm_y.MA, ++ ccgr_query.cm_query.wr_parm_y.Mn, ++ ccgr_query.cm_query.wr_parm_y.SA, ++ ccgr_query.cm_query.wr_parm_y.Sn, ++ ccgr_query.cm_query.wr_parm_y.Pn); ++ ++ seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", ++ ccgr_query.cm_query.wr_parm_r.MA, ++ ccgr_query.cm_query.wr_parm_r.Mn, ++ ccgr_query.cm_query.wr_parm_r.SA, ++ ccgr_query.cm_query.wr_parm_r.Sn, ++ ccgr_query.cm_query.wr_parm_r.Pn); ++ ++ seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n", ++ ccgr_query.cm_query.ctl_wr_en_g, ++ ccgr_query.cm_query.ctl_wr_en_y, ++ ccgr_query.cm_query.ctl_wr_en_r); ++ ++ seq_printf(file, " cscn_en: %u\n", ccgr_query.cm_query.ctl_cscn_en); ++ seq_puts(file, " cscn_targ_dcp:\n"); ++ mask = 0x80000000; ++ for (i = 0; i < 32; i++) { ++ if (ccgr_query.cm_query.cscn_targ_dcp & mask) ++ seq_printf(file, " send CSCN to dcp %u\n", (31 - i)); ++ mask >>= 1; ++ } ++ ++ seq_puts(file, " cscn_targ_swp:\n"); ++ for (i = 0; i < 4; i++) { ++ mask = 0x80000000; ++ for (j = 0; j < 32; j++) { ++ if (ccgr_query.cm_query.cscn_targ_swp[i] & mask) ++ seq_printf(file, " send CSCN to swp" ++ "%u\n", (127 - (i * 32) - j)); ++ mask >>= 1; ++ } ++ } ++ ++ seq_printf(file, " td_en: %u\n", ccgr_query.cm_query.ctl_td_en); ++ ++ seq_printf(file, " cs_thresh_in_TA: %u, cs_thresh_in_Tn: %u\n", ++ ccgr_query.cm_query.cs_thres.TA, ++ ccgr_query.cm_query.cs_thres.Tn); ++ ++ seq_printf(file, " cs_thresh_out_TA: %u, cs_thresh_out_Tn: %u\n", ++ ccgr_query.cm_query.cs_thres_x.TA, ++ ccgr_query.cm_query.cs_thres_x.Tn); ++ ++ seq_printf(file, " td_thresh_TA: %u, td_thresh_Tn: %u\n", ++ ccgr_query.cm_query.td_thres.TA, ++ 
ccgr_query.cm_query.td_thres.Tn); ++ ++ seq_printf(file, " mode: %s\n", ++ (ccgr_query.cm_query.ctl_mode & ++ QMAN_CGR_MODE_FRAME) ? ++ "frame count" : "byte count"); ++ seq_printf(file, " i_cnt: %llu\n", (u64)ccgr_query.cm_query.i_cnt); ++ seq_printf(file, " a_cnt: %llu\n", (u64)ccgr_query.cm_query.a_cnt); ++ ++ return 0; ++} ++ ++static int query_ccgr_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, query_ccgr_show, NULL); ++} ++ ++static ssize_t query_ccgr_write(struct file *f, const char __user *buf, ++ size_t count, loff_t *off) ++{ ++ int ret; ++ unsigned long val; ++ ++ ret = user_input_convert(buf, count, &val); ++ if (ret) ++ return ret; ++ query_ccgr_data.ccgid = val; ++ return count; ++} ++ ++static const struct file_operations query_ccgr_fops = { ++ .owner = THIS_MODULE, ++ .open = query_ccgr_open, ++ .read = seq_read, ++ .write = query_ccgr_write, ++ .release = single_release, ++}; ++/******************************************************************************* ++ * QMan register ++ ******************************************************************************/ ++struct qman_register_s { ++ u32 val; ++}; ++static struct qman_register_s qman_register_data; ++ ++static void init_ccsrmempeek(void) ++{ ++ struct device_node *dn; ++ const u32 *regaddr_p; ++ ++ dn = of_find_compatible_node(NULL, NULL, "fsl,qman"); ++ if (!dn) { ++ pr_info("No fsl,qman node\n"); ++ return; ++ } ++ regaddr_p = of_get_address(dn, 0, &qman_ccsr_size, NULL); ++ if (!regaddr_p) { ++ of_node_put(dn); ++ return; ++ } ++ qman_ccsr_start = of_translate_address(dn, regaddr_p); ++ of_node_put(dn); ++} ++/* This function provides access to QMan ccsr memory map */ ++static int qman_ccsrmempeek(u32 *val, u32 offset) ++{ ++ void __iomem *addr; ++ u64 phys_addr; ++ ++ if (!qman_ccsr_start) ++ return -EINVAL; ++ ++ if (offset > (qman_ccsr_size - sizeof(u32))) ++ return -EINVAL; ++ ++ phys_addr = qman_ccsr_start + offset; ++ addr = ioremap(phys_addr, sizeof(u32)); ++ if (!addr) { ++ pr_err("ccsrmempeek, ioremap failed\n"); ++ return -EINVAL; ++ } ++ *val = in_be32(addr); ++ iounmap(addr); ++ return 0; ++} ++ ++static int qman_ccsrmempeek_show(struct seq_file *file, void *offset) ++{ ++ u32 b; ++ ++ qman_ccsrmempeek(&b, qman_register_data.val); ++ seq_printf(file, "QMan register offset = 0x%x\n", ++ qman_register_data.val); ++ seq_printf(file, "value = 0x%08x\n", b); ++ ++ return 0; ++} ++ ++static int qman_ccsrmempeek_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, qman_ccsrmempeek_show, NULL); ++} ++ ++static ssize_t qman_ccsrmempeek_write(struct file *f, const char __user *buf, ++ size_t count, loff_t *off) ++{ ++ int ret; ++ unsigned long val; ++ ++ ret = user_input_convert(buf, count, &val); ++ if (ret) ++ return ret; ++ /* multiple of 4 */ ++ if (val > (qman_ccsr_size - sizeof(u32))) { ++ pr_info("Input 0x%lx > 0x%llx\n", ++ val, (qman_ccsr_size - sizeof(u32))); ++ return -EINVAL; ++ } ++ if (val & 0x3) { ++ pr_info("Input 0x%lx not multiple of 4\n", val); ++ return -EINVAL; ++ } ++ qman_register_data.val = val; ++ return count; ++} ++ ++static const struct file_operations qman_ccsrmempeek_fops = { ++ .owner = THIS_MODULE, ++ .open = qman_ccsrmempeek_open, ++ .read = seq_read, ++ .write = qman_ccsrmempeek_write, ++}; ++ ++/******************************************************************************* ++ * QMan state ++ ******************************************************************************/ ++static int qman_fqd_state_show(struct seq_file *file, 
void *offset) ++{ ++ struct qm_mcr_queryfq_np np; ++ struct qman_fq fq; ++ struct line_buffer_fq line_buf; ++ int ret, i; ++ u8 *state = file->private; ++ u32 qm_fq_state_cnt[ARRAY_SIZE(fqd_states)]; ++ ++ memset(qm_fq_state_cnt, 0, sizeof(qm_fq_state_cnt)); ++ memset(&line_buf, 0, sizeof(line_buf)); ++ ++ seq_printf(file, "List of fq ids in state: %s\n", state_txt[*state]); ++ ++ for (i = 1; i < fqid_max; i++) { ++ fq.fqid = i; ++ ret = qman_query_fq_np(&fq, &np); ++ if (ret) ++ return ret; ++ if (*state == (np.state & QM_MCR_NP_STATE_MASK)) ++ add_to_line_buffer(&line_buf, fq.fqid, file); ++ /* Keep a summary count of all states */ ++ if ((np.state & QM_MCR_NP_STATE_MASK) < ARRAY_SIZE(fqd_states)) ++ qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++; ++ } ++ flush_line_buffer(&line_buf, file); ++ ++ for (i = 0; i < ARRAY_SIZE(fqd_states); i++) { ++ seq_printf(file, "%s count = %u\n", state_txt[i], ++ qm_fq_state_cnt[i]); ++ } ++ return 0; ++} ++ ++static int qman_fqd_state_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, qman_fqd_state_show, inode->i_private); ++} ++ ++static const struct file_operations qman_fqd_state_fops = { ++ .owner = THIS_MODULE, ++ .open = qman_fqd_state_open, ++ .read = seq_read, ++}; ++ ++static int qman_fqd_ctrl_show(struct seq_file *file, void *offset) ++{ ++ struct qm_fqd fqd; ++ struct qman_fq fq; ++ u32 fq_en_cnt = 0, fq_di_cnt = 0; ++ int ret, i; ++ struct mask_filter_s *data = file->private; ++ const char *ctrl_txt = get_fqd_ctrl_text(data->mask); ++ struct line_buffer_fq line_buf; ++ ++ memset(&line_buf, 0, sizeof(line_buf)); ++ seq_printf(file, "List of fq ids with: %s :%s\n", ++ ctrl_txt, (data->filter) ? "enabled" : "disabled"); ++ for (i = 1; i < fqid_max; i++) { ++ fq.fqid = i; ++ memset(&fqd, 0, sizeof(struct qm_fqd)); ++ ret = qman_query_fq(&fq, &fqd); ++ if (ret) ++ return ret; ++ if (data->filter) { ++ if (fqd.fq_ctrl & data->mask) ++ add_to_line_buffer(&line_buf, fq.fqid, file); ++ } else { ++ if (!(fqd.fq_ctrl & data->mask)) ++ add_to_line_buffer(&line_buf, fq.fqid, file); ++ } ++ if (fqd.fq_ctrl & data->mask) ++ fq_en_cnt++; ++ else ++ fq_di_cnt++; ++ } ++ flush_line_buffer(&line_buf, file); ++ ++ seq_printf(file, "Total FQD with: %s : enabled = %u\n", ++ ctrl_txt, fq_en_cnt); ++ seq_printf(file, "Total FQD with: %s : disabled = %u\n", ++ ctrl_txt, fq_di_cnt); ++ return 0; ++} ++ ++/******************************************************************************* ++ * QMan ctrl CGE, TDE, ORP, CTX, CPC, SFDR, BLOCK, HOLD, CACHE ++ ******************************************************************************/ ++static int qman_fqd_ctrl_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, qman_fqd_ctrl_show, inode->i_private); ++} ++ ++static const struct file_operations qman_fqd_ctrl_fops = { ++ .owner = THIS_MODULE, ++ .open = qman_fqd_ctrl_open, ++ .read = seq_read, ++}; ++ ++/******************************************************************************* ++ * QMan ctrl summary ++ ******************************************************************************/ ++/******************************************************************************* ++ * QMan summary state ++ ******************************************************************************/ ++static int qman_fqd_non_prog_summary_show(struct seq_file *file, void *offset) ++{ ++ struct qm_mcr_queryfq_np np; ++ struct qman_fq fq; ++ int ret, i; ++ u32 qm_fq_state_cnt[ARRAY_SIZE(fqd_states)]; ++ ++ memset(qm_fq_state_cnt, 0, 
sizeof(qm_fq_state_cnt)); ++ ++ for (i = 1; i < fqid_max; i++) { ++ fq.fqid = i; ++ ret = qman_query_fq_np(&fq, &np); ++ if (ret) ++ return ret; ++ /* Keep a summary count of all states */ ++ if ((np.state & QM_MCR_NP_STATE_MASK) < ARRAY_SIZE(fqd_states)) ++ qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(fqd_states); i++) { ++ seq_printf(file, "%s count = %u\n", state_txt[i], ++ qm_fq_state_cnt[i]); ++ } ++ return 0; ++} ++ ++static int qman_fqd_prog_summary_show(struct seq_file *file, void *offset) ++{ ++ struct qm_fqd fqd; ++ struct qman_fq fq; ++ int ret, i , j; ++ u32 qm_prog_cnt[ARRAY_SIZE(mask_filter)/2]; ++ ++ memset(qm_prog_cnt, 0, sizeof(qm_prog_cnt)); ++ ++ for (i = 1; i < fqid_max; i++) { ++ memset(&fqd, 0, sizeof(struct qm_fqd)); ++ fq.fqid = i; ++ ret = qman_query_fq(&fq, &fqd); ++ if (ret) ++ return ret; ++ /* Keep a summary count of all states */ ++ for (j = 0; j < ARRAY_SIZE(mask_filter); j += 2) ++ if ((fqd.fq_ctrl & QM_FQCTRL_MASK) & ++ mask_filter[j].mask) ++ qm_prog_cnt[j/2]++; ++ } ++ for (i = 0; i < ARRAY_SIZE(mask_filter) / 2; i++) { ++ seq_printf(file, "%s count = %u\n", ++ get_fqd_ctrl_text(mask_filter[i*2].mask), ++ qm_prog_cnt[i]); ++ } ++ return 0; ++} ++ ++static int qman_fqd_summary_show(struct seq_file *file, void *offset) ++{ ++ int ret; ++ ++ /* Display summary of non programmable fields */ ++ ret = qman_fqd_non_prog_summary_show(file, offset); ++ if (ret) ++ return ret; ++ seq_puts(file, "-----------------------------------------\n"); ++ /* Display programmable fields */ ++ ret = qman_fqd_prog_summary_show(file, offset); ++ if (ret) ++ return ret; ++ return 0; ++} ++ ++static int qman_fqd_summary_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, qman_fqd_summary_show, NULL); ++} ++ ++static const struct file_operations qman_fqd_summary_fops = { ++ .owner = THIS_MODULE, ++ .open = qman_fqd_summary_open, ++ .read = seq_read, ++}; ++ ++/******************************************************************************* ++ * QMan destination work queue ++ ******************************************************************************/ ++struct qman_dest_wq_s { ++ u16 wq_id; ++}; ++static struct qman_dest_wq_s qman_dest_wq_data = { ++ .wq_id = 0, ++}; ++ ++static int qman_fqd_dest_wq_show(struct seq_file *file, void *offset) ++{ ++ struct qm_fqd fqd; ++ struct qman_fq fq; ++ int ret, i; ++ u16 *wq, wq_id = qman_dest_wq_data.wq_id; ++ struct line_buffer_fq line_buf; ++ ++ memset(&line_buf, 0, sizeof(line_buf)); ++ /* use vmalloc : need to allocate large memory region and don't ++ * require the memory to be physically contiguous. 
++	 */
++	wq = vzalloc(sizeof(u16) * (0xFFFF+1));
++	if (!wq)
++		return -ENOMEM;
++
++	seq_printf(file, "List of fq ids with destination work queue id"
++		" = 0x%x\n", wq_id);
++
++	for (i = 1; i < fqid_max; i++) {
++		fq.fqid = i;
++		memset(&fqd, 0, sizeof(struct qm_fqd));
++		ret = qman_query_fq(&fq, &fqd);
++		if (ret) {
++			vfree(wq);
++			return ret;
++		}
++		if (wq_id == fqd.dest_wq)
++			add_to_line_buffer(&line_buf, fq.fqid, file);
++		wq[fqd.dest_wq]++;
++	}
++	flush_line_buffer(&line_buf, file);
++
++	seq_puts(file, "Summary of all FQD destination work queue values\n");
++	/* cover the full 0x10000-entry table; WQ is the 3 lsbits */
++	for (i = 0; i < 0x10000; i++) {
++		if (wq[i])
++			seq_printf(file, "Channel: 0x%x WQ: 0x%x WQ_ID: 0x%x, "
++				"count = %u\n", i >> 3, i & 0x7, i, wq[i]);
++	}
++	vfree(wq);
++	return 0;
++}
++
++static ssize_t qman_fqd_dest_wq_write(struct file *f, const char __user *buf,
++	size_t count, loff_t *off)
++{
++	int ret;
++	unsigned long val;
++
++	ret = user_input_convert(buf, count, &val);
++	if (ret)
++		return ret;
++	if (val > 0xFFFF)
++		return -EINVAL;
++	qman_dest_wq_data.wq_id = val;
++	return count;
++}
++
++static int qman_fqd_dest_wq_open(struct inode *inode, struct file *file)
++{
++	return single_open(file, qman_fqd_dest_wq_show, NULL);
++}
++
++static const struct file_operations qman_fqd_dest_wq_fops = {
++	.owner = THIS_MODULE,
++	.open = qman_fqd_dest_wq_open,
++	.read = seq_read,
++	.write = qman_fqd_dest_wq_write,
++};
++
++/*******************************************************************************
++ *  QMan Intra-Class Scheduling Credit
++ ******************************************************************************/
++static int qman_fqd_cred_show(struct seq_file *file, void *offset)
++{
++	struct qm_fqd fqd;
++	struct qman_fq fq;
++	int ret, i;
++	u32 fq_cnt = 0;
++	struct line_buffer_fq line_buf;
++
++	memset(&line_buf, 0, sizeof(line_buf));
++	seq_puts(file, "List of fq ids with Intra-Class Scheduling Credit > 0"
++		"\n");
++
++	for (i = 1; i < fqid_max; i++) {
++		fq.fqid = i;
++		memset(&fqd, 0, sizeof(struct qm_fqd));
++		ret = qman_query_fq(&fq, &fqd);
++		if (ret)
++			return ret;
++		if (fqd.ics_cred > 0) {
++			add_to_line_buffer(&line_buf, fq.fqid, file);
++			fq_cnt++;
++		}
++	}
++	flush_line_buffer(&line_buf, file);
++
++	seq_printf(file, "Total FQD with ics_cred > 0 = %d\n", fq_cnt);
++	return 0;
++}
++
++static int qman_fqd_cred_open(struct inode *inode, struct file *file)
++{
++	return single_open(file, qman_fqd_cred_show, NULL);
++}
++
++static const struct file_operations qman_fqd_cred_fops = {
++	.owner = THIS_MODULE,
++	.open = qman_fqd_cred_open,
++	.read = seq_read,
++};
++
++/*******************************************************************************
++ *  Class Queue Fields
++ ******************************************************************************/
++struct query_cq_fields_data_s {
++	u32 cqid;
++};
++
++static struct query_cq_fields_data_s query_cq_fields_data = {
++	.cqid = 1,
++};
++
++static int query_cq_fields_show(struct seq_file *file, void *offset)
++{
++	int ret;
++	struct qm_mcr_ceetm_cq_query query_result;
++	unsigned int cqid;
++	unsigned int portal;
++
++	if ((qman_ip_rev & 0xFF00) < QMAN_REV30)
++		return -EINVAL;
++
++	cqid = query_cq_fields_data.cqid & 0x00FFFFFF;
++	portal = query_cq_fields_data.cqid >> 24;
++	if (portal > qm_dc_portal_fman1)
++		return -EINVAL;
++
++	ret = qman_ceetm_query_cq(cqid, portal, &query_result);
++	if (ret)
++		return ret;
++	seq_printf(file, "Query CQ Fields Result cqid 0x%x on DCP %d\n",
++		cqid, portal);
++	seq_printf(file, " ccgid: %u\n",
query_result.ccgid); ++ seq_printf(file, " state: %u\n", query_result.state); ++ seq_printf(file, " pfdr_hptr: %u\n", query_result.pfdr_hptr); ++ seq_printf(file, " pfdr_tptr: %u\n", query_result.pfdr_tptr); ++ seq_printf(file, " od1_xsfdr: %u\n", query_result.od1_xsfdr); ++ seq_printf(file, " od2_xsfdr: %u\n", query_result.od2_xsfdr); ++ seq_printf(file, " od3_xsfdr: %u\n", query_result.od3_xsfdr); ++ seq_printf(file, " od4_xsfdr: %u\n", query_result.od4_xsfdr); ++ seq_printf(file, " od5_xsfdr: %u\n", query_result.od5_xsfdr); ++ seq_printf(file, " od6_xsfdr: %u\n", query_result.od6_xsfdr); ++ seq_printf(file, " ra1_xsfdr: %u\n", query_result.ra1_xsfdr); ++ seq_printf(file, " ra2_xsfdr: %u\n", query_result.ra2_xsfdr); ++ seq_printf(file, " frame_count: %u\n", query_result.frm_cnt); ++ ++ return 0; ++} ++ ++static int query_cq_fields_open(struct inode *inode, ++ struct file *file) ++{ ++ return single_open(file, query_cq_fields_show, NULL); ++} ++ ++static ssize_t query_cq_fields_write(struct file *f, ++ const char __user *buf, size_t count, loff_t *off) ++{ ++ int ret; ++ unsigned long val; ++ ++ ret = user_input_convert(buf, count, &val); ++ if (ret) ++ return ret; ++ query_cq_fields_data.cqid = (u32)val; ++ return count; ++} ++ ++static const struct file_operations query_cq_fields_fops = { ++ .owner = THIS_MODULE, ++ .open = query_cq_fields_open, ++ .read = seq_read, ++ .write = query_cq_fields_write, ++ .release = single_release, ++}; ++ ++/******************************************************************************* ++ * READ CEETM_XSFDR_IN_USE ++ ******************************************************************************/ ++struct query_ceetm_xsfdr_data_s { ++ enum qm_dc_portal dcp_portal; ++}; ++ ++static struct query_ceetm_xsfdr_data_s query_ceetm_xsfdr_data; ++ ++static int query_ceetm_xsfdr_show(struct seq_file *file, void *offset) ++{ ++ int ret; ++ unsigned int xsfdr_in_use; ++ enum qm_dc_portal portal; ++ ++ ++ if (qman_ip_rev < QMAN_REV31) ++ return -EINVAL; ++ ++ portal = query_ceetm_xsfdr_data.dcp_portal; ++ ret = qman_ceetm_get_xsfdr(portal, &xsfdr_in_use); ++ if (ret) { ++ seq_printf(file, "Read CEETM_XSFDR_IN_USE on DCP %d failed\n", ++ portal); ++ return ret; ++ } ++ ++ seq_printf(file, "DCP%d: CEETM_XSFDR_IN_USE number is %u\n", portal, ++ (xsfdr_in_use & 0x1FFF)); ++ return 0; ++} ++ ++static int query_ceetm_xsfdr_open(struct inode *inode, ++ struct file *file) ++{ ++ return single_open(file, query_ceetm_xsfdr_show, NULL); ++} ++ ++static ssize_t query_ceetm_xsfdr_write(struct file *f, ++ const char __user *buf, size_t count, loff_t *off) ++{ ++ int ret; ++ unsigned long val; ++ ++ ret = user_input_convert(buf, count, &val); ++ if (ret) ++ return ret; ++ if (val > qm_dc_portal_fman1) ++ return -EINVAL; ++ query_ceetm_xsfdr_data.dcp_portal = (u32)val; ++ return count; ++} ++ ++static const struct file_operations query_ceetm_xsfdr_fops = { ++ .owner = THIS_MODULE, ++ .open = query_ceetm_xsfdr_open, ++ .read = seq_read, ++ .write = query_ceetm_xsfdr_write, ++ .release = single_release, ++}; ++ ++/* helper macros used in qman_debugfs_module_init */ ++#define QMAN_DBGFS_ENTRY(name, mode, parent, data, fops) \ ++ do { \ ++ d = debugfs_create_file(name, \ ++ mode, parent, \ ++ data, \ ++ fops); \ ++ if (d == NULL) { \ ++ ret = -ENOMEM; \ ++ goto _return; \ ++ } \ ++ } while (0) ++ ++/* dfs_root as parent */ ++#define QMAN_DBGFS_ENTRY_ROOT(name, mode, data, fops) \ ++ QMAN_DBGFS_ENTRY(name, mode, dfs_root, data, fops) ++ ++/* fqd_root as parent */ ++#define 
QMAN_DBGFS_ENTRY_FQDROOT(name, mode, data, fops) \ ++ QMAN_DBGFS_ENTRY(name, mode, fqd_root, data, fops) ++ ++/* fqd state */ ++#define QMAN_DBGFS_ENTRY_FQDSTATE(name, index) \ ++ QMAN_DBGFS_ENTRY_FQDROOT(name, S_IRUGO, \ ++ (void *)&mask_filter[index], &qman_fqd_ctrl_fops) ++ ++static int __init qman_debugfs_module_init(void) ++{ ++ int ret = 0; ++ struct dentry *d, *fqd_root; ++ u32 reg; ++ ++ fqid_max = 0; ++ init_ccsrmempeek(); ++ if (qman_ccsr_start) { ++ if (!qman_ccsrmempeek(®, QM_FQD_AR)) { ++ /* extract the size of the FQD window */ ++ reg = reg & 0x3f; ++ /* calculate valid frame queue descriptor range */ ++ fqid_max = (1 << (reg + 1)) / QM_FQD_BLOCK_SIZE; ++ } ++ } ++ dfs_root = debugfs_create_dir("qman", NULL); ++ fqd_root = debugfs_create_dir("fqd", dfs_root); ++ if (dfs_root == NULL || fqd_root == NULL) { ++ ret = -ENOMEM; ++ pr_err("Cannot create qman/fqd debugfs dir\n"); ++ goto _return; ++ } ++ if (fqid_max) { ++ QMAN_DBGFS_ENTRY_ROOT("ccsrmempeek", S_IRUGO | S_IWUGO, ++ NULL, &qman_ccsrmempeek_fops); ++ } ++ QMAN_DBGFS_ENTRY_ROOT("query_fq_np_fields", S_IRUGO | S_IWUGO, ++ &query_fq_np_fields_data, &query_fq_np_fields_fops); ++ ++ QMAN_DBGFS_ENTRY_ROOT("query_fq_fields", S_IRUGO | S_IWUGO, ++ &query_fq_fields_data, &query_fq_fields_fops); ++ ++ QMAN_DBGFS_ENTRY_ROOT("query_wq_lengths", S_IRUGO | S_IWUGO, ++ &query_wq_lengths_data, &query_wq_lengths_fops); ++ ++ QMAN_DBGFS_ENTRY_ROOT("query_cgr", S_IRUGO | S_IWUGO, ++ &query_cgr_data, &query_cgr_fops); ++ ++ QMAN_DBGFS_ENTRY_ROOT("query_congestion", S_IRUGO, ++ NULL, &query_congestion_fops); ++ ++ QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr", S_IRUGO, ++ NULL, &testwrite_cgr_fops); ++ ++ QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_cgrid", S_IRUGO | S_IWUGO, ++ NULL, &teswrite_cgr_cgrid_fops); ++ ++ QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_ibcnt", S_IRUGO | S_IWUGO, ++ NULL, &teswrite_cgr_ibcnt_fops); ++ ++ QMAN_DBGFS_ENTRY_ROOT("query_ceetm_ccgr", S_IRUGO | S_IWUGO, ++ &query_ccgr_data, &query_ccgr_fops); ++ /* Create files with fqd_root as parent */ ++ ++ QMAN_DBGFS_ENTRY_FQDROOT("stateoos", S_IRUGO, ++ (void *)&fqd_states[QM_MCR_NP_STATE_OOS], &qman_fqd_state_fops); ++ ++ QMAN_DBGFS_ENTRY_FQDROOT("state_retired", S_IRUGO, ++ (void *)&fqd_states[QM_MCR_NP_STATE_RETIRED], ++ &qman_fqd_state_fops); ++ ++ QMAN_DBGFS_ENTRY_FQDROOT("state_tentatively_sched", S_IRUGO, ++ (void *)&fqd_states[QM_MCR_NP_STATE_TEN_SCHED], ++ &qman_fqd_state_fops); ++ ++ QMAN_DBGFS_ENTRY_FQDROOT("state_truly_sched", S_IRUGO, ++ (void *)&fqd_states[QM_MCR_NP_STATE_TRU_SCHED], ++ &qman_fqd_state_fops); ++ ++ QMAN_DBGFS_ENTRY_FQDROOT("state_parked", S_IRUGO, ++ (void *)&fqd_states[QM_MCR_NP_STATE_PARKED], ++ &qman_fqd_state_fops); ++ ++ QMAN_DBGFS_ENTRY_FQDROOT("state_active", S_IRUGO, ++ (void *)&fqd_states[QM_MCR_NP_STATE_ACTIVE], ++ &qman_fqd_state_fops); ++ QMAN_DBGFS_ENTRY_ROOT("query_cq_fields", S_IRUGO | S_IWUGO, ++ &query_cq_fields_data, &query_cq_fields_fops); ++ QMAN_DBGFS_ENTRY_ROOT("query_ceetm_xsfdr_in_use", S_IRUGO | S_IWUGO, ++ &query_ceetm_xsfdr_data, &query_ceetm_xsfdr_fops); ++ ++ ++ QMAN_DBGFS_ENTRY_FQDSTATE("cge_enable", 17); ++ ++ QMAN_DBGFS_ENTRY_FQDSTATE("cge_disable", 16); ++ ++ QMAN_DBGFS_ENTRY_FQDSTATE("tde_enable", 15); ++ ++ QMAN_DBGFS_ENTRY_FQDSTATE("tde_disable", 14); ++ ++ QMAN_DBGFS_ENTRY_FQDSTATE("orp_enable", 13); ++ ++ QMAN_DBGFS_ENTRY_FQDSTATE("orp_disable", 12); ++ ++ QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_enable", 11); ++ ++ QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_disable", 10); ++ ++ QMAN_DBGFS_ENTRY_FQDSTATE("cpc_enable", 9); 
++ ++ QMAN_DBGFS_ENTRY_FQDSTATE("cpc_disable", 8); ++ ++ QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_enable", 7); ++ ++ QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_disable", 6); ++ ++ QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_enable", 5); ++ ++ QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_disable", 4); ++ ++ QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_enable", 3); ++ ++ QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_disable", 2); ++ ++ QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_enable", 1); ++ ++ QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_disable", 0); ++ ++ QMAN_DBGFS_ENTRY_FQDROOT("summary", S_IRUGO, ++ NULL, &qman_fqd_summary_fops); ++ ++ QMAN_DBGFS_ENTRY_FQDROOT("wq", S_IRUGO | S_IWUGO, ++ NULL, &qman_fqd_dest_wq_fops); ++ ++ QMAN_DBGFS_ENTRY_FQDROOT("cred", S_IRUGO, ++ NULL, &qman_fqd_cred_fops); ++ ++ return 0; ++ ++_return: ++ debugfs_remove_recursive(dfs_root); ++ return ret; ++} ++ ++static void __exit qman_debugfs_module_exit(void) ++{ ++ debugfs_remove_recursive(dfs_root); ++} ++ ++module_init(qman_debugfs_module_init); ++module_exit(qman_debugfs_module_exit); ++MODULE_LICENSE("Dual BSD/GPL"); +--- /dev/null ++++ b/drivers/staging/fsl_qbman/qman_driver.c +@@ -0,0 +1,977 @@ ++/* Copyright 2008-2012 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */
++
++#include "qman_private.h"
++
++#include <linux/smp.h>	/* hard_smp_processor_id() if !CONFIG_SMP */
++#ifdef CONFIG_HOTPLUG_CPU
++#include <linux/cpu.h>
++#endif
++
++/* Global variable containing revision id (even on non-control plane systems
++ * where CCSR isn't available) */
++u16 qman_ip_rev;
++EXPORT_SYMBOL(qman_ip_rev);
++u8 qman_ip_cfg;
++EXPORT_SYMBOL(qman_ip_cfg);
++u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
++EXPORT_SYMBOL(qm_channel_pool1);
++u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
++EXPORT_SYMBOL(qm_channel_caam);
++u16 qm_channel_pme = QMAN_CHANNEL_PME;
++EXPORT_SYMBOL(qm_channel_pme);
++u16 qm_channel_dce = QMAN_CHANNEL_DCE;
++EXPORT_SYMBOL(qm_channel_dce);
++u16 qman_portal_max;
++EXPORT_SYMBOL(qman_portal_max);
++
++u32 qman_clk;
++struct qm_ceetm qman_ceetms[QMAN_CEETM_MAX];
++/* the qman ceetm instances on the given SoC */
++u8 num_ceetms;
++
++/* For these variables, and the portal-initialisation logic, the
++ * comments in bman_driver.c apply here so won't be repeated. */
++static struct qman_portal *shared_portals[NR_CPUS];
++static int num_shared_portals;
++static int shared_portals_idx;
++static LIST_HEAD(unused_pcfgs);
++static DEFINE_SPINLOCK(unused_pcfgs_lock);
++
++/* A SDQCR mask comprising all the available/visible pool channels */
++static u32 pools_sdqcr;
++
++#define STR_ERR_NOPROP		"No '%s' property in node %s\n"
++#define STR_ERR_CELL		"'%s' is not a %d-cell range in node %s\n"
++#define STR_FQID_RANGE		"fsl,fqid-range"
++#define STR_POOL_CHAN_RANGE	"fsl,pool-channel-range"
++#define STR_CGRID_RANGE		"fsl,cgrid-range"
++
++/* A "fsl,fqid-range" node; release the given range to the allocator */
++static __init int fsl_fqid_range_init(struct device_node *node)
++{
++	int ret;
++	const u32 *range = of_get_property(node, STR_FQID_RANGE, &ret);
++	if (!range) {
++		pr_err(STR_ERR_NOPROP, STR_FQID_RANGE, node->full_name);
++		return -EINVAL;
++	}
++	if (ret != 8) {
++		pr_err(STR_ERR_CELL, STR_FQID_RANGE, 2, node->full_name);
++		return -EINVAL;
++	}
++	qman_seed_fqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
++	pr_info("Qman: FQID allocator includes range %d:%d\n",
++		be32_to_cpu(range[0]), be32_to_cpu(range[1]));
++	return 0;
++}
++
++/* A "fsl,pool-channel-range" node; add to the SDQCR mask only */
++static __init int fsl_pool_channel_range_sdqcr(struct device_node *node)
++{
++	int ret;
++	const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
++	if (!chanid) {
++		pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
++		return -EINVAL;
++	}
++	if (ret != 8) {
++		pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 2, node->full_name);
++		return -EINVAL;
++	}
++	for (ret = 0; ret < be32_to_cpu(chanid[1]); ret++)
++		pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(be32_to_cpu(chanid[0]) + ret);
++	return 0;
++}
++
++/* A "fsl,pool-channel-range" node; release the given range to the allocator */
++static __init int fsl_pool_channel_range_init(struct device_node *node)
++{
++	int ret;
++	const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
++	if (!chanid) {
++		pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
++		return -EINVAL;
++	}
++	if (ret != 8) {
++		pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 2, node->full_name);
++		return -EINVAL;
++	}
++	qman_seed_pool_range(be32_to_cpu(chanid[0]), be32_to_cpu(chanid[1]));
++	pr_info("Qman: pool channel allocator includes range %d:%d\n",
++		be32_to_cpu(chanid[0]), be32_to_cpu(chanid[1]));
++	return 0;
++}
++
++/* A "fsl,cgrid-range" node; release the given range to the allocator */
++static __init int fsl_cgrid_range_init(struct device_node *node)
++{
++	struct qman_cgr cgr;
++	int ret, errors = 0;
++	const u32 *range = of_get_property(node, STR_CGRID_RANGE, &ret);
++	if (!range) {
++		pr_err(STR_ERR_NOPROP, STR_CGRID_RANGE, node->full_name);
++		return -EINVAL;
++	}
++	if (ret != 8) {
++		pr_err(STR_ERR_CELL, STR_CGRID_RANGE, 2, node->full_name);
++		return -EINVAL;
++	}
++	qman_seed_cgrid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
++	pr_info("Qman: CGRID allocator includes range %d:%d\n",
++		be32_to_cpu(range[0]), be32_to_cpu(range[1]));
++	for (cgr.cgrid = 0; cgr.cgrid < __CGR_NUM; cgr.cgrid++) {
++		ret = qman_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL);
++		if (ret)
++			errors++;
++	}
++	if (errors)
++		pr_err("Warning: %d error%s while initialising CGRs %d:%d\n",
++			errors, (errors > 1) ? "s" : "",
++			be32_to_cpu(range[0]), be32_to_cpu(range[1]));
++	return 0;
++}
++
++static __init int fsl_ceetm_init(struct device_node *node)
++{
++	enum qm_dc_portal dcp_portal;
++	struct qm_ceetm_sp *sp;
++	struct qm_ceetm_lni *lni;
++	int ret, i;
++	const u32 *range;
++
++	/* Find LFQID range */
++	range = of_get_property(node, "fsl,ceetm-lfqid-range", &ret);
++	if (!range) {
++		pr_err("No fsl,ceetm-lfqid-range in node %s\n",
++			node->full_name);
++		return -EINVAL;
++	}
++	if (ret != 8) {
++		pr_err("fsl,ceetm-lfqid-range is not a 2-cell range in node"
++			" %s\n", node->full_name);
++		return -EINVAL;
++	}
++
++	dcp_portal = (be32_to_cpu(range[0]) & 0x0F0000) >> 16;
++	if (dcp_portal > qm_dc_portal_fman1) {
++		pr_err("The DCP portal %d doesn't support CEETM\n", dcp_portal);
++		return -EINVAL;
++	}
++
++	if (dcp_portal == qm_dc_portal_fman0)
++		qman_seed_ceetm0_lfqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
++	if (dcp_portal == qm_dc_portal_fman1)
++		qman_seed_ceetm1_lfqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
++	pr_debug("Qman: The lfqid allocator of CEETM %d includes range"
++		" 0x%x:0x%x\n", dcp_portal, be32_to_cpu(range[0]), be32_to_cpu(range[1]));
++
++	qman_ceetms[dcp_portal].idx = dcp_portal;
++	INIT_LIST_HEAD(&qman_ceetms[dcp_portal].sub_portals);
++	INIT_LIST_HEAD(&qman_ceetms[dcp_portal].lnis);
++
++	/* Find Sub-portal range */
++	range = of_get_property(node, "fsl,ceetm-sp-range", &ret);
++	if (!range) {
++		pr_err("No fsl,ceetm-sp-range in node %s\n", node->full_name);
++		return -EINVAL;
++	}
++	if (ret != 8) {
++		pr_err("fsl,ceetm-sp-range is not a 2-cell range in node %s\n",
++			node->full_name);
++		return -EINVAL;
++	}
++
++	for (i = 0; i < be32_to_cpu(range[1]); i++) {
++		sp = kzalloc(sizeof(*sp), GFP_KERNEL);
++		if (!sp) {
++			pr_err("Can't alloc memory for sub-portal %d\n",
++				be32_to_cpu(range[0]) + i);
++			return -ENOMEM;
++		}
++		sp->idx = be32_to_cpu(range[0]) + i;
++		sp->dcp_idx = dcp_portal;
++		sp->is_claimed = 0;
++		list_add_tail(&sp->node, &qman_ceetms[dcp_portal].sub_portals);
++		sp++;
++	}
++	pr_debug("Qman: Reserve sub-portal %d:%d for CEETM %d\n",
++		be32_to_cpu(range[0]), be32_to_cpu(range[1]), dcp_portal);
++	qman_ceetms[dcp_portal].sp_range[0] = be32_to_cpu(range[0]);
++	qman_ceetms[dcp_portal].sp_range[1] = be32_to_cpu(range[1]);
++
++	/* Find LNI range */
++	range = of_get_property(node, "fsl,ceetm-lni-range", &ret);
++	if (!range) {
++		pr_err("No fsl,ceetm-lni-range in node %s\n", node->full_name);
++		return -EINVAL;
++	}
++	if (ret != 8) {
++		pr_err("fsl,ceetm-lni-range is not a 2-cell range in node %s\n",
++			node->full_name);
++		return -EINVAL;
++	}
++
++	for (i = 0; i < be32_to_cpu(range[1]); i++) {
++		lni = kzalloc(sizeof(*lni), GFP_KERNEL);
++		if (!lni) {
++			pr_err("Can't
alloc memory for LNI %d\n", ++ range[0] + i); ++ return -ENOMEM; ++ } ++ lni->idx = be32_to_cpu(range[0]) + i; ++ lni->dcp_idx = dcp_portal; ++ lni->is_claimed = 0; ++ INIT_LIST_HEAD(&lni->channels); ++ list_add_tail(&lni->node, &qman_ceetms[dcp_portal].lnis); ++ lni++; ++ } ++ pr_debug("Qman: Reserve LNI %d:%d for CEETM %d\n", ++ be32_to_cpu(range[0]), be32_to_cpu(range[1]), dcp_portal); ++ qman_ceetms[dcp_portal].lni_range[0] = be32_to_cpu(range[0]); ++ qman_ceetms[dcp_portal].lni_range[1] = be32_to_cpu(range[1]); ++ ++ /* Find CEETM channel range */ ++ range = of_get_property(node, "fsl,ceetm-channel-range", &ret); ++ if (!range) { ++ pr_err("No fsl,ceetm-channel-range in node %s\n", ++ node->full_name); ++ return -EINVAL; ++ } ++ if (ret != 8) { ++ pr_err("fsl,ceetm-channel-range is not a 2-cell range in node" ++ "%s\n", node->full_name); ++ return -EINVAL; ++ } ++ ++ if (dcp_portal == qm_dc_portal_fman0) ++ qman_seed_ceetm0_channel_range(be32_to_cpu(range[0]), be32_to_cpu(range[1])); ++ if (dcp_portal == qm_dc_portal_fman1) ++ qman_seed_ceetm1_channel_range(be32_to_cpu(range[0]), be32_to_cpu(range[1])); ++ pr_debug("Qman: The channel allocator of CEETM %d includes" ++ " range %d:%d\n", dcp_portal, be32_to_cpu(range[0]), be32_to_cpu(range[1])); ++ ++ /* Set CEETM PRES register */ ++ ret = qman_ceetm_set_prescaler(dcp_portal); ++ if (ret) ++ return ret; ++ return 0; ++} ++ ++static void qman_get_ip_revision(struct device_node *dn) ++{ ++ u16 ip_rev = 0; ++ u8 ip_cfg = QMAN_REV_CFG_0; ++ for_each_compatible_node(dn, NULL, "fsl,qman-portal") { ++ if (!of_device_is_available(dn)) ++ continue; ++ if (of_device_is_compatible(dn, "fsl,qman-portal-1.0") || ++ of_device_is_compatible(dn, "fsl,qman-portal-1.0.0")) { ++ pr_err("QMAN rev1.0 on P4080 rev1 is not supported!\n"); ++ BUG_ON(1); ++ } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.1") || ++ of_device_is_compatible(dn, "fsl,qman-portal-1.1.0")) { ++ ip_rev = QMAN_REV11; ++ qman_portal_max = 10; ++ } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.2") || ++ of_device_is_compatible(dn, "fsl,qman-portal-1.2.0")) { ++ ip_rev = QMAN_REV12; ++ qman_portal_max = 10; ++ } else if (of_device_is_compatible(dn, "fsl,qman-portal-2.0") || ++ of_device_is_compatible(dn, "fsl,qman-portal-2.0.0")) { ++ ip_rev = QMAN_REV20; ++ qman_portal_max = 3; ++ } else if (of_device_is_compatible(dn, ++ "fsl,qman-portal-3.0.0")) { ++ ip_rev = QMAN_REV30; ++ qman_portal_max = 50; ++ } else if (of_device_is_compatible(dn, ++ "fsl,qman-portal-3.0.1")) { ++ ip_rev = QMAN_REV30; ++ qman_portal_max = 25; ++ ip_cfg = QMAN_REV_CFG_1; ++ } else if (of_device_is_compatible(dn, ++ "fsl,qman-portal-3.1.0")) { ++ ip_rev = QMAN_REV31; ++ qman_portal_max = 50; ++ } else if (of_device_is_compatible(dn, ++ "fsl,qman-portal-3.1.1")) { ++ ip_rev = QMAN_REV31; ++ qman_portal_max = 25; ++ ip_cfg = QMAN_REV_CFG_1; ++ } else if (of_device_is_compatible(dn, ++ "fsl,qman-portal-3.1.2")) { ++ ip_rev = QMAN_REV31; ++ qman_portal_max = 18; ++ ip_cfg = QMAN_REV_CFG_2; ++ } else if (of_device_is_compatible(dn, ++ "fsl,qman-portal-3.1.3")) { ++ ip_rev = QMAN_REV31; ++ qman_portal_max = 10; ++ ip_cfg = QMAN_REV_CFG_3; ++ } else if (of_device_is_compatible(dn, ++ "fsl,qman-portal-3.2.0")) { ++ ip_rev = QMAN_REV32; ++ qman_portal_max = 10; ++ ip_cfg = QMAN_REV_CFG_3; // TODO: Verify for ls1043 ++ } else if (of_device_is_compatible(dn, ++ "fsl,qman-portal-3.2.1")) { ++ ip_rev = QMAN_REV32; ++ qman_portal_max = 10; ++ ip_cfg = QMAN_REV_CFG_3; ++ } else { ++ pr_warn("unknown QMan 
version in portal node," ++ "default to rev1.1\n"); ++ ip_rev = QMAN_REV11; ++ qman_portal_max = 10; ++ } ++ ++ if (!qman_ip_rev) { ++ if (ip_rev) { ++ qman_ip_rev = ip_rev; ++ qman_ip_cfg = ip_cfg; ++ } else { ++ pr_warn("unknown Qman version," ++ " default to rev1.1\n"); ++ qman_ip_rev = QMAN_REV11; ++ qman_ip_cfg = QMAN_REV_CFG_0; ++ } ++ } else if (ip_rev && (qman_ip_rev != ip_rev)) ++ pr_warn("Revision=0x%04x, but portal '%s' has" ++ " 0x%04x\n", ++ qman_ip_rev, dn->full_name, ip_rev); ++ if (qman_ip_rev == ip_rev) ++ break; ++ } ++} ++ ++/* Parse a portal node, perform generic mapping duties and return the config. It ++ * is not known at this stage for what purpose (or even if) the portal will be ++ * used. */ ++static struct qm_portal_config * __init parse_pcfg(struct device_node *node) ++{ ++ struct qm_portal_config *pcfg; ++ const u32 *index_p; ++ u32 index, channel; ++ int irq, ret; ++ resource_size_t len; ++ ++ pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL); ++ if (!pcfg) { ++ pr_err("can't allocate portal config"); ++ return NULL; ++ } ++ ++ /* ++ * This is a *horrible hack*, but the IOMMU/PAMU driver needs a ++ * 'struct device' in order to get the PAMU stashing setup and the QMan ++ * portal [driver] won't function at all without ring stashing ++ * ++ * Making the QMan portal driver nice and proper is part of the ++ * upstreaming effort ++ */ ++ pcfg->dev.bus = &platform_bus_type; ++ pcfg->dev.of_node = node; ++#ifdef CONFIG_FSL_PAMU ++ pcfg->dev.archdata.iommu_domain = NULL; ++#endif ++ ++ ret = of_address_to_resource(node, DPA_PORTAL_CE, ++ &pcfg->addr_phys[DPA_PORTAL_CE]); ++ if (ret) { ++ pr_err("Can't get %s property '%s'\n", node->full_name, ++ "reg::CE"); ++ goto err; ++ } ++ ret = of_address_to_resource(node, DPA_PORTAL_CI, ++ &pcfg->addr_phys[DPA_PORTAL_CI]); ++ if (ret) { ++ pr_err("Can't get %s property '%s'\n", node->full_name, ++ "reg::CI"); ++ goto err; ++ } ++ index_p = of_get_property(node, "cell-index", &ret); ++ if (!index_p || (ret != 4)) { ++ pr_err("Can't get %s property '%s'\n", node->full_name, ++ "cell-index"); ++ goto err; ++ } ++ index = be32_to_cpu(*index_p); ++ if (index >= qman_portal_max) { ++ pr_err("QMan portal index %d is beyond max (%d)\n", ++ index, qman_portal_max); ++ goto err; ++ } ++ ++ channel = index + QM_CHANNEL_SWPORTAL0; ++ pcfg->public_cfg.channel = channel; ++ pcfg->public_cfg.cpu = -1; ++ irq = irq_of_parse_and_map(node, 0); ++ if (irq == 0) { ++ pr_err("Can't get %s property '%s'\n", node->full_name, ++ "interrupts"); ++ goto err; ++ } ++ pcfg->public_cfg.irq = irq; ++ pcfg->public_cfg.index = index; ++#ifdef CONFIG_FSL_QMAN_CONFIG ++ /* We need the same LIODN offset for all portals */ ++ qman_liodn_fixup(pcfg->public_cfg.channel); ++#endif ++ ++ len = resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]); ++ if (len != (unsigned long)len) ++ goto err; ++ ++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) ++ pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_cache_ns( ++ pcfg->addr_phys[DPA_PORTAL_CE].start, ++ resource_size(&pcfg->addr_phys[DPA_PORTAL_CE])); ++ ++ pcfg->addr_virt[DPA_PORTAL_CI] = ioremap( ++ pcfg->addr_phys[DPA_PORTAL_CI].start, ++ resource_size(&pcfg->addr_phys[DPA_PORTAL_CI])); ++#else ++ pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot( ++ pcfg->addr_phys[DPA_PORTAL_CE].start, ++ (unsigned long)len, ++ 0); ++ pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot( ++ pcfg->addr_phys[DPA_PORTAL_CI].start, ++ resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]), ++ _PAGE_GUARDED | _PAGE_NO_CACHE); ++#endif ++ return pcfg; ++err: ++ kfree(pcfg); ++ 
return NULL; ++} ++ ++static struct qm_portal_config *get_pcfg(struct list_head *list) ++{ ++ struct qm_portal_config *pcfg; ++ if (list_empty(list)) ++ return NULL; ++ pcfg = list_entry(list->prev, struct qm_portal_config, list); ++ list_del(&pcfg->list); ++ return pcfg; ++} ++ ++static struct qm_portal_config *get_pcfg_idx(struct list_head *list, u32 idx) ++{ ++ struct qm_portal_config *pcfg; ++ if (list_empty(list)) ++ return NULL; ++ list_for_each_entry(pcfg, list, list) { ++ if (pcfg->public_cfg.index == idx) { ++ list_del(&pcfg->list); ++ return pcfg; ++ } ++ } ++ return NULL; ++} ++ ++static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) ++{ ++#ifdef CONFIG_FSL_PAMU ++ int ret; ++ int window_count = 1; ++ struct iommu_domain_geometry geom_attr; ++ struct pamu_stash_attribute stash_attr; ++ ++ pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type); ++ if (!pcfg->iommu_domain) { ++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_alloc() failed", ++ __func__); ++ goto _no_iommu; ++ } ++ geom_attr.aperture_start = 0; ++ geom_attr.aperture_end = ++ ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1; ++ geom_attr.force_aperture = true; ++ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY, ++ &geom_attr); ++ if (ret < 0) { ++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d", ++ __func__, ret); ++ goto _iommu_domain_free; ++ } ++ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS, ++ &window_count); ++ if (ret < 0) { ++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d", ++ __func__, ret); ++ goto _iommu_domain_free; ++ } ++ stash_attr.cpu = cpu; ++ stash_attr.cache = PAMU_ATTR_CACHE_L1; ++ /* set stash information for the window */ ++ stash_attr.window = 0; ++ ret = iommu_domain_set_attr(pcfg->iommu_domain, ++ DOMAIN_ATTR_FSL_PAMU_STASH, ++ &stash_attr); ++ if (ret < 0) { ++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d", ++ __func__, ret); ++ goto _iommu_domain_free; ++ } ++ ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36, ++ IOMMU_READ | IOMMU_WRITE); ++ if (ret < 0) { ++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_window_enable() = %d", ++ __func__, ret); ++ goto _iommu_domain_free; ++ } ++ ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev); ++ if (ret < 0) { ++ pr_err(KBUILD_MODNAME ":%s(): iommu_attach_device() = %d", ++ __func__, ret); ++ goto _iommu_domain_free; ++ } ++ ret = iommu_domain_set_attr(pcfg->iommu_domain, ++ DOMAIN_ATTR_FSL_PAMU_ENABLE, ++ &window_count); ++ if (ret < 0) { ++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d", ++ __func__, ret); ++ goto _iommu_detach_device; ++ } ++ ++_no_iommu: ++#endif ++#ifdef CONFIG_FSL_QMAN_CONFIG ++ if (qman_set_sdest(pcfg->public_cfg.channel, cpu)) ++#endif ++ pr_warn("Failed to set QMan portal's stash request queue\n"); ++ ++ return; ++ ++#ifdef CONFIG_FSL_PAMU ++_iommu_detach_device: ++ iommu_detach_device(pcfg->iommu_domain, NULL); ++_iommu_domain_free: ++ iommu_domain_free(pcfg->iommu_domain); ++#endif ++} ++ ++struct qm_portal_config *qm_get_unused_portal_idx(u32 idx) ++{ ++ struct qm_portal_config *ret; ++ spin_lock(&unused_pcfgs_lock); ++ if (idx == QBMAN_ANY_PORTAL_IDX) ++ ret = get_pcfg(&unused_pcfgs); ++ else ++ ret = get_pcfg_idx(&unused_pcfgs, idx); ++ spin_unlock(&unused_pcfgs_lock); ++ /* Bind stashing LIODNs to the CPU we are currently executing on, and ++ * set the portal to use the stashing request queue corresponding to the ++ * cpu as well. 
The user-space driver assumption is that the pthread has ++ * to already be affine to one cpu only before opening a portal. If that ++ * check is circumvented, the only risk is a performance degradation - ++ * stashing will go to whatever cpu they happened to be running on when ++ * opening the device file, and if that isn't the cpu they subsequently ++ * bind to and do their polling on, tough. */ ++ if (ret) ++ portal_set_cpu(ret, hard_smp_processor_id()); ++ return ret; ++} ++ ++struct qm_portal_config *qm_get_unused_portal(void) ++{ ++ return qm_get_unused_portal_idx(QBMAN_ANY_PORTAL_IDX); ++} ++ ++void qm_put_unused_portal(struct qm_portal_config *pcfg) ++{ ++ spin_lock(&unused_pcfgs_lock); ++ list_add(&pcfg->list, &unused_pcfgs); ++ spin_unlock(&unused_pcfgs_lock); ++} ++ ++static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg) ++{ ++ struct qman_portal *p; ++ ++ pcfg->iommu_domain = NULL; ++ portal_set_cpu(pcfg, pcfg->public_cfg.cpu); ++ p = qman_create_affine_portal(pcfg, NULL); ++ if (p) { ++ u32 irq_sources = 0; ++ /* Determine what should be interrupt-vs-poll driven */ ++#ifdef CONFIG_FSL_DPA_PIRQ_SLOW ++ irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI | ++ QM_PIRQ_CSCI | QM_PIRQ_CCSCI; ++#endif ++#ifdef CONFIG_FSL_DPA_PIRQ_FAST ++ irq_sources |= QM_PIRQ_DQRI; ++#endif ++ qman_p_irqsource_add(p, irq_sources); ++ pr_info("Qman portal %sinitialised, cpu %d\n", ++ pcfg->public_cfg.is_shared ? "(shared) " : "", ++ pcfg->public_cfg.cpu); ++ } else ++ pr_crit("Qman portal failure on cpu %d\n", ++ pcfg->public_cfg.cpu); ++ return p; ++} ++ ++static void init_slave(int cpu) ++{ ++ struct qman_portal *p; ++ struct cpumask oldmask = current->cpus_allowed; ++ set_cpus_allowed_ptr(current, get_cpu_mask(cpu)); ++ p = qman_create_affine_slave(shared_portals[shared_portals_idx++], cpu); ++ if (!p) ++ pr_err("Qman slave portal failure on cpu %d\n", cpu); ++ else ++ pr_info("Qman portal %sinitialised, cpu %d\n", "(slave) ", cpu); ++ set_cpus_allowed_ptr(current, &oldmask); ++ if (shared_portals_idx >= num_shared_portals) ++ shared_portals_idx = 0; ++} ++ ++static struct cpumask want_unshared __initdata; ++static struct cpumask want_shared __initdata; ++ ++static int __init parse_qportals(char *str) ++{ ++ return parse_portals_bootarg(str, &want_shared, &want_unshared, ++ "qportals"); ++} ++__setup("qportals=", parse_qportals); ++ ++static void qman_portal_update_sdest(const struct qm_portal_config *pcfg, ++ unsigned int cpu) ++{ ++#ifdef CONFIG_FSL_PAMU ++ struct pamu_stash_attribute stash_attr; ++ int ret; ++ ++ if (pcfg->iommu_domain) { ++ stash_attr.cpu = cpu; ++ stash_attr.cache = PAMU_ATTR_CACHE_L1; ++ /* set stash information for the window */ ++ stash_attr.window = 0; ++ ret = iommu_domain_set_attr(pcfg->iommu_domain, ++ DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr); ++ if (ret < 0) { ++ pr_err("Failed to update pamu stash setting\n"); ++ return; ++ } ++ } ++#endif ++#ifdef CONFIG_FSL_QMAN_CONFIG ++ if (qman_set_sdest(pcfg->public_cfg.channel, cpu)) ++ pr_warn("Failed to update portal's stash request queue\n"); ++#endif ++} ++ ++static int qman_offline_cpu(unsigned int cpu) ++{ ++ struct qman_portal *p; ++ const struct qm_portal_config *pcfg; ++ p = (struct qman_portal *)affine_portals[cpu]; ++ if (p) { ++ pcfg = qman_get_qm_portal_config(p); ++ if (pcfg) { ++ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0)); ++ qman_portal_update_sdest(pcfg, 0); ++ } ++ } ++ return 0; ++} ++ ++#ifdef CONFIG_HOTPLUG_CPU ++static int qman_online_cpu(unsigned int cpu) ++{ ++ struct 
qman_portal *p; ++ const struct qm_portal_config *pcfg; ++ p = (struct qman_portal *)affine_portals[cpu]; ++ if (p) { ++ pcfg = qman_get_qm_portal_config(p); ++ if (pcfg) { ++ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu)); ++ qman_portal_update_sdest(pcfg, cpu); ++ } ++ } ++ return 0; ++} ++ ++static int qman_hotplug_cpu_callback(struct notifier_block *nfb, ++ unsigned long action, void *hcpu) ++{ ++ unsigned int cpu = (unsigned long)hcpu; ++ ++ switch (action) { ++ case CPU_ONLINE: ++ case CPU_ONLINE_FROZEN: ++ qman_online_cpu(cpu); ++ break; ++ case CPU_DOWN_PREPARE: ++ case CPU_DOWN_PREPARE_FROZEN: ++ qman_offline_cpu(cpu); ++ default: ++ break; ++ } ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block qman_hotplug_cpu_notifier = { ++ .notifier_call = qman_hotplug_cpu_callback, ++}; ++#endif /* CONFIG_HOTPLUG_CPU */ ++ ++__init int qman_init(void) ++{ ++ struct cpumask slave_cpus; ++ struct cpumask unshared_cpus = *cpu_none_mask; ++ struct cpumask shared_cpus = *cpu_none_mask; ++ LIST_HEAD(unshared_pcfgs); ++ LIST_HEAD(shared_pcfgs); ++ struct device_node *dn; ++ struct qm_portal_config *pcfg; ++ struct qman_portal *p; ++ int cpu, ret; ++ const u32 *clk; ++ struct cpumask offline_cpus; ++ ++ /* Initialise the Qman (CCSR) device */ ++ for_each_compatible_node(dn, NULL, "fsl,qman") { ++ if (!qman_init_ccsr(dn)) ++ pr_info("Qman err interrupt handler present\n"); ++ else ++ pr_err("Qman CCSR setup failed\n"); ++ ++ clk = of_get_property(dn, "clock-frequency", NULL); ++ if (!clk) ++ pr_warn("Can't find Qman clock frequency\n"); ++ else ++ qman_clk = be32_to_cpu(*clk); ++ } ++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP ++ /* Setup lookup table for FQ demux */ ++ ret = qman_setup_fq_lookup_table(get_qman_fqd_size()/64); ++ if (ret) ++ return ret; ++#endif ++ ++ /* Get qman ip revision */ ++ qman_get_ip_revision(dn); ++ if ((qman_ip_rev & 0xff00) >= QMAN_REV30) { ++ qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3; ++ qm_channel_caam = QMAN_CHANNEL_CAAM_REV3; ++ qm_channel_pme = QMAN_CHANNEL_PME_REV3; ++ } ++ ++ if ((qman_ip_rev == QMAN_REV31) && (qman_ip_cfg == QMAN_REV_CFG_2)) ++ qm_channel_dce = QMAN_CHANNEL_DCE_QMANREV312; ++ ++ /* ++ * Parse the ceetm node to get how many ceetm instances are supported ++ * on the current silicon. num_ceetms must be confirmed before portals ++ * are initialised. ++ */ ++ num_ceetms = 0; ++ for_each_compatible_node(dn, NULL, "fsl,qman-ceetm") ++ num_ceetms++; ++ ++ /* Parse pool channels into the SDQCR mask. (Must happen before portals ++ * are initialised.) */ ++ for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") { ++ ret = fsl_pool_channel_range_sdqcr(dn); ++ if (ret) ++ return ret; ++ } ++ ++ memset(affine_portals, 0, sizeof(void *) * num_possible_cpus()); ++ /* Initialise portals. 
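++ * Each available fsl,qman-portal node is parsed into the "unused" list
++ * first, and only then handed out per-cpu according to the qportals=
++ * bootarg parsed above.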
See bman_driver.c for comments */ ++ for_each_compatible_node(dn, NULL, "fsl,qman-portal") { ++ if (!of_device_is_available(dn)) ++ continue; ++ pcfg = parse_pcfg(dn); ++ if (pcfg) { ++ pcfg->public_cfg.pools = pools_sdqcr; ++ list_add_tail(&pcfg->list, &unused_pcfgs); ++ } ++ } ++ for_each_possible_cpu(cpu) { ++ if (cpumask_test_cpu(cpu, &want_shared)) { ++ pcfg = get_pcfg(&unused_pcfgs); ++ if (!pcfg) ++ break; ++ pcfg->public_cfg.cpu = cpu; ++ list_add_tail(&pcfg->list, &shared_pcfgs); ++ cpumask_set_cpu(cpu, &shared_cpus); ++ } ++ if (cpumask_test_cpu(cpu, &want_unshared)) { ++ if (cpumask_test_cpu(cpu, &shared_cpus)) ++ continue; ++ pcfg = get_pcfg(&unused_pcfgs); ++ if (!pcfg) ++ break; ++ pcfg->public_cfg.cpu = cpu; ++ list_add_tail(&pcfg->list, &unshared_pcfgs); ++ cpumask_set_cpu(cpu, &unshared_cpus); ++ } ++ } ++ if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) { ++ for_each_online_cpu(cpu) { ++ pcfg = get_pcfg(&unused_pcfgs); ++ if (!pcfg) ++ break; ++ pcfg->public_cfg.cpu = cpu; ++ list_add_tail(&pcfg->list, &unshared_pcfgs); ++ cpumask_set_cpu(cpu, &unshared_cpus); ++ } ++ } ++ cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus); ++ cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus); ++ if (cpumask_empty(&slave_cpus)) { ++ if (!list_empty(&shared_pcfgs)) { ++ cpumask_or(&unshared_cpus, &unshared_cpus, ++ &shared_cpus); ++ cpumask_clear(&shared_cpus); ++ list_splice_tail(&shared_pcfgs, &unshared_pcfgs); ++ INIT_LIST_HEAD(&shared_pcfgs); ++ } ++ } else { ++ if (list_empty(&shared_pcfgs)) { ++ pcfg = get_pcfg(&unshared_pcfgs); ++ if (!pcfg) { ++ pr_crit("No QMan portals available!\n"); ++ return 0; ++ } ++ cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus); ++ cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus); ++ list_add_tail(&pcfg->list, &shared_pcfgs); ++ } ++ } ++ list_for_each_entry(pcfg, &unshared_pcfgs, list) { ++ pcfg->public_cfg.is_shared = 0; ++ p = init_pcfg(pcfg); ++ if (!p) { ++ pr_crit("Unable to configure portals\n"); ++ return 0; ++ } ++ } ++ list_for_each_entry(pcfg, &shared_pcfgs, list) { ++ pcfg->public_cfg.is_shared = 1; ++ p = init_pcfg(pcfg); ++ if (p) ++ shared_portals[num_shared_portals++] = p; ++ } ++ if (!cpumask_empty(&slave_cpus)) ++ for_each_cpu(cpu, &slave_cpus) ++ init_slave(cpu); ++ pr_info("Qman portals initialised\n"); ++ cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask); ++ for_each_cpu(cpu, &offline_cpus) ++ qman_offline_cpu(cpu); ++#ifdef CONFIG_HOTPLUG_CPU ++ register_hotcpu_notifier(&qman_hotplug_cpu_notifier); ++#endif ++ return 0; ++} ++ ++__init int qman_resource_init(void) ++{ ++ struct device_node *dn; ++ int ret; ++ ++ /* Initialise FQID allocation ranges */ ++ for_each_compatible_node(dn, NULL, "fsl,fqid-range") { ++ ret = fsl_fqid_range_init(dn); ++ if (ret) ++ return ret; ++ } ++ /* Initialise CGRID allocation ranges */ ++ for_each_compatible_node(dn, NULL, "fsl,cgrid-range") { ++ ret = fsl_cgrid_range_init(dn); ++ if (ret) ++ return ret; ++ } ++ /* Parse pool channels into the allocator. (Must happen after portals ++ * are initialised.) 
*/ ++ for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") { ++ ret = fsl_pool_channel_range_init(dn); ++ if (ret) ++ return ret; ++ } ++ ++ /* Parse CEETM */ ++ for_each_compatible_node(dn, NULL, "fsl,qman-ceetm") { ++ ret = fsl_ceetm_init(dn); ++ if (ret) ++ return ret; ++ } ++ return 0; ++} ++ ++#ifdef CONFIG_SUSPEND ++void suspend_unused_qportal(void) ++{ ++ struct qm_portal_config *pcfg; ++ ++ if (list_empty(&unused_pcfgs)) ++ return; ++ ++ list_for_each_entry(pcfg, &unused_pcfgs, list) { ++#ifdef CONFIG_PM_DEBUG ++ pr_info("Need to save qportal %d\n", pcfg->public_cfg.index); ++#endif ++ /* save isdr, disable all via isdr, clear isr */ ++ pcfg->saved_isdr = ++ __raw_readl(pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08); ++ __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] + ++ 0xe08); ++ __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] + ++ 0xe00); ++ } ++ return; ++} ++ ++void resume_unused_qportal(void) ++{ ++ struct qm_portal_config *pcfg; ++ ++ if (list_empty(&unused_pcfgs)) ++ return; ++ ++ list_for_each_entry(pcfg, &unused_pcfgs, list) { ++#ifdef CONFIG_PM_DEBUG ++ pr_info("Need to resume qportal %d\n", pcfg->public_cfg.index); ++#endif ++ /* restore isdr */ ++ __raw_writel(pcfg->saved_isdr, ++ pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08); ++ } ++ return; ++} ++#endif +--- /dev/null ++++ b/drivers/staging/fsl_qbman/qman_high.c +@@ -0,0 +1,5669 @@ ++/* Copyright 2008-2012 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include "qman_low.h" ++ ++/* Compilation constants */ ++#define DQRR_MAXFILL 15 ++#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */ ++#define IRQNAME "QMan portal %d" ++#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */ ++ ++/* Divide 'n' by 'd', rounding down if 'r' is negative, rounding up if it's ++ * positive, and rounding to the closest value if it's zero. NB, this macro ++ * implicitly upgrades parameters to unsigned 64-bit, so feed it with types ++ * that are compatible with this. NB, these arguments should not be expressions ++ * unless it is safe for them to be evaluated multiple times. Eg. do not pass ++ * in "some_value++" as a parameter to the macro! */ ++#define ROUNDING(n, d, r) \ ++ (((r) < 0) ? div64_u64((n), (d)) : \ ++ (((r) > 0) ? div64_u64(((n) + (d) - 1), (d)) : \ ++ div64_u64(((n) + ((d) / 2)), (d)))) ++ ++/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about ++ * inter-processor locking only. Note, FQLOCK() is always called either under a ++ * local_irq_save() or from interrupt context - hence there's no need for irq ++ * protection (and indeed, attempting to nest irq-protection doesn't work, as ++ * the "irq en/disable" machinery isn't recursive...). */ ++#define FQLOCK(fq) \ ++ do { \ ++ struct qman_fq *__fq478 = (fq); \ ++ if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \ ++ spin_lock(&__fq478->fqlock); \ ++ } while (0) ++#define FQUNLOCK(fq) \ ++ do { \ ++ struct qman_fq *__fq478 = (fq); \ ++ if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \ ++ spin_unlock(&__fq478->fqlock); \ ++ } while (0) ++ ++static inline void fq_set(struct qman_fq *fq, u32 mask) ++{ ++ set_bits(mask, &fq->flags); ++} ++static inline void fq_clear(struct qman_fq *fq, u32 mask) ++{ ++ clear_bits(mask, &fq->flags); ++} ++static inline int fq_isset(struct qman_fq *fq, u32 mask) ++{ ++ return fq->flags & mask; ++} ++static inline int fq_isclear(struct qman_fq *fq, u32 mask) ++{ ++ return !(fq->flags & mask); ++} ++ ++struct qman_portal { ++ struct qm_portal p; ++ unsigned long bits; /* PORTAL_BITS_*** - dynamic, strictly internal */ ++ unsigned long irq_sources; ++ u32 use_eqcr_ci_stashing; ++ u32 slowpoll; /* only used when interrupts are off */ ++ struct qman_fq *vdqcr_owned; /* only 1 volatile dequeue at a time */ ++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC ++ struct qman_fq *eqci_owned; /* only 1 enqueue WAIT_SYNC at a time */ ++#endif ++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE ++ raw_spinlock_t sharing_lock; /* only used if is_shared */ ++ int is_shared; ++ struct qman_portal *sharing_redirect; ++#endif ++ u32 sdqcr; ++ int dqrr_disable_ref; ++ /* A portal-specific handler for DCP ERNs. If this is NULL, the global ++ * handler is called instead. */ ++ qman_cb_dc_ern cb_dc_ern; ++ /* When the cpu-affine portal is activated, this is non-NULL */ ++ const struct qm_portal_config *config; ++ /* This is needed for providing a non-NULL device to dma_map_***() */ ++ struct platform_device *pdev; ++ struct dpa_rbtree retire_table; ++ char irqname[MAX_IRQNAME]; ++ /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */ ++ struct qman_cgrs *cgrs; ++ /* linked-list of CSCN handlers. */ ++ struct list_head cgr_cbs; ++ /* list lock */ ++ spinlock_t cgr_lock; ++ /* 2-element array. ccgrs[0] is mask, ccgrs[1] is snapshot. */ ++ struct qman_ccgrs *ccgrs[QMAN_CEETM_MAX]; ++ /* 256-element array, each is a linked-list of CCSCN handlers. 
*/ ++ struct list_head ccgr_cbs[QMAN_CEETM_MAX]; ++ /* list lock */ ++ spinlock_t ccgr_lock; ++ /* track if memory was allocated by the driver */ ++ u8 alloced; ++ /* power management data */ ++ u32 save_isdr; ++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ ++ /* Keep a shadow copy of the DQRR on LE systems as the SW needs to ++ * do byte swaps of DQRR read only memory. First entry must be aligned ++ * to 2 ** 10 to ensure DQRR index calculations based shadow copy ++ * address (6 bits for address shift + 4 bits for the DQRR size). ++ */ ++ struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE] __aligned(1024); ++#endif ++}; ++ ++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE ++#define PORTAL_IRQ_LOCK(p, irqflags) \ ++ do { \ ++ if ((p)->is_shared) \ ++ raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \ ++ else \ ++ local_irq_save(irqflags); \ ++ } while (0) ++#define PORTAL_IRQ_UNLOCK(p, irqflags) \ ++ do { \ ++ if ((p)->is_shared) \ ++ raw_spin_unlock_irqrestore(&(p)->sharing_lock, \ ++ irqflags); \ ++ else \ ++ local_irq_restore(irqflags); \ ++ } while (0) ++#else ++#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags) ++#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags) ++#endif ++ ++/* Global handler for DCP ERNs. Used when the portal receiving the message does ++ * not have a portal-specific handler. */ ++static qman_cb_dc_ern cb_dc_ern; ++ ++static cpumask_t affine_mask; ++static DEFINE_SPINLOCK(affine_mask_lock); ++static u16 affine_channels[NR_CPUS]; ++static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal); ++void *affine_portals[NR_CPUS]; ++ ++/* "raw" gets the cpu-local struct whether it's a redirect or not. */ ++static inline struct qman_portal *get_raw_affine_portal(void) ++{ ++ return &get_cpu_var(qman_affine_portal); ++} ++/* For ops that can redirect, this obtains the portal to use */ ++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE ++static inline struct qman_portal *get_affine_portal(void) ++{ ++ struct qman_portal *p = get_raw_affine_portal(); ++ if (p->sharing_redirect) ++ return p->sharing_redirect; ++ return p; ++} ++#else ++#define get_affine_portal() get_raw_affine_portal() ++#endif ++/* For every "get", there must be a "put" */ ++static inline void put_affine_portal(void) ++{ ++ put_cpu_var(qman_affine_portal); ++} ++/* Exception: poll functions assume the caller is cpu-affine and in no risk of ++ * re-entrance, which are the two reasons we usually use the get/put_cpu_var() ++ * semantic - ie. to disable pre-emption. Some use-cases expect the execution ++ * context to remain as non-atomic during poll-triggered callbacks as it was ++ * when the poll API was first called (eg. NAPI), so we go out of our way in ++ * this case to not disable pre-emption. */ ++static inline struct qman_portal *get_poll_portal(void) ++{ ++ return &get_cpu_var(qman_affine_portal); ++} ++#define put_poll_portal() ++ ++/* This gives a FQID->FQ lookup to cover the fact that we can't directly demux ++ * retirement notifications (the fact they are sometimes h/w-consumed means that ++ * contextB isn't always a s/w demux - and as we can't know which case it is ++ * when looking at the notification, we have to use the slow lookup for all of ++ * them). NB, it's possible to have multiple FQ objects refer to the same FQID ++ * (though at most one of them should be the consumer), so this table isn't for ++ * all FQs - FQs are added when retirement commands are issued, and removed when ++ * they complete, which also massively reduces the size of this table. 
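++ * (A h/w-consumed FQ's contextB belongs to the consumer - eg. a CAAM or
++ * PME dedicated channel - so it can't be trusted as a s/w demux pointer,
++ * which is why the tree below is keyed by FQID instead.)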
*/ ++IMPLEMENT_DPA_RBTREE(fqtree, struct qman_fq, node, fqid); ++ ++/* This is what everything can wait on, even if it migrates to a different cpu ++ * to the one whose affine portal it is waiting on. */ ++static DECLARE_WAIT_QUEUE_HEAD(affine_queue); ++ ++static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq) ++{ ++ int ret = fqtree_push(&p->retire_table, fq); ++ if (ret) ++ pr_err("ERROR: double FQ-retirement %d\n", fq->fqid); ++ return ret; ++} ++ ++static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq) ++{ ++ fqtree_del(&p->retire_table, fq); ++} ++ ++static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid) ++{ ++ return fqtree_find(&p->retire_table, fqid); ++} ++ ++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP ++static void **qman_fq_lookup_table; ++static size_t qman_fq_lookup_table_size; ++ ++int qman_setup_fq_lookup_table(size_t num_entries) ++{ ++ num_entries++; ++ /* Allocate 1 more entry since the first entry is not used */ ++ qman_fq_lookup_table = vzalloc((num_entries * sizeof(void *))); ++ if (!qman_fq_lookup_table) { ++ pr_err("QMan: Could not allocate fq lookup table\n"); ++ return -ENOMEM; ++ } ++ qman_fq_lookup_table_size = num_entries; ++ pr_info("QMan: Allocated lookup table at %p, entry count %lu\n", ++ qman_fq_lookup_table, ++ (unsigned long)qman_fq_lookup_table_size); ++ return 0; ++} ++ ++/* global structure that maintains fq object mapping */ ++static DEFINE_SPINLOCK(fq_hash_table_lock); ++ ++static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq) ++{ ++ u32 i; ++ ++ spin_lock(&fq_hash_table_lock); ++ /* Can't use index zero because this has special meaning ++ * in context_b field. */ ++ for (i = 1; i < qman_fq_lookup_table_size; i++) { ++ if (qman_fq_lookup_table[i] == NULL) { ++ *entry = i; ++ qman_fq_lookup_table[i] = fq; ++ spin_unlock(&fq_hash_table_lock); ++ return 0; ++ } ++ } ++ spin_unlock(&fq_hash_table_lock); ++ return -ENOMEM; ++} ++ ++static void clear_fq_table_entry(u32 entry) ++{ ++ spin_lock(&fq_hash_table_lock); ++ BUG_ON(entry >= qman_fq_lookup_table_size); ++ qman_fq_lookup_table[entry] = NULL; ++ spin_unlock(&fq_hash_table_lock); ++} ++ ++static inline struct qman_fq *get_fq_table_entry(u32 entry) ++{ ++ BUG_ON(entry >= qman_fq_lookup_table_size); ++ return qman_fq_lookup_table[entry]; ++} ++#endif ++ ++static inline void cpu_to_hw_fqd(struct qm_fqd *fqd) ++{ ++ /* Byteswap the FQD to HW format */ ++ fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl); ++ fqd->dest_wq = cpu_to_be16(fqd->dest_wq); ++ fqd->ics_cred = cpu_to_be16(fqd->ics_cred); ++ fqd->context_b = cpu_to_be32(fqd->context_b); ++ fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque); ++} ++ ++static inline void hw_fqd_to_cpu(struct qm_fqd *fqd) ++{ ++ /* Byteswap the FQD to CPU format */ ++ fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl); ++ fqd->dest_wq = be16_to_cpu(fqd->dest_wq); ++ fqd->ics_cred = be16_to_cpu(fqd->ics_cred); ++ fqd->context_b = be32_to_cpu(fqd->context_b); ++ fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque); ++} ++ ++/* Swap a 40 bit address */ ++static inline u64 cpu_to_be40(u64 in) ++{ ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ return in; ++#else ++ u64 out = 0; ++ u8 *p = (u8 *) &out; ++ p[0] = in >> 32; ++ p[1] = in >> 24; ++ p[2] = in >> 16; ++ p[3] = in >> 8; ++ p[4] = in >> 0; ++ return out; ++#endif ++} ++static inline u64 be40_to_cpu(u64 in) ++{ ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ return in; ++#else ++ u64 out = 0; ++ u8 *pout = (u8 *) &out; ++ u8 *pin = (u8 *) &in; ++ pout[0] = 
pin[4]; ++ pout[1] = pin[3]; ++ pout[2] = pin[2]; ++ pout[3] = pin[1]; ++ pout[4] = pin[0]; ++ return out; ++#endif ++} ++ ++/* Swap a 24 bit value */ ++static inline u32 cpu_to_be24(u32 in) ++{ ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ return in; ++#else ++ u32 out = 0; ++ u8 *p = (u8 *) &out; ++ p[0] = in >> 16; ++ p[1] = in >> 8; ++ p[2] = in >> 0; ++ return out; ++#endif ++} ++ ++static inline u32 be24_to_cpu(u32 in) ++{ ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ return in; ++#else ++ u32 out = 0; ++ u8 *pout = (u8 *) &out; ++ u8 *pin = (u8 *) &in; ++ pout[0] = pin[2]; ++ pout[1] = pin[1]; ++ pout[2] = pin[0]; ++ return out; ++#endif ++} ++ ++static inline u64 be48_to_cpu(u64 in) ++{ ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ return in; ++#else ++ u64 out = 0; ++ u8 *pout = (u8 *) &out; ++ u8 *pin = (u8 *) &in; ++ ++ pout[0] = pin[5]; ++ pout[1] = pin[4]; ++ pout[2] = pin[3]; ++ pout[3] = pin[2]; ++ pout[4] = pin[1]; ++ pout[5] = pin[0]; ++ return out; ++#endif ++} ++static inline void cpu_to_hw_fd(struct qm_fd *fd) ++{ ++ fd->opaque_addr = cpu_to_be64(fd->opaque_addr); ++ fd->status = cpu_to_be32(fd->status); ++ fd->opaque = cpu_to_be32(fd->opaque); ++} ++ ++static inline void hw_fd_to_cpu(struct qm_fd *fd) ++{ ++ fd->opaque_addr = be64_to_cpu(fd->opaque_addr); ++ fd->status = be32_to_cpu(fd->status); ++ fd->opaque = be32_to_cpu(fd->opaque); ++} ++ ++static inline void hw_cq_query_to_cpu(struct qm_mcr_ceetm_cq_query *cq_query) ++{ ++ cq_query->ccgid = be16_to_cpu(cq_query->ccgid); ++ cq_query->state = be16_to_cpu(cq_query->state); ++ cq_query->pfdr_hptr = be24_to_cpu(cq_query->pfdr_hptr); ++ cq_query->pfdr_tptr = be24_to_cpu(cq_query->pfdr_tptr); ++ cq_query->od1_xsfdr = be16_to_cpu(cq_query->od1_xsfdr); ++ cq_query->od2_xsfdr = be16_to_cpu(cq_query->od2_xsfdr); ++ cq_query->od3_xsfdr = be16_to_cpu(cq_query->od3_xsfdr); ++ cq_query->od4_xsfdr = be16_to_cpu(cq_query->od4_xsfdr); ++ cq_query->od5_xsfdr = be16_to_cpu(cq_query->od5_xsfdr); ++ cq_query->od6_xsfdr = be16_to_cpu(cq_query->od6_xsfdr); ++ cq_query->ra1_xsfdr = be16_to_cpu(cq_query->ra1_xsfdr); ++ cq_query->ra2_xsfdr = be16_to_cpu(cq_query->ra2_xsfdr); ++ cq_query->frm_cnt = be24_to_cpu(cq_query->frm_cnt); ++} ++ ++static inline void hw_ccgr_query_to_cpu(struct qm_mcr_ceetm_ccgr_query *ccgr_q) ++{ ++ int i; ++ ++ ccgr_q->cm_query.cs_thres.hword = ++ be16_to_cpu(ccgr_q->cm_query.cs_thres.hword); ++ ccgr_q->cm_query.cs_thres_x.hword = ++ be16_to_cpu(ccgr_q->cm_query.cs_thres_x.hword); ++ ccgr_q->cm_query.td_thres.hword = ++ be16_to_cpu(ccgr_q->cm_query.td_thres.hword); ++ ccgr_q->cm_query.wr_parm_g.word = ++ be32_to_cpu(ccgr_q->cm_query.wr_parm_g.word); ++ ccgr_q->cm_query.wr_parm_y.word = ++ be32_to_cpu(ccgr_q->cm_query.wr_parm_y.word); ++ ccgr_q->cm_query.wr_parm_r.word = ++ be32_to_cpu(ccgr_q->cm_query.wr_parm_r.word); ++ ccgr_q->cm_query.cscn_targ_dcp = ++ be16_to_cpu(ccgr_q->cm_query.cscn_targ_dcp); ++ ccgr_q->cm_query.i_cnt = be40_to_cpu(ccgr_q->cm_query.i_cnt); ++ ccgr_q->cm_query.a_cnt = be40_to_cpu(ccgr_q->cm_query.a_cnt); ++ for (i = 0; i < ARRAY_SIZE(ccgr_q->cm_query.cscn_targ_swp); i++) ++ ccgr_q->cm_query.cscn_targ_swp[i] = ++ be32_to_cpu(ccgr_q->cm_query.cscn_targ_swp[i]); ++} ++ ++/* In the case that slow- and fast-path handling are both done by qman_poll() ++ * (ie. because there is no interrupt handling), we ought to balance how often ++ * we do the fast-path poll versus the slow-path poll. We'll use two decrementer ++ * sources, so we call the fast poll 'n' times before calling the slow poll ++ * once. 
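++ * As an illustrative sketch (not code in this driver), a purely polled
++ * consumer needs nothing more than:
++ *
++ *	while (!done)		(where 'done' is the caller's own exit flag)
++ *		qman_poll();
++ *
++ * and the decrementers below decide which of those calls do slow work.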
The idle decrementer constant is used when the last slow-poll detected ++ * no work to do, and the busy decrementer constant when the last slow-poll had ++ * work to do. */ ++#define SLOW_POLL_IDLE 1000 ++#define SLOW_POLL_BUSY 10 ++static u32 __poll_portal_slow(struct qman_portal *p, u32 is); ++static inline unsigned int __poll_portal_fast(struct qman_portal *p, ++ unsigned int poll_limit); ++ ++/* Portal interrupt handler */ ++static irqreturn_t portal_isr(__always_unused int irq, void *ptr) ++{ ++ struct qman_portal *p = ptr; ++ /* ++ * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because ++ * it could race against a Query Congestion State command also given ++ * as part of the handling of this interrupt source. We mustn't ++ * clear it a second time in this top-level function. ++ */ ++ u32 clear = QM_DQAVAIL_MASK | (p->irq_sources & ++ ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI)); ++ u32 is = qm_isr_status_read(&p->p) & p->irq_sources; ++ /* DQRR-handling if it's interrupt-driven */ ++ if (is & QM_PIRQ_DQRI) ++ __poll_portal_fast(p, CONFIG_FSL_QMAN_POLL_LIMIT); ++ /* Handling of anything else that's interrupt-driven */ ++ clear |= __poll_portal_slow(p, is); ++ qm_isr_status_clear(&p->p, clear); ++ return IRQ_HANDLED; ++} ++ ++/* This inner version is used privately by qman_create_affine_portal(), as well ++ * as by the exported qman_stop_dequeues(). */ ++static inline void qman_stop_dequeues_ex(struct qman_portal *p) ++{ ++ unsigned long irqflags __maybe_unused; ++ PORTAL_IRQ_LOCK(p, irqflags); ++ if (!(p->dqrr_disable_ref++)) ++ qm_dqrr_set_maxfill(&p->p, 0); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++} ++ ++static int drain_mr_fqrni(struct qm_portal *p) ++{ ++ const struct qm_mr_entry *msg; ++loop: ++ msg = qm_mr_current(p); ++ if (!msg) { ++ /* if MR was full and h/w had other FQRNI entries to produce, we ++ * need to allow it time to produce those entries once the ++ * existing entries are consumed. A worst-case situation ++ * (fully-loaded system) means h/w sequencers may have to do 3-4 ++ * other things before servicing the portal's MR pump, each of ++ * which (if slow) may take ~50 qman cycles (which is ~200 ++ * processor cycles). So rounding up and then multiplying this ++ * worst-case estimate by a factor of 10, just to be ++ * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume ++ * one entry at a time, so h/w has an opportunity to produce new ++ * entries well before the ring has been fully consumed, so ++ * we're being *really* paranoid here. 
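++ * (Concretely: 4 tasks x ~200 processor cycles is ~800 cycles, and the
++ * factor-of-10 margin rounds that up to the 10,000-tick bound used in
++ * the timebase loop below.)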
*/ ++ u64 now, then = mfatb(); ++ do { ++ now = mfatb(); ++ } while ((then + 10000) > now); ++ msg = qm_mr_current(p); ++ if (!msg) ++ return 0; ++ } ++ if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) { ++ /* We aren't draining anything but FQRNIs */ ++ pr_err("QMan found verb 0x%x in MR\n", msg->verb); ++ return -1; ++ } ++ qm_mr_next(p); ++ qm_mr_cci_consume(p, 1); ++ goto loop; ++} ++ ++#ifdef CONFIG_SUSPEND ++static int _qman_portal_suspend_noirq(struct device *dev) ++{ ++ struct qman_portal *p = (struct qman_portal *)dev->platform_data; ++#ifdef CONFIG_PM_DEBUG ++ struct platform_device *pdev = to_platform_device(dev); ++#endif ++ ++ p->save_isdr = qm_isr_disable_read(&p->p); ++ qm_isr_disable_write(&p->p, 0xffffffff); ++ qm_isr_status_clear(&p->p, 0xffffffff); ++#ifdef CONFIG_PM_DEBUG ++ pr_info("Suspend for %s\n", pdev->name); ++#endif ++ return 0; ++} ++ ++static int _qman_portal_resume_noirq(struct device *dev) ++{ ++ struct qman_portal *p = (struct qman_portal *)dev->platform_data; ++ ++ /* restore isdr */ ++ qm_isr_disable_write(&p->p, p->save_isdr); ++ return 0; ++} ++#else ++#define _qman_portal_suspend_noirq NULL ++#define _qman_portal_resume_noirq NULL ++#endif ++ ++struct dev_pm_domain qman_portal_device_pm_domain = { ++ .ops = { ++ USE_PLATFORM_PM_SLEEP_OPS ++ .suspend_noirq = _qman_portal_suspend_noirq, ++ .resume_noirq = _qman_portal_resume_noirq, ++ } ++}; ++ ++struct qman_portal *qman_create_portal( ++ struct qman_portal *portal, ++ const struct qm_portal_config *config, ++ const struct qman_cgrs *cgrs) ++{ ++ struct qm_portal *__p; ++ char buf[16]; ++ int ret; ++ u32 isdr; ++ ++ if (!portal) { ++ portal = kmalloc(sizeof(*portal), GFP_KERNEL); ++ if (!portal) ++ return portal; ++ portal->alloced = 1; ++ } else ++ portal->alloced = 0; ++ ++ __p = &portal->p; ++ ++#if (defined CONFIG_PPC || defined CONFIG_PPC64) && defined CONFIG_FSL_PAMU ++ /* PAMU is required for stashing */ ++ portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? ++ 1 : 0); ++#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64) ++ portal->use_eqcr_ci_stashing = 1; ++#else ++ portal->use_eqcr_ci_stashing = 0; ++#endif ++ ++ /* prep the low-level portal struct with the mapped addresses from the ++ * config, everything that follows depends on it and "config" is more ++ * for (de)reference... */ ++ __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE]; ++ __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI]; ++ /* ++ * If CI-stashing is used, the current defaults use a threshold of 3, ++ * and stash with higher-than-DQRR priority. ++ */ ++ if (qm_eqcr_init(__p, qm_eqcr_pvb, ++ portal->use_eqcr_ci_stashing ? 
3 : 0, 1)) { ++ pr_err("Qman EQCR initialisation failed\n"); ++ goto fail_eqcr; ++ } ++ if (qm_dqrr_init(__p, config, qm_dqrr_dpush, qm_dqrr_pvb, ++ qm_dqrr_cdc, DQRR_MAXFILL)) { ++ pr_err("Qman DQRR initialisation failed\n"); ++ goto fail_dqrr; ++ } ++ if (qm_mr_init(__p, qm_mr_pvb, qm_mr_cci)) { ++ pr_err("Qman MR initialisation failed\n"); ++ goto fail_mr; ++ } ++ if (qm_mc_init(__p)) { ++ pr_err("Qman MC initialisation failed\n"); ++ goto fail_mc; ++ } ++ if (qm_isr_init(__p)) { ++ pr_err("Qman ISR initialisation failed\n"); ++ goto fail_isr; ++ } ++ /* static interrupt-gating controls */ ++ qm_dqrr_set_ithresh(__p, CONFIG_FSL_QMAN_PIRQ_DQRR_ITHRESH); ++ qm_mr_set_ithresh(__p, CONFIG_FSL_QMAN_PIRQ_MR_ITHRESH); ++ qm_isr_set_iperiod(__p, CONFIG_FSL_QMAN_PIRQ_IPERIOD); ++ portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL); ++ if (!portal->cgrs) ++ goto fail_cgrs; ++ /* initial snapshot is no-depletion */ ++ qman_cgrs_init(&portal->cgrs[1]); ++ if (cgrs) ++ portal->cgrs[0] = *cgrs; ++ else ++ /* if the given mask is NULL, assume all CGRs can be seen */ ++ qman_cgrs_fill(&portal->cgrs[0]); ++ INIT_LIST_HEAD(&portal->cgr_cbs); ++ spin_lock_init(&portal->cgr_lock); ++ if (num_ceetms) { ++ for (ret = 0; ret < num_ceetms; ret++) { ++ portal->ccgrs[ret] = kmalloc(2 * ++ sizeof(struct qman_ccgrs), GFP_KERNEL); ++ if (!portal->ccgrs[ret]) ++ goto fail_ccgrs; ++ qman_ccgrs_init(&portal->ccgrs[ret][1]); ++ qman_ccgrs_fill(&portal->ccgrs[ret][0]); ++ INIT_LIST_HEAD(&portal->ccgr_cbs[ret]); ++ } ++ } ++ spin_lock_init(&portal->ccgr_lock); ++ portal->bits = 0; ++ portal->slowpoll = 0; ++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC ++ portal->eqci_owned = NULL; ++#endif ++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE ++ raw_spin_lock_init(&portal->sharing_lock); ++ portal->is_shared = config->public_cfg.is_shared; ++ portal->sharing_redirect = NULL; ++#endif ++ portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 | ++ QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS | ++ QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED; ++ portal->dqrr_disable_ref = 0; ++ portal->cb_dc_ern = NULL; ++ sprintf(buf, "qportal-%d", config->public_cfg.channel); ++ portal->pdev = platform_device_alloc(buf, -1); ++ if (!portal->pdev) { ++ pr_err("qman_portal - platform_device_alloc() failed\n"); ++ goto fail_devalloc; ++ } ++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) ++ portal->pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40); ++ portal->pdev->dev.dma_mask = &portal->pdev->dev.coherent_dma_mask; ++#else ++ if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40))) { ++ pr_err("qman_portal - dma_set_mask() failed\n"); ++ goto fail_devadd; ++ } ++#endif ++ portal->pdev->dev.pm_domain = &qman_portal_device_pm_domain; ++ portal->pdev->dev.platform_data = portal; ++ ret = platform_device_add(portal->pdev); ++ if (ret) { ++ pr_err("qman_portal - platform_device_add() failed\n"); ++ goto fail_devadd; ++ } ++ dpa_rbtree_init(&portal->retire_table); ++ isdr = 0xffffffff; ++ qm_isr_disable_write(__p, isdr); ++ portal->irq_sources = 0; ++ qm_isr_enable_write(__p, portal->irq_sources); ++ qm_isr_status_clear(__p, 0xffffffff); ++ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu); ++ if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname, ++ portal)) { ++ pr_err("request_irq() failed\n"); ++ goto fail_irq; ++ } ++ if ((config->public_cfg.cpu != -1) && ++ irq_can_set_affinity(config->public_cfg.irq) && ++ irq_set_affinity(config->public_cfg.irq, ++ cpumask_of(config->public_cfg.cpu))) { ++ 
pr_err("irq_set_affinity() failed\n"); ++ goto fail_affinity; ++ } ++ ++ /* Need EQCR to be empty before continuing */ ++ isdr ^= QM_PIRQ_EQCI; ++ qm_isr_disable_write(__p, isdr); ++ ret = qm_eqcr_get_fill(__p); ++ if (ret) { ++ pr_err("Qman EQCR unclean\n"); ++ goto fail_eqcr_empty; ++ } ++ isdr ^= (QM_PIRQ_DQRI | QM_PIRQ_MRI); ++ qm_isr_disable_write(__p, isdr); ++ if (qm_dqrr_current(__p) != NULL) { ++ pr_err("Qman DQRR unclean\n"); ++ qm_dqrr_cdc_consume_n(__p, 0xffff); ++ } ++ if (qm_mr_current(__p) != NULL) { ++ /* special handling, drain just in case it's a few FQRNIs */ ++ if (drain_mr_fqrni(__p)) { ++ const struct qm_mr_entry *e = qm_mr_current(__p); ++ /* ++ * Message ring cannot be empty, so no need to check ++ * that qm_mr_current() returned successfully ++ */ ++ pr_err("Qman MR unclean, MR VERB 0x%x, rc 0x%x, addr 0x%x\n", ++ e->verb, e->ern.rc, e->ern.fd.addr_lo); ++ goto fail_dqrr_mr_empty; ++ } ++ } ++ /* Success */ ++ portal->config = config; ++ qm_isr_disable_write(__p, 0); ++ qm_isr_uninhibit(__p); ++ /* Write a sane SDQCR */ ++ qm_dqrr_sdqcr_set(__p, portal->sdqcr); ++ return portal; ++fail_dqrr_mr_empty: ++fail_eqcr_empty: ++fail_affinity: ++ free_irq(config->public_cfg.irq, portal); ++fail_irq: ++ platform_device_del(portal->pdev); ++fail_devadd: ++ platform_device_put(portal->pdev); ++fail_devalloc: ++ if (num_ceetms) ++ for (ret = 0; ret < num_ceetms; ret++) ++ kfree(portal->ccgrs[ret]); ++fail_ccgrs: ++ kfree(portal->cgrs); ++fail_cgrs: ++ qm_isr_finish(__p); ++fail_isr: ++ qm_mc_finish(__p); ++fail_mc: ++ qm_mr_finish(__p); ++fail_mr: ++ qm_dqrr_finish(__p); ++fail_dqrr: ++ qm_eqcr_finish(__p); ++fail_eqcr: ++ if (portal->alloced) ++ kfree(portal); ++ return NULL; ++} ++ ++struct qman_portal *qman_create_affine_portal( ++ const struct qm_portal_config *config, ++ const struct qman_cgrs *cgrs) ++{ ++ struct qman_portal *res; ++ struct qman_portal *portal; ++ ++ portal = &per_cpu(qman_affine_portal, config->public_cfg.cpu); ++ res = qman_create_portal(portal, config, cgrs); ++ if (res) { ++ spin_lock(&affine_mask_lock); ++ cpumask_set_cpu(config->public_cfg.cpu, &affine_mask); ++ affine_channels[config->public_cfg.cpu] = ++ config->public_cfg.channel; ++ affine_portals[config->public_cfg.cpu] = portal; ++ spin_unlock(&affine_mask_lock); ++ } ++ return res; ++} ++ ++/* These checks are BUG_ON()s because the driver is already supposed to avoid ++ * these cases. */ ++struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect, ++ int cpu) ++{ ++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE ++ struct qman_portal *p; ++ p = &per_cpu(qman_affine_portal, cpu); ++ /* Check that we don't already have our own portal */ ++ BUG_ON(p->config); ++ /* Check that we aren't already slaving to another portal */ ++ BUG_ON(p->is_shared); ++ /* Check that 'redirect' is prepared to have us */ ++ BUG_ON(!redirect->config->public_cfg.is_shared); ++ /* These are the only elements to initialise when redirecting */ ++ p->irq_sources = 0; ++ p->sharing_redirect = redirect; ++ affine_portals[cpu] = p; ++ return p; ++#else ++ BUG(); ++ return NULL; ++#endif ++} ++ ++void qman_destroy_portal(struct qman_portal *qm) ++{ ++ const struct qm_portal_config *pcfg; ++ int i; ++ ++ /* Stop dequeues on the portal */ ++ qm_dqrr_sdqcr_set(&qm->p, 0); ++ ++ /* NB we do this to "quiesce" EQCR. If we add enqueue-completions or ++ * something related to QM_PIRQ_EQCI, this may need fixing. 
++ * Also, due to the prefetching model used for CI updates in the enqueue ++ * path, this update will only invalidate the CI cacheline *after* ++ * working on it, so we need to call this twice to ensure a full update ++ * irrespective of where the enqueue processing was at when the teardown ++ * began. */ ++ qm_eqcr_cce_update(&qm->p); ++ qm_eqcr_cce_update(&qm->p); ++ pcfg = qm->config; ++ ++ free_irq(pcfg->public_cfg.irq, qm); ++ ++ kfree(qm->cgrs); ++ if (num_ceetms) ++ for (i = 0; i < num_ceetms; i++) ++ kfree(qm->ccgrs[i]); ++ qm_isr_finish(&qm->p); ++ qm_mc_finish(&qm->p); ++ qm_mr_finish(&qm->p); ++ qm_dqrr_finish(&qm->p); ++ qm_eqcr_finish(&qm->p); ++ ++ platform_device_del(qm->pdev); ++ platform_device_put(qm->pdev); ++ ++ qm->config = NULL; ++ if (qm->alloced) ++ kfree(qm); ++} ++ ++const struct qm_portal_config *qman_destroy_affine_portal(void) ++{ ++ /* We don't want to redirect if we're a slave, use "raw" */ ++ struct qman_portal *qm = get_raw_affine_portal(); ++ const struct qm_portal_config *pcfg; ++ int cpu; ++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE ++ if (qm->sharing_redirect) { ++ qm->sharing_redirect = NULL; ++ put_affine_portal(); ++ return NULL; ++ } ++ qm->is_shared = 0; ++#endif ++ pcfg = qm->config; ++ cpu = pcfg->public_cfg.cpu; ++ ++ qman_destroy_portal(qm); ++ ++ spin_lock(&affine_mask_lock); ++ cpumask_clear_cpu(cpu, &affine_mask); ++ spin_unlock(&affine_mask_lock); ++ put_affine_portal(); ++ return pcfg; ++} ++ ++const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal *p) ++{ ++ return &p->config->public_cfg; ++} ++EXPORT_SYMBOL(qman_p_get_portal_config); ++ ++const struct qman_portal_config *qman_get_portal_config(void) ++{ ++ struct qman_portal *p = get_affine_portal(); ++ const struct qman_portal_config *ret = qman_p_get_portal_config(p); ++ put_affine_portal(); ++ return ret; ++} ++EXPORT_SYMBOL(qman_get_portal_config); ++ ++/* Inline helper to reduce nesting in __poll_portal_slow() */ ++static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq, ++ const struct qm_mr_entry *msg, u8 verb) ++{ ++ FQLOCK(fq); ++ switch (verb) { ++ case QM_MR_VERB_FQRL: ++ DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL)); ++ fq_clear(fq, QMAN_FQ_STATE_ORL); ++ table_del_fq(p, fq); ++ break; ++ case QM_MR_VERB_FQRN: ++ DPA_ASSERT((fq->state == qman_fq_state_parked) || ++ (fq->state == qman_fq_state_sched)); ++ DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING)); ++ fq_clear(fq, QMAN_FQ_STATE_CHANGING); ++ if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY) ++ fq_set(fq, QMAN_FQ_STATE_NE); ++ if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT) ++ fq_set(fq, QMAN_FQ_STATE_ORL); ++ else ++ table_del_fq(p, fq); ++ fq->state = qman_fq_state_retired; ++ break; ++ case QM_MR_VERB_FQPN: ++ DPA_ASSERT(fq->state == qman_fq_state_sched); ++ DPA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING)); ++ fq->state = qman_fq_state_parked; ++ } ++ FQUNLOCK(fq); ++} ++ ++static u32 __poll_portal_slow(struct qman_portal *p, u32 is) ++{ ++ const struct qm_mr_entry *msg; ++ struct qm_mr_entry swapped_msg; ++ int k; ++ ++ if (is & QM_PIRQ_CSCI) { ++ struct qman_cgrs rr, c; ++ struct qm_mc_result *mcr; ++ struct qman_cgr *cgr; ++ unsigned long irqflags __maybe_unused; ++ ++ spin_lock_irqsave(&p->cgr_lock, irqflags); ++ /* ++ * The CSCI bit must be cleared _before_ issuing the ++ * Query Congestion State command, to ensure that a long ++ * CGR State Change callback cannot miss an intervening ++ * state change. 
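++ * (Were the order reversed, a CGR changing state between the query and
++ * the late clear would have its re-asserted CSCI wiped without the
++ * callbacks below ever seeing that delta.)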
++ */ ++ qm_isr_status_clear(&p->p, QM_PIRQ_CSCI); ++ qm_mc_start(&p->p); ++ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ for (k = 0; k < 8; k++) ++ mcr->querycongestion.state.__state[k] = be32_to_cpu( ++ mcr->querycongestion.state.__state[k]); ++ /* mask out the ones I'm not interested in */ ++ qman_cgrs_and(&rr, (const struct qman_cgrs *) ++ &mcr->querycongestion.state, &p->cgrs[0]); ++ /* check previous snapshot for delta, enter/exit congestion */ ++ qman_cgrs_xor(&c, &rr, &p->cgrs[1]); ++ /* update snapshot */ ++ qman_cgrs_cp(&p->cgrs[1], &rr); ++ /* Invoke callback */ ++ list_for_each_entry(cgr, &p->cgr_cbs, node) ++ if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid)) ++ cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid)); ++ spin_unlock_irqrestore(&p->cgr_lock, irqflags); ++ } ++ if (is & QM_PIRQ_CCSCI) { ++ struct qman_ccgrs rr, c, congestion_result; ++ struct qm_mc_result *mcr; ++ struct qm_mc_command *mcc; ++ struct qm_ceetm_ccg *ccg; ++ unsigned long irqflags __maybe_unused; ++ int i, j; ++ ++ spin_lock_irqsave(&p->ccgr_lock, irqflags); ++ /* ++ * The CCSCI bit must be cleared _before_ issuing the ++ * Query Congestion State command, to ensure that a long ++ * CCGR State Change callback cannot miss an intervening ++ * state change. ++ */ ++ qm_isr_status_clear(&p->p, QM_PIRQ_CCSCI); ++ ++ for (i = 0; i < num_ceetms; i++) { ++ for (j = 0; j < 2; j++) { ++ mcc = qm_mc_start(&p->p); ++ mcc->ccgr_query.ccgrid = cpu_to_be16( ++ CEETM_QUERY_CONGESTION_STATE | j); ++ mcc->ccgr_query.dcpid = i; ++ qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ for (k = 0; k < 8; k++) ++ mcr->ccgr_query.congestion_state.state. ++ __state[k] = be32_to_cpu( ++ mcr->ccgr_query. ++ congestion_state.state. ++ __state[k]); ++ congestion_result.q[j] = ++ mcr->ccgr_query.congestion_state.state; ++ } ++ /* mask out the ones I'm not interested in */ ++ qman_ccgrs_and(&rr, &congestion_result, ++ &p->ccgrs[i][0]); ++ /* ++ * check previous snapshot for delta, enter/exit ++ * congestion. 
++ */ ++ qman_ccgrs_xor(&c, &rr, &p->ccgrs[i][1]); ++ /* update snapshot */ ++ qman_ccgrs_cp(&p->ccgrs[i][1], &rr); ++ /* Invoke callback */ ++ list_for_each_entry(ccg, &p->ccgr_cbs[i], cb_node) ++ if (ccg->cb && qman_ccgrs_get(&c, ++ (ccg->parent->idx << 4) | ccg->idx)) ++ ccg->cb(ccg, ccg->cb_ctx, ++ qman_ccgrs_get(&rr, ++ (ccg->parent->idx << 4) ++ | ccg->idx)); ++ } ++ spin_unlock_irqrestore(&p->ccgr_lock, irqflags); ++ } ++ ++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC ++ if (is & QM_PIRQ_EQCI) { ++ unsigned long irqflags; ++ PORTAL_IRQ_LOCK(p, irqflags); ++ p->eqci_owned = NULL; ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ wake_up(&affine_queue); ++ } ++#endif ++ ++ if (is & QM_PIRQ_EQRI) { ++ unsigned long irqflags __maybe_unused; ++ PORTAL_IRQ_LOCK(p, irqflags); ++ qm_eqcr_cce_update(&p->p); ++ qm_eqcr_set_ithresh(&p->p, 0); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ wake_up(&affine_queue); ++ } ++ ++ if (is & QM_PIRQ_MRI) { ++ struct qman_fq *fq; ++ u8 verb, num = 0; ++mr_loop: ++ qm_mr_pvb_update(&p->p); ++ msg = qm_mr_current(&p->p); ++ if (!msg) ++ goto mr_done; ++ swapped_msg = *msg; ++ hw_fd_to_cpu(&swapped_msg.ern.fd); ++ verb = msg->verb & QM_MR_VERB_TYPE_MASK; ++ /* The message is a software ERN iff the 0x20 bit is clear */ ++ if (verb & 0x20) { ++ switch (verb) { ++ case QM_MR_VERB_FQRNI: ++ /* nada, we drop FQRNIs on the floor */ ++ break; ++ case QM_MR_VERB_FQRN: ++ case QM_MR_VERB_FQRL: ++ /* Lookup in the retirement table */ ++ fq = table_find_fq(p, be32_to_cpu(msg->fq.fqid)); ++ BUG_ON(!fq); ++ fq_state_change(p, fq, &swapped_msg, verb); ++ if (fq->cb.fqs) ++ fq->cb.fqs(p, fq, &swapped_msg); ++ break; ++ case QM_MR_VERB_FQPN: ++ /* Parked */ ++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP ++ fq = get_fq_table_entry( ++ be32_to_cpu(msg->fq.contextB)); ++#else ++ fq = (void *)(uintptr_t) ++ be32_to_cpu(msg->fq.contextB); ++#endif ++ fq_state_change(p, fq, msg, verb); ++ if (fq->cb.fqs) ++ fq->cb.fqs(p, fq, &swapped_msg); ++ break; ++ case QM_MR_VERB_DC_ERN: ++ /* DCP ERN */ ++ if (p->cb_dc_ern) ++ p->cb_dc_ern(p, msg); ++ else if (cb_dc_ern) ++ cb_dc_ern(p, msg); ++ else { ++ static int warn_once; ++ if (!warn_once) { ++ pr_crit("Leaking DCP ERNs!\n"); ++ warn_once = 1; ++ } ++ } ++ break; ++ default: ++ pr_crit("Invalid MR verb 0x%02x\n", verb); ++ } ++ } else { ++ /* It's a software ERN */ ++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP ++ fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag)); ++#else ++ fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag); ++#endif ++ fq->cb.ern(p, fq, &swapped_msg); ++ } ++ num++; ++ qm_mr_next(&p->p); ++ goto mr_loop; ++mr_done: ++ qm_mr_cci_consume(&p->p, num); ++ } ++ /* ++ * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific ++ * processing. If that interrupt source has meanwhile been re-asserted, ++ * we mustn't clear it here (or in the top-level interrupt handler). ++ */ ++ return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI); ++} ++ ++/* remove some slowish-path stuff from the "fast path" and make sure it isn't ++ * inlined. 
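++ * Keeping these out of line also keeps the icache footprint of the
++ * fast-path dequeue loop below to a minimum.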
*/ ++static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq) ++{ ++ p->vdqcr_owned = NULL; ++ FQLOCK(fq); ++ fq_clear(fq, QMAN_FQ_STATE_VDQCR); ++ FQUNLOCK(fq); ++ wake_up(&affine_queue); ++} ++ ++/* Copy a DQRR entry ensuring reads reach QBMan in order */ ++static inline void safe_copy_dqrr(struct qm_dqrr_entry *dst, ++ const struct qm_dqrr_entry *src) ++{ ++ int i = 0; ++ const u64 *s64 = (u64 *)src; ++ u64 *d64 = (u64 *)dst; ++ ++ /* DQRR only has 32 bytes of valid data so we only need to ++ * copy four 64-bit values */ ++ *d64 = *s64; ++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) ++ { ++ u32 res, zero = 0; ++ /* Creating a dependency after copying the first bytes ensures ++ no wrap transaction is generated to QBMan */ ++ /* Logical AND the value pointed to by s64 with 0x0 and ++ store the result in res */ ++ asm volatile("and %[result], %[in1], %[in2]" ++ : [result] "=r" (res) ++ : [in1] "r" (zero), [in2] "r" (*s64) ++ : "memory"); ++ /* Add res to s64 - this creates a dependency on the result of ++ reading the value of s64 before the next read. The side ++ effect of this is that the core must stall until the first ++ aligned read is complete, thereby preventing a WRAP ++ transaction from being seen by the QBMan */ ++ asm volatile("add %[result], %[in1], %[in2]" ++ : [result] "=r" (s64) ++ : [in1] "r" (res), [in2] "r" (s64) ++ : "memory"); ++ } ++#endif ++ /* Copy the last three 64-bit parts */ ++ d64++; s64++; ++ for (; i < 3; i++) ++ *d64++ = *s64++; ++} ++ ++/* Look: no locks, no irq_save()s, no preempt_disable()s! :-) The only states ++ * that would conflict with other things if they ran at the same time on the ++ * same cpu are: ++ * ++ * (i) setting/clearing vdqcr_owned, and ++ * (ii) clearing the NE (Not Empty) flag. ++ * ++ * Both are safe. Because: ++ * ++ * (i) this clearing can only occur after qman_volatile_dequeue() has set the ++ * vdqcr_owned field (which it does before setting VDQCR), and ++ * qman_volatile_dequeue() blocks interrupts and preemption while this is ++ * done so that we can't interfere. ++ * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as ++ * with (i) that API prevents us from interfering until it's safe. ++ * ++ * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far ++ * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett ++ * advantage comes from this function not having to "lock" anything at all. ++ * ++ * Note also that the callbacks are invoked at points which are safe against the ++ * above potential conflicts, but that this function itself is not re-entrant ++ * (this is because the function tracks one end of each FIFO in the portal and ++ * we do *not* want to lock that). So the consequence is that it is safe for ++ * user callbacks to call into any Qman API *except* qman_poll() (as that's the ++ * sole API that could be invoking the callback through this function). ++ */ ++static inline unsigned int __poll_portal_fast(struct qman_portal *p, ++ unsigned int poll_limit) ++{ ++ const struct qm_dqrr_entry *dq; ++ struct qman_fq *fq; ++ enum qman_cb_dqrr_result res; ++ unsigned int limit = 0; ++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ ++ struct qm_dqrr_entry *shadow; ++ const struct qm_dqrr_entry *orig_dq; ++#endif ++loop: ++ qm_dqrr_pvb_update(&p->p); ++ dq = qm_dqrr_current(&p->p); ++ if (!dq) ++ goto done; ++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ ++ /* If running on an LE system the fields of the ++ dequeue entry must be swapped. 
Because the ++ QMan HW will ignore writes the DQRR entry is ++ copied and the index stored within the copy */ ++ shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)]; ++ /* Use safe copy here to avoid WRAP transaction */ ++ safe_copy_dqrr(shadow, dq); ++ orig_dq = dq; ++ dq = shadow; ++ shadow->fqid = be32_to_cpu(shadow->fqid); ++ shadow->contextB = be32_to_cpu(shadow->contextB); ++ shadow->seqnum = be16_to_cpu(shadow->seqnum); ++ hw_fd_to_cpu(&shadow->fd); ++#endif ++ if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) { ++ /* VDQCR: don't trust contextB as the FQ may have been ++ * configured for h/w consumption and we're draining it ++ * post-retirement. */ ++ fq = p->vdqcr_owned; ++ /* We only set QMAN_FQ_STATE_NE when retiring, so we only need ++ * to check for clearing it when doing volatile dequeues. It's ++ * one less thing to check in the critical path (SDQCR). */ ++ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY) ++ fq_clear(fq, QMAN_FQ_STATE_NE); ++ /* this is duplicated from the SDQCR code, but we have stuff to ++ * do before *and* after this callback, and we don't want ++ * multiple if()s in the critical path (SDQCR). */ ++ res = fq->cb.dqrr(p, fq, dq); ++ if (res == qman_cb_dqrr_stop) ++ goto done; ++ /* Check for VDQCR completion */ ++ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) ++ clear_vdqcr(p, fq); ++ } else { ++ /* SDQCR: contextB points to the FQ */ ++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP ++ fq = get_fq_table_entry(dq->contextB); ++#else ++ fq = (void *)(uintptr_t)dq->contextB; ++#endif ++ /* Now let the callback do its stuff */ ++ res = fq->cb.dqrr(p, fq, dq); ++ ++ /* The callback can request that we exit without consuming this ++ * entry nor advancing; */ ++ if (res == qman_cb_dqrr_stop) ++ goto done; ++ } ++ /* Interpret 'dq' from a driver perspective. */ ++ /* Parking isn't possible unless HELDACTIVE was set. NB, ++ * FORCEELIGIBLE implies HELDACTIVE, so we only need to ++ * check for HELDACTIVE to cover both. */ ++ DPA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) || ++ (res != qman_cb_dqrr_park)); ++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ ++ if (res != qman_cb_dqrr_defer) ++ qm_dqrr_cdc_consume_1ptr(&p->p, orig_dq, ++ (res == qman_cb_dqrr_park)); ++#else ++ /* Defer just means "skip it, I'll consume it myself later on" */ ++ if (res != qman_cb_dqrr_defer) ++ qm_dqrr_cdc_consume_1ptr(&p->p, dq, (res == qman_cb_dqrr_park)); ++#endif ++ /* Move forward */ ++ qm_dqrr_next(&p->p); ++ /* Entry processed and consumed, increment our counter. The callback can ++ * request that we exit after consuming the entry, and we also exit if ++ * we reach our processing limit, so loop back only if neither of these ++ * conditions is met. */ ++ if ((++limit < poll_limit) && (res != qman_cb_dqrr_consume_stop)) ++ goto loop; ++done: ++ return limit; ++} ++ ++u32 qman_irqsource_get(void) ++{ ++ /* "irqsource" and "poll" APIs mustn't redirect when sharing, they ++ * should shut the user out if they are not the primary CPU hosting the ++ * portal. That's why we use the "raw" interface. 
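++ * (A slave cpu's own portal struct carries no IRQ sources of its own, and
++ * following the redirect would report the host portal's sources as if
++ * they were this cpu's.)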
++	struct qman_portal *p = get_raw_affine_portal();
++	u32 ret = p->irq_sources & QM_PIRQ_VISIBLE;
++	put_affine_portal();
++	return ret;
++}
++EXPORT_SYMBOL(qman_irqsource_get);
++
++int qman_p_irqsource_add(struct qman_portal *p, u32 bits __maybe_unused)
++{
++	__maybe_unused unsigned long irqflags;
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++	if (p->sharing_redirect)
++		return -EINVAL;
++	else
++#endif
++	{
++		bits = bits & QM_PIRQ_VISIBLE;
++		PORTAL_IRQ_LOCK(p, irqflags);
++
++		/* Clear any previously remaining interrupt conditions in
++		 * QCSP_ISR. This prevents raising a false interrupt when
++		 * interrupt conditions are enabled in QCSP_IER.
++		 */
++		qm_isr_status_clear(&p->p, bits);
++		set_bits(bits, &p->irq_sources);
++		qm_isr_enable_write(&p->p, p->irq_sources);
++		PORTAL_IRQ_UNLOCK(p, irqflags);
++	}
++	return 0;
++}
++EXPORT_SYMBOL(qman_p_irqsource_add);
++
++int qman_irqsource_add(u32 bits __maybe_unused)
++{
++	struct qman_portal *p = get_raw_affine_portal();
++	int ret;
++	ret = qman_p_irqsource_add(p, bits);
++	put_affine_portal();
++	return ret;
++}
++EXPORT_SYMBOL(qman_irqsource_add);
++
++int qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
++{
++	__maybe_unused unsigned long irqflags;
++	u32 ier;
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++	if (p->sharing_redirect) {
++		put_affine_portal();
++		return -EINVAL;
++	}
++#endif
++	/* Our interrupt handler only processes and clears status register
++	 * bits that are in p->irq_sources. As we're trimming that mask, if
++	 * one of them were to assert in the status register just before we
++	 * remove it from the enable register, there would be an interrupt
++	 * storm when we release the IRQ lock. So we wait for the enable
++	 * register update to take effect in h/w (by reading it back) and then
++	 * clear all other bits in the status register, i.e. we clear them
++	 * from the ISR once it's certain the IER won't allow them to
++	 * reassert. */
++	PORTAL_IRQ_LOCK(p, irqflags);
++	bits &= QM_PIRQ_VISIBLE;
++	clear_bits(bits, &p->irq_sources);
++	qm_isr_enable_write(&p->p, p->irq_sources);
++
++	ier = qm_isr_enable_read(&p->p);
++	/* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
++	 * data dependency, i.e. to protect against reordering. */
++	qm_isr_status_clear(&p->p, ~ier);
++	PORTAL_IRQ_UNLOCK(p, irqflags);
++	return 0;
++}
++EXPORT_SYMBOL(qman_p_irqsource_remove);
++
++int qman_irqsource_remove(u32 bits)
++{
++	struct qman_portal *p = get_raw_affine_portal();
++	int ret;
++	ret = qman_p_irqsource_remove(p, bits);
++	put_affine_portal();
++	return ret;
++}
++EXPORT_SYMBOL(qman_irqsource_remove);
++
++const cpumask_t *qman_affine_cpus(void)
++{
++	return &affine_mask;
++}
++EXPORT_SYMBOL(qman_affine_cpus);
++
++u16 qman_affine_channel(int cpu)
++{
++	if (cpu < 0) {
++		struct qman_portal *portal = get_raw_affine_portal();
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++		BUG_ON(portal->sharing_redirect);
++#endif
++		cpu = portal->config->public_cfg.cpu;
++		put_affine_portal();
++	}
++	BUG_ON(!cpumask_test_cpu(cpu, &affine_mask));
++	return affine_channels[cpu];
++}
++EXPORT_SYMBOL(qman_affine_channel);
++
++void *qman_get_affine_portal(int cpu)
++{
++	return affine_portals[cpu];
++}
++EXPORT_SYMBOL(qman_get_affine_portal);
++
++int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
++{
++	int ret;
++
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++	if (unlikely(p->sharing_redirect))
++		ret = -EINVAL;
++	else
++#endif
++	{
++		BUG_ON(p->irq_sources & QM_PIRQ_DQRI);
++		ret = __poll_portal_fast(p, limit);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(qman_p_poll_dqrr);
++
++int qman_poll_dqrr(unsigned int limit)
++{
++	struct qman_portal *p = get_poll_portal();
++	int ret;
++	ret = qman_p_poll_dqrr(p, limit);
++	put_poll_portal();
++	return ret;
++}
++EXPORT_SYMBOL(qman_poll_dqrr);
++
++u32 qman_p_poll_slow(struct qman_portal *p)
++{
++	u32 ret;
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++	if (unlikely(p->sharing_redirect))
++		ret = (u32)-1;
++	else
++#endif
++	{
++		u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
++		ret = __poll_portal_slow(p, is);
++		qm_isr_status_clear(&p->p, ret);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(qman_p_poll_slow);
++
++u32 qman_poll_slow(void)
++{
++	struct qman_portal *p = get_poll_portal();
++	u32 ret;
++	ret = qman_p_poll_slow(p);
++	put_poll_portal();
++	return ret;
++}
++EXPORT_SYMBOL(qman_poll_slow);
++
++/* Legacy wrapper */
++void qman_p_poll(struct qman_portal *p)
++{
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++	if (unlikely(p->sharing_redirect))
++		return;
++#endif
++	if ((~p->irq_sources) & QM_PIRQ_SLOW) {
++		if (!(p->slowpoll--)) {
++			u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
++			u32 active = __poll_portal_slow(p, is);
++			if (active) {
++				qm_isr_status_clear(&p->p, active);
++				p->slowpoll = SLOW_POLL_BUSY;
++			} else
++				p->slowpoll = SLOW_POLL_IDLE;
++		}
++	}
++	if ((~p->irq_sources) & QM_PIRQ_DQRI)
++		__poll_portal_fast(p, CONFIG_FSL_QMAN_POLL_LIMIT);
++}
++EXPORT_SYMBOL(qman_p_poll);
++
++void qman_poll(void)
++{
++	struct qman_portal *p = get_poll_portal();
++	qman_p_poll(p);
++	put_poll_portal();
++}
++EXPORT_SYMBOL(qman_poll);
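++
++/* Usage sketch (illustrative, not part of the driver): when neither the
++ * slow-path nor the DQRI interrupt sources are enabled, a dedicated
++ * thread can drive the portal by polling:
++ *
++ *	while (!kthread_should_stop()) {
++ *		qman_poll();
++ *		cond_resched();
++ *	}
++ *
++ * qman_poll() rate-limits the slow-path check via p->slowpoll and
++ * services up to CONFIG_FSL_QMAN_POLL_LIMIT DQRR entries per call. */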
++
++void qman_p_stop_dequeues(struct qman_portal *p)
++{
++	qman_stop_dequeues_ex(p);
++}
++EXPORT_SYMBOL(qman_p_stop_dequeues);
++
++void qman_stop_dequeues(void)
++{
++	struct qman_portal *p = get_affine_portal();
++	qman_p_stop_dequeues(p);
++	put_affine_portal();
++}
++EXPORT_SYMBOL(qman_stop_dequeues);
++
++void qman_p_start_dequeues(struct qman_portal *p)
++{
++	unsigned long irqflags __maybe_unused;
++	PORTAL_IRQ_LOCK(p, irqflags);
++	DPA_ASSERT(p->dqrr_disable_ref > 0);
++	if (!(--p->dqrr_disable_ref))
++		qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
++	PORTAL_IRQ_UNLOCK(p, irqflags);
++}
++EXPORT_SYMBOL(qman_p_start_dequeues);
++
++void qman_start_dequeues(void)
++{ ++ struct qman_portal *p = get_affine_portal(); ++ qman_p_start_dequeues(p); ++ put_affine_portal(); ++} ++EXPORT_SYMBOL(qman_start_dequeues); ++ ++void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools) ++{ ++ unsigned long irqflags __maybe_unused; ++ PORTAL_IRQ_LOCK(p, irqflags); ++ pools &= p->config->public_cfg.pools; ++ p->sdqcr |= pools; ++ qm_dqrr_sdqcr_set(&p->p, p->sdqcr); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++} ++EXPORT_SYMBOL(qman_p_static_dequeue_add); ++ ++void qman_static_dequeue_add(u32 pools) ++{ ++ struct qman_portal *p = get_affine_portal(); ++ qman_p_static_dequeue_add(p, pools); ++ put_affine_portal(); ++} ++EXPORT_SYMBOL(qman_static_dequeue_add); ++ ++void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools) ++{ ++ unsigned long irqflags __maybe_unused; ++ PORTAL_IRQ_LOCK(p, irqflags); ++ pools &= p->config->public_cfg.pools; ++ p->sdqcr &= ~pools; ++ qm_dqrr_sdqcr_set(&p->p, p->sdqcr); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++} ++EXPORT_SYMBOL(qman_p_static_dequeue_del); ++ ++void qman_static_dequeue_del(u32 pools) ++{ ++ struct qman_portal *p = get_affine_portal(); ++ qman_p_static_dequeue_del(p, pools); ++ put_affine_portal(); ++} ++EXPORT_SYMBOL(qman_static_dequeue_del); ++ ++u32 qman_p_static_dequeue_get(struct qman_portal *p) ++{ ++ return p->sdqcr; ++} ++EXPORT_SYMBOL(qman_p_static_dequeue_get); ++ ++u32 qman_static_dequeue_get(void) ++{ ++ struct qman_portal *p = get_affine_portal(); ++ u32 ret = qman_p_static_dequeue_get(p); ++ put_affine_portal(); ++ return ret; ++} ++EXPORT_SYMBOL(qman_static_dequeue_get); ++ ++void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq, ++ int park_request) ++{ ++ qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request); ++} ++EXPORT_SYMBOL(qman_p_dca); ++ ++void qman_dca(struct qm_dqrr_entry *dq, int park_request) ++{ ++ struct qman_portal *p = get_affine_portal(); ++ qman_p_dca(p, dq, park_request); ++ put_affine_portal(); ++} ++EXPORT_SYMBOL(qman_dca); ++ ++/*******************/ ++/* Frame queue API */ ++/*******************/ ++ ++static const char *mcr_result_str(u8 result) ++{ ++ switch (result) { ++ case QM_MCR_RESULT_NULL: ++ return "QM_MCR_RESULT_NULL"; ++ case QM_MCR_RESULT_OK: ++ return "QM_MCR_RESULT_OK"; ++ case QM_MCR_RESULT_ERR_FQID: ++ return "QM_MCR_RESULT_ERR_FQID"; ++ case QM_MCR_RESULT_ERR_FQSTATE: ++ return "QM_MCR_RESULT_ERR_FQSTATE"; ++ case QM_MCR_RESULT_ERR_NOTEMPTY: ++ return "QM_MCR_RESULT_ERR_NOTEMPTY"; ++ case QM_MCR_RESULT_PENDING: ++ return "QM_MCR_RESULT_PENDING"; ++ case QM_MCR_RESULT_ERR_BADCOMMAND: ++ return "QM_MCR_RESULT_ERR_BADCOMMAND"; ++ } ++ return ""; ++} ++ ++int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq) ++{ ++ struct qm_fqd fqd; ++ struct qm_mcr_queryfq_np np; ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ ++ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) { ++ int ret = qman_alloc_fqid(&fqid); ++ if (ret) ++ return ret; ++ } ++ spin_lock_init(&fq->fqlock); ++ fq->fqid = fqid; ++ fq->flags = flags; ++ fq->state = qman_fq_state_oos; ++ fq->cgr_groupid = 0; ++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP ++ if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) ++ return -ENOMEM; ++#endif ++ if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY)) ++ return 0; ++ /* Everything else is AS_IS support */ ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ mcc = qm_mc_start(&p->p); ++ mcc->queryfq.fqid = cpu_to_be32(fqid); ++ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); ++ while 
(!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ); ++ if (mcr->result != QM_MCR_RESULT_OK) { ++ pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result)); ++ goto err; ++ } ++ fqd = mcr->queryfq.fqd; ++ hw_fqd_to_cpu(&fqd); ++ mcc = qm_mc_start(&p->p); ++ mcc->queryfq_np.fqid = cpu_to_be32(fqid); ++ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP); ++ if (mcr->result != QM_MCR_RESULT_OK) { ++ pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result)); ++ goto err; ++ } ++ np = mcr->queryfq_np; ++ /* Phew, have queryfq and queryfq_np results, stitch together ++ * the FQ object from those. */ ++ fq->cgr_groupid = fqd.cgid; ++ switch (np.state & QM_MCR_NP_STATE_MASK) { ++ case QM_MCR_NP_STATE_OOS: ++ break; ++ case QM_MCR_NP_STATE_RETIRED: ++ fq->state = qman_fq_state_retired; ++ if (np.frm_cnt) ++ fq_set(fq, QMAN_FQ_STATE_NE); ++ break; ++ case QM_MCR_NP_STATE_TEN_SCHED: ++ case QM_MCR_NP_STATE_TRU_SCHED: ++ case QM_MCR_NP_STATE_ACTIVE: ++ fq->state = qman_fq_state_sched; ++ if (np.state & QM_MCR_NP_STATE_R) ++ fq_set(fq, QMAN_FQ_STATE_CHANGING); ++ break; ++ case QM_MCR_NP_STATE_PARKED: ++ fq->state = qman_fq_state_parked; ++ break; ++ default: ++ DPA_ASSERT(NULL == "invalid FQ state"); ++ } ++ if (fqd.fq_ctrl & QM_FQCTRL_CGE) ++ fq->state |= QMAN_FQ_STATE_CGR_EN; ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ return 0; ++err: ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) ++ qman_release_fqid(fqid); ++ return -EIO; ++} ++EXPORT_SYMBOL(qman_create_fq); ++ ++void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused) ++{ ++ ++ /* We don't need to lock the FQ as it is a pre-condition that the FQ be ++ * quiesced. Instead, run some checks. */ ++ switch (fq->state) { ++ case qman_fq_state_parked: ++ DPA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED); ++ case qman_fq_state_oos: ++ if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID)) ++ qman_release_fqid(fq->fqid); ++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP ++ clear_fq_table_entry(fq->key); ++#endif ++ return; ++ default: ++ break; ++ } ++ DPA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!"); ++} ++EXPORT_SYMBOL(qman_destroy_fq); ++ ++u32 qman_fq_fqid(struct qman_fq *fq) ++{ ++ return fq->fqid; ++} ++EXPORT_SYMBOL(qman_fq_fqid); ++ ++void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags) ++{ ++ if (state) ++ *state = fq->state; ++ if (flags) ++ *flags = fq->flags; ++} ++EXPORT_SYMBOL(qman_fq_state); ++ ++int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ? 
++ QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED; ++ ++ if ((fq->state != qman_fq_state_oos) && ++ (fq->state != qman_fq_state_parked)) ++ return -EINVAL; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) ++ return -EINVAL; ++#endif ++ if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) { ++ /* And can't be set at the same time as TDTHRESH */ ++ if (opts->we_mask & QM_INITFQ_WE_TDTHRESH) ++ return -EINVAL; ++ } ++ /* Issue an INITFQ_[PARKED|SCHED] management command */ ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ FQLOCK(fq); ++ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || ++ ((fq->state != qman_fq_state_oos) && ++ (fq->state != qman_fq_state_parked)))) { ++ FQUNLOCK(fq); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ return -EBUSY; ++ } ++ mcc = qm_mc_start(&p->p); ++ if (opts) ++ mcc->initfq = *opts; ++ mcc->initfq.fqid = cpu_to_be32(fq->fqid); ++ mcc->initfq.count = 0; ++ ++ /* If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a ++ * demux pointer. Otherwise, the caller-provided value is allowed to ++ * stand, don't overwrite it. */ ++ if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) { ++ dma_addr_t phys_fq; ++ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB; ++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP ++ mcc->initfq.fqd.context_b = fq->key; ++#else ++ mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq; ++#endif ++ /* and the physical address - NB, if the user wasn't trying to ++ * set CONTEXTA, clear the stashing settings. */ ++ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) { ++ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; ++ memset(&mcc->initfq.fqd.context_a, 0, ++ sizeof(mcc->initfq.fqd.context_a)); ++ } else { ++ phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq), ++ DMA_TO_DEVICE); ++ qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq); ++ } ++ } ++ if (flags & QMAN_INITFQ_FLAG_LOCAL) { ++ mcc->initfq.fqd.dest.channel = p->config->public_cfg.channel; ++ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) { ++ mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ; ++ mcc->initfq.fqd.dest.wq = 4; ++ } ++ } ++ mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask); ++ cpu_to_hw_fqd(&mcc->initfq.fqd); ++ qm_mc_commit(&p->p, myverb); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); ++ res = mcr->result; ++ if (res != QM_MCR_RESULT_OK) { ++ FQUNLOCK(fq); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ return -EIO; ++ } ++ if (opts) { ++ if (opts->we_mask & QM_INITFQ_WE_FQCTRL) { ++ if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE) ++ fq_set(fq, QMAN_FQ_STATE_CGR_EN); ++ else ++ fq_clear(fq, QMAN_FQ_STATE_CGR_EN); ++ } ++ if (opts->we_mask & QM_INITFQ_WE_CGID) ++ fq->cgr_groupid = opts->fqd.cgid; ++ } ++ fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ? 
++ qman_fq_state_sched : qman_fq_state_parked; ++ FQUNLOCK(fq); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ return 0; ++} ++EXPORT_SYMBOL(qman_init_fq); ++ ++int qman_schedule_fq(struct qman_fq *fq) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ int ret = 0; ++ u8 res; ++ ++ if (fq->state != qman_fq_state_parked) ++ return -EINVAL; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) ++ return -EINVAL; ++#endif ++ /* Issue a ALTERFQ_SCHED management command */ ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ FQLOCK(fq); ++ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || ++ (fq->state != qman_fq_state_parked))) { ++ ret = -EBUSY; ++ goto out; ++ } ++ mcc = qm_mc_start(&p->p); ++ mcc->alterfq.fqid = cpu_to_be32(fq->fqid); ++ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED); ++ res = mcr->result; ++ if (res != QM_MCR_RESULT_OK) { ++ ret = -EIO; ++ goto out; ++ } ++ fq->state = qman_fq_state_sched; ++out: ++ FQUNLOCK(fq); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ return ret; ++} ++EXPORT_SYMBOL(qman_schedule_fq); ++ ++int qman_retire_fq(struct qman_fq *fq, u32 *flags) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ int rval; ++ u8 res; ++ ++ if ((fq->state != qman_fq_state_parked) && ++ (fq->state != qman_fq_state_sched)) ++ return -EINVAL; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) ++ return -EINVAL; ++#endif ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ FQLOCK(fq); ++ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || ++ (fq->state == qman_fq_state_retired) || ++ (fq->state == qman_fq_state_oos))) { ++ rval = -EBUSY; ++ goto out; ++ } ++ rval = table_push_fq(p, fq); ++ if (rval) ++ goto out; ++ mcc = qm_mc_start(&p->p); ++ mcc->alterfq.fqid = cpu_to_be32(fq->fqid); ++ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE); ++ res = mcr->result; ++ /* "Elegant" would be to treat OK/PENDING the same way; set CHANGING, ++ * and defer the flags until FQRNI or FQRN (respectively) show up. But ++ * "Friendly" is to process OK immediately, and not set CHANGING. We do ++ * friendly, otherwise the caller doesn't necessarily have a fully ++ * "retired" FQ on return even if the retirement was immediate. However ++ * this does mean some code duplication between here and ++ * fq_state_change(). */ ++ if (likely(res == QM_MCR_RESULT_OK)) { ++ rval = 0; ++ /* Process 'fq' right away, we'll ignore FQRNI */ ++ if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) ++ fq_set(fq, QMAN_FQ_STATE_NE); ++ if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT) ++ fq_set(fq, QMAN_FQ_STATE_ORL); ++ else ++ table_del_fq(p, fq); ++ if (flags) ++ *flags = fq->flags; ++ fq->state = qman_fq_state_retired; ++ if (fq->cb.fqs) { ++ /* Another issue with supporting "immediate" retirement ++ * is that we're forced to drop FQRNIs, because by the ++ * time they're seen it may already be "too late" (the ++ * fq may have been OOS'd and free()'d already). 
But if ++ * the upper layer wants a callback whether it's ++ * immediate or not, we have to fake a "MR" entry to ++ * look like an FQRNI... */ ++ struct qm_mr_entry msg; ++ msg.verb = QM_MR_VERB_FQRNI; ++ msg.fq.fqs = mcr->alterfq.fqs; ++ msg.fq.fqid = fq->fqid; ++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP ++ msg.fq.contextB = fq->key; ++#else ++ msg.fq.contextB = (u32)(uintptr_t)fq; ++#endif ++ fq->cb.fqs(p, fq, &msg); ++ } ++ } else if (res == QM_MCR_RESULT_PENDING) { ++ rval = 1; ++ fq_set(fq, QMAN_FQ_STATE_CHANGING); ++ } else { ++ rval = -EIO; ++ table_del_fq(p, fq); ++ } ++out: ++ FQUNLOCK(fq); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ return rval; ++} ++EXPORT_SYMBOL(qman_retire_fq); ++ ++int qman_oos_fq(struct qman_fq *fq) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ int ret = 0; ++ u8 res; ++ ++ if (fq->state != qman_fq_state_retired) ++ return -EINVAL; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) ++ return -EINVAL; ++#endif ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ FQLOCK(fq); ++ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) || ++ (fq->state != qman_fq_state_retired))) { ++ ret = -EBUSY; ++ goto out; ++ } ++ mcc = qm_mc_start(&p->p); ++ mcc->alterfq.fqid = cpu_to_be32(fq->fqid); ++ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS); ++ res = mcr->result; ++ if (res != QM_MCR_RESULT_OK) { ++ ret = -EIO; ++ goto out; ++ } ++ fq->state = qman_fq_state_oos; ++out: ++ FQUNLOCK(fq); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ return ret; ++} ++EXPORT_SYMBOL(qman_oos_fq); ++ ++int qman_fq_flow_control(struct qman_fq *fq, int xon) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ int ret = 0; ++ u8 res; ++ u8 myverb; ++ ++ if ((fq->state == qman_fq_state_oos) || ++ (fq->state == qman_fq_state_retired) || ++ (fq->state == qman_fq_state_parked)) ++ return -EINVAL; ++ ++#ifdef CONFIG_FSL_DPA_CHECKING ++ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) ++ return -EINVAL; ++#endif ++ /* Issue a ALTER_FQXON or ALTER_FQXOFF management command */ ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ FQLOCK(fq); ++ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || ++ (fq->state == qman_fq_state_parked) || ++ (fq->state == qman_fq_state_oos) || ++ (fq->state == qman_fq_state_retired))) { ++ ret = -EBUSY; ++ goto out; ++ } ++ mcc = qm_mc_start(&p->p); ++ mcc->alterfq.fqid = fq->fqid; ++ mcc->alterfq.count = 0; ++ myverb = xon ? 
QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF; ++ ++ qm_mc_commit(&p->p, myverb); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); ++ ++ res = mcr->result; ++ if (res != QM_MCR_RESULT_OK) { ++ ret = -EIO; ++ goto out; ++ } ++out: ++ FQUNLOCK(fq); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ return ret; ++} ++EXPORT_SYMBOL(qman_fq_flow_control); ++ ++int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p = get_affine_portal(); ++ unsigned long irqflags __maybe_unused; ++ u8 res; ++ ++ PORTAL_IRQ_LOCK(p, irqflags); ++ mcc = qm_mc_start(&p->p); ++ mcc->queryfq.fqid = cpu_to_be32(fq->fqid); ++ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); ++ res = mcr->result; ++ if (res == QM_MCR_RESULT_OK) ++ *fqd = mcr->queryfq.fqd; ++ hw_fqd_to_cpu(fqd); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ if (res != QM_MCR_RESULT_OK) ++ return -EIO; ++ return 0; ++} ++EXPORT_SYMBOL(qman_query_fq); ++ ++int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p = get_affine_portal(); ++ unsigned long irqflags __maybe_unused; ++ u8 res; ++ ++ PORTAL_IRQ_LOCK(p, irqflags); ++ mcc = qm_mc_start(&p->p); ++ mcc->queryfq.fqid = cpu_to_be32(fq->fqid); ++ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); ++ res = mcr->result; ++ if (res == QM_MCR_RESULT_OK) { ++ *np = mcr->queryfq_np; ++ np->fqd_link = be24_to_cpu(np->fqd_link); ++ np->odp_seq = be16_to_cpu(np->odp_seq); ++ np->orp_nesn = be16_to_cpu(np->orp_nesn); ++ np->orp_ea_hseq = be16_to_cpu(np->orp_ea_hseq); ++ np->orp_ea_tseq = be16_to_cpu(np->orp_ea_tseq); ++ np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr); ++ np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr); ++ np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr); ++ np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr); ++ np->ics_surp = be16_to_cpu(np->ics_surp); ++ np->byte_cnt = be32_to_cpu(np->byte_cnt); ++ np->frm_cnt = be24_to_cpu(np->frm_cnt); ++ np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr); ++ np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr); ++ np->od1_sfdr = be16_to_cpu(np->od1_sfdr); ++ np->od2_sfdr = be16_to_cpu(np->od2_sfdr); ++ np->od3_sfdr = be16_to_cpu(np->od3_sfdr); ++ } ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ if (res == QM_MCR_RESULT_ERR_FQID) ++ return -ERANGE; ++ else if (res != QM_MCR_RESULT_OK) ++ return -EIO; ++ return 0; ++} ++EXPORT_SYMBOL(qman_query_fq_np); ++ ++int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p = get_affine_portal(); ++ unsigned long irqflags __maybe_unused; ++ u8 res, myverb; ++ ++ PORTAL_IRQ_LOCK(p, irqflags); ++ myverb = (query_dedicated) ? 
QM_MCR_VERB_QUERYWQ_DEDICATED : ++ QM_MCR_VERB_QUERYWQ; ++ mcc = qm_mc_start(&p->p); ++ mcc->querywq.channel.id = cpu_to_be16(wq->channel.id); ++ qm_mc_commit(&p->p, myverb); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); ++ res = mcr->result; ++ if (res == QM_MCR_RESULT_OK) { ++ int i, array_len; ++ wq->channel.id = be16_to_cpu(mcr->querywq.channel.id); ++ array_len = ARRAY_SIZE(mcr->querywq.wq_len); ++ for (i = 0; i < array_len; i++) ++ wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]); ++ } ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ if (res != QM_MCR_RESULT_OK) { ++ pr_err("QUERYWQ failed: %s\n", mcr_result_str(res)); ++ return -EIO; ++ } ++ return 0; ++} ++EXPORT_SYMBOL(qman_query_wq); ++ ++int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt, ++ struct qm_mcr_cgrtestwrite *result) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p = get_affine_portal(); ++ unsigned long irqflags __maybe_unused; ++ u8 res; ++ ++ PORTAL_IRQ_LOCK(p, irqflags); ++ mcc = qm_mc_start(&p->p); ++ mcc->cgrtestwrite.cgid = cgr->cgrid; ++ mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32); ++ mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt; ++ qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE); ++ res = mcr->result; ++ if (res == QM_MCR_RESULT_OK) ++ *result = mcr->cgrtestwrite; ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ if (res != QM_MCR_RESULT_OK) { ++ pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res)); ++ return -EIO; ++ } ++ return 0; ++} ++EXPORT_SYMBOL(qman_testwrite_cgr); ++ ++int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p = get_affine_portal(); ++ unsigned long irqflags __maybe_unused; ++ u8 res; ++ int i; ++ ++ PORTAL_IRQ_LOCK(p, irqflags); ++ mcc = qm_mc_start(&p->p); ++ mcc->querycgr.cgid = cgr->cgrid; ++ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR); ++ res = mcr->result; ++ if (res == QM_MCR_RESULT_OK) ++ *cgrd = mcr->querycgr; ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ if (res != QM_MCR_RESULT_OK) { ++ pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res)); ++ return -EIO; ++ } ++ cgrd->cgr.wr_parm_g.word = ++ be32_to_cpu(cgrd->cgr.wr_parm_g.word); ++ cgrd->cgr.wr_parm_y.word = ++ be32_to_cpu(cgrd->cgr.wr_parm_y.word); ++ cgrd->cgr.wr_parm_r.word = ++ be32_to_cpu(cgrd->cgr.wr_parm_r.word); ++ cgrd->cgr.cscn_targ = be32_to_cpu(cgrd->cgr.cscn_targ); ++ cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres); ++ for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++) ++ be32_to_cpus(&cgrd->cscn_targ_swp[i]); ++ return 0; ++} ++EXPORT_SYMBOL(qman_query_cgr); ++ ++int qman_query_congestion(struct qm_mcr_querycongestion *congestion) ++{ ++ struct qm_mc_result *mcr; ++ struct qman_portal *p = get_affine_portal(); ++ unsigned long irqflags __maybe_unused; ++ u8 res; ++ int i; ++ ++ PORTAL_IRQ_LOCK(p, irqflags); ++ qm_mc_start(&p->p); ++ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == ++ QM_MCC_VERB_QUERYCONGESTION); ++ res = mcr->result; ++ if (res == QM_MCR_RESULT_OK) ++ 
memcpy_fromio(congestion, &mcr->querycongestion, ++ sizeof(*congestion)); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ if (res != QM_MCR_RESULT_OK) { ++ pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res)); ++ return -EIO; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(congestion->state.__state); i++) ++ be32_to_cpus(&congestion->state.__state[i]); ++ return 0; ++} ++EXPORT_SYMBOL(qman_query_congestion); ++ ++/* internal function used as a wait_event() expression */ ++static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr) ++{ ++ unsigned long irqflags __maybe_unused; ++ int ret = -EBUSY; ++ PORTAL_IRQ_LOCK(p, irqflags); ++ if (!p->vdqcr_owned) { ++ FQLOCK(fq); ++ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) ++ goto escape; ++ fq_set(fq, QMAN_FQ_STATE_VDQCR); ++ FQUNLOCK(fq); ++ p->vdqcr_owned = fq; ++ ret = 0; ++ } ++escape: ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ if (!ret) ++ qm_dqrr_vdqcr_set(&p->p, vdqcr); ++ return ret; ++} ++ ++static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr) ++{ ++ int ret; ++ *p = get_affine_portal(); ++ ret = set_p_vdqcr(*p, fq, vdqcr); ++ put_affine_portal(); ++ return ret; ++} ++ ++#ifdef CONFIG_FSL_DPA_CAN_WAIT ++static int wait_p_vdqcr_start(struct qman_portal *p, struct qman_fq *fq, ++ u32 vdqcr, u32 flags) ++{ ++ int ret = 0; ++ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) ++ ret = wait_event_interruptible(affine_queue, ++ !(ret = set_p_vdqcr(p, fq, vdqcr))); ++ else ++ wait_event(affine_queue, !(ret = set_p_vdqcr(p, fq, vdqcr))); ++ return ret; ++} ++ ++static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq, ++ u32 vdqcr, u32 flags) ++{ ++ int ret = 0; ++ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) ++ ret = wait_event_interruptible(affine_queue, ++ !(ret = set_vdqcr(p, fq, vdqcr))); ++ else ++ wait_event(affine_queue, !(ret = set_vdqcr(p, fq, vdqcr))); ++ return ret; ++} ++#endif ++ ++int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq, ++ u32 flags __maybe_unused, u32 vdqcr) ++{ ++ int ret; ++ ++ if ((fq->state != qman_fq_state_parked) && ++ (fq->state != qman_fq_state_retired)) ++ return -EINVAL; ++ if (vdqcr & QM_VDQCR_FQID_MASK) ++ return -EINVAL; ++ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) ++ return -EBUSY; ++ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid; ++#ifdef CONFIG_FSL_DPA_CAN_WAIT ++ if (flags & QMAN_VOLATILE_FLAG_WAIT) ++ ret = wait_p_vdqcr_start(p, fq, vdqcr, flags); ++ else ++#endif ++ ret = set_p_vdqcr(p, fq, vdqcr); ++ if (ret) ++ return ret; ++ /* VDQCR is set */ ++#ifdef CONFIG_FSL_DPA_CAN_WAIT ++ if (flags & QMAN_VOLATILE_FLAG_FINISH) { ++ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) ++ /* NB: don't propagate any error - the caller wouldn't ++ * know whether the VDQCR was issued or not. A signal ++ * could arrive after returning anyway, so the caller ++ * can check signal_pending() if that's an issue. 
*/ ++ wait_event_interruptible(affine_queue, ++ !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); ++ else ++ wait_event(affine_queue, ++ !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); ++ } ++#endif ++ return 0; ++} ++EXPORT_SYMBOL(qman_p_volatile_dequeue); ++ ++int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused, ++ u32 vdqcr) ++{ ++ struct qman_portal *p; ++ int ret; ++ ++ if ((fq->state != qman_fq_state_parked) && ++ (fq->state != qman_fq_state_retired)) ++ return -EINVAL; ++ if (vdqcr & QM_VDQCR_FQID_MASK) ++ return -EINVAL; ++ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) ++ return -EBUSY; ++ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid; ++#ifdef CONFIG_FSL_DPA_CAN_WAIT ++ if (flags & QMAN_VOLATILE_FLAG_WAIT) ++ ret = wait_vdqcr_start(&p, fq, vdqcr, flags); ++ else ++#endif ++ ret = set_vdqcr(&p, fq, vdqcr); ++ if (ret) ++ return ret; ++ /* VDQCR is set */ ++#ifdef CONFIG_FSL_DPA_CAN_WAIT ++ if (flags & QMAN_VOLATILE_FLAG_FINISH) { ++ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) ++ /* NB: don't propagate any error - the caller wouldn't ++ * know whether the VDQCR was issued or not. A signal ++ * could arrive after returning anyway, so the caller ++ * can check signal_pending() if that's an issue. */ ++ wait_event_interruptible(affine_queue, ++ !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); ++ else ++ wait_event(affine_queue, ++ !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); ++ } ++#endif ++ return 0; ++} ++EXPORT_SYMBOL(qman_volatile_dequeue); ++ ++static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail) ++{ ++ if (avail) ++ qm_eqcr_cce_prefetch(&p->p); ++ else ++ qm_eqcr_cce_update(&p->p); ++} ++ ++int qman_eqcr_is_empty(void) ++{ ++ unsigned long irqflags __maybe_unused; ++ struct qman_portal *p = get_affine_portal(); ++ u8 avail; ++ ++ PORTAL_IRQ_LOCK(p, irqflags); ++ update_eqcr_ci(p, 0); ++ avail = qm_eqcr_get_fill(&p->p); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ return avail == 0; ++} ++EXPORT_SYMBOL(qman_eqcr_is_empty); ++ ++void qman_set_dc_ern(qman_cb_dc_ern handler, int affine) ++{ ++ if (affine) { ++ unsigned long irqflags __maybe_unused; ++ struct qman_portal *p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ p->cb_dc_ern = handler; ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ } else ++ cb_dc_ern = handler; ++} ++EXPORT_SYMBOL(qman_set_dc_ern); ++ ++static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p, ++ unsigned long *irqflags __maybe_unused, ++ struct qman_fq *fq, ++ const struct qm_fd *fd, ++ u32 flags) ++{ ++ struct qm_eqcr_entry *eq; ++ u8 avail; ++ PORTAL_IRQ_LOCK(p, (*irqflags)); ++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC ++ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && ++ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { ++ if (p->eqci_owned) { ++ PORTAL_IRQ_UNLOCK(p, (*irqflags)); ++ return NULL; ++ } ++ p->eqci_owned = fq; ++ } ++#endif ++ if (p->use_eqcr_ci_stashing) { ++ /* ++ * The stashing case is easy, only update if we need to in ++ * order to try and liberate ring entries. ++ */ ++ eq = qm_eqcr_start_stash(&p->p); ++ } else { ++ /* ++ * The non-stashing case is harder, need to prefetch ahead of ++ * time. 
++ */ ++ avail = qm_eqcr_get_avail(&p->p); ++ if (avail < 2) ++ update_eqcr_ci(p, avail); ++ eq = qm_eqcr_start_no_stash(&p->p); ++ } ++ ++ if (unlikely(!eq)) { ++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC ++ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && ++ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) ++ p->eqci_owned = NULL; ++#endif ++ PORTAL_IRQ_UNLOCK(p, (*irqflags)); ++ return NULL; ++ } ++ if (flags & QMAN_ENQUEUE_FLAG_DCA) ++ eq->dca = QM_EQCR_DCA_ENABLE | ++ ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ? ++ QM_EQCR_DCA_PARK : 0) | ++ ((flags >> 8) & QM_EQCR_DCA_IDXMASK); ++ eq->fqid = cpu_to_be32(fq->fqid); ++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP ++ eq->tag = cpu_to_be32(fq->key); ++#else ++ eq->tag = cpu_to_be32((u32)(uintptr_t)fq); ++#endif ++ eq->fd = *fd; ++ cpu_to_hw_fd(&eq->fd); ++ return eq; ++} ++ ++static inline struct qm_eqcr_entry *try_eq_start(struct qman_portal **p, ++ unsigned long *irqflags __maybe_unused, ++ struct qman_fq *fq, ++ const struct qm_fd *fd, ++ u32 flags) ++{ ++ struct qm_eqcr_entry *eq; ++ *p = get_affine_portal(); ++ eq = try_p_eq_start(*p, irqflags, fq, fd, flags); ++ if (!eq) ++ put_affine_portal(); ++ return eq; ++} ++ ++#ifdef CONFIG_FSL_DPA_CAN_WAIT ++static noinline struct qm_eqcr_entry *__wait_eq_start(struct qman_portal **p, ++ unsigned long *irqflags __maybe_unused, ++ struct qman_fq *fq, ++ const struct qm_fd *fd, ++ u32 flags) ++{ ++ struct qm_eqcr_entry *eq = try_eq_start(p, irqflags, fq, fd, flags); ++ if (!eq) ++ qm_eqcr_set_ithresh(&(*p)->p, EQCR_ITHRESH); ++ return eq; ++} ++static noinline struct qm_eqcr_entry *wait_eq_start(struct qman_portal **p, ++ unsigned long *irqflags __maybe_unused, ++ struct qman_fq *fq, ++ const struct qm_fd *fd, ++ u32 flags) ++{ ++ struct qm_eqcr_entry *eq; ++ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) ++ /* NB: return NULL if signal occurs before completion. Signal ++ * can occur during return. Caller must check for signal */ ++ wait_event_interruptible(affine_queue, ++ (eq = __wait_eq_start(p, irqflags, fq, fd, flags))); ++ else ++ wait_event(affine_queue, ++ (eq = __wait_eq_start(p, irqflags, fq, fd, flags))); ++ return eq; ++} ++static noinline struct qm_eqcr_entry *__wait_p_eq_start(struct qman_portal *p, ++ unsigned long *irqflags __maybe_unused, ++ struct qman_fq *fq, ++ const struct qm_fd *fd, ++ u32 flags) ++{ ++ struct qm_eqcr_entry *eq = try_p_eq_start(p, irqflags, fq, fd, flags); ++ if (!eq) ++ qm_eqcr_set_ithresh(&p->p, EQCR_ITHRESH); ++ return eq; ++} ++static noinline struct qm_eqcr_entry *wait_p_eq_start(struct qman_portal *p, ++ unsigned long *irqflags __maybe_unused, ++ struct qman_fq *fq, ++ const struct qm_fd *fd, ++ u32 flags) ++{ ++ struct qm_eqcr_entry *eq; ++ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) ++ /* NB: return NULL if signal occurs before completion. Signal ++ * can occur during return. 
Caller must check for signal */ ++ wait_event_interruptible(affine_queue, ++ (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags))); ++ else ++ wait_event(affine_queue, ++ (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags))); ++ return eq; ++} ++#endif ++ ++int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq, ++ const struct qm_fd *fd, u32 flags) ++{ ++ struct qm_eqcr_entry *eq; ++ unsigned long irqflags __maybe_unused; ++ ++#ifdef CONFIG_FSL_DPA_CAN_WAIT ++ if (flags & QMAN_ENQUEUE_FLAG_WAIT) ++ eq = wait_p_eq_start(p, &irqflags, fq, fd, flags); ++ else ++#endif ++ eq = try_p_eq_start(p, &irqflags, fq, fd, flags); ++ if (!eq) ++ return -EBUSY; ++ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ ++ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE | ++ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); ++ /* Factor the below out, it's used from qman_enqueue_orp() too */ ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC ++ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && ++ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { ++ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) ++ /* NB: return success even if signal occurs before ++ * condition is true. pvb_commit guarantees success */ ++ wait_event_interruptible(affine_queue, ++ (p->eqci_owned != fq)); ++ else ++ wait_event(affine_queue, (p->eqci_owned != fq)); ++ } ++#endif ++ return 0; ++} ++EXPORT_SYMBOL(qman_p_enqueue); ++ ++int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags) ++{ ++ struct qman_portal *p; ++ struct qm_eqcr_entry *eq; ++ unsigned long irqflags __maybe_unused; ++ ++#ifdef CONFIG_FSL_DPA_CAN_WAIT ++ if (flags & QMAN_ENQUEUE_FLAG_WAIT) ++ eq = wait_eq_start(&p, &irqflags, fq, fd, flags); ++ else ++#endif ++ eq = try_eq_start(&p, &irqflags, fq, fd, flags); ++ if (!eq) ++ return -EBUSY; ++ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ ++ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE | ++ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); ++ /* Factor the below out, it's used from qman_enqueue_orp() too */ ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC ++ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && ++ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { ++ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) ++ /* NB: return success even if signal occurs before ++ * condition is true. 
pvb_commit guarantees success */ ++ wait_event_interruptible(affine_queue, ++ (p->eqci_owned != fq)); ++ else ++ wait_event(affine_queue, (p->eqci_owned != fq)); ++ } ++#endif ++ return 0; ++} ++EXPORT_SYMBOL(qman_enqueue); ++ ++int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq, ++ const struct qm_fd *fd, u32 flags, ++ struct qman_fq *orp, u16 orp_seqnum) ++{ ++ struct qm_eqcr_entry *eq; ++ unsigned long irqflags __maybe_unused; ++ ++#ifdef CONFIG_FSL_DPA_CAN_WAIT ++ if (flags & QMAN_ENQUEUE_FLAG_WAIT) ++ eq = wait_p_eq_start(p, &irqflags, fq, fd, flags); ++ else ++#endif ++ eq = try_p_eq_start(p, &irqflags, fq, fd, flags); ++ if (!eq) ++ return -EBUSY; ++ /* Process ORP-specifics here */ ++ if (flags & QMAN_ENQUEUE_FLAG_NLIS) ++ orp_seqnum |= QM_EQCR_SEQNUM_NLIS; ++ else { ++ orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS; ++ if (flags & QMAN_ENQUEUE_FLAG_NESN) ++ orp_seqnum |= QM_EQCR_SEQNUM_NESN; ++ else ++ /* No need to check 4 QMAN_ENQUEUE_FLAG_HOLE */ ++ orp_seqnum &= ~QM_EQCR_SEQNUM_NESN; ++ } ++ eq->seqnum = cpu_to_be16(orp_seqnum); ++ eq->orp = cpu_to_be32(orp->fqid); ++ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ ++ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP | ++ ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ? ++ 0 : QM_EQCR_VERB_CMD_ENQUEUE) | ++ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC ++ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && ++ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { ++ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) ++ /* NB: return success even if signal occurs before ++ * condition is true. pvb_commit guarantees success */ ++ wait_event_interruptible(affine_queue, ++ (p->eqci_owned != fq)); ++ else ++ wait_event(affine_queue, (p->eqci_owned != fq)); ++ } ++#endif ++ return 0; ++} ++EXPORT_SYMBOL(qman_p_enqueue_orp); ++ ++int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags, ++ struct qman_fq *orp, u16 orp_seqnum) ++{ ++ struct qman_portal *p; ++ struct qm_eqcr_entry *eq; ++ unsigned long irqflags __maybe_unused; ++ ++#ifdef CONFIG_FSL_DPA_CAN_WAIT ++ if (flags & QMAN_ENQUEUE_FLAG_WAIT) ++ eq = wait_eq_start(&p, &irqflags, fq, fd, flags); ++ else ++#endif ++ eq = try_eq_start(&p, &irqflags, fq, fd, flags); ++ if (!eq) ++ return -EBUSY; ++ /* Process ORP-specifics here */ ++ if (flags & QMAN_ENQUEUE_FLAG_NLIS) ++ orp_seqnum |= QM_EQCR_SEQNUM_NLIS; ++ else { ++ orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS; ++ if (flags & QMAN_ENQUEUE_FLAG_NESN) ++ orp_seqnum |= QM_EQCR_SEQNUM_NESN; ++ else ++ /* No need to check 4 QMAN_ENQUEUE_FLAG_HOLE */ ++ orp_seqnum &= ~QM_EQCR_SEQNUM_NESN; ++ } ++ eq->seqnum = cpu_to_be16(orp_seqnum); ++ eq->orp = cpu_to_be32(orp->fqid); ++ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ ++ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP | ++ ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ? ++ 0 : QM_EQCR_VERB_CMD_ENQUEUE) | ++ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC ++ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && ++ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { ++ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) ++ /* NB: return success even if signal occurs before ++ * condition is true. 
pvb_commit guarantees success */ ++ wait_event_interruptible(affine_queue, ++ (p->eqci_owned != fq)); ++ else ++ wait_event(affine_queue, (p->eqci_owned != fq)); ++ } ++#endif ++ return 0; ++} ++EXPORT_SYMBOL(qman_enqueue_orp); ++ ++int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq, ++ const struct qm_fd *fd, u32 flags, ++ qman_cb_precommit cb, void *cb_arg) ++{ ++ struct qm_eqcr_entry *eq; ++ unsigned long irqflags __maybe_unused; ++ ++#ifdef CONFIG_FSL_DPA_CAN_WAIT ++ if (flags & QMAN_ENQUEUE_FLAG_WAIT) ++ eq = wait_p_eq_start(p, &irqflags, fq, fd, flags); ++ else ++#endif ++ eq = try_p_eq_start(p, &irqflags, fq, fd, flags); ++ if (!eq) ++ return -EBUSY; ++ /* invoke user supplied callback function before writing commit verb */ ++ if (cb(cb_arg)) { ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ return -EINVAL; ++ } ++ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ ++ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE | ++ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); ++ /* Factor the below out, it's used from qman_enqueue_orp() too */ ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC ++ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && ++ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { ++ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) ++ /* NB: return success even if signal occurs before ++ * condition is true. pvb_commit guarantees success */ ++ wait_event_interruptible(affine_queue, ++ (p->eqci_owned != fq)); ++ else ++ wait_event(affine_queue, (p->eqci_owned != fq)); ++ } ++#endif ++ return 0; ++} ++EXPORT_SYMBOL(qman_p_enqueue_precommit); ++ ++int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd, ++ u32 flags, qman_cb_precommit cb, void *cb_arg) ++{ ++ struct qman_portal *p; ++ struct qm_eqcr_entry *eq; ++ unsigned long irqflags __maybe_unused; ++ ++#ifdef CONFIG_FSL_DPA_CAN_WAIT ++ if (flags & QMAN_ENQUEUE_FLAG_WAIT) ++ eq = wait_eq_start(&p, &irqflags, fq, fd, flags); ++ else ++#endif ++ eq = try_eq_start(&p, &irqflags, fq, fd, flags); ++ if (!eq) ++ return -EBUSY; ++ /* invoke user supplied callback function before writing commit verb */ ++ if (cb(cb_arg)) { ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ return -EINVAL; ++ } ++ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ ++ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE | ++ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); ++ /* Factor the below out, it's used from qman_enqueue_orp() too */ ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC ++ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && ++ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { ++ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) ++ /* NB: return success even if signal occurs before ++ * condition is true. 
pvb_commit guarantees success */
++			wait_event_interruptible(affine_queue,
++					(p->eqci_owned != fq));
++		else
++			wait_event(affine_queue, (p->eqci_owned != fq));
++	}
++#endif
++	return 0;
++}
++EXPORT_SYMBOL(qman_enqueue_precommit);
++
++int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
++			struct qm_mcc_initcgr *opts)
++{
++	struct qm_mc_command *mcc;
++	struct qm_mc_result *mcr;
++	struct qman_portal *p = get_affine_portal();
++	unsigned long irqflags __maybe_unused;
++	u8 res;
++	u8 verb = QM_MCC_VERB_MODIFYCGR;
++
++	PORTAL_IRQ_LOCK(p, irqflags);
++	mcc = qm_mc_start(&p->p);
++	if (opts)
++		mcc->initcgr = *opts;
++	mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask);
++	mcc->initcgr.cgr.wr_parm_g.word =
++		cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word);
++	mcc->initcgr.cgr.wr_parm_y.word =
++		cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word);
++	mcc->initcgr.cgr.wr_parm_r.word =
++		cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word);
++	mcc->initcgr.cgr.cscn_targ = cpu_to_be32(mcc->initcgr.cgr.cscn_targ);
++	mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres);
++
++	mcc->initcgr.cgid = cgr->cgrid;
++	if (flags & QMAN_CGR_FLAG_USE_INIT)
++		verb = QM_MCC_VERB_INITCGR;
++	qm_mc_commit(&p->p, verb);
++	while (!(mcr = qm_mc_result(&p->p)))
++		cpu_relax();
++	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
++	res = mcr->result;
++	PORTAL_IRQ_UNLOCK(p, irqflags);
++	put_affine_portal();
++	return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
++}
++EXPORT_SYMBOL(qman_modify_cgr);
++
++#define TARG_MASK(n) (0x80000000 >> (n->config->public_cfg.channel - \
++				QM_CHANNEL_SWPORTAL0))
++#define TARG_DCP_MASK(n) (0x80000000 >> (10 + n))
++#define PORTAL_IDX(n) (n->config->public_cfg.channel - QM_CHANNEL_SWPORTAL0)
++
++static u8 qman_cgr_cpus[__CGR_NUM];
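++
++/* Usage sketch (illustrative only; error handling elided, and the
++ * QM_CGR_WE_CSCN_EN / QM_CGR_EN names are assumed from this driver's
++ * write-enable-mask conventions): a caller wanting congestion state
++ * change notifications would typically do something like:
++ *
++ *	struct qman_cgr cgr = { .cgrid = my_cgrid, .cb = my_cscn_cb };
++ *	struct qm_mcc_initcgr opts = {
++ *		.we_mask = QM_CGR_WE_CSCN_EN,
++ *		.cgr = { .cscn_en = QM_CGR_EN },
++ *	};
++ *	err = qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
++ *
++ * 'my_cgrid' and 'my_cscn_cb' are caller-supplied. */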
++
++int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
++			struct qm_mcc_initcgr *opts)
++{
++	unsigned long irqflags __maybe_unused;
++	struct qm_mcr_querycgr cgr_state;
++	struct qm_mcc_initcgr local_opts;
++	int ret;
++	struct qman_portal *p;
++
++	/* We have to check that the provided CGRID is within the limits of
++	 * the data structures, for obvious reasons. However we'll let h/w
++	 * take care of determining whether it's within the limits of what
++	 * exists on the SoC. */
++	if (cgr->cgrid >= __CGR_NUM)
++		return -EINVAL;
++
++	preempt_disable();
++	p = get_affine_portal();
++	qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
++	preempt_enable();
++
++	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
++	cgr->chan = p->config->public_cfg.channel;
++	spin_lock_irqsave(&p->cgr_lock, irqflags);
++
++	/* if no opts specified, just add it to the list */
++	if (!opts)
++		goto add_list;
++
++	ret = qman_query_cgr(cgr, &cgr_state);
++	if (ret)
++		goto release_lock;
++	local_opts = *opts;
++	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
++		local_opts.cgr.cscn_targ_upd_ctrl =
++			QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
++	else
++		/* Overwrite TARG */
++		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
++							TARG_MASK(p);
++	local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
++
++	/* send init if flags indicate so */
++	if (flags & QMAN_CGR_FLAG_USE_INIT)
++		ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
++					&local_opts);
++	else
++		ret = qman_modify_cgr(cgr, 0, &local_opts);
++	if (ret)
++		goto release_lock;
++add_list:
++	list_add(&cgr->node, &p->cgr_cbs);
++
++	/* Determine if the newly added object requires its callback to be
++	 * called right away */
++	ret = qman_query_cgr(cgr, &cgr_state);
++	if (ret) {
++		/* we can't go back, so proceed and return success, but scream
++		 * and wail to the log file */
++		pr_crit("CGR HW state partially modified\n");
++		ret = 0;
++		goto release_lock;
++	}
++	if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
++							cgr->cgrid))
++		cgr->cb(p, cgr, 1);
++release_lock:
++	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
++	put_affine_portal();
++	return ret;
++}
++EXPORT_SYMBOL(qman_create_cgr);
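++
++/* Tear-down sketch (illustrative): deletion must run on the portal/CPU
++ * the CGR was created on; qman_delete_cgr_safe() below migrates to that
++ * CPU automatically, so from arbitrary context prefer:
++ *
++ *	qman_delete_cgr_safe(&cgr);
++ *
++ * while qman_delete_cgr(&cgr) is only valid from the creating CPU. */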
++
++int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
++				struct qm_mcc_initcgr *opts)
++{
++	unsigned long irqflags __maybe_unused;
++	struct qm_mcc_initcgr local_opts;
++	struct qm_mcr_querycgr cgr_state;
++	int ret;
++
++	if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
++		pr_warn("This QMan version does not support sending CSCN to a DCP portal\n");
++		return -EINVAL;
++	}
++	/* We have to check that the provided CGRID is within the limits of
++	 * the data structures, for obvious reasons. However we'll let h/w
++	 * take care of determining whether it's within the limits of what
++	 * exists on the SoC. */
++	if (cgr->cgrid >= __CGR_NUM)
++		return -EINVAL;
++
++	ret = qman_query_cgr(cgr, &cgr_state);
++	if (ret)
++		return ret;
++
++	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
++	if (opts)
++		local_opts = *opts;
++
++	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
++		local_opts.cgr.cscn_targ_upd_ctrl =
++				QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
++				QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
++	else
++		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
++				TARG_DCP_MASK(dcp_portal);
++	local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
++
++	/* send init if flags indicate so */
++	if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
++		ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
++					&local_opts);
++	else
++		ret = qman_modify_cgr(cgr, 0, &local_opts);
++
++	return ret;
++}
++EXPORT_SYMBOL(qman_create_cgr_to_dcp);
++
++int qman_delete_cgr(struct qman_cgr *cgr)
++{
++	unsigned long irqflags __maybe_unused;
++	struct qm_mcr_querycgr cgr_state;
++	struct qm_mcc_initcgr local_opts;
++	int ret = 0;
++	struct qman_cgr *i;
++	struct qman_portal *p = get_affine_portal();
++
++	if (cgr->chan != p->config->public_cfg.channel) {
++		pr_crit("Attempting to delete cgr from a different portal than it was created on: created 0x%x, deleting 0x%x\n",
++			cgr->chan, p->config->public_cfg.channel);
++		ret = -EINVAL;
++		goto put_portal;
++	}
++	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
++	spin_lock_irqsave(&p->cgr_lock, irqflags);
++	list_del(&cgr->node);
++	/*
++	 * If there are no other CGR objects for this CGRID in the list,
++	 * update CSCN_TARG accordingly
++	 */
++	list_for_each_entry(i, &p->cgr_cbs, node)
++		if ((i->cgrid == cgr->cgrid) && i->cb)
++			goto release_lock;
++	ret = qman_query_cgr(cgr, &cgr_state);
++	if (ret) {
++		/* add back to the list */
++		list_add(&cgr->node, &p->cgr_cbs);
++		goto release_lock;
++	}
++	/* Overwrite TARG */
++	local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
++	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
++		local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
++	else
++		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
++				~(TARG_MASK(p));
++	ret = qman_modify_cgr(cgr, 0, &local_opts);
++	if (ret)
++		/* add back to the list */
++		list_add(&cgr->node, &p->cgr_cbs);
++release_lock:
++	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
++put_portal:
++	put_affine_portal();
++	return ret;
++}
++EXPORT_SYMBOL(qman_delete_cgr);
++
++struct cgr_comp {
++	struct qman_cgr *cgr;
++	struct completion completion;
++};
++
++static int qman_delete_cgr_thread(void *p)
++{
++	struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
++	int res;
++
++	res = qman_delete_cgr(cgr_comp->cgr);
++	complete(&cgr_comp->completion);
++
++	return res;
++}
++
++void qman_delete_cgr_safe(struct qman_cgr *cgr)
++{
++	struct task_struct *thread;
++	struct cgr_comp cgr_comp;
++
++	preempt_disable();
++	if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
++		init_completion(&cgr_comp.completion);
++		cgr_comp.cgr = cgr;
++		thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
++					"cgr_del");
++
++		if (likely(!IS_ERR(thread))) {
++			kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
++			wake_up_process(thread);
++			wait_for_completion(&cgr_comp.completion);
++			preempt_enable();
++			return;
++		}
++	}
++	qman_delete_cgr(cgr);
++	preempt_enable();
++}
++EXPORT_SYMBOL(qman_delete_cgr_safe);
++
++int qm_get_clock(u64 *clock_hz)
++{
++	if (!qman_clk) {
++		pr_warn("Qman clock speed is unknown\n");
++		return -EINVAL;
++	}
++	*clock_hz = (u64)qman_clk;
++	return 0;
++}
++EXPORT_SYMBOL(qm_get_clock);
++
++int
qm_set_clock(u64 clock_hz) ++{ ++ if (qman_clk) ++ return -1; ++ qman_clk = (u32)clock_hz; ++ return 0; ++} ++EXPORT_SYMBOL(qm_set_clock); ++ ++/* CEETM management command */ ++static int qman_ceetm_configure_lfqmt(struct qm_mcc_ceetm_lfqmt_config *opts) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ u8 res; ++ ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ ++ mcc = qm_mc_start(&p->p); ++ mcc->lfqmt_config = *opts; ++ qm_mc_commit(&p->p, QM_CEETM_VERB_LFQMT_CONFIG); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == ++ QM_CEETM_VERB_LFQMT_CONFIG); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ ++ res = mcr->result; ++ if (res != QM_MCR_RESULT_OK) { ++ pr_err("CEETM: CONFIGURE LFQMT failed\n"); ++ return -EIO; ++ } ++ return 0; ++} ++ ++int qman_ceetm_query_lfqmt(int lfqid, ++ struct qm_mcr_ceetm_lfqmt_query *lfqmt_query) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ u8 res; ++ ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ ++ mcc = qm_mc_start(&p->p); ++ mcc->lfqmt_query.lfqid = lfqid; ++ qm_mc_commit(&p->p, QM_CEETM_VERB_LFQMT_QUERY); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_LFQMT_QUERY); ++ res = mcr->result; ++ if (res == QM_MCR_RESULT_OK) ++ *lfqmt_query = mcr->lfqmt_query; ++ ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ if (res != QM_MCR_RESULT_OK) { ++ pr_err("CEETM: QUERY LFQMT failed\n"); ++ return -EIO; ++ } ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_query_lfqmt); ++ ++static int qman_ceetm_configure_cq(struct qm_mcc_ceetm_cq_config *opts) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ u8 res; ++ ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ ++ mcc = qm_mc_start(&p->p); ++ mcc->cq_config = *opts; ++ qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_CONFIG); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ res = mcr->result; ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CQ_CONFIG); ++ ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ ++ if (res != QM_MCR_RESULT_OK) { ++ pr_err("CEETM: CONFIGURE CQ failed\n"); ++ return -EIO; ++ } ++ return 0; ++} ++ ++int qman_ceetm_query_cq(unsigned int cqid, unsigned int dcpid, ++ struct qm_mcr_ceetm_cq_query *cq_query) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ u8 res; ++ ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ ++ mcc = qm_mc_start(&p->p); ++ mcc->cq_query.cqid = cpu_to_be16(cqid); ++ mcc->cq_query.dcpid = dcpid; ++ qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_QUERY); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CQ_QUERY); ++ res = mcr->result; ++ if (res == QM_MCR_RESULT_OK) { ++ *cq_query = mcr->cq_query; ++ hw_cq_query_to_cpu(cq_query); ++ } ++ ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ ++ if (res != QM_MCR_RESULT_OK) { ++ pr_err("CEETM: QUERY CQ failed\n"); ++ return -EIO; ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_query_cq); ++ ++static int qman_ceetm_configure_dct(struct qm_mcc_ceetm_dct_config *opts) ++{ ++ struct qm_mc_command *mcc; ++ 
struct qm_mc_result *mcr; ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ u8 res; ++ ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ ++ mcc = qm_mc_start(&p->p); ++ mcc->dct_config = *opts; ++ qm_mc_commit(&p->p, QM_CEETM_VERB_DCT_CONFIG); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_DCT_CONFIG); ++ res = mcr->result; ++ ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ ++ if (res != QM_MCR_RESULT_OK) { ++ pr_err("CEETM: CONFIGURE DCT failed\n"); ++ return -EIO; ++ } ++ return 0; ++} ++ ++static int qman_ceetm_query_dct(struct qm_mcc_ceetm_dct_query *opts, ++ struct qm_mcr_ceetm_dct_query *dct_query) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p = get_affine_portal(); ++ unsigned long irqflags __maybe_unused; ++ u8 res; ++ ++ PORTAL_IRQ_LOCK(p, irqflags); ++ ++ mcc = qm_mc_start(&p->p); ++ mcc->dct_query = *opts; ++ qm_mc_commit(&p->p, QM_CEETM_VERB_DCT_QUERY); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_DCT_QUERY); ++ res = mcr->result; ++ ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ ++ if (res != QM_MCR_RESULT_OK) { ++ pr_err("CEETM: QUERY DCT failed\n"); ++ return -EIO; ++ } ++ ++ *dct_query = mcr->dct_query; ++ return 0; ++} ++ ++static int qman_ceetm_configure_class_scheduler( ++ struct qm_mcc_ceetm_class_scheduler_config *opts) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ u8 res; ++ ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ ++ mcc = qm_mc_start(&p->p); ++ mcc->csch_config = *opts; ++ qm_mc_commit(&p->p, QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == ++ QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG); ++ res = mcr->result; ++ ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ ++ if (res != QM_MCR_RESULT_OK) { ++ pr_err("CEETM: CONFIGURE CLASS SCHEDULER failed\n"); ++ return -EIO; ++ } ++ return 0; ++} ++ ++static int qman_ceetm_query_class_scheduler(struct qm_ceetm_channel *channel, ++ struct qm_mcr_ceetm_class_scheduler_query *query) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ u8 res; ++ ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ ++ mcc = qm_mc_start(&p->p); ++ mcc->csch_query.cqcid = cpu_to_be16(channel->idx); ++ mcc->csch_query.dcpid = channel->dcp_idx; ++ qm_mc_commit(&p->p, QM_CEETM_VERB_CLASS_SCHEDULER_QUERY); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == ++ QM_CEETM_VERB_CLASS_SCHEDULER_QUERY); ++ res = mcr->result; ++ ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ ++ if (res != QM_MCR_RESULT_OK) { ++ pr_err("CEETM: QUERY CLASS SCHEDULER failed\n"); ++ return -EIO; ++ } ++ *query = mcr->csch_query; ++ return 0; ++} ++ ++static int qman_ceetm_configure_mapping_shaper_tcfc( ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config *opts) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ u8 res; ++ ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ ++ mcc = qm_mc_start(&p->p); ++ mcc->mst_config = *opts; ++ qm_mc_commit(&p->p, 
QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == ++ QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG); ++ res = mcr->result; ++ ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ ++ if (res != QM_MCR_RESULT_OK) { ++ pr_err("CEETM: CONFIGURE CHANNEL MAPPING failed\n"); ++ return -EIO; ++ } ++ return 0; ++} ++ ++static int qman_ceetm_query_mapping_shaper_tcfc( ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query *opts, ++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query *response) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ u8 res; ++ ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ ++ mcc = qm_mc_start(&p->p); ++ mcc->mst_query = *opts; ++ qm_mc_commit(&p->p, QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == ++ QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY); ++ res = mcr->result; ++ ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ ++ if (res != QM_MCR_RESULT_OK) { ++ pr_err("CEETM: QUERY CHANNEL MAPPING failed\n"); ++ return -EIO; ++ } ++ ++ *response = mcr->mst_query; ++ return 0; ++} ++ ++static int qman_ceetm_configure_ccgr(struct qm_mcc_ceetm_ccgr_config *opts) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ u8 res; ++ ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ ++ mcc = qm_mc_start(&p->p); ++ mcc->ccgr_config = *opts; ++ ++ qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_CONFIG); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CCGR_CONFIG); ++ ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ ++ res = mcr->result; ++ if (res != QM_MCR_RESULT_OK) { ++ pr_err("CEETM: CONFIGURE CCGR failed\n"); ++ return -EIO; ++ } ++ return 0; ++} ++ ++int qman_ceetm_query_ccgr(struct qm_mcc_ceetm_ccgr_query *ccgr_query, ++ struct qm_mcr_ceetm_ccgr_query *response) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ u8 res; ++ ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ ++ mcc = qm_mc_start(&p->p); ++ mcc->ccgr_query.ccgrid = cpu_to_be16(ccgr_query->ccgrid); ++ mcc->ccgr_query.dcpid = ccgr_query->dcpid; ++ qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY); ++ ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CCGR_QUERY); ++ res = mcr->result; ++ if (res == QM_MCR_RESULT_OK) { ++ *response = mcr->ccgr_query; ++ hw_ccgr_query_to_cpu(response); ++ } ++ ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ if (res != QM_MCR_RESULT_OK) { ++ pr_err("CEETM: QUERY CCGR failed\n"); ++ return -EIO; ++ } ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_query_ccgr); ++ ++static int qman_ceetm_cq_peek_pop_xsfdrread(struct qm_ceetm_cq *cq, ++ u8 command_type, u16 xsfdr, ++ struct qm_mcr_ceetm_cq_peek_pop_xsfdrread *cq_ppxr) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ u8 res; ++ ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ ++ mcc = qm_mc_start(&p->p); ++ switch (command_type) { ++ case 0: ++ case 1: ++ mcc->cq_ppxr.cqid = (cq->parent->idx << 4) | cq->idx; ++ break; ++ case 
2: ++ mcc->cq_ppxr.xsfdr = xsfdr; ++ break; ++ default: ++ break; ++ } ++ mcc->cq_ppxr.ct = command_type; ++ mcc->cq_ppxr.dcpid = cq->parent->dcp_idx; ++ qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD); ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == ++ QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD); ++ ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ ++ res = mcr->result; ++ if (res != QM_MCR_RESULT_OK) { ++ pr_err("CEETM: CQ PEEK/POP/XSFDR READ failed\n"); ++ return -EIO; ++ } ++ *cq_ppxr = mcr->cq_ppxr; ++ return 0; ++} ++ ++static int qman_ceetm_query_statistics(u16 cid, ++ enum qm_dc_portal dcp_idx, ++ u16 command_type, ++ struct qm_mcr_ceetm_statistics_query *query_result) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ u8 res; ++ ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ ++ mcc = qm_mc_start(&p->p); ++ mcc->stats_query_write.cid = cid; ++ mcc->stats_query_write.dcpid = dcp_idx; ++ mcc->stats_query_write.ct = command_type; ++ qm_mc_commit(&p->p, QM_CEETM_VERB_STATISTICS_QUERY_WRITE); ++ ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == ++ QM_CEETM_VERB_STATISTICS_QUERY_WRITE); ++ ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ ++ res = mcr->result; ++ if (res != QM_MCR_RESULT_OK) { ++ pr_err("CEETM: STATISTICS QUERY failed\n"); ++ return -EIO; ++ } ++ *query_result = mcr->stats_query; ++ return 0; ++} ++ ++int qman_ceetm_query_write_statistics(u16 cid, enum qm_dc_portal dcp_idx, ++ u16 command_type, u64 frame_count, ++ u64 byte_count) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ u8 res; ++ ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ ++ mcc = qm_mc_start(&p->p); ++ mcc->stats_query_write.cid = cid; ++ mcc->stats_query_write.dcpid = dcp_idx; ++ mcc->stats_query_write.ct = command_type; ++ mcc->stats_query_write.frm_cnt = frame_count; ++ mcc->stats_query_write.byte_cnt = byte_count; ++ qm_mc_commit(&p->p, QM_CEETM_VERB_STATISTICS_QUERY_WRITE); ++ ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == ++ QM_CEETM_VERB_STATISTICS_QUERY_WRITE); ++ ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ ++ res = mcr->result; ++ if (res != QM_MCR_RESULT_OK) { ++ pr_err("CEETM: STATISTICS WRITE failed\n"); ++ return -EIO; ++ } ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_query_write_statistics); ++ ++int qman_ceetm_bps2tokenrate(u64 bps, struct qm_ceetm_rate *token_rate, ++ int rounding) ++{ ++ u16 pres; ++ u64 temp; ++ u64 qman_freq; ++ int ret; ++ ++ /* Read PRES from CEET_CFG_PRES register */ ++ ret = qman_ceetm_get_prescaler(&pres); ++ if (ret) ++ return -EINVAL; ++ ++ ret = qm_get_clock(&qman_freq); ++ if (ret) ++ return -EINVAL; ++ ++ /* token-rate = bytes-per-second * update-reference-period ++ * ++ * Where token-rate is N/8192 for an integer N, and ++ * update-reference-period is (2^22)/(PRES*QHz), where PRES ++ * is the prescaler value and QHz is the QMan clock frequency. 
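++ * (Illustrative numbers only: with PRES = 1 and a 500 MHz QMan ++ * clock, the update-reference-period would be 2^22/(5*10^8) s, ++ * i.e. roughly 8.4 ms; neither value is a hardware default.) 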
++ * So: ++ * ++ * token-rate = (bytes-per-second*2^22)/(PRES*QHz) ++ * ++ * Converting to bits-per-second gives: ++ * ++ * token-rate = (bps*2^19) / (PRES*QHz) ++ * N = (bps*2^32) / (PRES*QHz) ++ * ++ * And to avoid 64-bit overflow if 'bps' is larger than 4Gbps ++ * (yet minimise rounding error if 'bps' is small), we reorganise ++ * the formula to use two 16-bit shifts rather than 1 32-bit shift. ++ * N = (((bps*2^16)/PRES)*2^16)/QHz ++ */ ++ temp = ROUNDING((bps << 16), pres, rounding); ++ temp = ROUNDING((temp << 16), qman_freq, rounding); ++ token_rate->whole = temp >> 13; ++ token_rate->fraction = temp & (((u64)1 << 13) - 1); ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_bps2tokenrate); ++ ++int qman_ceetm_tokenrate2bps(const struct qm_ceetm_rate *token_rate, u64 *bps, ++ int rounding) ++{ ++ u16 pres; ++ u64 temp; ++ u64 qman_freq; ++ int ret; ++ ++ /* Read PRES from CEET_CFG_PRES register */ ++ ret = qman_ceetm_get_prescaler(&pres); ++ if (ret) ++ return -EINVAL; ++ ++ ret = qm_get_clock(&qman_freq); ++ if (ret) ++ return -EINVAL; ++ ++ /* bytes-per-second = token-rate / update-reference-period ++ * ++ * where "token-rate" is N/8192 for an integer N, and ++ * "update-reference-period" is (2^22)/(PRES*QHz), where PRES is ++ * the prescaler value and QHz is the QMan clock frequency. So: ++ * ++ * bytes-per-second = (N/8192) / (4194304/(PRES*QHz)) ++ * = N*PRES*QHz / (4194304*8192) ++ * = N*PRES*QHz / (2^35) ++ * ++ * Converting to bits-per-second gives: ++ * ++ * bps = N*PRES*QHz / (2^32) ++ * ++ * Note, the numerator has a maximum width of 72 bits! So to ++ * avoid 64-bit overflow errors, we calculate PRES*QHz (maximum ++ * width 48 bits) divided by 2^16 (reducing to maximum 32 bits), before ++ * multiplying by N (goes to maximum of 56 bits). ++ * ++ * temp = PRES*QHz / (2^16) ++ * bps = temp*N / (2^16) ++ */ ++ temp = ROUNDING(qman_freq * pres, (u64)1 << 16, rounding); ++ temp *= ((token_rate->whole << 13) + token_rate->fraction); ++ *bps = ROUNDING(temp, (u64)1 << 16, rounding); ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_tokenrate2bps); ++ ++int qman_ceetm_sp_claim(struct qm_ceetm_sp **sp, enum qm_dc_portal dcp_idx, ++ unsigned int sp_idx) ++{ ++ struct qm_ceetm_sp *p; ++ ++ DPA_ASSERT((dcp_idx == qm_dc_portal_fman0) || ++ (dcp_idx == qm_dc_portal_fman1)); ++ ++ if ((sp_idx < qman_ceetms[dcp_idx].sp_range[0]) || ++ (sp_idx >= (qman_ceetms[dcp_idx].sp_range[0] + ++ qman_ceetms[dcp_idx].sp_range[1]))) { ++ pr_err("Sub-portal index doesn't exist\n"); ++ return -EINVAL; ++ } ++ ++ list_for_each_entry(p, &qman_ceetms[dcp_idx].sub_portals, node) { ++ if ((p->idx == sp_idx) && (p->is_claimed == 0)) { ++ p->is_claimed = 1; ++ *sp = p; ++ return 0; ++ } ++ } ++ pr_err("The sub-portal#%d is not available!\n", sp_idx); ++ return -ENODEV; ++} ++EXPORT_SYMBOL(qman_ceetm_sp_claim); ++ ++int qman_ceetm_sp_release(struct qm_ceetm_sp *sp) ++{ ++ struct qm_ceetm_sp *p; ++ ++ if (sp->lni && sp->lni->is_claimed == 1) { ++ pr_err("The dependency of sub-portal has not been released!\n"); ++ return -EBUSY; ++ } ++ ++ list_for_each_entry(p, &qman_ceetms[sp->dcp_idx].sub_portals, node) { ++ if (p->idx == sp->idx) { ++ p->is_claimed = 0; ++ p->lni = NULL; ++ } ++ } ++ /* Disable CEETM mode of this sub-portal */ ++ qman_sp_disable_ceetm_mode(sp->dcp_idx, sp->idx); ++ ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_sp_release); ++ ++int qman_ceetm_lni_claim(struct qm_ceetm_lni **lni, enum qm_dc_portal dcp_idx, ++ unsigned int lni_idx) ++{ ++ struct qm_ceetm_lni *p; ++ ++ if ((lni_idx < 
qman_ceetms[dcp_idx].lni_range[0]) || ++ (lni_idx >= (qman_ceetms[dcp_idx].lni_range[0] + ++ qman_ceetms[dcp_idx].lni_range[1]))) { ++ pr_err("The lni index is out of range\n"); ++ return -EINVAL; ++ } ++ ++ list_for_each_entry(p, &qman_ceetms[dcp_idx].lnis, node) { ++ if ((p->idx == lni_idx) && (p->is_claimed == 0)) { ++ *lni = p; ++ p->is_claimed = 1; ++ return 0; ++ } ++ } ++ ++ pr_err("The LNI#%d is not available!\n", lni_idx); ++ return -EINVAL; ++} ++EXPORT_SYMBOL(qman_ceetm_lni_claim); ++ ++int qman_ceetm_lni_release(struct qm_ceetm_lni *lni) ++{ ++ struct qm_ceetm_lni *p; ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; ++ ++ if (!list_empty(&lni->channels)) { ++ pr_err("The LNI dependencies are not released!\n"); ++ return -EBUSY; ++ } ++ ++ list_for_each_entry(p, &qman_ceetms[lni->dcp_idx].lnis, node) { ++ if (p->idx == lni->idx) { ++ p->shaper_enable = 0; ++ p->shaper_couple = 0; ++ p->cr_token_rate.whole = 0; ++ p->cr_token_rate.fraction = 0; ++ p->er_token_rate.whole = 0; ++ p->er_token_rate.fraction = 0; ++ p->cr_token_bucket_limit = 0; ++ p->er_token_bucket_limit = 0; ++ p->is_claimed = 0; ++ } ++ } ++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx); ++ config_opts.dcpid = lni->dcp_idx; ++ memset(&config_opts.shaper_config, 0, ++ sizeof(config_opts.shaper_config)); ++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); ++} ++EXPORT_SYMBOL(qman_ceetm_lni_release); ++ ++int qman_ceetm_sp_set_lni(struct qm_ceetm_sp *sp, struct qm_ceetm_lni *lni) ++{ ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; ++ ++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_SP_MAPPING | sp->idx); ++ config_opts.dcpid = sp->dcp_idx; ++ config_opts.sp_mapping.map_lni_id = lni->idx; ++ sp->lni = lni; ++ ++ if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) ++ return -EINVAL; ++ ++ /* Enable CEETM mode for this sub-portal */ ++ return qman_sp_enable_ceetm_mode(sp->dcp_idx, sp->idx); ++} ++EXPORT_SYMBOL(qman_ceetm_sp_set_lni); ++ ++int qman_ceetm_sp_get_lni(struct qm_ceetm_sp *sp, unsigned int *lni_idx) ++{ ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; ++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; ++ ++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_SP_MAPPING | sp->idx); ++ query_opts.dcpid = sp->dcp_idx; ++ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) { ++ pr_err("Can't get SP <-> LNI mapping\n"); ++ return -EINVAL; ++ } ++ *lni_idx = query_result.sp_mapping_query.map_lni_id; ++ sp->lni->idx = query_result.sp_mapping_query.map_lni_id; ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_sp_get_lni); ++ ++int qman_ceetm_lni_enable_shaper(struct qm_ceetm_lni *lni, int coupled, ++ int oal) ++{ ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; ++ ++ if (lni->shaper_enable) { ++ pr_err("The shaper has already been enabled\n"); ++ return -EINVAL; ++ } ++ lni->shaper_enable = 1; ++ lni->shaper_couple = coupled; ++ lni->oal = oal; ++ ++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx); ++ config_opts.dcpid = lni->dcp_idx; ++ config_opts.shaper_config.cpl = coupled; ++ config_opts.shaper_config.oal = oal; ++ config_opts.shaper_config.crtcr = cpu_to_be24((lni->cr_token_rate.whole ++ << 13) | lni->cr_token_rate.fraction); ++ config_opts.shaper_config.ertcr = cpu_to_be24((lni->er_token_rate.whole ++ << 13) | lni->er_token_rate.fraction); ++ config_opts.shaper_config.crtbl = ++ cpu_to_be16(lni->cr_token_bucket_limit); ++ config_opts.shaper_config.ertbl = ++ 
cpu_to_be16(lni->er_token_bucket_limit); ++ config_opts.shaper_config.mps = 60; ++ ++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); ++} ++EXPORT_SYMBOL(qman_ceetm_lni_enable_shaper); ++ ++int qman_ceetm_lni_disable_shaper(struct qm_ceetm_lni *lni) ++{ ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; ++ ++ if (!lni->shaper_enable) { ++ pr_err("The shaper has been disabled\n"); ++ return -EINVAL; ++ } ++ ++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx); ++ config_opts.dcpid = lni->dcp_idx; ++ config_opts.shaper_config.cpl = lni->shaper_couple; ++ config_opts.shaper_config.oal = lni->oal; ++ config_opts.shaper_config.crtbl = ++ cpu_to_be16(lni->cr_token_bucket_limit); ++ config_opts.shaper_config.ertbl = ++ cpu_to_be16(lni->er_token_bucket_limit); ++ /* Set CR/ER rate with all 1's to configure an infinite rate, thus ++ * disable the shaping. ++ */ ++ config_opts.shaper_config.crtcr = 0xFFFFFF; ++ config_opts.shaper_config.ertcr = 0xFFFFFF; ++ config_opts.shaper_config.mps = 60; ++ lni->shaper_enable = 0; ++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); ++} ++EXPORT_SYMBOL(qman_ceetm_lni_disable_shaper); ++ ++int qman_ceetm_lni_is_shaper_enabled(struct qm_ceetm_lni *lni) ++{ ++ return lni->shaper_enable; ++} ++EXPORT_SYMBOL(qman_ceetm_lni_is_shaper_enabled); ++ ++int qman_ceetm_lni_set_commit_rate(struct qm_ceetm_lni *lni, ++ const struct qm_ceetm_rate *token_rate, ++ u16 token_limit) ++{ ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; ++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; ++ int ret; ++ ++ lni->cr_token_rate.whole = token_rate->whole; ++ lni->cr_token_rate.fraction = token_rate->fraction; ++ lni->cr_token_bucket_limit = token_limit; ++ if (!lni->shaper_enable) ++ return 0; ++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx); ++ query_opts.dcpid = lni->dcp_idx; ++ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, ++ &query_result); ++ if (ret) { ++ pr_err("Fail to get current LNI shaper setting\n"); ++ return -EINVAL; ++ } ++ ++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx); ++ config_opts.dcpid = lni->dcp_idx; ++ config_opts.shaper_config.crtcr = cpu_to_be24((token_rate->whole << 13) ++ | (token_rate->fraction)); ++ config_opts.shaper_config.crtbl = cpu_to_be16(token_limit); ++ config_opts.shaper_config.cpl = query_result.shaper_query.cpl; ++ config_opts.shaper_config.oal = query_result.shaper_query.oal; ++ config_opts.shaper_config.ertcr = query_result.shaper_query.ertcr; ++ config_opts.shaper_config.ertbl = query_result.shaper_query.ertbl; ++ config_opts.shaper_config.mps = query_result.shaper_query.mps; ++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); ++} ++EXPORT_SYMBOL(qman_ceetm_lni_set_commit_rate); ++ ++int qman_ceetm_lni_set_commit_rate_bps(struct qm_ceetm_lni *lni, ++ u64 bps, ++ u16 token_limit) ++{ ++ struct qm_ceetm_rate token_rate; ++ int ret; ++ ++ ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0); ++ if (ret) { ++ pr_err("Can not convert bps to token rate\n"); ++ return -EINVAL; ++ } ++ ++ return qman_ceetm_lni_set_commit_rate(lni, &token_rate, token_limit); ++} ++EXPORT_SYMBOL(qman_ceetm_lni_set_commit_rate_bps); ++ ++int qman_ceetm_lni_get_commit_rate(struct qm_ceetm_lni *lni, ++ struct qm_ceetm_rate *token_rate, ++ u16 *token_limit) ++{ ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; ++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query 
query_result; ++ int ret; ++ ++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx); ++ query_opts.dcpid = lni->dcp_idx; ++ ++ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); ++ if (ret) { ++ pr_err("The LNI CR rate or limit is not set\n"); ++ return -EINVAL; ++ } ++ token_rate->whole = be24_to_cpu(query_result.shaper_query.crtcr) >> 13; ++ token_rate->fraction = be24_to_cpu(query_result.shaper_query.crtcr) & ++ 0x1FFF; ++ *token_limit = be16_to_cpu(query_result.shaper_query.crtbl); ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_lni_get_commit_rate); ++ ++int qman_ceetm_lni_get_commit_rate_bps(struct qm_ceetm_lni *lni, ++ u64 *bps, u16 *token_limit) ++{ ++ struct qm_ceetm_rate token_rate; ++ int ret; ++ ++ ret = qman_ceetm_lni_get_commit_rate(lni, &token_rate, token_limit); ++ if (ret) { ++ pr_err("The LNI CR rate or limit is not available\n"); ++ return -EINVAL; ++ } ++ ++ return qman_ceetm_tokenrate2bps(&token_rate, bps, 0); ++} ++EXPORT_SYMBOL(qman_ceetm_lni_get_commit_rate_bps); ++ ++int qman_ceetm_lni_set_excess_rate(struct qm_ceetm_lni *lni, ++ const struct qm_ceetm_rate *token_rate, ++ u16 token_limit) ++{ ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; ++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; ++ int ret; ++ ++ lni->er_token_rate.whole = token_rate->whole; ++ lni->er_token_rate.fraction = token_rate->fraction; ++ lni->er_token_bucket_limit = token_limit; ++ if (!lni->shaper_enable) ++ return 0; ++ ++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx); ++ query_opts.dcpid = lni->dcp_idx; ++ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, ++ &query_result); ++ if (ret) { ++ pr_err("Fail to get current LNI shaper setting\n"); ++ return -EINVAL; ++ } ++ ++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx); ++ config_opts.dcpid = lni->dcp_idx; ++ config_opts.shaper_config.ertcr = cpu_to_be24( ++ (token_rate->whole << 13) | (token_rate->fraction)); ++ config_opts.shaper_config.ertbl = cpu_to_be16(token_limit); ++ config_opts.shaper_config.cpl = query_result.shaper_query.cpl; ++ config_opts.shaper_config.oal = query_result.shaper_query.oal; ++ config_opts.shaper_config.crtcr = query_result.shaper_query.crtcr; ++ config_opts.shaper_config.crtbl = query_result.shaper_query.crtbl; ++ config_opts.shaper_config.mps = query_result.shaper_query.mps; ++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); ++} ++EXPORT_SYMBOL(qman_ceetm_lni_set_excess_rate); ++ ++int qman_ceetm_lni_set_excess_rate_bps(struct qm_ceetm_lni *lni, ++ u64 bps, ++ u16 token_limit) ++{ ++ struct qm_ceetm_rate token_rate; ++ int ret; ++ ++ ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0); ++ if (ret) { ++ pr_err("Can not convert bps to token rate\n"); ++ return -EINVAL; ++ } ++ return qman_ceetm_lni_set_excess_rate(lni, &token_rate, token_limit); ++} ++EXPORT_SYMBOL(qman_ceetm_lni_set_excess_rate_bps); ++ ++int qman_ceetm_lni_get_excess_rate(struct qm_ceetm_lni *lni, ++ struct qm_ceetm_rate *token_rate, ++ u16 *token_limit) ++{ ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; ++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; ++ int ret; ++ ++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx); ++ query_opts.dcpid = lni->dcp_idx; ++ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); ++ if (ret) { ++ pr_err("The LNI ER rate or limit is not set\n"); ++ return -EINVAL; ++ } ++ 
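/* crtcr/ertcr hold a 24-bit fixed-point token rate: the upper 11 bits ++ * are the whole part, the lower 13 bits the fraction, hence the ++ * shift/mask below. ++ */ ++ 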
token_rate->whole = be24_to_cpu(query_result.shaper_query.ertcr) >> 13; ++ token_rate->fraction = be24_to_cpu(query_result.shaper_query.ertcr) & ++ 0x1FFF; ++ *token_limit = be16_to_cpu(query_result.shaper_query.ertbl); ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_lni_get_excess_rate); ++ ++int qman_ceetm_lni_get_excess_rate_bps(struct qm_ceetm_lni *lni, ++ u64 *bps, u16 *token_limit) ++{ ++ struct qm_ceetm_rate token_rate; ++ int ret; ++ ++ ret = qman_ceetm_lni_get_excess_rate(lni, &token_rate, token_limit); ++ if (ret) { ++ pr_err("The LNI ER rate or limit is not available\n"); ++ return -EINVAL; ++ } ++ ++ return qman_ceetm_tokenrate2bps(&token_rate, bps, 0); ++} ++EXPORT_SYMBOL(qman_ceetm_lni_get_excess_rate_bps); ++ ++#define QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(n) ((15 - (n)) * 4) ++#define QMAN_CEETM_LNITCFCC_ENABLE 0x8 ++int qman_ceetm_lni_set_tcfcc(struct qm_ceetm_lni *lni, ++ unsigned int cq_level, ++ int traffic_class) ++{ ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; ++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; ++ u64 lnitcfcc; ++ ++ if ((cq_level > 15) || (traffic_class > 7)) { ++ pr_err("The CQ or traffic class id is out of range\n"); ++ return -EINVAL; ++ } ++ ++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_TCFC | lni->idx); ++ query_opts.dcpid = lni->dcp_idx; ++ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) { ++ pr_err("Fail to query tcfcc\n"); ++ return -EINVAL; ++ } ++ ++ lnitcfcc = be64_to_cpu(query_result.tcfc_query.lnitcfcc); ++ if (traffic_class == -1) { ++ /* disable tcfc for this CQ */ ++ lnitcfcc &= ~((u64)QMAN_CEETM_LNITCFCC_ENABLE << ++ QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level)); ++ } else { ++ lnitcfcc &= ~((u64)0xF << ++ QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level)); ++ lnitcfcc |= ((u64)(QMAN_CEETM_LNITCFCC_ENABLE | ++ traffic_class)) << ++ QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level); ++ } ++ config_opts.tcfc_config.lnitcfcc = cpu_to_be64(lnitcfcc); ++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_TCFC | lni->idx); ++ config_opts.dcpid = lni->dcp_idx; ++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); ++} ++EXPORT_SYMBOL(qman_ceetm_lni_set_tcfcc); ++ ++#define QMAN_CEETM_LNITCFCC_TC_MASK 0x7 ++int qman_ceetm_lni_get_tcfcc(struct qm_ceetm_lni *lni, unsigned int cq_level, ++ int *traffic_class) ++{ ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; ++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; ++ int ret; ++ u8 lnitcfcc; ++ ++ if (cq_level > 15) { ++ pr_err("The CQ level is out of range\n"); ++ return -EINVAL; ++ } ++ ++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_TCFC | lni->idx); ++ query_opts.dcpid = lni->dcp_idx; ++ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); ++ if (ret) ++ return ret; ++ lnitcfcc = (u8)(be64_to_cpu(query_result.tcfc_query.lnitcfcc) >> ++ QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level)); ++ if (lnitcfcc & QMAN_CEETM_LNITCFCC_ENABLE) ++ *traffic_class = lnitcfcc & QMAN_CEETM_LNITCFCC_TC_MASK; ++ else ++ *traffic_class = -1; ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_lni_get_tcfcc); ++ ++int qman_ceetm_channel_claim(struct qm_ceetm_channel **channel, ++ struct qm_ceetm_lni *lni) ++{ ++ struct qm_ceetm_channel *p; ++ u32 channel_idx; ++ int ret = 0; ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; ++ ++ if (lni->dcp_idx == qm_dc_portal_fman0) { ++ ret = qman_alloc_ceetm0_channel(&channel_idx); ++ } else if (lni->dcp_idx == qm_dc_portal_fman1) { ++ ret = 
qman_alloc_ceetm1_channel(&channel_idx); ++ } else { ++ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n", ++ lni->dcp_idx); ++ return -EINVAL; ++ } ++ ++ if (ret) { ++ pr_err("There is no channel available for LNI#%d\n", lni->idx); ++ return -ENODEV; ++ } ++ ++ p = kzalloc(sizeof(*p), GFP_KERNEL); ++ if (!p) ++ return -ENOMEM; ++ p->idx = channel_idx; ++ p->dcp_idx = lni->dcp_idx; ++ p->lni_idx = lni->idx; ++ list_add_tail(&p->node, &lni->channels); ++ INIT_LIST_HEAD(&p->class_queues); ++ INIT_LIST_HEAD(&p->ccgs); ++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING | ++ channel_idx); ++ config_opts.dcpid = lni->dcp_idx; ++ config_opts.channel_mapping.map_lni_id = lni->idx; ++ config_opts.channel_mapping.map_shaped = 0; ++ if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) { ++ pr_err("Can't map channel#%d for LNI#%d\n", ++ channel_idx, lni->idx); ++ list_del(&p->node); ++ kfree(p); ++ return -EINVAL; ++ } ++ *channel = p; ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_channel_claim); ++ ++int qman_ceetm_channel_release(struct qm_ceetm_channel *channel) ++{ ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; ++ if (!list_empty(&channel->class_queues)) { ++ pr_err("CEETM channel#%d has class queue unreleased!\n", ++ channel->idx); ++ return -EBUSY; ++ } ++ if (!list_empty(&channel->ccgs)) { ++ pr_err("CEETM channel#%d has ccg unreleased!\n", ++ channel->idx); ++ return -EBUSY; ++ } ++ ++ /* Validate that channel->dcp_idx corresponds to a known fman */ ++ if ((channel->dcp_idx != qm_dc_portal_fman0) && ++ (channel->dcp_idx != qm_dc_portal_fman1)) { ++ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n", ++ channel->dcp_idx); ++ return -EINVAL; ++ } ++ ++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER | ++ channel->idx); ++ config_opts.dcpid = channel->dcp_idx; ++ memset(&config_opts.shaper_config, 0, ++ sizeof(config_opts.shaper_config)); ++ if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) { ++ pr_err("Can't reset channel shaping parameters\n"); ++ return -EINVAL; ++ } ++ ++ if (channel->dcp_idx == qm_dc_portal_fman0) { ++ qman_release_ceetm0_channelid(channel->idx); ++ } else if (channel->dcp_idx == qm_dc_portal_fman1) { ++ qman_release_ceetm1_channelid(channel->idx); ++ } else { ++ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n", ++ channel->dcp_idx); ++ return -EINVAL; ++ } ++ list_del(&channel->node); ++ kfree(channel); ++ ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_channel_release); ++ ++int qman_ceetm_channel_enable_shaper(struct qm_ceetm_channel *channel, ++ int coupled) ++{ ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; ++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; ++ ++ if (channel->shaper_enable == 1) { ++ pr_err("This channel shaper has already been enabled!\n"); ++ return -EINVAL; ++ } ++ ++ channel->shaper_enable = 1; ++ channel->shaper_couple = coupled; ++ ++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING | ++ channel->idx); ++ query_opts.dcpid = channel->dcp_idx; ++ ++ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) { ++ pr_err("Can't query channel mapping\n"); ++ return -EINVAL; ++ } ++ ++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING | ++ channel->idx); ++ config_opts.dcpid = channel->dcp_idx; ++ config_opts.channel_mapping.map_lni_id = ++ query_result.channel_mapping_query.map_lni_id; ++ config_opts.channel_mapping.map_shaped = 1; ++ if 
(qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) { ++ pr_err("Can't enable shaper for channel #%d\n", channel->idx); ++ return -EINVAL; ++ } ++ ++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER | ++ channel->idx); ++ config_opts.shaper_config.cpl = coupled; ++ config_opts.shaper_config.crtcr = ++ cpu_to_be24((channel->cr_token_rate.whole ++ << 13) | ++ channel->cr_token_rate.fraction); ++ config_opts.shaper_config.ertcr = ++ cpu_to_be24(channel->er_token_rate.whole ++ << 13 | ++ channel->er_token_rate.fraction); ++ config_opts.shaper_config.crtbl = ++ cpu_to_be16(channel->cr_token_bucket_limit); ++ config_opts.shaper_config.ertbl = ++ cpu_to_be16(channel->er_token_bucket_limit); ++ ++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); ++} ++EXPORT_SYMBOL(qman_ceetm_channel_enable_shaper); ++ ++int qman_ceetm_channel_disable_shaper(struct qm_ceetm_channel *channel) ++{ ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; ++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; ++ ++ ++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING | ++ channel->idx); ++ query_opts.dcpid = channel->dcp_idx; ++ ++ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) { ++ pr_err("Can't query channel mapping\n"); ++ return -EINVAL; ++ } ++ ++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING | ++ channel->idx); ++ config_opts.dcpid = channel->dcp_idx; ++ config_opts.channel_mapping.map_shaped = 0; ++ config_opts.channel_mapping.map_lni_id = ++ query_result.channel_mapping_query.map_lni_id; ++ ++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); ++} ++EXPORT_SYMBOL(qman_ceetm_channel_disable_shaper); ++ ++int qman_ceetm_channel_is_shaper_enabled(struct qm_ceetm_channel *channel) ++{ ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; ++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; ++ ++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING | ++ channel->idx); ++ query_opts.dcpid = channel->dcp_idx; ++ ++ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) { ++ pr_err("Can't query channel mapping\n"); ++ return -EINVAL; ++ } ++ ++ return query_result.channel_mapping_query.map_shaped; ++} ++EXPORT_SYMBOL(qman_ceetm_channel_is_shaper_enabled); ++ ++int qman_ceetm_channel_set_commit_rate(struct qm_ceetm_channel *channel, ++ const struct qm_ceetm_rate *token_rate, ++ u16 token_limit) ++{ ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; ++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; ++ int ret; ++ ++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER | ++ channel->idx); ++ query_opts.dcpid = channel->dcp_idx; ++ ++ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); ++ if (ret) { ++ pr_err("Fail to get the current channel shaper setting\n"); ++ return -EINVAL; ++ } ++ ++ channel->cr_token_rate.whole = token_rate->whole; ++ channel->cr_token_rate.fraction = token_rate->fraction; ++ channel->cr_token_bucket_limit = token_limit; ++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER | ++ channel->idx); ++ config_opts.dcpid = channel->dcp_idx; ++ config_opts.shaper_config.crtcr = cpu_to_be24((token_rate->whole ++ << 13) | (token_rate->fraction)); ++ config_opts.shaper_config.crtbl = cpu_to_be16(token_limit); ++ config_opts.shaper_config.cpl = query_result.shaper_query.cpl; ++ 
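/* Carry the queried cpl/ER settings over unchanged; only the CR ++ * rate and bucket limit are modified by this call. ++ */ ++ 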
config_opts.shaper_config.ertcr = query_result.shaper_query.ertcr; ++ config_opts.shaper_config.ertbl = query_result.shaper_query.ertbl; ++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); ++} ++EXPORT_SYMBOL(qman_ceetm_channel_set_commit_rate); ++ ++int qman_ceetm_channel_set_commit_rate_bps(struct qm_ceetm_channel *channel, ++ u64 bps, u16 token_limit) ++{ ++ struct qm_ceetm_rate token_rate; ++ int ret; ++ ++ ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0); ++ if (ret) { ++ pr_err("Can not convert bps to token rate\n"); ++ return -EINVAL; ++ } ++ return qman_ceetm_channel_set_commit_rate(channel, &token_rate, ++ token_limit); ++} ++EXPORT_SYMBOL(qman_ceetm_channel_set_commit_rate_bps); ++ ++int qman_ceetm_channel_get_commit_rate(struct qm_ceetm_channel *channel, ++ struct qm_ceetm_rate *token_rate, ++ u16 *token_limit) ++{ ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; ++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; ++ int ret; ++ ++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER | ++ channel->idx); ++ query_opts.dcpid = channel->dcp_idx; ++ ++ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); ++ if (ret || !query_result.shaper_query.crtcr || ++ !query_result.shaper_query.crtbl) { ++ pr_err("The channel commit rate or limit is not set\n"); ++ return -EINVAL; ++ } ++ token_rate->whole = be24_to_cpu(query_result.shaper_query.crtcr) >> 13; ++ token_rate->fraction = be24_to_cpu(query_result.shaper_query.crtcr) & ++ 0x1FFF; ++ *token_limit = be16_to_cpu(query_result.shaper_query.crtbl); ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_channel_get_commit_rate); ++ ++int qman_ceetm_channel_get_commit_rate_bps(struct qm_ceetm_channel *channel, ++ u64 *bps, u16 *token_limit) ++{ ++ struct qm_ceetm_rate token_rate; ++ int ret; ++ ++ ret = qman_ceetm_channel_get_commit_rate(channel, &token_rate, ++ token_limit); ++ if (ret) { ++ pr_err("The channel CR rate or limit is not available\n"); ++ return -EINVAL; ++ } ++ ++ return qman_ceetm_tokenrate2bps(&token_rate, bps, 0); ++} ++EXPORT_SYMBOL(qman_ceetm_channel_get_commit_rate_bps); ++ ++int qman_ceetm_channel_set_excess_rate(struct qm_ceetm_channel *channel, ++ const struct qm_ceetm_rate *token_rate, ++ u16 token_limit) ++{ ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; ++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; ++ int ret; ++ ++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER | ++ channel->idx); ++ query_opts.dcpid = channel->dcp_idx; ++ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); ++ if (ret) { ++ pr_err("Fail to get the current channel shaper setting\n"); ++ return -EINVAL; ++ } ++ ++ channel->er_token_rate.whole = token_rate->whole; ++ channel->er_token_rate.fraction = token_rate->fraction; ++ channel->er_token_bucket_limit = token_limit; ++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER | ++ channel->idx); ++ config_opts.dcpid = channel->dcp_idx; ++ config_opts.shaper_config.ertcr = cpu_to_be24( ++ (token_rate->whole << 13) | (token_rate->fraction)); ++ config_opts.shaper_config.ertbl = cpu_to_be16(token_limit); ++ config_opts.shaper_config.cpl = query_result.shaper_query.cpl; ++ config_opts.shaper_config.crtcr = query_result.shaper_query.crtcr; ++ config_opts.shaper_config.crtbl = query_result.shaper_query.crtbl; ++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); ++}
++EXPORT_SYMBOL(qman_ceetm_channel_set_excess_rate); ++ ++int qman_ceetm_channel_set_excess_rate_bps(struct qm_ceetm_channel *channel, ++ u64 bps, u16 token_limit) ++{ ++ struct qm_ceetm_rate token_rate; ++ int ret; ++ ++ ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0); ++ if (ret) { ++ pr_err("Can not convert bps to token rate\n"); ++ return -EINVAL; ++ } ++ return qman_ceetm_channel_set_excess_rate(channel, &token_rate, ++ token_limit); ++} ++EXPORT_SYMBOL(qman_ceetm_channel_set_excess_rate_bps); ++ ++int qman_ceetm_channel_get_excess_rate(struct qm_ceetm_channel *channel, ++ struct qm_ceetm_rate *token_rate, ++ u16 *token_limit) ++{ ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; ++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; ++ int ret; ++ ++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER | ++ channel->idx); ++ query_opts.dcpid = channel->dcp_idx; ++ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); ++ if (ret || !query_result.shaper_query.ertcr || ++ !query_result.shaper_query.ertbl) { ++ pr_err("The channel excess rate or limit is not set\n"); ++ return -EINVAL; ++ } ++ token_rate->whole = be24_to_cpu(query_result.shaper_query.ertcr) >> 13; ++ token_rate->fraction = be24_to_cpu(query_result.shaper_query.ertcr) & ++ 0x1FFF; ++ *token_limit = be16_to_cpu(query_result.shaper_query.ertbl); ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_channel_get_excess_rate); ++ ++int qman_ceetm_channel_get_excess_rate_bps(struct qm_ceetm_channel *channel, ++ u64 *bps, u16 *token_limit) ++{ ++ struct qm_ceetm_rate token_rate; ++ int ret; ++ ++ ret = qman_ceetm_channel_get_excess_rate(channel, &token_rate, ++ token_limit); ++ if (ret) { ++ pr_err("The channel ER rate or limit is not available\n"); ++ return -EINVAL; ++ } ++ ++ return qman_ceetm_tokenrate2bps(&token_rate, bps, 0); ++} ++EXPORT_SYMBOL(qman_ceetm_channel_get_excess_rate_bps); ++ ++int qman_ceetm_channel_set_weight(struct qm_ceetm_channel *channel, ++ u16 token_limit) ++{ ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; ++ ++ if (channel->shaper_enable) { ++ pr_err("This channel is a shaped one\n"); ++ return -EINVAL; ++ } ++ ++ channel->cr_token_bucket_limit = token_limit; ++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER | ++ channel->idx); ++ config_opts.dcpid = channel->dcp_idx; ++ config_opts.shaper_config.crtbl = cpu_to_be16(token_limit); ++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); ++} ++EXPORT_SYMBOL(qman_ceetm_channel_set_weight); ++ ++int qman_ceetm_channel_get_weight(struct qm_ceetm_channel *channel, ++ u16 *token_limit) ++{ ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; ++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; ++ int ret; ++ ++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER | ++ channel->idx); ++ query_opts.dcpid = channel->dcp_idx; ++ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); ++ if (ret || !query_result.shaper_query.crtbl) { ++ pr_err("This unshaped channel's uFQ weight is unavailable\n"); ++ return -EINVAL; ++ } ++ *token_limit = be16_to_cpu(query_result.shaper_query.crtbl); ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_channel_get_weight); ++ ++int qman_ceetm_channel_set_group(struct qm_ceetm_channel *channel, int group_b, ++ unsigned int prio_a, unsigned int prio_b) ++{ ++ struct qm_mcc_ceetm_class_scheduler_config config_opts; ++ struct qm_mcr_ceetm_class_scheduler_query query_result; ++ int i; ++ ++ if (prio_a > 7) { ++ pr_err("The priority of 
group A is out of range\n"); ++ return -EINVAL; ++ } ++ if (group_b && (prio_b > 7)) { ++ pr_err("The priority of group B is out of range\n"); ++ return -EINVAL; ++ } ++ ++ if (qman_ceetm_query_class_scheduler(channel, &query_result)) { ++ pr_err("Can't query channel#%d's scheduler!\n", channel->idx); ++ return -EINVAL; ++ } ++ ++ config_opts.cqcid = cpu_to_be16(channel->idx); ++ config_opts.dcpid = channel->dcp_idx; ++ config_opts.gpc_combine_flag = !group_b; ++ config_opts.gpc_prio_a = prio_a; ++ config_opts.gpc_prio_b = prio_b; ++ ++ for (i = 0; i < 8; i++) ++ config_opts.w[i] = query_result.w[i]; ++ config_opts.crem = query_result.crem; ++ config_opts.erem = query_result.erem; ++ ++ return qman_ceetm_configure_class_scheduler(&config_opts); ++} ++EXPORT_SYMBOL(qman_ceetm_channel_set_group); ++ ++int qman_ceetm_channel_get_group(struct qm_ceetm_channel *channel, int *group_b, ++ unsigned int *prio_a, unsigned int *prio_b) ++{ ++ struct qm_mcr_ceetm_class_scheduler_query query_result; ++ ++ if (qman_ceetm_query_class_scheduler(channel, &query_result)) { ++ pr_err("Can't query channel#%d's scheduler!\n", channel->idx); ++ return -EINVAL; ++ } ++ *group_b = !query_result.gpc_combine_flag; ++ *prio_a = query_result.gpc_prio_a; ++ *prio_b = query_result.gpc_prio_b; ++ ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_channel_get_group); ++ ++#define GROUP_A_ELIGIBILITY_SET (1 << 8) ++#define GROUP_B_ELIGIBILITY_SET (1 << 9) ++#define CQ_ELIGIBILITY_SET(n) (1 << (7 - n)) ++int qman_ceetm_channel_set_group_cr_eligibility(struct qm_ceetm_channel ++ *channel, int group_b, int cre) ++{ ++ struct qm_mcc_ceetm_class_scheduler_config csch_config; ++ struct qm_mcr_ceetm_class_scheduler_query csch_query; ++ int i; ++ ++ if (qman_ceetm_query_class_scheduler(channel, &csch_query)) { ++ pr_err("Cannot get the channel %d scheduler setting.\n", ++ channel->idx); ++ return -EINVAL; ++ } ++ csch_config.cqcid = cpu_to_be16(channel->idx); ++ csch_config.dcpid = channel->dcp_idx; ++ csch_config.gpc_combine_flag = csch_query.gpc_combine_flag; ++ csch_config.gpc_prio_a = csch_query.gpc_prio_a; ++ csch_config.gpc_prio_b = csch_query.gpc_prio_b; ++ ++ for (i = 0; i < 8; i++) ++ csch_config.w[i] = csch_query.w[i]; ++ csch_config.erem = csch_query.erem; ++ if (group_b) ++ csch_config.crem = (be16_to_cpu(csch_query.crem) ++ & ~GROUP_B_ELIGIBILITY_SET) ++ | (cre ? GROUP_B_ELIGIBILITY_SET : 0); ++ else ++ csch_config.crem = (be16_to_cpu(csch_query.crem) ++ & ~GROUP_A_ELIGIBILITY_SET) ++ | (cre ? GROUP_A_ELIGIBILITY_SET : 0); ++ ++ csch_config.crem = cpu_to_be16(csch_config.crem); ++ ++ if (qman_ceetm_configure_class_scheduler(&csch_config)) { ++ pr_err("Cannot config channel %d's scheduler with " ++ "group_%c's cr eligibility\n", channel->idx, ++ group_b ? 
'b' : 'a'); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_channel_set_group_cr_eligibility); ++ ++int qman_ceetm_channel_set_group_er_eligibility(struct qm_ceetm_channel ++ *channel, int group_b, int ere) ++{ ++ struct qm_mcc_ceetm_class_scheduler_config csch_config; ++ struct qm_mcr_ceetm_class_scheduler_query csch_query; ++ int i; ++ ++ if (qman_ceetm_query_class_scheduler(channel, &csch_query)) { ++ pr_err("Cannot get the channel %d scheduler setting.\n", ++ channel->idx); ++ return -EINVAL; ++ } ++ csch_config.cqcid = cpu_to_be16(channel->idx); ++ csch_config.dcpid = channel->dcp_idx; ++ csch_config.gpc_combine_flag = csch_query.gpc_combine_flag; ++ csch_config.gpc_prio_a = csch_query.gpc_prio_a; ++ csch_config.gpc_prio_b = csch_query.gpc_prio_b; ++ ++ for (i = 0; i < 8; i++) ++ csch_config.w[i] = csch_query.w[i]; ++ csch_config.crem = csch_query.crem; ++ if (group_b) ++ csch_config.erem = (be16_to_cpu(csch_query.erem) ++ & ~GROUP_B_ELIGIBILITY_SET) ++ | (ere ? GROUP_B_ELIGIBILITY_SET : 0); ++ else ++ csch_config.erem = (be16_to_cpu(csch_query.erem) ++ & ~GROUP_A_ELIGIBILITY_SET) ++ | (ere ? GROUP_A_ELIGIBILITY_SET : 0); ++ ++ csch_config.erem = cpu_to_be16(csch_config.erem); ++ ++ if (qman_ceetm_configure_class_scheduler(&csch_config)) { ++ pr_err("Cannot config channel %d's scheduler with " ++ "group_%c's er eligibility\n", channel->idx, ++ group_b ? 'b' : 'a'); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_channel_set_group_er_eligibility); ++ ++int qman_ceetm_channel_set_cq_cr_eligibility(struct qm_ceetm_channel *channel, ++ unsigned int idx, int cre) ++{ ++ struct qm_mcc_ceetm_class_scheduler_config csch_config; ++ struct qm_mcr_ceetm_class_scheduler_query csch_query; ++ int i; ++ ++ if (idx > 7) { ++ pr_err("CQ index is out of range\n"); ++ return -EINVAL; ++ } ++ if (qman_ceetm_query_class_scheduler(channel, &csch_query)) { ++ pr_err("Cannot get the channel %d scheduler setting.\n", ++ channel->idx); ++ return -EINVAL; ++ } ++ csch_config.cqcid = cpu_to_be16(channel->idx); ++ csch_config.dcpid = channel->dcp_idx; ++ csch_config.gpc_combine_flag = csch_query.gpc_combine_flag; ++ csch_config.gpc_prio_a = csch_query.gpc_prio_a; ++ csch_config.gpc_prio_b = csch_query.gpc_prio_b; ++ for (i = 0; i < 8; i++) ++ csch_config.w[i] = csch_query.w[i]; ++ csch_config.erem = csch_query.erem; ++ csch_config.crem = (be16_to_cpu(csch_query.crem) ++ & ~CQ_ELIGIBILITY_SET(idx)) | ++ (cre ? 
CQ_ELIGIBILITY_SET(idx) : 0); ++ csch_config.crem = cpu_to_be16(csch_config.crem); ++ if (qman_ceetm_configure_class_scheduler(&csch_config)) { ++ pr_err("Cannot config channel scheduler to set " ++ "cr eligibility mask for CQ#%d\n", idx); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_channel_set_cq_cr_eligibility); ++ ++int qman_ceetm_channel_set_cq_er_eligibility(struct qm_ceetm_channel *channel, ++ unsigned int idx, int ere) ++{ ++ struct qm_mcc_ceetm_class_scheduler_config csch_config; ++ struct qm_mcr_ceetm_class_scheduler_query csch_query; ++ int i; ++ ++ if (idx > 7) { ++ pr_err("CQ index is out of range\n"); ++ return -EINVAL; ++ } ++ if (qman_ceetm_query_class_scheduler(channel, &csch_query)) { ++ pr_err("Cannot get the channel %d scheduler setting.\n", ++ channel->idx); ++ return -EINVAL; ++ } ++ csch_config.cqcid = cpu_to_be16(channel->idx); ++ csch_config.dcpid = channel->dcp_idx; ++ csch_config.gpc_combine_flag = csch_query.gpc_combine_flag; ++ csch_config.gpc_prio_a = csch_query.gpc_prio_a; ++ csch_config.gpc_prio_b = csch_query.gpc_prio_b; ++ for (i = 0; i < 8; i++) ++ csch_config.w[i] = csch_query.w[i]; ++ csch_config.crem = csch_query.crem; ++ csch_config.erem = (be16_to_cpu(csch_query.erem) ++ & ~CQ_ELIGIBILITY_SET(idx)) | ++ (ere ? CQ_ELIGIBILITY_SET(idx) : 0); ++ csch_config.erem = cpu_to_be16(csch_config.erem); ++ if (qman_ceetm_configure_class_scheduler(&csch_config)) { ++ pr_err("Cannot config channel scheduler to set " ++ "er eligibility mask for CQ#%d\n", idx); ++ return -EINVAL; ++ } ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_channel_set_cq_er_eligibility); ++ ++int qman_ceetm_cq_claim(struct qm_ceetm_cq **cq, ++ struct qm_ceetm_channel *channel, unsigned int idx, ++ struct qm_ceetm_ccg *ccg) ++{ ++ struct qm_ceetm_cq *p; ++ struct qm_mcc_ceetm_cq_config cq_config; ++ ++ if (idx > 7) { ++ pr_err("The independent class queue id is out of range\n"); ++ return -EINVAL; ++ } ++ ++ list_for_each_entry(p, &channel->class_queues, node) { ++ if (p->idx == idx) { ++ pr_err("The CQ#%d has been claimed!\n", idx); ++ return -EINVAL; ++ } ++ } ++ ++ p = kmalloc(sizeof(*p), GFP_KERNEL); ++ if (!p) { ++ pr_err("Can't allocate memory for CQ#%d!\n", idx); ++ return -ENOMEM; ++ } ++ ++ list_add_tail(&p->node, &channel->class_queues); ++ p->idx = idx; ++ p->is_claimed = 1; ++ p->parent = channel; ++ INIT_LIST_HEAD(&p->bound_lfqids); ++ ++ if (ccg) { ++ cq_config.cqid = cpu_to_be16((channel->idx << 4) | idx); ++ cq_config.dcpid = channel->dcp_idx; ++ cq_config.ccgid = cpu_to_be16(ccg->idx); ++ if (qman_ceetm_configure_cq(&cq_config)) { ++ pr_err("Can't configure the CQ#%d with CCGRID#%d\n", ++ idx, ccg->idx); ++ list_del(&p->node); ++ kfree(p); ++ return -EINVAL; ++ } ++ } ++ ++ *cq = p; ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_cq_claim); ++ ++int qman_ceetm_cq_claim_A(struct qm_ceetm_cq **cq, ++ struct qm_ceetm_channel *channel, unsigned int idx, ++ struct qm_ceetm_ccg *ccg) ++{ ++ struct qm_ceetm_cq *p; ++ struct qm_mcc_ceetm_cq_config cq_config; ++ ++ if ((idx < 8) || (idx > 15)) { ++ pr_err("This grouped class queue id is out of range\n"); ++ return -EINVAL; ++ } ++ ++ list_for_each_entry(p, &channel->class_queues, node) { ++ if (p->idx == idx) { ++ pr_err("The CQ#%d has been claimed!\n", idx); ++ return -EINVAL; ++ } ++ } ++ ++ p = kmalloc(sizeof(*p), GFP_KERNEL); ++ if (!p) { ++ pr_err("Can't allocate memory for CQ#%d!\n", idx); ++ return -ENOMEM; ++ } ++ ++ list_add_tail(&p->node, &channel->class_queues); ++ p->idx = idx; ++ p->is_claimed = 1; ++ 
p->parent = channel; ++ INIT_LIST_HEAD(&p->bound_lfqids); ++ ++ if (ccg) { ++ cq_config.cqid = cpu_to_be16((channel->idx << 4) | idx); ++ cq_config.dcpid = channel->dcp_idx; ++ cq_config.ccgid = cpu_to_be16(ccg->idx); ++ if (qman_ceetm_configure_cq(&cq_config)) { ++ pr_err("Can't configure the CQ#%d with CCGRID#%d\n", ++ idx, ccg->idx); ++ list_del(&p->node); ++ kfree(p); ++ return -EINVAL; ++ } ++ } ++ *cq = p; ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_cq_claim_A); ++ ++int qman_ceetm_cq_claim_B(struct qm_ceetm_cq **cq, ++ struct qm_ceetm_channel *channel, unsigned int idx, ++ struct qm_ceetm_ccg *ccg) ++{ ++ struct qm_ceetm_cq *p; ++ struct qm_mcc_ceetm_cq_config cq_config; ++ ++ if ((idx < 12) || (idx > 15)) { ++ pr_err("This grouped class queue id is out of range\n"); ++ return -EINVAL; ++ } ++ ++ list_for_each_entry(p, &channel->class_queues, node) { ++ if (p->idx == idx) { ++ pr_err("The CQ#%d has been claimed!\n", idx); ++ return -EINVAL; ++ } ++ } ++ ++ p = kmalloc(sizeof(*p), GFP_KERNEL); ++ if (!p) { ++ pr_err("Can't allocate memory for CQ#%d!\n", idx); ++ return -ENOMEM; ++ } ++ ++ list_add_tail(&p->node, &channel->class_queues); ++ p->idx = idx; ++ p->is_claimed = 1; ++ p->parent = channel; ++ INIT_LIST_HEAD(&p->bound_lfqids); ++ ++ if (ccg) { ++ cq_config.cqid = cpu_to_be16((channel->idx << 4) | idx); ++ cq_config.dcpid = channel->dcp_idx; ++ cq_config.ccgid = cpu_to_be16(ccg->idx); ++ if (qman_ceetm_configure_cq(&cq_config)) { ++ pr_err("Can't configure the CQ#%d with CCGRID#%d\n", ++ idx, ccg->idx); ++ list_del(&p->node); ++ kfree(p); ++ return -EINVAL; ++ } ++ } ++ *cq = p; ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_cq_claim_B); ++ ++int qman_ceetm_cq_release(struct qm_ceetm_cq *cq) ++{ ++ if (!list_empty(&cq->bound_lfqids)) { ++ pr_err("The CQ#%d has unreleased LFQID\n", cq->idx); ++ return -EBUSY; ++ } ++ list_del(&cq->node); ++ qman_ceetm_drain_cq(cq); ++ kfree(cq); ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_cq_release); ++ ++int qman_ceetm_set_queue_weight(struct qm_ceetm_cq *cq, ++ struct qm_ceetm_weight_code *weight_code) ++{ ++ struct qm_mcc_ceetm_class_scheduler_config config_opts; ++ struct qm_mcr_ceetm_class_scheduler_query query_result; ++ int i; ++ ++ if (cq->idx < 8) { ++ pr_err("Can not set weight for ungrouped class queue\n"); ++ return -EINVAL; ++ } ++ ++ if (qman_ceetm_query_class_scheduler(cq->parent, &query_result)) { ++ pr_err("Can't query channel#%d's scheduler!\n", ++ cq->parent->idx); ++ return -EINVAL; ++ } ++ ++ config_opts.cqcid = cpu_to_be16(cq->parent->idx); ++ config_opts.dcpid = cq->parent->dcp_idx; ++ config_opts.crem = query_result.crem; ++ config_opts.erem = query_result.erem; ++ config_opts.gpc_combine_flag = query_result.gpc_combine_flag; ++ config_opts.gpc_prio_a = query_result.gpc_prio_a; ++ config_opts.gpc_prio_b = query_result.gpc_prio_b; ++ ++ for (i = 0; i < 8; i++) ++ config_opts.w[i] = query_result.w[i]; ++ config_opts.w[cq->idx - 8] = ((weight_code->y << 3) | ++ (weight_code->x & 0x7)); ++ return qman_ceetm_configure_class_scheduler(&config_opts); ++} ++EXPORT_SYMBOL(qman_ceetm_set_queue_weight); ++ ++int qman_ceetm_get_queue_weight(struct qm_ceetm_cq *cq, ++ struct qm_ceetm_weight_code *weight_code) ++{ ++ struct qm_mcr_ceetm_class_scheduler_query query_result; ++ ++ if (cq->idx < 8) { ++ pr_err("Can not get weight for ungrouped class queue\n"); ++ return -EINVAL; ++ } ++ ++ if (qman_ceetm_query_class_scheduler(cq->parent, ++ &query_result)) { ++ pr_err("Can't get the weight code for CQ#%d!\n", cq->idx); ++ return -EINVAL; ++ 
} ++ weight_code->y = query_result.w[cq->idx - 8] >> 3; ++ weight_code->x = query_result.w[cq->idx - 8] & 0x7; ++ ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_get_queue_weight); ++ ++/* The WBFS code is represented as {x,y}; the effective weight can be ++ * calculated as: ++ * effective weight = 2^x / (1 - (y/64)) ++ * = 2^(x+6) / (64 - y) ++ */ ++static void reduce_fraction(u32 *n, u32 *d) ++{ ++ u32 factor = 2; ++ u32 lesser = (*n < *d) ? *n : *d; ++ /* If factor exceeds the square-root of the lesser of *n and *d, ++ * then there's no point continuing. Proof: if there was a factor ++ * bigger than the square root, that would imply there exists ++ * another factor smaller than the square-root with which it ++ * multiplies to give 'lesser' - but that's a contradiction ++ * because the other factor would have already been found and ++ * divided out. ++ */ ++ while ((factor * factor) <= lesser) { ++ /* If 'factor' is a factor of *n and *d, divide them both ++ * by 'factor' as many times as possible. ++ */ ++ while (!(*n % factor) && !(*d % factor)) { ++ *n /= factor; ++ *d /= factor; ++ lesser /= factor; ++ } ++ if (factor == 2) ++ factor = 3; ++ else ++ factor += 2; ++ } ++} ++ ++int qman_ceetm_wbfs2ratio(struct qm_ceetm_weight_code *weight_code, ++ u32 *numerator, ++ u32 *denominator) ++{ ++ *numerator = (u32)1 << (weight_code->x + 6); ++ *denominator = 64 - weight_code->y; ++ reduce_fraction(numerator, denominator); ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_wbfs2ratio); ++ ++/* For a given x, the weight is between 2^x (inclusive) and 2^(x+1) (exclusive). ++ * So find 'x' by range, and then estimate 'y' using: ++ * 64 - y = 2^(x + 6) / weight ++ * = 2^(x + 6) / (n/d) ++ * = d * 2^(x+6) / n ++ * y = 64 - (d * 2^(x+6) / n) ++ */ ++int qman_ceetm_ratio2wbfs(u32 numerator, ++ u32 denominator, ++ struct qm_ceetm_weight_code *weight_code, ++ int rounding) ++{ ++ unsigned int y, x = 0; ++ /* search incrementing 'x' until: ++ * weight < 2^(x+1) ++ * n/d < 2^(x+1) ++ * n < d * 2^(x+1) ++ */ ++ while ((x < 8) && (numerator >= (denominator << (x + 1)))) ++ x++; ++ if (x >= 8) ++ return -ERANGE; ++ /* because of the subtraction, use '-rounding' */ ++ y = 64 - ROUNDING(denominator << (x + 6), numerator, -rounding); ++ if (y >= 32) ++ return -ERANGE; ++ weight_code->x = x; ++ weight_code->y = y; ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_ratio2wbfs); ++ ++int qman_ceetm_set_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 ratio) ++{ ++ struct qm_ceetm_weight_code weight_code; ++ ++ if (qman_ceetm_ratio2wbfs(ratio, 100, &weight_code, 0)) { ++ pr_err("Cannot get wbfs code for cq %x\n", cq->idx); ++ return -EINVAL; ++ } ++ return qman_ceetm_set_queue_weight(cq, &weight_code); ++} ++EXPORT_SYMBOL(qman_ceetm_set_queue_weight_in_ratio); ++ ++int qman_ceetm_get_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 *ratio) ++{ ++ struct qm_ceetm_weight_code weight_code; ++ u32 n, d; ++ ++ if (qman_ceetm_get_queue_weight(cq, &weight_code)) { ++ pr_err("Cannot query the weight code for cq%x\n", cq->idx); ++ return -EINVAL; ++ } ++ ++ if (qman_ceetm_wbfs2ratio(&weight_code, &n, &d)) { ++ pr_err("Cannot get the ratio with wbfs code\n"); ++ return -EINVAL; ++ } ++ ++ *ratio = (n * 100) / d; ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_get_queue_weight_in_ratio); ++ ++int qman_ceetm_cq_get_dequeue_statistics(struct qm_ceetm_cq *cq, u32 flags, ++ u64 *frame_count, u64 *byte_count) ++{ ++ struct qm_mcr_ceetm_statistics_query result; ++ u16 cid, command_type; ++ enum qm_dc_portal dcp_idx; ++ int ret; ++ ++ cid = 
cpu_to_be16((cq->parent->idx << 4) | cq->idx); ++ dcp_idx = cq->parent->dcp_idx; ++ if (flags == QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER) ++ command_type = CEETM_QUERY_DEQUEUE_CLEAR_STATISTICS; ++ else ++ command_type = CEETM_QUERY_DEQUEUE_STATISTICS; ++ ++ ret = qman_ceetm_query_statistics(cid, dcp_idx, command_type, &result); ++ if (ret) { ++ pr_err("Can't query the statistics of CQ#%d!\n", cq->idx); ++ return -EINVAL; ++ } ++ ++ *frame_count = be40_to_cpu(result.frm_cnt); ++ *byte_count = be48_to_cpu(result.byte_cnt); ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_cq_get_dequeue_statistics); ++ ++int qman_ceetm_drain_cq(struct qm_ceetm_cq *cq) ++{ ++ struct qm_mcr_ceetm_cq_peek_pop_xsfdrread ppxr; ++ int ret; ++ ++ do { ++ ret = qman_ceetm_cq_peek_pop_xsfdrread(cq, 1, 0, &ppxr); ++ if (ret) { ++ pr_err("Failed to pop frame from CQ\n"); ++ return -EINVAL; ++ } ++ } while (!(ppxr.stat & 0x2)); ++ ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_drain_cq); ++ ++#define CEETM_LFQMT_LFQID_MSB 0xF00000 ++#define CEETM_LFQMT_LFQID_LSB 0x000FFF ++int qman_ceetm_lfq_claim(struct qm_ceetm_lfq **lfq, ++ struct qm_ceetm_cq *cq) ++{ ++ struct qm_ceetm_lfq *p; ++ u32 lfqid; ++ int ret = 0; ++ struct qm_mcc_ceetm_lfqmt_config lfqmt_config; ++ ++ if (cq->parent->dcp_idx == qm_dc_portal_fman0) { ++ ret = qman_alloc_ceetm0_lfqid(&lfqid); ++ } else if (cq->parent->dcp_idx == qm_dc_portal_fman1) { ++ ret = qman_alloc_ceetm1_lfqid(&lfqid); ++ } else { ++ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n", ++ cq->parent->dcp_idx); ++ return -EINVAL; ++ } ++ ++ if (ret) { ++ pr_err("There is no lfqid available for CQ#%d!\n", cq->idx); ++ return -ENODEV; ++ } ++ p = kmalloc(sizeof(*p), GFP_KERNEL); ++ if (!p) ++ return -ENOMEM; ++ p->idx = lfqid; ++ p->dctidx = (u16)(lfqid & CEETM_LFQMT_LFQID_LSB); ++ p->parent = cq->parent; ++ list_add_tail(&p->node, &cq->bound_lfqids); ++ ++ lfqmt_config.lfqid = cpu_to_be24(CEETM_LFQMT_LFQID_MSB | ++ (cq->parent->dcp_idx << 16) | ++ (lfqid & CEETM_LFQMT_LFQID_LSB)); ++ lfqmt_config.cqid = cpu_to_be16((cq->parent->idx << 4) | (cq->idx)); ++ lfqmt_config.dctidx = cpu_to_be16(p->dctidx); ++ if (qman_ceetm_configure_lfqmt(&lfqmt_config)) { ++ pr_err("Can't configure LFQMT for LFQID#%d @ CQ#%d\n", ++ lfqid, cq->idx); ++ list_del(&p->node); ++ kfree(p); ++ return -EINVAL; ++ } ++ *lfq = p; ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_lfq_claim); ++ ++int qman_ceetm_lfq_release(struct qm_ceetm_lfq *lfq) ++{ ++ if (lfq->parent->dcp_idx == qm_dc_portal_fman0) { ++ qman_release_ceetm0_lfqid(lfq->idx); ++ } else if (lfq->parent->dcp_idx == qm_dc_portal_fman1) { ++ qman_release_ceetm1_lfqid(lfq->idx); ++ } else { ++ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n", ++ lfq->parent->dcp_idx); ++ return -EINVAL; ++ } ++ list_del(&lfq->node); ++ kfree(lfq); ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_lfq_release); ++ ++int qman_ceetm_lfq_set_context(struct qm_ceetm_lfq *lfq, u64 context_a, ++ u32 context_b) ++{ ++ struct qm_mcc_ceetm_dct_config dct_config; ++ ++ lfq->context_a = context_a; ++ lfq->context_b = context_b; ++ dct_config.dctidx = cpu_to_be16((u16)lfq->dctidx); ++ dct_config.dcpid = lfq->parent->dcp_idx; ++ dct_config.context_b = cpu_to_be32(context_b); ++ dct_config.context_a = cpu_to_be64(context_a); ++ ++ return qman_ceetm_configure_dct(&dct_config); ++} ++EXPORT_SYMBOL(qman_ceetm_lfq_set_context); ++ ++int qman_ceetm_lfq_get_context(struct qm_ceetm_lfq *lfq, u64 *context_a, ++ u32 *context_b) ++{ ++ struct qm_mcc_ceetm_dct_query dct_query;
++ struct qm_mcr_ceetm_dct_query query_result; ++ ++ dct_query.dctidx = cpu_to_be16(lfq->dctidx); ++ dct_query.dcpid = lfq->parent->dcp_idx; ++ if (qman_ceetm_query_dct(&dct_query, &query_result)) { ++ pr_err("Can't query LFQID#%d's context!\n", lfq->idx); ++ return -EINVAL; ++ } ++ *context_a = be64_to_cpu(query_result.context_a); ++ *context_b = be32_to_cpu(query_result.context_b); ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_lfq_get_context); ++ ++int qman_ceetm_create_fq(struct qm_ceetm_lfq *lfq, struct qman_fq *fq) ++{ ++ spin_lock_init(&fq->fqlock); ++ fq->fqid = lfq->idx; ++ fq->flags = QMAN_FQ_FLAG_NO_MODIFY; ++ if (lfq->ern) ++ fq->cb.ern = lfq->ern; ++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP ++ if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) ++ return -ENOMEM; ++#endif ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_create_fq); ++ ++#define MAX_CCG_IDX 0x000F ++int qman_ceetm_ccg_claim(struct qm_ceetm_ccg **ccg, ++ struct qm_ceetm_channel *channel, ++ unsigned int idx, ++ void (*cscn)(struct qm_ceetm_ccg *, ++ void *cb_ctx, ++ int congested), ++ void *cb_ctx) ++{ ++ struct qm_ceetm_ccg *p; ++ ++ if (idx > MAX_CCG_IDX) { ++ pr_err("The given ccg index is out of range\n"); ++ return -EINVAL; ++ } ++ ++ list_for_each_entry(p, &channel->ccgs, node) { ++ if (p->idx == idx) { ++ pr_err("The CCG#%d has been claimed\n", idx); ++ return -EINVAL; ++ } ++ } ++ ++ p = kmalloc(sizeof(*p), GFP_KERNEL); ++ if (!p) { ++ pr_err("Can't allocate memory for CCG#%d!\n", idx); ++ return -ENOMEM; ++ } ++ ++ list_add_tail(&p->node, &channel->ccgs); ++ ++ p->idx = idx; ++ p->parent = channel; ++ p->cb = cscn; ++ p->cb_ctx = cb_ctx; ++ INIT_LIST_HEAD(&p->cb_node); ++ ++ *ccg = p; ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_ccg_claim); ++ ++int qman_ceetm_ccg_release(struct qm_ceetm_ccg *ccg) ++{ ++ unsigned long irqflags __maybe_unused; ++ struct qm_mcc_ceetm_ccgr_config config_opts; ++ int ret = 0; ++ struct qman_portal *p = get_affine_portal(); ++ ++ memset(&config_opts, 0, sizeof(struct qm_mcc_ceetm_ccgr_config)); ++ spin_lock_irqsave(&p->ccgr_lock, irqflags); ++ if (!list_empty(&ccg->cb_node)) ++ list_del(&ccg->cb_node); ++ config_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_CONFIGURE | ++ (ccg->parent->idx << 4) | ccg->idx); ++ config_opts.dcpid = ccg->parent->dcp_idx; ++ config_opts.we_mask = cpu_to_be16(QM_CCGR_WE_CSCN_TUPD); ++ config_opts.cm_config.cscn_tupd = cpu_to_be16(PORTAL_IDX(p)); ++ ret = qman_ceetm_configure_ccgr(&config_opts); ++ spin_unlock_irqrestore(&p->ccgr_lock, irqflags); ++ put_affine_portal(); ++ ++ list_del(&ccg->node); ++ kfree(ccg); ++ return ret; ++} ++EXPORT_SYMBOL(qman_ceetm_ccg_release); ++ ++int qman_ceetm_ccg_set(struct qm_ceetm_ccg *ccg, u16 we_mask, ++ const struct qm_ceetm_ccg_params *params) ++{ ++ struct qm_mcc_ceetm_ccgr_config config_opts; ++ unsigned long irqflags __maybe_unused; ++ int ret; ++ struct qman_portal *p; ++ ++ if (((ccg->parent->idx << 4) | ccg->idx) >= (2 * __CGR_NUM)) ++ return -EINVAL; ++ ++ p = get_affine_portal(); ++ ++ memset(&config_opts, 0, sizeof(struct qm_mcc_ceetm_ccgr_config)); ++ spin_lock_irqsave(&p->ccgr_lock, irqflags); ++ ++ config_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_CONFIGURE | ++ (ccg->parent->idx << 4) | ccg->idx); ++ config_opts.dcpid = ccg->parent->dcp_idx; ++ config_opts.we_mask = we_mask; ++ if (we_mask & QM_CCGR_WE_CSCN_EN) { ++ config_opts.we_mask |= QM_CCGR_WE_CSCN_TUPD; ++ config_opts.cm_config.cscn_tupd = cpu_to_be16( ++ QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p)); ++ } ++ config_opts.we_mask = 
cpu_to_be16(config_opts.we_mask); ++ config_opts.cm_config.ctl_wr_en_g = params->wr_en_g; ++ config_opts.cm_config.ctl_wr_en_y = params->wr_en_y; ++ config_opts.cm_config.ctl_wr_en_r = params->wr_en_r; ++ config_opts.cm_config.ctl_td_en = params->td_en; ++ config_opts.cm_config.ctl_td_mode = params->td_mode; ++ config_opts.cm_config.ctl_cscn_en = params->cscn_en; ++ config_opts.cm_config.ctl_mode = params->mode; ++ config_opts.cm_config.oal = params->oal; ++ config_opts.cm_config.cs_thres.hword = ++ cpu_to_be16(params->cs_thres_in.hword); ++ config_opts.cm_config.cs_thres_x.hword = ++ cpu_to_be16(params->cs_thres_out.hword); ++ config_opts.cm_config.td_thres.hword = ++ cpu_to_be16(params->td_thres.hword); ++ config_opts.cm_config.wr_parm_g.word = ++ cpu_to_be32(params->wr_parm_g.word); ++ config_opts.cm_config.wr_parm_y.word = ++ cpu_to_be32(params->wr_parm_y.word); ++ config_opts.cm_config.wr_parm_r.word = ++ cpu_to_be32(params->wr_parm_r.word); ++ ret = qman_ceetm_configure_ccgr(&config_opts); ++ if (ret) { ++ pr_err("Configure CCGR CM failed!\n"); ++ goto release_lock; ++ } ++ ++ if (we_mask & QM_CCGR_WE_CSCN_EN) ++ if (list_empty(&ccg->cb_node)) ++ list_add(&ccg->cb_node, ++ &p->ccgr_cbs[ccg->parent->dcp_idx]); ++release_lock: ++ spin_unlock_irqrestore(&p->ccgr_lock, irqflags); ++ put_affine_portal(); ++ return ret; ++} ++EXPORT_SYMBOL(qman_ceetm_ccg_set); ++ ++#define CEETM_CCGR_CTL_MASK 0x01 ++int qman_ceetm_ccg_get(struct qm_ceetm_ccg *ccg, ++ struct qm_ceetm_ccg_params *params) ++{ ++ struct qm_mcc_ceetm_ccgr_query query_opts; ++ struct qm_mcr_ceetm_ccgr_query query_result; ++ ++ query_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_QUERY | ++ (ccg->parent->idx << 4) | ccg->idx); ++ query_opts.dcpid = ccg->parent->dcp_idx; ++ ++ if (qman_ceetm_query_ccgr(&query_opts, &query_result)) { ++ pr_err("Can't query CCGR#%d\n", ccg->idx); ++ return -EINVAL; ++ } ++ ++ params->wr_parm_r.word = query_result.cm_query.wr_parm_r.word; ++ params->wr_parm_y.word = query_result.cm_query.wr_parm_y.word; ++ params->wr_parm_g.word = query_result.cm_query.wr_parm_g.word; ++ params->td_thres.hword = query_result.cm_query.td_thres.hword; ++ params->cs_thres_out.hword = query_result.cm_query.cs_thres_x.hword; ++ params->cs_thres_in.hword = query_result.cm_query.cs_thres.hword; ++ params->oal = query_result.cm_query.oal; ++ params->wr_en_g = query_result.cm_query.ctl_wr_en_g; ++ params->wr_en_y = query_result.cm_query.ctl_wr_en_y; ++ params->wr_en_r = query_result.cm_query.ctl_wr_en_r; ++ params->td_en = query_result.cm_query.ctl_td_en; ++ params->td_mode = query_result.cm_query.ctl_td_mode; ++ params->cscn_en = query_result.cm_query.ctl_cscn_en; ++ params->mode = query_result.cm_query.ctl_mode; ++ ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_ccg_get); ++ ++int qman_ceetm_ccg_get_reject_statistics(struct qm_ceetm_ccg *ccg, u32 flags, ++ u64 *frame_count, u64 *byte_count) ++{ ++ struct qm_mcr_ceetm_statistics_query result; ++ u16 cid, command_type; ++ enum qm_dc_portal dcp_idx; ++ int ret; ++ ++ cid = cpu_to_be16((ccg->parent->idx << 4) | ccg->idx); ++ dcp_idx = ccg->parent->dcp_idx; ++ if (flags == QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER) ++ command_type = CEETM_QUERY_REJECT_CLEAR_STATISTICS; ++ else ++ command_type = CEETM_QUERY_REJECT_STATISTICS; ++ ++ ret = qman_ceetm_query_statistics(cid, dcp_idx, command_type, &result); ++ if (ret) { ++ pr_err("Can't query the statistics of CCG#%d!\n", ccg->idx); ++ return -EINVAL; ++ } ++ ++ *frame_count = be40_to_cpu(result.frm_cnt); ++ *byte_count = 
be48_to_cpu(result.byte_cnt); ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_ccg_get_reject_statistics); ++ ++int qman_ceetm_cscn_swp_get(struct qm_ceetm_ccg *ccg, ++ u16 swp_idx, ++ unsigned int *cscn_enabled) ++{ ++ struct qm_mcc_ceetm_ccgr_query query_opts; ++ struct qm_mcr_ceetm_ccgr_query query_result; ++ int i; ++ ++ DPA_ASSERT(swp_idx < 127); ++ query_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_QUERY | ++ (ccg->parent->idx << 4) | ccg->idx); ++ query_opts.dcpid = ccg->parent->dcp_idx; ++ ++ if (qman_ceetm_query_ccgr(&query_opts, &query_result)) { ++ pr_err("Can't query CCGR#%d\n", ccg->idx); ++ return -EINVAL; ++ } ++ ++ i = swp_idx / 32; ++ i = 3 - i; ++ *cscn_enabled = query_result.cm_query.cscn_targ_swp[i] >> ++ (31 - swp_idx % 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_cscn_swp_get); ++ ++int qman_ceetm_cscn_dcp_set(struct qm_ceetm_ccg *ccg, ++ u16 dcp_idx, ++ u8 vcgid, ++ unsigned int cscn_enabled, ++ u16 we_mask, ++ const struct qm_ceetm_ccg_params *params) ++{ ++ struct qm_mcc_ceetm_ccgr_config config_opts; ++ int ret; ++ ++ config_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_CONFIGURE | ++ (ccg->parent->idx << 4) | ccg->idx); ++ config_opts.dcpid = ccg->parent->dcp_idx; ++ config_opts.we_mask = cpu_to_be16(we_mask | QM_CCGR_WE_CSCN_TUPD | ++ QM_CCGR_WE_CDV); ++ config_opts.cm_config.cdv = vcgid; ++ config_opts.cm_config.cscn_tupd = cpu_to_be16((cscn_enabled << 15) | ++ QM_CGR_TARG_UDP_CTRL_DCP | dcp_idx); ++ config_opts.cm_config.ctl_wr_en_g = params->wr_en_g; ++ config_opts.cm_config.ctl_wr_en_y = params->wr_en_y; ++ config_opts.cm_config.ctl_wr_en_r = params->wr_en_r; ++ config_opts.cm_config.ctl_td_en = params->td_en; ++ config_opts.cm_config.ctl_td_mode = params->td_mode; ++ config_opts.cm_config.ctl_cscn_en = params->cscn_en; ++ config_opts.cm_config.ctl_mode = params->mode; ++ config_opts.cm_config.cs_thres.hword = ++ cpu_to_be16(params->cs_thres_in.hword); ++ config_opts.cm_config.cs_thres_x.hword = ++ cpu_to_be16(params->cs_thres_out.hword); ++ config_opts.cm_config.td_thres.hword = ++ cpu_to_be16(params->td_thres.hword); ++ config_opts.cm_config.wr_parm_g.word = ++ cpu_to_be32(params->wr_parm_g.word); ++ config_opts.cm_config.wr_parm_y.word = ++ cpu_to_be32(params->wr_parm_y.word); ++ config_opts.cm_config.wr_parm_r.word = ++ cpu_to_be32(params->wr_parm_r.word); ++ ++ ret = qman_ceetm_configure_ccgr(&config_opts); ++ if (ret) { ++ pr_err("Configure CSCN_TARG_DCP failed!\n"); ++ return -EINVAL; ++ } ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_cscn_dcp_set); ++ ++int qman_ceetm_cscn_dcp_get(struct qm_ceetm_ccg *ccg, ++ u16 dcp_idx, ++ u8 *vcgid, ++ unsigned int *cscn_enabled) ++{ ++ struct qm_mcc_ceetm_ccgr_query query_opts; ++ struct qm_mcr_ceetm_ccgr_query query_result; ++ ++ query_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_QUERY | ++ (ccg->parent->idx << 4) | ccg->idx); ++ query_opts.dcpid = ccg->parent->dcp_idx; ++ ++ if (qman_ceetm_query_ccgr(&query_opts, &query_result)) { ++ pr_err("Can't query CCGR#%d\n", ccg->idx); ++ return -EINVAL; ++ } ++ ++ *vcgid = query_result.cm_query.cdv; ++ *cscn_enabled = (query_result.cm_query.cscn_targ_dcp >> dcp_idx) & 0x1; ++ return 0; ++} ++EXPORT_SYMBOL(qman_ceetm_cscn_dcp_get); ++ ++int qman_ceetm_querycongestion(struct __qm_mcr_querycongestion *ccg_state, ++ unsigned int dcp_idx) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ u8 res; ++ int i, j; ++ ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ ++ mcc = qm_mc_start(&p->p); ++ 
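/* The CEETM congestion state for a DCP covers 512 CCGs, returned by ++ * CCGR_QUERY as two 256-bit blocks (eight 32-bit words each), so one ++ * query is issued per block and each word is byte-swapped on the way out. ++ */ ++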
for (i = 0; i < 2; i++) { ++ mcc->ccgr_query.ccgrid = ++ cpu_to_be16(CEETM_QUERY_CONGESTION_STATE | i); ++ mcc->ccgr_query.dcpid = dcp_idx; ++ qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY); ++ ++ while (!(mcr = qm_mc_result(&p->p))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == ++ QM_CEETM_VERB_CCGR_QUERY); ++ res = mcr->result; ++ if (res == QM_MCR_RESULT_OK) { ++ for (j = 0; j < 8; j++) ++ mcr->ccgr_query.congestion_state.state. ++ __state[j] = be32_to_cpu(mcr->ccgr_query. ++ congestion_state.state.__state[j]); ++ *(ccg_state + i) = ++ mcr->ccgr_query.congestion_state.state; ++ } else { ++ pr_err("QUERY CEETM CONGESTION STATE failed\n"); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ return -EIO; ++ } ++ } ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ return 0; ++} ++ ++int qman_set_wpm(int wpm_enable) ++{ ++ return qm_set_wpm(wpm_enable); ++} ++EXPORT_SYMBOL(qman_set_wpm); ++ ++int qman_get_wpm(int *wpm_enable) ++{ ++ return qm_get_wpm(wpm_enable); ++} ++EXPORT_SYMBOL(qman_get_wpm); ++ ++int qman_shutdown_fq(u32 fqid) ++{ ++ struct qman_portal *p; ++ unsigned long irqflags __maybe_unused; ++ int ret; ++ struct qm_portal *low_p; ++ p = get_affine_portal(); ++ PORTAL_IRQ_LOCK(p, irqflags); ++ low_p = &p->p; ++ ret = qm_shutdown_fq(&low_p, 1, fqid); ++ PORTAL_IRQ_UNLOCK(p, irqflags); ++ put_affine_portal(); ++ return ret; ++} ++ ++const struct qm_portal_config *qman_get_qm_portal_config( ++ struct qman_portal *portal) ++{ ++ return portal->sharing_redirect ? NULL : portal->config; ++} +--- /dev/null ++++ b/drivers/staging/fsl_qbman/qman_low.h +@@ -0,0 +1,1427 @@ ++/* Copyright 2008-2011 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include "qman_private.h" ++ ++/***************************/ ++/* Portal register assists */ ++/***************************/ ++ ++/* Cache-inhibited register offsets */ ++#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64) ++ ++#define QM_REG_EQCR_PI_CINH 0x0000 ++#define QM_REG_EQCR_CI_CINH 0x0004 ++#define QM_REG_EQCR_ITR 0x0008 ++#define QM_REG_DQRR_PI_CINH 0x0040 ++#define QM_REG_DQRR_CI_CINH 0x0044 ++#define QM_REG_DQRR_ITR 0x0048 ++#define QM_REG_DQRR_DCAP 0x0050 ++#define QM_REG_DQRR_SDQCR 0x0054 ++#define QM_REG_DQRR_VDQCR 0x0058 ++#define QM_REG_DQRR_PDQCR 0x005c ++#define QM_REG_MR_PI_CINH 0x0080 ++#define QM_REG_MR_CI_CINH 0x0084 ++#define QM_REG_MR_ITR 0x0088 ++#define QM_REG_CFG 0x0100 ++#define QM_REG_ISR 0x0e00 ++#define QM_REG_IIR 0x0e0c ++#define QM_REG_ITPR 0x0e14 ++ ++/* Cache-enabled register offsets */ ++#define QM_CL_EQCR 0x0000 ++#define QM_CL_DQRR 0x1000 ++#define QM_CL_MR 0x2000 ++#define QM_CL_EQCR_PI_CENA 0x3000 ++#define QM_CL_EQCR_CI_CENA 0x3100 ++#define QM_CL_DQRR_PI_CENA 0x3200 ++#define QM_CL_DQRR_CI_CENA 0x3300 ++#define QM_CL_MR_PI_CENA 0x3400 ++#define QM_CL_MR_CI_CENA 0x3500 ++#define QM_CL_CR 0x3800 ++#define QM_CL_RR0 0x3900 ++#define QM_CL_RR1 0x3940 ++ ++#endif ++ ++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) ++ ++#define QM_REG_EQCR_PI_CINH 0x3000 ++#define QM_REG_EQCR_CI_CINH 0x3040 ++#define QM_REG_EQCR_ITR 0x3080 ++#define QM_REG_DQRR_PI_CINH 0x3100 ++#define QM_REG_DQRR_CI_CINH 0x3140 ++#define QM_REG_DQRR_ITR 0x3180 ++#define QM_REG_DQRR_DCAP 0x31C0 ++#define QM_REG_DQRR_SDQCR 0x3200 ++#define QM_REG_DQRR_VDQCR 0x3240 ++#define QM_REG_DQRR_PDQCR 0x3280 ++#define QM_REG_MR_PI_CINH 0x3300 ++#define QM_REG_MR_CI_CINH 0x3340 ++#define QM_REG_MR_ITR 0x3380 ++#define QM_REG_CFG 0x3500 ++#define QM_REG_ISR 0x3600 ++#define QM_REG_IIR 0x36C0 ++#define QM_REG_ITPR 0x3740 ++ ++/* Cache-enabled register offsets */ ++#define QM_CL_EQCR 0x0000 ++#define QM_CL_DQRR 0x1000 ++#define QM_CL_MR 0x2000 ++#define QM_CL_EQCR_PI_CENA 0x3000 ++#define QM_CL_EQCR_CI_CENA 0x3040 ++#define QM_CL_DQRR_PI_CENA 0x3100 ++#define QM_CL_DQRR_CI_CENA 0x3140 ++#define QM_CL_MR_PI_CENA 0x3300 ++#define QM_CL_MR_CI_CENA 0x3340 ++#define QM_CL_CR 0x3800 ++#define QM_CL_RR0 0x3900 ++#define QM_CL_RR1 0x3940 ++ ++#endif ++ ++ ++/* BTW, the drivers (and h/w programming model) already obtain the required ++ * synchronisation for portal accesses via lwsync(), hwsync(), and ++ * data-dependencies. Use of barrier()s or other order-preserving primitives ++ * simply degrades performance. Hence the use of the __raw_*() interfaces, which ++ * simply ensure that the compiler treats the portal registers as volatile (i.e. ++ * non-coherent). */ ++ ++/* Cache-inhibited register access.
*/ ++#define __qm_in(qm, o) be32_to_cpu(__raw_readl((qm)->addr_ci + (o))) ++#define __qm_out(qm, o, val) __raw_writel((cpu_to_be32(val)), \ ++ (qm)->addr_ci + (o)); ++#define qm_in(reg) __qm_in(&portal->addr, QM_REG_##reg) ++#define qm_out(reg, val) __qm_out(&portal->addr, QM_REG_##reg, val) ++ ++/* Cache-enabled (index) register access */ ++#define __qm_cl_touch_ro(qm, o) dcbt_ro((qm)->addr_ce + (o)) ++#define __qm_cl_touch_rw(qm, o) dcbt_rw((qm)->addr_ce + (o)) ++#define __qm_cl_in(qm, o) be32_to_cpu(__raw_readl((qm)->addr_ce + (o))) ++#define __qm_cl_out(qm, o, val) \ ++ do { \ ++ u32 *__tmpclout = (qm)->addr_ce + (o); \ ++ __raw_writel(cpu_to_be32(val), __tmpclout); \ ++ dcbf(__tmpclout); \ ++ } while (0) ++#define __qm_cl_invalidate(qm, o) dcbi((qm)->addr_ce + (o)) ++#define qm_cl_touch_ro(reg) __qm_cl_touch_ro(&portal->addr, QM_CL_##reg##_CENA) ++#define qm_cl_touch_rw(reg) __qm_cl_touch_rw(&portal->addr, QM_CL_##reg##_CENA) ++#define qm_cl_in(reg) __qm_cl_in(&portal->addr, QM_CL_##reg##_CENA) ++#define qm_cl_out(reg, val) __qm_cl_out(&portal->addr, QM_CL_##reg##_CENA, val) ++#define qm_cl_invalidate(reg)\ ++ __qm_cl_invalidate(&portal->addr, QM_CL_##reg##_CENA) ++ ++/* Cache-enabled ring access */ ++#define qm_cl(base, idx) ((void *)base + ((idx) << 6)) ++ ++/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf ++ * analysis, look at using the "extra" bit in the ring index registers to avoid ++ * cyclic issues. */ ++static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last) ++{ ++ /* 'first' is included, 'last' is excluded */ ++ if (first <= last) ++ return last - first; ++ return ringsize + last - first; ++} ++ ++/* Portal modes. ++ * Enum types: ++ * pmode == production mode, ++ * cmode == consumption mode, ++ * dmode == h/w dequeue mode. ++ * Enum values use 3 letter codes. First letter matches the portal mode, ++ * remaining two letters indicate: ++ * ci == cache-inhibited portal register ++ * ce == cache-enabled portal register ++ * vb == in-band valid-bit (cache-enabled) ++ * dc == DCA (Discrete Consumption Acknowledgement), DQRR-only ++ * As for "enum qm_dqrr_dmode", it should be self-explanatory.
++ */ ++enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */ ++ qm_eqcr_pci = 0, /* PI index, cache-inhibited */ ++ qm_eqcr_pce = 1, /* PI index, cache-enabled */ ++ qm_eqcr_pvb = 2 /* valid-bit */ ++}; ++enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */ ++ qm_dqrr_dpush = 0, /* SDQCR + VDQCR */ ++ qm_dqrr_dpull = 1 /* PDQCR */ ++}; ++enum qm_dqrr_pmode { /* s/w-only */ ++ qm_dqrr_pci, /* reads DQRR_PI_CINH */ ++ qm_dqrr_pce, /* reads DQRR_PI_CENA */ ++ qm_dqrr_pvb /* reads valid-bit */ ++}; ++enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */ ++ qm_dqrr_cci = 0, /* CI index, cache-inhibited */ ++ qm_dqrr_cce = 1, /* CI index, cache-enabled */ ++ qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgement */ ++}; ++enum qm_mr_pmode { /* s/w-only */ ++ qm_mr_pci, /* reads MR_PI_CINH */ ++ qm_mr_pce, /* reads MR_PI_CENA */ ++ qm_mr_pvb /* reads valid-bit */ ++}; ++enum qm_mr_cmode { /* matches QCSP_CFG::MM */ ++ qm_mr_cci = 0, /* CI index, cache-inhibited */ ++ qm_mr_cce = 1 /* CI index, cache-enabled */ ++}; ++ ++ ++/* ------------------------- */ ++/* --- Portal structures --- */ ++ ++#define QM_EQCR_SIZE 8 ++#define QM_DQRR_SIZE 16 ++#define QM_MR_SIZE 8 ++ ++struct qm_eqcr { ++ struct qm_eqcr_entry *ring, *cursor; ++ u8 ci, available, ithresh, vbit; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ u32 busy; ++ enum qm_eqcr_pmode pmode; ++#endif ++}; ++ ++struct qm_dqrr { ++ const struct qm_dqrr_entry *ring, *cursor; ++ u8 pi, ci, fill, ithresh, vbit; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ enum qm_dqrr_dmode dmode; ++ enum qm_dqrr_pmode pmode; ++ enum qm_dqrr_cmode cmode; ++#endif ++}; ++ ++struct qm_mr { ++ const struct qm_mr_entry *ring, *cursor; ++ u8 pi, ci, fill, ithresh, vbit; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ enum qm_mr_pmode pmode; ++ enum qm_mr_cmode cmode; ++#endif ++}; ++ ++struct qm_mc { ++ struct qm_mc_command *cr; ++ struct qm_mc_result *rr; ++ u8 rridx, vbit; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ enum { ++ /* Can be _mc_start()ed */ ++ qman_mc_idle, ++ /* Can be _mc_commit()ed or _mc_abort()ed */ ++ qman_mc_user, ++ /* Can only be _mc_retry()ed */ ++ qman_mc_hw ++ } state; ++#endif ++}; ++ ++#define QM_PORTAL_ALIGNMENT ____cacheline_aligned ++ ++struct qm_addr { ++ void __iomem *addr_ce; /* cache-enabled */ ++ void __iomem *addr_ci; /* cache-inhibited */ ++}; ++ ++struct qm_portal { ++ /* In the non-CONFIG_FSL_DPA_CHECKING case, the following stuff up to ++ * and including 'mc' fits within a cacheline (yay!). The 'config' part ++ * is setup-only, so isn't a cause for a concern. In other words, don't ++ * rearrange this structure on a whim, there be dragons ... */ ++ struct qm_addr addr; ++ struct qm_eqcr eqcr; ++ struct qm_dqrr dqrr; ++ struct qm_mr mr; ++ struct qm_mc mc; ++} QM_PORTAL_ALIGNMENT; ++ ++ ++/* ---------------- */ ++/* --- EQCR API --- */ ++ ++/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */ ++#define EQCR_CARRYCLEAR(p) \ ++ (void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6))) ++ ++/* Bit-wise logic to convert a ring pointer to a ring index */ ++static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e) ++{ ++ return ((uintptr_t)e >> 6) & (QM_EQCR_SIZE - 1); ++} ++ ++/* Increment the 'cursor' ring pointer, taking 'vbit' into account */ ++static inline void EQCR_INC(struct qm_eqcr *eqcr) ++{ ++ /* NB: this is odd-looking, but experiments show that it generates fast ++ * code with essentially no branching overheads. We increment to the ++ * next EQCR pointer and handle overflow and 'vbit'. 
*/ ++ struct qm_eqcr_entry *partial = eqcr->cursor + 1; ++ eqcr->cursor = EQCR_CARRYCLEAR(partial); ++ if (partial != eqcr->cursor) ++ eqcr->vbit ^= QM_EQCR_VERB_VBIT; ++} ++ ++static inline int qm_eqcr_init(struct qm_portal *portal, ++ enum qm_eqcr_pmode pmode, ++ unsigned int eq_stash_thresh, ++ int eq_stash_prio) ++{ ++ /* This use of 'register', as well as all other occurrences, is because ++ * it has been observed to generate much faster code with gcc than is ++ * otherwise the case. */ ++ register struct qm_eqcr *eqcr = &portal->eqcr; ++ u32 cfg; ++ u8 pi; ++ ++ eqcr->ring = portal->addr.addr_ce + QM_CL_EQCR; ++ eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); ++ qm_cl_invalidate(EQCR_CI); ++ pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1); ++ eqcr->cursor = eqcr->ring + pi; ++ eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ? ++ QM_EQCR_VERB_VBIT : 0; ++ eqcr->available = QM_EQCR_SIZE - 1 - ++ qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi); ++ eqcr->ithresh = qm_in(EQCR_ITR); ++#ifdef CONFIG_FSL_DPA_CHECKING ++ eqcr->busy = 0; ++ eqcr->pmode = pmode; ++#endif ++ cfg = (qm_in(CFG) & 0x00ffffff) | ++ (eq_stash_thresh << 28) | /* QCSP_CFG: EST */ ++ (eq_stash_prio << 26) | /* QCSP_CFG: EP */ ++ ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */ ++ qm_out(CFG, cfg); ++ return 0; ++} ++ ++static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal) ++{ ++ return (qm_in(CFG) >> 28) & 0x7; ++} ++ ++static inline void qm_eqcr_finish(struct qm_portal *portal) ++{ ++ register struct qm_eqcr *eqcr = &portal->eqcr; ++ u8 pi, ci; ++ u32 cfg; ++ ++ /* ++ * Disable EQCI stashing because the QMan only ++ * presents the value it previously stashed to ++ * maintain coherency. Setting the stash threshold ++ * to 1 then 0 ensures that QMan has resynchronized ++ * its internal copy so that the portal is clean ++ * when it is reinitialized in the future ++ */ ++ cfg = (qm_in(CFG) & 0x0fffffff) | ++ (1 << 28); /* QCSP_CFG: EST */ ++ qm_out(CFG, cfg); ++ cfg &= 0x0fffffff; /* stash threshold = 0 */ ++ qm_out(CFG, cfg); ++ ++ pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1); ++ ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); ++ ++ /* Refresh EQCR CI cache value */ ++ qm_cl_invalidate(EQCR_CI); ++ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1); ++ ++ DPA_ASSERT(!eqcr->busy); ++ if (pi != EQCR_PTR2IDX(eqcr->cursor)) ++ pr_crit("losing uncommitted EQCR entries\n"); ++ if (ci != eqcr->ci) ++ pr_crit("missing existing EQCR completions\n"); ++ if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor)) ++ pr_crit("EQCR destroyed unquiesced\n"); ++} ++ ++static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal ++ *portal) ++{ ++ register struct qm_eqcr *eqcr = &portal->eqcr; ++ DPA_ASSERT(!eqcr->busy); ++ if (!eqcr->available) ++ return NULL; ++ ++ ++#ifdef CONFIG_FSL_DPA_CHECKING ++ eqcr->busy = 1; ++#endif ++#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64) ++ dcbz_64(eqcr->cursor); ++#endif ++ return eqcr->cursor; ++} ++ ++static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal ++ *portal) ++{ ++ register struct qm_eqcr *eqcr = &portal->eqcr; ++ u8 diff, old_ci; ++ ++ DPA_ASSERT(!eqcr->busy); ++ if (!eqcr->available) { ++ old_ci = eqcr->ci; ++ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1); ++ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); ++ eqcr->available += diff; ++ if (!diff) ++ return NULL; ++ } ++#ifdef CONFIG_FSL_DPA_CHECKING ++ eqcr->busy = 1; ++#endif ++#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64) ++ dcbz_64(eqcr->cursor); ++#endif ++ return eqcr->cursor; ++} ++
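++/* Illustrative sketch (not part of the original driver): the start/commit
++ * helpers around here pair up for a single enqueue. In PVB mode a producer
++ * would do roughly the following, where 'portal', 'fqid' and 'fd' are
++ * caller-supplied, and QM_EQCR_VERB_CMD_ENQUEUE plus the qm_eqcr_entry
++ * layout (a fqid word and an embedded qm_fd) come from the public qman
++ * header:
++ *
++ *	struct qm_eqcr_entry *eq = qm_eqcr_start_stash(portal);
++ *
++ *	if (eq) {
++ *		eq->fqid = cpu_to_be32(fqid);
++ *		eq->fd = *fd;
++ *		qm_eqcr_pvb_commit(portal, QM_EQCR_VERB_CMD_ENQUEUE);
++ *	}
++ *
++ * The PVB commit writes the verb (including the valid bit) last, behind a
++ * barrier, so the hardware never consumes a half-built entry.
++ */
++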
++static inline void qm_eqcr_abort(struct qm_portal *portal) ++{ ++ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr; ++ DPA_ASSERT(eqcr->busy); ++#ifdef CONFIG_FSL_DPA_CHECKING ++ eqcr->busy = 0; ++#endif ++} ++ ++static inline struct qm_eqcr_entry *qm_eqcr_pend_and_next( ++ struct qm_portal *portal, u8 myverb) ++{ ++ register struct qm_eqcr *eqcr = &portal->eqcr; ++ DPA_ASSERT(eqcr->busy); ++ DPA_ASSERT(eqcr->pmode != qm_eqcr_pvb); ++ if (eqcr->available == 1) ++ return NULL; ++ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit; ++ dcbf(eqcr->cursor); ++ EQCR_INC(eqcr); ++ eqcr->available--; ++#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64) ++ dcbz_64(eqcr->cursor); ++#endif ++ return eqcr->cursor; ++} ++ ++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ ++#define EQCR_COMMIT_CHECKS(eqcr) \ ++do { \ ++ DPA_ASSERT(eqcr->busy); \ ++ DPA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0xffffff00)); \ ++ DPA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0xffffff00)); \ ++} while (0) ++#else ++#define EQCR_COMMIT_CHECKS(eqcr) \ ++do { \ ++ DPA_ASSERT(eqcr->busy); \ ++ DPA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & \ ++ cpu_to_be32(0x00ffffff))); \ ++ DPA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & \ ++ cpu_to_be32(0x00ffffff))); \ ++} while (0) ++#endif ++ ++static inline void qm_eqcr_pci_commit(struct qm_portal *portal, u8 myverb) ++{ ++ register struct qm_eqcr *eqcr = &portal->eqcr; ++ EQCR_COMMIT_CHECKS(eqcr); ++ DPA_ASSERT(eqcr->pmode == qm_eqcr_pci); ++ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit; ++ EQCR_INC(eqcr); ++ eqcr->available--; ++ dcbf(eqcr->cursor); ++ hwsync(); ++ qm_out(EQCR_PI_CINH, EQCR_PTR2IDX(eqcr->cursor)); ++#ifdef CONFIG_FSL_DPA_CHECKING ++ eqcr->busy = 0; ++#endif ++} ++ ++static inline void qm_eqcr_pce_prefetch(struct qm_portal *portal) ++{ ++ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr; ++ DPA_ASSERT(eqcr->pmode == qm_eqcr_pce); ++ qm_cl_invalidate(EQCR_PI); ++ qm_cl_touch_rw(EQCR_PI); ++} ++ ++static inline void qm_eqcr_pce_commit(struct qm_portal *portal, u8 myverb) ++{ ++ register struct qm_eqcr *eqcr = &portal->eqcr; ++ EQCR_COMMIT_CHECKS(eqcr); ++ DPA_ASSERT(eqcr->pmode == qm_eqcr_pce); ++ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit; ++ EQCR_INC(eqcr); ++ eqcr->available--; ++ dcbf(eqcr->cursor); ++ lwsync(); ++ qm_cl_out(EQCR_PI, EQCR_PTR2IDX(eqcr->cursor)); ++#ifdef CONFIG_FSL_DPA_CHECKING ++ eqcr->busy = 0; ++#endif ++} ++ ++static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb) ++{ ++ register struct qm_eqcr *eqcr = &portal->eqcr; ++ struct qm_eqcr_entry *eqcursor; ++ EQCR_COMMIT_CHECKS(eqcr); ++ DPA_ASSERT(eqcr->pmode == qm_eqcr_pvb); ++ lwsync(); ++ eqcursor = eqcr->cursor; ++ eqcursor->__dont_write_directly__verb = myverb | eqcr->vbit; ++ dcbf(eqcursor); ++ EQCR_INC(eqcr); ++ eqcr->available--; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ eqcr->busy = 0; ++#endif ++} ++ ++static inline u8 qm_eqcr_cci_update(struct qm_portal *portal) ++{ ++ register struct qm_eqcr *eqcr = &portal->eqcr; ++ u8 diff, old_ci = eqcr->ci; ++ eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); ++ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); ++ eqcr->available += diff; ++ return diff; ++} ++ ++static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal) ++{ ++ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr; ++ qm_cl_touch_ro(EQCR_CI); ++} ++ ++static inline u8 qm_eqcr_cce_update(struct qm_portal *portal) ++{ ++ register struct qm_eqcr 
*eqcr = &portal->eqcr; ++ u8 diff, old_ci = eqcr->ci; ++ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1); ++ qm_cl_invalidate(EQCR_CI); ++ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); ++ eqcr->available += diff; ++ return diff; ++} ++ ++static inline u8 qm_eqcr_get_ithresh(struct qm_portal *portal) ++{ ++ register struct qm_eqcr *eqcr = &portal->eqcr; ++ return eqcr->ithresh; ++} ++ ++static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh) ++{ ++ register struct qm_eqcr *eqcr = &portal->eqcr; ++ eqcr->ithresh = ithresh; ++ qm_out(EQCR_ITR, ithresh); ++} ++ ++static inline u8 qm_eqcr_get_avail(struct qm_portal *portal) ++{ ++ register struct qm_eqcr *eqcr = &portal->eqcr; ++ return eqcr->available; ++} ++ ++static inline u8 qm_eqcr_get_fill(struct qm_portal *portal) ++{ ++ register struct qm_eqcr *eqcr = &portal->eqcr; ++ return QM_EQCR_SIZE - 1 - eqcr->available; ++} ++ ++ ++/* ---------------- */ ++/* --- DQRR API --- */ ++ ++/* FIXME: many possible improvements; ++ * - look at changing the API to use pointer rather than index parameters now ++ * that 'cursor' is a pointer, ++ * - consider moving other parameters to pointer if it could help (ci) ++ */ ++ ++#define DQRR_CARRYCLEAR(p) \ ++ (void *)((unsigned long)(p) & (~(unsigned long)(QM_DQRR_SIZE << 6))) ++ ++static inline u8 DQRR_PTR2IDX(const struct qm_dqrr_entry *e) ++{ ++ return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1); ++} ++ ++static inline const struct qm_dqrr_entry *DQRR_INC( ++ const struct qm_dqrr_entry *e) ++{ ++ return DQRR_CARRYCLEAR(e + 1); ++} ++ ++static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf) ++{ ++ qm_out(CFG, (qm_in(CFG) & 0xff0fffff) | ++ ((mf & (QM_DQRR_SIZE - 1)) << 20)); ++} ++ ++static inline void qm_dqrr_cci_consume(struct qm_portal *portal, u8 num) ++{ ++ register struct qm_dqrr *dqrr = &portal->dqrr; ++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cci); ++ dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1); ++ qm_out(DQRR_CI_CINH, dqrr->ci); ++} ++ ++static inline void qm_dqrr_cce_consume(struct qm_portal *portal, u8 num) ++{ ++ register struct qm_dqrr *dqrr = &portal->dqrr; ++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cce); ++ dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1); ++ qm_cl_out(DQRR_CI, dqrr->ci); ++} ++ ++static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u16 bitmask) ++{ ++ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; ++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); ++ qm_out(DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */ ++ ((u32)bitmask << 16)); /* DQRR_DCAP::DCAP_CI */ ++ dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1); ++ dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi); ++} ++ ++static inline int qm_dqrr_init(struct qm_portal *portal, ++ const struct qm_portal_config *config, ++ enum qm_dqrr_dmode dmode, ++ __maybe_unused enum qm_dqrr_pmode pmode, ++ enum qm_dqrr_cmode cmode, u8 max_fill) ++{ ++ register struct qm_dqrr *dqrr = &portal->dqrr; ++ u32 cfg; ++ ++ /* Make sure the DQRR will be idle when we enable */ ++ qm_out(DQRR_SDQCR, 0); ++ qm_out(DQRR_VDQCR, 0); ++ qm_out(DQRR_PDQCR, 0); ++ dqrr->ring = portal->addr.addr_ce + QM_CL_DQRR; ++ dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1); ++ dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1); ++ dqrr->cursor = dqrr->ring + dqrr->ci; ++ dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi); ++ dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ? 
++ QM_DQRR_VERB_VBIT : 0; ++ dqrr->ithresh = qm_in(DQRR_ITR); ++ ++ /* Free up pending DQRR entries if any as per current DCM */ ++ if (dqrr->fill) { ++ enum qm_dqrr_cmode dcm = (qm_in(CFG) >> 16) & 3; ++ ++#ifdef CONFIG_FSL_DPA_CHECKING ++ dqrr->cmode = dcm; ++#endif ++ switch (dcm) { ++ case qm_dqrr_cci: ++ qm_dqrr_cci_consume(portal, dqrr->fill); ++ break; ++ case qm_dqrr_cce: ++ qm_dqrr_cce_consume(portal, dqrr->fill); ++ break; ++ case qm_dqrr_cdc: ++ qm_dqrr_cdc_consume_n(portal, (QM_DQRR_SIZE - 1)); ++ break; ++ default: ++ DPA_ASSERT(0); ++ } ++ } ++ ++#ifdef CONFIG_FSL_DPA_CHECKING ++ dqrr->dmode = dmode; ++ dqrr->pmode = pmode; ++ dqrr->cmode = cmode; ++#endif ++ /* Invalidate every ring entry before beginning */ ++ for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++) ++ dcbi(qm_cl(dqrr->ring, cfg)); ++ cfg = (qm_in(CFG) & 0xff000f00) | ++ ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */ ++ ((dmode & 1) << 18) | /* DP */ ++ ((cmode & 3) << 16) | /* DCM */ ++ 0xa0 | /* RE+SE */ ++ (0 ? 0x40 : 0) | /* Ignore RP */ ++ (0 ? 0x10 : 0); /* Ignore SP */ ++ qm_out(CFG, cfg); ++ qm_dqrr_set_maxfill(portal, max_fill); ++ return 0; ++} ++ ++static inline void qm_dqrr_finish(struct qm_portal *portal) ++{ ++ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ if ((dqrr->cmode != qm_dqrr_cdc) && ++ (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor))) ++ pr_crit("Ignoring completed DQRR entries\n"); ++#endif ++} ++ ++static inline const struct qm_dqrr_entry *qm_dqrr_current( ++ struct qm_portal *portal) ++{ ++ register struct qm_dqrr *dqrr = &portal->dqrr; ++ if (!dqrr->fill) ++ return NULL; ++ return dqrr->cursor; ++} ++ ++static inline u8 qm_dqrr_cursor(struct qm_portal *portal) ++{ ++ register struct qm_dqrr *dqrr = &portal->dqrr; ++ return DQRR_PTR2IDX(dqrr->cursor); ++} ++ ++static inline u8 qm_dqrr_next(struct qm_portal *portal) ++{ ++ register struct qm_dqrr *dqrr = &portal->dqrr; ++ DPA_ASSERT(dqrr->fill); ++ dqrr->cursor = DQRR_INC(dqrr->cursor); ++ return --dqrr->fill; ++} ++ ++static inline u8 qm_dqrr_pci_update(struct qm_portal *portal) ++{ ++ register struct qm_dqrr *dqrr = &portal->dqrr; ++ u8 diff, old_pi = dqrr->pi; ++ DPA_ASSERT(dqrr->pmode == qm_dqrr_pci); ++ dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1); ++ diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi); ++ dqrr->fill += diff; ++ return diff; ++} ++ ++static inline void qm_dqrr_pce_prefetch(struct qm_portal *portal) ++{ ++ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; ++ DPA_ASSERT(dqrr->pmode == qm_dqrr_pce); ++ qm_cl_invalidate(DQRR_PI); ++ qm_cl_touch_ro(DQRR_PI); ++} ++ ++static inline u8 qm_dqrr_pce_update(struct qm_portal *portal) ++{ ++ register struct qm_dqrr *dqrr = &portal->dqrr; ++ u8 diff, old_pi = dqrr->pi; ++ DPA_ASSERT(dqrr->pmode == qm_dqrr_pce); ++ dqrr->pi = qm_cl_in(DQRR_PI) & (QM_DQRR_SIZE - 1); ++ diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi); ++ dqrr->fill += diff; ++ return diff; ++} ++ ++static inline void qm_dqrr_pvb_update(struct qm_portal *portal) ++{ ++ register struct qm_dqrr *dqrr = &portal->dqrr; ++ const struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi); ++ DPA_ASSERT(dqrr->pmode == qm_dqrr_pvb); ++#if (defined CONFIG_PPC || defined CONFIG_PPC64) && !defined CONFIG_FSL_PAMU ++ /* ++ * On PowerPC platforms if PAMU is not available we need to ++ * manually invalidate the cache. 
When PAMU is available the ++ * cache is updated by stashing operations generated by QMan ++ */ ++ dcbi(res); ++ dcbt_ro(res); ++#endif ++ ++ /* when accessing 'verb', use __raw_readb() to ensure that compiler ++ * inlining doesn't try to optimise out "excess reads". */ ++ if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) { ++ dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1); ++ if (!dqrr->pi) ++ dqrr->vbit ^= QM_DQRR_VERB_VBIT; ++ dqrr->fill++; ++ } ++} ++ ++ ++static inline void qm_dqrr_cci_consume_to_current(struct qm_portal *portal) ++{ ++ register struct qm_dqrr *dqrr = &portal->dqrr; ++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cci); ++ dqrr->ci = DQRR_PTR2IDX(dqrr->cursor); ++ qm_out(DQRR_CI_CINH, dqrr->ci); ++} ++ ++static inline void qm_dqrr_cce_prefetch(struct qm_portal *portal) ++{ ++ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; ++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cce); ++ qm_cl_invalidate(DQRR_CI); ++ qm_cl_touch_rw(DQRR_CI); ++} ++ ++static inline void qm_dqrr_cce_consume_to_current(struct qm_portal *portal) ++{ ++ register struct qm_dqrr *dqrr = &portal->dqrr; ++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cce); ++ dqrr->ci = DQRR_PTR2IDX(dqrr->cursor); ++ qm_cl_out(DQRR_CI, dqrr->ci); ++} ++ ++static inline void qm_dqrr_cdc_consume_1(struct qm_portal *portal, u8 idx, ++ int park) ++{ ++ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; ++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); ++ DPA_ASSERT(idx < QM_DQRR_SIZE); ++ qm_out(DQRR_DCAP, (0 << 8) | /* S */ ++ ((park ? 1 : 0) << 6) | /* PK */ ++ idx); /* DCAP_CI */ ++} ++ ++static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal, ++ const struct qm_dqrr_entry *dq, ++ int park) ++{ ++ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; ++ u8 idx = DQRR_PTR2IDX(dq); ++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); ++ DPA_ASSERT((dqrr->ring + idx) == dq); ++ DPA_ASSERT(idx < QM_DQRR_SIZE); ++ qm_out(DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */ ++ ((park ? 
1 : 0) << 6) | /* DQRR_DCAP::PK */ ++ idx); /* DQRR_DCAP::DCAP_CI */ ++} ++ ++static inline u8 qm_dqrr_cdc_cci(struct qm_portal *portal) ++{ ++ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; ++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); ++ return qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1); ++} ++ ++static inline void qm_dqrr_cdc_cce_prefetch(struct qm_portal *portal) ++{ ++ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; ++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); ++ qm_cl_invalidate(DQRR_CI); ++ qm_cl_touch_ro(DQRR_CI); ++} ++ ++static inline u8 qm_dqrr_cdc_cce(struct qm_portal *portal) ++{ ++ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; ++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); ++ return qm_cl_in(DQRR_CI) & (QM_DQRR_SIZE - 1); ++} ++ ++static inline u8 qm_dqrr_get_ci(struct qm_portal *portal) ++{ ++ register struct qm_dqrr *dqrr = &portal->dqrr; ++ DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc); ++ return dqrr->ci; ++} ++ ++static inline void qm_dqrr_park(struct qm_portal *portal, u8 idx) ++{ ++ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; ++ DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc); ++ qm_out(DQRR_DCAP, (0 << 8) | /* S */ ++ (1 << 6) | /* PK */ ++ (idx & (QM_DQRR_SIZE - 1))); /* DCAP_CI */ ++} ++ ++static inline void qm_dqrr_park_current(struct qm_portal *portal) ++{ ++ register struct qm_dqrr *dqrr = &portal->dqrr; ++ DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc); ++ qm_out(DQRR_DCAP, (0 << 8) | /* S */ ++ (1 << 6) | /* PK */ ++ DQRR_PTR2IDX(dqrr->cursor)); /* DCAP_CI */ ++} ++ ++static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr) ++{ ++ qm_out(DQRR_SDQCR, sdqcr); ++} ++ ++static inline u32 qm_dqrr_sdqcr_get(struct qm_portal *portal) ++{ ++ return qm_in(DQRR_SDQCR); ++} ++ ++static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr) ++{ ++ qm_out(DQRR_VDQCR, vdqcr); ++} ++ ++static inline u32 qm_dqrr_vdqcr_get(struct qm_portal *portal) ++{ ++ return qm_in(DQRR_VDQCR); ++} ++ ++static inline void qm_dqrr_pdqcr_set(struct qm_portal *portal, u32 pdqcr) ++{ ++ qm_out(DQRR_PDQCR, pdqcr); ++} ++ ++static inline u32 qm_dqrr_pdqcr_get(struct qm_portal *portal) ++{ ++ return qm_in(DQRR_PDQCR); ++} ++ ++static inline u8 qm_dqrr_get_ithresh(struct qm_portal *portal) ++{ ++ register struct qm_dqrr *dqrr = &portal->dqrr; ++ return dqrr->ithresh; ++} ++ ++static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh) ++{ ++ qm_out(DQRR_ITR, ithresh); ++} ++ ++static inline u8 qm_dqrr_get_maxfill(struct qm_portal *portal) ++{ ++ return (qm_in(CFG) & 0x00f00000) >> 20; ++} ++ ++ ++/* -------------- */ ++/* --- MR API --- */ ++ ++#define MR_CARRYCLEAR(p) \ ++ (void *)((unsigned long)(p) & (~(unsigned long)(QM_MR_SIZE << 6))) ++ ++static inline u8 MR_PTR2IDX(const struct qm_mr_entry *e) ++{ ++ return ((uintptr_t)e >> 6) & (QM_MR_SIZE - 1); ++} ++ ++static inline const struct qm_mr_entry *MR_INC(const struct qm_mr_entry *e) ++{ ++ return MR_CARRYCLEAR(e + 1); ++} ++ ++static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode, ++ enum qm_mr_cmode cmode) ++{ ++ register struct qm_mr *mr = &portal->mr; ++ u32 cfg; ++ ++ mr->ring = portal->addr.addr_ce + QM_CL_MR; ++ mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1); ++ mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1); ++ mr->cursor = mr->ring + mr->ci; ++ mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi); ++ mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? 
QM_MR_VERB_VBIT : 0; ++ mr->ithresh = qm_in(MR_ITR); ++#ifdef CONFIG_FSL_DPA_CHECKING ++ mr->pmode = pmode; ++ mr->cmode = cmode; ++#endif ++ cfg = (qm_in(CFG) & 0xfffff0ff) | ++ ((cmode & 1) << 8); /* QCSP_CFG:MM */ ++ qm_out(CFG, cfg); ++ return 0; ++} ++ ++static inline void qm_mr_finish(struct qm_portal *portal) ++{ ++ register struct qm_mr *mr = &portal->mr; ++ if (mr->ci != MR_PTR2IDX(mr->cursor)) ++ pr_crit("Ignoring completed MR entries\n"); ++} ++ ++static inline const struct qm_mr_entry *qm_mr_current(struct qm_portal *portal) ++{ ++ register struct qm_mr *mr = &portal->mr; ++ if (!mr->fill) ++ return NULL; ++ return mr->cursor; ++} ++ ++static inline u8 qm_mr_cursor(struct qm_portal *portal) ++{ ++ register struct qm_mr *mr = &portal->mr; ++ return MR_PTR2IDX(mr->cursor); ++} ++ ++static inline u8 qm_mr_next(struct qm_portal *portal) ++{ ++ register struct qm_mr *mr = &portal->mr; ++ DPA_ASSERT(mr->fill); ++ mr->cursor = MR_INC(mr->cursor); ++ return --mr->fill; ++} ++ ++static inline u8 qm_mr_pci_update(struct qm_portal *portal) ++{ ++ register struct qm_mr *mr = &portal->mr; ++ u8 diff, old_pi = mr->pi; ++ DPA_ASSERT(mr->pmode == qm_mr_pci); ++ mr->pi = qm_in(MR_PI_CINH); ++ diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi); ++ mr->fill += diff; ++ return diff; ++} ++ ++static inline void qm_mr_pce_prefetch(struct qm_portal *portal) ++{ ++ __maybe_unused register struct qm_mr *mr = &portal->mr; ++ DPA_ASSERT(mr->pmode == qm_mr_pce); ++ qm_cl_invalidate(MR_PI); ++ qm_cl_touch_ro(MR_PI); ++} ++ ++static inline u8 qm_mr_pce_update(struct qm_portal *portal) ++{ ++ register struct qm_mr *mr = &portal->mr; ++ u8 diff, old_pi = mr->pi; ++ DPA_ASSERT(mr->pmode == qm_mr_pce); ++ mr->pi = qm_cl_in(MR_PI) & (QM_MR_SIZE - 1); ++ diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi); ++ mr->fill += diff; ++ return diff; ++} ++ ++static inline void qm_mr_pvb_update(struct qm_portal *portal) ++{ ++ register struct qm_mr *mr = &portal->mr; ++ const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi); ++ DPA_ASSERT(mr->pmode == qm_mr_pvb); ++ /* when accessing 'verb', use __raw_readb() to ensure that compiler ++ * inlining doesn't try to optimise out "excess reads". 
*/ ++ if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) { ++ mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1); ++ if (!mr->pi) ++ mr->vbit ^= QM_MR_VERB_VBIT; ++ mr->fill++; ++ res = MR_INC(res); ++ } ++ dcbit_ro(res); ++} ++ ++static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num) ++{ ++ register struct qm_mr *mr = &portal->mr; ++ DPA_ASSERT(mr->cmode == qm_mr_cci); ++ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1); ++ qm_out(MR_CI_CINH, mr->ci); ++} ++ ++static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal) ++{ ++ register struct qm_mr *mr = &portal->mr; ++ DPA_ASSERT(mr->cmode == qm_mr_cci); ++ mr->ci = MR_PTR2IDX(mr->cursor); ++ qm_out(MR_CI_CINH, mr->ci); ++} ++ ++static inline void qm_mr_cce_prefetch(struct qm_portal *portal) ++{ ++ __maybe_unused register struct qm_mr *mr = &portal->mr; ++ DPA_ASSERT(mr->cmode == qm_mr_cce); ++ qm_cl_invalidate(MR_CI); ++ qm_cl_touch_rw(MR_CI); ++} ++ ++static inline void qm_mr_cce_consume(struct qm_portal *portal, u8 num) ++{ ++ register struct qm_mr *mr = &portal->mr; ++ DPA_ASSERT(mr->cmode == qm_mr_cce); ++ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1); ++ qm_cl_out(MR_CI, mr->ci); ++} ++ ++static inline void qm_mr_cce_consume_to_current(struct qm_portal *portal) ++{ ++ register struct qm_mr *mr = &portal->mr; ++ DPA_ASSERT(mr->cmode == qm_mr_cce); ++ mr->ci = MR_PTR2IDX(mr->cursor); ++ qm_cl_out(MR_CI, mr->ci); ++} ++ ++static inline u8 qm_mr_get_ci(struct qm_portal *portal) ++{ ++ register struct qm_mr *mr = &portal->mr; ++ return mr->ci; ++} ++ ++static inline u8 qm_mr_get_ithresh(struct qm_portal *portal) ++{ ++ register struct qm_mr *mr = &portal->mr; ++ return mr->ithresh; ++} ++ ++static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh) ++{ ++ qm_out(MR_ITR, ithresh); ++} ++ ++ ++/* ------------------------------ */ ++/* --- Management command API --- */ ++ ++static inline int qm_mc_init(struct qm_portal *portal) ++{ ++ register struct qm_mc *mc = &portal->mc; ++ mc->cr = portal->addr.addr_ce + QM_CL_CR; ++ mc->rr = portal->addr.addr_ce + QM_CL_RR0; ++ mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) & ++ QM_MCC_VERB_VBIT) ? 0 : 1; ++ mc->vbit = mc->rridx ? 
QM_MCC_VERB_VBIT : 0; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ mc->state = qman_mc_idle; ++#endif ++ return 0; ++} ++ ++static inline void qm_mc_finish(struct qm_portal *portal) ++{ ++ __maybe_unused register struct qm_mc *mc = &portal->mc; ++ DPA_ASSERT(mc->state == qman_mc_idle); ++#ifdef CONFIG_FSL_DPA_CHECKING ++ if (mc->state != qman_mc_idle) ++ pr_crit("Losing incomplete MC command\n"); ++#endif ++} ++ ++static inline struct qm_mc_command *qm_mc_start(struct qm_portal *portal) ++{ ++ register struct qm_mc *mc = &portal->mc; ++ DPA_ASSERT(mc->state == qman_mc_idle); ++#ifdef CONFIG_FSL_DPA_CHECKING ++ mc->state = qman_mc_user; ++#endif ++#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64) ++ dcbz_64(mc->cr); ++#endif ++ return mc->cr; ++} ++ ++static inline void qm_mc_abort(struct qm_portal *portal) ++{ ++ __maybe_unused register struct qm_mc *mc = &portal->mc; ++ DPA_ASSERT(mc->state == qman_mc_user); ++#ifdef CONFIG_FSL_DPA_CHECKING ++ mc->state = qman_mc_idle; ++#endif ++} ++ ++static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb) ++{ ++ register struct qm_mc *mc = &portal->mc; ++ struct qm_mc_result *rr = mc->rr + mc->rridx; ++ DPA_ASSERT(mc->state == qman_mc_user); ++ lwsync(); ++ mc->cr->__dont_write_directly__verb = myverb | mc->vbit; ++ dcbf(mc->cr); ++ dcbit_ro(rr); ++#ifdef CONFIG_FSL_DPA_CHECKING ++ mc->state = qman_mc_hw; ++#endif ++} ++ ++static inline struct qm_mc_result *qm_mc_result(struct qm_portal *portal) ++{ ++ register struct qm_mc *mc = &portal->mc; ++ struct qm_mc_result *rr = mc->rr + mc->rridx; ++ DPA_ASSERT(mc->state == qman_mc_hw); ++ /* The inactive response register's verb byte always returns zero until ++ * its command is submitted and completed. This includes the valid-bit, ++ * in case you were wondering... 
*/ ++ if (!__raw_readb(&rr->verb)) { ++ dcbit_ro(rr); ++ return NULL; ++ } ++ mc->rridx ^= 1; ++ mc->vbit ^= QM_MCC_VERB_VBIT; ++#ifdef CONFIG_FSL_DPA_CHECKING ++ mc->state = qman_mc_idle; ++#endif ++ return rr; ++} ++ ++ ++/* ------------------------------------- */ ++/* --- Portal interrupt register API --- */ ++ ++static inline int qm_isr_init(__always_unused struct qm_portal *portal) ++{ ++ return 0; ++} ++ ++static inline void qm_isr_finish(__always_unused struct qm_portal *portal) ++{ ++} ++ ++static inline void qm_isr_set_iperiod(struct qm_portal *portal, u16 iperiod) ++{ ++ qm_out(ITPR, iperiod); ++} ++ ++static inline u32 __qm_isr_read(struct qm_portal *portal, enum qm_isr_reg n) ++{ ++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) ++ return __qm_in(&portal->addr, QM_REG_ISR + (n << 6)); ++#else ++ return __qm_in(&portal->addr, QM_REG_ISR + (n << 2)); ++#endif ++} ++ ++static inline void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n, ++ u32 val) ++{ ++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) ++ __qm_out(&portal->addr, QM_REG_ISR + (n << 6), val); ++#else ++ __qm_out(&portal->addr, QM_REG_ISR + (n << 2), val); ++#endif ++} ++ ++/* Cleanup FQs */ ++static inline int qm_shutdown_fq(struct qm_portal **portal, int portal_count, ++ u32 fqid) ++{ ++ struct qm_mc_command *mcc; ++ struct qm_mc_result *mcr; ++ u8 state; ++ int orl_empty, fq_empty, i, drain = 0; ++ u32 result; ++ u32 channel, wq; ++ u16 dest_wq; ++ ++ /* Determine the state of the FQID */ ++ mcc = qm_mc_start(portal[0]); ++ mcc->queryfq_np.fqid = cpu_to_be32(fqid); ++ qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ_NP); ++ while (!(mcr = qm_mc_result(portal[0]))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); ++ state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK; ++ if (state == QM_MCR_NP_STATE_OOS) ++ return 0; /* Already OOS, no need to do any more checks */ ++ ++ /* Query which channel the FQ is using */ ++ mcc = qm_mc_start(portal[0]); ++ mcc->queryfq.fqid = cpu_to_be32(fqid); ++ qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ); ++ while (!(mcr = qm_mc_result(portal[0]))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); ++ ++ /* Need to store these since the MCR gets reused */ ++ dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq); ++ wq = dest_wq & 0x7; ++ channel = dest_wq >> 3; ++ ++ switch (state) { ++ case QM_MCR_NP_STATE_TEN_SCHED: ++ case QM_MCR_NP_STATE_TRU_SCHED: ++ case QM_MCR_NP_STATE_ACTIVE: ++ case QM_MCR_NP_STATE_PARKED: ++ orl_empty = 0; ++ mcc = qm_mc_start(portal[0]); ++ mcc->alterfq.fqid = cpu_to_be32(fqid); ++ qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_RETIRE); ++ while (!(mcr = qm_mc_result(portal[0]))) ++ cpu_relax(); ++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == ++ QM_MCR_VERB_ALTER_RETIRE); ++ result = mcr->result; /* Make a copy as we reuse MCR below */ ++ ++ if (result == QM_MCR_RESULT_PENDING) { ++ /* Need to wait for the FQRN in the message ring, which ++ will only occur once the FQ has been drained.
In ++ order for the FQ to drain, the portal needs to be set ++ to dequeue from the channel the FQ is scheduled on */ ++ const struct qm_mr_entry *msg; ++ const struct qm_dqrr_entry *dqrr = NULL; ++ int found_fqrn = 0; ++ u16 dequeue_wq = 0; ++ ++ /* Flag that we need to drain FQ */ ++ drain = 1; ++ ++ if (channel >= qm_channel_pool1 && ++ channel < (qm_channel_pool1 + 15)) { ++ /* Pool channel, enable the bit in the portal */ ++ dequeue_wq = (channel - ++ qm_channel_pool1 + 1) << 4 | wq; ++ } else if (channel < qm_channel_pool1) { ++ /* Dedicated channel */ ++ dequeue_wq = wq; ++ } else { ++ pr_info("Cannot recover FQ 0x%x, it is scheduled on channel 0x%x\n", ++ fqid, channel); ++ return -EBUSY; ++ } ++ /* Set the sdqcr to drain this channel */ ++ if (channel < qm_channel_pool1) ++ for (i = 0; i < portal_count; i++) ++ qm_dqrr_sdqcr_set(portal[i], ++ QM_SDQCR_TYPE_ACTIVE | ++ QM_SDQCR_CHANNELS_DEDICATED); ++ else ++ for (i = 0; i < portal_count; i++) ++ qm_dqrr_sdqcr_set( ++ portal[i], ++ QM_SDQCR_TYPE_ACTIVE | ++ QM_SDQCR_CHANNELS_POOL_CONV ++ (channel)); ++ while (!found_fqrn) { ++ /* Keep draining DQRR while checking the MR */ ++ for (i = 0; i < portal_count; i++) { ++ qm_dqrr_pvb_update(portal[i]); ++ dqrr = qm_dqrr_current(portal[i]); ++ while (dqrr) { ++ qm_dqrr_cdc_consume_1ptr( ++ portal[i], dqrr, 0); ++ qm_dqrr_pvb_update(portal[i]); ++ qm_dqrr_next(portal[i]); ++ dqrr = qm_dqrr_current( ++ portal[i]); ++ } ++ /* Process message ring too */ ++ qm_mr_pvb_update(portal[i]); ++ msg = qm_mr_current(portal[i]); ++ while (msg) { ++ if ((msg->verb & ++ QM_MR_VERB_TYPE_MASK) ++ == QM_MR_VERB_FQRN) ++ found_fqrn = 1; ++ qm_mr_next(portal[i]); ++ qm_mr_cci_consume_to_current( ++ portal[i]); ++ qm_mr_pvb_update(portal[i]); ++ msg = qm_mr_current(portal[i]); ++ } ++ cpu_relax(); ++ } ++ } ++ } ++ if (result != QM_MCR_RESULT_OK && ++ result != QM_MCR_RESULT_PENDING) { ++ /* error */ ++ pr_err("qman_retire_fq failed on FQ 0x%x, result=0x%x\n", ++ fqid, result); ++ return -1; ++ } ++ if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) { ++ /* ORL had no entries, no need to wait until the ++ ERNs come in */ ++ orl_empty = 1; ++ } ++ /* Retirement succeeded, check to see if FQ needs ++ to be drained */ ++ if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) { ++ /* FQ is Not Empty, drain using volatile DQ commands */ ++ fq_empty = 0; ++ do { ++ const struct qm_dqrr_entry *dqrr = NULL; ++ u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3); ++ qm_dqrr_vdqcr_set(portal[0], vdqcr); ++ ++ /* Wait for a dequeue to occur */ ++ while (dqrr == NULL) { ++ qm_dqrr_pvb_update(portal[0]); ++ dqrr = qm_dqrr_current(portal[0]); ++ if (!dqrr) ++ cpu_relax(); ++ } ++ /* Process the dequeues, making sure to ++ empty the ring completely */ ++ while (dqrr) { ++ if (be32_to_cpu(dqrr->fqid) == fqid && ++ dqrr->stat & QM_DQRR_STAT_FQ_EMPTY) ++ fq_empty = 1; ++ qm_dqrr_cdc_consume_1ptr(portal[0], ++ dqrr, 0); ++ qm_dqrr_pvb_update(portal[0]); ++ qm_dqrr_next(portal[0]); ++ dqrr = qm_dqrr_current(portal[0]); ++ } ++ } while (fq_empty == 0); ++ } ++ for (i = 0; i < portal_count; i++) ++ qm_dqrr_sdqcr_set(portal[i], 0); ++ ++ /* Wait for the ORL to have been completely drained */ ++ while (orl_empty == 0) { ++ const struct qm_mr_entry *msg; ++ qm_mr_pvb_update(portal[0]); ++ msg = qm_mr_current(portal[0]); ++ while (msg) { ++ if ((msg->verb & QM_MR_VERB_TYPE_MASK) == ++ QM_MR_VERB_FQRL) ++ orl_empty = 1; ++ qm_mr_next(portal[0]); ++ qm_mr_cci_consume_to_current(portal[0]); ++ qm_mr_pvb_update(portal[0]); ++ msg =
qm_mr_current(portal[0]);
++			}
++			cpu_relax();
++		}
++		mcc = qm_mc_start(portal[0]);
++		mcc->alterfq.fqid = cpu_to_be32(fqid);
++		qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
++		while (!(mcr = qm_mc_result(portal[0])))
++			cpu_relax();
++		DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
++			   QM_MCR_VERB_ALTER_OOS);
++		if (mcr->result != QM_MCR_RESULT_OK) {
++			pr_err("OOS after drain Failed on FQID 0x%x, result 0x%x\n",
++			       fqid, mcr->result);
++			return -1;
++		}
++		return 0;
++	case QM_MCR_NP_STATE_RETIRED:
++		/* Send OOS Command */
++		mcc = qm_mc_start(portal[0]);
++		mcc->alterfq.fqid = cpu_to_be32(fqid);
++		qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
++		while (!(mcr = qm_mc_result(portal[0])))
++			cpu_relax();
++		DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
++			   QM_MCR_VERB_ALTER_OOS);
++		if (mcr->result) {
++			pr_err("OOS Failed on FQID 0x%x\n", fqid);
++			return -1;
++		}
++		return 0;
++	}
++	return -1;
++}
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/qman_private.h
+@@ -0,0 +1,398 @@
++/* Copyright 2008-2012 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *     * Redistributions of source code must retain the above copyright
++ *       notice, this list of conditions and the following disclaimer.
++ *     * Redistributions in binary form must reproduce the above copyright
++ *       notice, this list of conditions and the following disclaimer in the
++ *       documentation and/or other materials provided with the distribution.
++ *     * Neither the name of Freescale Semiconductor nor the
++ *       names of its contributors may be used to endorse or promote products
++ *       derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "dpa_sys.h"
++#include <linux/fsl_qman.h>
++#include <linux/iommu.h>
++
++#if defined(CONFIG_FSL_PAMU)
++#include <asm/fsl_pamu_stash.h>
++#endif
++
++#if !defined(CONFIG_FSL_QMAN_FQ_LOOKUP) && defined(CONFIG_PPC64)
++#error "_PPC64 requires _FSL_QMAN_FQ_LOOKUP"
++#endif
++
++#define QBMAN_ANY_PORTAL_IDX 0xffffffff
++	/* ----------------- */
++	/* Congestion Groups */
++	/* ----------------- */
++/* This wrapper represents a bit-array for the state of the 256 Qman congestion
++ * groups. It is also used as a *mask* for congestion groups, eg. so we ignore
++ * those that don't concern us. We harness the structure and accessor details
++ * already used in the management command to query congestion groups.
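++ * A worked example (assuming the fsl_qman.h helpers are __CGR_WORD(n) ==
++ * (n) >> 5 and __CGR_SHIFT(n) == (n) & 0x1f, ie. eight 32-bit words covering
++ * 256 CGRs): CGR 37 lives in __state[1] under mask 0x80000000 >> 5 ==
++ * 0x04000000, which is exactly how qman_cgrs_get()/qman_cgrs_set() below
++ * index and mask __state[].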
*/ ++struct qman_cgrs { ++ struct __qm_mcr_querycongestion q; ++}; ++static inline void qman_cgrs_init(struct qman_cgrs *c) ++{ ++ memset(c, 0, sizeof(*c)); ++} ++static inline void qman_cgrs_fill(struct qman_cgrs *c) ++{ ++ memset(c, 0xff, sizeof(*c)); ++} ++static inline int qman_cgrs_get(struct qman_cgrs *c, int num) ++{ ++ return QM_MCR_QUERYCONGESTION(&c->q, num); ++} ++static inline void qman_cgrs_set(struct qman_cgrs *c, int num) ++{ ++ c->q.__state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num)); ++} ++static inline void qman_cgrs_unset(struct qman_cgrs *c, int num) ++{ ++ c->q.__state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num)); ++} ++static inline int qman_cgrs_next(struct qman_cgrs *c, int num) ++{ ++ while ((++num < __CGR_NUM) && !qman_cgrs_get(c, num)) ++ ; ++ return num; ++} ++static inline void qman_cgrs_cp(struct qman_cgrs *dest, ++ const struct qman_cgrs *src) ++{ ++ *dest = *src; ++} ++static inline void qman_cgrs_and(struct qman_cgrs *dest, ++ const struct qman_cgrs *a, const struct qman_cgrs *b) ++{ ++ int ret; ++ u32 *_d = dest->q.__state; ++ const u32 *_a = a->q.__state; ++ const u32 *_b = b->q.__state; ++ for (ret = 0; ret < 8; ret++) ++ *(_d++) = *(_a++) & *(_b++); ++} ++static inline void qman_cgrs_xor(struct qman_cgrs *dest, ++ const struct qman_cgrs *a, const struct qman_cgrs *b) ++{ ++ int ret; ++ u32 *_d = dest->q.__state; ++ const u32 *_a = a->q.__state; ++ const u32 *_b = b->q.__state; ++ for (ret = 0; ret < 8; ret++) ++ *(_d++) = *(_a++) ^ *(_b++); ++} ++ ++ /* ----------------------- */ ++ /* CEETM Congestion Groups */ ++ /* ----------------------- */ ++/* This wrapper represents a bit-array for the state of the 512 Qman CEETM ++ * congestion groups. ++ */ ++struct qman_ccgrs { ++ struct __qm_mcr_querycongestion q[2]; ++}; ++static inline void qman_ccgrs_init(struct qman_ccgrs *c) ++{ ++ memset(c, 0, sizeof(*c)); ++} ++static inline void qman_ccgrs_fill(struct qman_ccgrs *c) ++{ ++ memset(c, 0xff, sizeof(*c)); ++} ++static inline int qman_ccgrs_get(struct qman_ccgrs *c, int num) ++{ ++ if (num < __CGR_NUM) ++ return QM_MCR_QUERYCONGESTION(&c->q[0], num); ++ else ++ return QM_MCR_QUERYCONGESTION(&c->q[1], (num - __CGR_NUM)); ++} ++static inline int qman_ccgrs_next(struct qman_ccgrs *c, int num) ++{ ++ while ((++num < __CGR_NUM) && !qman_ccgrs_get(c, num)) ++ ; ++ return num; ++} ++static inline void qman_ccgrs_cp(struct qman_ccgrs *dest, ++ const struct qman_ccgrs *src) ++{ ++ *dest = *src; ++} ++static inline void qman_ccgrs_and(struct qman_ccgrs *dest, ++ const struct qman_ccgrs *a, const struct qman_ccgrs *b) ++{ ++ int ret, i; ++ u32 *_d; ++ const u32 *_a, *_b; ++ for (i = 0; i < 2; i++) { ++ _d = dest->q[i].__state; ++ _a = a->q[i].__state; ++ _b = b->q[i].__state; ++ for (ret = 0; ret < 8; ret++) ++ *(_d++) = *(_a++) & *(_b++); ++ } ++} ++static inline void qman_ccgrs_xor(struct qman_ccgrs *dest, ++ const struct qman_ccgrs *a, const struct qman_ccgrs *b) ++{ ++ int ret, i; ++ u32 *_d; ++ const u32 *_a, *_b; ++ for (i = 0; i < 2; i++) { ++ _d = dest->q[i].__state; ++ _a = a->q[i].__state; ++ _b = b->q[i].__state; ++ for (ret = 0; ret < 8; ret++) ++ *(_d++) = *(_a++) ^ *(_b++); ++ } ++} ++ ++/* used by CCSR and portal interrupt code */ ++enum qm_isr_reg { ++ qm_isr_status = 0, ++ qm_isr_enable = 1, ++ qm_isr_disable = 2, ++ qm_isr_inhibit = 3 ++}; ++ ++struct qm_portal_config { ++ /* Corenet portal addresses; ++ * [0]==cache-enabled, [1]==cache-inhibited. 
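++	 * (An added note, inferred from the naming rather than stated here:
++	 * fast-path ring accesses are expected to go through the cache-enabled
++	 * mapping, while register accesses such as __qm_isr_read() and
++	 * __qm_isr_write() use the cache-inhibited one.)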
*/
++	__iomem void *addr_virt[2];
++	struct resource addr_phys[2];
++	struct device dev;
++	struct iommu_domain *iommu_domain;
++	/* Allow these to be joined in lists */
++	struct list_head list;
++	/* User-visible portal configuration settings */
++	struct qman_portal_config public_cfg;
++	/* power management saved data */
++	u32 saved_isdr;
++};
++
++/* Revision info (for errata and feature handling) */
++#define QMAN_REV11 0x0101
++#define QMAN_REV12 0x0102
++#define QMAN_REV20 0x0200
++#define QMAN_REV30 0x0300
++#define QMAN_REV31 0x0301
++#define QMAN_REV32 0x0302
++
++/* QMan REV_2 register contains the Cfg option */
++#define QMAN_REV_CFG_0 0x0
++#define QMAN_REV_CFG_1 0x1
++#define QMAN_REV_CFG_2 0x2
++#define QMAN_REV_CFG_3 0x3
++
++extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
++extern u8 qman_ip_cfg;
++extern u32 qman_clk;
++extern u16 qman_portal_max;
++
++#ifdef CONFIG_FSL_QMAN_CONFIG
++/* Hooks from qman_driver.c to qman_config.c */
++int qman_init_ccsr(struct device_node *node);
++void qman_liodn_fixup(u16 channel);
++int qman_set_sdest(u16 channel, unsigned int cpu_idx);
++size_t get_qman_fqd_size(void);
++#else
++static inline size_t get_qman_fqd_size(void)
++{
++	return (PAGE_SIZE << CONFIG_FSL_QMAN_FQD_SZ);
++}
++#endif
++
++int qm_set_wpm(int wpm);
++int qm_get_wpm(int *wpm);
++
++/* Hooks from qman_driver.c into qman_high.c */
++struct qman_portal *qman_create_portal(
++			struct qman_portal *portal,
++			const struct qm_portal_config *config,
++			const struct qman_cgrs *cgrs);
++
++struct qman_portal *qman_create_affine_portal(
++			const struct qm_portal_config *config,
++			const struct qman_cgrs *cgrs);
++struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
++							int cpu);
++const struct qm_portal_config *qman_destroy_affine_portal(void);
++void qman_destroy_portal(struct qman_portal *qm);
++
++/* Hooks from fsl_usdpaa.c to qman_driver.c */
++struct qm_portal_config *qm_get_unused_portal(void);
++struct qm_portal_config *qm_get_unused_portal_idx(uint32_t idx);
++
++void qm_put_unused_portal(struct qm_portal_config *pcfg);
++void qm_set_liodns(struct qm_portal_config *pcfg);
++
++/* This CGR feature is supported by h/w and required by unit-tests and the
++ * debugfs hooks, so is implemented in the driver. However it allows an explicit
++ * corruption of h/w fields by s/w that are usually incorruptible (because the
++ * counters are usually maintained entirely within h/w). As such, we declare
++ * this API internally. */
++int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
++	struct qm_mcr_cgrtestwrite *result);
++
++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
++/* If the fq object pointer is greater than the size of the context_b field,
++ * then a lookup table is required. */
++int qman_setup_fq_lookup_table(size_t num_entries);
++#endif
++
++
++/*************************************************/
++/* QMan s/w corenet portal, low-level i/face     */
++/*************************************************/
++
++/* Note: most functions are only used by the high-level interface, so are
++ * inlined from qman_low.h. The stuff below is for use by other parts of the
++ * driver. */
++
++/* For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
++ * dequeue TYPE. Choose TOKEN (8-bit).
++ * If SOURCE == CHANNELS,
++ *   Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
++ *   You can choose DEDICATED_PRECEDENCE if the portal channel should have
++ *   priority.
++ * If SOURCE == SPECIFICWQ,
++ *   Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
++ *   channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
++ *   work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
++ *   same value.
++ */
++#define QM_SDQCR_SOURCE_CHANNELS	0x0
++#define QM_SDQCR_SOURCE_SPECIFICWQ	0x40000000
++#define QM_SDQCR_COUNT_EXACT1		0x0
++#define QM_SDQCR_COUNT_UPTO3		0x20000000
++#define QM_SDQCR_DEDICATED_PRECEDENCE	0x10000000
++#define QM_SDQCR_TYPE_MASK		0x03000000
++#define QM_SDQCR_TYPE_NULL		0x0
++#define QM_SDQCR_TYPE_PRIO_QOS		0x01000000
++#define QM_SDQCR_TYPE_ACTIVE_QOS	0x02000000
++#define QM_SDQCR_TYPE_ACTIVE		0x03000000
++#define QM_SDQCR_TOKEN_MASK		0x00ff0000
++#define QM_SDQCR_TOKEN_SET(v)		(((v) & 0xff) << 16)
++#define QM_SDQCR_TOKEN_GET(v)		(((v) >> 16) & 0xff)
++#define QM_SDQCR_CHANNELS_DEDICATED	0x00008000
++#define QM_SDQCR_SPECIFICWQ_MASK	0x000000f7
++#define QM_SDQCR_SPECIFICWQ_DEDICATED	0x00000000
++#define QM_SDQCR_SPECIFICWQ_POOL(n)	((n) << 4)
++#define QM_SDQCR_SPECIFICWQ_WQ(n)	(n)
++
++/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
++#define QM_VDQCR_FQID_MASK		0x00ffffff
++#define QM_VDQCR_FQID(n)		((n) & QM_VDQCR_FQID_MASK)
++
++/* For qm_dqrr_pdqcr_set(); Choose one MODE. Choose one COUNT.
++ * If MODE==SCHEDULED
++ *   Choose SCHEDULED_CHANNELS or SCHEDULED_SPECIFICWQ. Choose one dequeue TYPE.
++ *   If CHANNELS,
++ *     Choose CHANNELS_DEDICATED and/or CHANNELS_POOL() channels.
++ *     You can choose DEDICATED_PRECEDENCE if the portal channel should have
++ *     priority.
++ *   If SPECIFICWQ,
++ *     Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
++ *     channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
++ *     work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
++ *     same value.
++ * If MODE==UNSCHEDULED
++ *   Choose FQID().
++ */
++#define QM_PDQCR_MODE_SCHEDULED		0x0
++#define QM_PDQCR_MODE_UNSCHEDULED	0x80000000
++#define QM_PDQCR_SCHEDULED_CHANNELS	0x0
++#define QM_PDQCR_SCHEDULED_SPECIFICWQ	0x40000000
++#define QM_PDQCR_COUNT_EXACT1		0x0
++#define QM_PDQCR_COUNT_UPTO3		0x20000000
++#define QM_PDQCR_DEDICATED_PRECEDENCE	0x10000000
++#define QM_PDQCR_TYPE_MASK		0x03000000
++#define QM_PDQCR_TYPE_NULL		0x0
++#define QM_PDQCR_TYPE_PRIO_QOS		0x01000000
++#define QM_PDQCR_TYPE_ACTIVE_QOS	0x02000000
++#define QM_PDQCR_TYPE_ACTIVE		0x03000000
++#define QM_PDQCR_CHANNELS_DEDICATED	0x00008000
++#define QM_PDQCR_CHANNELS_POOL(n)	(0x00008000 >> (n))
++#define QM_PDQCR_SPECIFICWQ_MASK	0x000000f7
++#define QM_PDQCR_SPECIFICWQ_DEDICATED	0x00000000
++#define QM_PDQCR_SPECIFICWQ_POOL(n)	((n) << 4)
++#define QM_PDQCR_SPECIFICWQ_WQ(n)	(n)
++#define QM_PDQCR_FQID(n)		((n) & 0xffffff)
++
++/* Used by all portal interrupt registers except 'inhibit'
++ * Channels with frame availability
++ */
++#define QM_PIRQ_DQAVAIL	0x0000ffff
++
++/* The DQAVAIL interrupt fields break down into these bits; */
++#define QM_DQAVAIL_PORTAL	0x8000		/* Portal channel */
++#define QM_DQAVAIL_POOL(n)	(0x8000 >> (n))	/* Pool channel, n==[1..15] */
++#define QM_DQAVAIL_MASK		0xffff
++/* This mask contains all the "irqsource" bits visible to API users */
++#define QM_PIRQ_VISIBLE	(QM_PIRQ_SLOW | QM_PIRQ_DQRI)
++
++/* These are qm_<reg>_<verb>(). So for example, qm_disable_write() means "write
++ * the disable register" rather than "disable the ability to write".
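++ *
++ * (An added usage sketch: mirroring PORTAL_SDQCR in qman_test_high.c later in
++ * this patch, a static-dequeue command for the dedicated channel plus pool
++ * channel 2 can be composed as
++ *   QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_TYPE_PRIO_QOS |
++ *   QM_SDQCR_TOKEN_SET(0x98) | QM_SDQCR_CHANNELS_DEDICATED |
++ *   QM_SDQCR_CHANNELS_POOL(2)
++ * where QM_SDQCR_CHANNELS_POOL() is assumed to be supplied by fsl_qman.h, as
++ * it is not defined in this header.)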
*/ ++#define qm_isr_status_read(qm) __qm_isr_read(qm, qm_isr_status) ++#define qm_isr_status_clear(qm, m) __qm_isr_write(qm, qm_isr_status, m) ++#define qm_isr_enable_read(qm) __qm_isr_read(qm, qm_isr_enable) ++#define qm_isr_enable_write(qm, v) __qm_isr_write(qm, qm_isr_enable, v) ++#define qm_isr_disable_read(qm) __qm_isr_read(qm, qm_isr_disable) ++#define qm_isr_disable_write(qm, v) __qm_isr_write(qm, qm_isr_disable, v) ++/* TODO: unfortunate name-clash here, reword? */ ++#define qm_isr_inhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 1) ++#define qm_isr_uninhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 0) ++ ++#ifdef CONFIG_FSL_QMAN_CONFIG ++int qman_have_ccsr(void); ++#else ++#define qman_have_ccsr 0 ++#endif ++ ++__init int qman_init(void); ++__init int qman_resource_init(void); ++ ++/* CEETM related */ ++#define QMAN_CEETM_MAX 2 ++extern u8 num_ceetms; ++extern struct qm_ceetm qman_ceetms[QMAN_CEETM_MAX]; ++int qman_sp_enable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal); ++int qman_sp_disable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal); ++int qman_ceetm_set_prescaler(enum qm_dc_portal portal); ++int qman_ceetm_get_prescaler(u16 *pres); ++int qman_ceetm_query_cq(unsigned int cqid, unsigned int dcpid, ++ struct qm_mcr_ceetm_cq_query *cq_query); ++int qman_ceetm_query_ccgr(struct qm_mcc_ceetm_ccgr_query *ccgr_query, ++ struct qm_mcr_ceetm_ccgr_query *response); ++int qman_ceetm_get_xsfdr(enum qm_dc_portal portal, unsigned int *num); ++ ++extern void *affine_portals[NR_CPUS]; ++const struct qm_portal_config *qman_get_qm_portal_config( ++ struct qman_portal *portal); ++ ++/* power management */ ++#ifdef CONFIG_SUSPEND ++void suspend_unused_qportal(void); ++void resume_unused_qportal(void); ++#endif +--- /dev/null ++++ b/drivers/staging/fsl_qbman/qman_test.c +@@ -0,0 +1,57 @@ ++/* Copyright 2008-2011 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "qman_test.h"
++
++MODULE_AUTHOR("Geoff Thorpe");
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("Qman testing");
++
++static int test_init(void)
++{
++	int loop = 1;
++	while (loop--) {
++#ifdef CONFIG_FSL_QMAN_TEST_STASH_POTATO
++		qman_test_hotpotato();
++#endif
++#ifdef CONFIG_FSL_QMAN_TEST_HIGH
++		qman_test_high();
++#endif
++	}
++	return 0;
++}
++
++static void test_exit(void)
++{
++}
++
++module_init(test_init);
++module_exit(test_exit);
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/qman_test.h
+@@ -0,0 +1,45 @@
++/* Copyright 2008-2011 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *     * Redistributions of source code must retain the above copyright
++ *       notice, this list of conditions and the following disclaimer.
++ *     * Redistributions in binary form must reproduce the above copyright
++ *       notice, this list of conditions and the following disclaimer in the
++ *       documentation and/or other materials provided with the distribution.
++ *     * Neither the name of Freescale Semiconductor nor the
++ *       names of its contributors may be used to endorse or promote products
++ *       derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/io.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++
++#include <linux/fsl_qman.h>
++
++void qman_test_hotpotato(void);
++void qman_test_high(void);
++
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/qman_test_high.c
+@@ -0,0 +1,216 @@
++/* Copyright 2008-2011 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *     * Redistributions of source code must retain the above copyright
++ *       notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "qman_test.h" ++ ++/*************/ ++/* constants */ ++/*************/ ++ ++#define CGR_ID 27 ++#define POOL_ID 2 ++#define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID ++#define NUM_ENQUEUES 10 ++#define NUM_PARTIAL 4 ++#define PORTAL_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \ ++ QM_SDQCR_TYPE_PRIO_QOS | \ ++ QM_SDQCR_TOKEN_SET(0x98) | \ ++ QM_SDQCR_CHANNELS_DEDICATED | \ ++ QM_SDQCR_CHANNELS_POOL(POOL_ID)) ++#define PORTAL_OPAQUE ((void *)0xf00dbeef) ++#define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH) ++ ++/*************************************/ ++/* Predeclarations (eg. for fq_base) */ ++/*************************************/ ++ ++static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *, ++ struct qman_fq *, ++ const struct qm_dqrr_entry *); ++static void cb_ern(struct qman_portal *, struct qman_fq *, ++ const struct qm_mr_entry *); ++static void cb_fqs(struct qman_portal *, struct qman_fq *, ++ const struct qm_mr_entry *); ++ ++/***************/ ++/* global vars */ ++/***************/ ++ ++static struct qm_fd fd, fd_dq; ++static struct qman_fq fq_base = { ++ .cb.dqrr = cb_dqrr, ++ .cb.ern = cb_ern, ++ .cb.fqs = cb_fqs ++}; ++static DECLARE_WAIT_QUEUE_HEAD(waitqueue); ++static int retire_complete, sdqcr_complete; ++ ++/**********************/ ++/* internal functions */ ++/**********************/ ++ ++/* Helpers for initialising and "incrementing" a frame descriptor */ ++static void fd_init(struct qm_fd *__fd) ++{ ++ qm_fd_addr_set64(__fd, 0xabdeadbeefLLU); ++ __fd->format = qm_fd_contig_big; ++ __fd->length29 = 0x0000ffff; ++ __fd->cmd = 0xfeedf00d; ++} ++ ++static void fd_inc(struct qm_fd *__fd) ++{ ++ u64 t = qm_fd_addr_get64(__fd); ++ int z = t >> 40; ++ t <<= 1; ++ if (z) ++ t |= 1; ++ qm_fd_addr_set64(__fd, t); ++ __fd->length29--; ++ __fd->cmd++; ++} ++ ++/* The only part of the 'fd' we can't memcmp() is the ppid */ ++static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b) ++{ ++ int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 
0 : -1; ++ if (!r) ++ r = a->format - b->format; ++ if (!r) ++ r = a->opaque - b->opaque; ++ if (!r) ++ r = a->cmd - b->cmd; ++ return r; ++} ++ ++/********/ ++/* test */ ++/********/ ++ ++static void do_enqueues(struct qman_fq *fq) ++{ ++ unsigned int loop; ++ for (loop = 0; loop < NUM_ENQUEUES; loop++) { ++ if (qman_enqueue(fq, &fd, QMAN_ENQUEUE_FLAG_WAIT | ++ (((loop + 1) == NUM_ENQUEUES) ? ++ QMAN_ENQUEUE_FLAG_WAIT_SYNC : 0))) ++ panic("qman_enqueue() failed\n"); ++ fd_inc(&fd); ++ } ++} ++ ++void qman_test_high(void) ++{ ++ unsigned int flags; ++ int res; ++ struct qman_fq *fq = &fq_base; ++ ++ pr_info("qman_test_high starting\n"); ++ fd_init(&fd); ++ fd_init(&fd_dq); ++ ++ /* Initialise (parked) FQ */ ++ if (qman_create_fq(0, FQ_FLAGS, fq)) ++ panic("qman_create_fq() failed\n"); ++ if (qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL)) ++ panic("qman_init_fq() failed\n"); ++ ++ /* Do enqueues + VDQCR, twice. (Parked FQ) */ ++ do_enqueues(fq); ++ pr_info("VDQCR (till-empty);\n"); ++ if (qman_volatile_dequeue(fq, VDQCR_FLAGS, ++ QM_VDQCR_NUMFRAMES_TILLEMPTY)) ++ panic("qman_volatile_dequeue() failed\n"); ++ do_enqueues(fq); ++ pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES); ++ if (qman_volatile_dequeue(fq, VDQCR_FLAGS, ++ QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL))) ++ panic("qman_volatile_dequeue() failed\n"); ++ pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL, ++ NUM_ENQUEUES); ++ if (qman_volatile_dequeue(fq, VDQCR_FLAGS, ++ QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL))) ++ panic("qman_volatile_dequeue() failed\n"); ++ ++ do_enqueues(fq); ++ pr_info("scheduled dequeue (till-empty)\n"); ++ if (qman_schedule_fq(fq)) ++ panic("qman_schedule_fq() failed\n"); ++ wait_event(waitqueue, sdqcr_complete); ++ ++ /* Retire and OOS the FQ */ ++ res = qman_retire_fq(fq, &flags); ++ if (res < 0) ++ panic("qman_retire_fq() failed\n"); ++ wait_event(waitqueue, retire_complete); ++ if (flags & QMAN_FQ_STATE_BLOCKOOS) ++ panic("leaking frames\n"); ++ if (qman_oos_fq(fq)) ++ panic("qman_oos_fq() failed\n"); ++ qman_destroy_fq(fq, 0); ++ pr_info("qman_test_high finished\n"); ++} ++ ++static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p, ++ struct qman_fq *fq, ++ const struct qm_dqrr_entry *dq) ++{ ++ if (fd_cmp(&fd_dq, &dq->fd)) { ++ pr_err("BADNESS: dequeued frame doesn't match;\n"); ++ pr_err("Expected 0x%llx, got 0x%llx\n", ++ (unsigned long long)fd_dq.length29, ++ (unsigned long long)dq->fd.length29); ++ BUG(); ++ } ++ fd_inc(&fd_dq); ++ if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) { ++ sdqcr_complete = 1; ++ wake_up(&waitqueue); ++ } ++ return qman_cb_dqrr_consume; ++} ++ ++static void cb_ern(struct qman_portal *p, struct qman_fq *fq, ++ const struct qm_mr_entry *msg) ++{ ++ panic("cb_ern() unimplemented"); ++} ++ ++static void cb_fqs(struct qman_portal *p, struct qman_fq *fq, ++ const struct qm_mr_entry *msg) ++{ ++ u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK); ++ if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) ++ panic("unexpected FQS message"); ++ pr_info("Retirement message received\n"); ++ retire_complete = 1; ++ wake_up(&waitqueue); ++} +--- /dev/null ++++ b/drivers/staging/fsl_qbman/qman_test_hotpotato.c +@@ -0,0 +1,502 @@ ++/* Copyright 2009-2012 Freescale Semiconductor, Inc. 
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *     * Redistributions of source code must retain the above copyright
++ *       notice, this list of conditions and the following disclaimer.
++ *     * Redistributions in binary form must reproduce the above copyright
++ *       notice, this list of conditions and the following disclaimer in the
++ *       documentation and/or other materials provided with the distribution.
++ *     * Neither the name of Freescale Semiconductor nor the
++ *       names of its contributors may be used to endorse or promote products
++ *       derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/kthread.h>
++#include <linux/platform_device.h>
++#include <linux/dma-mapping.h>
++#include "qman_test.h"
++
++/* Algorithm:
++ *
++ * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
++ * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
++ * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
++ * shuttle a "hot potato" frame around them such that every forwarding action
++ * moves it from one cpu to another. (The use of more than one handler per cpu
++ * is to allow enough handlers/FQs to truly test the significance of caching -
++ * ie. when cache-expiries are occurring.)
++ *
++ * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the
++ * first and last words of the frame data will undergo a transformation step on
++ * each forwarding action. To achieve this, each handler will be assigned a
++ * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
++ * received by a handler, the mixer of the expected sender is XOR'd into all
++ * words of the entire frame, which is then validated against the original
++ * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
++ * the current handler. Apart from validating that the frame is taking the
++ * expected path, this also provides some quasi-realistic overheads to each
++ * forwarding action - dereferencing *all* the frame data, computation, and
++ * conditional branching. There is a "special" handler designated to act as the
++ * instigator of the test by creating and enqueuing the "hot potato" frame, and
++ * to determine when the test has completed by counting HP_LOOPS iterations.
++ *
++ * Init phases:
++ *
++ * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them
++ *    into 'hp_cpu_list'.
Specifically, set processor_id, allocate HP_PER_CPU ++ * handlers and link-list them (but do no other handler setup). ++ * ++ * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each ++ * hp_cpu's 'iterator' to point to its first handler. With each loop, ++ * allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler ++ * and advance the iterator for the next loop. This includes a final fixup, ++ * which connects the last handler to the first (and which is why phase 2 ++ * and 3 are separate). ++ * ++ * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each ++ * hp_cpu's 'iterator' to point to its first handler. With each loop, ++ * initialise FQ objects and advance the iterator for the next loop. ++ * Moreover, do this initialisation on the cpu it applies to so that Rx FQ ++ * initialisation targets the correct cpu. ++ */ ++ ++/* helper to run something on all cpus (can't use on_each_cpu(), as that invokes ++ * the fn from irq context, which is too restrictive). */ ++struct bstrap { ++ void (*fn)(void); ++ atomic_t started; ++}; ++static int bstrap_fn(void *__bstrap) ++{ ++ struct bstrap *bstrap = __bstrap; ++ atomic_inc(&bstrap->started); ++ bstrap->fn(); ++ while (!kthread_should_stop()) ++ msleep(1); ++ return 0; ++} ++static int on_all_cpus(void (*fn)(void)) ++{ ++ int cpu; ++ for_each_cpu(cpu, cpu_online_mask) { ++ struct bstrap bstrap = { ++ .fn = fn, ++ .started = ATOMIC_INIT(0) ++ }; ++ struct task_struct *k = kthread_create(bstrap_fn, &bstrap, ++ "hotpotato%d", cpu); ++ int ret; ++ if (IS_ERR(k)) ++ return -ENOMEM; ++ kthread_bind(k, cpu); ++ wake_up_process(k); ++ /* If we call kthread_stop() before the "wake up" has had an ++ * effect, then the thread may exit with -EINTR without ever ++ * running the function. So poll until it's started before ++ * requesting it to stop. */ ++ while (!atomic_read(&bstrap.started)) ++ msleep(10); ++ ret = kthread_stop(k); ++ if (ret) ++ return ret; ++ } ++ return 0; ++} ++ ++struct hp_handler { ++ ++ /* The following data is stashed when 'rx' is dequeued; */ ++ /* -------------- */ ++ /* The Rx FQ, dequeues of which will stash the entire hp_handler */ ++ struct qman_fq rx; ++ /* The Tx FQ we should forward to */ ++ struct qman_fq tx; ++ /* The value we XOR post-dequeue, prior to validating */ ++ u32 rx_mixer; ++ /* The value we XOR pre-enqueue, after validating */ ++ u32 tx_mixer; ++ /* what the hotpotato address should be on dequeue */ ++ dma_addr_t addr; ++ u32 *frame_ptr; ++ ++ /* The following data isn't (necessarily) stashed on dequeue; */ ++ /* -------------- */ ++ u32 fqid_rx, fqid_tx; ++ /* list node for linking us into 'hp_cpu' */ ++ struct list_head node; ++ /* Just to check ... */ ++ unsigned int processor_id; ++} ____cacheline_aligned; ++ ++struct hp_cpu { ++ /* identify the cpu we run on; */ ++ unsigned int processor_id; ++ /* root node for the per-cpu list of handlers */ ++ struct list_head handlers; ++ /* list node for linking us into 'hp_cpu_list' */ ++ struct list_head node; ++ /* when repeatedly scanning 'hp_list', each time linking the n'th ++ * handlers together, this is used as per-cpu iterator state */ ++ struct hp_handler *iterator; ++}; ++ ++/* Each cpu has one of these */ ++static DEFINE_PER_CPU(struct hp_cpu, hp_cpus); ++ ++/* links together the hp_cpu structs, in first-come first-serve order. 
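++ * (Populated by create_per_cpu_handlers() below, run on every online cpu via
++ * the on_all_cpus() bootstrap above: one kthread per cpu, bound with
++ * kthread_bind(), woken, polled via 'started', then kthread_stop()'d - so the
++ * callback has always completed by the time on_all_cpus() returns.)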
*/ ++static LIST_HEAD(hp_cpu_list); ++static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock); ++ ++static unsigned int hp_cpu_list_length; ++ ++/* the "special" handler, that starts and terminates the test. */ ++static struct hp_handler *special_handler; ++static int loop_counter; ++ ++/* handlers are allocated out of this, so they're properly aligned. */ ++static struct kmem_cache *hp_handler_slab; ++ ++/* this is the frame data */ ++static void *__frame_ptr; ++static u32 *frame_ptr; ++static dma_addr_t frame_dma; ++ ++/* the main function waits on this */ ++static DECLARE_WAIT_QUEUE_HEAD(queue); ++ ++#define HP_PER_CPU 2 ++#define HP_LOOPS 8 ++/* 80 bytes, like a small ethernet frame, and bleeds into a second cacheline */ ++#define HP_NUM_WORDS 80 ++/* First word of the LFSR-based frame data */ ++#define HP_FIRST_WORD 0xabbaf00d ++ ++static inline u32 do_lfsr(u32 prev) ++{ ++ return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u); ++} ++ ++static void allocate_frame_data(void) ++{ ++ u32 lfsr = HP_FIRST_WORD; ++ int loop; ++ struct platform_device *pdev = platform_device_alloc("foobar", -1); ++ if (!pdev) ++ panic("platform_device_alloc() failed"); ++ if (platform_device_add(pdev)) ++ panic("platform_device_add() failed"); ++ __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL); ++ if (!__frame_ptr) ++ panic("kmalloc() failed"); ++ frame_ptr = (void *)(((unsigned long)__frame_ptr + 63) & ++ ~(unsigned long)63); ++ for (loop = 0; loop < HP_NUM_WORDS; loop++) { ++ frame_ptr[loop] = lfsr; ++ lfsr = do_lfsr(lfsr); ++ } ++ frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS, ++ DMA_BIDIRECTIONAL); ++ platform_device_del(pdev); ++ platform_device_put(pdev); ++} ++ ++static void deallocate_frame_data(void) ++{ ++ kfree(__frame_ptr); ++} ++ ++static inline void process_frame_data(struct hp_handler *handler, ++ const struct qm_fd *fd) ++{ ++ u32 *p = handler->frame_ptr; ++ u32 lfsr = HP_FIRST_WORD; ++ int loop; ++ if (qm_fd_addr_get64(fd) != (handler->addr & 0xffffffffff)) { ++ pr_err("Got 0x%llx expected 0x%llx\n", ++ qm_fd_addr_get64(fd), handler->addr); ++ panic("bad frame address"); ++ } ++ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) { ++ *p ^= handler->rx_mixer; ++ if (*p != lfsr) ++ panic("corrupt frame data"); ++ *p ^= handler->tx_mixer; ++ lfsr = do_lfsr(lfsr); ++ } ++} ++ ++static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal, ++ struct qman_fq *fq, ++ const struct qm_dqrr_entry *dqrr) ++{ ++ struct hp_handler *handler = (struct hp_handler *)fq; ++ ++ process_frame_data(handler, &dqrr->fd); ++ if (qman_enqueue(&handler->tx, &dqrr->fd, 0)) ++ panic("qman_enqueue() failed"); ++ return qman_cb_dqrr_consume; ++} ++ ++static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal, ++ struct qman_fq *fq, ++ const struct qm_dqrr_entry *dqrr) ++{ ++ struct hp_handler *handler = (struct hp_handler *)fq; ++ ++ process_frame_data(handler, &dqrr->fd); ++ if (++loop_counter < HP_LOOPS) { ++ if (qman_enqueue(&handler->tx, &dqrr->fd, 0)) ++ panic("qman_enqueue() failed"); ++ } else { ++ pr_info("Received final (%dth) frame\n", loop_counter); ++ wake_up(&queue); ++ } ++ return qman_cb_dqrr_consume; ++} ++ ++static void create_per_cpu_handlers(void) ++{ ++ struct hp_handler *handler; ++ int loop; ++ struct hp_cpu *hp_cpu = &get_cpu_var(hp_cpus); ++ ++ hp_cpu->processor_id = smp_processor_id(); ++ spin_lock(&hp_lock); ++ list_add_tail(&hp_cpu->node, &hp_cpu_list); ++ hp_cpu_list_length++; ++ spin_unlock(&hp_lock); ++ INIT_LIST_HEAD(&hp_cpu->handlers); ++ for 
(loop = 0; loop < HP_PER_CPU; loop++) { ++ handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL); ++ if (!handler) ++ panic("kmem_cache_alloc() failed"); ++ handler->processor_id = hp_cpu->processor_id; ++ handler->addr = frame_dma; ++ handler->frame_ptr = frame_ptr; ++ list_add_tail(&handler->node, &hp_cpu->handlers); ++ } ++ put_cpu_var(hp_cpus); ++} ++ ++static void destroy_per_cpu_handlers(void) ++{ ++ struct list_head *loop, *tmp; ++ struct hp_cpu *hp_cpu = &get_cpu_var(hp_cpus); ++ ++ spin_lock(&hp_lock); ++ list_del(&hp_cpu->node); ++ spin_unlock(&hp_lock); ++ list_for_each_safe(loop, tmp, &hp_cpu->handlers) { ++ u32 flags; ++ struct hp_handler *handler = list_entry(loop, struct hp_handler, ++ node); ++ if (qman_retire_fq(&handler->rx, &flags)) ++ panic("qman_retire_fq(rx) failed"); ++ BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS); ++ if (qman_oos_fq(&handler->rx)) ++ panic("qman_oos_fq(rx) failed"); ++ qman_destroy_fq(&handler->rx, 0); ++ qman_destroy_fq(&handler->tx, 0); ++ qman_release_fqid(handler->fqid_rx); ++ list_del(&handler->node); ++ kmem_cache_free(hp_handler_slab, handler); ++ } ++ put_cpu_var(hp_cpus); ++} ++ ++static inline u8 num_cachelines(u32 offset) ++{ ++ u8 res = (offset + (L1_CACHE_BYTES - 1)) ++ / (L1_CACHE_BYTES); ++ if (res > 3) ++ return 3; ++ return res; ++} ++#define STASH_DATA_CL \ ++ num_cachelines(HP_NUM_WORDS * 4) ++#define STASH_CTX_CL \ ++ num_cachelines(offsetof(struct hp_handler, fqid_rx)) ++ ++static void init_handler(void *__handler) ++{ ++ struct qm_mcc_initfq opts; ++ struct hp_handler *handler = __handler; ++ BUG_ON(handler->processor_id != smp_processor_id()); ++ /* Set up rx */ ++ memset(&handler->rx, 0, sizeof(handler->rx)); ++ if (handler == special_handler) ++ handler->rx.cb.dqrr = special_dqrr; ++ else ++ handler->rx.cb.dqrr = normal_dqrr; ++ if (qman_create_fq(handler->fqid_rx, 0, &handler->rx)) ++ panic("qman_create_fq(rx) failed"); ++ memset(&opts, 0, sizeof(opts)); ++ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA; ++ opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING; ++ opts.fqd.context_a.stashing.data_cl = STASH_DATA_CL; ++ opts.fqd.context_a.stashing.context_cl = STASH_CTX_CL; ++ if (qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED | ++ QMAN_INITFQ_FLAG_LOCAL, &opts)) ++ panic("qman_init_fq(rx) failed"); ++ /* Set up tx */ ++ memset(&handler->tx, 0, sizeof(handler->tx)); ++ if (qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY, ++ &handler->tx)) ++ panic("qman_create_fq(tx) failed"); ++} ++ ++static void init_phase2(void) ++{ ++ int loop; ++ u32 fqid = 0; ++ u32 lfsr = 0xdeadbeef; ++ struct hp_cpu *hp_cpu; ++ struct hp_handler *handler; ++ ++ for (loop = 0; loop < HP_PER_CPU; loop++) { ++ list_for_each_entry(hp_cpu, &hp_cpu_list, node) { ++ int ret; ++ if (!loop) ++ hp_cpu->iterator = list_first_entry( ++ &hp_cpu->handlers, ++ struct hp_handler, node); ++ else ++ hp_cpu->iterator = list_entry( ++ hp_cpu->iterator->node.next, ++ struct hp_handler, node); ++ /* Rx FQID is the previous handler's Tx FQID */ ++ hp_cpu->iterator->fqid_rx = fqid; ++ /* Allocate new FQID for Tx */ ++ ret = qman_alloc_fqid(&fqid); ++ if (ret) ++ panic("qman_alloc_fqid() failed"); ++ hp_cpu->iterator->fqid_tx = fqid; ++ /* Rx mixer is the previous handler's Tx mixer */ ++ hp_cpu->iterator->rx_mixer = lfsr; ++ /* Get new mixer for Tx */ ++ lfsr = do_lfsr(lfsr); ++ hp_cpu->iterator->tx_mixer = lfsr; ++ } ++ } ++ /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */ ++ hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node); ++ handler = 
list_first_entry(&hp_cpu->handlers, struct hp_handler, node); ++ BUG_ON((handler->fqid_rx != 0) || (handler->rx_mixer != 0xdeadbeef)); ++ handler->fqid_rx = fqid; ++ handler->rx_mixer = lfsr; ++ /* and tag it as our "special" handler */ ++ special_handler = handler; ++} ++ ++static void init_phase3(void) ++{ ++ int loop; ++ struct hp_cpu *hp_cpu; ++ ++ for (loop = 0; loop < HP_PER_CPU; loop++) { ++ list_for_each_entry(hp_cpu, &hp_cpu_list, node) { ++ if (!loop) ++ hp_cpu->iterator = list_first_entry( ++ &hp_cpu->handlers, ++ struct hp_handler, node); ++ else ++ hp_cpu->iterator = list_entry( ++ hp_cpu->iterator->node.next, ++ struct hp_handler, node); ++ preempt_disable(); ++ if (hp_cpu->processor_id == smp_processor_id()) ++ init_handler(hp_cpu->iterator); ++ else ++ smp_call_function_single(hp_cpu->processor_id, ++ init_handler, hp_cpu->iterator, 1); ++ preempt_enable(); ++ } ++ } ++} ++ ++static void send_first_frame(void *ignore) ++{ ++ u32 *p = special_handler->frame_ptr; ++ u32 lfsr = HP_FIRST_WORD; ++ int loop; ++ struct qm_fd fd; ++ ++ BUG_ON(special_handler->processor_id != smp_processor_id()); ++ memset(&fd, 0, sizeof(fd)); ++ qm_fd_addr_set64(&fd, special_handler->addr); ++ fd.format = qm_fd_contig_big; ++ fd.length29 = HP_NUM_WORDS * 4; ++ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) { ++ if (*p != lfsr) ++ panic("corrupt frame data"); ++ *p ^= special_handler->tx_mixer; ++ lfsr = do_lfsr(lfsr); ++ } ++ pr_info("Sending first frame\n"); ++ if (qman_enqueue(&special_handler->tx, &fd, 0)) ++ panic("qman_enqueue() failed"); ++} ++ ++void qman_test_hotpotato(void) ++{ ++ if (cpumask_weight(cpu_online_mask) < 2) { ++ pr_info("qman_test_hotpotato, skip - only 1 CPU\n"); ++ return; ++ } ++ ++ pr_info("qman_test_hotpotato starting\n"); ++ ++ hp_cpu_list_length = 0; ++ loop_counter = 0; ++ hp_handler_slab = kmem_cache_create("hp_handler_slab", ++ sizeof(struct hp_handler), L1_CACHE_BYTES, ++ SLAB_HWCACHE_ALIGN, NULL); ++ if (!hp_handler_slab) ++ panic("kmem_cache_create() failed"); ++ ++ allocate_frame_data(); ++ ++ /* Init phase 1 */ ++ pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU); ++ if (on_all_cpus(create_per_cpu_handlers)) ++ panic("on_each_cpu() failed"); ++ pr_info("Number of cpus: %d, total of %d handlers\n", ++ hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU); ++ ++ init_phase2(); ++ ++ init_phase3(); ++ ++ preempt_disable(); ++ if (special_handler->processor_id == smp_processor_id()) ++ send_first_frame(NULL); ++ else ++ smp_call_function_single(special_handler->processor_id, ++ send_first_frame, NULL, 1); ++ preempt_enable(); ++ ++ wait_event(queue, loop_counter == HP_LOOPS); ++ deallocate_frame_data(); ++ if (on_all_cpus(destroy_per_cpu_handlers)) ++ panic("on_each_cpu() failed"); ++ kmem_cache_destroy(hp_handler_slab); ++ pr_info("qman_test_hotpotato finished\n"); ++} +--- /dev/null ++++ b/drivers/staging/fsl_qbman/qman_utility.c +@@ -0,0 +1,129 @@ ++/* Copyright 2008-2011 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. 
++ *     * Neither the name of Freescale Semiconductor nor the
++ *       names of its contributors may be used to endorse or promote products
++ *       derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "qman_private.h"
++
++/* ----------------- */
++/* --- FQID Pool --- */
++
++struct qman_fqid_pool {
++	/* Base and size of the FQID range */
++	u32 fqid_base;
++	u32 total;
++	/* Number of FQIDs currently "allocated" */
++	u32 used;
++	/* Allocation optimisation. When 'used<total', it is the index of an
++	 * available FQID. Otherwise there are no available FQIDs, and this
++	 * will be set when the next deallocation occurs. */
++	u32 next;
++	/* A bit-field representation of the FQID range. */
++	unsigned long *bits;
++};
++
++#define QLONG_BYTES	sizeof(unsigned long)
++#define QLONG_BITS	(QLONG_BYTES * 8)
++/* Number of 'longs' required for the number of FQIDs */
++#define QNUM_LONGS(b)	(((b) + QLONG_BITS - 1) / QLONG_BITS)
++/* Number of bytes required for the number of FQIDs */
++#define QNUM_BYTES(b)	(QNUM_LONGS(b) * QLONG_BYTES)
++/* Number of bits in the number of bytes */
++#define QNUM_BITS(b)	(QNUM_BYTES(b) * 8)
++
++struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num)
++{
++	struct qman_fqid_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
++	unsigned int i;
++
++	if (!pool)
++		return NULL;
++	pool->fqid_base = fqid_start;
++	pool->total = num;
++	pool->used = 0;
++	pool->next = 0;
++	pool->bits = kzalloc(QNUM_BYTES(num), GFP_KERNEL);
++	if (!pool->bits) {
++		kfree(pool);
++		return NULL;
++	}
++	/* If num is not an even multiple of QLONG_BITS (or even 8, for
++	 * byte-oriented searching) then we fill the trailing bits with 1, to
++	 * make them look allocated (permanently). */
++	for (i = num + 1; i < QNUM_BITS(num); i++)
++		set_bit(i, pool->bits);
++	return pool;
++}
++EXPORT_SYMBOL(qman_fqid_pool_create);
++
++int qman_fqid_pool_destroy(struct qman_fqid_pool *pool)
++{
++	int ret = pool->used;
++	kfree(pool->bits);
++	kfree(pool);
++	return ret;
++}
++EXPORT_SYMBOL(qman_fqid_pool_destroy);
++
++int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid)
++{
++	int ret;
++	if (pool->used == pool->total)
++		return -ENOMEM;
++	*fqid = pool->fqid_base + pool->next;
++	ret = test_and_set_bit(pool->next, pool->bits);
++	BUG_ON(ret);
++	if (++pool->used == pool->total)
++		return 0;
++	pool->next = find_next_zero_bit(pool->bits, pool->total, pool->next);
++	if (pool->next >= pool->total)
++		pool->next = find_first_zero_bit(pool->bits, pool->total);
++	BUG_ON(pool->next >= pool->total);
++	return 0;
++}
++EXPORT_SYMBOL(qman_fqid_pool_alloc);
++
++void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid)
++{
++	int ret;
++
++	fqid -= pool->fqid_base;
++	ret = test_and_clear_bit(fqid, pool->bits);
++	BUG_ON(!ret);
++	if (pool->used-- == pool->total)
++		pool->next = fqid;
++}
++EXPORT_SYMBOL(qman_fqid_pool_free);
++
++u32 qman_fqid_pool_used(struct qman_fqid_pool *pool)
++{
++	return pool->used;
++}
++EXPORT_SYMBOL(qman_fqid_pool_used);
+--- /dev/null
++++ b/include/linux/fsl_bman.h
+@@ -0,0 +1,532 @@
++/* Copyright 2008-2012 Freescale Semiconductor, Inc.
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifndef FSL_BMAN_H ++#define FSL_BMAN_H ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++/* Last updated for v00.79 of the BG */ ++ ++/* Portal processing (interrupt) sources */ ++#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */ ++#define BM_PIRQ_BSCN 0x00000001 /* Buffer depletion State Change */ ++ ++/* This wrapper represents a bit-array for the depletion state of the 64 Bman ++ * buffer pools. 
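++ * A worked example of the helpers below: for bpid 37, __bmdep_word(37) is
++ * 1 and __bmdep_shift(37) is 5, so its bit is __bmdep_bit(37) ==
++ * 0x80000000 >> 5 == 0x04000000, tested within __state[1].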
*/ ++struct bman_depletion { ++ u32 __state[2]; ++}; ++#define BMAN_DEPLETION_EMPTY { { 0x00000000, 0x00000000 } } ++#define BMAN_DEPLETION_FULL { { 0xffffffff, 0xffffffff } } ++#define __bmdep_word(x) ((x) >> 5) ++#define __bmdep_shift(x) ((x) & 0x1f) ++#define __bmdep_bit(x) (0x80000000 >> __bmdep_shift(x)) ++static inline void bman_depletion_init(struct bman_depletion *c) ++{ ++ c->__state[0] = c->__state[1] = 0; ++} ++static inline void bman_depletion_fill(struct bman_depletion *c) ++{ ++ c->__state[0] = c->__state[1] = ~0; ++} ++static inline int bman_depletion_get(const struct bman_depletion *c, u8 bpid) ++{ ++ return c->__state[__bmdep_word(bpid)] & __bmdep_bit(bpid); ++} ++static inline void bman_depletion_set(struct bman_depletion *c, u8 bpid) ++{ ++ c->__state[__bmdep_word(bpid)] |= __bmdep_bit(bpid); ++} ++static inline void bman_depletion_unset(struct bman_depletion *c, u8 bpid) ++{ ++ c->__state[__bmdep_word(bpid)] &= ~__bmdep_bit(bpid); ++} ++ ++/* ------------------------------------------------------- */ ++/* --- Bman data structures (and associated constants) --- */ ++ ++/* Represents s/w corenet portal mapped data structures */ ++struct bm_rcr_entry; /* RCR (Release Command Ring) entries */ ++struct bm_mc_command; /* MC (Management Command) command */ ++struct bm_mc_result; /* MC result */ ++ ++/* Code-reduction, define a wrapper for 48-bit buffers. In cases where a buffer ++ * pool id specific to this buffer is needed (BM_RCR_VERB_CMD_BPID_MULTI, ++ * BM_MCC_VERB_ACQUIRE), the 'bpid' field is used. */ ++struct bm_buffer { ++ union { ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u8 __reserved1; ++ u8 bpid; ++ u16 hi; /* High 16-bits of 48-bit address */ ++ u32 lo; /* Low 32-bits of 48-bit address */ ++#else ++ u32 lo; ++ u16 hi; ++ u8 bpid; ++ u8 __reserved; ++#endif ++ }; ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u64 __notaddress:16; ++ u64 addr:48; ++#else ++ u64 addr:48; ++ u64 __notaddress:16; ++#endif ++ }; ++ u64 opaque; ++ }; ++} __aligned(8); ++static inline u64 bm_buffer_get64(const struct bm_buffer *buf) ++{ ++ return buf->addr; ++} ++static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf) ++{ ++ return (dma_addr_t)buf->addr; ++} ++/* Macro, so we compile better if 'v' isn't always 64-bit */ ++#define bm_buffer_set64(buf, v) \ ++ do { \ ++ struct bm_buffer *__buf931 = (buf); \ ++ __buf931->hi = upper_32_bits(v); \ ++ __buf931->lo = lower_32_bits(v); \ ++ } while (0) ++ ++/* See 1.5.3.5.4: "Release Command" */ ++struct bm_rcr_entry { ++ union { ++ struct { ++ u8 __dont_write_directly__verb; ++ u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */ ++ u8 __reserved1[62]; ++ }; ++ struct bm_buffer bufs[8]; ++ }; ++} __packed; ++#define BM_RCR_VERB_VBIT 0x80 ++#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */ ++#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20 ++#define BM_RCR_VERB_CMD_BPID_MULTI 0x30 ++#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */ ++ ++/* See 1.5.3.1: "Acquire Command" */ ++/* See 1.5.3.2: "Query Command" */ ++struct bm_mcc_acquire { ++ u8 bpid; ++ u8 __reserved1[62]; ++} __packed; ++struct bm_mcc_query { ++ u8 __reserved2[63]; ++} __packed; ++struct bm_mc_command { ++ u8 __dont_write_directly__verb; ++ union { ++ struct bm_mcc_acquire acquire; ++ struct bm_mcc_query query; ++ }; ++} __packed; ++#define BM_MCC_VERB_VBIT 0x80 ++#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */ ++#define BM_MCC_VERB_CMD_ACQUIRE 0x10 ++#define BM_MCC_VERB_CMD_QUERY 0x40 ++#define 
BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */ ++ ++/* See 1.5.3.3: "Acquire Response" */ ++/* See 1.5.3.4: "Query Response" */ ++struct bm_pool_state { ++ u8 __reserved1[32]; ++ /* "availability state" and "depletion state" */ ++ struct { ++ u8 __reserved1[8]; ++ /* Access using bman_depletion_***() */ ++ struct bman_depletion state; ++ } as, ds; ++}; ++struct bm_mc_result { ++ union { ++ struct { ++ u8 verb; ++ u8 __reserved1[63]; ++ }; ++ union { ++ struct { ++ u8 __reserved1; ++ u8 bpid; ++ u8 __reserved2[62]; ++ }; ++ struct bm_buffer bufs[8]; ++ } acquire; ++ struct bm_pool_state query; ++ }; ++} __packed; ++#define BM_MCR_VERB_VBIT 0x80 ++#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK ++#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE ++#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY ++#define BM_MCR_VERB_CMD_ERR_INVALID 0x60 ++#define BM_MCR_VERB_CMD_ERR_ECC 0x70 ++#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */ ++/* Determine the "availability state" of pool 'p' from a query result 'r' */ ++#define BM_MCR_QUERY_AVAILABILITY(r, p) \ ++ bman_depletion_get(&r->query.as.state, p) ++/* Determine the "depletion state" of pool 'p' from a query result 'r' */ ++#define BM_MCR_QUERY_DEPLETION(r, p) \ ++ bman_depletion_get(&r->query.ds.state, p) ++ ++/*******************************************************************/ ++/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */ ++/*******************************************************************/ ++ ++ /* Portal and Buffer Pools */ ++ /* ----------------------- */ ++/* Represents a managed portal */ ++struct bman_portal; ++ ++/* This object type represents Bman buffer pools. */ ++struct bman_pool; ++ ++struct bman_portal_config { ++ /* This is used for any "core-affine" portals, ie. default portals ++ * associated to the corresponding cpu. -1 implies that there is no core ++ * affinity configured. */ ++ int cpu; ++ /* portal interrupt line */ ++ int irq; ++ /* the unique index of this portal */ ++ u32 index; ++ /* Is this portal shared? (If so, it has coarser locking and demuxes ++ * processing on behalf of other CPUs.) */ ++ int is_shared; ++ /* These are the buffer pool IDs that may be used via this portal. */ ++ struct bman_depletion mask; ++}; ++ ++/* This callback type is used when handling pool depletion entry/exit. The ++ * 'cb_ctx' value is the opaque value associated with the pool object in ++ * bman_new_pool(). 'depleted' is non-zero on depletion-entry, and zero on ++ * depletion-exit. */ ++typedef void (*bman_cb_depletion)(struct bman_portal *bm, ++ struct bman_pool *pool, void *cb_ctx, int depleted); ++ ++/* This struct specifies parameters for a bman_pool object. */ ++struct bman_pool_params { ++ /* index of the buffer pool to encapsulate (0-63), ignored if ++ * BMAN_POOL_FLAG_DYNAMIC_BPID is set. */ ++ u32 bpid; ++ /* bit-mask of BMAN_POOL_FLAG_*** options */ ++ u32 flags; ++ /* depletion-entry/exit callback, if BMAN_POOL_FLAG_DEPLETION is set */ ++ bman_cb_depletion cb; ++ /* opaque user value passed as a parameter to 'cb' */ ++ void *cb_ctx; ++ /* depletion-entry/exit thresholds, if BMAN_POOL_FLAG_THRESH is set. NB: ++ * this is only allowed if BMAN_POOL_FLAG_DYNAMIC_BPID is used *and* ++ * when run in the control plane (which controls Bman CCSR). This array ++ * matches the definition of bm_pool_set(). 
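++	 * (The ordering of the four entries is not restated here; it is
++	 * presumed to follow bm_pool_set() - on the SDK that is the software
++	 * and hardware depletion-entry/exit tuple - and bm_pool_set()'s own
++	 * definition is authoritative.)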
*/ ++ u32 thresholds[4]; ++}; ++ ++/* Flags to bman_new_pool() */ ++#define BMAN_POOL_FLAG_NO_RELEASE 0x00000001 /* can't release to pool */ ++#define BMAN_POOL_FLAG_ONLY_RELEASE 0x00000002 /* can only release to pool */ ++#define BMAN_POOL_FLAG_DEPLETION 0x00000004 /* track depletion entry/exit */ ++#define BMAN_POOL_FLAG_DYNAMIC_BPID 0x00000008 /* (de)allocate bpid */ ++#define BMAN_POOL_FLAG_THRESH 0x00000010 /* set depletion thresholds */ ++#define BMAN_POOL_FLAG_STOCKPILE 0x00000020 /* stockpile to reduce hw ops */ ++ ++/* Flags to bman_release() */ ++#ifdef CONFIG_FSL_DPA_CAN_WAIT ++#define BMAN_RELEASE_FLAG_WAIT 0x00000001 /* wait if RCR is full */ ++#define BMAN_RELEASE_FLAG_WAIT_INT 0x00000002 /* if we wait, interruptible? */ ++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC ++#define BMAN_RELEASE_FLAG_WAIT_SYNC 0x00000004 /* if wait, until consumed? */ ++#endif ++#endif ++#define BMAN_RELEASE_FLAG_NOW 0x00000008 /* issue immediate release */ ++ ++/* Flags to bman_acquire() */ ++#define BMAN_ACQUIRE_FLAG_STOCKPILE 0x00000001 /* no hw op, stockpile only */ ++ ++ /* Portal Management */ ++ /* ----------------- */ ++/** ++ * bman_get_portal_config - get portal configuration settings ++ * ++ * This returns a read-only view of the current cpu's affine portal settings. ++ */ ++const struct bman_portal_config *bman_get_portal_config(void); ++ ++/** ++ * bman_irqsource_get - return the portal work that is interrupt-driven ++ * ++ * Returns a bitmask of BM_PIRQ_**I processing sources that are currently ++ * enabled for interrupt handling on the current cpu's affine portal. These ++ * sources will trigger the portal interrupt and the interrupt handler (or a ++ * tasklet/bottom-half it defers to) will perform the corresponding processing ++ * work. The bman_poll_***() functions will only process sources that are not in ++ * this bitmask. If the current CPU is sharing a portal hosted on another CPU, ++ * this always returns zero. ++ */ ++u32 bman_irqsource_get(void); ++ ++/** ++ * bman_irqsource_add - add processing sources to be interrupt-driven ++ * @bits: bitmask of BM_PIRQ_**I processing sources ++ * ++ * Adds processing sources that should be interrupt-driven (rather than ++ * processed via bman_poll_***() functions). Returns zero for success, or ++ * -EINVAL if the current CPU is sharing a portal hosted on another CPU. */ ++int bman_irqsource_add(u32 bits); ++ ++/** ++ * bman_irqsource_remove - remove processing sources from being interrupt-driven ++ * @bits: bitmask of BM_PIRQ_**I processing sources ++ * ++ * Removes processing sources from being interrupt-driven, so that they will ++ * instead be processed via bman_poll_***() functions. Returns zero for success, ++ * or -EINVAL if the current CPU is sharing a portal hosted on another CPU. */ ++int bman_irqsource_remove(u32 bits); ++ ++/** ++ * bman_affine_cpus - return a mask of cpus that have affine portals ++ */ ++const cpumask_t *bman_affine_cpus(void); ++ ++/** ++ * bman_poll_slow - process anything that isn't interrupt-driven. ++ * ++ * This function does any portal processing that isn't interrupt-driven. If the ++ * current CPU is sharing a portal hosted on another CPU, this function will ++ * return -EINVAL, otherwise the return value is a bitmask of BM_PIRQ_* sources ++ * indicating what interrupt sources were actually processed by the call. 
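++ *
++ * Eg. a minimal sketch of caller-driven processing, assuming the
++ * BM_PIRQ_RCRI source bit defined with the other BM_PIRQ_* sources in this
++ * header; handle_rcr_space() is a hypothetical hook:
++ *
++ *   u32 did = bman_poll_slow();
++ *   if (did & BM_PIRQ_RCRI)
++ *       handle_rcr_space();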
++ *
++ * NB, unlike the legacy wrapper bman_poll(), this function will
++ * deterministically check for the presence of portal processing work and do it,
++ * which implies some latency even if there's nothing to do. The bman_poll()
++ * wrapper on the other hand (like the qman_poll() wrapper) attenuates this by
++ * checking for (and doing) portal processing infrequently. Ie. such that
++ * qman_poll() and bman_poll() can be called from core-processing loops. Use
++ * bman_poll_slow() when you yourself are deciding when to incur the overhead of
++ * processing.
++ */
++u32 bman_poll_slow(void);
++
++/**
++ * bman_poll - process anything that isn't interrupt-driven.
++ *
++ * Dispatcher logic on a cpu can use this to trigger any maintenance of the
++ * affine portal. This function does whatever processing is not triggered by
++ * interrupts. This is a legacy wrapper that can be used in core-processing
++ * loops but mitigates the performance overhead of portal processing by
++ * adaptively bypassing true portal processing most of the time. (Processing is
++ * done once every 10 calls if the previous processing revealed that work needed
++ * to be done, or once every 1000 calls if the previous processing revealed no
++ * work needed doing.) If you wish to control this yourself, call
++ * bman_poll_slow() instead, which always checks for portal processing work.
++ */
++void bman_poll(void);
++
++/**
++ * bman_rcr_is_empty - Determine if portal's RCR is empty
++ *
++ * For use in situations where a cpu-affine caller needs to determine when all
++ * releases for the local portal have been processed by Bman but can't use the
++ * BMAN_RELEASE_FLAG_WAIT_SYNC flag to do this from the final bman_release().
++ * The function forces tracking of RCR consumption (which normally doesn't
++ * happen until release processing needs to find space to put new release
++ * commands), and returns zero if the ring still has unprocessed entries,
++ * non-zero if it is empty.
++ */
++int bman_rcr_is_empty(void);
++
++/**
++ * bman_alloc_bpid_range - Allocate a contiguous range of BPIDs
++ * @result: is set by the API to the base BPID of the allocated range
++ * @count: the number of BPIDs required
++ * @align: required alignment of the allocated range
++ * @partial: non-zero if the API can return fewer than @count BPIDs
++ *
++ * Returns the number of buffer pools allocated, or a negative error code. If
++ * @partial is non-zero, the allocation request may return a smaller range of
++ * BPIDs than requested (though alignment will be as requested). If @partial is
++ * zero, the return value will either be 'count' or negative.
++ */
++int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial);
++static inline int bman_alloc_bpid(u32 *result)
++{
++	int ret = bman_alloc_bpid_range(result, 1, 0, 0);
++	return (ret > 0) ? 0 : ret;
++}
++
++/**
++ * bman_release_bpid_range - Release the specified range of buffer pool IDs
++ * @bpid: the base BPID of the range to deallocate
++ * @count: the number of BPIDs in the range
++ *
++ * This function can also be used to seed the allocator with ranges of BPIDs
++ * that it can subsequently allocate from.
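++ *
++ * Eg. a minimal sketch, seeding BPIDs 32..47 at init time and drawing from
++ * that range later via the allocator:
++ *
++ *   bman_release_bpid_range(32, 16);
++ *   [...]
++ *   u32 bpid;
++ *   int err = bman_alloc_bpid(&bpid);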
++ */
++void bman_release_bpid_range(u32 bpid, unsigned int count);
++static inline void bman_release_bpid(u32 bpid)
++{
++	bman_release_bpid_range(bpid, 1);
++}
++
++int bman_reserve_bpid_range(u32 bpid, unsigned int count);
++static inline int bman_reserve_bpid(u32 bpid)
++{
++	return bman_reserve_bpid_range(bpid, 1);
++}
++
++void bman_seed_bpid_range(u32 bpid, unsigned int count);
++
++int bman_shutdown_pool(u32 bpid);
++
++	/* Pool management */
++	/* --------------- */
++/**
++ * bman_new_pool - Allocates a Buffer Pool object
++ * @params: parameters specifying the buffer pool ID and behaviour
++ *
++ * Creates a pool object for the given @params. A portal and the depletion
++ * callback field of @params are only used if the BMAN_POOL_FLAG_DEPLETION flag
++ * is set. NB, the fields from @params are copied into the new pool object, so
++ * the structure provided by the caller can be released or reused after the
++ * function returns.
++ */
++struct bman_pool *bman_new_pool(const struct bman_pool_params *params);
++
++/**
++ * bman_free_pool - Deallocates a Buffer Pool object
++ * @pool: the pool object to release
++ */
++void bman_free_pool(struct bman_pool *pool);
++
++/**
++ * bman_get_params - Returns a pool object's parameters.
++ * @pool: the pool object
++ *
++ * The returned pointer refers to state within the pool object, so must not be
++ * modified and becomes invalid once the pool object is destroyed.
++ */
++const struct bman_pool_params *bman_get_params(const struct bman_pool *pool);
++
++/**
++ * bman_release - Release buffer(s) to the buffer pool
++ * @pool: the buffer pool object to release to
++ * @bufs: an array of buffers to release
++ * @num: the number of buffers in @bufs (1-8)
++ * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
++ *
++ * Adds the given buffers to RCR entries. If the portal used for the release
++ * was created with the "COMPACT" flag, then it will be using a compaction
++ * algorithm to improve utilisation of RCR. As such, these buffers may join an
++ * existing ring entry and/or may not be issued right away so as to allow
++ * future releases to join the same ring entry. Use the BMAN_RELEASE_FLAG_NOW
++ * flag to override this behaviour by committing the RCR entry (or entries)
++ * right away. If the RCR ring is full, the function will return -EBUSY unless
++ * BMAN_RELEASE_FLAG_WAIT is selected, in which case it will sleep waiting for
++ * space to become available in RCR. If the function receives a signal before
++ * such time (and BMAN_RELEASE_FLAG_WAIT_INT is set), the function returns
++ * -EINTR. Otherwise, it returns zero.
++ */
++int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
++	u32 flags);
++
++/**
++ * bman_acquire - Acquire buffer(s) from a buffer pool
++ * @pool: the buffer pool object to acquire from
++ * @bufs: array for storing the acquired buffers
++ * @num: the number of buffers desired (@bufs is at least this big)
++ *
++ * Issues an "Acquire" command via the portal's management command interface.
++ * The return value will be the number of buffers obtained from the pool, or a
++ * negative error code if a h/w error or pool starvation was encountered. In
++ * the latter case, the content of @bufs is undefined.
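++ *
++ * Eg. a minimal sketch that drains up to 8 buffers per call; process() is a
++ * hypothetical consumer of the buffer addresses:
++ *
++ *   struct bm_buffer bufs[8];
++ *   int i, got = bman_acquire(pool, bufs, 8, 0);
++ *   for (i = 0; i < got; i++)
++ *       process(bm_buf_addr(&bufs[i]));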
++ */
++int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
++	u32 flags);
++
++/**
++ * bman_flush_stockpile - Flush stockpile buffer(s) to the buffer pool
++ * @pool: the buffer pool object the stockpile belongs to
++ * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
++ *
++ * Adds stockpile buffers to RCR entries until the stockpile is empty.
++ * The return value will be a negative error code if a h/w error occurred.
++ * If the BMAN_RELEASE_FLAG_NOW flag is passed and the RCR ring is full,
++ * -EAGAIN will be returned.
++ */
++int bman_flush_stockpile(struct bman_pool *pool, u32 flags);
++
++/**
++ * bman_query_pools - Query all buffer pool states
++ * @state: storage for the queried availability and depletion states
++ */
++int bman_query_pools(struct bm_pool_state *state);
++
++#ifdef CONFIG_FSL_BMAN_CONFIG
++/**
++ * bman_query_free_buffers - Query how many free buffers are in buffer pool
++ * @pool: the buffer pool object to query
++ *
++ * Returns the number of free buffers.
++ */
++u32 bman_query_free_buffers(struct bman_pool *pool);
++
++/**
++ * bman_update_pool_thresholds - Change the buffer pool's depletion thresholds
++ * @pool: the buffer pool object to which the thresholds will be set
++ * @thresholds: the new thresholds
++ */
++int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds);
++#endif
++
++/**
++ * The bman_p_***() variant below may be called in situations where the cpu to
++ * which the portal is affine is not yet online.
++ * @p specifies which portal the API will use.
++ */
++int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits);
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* FSL_BMAN_H */
+--- /dev/null
++++ b/include/linux/fsl_qman.h
+@@ -0,0 +1,3888 @@
++/* Copyright 2008-2012 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *     * Redistributions of source code must retain the above copyright
++ *       notice, this list of conditions and the following disclaimer.
++ *     * Redistributions in binary form must reproduce the above copyright
++ *       notice, this list of conditions and the following disclaimer in the
++ *       documentation and/or other materials provided with the distribution.
++ *     * Neither the name of Freescale Semiconductor nor the
++ *       names of its contributors may be used to endorse or promote products
++ *       derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED.
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifndef FSL_QMAN_H ++#define FSL_QMAN_H ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++/* Last updated for v00.800 of the BG */ ++ ++/* Hardware constants */ ++#define QM_CHANNEL_SWPORTAL0 0 ++#define QMAN_CHANNEL_POOL1 0x21 ++#define QMAN_CHANNEL_CAAM 0x80 ++#define QMAN_CHANNEL_PME 0xa0 ++#define QMAN_CHANNEL_POOL1_REV3 0x401 ++#define QMAN_CHANNEL_CAAM_REV3 0x840 ++#define QMAN_CHANNEL_PME_REV3 0x860 ++#define QMAN_CHANNEL_DCE 0x8a0 ++#define QMAN_CHANNEL_DCE_QMANREV312 0x880 ++extern u16 qm_channel_pool1; ++extern u16 qm_channel_caam; ++extern u16 qm_channel_pme; ++extern u16 qm_channel_dce; ++enum qm_dc_portal { ++ qm_dc_portal_fman0 = 0, ++ qm_dc_portal_fman1 = 1, ++ qm_dc_portal_caam = 2, ++ qm_dc_portal_pme = 3, ++ qm_dc_portal_rman = 4, ++ qm_dc_portal_dce = 5 ++}; ++ ++/* Portal processing (interrupt) sources */ ++#define QM_PIRQ_CCSCI 0x00200000 /* CEETM Congestion State Change */ ++#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */ ++#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */ ++#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */ ++#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */ ++#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */ ++/* This mask contains all the interrupt sources that need handling except DQRI, ++ * ie. that if present should trigger slow-path processing. */ ++#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \ ++ QM_PIRQ_MRI | QM_PIRQ_CCSCI) ++ ++/* --- Clock speed --- */ ++/* A qman driver instance may or may not know the current qman clock speed. ++ * However, certain CEETM calculations may not be possible if this is not known. ++ * The 'set' function will only succeed (return zero) if the driver did not ++ * already know the clock speed. Likewise, the 'get' function will only succeed ++ * if the driver does know the clock speed (either because it knew when booting, ++ * or was told via 'set'). In cases where software is running on a driver ++ * instance that does not know the clock speed (eg. on a hypervised data-plane), ++ * and the user can obtain the current qman clock speed by other means (eg. from ++ * a message sent from the control-plane), then the 'set' function can be used ++ * to enable rate-calculations in a driver where it would otherwise not be ++ * possible. */ ++int qm_get_clock(u64 *clock_hz); ++int qm_set_clock(u64 clock_hz); ++ ++/* For qman_static_dequeue_*** APIs */ ++#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff ++/* for n in [1,15] */ ++#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n)) ++/* for conversion from n of qm_channel */ ++static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel) ++{ ++ return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1); ++} ++ ++/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use ++ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use ++ * FQID(n) to fill in the frame queue ID. 
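++ * Eg. a sketch composing the defines below into a VDQCR value (the frame
++ * queue ID bits are filled as described above):
++ *
++ *   u32 vdqcr = QM_VDQCR_PRECEDENCE_SDQCR | QM_VDQCR_EXACT |
++ *       QM_VDQCR_NUMFRAMES_SET(3);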
*/ ++#define QM_VDQCR_PRECEDENCE_VDQCR 0x0 ++#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000 ++#define QM_VDQCR_EXACT 0x40000000 ++#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000 ++#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24) ++#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f) ++#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0) ++ ++ ++/* ------------------------------------------------------- */ ++/* --- Qman data structures (and associated constants) --- */ ++ ++/* Represents s/w corenet portal mapped data structures */ ++struct qm_eqcr_entry; /* EQCR (EnQueue Command Ring) entries */ ++struct qm_dqrr_entry; /* DQRR (DeQueue Response Ring) entries */ ++struct qm_mr_entry; /* MR (Message Ring) entries */ ++struct qm_mc_command; /* MC (Management Command) command */ ++struct qm_mc_result; /* MC result */ ++ ++/* See David Lapp's "Frame formats" document, "dpateam", Jan 07, 2008 */ ++#define QM_FD_FORMAT_SG 0x4 ++#define QM_FD_FORMAT_LONG 0x2 ++#define QM_FD_FORMAT_COMPOUND 0x1 ++enum qm_fd_format { ++ /* 'contig' implies a contiguous buffer, whereas 'sg' implies a ++ * scatter-gather table. 'big' implies a 29-bit length with no offset ++ * field, otherwise length is 20-bit and offset is 9-bit. 'compound' ++ * implies a s/g-like table, where each entry itself represents a frame ++ * (contiguous or scatter-gather) and the 29-bit "length" is ++ * interpreted purely for congestion calculations, ie. a "congestion ++ * weight". */ ++ qm_fd_contig = 0, ++ qm_fd_contig_big = QM_FD_FORMAT_LONG, ++ qm_fd_sg = QM_FD_FORMAT_SG, ++ qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG, ++ qm_fd_compound = QM_FD_FORMAT_COMPOUND ++}; ++ ++/* Capitalised versions are un-typed but can be used in static expressions */ ++#define QM_FD_CONTIG 0 ++#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG ++#define QM_FD_SG QM_FD_FORMAT_SG ++#define QM_FD_SG_BIG (QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG) ++#define QM_FD_COMPOUND QM_FD_FORMAT_COMPOUND ++ ++/* See 1.5.1.1: "Frame Descriptor (FD)" */ ++struct qm_fd { ++ union { ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u8 dd:2; /* dynamic debug */ ++ u8 liodn_offset:6; ++ u8 bpid:8; /* Buffer Pool ID */ ++ u8 eliodn_offset:4; ++ u8 __reserved:4; ++ u8 addr_hi; /* high 8-bits of 40-bit address */ ++ u32 addr_lo; /* low 32-bits of 40-bit address */ ++#else ++ u32 addr_lo; /* low 32-bits of 40-bit address */ ++ u8 addr_hi; /* high 8-bits of 40-bit address */ ++ u8 __reserved:4; ++ u8 eliodn_offset:4; ++ u8 bpid:8; /* Buffer Pool ID */ ++ u8 liodn_offset:6; ++ u8 dd:2; /* dynamic debug */ ++#endif ++ }; ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u64 __notaddress:24; ++ u64 addr:40; ++#else ++ u64 addr:40; ++ u64 __notaddress:24; ++#endif ++ }; ++ u64 opaque_addr; ++ }; ++ /* The 'format' field indicates the interpretation of the remaining 29 ++ * bits of the 32-bit word. For packing reasons, it is duplicated in the ++ * other union elements. Note, union'd structs are difficult to use with ++ * static initialisation under gcc, in which case use the "opaque" form ++ * with one of the macros. */ ++ union { ++ /* For easier/faster copying of this part of the fd (eg. 
from a ++ * DQRR entry to an EQCR entry) copy 'opaque' */ ++ u32 opaque; ++ /* If 'format' is _contig or _sg, 20b length and 9b offset */ ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ enum qm_fd_format format:3; ++ u16 offset:9; ++ u32 length20:20; ++#else ++ u32 length20:20; ++ u16 offset:9; ++ enum qm_fd_format format:3; ++#endif ++ }; ++ /* If 'format' is _contig_big or _sg_big, 29b length */ ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ enum qm_fd_format _format1:3; ++ u32 length29:29; ++#else ++ u32 length29:29; ++ enum qm_fd_format _format1:3; ++#endif ++ }; ++ /* If 'format' is _compound, 29b "congestion weight" */ ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ enum qm_fd_format _format2:3; ++ u32 cong_weight:29; ++#else ++ u32 cong_weight:29; ++ enum qm_fd_format _format2:3; ++#endif ++ }; ++ }; ++ union { ++ u32 cmd; ++ u32 status; ++ }; ++} __aligned(8); ++#define QM_FD_DD_NULL 0x00 ++#define QM_FD_PID_MASK 0x3f ++static inline u64 qm_fd_addr_get64(const struct qm_fd *fd) ++{ ++ return fd->addr; ++} ++ ++static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd) ++{ ++ return (dma_addr_t)fd->addr; ++} ++/* Macro, so we compile better if 'v' isn't always 64-bit */ ++#define qm_fd_addr_set64(fd, v) \ ++ do { \ ++ struct qm_fd *__fd931 = (fd); \ ++ __fd931->addr = v; \ ++ } while (0) ++ ++/* For static initialisation of FDs (which is complicated by the use of unions ++ * in "struct qm_fd"), use the following macros. Note that; ++ * - 'dd', 'pid' and 'bpid' are ignored because there's no static initialisation ++ * use-case), ++ * - use capitalised QM_FD_*** formats for static initialisation. ++ */ ++#define QM_FD_FMT_20(cmd, addr_hi, addr_lo, fmt, off, len) \ ++ { 0, 0, 0, 0, 0, addr_hi, addr_lo, \ ++ { (((fmt)&0x7) << 29) | (((off)&0x1ff) << 20) | ((len)&0xfffff) }, \ ++ { cmd } } ++#define QM_FD_FMT_29(cmd, addr_hi, addr_lo, fmt, len) \ ++ { 0, 0, 0, 0, 0, addr_hi, addr_lo, \ ++ { (((fmt)&0x7) << 29) | ((len)&0x1fffffff) }, \ ++ { cmd } } ++ ++/* See 2.2.1.3 Multi-Core Datapath Acceleration Architecture */ ++#define QM_SG_OFFSET_MASK 0x1FFF ++struct qm_sg_entry { ++ union { ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u8 __reserved1[3]; ++ u8 addr_hi; /* high 8-bits of 40-bit address */ ++ u32 addr_lo; /* low 32-bits of 40-bit address */ ++#else ++ u32 addr_lo; /* low 32-bits of 40-bit address */ ++ u8 addr_hi; /* high 8-bits of 40-bit address */ ++ u8 __reserved1[3]; ++#endif ++ }; ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u64 __notaddress:24; ++ u64 addr:40; ++#else ++ u64 addr:40; ++ u64 __notaddress:24; ++#endif ++ }; ++ u64 opaque; ++ }; ++ union { ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u32 extension:1; /* Extension bit */ ++ u32 final:1; /* Final bit */ ++ u32 length:30; ++#else ++ u32 length:30; ++ u32 final:1; /* Final bit */ ++ u32 extension:1; /* Extension bit */ ++#endif ++ }; ++ u32 sgt_efl; ++ }; ++ u8 __reserved2; ++ u8 bpid; ++ union { ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u16 __reserved3:3; ++ u16 offset:13; ++#else ++ u16 offset:13; ++ u16 __reserved3:3; ++#endif ++ }; ++ u16 opaque_offset; ++ }; ++} __packed; ++union qm_sg_efl { ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u32 extension:1; /* Extension bit */ ++ u32 final:1; /* Final bit */ ++ u32 length:30; ++#else ++ u32 length:30; ++ u32 final:1; /* Final bit */ ++ u32 extension:1; /* Extension bit */ ++#endif ++ }; ++ u32 efl; ++}; ++static inline dma_addr_t qm_sg_addr(const 
struct qm_sg_entry *sg) ++{ ++ return (dma_addr_t)be64_to_cpu(sg->opaque) & 0xffffffffffULL; ++} ++static inline u8 qm_sg_entry_get_ext(const struct qm_sg_entry *sg) ++{ ++ union qm_sg_efl u; ++ ++ u.efl = be32_to_cpu(sg->sgt_efl); ++ return u.extension; ++} ++static inline u8 qm_sg_entry_get_final(const struct qm_sg_entry *sg) ++{ ++ union qm_sg_efl u; ++ ++ u.efl = be32_to_cpu(sg->sgt_efl); ++ return u.final; ++} ++static inline u32 qm_sg_entry_get_len(const struct qm_sg_entry *sg) ++{ ++ union qm_sg_efl u; ++ ++ u.efl = be32_to_cpu(sg->sgt_efl); ++ return u.length; ++} ++static inline u8 qm_sg_entry_get_bpid(const struct qm_sg_entry *sg) ++{ ++ return sg->bpid; ++} ++static inline u16 qm_sg_entry_get_offset(const struct qm_sg_entry *sg) ++{ ++ u32 opaque_offset = be16_to_cpu(sg->opaque_offset); ++ ++ return opaque_offset & 0x1fff; ++} ++ ++/* Macro, so we compile better if 'v' isn't always 64-bit */ ++#define qm_sg_entry_set64(sg, v) \ ++ do { \ ++ struct qm_sg_entry *__sg931 = (sg); \ ++ __sg931->opaque = cpu_to_be64(v); \ ++ } while (0) ++#define qm_sg_entry_set_ext(sg, v) \ ++ do { \ ++ union qm_sg_efl __u932; \ ++ __u932.efl = be32_to_cpu((sg)->sgt_efl); \ ++ __u932.extension = v; \ ++ (sg)->sgt_efl = cpu_to_be32(__u932.efl); \ ++ } while (0) ++#define qm_sg_entry_set_final(sg, v) \ ++ do { \ ++ union qm_sg_efl __u933; \ ++ __u933.efl = be32_to_cpu((sg)->sgt_efl); \ ++ __u933.final = v; \ ++ (sg)->sgt_efl = cpu_to_be32(__u933.efl); \ ++ } while (0) ++#define qm_sg_entry_set_len(sg, v) \ ++ do { \ ++ union qm_sg_efl __u934; \ ++ __u934.efl = be32_to_cpu((sg)->sgt_efl); \ ++ __u934.length = v; \ ++ (sg)->sgt_efl = cpu_to_be32(__u934.efl); \ ++ } while (0) ++#define qm_sg_entry_set_bpid(sg, v) \ ++ do { \ ++ struct qm_sg_entry *__u935 = (sg); \ ++ __u935->bpid = v; \ ++ } while (0) ++#define qm_sg_entry_set_offset(sg, v) \ ++ do { \ ++ struct qm_sg_entry *__u936 = (sg); \ ++ __u936->opaque_offset = cpu_to_be16(v); \ ++ } while (0) ++ ++/* See 1.5.8.1: "Enqueue Command" */ ++struct qm_eqcr_entry { ++ u8 __dont_write_directly__verb; ++ u8 dca; ++ u16 seqnum; ++ u32 orp; /* 24-bit */ ++ u32 fqid; /* 24-bit */ ++ u32 tag; ++ struct qm_fd fd; ++ u8 __reserved3[32]; ++} __packed; ++#define QM_EQCR_VERB_VBIT 0x80 ++#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */ ++#define QM_EQCR_VERB_CMD_ENQUEUE 0x01 ++#define QM_EQCR_VERB_COLOUR_MASK 0x18 /* 4 possible values; */ ++#define QM_EQCR_VERB_COLOUR_GREEN 0x00 ++#define QM_EQCR_VERB_COLOUR_YELLOW 0x08 ++#define QM_EQCR_VERB_COLOUR_RED 0x10 ++#define QM_EQCR_VERB_COLOUR_OVERRIDE 0x18 ++#define QM_EQCR_VERB_INTERRUPT 0x04 /* on command consumption */ ++#define QM_EQCR_VERB_ORP 0x02 /* enable order restoration */ ++#define QM_EQCR_DCA_ENABLE 0x80 ++#define QM_EQCR_DCA_PARK 0x40 ++#define QM_EQCR_DCA_IDXMASK 0x0f /* "DQRR::idx" goes here */ ++#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */ ++#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */ ++#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */ ++#define QM_EQCR_FQID_NULL 0 /* eg. 
for an ORP seqnum hole */ ++ ++/* See 1.5.8.2: "Frame Dequeue Response" */ ++struct qm_dqrr_entry { ++ u8 verb; ++ u8 stat; ++ u16 seqnum; /* 15-bit */ ++ u8 tok; ++ u8 __reserved2[3]; ++ u32 fqid; /* 24-bit */ ++ u32 contextB; ++ struct qm_fd fd; ++ u8 __reserved4[32]; ++}; ++#define QM_DQRR_VERB_VBIT 0x80 ++#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */ ++#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */ ++#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */ ++#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */ ++#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */ ++#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */ ++#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */ ++#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/ ++ ++/* See 1.5.8.3: "ERN Message Response" */ ++/* See 1.5.8.4: "FQ State Change Notification" */ ++struct qm_mr_entry { ++ u8 verb; ++ union { ++ struct { ++ u8 dca; ++ u16 seqnum; ++ u8 rc; /* Rejection Code */ ++ u32 orp:24; ++ u32 fqid; /* 24-bit */ ++ u32 tag; ++ struct qm_fd fd; ++ } __packed ern; ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */ ++ u8 __reserved1:3; ++ enum qm_dc_portal portal:3; ++#else ++ enum qm_dc_portal portal:3; ++ u8 __reserved1:3; ++ u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */ ++#endif ++ u16 __reserved2; ++ u8 rc; /* Rejection Code */ ++ u32 __reserved3:24; ++ u32 fqid; /* 24-bit */ ++ u32 tag; ++ struct qm_fd fd; ++ } __packed dcern; ++ struct { ++ u8 fqs; /* Frame Queue Status */ ++ u8 __reserved1[6]; ++ u32 fqid; /* 24-bit */ ++ u32 contextB; ++ u8 __reserved2[16]; ++ } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */ ++ }; ++ u8 __reserved2[32]; ++} __packed; ++#define QM_MR_VERB_VBIT 0x80 ++/* The "ern" VERB bits match QM_EQCR_VERB_*** so aren't reproduced here. ERNs ++ * originating from direct-connect portals ("dcern") use 0x20 as a verb which ++ * would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished from ++ * the other MR types by noting if the 0x20 bit is unset. */ ++#define QM_MR_VERB_TYPE_MASK 0x27 ++#define QM_MR_VERB_DC_ERN 0x20 ++#define QM_MR_VERB_FQRN 0x21 ++#define QM_MR_VERB_FQRNI 0x22 ++#define QM_MR_VERB_FQRL 0x23 ++#define QM_MR_VERB_FQPN 0x24 ++#define QM_MR_RC_MASK 0xf0 /* contains one of; */ ++#define QM_MR_RC_CGR_TAILDROP 0x00 ++#define QM_MR_RC_WRED 0x10 ++#define QM_MR_RC_ERROR 0x20 ++#define QM_MR_RC_ORPWINDOW_EARLY 0x30 ++#define QM_MR_RC_ORPWINDOW_LATE 0x40 ++#define QM_MR_RC_FQ_TAILDROP 0x50 ++#define QM_MR_RC_ORPWINDOW_RETIRED 0x60 ++#define QM_MR_RC_ORP_ZERO 0x70 ++#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */ ++#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */ ++#define QM_MR_DCERN_COLOUR_GREEN 0x00 ++#define QM_MR_DCERN_COLOUR_YELLOW 0x01 ++#define QM_MR_DCERN_COLOUR_RED 0x02 ++#define QM_MR_DCERN_COLOUR_OVERRIDE 0x03 ++ ++/* An identical structure of FQD fields is present in the "Init FQ" command and ++ * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type. ++ * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the ++ * latter has two inlines to assist with converting to/from the mant+exp ++ * representation. 
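++ * Eg. the round-trip through those inlines: qm_fqd_taildrop_set(&td,
++ * 0x5000, 1) stores mant=0xa0 and exp=7, and qm_fqd_taildrop_get(&td) then
++ * returns 0xa0 << 7 == 0x5000.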
*/ ++struct qm_fqd_stashing { ++ /* See QM_STASHING_EXCL_<...> */ ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u8 exclusive; ++ u8 __reserved1:2; ++ /* Numbers of cachelines */ ++ u8 annotation_cl:2; ++ u8 data_cl:2; ++ u8 context_cl:2; ++#else ++ u8 context_cl:2; ++ u8 data_cl:2; ++ u8 annotation_cl:2; ++ u8 __reserved1:2; ++ u8 exclusive; ++#endif ++} __packed; ++struct qm_fqd_taildrop { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u16 __reserved1:3; ++ u16 mant:8; ++ u16 exp:5; ++#else ++ u16 exp:5; ++ u16 mant:8; ++ u16 __reserved1:3; ++#endif ++} __packed; ++struct qm_fqd_oac { ++ /* See QM_OAC_<...> */ ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u8 oac:2; /* "Overhead Accounting Control" */ ++ u8 __reserved1:6; ++#else ++ u8 __reserved1:6; ++ u8 oac:2; /* "Overhead Accounting Control" */ ++#endif ++ /* Two's-complement value (-128 to +127) */ ++ signed char oal; /* "Overhead Accounting Length" */ ++} __packed; ++struct qm_fqd { ++ union { ++ u8 orpc; ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u8 __reserved1:2; ++ u8 orprws:3; ++ u8 oa:1; ++ u8 olws:2; ++#else ++ u8 olws:2; ++ u8 oa:1; ++ u8 orprws:3; ++ u8 __reserved1:2; ++#endif ++ } __packed; ++ }; ++ u8 cgid; ++ u16 fq_ctrl; /* See QM_FQCTRL_<...> */ ++ union { ++ u16 dest_wq; ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u16 channel:13; /* qm_channel */ ++ u16 wq:3; ++#else ++ u16 wq:3; ++ u16 channel:13; /* qm_channel */ ++#endif ++ } __packed dest; ++ }; ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u16 __reserved2:1; ++ u16 ics_cred:15; ++#else ++ u16 __reserved2:1; ++ u16 ics_cred:15; ++#endif ++ /* For "Initialize Frame Queue" commands, the write-enable mask ++ * determines whether 'td' or 'oac_init' is observed. For query ++ * commands, this field is always 'td', and 'oac_query' (below) reflects ++ * the Overhead ACcounting values. */ ++ union { ++ struct qm_fqd_taildrop td; ++ struct qm_fqd_oac oac_init; ++ }; ++ u32 context_b; ++ union { ++ /* Treat it as 64-bit opaque */ ++ u64 opaque; ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u32 hi; ++ u32 lo; ++#else ++ u32 lo; ++ u32 hi; ++#endif ++ }; ++ /* Treat it as s/w portal stashing config */ ++ /* See 1.5.6.7.1: "FQD Context_A field used for [...] 
*/ ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ struct qm_fqd_stashing stashing; ++ /* 48-bit address of FQ context to ++ * stash, must be cacheline-aligned */ ++ u16 context_hi; ++ u32 context_lo; ++#else ++ u32 context_lo; ++ u16 context_hi; ++ struct qm_fqd_stashing stashing; ++#endif ++ } __packed; ++ } context_a; ++ struct qm_fqd_oac oac_query; ++} __packed; ++/* 64-bit converters for context_hi/lo */ ++static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd) ++{ ++ return ((u64)fqd->context_a.context_hi << 32) | ++ (u64)fqd->context_a.context_lo; ++} ++static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd) ++{ ++ return (dma_addr_t)qm_fqd_stashing_get64(fqd); ++} ++static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd) ++{ ++ return ((u64)fqd->context_a.hi << 32) | ++ (u64)fqd->context_a.lo; ++} ++/* Macro, so we compile better when 'v' isn't necessarily 64-bit */ ++#define qm_fqd_stashing_set64(fqd, v) \ ++ do { \ ++ struct qm_fqd *__fqd931 = (fqd); \ ++ __fqd931->context_a.context_hi = upper_32_bits(v); \ ++ __fqd931->context_a.context_lo = lower_32_bits(v); \ ++ } while (0) ++#define qm_fqd_context_a_set64(fqd, v) \ ++ do { \ ++ struct qm_fqd *__fqd931 = (fqd); \ ++ __fqd931->context_a.hi = upper_32_bits(v); \ ++ __fqd931->context_a.lo = lower_32_bits(v); \ ++ } while (0) ++/* convert a threshold value into mant+exp representation */ ++static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val, ++ int roundup) ++{ ++ u32 e = 0; ++ int oddbit = 0; ++ if (val > 0xe0000000) ++ return -ERANGE; ++ while (val > 0xff) { ++ oddbit = val & 1; ++ val >>= 1; ++ e++; ++ if (roundup && oddbit) ++ val++; ++ } ++ td->exp = e; ++ td->mant = val; ++ return 0; ++} ++/* and the other direction */ ++static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td) ++{ ++ return (u32)td->mant << td->exp; ++} ++ ++/* See 1.5.2.2: "Frame Queue Descriptor (FQD)" */ ++/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */ ++#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */ ++#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */ ++#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */ ++#define QM_FQCTRL_ORP 0x0100 /* ORP Enable */ ++#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */ ++#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */ ++#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */ ++#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */ ++#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */ ++#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */ ++#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */ ++ ++/* See 1.5.6.7.1: "FQD Context_A field used for [...] */ ++/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */ ++#define QM_STASHING_EXCL_ANNOTATION 0x04 ++#define QM_STASHING_EXCL_DATA 0x02 ++#define QM_STASHING_EXCL_CTX 0x01 ++ ++/* See 1.5.5.3: "Intra Class Scheduling" */ ++/* FQD field 'OAC' (Overhead ACcounting) uses these constants */ ++#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */ ++#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */ ++ ++/* See 1.5.8.4: "FQ State Change Notification" */ ++/* This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields ++ * and associated commands/responses. 
The WRED parameters are calculated from ++ * these fields as follows; ++ * MaxTH = MA * (2 ^ Mn) ++ * Slope = SA / (2 ^ Sn) ++ * MaxP = 4 * (Pn + 1) ++ */ ++struct qm_cgr_wr_parm { ++ union { ++ u32 word; ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u32 MA:8; ++ u32 Mn:5; ++ u32 SA:7; /* must be between 64-127 */ ++ u32 Sn:6; ++ u32 Pn:6; ++#else ++ u32 Pn:6; ++ u32 Sn:6; ++ u32 SA:7; /* must be between 64-127 */ ++ u32 Mn:5; ++ u32 MA:8; ++#endif ++ } __packed; ++ }; ++} __packed; ++/* This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding ++ * management commands, this is padded to a 16-bit structure field, so that's ++ * how we represent it here. The congestion state threshold is calculated from ++ * these fields as follows; ++ * CS threshold = TA * (2 ^ Tn) ++ */ ++struct qm_cgr_cs_thres { ++ union { ++ u16 hword; ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u16 __reserved:3; ++ u16 TA:8; ++ u16 Tn:5; ++#else ++ u16 Tn:5; ++ u16 TA:8; ++ u16 __reserved:3; ++#endif ++ } __packed; ++ }; ++} __packed; ++/* This identical structure of CGR fields is present in the "Init/Modify CGR" ++ * commands and the "Query CGR" result. It's suctioned out here into its own ++ * struct. */ ++struct __qm_mc_cgr { ++ struct qm_cgr_wr_parm wr_parm_g; ++ struct qm_cgr_wr_parm wr_parm_y; ++ struct qm_cgr_wr_parm wr_parm_r; ++ u8 wr_en_g; /* boolean, use QM_CGR_EN */ ++ u8 wr_en_y; /* boolean, use QM_CGR_EN */ ++ u8 wr_en_r; /* boolean, use QM_CGR_EN */ ++ u8 cscn_en; /* boolean, use QM_CGR_EN */ ++ union { ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */ ++ u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */ ++#else ++ u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */ ++ u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */ ++#endif ++ }; ++ u32 cscn_targ; /* use QM_CGR_TARG_* */ ++ }; ++ u8 cstd_en; /* boolean, use QM_CGR_EN */ ++ u8 cs; /* boolean, only used in query response */ ++ union { ++ /* use qm_cgr_cs_thres_set64() */ ++ struct qm_cgr_cs_thres cs_thres; ++ u16 __cs_thres; ++ }; ++ u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */ ++} __packed; ++#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */ ++#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit*/ ++#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */ ++#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */ ++#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */ ++#define QM_CGR_TARG_FMAN1 0x00100000 /* : fman1 */ ++/* Convert CGR thresholds to/from "cs_thres" format */ ++static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th) ++{ ++ return (u64)th->TA << th->Tn; ++} ++static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val, ++ int roundup) ++{ ++ u32 e = 0; ++ int oddbit = 0; ++ while (val > 0xff) { ++ oddbit = val & 1; ++ val >>= 1; ++ e++; ++ if (roundup && oddbit) ++ val++; ++ } ++ th->Tn = e; ++ th->TA = val; ++ return 0; ++} ++ ++/* See 1.5.8.5.1: "Initialize FQ" */ ++/* See 1.5.8.5.2: "Query FQ" */ ++/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */ ++/* See 1.5.8.5.4: "Alter FQ State Commands " */ ++/* See 1.5.8.6.1: "Initialize/Modify CGR" */ ++/* See 1.5.8.6.2: "CGR Test Write" */ ++/* See 1.5.8.6.3: "Query CGR" */ ++/* See 1.5.8.6.4: "Query Congestion Group State" */ ++struct qm_mcc_initfq { ++ u8 __reserved1; ++ u16 we_mask; /* Write Enable Mask */ ++ u32 fqid; /* 24-bit */ ++ u16 count; /* 
Initialises 'count+1' FQDs */ ++ struct qm_fqd fqd; /* the FQD fields go here */ ++ u8 __reserved3[30]; ++} __packed; ++struct qm_mcc_queryfq { ++ u8 __reserved1[3]; ++ u32 fqid; /* 24-bit */ ++ u8 __reserved2[56]; ++} __packed; ++struct qm_mcc_queryfq_np { ++ u8 __reserved1[3]; ++ u32 fqid; /* 24-bit */ ++ u8 __reserved2[56]; ++} __packed; ++struct qm_mcc_alterfq { ++ u8 __reserved1[3]; ++ u32 fqid; /* 24-bit */ ++ u8 __reserved2; ++ u8 count; /* number of consecutive FQID */ ++ u8 __reserved3[10]; ++ u32 context_b; /* frame queue context b */ ++ u8 __reserved4[40]; ++} __packed; ++struct qm_mcc_initcgr { ++ u8 __reserved1; ++ u16 we_mask; /* Write Enable Mask */ ++ struct __qm_mc_cgr cgr; /* CGR fields */ ++ u8 __reserved2[2]; ++ u8 cgid; ++ u8 __reserved4[32]; ++} __packed; ++struct qm_mcc_cgrtestwrite { ++ u8 __reserved1[2]; ++ u8 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */ ++ u32 i_bcnt_lo; /* low 32-bits of 40-bit */ ++ u8 __reserved2[23]; ++ u8 cgid; ++ u8 __reserved3[32]; ++} __packed; ++struct qm_mcc_querycgr { ++ u8 __reserved1[30]; ++ u8 cgid; ++ u8 __reserved2[32]; ++} __packed; ++struct qm_mcc_querycongestion { ++ u8 __reserved[63]; ++} __packed; ++struct qm_mcc_querywq { ++ u8 __reserved; ++ /* select channel if verb != QUERYWQ_DEDICATED */ ++ union { ++ u16 channel_wq; /* ignores wq (3 lsbits) */ ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u16 id:13; /* qm_channel */ ++ u16 __reserved1:3; ++#else ++ u16 __reserved1:3; ++ u16 id:13; /* qm_channel */ ++#endif ++ } __packed channel; ++ }; ++ u8 __reserved2[60]; ++} __packed; ++ ++struct qm_mcc_ceetm_lfqmt_config { ++ u8 __reserved1[4]; ++ u32 lfqid:24; ++ u8 __reserved2[2]; ++ u16 cqid; ++ u8 __reserved3[2]; ++ u16 dctidx; ++ u8 __reserved4[48]; ++} __packed; ++ ++struct qm_mcc_ceetm_lfqmt_query { ++ u8 __reserved1[4]; ++ u32 lfqid:24; ++ u8 __reserved2[56]; ++} __packed; ++ ++struct qm_mcc_ceetm_cq_config { ++ u8 __reserved1; ++ u16 cqid; ++ u8 dcpid; ++ u8 __reserved2; ++ u16 ccgid; ++ u8 __reserved3[56]; ++} __packed; ++ ++struct qm_mcc_ceetm_cq_query { ++ u8 __reserved1; ++ u16 cqid; ++ u8 dcpid; ++ u8 __reserved2[59]; ++} __packed; ++ ++struct qm_mcc_ceetm_dct_config { ++ u8 __reserved1; ++ u16 dctidx; ++ u8 dcpid; ++ u8 __reserved2[15]; ++ u32 context_b; ++ u64 context_a; ++ u8 __reserved3[32]; ++} __packed; ++ ++struct qm_mcc_ceetm_dct_query { ++ u8 __reserved1; ++ u16 dctidx; ++ u8 dcpid; ++ u8 __reserved2[59]; ++} __packed; ++ ++struct qm_mcc_ceetm_class_scheduler_config { ++ u8 __reserved1; ++ u16 cqcid; ++ u8 dcpid; ++ u8 __reserved2[6]; ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u8 gpc_reserved:1; ++ u8 gpc_combine_flag:1; ++ u8 gpc_prio_b:3; ++ u8 gpc_prio_a:3; ++#else ++ u8 gpc_prio_a:3; ++ u8 gpc_prio_b:3; ++ u8 gpc_combine_flag:1; ++ u8 gpc_reserved:1; ++#endif ++ u16 crem; ++ u16 erem; ++ u8 w[8]; ++ u8 __reserved3[40]; ++} __packed; ++ ++struct qm_mcc_ceetm_class_scheduler_query { ++ u8 __reserved1; ++ u16 cqcid; ++ u8 dcpid; ++ u8 __reserved2[59]; ++} __packed; ++ ++#define CEETM_COMMAND_CHANNEL_MAPPING (0 << 12) ++#define CEETM_COMMAND_SP_MAPPING (1 << 12) ++#define CEETM_COMMAND_CHANNEL_SHAPER (2 << 12) ++#define CEETM_COMMAND_LNI_SHAPER (3 << 12) ++#define CEETM_COMMAND_TCFC (4 << 12) ++ ++#define CEETM_CCGRID_MASK 0x01FF ++#define CEETM_CCGR_CM_CONFIGURE (0 << 14) ++#define CEETM_CCGR_DN_CONFIGURE (1 << 14) ++#define CEETM_CCGR_TEST_WRITE (2 << 14) ++#define CEETM_CCGR_CM_QUERY (0 << 14) ++#define CEETM_CCGR_DN_QUERY (1 << 14) ++#define CEETM_CCGR_DN_QUERY_FLUSH (2 << 14) 
++#define CEETM_QUERY_CONGESTION_STATE (3 << 14) ++ ++struct qm_mcc_ceetm_mapping_shaper_tcfc_config { ++ u8 __reserved1; ++ u16 cid; ++ u8 dcpid; ++ union { ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u8 map_shaped:1; ++ u8 map_reserved:4; ++ u8 map_lni_id:3; ++#else ++ u8 map_lni_id:3; ++ u8 map_reserved:4; ++ u8 map_shaped:1; ++#endif ++ u8 __reserved2[58]; ++ } __packed channel_mapping; ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u8 map_reserved:5; ++ u8 map_lni_id:3; ++#else ++ u8 map_lni_id:3; ++ u8 map_reserved:5; ++#endif ++ u8 __reserved2[58]; ++ } __packed sp_mapping; ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u8 cpl:1; ++ u8 cpl_reserved:2; ++ u8 oal:5; ++#else ++ u8 oal:5; ++ u8 cpl_reserved:2; ++ u8 cpl:1; ++#endif ++ u32 crtcr:24; ++ u32 ertcr:24; ++ u16 crtbl; ++ u16 ertbl; ++ u8 mps; /* This will be hardcoded by driver with 60 */ ++ u8 __reserved2[47]; ++ } __packed shaper_config; ++ struct { ++ u8 __reserved2[11]; ++ u64 lnitcfcc; ++ u8 __reserved3[40]; ++ } __packed tcfc_config; ++ }; ++} __packed; ++ ++struct qm_mcc_ceetm_mapping_shaper_tcfc_query { ++ u8 __reserved1; ++ u16 cid; ++ u8 dcpid; ++ u8 __reserved2[59]; ++} __packed; ++ ++struct qm_mcc_ceetm_ccgr_config { ++ u8 __reserved1; ++ u16 ccgrid; ++ u8 dcpid; ++ u8 __reserved2; ++ u16 we_mask; ++ union { ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u8 ctl_reserved:1; ++ u8 ctl_wr_en_g:1; ++ u8 ctl_wr_en_y:1; ++ u8 ctl_wr_en_r:1; ++ u8 ctl_td_en:1; ++ u8 ctl_td_mode:1; ++ u8 ctl_cscn_en:1; ++ u8 ctl_mode:1; ++#else ++ u8 ctl_mode:1; ++ u8 ctl_cscn_en:1; ++ u8 ctl_td_mode:1; ++ u8 ctl_td_en:1; ++ u8 ctl_wr_en_r:1; ++ u8 ctl_wr_en_y:1; ++ u8 ctl_wr_en_g:1; ++ u8 ctl_reserved:1; ++#endif ++ u8 cdv; ++ u16 cscn_tupd; ++ u8 oal; ++ u8 __reserved3; ++ struct qm_cgr_cs_thres cs_thres; ++ struct qm_cgr_cs_thres cs_thres_x; ++ struct qm_cgr_cs_thres td_thres; ++ struct qm_cgr_wr_parm wr_parm_g; ++ struct qm_cgr_wr_parm wr_parm_y; ++ struct qm_cgr_wr_parm wr_parm_r; ++ } __packed cm_config; ++ struct { ++ u8 dnc; ++ u8 dn0; ++ u8 dn1; ++ u64 dnba:40; ++ u8 __reserved3[2]; ++ u16 dnth_0; ++ u8 __reserved4[2]; ++ u16 dnth_1; ++ u8 __reserved5[8]; ++ } __packed dn_config; ++ struct { ++ u8 __reserved3[3]; ++ u64 i_cnt:40; ++ u8 __reserved4[16]; ++ } __packed test_write; ++ }; ++ u8 __reserved5[32]; ++} __packed; ++ ++struct qm_mcc_ceetm_ccgr_query { ++ u8 __reserved1; ++ u16 ccgrid; ++ u8 dcpid; ++ u8 __reserved2[59]; ++} __packed; ++ ++struct qm_mcc_ceetm_cq_peek_pop_xsfdrread { ++ u8 __reserved1; ++ u16 cqid; ++ u8 dcpid; ++ u8 ct; ++ u16 xsfdr; ++ u8 __reserved2[56]; ++} __packed; ++ ++#define CEETM_QUERY_DEQUEUE_STATISTICS 0x00 ++#define CEETM_QUERY_DEQUEUE_CLEAR_STATISTICS 0x01 ++#define CEETM_WRITE_DEQUEUE_STATISTICS 0x02 ++#define CEETM_QUERY_REJECT_STATISTICS 0x03 ++#define CEETM_QUERY_REJECT_CLEAR_STATISTICS 0x04 ++#define CEETM_WRITE_REJECT_STATISTICS 0x05 ++struct qm_mcc_ceetm_statistics_query_write { ++ u8 __reserved1; ++ u16 cid; ++ u8 dcpid; ++ u8 ct; ++ u8 __reserved2[13]; ++ u64 frm_cnt:40; ++ u8 __reserved3[2]; ++ u64 byte_cnt:48; ++ u8 __reserved[32]; ++} __packed; ++ ++struct qm_mc_command { ++ u8 __dont_write_directly__verb; ++ union { ++ struct qm_mcc_initfq initfq; ++ struct qm_mcc_queryfq queryfq; ++ struct qm_mcc_queryfq_np queryfq_np; ++ struct qm_mcc_alterfq alterfq; ++ struct qm_mcc_initcgr initcgr; ++ struct qm_mcc_cgrtestwrite cgrtestwrite; ++ struct qm_mcc_querycgr querycgr; ++ struct qm_mcc_querycongestion querycongestion; ++ struct 
qm_mcc_querywq querywq; ++ struct qm_mcc_ceetm_lfqmt_config lfqmt_config; ++ struct qm_mcc_ceetm_lfqmt_query lfqmt_query; ++ struct qm_mcc_ceetm_cq_config cq_config; ++ struct qm_mcc_ceetm_cq_query cq_query; ++ struct qm_mcc_ceetm_dct_config dct_config; ++ struct qm_mcc_ceetm_dct_query dct_query; ++ struct qm_mcc_ceetm_class_scheduler_config csch_config; ++ struct qm_mcc_ceetm_class_scheduler_query csch_query; ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config mst_config; ++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query mst_query; ++ struct qm_mcc_ceetm_ccgr_config ccgr_config; ++ struct qm_mcc_ceetm_ccgr_query ccgr_query; ++ struct qm_mcc_ceetm_cq_peek_pop_xsfdrread cq_ppxr; ++ struct qm_mcc_ceetm_statistics_query_write stats_query_write; ++ }; ++} __packed; ++#define QM_MCC_VERB_VBIT 0x80 ++#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */ ++#define QM_MCC_VERB_INITFQ_PARKED 0x40 ++#define QM_MCC_VERB_INITFQ_SCHED 0x41 ++#define QM_MCC_VERB_QUERYFQ 0x44 ++#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */ ++#define QM_MCC_VERB_QUERYWQ 0x46 ++#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47 ++#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */ ++#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */ ++#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */ ++#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */ ++#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */ ++#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */ ++#define QM_MCC_VERB_INITCGR 0x50 ++#define QM_MCC_VERB_MODIFYCGR 0x51 ++#define QM_MCC_VERB_CGRTESTWRITE 0x52 ++#define QM_MCC_VERB_QUERYCGR 0x58 ++#define QM_MCC_VERB_QUERYCONGESTION 0x59 ++/* INITFQ-specific flags */ ++#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */ ++#define QM_INITFQ_WE_OAC 0x0100 ++#define QM_INITFQ_WE_ORPC 0x0080 ++#define QM_INITFQ_WE_CGID 0x0040 ++#define QM_INITFQ_WE_FQCTRL 0x0020 ++#define QM_INITFQ_WE_DESTWQ 0x0010 ++#define QM_INITFQ_WE_ICSCRED 0x0008 ++#define QM_INITFQ_WE_TDTHRESH 0x0004 ++#define QM_INITFQ_WE_CONTEXTB 0x0002 ++#define QM_INITFQ_WE_CONTEXTA 0x0001 ++/* INITCGR/MODIFYCGR-specific flags */ ++#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */ ++#define QM_CGR_WE_WR_PARM_G 0x0400 ++#define QM_CGR_WE_WR_PARM_Y 0x0200 ++#define QM_CGR_WE_WR_PARM_R 0x0100 ++#define QM_CGR_WE_WR_EN_G 0x0080 ++#define QM_CGR_WE_WR_EN_Y 0x0040 ++#define QM_CGR_WE_WR_EN_R 0x0020 ++#define QM_CGR_WE_CSCN_EN 0x0010 ++#define QM_CGR_WE_CSCN_TARG 0x0008 ++#define QM_CGR_WE_CSTD_EN 0x0004 ++#define QM_CGR_WE_CS_THRES 0x0002 ++#define QM_CGR_WE_MODE 0x0001 ++ ++/* See 1.5.9.7 CEETM Management Commands */ ++#define QM_CEETM_VERB_LFQMT_CONFIG 0x70 ++#define QM_CEETM_VERB_LFQMT_QUERY 0x71 ++#define QM_CEETM_VERB_CQ_CONFIG 0x72 ++#define QM_CEETM_VERB_CQ_QUERY 0x73 ++#define QM_CEETM_VERB_DCT_CONFIG 0x74 ++#define QM_CEETM_VERB_DCT_QUERY 0x75 ++#define QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG 0x76 ++#define QM_CEETM_VERB_CLASS_SCHEDULER_QUERY 0x77 ++#define QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG 0x78 ++#define QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY 0x79 ++#define QM_CEETM_VERB_CCGR_CONFIG 0x7A ++#define QM_CEETM_VERB_CCGR_QUERY 0x7B ++#define QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD 0x7C ++#define QM_CEETM_VERB_STATISTICS_QUERY_WRITE 0x7D ++ ++/* See 1.5.8.5.1: "Initialize FQ" */ ++/* See 1.5.8.5.2: "Query FQ" */ ++/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */ ++/* See 1.5.8.5.4: "Alter FQ State Commands " */ ++/* See 1.5.8.6.1: "Initialize/Modify CGR" */ ++/* See 1.5.8.6.2: "CGR Test Write" */ ++/* See 
1.5.8.6.3: "Query CGR" */ ++/* See 1.5.8.6.4: "Query Congestion Group State" */ ++struct qm_mcr_initfq { ++ u8 __reserved1[62]; ++} __packed; ++struct qm_mcr_queryfq { ++ u8 __reserved1[8]; ++ struct qm_fqd fqd; /* the FQD fields are here */ ++ u8 __reserved2[30]; ++} __packed; ++struct qm_mcr_queryfq_np { ++ u8 __reserved1; ++ u8 state; /* QM_MCR_NP_STATE_*** */ ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u8 __reserved2; ++ u32 fqd_link:24; ++ u16 __reserved3:2; ++ u16 odp_seq:14; ++ u16 __reserved4:2; ++ u16 orp_nesn:14; ++ u16 __reserved5:1; ++ u16 orp_ea_hseq:15; ++ u16 __reserved6:1; ++ u16 orp_ea_tseq:15; ++ u8 __reserved7; ++ u32 orp_ea_hptr:24; ++ u8 __reserved8; ++ u32 orp_ea_tptr:24; ++ u8 __reserved9; ++ u32 pfdr_hptr:24; ++ u8 __reserved10; ++ u32 pfdr_tptr:24; ++ u8 __reserved11[5]; ++ u8 __reserved12:7; ++ u8 is:1; ++ u16 ics_surp; ++ u32 byte_cnt; ++ u8 __reserved13; ++ u32 frm_cnt:24; ++ u32 __reserved14; ++ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */ ++ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */ ++ u16 __reserved15; ++ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */ ++ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */ ++ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */ ++#else ++ u8 __reserved2; ++ u32 fqd_link:24; ++ ++ u16 odp_seq:14; ++ u16 __reserved3:2; ++ ++ u16 orp_nesn:14; ++ u16 __reserved4:2; ++ ++ u16 orp_ea_hseq:15; ++ u16 __reserved5:1; ++ ++ u16 orp_ea_tseq:15; ++ u16 __reserved6:1; ++ ++ u8 __reserved7; ++ u32 orp_ea_hptr:24; ++ ++ u8 __reserved8; ++ u32 orp_ea_tptr:24; ++ ++ u8 __reserved9; ++ u32 pfdr_hptr:24; ++ ++ u8 __reserved10; ++ u32 pfdr_tptr:24; ++ ++ u8 __reserved11[5]; ++ u8 is:1; ++ u8 __reserved12:7; ++ u16 ics_surp; ++ u32 byte_cnt; ++ u8 __reserved13; ++ u32 frm_cnt:24; ++ u32 __reserved14; ++ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */ ++ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */ ++ u16 __reserved15; ++ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */ ++ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */ ++ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */ ++#endif ++} __packed; ++ ++ ++struct qm_mcr_alterfq { ++ u8 fqs; /* Frame Queue Status */ ++ u8 __reserved1[61]; ++} __packed; ++struct qm_mcr_initcgr { ++ u8 __reserved1[62]; ++} __packed; ++struct qm_mcr_cgrtestwrite { ++ u16 __reserved1; ++ struct __qm_mc_cgr cgr; /* CGR fields */ ++ u8 __reserved2[3]; ++ u32 __reserved3:24; ++ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */ ++ u32 i_bcnt_lo; /* low 32-bits of 40-bit */ ++ u32 __reserved4:24; ++ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */ ++ u32 a_bcnt_lo; /* low 32-bits of 40-bit */ ++ u16 lgt; /* Last Group Tick */ ++ u16 wr_prob_g; ++ u16 wr_prob_y; ++ u16 wr_prob_r; ++ u8 __reserved5[8]; ++} __packed; ++struct qm_mcr_querycgr { ++ u16 __reserved1; ++ struct __qm_mc_cgr cgr; /* CGR fields */ ++ u8 __reserved2[3]; ++ union { ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u32 __reserved3:24; ++ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */ ++ u32 i_bcnt_lo; /* low 32-bits of 40-bit */ ++#else ++ u32 i_bcnt_lo; /* low 32-bits of 40-bit */ ++ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */ ++ u32 __reserved3:24; ++#endif ++ }; ++ u64 i_bcnt; ++ }; ++ union { ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u32 __reserved4:24; ++ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */ ++ u32 a_bcnt_lo; /* low 32-bits of 40-bit */ ++#else ++ u32 a_bcnt_lo; /* low 32-bits of 40-bit */ ++ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */ ++ u32 __reserved4:24; ++#endif ++ }; ++ u64 a_bcnt; ++ }; ++ union { ++ u32 cscn_targ_swp[4]; ++ u8 __reserved5[16]; ++ }; ++} 
__packed;
++static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
++{
++	return be64_to_cpu(q->i_bcnt);
++}
++static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
++{
++	return be64_to_cpu(q->a_bcnt);
++}
++static inline u64 qm_mcr_cgrtestwrite_i_get64(
++	const struct qm_mcr_cgrtestwrite *q)
++{
++	return be64_to_cpu(((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo);
++}
++static inline u64 qm_mcr_cgrtestwrite_a_get64(
++	const struct qm_mcr_cgrtestwrite *q)
++{
++	return be64_to_cpu(((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo);
++}
++/* Macro, so we compile better if 'v' isn't always 64-bit */
++#define qm_mcr_querycgr_i_set64(q, v) \
++	do { \
++		struct qm_mcr_querycgr *__q931 = (q); \
++		__q931->i_bcnt_hi = upper_32_bits(v); \
++		__q931->i_bcnt_lo = lower_32_bits(v); \
++	} while (0)
++#define qm_mcr_querycgr_a_set64(q, v) \
++	do { \
++		struct qm_mcr_querycgr *__q931 = (q); \
++		__q931->a_bcnt_hi = upper_32_bits(v); \
++		__q931->a_bcnt_lo = lower_32_bits(v); \
++	} while (0)
++struct __qm_mcr_querycongestion {
++	u32 __state[8];
++};
++struct qm_mcr_querycongestion {
++	u8 __reserved[30];
++	/* Access this struct using QM_MCR_QUERYCONGESTION() */
++	struct __qm_mcr_querycongestion state;
++} __packed;
++struct qm_mcr_querywq {
++	union {
++		u16 channel_wq; /* ignores wq (3 lsbits) */
++		struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++			u16 id:13; /* qm_channel */
++			u16 __reserved:3;
++#else
++			u16 __reserved:3;
++			u16 id:13; /* qm_channel */
++#endif
++		} __packed channel;
++	};
++	u8 __reserved[28];
++	u32 wq_len[8];
++} __packed;
++
++/* QMAN CEETM Management Command Response */
++struct qm_mcr_ceetm_lfqmt_config {
++	u8 __reserved1[62];
++} __packed;
++struct qm_mcr_ceetm_lfqmt_query {
++	u8 __reserved1[8];
++	u16 cqid;
++	u8 __reserved2[2];
++	u16 dctidx;
++	u8 __reserved3[2];
++	u16 ccgid;
++	u8 __reserved4[44];
++} __packed;
++
++struct qm_mcr_ceetm_cq_config {
++	u8 __reserved1[62];
++} __packed;
++
++struct qm_mcr_ceetm_cq_query {
++	u8 __reserved1[4];
++	u16 ccgid;
++	u16 state;
++	u32 pfdr_hptr:24;
++	u32 pfdr_tptr:24;
++	u16 od1_xsfdr;
++	u16 od2_xsfdr;
++	u16 od3_xsfdr;
++	u16 od4_xsfdr;
++	u16 od5_xsfdr;
++	u16 od6_xsfdr;
++	u16 ra1_xsfdr;
++	u16 ra2_xsfdr;
++	u8 __reserved2;
++	u32 frm_cnt:24;
++	u8 __reserved333[28];
++} __packed;
++
++struct qm_mcr_ceetm_dct_config {
++	u8 __reserved1[62];
++} __packed;
++
++struct qm_mcr_ceetm_dct_query {
++	u8 __reserved1[18];
++	u32 context_b;
++	u64 context_a;
++	u8 __reserved2[32];
++} __packed;
++
++struct qm_mcr_ceetm_class_scheduler_config {
++	u8 __reserved1[62];
++} __packed;
++
++struct qm_mcr_ceetm_class_scheduler_query {
++	u8 __reserved1[9];
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++	u8 gpc_reserved:1;
++	u8 gpc_combine_flag:1;
++	u8 gpc_prio_b:3;
++	u8 gpc_prio_a:3;
++#else
++	u8 gpc_prio_a:3;
++	u8 gpc_prio_b:3;
++	u8 gpc_combine_flag:1;
++	u8 gpc_reserved:1;
++#endif
++	u16 crem;
++	u16 erem;
++	u8 w[8];
++	u8 __reserved2[5];
++	u32 wbfslist:24;
++	u32 d8;
++	u32 d9;
++	u32 d10;
++	u32 d11;
++	u32 d12;
++	u32 d13;
++	u32 d14;
++	u32 d15;
++} __packed;
++
++struct qm_mcr_ceetm_mapping_shaper_tcfc_config {
++	u16 cid;
++	u8 __reserved2[60];
++} __packed;
++
++struct qm_mcr_ceetm_mapping_shaper_tcfc_query {
++	u16 cid;
++	u8 __reserved1;
++	union {
++		struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++			u8 map_shaped:1;
++			u8 map_reserved:4;
++			u8 map_lni_id:3;
++#else
++			u8 map_lni_id:3;
++			u8 map_reserved:4;
++			u8 map_shaped:1;
++#endif
++			u8 __reserved2[58];
++ } __packed channel_mapping_query; ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u8 map_reserved:5; ++ u8 map_lni_id:3; ++#else ++ u8 map_lni_id:3; ++ u8 map_reserved:5; ++#endif ++ u8 __reserved2[58]; ++ } __packed sp_mapping_query; ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u8 cpl:1; ++ u8 cpl_reserved:2; ++ u8 oal:5; ++#else ++ u8 oal:5; ++ u8 cpl_reserved:2; ++ u8 cpl:1; ++#endif ++ u32 crtcr:24; ++ u32 ertcr:24; ++ u16 crtbl; ++ u16 ertbl; ++ u8 mps; ++ u8 __reserved2[15]; ++ u32 crat; ++ u32 erat; ++ u8 __reserved3[24]; ++ } __packed shaper_query; ++ struct { ++ u8 __reserved1[11]; ++ u64 lnitcfcc; ++ u8 __reserved3[40]; ++ } __packed tcfc_query; ++ }; ++} __packed; ++ ++struct qm_mcr_ceetm_ccgr_config { ++ u8 __reserved1[46]; ++ union { ++ u8 __reserved2[8]; ++ struct { ++ u16 timestamp; ++ u16 wr_porb_g; ++ u16 wr_prob_y; ++ u16 wr_prob_r; ++ } __packed test_write; ++ }; ++ u8 __reserved3[8]; ++} __packed; ++ ++struct qm_mcr_ceetm_ccgr_query { ++ u8 __reserved1[6]; ++ union { ++ struct { ++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++ u8 ctl_reserved:1; ++ u8 ctl_wr_en_g:1; ++ u8 ctl_wr_en_y:1; ++ u8 ctl_wr_en_r:1; ++ u8 ctl_td_en:1; ++ u8 ctl_td_mode:1; ++ u8 ctl_cscn_en:1; ++ u8 ctl_mode:1; ++#else ++ u8 ctl_mode:1; ++ u8 ctl_cscn_en:1; ++ u8 ctl_td_mode:1; ++ u8 ctl_td_en:1; ++ u8 ctl_wr_en_r:1; ++ u8 ctl_wr_en_y:1; ++ u8 ctl_wr_en_g:1; ++ u8 ctl_reserved:1; ++#endif ++ u8 cdv; ++ u8 __reserved2[2]; ++ u8 oal; ++ u8 __reserved3; ++ struct qm_cgr_cs_thres cs_thres; ++ struct qm_cgr_cs_thres cs_thres_x; ++ struct qm_cgr_cs_thres td_thres; ++ struct qm_cgr_wr_parm wr_parm_g; ++ struct qm_cgr_wr_parm wr_parm_y; ++ struct qm_cgr_wr_parm wr_parm_r; ++ u16 cscn_targ_dcp; ++ u8 dcp_lsn; ++ u64 i_cnt:40; ++ u8 __reserved4[3]; ++ u64 a_cnt:40; ++ u32 cscn_targ_swp[4]; ++ } __packed cm_query; ++ struct { ++ u8 dnc; ++ u8 dn0; ++ u8 dn1; ++ u64 dnba:40; ++ u8 __reserved2[2]; ++ u16 dnth_0; ++ u8 __reserved3[2]; ++ u16 dnth_1; ++ u8 __reserved4[10]; ++ u16 dnacc_0; ++ u8 __reserved5[2]; ++ u16 dnacc_1; ++ u8 __reserved6[24]; ++ } __packed dn_query; ++ struct { ++ u8 __reserved2[24]; ++ struct __qm_mcr_querycongestion state; ++ } __packed congestion_state; ++ ++ }; ++} __packed; ++ ++struct qm_mcr_ceetm_cq_peek_pop_xsfdrread { ++ u8 stat; ++ u8 __reserved1[11]; ++ u16 dctidx; ++ struct qm_fd fd; ++ u8 __reserved2[32]; ++} __packed; ++ ++struct qm_mcr_ceetm_statistics_query { ++ u8 __reserved1[17]; ++ u64 frm_cnt:40; ++ u8 __reserved2[2]; ++ u64 byte_cnt:48; ++ u8 __reserved3[32]; ++} __packed; ++ ++struct qm_mc_result { ++ u8 verb; ++ u8 result; ++ union { ++ struct qm_mcr_initfq initfq; ++ struct qm_mcr_queryfq queryfq; ++ struct qm_mcr_queryfq_np queryfq_np; ++ struct qm_mcr_alterfq alterfq; ++ struct qm_mcr_initcgr initcgr; ++ struct qm_mcr_cgrtestwrite cgrtestwrite; ++ struct qm_mcr_querycgr querycgr; ++ struct qm_mcr_querycongestion querycongestion; ++ struct qm_mcr_querywq querywq; ++ struct qm_mcr_ceetm_lfqmt_config lfqmt_config; ++ struct qm_mcr_ceetm_lfqmt_query lfqmt_query; ++ struct qm_mcr_ceetm_cq_config cq_config; ++ struct qm_mcr_ceetm_cq_query cq_query; ++ struct qm_mcr_ceetm_dct_config dct_config; ++ struct qm_mcr_ceetm_dct_query dct_query; ++ struct qm_mcr_ceetm_class_scheduler_config csch_config; ++ struct qm_mcr_ceetm_class_scheduler_query csch_query; ++ struct qm_mcr_ceetm_mapping_shaper_tcfc_config mst_config; ++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query mst_query; ++ struct qm_mcr_ceetm_ccgr_config ccgr_config; ++ struct 
qm_mcr_ceetm_ccgr_query ccgr_query; ++ struct qm_mcr_ceetm_cq_peek_pop_xsfdrread cq_ppxr; ++ struct qm_mcr_ceetm_statistics_query stats_query; ++ }; ++} __packed; ++ ++#define QM_MCR_VERB_RRID 0x80 ++#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK ++#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED ++#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED ++#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ ++#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP ++#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ ++#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED ++#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED ++#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE ++#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE ++#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS ++#define QM_MCR_RESULT_NULL 0x00 ++#define QM_MCR_RESULT_OK 0xf0 ++#define QM_MCR_RESULT_ERR_FQID 0xf1 ++#define QM_MCR_RESULT_ERR_FQSTATE 0xf2 ++#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */ ++#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4 ++#define QM_MCR_RESULT_PENDING 0xf8 ++#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff ++#define QM_MCR_NP_STATE_FE 0x10 ++#define QM_MCR_NP_STATE_R 0x08 ++#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */ ++#define QM_MCR_NP_STATE_OOS 0x00 ++#define QM_MCR_NP_STATE_RETIRED 0x01 ++#define QM_MCR_NP_STATE_TEN_SCHED 0x02 ++#define QM_MCR_NP_STATE_TRU_SCHED 0x03 ++#define QM_MCR_NP_STATE_PARKED 0x04 ++#define QM_MCR_NP_STATE_ACTIVE 0x05 ++#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */ ++#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */ ++#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */ ++#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */ ++#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */ ++#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */ ++#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */ ++/* This extracts the state for congestion group 'n' from a query response. ++ * Eg. ++ * u8 cgr = [...]; ++ * struct qm_mc_result *res = [...]; ++ * printf("congestion group %d congestion state: %d\n", cgr, ++ * QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr)); ++ */ ++#define __CGR_WORD(num) (num >> 5) ++#define __CGR_SHIFT(num) (num & 0x1f) ++#define __CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3) ++static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p, ++ u8 cgr) ++{ ++ return p->__state[__CGR_WORD(cgr)] & (0x80000000 >> __CGR_SHIFT(cgr)); ++} ++ ++ ++/*********************/ ++/* Utility interface */ ++/*********************/ ++ ++/* Represents an allocator over a range of FQIDs. NB, accesses are not locked, ++ * spinlock them yourself if needed. */ ++struct qman_fqid_pool; ++ ++/* Create/destroy a FQID pool, num must be a multiple of 32. NB, _destroy() ++ * always succeeds, but returns non-zero if there were "leaked" FQID ++ * allocations. */ ++struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num); ++int qman_fqid_pool_destroy(struct qman_fqid_pool *pool); ++/* Alloc/free a FQID from the range. _alloc() returns zero for success. 
*/ ++int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid); ++void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid); ++u32 qman_fqid_pool_used(struct qman_fqid_pool *pool); ++ ++/*******************************************************************/ ++/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */ ++/*******************************************************************/ ++ ++ /* Portal and Frame Queues */ ++ /* ----------------------- */ ++/* Represents a managed portal */ ++struct qman_portal; ++ ++/* This object type represents Qman frame queue descriptors (FQD), it is ++ * cacheline-aligned, and initialised by qman_create_fq(). The structure is ++ * defined further down. */ ++struct qman_fq; ++ ++/* This object type represents a Qman congestion group, it is defined further ++ * down. */ ++struct qman_cgr; ++ ++struct qman_portal_config { ++ /* If the caller enables DQRR stashing (and thus wishes to operate the ++ * portal from only one cpu), this is the logical CPU that the portal ++ * will stash to. Whether stashing is enabled or not, this setting is ++ * also used for any "core-affine" portals, ie. default portals ++ * associated to the corresponding cpu. -1 implies that there is no core ++ * affinity configured. */ ++ int cpu; ++ /* portal interrupt line */ ++ int irq; ++ /* the unique index of this portal */ ++ u32 index; ++ /* Is this portal shared? (If so, it has coarser locking and demuxes ++ * processing on behalf of other CPUs.) */ ++ int is_shared; ++ /* The portal's dedicated channel id, use this value for initialising ++ * frame queues to target this portal when scheduled. */ ++ u16 channel; ++ /* A mask of which pool channels this portal has dequeue access to ++ * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask) */ ++ u32 pools; ++}; ++ ++/* This enum, and the callback type that returns it, are used when handling ++ * dequeued frames via DQRR. Note that for "null" callbacks registered with the ++ * portal object (for handling dequeues that do not demux because contextB is ++ * NULL), the return value *MUST* be qman_cb_dqrr_consume. */ ++enum qman_cb_dqrr_result { ++ /* DQRR entry can be consumed */ ++ qman_cb_dqrr_consume, ++ /* Like _consume, but requests parking - FQ must be held-active */ ++ qman_cb_dqrr_park, ++ /* Does not consume, for DCA mode only. This allows out-of-order ++ * consumes by explicit calls to qman_dca() and/or the use of implicit ++ * DCA via EQCR entries. */ ++ qman_cb_dqrr_defer, ++ /* Stop processing without consuming this ring entry. Exits the current ++ * qman_poll_dqrr() or interrupt-handling, as appropriate. If within an ++ * interrupt handler, the callback would typically call ++ * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value, ++ * otherwise the interrupt will reassert immediately. */ ++ qman_cb_dqrr_stop, ++ /* Like qman_cb_dqrr_stop, but consumes the current entry. */ ++ qman_cb_dqrr_consume_stop ++}; ++typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm, ++ struct qman_fq *fq, ++ const struct qm_dqrr_entry *dqrr); ++ ++/* This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They ++ * are always consumed after the callback returns. */ ++typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq, ++ const struct qm_mr_entry *msg); ++ ++/* This callback type is used when handling DCP ERNs */ ++typedef void (*qman_cb_dc_ern)(struct qman_portal *qm, ++ const struct qm_mr_entry *msg); ++ ++/* s/w-visible states. Ie. 
tentatively scheduled + truly scheduled + active +
++ * held-active + held-suspended are just "sched". Things like "retired" will not
++ * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
++ * then, to indicate it's completing and to gate attempts to retry the retire
++ * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
++ * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
++ * index rather than the FQ that ring entry corresponds to), so repeated park
++ * commands are allowed (if you're silly enough to try) but won't change FQ
++ * state, and the resulting park notifications move FQs from "sched" to
++ * "parked". */
++enum qman_fq_state {
++ qman_fq_state_oos,
++ qman_fq_state_parked,
++ qman_fq_state_sched,
++ qman_fq_state_retired
++};
++
++/* Frame queue objects (struct qman_fq) are stored within memory passed to
++ * qman_create_fq(), as this allows stashing of caller-provided demux callback
++ * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
++ * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
++ * they should;
++ *
++ * (a) extend the qman_fq structure with their state; eg.
++ *
++ * // myfq is allocated and driver_fq callbacks filled in;
++ * struct my_fq {
++ * struct qman_fq base;
++ * int an_extra_field;
++ * [ ... add other fields to be associated with each FQ ...]
++ * } *myfq = some_my_fq_allocator();
++ * struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
++ *
++ * // in a dequeue callback, access extra fields from 'fq' via a cast;
++ * struct my_fq *myfq = (struct my_fq *)fq;
++ * do_something_with(myfq->an_extra_field);
++ * [...]
++ *
++ * (b) when and if configuring the FQ for context stashing, specify however
++ * many cachelines are required to stash 'struct my_fq', to accelerate not
++ * only the Qman driver but the callback as well.
++ */
++
++struct qman_fq_cb {
++ qman_cb_dqrr dqrr; /* for dequeued frames */
++ qman_cb_mr ern; /* for s/w ERNs */
++ qman_cb_mr fqs; /* frame-queue state changes */
++};
++
++struct qman_fq {
++ /* Caller of qman_create_fq() provides these demux callbacks */
++ struct qman_fq_cb cb;
++ /* These are internal to the driver, don't touch. In particular, they
++ * may change, be removed, or extended (so you shouldn't rely on
++ * sizeof(qman_fq) being a constant). */
++ spinlock_t fqlock;
++ u32 fqid;
++ volatile unsigned long flags;
++ enum qman_fq_state state;
++ int cgr_groupid;
++ struct rb_node node;
++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
++ u32 key;
++#endif
++};
++
++/* This callback type is used when handling congestion group entry/exit.
++ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
*/ ++typedef void (*qman_cb_cgr)(struct qman_portal *qm, ++ struct qman_cgr *cgr, int congested); ++ ++struct qman_cgr { ++ /* Set these prior to qman_create_cgr() */ ++ u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/ ++ qman_cb_cgr cb; ++ /* These are private to the driver */ ++ u16 chan; /* portal channel this object is created on */ ++ struct list_head node; ++}; ++ ++/* Flags to qman_create_fq() */ ++#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */ ++#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */ ++#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */ ++#define QMAN_FQ_FLAG_LOCKED 0x00000008 /* multi-core locking */ ++#define QMAN_FQ_FLAG_AS_IS 0x00000010 /* query h/w state */ ++#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */ ++ ++/* Flags to qman_destroy_fq() */ ++#define QMAN_FQ_DESTROY_PARKED 0x00000001 /* FQ can be parked or OOS */ ++ ++/* Flags from qman_fq_state() */ ++#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */ ++#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */ ++#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */ ++#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */ ++#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */ ++#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */ ++ ++/* Flags to qman_init_fq() */ ++#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */ ++#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */ ++ ++/* Flags to qman_volatile_dequeue() */ ++#ifdef CONFIG_FSL_DPA_CAN_WAIT ++#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */ ++#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */ ++#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */ ++#endif ++ ++/* Flags to qman_enqueue(). NB, the strange numbering is to align with hardware, ++ * bit-wise. (NB: the PME API is sensitive to these precise numberings too, so ++ * any change here should be audited in PME.) */ ++#ifdef CONFIG_FSL_DPA_CAN_WAIT ++#define QMAN_ENQUEUE_FLAG_WAIT 0x00010000 /* wait if EQCR is full */ ++#define QMAN_ENQUEUE_FLAG_WAIT_INT 0x00020000 /* if wait, interruptible? */ ++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC ++#define QMAN_ENQUEUE_FLAG_WAIT_SYNC 0x00000004 /* if wait, until consumed? */ ++#endif ++#endif ++#define QMAN_ENQUEUE_FLAG_WATCH_CGR 0x00080000 /* watch congestion state */ ++#define QMAN_ENQUEUE_FLAG_DCA 0x00008000 /* perform enqueue-DCA */ ++#define QMAN_ENQUEUE_FLAG_DCA_PARK 0x00004000 /* If DCA, requests park */ ++#define QMAN_ENQUEUE_FLAG_DCA_PTR(p) /* If DCA, p is DQRR entry */ \ ++ (((u32)(p) << 2) & 0x00000f00) ++#define QMAN_ENQUEUE_FLAG_C_GREEN 0x00000000 /* choose one C_*** flag */ ++#define QMAN_ENQUEUE_FLAG_C_YELLOW 0x00000008 ++#define QMAN_ENQUEUE_FLAG_C_RED 0x00000010 ++#define QMAN_ENQUEUE_FLAG_C_OVERRIDE 0x00000018 ++/* For the ORP-specific qman_enqueue_orp() variant; ++ * - this flag indicates "Not Last In Sequence", ie. all but the final fragment ++ * of a frame. */ ++#define QMAN_ENQUEUE_FLAG_NLIS 0x01000000 ++/* - this flag performs no enqueue but fills in an ORP sequence number that ++ * would otherwise block it (eg. if a frame has been dropped). */ ++#define QMAN_ENQUEUE_FLAG_HOLE 0x02000000 ++/* - this flag performs no enqueue but advances NESN to the given sequence ++ * number. 
*/
++#define QMAN_ENQUEUE_FLAG_NESN 0x04000000
++
++/* Flags to qman_modify_cgr() */
++#define QMAN_CGR_FLAG_USE_INIT 0x00000001
++#define QMAN_CGR_MODE_FRAME 0x00000001
++
++ /* Portal Management */
++ /* ----------------- */
++/**
++ * qman_get_portal_config - get portal configuration settings
++ *
++ * This returns a read-only view of the current cpu's affine portal settings.
++ */
++const struct qman_portal_config *qman_get_portal_config(void);
++
++/**
++ * qman_irqsource_get - return the portal work that is interrupt-driven
++ *
++ * Returns a bitmask of QM_PIRQ_**I processing sources that are currently
++ * enabled for interrupt handling on the current cpu's affine portal. These
++ * sources will trigger the portal interrupt and the interrupt handler (or a
++ * tasklet/bottom-half it defers to) will perform the corresponding processing
++ * work. The qman_poll_***() functions will only process sources that are not in
++ * this bitmask. If the current CPU is sharing a portal hosted on another CPU,
++ * this always returns zero.
++ */
++u32 qman_irqsource_get(void);
++
++/**
++ * qman_irqsource_add - add processing sources to be interrupt-driven
++ * @bits: bitmask of QM_PIRQ_**I processing sources
++ *
++ * Adds processing sources that should be interrupt-driven (rather than
++ * processed via qman_poll_***() functions). Returns zero for success, or
++ * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
++ */
++int qman_irqsource_add(u32 bits);
++
++/**
++ * qman_irqsource_remove - remove processing sources from being interrupt-driven
++ * @bits: bitmask of QM_PIRQ_**I processing sources
++ *
++ * Removes processing sources from being interrupt-driven, so that they will
++ * instead be processed via qman_poll_***() functions. Returns zero for success,
++ * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
++ */
++int qman_irqsource_remove(u32 bits);
++
++/**
++ * qman_affine_cpus - return a mask of cpus that have affine portals
++ */
++const cpumask_t *qman_affine_cpus(void);
++
++/**
++ * qman_affine_channel - return the channel ID of a portal
++ * @cpu: the cpu whose affine portal is the subject of the query
++ *
++ * If @cpu is -1, the affine portal for the current CPU will be used. It is a
++ * bug to call this function for any value of @cpu (other than -1) that is not a
++ * member of the mask returned from qman_affine_cpus().
++ */
++u16 qman_affine_channel(int cpu);
++
++/**
++ * qman_get_affine_portal - return the portal pointer affine to cpu
++ * @cpu: the cpu whose affine portal is the subject of the query
++ *
++ */
++void *qman_get_affine_portal(int cpu);
++
++/**
++ * qman_poll_dqrr - process DQRR (fast-path) entries
++ * @limit: the maximum number of DQRR entries to process
++ *
++ * Use of this function requires that DQRR processing not be interrupt-driven.
++ * Ie. the value returned by qman_irqsource_get() should not include
++ * QM_PIRQ_DQRI. If the current CPU is sharing a portal hosted on another CPU,
++ * this function will return -EINVAL, otherwise the return value is >=0 and
++ * represents the number of DQRR entries processed.
++ */
++int qman_poll_dqrr(unsigned int limit);
++
++/**
++ * qman_poll_slow - process anything (except DQRR) that isn't interrupt-driven.
++ *
++ * This function does any portal processing that isn't interrupt-driven.
If the ++ * current CPU is sharing a portal hosted on another CPU, this function will ++ * return (u32)-1, otherwise the return value is a bitmask of QM_PIRQ_* sources ++ * indicating what interrupt sources were actually processed by the call. ++ */ ++u32 qman_poll_slow(void); ++ ++/** ++ * qman_poll - legacy wrapper for qman_poll_dqrr() and qman_poll_slow() ++ * ++ * Dispatcher logic on a cpu can use this to trigger any maintenance of the ++ * affine portal. There are two classes of portal processing in question; ++ * fast-path (which involves demuxing dequeue ring (DQRR) entries and tracking ++ * enqueue ring (EQCR) consumption), and slow-path (which involves EQCR ++ * thresholds, congestion state changes, etc). This function does whatever ++ * processing is not triggered by interrupts. ++ * ++ * Note, if DQRR and some slow-path processing are poll-driven (rather than ++ * interrupt-driven) then this function uses a heuristic to determine how often ++ * to run slow-path processing - as slow-path processing introduces at least a ++ * minimum latency each time it is run, whereas fast-path (DQRR) processing is ++ * close to zero-cost if there is no work to be done. Applications can tune this ++ * behaviour themselves by using qman_poll_dqrr() and qman_poll_slow() directly ++ * rather than going via this wrapper. ++ */ ++void qman_poll(void); ++ ++/** ++ * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal ++ * ++ * Disables DQRR processing of the portal. This is reference-counted, so ++ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to ++ * truly re-enable dequeuing. ++ */ ++void qman_stop_dequeues(void); ++ ++/** ++ * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal ++ * ++ * Enables DQRR processing of the portal. This is reference-counted, so ++ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to ++ * truly re-enable dequeuing. ++ */ ++void qman_start_dequeues(void); ++ ++/** ++ * qman_static_dequeue_add - Add pool channels to the portal SDQCR ++ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n) ++ * ++ * Adds a set of pool channels to the portal's static dequeue command register ++ * (SDQCR). The requested pools are limited to those the portal has dequeue ++ * access to. ++ */ ++void qman_static_dequeue_add(u32 pools); ++ ++/** ++ * qman_static_dequeue_del - Remove pool channels from the portal SDQCR ++ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n) ++ * ++ * Removes a set of pool channels from the portal's static dequeue command ++ * register (SDQCR). The requested pools are limited to those the portal has ++ * dequeue access to. ++ */ ++void qman_static_dequeue_del(u32 pools); ++ ++/** ++ * qman_static_dequeue_get - return the portal's current SDQCR ++ * ++ * Returns the portal's current static dequeue command register (SDQCR). The ++ * entire register is returned, so if only the currently-enabled pool channels ++ * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK. ++ */ ++u32 qman_static_dequeue_get(void); ++ ++/** ++ * qman_dca - Perform a Discrete Consumption Acknowledgement ++ * @dq: the DQRR entry to be consumed ++ * @park_request: indicates whether the held-active @fq should be parked ++ * ++ * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had ++ * previously returned 'qman_cb_dqrr_defer'. 
NB, as with the other APIs, this
++ * does not take a 'portal' argument but implies the core affine portal from the
++ * cpu that is currently executing the function. For reasons of locking, this
++ * function must be called from the same CPU as that which processed the DQRR
++ * entry in the first place.
++ */
++void qman_dca(struct qm_dqrr_entry *dq, int park_request);
++
++/**
++ * qman_eqcr_is_empty - Determine if portal's EQCR is empty
++ *
++ * For use in situations where a cpu-affine caller needs to determine when all
++ * enqueues for the local portal have been processed by Qman but can't use the
++ * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
++ * The function forces tracking of EQCR consumption (which normally doesn't
++ * happen until enqueue processing needs to find space to put new enqueue
++ * commands), and returns zero if the ring still has unprocessed entries,
++ * non-zero if it is empty.
++ */
++int qman_eqcr_is_empty(void);
++
++/**
++ * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
++ * @handler: callback for processing DCP ERNs
++ * @affine: whether this handler is specific to the locally affine portal
++ *
++ * If a hardware block's interface to Qman (ie. its direct-connect portal, or
++ * DCP) is configured not to receive enqueue rejections, then any enqueues
++ * through that DCP that are rejected will be sent to a given software portal.
++ * If @affine is non-zero, then this handler will only be used for DCP ERNs
++ * received on the portal affine to the current CPU. If multiple CPUs share a
++ * portal and they all call this function, they will be setting the handler for
++ * the same portal! If @affine is zero, then this handler will be global to all
++ * portals handled by this instance of the driver. Only those portals that do
++ * not have their own affine handler will use the global handler.
++ */
++void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
++
++ /* FQ management */
++ /* ------------- */
++/**
++ * qman_create_fq - Allocates a FQ
++ * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
++ * @flags: bit-mask of QMAN_FQ_FLAG_*** options
++ * @fq: memory for storing the 'fq', with callbacks filled in
++ *
++ * Creates a frame queue object for the given @fqid, unless the
++ * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
++ * dynamically allocated (or the function fails if none are available). Once
++ * created, the caller should not touch the memory at 'fq' except as extended to
++ * adjacent memory for user-defined fields (see the definition of "struct
++ * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
++ * pre-existing frame-queues that aren't to be otherwise interfered with, it
++ * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
++ * causes the driver to honour any contextB modifications requested in the
++ * qman_init_fq() API, as this indicates the frame queue will be consumed by a
++ * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
++ * software portals, the contextB field is controlled by the driver and can't be
++ * modified by the caller. If the AS_IS flag is specified, management commands
++ * will be used to query state for frame queue @fqid and construct a frame
++ * queue object based on that, rather than assuming/requiring that it be
++ * Out of Service.
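++ *
++ * A minimal creation sketch, assuming the usual zero-for-success return
++ * convention (the callback name is illustrative, not part of this API); eg.
++ *
++ * struct qman_fq myfq = {
++ * .cb.dqrr = my_dqrr_cb, // hypothetical dequeue handler
++ * };
++ * // fqid argument is a placeholder; DYNAMIC_FQID allocates one for us
++ * if (!qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &myfq))
++ * pr_info("created FQ %u\n", qman_fq_fqid(&myfq));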
++ */
++int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
++
++/**
++ * qman_destroy_fq - Deallocates a FQ
++ * @fq: the frame queue object to release
++ * @flags: bit-mask of QMAN_FQ_DESTROY_*** options
++ *
++ * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
++ * not deallocated but the caller regains ownership, to do with as desired. The
++ * FQ must be in the 'out-of-service' state unless the QMAN_FQ_DESTROY_PARKED
++ * flag is specified, in which case it may also be in the 'parked' state.
++ */
++void qman_destroy_fq(struct qman_fq *fq, u32 flags);
++
++/**
++ * qman_fq_fqid - Queries the frame queue ID of a FQ object
++ * @fq: the frame queue object to query
++ */
++u32 qman_fq_fqid(struct qman_fq *fq);
++
++/**
++ * qman_fq_state - Queries the state of a FQ object
++ * @fq: the frame queue object to query
++ * @state: pointer to state enum to return the FQ scheduling state
++ * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
++ *
++ * Queries the state of the FQ object, without performing any h/w commands.
++ * This captures the state, as seen by the driver, at the time the function
++ * executes.
++ */
++void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
++
++/**
++ * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
++ * @fq: the frame queue object to modify, must be 'parked' or new.
++ * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
++ * @opts: the FQ-modification settings, as defined in the low-level API
++ *
++ * The @opts parameter comes from the low-level portal API. Select
++ * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
++ * rather than parked. NB, @opts can be NULL.
++ *
++ * Note that some fields and options within @opts may be ignored or overwritten
++ * by the driver;
++ * 1. the 'count' and 'fqid' fields are always ignored (this operation only
++ * affects one frame queue: @fq).
++ * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
++ * 'fqd' structure's 'context_b' field are sometimes overwritten;
++ * - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
++ * initialised to a value used by the driver for demux.
++ * - if context_b is initialised for demux, so is context_a in case stashing
++ * is requested (see item 4).
++ * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
++ * objects.)
++ * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
++ * 'dest::channel' field will be overwritten to match the portal used to issue
++ * the command. If the WE_DESTWQ write-enable bit had already been set by the
++ * caller, the channel workqueue will be left as-is, otherwise the write-enable
++ * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
++ * isn't set, the destination channel/workqueue fields and the write-enable bit
++ * are left as-is.
++ * 4. if the driver overwrites context_a/b for demux, then if
++ * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
++ * context_a.address fields and will leave the stashing fields provided by the
++ * user alone, otherwise it will zero out the context_a.stashing fields.
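++ *
++ * Eg. a minimal sketch that relies on the driver defaults described above;
++ * passing a NULL @opts with QMAN_INITFQ_FLAG_SCHED asks the driver to fill in
++ * the demux/destination fields itself and leave the FQ scheduled (the
++ * zero-for-success error convention is assumed here, not mandated);
++ *
++ * int err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, NULL);
++ * if (err)
++ * pr_err("qman_init_fq() failed: %d\n", err);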
++ */
++int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
++
++/**
++ * qman_schedule_fq - Schedules a FQ
++ * @fq: the frame queue object to schedule, must be 'parked'
++ *
++ * Schedules the frame queue, which must be Parked, which takes it to
++ * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
++ */
++int qman_schedule_fq(struct qman_fq *fq);
++
++/**
++ * qman_retire_fq - Retires a FQ
++ * @fq: the frame queue object to retire
++ * @flags: FQ flags (as per qman_fq_state) if retirement completes immediately
++ *
++ * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
++ * the retirement was started asynchronously, otherwise it returns negative for
++ * failure. When this function returns zero, @flags is set to indicate whether
++ * the retired FQ is empty and/or whether it has any ORL fragments (to show up
++ * as ERNs). Otherwise the corresponding flags will be known when a subsequent
++ * FQRN message shows up on the portal's message ring.
++ *
++ * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
++ * Active state), the completion will be via the message ring as a FQRN - but
++ * the corresponding callback may occur before this function returns!! Ie. the
++ * caller should be prepared to accept the callback as the function is called,
++ * not only once it has returned.
++ */
++int qman_retire_fq(struct qman_fq *fq, u32 *flags);
++
++/**
++ * qman_oos_fq - Puts a FQ "out of service"
++ * @fq: the frame queue object to be put out-of-service, must be 'retired'
++ *
++ * The frame queue must be retired and empty, and if any order restoration list
++ * was released as ERNs at the time of retirement, they must all be consumed.
++ */
++int qman_oos_fq(struct qman_fq *fq);
++
++/**
++ * qman_fq_flow_control - Set the XON/XOFF state of a FQ
++ * @fq: the frame queue object to be set to XON/XOFF state, must not be in the
++ * 'oos', 'retired' or 'parked' state
++ * @xon: boolean to set fq in XON or XOFF state
++ *
++ * The frame queue should be in the Tentatively Scheduled or Truly Scheduled
++ * state, otherwise the IFSI interrupt will be asserted.
++ */
++int qman_fq_flow_control(struct qman_fq *fq, int xon);
++
++/**
++ * qman_query_fq - Queries FQD fields (via h/w query command)
++ * @fq: the frame queue object to be queried
++ * @fqd: storage for the queried FQD fields
++ */
++int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
++
++/**
++ * qman_query_fq_np - Queries non-programmable FQD fields
++ * @fq: the frame queue object to be queried
++ * @np: storage for the queried FQD fields
++ */
++int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
++
++/**
++ * qman_query_wq - Queries work queue lengths
++ * @query_dedicated: If non-zero, query length of WQs in the channel dedicated
++ * to this software portal. Otherwise, query length of WQs in a
++ * channel specified in wq.
++ * @wq: storage for the queried WQs lengths. Also specifies the channel to
++ * query if query_dedicated is zero.
++ */
++int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
++
++/**
++ * qman_volatile_dequeue - Issue a volatile dequeue command
++ * @fq: the frame queue object to dequeue from
++ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
++ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
++ *
++ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
++ * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
++ * the VDQCR is already in use, otherwise returns non-zero for failure. If
++ * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
++ * the VDQCR command has finished executing (ie. once the callback for the last
++ * DQRR entry resulting from the VDQCR command has been called). If not using
++ * the FINISH flag, completion can be determined either by detecting the
++ * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
++ * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
++ * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
++ * "flags" retrieved from qman_fq_state().
++ */
++int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
++
++/**
++ * qman_enqueue - Enqueue a frame to a frame queue
++ * @fq: the frame queue object to enqueue to
++ * @fd: a descriptor of the frame to be enqueued
++ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
++ *
++ * Fills an entry in the EQCR of the current cpu's affine portal to enqueue the
++ * frame described by @fd. The descriptor details are copied from @fd to the
++ * EQCR entry, the 'pid' field is ignored. The return value is non-zero on
++ * error, such as ring full (and FLAG_WAIT not specified), congestion avoidance
++ * (FLAG_WATCH_CGR specified), etc. If the ring is full and FLAG_WAIT is
++ * specified, this function will block. If FLAG_INTERRUPT is set, the EQCI bit
++ * of the portal interrupt will assert when Qman consumes the EQCR entry
++ * (subject to "status disable", "enable", and "inhibit" registers). If
++ * FLAG_DCA is set, Qman will perform an implied "discrete consumption
++ * acknowledgement" on the dequeue ring's (DQRR) entry, at the ring index
++ * specified by the FLAG_DCA_PTR(p) macro. (As an alternative to issuing
++ * explicit DCA actions on DQRR entries, this implicit DCA can delay the
++ * release of a "held active" frame queue corresponding to a DQRR entry until
++ * Qman consumes the EQCR entry - providing order-preservation semantics in
++ * packet-forwarding scenarios.) If FLAG_DCA is set, then FLAG_DCA_PARK can
++ * also be set to imply that the DQRR consumption acknowledgement should "park
++ * request" the "held active" frame queue. Ie. when the portal eventually
++ * releases that frame queue, it will be left in the Parked state rather than
++ * Tentatively Scheduled or Truly Scheduled. If the portal is watching
++ * congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag is requested, and
++ * the FQ is a member of a congestion group, then this function returns
++ * -EAGAIN if the congestion group is currently congested. Note, this does not
++ * eliminate ERNs, as the async interface means we can be sending enqueue
++ * commands to an un-congested FQ that becomes congested before the enqueue
++ * commands are processed, but it does minimise needless thrashing of an
++ * already busy hardware resource by throttling many of the to-be-dropped
++ * enqueues "at the source".
++ */
++int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
++
++typedef int (*qman_cb_precommit) (void *arg);
++/**
++ * qman_enqueue_precommit - Enqueue a frame to a frame queue and call cb
++ * @fq: the frame queue object to enqueue to
++ * @fd: a descriptor of the frame to be enqueued
++ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
++ * @cb: user supplied callback function to invoke before writing commit verb.
++ * @cb_arg: callback function argument
++ *
++ * This is similar to qman_enqueue() except that it will invoke a user supplied
++ * callback function just before writing the commit verb. This is useful
++ * when the user wants to do something *just before* enqueuing the request and
++ * the enqueue can't fail.
++ */
++int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd,
++ u32 flags, qman_cb_precommit cb, void *cb_arg);
++
++/**
++ * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
++ * @fq: the frame queue object to enqueue to
++ * @fd: a descriptor of the frame to be enqueued
++ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
++ * @orp: the frame queue object used as an order restoration point.
++ * @orp_seqnum: the sequence number of this frame in the order restoration path
++ *
++ * Similar to qman_enqueue(), but with the addition of an Order Restoration
++ * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
++ * enqueue operation to employ order restoration. Each frame queue object acts
++ * as an Order Definition Point (ODP) by providing each frame dequeued from it
++ * with an incrementing sequence number, this value is generally ignored unless
++ * that sequence of dequeued frames will need order restoration later. Each
++ * frame queue object also encapsulates an Order Restoration Point (ORP), which
++ * is a re-assembly context for re-ordering frames relative to their sequence
++ * numbers as they are enqueued. The ORP does not have to be within the frame
++ * queue that receives the enqueued frame, in fact it is usually the frame
++ * queue from which the frames were originally dequeued. For the purposes of
++ * order restoration, multiple frames (or "fragments") can be enqueued for a
++ * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
++ * enqueues except the final fragment of a given sequence number. Ordering
++ * between sequence numbers is guaranteed, even if fragments of different
++ * sequence numbers are interlaced with one another. Fragments of the same
++ * sequence number will retain the order in which they are enqueued. If no
++ * enqueue is to be performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
++ * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been
++ * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
++ * sequence number should become the ORP's "Next Expected Sequence Number".
++ *
++ * Side note: a frame queue object can be used purely as an ORP, without
++ * carrying any frames at all. Care should be taken not to deallocate a frame
++ * queue object that is being actively used as an ORP, as a future allocation
++ * of the frame queue object may start using the internal ORP before the
++ * previous use has finished.
++ */
++int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
++ struct qman_fq *orp, u16 orp_seqnum);
++
++/**
++ * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
++ * @result: is set by the API to the base FQID of the allocated range
++ * @count: the number of FQIDs required
++ * @align: required alignment of the allocated range
++ * @partial: non-zero if the API can return fewer than @count FQIDs
++ *
++ * Returns the number of frame queues allocated, or a negative error code. If
++ * @partial is non-zero, the allocation request may return a smaller range of
++ * FQs than requested (though alignment will be as requested). If @partial is
++ * zero, the return value will either be 'count' or negative.
++ */
++int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial);
++static inline int qman_alloc_fqid(u32 *result)
++{
++ int ret = qman_alloc_fqid_range(result, 1, 0, 0);
++ return (ret > 0) ? 0 : ret;
++}
++
++/**
++ * qman_release_fqid_range - Release the specified range of frame queue IDs
++ * @fqid: the base FQID of the range to deallocate
++ * @count: the number of FQIDs in the range
++ *
++ * This function can also be used to seed the allocator with ranges of FQIDs
++ * that it can subsequently allocate from.
++ */
++void qman_release_fqid_range(u32 fqid, unsigned int count);
++static inline void qman_release_fqid(u32 fqid)
++{
++ qman_release_fqid_range(fqid, 1);
++}
++
++void qman_seed_fqid_range(u32 fqid, unsigned int count);
++
++
++int qman_shutdown_fq(u32 fqid);
++
++/**
++ * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
++ * @fqid: the base FQID of the range to reserve
++ * @count: the number of FQIDs in the range
++ */
++int qman_reserve_fqid_range(u32 fqid, unsigned int count);
++static inline int qman_reserve_fqid(u32 fqid)
++{
++ return qman_reserve_fqid_range(fqid, 1);
++}
++
++ /* Pool-channel management */
++ /* ----------------------- */
++/**
++ * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
++ * @result: is set by the API to the base pool-channel ID of the allocated range
++ * @count: the number of pool-channel IDs required
++ * @align: required alignment of the allocated range
++ * @partial: non-zero if the API can return fewer than @count
++ *
++ * Returns the number of pool-channel IDs allocated, or a negative error code.
++ * If @partial is non-zero, the allocation request may return a smaller range
++ * than requested (though alignment will be as requested). If @partial is zero,
++ * the return value will either be 'count' or negative.
++ */
++int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
++static inline int qman_alloc_pool(u32 *result)
++{
++ int ret = qman_alloc_pool_range(result, 1, 0, 0);
++ return (ret > 0) ? 0 : ret;
++}
++
++/**
++ * qman_release_pool_range - Release the specified range of pool-channel IDs
++ * @id: the base pool-channel ID of the range to deallocate
++ * @count: the number of pool-channel IDs in the range
++ */
++void qman_release_pool_range(u32 id, unsigned int count);
++static inline void qman_release_pool(u32 id)
++{
++ qman_release_pool_range(id, 1);
++}
++
++/**
++ * qman_reserve_pool_range - Reserve the specified range of pool-channel IDs
++ * @id: the base pool-channel ID of the range to reserve
++ * @count: the number of pool-channel IDs in the range
++ */
++int qman_reserve_pool_range(u32 id, unsigned int count);
++static inline int qman_reserve_pool(u32 id)
++{
++ return qman_reserve_pool_range(id, 1);
++}
++
++void qman_seed_pool_range(u32 id, unsigned int count);
++
++ /* CGR management */
++ /* -------------- */
++/**
++ * qman_create_cgr - Register a congestion group object
++ * @cgr: the 'cgr' object, with fields filled in
++ * @flags: QMAN_CGR_FLAG_* values
++ * @opts: optional state of CGR settings
++ *
++ * Registers this object to receive congestion entry/exit callbacks on the
++ * portal affine to the cpu on which this API is executed. If opts is
++ * NULL then only the callback (cgr->cb) function is registered. If @flags
++ * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
++ * any unspecified parameters) will be used rather than a modify hw command
++ * (which only modifies the specified parameters).
++ */
++int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
++ struct qm_mcc_initcgr *opts);
++
++/**
++ * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal
++ * @cgr: the 'cgr' object, with fields filled in
++ * @flags: QMAN_CGR_FLAG_* values
++ * @dcp_portal: the DCP portal to which the cgr object is registered.
++ * @opts: optional state of CGR settings
++ *
++ */
++int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
++ struct qm_mcc_initcgr *opts);
++
++/**
++ * qman_delete_cgr - Deregisters a congestion group object
++ * @cgr: the 'cgr' object to deregister
++ *
++ * "Unplugs" this CGR object from the portal affine to the cpu on which this API
++ * is executed. This must be executed on the same affine portal on which it was
++ * created.
++ */
++int qman_delete_cgr(struct qman_cgr *cgr);
++
++/**
++ * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
++ * @cgr: the 'cgr' object to deregister
++ *
++ * This will select the proper CPU and run qman_delete_cgr() there.
++ */
++void qman_delete_cgr_safe(struct qman_cgr *cgr);
++
++/**
++ * qman_modify_cgr - Modify CGR fields
++ * @cgr: the 'cgr' object to modify
++ * @flags: QMAN_CGR_FLAG_* values
++ * @opts: the CGR-modification settings
++ *
++ * The @opts parameter comes from the low-level portal API, and can be NULL.
++ * Note that some fields and options within @opts may be ignored or overwritten
++ * by the driver, in particular the 'cgrid' field is ignored (this operation
++ * only affects the given CGR object). If @flags contains
++ * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
++ * unspecified parameters) will be used rather than a modify hw command (which
++ * only modifies the specified parameters).
++ */
++int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
++ struct qm_mcc_initcgr *opts);
++
++/**
++ * qman_query_cgr - Queries CGR fields
++ * @cgr: the 'cgr' object to query
++ * @result: storage for the queried congestion group record
++ */
++int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);
++
++/**
++ * qman_query_congestion - Queries the state of all congestion groups
++ * @congestion: storage for the queried state of all congestion groups
++ */
++int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
++
++/**
++ * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
++ * @result: is set by the API to the base CGR ID of the allocated range
++ * @count: the number of CGR IDs required
++ * @align: required alignment of the allocated range
++ * @partial: non-zero if the API can return fewer than @count
++ *
++ * Returns the number of CGR IDs allocated, or a negative error code.
++ * If @partial is non-zero, the allocation request may return a smaller range
++ * than requested (though alignment will be as requested). If @partial is zero,
++ * the return value will either be 'count' or negative.
++ */
++int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
++static inline int qman_alloc_cgrid(u32 *result)
++{
++ int ret = qman_alloc_cgrid_range(result, 1, 0, 0);
++ return (ret > 0) ? 0 : ret;
++}
++
++/**
++ * qman_release_cgrid_range - Release the specified range of CGR IDs
++ * @id: the base CGR ID of the range to deallocate
++ * @count: the number of CGR IDs in the range
++ */
++void qman_release_cgrid_range(u32 id, unsigned int count);
++static inline void qman_release_cgrid(u32 id)
++{
++ qman_release_cgrid_range(id, 1);
++}
++
++/**
++ * qman_reserve_cgrid_range - Reserve the specified range of CGR IDs
++ * @id: the base CGR ID of the range to reserve
++ * @count: the number of CGR IDs in the range
++ */
++int qman_reserve_cgrid_range(u32 id, unsigned int count);
++static inline int qman_reserve_cgrid(u32 id)
++{
++ return qman_reserve_cgrid_range(id, 1);
++}
++
++void qman_seed_cgrid_range(u32 id, unsigned int count);
++
++
++ /* Helpers */
++ /* ------- */
++/**
++ * qman_poll_fq_for_init - Check if an FQ has been initialised from OOS
++ * @fq: the FQ object for the FQID that will be initialised by other s/w
++ *
++ * In many situations, a FQID is provided for communication between s/w
++ * entities, and whilst the consumer is responsible for initialising and
++ * scheduling the FQ, the producer(s) generally create a wrapper FQ object
++ * using it and only call qman_enqueue() (no FQ initialisation, scheduling,
++ * etc). Ie;
++ * qman_create_fq(..., QMAN_FQ_FLAG_NO_MODIFY, ...);
++ * However, data cannot be enqueued to the FQ until it is initialised out of
++ * the OOS state - this function polls for that condition. It is particularly
++ * useful for users of IPC functions - each endpoint's Rx FQ is the other
++ * endpoint's Tx FQ, so each side can initialise and schedule their Rx FQ object
++ * and then use this API on the (NO_MODIFY) Tx FQ object in order to
++ * synchronise. The function returns zero for success, +1 if the FQ is still in
++ * the OOS state, or negative if there was an error.
++ */
++static inline int qman_poll_fq_for_init(struct qman_fq *fq)
++{
++ struct qm_mcr_queryfq_np np;
++ int err;
++ err = qman_query_fq_np(fq, &np);
++ if (err)
++ return err;
++ if ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS)
++ return 1;
++ return 0;
++}
++
++ /* -------------- */
++ /* CEETM :: types */
++ /* -------------- */
++/**
++ * Token Rate Structure
++ * Shaping rates are based on a "credit" system and a pre-configured h/w
++ * internal timer. The following type represents a shaper "rate" parameter as a
++ * fractional number of "tokens". Here's how it works. This (fractional) number
++ * of tokens is added to the shaper's "credit" every time the h/w timer elapses
++ * (up to a limit which is set by another shaper parameter). Every time a frame
++ * is enqueued through a shaper, the shaper deducts as many tokens as there are
++ * bytes of data in the enqueued frame. A shaper will not allow itself to
++ * enqueue any frames if its token count is negative. As such;
++ *
++ * The rate at which data is enqueued is limited by the
++ * rate at which tokens are added.
++ *
++ * Therefore if the user knows the period between these h/w timer updates in
++ * seconds, they can calculate the maximum traffic rate of the shaper (in
++ * bytes-per-second) from the token rate. And vice versa, they can calculate
++ * the token rate to use in order to achieve a given traffic rate.
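++ *
++ * A worked example under an assumed (illustrative, not chip-specific) timer
++ * period: if the h/w timer elapses 1000 times per second, a token rate of
++ * 1.5 tokens per update - encoded below as whole=1, fraction=4096, since the
++ * rate is whole + (fraction/8192) - sustains at most 1.5 * 1000 = 1500
++ * bytes-per-second through the shaper.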
++ */
++struct qm_ceetm_rate {
++ /* The token rate is; whole + (fraction/8192) */
++ u32 whole:11; /* 0..2047 */
++ u32 fraction:13; /* 0..8191 */
++};
++
++struct qm_ceetm_weight_code {
++ /* The weight code is; 5 msbits + 3 lsbits */
++ u8 y:5;
++ u8 x:3;
++};
++
++struct qm_ceetm {
++ unsigned int idx;
++ struct list_head sub_portals;
++ struct list_head lnis;
++ unsigned int sp_range[2];
++ unsigned int lni_range[2];
++};
++
++struct qm_ceetm_sp {
++ struct list_head node;
++ unsigned int idx;
++ unsigned int dcp_idx;
++ int is_claimed;
++ struct qm_ceetm_lni *lni;
++};
++
++/* Logical Network Interface */
++struct qm_ceetm_lni {
++ struct list_head node;
++ unsigned int idx;
++ unsigned int dcp_idx;
++ int is_claimed;
++ struct qm_ceetm_sp *sp;
++ struct list_head channels;
++ int shaper_enable;
++ int shaper_couple;
++ int oal;
++ struct qm_ceetm_rate cr_token_rate;
++ struct qm_ceetm_rate er_token_rate;
++ u16 cr_token_bucket_limit;
++ u16 er_token_bucket_limit;
++};
++
++/* Class Queue Channel */
++struct qm_ceetm_channel {
++ struct list_head node;
++ unsigned int idx;
++ unsigned int lni_idx;
++ unsigned int dcp_idx;
++ struct list_head class_queues;
++ struct list_head ccgs;
++ u8 shaper_enable;
++ u8 shaper_couple;
++ struct qm_ceetm_rate cr_token_rate;
++ struct qm_ceetm_rate er_token_rate;
++ u16 cr_token_bucket_limit;
++ u16 er_token_bucket_limit;
++};
++
++struct qm_ceetm_ccg;
++
++/* This callback type is used when handling congestion entry/exit. The
++ * 'cb_ctx' value is the opaque value associated with the ccg object.
++ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
++ */
++typedef void (*qman_cb_ccgr)(struct qm_ceetm_ccg *ccg, void *cb_ctx,
++ int congested);
++
++/* Class Congestion Group */
++struct qm_ceetm_ccg {
++ struct qm_ceetm_channel *parent;
++ struct list_head node;
++ struct list_head cb_node;
++ qman_cb_ccgr cb;
++ void *cb_ctx;
++ unsigned int idx;
++};
++
++/* Class Queue */
++struct qm_ceetm_cq {
++ struct qm_ceetm_channel *parent;
++ struct qm_ceetm_ccg *ccg;
++ struct list_head node;
++ unsigned int idx;
++ int is_claimed;
++ struct list_head bound_lfqids;
++ struct list_head binding_node;
++};
++
++/* Logical Frame Queue */
++struct qm_ceetm_lfq {
++ struct qm_ceetm_channel *parent;
++ struct list_head node;
++ unsigned int idx;
++ unsigned int dctidx;
++ u64 context_a;
++ u32 context_b;
++ qman_cb_mr ern;
++};
++
++/**
++ * qman_ceetm_bps2tokenrate - Given a desired rate 'bps' measured in bps
++ * (ie. bits-per-second), compute the 'token_rate' fraction that best
++ * approximates that rate.
++ * @bps: the desired shaper rate in bps.
++ * @token_rate: the output token rate computed with the given bps.
++ * @rounding: dictates how to round if an exact conversion is not possible; if
++ * it is negative then 'token_rate' will round down to the highest value that
++ * does not exceed the desired rate, if it is positive then 'token_rate' will
++ * round up to the lowest value that is greater than or equal to the desired
++ * rate, and if it is zero then it will round to the nearest approximation,
++ * whether that be up or down.
++ *
++ * Return 0 for success, or -EINVAL if the prescaler or qman clock is not
++ * available.
++ */
++int qman_ceetm_bps2tokenrate(u64 bps,
++ struct qm_ceetm_rate *token_rate,
++ int rounding);
++
++/**
++ * qman_ceetm_tokenrate2bps - Given a 'token_rate', compute the
++ * corresponding number of 'bps'.
++ * @token_rate: the input desired token_rate fraction.
++ * @bps: the output shaper rate in bps computed with the given token rate.
++ * @rounding: has the same semantics as the previous function.
++ *
++ * Return 0 for success, or -EINVAL if the prescaler or qman clock is not
++ * available.
++ */
++int qman_ceetm_tokenrate2bps(const struct qm_ceetm_rate *token_rate,
++ u64 *bps,
++ int rounding);
++
++int qman_alloc_ceetm0_channel_range(u32 *result, u32 count, u32 align,
++ int partial);
++static inline int qman_alloc_ceetm0_channel(u32 *result)
++{
++ int ret = qman_alloc_ceetm0_channel_range(result, 1, 0, 0);
++ return (ret > 0) ? 0 : ret;
++}
++void qman_release_ceetm0_channel_range(u32 channelid, u32 count);
++static inline void qman_release_ceetm0_channelid(u32 channelid)
++{
++ qman_release_ceetm0_channel_range(channelid, 1);
++}
++
++int qman_reserve_ceetm0_channel_range(u32 channelid, u32 count);
++static inline int qman_reserve_ceetm0_channelid(u32 channelid)
++{
++ return qman_reserve_ceetm0_channel_range(channelid, 1);
++}
++
++void qman_seed_ceetm0_channel_range(u32 channelid, u32 count);
++
++
++int qman_alloc_ceetm1_channel_range(u32 *result, u32 count, u32 align,
++ int partial);
++static inline int qman_alloc_ceetm1_channel(u32 *result)
++{
++ int ret = qman_alloc_ceetm1_channel_range(result, 1, 0, 0);
++ return (ret > 0) ? 0 : ret;
++}
++void qman_release_ceetm1_channel_range(u32 channelid, u32 count);
++static inline void qman_release_ceetm1_channelid(u32 channelid)
++{
++ qman_release_ceetm1_channel_range(channelid, 1);
++}
++int qman_reserve_ceetm1_channel_range(u32 channelid, u32 count);
++static inline int qman_reserve_ceetm1_channelid(u32 channelid)
++{
++ return qman_reserve_ceetm1_channel_range(channelid, 1);
++}
++
++void qman_seed_ceetm1_channel_range(u32 channelid, u32 count);
++
++
++int qman_alloc_ceetm0_lfqid_range(u32 *result, u32 count, u32 align,
++ int partial);
++static inline int qman_alloc_ceetm0_lfqid(u32 *result)
++{
++ int ret = qman_alloc_ceetm0_lfqid_range(result, 1, 0, 0);
++ return (ret > 0) ? 0 : ret;
++}
++void qman_release_ceetm0_lfqid_range(u32 lfqid, u32 count);
++static inline void qman_release_ceetm0_lfqid(u32 lfqid)
++{
++ qman_release_ceetm0_lfqid_range(lfqid, 1);
++}
++int qman_reserve_ceetm0_lfqid_range(u32 lfqid, u32 count);
++static inline int qman_reserve_ceetm0_lfqid(u32 lfqid)
++{
++ return qman_reserve_ceetm0_lfqid_range(lfqid, 1);
++}
++
++void qman_seed_ceetm0_lfqid_range(u32 lfqid, u32 count);
++
++
++int qman_alloc_ceetm1_lfqid_range(u32 *result, u32 count, u32 align,
++ int partial);
++static inline int qman_alloc_ceetm1_lfqid(u32 *result)
++{
++ int ret = qman_alloc_ceetm1_lfqid_range(result, 1, 0, 0);
++ return (ret > 0) ? 0 : ret;
++}
++void qman_release_ceetm1_lfqid_range(u32 lfqid, u32 count);
++static inline void qman_release_ceetm1_lfqid(u32 lfqid)
++{
++ qman_release_ceetm1_lfqid_range(lfqid, 1);
++}
++int qman_reserve_ceetm1_lfqid_range(u32 lfqid, u32 count);
++static inline int qman_reserve_ceetm1_lfqid(u32 lfqid)
++{
++ return qman_reserve_ceetm1_lfqid_range(lfqid, 1);
++}
++
++void qman_seed_ceetm1_lfqid_range(u32 lfqid, u32 count);
++
++
++ /* ----------------------------- */
++ /* CEETM :: sub-portals */
++ /* ----------------------------- */
++
++/**
++ * qman_ceetm_sp_claim - Claims the given sub-portal, provided it is available
++ * to us and configured for traffic-management.
++ * @sp: the returned sub-portal object, if successful.
++ * @dcp_id: specifies the desired Fman block (and thus the relevant CEETM ++ * instance), ++ * @sp_idx" is the desired sub-portal index from 0 to 15. ++ * ++ * Returns zero for success, or -ENODEV if the sub-portal is in use, or -EINVAL ++ * if the sp_idx is out of range. ++ * ++ * Note that if there are multiple driver domains (eg. a linux kernel versus ++ * user-space drivers in USDPAA, or multiple guests running under a hypervisor) ++ * then a sub-portal may be accessible by more than one instance of a qman ++ * driver and so it may be claimed multiple times. If this is the case, it is ++ * up to the system architect to prevent conflicting configuration actions ++ * coming from the different driver domains. The qman drivers do not have any ++ * behind-the-scenes coordination to prevent this from happening. ++ */ ++int qman_ceetm_sp_claim(struct qm_ceetm_sp **sp, ++ enum qm_dc_portal dcp_idx, ++ unsigned int sp_idx); ++ ++/** ++ * qman_ceetm_sp_release - Releases a previously claimed sub-portal. ++ * @sp: the sub-portal to be released. ++ * ++ * Returns 0 for success, or -EBUSY for failure if the dependencies are not ++ * released. ++ */ ++int qman_ceetm_sp_release(struct qm_ceetm_sp *sp); ++ ++ /* ----------------------------------- */ ++ /* CEETM :: logical network interfaces */ ++ /* ----------------------------------- */ ++ ++/** ++ * qman_ceetm_lni_claim - Claims an unclaimed LNI. ++ * @lni: the returned LNI object, if successful. ++ * @dcp_id: specifies the desired Fman block (and thus the relevant CEETM ++ * instance) ++ * @lni_idx: is the desired LNI index. ++ * ++ * Returns zero for success, or -EINVAL on failure, which will happen if the LNI ++ * is not available or has already been claimed (and not yet successfully ++ * released), or lni_dix is out of range. ++ * ++ * Note that there may be multiple driver domains (or instances) that need to ++ * transmit out the same LNI, so this claim is only guaranteeing exclusivity ++ * within the domain of the driver being called. See qman_ceetm_sp_claim() and ++ * qman_ceetm_sp_get_lni() for more information. ++ */ ++int qman_ceetm_lni_claim(struct qm_ceetm_lni **lni, ++ enum qm_dc_portal dcp_id, ++ unsigned int lni_idx); ++ ++/** ++ * qman_ceetm_lni_releaes - Releases a previously claimed LNI. ++ * @lni: the lni needs to be released. ++ * ++ * This will only succeed if all dependent objects have been released. ++ * Returns zero for success, or -EBUSY if the dependencies are not released. ++ */ ++int qman_ceetm_lni_release(struct qm_ceetm_lni *lni); ++ ++/** ++ * qman_ceetm_sp_set_lni ++ * qman_ceetm_sp_get_lni - Set/get the LNI that the sub-portal is currently ++ * mapped to. ++ * @sp: the given sub-portal. ++ * @lni(in "set"function): the LNI object which the sp will be mappaed to. ++ * @lni_idx(in "get" function): the LNI index which the sp is mapped to. ++ * ++ * Returns zero for success, or -EINVAL for the "set" function when this sp-lni ++ * mapping has been set, or configure mapping command returns error, and ++ * -EINVAL for "get" function when this sp-lni mapping is not set or the query ++ * mapping command returns error. ++ * ++ * This may be useful in situations where multiple driver domains have access ++ * to the same sub-portals in order to all be able to transmit out the same ++ * physical interface (perhaps they're on different IP addresses or VPNs, so ++ * Fman is splitting Rx traffic and here we need to converge Tx traffic). 
++/**
++ * qman_ceetm_sp_set_lni
++ * qman_ceetm_sp_get_lni - Set/get the LNI that the sub-portal is currently
++ * mapped to.
++ * @sp: the given sub-portal.
++ * @lni (in "set" function): the LNI object which the sp will be mapped to.
++ * @lni_idx (in "get" function): the LNI index which the sp is mapped to.
++ *
++ * Returns zero for success. The "set" function returns -EINVAL if this sp-lni
++ * mapping has already been set or if the configure mapping command returns
++ * error, and the "get" function returns -EINVAL if this sp-lni mapping is not
++ * set or if the query mapping command returns error.
++ *
++ * This may be useful in situations where multiple driver domains have access
++ * to the same sub-portals in order to all be able to transmit out the same
++ * physical interface (perhaps they're on different IP addresses or VPNs, so
++ * Fman is splitting Rx traffic and here we need to converge Tx traffic). In
++ * that case, a control-plane is likely to use qman_ceetm_lni_claim() followed
++ * by qman_ceetm_sp_set_lni() to configure the sub-portal, and other domains
++ * are likely to use qman_ceetm_sp_get_lni() followed by qman_ceetm_lni_claim()
++ * in order to determine the LNI that the control-plane had assigned. This is
++ * why the "get" returns an index, whereas the "set" takes an (already claimed)
++ * LNI object.
++ */
++int qman_ceetm_sp_set_lni(struct qm_ceetm_sp *sp,
++			struct qm_ceetm_lni *lni);
++int qman_ceetm_sp_get_lni(struct qm_ceetm_sp *sp,
++			unsigned int *lni_idx);
++
++/**
++ * qman_ceetm_lni_enable_shaper
++ * qman_ceetm_lni_disable_shaper - Enables/disables shaping on the LNI.
++ * @lni: the given LNI.
++ * @coupled: indicates whether CR and ER are coupled.
++ * @oal: the overhead accounting length which is added to the actual length of
++ * each frame when performing shaper calculations.
++ *
++ * When the number of (unused) committed-rate tokens reaches the committed-rate
++ * token limit, 'coupled' indicates whether surplus tokens should be added to
++ * the excess-rate token count (up to the excess-rate token limit).
++ * When an LNI is claimed, the shaper is disabled by default. The enable
++ * function will turn on this shaper for this LNI.
++ * Whenever a claimed LNI is first enabled for shaping, its committed and
++ * excess token rates and limits are zero, so will need to be changed to do
++ * anything useful. The shaper can subsequently be enabled/disabled without
++ * resetting the shaping parameters, but the shaping parameters will be reset
++ * when the LNI is released.
++ *
++ * Returns zero for success, or errno for the "enable" function in the
++ * following cases:
++ * a) -EINVAL if the shaper is already enabled,
++ * b) -EIO if the configure shaper command returns error.
++ * For the "disable" function, returns:
++ * a) -EINVAL if the shaper has already been disabled,
++ * b) -EIO if calling the configure shaper command returns error.
++ */
++int qman_ceetm_lni_enable_shaper(struct qm_ceetm_lni *lni, int coupled,
++			int oal);
++int qman_ceetm_lni_disable_shaper(struct qm_ceetm_lni *lni);
++
++/**
++ * qman_ceetm_lni_is_shaper_enabled - Check LNI shaper status
++ * @lni: the given LNI
++ */
++int qman_ceetm_lni_is_shaper_enabled(struct qm_ceetm_lni *lni);
++
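As a sketch of the shaper calls above, assuming 'lni' was claimed as shown
earlier (the rate and token limit are arbitrary example values, and
qman_ceetm_lni_set_commit_rate_bps() is declared just below):

    int err;

    /* Enable shaping with CR/ER uncoupled and no overhead accounting. */
    err = qman_ceetm_lni_enable_shaper(lni, 0, 0);
    if (err)
        return err;
    /* Shape the LNI to a 1 Gbit/s committed rate. */
    err = qman_ceetm_lni_set_commit_rate_bps(lni, 1000000000ULL, 0x1000);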
++/**
++ * qman_ceetm_lni_set_commit_rate
++ * qman_ceetm_lni_get_commit_rate
++ * qman_ceetm_lni_set_excess_rate
++ * qman_ceetm_lni_get_excess_rate - Set/get the shaper CR/ER token rate and
++ * token limit for the given LNI.
++ * @lni: the given LNI.
++ * @token_rate: the desired token rate for "set" function, or the token rate of
++ * the LNI queried by "get" function.
++ * @token_limit: the desired token bucket limit for "set" function, or the token
++ * limit of the given LNI queried by "get" function.
++ *
++ * Returns zero for success. The "set" function returns -EINVAL if the given
++ * LNI is unshaped or -EIO if the configure shaper command returns error.
++ * The "get" function returns -EINVAL if the token rate or the token limit is
++ * not set or the query command returns error.
++ */
++int qman_ceetm_lni_set_commit_rate(struct qm_ceetm_lni *lni,
++			const struct qm_ceetm_rate *token_rate,
++			u16 token_limit);
++int qman_ceetm_lni_get_commit_rate(struct qm_ceetm_lni *lni,
++			struct qm_ceetm_rate *token_rate,
++			u16 *token_limit);
++int qman_ceetm_lni_set_excess_rate(struct qm_ceetm_lni *lni,
++			const struct qm_ceetm_rate *token_rate,
++			u16 token_limit);
++int qman_ceetm_lni_get_excess_rate(struct qm_ceetm_lni *lni,
++			struct qm_ceetm_rate *token_rate,
++			u16 *token_limit);
++/**
++ * qman_ceetm_lni_set_commit_rate_bps
++ * qman_ceetm_lni_get_commit_rate_bps
++ * qman_ceetm_lni_set_excess_rate_bps
++ * qman_ceetm_lni_get_excess_rate_bps - Set/get the shaper CR/ER rate
++ * and token limit for the given LNI.
++ * @lni: the given LNI.
++ * @bps: the desired shaping rate in bps for "set" function, or the shaping rate
++ * of the LNI queried by "get" function.
++ * @token_limit: the desired token bucket limit for "set" function, or the token
++ * limit of the given LNI queried by "get" function.
++ *
++ * Returns zero for success. The "set" function returns -EINVAL if the given
++ * LNI is unshaped or -EIO if the configure shaper command returns error.
++ * The "get" function returns -EINVAL if the token rate or the token limit is
++ * not set or the query command returns error.
++ */
++int qman_ceetm_lni_set_commit_rate_bps(struct qm_ceetm_lni *lni,
++			u64 bps,
++			u16 token_limit);
++int qman_ceetm_lni_get_commit_rate_bps(struct qm_ceetm_lni *lni,
++			u64 *bps, u16 *token_limit);
++int qman_ceetm_lni_set_excess_rate_bps(struct qm_ceetm_lni *lni,
++			u64 bps,
++			u16 token_limit);
++int qman_ceetm_lni_get_excess_rate_bps(struct qm_ceetm_lni *lni,
++			u64 *bps, u16 *token_limit);
++
++/**
++ * qman_ceetm_lni_set_tcfcc
++ * qman_ceetm_lni_get_tcfcc - Configure/query "Traffic Class Flow Control".
++ * @lni: the given LNI.
++ * @cq_level: is between 0 and 15, representing individual class queue levels
++ * (CQ0 to CQ7 for every channel) and grouped class queue levels (CQ8 to CQ15
++ * for every channel).
++ * @traffic_class: is between 0 and 7 when associating a given class queue level
++ * to a traffic class, or -1 when disabling traffic class flow control for this
++ * class queue level.
++ *
++ * Return zero for success, or -EINVAL if the cq_level or traffic_class is out
++ * of range as indicated above, or -EIO if the configure/query tcfcc command
++ * returns error.
++ *
++ * Refer to the QMan CEETM traffic class flow control section in the
++ * Reference Manual.
++ */
++int qman_ceetm_lni_set_tcfcc(struct qm_ceetm_lni *lni,
++			unsigned int cq_level,
++			int traffic_class);
++int qman_ceetm_lni_get_tcfcc(struct qm_ceetm_lni *lni,
++			unsigned int cq_level,
++			int *traffic_class);
++
++	/* ----------------------------- */
++	/* CEETM :: class queue channels */
++	/* ----------------------------- */
++
++/**
++ * qman_ceetm_channel_claim - Claims an unclaimed CQ channel that is mapped to
++ * the given LNI.
++ * @channel: the returned class queue channel object, if successful.
++ * @lni: the LNI that the channel belongs to.
++ *
++ * Channels are always initially "unshaped".
++ *
++ * Return zero for success, or -ENODEV if there is no channel available (all 32
++ * channels are claimed) or -EINVAL if the channel mapping command returns
++ * error.
++ */
++int qman_ceetm_channel_claim(struct qm_ceetm_channel **channel,
++			struct qm_ceetm_lni *lni);
++
++/**
++ * qman_ceetm_channel_release - Releases a previously claimed CQ channel.
++ * @channel: the channel to be released.
++ *
++ * Returns zero for success, or -EBUSY if the dependencies are still in use.
++ *
++ * Note any shaping of the channel will be cleared to leave it in an unshaped
++ * state.
++ */
++int qman_ceetm_channel_release(struct qm_ceetm_channel *channel);
++
++/**
++ * qman_ceetm_channel_enable_shaper
++ * qman_ceetm_channel_disable_shaper - Enables/disables shaping on the channel.
++ * @channel: the given channel.
++ * @coupled: indicates whether surplus CR tokens should be added to the
++ * excess-rate token count (up to the excess-rate token limit) when the number
++ * of (unused) committed-rate tokens reaches the committed-rate token limit.
++ *
++ * Whenever a claimed channel is first enabled for shaping, its committed and
++ * excess token rates and limits are zero, so will need to be changed to do
++ * anything useful. The shaper can subsequently be enabled/disabled without
++ * resetting the shaping parameters, but the shaping parameters will be reset
++ * when the channel is released.
++ *
++ * Return 0 for success, or -EINVAL for failure, in the case that the channel
++ * shaper has been enabled/disabled or the management command returns error.
++ */
++int qman_ceetm_channel_enable_shaper(struct qm_ceetm_channel *channel,
++			int coupled);
++int qman_ceetm_channel_disable_shaper(struct qm_ceetm_channel *channel);
++
++/**
++ * qman_ceetm_channel_is_shaper_enabled - Check channel shaper status.
++ * @channel: the given channel.
++ */
++int qman_ceetm_channel_is_shaper_enabled(struct qm_ceetm_channel *channel);
++
++/**
++ * qman_ceetm_channel_set_commit_rate
++ * qman_ceetm_channel_get_commit_rate
++ * qman_ceetm_channel_set_excess_rate
++ * qman_ceetm_channel_get_excess_rate - Set/get channel CR/ER shaper parameters.
++ * @channel: the given channel.
++ * @token_rate: the desired token rate for "set" function, or the queried token
++ * rate for "get" function.
++ * @token_limit: the desired token limit for "set" function, or the queried
++ * token limit for "get" function.
++ *
++ * Return zero for success. The "set" function returns -EINVAL if the channel
++ * is unshaped, or -EIO if the configure shaper command returns error. The
++ * "get" function returns -EINVAL if token rate or token limit is not set, or
++ * the query shaper command returns error.
++ */
++int qman_ceetm_channel_set_commit_rate(struct qm_ceetm_channel *channel,
++			const struct qm_ceetm_rate *token_rate,
++			u16 token_limit);
++int qman_ceetm_channel_get_commit_rate(struct qm_ceetm_channel *channel,
++			struct qm_ceetm_rate *token_rate,
++			u16 *token_limit);
++int qman_ceetm_channel_set_excess_rate(struct qm_ceetm_channel *channel,
++			const struct qm_ceetm_rate *token_rate,
++			u16 token_limit);
++int qman_ceetm_channel_get_excess_rate(struct qm_ceetm_channel *channel,
++			struct qm_ceetm_rate *token_rate,
++			u16 *token_limit);
++/**
++ * qman_ceetm_channel_set_commit_rate_bps
++ * qman_ceetm_channel_get_commit_rate_bps
++ * qman_ceetm_channel_set_excess_rate_bps
++ * qman_ceetm_channel_get_excess_rate_bps - Set/get channel CR/ER shaper
++ * parameters.
++ * @channel: the given channel.
++ * @bps: the desired shaper rate in bps for "set" function, or the queried
++ * shaper rate in bps for "get" function.
++ * @token_limit: the desired token limit for "set" function, or the queried
++ * token limit for "get" function.
++ *
++ * Return zero for success. The "set" function returns -EINVAL if the channel
The "set" function returns -EINVAL if the channel ++ * is unshaped, or -EIO if the configure shapper command returns error. The ++ * "get" function returns -EINVAL if token rate of token limit is not set, or ++ * the query shaper command returns error. ++ */ ++int qman_ceetm_channel_set_commit_rate_bps(struct qm_ceetm_channel *channel, ++ u64 bps, u16 token_limit); ++int qman_ceetm_channel_get_commit_rate_bps(struct qm_ceetm_channel *channel, ++ u64 *bps, u16 *token_limit); ++int qman_ceetm_channel_set_excess_rate_bps(struct qm_ceetm_channel *channel, ++ u64 bps, u16 token_limit); ++int qman_ceetm_channel_get_excess_rate_bps(struct qm_ceetm_channel *channel, ++ u64 *bps, u16 *token_limit); ++ ++/** ++ * qman_ceetm_channel_set_weight ++ * qman_ceetm_channel_get_weight - Set/get the weight for unshaped channel ++ * @channel: the given channel. ++ * @token_limit: the desired token limit as the weight of the unshaped channel ++ * for "set" function, or the queried token limit for "get" function. ++ * ++ * The algorithm of unshaped fair queuing (uFQ) is used for unshaped channel. ++ * It allows the unshaped channels to be included in the CR time eligible list, ++ * and thus use the configured CR token limit value as their fair queuing ++ * weight. ++ * ++ * Return zero for success, or -EINVAL if the channel is a shaped channel or ++ * the management command returns error. ++ */ ++int qman_ceetm_channel_set_weight(struct qm_ceetm_channel *channel, ++ u16 token_limit); ++int qman_ceetm_channel_get_weight(struct qm_ceetm_channel *channel, ++ u16 *token_limit); ++ ++/** ++ * qman_ceetm_channel_set_group ++ * qman_ceetm_channel_get_group - Set/get the grouping of the class scheduler. ++ * @channel: the given channel. ++ * @group_b: indicates whether there is group B in this channel. ++ * @prio_a: the priority of group A. ++ * @prio_b: the priority of group B. ++ * ++ * There are 8 individual class queues (CQ0-CQ7), and 8 grouped class queues ++ * (CQ8-CQ15). If 'group_b' is zero, then all the grouped class queues are in ++ * group A, otherwise they are split into group A (CQ8-11) and group B ++ * (CQ12-C15). The individual class queues and the group(s) are in strict ++ * priority order relative to each other. Within the group(s), the scheduling ++ * is not strict priority order, but the result of scheduling within a group ++ * is in strict priority order relative to the other class queues in the ++ * channel. 'prio_a' and 'prio_b' control the priority order of the groups ++ * relative to the individual class queues, and take values from 0-7. Eg. if ++ * 'group_b' is non-zero, 'prio_a' is 2 and 'prio_b' is 6, then the strict ++ * priority order would be; ++ * CQ0, CQ1, CQ2, GROUPA, CQ3, CQ4, CQ5, CQ6, GROUPB, CQ7 ++ * ++ * Return 0 for success. For "set" function, returns -EINVAL if prio_a or ++ * prio_b are out of the range 0 - 7 (priority of group A or group B can not ++ * be 0, CQ0 is always the highest class queue in this channel.), or -EIO if ++ * the configure scheduler command returns error. For "get" function, return ++ * -EINVAL if the query scheduler command returns error. 
++/**
++ * qman_ceetm_channel_set_group
++ * qman_ceetm_channel_get_group - Set/get the grouping of the class scheduler.
++ * @channel: the given channel.
++ * @group_b: indicates whether there is group B in this channel.
++ * @prio_a: the priority of group A.
++ * @prio_b: the priority of group B.
++ *
++ * There are 8 individual class queues (CQ0-CQ7), and 8 grouped class queues
++ * (CQ8-CQ15). If 'group_b' is zero, then all the grouped class queues are in
++ * group A, otherwise they are split into group A (CQ8-11) and group B
++ * (CQ12-CQ15). The individual class queues and the group(s) are in strict
++ * priority order relative to each other. Within the group(s), the scheduling
++ * is not strict priority order, but the result of scheduling within a group
++ * is in strict priority order relative to the other class queues in the
++ * channel. 'prio_a' and 'prio_b' control the priority order of the groups
++ * relative to the individual class queues, and take values from 0-7. E.g. if
++ * 'group_b' is non-zero, 'prio_a' is 2 and 'prio_b' is 6, then the strict
++ * priority order would be:
++ * CQ0, CQ1, CQ2, GROUPA, CQ3, CQ4, CQ5, CQ6, GROUPB, CQ7
++ *
++ * Return 0 for success. For "set" function, returns -EINVAL if prio_a or
++ * prio_b are out of the range 0 - 7 (priority of group A or group B cannot
++ * be 0, CQ0 is always the highest class queue in this channel.), or -EIO if
++ * the configure scheduler command returns error. For "get" function, return
++ * -EINVAL if the query scheduler command returns error.
++ */
++int qman_ceetm_channel_set_group(struct qm_ceetm_channel *channel,
++			int group_b,
++			unsigned int prio_a,
++			unsigned int prio_b);
++int qman_ceetm_channel_get_group(struct qm_ceetm_channel *channel,
++			int *group_b,
++			unsigned int *prio_a,
++			unsigned int *prio_b);
++
++/**
++ * qman_ceetm_channel_set_group_cr_eligibility
++ * qman_ceetm_channel_set_group_er_eligibility - Set channel group eligibility
++ * @channel: the given channel object
++ * @group_b: indicates whether there is group B in this channel.
++ * @cre: the commit rate eligibility, 1 for enable, 0 for disable.
++ * @ere: the excess rate eligibility, 1 for enable, 0 for disable.
++ *
++ * Return zero for success, or -EINVAL if eligibility setting fails.
++*/
++int qman_ceetm_channel_set_group_cr_eligibility(struct qm_ceetm_channel
++			*channel, int group_b, int cre);
++int qman_ceetm_channel_set_group_er_eligibility(struct qm_ceetm_channel
++			*channel, int group_b, int ere);
++
++/**
++ * qman_ceetm_channel_set_cq_cr_eligibility
++ * qman_ceetm_channel_set_cq_er_eligibility - Set channel cq eligibility
++ * @channel: the given channel object
++ * @idx: is from 0 to 7 (representing CQ0 to CQ7).
++ * @cre: the commit rate eligibility, 1 for enable, 0 for disable.
++ * @ere: the excess rate eligibility, 1 for enable, 0 for disable.
++ *
++ * Return zero for success, or -EINVAL if eligibility setting fails.
++*/
++int qman_ceetm_channel_set_cq_cr_eligibility(struct qm_ceetm_channel *channel,
++			unsigned int idx, int cre);
++int qman_ceetm_channel_set_cq_er_eligibility(struct qm_ceetm_channel *channel,
++			unsigned int idx, int ere);
++
++	/* --------------------- */
++	/* CEETM :: class queues */
++	/* --------------------- */
++
++/**
++ * qman_ceetm_cq_claim - Claims an individual class queue.
++ * @cq: the returned class queue object, if successful.
++ * @channel: the class queue channel.
++ * @idx: is from 0 to 7 (representing CQ0 to CQ7).
++ * @ccg: represents the class congestion group that this class queue should be
++ * subscribed to, or NULL if no congestion group membership is desired.
++ *
++ * Returns zero for success, or -EINVAL if @idx is out of range 0 - 7 or
++ * if this class queue has been claimed, or configure class queue command
++ * returns error, or returns -ENOMEM if allocating CQ memory fails.
++ */
++int qman_ceetm_cq_claim(struct qm_ceetm_cq **cq,
++			struct qm_ceetm_channel *channel,
++			unsigned int idx,
++			struct qm_ceetm_ccg *ccg);
++
++/**
++ * qman_ceetm_cq_claim_A - Claims a class queue in group A.
++ * @cq: the returned class queue object, if successful.
++ * @channel: the class queue channel.
++ * @idx: is from 8 to 15 if only group A exists, otherwise, it is from 8 to 11.
++ * @ccg: represents the class congestion group that this class queue should be
++ * subscribed to, or NULL if no congestion group membership is desired.
++ *
++ * Return zero for success, or -EINVAL if @idx is out of range or if
++ * this class queue has been claimed or configure class queue command returns
++ * error, or returns -ENOMEM if allocating CQ memory fails.
++ */
++int qman_ceetm_cq_claim_A(struct qm_ceetm_cq **cq,
++			struct qm_ceetm_channel *channel,
++			unsigned int idx,
++			struct qm_ceetm_ccg *ccg);
++
++/**
++ * qman_ceetm_cq_claim_B - Claims a class queue in group B.
++ * @cq: the returned class queue object, if successful.
++ * @channel: the class queue channel.
++ * @idx: is from 0 to 3 (CQ12 to CQ15).
++ * @ccg: represents the class congestion group that this class queue should be
++ * subscribed to, or NULL if no congestion group membership is desired.
++ *
++ * Return zero for success, or -EINVAL if @idx is out of range or if
++ * this class queue has been claimed or configure class queue command returns
++ * error, or returns -ENOMEM if allocating CQ memory fails.
++ */
++int qman_ceetm_cq_claim_B(struct qm_ceetm_cq **cq,
++			struct qm_ceetm_channel *channel,
++			unsigned int idx,
++			struct qm_ceetm_ccg *ccg);
++
++/**
++ * qman_ceetm_cq_release - Releases a previously claimed class queue.
++ * @cq: The class queue to be released.
++ *
++ * Return zero for success, or -EBUSY if the dependent objects (e.g. logical
++ * FQIDs) have not been released.
++ */
++int qman_ceetm_cq_release(struct qm_ceetm_cq *cq);
++
++/**
++ * qman_ceetm_set_queue_weight
++ * qman_ceetm_get_queue_weight - Configure/query the weight of a grouped class
++ * queue.
++ * @cq: the given class queue.
++ * @weight_code: the desired weight code to set for the given class queue for
++ * "set" function or the queried weight code for "get" function.
++ *
++ * Grouped class queues have a default weight code of zero, which corresponds to
++ * a scheduler weighting of 1. This function can be used to modify a grouped
++ * class queue to another weight. (Use the helpers qman_ceetm_wbfs2ratio()
++ * and qman_ceetm_ratio2wbfs() to convert between these 'weight_code' values
++ * and the corresponding sharing weight.)
++ *
++ * Returns zero for success, or -EIO if the configure weight command returns
++ * error for "set" function, or -EINVAL if the query command returns
++ * error for "get" function.
++ * See section "CEETM Weighted Scheduling among Grouped Classes" in the
++ * Reference Manual for weight and weight code.
++ */
++int qman_ceetm_set_queue_weight(struct qm_ceetm_cq *cq,
++			struct qm_ceetm_weight_code *weight_code);
++int qman_ceetm_get_queue_weight(struct qm_ceetm_cq *cq,
++			struct qm_ceetm_weight_code *weight_code);
++
++/**
++ * qman_ceetm_set_queue_weight_in_ratio
++ * qman_ceetm_get_queue_weight_in_ratio - Configure/query the weight of a
++ * grouped class queue.
++ * @cq: the given class queue.
++ * @ratio: the weight in ratio. It should be the real ratio number multiplied
++ * by 100 to get rid of the fraction.
++ *
++ * Returns zero for success, or -EIO if the configure weight command returns
++ * error for "set" function, or -EINVAL if the query command returns
++ * error for "get" function.
++ */
++int qman_ceetm_set_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 ratio);
++int qman_ceetm_get_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 *ratio);
++
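For example, to give one grouped class queue 1.5 times the bandwidth share of
a weight-1 peer, the ratio is passed scaled by 100 — a sketch, assuming 'cq'
was claimed via one of the claim functions above:

    int err;

    err = qman_ceetm_set_queue_weight_in_ratio(cq, 150); /* 1.50 * 100 */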
++/* Weights are encoded using a pseudo-exponential scheme. The weight codes 0,
++ * 32, 64, [...] correspond to weights of 1, 2, 4, [...]. The weights
++ * corresponding to intermediate weight codes are calculated using linear
++ * interpolation on the inverted values. Or put another way, the inverse weights
++ * for each 32nd weight code are 1, 1/2, 1/4, [...], and so the intervals
++ * between these are divided linearly into 32 intermediate values, the inverses
++ * of which form the remaining weight codes.
++ *
++ * The Weighted Bandwidth Fair Scheduling (WBFS) algorithm provides a form of
++ * scheduling within a group of class queues (group A or B). Weights are used to
++ * normalise the class queues to an underlying BFS algorithm where all class
++ * queues are assumed to require "equal bandwidth". So the weights referred to
++ * by the weight codes act as divisors on the size of frames being enqueued.
++ * E.g. if one class queue in a group is assigned a weight of 2 whilst the other
++ * class queues in the group keep the default weight of 1, then the WBFS
++ * scheduler will effectively treat all frames enqueued on the weight-2 class
++ * queue as having half the number of bytes they really have. I.e. if all other
++ * things are equal, that class queue would get twice the bytes-per-second
++ * bandwidth of the others. So weights should be chosen to provide bandwidth
++ * ratios between members of the same class queue group. These weights have no
++ * bearing on behaviour outside that group's WBFS mechanism though.
++ */
++
++/**
++ * qman_ceetm_wbfs2ratio - Given a weight code ('wbfs'), an accurate fractional
++ * representation of the corresponding weight is returned (in order not to lose
++ * any precision).
++ * @weight_code: The given weight code in WBFS.
++ * @numerator: the numerator part of the weight computed by the weight code.
++ * @denominator: the denominator part of the weight computed by the weight code.
++ *
++ * Returns zero for success or -EINVAL if the given weight code is illegal.
++ */
++int qman_ceetm_wbfs2ratio(struct qm_ceetm_weight_code *weight_code,
++			u32 *numerator,
++			u32 *denominator);
++/**
++ * qman_ceetm_ratio2wbfs - Given a weight, find the nearest possible weight
++ * code. If the user needs to know how close this is, convert the resulting
++ * weight code back to a weight and compare.
++ * @numerator: numerator part of the given weight.
++ * @denominator: denominator part of the given weight.
++ * @weight_code: the weight code computed from the given weight.
++ * @rounding: controls how the nearest weight code is chosen.
++ *
++ * Returns zero for success, or -ERANGE if "numerator/denominator" is outside
++ * the range of weights.
++ */
++int qman_ceetm_ratio2wbfs(u32 numerator,
++			u32 denominator,
++			struct qm_ceetm_weight_code *weight_code,
++			int rounding);
++
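A sketch of a round trip through the two helpers above, to see how closely a
requested weight of 3/2 can be honoured (the 0 passed for 'rounding' is an
assumed example value):

    struct qm_ceetm_weight_code wc;
    u32 num, den;
    int err;

    err = qman_ceetm_ratio2wbfs(3, 2, &wc, 0);
    if (!err)
        /* num/den is the weight the code actually encodes. */
        err = qman_ceetm_wbfs2ratio(&wc, &num, &den);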
++#define QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER 0x1
++/**
++ * qman_ceetm_cq_get_dequeue_statistics - Get the statistics provided by CEETM
++ * CQ counters.
++ * @cq: the given CQ object.
++ * @flags: indicates whether the statistics counter will be cleared after query.
++ * @frame_count: the number of frames counted since the counter was last
++ * cleared.
++ * @byte_count: the number of bytes in all frames that have been counted.
++ *
++ * Return zero for success or -EINVAL if query statistics command returns error.
++ *
++ */
++int qman_ceetm_cq_get_dequeue_statistics(struct qm_ceetm_cq *cq, u32 flags,
++			u64 *frame_count, u64 *byte_count);
++
++/**
++ * qman_ceetm_drain_cq - drain the CQ until it is empty.
++ * @cq: the given CQ object.
++ * Return 0 for success or -EINVAL for unsuccessful command to empty CQ.
++ */
++int qman_ceetm_drain_cq(struct qm_ceetm_cq *cq);
++
++	/* ---------------------- */
++	/* CEETM :: logical FQIDs */
++	/* ---------------------- */
++/**
++ * qman_ceetm_lfq_claim - Claims an unused logical FQID, associates it with
++ * the given class queue.
++ * @lfq: the returned lfq object, if successful.
++ * @cq: the class queue which needs to claim an LFQID.
++ *
++ * Return zero for success, or -ENODEV if no LFQID is available or -ENOMEM if
++ * allocating memory for lfq fails, or -EINVAL if configuring LFQMT fails.
++ */
++int qman_ceetm_lfq_claim(struct qm_ceetm_lfq **lfq,
++			struct qm_ceetm_cq *cq);
++
++/**
++ * qman_ceetm_lfq_release - Releases a previously claimed logical FQID.
++ * @lfq: the lfq to be released.
++ *
++ * Return zero for success.
++ */
++int qman_ceetm_lfq_release(struct qm_ceetm_lfq *lfq);
++
++/**
++ * qman_ceetm_lfq_set_context
++ * qman_ceetm_lfq_get_context - Set/get the context_a/context_b pair to the
++ * "dequeue context table" associated with the logical FQID.
++ * @lfq: the given logical FQ object.
++ * @context_a: contextA of the dequeue context.
++ * @context_b: contextB of the dequeue context.
++ *
++ * Returns zero for success, or -EINVAL if there is an error setting/getting
++ * the context pair.
++ */
++int qman_ceetm_lfq_set_context(struct qm_ceetm_lfq *lfq,
++			u64 context_a,
++			u32 context_b);
++int qman_ceetm_lfq_get_context(struct qm_ceetm_lfq *lfq,
++			u64 *context_a,
++			u32 *context_b);
++
++/**
++ * qman_ceetm_create_fq - Initialise an FQ object for the LFQ.
++ * @lfq: the given logical FQ.
++ * @fq: the FQ object created for the given logical FQ.
++ *
++ * The FQ object can be used in qman_enqueue() and qman_enqueue_orp() APIs to
++ * target a logical FQID (and the class queue it is associated with).
++ * Note that this FQ object can only be used for enqueues, and
++ * in the case of qman_enqueue_orp() it cannot be used as the 'orp' parameter,
++ * only as 'fq'. This FQ object cannot (and shouldn't) be destroyed, it is only
++ * valid as long as the underlying 'lfq' remains claimed. It is the user's
++ * responsibility to ensure that the underlying 'lfq' is not released until any
++ * enqueues to this FQ object have completed. The only field the user needs to
++ * fill in is fq->cb.ern, as that enqueue rejection handler is the callback that
++ * could conceivably be called on this FQ object. This API can be called
++ * multiple times to create multiple FQ objects referring to the same logical
++ * FQID, and any enqueue rejections will respect the callback of the object that
++ * issued the enqueue (and will identify the object via the parameter passed to
++ * the callback too). There is no 'flags' parameter to this API as there is for
++ * qman_create_fq() - the created FQ object behaves as though qman_create_fq()
++ * had been called with the single flag QMAN_FQ_FLAG_NO_MODIFY.
++ *
++ * Returns 0 for success.
++ */
++int qman_ceetm_create_fq(struct qm_ceetm_lfq *lfq, struct qman_fq *fq);
++
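Putting the LFQ calls together, a sketch of enqueuing through a claimed
logical FQID (my_ern() is a hypothetical rejection callback, 'lfq' was claimed
via qman_ceetm_lfq_claim(), and 'fd' is a frame descriptor prepared elsewhere):

    struct qman_fq fq;
    int err;

    memset(&fq, 0, sizeof(fq));
    fq.cb.ern = my_ern;	/* only callback that can fire on this object */
    err = qman_ceetm_create_fq(lfq, &fq);
    if (!err)
        err = qman_enqueue(&fq, &fd, 0);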
++	/* -------------------------------- */
++	/* CEETM :: class congestion groups */
++	/* -------------------------------- */
++
++/**
++ * qman_ceetm_ccg_claim - Claims an unused CCG.
++ * @ccg: the returned CCG object, if successful.
++ * @channel: the given class queue channel.
++ * @idx: the index of the CCG to be claimed within the channel.
++ * @cscn: the callback function of this CCG.
++ * @cb_ctx: the corresponding context to be used if state change
++ * notifications are later enabled for this CCG.
++ *
++ * The congestion group is local to the given class queue channel, so only
++ * class queues within the channel can be associated with that congestion group.
++ * The association of class queues to congestion groups occurs when the class
++ * queues are claimed, see qman_ceetm_cq_claim() and related functions.
++ * Congestion groups are in a "zero" state when initially claimed, and they are
++ * returned to that state when released.
++ *
++ * Return zero for success, or -EINVAL if no CCG in the channel is available.
++ */
++int qman_ceetm_ccg_claim(struct qm_ceetm_ccg **ccg,
++			struct qm_ceetm_channel *channel,
++			unsigned int idx,
++			void (*cscn)(struct qm_ceetm_ccg *,
++				void *cb_ctx,
++				int congested),
++			void *cb_ctx);
++
++/**
++ * qman_ceetm_ccg_release - Releases a previously claimed CCG.
++ * @ccg: the given ccg.
++ *
++ * Returns zero for success, or -EBUSY if the given ccg's dependent objects
++ * (class queues that are associated with the CCG) have not been released.
++ */
++int qman_ceetm_ccg_release(struct qm_ceetm_ccg *ccg);
++
++/* This struct is used to specify attributes for a CCG. The 'we_mask' field
++ * controls which CCG attributes are to be updated, and the remainder specify
++ * the values for those attributes. A CCG counts either frames or the bytes
++ * within those frames, but not both ('mode'). A CCG can optionally cause
++ * enqueues to be rejected, due to tail-drop or WRED, or both (they are
++ * independent options, 'td_en' and 'wr_en_g,wr_en_y,wr_en_r'). Tail-drop can be
++ * level-triggered due to a single threshold ('td_thres') or edge-triggered due
++ * to a "congestion state", but not both ('td_mode'). Congestion state has
++ * distinct entry and exit thresholds ('cs_thres_in' and 'cs_thres_out'), and
++ * notifications can be sent to software when the CCG goes into and out of this
++ * congested state ('cscn_en'). */
++struct qm_ceetm_ccg_params {
++	/* Boolean fields together in a single bitfield struct */
++	struct {
++		/* Whether to count bytes or frames. 1==frames */
++		u8 mode:1;
++		/* En/disable tail-drop. 1==enable */
++		u8 td_en:1;
++		/* Tail-drop on congestion-state or threshold. 1=threshold */
++		u8 td_mode:1;
++		/* Generate congestion state change notifications. 1==enable */
++		u8 cscn_en:1;
++		/* Enable WRED rejections (per colour). 1==enable */
++		u8 wr_en_g:1;
++		u8 wr_en_y:1;
++		u8 wr_en_r:1;
++	} __packed;
++	/* Tail-drop threshold. See qm_cgr_thres_[gs]et64(). */
++	struct qm_cgr_cs_thres td_thres;
++	/* Congestion state thresholds, for entry and exit. */
++	struct qm_cgr_cs_thres cs_thres_in;
++	struct qm_cgr_cs_thres cs_thres_out;
++	/* Overhead accounting length. Per-packet "tax", from -128 to +127 */
++	signed char oal;
++	/* Congestion state change notification for DCP portal, virtual CCGID */
++	/* WRED parameters. */
++	struct qm_cgr_wr_parm wr_parm_g;
++	struct qm_cgr_wr_parm wr_parm_y;
++	struct qm_cgr_wr_parm wr_parm_r;
++};
++/* Bits used in 'we_mask' passed to qman_ceetm_ccg_set(); they control which
++ * attributes of the CCGR are to be updated. */
++#define QM_CCGR_WE_MODE 0x0001 /* mode (bytes/frames) */
++#define QM_CCGR_WE_CS_THRES_IN 0x0002 /* congestion state entry threshold */
++#define QM_CCGR_WE_TD_EN 0x0004 /* congestion state tail-drop enable */
++#define QM_CCGR_WE_CSCN_TUPD 0x0008 /* CSCN target update */
++#define QM_CCGR_WE_CSCN_EN 0x0010 /* congestion notification enable */
++#define QM_CCGR_WE_WR_EN_R 0x0020 /* WRED enable - red */
++#define QM_CCGR_WE_WR_EN_Y 0x0040 /* WRED enable - yellow */
++#define QM_CCGR_WE_WR_EN_G 0x0080 /* WRED enable - green */
++#define QM_CCGR_WE_WR_PARM_R 0x0100 /* WRED parameters - red */
++#define QM_CCGR_WE_WR_PARM_Y 0x0200 /* WRED parameters - yellow */
++#define QM_CCGR_WE_WR_PARM_G 0x0400 /* WRED parameters - green */
++#define QM_CCGR_WE_OAL 0x0800 /* overhead accounting length */
++#define QM_CCGR_WE_CS_THRES_OUT 0x1000 /* congestion state exit threshold */
++#define QM_CCGR_WE_TD_THRES 0x2000 /* tail-drop threshold */
++#define QM_CCGR_WE_TD_MODE 0x4000 /* tail-drop mode (state/threshold) */
++#define QM_CCGR_WE_CDV 0x8000 /* cdv */
++
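A sketch of filling in qm_ceetm_ccg_params for threshold-based tail-drop,
using the we_mask bits above. The threshold value is arbitrary, and
qm_cgr_thres_set64() is the helper referenced in the struct's comments; its
exact name and signature are assumed here:

    struct qm_ceetm_ccg_params params;
    int err;

    memset(&params, 0, sizeof(params));
    params.mode = 1;    /* count frames rather than bytes */
    params.td_en = 1;   /* enable tail-drop... */
    params.td_mode = 1; /* ...triggered by the threshold, not CS */
    qm_cgr_thres_set64(&params.td_thres, 1000, 0);
    err = qman_ceetm_ccg_set(ccg, QM_CCGR_WE_MODE | QM_CCGR_WE_TD_EN |
                             QM_CCGR_WE_TD_MODE | QM_CCGR_WE_TD_THRES,
                             &params);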
++/**
++ * qman_ceetm_ccg_set
++ * qman_ceetm_ccg_get - Configure/query a subset of CCG attributes.
++ * @ccg: the given CCG object.
++ * @we_mask: the write enable mask.
++ * @params: the parameter settings for this CCG.
++ *
++ * Return 0 for success, or -EIO if configure ccg command returns error for
++ * "set" function, or -EINVAL if query ccg command returns error for "get"
++ * function.
++ */
++int qman_ceetm_ccg_set(struct qm_ceetm_ccg *ccg,
++			u16 we_mask,
++			const struct qm_ceetm_ccg_params *params);
++int qman_ceetm_ccg_get(struct qm_ceetm_ccg *ccg,
++			struct qm_ceetm_ccg_params *params);
++
++/** qman_ceetm_cscn_swp_set - Add or remove a software portal from the target
++ * mask.
++ * qman_ceetm_cscn_swp_get - Query whether a given software portal index is
++ * in the cscn target mask.
++ * @ccg: the given CCG object.
++ * @swp_idx: the index of the software portal.
++ * @cscn_enabled: 1: Set the swp to be cscn target. 0: remove the swp from
++ * the target mask.
++ * @we_mask: the write enable mask.
++ * @params: the parameter settings for this CCG.
++ *
++ * Return 0 for success, or -EINVAL if command in set/get function fails.
++ */
++int qman_ceetm_cscn_swp_set(struct qm_ceetm_ccg *ccg,
++			u16 swp_idx,
++			unsigned int cscn_enabled,
++			u16 we_mask,
++			const struct qm_ceetm_ccg_params *params);
++int qman_ceetm_cscn_swp_get(struct qm_ceetm_ccg *ccg,
++			u16 swp_idx,
++			unsigned int *cscn_enabled);
++
++/** qman_ceetm_cscn_dcp_set - Add or remove a direct connect portal from the
++ * target mask.
++ * qman_ceetm_cscn_dcp_get - Query whether a given direct connect portal index
++ * is in the cscn target mask.
++ * @ccg: the given CCG object.
++ * @dcp_idx: the index of the direct connect portal.
++ * @vcgid: congestion state change notification for dcp portal, virtual CGID.
++ * @cscn_enabled: 1: Set the dcp to be cscn target. 0: remove the dcp from
++ * the target mask.
++ * @we_mask: the write enable mask.
++ * @params: the parameter settings for this CCG.
++ *
++ * Return 0 for success, or -EINVAL if command in set/get function fails.
++ */
++int qman_ceetm_cscn_dcp_set(struct qm_ceetm_ccg *ccg,
++			u16 dcp_idx,
++			u8 vcgid,
++			unsigned int cscn_enabled,
++			u16 we_mask,
++			const struct qm_ceetm_ccg_params *params);
++int qman_ceetm_cscn_dcp_get(struct qm_ceetm_ccg *ccg,
++			u16 dcp_idx,
++			u8 *vcgid,
++			unsigned int *cscn_enabled);
++
++/**
++ * qman_ceetm_ccg_get_reject_statistics - Get the statistics provided by
++ * CEETM CCG counters.
++ * @ccg: the given CCG object.
++ * @flags: indicates whether the statistics counter will be cleared after query.
++ * @frame_count: the number of frames counted since the counter was last
++ * cleared.
++ * @byte_count: the number of bytes in all frames that have been counted.
++ *
++ * Return zero for success or -EINVAL if query statistics command returns error.
++ *
++ */
++int qman_ceetm_ccg_get_reject_statistics(struct qm_ceetm_ccg *ccg, u32 flags,
++			u64 *frame_count, u64 *byte_count);
++
++/**
++ * qman_ceetm_query_lfqmt - Query the logical frame queue mapping table
++ * @lfqid: Logical Frame Queue ID
++ * @lfqmt_query: Results of the query command
++ *
++ * Returns zero for success or -EIO if the query command returns error.
++ *
++ */
++int qman_ceetm_query_lfqmt(int lfqid,
++			struct qm_mcr_ceetm_lfqmt_query *lfqmt_query);
++
++/**
++ * qman_ceetm_query_write_statistics - Query (and optionally write) statistics
++ * @cid: Target ID (CQID or CCGRID)
++ * @dcp_idx: CEETM portal ID
++ * @command_type: One of the following:
++ * 0 = Query dequeue statistics. CID carries the CQID to be queried.
++ * 1 = Query and clear dequeue statistics.
CID carries the CQID to be queried ++ * 2 = Write dequeue statistics. CID carries the CQID to be written. ++ * 3 = Query reject statistics. CID carries the CCGRID to be queried. ++ * 4 = Query and clear reject statistics. CID carries the CCGRID to be queried ++ * 5 = Write reject statistics. CID carries the CCGRID to be written ++ * @frame_count: Frame count value to be written if this is a write command ++ * @byte_count: Bytes count value to be written if this is a write command ++ * ++ * Returns zero for success or -EIO if the query command returns error. ++ */ ++int qman_ceetm_query_write_statistics(u16 cid, enum qm_dc_portal dcp_idx, ++ u16 command_type, u64 frame_count, ++ u64 byte_count); ++ ++/** ++ * qman_set_wpm - Set waterfall power management ++ * ++ * @wpm_enable: boolean, 1 = enable wpm, 0 = disable wpm. ++ * ++ * Return 0 for success, return -ENODEV if QMan misc_cfg register is not ++ * accessible. ++ */ ++int qman_set_wpm(int wpm_enable); ++ ++/** ++ * qman_get_wpm - Query the waterfall power management setting ++ * ++ * @wpm_enable: boolean, 1 = enable wpm, 0 = disable wpm. ++ * ++ * Return 0 for success, return -ENODEV if QMan misc_cfg register is not ++ * accessible. ++ */ ++int qman_get_wpm(int *wpm_enable); ++ ++/* The below qman_p_***() variants might be called in a migration situation ++ * (e.g. cpu hotplug). They are used to continue accessing the portal that ++ * execution was affine to prior to migration. ++ * @qman_portal specifies which portal the APIs will use. ++*/ ++const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal ++ *p); ++int qman_p_irqsource_add(struct qman_portal *p, u32 bits); ++int qman_p_irqsource_remove(struct qman_portal *p, u32 bits); ++int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit); ++u32 qman_p_poll_slow(struct qman_portal *p); ++void qman_p_poll(struct qman_portal *p); ++void qman_p_stop_dequeues(struct qman_portal *p); ++void qman_p_start_dequeues(struct qman_portal *p); ++void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools); ++void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools); ++u32 qman_p_static_dequeue_get(struct qman_portal *p); ++void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq, ++ int park_request); ++int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq, ++ u32 flags __maybe_unused, u32 vdqcr); ++int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq, ++ const struct qm_fd *fd, u32 flags); ++int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq, ++ const struct qm_fd *fd, u32 flags, ++ struct qman_fq *orp, u16 orp_seqnum); ++int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq, ++ const struct qm_fd *fd, u32 flags, ++ qman_cb_precommit cb, void *cb_arg); ++#ifdef __cplusplus ++} ++#endif ++ ++#endif /* FSL_QMAN_H */ +--- /dev/null ++++ b/include/linux/fsl_usdpaa.h +@@ -0,0 +1,372 @@ ++/* Copyright 2011-2012 Freescale Semiconductor, Inc. ++ * ++ * This file is licensed under the terms of the GNU General Public License ++ * version 2. This program is licensed "as is" without any warranty of any ++ * kind, whether express or implied. 
++ */ ++ ++#ifndef FSL_USDPAA_H ++#define FSL_USDPAA_H ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++#include ++#include ++#include /* For "enum qm_channel" */ ++#include ++ ++#ifdef CONFIG_FSL_USDPAA ++ ++/******************************/ ++/* Allocation of resource IDs */ ++/******************************/ ++ ++/* This enum is used to distinguish between the type of underlying object being ++ * manipulated. */ ++enum usdpaa_id_type { ++ usdpaa_id_fqid, ++ usdpaa_id_bpid, ++ usdpaa_id_qpool, ++ usdpaa_id_cgrid, ++ usdpaa_id_ceetm0_lfqid, ++ usdpaa_id_ceetm0_channelid, ++ usdpaa_id_ceetm1_lfqid, ++ usdpaa_id_ceetm1_channelid, ++ usdpaa_id_max /* <-- not a valid type, represents the number of types */ ++}; ++#define USDPAA_IOCTL_MAGIC 'u' ++struct usdpaa_ioctl_id_alloc { ++ uint32_t base; /* Return value, the start of the allocated range */ ++ enum usdpaa_id_type id_type; /* what kind of resource(s) to allocate */ ++ uint32_t num; /* how many IDs to allocate (and return value) */ ++ uint32_t align; /* must be a power of 2, 0 is treated like 1 */ ++ int partial; /* whether to allow less than 'num' */ ++}; ++struct usdpaa_ioctl_id_release { ++ /* Input; */ ++ enum usdpaa_id_type id_type; ++ uint32_t base; ++ uint32_t num; ++}; ++struct usdpaa_ioctl_id_reserve { ++ enum usdpaa_id_type id_type; ++ uint32_t base; ++ uint32_t num; ++}; ++ ++ ++/* ioctl() commands */ ++#define USDPAA_IOCTL_ID_ALLOC \ ++ _IOWR(USDPAA_IOCTL_MAGIC, 0x01, struct usdpaa_ioctl_id_alloc) ++#define USDPAA_IOCTL_ID_RELEASE \ ++ _IOW(USDPAA_IOCTL_MAGIC, 0x02, struct usdpaa_ioctl_id_release) ++#define USDPAA_IOCTL_ID_RESERVE \ ++ _IOW(USDPAA_IOCTL_MAGIC, 0x0A, struct usdpaa_ioctl_id_reserve) ++ ++/**********************/ ++/* Mapping DMA memory */ ++/**********************/ ++ ++/* Maximum length for a map name, including NULL-terminator */ ++#define USDPAA_DMA_NAME_MAX 16 ++/* Flags for requesting DMA maps. Maps are private+unnamed or sharable+named. ++ * For a sharable and named map, specify _SHARED (whether creating one or ++ * binding to an existing one). If _SHARED is specified and _CREATE is not, then ++ * the mapping must already exist. If _SHARED and _CREATE are specified and the ++ * mapping doesn't already exist, it will be created. If _SHARED and _CREATE are ++ * specified and the mapping already exists, the mapping will fail unless _LAZY ++ * is specified. When mapping to a pre-existing sharable map, the length must be ++ * an exact match. Lengths must be a power-of-4 multiple of page size. ++ * ++ * Note that this does not actually map the memory to user-space, that is done ++ * by a subsequent mmap() using the page offset returned from this ioctl(). The ++ * ioctl() is what gives the process permission to do this, and a page-offset ++ * with which to do so. ++ */ ++#define USDPAA_DMA_FLAG_SHARE 0x01 ++#define USDPAA_DMA_FLAG_CREATE 0x02 ++#define USDPAA_DMA_FLAG_LAZY 0x04 ++#define USDPAA_DMA_FLAG_RDONLY 0x08 ++struct usdpaa_ioctl_dma_map { ++ /* Output parameters - virtual and physical addresses */ ++ void *ptr; ++ uint64_t phys_addr; ++ /* Input parameter, the length of the region to be created (or if ++ * mapping an existing region, this must match it). Must be a power-of-4 ++ * multiple of page size. */ ++ uint64_t len; ++ /* Input parameter, the USDPAA_DMA_FLAG_* settings. */ ++ uint32_t flags; ++ /* If _FLAG_SHARE is specified, the name of the region to be created (or ++ * of the existing mapping to use). 
*/
++	char name[USDPAA_DMA_NAME_MAX];
++	/* If this ioctl() creates the mapping, this is an input parameter
++	 * stating whether the region supports locking. If mapping an existing
++	 * region, this is a return value indicating the same thing. */
++	int has_locking;
++	/* In the case of a successful map with _CREATE and _LAZY, this return
++	 * value indicates whether we created the mapped region or whether it
++	 * already existed. */
++	int did_create;
++};
++
++#ifdef CONFIG_COMPAT
++struct usdpaa_ioctl_dma_map_compat {
++	/* Output parameters - virtual and physical addresses */
++	compat_uptr_t ptr;
++	uint64_t phys_addr;
++	/* Input parameter, the length of the region to be created (or if
++	 * mapping an existing region, this must match it). Must be a power-of-4
++	 * multiple of page size. */
++	uint64_t len;
++	/* Input parameter, the USDPAA_DMA_FLAG_* settings. */
++	uint32_t flags;
++	/* If _FLAG_SHARE is specified, the name of the region to be created (or
++	 * of the existing mapping to use). */
++	char name[USDPAA_DMA_NAME_MAX];
++	/* If this ioctl() creates the mapping, this is an input parameter
++	 * stating whether the region supports locking. If mapping an existing
++	 * region, this is a return value indicating the same thing. */
++	int has_locking;
++	/* In the case of a successful map with _CREATE and _LAZY, this return
++	 * value indicates whether we created the mapped region or whether it
++	 * already existed. */
++	int did_create;
++};
++
++#define USDPAA_IOCTL_DMA_MAP_COMPAT \
++	_IOWR(USDPAA_IOCTL_MAGIC, 0x03, struct usdpaa_ioctl_dma_map_compat)
++#endif
++
++
++#define USDPAA_IOCTL_DMA_MAP \
++	_IOWR(USDPAA_IOCTL_MAGIC, 0x03, struct usdpaa_ioctl_dma_map)
++/* munmap() does not remove the DMA map, just the user-space mapping to it.
++ * This ioctl will do both (though you can munmap() before calling the ioctl
++ * too). */
++#define USDPAA_IOCTL_DMA_UNMAP \
++	_IOW(USDPAA_IOCTL_MAGIC, 0x04, unsigned char)
++/* We implement a cross-process locking scheme per DMA map. Call this ioctl()
++ * with a mmap()'d address, and the process will (interruptibly) sleep if the
++ * lock is already held by another process. Process destruction will
++ * automatically clean up any held locks. */
++#define USDPAA_IOCTL_DMA_LOCK \
++	_IOW(USDPAA_IOCTL_MAGIC, 0x05, unsigned char)
++#define USDPAA_IOCTL_DMA_UNLOCK \
++	_IOW(USDPAA_IOCTL_MAGIC, 0x06, unsigned char)
++
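From user space, the DMA ioctls above are used roughly as follows — a sketch,
assuming the USDPAA character device is exposed at /dev/fsl-usdpaa (the device
path and map name are illustrative, not confirmed by this patch):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>

    struct usdpaa_ioctl_dma_map map;
    int fd = open("/dev/fsl-usdpaa", O_RDWR);

    memset(&map, 0, sizeof(map));
    map.len = 4 * 4096;	/* power-of-4 multiple of page size */
    map.flags = USDPAA_DMA_FLAG_SHARE | USDPAA_DMA_FLAG_CREATE;
    strncpy(map.name, "demo", USDPAA_DMA_NAME_MAX);
    if (fd >= 0 && ioctl(fd, USDPAA_IOCTL_DMA_MAP, &map) == 0) {
        /* map.ptr/map.phys_addr are now populated; per the comment
         * above, an mmap() using the returned page offset is still
         * needed to actually access the region. */
    }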
++/***************************************/
++/* Mapping and using QMan/BMan portals */
++/***************************************/
++enum usdpaa_portal_type {
++	usdpaa_portal_qman,
++	usdpaa_portal_bman,
++};
++
++#define QBMAN_ANY_PORTAL_IDX 0xffffffff
++
++struct usdpaa_ioctl_portal_map {
++	/* Input parameter, is a qman or bman portal required. */
++	enum usdpaa_portal_type type;
++	/* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
++	   for don't care. The portal index will be populated by the
++	   driver when the ioctl() successfully completes */
++	uint32_t index;
++
++	/* Return value if the map succeeds, this gives the mapped
++	 * cache-inhibited (cinh) and cache-enabled (cena) addresses. */
++	struct usdpaa_portal_map {
++		void *cinh;
++		void *cena;
++	} addr;
++	/* Qman-specific return values */
++	uint16_t channel;
++	uint32_t pools;
++};
++
++#ifdef CONFIG_COMPAT
++struct compat_usdpaa_ioctl_portal_map {
++	/* Input parameter, is a qman or bman portal required. */
++	enum usdpaa_portal_type type;
++	/* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
++	   for don't care. The portal index will be populated by the
++	   driver when the ioctl() successfully completes */
++	uint32_t index;
++	/* Return value if the map succeeds, this gives the mapped
++	 * cache-inhibited (cinh) and cache-enabled (cena) addresses. */
++	struct usdpaa_portal_map_compat {
++		compat_uptr_t cinh;
++		compat_uptr_t cena;
++	} addr;
++	/* Qman-specific return values */
++	uint16_t channel;
++	uint32_t pools;
++};
++#define USDPAA_IOCTL_PORTAL_MAP_COMPAT \
++	_IOWR(USDPAA_IOCTL_MAGIC, 0x07, struct compat_usdpaa_ioctl_portal_map)
++#define USDPAA_IOCTL_PORTAL_UNMAP_COMPAT \
++	_IOW(USDPAA_IOCTL_MAGIC, 0x08, struct usdpaa_portal_map_compat)
++#endif
++
++#define USDPAA_IOCTL_PORTAL_MAP \
++	_IOWR(USDPAA_IOCTL_MAGIC, 0x07, struct usdpaa_ioctl_portal_map)
++#define USDPAA_IOCTL_PORTAL_UNMAP \
++	_IOW(USDPAA_IOCTL_MAGIC, 0x08, struct usdpaa_portal_map)
++
++struct usdpaa_ioctl_irq_map {
++	enum usdpaa_portal_type type; /* Type of portal to map */
++	int fd; /* File descriptor that contains the portal */
++	void *portal_cinh; /* Cache inhibited area to identify the portal */
++};
++
++#define USDPAA_IOCTL_PORTAL_IRQ_MAP \
++	_IOW(USDPAA_IOCTL_MAGIC, 0x09, struct usdpaa_ioctl_irq_map)
++
++#ifdef CONFIG_COMPAT
++
++struct compat_ioctl_irq_map {
++	enum usdpaa_portal_type type; /* Type of portal to map */
++	compat_int_t fd; /* File descriptor that contains the portal */
++	compat_uptr_t portal_cinh; /* Used to identify the portal */
++};
++
++#define USDPAA_IOCTL_PORTAL_IRQ_MAP_COMPAT \
++	_IOW(USDPAA_IOCTL_MAGIC, 0x09, struct compat_ioctl_irq_map)
++#endif
++
++/* ioctl to query the amount of DMA memory used in the system */
++struct usdpaa_ioctl_dma_used {
++	uint64_t free_bytes;
++	uint64_t total_bytes;
++};
++#define USDPAA_IOCTL_DMA_USED \
++	_IOR(USDPAA_IOCTL_MAGIC, 0x0B, struct usdpaa_ioctl_dma_used)
++
++/* ioctl to allocate a raw portal */
++struct usdpaa_ioctl_raw_portal {
++	/* inputs */
++	enum usdpaa_portal_type type; /* Type of portal to allocate */
++
++	/* set to non-zero to turn on stashing */
++	uint8_t enable_stash;
++	/* Stashing attributes for the portal */
++	uint32_t cpu;
++	uint32_t cache;
++	uint32_t window;
++
++	/* Specifies the stash request queue this portal should use */
++	uint8_t sdest;
++
++	/* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
++	 * for don't care. The portal index will be populated by the
++	 * driver when the ioctl() successfully completes */
++	uint32_t index;
++
++	/* outputs */
++	uint64_t cinh;
++	uint64_t cena;
++};
++
++#define USDPAA_IOCTL_ALLOC_RAW_PORTAL \
++	_IOWR(USDPAA_IOCTL_MAGIC, 0x0C, struct usdpaa_ioctl_raw_portal)
++
++#define USDPAA_IOCTL_FREE_RAW_PORTAL \
++	_IOR(USDPAA_IOCTL_MAGIC, 0x0D, struct usdpaa_ioctl_raw_portal)
++
++#ifdef CONFIG_COMPAT
++
++struct compat_ioctl_raw_portal {
++	/* inputs */
++	enum usdpaa_portal_type type; /* Type of portal to allocate */
++
++	/* set to non-zero to turn on stashing */
++	uint8_t enable_stash;
++	/* Stashing attributes for the portal */
++	uint32_t cpu;
++	uint32_t cache;
++	uint32_t window;
++	/* Specifies the stash request queue this portal should use */
++	uint8_t sdest;
++
++	/* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
++	 * for don't care. The portal index will be populated by the
++	 * driver when the ioctl() successfully completes */
++	uint32_t index;
++
++	/* outputs */
++	uint64_t cinh;
++	uint64_t cena;
++};
++
++#define USDPAA_IOCTL_ALLOC_RAW_PORTAL_COMPAT \
++	_IOWR(USDPAA_IOCTL_MAGIC, 0x0C, struct compat_ioctl_raw_portal)
++
++#define USDPAA_IOCTL_FREE_RAW_PORTAL_COMPAT \
++	_IOR(USDPAA_IOCTL_MAGIC, 0x0D, struct compat_ioctl_raw_portal)
++
++#endif
++
++#ifdef __KERNEL__
++
++/* Early-boot hook */
++int __init fsl_usdpaa_init_early(void);
++
++/* Fault-handling in arch/powerpc/mm/mem.c gives USDPAA an opportunity to detect
++ * faults within its ranges via this hook. */
++int usdpaa_test_fault(unsigned long pfn, u64 *phys_addr, u64 *size);
++
++#endif /* __KERNEL__ */
++
++#endif /* CONFIG_FSL_USDPAA */
++
++#ifdef __KERNEL__
++/* This interface is needed in a few places and though it's not specific to
++ * USDPAA as such, creating a new header for it doesn't make any sense. The
++ * qbman kernel driver implements this interface and uses it as the backend for
++ * both the FQID and BPID allocators. The fsl_usdpaa driver also uses this
++ * interface for tracking per-process allocations handed out to user-space. */
++struct dpa_alloc {
++	struct list_head free;
++	spinlock_t lock;
++	struct list_head used;
++};
++#define DECLARE_DPA_ALLOC(name) \
++	struct dpa_alloc name = { \
++		.free = { \
++			.prev = &name.free, \
++			.next = &name.free \
++		}, \
++		.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
++		.used = { \
++			.prev = &name.used, \
++			.next = &name.used \
++		} \
++	}
++static inline void dpa_alloc_init(struct dpa_alloc *alloc)
++{
++	INIT_LIST_HEAD(&alloc->free);
++	INIT_LIST_HEAD(&alloc->used);
++	spin_lock_init(&alloc->lock);
++}
++int dpa_alloc_new(struct dpa_alloc *alloc, u32 *result, u32 count, u32 align,
++		  int partial);
++void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count);
++void dpa_alloc_seed(struct dpa_alloc *alloc, u32 fqid, u32 count);
++
++/* Like 'new' but specifies the desired range; returns -ENOMEM if the entire
++ * desired range is not available, or 0 for success. */
++int dpa_alloc_reserve(struct dpa_alloc *alloc, u32 base_id, u32 count);
++/* Pops and returns contiguous ranges from the allocator. Returns -ENOMEM when
++ * 'alloc' is empty. */
++int dpa_alloc_pop(struct dpa_alloc *alloc, u32 *result, u32 *count);
++/* Returns 1 if the specified id is allocated, 0 otherwise */
++int dpa_alloc_check(struct dpa_alloc *list, u32 id);
++#endif /* __KERNEL__ */
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* FSL_USDPAA_H */
+--- /dev/null
++++ b/include/uapi/linux/fmd/Kbuild
@@ -0,0 +1,5 @@
+header-y += integrations/
diff --git a/target/linux/layerscape/patches-4.9/702-pci-support-layerscape.patch b/target/linux/layerscape/patches-4.9/702-pci-support-layerscape.patch
new file mode 100644
index 000000000..4e3b139b4
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/702-pci-support-layerscape.patch
@@ -0,0 +1,2036 @@
+From c4813da334b0c31e9c55eea015f1e898e84ff45b Mon Sep 17 00:00:00 2001
+From: Yangbo Lu
+Date: Mon, 25 Sep 2017 11:04:10 +0800
+Subject: [PATCH] pci: support layerscape
+
+This is an integrated patch for layerscape pcie support.
+ +Signed-off-by: Po Liu +Signed-off-by: Liu Gang +Signed-off-by: Minghuan Lian +Signed-off-by: hongbo.wang +Signed-off-by: Bjorn Helgaas +Signed-off-by: Hou Zhiqiang +Signed-off-by: Mingkai Hu +Signed-off-by: Christoph Hellwig +Signed-off-by: Yangbo Lu +--- + drivers/irqchip/irq-ls-scfg-msi.c | 256 +++++++-- + drivers/pci/host/Makefile | 2 +- + drivers/pci/host/pci-layerscape-ep-debugfs.c | 758 +++++++++++++++++++++++++++ + drivers/pci/host/pci-layerscape-ep.c | 309 +++++++++++ + drivers/pci/host/pci-layerscape-ep.h | 115 ++++ + drivers/pci/host/pci-layerscape.c | 37 +- + drivers/pci/host/pcie-designware.c | 6 + + drivers/pci/host/pcie-designware.h | 1 + + drivers/pci/pcie/portdrv_core.c | 181 +++---- + include/linux/pci.h | 1 + + 10 files changed, 1518 insertions(+), 148 deletions(-) + create mode 100644 drivers/pci/host/pci-layerscape-ep-debugfs.c + create mode 100644 drivers/pci/host/pci-layerscape-ep.c + create mode 100644 drivers/pci/host/pci-layerscape-ep.h + +--- a/drivers/irqchip/irq-ls-scfg-msi.c ++++ b/drivers/irqchip/irq-ls-scfg-msi.c +@@ -17,13 +17,32 @@ + #include + #include + #include ++#include + #include + #include + #include + +-#define MSI_MAX_IRQS 32 +-#define MSI_IBS_SHIFT 3 +-#define MSIR 4 ++#define MSI_IRQS_PER_MSIR 32 ++#define MSI_MSIR_OFFSET 4 ++ ++#define MSI_LS1043V1_1_IRQS_PER_MSIR 8 ++#define MSI_LS1043V1_1_MSIR_OFFSET 0x10 ++ ++struct ls_scfg_msi_cfg { ++ u32 ibs_shift; /* Shift of interrupt bit select */ ++ u32 msir_irqs; /* The irq number per MSIR */ ++ u32 msir_base; /* The base address of MSIR */ ++}; ++ ++struct ls_scfg_msir { ++ struct ls_scfg_msi *msi_data; ++ unsigned int index; ++ unsigned int gic_irq; ++ unsigned int bit_start; ++ unsigned int bit_end; ++ unsigned int srs; /* Shared interrupt register select */ ++ void __iomem *reg; ++}; + + struct ls_scfg_msi { + spinlock_t lock; +@@ -32,8 +51,11 @@ struct ls_scfg_msi { + struct irq_domain *msi_domain; + void __iomem *regs; + phys_addr_t msiir_addr; +- int irq; +- DECLARE_BITMAP(used, MSI_MAX_IRQS); ++ struct ls_scfg_msi_cfg *cfg; ++ u32 msir_num; ++ struct ls_scfg_msir *msir; ++ u32 irqs_num; ++ unsigned long *used; + }; + + static struct irq_chip ls_scfg_msi_irq_chip = { +@@ -49,19 +71,56 @@ static struct msi_domain_info ls_scfg_ms + .chip = &ls_scfg_msi_irq_chip, + }; + ++static int msi_affinity_flag = 1; ++ ++static int __init early_parse_ls_scfg_msi(char *p) ++{ ++ if (p && strncmp(p, "no-affinity", 11) == 0) ++ msi_affinity_flag = 0; ++ else ++ msi_affinity_flag = 1; ++ ++ return 0; ++} ++early_param("lsmsi", early_parse_ls_scfg_msi); ++ + static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) + { + struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data); + + msg->address_hi = upper_32_bits(msi_data->msiir_addr); + msg->address_lo = lower_32_bits(msi_data->msiir_addr); +- msg->data = data->hwirq << MSI_IBS_SHIFT; ++ msg->data = data->hwirq; ++ ++ if (msi_affinity_flag) ++ msg->data |= cpumask_first(data->common->affinity); + } + + static int ls_scfg_msi_set_affinity(struct irq_data *irq_data, + const struct cpumask *mask, bool force) + { +- return -EINVAL; ++ struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(irq_data); ++ u32 cpu; ++ ++ if (!msi_affinity_flag) ++ return -EINVAL; ++ ++ if (!force) ++ cpu = cpumask_any_and(mask, cpu_online_mask); ++ else ++ cpu = cpumask_first(mask); ++ ++ if (cpu >= msi_data->msir_num) ++ return -EINVAL; ++ ++ if (msi_data->msir[cpu].gic_irq <= 0) { ++ pr_warn("cannot bind the irq to cpu%d\n", cpu); ++ return -EINVAL; 
++ } ++ ++ cpumask_copy(irq_data->common->affinity, mask); ++ ++ return IRQ_SET_MASK_OK; + } + + static struct irq_chip ls_scfg_msi_parent_chip = { +@@ -81,8 +140,8 @@ static int ls_scfg_msi_domain_irq_alloc( + WARN_ON(nr_irqs != 1); + + spin_lock(&msi_data->lock); +- pos = find_first_zero_bit(msi_data->used, MSI_MAX_IRQS); +- if (pos < MSI_MAX_IRQS) ++ pos = find_first_zero_bit(msi_data->used, msi_data->irqs_num); ++ if (pos < msi_data->irqs_num) + __set_bit(pos, msi_data->used); + else + err = -ENOSPC; +@@ -106,7 +165,7 @@ static void ls_scfg_msi_domain_irq_free( + int pos; + + pos = d->hwirq; +- if (pos < 0 || pos >= MSI_MAX_IRQS) { ++ if (pos < 0 || pos >= msi_data->irqs_num) { + pr_err("failed to teardown msi. Invalid hwirq %d\n", pos); + return; + } +@@ -123,15 +182,22 @@ static const struct irq_domain_ops ls_sc + + static void ls_scfg_msi_irq_handler(struct irq_desc *desc) + { +- struct ls_scfg_msi *msi_data = irq_desc_get_handler_data(desc); ++ struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc); ++ struct ls_scfg_msi *msi_data = msir->msi_data; + unsigned long val; +- int pos, virq; ++ int pos, size, virq, hwirq; + + chained_irq_enter(irq_desc_get_chip(desc), desc); + +- val = ioread32be(msi_data->regs + MSIR); +- for_each_set_bit(pos, &val, MSI_MAX_IRQS) { +- virq = irq_find_mapping(msi_data->parent, (31 - pos)); ++ val = ioread32be(msir->reg); ++ ++ pos = msir->bit_start; ++ size = msir->bit_end + 1; ++ ++ for_each_set_bit_from(pos, &val, size) { ++ hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) | ++ msir->srs; ++ virq = irq_find_mapping(msi_data->parent, hwirq); + if (virq) + generic_handle_irq(virq); + } +@@ -143,7 +209,7 @@ static int ls_scfg_msi_domains_init(stru + { + /* Initialize MSI domain parent */ + msi_data->parent = irq_domain_add_linear(NULL, +- MSI_MAX_IRQS, ++ msi_data->irqs_num, + &ls_scfg_msi_domain_ops, + msi_data); + if (!msi_data->parent) { +@@ -164,16 +230,117 @@ static int ls_scfg_msi_domains_init(stru + return 0; + } + ++static int ls_scfg_msi_setup_hwirq(struct ls_scfg_msi *msi_data, int index) ++{ ++ struct ls_scfg_msir *msir; ++ int virq, i, hwirq; ++ ++ virq = platform_get_irq(msi_data->pdev, index); ++ if (virq <= 0) ++ return -ENODEV; ++ ++ msir = &msi_data->msir[index]; ++ msir->index = index; ++ msir->msi_data = msi_data; ++ msir->gic_irq = virq; ++ msir->reg = msi_data->regs + msi_data->cfg->msir_base + 4 * index; ++ ++ if (msi_data->cfg->msir_irqs == MSI_LS1043V1_1_IRQS_PER_MSIR) { ++ msir->bit_start = 32 - ((msir->index + 1) * ++ MSI_LS1043V1_1_IRQS_PER_MSIR); ++ msir->bit_end = msir->bit_start + ++ MSI_LS1043V1_1_IRQS_PER_MSIR - 1; ++ } else { ++ msir->bit_start = 0; ++ msir->bit_end = msi_data->cfg->msir_irqs - 1; ++ } ++ ++ irq_set_chained_handler_and_data(msir->gic_irq, ++ ls_scfg_msi_irq_handler, ++ msir); ++ ++ if (msi_affinity_flag) { ++ /* Associate MSIR interrupt to the cpu */ ++ irq_set_affinity(msir->gic_irq, get_cpu_mask(index)); ++ msir->srs = 0; /* This value is determined by the CPU */ ++ } else ++ msir->srs = index; ++ ++ /* Release the hwirqs corresponding to this MSIR */ ++ if (!msi_affinity_flag || msir->index == 0) { ++ for (i = 0; i < msi_data->cfg->msir_irqs; i++) { ++ hwirq = i << msi_data->cfg->ibs_shift | msir->index; ++ bitmap_clear(msi_data->used, hwirq, 1); ++ } ++ } ++ ++ return 0; ++} ++ ++static int ls_scfg_msi_teardown_hwirq(struct ls_scfg_msir *msir) ++{ ++ struct ls_scfg_msi *msi_data = msir->msi_data; ++ int i, hwirq; ++ ++ if (msir->gic_irq > 0) ++ 
irq_set_chained_handler_and_data(msir->gic_irq, NULL, NULL); ++ ++ for (i = 0; i < msi_data->cfg->msir_irqs; i++) { ++ hwirq = i << msi_data->cfg->ibs_shift | msir->index; ++ bitmap_set(msi_data->used, hwirq, 1); ++ } ++ ++ return 0; ++} ++ ++static struct ls_scfg_msi_cfg ls1021_msi_cfg = { ++ .ibs_shift = 3, ++ .msir_irqs = MSI_IRQS_PER_MSIR, ++ .msir_base = MSI_MSIR_OFFSET, ++}; ++ ++static struct ls_scfg_msi_cfg ls1046_msi_cfg = { ++ .ibs_shift = 2, ++ .msir_irqs = MSI_IRQS_PER_MSIR, ++ .msir_base = MSI_MSIR_OFFSET, ++}; ++ ++static struct ls_scfg_msi_cfg ls1043_v1_1_msi_cfg = { ++ .ibs_shift = 2, ++ .msir_irqs = MSI_LS1043V1_1_IRQS_PER_MSIR, ++ .msir_base = MSI_LS1043V1_1_MSIR_OFFSET, ++}; ++ ++static const struct of_device_id ls_scfg_msi_id[] = { ++ /* The following two misspelled compatibles are obsolete */ ++ { .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg}, ++ { .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg}, ++ ++ { .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg }, ++ { .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg }, ++ { .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg }, ++ { .compatible = "fsl,ls1046a-msi", .data = &ls1046_msi_cfg }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, ls_scfg_msi_id); ++ + static int ls_scfg_msi_probe(struct platform_device *pdev) + { ++ const struct of_device_id *match; + struct ls_scfg_msi *msi_data; + struct resource *res; +- int ret; ++ int i, ret; ++ ++ match = of_match_device(ls_scfg_msi_id, &pdev->dev); ++ if (!match) ++ return -ENODEV; + + msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL); + if (!msi_data) + return -ENOMEM; + ++ msi_data->cfg = (struct ls_scfg_msi_cfg *) match->data; ++ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + msi_data->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(msi_data->regs)) { +@@ -182,23 +349,48 @@ static int ls_scfg_msi_probe(struct plat + } + msi_data->msiir_addr = res->start; + +- msi_data->irq = platform_get_irq(pdev, 0); +- if (msi_data->irq <= 0) { +- dev_err(&pdev->dev, "failed to get MSI irq\n"); +- return -ENODEV; +- } +- + msi_data->pdev = pdev; + spin_lock_init(&msi_data->lock); + ++ msi_data->irqs_num = MSI_IRQS_PER_MSIR * ++ (1 << msi_data->cfg->ibs_shift); ++ msi_data->used = devm_kcalloc(&pdev->dev, ++ BITS_TO_LONGS(msi_data->irqs_num), ++ sizeof(*msi_data->used), ++ GFP_KERNEL); ++ if (!msi_data->used) ++ return -ENOMEM; ++ /* ++ * Reserve all the hwirqs ++ * The available hwirqs will be released in ls1_msi_setup_hwirq() ++ */ ++ bitmap_set(msi_data->used, 0, msi_data->irqs_num); ++ ++ msi_data->msir_num = of_irq_count(pdev->dev.of_node); ++ ++ if (msi_affinity_flag) { ++ u32 cpu_num; ++ ++ cpu_num = num_possible_cpus(); ++ if (msi_data->msir_num >= cpu_num) ++ msi_data->msir_num = cpu_num; ++ else ++ msi_affinity_flag = 0; ++ } ++ ++ msi_data->msir = devm_kcalloc(&pdev->dev, msi_data->msir_num, ++ sizeof(*msi_data->msir), ++ GFP_KERNEL); ++ if (!msi_data->msir) ++ return -ENOMEM; ++ ++ for (i = 0; i < msi_data->msir_num; i++) ++ ls_scfg_msi_setup_hwirq(msi_data, i); ++ + ret = ls_scfg_msi_domains_init(msi_data); + if (ret) + return ret; + +- irq_set_chained_handler_and_data(msi_data->irq, +- ls_scfg_msi_irq_handler, +- msi_data); +- + platform_set_drvdata(pdev, msi_data); + + return 0; +@@ -207,8 +399,10 @@ static int ls_scfg_msi_probe(struct plat + static int ls_scfg_msi_remove(struct platform_device *pdev) + { + struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev); ++ int i; + +- 
irq_set_chained_handler_and_data(msi_data->irq, NULL, NULL); ++ for (i = 0; i < msi_data->msir_num; i++) ++ ls_scfg_msi_teardown_hwirq(&msi_data->msir[i]); + + irq_domain_remove(msi_data->msi_domain); + irq_domain_remove(msi_data->parent); +@@ -218,12 +412,6 @@ static int ls_scfg_msi_remove(struct pla + return 0; + } + +-static const struct of_device_id ls_scfg_msi_id[] = { +- { .compatible = "fsl,1s1021a-msi", }, +- { .compatible = "fsl,1s1043a-msi", }, +- {}, +-}; +- + static struct platform_driver ls_scfg_msi_driver = { + .driver = { + .name = "ls-scfg-msi", +--- a/drivers/pci/host/Makefile ++++ b/drivers/pci/host/Makefile +@@ -17,7 +17,7 @@ obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx + obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o + obj-$(CONFIG_PCI_XGENE) += pci-xgene.o + obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o +-obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o ++obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o pci-layerscape-ep.o pci-layerscape-ep-debugfs.o + obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o + obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o + obj-$(CONFIG_PCIE_IPROC_MSI) += pcie-iproc-msi.o +--- /dev/null ++++ b/drivers/pci/host/pci-layerscape-ep-debugfs.c +@@ -0,0 +1,758 @@ ++/* ++ * PCIe Endpoint driver for Freescale Layerscape SoCs ++ * ++ * Copyright (C) 2015 Freescale Semiconductor. ++ * ++ * Author: Minghuan Lian ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "pci-layerscape-ep.h" ++ ++#define PCIE_ATU_INDEX3 (0x3 << 0) ++#define PCIE_ATU_INDEX2 (0x2 << 0) ++#define PCIE_ATU_INDEX1 (0x1 << 0) ++#define PCIE_ATU_INDEX0 (0x0 << 0) ++ ++#define PCIE_BAR0_SIZE (4 * 1024) /* 4K */ ++#define PCIE_BAR1_SIZE (8 * 1024) /* 8K for MSIX */ ++#define PCIE_BAR2_SIZE (4 * 1024) /* 4K */ ++#define PCIE_BAR4_SIZE (1 * 1024 * 1024) /* 1M */ ++#define PCIE_MSI_OB_SIZE (4 * 1024) /* 4K */ ++ ++#define PCIE_MSI_MSG_ADDR_OFF 0x54 ++#define PCIE_MSI_MSG_DATA_OFF 0x5c ++ ++enum test_type { ++ TEST_TYPE_DMA, ++ TEST_TYPE_MEMCPY ++}; ++ ++enum test_dirt { ++ TEST_DIRT_READ, ++ TEST_DIRT_WRITE ++}; ++ ++enum test_status { ++ TEST_IDLE, ++ TEST_BUSY ++}; ++ ++struct ls_ep_test { ++ struct ls_ep_dev *ep; ++ void __iomem *cfg; ++ void __iomem *buf; ++ void __iomem *out; ++ void __iomem *msi; ++ dma_addr_t cfg_addr; ++ dma_addr_t buf_addr; ++ dma_addr_t out_addr; ++ dma_addr_t bus_addr; ++ dma_addr_t msi_addr; ++ u64 msi_msg_addr; ++ u16 msi_msg_data; ++ struct task_struct *thread; ++ spinlock_t lock; ++ struct completion done; ++ u32 len; ++ int loop; ++ char data; ++ enum test_dirt dirt; ++ enum test_type type; ++ enum test_status status; ++ u64 result; /* Mbps */ ++ char cmd[256]; ++}; ++ ++static int ls_pcie_ep_trigger_msi(struct ls_ep_test *test) ++{ ++ if (!test->msi) ++ return -EINVAL; ++ ++ iowrite32(test->msi_msg_data, test->msi); ++ ++ return 0; ++} ++ ++static int ls_pcie_ep_test_try_run(struct ls_ep_test *test) ++{ ++ int ret; ++ ++ spin_lock(&test->lock); ++ if (test->status == TEST_IDLE) { ++ test->status = TEST_BUSY; ++ ret = 0; ++ } else ++ ret = -EBUSY; ++ spin_unlock(&test->lock); ++ ++ return ret; ++} ++ ++static void ls_pcie_ep_test_done(struct ls_ep_test *test) ++{ ++ spin_lock(&test->lock); ++ test->status = TEST_IDLE; ++ spin_unlock(&test->lock); ++} ++ ++static void 
ls_pcie_ep_test_dma_cb(void *arg) ++{ ++ struct ls_ep_test *test = arg; ++ ++ complete(&test->done); ++} ++ ++static int ls_pcie_ep_test_dma(struct ls_ep_test *test) ++{ ++ dma_cap_mask_t mask; ++ struct dma_chan *chan; ++ struct dma_device *dma_dev; ++ dma_addr_t src, dst; ++ enum dma_data_direction direction; ++ enum dma_ctrl_flags dma_flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; ++ struct timespec start, end, period; ++ int i = 0; ++ ++ dma_cap_zero(mask); ++ dma_cap_set(DMA_MEMCPY, mask); ++ ++ chan = dma_request_channel(mask, NULL, test); ++ if (!chan) { ++ pr_err("failed to request dma channel\n"); ++ return -EINVAL; ++ } ++ ++ memset(test->buf, test->data, test->len); ++ ++ if (test->dirt == TEST_DIRT_WRITE) { ++ src = test->buf_addr; ++ dst = test->out_addr; ++ direction = DMA_TO_DEVICE; ++ } else { ++ src = test->out_addr; ++ dst = test->buf_addr; ++ direction = DMA_FROM_DEVICE; ++ } ++ ++ dma_dev = chan->device; ++ dma_flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; ++ ++ dma_sync_single_for_device(&test->ep->dev, test->buf_addr, ++ test->len, direction); ++ ++ set_freezable(); ++ ++ getrawmonotonic(&start); ++ while (!kthread_should_stop() && (i < test->loop)) { ++ struct dma_async_tx_descriptor *dma_desc; ++ dma_cookie_t dma_cookie = {0}; ++ unsigned long tmo; ++ int status; ++ ++ init_completion(&test->done); ++ ++ dma_desc = dma_dev->device_prep_dma_memcpy(chan, ++ dst, src, ++ test->len, ++ dma_flags); ++ if (!dma_desc) { ++ pr_err("DMA desc constr failed...\n"); ++ goto _err; ++ } ++ ++ dma_desc->callback = ls_pcie_ep_test_dma_cb; ++ dma_desc->callback_param = test; ++ dma_cookie = dmaengine_submit(dma_desc); ++ ++ if (dma_submit_error(dma_cookie)) { ++ pr_err("DMA submit error....\n"); ++ goto _err; ++ } ++ ++ /* Trigger the transaction */ ++ dma_async_issue_pending(chan); ++ ++ tmo = wait_for_completion_timeout(&test->done, ++ msecs_to_jiffies(5 * test->len)); ++ if (tmo == 0) { ++ pr_err("Self-test copy timed out, disabling\n"); ++ goto _err; ++ } ++ ++ status = dma_async_is_tx_complete(chan, dma_cookie, ++ NULL, NULL); ++ if (status != DMA_COMPLETE) { ++ pr_err("got completion callback, but status is %s\n", ++ status == DMA_ERROR ? 
"error" : "in progress"); ++ goto _err; ++ } ++ ++ i++; ++ } ++ ++ getrawmonotonic(&end); ++ period = timespec_sub(end, start); ++ test->result = test->len * 8ULL * i * 1000; ++ do_div(test->result, period.tv_sec * 1000 * 1000 * 1000 + period.tv_nsec); ++ dma_release_channel(chan); ++ ++ return 0; ++ ++_err: ++ dma_release_channel(chan); ++ test->result = 0; ++ return -EINVAL; ++} ++ ++static int ls_pcie_ep_test_cpy(struct ls_ep_test *test) ++{ ++ void *dst, *src; ++ struct timespec start, end, period; ++ int i = 0; ++ ++ memset(test->buf, test->data, test->len); ++ ++ if (test->dirt == TEST_DIRT_WRITE) { ++ dst = test->out; ++ src = test->buf; ++ } else { ++ dst = test->buf; ++ src = test->out; ++ } ++ ++ getrawmonotonic(&start); ++ while (!kthread_should_stop() && i < test->loop) { ++ memcpy(dst, src, test->len); ++ i++; ++ } ++ getrawmonotonic(&end); ++ ++ period = timespec_sub(end, start); ++ test->result = test->len * 8ULL * i * 1000; ++ do_div(test->result, period.tv_sec * 1000 * 1000 * 1000 + period.tv_nsec); ++ ++ return 0; ++} ++ ++int ls_pcie_ep_test_thread(void *arg) ++{ ++ int ret; ++ ++ struct ls_ep_test *test = arg; ++ ++ if (test->type == TEST_TYPE_DMA) ++ ret = ls_pcie_ep_test_dma(test); ++ else ++ ret = ls_pcie_ep_test_cpy(test); ++ ++ if (ret) { ++ pr_err("\n%s \ttest failed\n", ++ test->cmd); ++ test->result = 0; ++ } else ++ pr_err("\n%s \tthroughput:%lluMbps\n", ++ test->cmd, test->result); ++ ++ ls_pcie_ep_test_done(test); ++ ++ ls_pcie_ep_trigger_msi(test); ++ ++ do_exit(0); ++} ++ ++static int ls_pcie_ep_free_test(struct ls_ep_dev *ep) ++{ ++ struct ls_ep_test *test = ep->driver_data; ++ ++ if (!test) ++ return 0; ++ ++ if (test->status == TEST_BUSY) { ++ kthread_stop(test->thread); ++ dev_info(&ep->dev, ++ "test is running please wait and run again\n"); ++ return -EBUSY; ++ } ++ ++ if (test->buf) ++ free_pages((unsigned long)test->buf, ++ get_order(PCIE_BAR4_SIZE)); ++ ++ if (test->cfg) ++ free_pages((unsigned long)test->cfg, ++ get_order(PCIE_BAR2_SIZE)); ++ ++ if (test->out) ++ iounmap(test->out); ++ ++ kfree(test); ++ ep->driver_data = NULL; ++ ++ return 0; ++} ++ ++static int ls_pcie_ep_init_test(struct ls_ep_dev *ep, u64 bus_addr) ++{ ++ struct ls_pcie *pcie = ep->pcie; ++ struct ls_ep_test *test = ep->driver_data; ++ int err; ++ ++ if (test) { ++ dev_info(&ep->dev, ++ "Please use 'free' to remove the exiting test\n"); ++ return -EBUSY; ++ } ++ ++ test = kzalloc(sizeof(*test), GFP_KERNEL); ++ if (!test) ++ return -ENOMEM; ++ ep->driver_data = test; ++ test->ep = ep; ++ spin_lock_init(&test->lock); ++ test->status = TEST_IDLE; ++ ++ test->buf = dma_alloc_coherent(pcie->dev, get_order(PCIE_BAR4_SIZE), ++ &test->buf_addr, ++ GFP_KERNEL); ++ if (!test->buf) { ++ dev_info(&ep->dev, "failed to get mem for bar4\n"); ++ err = -ENOMEM; ++ goto _err; ++ } ++ ++ test->cfg = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, ++ get_order(PCIE_BAR2_SIZE)); ++ if (!test->cfg) { ++ dev_info(&ep->dev, "failed to get mem for bar4\n"); ++ err = -ENOMEM; ++ goto _err; ++ } ++ test->cfg_addr = virt_to_phys(test->cfg); ++ ++ test->out_addr = pcie->out_base; ++ test->out = ioremap(test->out_addr, PCIE_BAR4_SIZE); ++ if (!test->out) { ++ dev_info(&ep->dev, "failed to map out\n"); ++ err = -ENOMEM; ++ goto _err; ++ } ++ ++ test->bus_addr = bus_addr; ++ ++ test->msi_addr = test->out_addr + PCIE_BAR4_SIZE; ++ test->msi = ioremap(test->msi_addr, PCIE_MSI_OB_SIZE); ++ if (!test->msi) ++ dev_info(&ep->dev, "failed to map MSI outbound region\n"); ++ ++ test->msi_msg_addr = 
ioread32(pcie->dbi + PCIE_MSI_MSG_ADDR_OFF) |
++ (((u64)ioread32(pcie->dbi + PCIE_MSI_MSG_ADDR_OFF + 4)) << 32);
++ test->msi_msg_data = ioread16(pcie->dbi + PCIE_MSI_MSG_DATA_OFF);
++
++ ls_pcie_ep_dev_cfg_enable(ep);
++
++ /* outbound iATU for memory */
++ ls_pcie_iatu_outbound_set(pcie, 0, PCIE_ATU_TYPE_MEM,
++ test->out_addr, bus_addr, PCIE_BAR4_SIZE);
++ /* outbound iATU for MSI */
++ ls_pcie_iatu_outbound_set(pcie, 1, PCIE_ATU_TYPE_MEM,
++ test->msi_addr, test->msi_msg_addr,
++ PCIE_MSI_OB_SIZE);
++
++ /* ATU 0 : INBOUND : map BAR0 */
++ ls_pcie_iatu_inbound_set(pcie, 0, 0, test->cfg_addr);
++ /* ATU 2 : INBOUND : map BAR2 */
++ ls_pcie_iatu_inbound_set(pcie, 2, 2, test->cfg_addr);
++ /* ATU 3 : INBOUND : map BAR4 */
++ ls_pcie_iatu_inbound_set(pcie, 3, 4, test->buf_addr);
++
++ return 0;
++
++_err:
++ ls_pcie_ep_free_test(ep);
++ return err;
++}
++
++static int ls_pcie_ep_start_test(struct ls_ep_dev *ep, char *cmd)
++{
++ struct ls_ep_test *test = ep->driver_data;
++ enum test_type type;
++ enum test_dirt dirt;
++ u32 cnt, len, loop;
++ unsigned int data;
++ char dirt_str[2];
++ int ret = 0;
++
++ if (strncmp(cmd, "dma", 3) == 0)
++ type = TEST_TYPE_DMA;
++ else
++ type = TEST_TYPE_MEMCPY;
++
++ cnt = sscanf(&cmd[4], "%1s %u %u %x", dirt_str, &len, &loop, &data);
++ if (cnt != 4) {
++ dev_info(&ep->dev, "format error %s\n", cmd);
++ dev_info(&ep->dev, "dma/cpy <r/w> <len> <loop> <data>\n");
++ return -EINVAL;
++ }
++
++ if (strncmp(dirt_str, "r", 1) == 0)
++ dirt = TEST_DIRT_READ;
++ else
++ dirt = TEST_DIRT_WRITE;
++
++ if (len > PCIE_BAR4_SIZE) {
++ dev_err(&ep->dev, "max len is %d\n", PCIE_BAR4_SIZE);
++ return -EINVAL;
++ }
++
++ if (!test) {
++ dev_err(&ep->dev, "Please first run init command\n");
++ return -EINVAL;
++ }
++
++ if (ls_pcie_ep_test_try_run(test)) {
++ dev_err(&ep->dev, "There is already a test running\n");
++ return -EINVAL;
++ }
++
++ test->len = len;
++ test->loop = loop;
++ test->type = type;
++ test->data = (char)data;
++ test->dirt = dirt;
++ strcpy(test->cmd, cmd);
++ test->thread = kthread_run(ls_pcie_ep_test_thread, test,
++ "pcie ep test");
++ if (IS_ERR(test->thread)) {
++ dev_err(&ep->dev, "fork failed for pcie ep test\n");
++ ls_pcie_ep_test_done(test);
++ ret = PTR_ERR(test->thread);
++ }
++
++ return ret;
++}
++
++
++/**
++ * ls_pcie_ep_dbg_regs_read - read out the controller registers
++ * @filp: the opened file
++ * @buffer: where to write the data for the user to read
++ * @count: the size of the user's buffer
++ * @ppos: file position offset
++ **/
++static ssize_t ls_pcie_ep_dbg_regs_read(struct file *filp, char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct ls_ep_dev *ep = filp->private_data;
++ struct ls_pcie *pcie = ep->pcie;
++ char *buf;
++ int desc = 0, i, len;
++
++ buf = kmalloc(4 * 1024, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
++ ls_pcie_ep_dev_cfg_enable(ep);
++
++ desc += sprintf(buf + desc, "%s", "reg info:");
++ for (i = 0; i < 0x200; i += 4) {
++ if (i % 16 == 0)
++ desc += sprintf(buf + desc, "\n%08x:", i);
++ desc += sprintf(buf + desc, " %08x", readl(pcie->dbi + i));
++ }
++
++ desc += sprintf(buf + desc, "\n%s", "outbound iATU info:\n");
++ for (i = 0; i < 6; i++) {
++ writel(PCIE_ATU_REGION_OUTBOUND | i,
++ pcie->dbi + PCIE_ATU_VIEWPORT);
++ desc += sprintf(buf + desc, "iATU%d", i);
++ desc += sprintf(buf + desc, "\tLOWER PHYS 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_LOWER_BASE));
++ desc += sprintf(buf + desc, "\tUPPER PHYS 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_UPPER_BASE));
++ desc += sprintf(buf + desc, "\tLOWER BUS 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_LOWER_TARGET));
++ desc += sprintf(buf + desc, "\tUPPER BUS 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_UPPER_TARGET));
++ desc += sprintf(buf + desc, "\tLIMIT 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_LIMIT));
++ desc += sprintf(buf + desc, "\tCR1 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_CR1));
++ desc += sprintf(buf + desc, "\tCR2 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_CR2));
++ }
++
++ desc += sprintf(buf + desc, "\n%s", "inbound iATU info:\n");
++ for (i = 0; i < 6; i++) {
++ writel(PCIE_ATU_REGION_INBOUND | i,
++ pcie->dbi + PCIE_ATU_VIEWPORT);
++ desc += sprintf(buf + desc, "iATU%d", i);
++ desc += sprintf(buf + desc, "\tLOWER BUS 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_LOWER_BASE));
++ desc += sprintf(buf + desc, "\tUPPER BUS 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_UPPER_BASE));
++ desc += sprintf(buf + desc, "\tLOWER PHYS 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_LOWER_TARGET));
++ desc += sprintf(buf + desc, "\tUPPER PHYS 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_UPPER_TARGET));
++ desc += sprintf(buf + desc, "\tLIMIT 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_LIMIT));
++ desc += sprintf(buf + desc, "\tCR1 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_CR1));
++ desc += sprintf(buf + desc, "\tCR2 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_CR2));
++ }
++
++ len = simple_read_from_buffer(buffer, count, ppos, buf, desc);
++ kfree(buf);
++
++ return len;
++}
++
++/**
++ * ls_pcie_ep_dbg_regs_write - write a controller register
++ * @filp: the opened file
++ * @buffer: where to find the user's data
++ * @count: the length of the user's data
++ * @ppos: file position offset
++ **/
++static ssize_t ls_pcie_ep_dbg_regs_write(struct file *filp,
++ const char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct ls_ep_dev *ep = filp->private_data;
++ struct ls_pcie *pcie = ep->pcie;
++ char buf[256];
++
++ if (count >= sizeof(buf))
++ return -ENOSPC;
++
++ memset(buf, 0, sizeof(buf));
++
++ if (copy_from_user(buf, buffer, count))
++ return -EFAULT;
++
++ ls_pcie_ep_dev_cfg_enable(ep);
++
++ if (strncmp(buf, "reg", 3) == 0) {
++ u32 reg, value;
++ int cnt;
++
++ cnt = sscanf(&buf[3], "%x %x", &reg, &value);
++ if (cnt == 2) {
++ writel(value, pcie->dbi + reg);
++ value = readl(pcie->dbi + reg);
++ dev_info(&ep->dev, "reg 0x%08x: 0x%08x\n",
++ reg, value);
++ } else {
++ dev_info(&ep->dev, "reg <reg> <value>\n");
++ }
++ } else if (strncmp(buf, "atu", 3) == 0) {
++ /* to do */
++ dev_info(&ep->dev, "atu command is not supported yet\n");
++ } else {
++ dev_info(&ep->dev, "Unknown command %s\n", buf);
++ dev_info(&ep->dev, "Available commands:\n");
++ dev_info(&ep->dev, " reg <reg> <value>\n");
++ }
++
++ return count;
++}
++
++static const struct file_operations ls_pcie_ep_dbg_regs_fops = {
++ .owner = THIS_MODULE,
++ .open = simple_open,
++ .read = ls_pcie_ep_dbg_regs_read,
++ .write = ls_pcie_ep_dbg_regs_write,
++};
++
++static ssize_t ls_pcie_ep_dbg_test_read(struct file *filp,
++ char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct ls_ep_dev *ep = filp->private_data;
++ struct ls_ep_test *test = ep->driver_data;
++ char buf[512];
++ int desc = 0, len;
++
++ if (!test) {
++ dev_info(&ep->dev, "there is no test\n");
++ return 0;
++ }
++
++ if (test->status != TEST_IDLE) {
++ dev_info(&ep->dev, "test %s is running\n", test->cmd);
++ return 0;
++ }
++
++ desc = sprintf(buf, "MSI ADDR:0x%llx MSI DATA:0x%x\n",
++ test->msi_msg_addr, test->msi_msg_data);
++
++ desc += sprintf(buf + desc, "%s throughput:%lluMbps\n",
++ test->cmd, test->result);
++
++ len = simple_read_from_buffer(buffer, count, ppos,
++ buf, desc);
++
++ return len;
++}
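++
++/*
++ * Illustrative debugfs session (directory names depend on dev_name()
++ * of the controller and EP function; a hypothetical "pf0" is shown):
++ *
++ * echo "init 0x80000000" > /sys/kernel/debug/<controller>/pf0/test
++ * echo "dma w 1024 100 5a" > /sys/kernel/debug/<controller>/pf0/test
++ * cat /sys/kernel/debug/<controller>/pf0/test
++ */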
++
++static ssize_t ls_pcie_ep_dbg_test_write(struct file *filp,
++ const char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct ls_ep_dev *ep = filp->private_data;
++ char buf[256];
++
++ if (count >= sizeof(buf))
++ return -ENOSPC;
++
++ memset(buf, 0, sizeof(buf));
++
++ if (copy_from_user(buf, buffer, count))
++ return -EFAULT;
++
++ if (strncmp(buf, "init", 4) == 0) {
++ int i = 4;
++ u64 bus_addr;
++
++ while (buf[i] == ' ')
++ i++;
++
++ if (kstrtou64(&buf[i], 0, &bus_addr))
++ dev_info(&ep->dev, "command: init <bus_addr>\n");
++ else {
++ if (ls_pcie_ep_init_test(ep, bus_addr))
++ dev_info(&ep->dev, "failed to init test\n");
++ }
++ } else if (strncmp(buf, "free", 4) == 0)
++ ls_pcie_ep_free_test(ep);
++ else if (strncmp(buf, "dma", 3) == 0 ||
++ strncmp(buf, "cpy", 3) == 0)
++ ls_pcie_ep_start_test(ep, buf);
++ else {
++ dev_info(&ep->dev, "Unknown command: %s\n", buf);
++ dev_info(&ep->dev, "Available commands:\n");
++ dev_info(&ep->dev, "\tinit <bus_addr>\n");
++ dev_info(&ep->dev, "\tdma/cpy <r/w> <len> <loop> <data>\n");
++ dev_info(&ep->dev, "\tfree\n");
++ }
++
++ return count;
++}
++
++static const struct file_operations ls_pcie_ep_dbg_test_fops = {
++ .owner = THIS_MODULE,
++ .open = simple_open,
++ .read = ls_pcie_ep_dbg_test_read,
++ .write = ls_pcie_ep_dbg_test_write,
++};
++
++static ssize_t ls_pcie_ep_dbg_dump_read(struct file *filp,
++ char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct ls_ep_dev *ep = filp->private_data;
++ struct ls_ep_test *test = ep->driver_data;
++ char *buf;
++ int desc = 0, i, len;
++
++ buf = kmalloc(4 * 1024, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
++ if (!test) {
++ dev_info(&ep->dev, "there is no test\n");
++ kfree(buf);
++ return 0;
++ }
++
++ desc += sprintf(buf + desc, "%s", "dump info:");
++ for (i = 0; i < 256; i += 4) {
++ if (i % 16 == 0)
++ desc += sprintf(buf + desc, "\n%08x:", i);
++ desc += sprintf(buf + desc, " %08x", readl(test->buf + i));
++ }
++
++ desc += sprintf(buf + desc, "\n");
++ len = simple_read_from_buffer(buffer, count, ppos, buf, desc);
++
++ kfree(buf);
++
++ return len;
++}
++
++static const struct file_operations ls_pcie_ep_dbg_dump_fops = {
++ .owner = THIS_MODULE,
++ .open = simple_open,
++ .read = ls_pcie_ep_dbg_dump_read,
++};
++
++static int ls_pcie_ep_dev_dbgfs_init(struct ls_ep_dev *ep)
++{
++ struct ls_pcie *pcie = ep->pcie;
++ struct dentry *pfile;
++
++ ls_pcie_ep_dev_cfg_enable(ep);
++
++ ep->dir = debugfs_create_dir(dev_name(&ep->dev), pcie->dir);
++ if (!ep->dir)
++ return -ENOMEM;
++
++ pfile = debugfs_create_file("regs", 0600, ep->dir, ep,
++ &ls_pcie_ep_dbg_regs_fops);
++ if (!pfile)
++ dev_info(&ep->dev, "failed to create debugfs regs file\n");
++
++ pfile = debugfs_create_file("test", 0600, ep->dir, ep,
++ &ls_pcie_ep_dbg_test_fops);
++ if (!pfile)
++ dev_info(&ep->dev, "failed to create debugfs test file\n");
++
++ pfile = debugfs_create_file("dump", 0600, ep->dir, ep,
++ &ls_pcie_ep_dbg_dump_fops);
++ if (!pfile)
++ dev_info(&ep->dev, "failed to create debugfs dump file\n");
++
++ return 0;
++}
++
++int ls_pcie_ep_dbgfs_init(struct ls_pcie *pcie)
++{
++ struct ls_ep_dev *ep;
++
++ pcie->dir = debugfs_create_dir(dev_name(pcie->dev), NULL);
++ if (!pcie->dir)
++ return -ENOMEM;
++
++ list_for_each_entry(ep, &pcie->ep_list, node)
++ ls_pcie_ep_dev_dbgfs_init(ep);
++
++ return 0;
++}
++
++int ls_pcie_ep_dbgfs_remove(struct ls_pcie *pcie)
++{
++ debugfs_remove_recursive(pcie->dir);
++ return 0;
++}
++
++MODULE_AUTHOR("Minghuan Lian ");
++MODULE_DESCRIPTION("Freescale Layerscape PCIe EP controller driver");
++MODULE_LICENSE("GPL
v2"); +--- /dev/null ++++ b/drivers/pci/host/pci-layerscape-ep.c +@@ -0,0 +1,309 @@ ++/* ++ * PCIe Endpoint driver for Freescale Layerscape SoCs ++ * ++ * Copyright (C) 2015 Freescale Semiconductor. ++ * ++ * Author: Minghuan Lian ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "pci-layerscape-ep.h" ++ ++struct ls_ep_dev * ++ls_pci_ep_find(struct ls_pcie *pcie, int dev_id) ++{ ++ struct ls_ep_dev *ep; ++ ++ list_for_each_entry(ep, &pcie->ep_list, node) { ++ if (ep->dev_id == dev_id) ++ return ep; ++ } ++ ++ return NULL; ++} ++ ++static void ls_pcie_try_cfg2(struct ls_pcie *pcie, int pf, int vf) ++{ ++ if (pcie->sriov) ++ writel(PCIE_LCTRL0_VAL(pf, vf), ++ pcie->dbi + PCIE_LUT_BASE + PCIE_LUT_LCTRL0); ++} ++ ++static bool ls_pcie_is_bridge(struct ls_pcie *pcie) ++{ ++ u32 header_type = 0; ++ ++ header_type = readl(pcie->dbi + (PCI_HEADER_TYPE & ~0x3)); ++ header_type = (header_type >> 16) & 0x7f; ++ ++ return header_type == PCI_HEADER_TYPE_BRIDGE; ++} ++ ++void ls_pcie_iatu_outbound_set(struct ls_pcie *pcie, int idx, int type, ++ u64 cpu_addr, u64 pci_addr, u32 size) ++{ ++ writel(PCIE_ATU_REGION_OUTBOUND | idx, ++ pcie->dbi + PCIE_ATU_VIEWPORT); ++ writel(lower_32_bits(cpu_addr), ++ pcie->dbi + PCIE_ATU_LOWER_BASE); ++ writel(upper_32_bits(cpu_addr), ++ pcie->dbi + PCIE_ATU_UPPER_BASE); ++ writel(lower_32_bits(cpu_addr + size - 1), ++ pcie->dbi + PCIE_ATU_LIMIT); ++ writel(lower_32_bits(pci_addr), ++ pcie->dbi + PCIE_ATU_LOWER_TARGET); ++ writel(upper_32_bits(pci_addr), ++ pcie->dbi + PCIE_ATU_UPPER_TARGET); ++ writel(type, pcie->dbi + PCIE_ATU_CR1); ++ writel(PCIE_ATU_ENABLE, pcie->dbi + PCIE_ATU_CR2); ++} ++ ++/* Use bar match mode and MEM type as default */ ++void ls_pcie_iatu_inbound_set(struct ls_pcie *pcie, int idx, ++ int bar, u64 phys) ++{ ++ writel(PCIE_ATU_REGION_INBOUND | idx, pcie->dbi + PCIE_ATU_VIEWPORT); ++ writel((u32)phys, pcie->dbi + PCIE_ATU_LOWER_TARGET); ++ writel(phys >> 32, pcie->dbi + PCIE_ATU_UPPER_TARGET); ++ writel(PCIE_ATU_TYPE_MEM, pcie->dbi + PCIE_ATU_CR1); ++ writel(PCIE_ATU_ENABLE | PCIE_ATU_BAR_MODE_ENABLE | ++ PCIE_ATU_BAR_NUM(bar), pcie->dbi + PCIE_ATU_CR2); ++} ++ ++void ls_pcie_ep_dev_cfg_enable(struct ls_ep_dev *ep) ++{ ++ ls_pcie_try_cfg2(ep->pcie, ep->pf_idx, ep->vf_idx); ++} ++ ++void ls_pcie_ep_setup_bar(void *bar_base, int bar, u32 size) ++{ ++ if (size < 4 * 1024) ++ return; ++ ++ switch (bar) { ++ case 0: ++ writel(size - 1, bar_base + PCI_BASE_ADDRESS_0); ++ break; ++ case 1: ++ writel(size - 1, bar_base + PCI_BASE_ADDRESS_1); ++ break; ++ case 2: ++ writel(size - 1, bar_base + PCI_BASE_ADDRESS_2); ++ writel(0, bar_base + PCI_BASE_ADDRESS_3); ++ break; ++ case 4: ++ writel(size - 1, bar_base + PCI_BASE_ADDRESS_4); ++ writel(0, bar_base + PCI_BASE_ADDRESS_5); ++ break; ++ default: ++ break; ++ } ++} ++ ++void ls_pcie_ep_dev_setup_bar(struct ls_ep_dev *ep, int bar, u32 size) ++{ ++ struct ls_pcie *pcie = ep->pcie; ++ void *bar_base; ++ ++ if (size < 4 * 1024) ++ return; ++ ++ if (pcie->sriov) ++ bar_base = pcie->dbi; ++ else ++ bar_base = pcie->dbi + PCIE_NO_SRIOV_BAR_BASE; ++ ++ ls_pcie_ep_dev_cfg_enable(ep); ++ ls_pcie_ep_setup_bar(bar_base, bar, size); ++} ++ ++static int ls_pcie_ep_dev_init(struct ls_pcie *pcie, int 
pf_idx, int vf_idx) ++{ ++ struct ls_ep_dev *ep; ++ ++ ep = devm_kzalloc(pcie->dev, sizeof(*ep), GFP_KERNEL); ++ if (!ep) ++ return -ENOMEM; ++ ++ ep->pcie = pcie; ++ ep->pf_idx = pf_idx; ++ ep->vf_idx = vf_idx; ++ if (vf_idx) ++ ep->dev_id = pf_idx + 4 + 4 * (vf_idx - 1); ++ else ++ ep->dev_id = pf_idx; ++ ++ if (ep->vf_idx) ++ dev_set_name(&ep->dev, "pf%d-vf%d", ++ ep->pf_idx, ++ ep->vf_idx); ++ else ++ dev_set_name(&ep->dev, "pf%d", ++ ep->pf_idx); ++ ++ list_add_tail(&ep->node, &pcie->ep_list); ++ ++ return 0; ++} ++ ++static int ls_pcie_ep_init(struct ls_pcie *pcie) ++{ ++ u32 sriov_header; ++ int pf, vf, i, j; ++ ++ sriov_header = readl(pcie->dbi + PCIE_SRIOV_POS); ++ ++ if (PCI_EXT_CAP_ID(sriov_header) == PCI_EXT_CAP_ID_SRIOV) { ++ pcie->sriov = PCIE_SRIOV_POS; ++ pf = PCIE_PF_NUM; ++ vf = PCIE_VF_NUM; ++ } else { ++ pcie->sriov = 0; ++ pf = 1; ++ vf = 0; ++ } ++ ++ for (i = 0; i < pf; i++) { ++ for (j = 0; j <= vf; j++) ++ ls_pcie_ep_dev_init(pcie, i, j); ++ } ++ ++ return 0; ++} ++ ++static struct ls_pcie_ep_drvdata ls1043_drvdata = { ++ .lut_offset = 0x10000, ++ .ltssm_shift = 24, ++ .lut_dbg = 0x7fc, ++}; ++ ++static struct ls_pcie_ep_drvdata ls1046_drvdata = { ++ .lut_offset = 0x80000, ++ .ltssm_shift = 24, ++ .lut_dbg = 0x407fc, ++}; ++ ++static struct ls_pcie_ep_drvdata ls2080_drvdata = { ++ .lut_offset = 0x80000, ++ .ltssm_shift = 0, ++ .lut_dbg = 0x7fc, ++}; ++ ++static const struct of_device_id ls_pcie_ep_of_match[] = { ++ { .compatible = "fsl,ls1021a-pcie", }, ++ { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata }, ++ { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata }, ++ { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, ++ { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(of, ls_pcie_ep_of_match); ++ ++static int ls_pcie_ep_probe(struct platform_device *pdev) ++{ ++ struct ls_pcie *pcie; ++ struct resource *dbi_base, *cfg_res; ++ const struct of_device_id *match; ++ int ret; ++ ++ match = of_match_device(ls_pcie_ep_of_match, &pdev->dev); ++ if (!match) ++ return -ENODEV; ++ ++ pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); ++ if (!pcie) ++ return -ENOMEM; ++ ++ pcie->dev = &pdev->dev; ++ INIT_LIST_HEAD(&pcie->ep_list); ++ ++ dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); ++ pcie->dbi = devm_ioremap_resource(&pdev->dev, dbi_base); ++ if (IS_ERR(pcie->dbi)) { ++ dev_err(&pdev->dev, "missing *regs* space\n"); ++ return PTR_ERR(pcie->dbi); ++ } ++ ++ pcie->drvdata = match->data; ++ pcie->lut = pcie->dbi + pcie->drvdata->lut_offset; ++ ++ if (ls_pcie_is_bridge(pcie)) ++ return -ENODEV; ++ ++ dev_info(pcie->dev, "in EP mode\n"); ++ ++ cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); ++ if (cfg_res) ++ pcie->out_base = cfg_res->start; ++ else { ++ dev_err(&pdev->dev, "missing *config* space\n"); ++ return -ENODEV; ++ } ++ ++ ret = ls_pcie_ep_init(pcie); ++ if (ret) ++ return ret; ++ ++ ls_pcie_ep_dbgfs_init(pcie); ++ ++ platform_set_drvdata(pdev, pcie); ++ ++ return 0; ++} ++ ++static int ls_pcie_ep_dev_remove(struct ls_ep_dev *ep) ++{ ++ list_del(&ep->node); ++ ++ return 0; ++} ++ ++static int ls_pcie_ep_remove(struct platform_device *pdev) ++{ ++ struct ls_pcie *pcie = platform_get_drvdata(pdev); ++ struct ls_ep_dev *ep, *tmp; ++ ++ if (!pcie) ++ return 0; ++ ++ ls_pcie_ep_dbgfs_remove(pcie); ++ ++ list_for_each_entry_safe(ep, tmp, &pcie->ep_list, node) ++ ls_pcie_ep_dev_remove(ep); ++ ++ return 0; ++} ++ ++static struct platform_driver 
ls_pcie_ep_driver = { ++ .driver = { ++ .name = "ls-pcie-ep", ++ .owner = THIS_MODULE, ++ .of_match_table = ls_pcie_ep_of_match, ++ }, ++ .probe = ls_pcie_ep_probe, ++ .remove = ls_pcie_ep_remove, ++}; ++ ++module_platform_driver(ls_pcie_ep_driver); ++ ++MODULE_AUTHOR("Minghuan Lian "); ++MODULE_DESCRIPTION("Freescale Layerscape PCIe EP driver"); ++MODULE_LICENSE("GPL v2"); +--- /dev/null ++++ b/drivers/pci/host/pci-layerscape-ep.h +@@ -0,0 +1,115 @@ ++/* ++ * PCIe Endpoint driver for Freescale Layerscape SoCs ++ * ++ * Copyright (C) 2015 Freescale Semiconductor. ++ * ++ * Author: Minghuan Lian ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++ ++#ifndef _PCIE_LAYERSCAPE_EP_H ++#define _PCIE_LAYERSCAPE_EP_H ++ ++#include ++ ++/* Synopsis specific PCIE configuration registers */ ++#define PCIE_ATU_VIEWPORT 0x900 ++#define PCIE_ATU_REGION_INBOUND (0x1 << 31) ++#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31) ++#define PCIE_ATU_REGION_INDEX3 (0x3 << 0) ++#define PCIE_ATU_REGION_INDEX2 (0x2 << 0) ++#define PCIE_ATU_REGION_INDEX1 (0x1 << 0) ++#define PCIE_ATU_REGION_INDEX0 (0x0 << 0) ++#define PCIE_ATU_CR1 0x904 ++#define PCIE_ATU_TYPE_MEM (0x0 << 0) ++#define PCIE_ATU_TYPE_IO (0x2 << 0) ++#define PCIE_ATU_TYPE_CFG0 (0x4 << 0) ++#define PCIE_ATU_TYPE_CFG1 (0x5 << 0) ++#define PCIE_ATU_CR2 0x908 ++#define PCIE_ATU_ENABLE (0x1 << 31) ++#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30) ++#define PCIE_ATU_LOWER_BASE 0x90C ++#define PCIE_ATU_UPPER_BASE 0x910 ++#define PCIE_ATU_LIMIT 0x914 ++#define PCIE_ATU_LOWER_TARGET 0x918 ++#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24) ++#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19) ++#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) ++#define PCIE_ATU_UPPER_TARGET 0x91C ++ ++/* PEX internal configuration registers */ ++#define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */ ++ ++/* PEX LUT registers */ ++#define PCIE_LUT_BASE 0x80000 ++#define PCIE_LUT_DBG 0x7FC /* PEX LUT Debug register */ ++ ++#define PCIE_LUT_LCTRL0 0x7F8 ++ ++#define PCIE_ATU_BAR_NUM(bar) ((bar) << 8) ++#define PCIE_LCTRL0_CFG2_ENABLE (1 << 31) ++#define PCIE_LCTRL0_VF(vf) ((vf) << 22) ++#define PCIE_LCTRL0_PF(pf) ((pf) << 16) ++#define PCIE_LCTRL0_VF_ACTIVE (1 << 21) ++#define PCIE_LCTRL0_VAL(pf, vf) (PCIE_LCTRL0_PF(pf) | \ ++ PCIE_LCTRL0_VF(vf) | \ ++ ((vf) == 0 ? 
0 : PCIE_LCTRL0_VF_ACTIVE) | \ ++ PCIE_LCTRL0_CFG2_ENABLE) ++ ++#define PCIE_NO_SRIOV_BAR_BASE 0x1000 ++ ++#define PCIE_SRIOV_POS 0x178 ++#define PCIE_PF_NUM 2 ++#define PCIE_VF_NUM 64 ++ ++struct ls_pcie_ep_drvdata { ++ u32 lut_offset; ++ u32 ltssm_shift; ++ u32 lut_dbg; ++}; ++ ++struct ls_pcie { ++ struct list_head ep_list; ++ struct device *dev; ++ struct dentry *dir; ++ const struct ls_pcie_ep_drvdata *drvdata; ++ void __iomem *dbi; ++ void __iomem *lut; ++ phys_addr_t out_base; ++ int sriov; ++ int index; ++}; ++ ++struct ls_ep_dev { ++ struct list_head node; ++ struct ls_pcie *pcie; ++ struct device dev; ++ struct dentry *dir; ++ int pf_idx; ++ int vf_idx; ++ int dev_id; ++ void *driver_data; ++}; ++ ++struct ls_ep_dev *ls_pci_ep_find(struct ls_pcie *pcie, int dev_id); ++ ++void ls_pcie_iatu_outbound_set(struct ls_pcie *pcie, int idx, int type, ++ u64 cpu_addr, u64 pci_addr, u32 size); ++ ++/* Use bar match mode and MEM type as default */ ++void ls_pcie_iatu_inbound_set(struct ls_pcie *pcie, int idx, ++ int bar, u64 phys); ++ ++void ls_pcie_ep_dev_setup_bar(struct ls_ep_dev *ep, int bar, u32 size); ++ ++ ++void ls_pcie_ep_dev_cfg_enable(struct ls_ep_dev *ep); ++ ++int ls_pcie_ep_dbgfs_init(struct ls_pcie *pcie); ++int ls_pcie_ep_dbgfs_remove(struct ls_pcie *pcie); ++ ++#endif /* _PCIE_LAYERSCAPE_EP_H */ +--- a/drivers/pci/host/pci-layerscape.c ++++ b/drivers/pci/host/pci-layerscape.c +@@ -35,12 +35,14 @@ + #define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */ + #define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */ + +-/* PEX LUT registers */ +-#define PCIE_LUT_DBG 0x7FC /* PEX LUT Debug Register */ ++#define PCIE_IATU_NUM 6 ++ ++static void ls_pcie_host_init(struct pcie_port *pp); + + struct ls_pcie_drvdata { + u32 lut_offset; + u32 ltssm_shift; ++ u32 lut_dbg; + struct pcie_host_ops *ops; + }; + +@@ -86,6 +88,14 @@ static void ls_pcie_drop_msg_tlp(struct + iowrite32(val, pcie->pp.dbi_base + PCIE_STRFMR1); + } + ++static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie) ++{ ++ int i; ++ ++ for (i = 0; i < PCIE_IATU_NUM; i++) ++ dw_pcie_disable_outbound_atu(&pcie->pp, i); ++} ++ + static int ls1021_pcie_link_up(struct pcie_port *pp) + { + u32 state; +@@ -134,7 +144,7 @@ static int ls_pcie_link_up(struct pcie_p + struct ls_pcie *pcie = to_ls_pcie(pp); + u32 state; + +- state = (ioread32(pcie->lut + PCIE_LUT_DBG) >> ++ state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >> + pcie->drvdata->ltssm_shift) & + LTSSM_STATE_MASK; + +@@ -153,6 +163,9 @@ static void ls_pcie_host_init(struct pci + ls_pcie_clear_multifunction(pcie); + ls_pcie_drop_msg_tlp(pcie); + iowrite32(0, pcie->pp.dbi_base + PCIE_DBI_RO_WR_EN); ++ ++ ls_pcie_disable_outbound_atus(pcie); ++ dw_pcie_setup_rc(pp); + } + + static int ls_pcie_msi_host_init(struct pcie_port *pp, +@@ -196,20 +209,38 @@ static struct ls_pcie_drvdata ls1021_drv + static struct ls_pcie_drvdata ls1043_drvdata = { + .lut_offset = 0x10000, + .ltssm_shift = 24, ++ .lut_dbg = 0x7fc, ++ .ops = &ls_pcie_host_ops, ++}; ++ ++static struct ls_pcie_drvdata ls1046_drvdata = { ++ .lut_offset = 0x80000, ++ .ltssm_shift = 24, ++ .lut_dbg = 0x407fc, + .ops = &ls_pcie_host_ops, + }; + + static struct ls_pcie_drvdata ls2080_drvdata = { + .lut_offset = 0x80000, + .ltssm_shift = 0, ++ .lut_dbg = 0x7fc, ++ .ops = &ls_pcie_host_ops, ++}; ++ ++static struct ls_pcie_drvdata ls2088_drvdata = { ++ .lut_offset = 0x80000, ++ .ltssm_shift = 0, ++ .lut_dbg = 0x407fc, + .ops = &ls_pcie_host_ops, + }; + + static const struct of_device_id 
ls_pcie_of_match[] = { + { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata }, + { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata }, ++ { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata }, + { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, + { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata }, ++ { .compatible = "fsl,ls2088a-pcie", .data = &ls2088_drvdata }, + { }, + }; + +--- a/drivers/pci/host/pcie-designware.c ++++ b/drivers/pci/host/pcie-designware.c +@@ -478,6 +478,12 @@ int dw_pcie_wait_for_link(struct pcie_po + return -ETIMEDOUT; + } + ++void dw_pcie_disable_outbound_atu(struct pcie_port *pp, int index) ++{ ++ dw_pcie_writel_rc(pp, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_OUTBOUND | index); ++ dw_pcie_writel_rc(pp, PCIE_ATU_CR2, 0); ++} ++ + int dw_pcie_link_up(struct pcie_port *pp) + { + u32 val; +--- a/drivers/pci/host/pcie-designware.h ++++ b/drivers/pci/host/pcie-designware.h +@@ -82,5 +82,6 @@ int dw_pcie_wait_for_link(struct pcie_po + int dw_pcie_link_up(struct pcie_port *pp); + void dw_pcie_setup_rc(struct pcie_port *pp); + int dw_pcie_host_init(struct pcie_port *pp); ++void dw_pcie_disable_outbound_atu(struct pcie_port *pp, int index); + + #endif /* _PCIE_DESIGNWARE_H */ +--- a/drivers/pci/pcie/portdrv_core.c ++++ b/drivers/pci/pcie/portdrv_core.c +@@ -44,52 +44,30 @@ static void release_pcie_device(struct d + } + + /** +- * pcie_port_msix_add_entry - add entry to given array of MSI-X entries +- * @entries: Array of MSI-X entries +- * @new_entry: Index of the entry to add to the array +- * @nr_entries: Number of entries already in the array ++ * pcibios_check_service_irqs - check irqs in the device tree ++ * @dev: PCI Express port to handle ++ * @irqs: Array of irqs to populate ++ * @mask: Bitmask of port capabilities returned by get_port_device_capability() ++ * ++ * Return value: 0 means no service irqs in the device tree + * +- * Return value: Position of the added entry in the array + */ +-static int pcie_port_msix_add_entry( +- struct msix_entry *entries, int new_entry, int nr_entries) ++int __weak pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask) + { +- int j; +- +- for (j = 0; j < nr_entries; j++) +- if (entries[j].entry == new_entry) +- return j; +- +- entries[j].entry = new_entry; +- return j; ++ return 0; + } + + /** + * pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port + * @dev: PCI Express port to handle +- * @vectors: Array of interrupt vectors to populate ++ * @irqs: Array of interrupt vectors to populate + * @mask: Bitmask of port capabilities returned by get_port_device_capability() + * + * Return value: 0 on success, error code on failure + */ +-static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask) ++static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask) + { +- struct msix_entry *msix_entries; +- int idx[PCIE_PORT_DEVICE_MAXSERVICES]; +- int nr_entries, status, pos, i, nvec; +- u16 reg16; +- u32 reg32; +- +- nr_entries = pci_msix_vec_count(dev); +- if (nr_entries < 0) +- return nr_entries; +- BUG_ON(!nr_entries); +- if (nr_entries > PCIE_PORT_MAX_MSIX_ENTRIES) +- nr_entries = PCIE_PORT_MAX_MSIX_ENTRIES; +- +- msix_entries = kzalloc(sizeof(*msix_entries) * nr_entries, GFP_KERNEL); +- if (!msix_entries) +- return -ENOMEM; ++ int nr_entries, entry, nvec = 0; + + /* + * Allocate as many entries as the port wants, so that we can check +@@ -97,20 +75,13 @@ static int pcie_port_enable_msix(struct + * equal to the number of 
entries this port actually uses, we'll happily
+ * go through without any tricks.
+ */
+- for (i = 0; i < nr_entries; i++)
+- msix_entries[i].entry = i;
+-
+- status = pci_enable_msix_exact(dev, msix_entries, nr_entries);
+- if (status)
+- goto Exit;
+-
+- for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+- idx[i] = -1;
+- status = -EIO;
+- nvec = 0;
++ nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSIX_ENTRIES,
++ PCI_IRQ_MSIX);
++ if (nr_entries < 0)
++ return nr_entries;
+
+ if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
+- int entry;
++ u16 reg16;
+
+ /*
+ * The code below follows the PCI Express Base Specification 2.0
+@@ -125,18 +96,16 @@ static int pcie_port_enable_msix(struct
+ pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
+ entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
+ if (entry >= nr_entries)
+- goto Error;
++ goto out_free_irqs;
+
+- i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
+- if (i == nvec)
+- nvec++;
++ irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pci_irq_vector(dev, entry);
++ irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pci_irq_vector(dev, entry);
+
+- idx[PCIE_PORT_SERVICE_PME_SHIFT] = i;
+- idx[PCIE_PORT_SERVICE_HP_SHIFT] = i;
++ nvec = max(nvec, entry + 1);
+ }
+
+ if (mask & PCIE_PORT_SERVICE_AER) {
+- int entry;
++ u32 reg32, pos;
+
+ /*
+ * The code below follows Section 7.10.10 of the PCI Express
+@@ -151,13 +120,11 @@ static int pcie_port_enable_msix(struct
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
+ entry = reg32 >> 27;
+ if (entry >= nr_entries)
+- goto Error;
++ goto out_free_irqs;
+
+- i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
+- if (i == nvec)
+- nvec++;
++ irqs[PCIE_PORT_SERVICE_AER_SHIFT] = pci_irq_vector(dev, entry);
+
+- idx[PCIE_PORT_SERVICE_AER_SHIFT] = i;
++ nvec = max(nvec, entry + 1);
+ }
+
+ /*
+@@ -165,41 +132,54 @@ static int pcie_port_enable_msix(struct
+ * what we have. Otherwise, the port has some extra entries not for the
+ * services we know and we need to work around that.
+ */
+- if (nvec == nr_entries) {
+- status = 0;
+- } else {
++ if (nvec != nr_entries) {
+ /* Drop the temporary MSI-X setup */
+- pci_disable_msix(dev);
++ pci_free_irq_vectors(dev);
+
+ /* Now allocate the MSI-X vectors for real */
+- status = pci_enable_msix_exact(dev, msix_entries, nvec);
+- if (status)
+- goto Exit;
++ nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec,
++ PCI_IRQ_MSIX);
++ if (nr_entries < 0)
++ return nr_entries;
+ }
+
+- for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+- vectors[i] = idx[i] >= 0 ? msix_entries[idx[i]].vector : -1;
+-
+- Exit:
+- kfree(msix_entries);
+- return status;
++ return 0;
+
+- Error:
+- pci_disable_msix(dev);
+- goto Exit;
++out_free_irqs:
++ pci_free_irq_vectors(dev);
++ return -EIO;
+ }
+
+ /**
+- * init_service_irqs - initialize irqs for PCI Express port services
++ * pcie_init_service_irqs - initialize irqs for PCI Express port services
+ * @dev: PCI Express port to handle
+ * @irqs: Array of irqs to populate
+ * @mask: Bitmask of port capabilities returned by get_port_device_capability()
+ *
+ * Return value: Interrupt mode associated with the port
+ */
+-static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
++static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
+ {
+- int i, irq = -1;
++ unsigned flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
++ int ret, i;
++ int irq = -1;
++
++ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
++ irqs[i] = -1;
++
++ /* Check if the platform owns independent irq pins for AER/PME etc.
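++ * A platform hands them over by overriding the __weak
++ * pcibios_check_service_irqs() stub added above: a non-zero return
++ * means irqs[] was already filled from the device tree and the
++ * MSI/MSI-X setup below is skipped.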
++ * Some platforms may own independent AER/PME interrupts and set ++ * them in the device tree file. ++ */ ++ ret = pcibios_check_service_irqs(dev, irqs, mask); ++ if (ret) { ++ if (dev->irq) ++ irq = dev->irq; ++ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) ++ if (irqs[i] == -1 && i != PCIE_PORT_SERVICE_VC_SHIFT) ++ irqs[i] = irq; ++ return 0; ++ } + + /* + * If MSI cannot be used for PCIe PME or hotplug, we have to use +@@ -207,41 +187,25 @@ static int init_service_irqs(struct pci_ + */ + if (((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) || + ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())) { +- if (dev->irq) +- irq = dev->irq; +- goto no_msi; ++ flags &= ~PCI_IRQ_MSI; ++ } else { ++ /* Try to use MSI-X if supported */ ++ if (!pcie_port_enable_msix(dev, irqs, mask)) ++ return 0; + } + +- /* Try to use MSI-X if supported */ +- if (!pcie_port_enable_msix(dev, irqs, mask)) +- return 0; +- +- /* +- * We're not going to use MSI-X, so try MSI and fall back to INTx. +- * If neither MSI/MSI-X nor INTx available, try other interrupt. On +- * some platforms, root port doesn't support MSI/MSI-X/INTx in RC mode. +- */ +- if (!pci_enable_msi(dev) || dev->irq) +- irq = dev->irq; ++ ret = pci_alloc_irq_vectors(dev, 1, 1, flags); ++ if (ret < 0) ++ return -ENODEV; + +- no_msi: +- for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) +- irqs[i] = irq; +- irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1; ++ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) { ++ if (i != PCIE_PORT_SERVICE_VC_SHIFT) ++ irqs[i] = pci_irq_vector(dev, 0); ++ } + +- if (irq < 0) +- return -ENODEV; + return 0; + } + +-static void cleanup_service_irqs(struct pci_dev *dev) +-{ +- if (dev->msix_enabled) +- pci_disable_msix(dev); +- else if (dev->msi_enabled) +- pci_disable_msi(dev); +-} +- + /** + * get_port_device_capability - discover capabilities of a PCI Express port + * @dev: PCI Express port to examine +@@ -378,7 +342,7 @@ int pcie_port_device_register(struct pci + * that can be used in the absence of irqs. Allow them to determine + * if that is to be used. 
+ */ +- status = init_service_irqs(dev, irqs, capabilities); ++ status = pcie_init_service_irqs(dev, irqs, capabilities); + if (status) { + capabilities &= PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_HP; + if (!capabilities) +@@ -401,7 +365,7 @@ int pcie_port_device_register(struct pci + return 0; + + error_cleanup_irqs: +- cleanup_service_irqs(dev); ++ pci_free_irq_vectors(dev); + error_disable: + pci_disable_device(dev); + return status; +@@ -469,7 +433,7 @@ static int remove_iter(struct device *de + void pcie_port_device_remove(struct pci_dev *dev) + { + device_for_each_child(&dev->dev, NULL, remove_iter); +- cleanup_service_irqs(dev); ++ pci_free_irq_vectors(dev); + pci_disable_device(dev); + } + +@@ -499,7 +463,6 @@ static int pcie_port_probe_service(struc + if (status) + return status; + +- dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", driver->name); + get_device(dev); + return 0; + } +@@ -524,8 +487,6 @@ static int pcie_port_remove_service(stru + pciedev = to_pcie_device(dev); + driver = to_service_driver(dev->driver); + if (driver && driver->remove) { +- dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n", +- driver->name); + driver->remove(pciedev); + put_device(dev); + } +--- a/include/linux/pci.h ++++ b/include/linux/pci.h +@@ -1823,6 +1823,7 @@ void pcibios_release_device(struct pci_d + void pcibios_penalize_isa_irq(int irq, int active); + int pcibios_alloc_irq(struct pci_dev *dev); + void pcibios_free_irq(struct pci_dev *dev); ++int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask); + + #ifdef CONFIG_HIBERNATE_CALLBACKS + extern struct dev_pm_ops pcibios_pm_ops; diff --git a/target/linux/layerscape/patches-4.9/703-phy-support-layerscape.patch b/target/linux/layerscape/patches-4.9/703-phy-support-layerscape.patch new file mode 100644 index 000000000..312faf2c5 --- /dev/null +++ b/target/linux/layerscape/patches-4.9/703-phy-support-layerscape.patch @@ -0,0 +1,1753 @@ +From be07319b9897738a4ab1501880b7dd9be26eba66 Mon Sep 17 00:00:00 2001 +From: Yangbo Lu +Date: Mon, 25 Sep 2017 11:54:28 +0800 +Subject: [PATCH] phy: support layerscape + +This is a integrated patch for layerscape mdio-phy support. + +Signed-off-by: Bogdan Purcareata +Signed-off-by: Zhang Ying-22455 +Signed-off-by: costi +Signed-off-by: Madalin Bucur +Signed-off-by: Shaohui Xie +Signed-off-by: Florian Fainelli +Signed-off-by: Yangbo Lu +--- + drivers/net/phy/Kconfig | 11 + + drivers/net/phy/Makefile | 2 + + drivers/net/phy/aquantia.c | 28 + + drivers/net/phy/cortina.c | 118 ++++ + drivers/net/phy/fsl_backplane.c | 1358 +++++++++++++++++++++++++++++++++++++++ + drivers/net/phy/phy.c | 23 +- + drivers/net/phy/phy_device.c | 6 +- + drivers/net/phy/swphy.c | 1 + + include/linux/phy.h | 4 + + 9 files changed, 1544 insertions(+), 7 deletions(-) + create mode 100644 drivers/net/phy/cortina.c + create mode 100644 drivers/net/phy/fsl_backplane.c + +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -89,6 +89,12 @@ config MDIO_BUS_MUX_MMIOREG + config MDIO_CAVIUM + tristate + ++config MDIO_FSL_BACKPLANE ++ tristate "Support for backplane on Freescale XFI interface" ++ depends on OF_MDIO ++ help ++ This module provides a driver for Freescale XFI's backplane. ++ + config MDIO_GPIO + tristate "GPIO lib-based bitbanged MDIO buses" + depends on MDIO_BITBANG && GPIOLIB +@@ -298,6 +304,11 @@ config CICADA_PHY + ---help--- + Currently supports the cis8204 + ++config CORTINA_PHY ++ tristate "Cortina EDC CDR 10G Ethernet PHY" ++ ---help--- ++ Currently supports the CS4340 phy. 
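++ The driver reports a fixed 10Gb/s full-duplex link and derives
++ link state from the PHY's EDC convergence status; it does not
++ implement autonegotiation.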
++ + config DAVICOM_PHY + tristate "Davicom PHYs" + ---help--- +--- a/drivers/net/phy/Makefile ++++ b/drivers/net/phy/Makefile +@@ -30,6 +30,7 @@ obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += + obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o + obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o + obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o ++obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane.o + obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o + obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o + obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o +@@ -48,6 +49,7 @@ obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygn + obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o + obj-$(CONFIG_BROADCOM_PHY) += broadcom.o + obj-$(CONFIG_CICADA_PHY) += cicada.o ++obj-$(CONFIG_CORTINA_PHY) += cortina.o + obj-$(CONFIG_DAVICOM_PHY) += davicom.o + obj-$(CONFIG_DP83640_PHY) += dp83640.o + obj-$(CONFIG_DP83848_PHY) += dp83848.o +--- a/drivers/net/phy/aquantia.c ++++ b/drivers/net/phy/aquantia.c +@@ -21,6 +21,8 @@ + #define PHY_ID_AQ1202 0x03a1b445 + #define PHY_ID_AQ2104 0x03a1b460 + #define PHY_ID_AQR105 0x03a1b4a2 ++#define PHY_ID_AQR106 0x03a1b4d0 ++#define PHY_ID_AQR107 0x03a1b4e0 + #define PHY_ID_AQR405 0x03a1b4b0 + + #define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \ +@@ -154,6 +156,30 @@ static struct phy_driver aquantia_driver + .read_status = aquantia_read_status, + }, + { ++ .phy_id = PHY_ID_AQR106, ++ .phy_id_mask = 0xfffffff0, ++ .name = "Aquantia AQR106", ++ .features = PHY_AQUANTIA_FEATURES, ++ .flags = PHY_HAS_INTERRUPT, ++ .aneg_done = aquantia_aneg_done, ++ .config_aneg = aquantia_config_aneg, ++ .config_intr = aquantia_config_intr, ++ .ack_interrupt = aquantia_ack_interrupt, ++ .read_status = aquantia_read_status, ++}, ++{ ++ .phy_id = PHY_ID_AQR107, ++ .phy_id_mask = 0xfffffff0, ++ .name = "Aquantia AQR107", ++ .features = PHY_AQUANTIA_FEATURES, ++ .flags = PHY_HAS_INTERRUPT, ++ .aneg_done = aquantia_aneg_done, ++ .config_aneg = aquantia_config_aneg, ++ .config_intr = aquantia_config_intr, ++ .ack_interrupt = aquantia_ack_interrupt, ++ .read_status = aquantia_read_status, ++}, ++{ + .phy_id = PHY_ID_AQR405, + .phy_id_mask = 0xfffffff0, + .name = "Aquantia AQR405", +@@ -173,6 +199,8 @@ static struct mdio_device_id __maybe_unu + { PHY_ID_AQ1202, 0xfffffff0 }, + { PHY_ID_AQ2104, 0xfffffff0 }, + { PHY_ID_AQR105, 0xfffffff0 }, ++ { PHY_ID_AQR106, 0xfffffff0 }, ++ { PHY_ID_AQR107, 0xfffffff0 }, + { PHY_ID_AQR405, 0xfffffff0 }, + { } + }; +--- /dev/null ++++ b/drivers/net/phy/cortina.c +@@ -0,0 +1,118 @@ ++/* ++ * Copyright 2017 NXP ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * CORTINA is a registered trademark of Cortina Systems, Inc. 
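++ *
++ * The PHY is accessed through Clause 45 MDIO. The driver does not
++ * implement autonegotiation; cortina_read_status() reports the link
++ * as up once the EDC convergence status bit is set.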
++ * ++ */ ++#include ++#include ++ ++#define PHY_ID_CS4340 0x13e51002 ++ ++#define VILLA_GLOBAL_CHIP_ID_LSB 0x0 ++#define VILLA_GLOBAL_CHIP_ID_MSB 0x1 ++ ++#define VILLA_GLOBAL_GPIO_1_INTS 0x017 ++ ++static int cortina_read_reg(struct phy_device *phydev, u16 regnum) ++{ ++ return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, ++ MII_ADDR_C45 | regnum); ++} ++ ++static int cortina_config_aneg(struct phy_device *phydev) ++{ ++ phydev->supported = SUPPORTED_10000baseT_Full; ++ phydev->advertising = SUPPORTED_10000baseT_Full; ++ ++ return 0; ++} ++ ++static int cortina_read_status(struct phy_device *phydev) ++{ ++ int gpio_int_status, ret = 0; ++ ++ gpio_int_status = cortina_read_reg(phydev, VILLA_GLOBAL_GPIO_1_INTS); ++ if (gpio_int_status < 0) { ++ ret = gpio_int_status; ++ goto err; ++ } ++ ++ if (gpio_int_status & 0x8) { ++ /* up when edc_convergedS set */ ++ phydev->speed = SPEED_10000; ++ phydev->duplex = DUPLEX_FULL; ++ phydev->link = 1; ++ } else { ++ phydev->link = 0; ++ } ++ ++err: ++ return ret; ++} ++ ++static int cortina_soft_reset(struct phy_device *phydev) ++{ ++ return 0; ++} ++ ++static int cortina_probe(struct phy_device *phydev) ++{ ++ u32 phy_id = 0; ++ int id_lsb = 0, id_msb = 0; ++ ++ /* Read device id from phy registers. */ ++ id_lsb = cortina_read_reg(phydev, VILLA_GLOBAL_CHIP_ID_LSB); ++ if (id_lsb < 0) ++ return -ENXIO; ++ ++ phy_id = id_lsb << 16; ++ ++ id_msb = cortina_read_reg(phydev, VILLA_GLOBAL_CHIP_ID_MSB); ++ if (id_msb < 0) ++ return -ENXIO; ++ ++ phy_id |= id_msb; ++ ++ /* Make sure the device tree binding matched the driver with the ++ * right device. ++ */ ++ if (phy_id != phydev->drv->phy_id) { ++ phydev_err(phydev, "Error matching phy with %s driver\n", ++ phydev->drv->name); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ ++static struct phy_driver cortina_driver[] = { ++{ ++ .phy_id = PHY_ID_CS4340, ++ .phy_id_mask = 0xffffffff, ++ .name = "Cortina CS4340", ++ .config_aneg = cortina_config_aneg, ++ .read_status = cortina_read_status, ++ .soft_reset = cortina_soft_reset, ++ .probe = cortina_probe, ++}, ++}; ++ ++module_phy_driver(cortina_driver); ++ ++static struct mdio_device_id __maybe_unused cortina_tbl[] = { ++ { PHY_ID_CS4340, 0xffffffff}, ++ {}, ++}; ++ ++MODULE_DEVICE_TABLE(mdio, cortina_tbl); +--- /dev/null ++++ b/drivers/net/phy/fsl_backplane.c +@@ -0,0 +1,1358 @@ ++/* Freescale backplane driver. ++ * Author: Shaohui Xie ++ * ++ * Copyright 2015 Freescale Semiconductor, Inc. ++ * ++ * Licensed under the GPL-2 or later. 
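++ *
++ * The driver implements IEEE 802.3ap-style backplane operation:
++ * clause 73 autonegotiation selects 1000Base-KX or 10GBase-KR, and
++ * for KR a clause 72 link-training handshake exchanges coefficient
++ * update requests (C(-1), C(0), C(+1)) with the link partner while
++ * tuning the local SerDes TECR0 equalization ratios until both
++ * receivers report ready.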
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* XFI PCS Device Identifier */ ++#define FSL_PCS_PHY_ID 0x0083e400 ++ ++/* Freescale KR PMD registers */ ++#define FSL_KR_PMD_CTRL 0x96 ++#define FSL_KR_PMD_STATUS 0x97 ++#define FSL_KR_LP_CU 0x98 ++#define FSL_KR_LP_STATUS 0x99 ++#define FSL_KR_LD_CU 0x9a ++#define FSL_KR_LD_STATUS 0x9b ++ ++/* Freescale KR PMD defines */ ++#define PMD_RESET 0x1 ++#define PMD_STATUS_SUP_STAT 0x4 ++#define PMD_STATUS_FRAME_LOCK 0x2 ++#define TRAIN_EN 0x3 ++#define TRAIN_DISABLE 0x1 ++#define RX_STAT 0x1 ++ ++#define FSL_KR_RX_LINK_STAT_MASK 0x1000 ++#define FSL_XFI_PCS_10GR_SR1 0x20 ++ ++/* Freescale KX PCS mode register */ ++#define FSL_PCS_IF_MODE 0x8014 ++ ++/* Freescale KX PCS mode register init value */ ++#define IF_MODE_INIT 0x8 ++ ++/* Freescale KX/KR AN registers */ ++#define FSL_AN_AD1 0x11 ++#define FSL_AN_BP_STAT 0x30 ++ ++/* Freescale KX/KR AN registers defines */ ++#define AN_CTRL_INIT 0x1200 ++#define KX_AN_AD1_INIT 0x25 ++#define KR_AN_AD1_INIT 0x85 ++#define AN_LNK_UP_MASK 0x4 ++#define KR_AN_MASK 0x8 ++#define TRAIN_FAIL 0x8 ++ ++/* C(-1) */ ++#define BIN_M1 0 ++/* C(1) */ ++#define BIN_LONG 1 ++#define BIN_M1_SEL 6 ++#define BIN_Long_SEL 7 ++#define CDR_SEL_MASK 0x00070000 ++#define BIN_SNAPSHOT_NUM 5 ++#define BIN_M1_THRESHOLD 3 ++#define BIN_LONG_THRESHOLD 2 ++ ++#define PRE_COE_SHIFT 22 ++#define POST_COE_SHIFT 16 ++#define ZERO_COE_SHIFT 8 ++ ++#define PRE_COE_MAX 0x0 ++#define PRE_COE_MIN 0x8 ++#define POST_COE_MAX 0x0 ++#define POST_COE_MIN 0x10 ++#define ZERO_COE_MAX 0x30 ++#define ZERO_COE_MIN 0x0 ++ ++#define TECR0_INIT 0x24200000 ++#define RATIO_PREQ 0x3 ++#define RATIO_PST1Q 0xd ++#define RATIO_EQ 0x20 ++ ++#define GCR0_RESET_MASK 0x600000 ++#define GCR1_SNP_START_MASK 0x00000040 ++#define GCR1_CTL_SNP_START_MASK 0x00002000 ++#define GCR1_REIDL_TH_MASK 0x00700000 ++#define GCR1_REIDL_EX_SEL_MASK 0x000c0000 ++#define GCR1_REIDL_ET_MAS_MASK 0x00004000 ++#define TECR0_AMP_RED_MASK 0x0000003f ++ ++#define RECR1_CTL_SNP_DONE_MASK 0x00000002 ++#define RECR1_SNP_DONE_MASK 0x00000004 ++#define TCSR1_SNP_DATA_MASK 0x0000ffc0 ++#define TCSR1_SNP_DATA_SHIFT 6 ++#define TCSR1_EQ_SNPBIN_SIGN_MASK 0x100 ++ ++#define RECR1_GAINK2_MASK 0x0f000000 ++#define RECR1_GAINK2_SHIFT 24 ++#define RECR1_GAINK3_MASK 0x000f0000 ++#define RECR1_GAINK3_SHIFT 16 ++#define RECR1_OFFSET_MASK 0x00003f80 ++#define RECR1_OFFSET_SHIFT 7 ++#define RECR1_BLW_MASK 0x00000f80 ++#define RECR1_BLW_SHIFT 7 ++#define EYE_CTRL_SHIFT 12 ++#define BASE_WAND_SHIFT 10 ++ ++#define XGKR_TIMEOUT 1050 ++ ++#define INCREMENT 1 ++#define DECREMENT 2 ++#define TIMEOUT_LONG 3 ++#define TIMEOUT_M1 3 ++ ++#define RX_READY_MASK 0x8000 ++#define PRESET_MASK 0x2000 ++#define INIT_MASK 0x1000 ++#define COP1_MASK 0x30 ++#define COP1_SHIFT 4 ++#define COZ_MASK 0xc ++#define COZ_SHIFT 2 ++#define COM1_MASK 0x3 ++#define COM1_SHIFT 0 ++#define REQUEST_MASK 0x3f ++#define LD_ALL_MASK (PRESET_MASK | INIT_MASK | \ ++ COP1_MASK | COZ_MASK | COM1_MASK) ++ ++#define NEW_ALGORITHM_TRAIN_TX ++#ifdef NEW_ALGORITHM_TRAIN_TX ++#define FORCE_INC_COP1_NUMBER 0 ++#define FORCE_INC_COM1_NUMBER 1 ++#endif ++ ++#define VAL_INVALID 0xff ++ ++static const u32 preq_table[] = {0x0, 0x1, 0x3, 0x5, ++ 0x7, 0x9, 0xb, 0xc, VAL_INVALID}; ++static const u32 pst1q_table[] = {0x0, 0x1, 0x3, 0x5, 0x7, ++ 0x9, 0xb, 0xd, 0xf, 0x10, VAL_INVALID}; ++ ++enum backplane_mode { ++ PHY_BACKPLANE_1000BASE_KX, ++ 
PHY_BACKPLANE_10GBASE_KR, ++ PHY_BACKPLANE_INVAL ++}; ++ ++enum coe_field { ++ COE_COP1, ++ COE_COZ, ++ COE_COM ++}; ++ ++enum coe_update { ++ COE_NOTUPDATED, ++ COE_UPDATED, ++ COE_MIN, ++ COE_MAX, ++ COE_INV ++}; ++ ++enum train_state { ++ DETECTING_LP, ++ TRAINED, ++}; ++ ++struct per_lane_ctrl_status { ++ __be32 gcr0; /* 0x.000 - General Control Register 0 */ ++ __be32 gcr1; /* 0x.004 - General Control Register 1 */ ++ __be32 gcr2; /* 0x.008 - General Control Register 2 */ ++ __be32 resv1; /* 0x.00C - Reserved */ ++ __be32 recr0; /* 0x.010 - Receive Equalization Control Register 0 */ ++ __be32 recr1; /* 0x.014 - Receive Equalization Control Register 1 */ ++ __be32 tecr0; /* 0x.018 - Transmit Equalization Control Register 0 */ ++ __be32 resv2; /* 0x.01C - Reserved */ ++ __be32 tlcr0; /* 0x.020 - TTL Control Register 0 */ ++ __be32 tlcr1; /* 0x.024 - TTL Control Register 1 */ ++ __be32 tlcr2; /* 0x.028 - TTL Control Register 2 */ ++ __be32 tlcr3; /* 0x.02C - TTL Control Register 3 */ ++ __be32 tcsr0; /* 0x.030 - Test Control/Status Register 0 */ ++ __be32 tcsr1; /* 0x.034 - Test Control/Status Register 1 */ ++ __be32 tcsr2; /* 0x.038 - Test Control/Status Register 2 */ ++ __be32 tcsr3; /* 0x.03C - Test Control/Status Register 3 */ ++}; ++ ++struct tx_condition { ++ bool bin_m1_late_early; ++ bool bin_long_late_early; ++ bool bin_m1_stop; ++ bool bin_long_stop; ++ bool tx_complete; ++ bool sent_init; ++ int m1_min_max_cnt; ++ int long_min_max_cnt; ++#ifdef NEW_ALGORITHM_TRAIN_TX ++ int pre_inc; ++ int post_inc; ++#endif ++}; ++ ++struct fsl_xgkr_inst { ++ void *reg_base; ++ struct phy_device *phydev; ++ struct tx_condition tx_c; ++ struct delayed_work xgkr_wk; ++ enum train_state state; ++ u32 ld_update; ++ u32 ld_status; ++ u32 ratio_preq; ++ u32 ratio_pst1q; ++ u32 adpt_eq; ++}; ++ ++static void tx_condition_init(struct tx_condition *tx_c) ++{ ++ tx_c->bin_m1_late_early = true; ++ tx_c->bin_long_late_early = false; ++ tx_c->bin_m1_stop = false; ++ tx_c->bin_long_stop = false; ++ tx_c->tx_complete = false; ++ tx_c->sent_init = false; ++ tx_c->m1_min_max_cnt = 0; ++ tx_c->long_min_max_cnt = 0; ++#ifdef NEW_ALGORITHM_TRAIN_TX ++ tx_c->pre_inc = FORCE_INC_COM1_NUMBER; ++ tx_c->post_inc = FORCE_INC_COP1_NUMBER; ++#endif ++} ++ ++static void tune_tecr0(struct fsl_xgkr_inst *inst) ++{ ++ struct per_lane_ctrl_status *reg_base = inst->reg_base; ++ u32 val; ++ ++ val = TECR0_INIT | ++ inst->adpt_eq << ZERO_COE_SHIFT | ++ inst->ratio_preq << PRE_COE_SHIFT | ++ inst->ratio_pst1q << POST_COE_SHIFT; ++ ++ /* reset the lane */ ++ iowrite32(ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK, ++ &reg_base->gcr0); ++ udelay(1); ++ iowrite32(val, &reg_base->tecr0); ++ udelay(1); ++ /* unreset the lane */ ++ iowrite32(ioread32(&reg_base->gcr0) | GCR0_RESET_MASK, ++ &reg_base->gcr0); ++ udelay(1); ++} ++ ++static void start_lt(struct phy_device *phydev) ++{ ++ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_EN); ++} ++ ++static void stop_lt(struct phy_device *phydev) ++{ ++ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_DISABLE); ++} ++ ++static void reset_gcr0(struct fsl_xgkr_inst *inst) ++{ ++ struct per_lane_ctrl_status *reg_base = inst->reg_base; ++ ++ iowrite32(ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK, ++ &reg_base->gcr0); ++ udelay(1); ++ iowrite32(ioread32(&reg_base->gcr0) | GCR0_RESET_MASK, ++ &reg_base->gcr0); ++ udelay(1); ++} ++ ++static void lane_set_1gkx(void *reg) ++{ ++ struct per_lane_ctrl_status *reg_base = reg; ++ u32 val; ++ ++ /* reset the lane */ ++ iowrite32(ioread32(&reg_base->gcr0) &
~GCR0_RESET_MASK, ++ &reg_base->gcr0); ++ udelay(1); ++ ++ /* set gcr1 for 1GKX */ ++ val = ioread32(&reg_base->gcr1); ++ val &= ~(GCR1_REIDL_TH_MASK | GCR1_REIDL_EX_SEL_MASK | ++ GCR1_REIDL_ET_MAS_MASK); ++ iowrite32(val, &reg_base->gcr1); ++ udelay(1); ++ ++ /* set tecr0 for 1GKX */ ++ val = ioread32(&reg_base->tecr0); ++ val &= ~TECR0_AMP_RED_MASK; ++ iowrite32(val, &reg_base->tecr0); ++ udelay(1); ++ ++ /* unreset the lane */ ++ iowrite32(ioread32(&reg_base->gcr0) | GCR0_RESET_MASK, ++ &reg_base->gcr0); ++ udelay(1); ++} ++ ++static void reset_lt(struct phy_device *phydev) ++{ ++ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1, PMD_RESET); ++ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_DISABLE); ++ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LD_CU, 0); ++ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LD_STATUS, 0); ++ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_STATUS, 0); ++ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_CU, 0); ++ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS, 0); ++} ++ ++static void start_xgkr_state_machine(struct delayed_work *work) ++{ ++ queue_delayed_work(system_power_efficient_wq, work, ++ msecs_to_jiffies(XGKR_TIMEOUT)); ++} ++ ++static void start_xgkr_an(struct phy_device *phydev) ++{ ++ struct fsl_xgkr_inst *inst; ++ ++ reset_lt(phydev); ++ phy_write_mmd(phydev, MDIO_MMD_AN, FSL_AN_AD1, KR_AN_AD1_INIT); ++ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT); ++ ++ inst = phydev->priv; ++ ++ /* start state machine */ ++ start_xgkr_state_machine(&inst->xgkr_wk); ++} ++ ++static void start_1gkx_an(struct phy_device *phydev) ++{ ++ phy_write_mmd(phydev, MDIO_MMD_PCS, FSL_PCS_IF_MODE, IF_MODE_INIT); ++ phy_write_mmd(phydev, MDIO_MMD_AN, FSL_AN_AD1, KX_AN_AD1_INIT); ++ phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); ++ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT); ++} ++ ++static void ld_coe_status(struct fsl_xgkr_inst *inst) ++{ ++ phy_write_mmd(inst->phydev, MDIO_MMD_PMAPMD, ++ FSL_KR_LD_STATUS, inst->ld_status); ++} ++ ++static void ld_coe_update(struct fsl_xgkr_inst *inst) ++{ ++ dev_dbg(&inst->phydev->mdio.dev, "sending request: %x\n", inst->ld_update); ++ phy_write_mmd(inst->phydev, MDIO_MMD_PMAPMD, ++ FSL_KR_LD_CU, inst->ld_update); ++} ++ ++static void init_inst(struct fsl_xgkr_inst *inst, int reset) ++{ ++ if (reset) { ++ inst->ratio_preq = RATIO_PREQ; ++ inst->ratio_pst1q = RATIO_PST1Q; ++ inst->adpt_eq = RATIO_EQ; ++ tune_tecr0(inst); ++ } ++ ++ tx_condition_init(&inst->tx_c); ++ inst->state = DETECTING_LP; ++ inst->ld_status &= RX_READY_MASK; ++ ld_coe_status(inst); ++ inst->ld_update = 0; ++ inst->ld_status &= ~RX_READY_MASK; ++ ld_coe_status(inst); ++} ++ ++#ifdef NEW_ALGORITHM_TRAIN_TX ++static int get_median_gaink2(u32 *reg) ++{ ++ int gaink2_snap_shot[BIN_SNAPSHOT_NUM]; ++ u32 rx_eq_snp; ++ struct per_lane_ctrl_status *reg_base; ++ int timeout; ++ int i, j, tmp, pos; ++ ++ reg_base = (struct per_lane_ctrl_status *)reg; ++ ++ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) { ++ /* wait until RECR1_CTL_SNP_DONE_MASK has cleared */ ++ timeout = 100; ++ while (ioread32(&reg_base->recr1) & ++ RECR1_CTL_SNP_DONE_MASK) { ++ udelay(1); ++ timeout--; ++ if (timeout == 0) ++ break; ++ } ++ ++ /* start snap shot */ ++ iowrite32((ioread32(&reg_base->gcr1) | ++ GCR1_CTL_SNP_START_MASK), ++ &reg_base->gcr1); ++ ++ /* wait for SNP done */ ++ timeout = 100; ++ while (!(ioread32(&reg_base->recr1) & ++ RECR1_CTL_SNP_DONE_MASK)) { ++ udelay(1); ++ timeout--; ++ if (timeout == 0) ++ break; ++ } ++ ++ /* read and save the snap shot */ ++ rx_eq_snp =
ioread32(&reg_base->recr1); ++ gaink2_snap_shot[i] = (rx_eq_snp & RECR1_GAINK2_MASK) >> ++ RECR1_GAINK2_SHIFT; ++ ++ /* terminate the snap shot by setting GCR1[REQ_CTL_SNP] */ ++ iowrite32((ioread32(&reg_base->gcr1) & ++ ~GCR1_CTL_SNP_START_MASK), ++ &reg_base->gcr1); ++ } ++ ++ /* get the median of the 5 snapshots */ ++ for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) { ++ tmp = gaink2_snap_shot[i]; ++ pos = i; ++ for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) { ++ if (gaink2_snap_shot[j] < tmp) { ++ tmp = gaink2_snap_shot[j]; ++ pos = j; ++ } ++ } ++ ++ gaink2_snap_shot[pos] = gaink2_snap_shot[i]; ++ gaink2_snap_shot[i] = tmp; ++ } ++ ++ return gaink2_snap_shot[2]; ++} ++#endif ++ ++static bool is_bin_early(int bin_sel, void *reg) ++{ ++ bool early = false; ++ int bin_snap_shot[BIN_SNAPSHOT_NUM]; ++ int i, negative_count = 0; ++ struct per_lane_ctrl_status *reg_base = reg; ++ int timeout; ++ ++ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) { ++ /* wait until RECR1_SNP_DONE_MASK has cleared */ ++ timeout = 100; ++ while ((ioread32(&reg_base->recr1) & RECR1_SNP_DONE_MASK)) { ++ udelay(1); ++ timeout--; ++ if (timeout == 0) ++ break; ++ } ++ ++ /* set TCSR1[CDR_SEL] to BinM1/BinLong */ ++ if (bin_sel == BIN_M1) { ++ iowrite32((ioread32(&reg_base->tcsr1) & ++ ~CDR_SEL_MASK) | BIN_M1_SEL, ++ &reg_base->tcsr1); ++ } else { ++ iowrite32((ioread32(&reg_base->tcsr1) & ++ ~CDR_SEL_MASK) | BIN_Long_SEL, ++ &reg_base->tcsr1); ++ } ++ ++ /* start snap shot */ ++ iowrite32(ioread32(&reg_base->gcr1) | GCR1_SNP_START_MASK, ++ &reg_base->gcr1); ++ ++ /* wait for SNP done */ ++ timeout = 100; ++ while (!(ioread32(&reg_base->recr1) & RECR1_SNP_DONE_MASK)) { ++ udelay(1); ++ timeout--; ++ if (timeout == 0) ++ break; ++ } ++ ++ /* read and save the snap shot */ ++ bin_snap_shot[i] = (ioread32(&reg_base->tcsr1) & ++ TCSR1_SNP_DATA_MASK) >> TCSR1_SNP_DATA_SHIFT; ++ if (bin_snap_shot[i] & TCSR1_EQ_SNPBIN_SIGN_MASK) ++ negative_count++; ++ ++ /* terminate the snap shot by setting GCR1[REQ_CTL_SNP] */ ++ iowrite32(ioread32(&reg_base->gcr1) & ~GCR1_SNP_START_MASK, ++ &reg_base->gcr1); ++ } ++ ++ if (((bin_sel == BIN_M1) && (negative_count > BIN_M1_THRESHOLD)) || ++ ((bin_sel == BIN_LONG && (negative_count > BIN_LONG_THRESHOLD)))) { ++ early = true; ++ } ++ ++ return early; ++} ++ ++static void train_tx(struct fsl_xgkr_inst *inst) ++{ ++ struct phy_device *phydev = inst->phydev; ++ struct tx_condition *tx_c = &inst->tx_c; ++ bool bin_m1_early, bin_long_early; ++ u32 lp_status, old_ld_update; ++ u32 status_cop1, status_coz, status_com1; ++ u32 req_cop1, req_coz, req_com1, req_preset, req_init; ++ u32 temp; ++#ifdef NEW_ALGORITHM_TRAIN_TX ++ u32 median_gaink2; ++#endif ++ ++recheck: ++ if (tx_c->bin_long_stop && tx_c->bin_m1_stop) { ++ tx_c->tx_complete = true; ++ inst->ld_status |= RX_READY_MASK; ++ ld_coe_status(inst); ++ /* tell LP we are ready */ ++ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, ++ FSL_KR_PMD_STATUS, RX_STAT); ++ return; ++ } ++ ++ /* We start by checking the current LP status. If we got any responses, ++ * we can clear up the appropriate update request so that the ++ * subsequent code may easily issue new update requests if needed.
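++ * Each coefficient (C(+1), C(0), C(-1)) reports a 2-bit status of ++ * NOT UPDATED, UPDATED, MIN or MAX.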
++ */ ++ lp_status = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS) & ++ REQUEST_MASK; ++ status_cop1 = (lp_status & COP1_MASK) >> COP1_SHIFT; ++ status_coz = (lp_status & COZ_MASK) >> COZ_SHIFT; ++ status_com1 = (lp_status & COM1_MASK) >> COM1_SHIFT; ++ ++ old_ld_update = inst->ld_update; ++ req_cop1 = (old_ld_update & COP1_MASK) >> COP1_SHIFT; ++ req_coz = (old_ld_update & COZ_MASK) >> COZ_SHIFT; ++ req_com1 = (old_ld_update & COM1_MASK) >> COM1_SHIFT; ++ req_preset = old_ld_update & PRESET_MASK; ++ req_init = old_ld_update & INIT_MASK; ++ ++ /* IEEE802.3-2008, 72.6.10.2.3.1 ++ * We may clear PRESET when all coefficients show UPDATED or MAX. ++ */ ++ if (req_preset) { ++ if ((status_cop1 == COE_UPDATED || status_cop1 == COE_MAX) && ++ (status_coz == COE_UPDATED || status_coz == COE_MAX) && ++ (status_com1 == COE_UPDATED || status_com1 == COE_MAX)) { ++ inst->ld_update &= ~PRESET_MASK; ++ } ++ } ++ ++ /* IEEE802.3-2008, 72.6.10.2.3.2 ++ * We may clear INITIALIZE when no coefficients show NOT UPDATED. ++ */ ++ if (req_init) { ++ if (status_cop1 != COE_NOTUPDATED && ++ status_coz != COE_NOTUPDATED && ++ status_com1 != COE_NOTUPDATED) { ++ inst->ld_update &= ~INIT_MASK; ++ } ++ } ++ ++ /* IEEE802.3-2008, 72.6.10.2.3.2 ++ * we send initialize to the other side to ensure default settings ++ * for the LP. Naturally, we should do this only once. ++ */ ++ if (!tx_c->sent_init) { ++ if (!lp_status && !(old_ld_update & (LD_ALL_MASK))) { ++ inst->ld_update = INIT_MASK; ++ tx_c->sent_init = true; ++ } ++ } ++ ++ /* IEEE802.3-2008, 72.6.10.2.3.3 ++ * We set coefficient requests to HOLD when we get the information ++ * about any updates. On clearing our prior response, we also update ++ * our internal status. ++ */ ++ if (status_cop1 != COE_NOTUPDATED) { ++ if (req_cop1) { ++ inst->ld_update &= ~COP1_MASK; ++#ifdef NEW_ALGORITHM_TRAIN_TX ++ if (tx_c->post_inc) { ++ if (req_cop1 == INCREMENT && ++ status_cop1 == COE_MAX) { ++ tx_c->post_inc = 0; ++ tx_c->bin_long_stop = true; ++ tx_c->bin_m1_stop = true; ++ } else { ++ tx_c->post_inc -= 1; ++ } ++ ++ ld_coe_update(inst); ++ goto recheck; ++ } ++#endif ++ if ((req_cop1 == DECREMENT && status_cop1 == COE_MIN) || ++ (req_cop1 == INCREMENT && status_cop1 == COE_MAX)) { ++ dev_dbg(&inst->phydev->mdio.dev, "COP1 hit limit %s", ++ (status_cop1 == COE_MIN) ? ++ "DEC MIN" : "INC MAX"); ++ tx_c->long_min_max_cnt++; ++ if (tx_c->long_min_max_cnt >= TIMEOUT_LONG) { ++ tx_c->bin_long_stop = true; ++ ld_coe_update(inst); ++ goto recheck; ++ } ++ } ++ } ++ } ++ ++ if (status_coz != COE_NOTUPDATED) { ++ if (req_coz) ++ inst->ld_update &= ~COZ_MASK; ++ } ++ ++ if (status_com1 != COE_NOTUPDATED) { ++ if (req_com1) { ++ inst->ld_update &= ~COM1_MASK; ++#ifdef NEW_ALGORITHM_TRAIN_TX ++ if (tx_c->pre_inc) { ++ if (req_com1 == INCREMENT && ++ status_com1 == COE_MAX) ++ tx_c->pre_inc = 0; ++ else ++ tx_c->pre_inc -= 1; ++ ++ ld_coe_update(inst); ++ goto recheck; ++ } ++#endif ++ /* Stop if we have reached the limit for a parameter. */ ++ if ((req_com1 == DECREMENT && status_com1 == COE_MIN) || ++ (req_com1 == INCREMENT && status_com1 == COE_MAX)) { ++ dev_dbg(&inst->phydev->mdio.dev, "COM1 hit limit %s", ++ (status_com1 == COE_MIN) ?
++ "DEC MIN" : "INC MAX"); ++ tx_c->m1_min_max_cnt++; ++ if (tx_c->m1_min_max_cnt >= TIMEOUT_M1) { ++ tx_c->bin_m1_stop = true; ++ ld_coe_update(inst); ++ goto recheck; ++ } ++ } ++ } ++ } ++ ++ if (old_ld_update != inst->ld_update) { ++ ld_coe_update(inst); ++ /* Redo these status checks and updates until we have no more ++ * changes, to speed up the overall process. ++ */ ++ goto recheck; ++ } ++ ++ /* Do nothing if we have pending request. */ ++ if ((req_coz || req_com1 || req_cop1)) ++ return; ++ else if (lp_status) ++ /* No pending request but LP status was not reverted to ++ * not updated. ++ */ ++ return; ++ ++#ifdef NEW_ALGORITHM_TRAIN_TX ++ if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) { ++ if (tx_c->pre_inc) { ++ inst->ld_update = INCREMENT << COM1_SHIFT; ++ ld_coe_update(inst); ++ return; ++ } ++ ++ if (status_cop1 != COE_MAX) { ++ median_gaink2 = get_median_gaink2(inst->reg_base); ++ if (median_gaink2 == 0xf) { ++ tx_c->post_inc = 1; ++ } else { ++ /* Gaink2 median lower than "F" */ ++ tx_c->bin_m1_stop = true; ++ tx_c->bin_long_stop = true; ++ goto recheck; ++ } ++ } else { ++ /* C1 MAX */ ++ tx_c->bin_m1_stop = true; ++ tx_c->bin_long_stop = true; ++ goto recheck; ++ } ++ ++ if (tx_c->post_inc) { ++ inst->ld_update = INCREMENT << COP1_SHIFT; ++ ld_coe_update(inst); ++ return; ++ } ++ } ++#endif ++ ++ /* snapshot and select bin */ ++ bin_m1_early = is_bin_early(BIN_M1, inst->reg_base); ++ bin_long_early = is_bin_early(BIN_LONG, inst->reg_base); ++ ++ if (!tx_c->bin_m1_stop && !tx_c->bin_m1_late_early && bin_m1_early) { ++ tx_c->bin_m1_stop = true; ++ goto recheck; ++ } ++ ++ if (!tx_c->bin_long_stop && ++ tx_c->bin_long_late_early && !bin_long_early) { ++ tx_c->bin_long_stop = true; ++ goto recheck; ++ } ++ ++ /* IEEE802.3-2008, 72.6.10.2.3.3 ++ * We only request coefficient updates when no PRESET/INITIALIZE is ++ * pending. We also only request coefficient updates when the ++ * corresponding status is NOT UPDATED and nothing is pending. ++ */ ++ if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) { ++ if (!tx_c->bin_long_stop) { ++ /* BinM1 correction means changing COM1 */ ++ if (!status_com1 && !(inst->ld_update & COM1_MASK)) { ++ /* Avoid BinM1Late by requesting an ++ * immediate decrement. ++ */ ++ if (!bin_m1_early) { ++ /* request decrement c(-1) */ ++ temp = DECREMENT << COM1_SHIFT; ++ inst->ld_update = temp; ++ ld_coe_update(inst); ++ tx_c->bin_m1_late_early = bin_m1_early; ++ return; ++ } ++ } ++ ++ /* BinLong correction means changing COP1 */ ++ if (!status_cop1 && !(inst->ld_update & COP1_MASK)) { ++ /* Locate BinLong transition point (if any) ++ * while avoiding BinM1Late. 
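++ * An early BinLong snapshot asks the LP to increment c(1); ++ * a late one asks for a decrement.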
++ */ ++ if (bin_long_early) { ++ /* request increment c(1) */ ++ temp = INCREMENT << COP1_SHIFT; ++ inst->ld_update = temp; ++ } else { ++ /* request decrement c(1) */ ++ temp = DECREMENT << COP1_SHIFT; ++ inst->ld_update = temp; ++ } ++ ++ ld_coe_update(inst); ++ tx_c->bin_long_late_early = bin_long_early; ++ } ++ /* We try to finish BinLong before we do BinM1 */ ++ return; ++ } ++ ++ if (!tx_c->bin_m1_stop) { ++ /* BinM1 correction means changing COM1 */ ++ if (!status_com1 && !(inst->ld_update & COM1_MASK)) { ++ /* Locate BinM1 transition point (if any) */ ++ if (bin_m1_early) { ++ /* request increment c(-1) */ ++ temp = INCREMENT << COM1_SHIFT; ++ inst->ld_update = temp; ++ } else { ++ /* request decrement c(-1) */ ++ temp = DECREMENT << COM1_SHIFT; ++ inst->ld_update = temp; ++ } ++ ++ ld_coe_update(inst); ++ tx_c->bin_m1_late_early = bin_m1_early; ++ } ++ } ++ } ++} ++ ++static int is_link_up(struct phy_device *phydev) ++{ ++ int val; ++ ++ phy_read_mmd(phydev, MDIO_MMD_PCS, FSL_XFI_PCS_10GR_SR1); ++ val = phy_read_mmd(phydev, MDIO_MMD_PCS, FSL_XFI_PCS_10GR_SR1); ++ ++ return (val & FSL_KR_RX_LINK_STAT_MASK) ? 1 : 0; ++} ++ ++static int is_link_training_fail(struct phy_device *phydev) ++{ ++ int val; ++ int timeout = 100; ++ ++ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_STATUS); ++ if (!(val & TRAIN_FAIL) && (val & RX_STAT)) { ++ /* check LNK_STAT for sure */ ++ while (timeout--) { ++ if (is_link_up(phydev)) ++ return 0; ++ ++ usleep_range(100, 500); ++ } ++ } ++ ++ return 1; ++} ++ ++static int check_rx(struct phy_device *phydev) ++{ ++ return phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS) & ++ RX_READY_MASK; ++} ++ ++/* Coefficient values have hardware restrictions */ ++static int is_ld_valid(struct fsl_xgkr_inst *inst) ++{ ++ u32 ratio_pst1q = inst->ratio_pst1q; ++ u32 adpt_eq = inst->adpt_eq; ++ u32 ratio_preq = inst->ratio_preq; ++ ++ if ((ratio_pst1q + adpt_eq + ratio_preq) > 48) ++ return 0; ++ ++ if (((ratio_pst1q + adpt_eq + ratio_preq) * 4) >= ++ ((adpt_eq - ratio_pst1q - ratio_preq) * 17)) ++ return 0; ++ ++ if (ratio_preq > ratio_pst1q) ++ return 0; ++ ++ if (ratio_preq > 8) ++ return 0; ++ ++ if (adpt_eq < 26) ++ return 0; ++ ++ if (ratio_pst1q > 16) ++ return 0; ++ ++ return 1; ++} ++ ++static int is_value_allowed(const u32 *val_table, u32 val) ++{ ++ int i; ++ ++ for (i = 0;; i++) { ++ if (*(val_table + i) == VAL_INVALID) ++ return 0; ++ if (*(val_table + i) == val) ++ return 1; ++ } ++} ++ ++static int inc_dec(struct fsl_xgkr_inst *inst, int field, int request) ++{ ++ u32 ld_limit[3], ld_coe[3], step[3]; ++ ++ ld_coe[0] = inst->ratio_pst1q; ++ ld_coe[1] = inst->adpt_eq; ++ ld_coe[2] = inst->ratio_preq; ++ ++ /* Information specific to the Freescale SerDes for 10GBase-KR: ++ * Incrementing C(+1) means *decrementing* RATIO_PST1Q ++ * Incrementing C(0) means incrementing ADPT_EQ ++ * Incrementing C(-1) means *decrementing* RATIO_PREQ ++ */ ++ step[0] = -1; ++ step[1] = 1; ++ step[2] = -1; ++ ++ switch (request) { ++ case INCREMENT: ++ ld_limit[0] = POST_COE_MAX; ++ ld_limit[1] = ZERO_COE_MAX; ++ ld_limit[2] = PRE_COE_MAX; ++ if (ld_coe[field] != ld_limit[field]) ++ ld_coe[field] += step[field]; ++ else ++ /* MAX */ ++ return 2; ++ break; ++ case DECREMENT: ++ ld_limit[0] = POST_COE_MIN; ++ ld_limit[1] = ZERO_COE_MIN; ++ ld_limit[2] = PRE_COE_MIN; ++ if (ld_coe[field] != ld_limit[field]) ++ ld_coe[field] -= step[field]; ++ else ++ /* MIN */ ++ return 1; ++ break; ++ default: ++ break; ++ } ++ ++ if (is_ld_valid(inst)) { ++ /* accept new ld */ ++ 
inst->ratio_pst1q = ld_coe[0]; ++ inst->adpt_eq = ld_coe[1]; ++ inst->ratio_preq = ld_coe[2]; ++ /* only some values for preq and pst1q can be used. ++ * for preq: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xc. ++ * for pst1q: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xd, 0xf, 0x10. ++ */ ++ if (!is_value_allowed((const u32 *)&preq_table, ld_coe[2])) { ++ dev_dbg(&inst->phydev->mdio.dev, ++ "preq skipped value: %d\n", ld_coe[2]); ++ return 0; ++ } ++ ++ if (!is_value_allowed((const u32 *)&pst1q_table, ld_coe[0])) { ++ dev_dbg(&inst->phydev->mdio.dev, ++ "pst1q skipped value: %d\n", ld_coe[0]); ++ return 0; ++ } ++ ++ tune_tecr0(inst); ++ } else { ++ if (request == DECREMENT) ++ /* MIN */ ++ return 1; ++ if (request == INCREMENT) ++ /* MAX */ ++ return 2; ++ } ++ ++ return 0; ++} ++ ++static void min_max_updated(struct fsl_xgkr_inst *inst, int field, int new_ld) ++{ ++ u32 ld_coe[] = {COE_UPDATED, COE_MIN, COE_MAX}; ++ u32 mask, val; ++ ++ switch (field) { ++ case COE_COP1: ++ mask = COP1_MASK; ++ val = ld_coe[new_ld] << COP1_SHIFT; ++ break; ++ case COE_COZ: ++ mask = COZ_MASK; ++ val = ld_coe[new_ld] << COZ_SHIFT; ++ break; ++ case COE_COM: ++ mask = COM1_MASK; ++ val = ld_coe[new_ld] << COM1_SHIFT; ++ break; ++ default: ++ return; ++ } ++ ++ inst->ld_status &= ~mask; ++ inst->ld_status |= val; ++} ++ ++static void check_request(struct fsl_xgkr_inst *inst, int request) ++{ ++ int cop1_req, coz_req, com_req; ++ int old_status, new_ld_sta; ++ ++ cop1_req = (request & COP1_MASK) >> COP1_SHIFT; ++ coz_req = (request & COZ_MASK) >> COZ_SHIFT; ++ com_req = (request & COM1_MASK) >> COM1_SHIFT; ++ ++ /* IEEE802.3-2008, 72.6.10.2.5 ++ * Ensure we only act on INCREMENT/DECREMENT when we are in NOT UPDATED ++ */ ++ old_status = inst->ld_status; ++ ++ if (cop1_req && !(inst->ld_status & COP1_MASK)) { ++ new_ld_sta = inc_dec(inst, COE_COP1, cop1_req); ++ min_max_updated(inst, COE_COP1, new_ld_sta); ++ } ++ ++ if (coz_req && !(inst->ld_status & COZ_MASK)) { ++ new_ld_sta = inc_dec(inst, COE_COZ, coz_req); ++ min_max_updated(inst, COE_COZ, new_ld_sta); ++ } ++ ++ if (com_req && !(inst->ld_status & COM1_MASK)) { ++ new_ld_sta = inc_dec(inst, COE_COM, com_req); ++ min_max_updated(inst, COE_COM, new_ld_sta); ++ } ++ ++ if (old_status != inst->ld_status) ++ ld_coe_status(inst); ++} ++ ++static void preset(struct fsl_xgkr_inst *inst) ++{ ++ /* These are all MAX values from the IEEE802.3 perspective. */ ++ inst->ratio_pst1q = POST_COE_MAX; ++ inst->adpt_eq = ZERO_COE_MAX; ++ inst->ratio_preq = PRE_COE_MAX; ++ ++ tune_tecr0(inst); ++ inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK); ++ inst->ld_status |= COE_MAX << COP1_SHIFT | ++ COE_MAX << COZ_SHIFT | ++ COE_MAX << COM1_SHIFT; ++ ld_coe_status(inst); ++} ++ ++static void initialize(struct fsl_xgkr_inst *inst) ++{ ++ inst->ratio_preq = RATIO_PREQ; ++ inst->ratio_pst1q = RATIO_PST1Q; ++ inst->adpt_eq = RATIO_EQ; ++ ++ tune_tecr0(inst); ++ inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK); ++ inst->ld_status |= COE_UPDATED << COP1_SHIFT | ++ COE_UPDATED << COZ_SHIFT | ++ COE_UPDATED << COM1_SHIFT; ++ ld_coe_status(inst); ++} ++ ++static void train_rx(struct fsl_xgkr_inst *inst) ++{ ++ struct phy_device *phydev = inst->phydev; ++ int request, old_ld_status; ++ ++ /* get request from LP */ ++ request = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_CU) & ++ (LD_ALL_MASK); ++ old_ld_status = inst->ld_status; ++ ++ /* IEEE802.3-2008, 72.6.10.2.5 ++ * Ensure we always go to NOT UPDATED for status reporting in ++ * response to HOLD requests.
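++ * A HOLD request carries all coefficient request fields set to zero.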
++ * IEEE802.3-2008, 72.6.10.2.3.1/2 ++ * ... but only if PRESET/INITIALIZE are not active to ensure ++ * we keep status until they are released. ++ */ ++ if (!(request & (PRESET_MASK | INIT_MASK))) { ++ if (!(request & COP1_MASK)) ++ inst->ld_status &= ~COP1_MASK; ++ ++ if (!(request & COZ_MASK)) ++ inst->ld_status &= ~COZ_MASK; ++ ++ if (!(request & COM1_MASK)) ++ inst->ld_status &= ~COM1_MASK; ++ ++ if (old_ld_status != inst->ld_status) ++ ld_coe_status(inst); ++ } ++ ++ /* As soon as the LP shows ready, no need to do any more updates. */ ++ if (check_rx(phydev)) { ++ /* LP receiver is ready */ ++ if (inst->ld_status & (COP1_MASK | COZ_MASK | COM1_MASK)) { ++ inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK); ++ ld_coe_status(inst); ++ } ++ } else { ++ /* IEEE802.3-2008, 72.6.10.2.3.1/2 ++ * only act on PRESET/INITIALIZE if all status is NOT UPDATED. ++ */ ++ if (request & (PRESET_MASK | INIT_MASK)) { ++ if (!(inst->ld_status & ++ (COP1_MASK | COZ_MASK | COM1_MASK))) { ++ if (request & PRESET_MASK) ++ preset(inst); ++ ++ if (request & INIT_MASK) ++ initialize(inst); ++ } ++ } ++ ++ /* LP coefficients are not in HOLD */ ++ if (request & REQUEST_MASK) ++ check_request(inst, request & REQUEST_MASK); ++ } ++} ++ ++static void xgkr_start_train(struct phy_device *phydev) ++{ ++ struct fsl_xgkr_inst *inst = phydev->priv; ++ struct tx_condition *tx_c = &inst->tx_c; ++ int val = 0, i; ++ int lt_state; ++ unsigned long dead_line; ++ int rx_ok, tx_ok; ++ ++ init_inst(inst, 0); ++ start_lt(phydev); ++ ++ for (i = 0; i < 2;) { ++ dead_line = jiffies + msecs_to_jiffies(500); ++ while (time_before(jiffies, dead_line)) { ++ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, ++ FSL_KR_PMD_STATUS); ++ if (val & TRAIN_FAIL) { ++ /* LT failed already, reset the lane to avoid ++ * it hanging, then start LT again. ++ */ ++ reset_gcr0(inst); ++ start_lt(phydev); ++ } else if ((val & PMD_STATUS_SUP_STAT) && ++ (val & PMD_STATUS_FRAME_LOCK)) ++ break; ++ usleep_range(100, 500); ++ } ++ ++ if (!((val & PMD_STATUS_FRAME_LOCK) && ++ (val & PMD_STATUS_SUP_STAT))) { ++ i++; ++ continue; ++ } ++ ++ /* init process */ ++ rx_ok = false; ++ tx_ok = false; ++ /* the LT should be finished in 500ms, failed or OK.
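++ * (clause 72's max_wait_timer bounds training at 500ms)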
*/ ++ dead_line = jiffies + msecs_to_jiffies(500); ++ ++ while (time_before(jiffies, dead_line)) { ++ /* check if the LT is already failed */ ++ lt_state = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, ++ FSL_KR_PMD_STATUS); ++ if (lt_state & TRAIN_FAIL) { ++ reset_gcr0(inst); ++ break; ++ } ++ ++ rx_ok = check_rx(phydev); ++ tx_ok = tx_c->tx_complete; ++ ++ if (rx_ok && tx_ok) ++ break; ++ ++ if (!rx_ok) ++ train_rx(inst); ++ ++ if (!tx_ok) ++ train_tx(inst); ++ ++ usleep_range(100, 500); ++ } ++ ++ i++; ++ /* check LT result */ ++ if (is_link_training_fail(phydev)) { ++ init_inst(inst, 0); ++ continue; ++ } else { ++ stop_lt(phydev); ++ inst->state = TRAINED; ++ break; ++ } ++ } ++} ++ ++static void xgkr_state_machine(struct work_struct *work) ++{ ++ struct delayed_work *dwork = to_delayed_work(work); ++ struct fsl_xgkr_inst *inst = container_of(dwork, ++ struct fsl_xgkr_inst, ++ xgkr_wk); ++ struct phy_device *phydev = inst->phydev; ++ int an_state; ++ bool needs_train = false; ++ ++ mutex_lock(&phydev->lock); ++ ++ switch (inst->state) { ++ case DETECTING_LP: ++ phy_read_mmd(phydev, MDIO_MMD_AN, FSL_AN_BP_STAT); ++ an_state = phy_read_mmd(phydev, MDIO_MMD_AN, FSL_AN_BP_STAT); ++ if ((an_state & KR_AN_MASK)) ++ needs_train = true; ++ break; ++ case TRAINED: ++ if (!is_link_up(phydev)) { ++ dev_info(&phydev->mdio.dev, ++ "Detect hotplug, restart training\n"); ++ init_inst(inst, 1); ++ start_xgkr_an(phydev); ++ inst->state = DETECTING_LP; ++ } ++ break; ++ } ++ ++ if (needs_train) ++ xgkr_start_train(phydev); ++ ++ mutex_unlock(&phydev->lock); ++ queue_delayed_work(system_power_efficient_wq, &inst->xgkr_wk, ++ msecs_to_jiffies(XGKR_TIMEOUT)); ++} ++ ++static int fsl_backplane_probe(struct phy_device *phydev) ++{ ++ struct fsl_xgkr_inst *xgkr_inst; ++ struct device_node *phy_node, *lane_node; ++ struct resource res_lane; ++ const char *bm; ++ int ret; ++ int bp_mode; ++ u32 lane[2]; ++ ++ phy_node = phydev->mdio.dev.of_node; ++ bp_mode = of_property_read_string(phy_node, "backplane-mode", &bm); ++ if (bp_mode < 0) ++ return 0; ++ ++ if (!strcasecmp(bm, "1000base-kx")) { ++ bp_mode = PHY_BACKPLANE_1000BASE_KX; ++ } else if (!strcasecmp(bm, "10gbase-kr")) { ++ bp_mode = PHY_BACKPLANE_10GBASE_KR; ++ } else { ++ dev_err(&phydev->mdio.dev, "Unknown backplane-mode\n"); ++ return -EINVAL; ++ } ++ ++ lane_node = of_parse_phandle(phy_node, "fsl,lane-handle", 0); ++ if (!lane_node) { ++ dev_err(&phydev->mdio.dev, "parse fsl,lane-handle failed\n"); ++ return -EINVAL; ++ } ++ ++ ret = of_address_to_resource(lane_node, 0, &res_lane); ++ if (ret) { ++ dev_err(&phydev->mdio.dev, "could not obtain memory map\n"); ++ return ret; ++ } ++ ++ of_node_put(lane_node); ++ ret = of_property_read_u32_array(phy_node, "fsl,lane-reg", ++ (u32 *)&lane, 2); ++ if (ret) { ++ dev_err(&phydev->mdio.dev, "could not get fsl,lane-reg\n"); ++ return -EINVAL; ++ } ++ ++ phydev->priv = devm_ioremap_nocache(&phydev->mdio.dev, ++ res_lane.start + lane[0], ++ lane[1]); ++ if (!phydev->priv) { ++ dev_err(&phydev->mdio.dev, "ioremap_nocache failed\n"); ++ return -ENOMEM; ++ } ++ ++ if (bp_mode == PHY_BACKPLANE_1000BASE_KX) { ++ phydev->speed = SPEED_1000; ++ /* configure the lane for 1000BASE-KX */ ++ lane_set_1gkx(phydev->priv); ++ return 0; ++ } ++ ++ xgkr_inst = devm_kzalloc(&phydev->mdio.dev, ++ sizeof(*xgkr_inst), GFP_KERNEL); ++ if (!xgkr_inst) ++ return -ENOMEM; ++ ++ xgkr_inst->reg_base = phydev->priv; ++ xgkr_inst->phydev = phydev; ++ phydev->priv = xgkr_inst; ++ ++ if (bp_mode == PHY_BACKPLANE_10GBASE_KR) { ++ phydev->speed = 
SPEED_10000; ++ INIT_DELAYED_WORK(&xgkr_inst->xgkr_wk, xgkr_state_machine); ++ } ++ ++ return 0; ++} ++ ++static int fsl_backplane_aneg_done(struct phy_device *phydev) ++{ ++ return 1; ++} ++ ++static int fsl_backplane_config_aneg(struct phy_device *phydev) ++{ ++ if (phydev->speed == SPEED_10000) { ++ phydev->supported |= SUPPORTED_10000baseKR_Full; ++ start_xgkr_an(phydev); ++ } else if (phydev->speed == SPEED_1000) { ++ phydev->supported |= SUPPORTED_1000baseKX_Full; ++ start_1gkx_an(phydev); ++ } ++ ++ phydev->advertising = phydev->supported; ++ phydev->duplex = 1; ++ ++ return 0; ++} ++ ++static int fsl_backplane_suspend(struct phy_device *phydev) ++{ ++ if (phydev->speed == SPEED_10000) { ++ struct fsl_xgkr_inst *xgkr_inst = phydev->priv; ++ ++ cancel_delayed_work_sync(&xgkr_inst->xgkr_wk); ++ } ++ return 0; ++} ++ ++static int fsl_backplane_resume(struct phy_device *phydev) ++{ ++ if (phydev->speed == SPEED_10000) { ++ struct fsl_xgkr_inst *xgkr_inst = phydev->priv; ++ ++ init_inst(xgkr_inst, 1); ++ queue_delayed_work(system_power_efficient_wq, ++ &xgkr_inst->xgkr_wk, ++ msecs_to_jiffies(XGKR_TIMEOUT)); ++ } ++ return 0; ++} ++ ++static int fsl_backplane_read_status(struct phy_device *phydev) ++{ ++ if (is_link_up(phydev)) ++ phydev->link = 1; ++ else ++ phydev->link = 0; ++ ++ return 0; ++} ++ ++static struct phy_driver fsl_backplane_driver[] = { ++ { ++ .phy_id = FSL_PCS_PHY_ID, ++ .name = "Freescale Backplane", ++ .phy_id_mask = 0xffffffff, ++ .features = SUPPORTED_Backplane | SUPPORTED_Autoneg | ++ SUPPORTED_MII, ++ .probe = fsl_backplane_probe, ++ .aneg_done = fsl_backplane_aneg_done, ++ .config_aneg = fsl_backplane_config_aneg, ++ .read_status = fsl_backplane_read_status, ++ .suspend = fsl_backplane_suspend, ++ .resume = fsl_backplane_resume, ++ }, ++}; ++ ++module_phy_driver(fsl_backplane_driver); ++ ++static struct mdio_device_id __maybe_unused freescale_tbl[] = { ++ { FSL_PCS_PHY_ID, 0xffffffff }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(mdio, freescale_tbl); ++ ++MODULE_DESCRIPTION("Freescale Backplane driver"); ++MODULE_AUTHOR("Shaohui Xie "); ++MODULE_LICENSE("GPL v2"); +--- a/drivers/net/phy/phy.c ++++ b/drivers/net/phy/phy.c +@@ -585,7 +585,7 @@ int phy_mii_ioctl(struct phy_device *phy + return 0; + + case SIOCSHWTSTAMP: +- if (phydev->drv->hwtstamp) ++ if (phydev->drv && phydev->drv->hwtstamp) + return phydev->drv->hwtstamp(phydev, ifr); + /* fall through */ + +@@ -610,6 +610,9 @@ static int phy_start_aneg_priv(struct ph + bool trigger = 0; + int err; + ++ if (!phydev->drv) ++ return -EIO; ++ + mutex_lock(&phydev->lock); + + if (AUTONEG_DISABLE == phydev->autoneg) +@@ -1009,7 +1012,7 @@ void phy_state_machine(struct work_struc + + old_state = phydev->state; + +- if (phydev->drv->link_change_notify) ++ if (phydev->drv && phydev->drv->link_change_notify) + phydev->drv->link_change_notify(phydev); + + switch (phydev->state) { +@@ -1311,6 +1314,9 @@ EXPORT_SYMBOL(phy_write_mmd_indirect); + */ + int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) + { ++ if (!phydev->drv) ++ return -EIO; ++ + /* According to 802.3az,the EEE is supported only in full duplex-mode. + * Also EEE feature is active when core is operating with MII, GMII + * or RGMII (all kinds). 
Internal PHYs are also allowed to proceed and +@@ -1388,6 +1394,9 @@ EXPORT_SYMBOL(phy_init_eee); + */ + int phy_get_eee_err(struct phy_device *phydev) + { ++ if (!phydev->drv) ++ return -EIO; ++ + return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR, MDIO_MMD_PCS); + } + EXPORT_SYMBOL(phy_get_eee_err); +@@ -1404,6 +1413,9 @@ int phy_ethtool_get_eee(struct phy_devic + { + int val; + ++ if (!phydev->drv) ++ return -EIO; ++ + /* Get Supported EEE */ + val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, MDIO_MMD_PCS); + if (val < 0) +@@ -1437,6 +1449,9 @@ int phy_ethtool_set_eee(struct phy_devic + { + int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised); + ++ if (!phydev->drv) ++ return -EIO; ++ + /* Mask prohibited EEE modes */ + val &= ~phydev->eee_broken_modes; + +@@ -1448,7 +1463,7 @@ EXPORT_SYMBOL(phy_ethtool_set_eee); + + int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) + { +- if (phydev->drv->set_wol) ++ if (phydev->drv && phydev->drv->set_wol) + return phydev->drv->set_wol(phydev, wol); + + return -EOPNOTSUPP; +@@ -1457,7 +1472,7 @@ EXPORT_SYMBOL(phy_ethtool_set_wol); + + void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) + { +- if (phydev->drv->get_wol) ++ if (phydev->drv && phydev->drv->get_wol) + phydev->drv->get_wol(phydev, wol); + } + EXPORT_SYMBOL(phy_ethtool_get_wol); +--- a/drivers/net/phy/phy_device.c ++++ b/drivers/net/phy/phy_device.c +@@ -1046,7 +1046,7 @@ int phy_suspend(struct phy_device *phyde + if (wol.wolopts) + return -EBUSY; + +- if (phydrv->suspend) ++ if (phydev->drv && phydrv->suspend) + ret = phydrv->suspend(phydev); + + if (ret) +@@ -1063,7 +1063,7 @@ int phy_resume(struct phy_device *phydev + struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); + int ret = 0; + +- if (phydrv->resume) ++ if (phydev->drv && phydrv->resume) + ret = phydrv->resume(phydev); + + if (ret) +@@ -1726,7 +1726,7 @@ static int phy_remove(struct device *dev + phydev->state = PHY_DOWN; + mutex_unlock(&phydev->lock); + +- if (phydev->drv->remove) ++ if (phydev->drv && phydev->drv->remove) + phydev->drv->remove(phydev); + phydev->drv = NULL; + +--- a/drivers/net/phy/swphy.c ++++ b/drivers/net/phy/swphy.c +@@ -77,6 +77,7 @@ static const struct swmii_regs duplex[] + static int swphy_decode_speed(int speed) + { + switch (speed) { ++ case 10000: + case 1000: + return SWMII_SPEED_1000; + case 100: +--- a/include/linux/phy.h ++++ b/include/linux/phy.h +@@ -81,6 +81,7 @@ typedef enum { + PHY_INTERFACE_MODE_MOCA, + PHY_INTERFACE_MODE_QSGMII, + PHY_INTERFACE_MODE_TRGMII, ++ PHY_INTERFACE_MODE_SGMII_2500, + PHY_INTERFACE_MODE_MAX, + } phy_interface_t; + +@@ -784,6 +785,9 @@ int phy_stop_interrupts(struct phy_devic + + static inline int phy_read_status(struct phy_device *phydev) + { ++ if (!phydev->drv) ++ return -EIO; ++ + return phydev->drv->read_status(phydev); + } + diff --git a/target/linux/layerscape/patches-4.9/704-fsl-mc-layerscape-support.patch b/target/linux/layerscape/patches-4.9/704-fsl-mc-layerscape-support.patch new file mode 100644 index 000000000..2927c7e66 --- /dev/null +++ b/target/linux/layerscape/patches-4.9/704-fsl-mc-layerscape-support.patch @@ -0,0 +1,11444 @@ +From 464b4d9b8282e0f1e5040e4914505f91ce4d3750 Mon Sep 17 00:00:00 2001 +From: Yangbo Lu +Date: Mon, 25 Sep 2017 12:06:25 +0800 +Subject: [PATCH] fsl-mc: layerscape support +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This is an integrated patch for layerscape mc-bus support.
+ +Signed-off-by: Stuart Yoder +Signed-off-by: Bharat Bhushan +Signed-off-by: Arnd Bergmann +Signed-off-by: Laurentiu Tudor +Signed-off-by: Roy Pledge +Signed-off-by: Shiva Kerdel +Signed-off-by: Nipun Gupta +Signed-off-by: Ioana Ciornei +Signed-off-by: Horia Geantă +Signed-off-by: Yangbo Lu +--- + drivers/staging/fsl-mc/bus/Kconfig | 41 +- + drivers/staging/fsl-mc/bus/Makefile | 10 +- + drivers/staging/fsl-mc/bus/dpbp-cmd.h | 80 ++ + drivers/staging/fsl-mc/bus/dpbp.c | 450 +-------- + drivers/staging/fsl-mc/bus/dpcon-cmd.h | 85 ++ + drivers/staging/fsl-mc/bus/dpcon.c | 317 ++++++ + drivers/staging/fsl-mc/bus/dpio/Makefile | 11 + + .../{include/dpcon-cmd.h => bus/dpio/dpio-cmd.h} | 73 +- + drivers/staging/fsl-mc/bus/dpio/dpio-driver.c | 296 ++++++ + drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt | 135 +++ + drivers/staging/fsl-mc/bus/dpio/dpio-service.c | 689 +++++++++++++ + drivers/staging/fsl-mc/bus/dpio/dpio.c | 224 +++++ + drivers/staging/fsl-mc/bus/dpio/dpio.h | 109 ++ + drivers/staging/fsl-mc/bus/dpio/qbman-portal.c | 1049 ++++++++++++++++++++ + drivers/staging/fsl-mc/bus/dpio/qbman-portal.h | 662 ++++++++++++ + drivers/staging/fsl-mc/bus/dpio/qbman_debug.c | 853 ++++++++++++++++ + drivers/staging/fsl-mc/bus/dpio/qbman_debug.h | 136 +++ + drivers/staging/fsl-mc/bus/dpio/qbman_private.h | 171 ++++ + drivers/staging/fsl-mc/bus/dpmcp-cmd.h | 112 +-- + drivers/staging/fsl-mc/bus/dpmcp.c | 374 +------ + drivers/staging/fsl-mc/bus/dpmcp.h | 127 +-- + drivers/staging/fsl-mc/bus/dpmng-cmd.h | 14 +- + drivers/staging/fsl-mc/bus/dpmng.c | 37 +- + drivers/staging/fsl-mc/bus/dprc-cmd.h | 82 +- + drivers/staging/fsl-mc/bus/dprc-driver.c | 38 +- + drivers/staging/fsl-mc/bus/dprc.c | 629 +----------- + drivers/staging/fsl-mc/bus/fsl-mc-allocator.c | 78 +- + drivers/staging/fsl-mc/bus/fsl-mc-bus.c | 318 +++--- + drivers/staging/fsl-mc/bus/fsl-mc-iommu.c | 104 ++ + drivers/staging/fsl-mc/bus/fsl-mc-msi.c | 3 +- + drivers/staging/fsl-mc/bus/fsl-mc-private.h | 6 +- + .../staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c | 11 +- + drivers/staging/fsl-mc/bus/mc-io.c | 4 +- + drivers/staging/fsl-mc/bus/mc-ioctl.h | 22 + + drivers/staging/fsl-mc/bus/mc-restool.c | 405 ++++++++ + drivers/staging/fsl-mc/bus/mc-sys.c | 14 +- + drivers/staging/fsl-mc/include/dpaa2-fd.h | 706 +++++++++++++ + drivers/staging/fsl-mc/include/dpaa2-global.h | 202 ++++ + drivers/staging/fsl-mc/include/dpaa2-io.h | 190 ++++ + drivers/staging/fsl-mc/include/dpbp-cmd.h | 185 ---- + drivers/staging/fsl-mc/include/dpbp.h | 158 +-- + drivers/staging/fsl-mc/include/dpcon.h | 115 +++ + drivers/staging/fsl-mc/include/dpmng.h | 16 +- + drivers/staging/fsl-mc/include/dpopr.h | 110 ++ + drivers/staging/fsl-mc/include/dprc.h | 470 +++------ + drivers/staging/fsl-mc/include/mc-bus.h | 7 +- + drivers/staging/fsl-mc/include/mc-cmd.h | 44 +- + drivers/staging/fsl-mc/include/mc-sys.h | 3 +- + drivers/staging/fsl-mc/include/mc.h | 17 +- + 49 files changed, 7380 insertions(+), 2612 deletions(-) + create mode 100644 drivers/staging/fsl-mc/bus/dpbp-cmd.h + create mode 100644 drivers/staging/fsl-mc/bus/dpcon-cmd.h + create mode 100644 drivers/staging/fsl-mc/bus/dpcon.c + create mode 100644 drivers/staging/fsl-mc/bus/dpio/Makefile + rename drivers/staging/fsl-mc/{include/dpcon-cmd.h => bus/dpio/dpio-cmd.h} (64%) + create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-driver.c + create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt + create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-service.c + create mode 100644 
drivers/staging/fsl-mc/bus/dpio/dpio.c + create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman-portal.c + create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman-portal.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.c + create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_private.h + create mode 100644 drivers/staging/fsl-mc/bus/fsl-mc-iommu.c + create mode 100644 drivers/staging/fsl-mc/bus/mc-ioctl.h + create mode 100644 drivers/staging/fsl-mc/bus/mc-restool.c + create mode 100644 drivers/staging/fsl-mc/include/dpaa2-fd.h + create mode 100644 drivers/staging/fsl-mc/include/dpaa2-global.h + create mode 100644 drivers/staging/fsl-mc/include/dpaa2-io.h + delete mode 100644 drivers/staging/fsl-mc/include/dpbp-cmd.h + create mode 100644 drivers/staging/fsl-mc/include/dpcon.h + create mode 100644 drivers/staging/fsl-mc/include/dpopr.h + +--- a/drivers/staging/fsl-mc/bus/Kconfig ++++ b/drivers/staging/fsl-mc/bus/Kconfig +@@ -1,25 +1,40 @@ + # +-# Freescale Management Complex (MC) bus drivers ++# DPAA2 fsl-mc bus + # +-# Copyright (C) 2014 Freescale Semiconductor, Inc. ++# Copyright (C) 2014-2016 Freescale Semiconductor, Inc. + # + # This file is released under the GPLv2 + # + + config FSL_MC_BUS +- bool "Freescale Management Complex (MC) bus driver" +- depends on OF && ARM64 ++ bool "QorIQ DPAA2 fsl-mc bus driver" ++ depends on OF && ARCH_LAYERSCAPE + select GENERIC_MSI_IRQ_DOMAIN + help +- Driver to enable the bus infrastructure for the Freescale +- QorIQ Management Complex (fsl-mc). The fsl-mc is a hardware +- module of the QorIQ LS2 SoCs, that does resource management +- for hardware building-blocks in the SoC that can be used +- to dynamically create networking hardware objects such as +- network interfaces (NICs), crypto accelerator instances, +- or L2 switches. ++ Driver to enable the bus infrastructure for the QorIQ DPAA2 ++ architecture. The fsl-mc bus driver handles discovery of ++ DPAA2 objects (which are represented as Linux devices) and ++ binding objects to drivers. + +- Only enable this option when building the kernel for +- Freescale QorQIQ LS2xxxx SoCs. ++config FSL_MC_DPIO ++ tristate "QorIQ DPAA2 DPIO driver" ++ depends on FSL_MC_BUS ++ help ++ Driver for the DPAA2 DPIO object. A DPIO provides queue and ++ buffer management facilities for software to interact with ++ other DPAA2 objects. This driver does not expose the DPIO ++ objects individually, but groups them under a service layer ++ API. + ++config FSL_QBMAN_DEBUG ++ tristate "Freescale QBMAN Debug APIs" ++ depends on FSL_MC_DPIO ++ help ++ QBMan debug assistant APIs. + ++config FSL_MC_RESTOOL ++ tristate "Freescale Management Complex (MC) restool driver" ++ depends on FSL_MC_BUS ++ help ++ Driver that provides kernel support for the Freescale Management ++ Complex resource manager user-space tool. +--- a/drivers/staging/fsl-mc/bus/Makefile ++++ b/drivers/staging/fsl-mc/bus/Makefile +@@ -17,4 +17,12 @@ mc-bus-driver-objs := fsl-mc-bus.o \ + fsl-mc-msi.o \ + irq-gic-v3-its-fsl-mc-msi.o \ + dpmcp.o \ +- dpbp.o ++ dpbp.o \ ++ dpcon.o \ ++ fsl-mc-iommu.o ++ ++# MC DPIO driver ++obj-$(CONFIG_FSL_MC_DPIO) += dpio/ ++ ++# MC restool kernel support ++obj-$(CONFIG_FSL_MC_RESTOOL) += mc-restool.o +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpbp-cmd.h +@@ -0,0 +1,80 @@ ++/* ++ * Copyright 2013-2016 Freescale Semiconductor Inc. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef _FSL_DPBP_CMD_H ++#define _FSL_DPBP_CMD_H ++ ++/* DPBP Version */ ++#define DPBP_VER_MAJOR 3 ++#define DPBP_VER_MINOR 2 ++ ++/* Command versioning */ ++#define DPBP_CMD_BASE_VERSION 1 ++#define DPBP_CMD_ID_OFFSET 4 ++ ++#define DPBP_CMD(id) ((id << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION) ++ ++/* Command IDs */ ++#define DPBP_CMDID_CLOSE DPBP_CMD(0x800) ++#define DPBP_CMDID_OPEN DPBP_CMD(0x804) ++#define DPBP_CMDID_GET_API_VERSION DPBP_CMD(0xa04) ++ ++#define DPBP_CMDID_ENABLE DPBP_CMD(0x002) ++#define DPBP_CMDID_DISABLE DPBP_CMD(0x003) ++#define DPBP_CMDID_GET_ATTR DPBP_CMD(0x004) ++#define DPBP_CMDID_RESET DPBP_CMD(0x005) ++#define DPBP_CMDID_IS_ENABLED DPBP_CMD(0x006) ++ ++struct dpbp_cmd_open { ++ __le32 dpbp_id; ++}; ++ ++struct dpbp_cmd_destroy { ++ __le32 object_id; ++}; ++ ++#define DPBP_ENABLE 0x1 ++ ++struct dpbp_rsp_is_enabled { ++ u8 enabled; ++}; ++ ++struct dpbp_rsp_get_attributes { ++ /* response word 0 */ ++ __le16 pad; ++ __le16 bpid; ++ __le32 id; ++ /* response word 1 */ ++ __le16 version_major; ++ __le16 version_minor; ++}; ++ ++#endif /* _FSL_DPBP_CMD_H */ +--- a/drivers/staging/fsl-mc/bus/dpbp.c ++++ b/drivers/staging/fsl-mc/bus/dpbp.c +@@ -1,4 +1,5 @@ +-/* Copyright 2013-2016 Freescale Semiconductor Inc. ++/* ++ * Copyright 2013-2016 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: +@@ -11,7 +12,6 @@ + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * +- * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any +@@ -32,7 +32,8 @@ + #include "../include/mc-sys.h" + #include "../include/mc-cmd.h" + #include "../include/dpbp.h" +-#include "../include/dpbp-cmd.h" ++ ++#include "dpbp-cmd.h" + + /** + * dpbp_open() - Open a control session for the specified object. +@@ -105,74 +106,6 @@ int dpbp_close(struct fsl_mc_io *mc_io, + EXPORT_SYMBOL(dpbp_close); + + /** +- * dpbp_create() - Create the DPBP object. +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @cfg: Configuration structure +- * @token: Returned token; use in subsequent API calls +- * +- * Create the DPBP object, allocate required resources and +- * perform required initialization. +- * +- * The object can be created either by declaring it in the +- * DPL file, or by calling this function. +- * This function returns a unique authentication token, +- * associated with the specific object ID and the specific MC +- * portal; this token must be used in all subsequent calls to +- * this specific object. For objects that are created using the +- * DPL file, call dpbp_open function to get an authentication +- * token first. +- * +- * Return: '0' on Success; Error code otherwise. +- */ +-int dpbp_create(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- const struct dpbp_cfg *cfg, +- u16 *token) +-{ +- struct mc_command cmd = { 0 }; +- int err; +- +- (void)(cfg); /* unused */ +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE, +- cmd_flags, 0); +- +- /* send command to mc*/ +- err = mc_send_command(mc_io, &cmd); +- if (err) +- return err; +- +- /* retrieve response parameters */ +- *token = mc_cmd_hdr_read_token(&cmd); +- +- return 0; +-} +- +-/** +- * dpbp_destroy() - Destroy the DPBP object and release all its resources. +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPBP object +- * +- * Return: '0' on Success; error code otherwise. +- */ +-int dpbp_destroy(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token) +-{ +- struct mc_command cmd = { 0 }; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY, +- cmd_flags, token); +- +- /* send command to mc*/ +- return mc_send_command(mc_io, &cmd); +-} +- +-/** + * dpbp_enable() - Enable the DPBP. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +@@ -250,6 +183,7 @@ int dpbp_is_enabled(struct fsl_mc_io *mc + + return 0; + } ++EXPORT_SYMBOL(dpbp_is_enabled); + + /** + * dpbp_reset() - Reset the DPBP, returns the object to initial state. +@@ -272,310 +206,7 @@ int dpbp_reset(struct fsl_mc_io *mc_io, + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); + } +- +-/** +- * dpbp_set_irq() - Set IRQ information for the DPBP to trigger an interrupt. +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPBP object +- * @irq_index: Identifies the interrupt index to configure +- * @irq_cfg: IRQ configuration +- * +- * Return: '0' on Success; Error code otherwise. 
+- */ +-int dpbp_set_irq(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- struct dpbp_irq_cfg *irq_cfg) +-{ +- struct mc_command cmd = { 0 }; +- struct dpbp_cmd_set_irq *cmd_params; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ, +- cmd_flags, token); +- cmd_params = (struct dpbp_cmd_set_irq *)cmd.params; +- cmd_params->irq_index = irq_index; +- cmd_params->irq_val = cpu_to_le32(irq_cfg->val); +- cmd_params->irq_addr = cpu_to_le64(irq_cfg->addr); +- cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num); +- +- /* send command to mc*/ +- return mc_send_command(mc_io, &cmd); +-} +- +-/** +- * dpbp_get_irq() - Get IRQ information from the DPBP. +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPBP object +- * @irq_index: The interrupt index to configure +- * @type: Interrupt type: 0 represents message interrupt +- * type (both irq_addr and irq_val are valid) +- * @irq_cfg: IRQ attributes +- * +- * Return: '0' on Success; Error code otherwise. +- */ +-int dpbp_get_irq(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- int *type, +- struct dpbp_irq_cfg *irq_cfg) +-{ +- struct mc_command cmd = { 0 }; +- struct dpbp_cmd_get_irq *cmd_params; +- struct dpbp_rsp_get_irq *rsp_params; +- int err; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ, +- cmd_flags, token); +- cmd_params = (struct dpbp_cmd_get_irq *)cmd.params; +- cmd_params->irq_index = irq_index; +- +- /* send command to mc*/ +- err = mc_send_command(mc_io, &cmd); +- if (err) +- return err; +- +- /* retrieve response parameters */ +- rsp_params = (struct dpbp_rsp_get_irq *)cmd.params; +- irq_cfg->val = le32_to_cpu(rsp_params->irq_val); +- irq_cfg->addr = le64_to_cpu(rsp_params->irq_addr); +- irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num); +- *type = le32_to_cpu(rsp_params->type); +- +- return 0; +-} +- +-/** +- * dpbp_set_irq_enable() - Set overall interrupt state. +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPBP object +- * @irq_index: The interrupt index to configure +- * @en: Interrupt state - enable = 1, disable = 0 +- * +- * Allows GPP software to control when interrupts are generated. +- * Each interrupt can have up to 32 causes. The enable/disable control's the +- * overall interrupt state. if the interrupt is disabled no causes will cause +- * an interrupt. +- * +- * Return: '0' on Success; Error code otherwise. +- */ +-int dpbp_set_irq_enable(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u8 en) +-{ +- struct mc_command cmd = { 0 }; +- struct dpbp_cmd_set_irq_enable *cmd_params; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_ENABLE, +- cmd_flags, token); +- cmd_params = (struct dpbp_cmd_set_irq_enable *)cmd.params; +- cmd_params->enable = en & DPBP_ENABLE; +- cmd_params->irq_index = irq_index; +- +- /* send command to mc*/ +- return mc_send_command(mc_io, &cmd); +-} +- +-/** +- * dpbp_get_irq_enable() - Get overall interrupt state +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPBP object +- * @irq_index: The interrupt index to configure +- * @en: Returned interrupt state - enable = 1, disable = 0 +- * +- * Return: '0' on Success; Error code otherwise. 
+- */ +-int dpbp_get_irq_enable(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u8 *en) +-{ +- struct mc_command cmd = { 0 }; +- struct dpbp_cmd_get_irq_enable *cmd_params; +- struct dpbp_rsp_get_irq_enable *rsp_params; +- int err; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_ENABLE, +- cmd_flags, token); +- cmd_params = (struct dpbp_cmd_get_irq_enable *)cmd.params; +- cmd_params->irq_index = irq_index; +- +- /* send command to mc*/ +- err = mc_send_command(mc_io, &cmd); +- if (err) +- return err; +- +- /* retrieve response parameters */ +- rsp_params = (struct dpbp_rsp_get_irq_enable *)cmd.params; +- *en = rsp_params->enabled & DPBP_ENABLE; +- return 0; +-} +- +-/** +- * dpbp_set_irq_mask() - Set interrupt mask. +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPBP object +- * @irq_index: The interrupt index to configure +- * @mask: Event mask to trigger interrupt; +- * each bit: +- * 0 = ignore event +- * 1 = consider event for asserting IRQ +- * +- * Every interrupt can have up to 32 causes and the interrupt model supports +- * masking/unmasking each cause independently +- * +- * Return: '0' on Success; Error code otherwise. +- */ +-int dpbp_set_irq_mask(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u32 mask) +-{ +- struct mc_command cmd = { 0 }; +- struct dpbp_cmd_set_irq_mask *cmd_params; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_MASK, +- cmd_flags, token); +- cmd_params = (struct dpbp_cmd_set_irq_mask *)cmd.params; +- cmd_params->mask = cpu_to_le32(mask); +- cmd_params->irq_index = irq_index; +- +- /* send command to mc*/ +- return mc_send_command(mc_io, &cmd); +-} +- +-/** +- * dpbp_get_irq_mask() - Get interrupt mask. +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPBP object +- * @irq_index: The interrupt index to configure +- * @mask: Returned event mask to trigger interrupt +- * +- * Every interrupt can have up to 32 causes and the interrupt model supports +- * masking/unmasking each cause independently +- * +- * Return: '0' on Success; Error code otherwise. +- */ +-int dpbp_get_irq_mask(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u32 *mask) +-{ +- struct mc_command cmd = { 0 }; +- struct dpbp_cmd_get_irq_mask *cmd_params; +- struct dpbp_rsp_get_irq_mask *rsp_params; +- int err; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_MASK, +- cmd_flags, token); +- cmd_params = (struct dpbp_cmd_get_irq_mask *)cmd.params; +- cmd_params->irq_index = irq_index; +- +- /* send command to mc*/ +- err = mc_send_command(mc_io, &cmd); +- if (err) +- return err; +- +- /* retrieve response parameters */ +- rsp_params = (struct dpbp_rsp_get_irq_mask *)cmd.params; +- *mask = le32_to_cpu(rsp_params->mask); +- +- return 0; +-} +- +-/** +- * dpbp_get_irq_status() - Get the current status of any pending interrupts. +- * +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPBP object +- * @irq_index: The interrupt index to configure +- * @status: Returned interrupts status - one bit per cause: +- * 0 = no interrupt pending +- * 1 = interrupt pending +- * +- * Return: '0' on Success; Error code otherwise. 
+- */ +-int dpbp_get_irq_status(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u32 *status) +-{ +- struct mc_command cmd = { 0 }; +- struct dpbp_cmd_get_irq_status *cmd_params; +- struct dpbp_rsp_get_irq_status *rsp_params; +- int err; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS, +- cmd_flags, token); +- cmd_params = (struct dpbp_cmd_get_irq_status *)cmd.params; +- cmd_params->status = cpu_to_le32(*status); +- cmd_params->irq_index = irq_index; +- +- /* send command to mc*/ +- err = mc_send_command(mc_io, &cmd); +- if (err) +- return err; +- +- /* retrieve response parameters */ +- rsp_params = (struct dpbp_rsp_get_irq_status *)cmd.params; +- *status = le32_to_cpu(rsp_params->status); +- +- return 0; +-} +- +-/** +- * dpbp_clear_irq_status() - Clear a pending interrupt's status +- * +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPBP object +- * @irq_index: The interrupt index to configure +- * @status: Bits to clear (W1C) - one bit per cause: +- * 0 = don't change +- * 1 = clear status bit +- * +- * Return: '0' on Success; Error code otherwise. +- */ +-int dpbp_clear_irq_status(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u32 status) +-{ +- struct mc_command cmd = { 0 }; +- struct dpbp_cmd_clear_irq_status *cmd_params; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLEAR_IRQ_STATUS, +- cmd_flags, token); +- cmd_params = (struct dpbp_cmd_clear_irq_status *)cmd.params; +- cmd_params->status = cpu_to_le32(status); +- cmd_params->irq_index = irq_index; +- +- /* send command to mc*/ +- return mc_send_command(mc_io, &cmd); +-} ++EXPORT_SYMBOL(dpbp_reset); + + /** + * dpbp_get_attributes - Retrieve DPBP attributes. +@@ -609,83 +240,40 @@ int dpbp_get_attributes(struct fsl_mc_io + rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params; + attr->bpid = le16_to_cpu(rsp_params->bpid); + attr->id = le32_to_cpu(rsp_params->id); +- attr->version.major = le16_to_cpu(rsp_params->version_major); +- attr->version.minor = le16_to_cpu(rsp_params->version_minor); + + return 0; + } + EXPORT_SYMBOL(dpbp_get_attributes); + + /** +- * dpbp_set_notifications() - Set notifications towards software +- * @mc_io: Pointer to MC portal's I/O object ++ * dpbp_get_api_version - Get Data Path Buffer Pool API version ++ * @mc_io: Pointer to Mc portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPBP object +- * @cfg: notifications configuration ++ * @major_ver: Major version of Buffer Pool API ++ * @minor_ver: Minor version of Buffer Pool API + * + * Return: '0' on Success; Error code otherwise. 
+ */ +-int dpbp_set_notifications(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- struct dpbp_notification_cfg *cfg) ++int dpbp_get_api_version(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 *major_ver, ++ u16 *minor_ver) + { + struct mc_command cmd = { 0 }; +- struct dpbp_cmd_set_notifications *cmd_params; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS, +- cmd_flags, token); +- cmd_params = (struct dpbp_cmd_set_notifications *)cmd.params; +- cmd_params->depletion_entry = cpu_to_le32(cfg->depletion_entry); +- cmd_params->depletion_exit = cpu_to_le32(cfg->depletion_exit); +- cmd_params->surplus_entry = cpu_to_le32(cfg->surplus_entry); +- cmd_params->surplus_exit = cpu_to_le32(cfg->surplus_exit); +- cmd_params->options = cpu_to_le16(cfg->options); +- cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx); +- cmd_params->message_iova = cpu_to_le64(cfg->message_iova); +- +- /* send command to mc*/ +- return mc_send_command(mc_io, &cmd); +-} +- +-/** +- * dpbp_get_notifications() - Get the notifications configuration +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPBP object +- * @cfg: notifications configuration +- * +- * Return: '0' on Success; Error code otherwise. +- */ +-int dpbp_get_notifications(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- struct dpbp_notification_cfg *cfg) +-{ +- struct mc_command cmd = { 0 }; +- struct dpbp_rsp_get_notifications *rsp_params; + int err; + + /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_NOTIFICATIONS, +- cmd_flags, +- token); ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_API_VERSION, ++ cmd_flags, 0); + +- /* send command to mc*/ ++ /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ +- rsp_params = (struct dpbp_rsp_get_notifications *)cmd.params; +- cfg->depletion_entry = le32_to_cpu(rsp_params->depletion_entry); +- cfg->depletion_exit = le32_to_cpu(rsp_params->depletion_exit); +- cfg->surplus_entry = le32_to_cpu(rsp_params->surplus_entry); +- cfg->surplus_exit = le32_to_cpu(rsp_params->surplus_exit); +- cfg->options = le16_to_cpu(rsp_params->options); +- cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx); +- cfg->message_iova = le64_to_cpu(rsp_params->message_iova); ++ mc_cmd_read_api_version(&cmd, major_ver, minor_ver); + + return 0; + } ++EXPORT_SYMBOL(dpbp_get_api_version); +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpcon-cmd.h +@@ -0,0 +1,85 @@ ++/* ++ * Copyright 2013-2016 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. 
++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef _FSL_DPCON_CMD_H ++#define _FSL_DPCON_CMD_H ++ ++/* DPCON Version */ ++#define DPCON_VER_MAJOR 3 ++#define DPCON_VER_MINOR 2 ++ ++/* Command versioning */ ++#define DPCON_CMD_BASE_VERSION 1 ++#define DPCON_CMD_ID_OFFSET 4 ++ ++#define DPCON_CMD(id) (((id) << DPCON_CMD_ID_OFFSET) | DPCON_CMD_BASE_VERSION) ++ ++/* Command IDs */ ++#define DPCON_CMDID_CLOSE DPCON_CMD(0x800) ++#define DPCON_CMDID_OPEN DPCON_CMD(0x808) ++#define DPCON_CMDID_GET_API_VERSION DPCON_CMD(0xa08) ++ ++#define DPCON_CMDID_ENABLE DPCON_CMD(0x002) ++#define DPCON_CMDID_DISABLE DPCON_CMD(0x003) ++#define DPCON_CMDID_GET_ATTR DPCON_CMD(0x004) ++#define DPCON_CMDID_RESET DPCON_CMD(0x005) ++#define DPCON_CMDID_IS_ENABLED DPCON_CMD(0x006) ++ ++#define DPCON_CMDID_SET_NOTIFICATION DPCON_CMD(0x100) ++ ++struct dpcon_cmd_open { ++ __le32 dpcon_id; ++}; ++ ++#define DPCON_ENABLE 1 ++ ++struct dpcon_rsp_is_enabled { ++ u8 enabled; ++}; ++ ++struct dpcon_rsp_get_attr { ++ /* response word 0 */ ++ __le32 id; ++ __le16 qbman_ch_id; ++ u8 num_priorities; ++ u8 pad; ++}; ++ ++struct dpcon_cmd_set_notification { ++ /* cmd word 0 */ ++ __le32 dpio_id; ++ u8 priority; ++ u8 pad[3]; ++ /* cmd word 1 */ ++ __le64 user_ctx; ++}; ++ ++#endif /* _FSL_DPCON_CMD_H */ +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpcon.c +@@ -0,0 +1,317 @@ ++/* Copyright 2013-2016 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#include "../include/mc-sys.h" ++#include "../include/mc-cmd.h" ++#include "../include/dpcon.h" ++ ++#include "dpcon-cmd.h" ++ ++/** ++ * dpcon_open() - Open a control session for the specified object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @dpcon_id: DPCON unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an ++ * already created object; an object may have been declared in ++ * the DPL or by calling the dpcon_create() function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent commands for ++ * this specific object. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_open(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ int dpcon_id, ++ u16 *token) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpcon_cmd_open *dpcon_cmd; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN, ++ cmd_flags, ++ 0); ++ dpcon_cmd = (struct dpcon_cmd_open *)cmd.params; ++ dpcon_cmd->dpcon_id = cpu_to_le32(dpcon_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = mc_cmd_hdr_read_token(&cmd); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dpcon_open); ++ ++/** ++ * dpcon_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * ++ * After this function is called, no further operations are ++ * allowed on the object without opening a new control session. ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpcon_close(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dpcon_close); ++ ++/** ++ * dpcon_enable() - Enable the DPCON ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpcon_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dpcon_enable); ++ ++/** ++ * dpcon_disable() - Disable the DPCON ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpcon_disable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dpcon_disable); ++ ++/** ++ * dpcon_is_enabled() - Check if the DPCON is enabled. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @en: Returns '1' if object is enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_is_enabled(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpcon_rsp_is_enabled *dpcon_rsp; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ dpcon_rsp = (struct dpcon_rsp_is_enabled *)cmd.params; ++ *en = dpcon_rsp->enabled & DPCON_ENABLE; ++ ++ return 0; ++} ++EXPORT_SYMBOL(dpcon_is_enabled); ++ ++/** ++ * dpcon_reset() - Reset the DPCON, returns the object to initial state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_reset(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET, ++ cmd_flags, token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dpcon_reset); ++ ++/** ++ * dpcon_get_attributes() - Retrieve DPCON attributes. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @attr: Object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpcon_get_attributes(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dpcon_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpcon_rsp_get_attr *dpcon_rsp; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ dpcon_rsp = (struct dpcon_rsp_get_attr *)cmd.params; ++ attr->id = le32_to_cpu(dpcon_rsp->id); ++ attr->qbman_ch_id = le16_to_cpu(dpcon_rsp->qbman_ch_id); ++ attr->num_priorities = dpcon_rsp->num_priorities; ++ ++ return 0; ++} ++EXPORT_SYMBOL(dpcon_get_attributes); ++ ++/** ++ * dpcon_set_notification() - Set DPCON notification destination ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @cfg: Notification parameters ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpcon_set_notification(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dpcon_notification_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpcon_cmd_set_notification *dpcon_cmd; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION, ++ cmd_flags, ++ token); ++ dpcon_cmd = (struct dpcon_cmd_set_notification *)cmd.params; ++ dpcon_cmd->dpio_id = cpu_to_le32(cfg->dpio_id); ++ dpcon_cmd->priority = cfg->priority; ++ dpcon_cmd->user_ctx = cpu_to_le64(cfg->user_ctx); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dpcon_set_notification); ++ ++/** ++ * dpcon_get_api_version - Get Data Path Concentrator API version ++ * @mc_io: Pointer to MC portal's DPCON object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @major_ver: Major version of DPCON API ++ * @minor_ver: Minor version of DPCON API ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpcon_get_api_version(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 *major_ver, ++ u16 *minor_ver) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_API_VERSION, ++ cmd_flags, 0); ++ ++ /* send command to mc */ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ mc_cmd_read_api_version(&cmd, major_ver, minor_ver); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dpcon_get_api_version); +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/Makefile +@@ -0,0 +1,11 @@ ++# ++# QorIQ DPAA2 DPIO driver ++# ++ ++subdir-ccflags-y := -Werror ++ ++obj-$(CONFIG_FSL_MC_DPIO) += fsl-mc-dpio.o ++ ++fsl-mc-dpio-objs := dpio.o qbman-portal.o dpio-service.o dpio-driver.o ++ ++obj-$(CONFIG_FSL_QBMAN_DEBUG) += qbman_debug.o +--- a/drivers/staging/fsl-mc/include/dpcon-cmd.h ++++ /dev/null +@@ -1,62 +0,0 @@ +-/* Copyright 2013-2015 Freescale Semiconductor Inc. +- * +- * Redistribution and use in source and binary forms, with or without +- * modification, are permitted provided that the following conditions are met: +- * * Redistributions of source code must retain the above copyright +- * notice, this list of conditions and the following disclaimer. +- * * Redistributions in binary form must reproduce the above copyright +- * notice, this list of conditions and the following disclaimer in the +- * documentation and/or other materials provided with the distribution. 
+- * * Neither the name of the above-listed copyright holders nor the +- * names of any contributors may be used to endorse or promote products +- * derived from this software without specific prior written permission. +- * +- * +- * ALTERNATIVELY, this software may be distributed under the terms of the +- * GNU General Public License ("GPL") as published by the Free Software +- * Foundation, either version 2 of that License or (at your option) any +- * later version. +- * +- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE +- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +- * POSSIBILITY OF SUCH DAMAGE. +- */ +-#ifndef _FSL_DPCON_CMD_H +-#define _FSL_DPCON_CMD_H +- +-/* DPCON Version */ +-#define DPCON_VER_MAJOR 2 +-#define DPCON_VER_MINOR 1 +- +-/* Command IDs */ +-#define DPCON_CMDID_CLOSE 0x800 +-#define DPCON_CMDID_OPEN 0x808 +-#define DPCON_CMDID_CREATE 0x908 +-#define DPCON_CMDID_DESTROY 0x900 +- +-#define DPCON_CMDID_ENABLE 0x002 +-#define DPCON_CMDID_DISABLE 0x003 +-#define DPCON_CMDID_GET_ATTR 0x004 +-#define DPCON_CMDID_RESET 0x005 +-#define DPCON_CMDID_IS_ENABLED 0x006 +- +-#define DPCON_CMDID_SET_IRQ 0x010 +-#define DPCON_CMDID_GET_IRQ 0x011 +-#define DPCON_CMDID_SET_IRQ_ENABLE 0x012 +-#define DPCON_CMDID_GET_IRQ_ENABLE 0x013 +-#define DPCON_CMDID_SET_IRQ_MASK 0x014 +-#define DPCON_CMDID_GET_IRQ_MASK 0x015 +-#define DPCON_CMDID_GET_IRQ_STATUS 0x016 +-#define DPCON_CMDID_CLEAR_IRQ_STATUS 0x017 +- +-#define DPCON_CMDID_SET_NOTIFICATION 0x100 +- +-#endif /* _FSL_DPCON_CMD_H */ +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h +@@ -0,0 +1,75 @@ ++/* ++ * Copyright 2013-2016 Freescale Semiconductor Inc. ++ * Copyright 2016 NXP ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef _FSL_DPIO_CMD_H ++#define _FSL_DPIO_CMD_H ++ ++/* DPIO Version */ ++#define DPIO_VER_MAJOR 4 ++#define DPIO_VER_MINOR 2 ++ ++/* Command Versioning */ ++ ++#define DPIO_CMD_ID_OFFSET 4 ++#define DPIO_CMD_BASE_VERSION 1 ++ ++#define DPIO_CMD(id) (((id) << DPIO_CMD_ID_OFFSET) | DPIO_CMD_BASE_VERSION) ++ ++/* Command IDs */ ++#define DPIO_CMDID_CLOSE DPIO_CMD(0x800) ++#define DPIO_CMDID_OPEN DPIO_CMD(0x803) ++#define DPIO_CMDID_GET_API_VERSION DPIO_CMD(0xa03) ++#define DPIO_CMDID_ENABLE DPIO_CMD(0x002) ++#define DPIO_CMDID_DISABLE DPIO_CMD(0x003) ++#define DPIO_CMDID_GET_ATTR DPIO_CMD(0x004) ++ ++struct dpio_cmd_open { ++ __le32 dpio_id; ++}; ++ ++#define DPIO_CHANNEL_MODE_MASK 0x3 ++ ++struct dpio_rsp_get_attr { ++ /* cmd word 0 */ ++ __le32 id; ++ __le16 qbman_portal_id; ++ u8 num_priorities; ++ u8 channel_mode; ++ /* cmd word 1 */ ++ __le64 qbman_portal_ce_addr; ++ /* cmd word 2 */ ++ __le64 qbman_portal_ci_addr; ++ /* cmd word 3 */ ++ __le32 qbman_version; ++}; ++ ++#endif /* _FSL_DPIO_CMD_H */ +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c +@@ -0,0 +1,296 @@ ++/* ++ * Copyright 2014-2016 Freescale Semiconductor Inc. ++ * Copyright 2016 NXP ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include <linux/types.h> ++#include <linux/init.h> ++#include <linux/module.h> ++#include <linux/platform_device.h> ++#include <linux/interrupt.h> ++#include <linux/msi.h> ++#include <linux/dma-mapping.h> ++#include <linux/delay.h> ++ ++#include "../../include/mc.h" ++#include "../../include/dpaa2-io.h" ++ ++#include "qbman-portal.h" ++#include "dpio.h" ++#include "dpio-cmd.h" ++ ++MODULE_LICENSE("Dual BSD/GPL"); ++MODULE_AUTHOR("Freescale Semiconductor, Inc"); ++MODULE_DESCRIPTION("DPIO Driver"); ++ ++struct dpio_priv { ++ struct dpaa2_io *io; ++}; ++ ++static irqreturn_t dpio_irq_handler(int irq_num, void *arg) ++{ ++ struct device *dev = (struct device *)arg; ++ struct dpio_priv *priv = dev_get_drvdata(dev); ++ ++ return dpaa2_io_irq(priv->io); ++} ++ ++static void unregister_dpio_irq_handlers(struct fsl_mc_device *dpio_dev) ++{ ++ struct fsl_mc_device_irq *irq; ++ ++ irq = dpio_dev->irqs[0]; ++ ++ /* clear the affinity hint */ ++ irq_set_affinity_hint(irq->msi_desc->irq, NULL); ++} ++ ++static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu) ++{ ++ struct dpio_priv *priv; ++ int error; ++ struct fsl_mc_device_irq *irq; ++ cpumask_t mask; ++ ++ priv = dev_get_drvdata(&dpio_dev->dev); ++ ++ irq = dpio_dev->irqs[0]; ++ error = devm_request_irq(&dpio_dev->dev, ++ irq->msi_desc->irq, ++ dpio_irq_handler, ++ 0, ++ dev_name(&dpio_dev->dev), ++ &dpio_dev->dev); ++ if (error < 0) { ++ dev_err(&dpio_dev->dev, ++ "devm_request_irq() failed: %d\n", ++ error); ++ return error; ++ } ++ ++ /* set the affinity hint */ ++ cpumask_clear(&mask); ++ cpumask_set_cpu(cpu, &mask); ++ if (irq_set_affinity_hint(irq->msi_desc->irq, &mask)) ++ dev_err(&dpio_dev->dev, ++ "irq_set_affinity failed irq %d cpu %d\n", ++ irq->msi_desc->irq, cpu); ++ ++ return 0; ++} ++ ++static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev) ++{ ++ struct dpio_attr dpio_attrs; ++ struct dpaa2_io_desc desc; ++ struct dpio_priv *priv; ++ int err = -ENOMEM; ++ struct device *dev = &dpio_dev->dev; ++ static int next_cpu = -1; ++ ++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); ++ if (!priv) ++ goto err_priv_alloc; ++ ++ dev_set_drvdata(dev, priv); ++ ++ err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io); ++ if (err) { ++ dev_dbg(dev, "MC portal allocation failed\n"); ++ err = -EPROBE_DEFER; ++ goto err_mcportal; ++ } ++ ++ err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id, ++ &dpio_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dpio_open() failed\n"); ++ goto err_open; ++ } ++ ++ err = dpio_get_attributes(dpio_dev->mc_io, 0, dpio_dev->mc_handle, ++ &dpio_attrs); ++ if (err) { ++ dev_err(dev, "dpio_get_attributes() failed %d\n", err); ++ goto err_get_attr; ++ } ++ desc.qman_version = dpio_attrs.qbman_version; ++ ++ err = dpio_enable(dpio_dev->mc_io, 0, dpio_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dpio_enable() failed %d\n", err); ++ goto err_get_attr; ++ } ++ ++ /* initialize DPIO descriptor */ ++ desc.receives_notifications = dpio_attrs.num_priorities ? 1 : 0; ++ desc.has_8prio = dpio_attrs.num_priorities == 8 ?
1 : 0; ++ desc.dpio_id = dpio_dev->obj_desc.id; ++ ++ /* get the cpu to use for the affinity hint */ ++ if (next_cpu == -1) ++ next_cpu = cpumask_first(cpu_online_mask); ++ else ++ next_cpu = cpumask_next(next_cpu, cpu_online_mask); ++ ++ if (!cpu_possible(next_cpu)) { ++ dev_err(dev, "probe failed. Number of DPIOs exceeds NR_CPUS.\n"); ++ err = -ERANGE; ++ goto err_allocate_irqs; ++ } ++ desc.cpu = next_cpu; ++ ++ /* ++ * Set the CENA regs to be the cache enabled area of the portal to ++ * achieve the best performance. ++ */ ++ desc.regs_cena = ioremap_cache_ns(dpio_dev->regions[0].start, ++ resource_size(&dpio_dev->regions[0])); ++ desc.regs_cinh = ioremap(dpio_dev->regions[1].start, ++ resource_size(&dpio_dev->regions[1])); ++ ++ err = fsl_mc_allocate_irqs(dpio_dev); ++ if (err) { ++ dev_err(dev, "fsl_mc_allocate_irqs failed. err=%d\n", err); ++ goto err_allocate_irqs; ++ } ++ ++ err = register_dpio_irq_handlers(dpio_dev, desc.cpu); ++ if (err) ++ goto err_register_dpio_irq; ++ ++ priv->io = dpaa2_io_create(&desc); ++ if (!priv->io) { ++ dev_err(dev, "dpaa2_io_create failed\n"); ++ goto err_dpaa2_io_create; ++ } ++ ++ dev_info(dev, "probed\n"); ++ dev_dbg(dev, " receives_notifications = %d\n", ++ desc.receives_notifications); ++ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle); ++ fsl_mc_portal_free(dpio_dev->mc_io); ++ ++ return 0; ++ ++err_dpaa2_io_create: ++ unregister_dpio_irq_handlers(dpio_dev); ++err_register_dpio_irq: ++ fsl_mc_free_irqs(dpio_dev); ++err_allocate_irqs: ++ dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle); ++err_get_attr: ++ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle); ++err_open: ++ fsl_mc_portal_free(dpio_dev->mc_io); ++err_mcportal: ++ dev_set_drvdata(dev, NULL); ++err_priv_alloc: ++ return err; ++} ++ ++/* Tear down interrupts for a given DPIO object */ ++static void dpio_teardown_irqs(struct fsl_mc_device *dpio_dev) ++{ ++ unregister_dpio_irq_handlers(dpio_dev); ++ fsl_mc_free_irqs(dpio_dev); ++} ++ ++static int dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev) ++{ ++ struct device *dev; ++ struct dpio_priv *priv; ++ int err; ++ ++ dev = &dpio_dev->dev; ++ priv = dev_get_drvdata(dev); ++ ++ dpaa2_io_down(priv->io); ++ ++ dpio_teardown_irqs(dpio_dev); ++ ++ err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io); ++ if (err) { ++ dev_err(dev, "MC portal allocation failed\n"); ++ goto err_mcportal; ++ } ++ ++ err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id, ++ &dpio_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dpio_open() failed\n"); ++ goto err_open; ++ } ++ ++ dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle); ++ ++ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle); ++ ++ fsl_mc_portal_free(dpio_dev->mc_io); ++ ++ dev_set_drvdata(dev, NULL); ++ ++ return 0; ++ ++err_open: ++ fsl_mc_portal_free(dpio_dev->mc_io); ++err_mcportal: ++ return err; ++} ++ ++static const struct fsl_mc_device_id dpaa2_dpio_match_id_table[] = { ++ { ++ .vendor = FSL_MC_VENDOR_FREESCALE, ++ .obj_type = "dpio", ++ }, ++ { .vendor = 0x0 } ++}; ++ ++static struct fsl_mc_driver dpaa2_dpio_driver = { ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .owner = THIS_MODULE, ++ }, ++ .probe = dpaa2_dpio_probe, ++ .remove = dpaa2_dpio_remove, ++ .match_id_table = dpaa2_dpio_match_id_table ++}; ++ ++static int dpio_driver_init(void) ++{ ++ return fsl_mc_driver_register(&dpaa2_dpio_driver); ++} ++ ++static void dpio_driver_exit(void) ++{ ++ fsl_mc_driver_unregister(&dpaa2_dpio_driver); ++} ++module_init(dpio_driver_init); ++module_exit(dpio_driver_exit); +--- 
/dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt +@@ -0,0 +1,135 @@ ++Copyright 2016 NXP ++ ++Introduction ++------------ ++ ++A DPAA2 DPIO (Data Path I/O) is a hardware object that provides ++interfaces to enqueue and dequeue frames to/from network interfaces ++and other accelerators. A DPIO also provides hardware buffer ++pool management for network interfaces. ++ ++This document provides an overview of the Linux DPIO driver, its ++subcomponents, and its APIs. ++ ++See Documentation/dpaa2/overview.txt for a general overview of DPAA2 ++and the general DPAA2 driver architecture in Linux. ++ ++Driver Overview ++--------------- ++ ++The DPIO driver is bound to DPIO objects discovered on the fsl-mc bus and ++provides services that: ++ A) allow other drivers, such as the Ethernet driver, to enqueue and dequeue ++ frames for their respective objects ++ B) allow drivers to register callbacks for data availability notifications ++ when data becomes available on a queue or channel ++ C) allow drivers to manage hardware buffer pools ++ ++The Linux DPIO driver consists of 3 primary components-- ++ DPIO object driver-- fsl-mc driver that manages the DPIO object ++ DPIO service-- provides APIs to other Linux drivers for services ++ QBman portal interface-- sends portal commands, gets responses ++ ++ fsl-mc other ++ bus drivers ++ | | ++ +---+----+ +------+-----+ ++ |DPIO obj| |DPIO service| ++ | driver |---| (DPIO) | ++ +--------+ +------+-----+ ++ | ++ +------+-----+ ++ | QBman | ++ | portal i/f | ++ +------------+ ++ | ++ hardware ++ ++The diagram below shows how the DPIO driver components fit with the other ++DPAA2 Linux driver components: ++ +------------+ ++ | OS Network | ++ | Stack | ++ +------------+ +------------+ ++ | Allocator |. . . . . . . | Ethernet | ++ |(DPMCP,DPBP)| | (DPNI) | ++ +-.----------+ +---+---+----+ ++ . . ^ | ++ . . | | <enqueue/ dequeue> ++ +-------------+ . | | ++ | DPRC driver | . +--------+ +------------+ ++ | (DPRC) | . . |DPIO obj| |DPIO service| ++ +----------+--+ | driver |-| (DPIO) | ++ | +--------+ +------+-----+ ++ | +------|-----+ ++ | | QBman | ++ +----+--------------+ | portal i/f | ++ | MC-bus driver | +------------+ ++ | | | ++ | /soc/fsl-mc | | ++ +-------------------+ | ++ | ++ =========================================|=========|======================== ++ +-+--DPIO---|-----------+ ++ | | | ++ | QBman Portal | ++ +-----------------------+ ++ ++ ============================================================================ ++ ++ ++DPIO Object Driver (dpio-driver.c) ++---------------------------------- ++ ++ The dpio-driver component registers with the fsl-mc bus to handle objects of ++ type "dpio". The implementation of probe() handles basic initialization ++ of the DPIO including mapping of the DPIO regions (the QBman SW portal) ++ and initializing interrupts and registering irq handlers. The dpio-driver ++ registers the probed DPIO with dpio-service. ++ ++DPIO service (dpio-service.c, dpaa2-io.h) ++------------------------------------------ ++ ++ The dpio service component provides queuing, notification, and buffer ++ management services to DPAA2 drivers, such as the Ethernet driver. A system ++ will typically allocate 1 DPIO object per CPU to allow queuing operations ++ to happen simultaneously across all CPUs.
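++
++ As a rough sketch of how a driver consumes these services (the callback,
++ function names, and queue id below are hypothetical, not taken from an
++ in-tree consumer), registering for FQDAN notifications on a frame queue
++ might look like:
++
++ static void my_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
++ {
++ /* frames have landed on the FQ; pull-dequeue them, then
++ * rearm the source with dpaa2_io_service_rearm() when done
++ */
++ }
++
++ static int my_listen(u32 my_fqid)
++ {
++ static struct dpaa2_io_notification_ctx ctx;
++
++ ctx.is_cdan = 0; /* FQDAN (frame queue), not CDAN */
++ ctx.desired_cpu = -1; /* no CPU affinity preference */
++ ctx.id = my_fqid;
++ ctx.cb = my_fqdan_cb;
++
++ /* a NULL service pointer lets the service pick a DPIO */
++ return dpaa2_io_service_register(NULL, &ctx);
++ }
++
++ The individual service entry points are listed below.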
++ ++ Notification handling ++ dpaa2_io_service_register() ++ dpaa2_io_service_deregister() ++ dpaa2_io_service_rearm() ++ ++ Queuing ++ dpaa2_io_service_pull_fq() ++ dpaa2_io_service_pull_channel() ++ dpaa2_io_service_enqueue_fq() ++ dpaa2_io_service_enqueue_qd() ++ dpaa2_io_store_create() ++ dpaa2_io_store_destroy() ++ dpaa2_io_store_next() ++ ++ Buffer pool management ++ dpaa2_io_service_release() ++ dpaa2_io_service_acquire() ++ ++QBman portal interface (qbman-portal.c) ++--------------------------------------- ++ ++ The qbman-portal component provides APIs to do the low level hardware ++ bit twiddling for operations such as: ++ -initializing Qman software portals ++ -building and sending portal commands ++ -portal interrupt configuration and processing ++ ++ The qbman-portal APIs are not public to other drivers, and are ++ only used by dpio-service. ++ ++Other (dpaa2-fd.h, dpaa2-global.h) ++---------------------------------- ++ ++ Frame descriptor and scatter-gather definitions and the APIs used to ++ manipulate them are defined in dpaa2-fd.h. ++ ++ Dequeue result struct and parsing APIs are defined in dpaa2-global.h. +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c +@@ -0,0 +1,689 @@ ++/* ++ * Copyright 2014-2016 Freescale Semiconductor Inc. ++ * Copyright 2016 NXP ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++#include <linux/types.h> ++#include "../../include/mc.h" ++#include "../../include/dpaa2-io.h" ++#include <linux/init.h> ++#include <linux/module.h> ++#include <linux/platform_device.h> ++#include <linux/interrupt.h> ++#include <linux/dma-mapping.h> ++#include <linux/slab.h> ++ ++#include "dpio.h" ++#include "qbman-portal.h" ++#include "qbman_debug.h" ++ ++struct dpaa2_io { ++ atomic_t refs; ++ struct dpaa2_io_desc dpio_desc; ++ struct qbman_swp_desc swp_desc; ++ struct qbman_swp *swp; ++ struct list_head node; ++ /* protect against multiple management commands */ ++ spinlock_t lock_mgmt_cmd; ++ /* protect notifications list */ ++ spinlock_t lock_notifications; ++ struct list_head notifications; ++}; ++ ++struct dpaa2_io_store { ++ unsigned int max; ++ dma_addr_t paddr; ++ struct dpaa2_dq *vaddr; ++ void *alloced_addr; /* unaligned value from kmalloc() */ ++ unsigned int idx; /* position of the next-to-be-returned entry */ ++ struct qbman_swp *swp; /* portal used to issue VDQCR */ ++ struct device *dev; /* device used for DMA mapping */ ++}; ++ ++/* keep a per cpu array of DPIOs for fast access */ ++static struct dpaa2_io *dpio_by_cpu[NR_CPUS]; ++static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list); ++static DEFINE_SPINLOCK(dpio_list_lock); ++ ++static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d, ++ int cpu) ++{ ++ if (d) ++ return d; ++ ++ if (unlikely(cpu >= num_possible_cpus())) ++ return NULL; ++ ++ /* ++ * If cpu == -1, choose the current cpu, with no guarantees about ++ * potentially being migrated away. ++ */ ++ if (unlikely(cpu < 0)) ++ cpu = smp_processor_id(); ++ ++ /* If a specific cpu was requested, pick it up immediately */ ++ return dpio_by_cpu[cpu]; ++} ++ ++static inline struct dpaa2_io *service_select(struct dpaa2_io *d) ++{ ++ if (d) ++ return d; ++ ++ spin_lock(&dpio_list_lock); ++ d = list_entry(dpio_list.next, struct dpaa2_io, node); ++ list_del(&d->node); ++ list_add_tail(&d->node, &dpio_list); ++ spin_unlock(&dpio_list_lock); ++ ++ return d; ++} ++ ++/** ++ * dpaa2_io_create() - create a dpaa2_io object. ++ * @desc: the dpaa2_io descriptor ++ * ++ * Activates a "struct dpaa2_io" corresponding to the given config of an actual ++ * DPIO object. ++ * ++ * Return a valid dpaa2_io object for success, or NULL for failure.
++ */ ++struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc) ++{ ++ struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL); ++ ++ if (!obj) ++ return NULL; ++ ++ /* check if CPU is out of range (-1 means any cpu) */ ++ if (desc->cpu >= num_possible_cpus()) { ++ kfree(obj); ++ return NULL; ++ } ++ ++ atomic_set(&obj->refs, 1); ++ obj->dpio_desc = *desc; ++ obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena; ++ obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh; ++ obj->swp_desc.qman_version = obj->dpio_desc.qman_version; ++ obj->swp = qbman_swp_init(&obj->swp_desc); ++ ++ if (!obj->swp) { ++ kfree(obj); ++ return NULL; ++ } ++ ++ INIT_LIST_HEAD(&obj->node); ++ spin_lock_init(&obj->lock_mgmt_cmd); ++ spin_lock_init(&obj->lock_notifications); ++ INIT_LIST_HEAD(&obj->notifications); ++ ++ /* For now only enable DQRR interrupts */ ++ qbman_swp_interrupt_set_trigger(obj->swp, ++ QBMAN_SWP_INTERRUPT_DQRI); ++ qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff); ++ if (obj->dpio_desc.receives_notifications) ++ qbman_swp_push_set(obj->swp, 0, 1); ++ ++ spin_lock(&dpio_list_lock); ++ list_add_tail(&obj->node, &dpio_list); ++ if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu]) ++ dpio_by_cpu[desc->cpu] = obj; ++ spin_unlock(&dpio_list_lock); ++ ++ return obj; ++} ++EXPORT_SYMBOL(dpaa2_io_create); ++ ++/** ++ * dpaa2_io_down() - release the dpaa2_io object. ++ * @d: the dpaa2_io object to be released. ++ * ++ * The "struct dpaa2_io" type can represent an individual DPIO object (as ++ * described by "struct dpaa2_io_desc") or an instance of a "DPIO service", ++ * which can be used to group/encapsulate multiple DPIO objects. In all cases, ++ * each handle obtained should be released using this function. ++ */ ++void dpaa2_io_down(struct dpaa2_io *d) ++{ ++ if (!atomic_dec_and_test(&d->refs)) ++ return; ++ kfree(d); ++} ++EXPORT_SYMBOL(dpaa2_io_down); ++ ++#define DPAA_POLL_MAX 32 ++ ++/** ++ * dpaa2_io_irq() - ISR for DPIO interrupts ++ * ++ * @obj: the given DPIO object. ++ * ++ * Return IRQ_HANDLED for success or IRQ_NONE if there ++ * were no pending interrupts. ++ */ ++irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj) ++{ ++ const struct dpaa2_dq *dq; ++ int max = 0; ++ struct qbman_swp *swp; ++ u32 status; ++ ++ swp = obj->swp; ++ status = qbman_swp_interrupt_read_status(swp); ++ if (!status) ++ return IRQ_NONE; ++ ++ dq = qbman_swp_dqrr_next(swp); ++ while (dq) { ++ if (qbman_result_is_SCN(dq)) { ++ struct dpaa2_io_notification_ctx *ctx; ++ u64 q64; ++ ++ q64 = qbman_result_SCN_ctx(dq); ++ ctx = (void *)q64; ++ ctx->cb(ctx); ++ } else { ++ pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n"); ++ } ++ qbman_swp_dqrr_consume(swp, dq); ++ ++max; ++ if (max > DPAA_POLL_MAX) ++ goto done; ++ dq = qbman_swp_dqrr_next(swp); ++ } ++done: ++ qbman_swp_interrupt_clear_status(swp, status); ++ qbman_swp_interrupt_set_inhibit(swp, 0); ++ return IRQ_HANDLED; ++} ++EXPORT_SYMBOL(dpaa2_io_irq); ++ ++/** ++ * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN ++ * notifications on the given DPIO service. ++ * @d: the given DPIO service. ++ * @ctx: the notification context. ++ * ++ * The caller should make the MC command to attach a DPAA2 object to ++ * a DPIO after this function completes successfully. 
In that way: ++ * (a) The DPIO service is "ready" to handle a notification arrival ++ * (which might happen before the "attach" command to MC has ++ * returned control of execution back to the caller) ++ * (b) The DPIO service can provide back to the caller the 'dpio_id' and ++ * 'qman64' parameters that it should pass along in the MC command ++ * in order for the object to be configured to produce the right ++ * notification fields to the DPIO service. ++ * ++ * Return 0 for success, or -ENODEV for failure. ++ */ ++int dpaa2_io_service_register(struct dpaa2_io *d, ++ struct dpaa2_io_notification_ctx *ctx) ++{ ++ unsigned long irqflags; ++ ++ d = service_select_by_cpu(d, ctx->desired_cpu); ++ if (!d) ++ return -ENODEV; ++ ++ ctx->dpio_id = d->dpio_desc.dpio_id; ++ ctx->qman64 = (u64)ctx; ++ ctx->dpio_private = d; ++ spin_lock_irqsave(&d->lock_notifications, irqflags); ++ list_add(&ctx->node, &d->notifications); ++ spin_unlock_irqrestore(&d->lock_notifications, irqflags); ++ ++ /* Enable the generation of CDAN notifications */ ++ if (ctx->is_cdan) ++ qbman_swp_CDAN_set_context_enable(d->swp, ++ (u16)ctx->id, ++ ctx->qman64); ++ return 0; ++} ++EXPORT_SYMBOL(dpaa2_io_service_register); ++ ++/** ++ * dpaa2_io_service_deregister - The opposite of 'register'. ++ * @service: the given DPIO service. ++ * @ctx: the notification context. ++ * ++ * This function should be called only after sending the MC command to ++ * detach the notification-producing device from the DPIO. ++ */ ++void dpaa2_io_service_deregister(struct dpaa2_io *service, ++ struct dpaa2_io_notification_ctx *ctx) ++{ ++ struct dpaa2_io *d = ctx->dpio_private; ++ unsigned long irqflags; ++ ++ if (ctx->is_cdan) ++ qbman_swp_CDAN_disable(d->swp, (u16)ctx->id); ++ ++ spin_lock_irqsave(&d->lock_notifications, irqflags); ++ list_del(&ctx->node); ++ spin_unlock_irqrestore(&d->lock_notifications, irqflags); ++} ++EXPORT_SYMBOL(dpaa2_io_service_deregister); ++ ++/** ++ * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service. ++ * @d: the given DPIO service. ++ * @ctx: the notification context. ++ * ++ * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is ++ * considered "disarmed". I.e. the user can issue pull dequeue operations on that ++ * traffic source for as long as it likes. Eventually it may wish to "rearm" ++ * that source to allow it to produce another FQDAN/CDAN; that's what this ++ * function achieves. ++ * ++ * Return 0 for success. ++ */ ++int dpaa2_io_service_rearm(struct dpaa2_io *d, ++ struct dpaa2_io_notification_ctx *ctx) ++{ ++ unsigned long irqflags; ++ int err; ++ ++ d = service_select_by_cpu(d, ctx->desired_cpu); ++ if (unlikely(!d)) ++ return -ENODEV; ++ ++ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags); ++ if (ctx->is_cdan) ++ err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id); ++ else ++ err = qbman_swp_fq_schedule(d->swp, ctx->id); ++ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags); ++ ++ return err; ++} ++EXPORT_SYMBOL(dpaa2_io_service_rearm); ++ ++/** ++ * dpaa2_io_service_pull_fq() - pull dequeue functions from a fq. ++ * @d: the given DPIO service. ++ * @fqid: the given frame queue id. ++ * @s: the dpaa2_io_store object for the result. ++ * ++ * Return 0 for success, or error code for failure.
++ */ ++int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid, ++ struct dpaa2_io_store *s) ++{ ++ struct qbman_pull_desc pd; ++ int err; ++ ++ qbman_pull_desc_clear(&pd); ++ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1); ++ qbman_pull_desc_set_numframes(&pd, (u8)s->max); ++ qbman_pull_desc_set_fq(&pd, fqid); ++ ++ d = service_select(d); ++ if (!d) ++ return -ENODEV; ++ s->swp = d->swp; ++ err = qbman_swp_pull(d->swp, &pd); ++ if (err) ++ s->swp = NULL; ++ ++ return err; ++} ++EXPORT_SYMBOL(dpaa2_io_service_pull_fq); ++ ++/** ++ * dpaa2_io_service_pull_channel() - pull dequeue functions from a channel. ++ * @d: the given DPIO service. ++ * @channelid: the given channel id. ++ * @s: the dpaa2_io_store object for the result. ++ * ++ * Return 0 for success, or error code for failure. ++ */ ++int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid, ++ struct dpaa2_io_store *s) ++{ ++ struct qbman_pull_desc pd; ++ int err; ++ ++ qbman_pull_desc_clear(&pd); ++ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1); ++ qbman_pull_desc_set_numframes(&pd, (u8)s->max); ++ qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio); ++ ++ d = service_select(d); ++ if (!d) ++ return -ENODEV; ++ ++ s->swp = d->swp; ++ err = qbman_swp_pull(d->swp, &pd); ++ if (err) ++ s->swp = NULL; ++ ++ return err; ++} ++EXPORT_SYMBOL(dpaa2_io_service_pull_channel); ++ ++/** ++ * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue. ++ * @d: the given DPIO service. ++ * @fqid: the given frame queue id. ++ * @fd: the frame descriptor which is enqueued. ++ * ++ * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready, ++ * or -ENODEV if there is no dpio service. ++ */ ++int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, ++ u32 fqid, ++ const struct dpaa2_fd *fd) ++{ ++ struct qbman_eq_desc ed; ++ ++ d = service_select(d); ++ if (!d) ++ return -ENODEV; ++ ++ qbman_eq_desc_clear(&ed); ++ qbman_eq_desc_set_no_orp(&ed, 0); ++ qbman_eq_desc_set_fq(&ed, fqid); ++ ++ return qbman_swp_enqueue(d->swp, &ed, fd); ++} ++EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq); ++ ++/** ++ * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD. ++ * @d: the given DPIO service. ++ * @qdid: the given queuing destination id. ++ * @prio: the given queuing priority. ++ * @qdbin: the given queuing destination bin. ++ * @fd: the frame descriptor which is enqueued. ++ * ++ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready, ++ * or -ENODEV if there is no dpio service. ++ */ ++int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, ++ u32 qdid, u8 prio, u16 qdbin, ++ const struct dpaa2_fd *fd) ++{ ++ struct qbman_eq_desc ed; ++ ++ d = service_select(d); ++ if (!d) ++ return -ENODEV; ++ ++ qbman_eq_desc_clear(&ed); ++ qbman_eq_desc_set_no_orp(&ed, 0); ++ qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio); ++ ++ return qbman_swp_enqueue(d->swp, &ed, fd); ++} ++EXPORT_SYMBOL(dpaa2_io_service_enqueue_qd); ++ ++/** ++ * dpaa2_io_service_release() - Release buffers to a buffer pool. ++ * @d: the given DPIO object. ++ * @bpid: the buffer pool id. ++ * @buffers: the buffers to be released. ++ * @num_buffers: the number of the buffers to be released. ++ * ++ * Return 0 for success, and negative error code for failure. 
++ */ ++int dpaa2_io_service_release(struct dpaa2_io *d, ++ u32 bpid, ++ const u64 *buffers, ++ unsigned int num_buffers) ++{ ++ struct qbman_release_desc rd; ++ ++ d = service_select(d); ++ if (!d) ++ return -ENODEV; ++ ++ qbman_release_desc_clear(&rd); ++ qbman_release_desc_set_bpid(&rd, bpid); ++ ++ return qbman_swp_release(d->swp, &rd, buffers, num_buffers); ++} ++EXPORT_SYMBOL(dpaa2_io_service_release); ++ ++/** ++ * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool. ++ * @d: the given DPIO object. ++ * @bpid: the buffer pool id. ++ * @buffers: the buffer addresses for acquired buffers. ++ * @num_buffers: the expected number of the buffers to acquire. ++ * ++ * Return a negative error code if the command failed, otherwise it returns ++ * the number of buffers acquired, which may be less than the number requested. ++ * E.g. if the buffer pool is empty, this will return zero. ++ */ ++int dpaa2_io_service_acquire(struct dpaa2_io *d, ++ u32 bpid, ++ u64 *buffers, ++ unsigned int num_buffers) ++{ ++ unsigned long irqflags; ++ int err; ++ ++ d = service_select(d); ++ if (!d) ++ return -ENODEV; ++ ++ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags); ++ err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers); ++ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags); ++ ++ return err; ++} ++EXPORT_SYMBOL(dpaa2_io_service_acquire); ++ ++/* ++ * 'Stores' are reusable memory blocks for holding dequeue results, and to ++ * assist with parsing those results. ++ */ ++ ++/** ++ * dpaa2_io_store_create() - Create the dma memory storage for dequeue result. ++ * @max_frames: the maximum number of dequeued results for frames, must be <= 16. ++ * @dev: the device to allow mapping/unmapping the DMAable region. ++ * ++ * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)". ++ * The 'dpaa2_io_store' returned is a DPIO service managed object. ++ * ++ * Return pointer to dpaa2_io_store struct for successfully created storage ++ * memory, or NULL on error. ++ */ ++struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames, ++ struct device *dev) ++{ ++ struct dpaa2_io_store *ret; ++ size_t size; ++ ++ if (!max_frames || (max_frames > 16)) ++ return NULL; ++ ++ ret = kmalloc(sizeof(*ret), GFP_KERNEL); ++ if (!ret) ++ return NULL; ++ ++ ret->max = max_frames; ++ size = max_frames * sizeof(struct dpaa2_dq) + 64; ++ ret->alloced_addr = kzalloc(size, GFP_KERNEL); ++ if (!ret->alloced_addr) { ++ kfree(ret); ++ return NULL; ++ } ++ ++ ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64); ++ ret->paddr = dma_map_single(dev, ret->vaddr, ++ sizeof(struct dpaa2_dq) * max_frames, ++ DMA_FROM_DEVICE); ++ if (dma_mapping_error(dev, ret->paddr)) { ++ kfree(ret->alloced_addr); ++ kfree(ret); ++ return NULL; ++ } ++ ++ ret->idx = 0; ++ ret->dev = dev; ++ ++ return ret; ++} ++EXPORT_SYMBOL(dpaa2_io_store_create); ++ ++/** ++ * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue ++ * result. ++ * @s: the storage memory to be destroyed. ++ */ ++void dpaa2_io_store_destroy(struct dpaa2_io_store *s) ++{ ++ dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max, ++ DMA_FROM_DEVICE); ++ kfree(s->alloced_addr); ++ kfree(s); ++} ++EXPORT_SYMBOL(dpaa2_io_store_destroy); ++ ++/** ++ * dpaa2_io_store_next() - Determine when the next dequeue result is available. ++ * @s: the dpaa2_io_store object. ++ * @is_last: indicate whether this is the last frame in the pull command.
++ * ++ * When an object driver performs dequeues to a dpaa2_io_store, this function ++ * can be used to determine when the next frame result is available. Once ++ * this function returns non-NULL, a subsequent call to it will try to find ++ * the next dequeue result. ++ * ++ * Note that if a pull-dequeue has a NULL result because the target FQ/channel ++ * was empty, then this function will also return NULL (rather than expecting ++ * the caller to always check for this). As such, "is_last" can be used to ++ * differentiate between "end-of-empty-dequeue" and "still-waiting". ++ * ++ * Return dequeue result for a valid dequeue result, or NULL for empty dequeue. ++ */ ++struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last) ++{ ++ int match; ++ struct dpaa2_dq *ret = &s->vaddr[s->idx]; ++ ++ match = qbman_result_has_new_result(s->swp, ret); ++ if (!match) { ++ *is_last = 0; ++ return NULL; ++ } ++ ++ s->idx++; ++ ++ if (dpaa2_dq_is_pull_complete(ret)) { ++ *is_last = 1; ++ s->idx = 0; ++ /* ++ * If we get an empty dequeue result to terminate a zero-results ++ * vdqcr, return NULL to the caller rather than expecting them to ++ * check non-NULL results every time. ++ */ ++ if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME)) ++ ret = NULL; ++ } else { ++ *is_last = 0; ++ } ++ ++ return ret; ++} ++EXPORT_SYMBOL(dpaa2_io_store_next); ++ ++#ifdef CONFIG_FSL_QBMAN_DEBUG ++/** ++ * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq. ++ * @d: the given DPIO object. ++ * @fqid: the id of frame queue to be queried. ++ * @fcnt: the queried frame count. ++ * @bcnt: the queried byte count. ++ * ++ * Knowing the FQ count at run-time can be useful in debugging situations. ++ * The instantaneous frame- and byte-count are hereby returned. ++ * ++ * Return 0 for a successful query, and negative error code if query fails. ++ */ ++int dpaa2_io_query_fq_count(struct dpaa2_io *d, uint32_t fqid, ++ u32 *fcnt, u32 *bcnt) ++{ ++ struct qbman_attr state; ++ struct qbman_swp *swp; ++ unsigned long irqflags; ++ int ret; ++ ++ d = service_select(d); ++ if (!d) ++ return -ENODEV; ++ ++ swp = d->swp; ++ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags); ++ ret = qbman_fq_query_state(swp, fqid, &state); ++ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags); ++ if (ret) ++ return ret; ++ *fcnt = qbman_fq_state_frame_count(&state); ++ *bcnt = qbman_fq_state_byte_count(&state); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dpaa2_io_query_fq_count); ++ ++/** ++ * dpaa2_io_query_bp_count() - Query the number of buffers currently in a ++ * buffer pool. ++ * @d: the given DPIO object. ++ * @bpid: the index of buffer pool to be queried. ++ * @num: the queried number of buffers in the buffer pool. ++ * ++ * Return 0 for a successful query, and negative error code if query fails. ++ */ ++int dpaa2_io_query_bp_count(struct dpaa2_io *d, uint32_t bpid, u32 *num) ++{ ++ struct qbman_attr state; ++ struct qbman_swp *swp; ++ unsigned long irqflags; ++ int ret; ++ ++ d = service_select(d); ++ if (!d) ++ return -ENODEV; ++ ++ swp = d->swp; ++ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags); ++ ret = qbman_bp_query(swp, bpid, &state); ++ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags); ++ if (ret) ++ return ret; ++ *num = qbman_bp_info_num_free_bufs(&state); ++ return 0; ++} ++EXPORT_SYMBOL(dpaa2_io_query_bp_count); ++#endif +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/dpio.c +@@ -0,0 +1,224 @@ ++/* ++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#include "../../include/mc-sys.h" ++#include "../../include/mc-cmd.h" ++ ++#include "dpio.h" ++#include "dpio-cmd.h" ++ ++/* ++ * Data Path I/O Portal API ++ * Contains initialization APIs and runtime control APIs for DPIO ++ */ ++ ++/** ++ * dpio_open() - Open a control session for the specified object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @dpio_id: DPIO unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an ++ * already created object; an object may have been declared in ++ * the DPL or by calling the dpio_create() function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent commands for ++ * this specific object. ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpio_open(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ int dpio_id, ++ u16 *token) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpio_cmd_open *dpio_cmd; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN, ++ cmd_flags, ++ 0); ++ dpio_cmd = (struct dpio_cmd_open *)cmd.params; ++ dpio_cmd->dpio_id = cpu_to_le32(dpio_id); ++ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = mc_cmd_hdr_read_token(&cmd); ++ ++ return 0; ++} ++ ++/** ++ * dpio_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpio_close(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE, ++ cmd_flags, ++ token); ++ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpio_enable() - Enable the DPIO, allow I/O portal operations. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpio_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE, ++ cmd_flags, ++ token); ++ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpio_disable() - Disable the DPIO, stop any I/O portal operation. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpio_disable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE, ++ cmd_flags, ++ token); ++ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpio_get_attributes() - Retrieve DPIO attributes ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * @attr: Returned object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpio_get_attributes(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dpio_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpio_rsp_get_attr *dpio_rsp; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ dpio_rsp = (struct dpio_rsp_get_attr *)cmd.params; ++ attr->id = le32_to_cpu(dpio_rsp->id); ++ attr->qbman_portal_id = le16_to_cpu(dpio_rsp->qbman_portal_id); ++ attr->num_priorities = dpio_rsp->num_priorities; ++ attr->channel_mode = dpio_rsp->channel_mode & DPIO_CHANNEL_MODE_MASK; ++ attr->qbman_portal_ce_offset = ++ le64_to_cpu(dpio_rsp->qbman_portal_ce_addr); ++ attr->qbman_portal_ci_offset = ++ le64_to_cpu(dpio_rsp->qbman_portal_ci_addr); ++ attr->qbman_version = le32_to_cpu(dpio_rsp->qbman_version); ++ ++ return 0; ++} ++ ++/** ++ * dpio_get_api_version - Get Data Path I/O API version ++ * @mc_io: Pointer to MC 
portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @major_ver: Major version of DPIO API
++ * @minor_ver: Minor version of DPIO API
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpio_get_api_version(struct fsl_mc_io *mc_io,
++			 u32 cmd_flags,
++			 u16 *major_ver,
++			 u16 *minor_ver)
++{
++	struct mc_command cmd = { 0 };
++	int err;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_API_VERSION,
++					  cmd_flags, 0);
++
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
++
++	return 0;
++}
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio.h
+@@ -0,0 +1,109 @@
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *     * Redistributions of source code must retain the above copyright
++ *	 notice, this list of conditions and the following disclaimer.
++ *     * Redistributions in binary form must reproduce the above copyright
++ *	 notice, this list of conditions and the following disclaimer in the
++ *	 documentation and/or other materials provided with the distribution.
++ *     * Neither the name of the above-listed copyright holders nor the
++ *	 names of any contributors may be used to endorse or promote products
++ *	 derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */ ++#ifndef __FSL_DPIO_H ++#define __FSL_DPIO_H ++ ++struct fsl_mc_io; ++ ++int dpio_open(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ int dpio_id, ++ u16 *token); ++ ++int dpio_close(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ ++/** ++ * enum dpio_channel_mode - DPIO notification channel mode ++ * @DPIO_NO_CHANNEL: No support for notification channel ++ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a ++ * dedicated channel in the DPIO; user should point the queue's ++ * destination in the relevant interface to this DPIO ++ */ ++enum dpio_channel_mode { ++ DPIO_NO_CHANNEL = 0, ++ DPIO_LOCAL_CHANNEL = 1, ++}; ++ ++/** ++ * struct dpio_cfg - Structure representing DPIO configuration ++ * @channel_mode: Notification channel mode ++ * @num_priorities: Number of priorities for the notification channel (1-8); ++ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL' ++ */ ++struct dpio_cfg { ++ enum dpio_channel_mode channel_mode; ++ u8 num_priorities; ++}; ++ ++int dpio_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ ++int dpio_disable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ ++/** ++ * struct dpio_attr - Structure representing DPIO attributes ++ * @id: DPIO object ID ++ * @qbman_portal_ce_offset: offset of the software portal cache-enabled area ++ * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area ++ * @qbman_portal_id: Software portal ID ++ * @channel_mode: Notification channel mode ++ * @num_priorities: Number of priorities for the notification channel (1-8); ++ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL' ++ * @qbman_version: QBMAN version ++ */ ++struct dpio_attr { ++ int id; ++ u64 qbman_portal_ce_offset; ++ u64 qbman_portal_ci_offset; ++ u16 qbman_portal_id; ++ enum dpio_channel_mode channel_mode; ++ u8 num_priorities; ++ u32 qbman_version; ++}; ++ ++int dpio_get_attributes(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dpio_attr *attr); ++ ++int dpio_get_api_version(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 *major_ver, ++ u16 *minor_ver); ++ ++#endif /* __FSL_DPIO_H */ +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c +@@ -0,0 +1,1049 @@ ++/* ++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. ++ * Copyright 2016 NXP ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. 
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <asm/cacheflush.h>
++#include <linux/io.h>
++#include <linux/slab.h>
++#include "../../include/dpaa2-global.h"
++
++#include "qbman-portal.h"
++
++struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7);
++struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8);
++
++#define QMAN_REV_4000 0x04000000
++#define QMAN_REV_4100 0x04010000
++#define QMAN_REV_4101 0x04010001
++#define QMAN_REV_MASK 0xffff0000
++
++/* All QBMan command and result structures use this "valid bit" encoding */
++#define QB_VALID_BIT ((u32)0x80)
++
++/* QBMan portal management command codes */
++#define QBMAN_MC_ACQUIRE       0x30
++#define QBMAN_WQCHAN_CONFIGURE 0x46
++
++/* CINH register offsets */
++#define QBMAN_CINH_SWP_EQAR  0x8c0
++#define QBMAN_CINH_SWP_DQPI  0xa00
++#define QBMAN_CINH_SWP_DCAP  0xac0
++#define QBMAN_CINH_SWP_SDQCR 0xb00
++#define QBMAN_CINH_SWP_RAR   0xcc0
++#define QBMAN_CINH_SWP_ISR   0xe00
++#define QBMAN_CINH_SWP_IER   0xe40
++#define QBMAN_CINH_SWP_ISDR  0xe80
++#define QBMAN_CINH_SWP_IIR   0xec0
++
++/* CENA register offsets */
++#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
++#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
++#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((u32)(n) << 6))
++#define QBMAN_CENA_SWP_CR      0x600
++#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((u32)(vb) >> 1))
++#define QBMAN_CENA_SWP_VDQCR   0x780
++
++/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
++#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
++
++/* Define token used to determine if response written to memory is valid */
++#define QMAN_DQ_TOKEN_VALID 1
++
++/* SDQCR attribute codes */
++#define QB_SDQCR_FC_SHIFT  29
++#define QB_SDQCR_FC_MASK   0x1
++#define QB_SDQCR_DCT_SHIFT 24
++#define QB_SDQCR_DCT_MASK  0x3
++#define QB_SDQCR_TOK_SHIFT 16
++#define QB_SDQCR_TOK_MASK  0xff
++#define QB_SDQCR_SRC_SHIFT 0
++#define QB_SDQCR_SRC_MASK  0xffff
++
++/* opaque token for static dequeues */
++#define QMAN_SDQCR_TOKEN 0xbb
++
++enum qbman_sdqcr_dct {
++	qbman_sdqcr_dct_null = 0,
++	qbman_sdqcr_dct_prio_ics,
++	qbman_sdqcr_dct_active_ics,
++	qbman_sdqcr_dct_active
++};
++
++enum qbman_sdqcr_fc {
++	qbman_sdqcr_fc_one = 0,
++	qbman_sdqcr_fc_up_to_3 = 1
++};
++
++#define dccvac(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); }
++#define dcivac(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); }
++static inline void qbman_inval_prefetch(struct qbman_swp *p, uint32_t offset)
++{
++	dcivac(p->addr_cena + offset);
++	prefetch(p->addr_cena + offset);
++}
++
++/* Portal Access */
++
++static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
++{
++	return readl_relaxed(p->addr_cinh + offset);
++}
++
++static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
++					u32 value)
++{
++	writel_relaxed(value, p->addr_cinh + offset);
++}
++
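++/*
++ * Access model, as used throughout this file: registers in the
++ * cache-inhibited (CINH) region are accessed through the relaxed MMIO
++ * helpers above, while commands and results in the cache-enabled (CENA)
++ * region are built in place via qbman_get_cmd() below and flushed or
++ * invalidated explicitly with the dccvac()/dcivac() macros.
++ */
++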
++static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
++{
++	return p->addr_cena + offset;
++}
++
++#define QBMAN_CINH_SWP_CFG 0xd00
++
++#define SWP_CFG_DQRR_MF_SHIFT 20
++#define SWP_CFG_EST_SHIFT     16
++#define SWP_CFG_WN_SHIFT      14
++#define SWP_CFG_RPM_SHIFT     12
++#define SWP_CFG_DCM_SHIFT     10
++#define SWP_CFG_EPM_SHIFT     8
++#define SWP_CFG_SD_SHIFT      5
++#define SWP_CFG_SP_SHIFT      4
++#define SWP_CFG_SE_SHIFT      3
++#define SWP_CFG_DP_SHIFT      2
++#define SWP_CFG_DE_SHIFT      1
++#define SWP_CFG_EP_SHIFT      0
++
++static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
++				    u8 epm, int sd, int sp, int se,
++				    int dp, int de, int ep)
++{
++	return cpu_to_le32(max_fill << SWP_CFG_DQRR_MF_SHIFT |
++			   est << SWP_CFG_EST_SHIFT |
++			   wn << SWP_CFG_WN_SHIFT |
++			   rpm << SWP_CFG_RPM_SHIFT |
++			   dcm << SWP_CFG_DCM_SHIFT |
++			   epm << SWP_CFG_EPM_SHIFT |
++			   sd << SWP_CFG_SD_SHIFT |
++			   sp << SWP_CFG_SP_SHIFT |
++			   se << SWP_CFG_SE_SHIFT |
++			   dp << SWP_CFG_DP_SHIFT |
++			   de << SWP_CFG_DE_SHIFT |
++			   ep << SWP_CFG_EP_SHIFT);
++}
++
++/**
++ * qbman_swp_init() - Create a functional object representing the given
++ * QBMan portal descriptor.
++ * @d: the given qbman swp descriptor
++ *
++ * Return qbman_swp portal for success, NULL if the object cannot
++ * be created.
++ */
++struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
++{
++	struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
++	u32 reg;
++
++	if (!p)
++		return NULL;
++	p->desc = d;
++	p->mc.valid_bit = QB_VALID_BIT;
++	p->sdq = 0;
++	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
++	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
++	p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
++
++	atomic_set(&p->vdq.available, 1);
++	p->vdq.valid_bit = QB_VALID_BIT;
++	p->dqrr.next_idx = 0;
++	p->dqrr.valid_bit = QB_VALID_BIT;
++
++	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
++		p->dqrr.dqrr_size = 4;
++		p->dqrr.reset_bug = 1;
++	} else {
++		p->dqrr.dqrr_size = 8;
++		p->dqrr.reset_bug = 0;
++	}
++
++	p->addr_cena = d->cena_bar;
++	p->addr_cinh = d->cinh_bar;
++
++	reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
++				0, /* Writes cacheable */
++				0, /* EQCR_CI stashing threshold */
++				3, /* RPM: Valid bit mode, RCR in array mode */
++				2, /* DCM: Discrete consumption ack mode */
++				3, /* EPM: Valid bit mode, EQCR in array mode */
++				0, /* mem stashing drop enable == FALSE */
++				1, /* mem stashing priority == TRUE */
++				0, /* mem stashing enable == FALSE */
++				1, /* dequeue stashing priority == TRUE */
++				0, /* dequeue stashing enable == FALSE */
++				0); /* EQCR_CI stashing priority == FALSE */
++
++	qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
++	reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
++	if (!reg) {
++		pr_err("qbman: the portal is not enabled!\n");
++		/* don't leak the portal object on failure */
++		kfree(p);
++		return NULL;
++	}
++
++	/*
++	 * SDQCR needs to be initialized to 0 when no channels are
++	 * being dequeued from or else the QMan HW will indicate an
++	 * error. The values that were calculated above will be
++	 * applied when dequeues from a specific channel are enabled.
++	 */
++	qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
++	return p;
++}
++
++/**
++ * qbman_swp_finish() - Destroy a functional object representing the given
++ * QBMan portal descriptor.
++ * @p: the qbman_swp object to be destroyed
++ */
++void qbman_swp_finish(struct qbman_swp *p)
++{
++	kfree(p);
++}
++
++/**
++ * qbman_swp_interrupt_read_status()
++ * @p: the given software portal
++ *
++ * Return the value in the SWP_ISR register.
++ */
++u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
++{
++	return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
++}
++
++/**
++ * qbman_swp_interrupt_clear_status()
++ * @p: the given software portal
++ * @mask: The mask to clear in SWP_ISR register
++ */
++void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
++{
++	qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
++}
++
++/**
++ * qbman_swp_interrupt_get_trigger() - read interrupt enable register
++ * @p: the given software portal
++ *
++ * Return the value in the SWP_IER register.
++ */
++u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
++{
++	return qbman_read_register(p, QBMAN_CINH_SWP_IER);
++}
++
++/**
++ * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
++ * @p: the given software portal
++ * @mask: The mask of bits to enable in SWP_IER
++ */
++void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
++{
++	qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
++}
++
++/**
++ * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
++ * @p: the given software portal object
++ *
++ * Return the value in the SWP_IIR register.
++ */
++int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
++{
++	return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
++}
++
++/**
++ * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
++ * @p: the given software portal object
++ * @inhibit: nonzero to inhibit all interrupts (SWP_IIR is written with all
++ * ones), zero to allow them
++ */
++void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
++{
++	qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
++}
++
++/*
++ * Different management commands all use this common base layer of code to issue
++ * commands and poll for results.
++ */
++
++/*
++ * Returns a pointer to where the caller should fill in their management command
++ * (caller should ignore the verb byte)
++ */
++void *qbman_swp_mc_start(struct qbman_swp *p)
++{
++	return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
++}
++
++/*
++ * Merges in the caller-supplied command verb (which should not include the
++ * valid-bit) and commits the command to hardware
++ */
++void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
++{
++	u8 *v = cmd;
++
++	dma_wmb();
++	*v = cmd_verb | p->mc.valid_bit;
++	dccvac(cmd);
++}
++
++/*
++ * Checks for a completed response (returns non-NULL if and only if the
++ * response is complete).
++ */
++void *qbman_swp_mc_result(struct qbman_swp *p)
++{
++	u32 *ret, verb;
++
++	qbman_inval_prefetch(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
++	ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
++
++	/* Remove the valid-bit - command completed if the rest is non-zero */
++	verb = ret[0] & ~QB_VALID_BIT;
++	if (!verb)
++		return NULL;
++	p->mc.valid_bit ^= QB_VALID_BIT;
++	return ret;
++}
++
++#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
++enum qb_enqueue_commands {
++	enqueue_empty = 0,
++	enqueue_response_always = 1,
++	enqueue_rejects_to_fq = 2
++};
++
++#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
++#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
++#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
++
++/**
++ * qbman_eq_desc_clear() - Clear the contents of a descriptor to
++ * default/starting state.
++ */
++void qbman_eq_desc_clear(struct qbman_eq_desc *d)
++{
++	memset(d, 0, sizeof(*d));
++}
++
++/**
++ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
++ * @d: the enqueue descriptor.
++ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
++ * rejections returned on a FQ.
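++ *
++ * A typical enqueue sequence, as a sketch (illustrative only; 'swp', 'fqid'
++ * and 'fd' are assumed to have been set up by the caller):
++ *	struct qbman_eq_desc ed;
++ *	qbman_eq_desc_clear(&ed);
++ *	qbman_eq_desc_set_no_orp(&ed, 0);
++ *	qbman_eq_desc_set_fq(&ed, fqid);
++ *	int ret = qbman_swp_enqueue(swp, &ed, fd);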
++ */
++void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
++{
++	d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
++	if (respond_success)
++		d->verb |= enqueue_response_always;
++	else
++		d->verb |= enqueue_rejects_to_fq;
++}
++
++/*
++ * Exactly one of the following descriptor "targets" should be set. (Calling any
++ * one of these will replace the effect of any prior call to one of these.)
++ * - enqueue to a frame queue
++ * - enqueue to a queuing destination
++ */
++
++/**
++ * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
++ * @d: the enqueue descriptor
++ * @fqid: the id of the frame queue to be enqueued
++ */
++void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
++{
++	d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
++	d->tgtid = cpu_to_le32(fqid);
++}
++
++/**
++ * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
++ * @d: the enqueue descriptor
++ * @qdid: the id of the queuing destination to be enqueued
++ * @qd_bin: the queuing destination bin
++ * @qd_prio: the queuing destination priority
++ */
++void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
++			  u32 qd_bin, u32 qd_prio)
++{
++	d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
++	d->tgtid = cpu_to_le32(qdid);
++	d->qdbin = cpu_to_le16(qd_bin);
++	d->qpri = qd_prio;
++}
++
++#define EQAR_IDX(eqar)     ((eqar) & 0x7)
++#define EQAR_VB(eqar)      ((eqar) & 0x80)
++#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
++
++/**
++ * qbman_swp_enqueue() - Issue an enqueue command
++ * @s: the software portal used for enqueue
++ * @d: the enqueue descriptor
++ * @fd: the frame descriptor to be enqueued
++ *
++ * Please note that 'fd' should only be NULL if the "action" of the
++ * descriptor is "orp_hole" or "orp_nesn".
++ *
++ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
++ */
++int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
++		      const struct dpaa2_fd *fd)
++{
++	struct qbman_eq_desc *p;
++	u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR);
++
++	if (!EQAR_SUCCESS(eqar))
++		return -EBUSY;
++
++	p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
++	memcpy(&p->dca, &d->dca, 31);
++	memcpy(&p->fd, fd, sizeof(*fd));
++
++	/* Set the verb byte, have to substitute in the valid-bit */
++	dma_wmb();
++	p->verb = d->verb | EQAR_VB(eqar);
++	dccvac(p);
++
++	return 0;
++}
++
++/* Static (push) dequeue */
++
++/**
++ * qbman_swp_push_get() - Get the push dequeue setup
++ * @s: the software portal object
++ * @channel_idx: the channel index to query
++ * @enabled: returned boolean to show whether the push dequeue is enabled
++ * for the given channel
++ */
++void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
++{
++	u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
++
++	WARN_ON(channel_idx > 15);
++	/* report only the enable bit for the requested channel */
++	*enabled = !!(src & (1 << channel_idx));
++}
++
++/**
++ * qbman_swp_push_set() - Enable or disable push dequeue
++ * @s: the software portal object
++ * @channel_idx: the channel index (0 to 15)
++ * @enable: enable or disable push dequeue
++ */
++void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
++{
++	u16 dqsrc;
++
++	WARN_ON(channel_idx > 15);
++	if (enable)
++		s->sdq |= 1 << channel_idx;
++	else
++		s->sdq &= ~(1 << channel_idx);
++
++	/* Read back the complete src map. 
If no channels are enabled
++	 * the SDQCR must be 0 or else QMan will assert errors
++	 */
++	dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
++	if (dqsrc != 0)
++		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
++	else
++		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
++}
++
++#define QB_VDQCR_VERB_DCT_SHIFT 0
++#define QB_VDQCR_VERB_DT_SHIFT  2
++#define QB_VDQCR_VERB_RLS_SHIFT 4
++#define QB_VDQCR_VERB_WAE_SHIFT 5
++
++enum qb_pull_dt_e {
++	qb_pull_dt_channel,
++	qb_pull_dt_workqueue,
++	qb_pull_dt_framequeue
++};
++
++/**
++ * qbman_pull_desc_clear() - Clear the contents of a descriptor to
++ * default/starting state
++ * @d: the pull dequeue descriptor to be cleared
++ */
++void qbman_pull_desc_clear(struct qbman_pull_desc *d)
++{
++	memset(d, 0, sizeof(*d));
++}
++
++/**
++ * qbman_pull_desc_set_storage() - Set the pull dequeue storage
++ * @d: the pull dequeue descriptor to be set
++ * @storage: the pointer of the memory to store the dequeue result
++ * @storage_phys: the physical address of the storage memory
++ * @stash: to indicate whether write allocate is enabled
++ *
++ * If not called, or if called with 'storage' as NULL, then pull dequeues
++ * will produce results to DQRR. If 'storage' is non-NULL, then results are
++ * produced to the given memory location (using the DMA address which
++ * the caller provides in 'storage_phys'), and 'stash' controls whether or not
++ * those writes to main-memory express a cache-warming attribute.
++ */
++void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
++				 struct dpaa2_dq *storage,
++				 dma_addr_t storage_phys,
++				 int stash)
++{
++	/* save the virtual address */
++	d->rsp_addr_virt = (u64)storage;
++
++	if (!storage) {
++		d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
++		return;
++	}
++	d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
++	if (stash)
++		d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
++	else
++		d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
++
++	d->rsp_addr = cpu_to_le64(storage_phys);
++}
++
++/**
++ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
++ * @d: the pull dequeue descriptor to be set
++ * @numframes: number of frames to be set, must be between 1 and 16, inclusive
++ */
++void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
++{
++	d->numf = numframes - 1;
++}
++
++void qbman_pull_desc_set_token(struct qbman_pull_desc *d, u8 token)
++{
++	d->tok = token;
++}
++
++/*
++ * Exactly one of the following descriptor "actions" should be set. (Calling any
++ * one of these will replace the effect of any prior call to one of these.)
++ * - pull dequeue from the given frame queue (FQ)
++ * - pull dequeue from any FQ in the given work queue (WQ)
++ * - pull dequeue from any FQ in any WQ in the given channel
++ */
++
++/**
++ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
++ * @d: the pull dequeue descriptor to be set
++ * @fqid: the frame queue index of the given FQ
++ */
++void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
++{
++	d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
++	d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
++	d->dq_src = cpu_to_le32(fqid);
++}
++
++/**
++ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
++ * @d: the pull dequeue descriptor to be set
++ * @wqid: composed of channel id and wqid within the channel
++ * @dct: the dequeue command type
++ */
++void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
++			    enum qbman_pull_type_e dct)
++{
++	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
++	d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
++	d->dq_src = cpu_to_le32(wqid);
++}
++
++/**
++ * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
++ * dequeues
++ * @d: the pull dequeue descriptor to be set
++ * @chid: the channel id to be dequeued
++ * @dct: the dequeue command type
++ */
++void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
++				 enum qbman_pull_type_e dct)
++{
++	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
++	d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
++	d->dq_src = cpu_to_le32(chid);
++}
++
++/**
++ * qbman_swp_pull() - Issue the pull dequeue command
++ * @s: the software portal object
++ * @d: the software portal descriptor which has been configured with
++ * the set of qbman_pull_desc_set_*() calls
++ *
++ * Return 0 for success, and -EBUSY if the software portal is not ready
++ * to do pull dequeue.
++ */
++int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
++{
++	struct qbman_pull_desc *p;
++
++	if (!atomic_dec_and_test(&s->vdq.available)) {
++		atomic_inc(&s->vdq.available);
++		return -EBUSY;
++	}
++	s->vdq.storage = (void *)d->rsp_addr_virt;
++	p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
++	p->numf = d->numf;
++	p->tok = QMAN_DQ_TOKEN_VALID;
++	p->dq_src = d->dq_src;
++	p->rsp_addr = d->rsp_addr;
++	p->rsp_addr_virt = d->rsp_addr_virt;
++	dma_wmb();
++
++	/* Set the verb byte, have to substitute in the valid-bit */
++	p->verb = d->verb | s->vdq.valid_bit;
++	s->vdq.valid_bit ^= QB_VALID_BIT;
++	dccvac(p);
++
++	return 0;
++}
++
++#define QMAN_DQRR_PI_MASK 0xf
++
++/**
++ * qbman_swp_dqrr_next() - Get a valid DQRR entry
++ * @s: the software portal object
++ *
++ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
++ * only once, so repeated calls can return a sequence of DQRR entries, without
++ * requiring they be consumed immediately or in any particular order.
++ */
++const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
++{
++	u32 verb;
++	u32 response_verb;
++	u32 flags;
++	struct dpaa2_dq *p;
++
++	/* Before using valid-bit to detect if something is there, we have to
++	 * handle the case of the DQRR reset bug...
++	 */
++	if (unlikely(s->dqrr.reset_bug)) {
++		/*
++		 * We pick up new entries by cache-inhibited producer index,
++		 * which means that a non-coherent mapping would require us to
++		 * invalidate and read *only* once that PI has indicated that
++		 * there's an entry here. The first trip around the DQRR ring
++		 * will be much less efficient than all subsequent trips around
++		 * it...
++		 */
++		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
++			QMAN_DQRR_PI_MASK;
++
++		/* there are new entries if pi != next_idx */
++		if (pi == s->dqrr.next_idx)
++			return NULL;
++
++		/*
++		 * if next_idx is/was the last ring index, and 'pi' is
++		 * different, we can disable the workaround as all the ring
++		 * entries have now been DMA'd to so valid-bit checking is
++		 * repaired. Note: this logic needs to be based on next_idx
++		 * (which increments one at a time), rather than on pi (which
++		 * can burst and wrap-around between our snapshots of it).
++		 */
++		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
++			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
++				 s->dqrr.next_idx, pi);
++			s->dqrr.reset_bug = 0;
++		}
++		qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++	}
++
++	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++	verb = p->dq.verb;
++
++	/*
++	 * If the valid-bit isn't of the expected polarity, nothing there. Note,
++	 * in the DQRR reset bug workaround, we shouldn't need to skip this
++	 * check, because we've already determined that a new entry is available
++	 * and we've invalidated the cacheline before reading it, so the
++	 * valid-bit behaviour is repaired and should tell us what we already
++	 * knew from reading PI.
++	 */
++	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
++		qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++		return NULL;
++	}
++	/*
++	 * There's something there. Advance "next_idx" to the next ring
++	 * entry (and prefetch it) before returning what we found.
++	 */
++	s->dqrr.next_idx++;
++	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
++	if (!s->dqrr.next_idx)
++		s->dqrr.valid_bit ^= QB_VALID_BIT;
++
++	/*
++	 * If this is the final response to a volatile dequeue command
++	 * indicate that the vdq is available
++	 */
++	flags = p->dq.stat;
++	response_verb = verb & QBMAN_RESULT_MASK;
++	if ((response_verb == QBMAN_RESULT_DQ) &&
++	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
++	    (flags & DPAA2_DQ_STAT_EXPIRED))
++		atomic_inc(&s->vdq.available);
++
++	qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++
++	return p;
++}
++
++/**
++ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
++ * qbman_swp_dqrr_next().
++ * @s: the software portal object
++ * @dq: the DQRR entry to be consumed
++ */
++void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
++{
++	qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
++}
++
++/**
++ * qbman_result_has_new_result() - Check and get the dequeue response from the
++ * dq storage memory set in pull dequeue command
++ * @s: the software portal object
++ * @dq: the dequeue result read from the memory
++ *
++ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
++ * dequeue result.
++ *
++ * Only used for user-provided storage of dequeue results, not DQRR. For
++ * efficiency purposes, the driver will perform any required endianness
++ * conversion to ensure that the user's dequeue result storage is in host-endian
++ * format. As such, once the user has called qbman_result_has_new_result() and
++ * been returned a valid dequeue result, they should not call it again on
++ * the same memory location (except of course if another dequeue command has
++ * been executed to produce a new result to that location).
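++ *
++ * A polling sketch (illustrative only; 'swp' and 'storage' are assumed to
++ * have been set up via qbman_pull_desc_set_storage() and qbman_swp_pull()):
++ *	while (!qbman_result_has_new_result(swp, storage))
++ *		cpu_relax();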
++ */
++int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
++{
++	if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
++		return 0;
++
++	/*
++	 * Set token to be 0 so we will detect the change back to 1
++	 * next time this loop is traversed. Const is cast away here
++	 * as we want users to treat the dequeue responses as read only.
++	 */
++	((struct dpaa2_dq *)dq)->dq.tok = 0;
++
++	/*
++	 * Determine whether VDQCR is available based on whether the
++	 * current result is sitting in the first storage location of
++	 * the busy command.
++	 */
++	if (s->vdq.storage == dq) {
++		s->vdq.storage = NULL;
++		atomic_inc(&s->vdq.available);
++	}
++
++	return 1;
++}
++
++/**
++ * qbman_release_desc_clear() - Clear the contents of a descriptor to
++ * default/starting state.
++ */
++void qbman_release_desc_clear(struct qbman_release_desc *d)
++{
++	memset(d, 0, sizeof(*d));
++	d->verb = 1 << 5; /* Release Command Valid */
++}
++
++/**
++ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
++ */
++void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
++{
++	d->bpid = cpu_to_le16(bpid);
++}
++
++/**
++ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
++ * interrupt source should be asserted after the release command is completed.
++ */
++void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
++{
++	if (enable)
++		d->verb |= 1 << 6;
++	else
++		d->verb &= ~(1 << 6);
++}
++
++#define RAR_IDX(rar)     ((rar) & 0x7)
++#define RAR_VB(rar)      ((rar) & 0x80)
++#define RAR_SUCCESS(rar) ((rar) & 0x100)
++
++/**
++ * qbman_swp_release() - Issue a buffer release command
++ * @s: the software portal object
++ * @d: the release descriptor
++ * @buffers: a pointer to the buffer addresses to be released
++ * @num_buffers: number of buffers to be released, must be less than 8
++ *
++ * Return 0 for success, -EBUSY if the release command ring is not ready.
++ */
++int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
++		      const u64 *buffers, unsigned int num_buffers)
++{
++	int i;
++	struct qbman_release_desc *p;
++	u32 rar;
++
++	if (!num_buffers || (num_buffers > 7))
++		return -EINVAL;
++
++	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
++	if (!RAR_SUCCESS(rar))
++		return -EBUSY;
++
++	/* Start the release command */
++	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
++	/* Copy the caller's buffer pointers to the command */
++	for (i = 0; i < num_buffers; i++)
++		p->buf[i] = cpu_to_le64(buffers[i]);
++	p->bpid = d->bpid;
++
++	/*
++	 * Set the verb byte, have to substitute in the valid-bit and the number
++	 * of buffers.
++	 */
++	dma_wmb();
++	p->verb = d->verb | RAR_VB(rar) | num_buffers;
++	dccvac(p);
++
++	return 0;
++}
++
++struct qbman_acquire_desc {
++	u8 verb;
++	u8 reserved;
++	u16 bpid;
++	u8 num;
++	u8 reserved2[59];
++};
++
++struct qbman_acquire_rslt {
++	u8 verb;
++	u8 rslt;
++	u16 reserved;
++	u8 num;
++	u8 reserved2[3];
++	u64 buf[7];
++};
++
++/**
++ * qbman_swp_acquire() - Issue a buffer acquire command
++ * @s: the software portal object
++ * @bpid: the buffer pool index
++ * @buffers: a pointer to where the acquired buffer addresses will be written
++ * @num_buffers: number of buffers to be acquired, must be less than 8
++ *
++ * Return the number of buffers acquired on success, or a negative error
++ * code if the acquire command fails.
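++ *
++ * A sketch of a typical call (illustrative only; 'swp' and 'bpid' are
++ * assumed valid):
++ *	u64 bufs[7];
++ *	int n = qbman_swp_acquire(swp, bpid, bufs, 7);
++ *
++ * On return, n is negative on error; otherwise bufs[0..n-1] hold the
++ * acquired buffer addresses, and n may be smaller than the count requested.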
++ */ ++int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers, ++ unsigned int num_buffers) ++{ ++ struct qbman_acquire_desc *p; ++ struct qbman_acquire_rslt *r; ++ int i; ++ ++ if (!num_buffers || (num_buffers > 7)) ++ return -EINVAL; ++ ++ /* Start the management command */ ++ p = qbman_swp_mc_start(s); ++ ++ if (!p) ++ return -EBUSY; ++ ++ /* Encode the caller-provided attributes */ ++ p->bpid = cpu_to_le16(bpid); ++ p->num = num_buffers; ++ ++ /* Complete the management command */ ++ r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE); ++ if (unlikely(!r)) { ++ pr_err("qbman: acquire from BPID %d failed, no response\n", ++ bpid); ++ return -EIO; ++ } ++ ++ /* Decode the outcome */ ++ WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE); ++ ++ /* Determine success or failure */ ++ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n", ++ bpid, r->rslt); ++ return -EIO; ++ } ++ ++ WARN_ON(r->num > num_buffers); ++ ++ /* Copy the acquired buffers to the caller's array */ ++ for (i = 0; i < r->num; i++) ++ buffers[i] = le64_to_cpu(r->buf[i]); ++ ++ return (int)r->num; ++} ++ ++struct qbman_alt_fq_state_desc { ++ u8 verb; ++ u8 reserved[3]; ++ u32 fqid; ++ u8 reserved2[56]; ++}; ++ ++struct qbman_alt_fq_state_rslt { ++ u8 verb; ++ u8 rslt; ++ u8 reserved[62]; ++}; ++ ++#define ALT_FQ_FQID_MASK 0x00FFFFFF ++ ++int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid, ++ u8 alt_fq_verb) ++{ ++ struct qbman_alt_fq_state_desc *p; ++ struct qbman_alt_fq_state_rslt *r; ++ ++ /* Start the management command */ ++ p = qbman_swp_mc_start(s); ++ if (!p) ++ return -EBUSY; ++ ++ p->fqid = cpu_to_le32(fqid) & ALT_FQ_FQID_MASK; ++ ++ /* Complete the management command */ ++ r = qbman_swp_mc_complete(s, p, alt_fq_verb); ++ if (unlikely(!r)) { ++ pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n", ++ alt_fq_verb); ++ return -EIO; ++ } ++ ++ /* Decode the outcome */ ++ WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb); ++ ++ /* Determine success or failure */ ++ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n", ++ fqid, r->verb, r->rslt); ++ return -EIO; ++ } ++ ++ return 0; ++} ++ ++struct qbman_cdan_ctrl_desc { ++ u8 verb; ++ u8 reserved; ++ u16 ch; ++ u8 we; ++ u8 ctrl; ++ u16 reserved2; ++ u64 cdan_ctx; ++ u8 reserved3[48]; ++ ++}; ++ ++struct qbman_cdan_ctrl_rslt { ++ u8 verb; ++ u8 rslt; ++ u16 ch; ++ u8 reserved[60]; ++}; ++ ++int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid, ++ u8 we_mask, u8 cdan_en, ++ u64 ctx) ++{ ++ struct qbman_cdan_ctrl_desc *p = NULL; ++ struct qbman_cdan_ctrl_rslt *r = NULL; ++ ++ /* Start the management command */ ++ p = qbman_swp_mc_start(s); ++ if (!p) ++ return -EBUSY; ++ ++ /* Encode the caller-provided attributes */ ++ p->ch = cpu_to_le16(channelid); ++ p->we = we_mask; ++ if (cdan_en) ++ p->ctrl = 1; ++ else ++ p->ctrl = 0; ++ p->cdan_ctx = cpu_to_le64(ctx); ++ ++ /* Complete the management command */ ++ r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE); ++ if (unlikely(!r)) { ++ pr_err("qbman: wqchan config failed, no response\n"); ++ return -EIO; ++ } ++ ++ WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE); ++ ++ /* Determine success or failure */ ++ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n", ++ channelid, r->rslt); ++ return -EIO; ++ } ++ ++ return 0; ++} +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h +@@ -0,0 +1,662 @@ ++/* ++ * 
Copyright (C) 2014-2016 Freescale Semiconductor, Inc. ++ * Copyright 2016 NXP ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++#ifndef __FSL_QBMAN_PORTAL_H ++#define __FSL_QBMAN_PORTAL_H ++ ++#include "qbman_private.h" ++#include "../../include/dpaa2-fd.h" ++ ++struct dpaa2_dq; ++struct qbman_swp; ++ ++/* qbman software portal descriptor structure */ ++struct qbman_swp_desc { ++ void *cena_bar; /* Cache-enabled portal base address */ ++ void *cinh_bar; /* Cache-inhibited portal base address */ ++ u32 qman_version; ++}; ++ ++#define QBMAN_SWP_INTERRUPT_EQRI 0x01 ++#define QBMAN_SWP_INTERRUPT_EQDI 0x02 ++#define QBMAN_SWP_INTERRUPT_DQRI 0x04 ++#define QBMAN_SWP_INTERRUPT_RCRI 0x08 ++#define QBMAN_SWP_INTERRUPT_RCDI 0x10 ++#define QBMAN_SWP_INTERRUPT_VDCI 0x20 ++ ++/* the structure for pull dequeue descriptor */ ++struct qbman_pull_desc { ++ u8 verb; ++ u8 numf; ++ u8 tok; ++ u8 reserved; ++ u32 dq_src; ++ u64 rsp_addr; ++ u64 rsp_addr_virt; ++ u8 padding[40]; ++}; ++ ++enum qbman_pull_type_e { ++ /* dequeue with priority precedence, respect intra-class scheduling */ ++ qbman_pull_type_prio = 1, ++ /* dequeue with active FQ precedence, respect ICS */ ++ qbman_pull_type_active, ++ /* dequeue with active FQ precedence, no ICS */ ++ qbman_pull_type_active_noics ++}; ++ ++/* Definitions for parsing dequeue entries */ ++#define QBMAN_RESULT_MASK 0x7f ++#define QBMAN_RESULT_DQ 0x60 ++#define QBMAN_RESULT_FQRN 0x21 ++#define QBMAN_RESULT_FQRNI 0x22 ++#define QBMAN_RESULT_FQPN 0x24 ++#define QBMAN_RESULT_FQDAN 0x25 ++#define QBMAN_RESULT_CDAN 0x26 ++#define QBMAN_RESULT_CSCN_MEM 0x27 ++#define QBMAN_RESULT_CGCU 0x28 ++#define QBMAN_RESULT_BPSCN 0x29 ++#define QBMAN_RESULT_CSCN_WQ 0x2a ++ ++/* QBMan FQ management command codes */ ++#define QBMAN_FQ_SCHEDULE 0x48 ++#define QBMAN_FQ_FORCE 0x49 ++#define QBMAN_FQ_XON 0x4d ++#define QBMAN_FQ_XOFF 0x4e ++ ++/* structure of enqueue descriptor */ ++struct qbman_eq_desc { ++ u8 verb; ++ u8 dca; ++ u16 seqnum; ++ u16 orpid; ++ u16 reserved1; ++ u32 tgtid; ++ u32 tag; ++ u16 qdbin; ++ u8 qpri; ++ u8 reserved[3]; ++ u8 wae; ++ u8 rspid; ++ u64 rsp_addr; ++ u8 fd[32]; ++}; ++ ++/* buffer release descriptor */ ++struct qbman_release_desc { ++ u8 verb; ++ u8 reserved; ++ u16 bpid; ++ u32 reserved2; ++ u64 buf[7]; ++}; ++ ++/* Management command result codes */ ++#define QBMAN_MC_RSLT_OK 0xf0 ++ ++#define CODE_CDAN_WE_EN 0x1 ++#define CODE_CDAN_WE_CTX 0x4 ++ ++/* portal data structure */ ++struct qbman_swp { ++ const struct qbman_swp_desc *desc; ++ void __iomem *addr_cena; ++ void __iomem *addr_cinh; ++ ++ /* Management commands */ ++ struct { ++ u32 valid_bit; /* 0x00 or 0x80 */ ++ } mc; ++ ++ /* Push dequeues */ ++ u32 sdq; ++ ++ /* Volatile dequeues */ ++ struct { ++ atomic_t available; /* indicates if a command can be sent */ ++ u32 valid_bit; /* 0x00 or 0x80 */ ++ struct dpaa2_dq *storage; /* NULL if DQRR */ ++ } vdq; ++ ++ /* DQRR */ ++ struct { ++ u32 next_idx; ++ u32 valid_bit; ++ u8 dqrr_size; ++ int reset_bug; /* indicates dqrr reset workaround is needed */ ++ } dqrr; ++}; ++ ++struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d); ++void qbman_swp_finish(struct qbman_swp *p); ++u32 qbman_swp_interrupt_read_status(struct qbman_swp *p); ++void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask); ++u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p); ++void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask); ++int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p); ++void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit); ++ ++void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled); ++void 
qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable); ++ ++void qbman_pull_desc_clear(struct qbman_pull_desc *d); ++void qbman_pull_desc_set_storage(struct qbman_pull_desc *d, ++ struct dpaa2_dq *storage, ++ dma_addr_t storage_phys, ++ int stash); ++void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes); ++void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid); ++void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid, ++ enum qbman_pull_type_e dct); ++void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid, ++ enum qbman_pull_type_e dct); ++ ++int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d); ++ ++const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s); ++void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq); ++ ++int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq); ++ ++void qbman_eq_desc_clear(struct qbman_eq_desc *d); ++void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success); ++void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token); ++void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid); ++void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid, ++ u32 qd_bin, u32 qd_prio); ++ ++int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d, ++ const struct dpaa2_fd *fd); ++ ++void qbman_release_desc_clear(struct qbman_release_desc *d); ++void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid); ++void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable); ++ ++int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d, ++ const u64 *buffers, unsigned int num_buffers); ++int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers, ++ unsigned int num_buffers); ++int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid, ++ u8 alt_fq_verb); ++int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid, ++ u8 we_mask, u8 cdan_en, ++ u64 ctx); ++ ++void *qbman_swp_mc_start(struct qbman_swp *p); ++void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb); ++void *qbman_swp_mc_result(struct qbman_swp *p); ++ ++/** ++ * qbman_result_is_DQ() - check if the dequeue result is a dequeue response ++ * @dq: the dequeue result to be checked ++ * ++ * DQRR entries may contain non-dequeue results, ie. 
notifications
++ */
++static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
++{
++	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
++}
++
++/**
++ * qbman_result_is_SCN() - Check whether the dequeue result is a notification
++ * @dq: the dequeue result to be checked
++ *
++ */
++static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
++{
++	return !qbman_result_is_DQ(dq);
++}
++
++/* FQ Data Availability */
++static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
++{
++	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
++}
++
++/* Channel Data Availability */
++static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
++{
++	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
++}
++
++/* Congestion State Change */
++static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
++{
++	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
++}
++
++/* Buffer Pool State Change */
++static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
++{
++	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
++}
++
++/* Congestion Group Count Update */
++static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
++{
++	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
++}
++
++/* Retirement */
++static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
++{
++	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
++}
++
++/* Retirement Immediate */
++static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
++{
++	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
++}
++
++/* Park */
++static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
++{
++	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
++}
++
++/**
++ * qbman_result_SCN_state() - Get the state field in State-change notification
++ */
++static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
++{
++	return scn->scn.state;
++}
++
++#define SCN_RID_MASK 0x00FFFFFF
++
++/**
++ * qbman_result_SCN_rid() - Get the resource id in State-change notification
++ */
++static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
++{
++	return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
++}
++
++/**
++ * qbman_result_SCN_ctx() - Get the context data in State-change notification
++ */
++static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
++{
++	return le64_to_cpu(scn->scn.ctx);
++}
++
++/**
++ * qbman_swp_fq_schedule() - Move the fq to the scheduled state
++ * @s: the software portal object
++ * @fqid: the index of frame queue to be scheduled
++ *
++ * There are a couple of different ways that a FQ can end up in the parked
++ * state; this schedules it.
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
++{
++	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
++}
++
++/**
++ * qbman_swp_fq_force() - Force the FQ to fully scheduled state
++ * @s: the software portal object
++ * @fqid: the index of frame queue to be forced
++ *
++ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
++ * and thus be available for selection by any channel-dequeuing behaviour (push
++ * or pull). If the FQ is subsequently "dequeued" from the channel and is still
++ * empty at the time this happens, the resulting dq_entry will have no FD.
++ * (qbman_result_DQ_fd() will return NULL.)
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
++{
++	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
++}
++
++/**
++ * qbman_swp_fq_xon() - Sets FQ flow-control to XON
++ * @s: the software portal object
++ * @fqid: the index of frame queue
++ *
++ * This setting doesn't affect enqueues to the FQ, just dequeues.
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
++{
++	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
++}
++
++/**
++ * qbman_swp_fq_xoff() - Sets FQ flow-control to XOFF
++ * @s: the software portal object
++ * @fqid: the index of frame queue
++ *
++ * This setting doesn't affect enqueues to the FQ, just dequeues.
++ * XOFF FQs will remain in the tentatively-scheduled state, even when
++ * non-empty, meaning they won't be selected for scheduled dequeuing.
++ * If a FQ is changed to XOFF after it had already become truly-scheduled
++ * to a channel, and a pull dequeue of that channel occurs that selects
++ * that FQ for dequeuing, then the resulting dq_entry will have no FD.
++ * (qbman_result_DQ_fd() will return NULL.)
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
++{
++	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
++}
++
++/* If the user has been allocated a channel object that is going to generate
++ * CDANs to another channel, then the qbman_swp_CDAN* functions will be
++ * necessary.
++ *
++ * CDAN-enabled channels only generate a single CDAN notification, after which
++ * they need to be reenabled before they'll generate another. The idea is
++ * that pull dequeuing will occur in reaction to the CDAN, followed by a
++ * reenable step. Each function generates a distinct command to hardware, so a
++ * combination function is provided if the user wishes to modify the "context"
++ * (which shows up in each CDAN message) each time they reenable, as a single
++ * command to hardware.
++ */
++
++/**
++ * qbman_swp_CDAN_set_context() - Set CDAN context
++ * @s: the software portal object
++ * @channelid: the channel index
++ * @ctx: the context to be set in CDAN
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
++					     u64 ctx)
++{
++	return qbman_swp_CDAN_set(s, channelid,
++				  CODE_CDAN_WE_CTX,
++				  0, ctx);
++}
++
++/**
++ * qbman_swp_CDAN_enable() - Enable CDAN for the channel
++ * @s: the software portal object
++ * @channelid: the index of the channel to generate CDAN
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
++{
++	return qbman_swp_CDAN_set(s, channelid,
++				  CODE_CDAN_WE_EN,
++				  1, 0);
++}
++
++/**
++ * qbman_swp_CDAN_disable() - disable CDAN for the channel
++ * @s: the software portal object
++ * @channelid: the index of the channel to generate CDAN
++ *
++ * Return 0 for success, or negative error code for failure.
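++ *
++ * The single-shot re-arm pattern described in the comment block above, as a
++ * sketch (illustrative only; 'swp', 'chid' and 'ctx' assumed valid):
++ *	qbman_swp_CDAN_set_context_enable(swp, chid, ctx);
++ *	... a CDAN fires; frames are pull-dequeued and processed ...
++ *	qbman_swp_CDAN_enable(swp, chid);	re-arm for the next CDAN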
++ */
++static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
++{
++	return qbman_swp_CDAN_set(s, channelid,
++				  CODE_CDAN_WE_EN,
++				  0, 0);
++}
++
++/**
++ * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
++ * @s: the software portal object
++ * @channelid: the index of the channel to generate CDAN
++ * @ctx: the context to be set in CDAN
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
++						    u16 channelid,
++						    u64 ctx)
++{
++	return qbman_swp_CDAN_set(s, channelid,
++				  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
++				  1, ctx);
++}
++
++/* Wraps up submit + poll-for-result */
++static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
++					  u8 cmd_verb)
++{
++	int loopvar = 1000;
++
++	qbman_swp_mc_submit(swp, cmd, cmd_verb);
++
++	do {
++		cmd = qbman_swp_mc_result(swp);
++	} while (!cmd && loopvar--);
++
++	WARN_ON(!loopvar);
++
++	return cmd;
++}
++
++/* ------------ */
++/* qb_attr_code */
++/* ------------ */
++
++/* This struct locates a sub-field within a QBMan portal (CENA) cacheline which
++ * is either serving as a configuration command or a query result. The
++ * representation is inherently little-endian, as the indexing of the words is
++ * itself little-endian in nature and Layerscape is little-endian for anything
++ * that crosses a word boundary too (64-bit fields are the obvious examples).
++ */
++struct qb_attr_code {
++	unsigned int word; /* which u32[] array member encodes the field */
++	unsigned int lsoffset; /* encoding offset from ls-bit */
++	unsigned int width; /* encoding width. (bool must be 1.) */
++};
++
++/* Some pre-defined codes */
++extern struct qb_attr_code code_generic_verb;
++extern struct qb_attr_code code_generic_rslt;
++
++/* Macros to define codes */
++#define QB_CODE(a, b, c) { a, b, c }
++#define QB_CODE_NULL \
++	QB_CODE((unsigned int)-1, (unsigned int)-1, (unsigned int)-1)
++
++/* Rotate a code "ms", meaning that it moves from less-significant bytes to
++ * more-significant, from less-significant words to more-significant, etc. The
++ * "ls" version does the inverse, from more-significant towards
++ * less-significant.
++ */
++static inline void qb_attr_code_rotate_ms(struct qb_attr_code *code,
++					  unsigned int bits)
++{
++	code->lsoffset += bits;
++	while (code->lsoffset > 31) {
++		code->word++;
++		code->lsoffset -= 32;
++	}
++}
++
++static inline void qb_attr_code_rotate_ls(struct qb_attr_code *code,
++					  unsigned int bits)
++{
++	/* Don't be fooled, this trick should work because the types are
++	 * unsigned. So the case that interests the while loop (the rotate has
++	 * gone too far and the word count needs to compensate for it), is
++	 * manifested when lsoffset is negative. But that equates to a really
++	 * large unsigned value, starting with lots of "F"s. As such, we can
++	 * continue adding 32 back to it until it wraps back round above zero,
++	 * to a value of 31 or less...
++	 */
++	code->lsoffset -= bits;
++	while (code->lsoffset > 31) {
++		code->word--;
++		code->lsoffset += 32;
++	}
++}
++
++/* Implement a loop of code rotations until 'expr' evaluates to FALSE (0). 
++ */
++#define qb_attr_code_for_ms(code, bits, expr) \
++	for (; expr; qb_attr_code_rotate_ms(code, bits))
++#define qb_attr_code_for_ls(code, bits, expr) \
++	for (; expr; qb_attr_code_rotate_ls(code, bits))
++
++static inline void word_copy(void *d, const void *s, unsigned int cnt)
++{
++	u32 *dd = d;
++	const u32 *ss = s;
++
++	while (cnt--)
++		*(dd++) = *(ss++);
++}
++
++/*
++ * Currently, the CENA support code expects each 32-bit word to be written in
++ * host order, and these are converted to hardware (little-endian) order on
++ * command submission. However, 64-bit quantities must be written (and read)
++ * as two 32-bit words with the least-significant word first, irrespective of
++ * host endianness.
++ */
++static inline void u64_to_le32_copy(void *d, const u64 *s,
++				    unsigned int cnt)
++{
++	u32 *dd = d;
++	const u32 *ss = (const u32 *)s;
++
++	while (cnt--) {
++		/*
++		 * TBD: the toolchain was choking on the use of 64-bit types up
++		 * until recently so this works entirely with 32-bit variables.
++		 * When 64-bit types become usable again, investigate better
++		 * ways of doing this.
++		 */
++#if defined(__BIG_ENDIAN)
++		*(dd++) = ss[1];
++		*(dd++) = ss[0];
++		ss += 2;
++#else
++		*(dd++) = *(ss++);
++		*(dd++) = *(ss++);
++#endif
++	}
++}
++
++static inline void u64_from_le32_copy(u64 *d, const void *s,
++				      unsigned int cnt)
++{
++	const u32 *ss = s;
++	u32 *dd = (u32 *)d;
++
++	while (cnt--) {
++#if defined(__BIG_ENDIAN)
++		dd[1] = *(ss++);
++		dd[0] = *(ss++);
++		dd += 2;
++#else
++		*(dd++) = *(ss++);
++		*(dd++) = *(ss++);
++#endif
++	}
++}
++
++/* decode a field from a cacheline */
++static inline u32 qb_attr_code_decode(const struct qb_attr_code *code,
++				      const u32 *cacheline)
++{
++	return d32_u32(code->lsoffset, code->width, cacheline[code->word]);
++}
++
++static inline u64 qb_attr_code_decode_64(const struct qb_attr_code *code,
++					 const u64 *cacheline)
++{
++	u64 res;
++
++	u64_from_le32_copy(&res, &cacheline[code->word / 2], 1);
++	return res;
++}
++
++/* encode a field to a cacheline */
++static inline void qb_attr_code_encode(const struct qb_attr_code *code,
++				       u32 *cacheline, u32 val)
++{
++	cacheline[code->word] =
++		r32_u32(code->lsoffset, code->width, cacheline[code->word])
++		| e32_u32(code->lsoffset, code->width, val);
++}
++
++static inline void qb_attr_code_encode_64(const struct qb_attr_code *code,
++					  u64 *cacheline, u64 val)
++{
++	u64_to_le32_copy(&cacheline[code->word / 2], &val, 1);
++}
++
++/* Small-width signed values (two's-complement) will decode into medium-width
++ * positives. (Eg. for an 8-bit signed field, which stores values from -128 to
++ * +127, a setting of -7 would appear to decode to the 32-bit unsigned value
++ * 249. Likewise -120 would decode as 136.) This function allows the caller to
++ * "re-sign" such fields to 32-bit signed. (Eg. -7, which was 249 with an 8-bit
++ * encoding, will become 0xfffffff9 if you cast the return value to u32).
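++ * In general, for a width-w field, any decoded value >= 2^(w-1)
++ * represents value - 2^w (e.g. 249 - 256 = -7), which is exactly what
++ * this helper computes.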
++ */ ++static inline int32_t qb_attr_code_makesigned(const struct qb_attr_code *code, ++ u32 val) ++{ ++ WARN_ON(val >= (1 << code->width)); ++ /* If the high bit was set, it was encoding a negative */ ++ if (val >= (1 << (code->width - 1))) ++ return (int32_t)0 - (int32_t)(((u32)1 << code->width) - ++ val); ++ /* Otherwise, it was encoding a positive */ ++ return (int32_t)val; ++} ++ ++/* ---------------------- */ ++/* Descriptors/cachelines */ ++/* ---------------------- */ ++ ++/* To avoid needless dynamic allocation, the driver API often gives the caller ++ * a "descriptor" type that the caller can instantiate however they like. ++ * Ultimately though, it is just a cacheline of binary storage (or something ++ * smaller when it is known that the descriptor doesn't need all 64 bytes) for ++ * holding pre-formatted pieces of hardware commands. The performance-critical ++ * code can then copy these descriptors directly into hardware command ++ * registers more efficiently than trying to construct/format commands ++ * on-the-fly. The API user sees the descriptor as an array of 32-bit words in ++ * order for the compiler to know its size, but the internal details are not ++ * exposed. The following macro is used within the driver for converting *any* ++ * descriptor pointer to a usable array pointer. The use of a macro (instead of ++ * an inline) is necessary to work with different descriptor types and to work ++ * correctly with const and non-const inputs (and similarly-qualified outputs). ++ */ ++#define qb_cl(d) (&(d)->dont_manipulate_directly[0]) ++ ++#endif /* __FSL_QBMAN_PORTAL_H */ +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c +@@ -0,0 +1,853 @@ ++/* Copyright (C) 2015 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include ++ ++#include "../../include/dpaa2-global.h" ++#include "qbman-portal.h" ++#include "qbman_debug.h" ++ ++/* QBMan portal management command code */ ++#define QBMAN_BP_QUERY 0x32 ++#define QBMAN_FQ_QUERY 0x44 ++#define QBMAN_FQ_QUERY_NP 0x45 ++#define QBMAN_CGR_QUERY 0x51 ++#define QBMAN_WRED_QUERY 0x54 ++#define QBMAN_CGR_STAT_QUERY 0x55 ++#define QBMAN_CGR_STAT_QUERY_CLR 0x56 ++ ++enum qbman_attr_usage_e { ++ qbman_attr_usage_fq, ++ qbman_attr_usage_bpool, ++ qbman_attr_usage_cgr, ++}; ++ ++struct int_qbman_attr { ++ u32 words[32]; ++ enum qbman_attr_usage_e usage; ++}; ++ ++#define attr_type_set(a, e) \ ++{ \ ++ struct qbman_attr *__attr = a; \ ++ enum qbman_attr_usage_e __usage = e; \ ++ ((struct int_qbman_attr *)__attr)->usage = __usage; \ ++} ++ ++#define ATTR32(d) (&(d)->dont_manipulate_directly[0]) ++#define ATTR32_1(d) (&(d)->dont_manipulate_directly[16]) ++ ++static struct qb_attr_code code_bp_bpid = QB_CODE(0, 16, 16); ++static struct qb_attr_code code_bp_bdi = QB_CODE(1, 16, 1); ++static struct qb_attr_code code_bp_va = QB_CODE(1, 17, 1); ++static struct qb_attr_code code_bp_wae = QB_CODE(1, 18, 1); ++static struct qb_attr_code code_bp_swdet = QB_CODE(4, 0, 16); ++static struct qb_attr_code code_bp_swdxt = QB_CODE(4, 16, 16); ++static struct qb_attr_code code_bp_hwdet = QB_CODE(5, 0, 16); ++static struct qb_attr_code code_bp_hwdxt = QB_CODE(5, 16, 16); ++static struct qb_attr_code code_bp_swset = QB_CODE(6, 0, 16); ++static struct qb_attr_code code_bp_swsxt = QB_CODE(6, 16, 16); ++static struct qb_attr_code code_bp_vbpid = QB_CODE(7, 0, 14); ++static struct qb_attr_code code_bp_icid = QB_CODE(7, 16, 15); ++static struct qb_attr_code code_bp_pl = QB_CODE(7, 31, 1); ++static struct qb_attr_code code_bp_bpscn_addr_lo = QB_CODE(8, 0, 32); ++static struct qb_attr_code code_bp_bpscn_addr_hi = QB_CODE(9, 0, 32); ++static struct qb_attr_code code_bp_bpscn_ctx_lo = QB_CODE(10, 0, 32); ++static struct qb_attr_code code_bp_bpscn_ctx_hi = QB_CODE(11, 0, 32); ++static struct qb_attr_code code_bp_hw_targ = QB_CODE(12, 0, 16); ++static struct qb_attr_code code_bp_state = QB_CODE(1, 24, 3); ++static struct qb_attr_code code_bp_fill = QB_CODE(2, 0, 32); ++static struct qb_attr_code code_bp_hdptr = QB_CODE(3, 0, 32); ++static struct qb_attr_code code_bp_sdcnt = QB_CODE(13, 0, 8); ++static struct qb_attr_code code_bp_hdcnt = QB_CODE(13, 1, 8); ++static struct qb_attr_code code_bp_sscnt = QB_CODE(13, 2, 8); ++ ++void qbman_bp_attr_clear(struct qbman_attr *a) ++{ ++ memset(a, 0, sizeof(*a)); ++ attr_type_set(a, qbman_attr_usage_bpool); ++} ++ ++int qbman_bp_query(struct qbman_swp *s, u32 bpid, ++ struct qbman_attr *a) ++{ ++ u32 *p; ++ u32 verb, rslt; ++ u32 *attr = ATTR32(a); ++ ++ qbman_bp_attr_clear(a); ++ ++ /* Start the management command */ ++ p = qbman_swp_mc_start(s); ++ if (!p) ++ return -EBUSY; ++ ++ /* Encode the caller-provided attributes */ ++ qb_attr_code_encode(&code_bp_bpid, p, bpid); ++ ++ /* Complete the management command */ ++ p = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY); ++ ++ /* Decode the outcome */ ++ verb = qb_attr_code_decode(&code_generic_verb, p); ++ rslt = qb_attr_code_decode(&code_generic_rslt, p); ++ WARN_ON(verb != QBMAN_BP_QUERY); ++ ++ /* Determine success or failure */ ++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("Query of BPID 0x%x failed, code=0x%02x\n", bpid, rslt); ++ return -EIO; ++ } ++ ++ /* For the query, word[0] of the result contains only the ++ * verb/rslt fields, so skip word[0]. 
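++	 * The response is a single 64-byte cacheline, i.e. sixteen 32-bit
++	 * words, so fifteen words remain once word[0] is dropped.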
++ */ ++ word_copy(&attr[1], &p[1], 15); ++ return 0; ++} ++ ++void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae) ++{ ++ u32 *p = ATTR32(a); ++ ++ *bdi = !!qb_attr_code_decode(&code_bp_bdi, p); ++ *va = !!qb_attr_code_decode(&code_bp_va, p); ++ *wae = !!qb_attr_code_decode(&code_bp_wae, p); ++} ++ ++static u32 qbman_bp_thresh_to_value(u32 val) ++{ ++ return (val & 0xff) << ((val & 0xf00) >> 8); ++} ++ ++void qbman_bp_attr_get_swdet(struct qbman_attr *a, u32 *swdet) ++{ ++ u32 *p = ATTR32(a); ++ ++ *swdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdet, ++ p)); ++} ++ ++void qbman_bp_attr_get_swdxt(struct qbman_attr *a, u32 *swdxt) ++{ ++ u32 *p = ATTR32(a); ++ ++ *swdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdxt, ++ p)); ++} ++ ++void qbman_bp_attr_get_hwdet(struct qbman_attr *a, u32 *hwdet) ++{ ++ u32 *p = ATTR32(a); ++ ++ *hwdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdet, ++ p)); ++} ++ ++void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, u32 *hwdxt) ++{ ++ u32 *p = ATTR32(a); ++ ++ *hwdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdxt, ++ p)); ++} ++ ++void qbman_bp_attr_get_swset(struct qbman_attr *a, u32 *swset) ++{ ++ u32 *p = ATTR32(a); ++ ++ *swset = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swset, ++ p)); ++} ++ ++void qbman_bp_attr_get_swsxt(struct qbman_attr *a, u32 *swsxt) ++{ ++ u32 *p = ATTR32(a); ++ ++ *swsxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swsxt, ++ p)); ++} ++ ++void qbman_bp_attr_get_vbpid(struct qbman_attr *a, u32 *vbpid) ++{ ++ u32 *p = ATTR32(a); ++ ++ *vbpid = qb_attr_code_decode(&code_bp_vbpid, p); ++} ++ ++void qbman_bp_attr_get_icid(struct qbman_attr *a, u32 *icid, int *pl) ++{ ++ u32 *p = ATTR32(a); ++ ++ *icid = qb_attr_code_decode(&code_bp_icid, p); ++ *pl = !!qb_attr_code_decode(&code_bp_pl, p); ++} ++ ++void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, u64 *bpscn_addr) ++{ ++ u32 *p = ATTR32(a); ++ ++ *bpscn_addr = ((u64)qb_attr_code_decode(&code_bp_bpscn_addr_hi, ++ p) << 32) | ++ (u64)qb_attr_code_decode(&code_bp_bpscn_addr_lo, ++ p); ++} ++ ++void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, u64 *bpscn_ctx) ++{ ++ u32 *p = ATTR32(a); ++ ++ *bpscn_ctx = ((u64)qb_attr_code_decode(&code_bp_bpscn_ctx_hi, p) ++ << 32) | ++ (u64)qb_attr_code_decode(&code_bp_bpscn_ctx_lo, ++ p); ++} ++ ++void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, u32 *hw_targ) ++{ ++ u32 *p = ATTR32(a); ++ ++ *hw_targ = qb_attr_code_decode(&code_bp_hw_targ, p); ++} ++ ++int qbman_bp_info_has_free_bufs(struct qbman_attr *a) ++{ ++ u32 *p = ATTR32(a); ++ ++ return !(int)(qb_attr_code_decode(&code_bp_state, p) & 0x1); ++} ++ ++int qbman_bp_info_is_depleted(struct qbman_attr *a) ++{ ++ u32 *p = ATTR32(a); ++ ++ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x2); ++} ++ ++int qbman_bp_info_is_surplus(struct qbman_attr *a) ++{ ++ u32 *p = ATTR32(a); ++ ++ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x4); ++} ++ ++u32 qbman_bp_info_num_free_bufs(struct qbman_attr *a) ++{ ++ u32 *p = ATTR32(a); ++ ++ return qb_attr_code_decode(&code_bp_fill, p); ++} ++ ++u32 qbman_bp_info_hdptr(struct qbman_attr *a) ++{ ++ u32 *p = ATTR32(a); ++ ++ return qb_attr_code_decode(&code_bp_hdptr, p); ++} ++ ++u32 qbman_bp_info_sdcnt(struct qbman_attr *a) ++{ ++ u32 *p = ATTR32(a); ++ ++ return qb_attr_code_decode(&code_bp_sdcnt, p); ++} ++ ++u32 qbman_bp_info_hdcnt(struct qbman_attr *a) ++{ ++ u32 *p = ATTR32(a); ++ ++ return qb_attr_code_decode(&code_bp_hdcnt, 
p); ++} ++ ++u32 qbman_bp_info_sscnt(struct qbman_attr *a) ++{ ++ u32 *p = ATTR32(a); ++ ++ return qb_attr_code_decode(&code_bp_sscnt, p); ++} ++ ++static struct qb_attr_code code_fq_fqid = QB_CODE(1, 0, 24); ++static struct qb_attr_code code_fq_cgrid = QB_CODE(2, 16, 16); ++static struct qb_attr_code code_fq_destwq = QB_CODE(3, 0, 15); ++static struct qb_attr_code code_fq_fqctrl = QB_CODE(3, 24, 8); ++static struct qb_attr_code code_fq_icscred = QB_CODE(4, 0, 15); ++static struct qb_attr_code code_fq_tdthresh = QB_CODE(4, 16, 13); ++static struct qb_attr_code code_fq_oa_len = QB_CODE(5, 0, 12); ++static struct qb_attr_code code_fq_oa_ics = QB_CODE(5, 14, 1); ++static struct qb_attr_code code_fq_oa_cgr = QB_CODE(5, 15, 1); ++static struct qb_attr_code code_fq_mctl_bdi = QB_CODE(5, 24, 1); ++static struct qb_attr_code code_fq_mctl_ff = QB_CODE(5, 25, 1); ++static struct qb_attr_code code_fq_mctl_va = QB_CODE(5, 26, 1); ++static struct qb_attr_code code_fq_mctl_ps = QB_CODE(5, 27, 1); ++static struct qb_attr_code code_fq_ctx_lower32 = QB_CODE(6, 0, 32); ++static struct qb_attr_code code_fq_ctx_upper32 = QB_CODE(7, 0, 32); ++static struct qb_attr_code code_fq_icid = QB_CODE(8, 0, 15); ++static struct qb_attr_code code_fq_pl = QB_CODE(8, 15, 1); ++static struct qb_attr_code code_fq_vfqid = QB_CODE(9, 0, 24); ++static struct qb_attr_code code_fq_erfqid = QB_CODE(10, 0, 24); ++ ++void qbman_fq_attr_clear(struct qbman_attr *a) ++{ ++ memset(a, 0, sizeof(*a)); ++ attr_type_set(a, qbman_attr_usage_fq); ++} ++ ++/* FQ query function for programmable fields */ ++int qbman_fq_query(struct qbman_swp *s, u32 fqid, struct qbman_attr *desc) ++{ ++ u32 *p; ++ u32 verb, rslt; ++ u32 *d = ATTR32(desc); ++ ++ qbman_fq_attr_clear(desc); ++ ++ p = qbman_swp_mc_start(s); ++ if (!p) ++ return -EBUSY; ++ qb_attr_code_encode(&code_fq_fqid, p, fqid); ++ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY); ++ ++ /* Decode the outcome */ ++ verb = qb_attr_code_decode(&code_generic_verb, p); ++ rslt = qb_attr_code_decode(&code_generic_rslt, p); ++ WARN_ON(verb != QBMAN_FQ_QUERY); ++ ++ /* Determine success or failure */ ++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("Query of FQID 0x%x failed, code=0x%02x\n", ++ fqid, rslt); ++ return -EIO; ++ } ++ /* ++ * For the configure, word[0] of the command contains only the WE-mask. ++ * For the query, word[0] of the result contains only the verb/rslt ++ * fields. Skip word[0] in the latter case. 
++ */
++	word_copy(&d[1], &p[1], 15);
++	return 0;
++}
++
++void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, u32 *fqctrl)
++{
++	u32 *p = ATTR32(d);
++
++	*fqctrl = qb_attr_code_decode(&code_fq_fqctrl, p);
++}
++
++void qbman_fq_attr_get_cgrid(struct qbman_attr *d, u32 *cgrid)
++{
++	u32 *p = ATTR32(d);
++
++	*cgrid = qb_attr_code_decode(&code_fq_cgrid, p);
++}
++
++void qbman_fq_attr_get_destwq(struct qbman_attr *d, u32 *destwq)
++{
++	u32 *p = ATTR32(d);
++
++	*destwq = qb_attr_code_decode(&code_fq_destwq, p);
++}
++
++void qbman_fq_attr_get_icscred(struct qbman_attr *d, u32 *icscred)
++{
++	u32 *p = ATTR32(d);
++
++	*icscred = qb_attr_code_decode(&code_fq_icscred, p);
++}
++
++static struct qb_attr_code code_tdthresh_exp = QB_CODE(0, 0, 5);
++static struct qb_attr_code code_tdthresh_mant = QB_CODE(0, 5, 8);
++static u32 qbman_thresh_to_value(u32 val)
++{
++	u32 m, e;
++
++	m = qb_attr_code_decode(&code_tdthresh_mant, &val);
++	e = qb_attr_code_decode(&code_tdthresh_exp, &val);
++	return m << e;
++}
++
++void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, u32 *tdthresh)
++{
++	u32 *p = ATTR32(d);
++
++	*tdthresh = qbman_thresh_to_value(qb_attr_code_decode(&code_fq_tdthresh,
++							      p));
++}
++
++void qbman_fq_attr_get_oa(struct qbman_attr *d,
++			  int *oa_ics, int *oa_cgr, int32_t *oa_len)
++{
++	u32 *p = ATTR32(d);
++
++	*oa_ics = !!qb_attr_code_decode(&code_fq_oa_ics, p);
++	*oa_cgr = !!qb_attr_code_decode(&code_fq_oa_cgr, p);
++	*oa_len = qb_attr_code_makesigned(&code_fq_oa_len,
++					  qb_attr_code_decode(&code_fq_oa_len, p));
++}
++
++void qbman_fq_attr_get_mctl(struct qbman_attr *d,
++			    int *bdi, int *ff, int *va, int *ps)
++{
++	u32 *p = ATTR32(d);
++
++	*bdi = !!qb_attr_code_decode(&code_fq_mctl_bdi, p);
++	*ff = !!qb_attr_code_decode(&code_fq_mctl_ff, p);
++	*va = !!qb_attr_code_decode(&code_fq_mctl_va, p);
++	*ps = !!qb_attr_code_decode(&code_fq_mctl_ps, p);
++}
++
++void qbman_fq_attr_get_ctx(struct qbman_attr *d, u32 *hi, u32 *lo)
++{
++	u32 *p = ATTR32(d);
++
++	*hi = qb_attr_code_decode(&code_fq_ctx_upper32, p);
++	*lo = qb_attr_code_decode(&code_fq_ctx_lower32, p);
++}
++
++void qbman_fq_attr_get_icid(struct qbman_attr *d, u32 *icid, int *pl)
++{
++	u32 *p = ATTR32(d);
++
++	*icid = qb_attr_code_decode(&code_fq_icid, p);
++	*pl = !!qb_attr_code_decode(&code_fq_pl, p);
++}
++
++void qbman_fq_attr_get_vfqid(struct qbman_attr *d, u32 *vfqid)
++{
++	u32 *p = ATTR32(d);
++
++	*vfqid = qb_attr_code_decode(&code_fq_vfqid, p);
++}
++
++void qbman_fq_attr_get_erfqid(struct qbman_attr *d, u32 *erfqid)
++{
++	u32 *p = ATTR32(d);
++
++	*erfqid = qb_attr_code_decode(&code_fq_erfqid, p);
++}
++
++/* Query FQ Non-Programmable Fields */
++static struct qb_attr_code code_fq_np_state = QB_CODE(0, 16, 3);
++static struct qb_attr_code code_fq_np_fe = QB_CODE(0, 19, 1);
++static struct qb_attr_code code_fq_np_x = QB_CODE(0, 20, 1);
++static struct qb_attr_code code_fq_np_r = QB_CODE(0, 21, 1);
++static struct qb_attr_code code_fq_np_oe = QB_CODE(0, 22, 1);
++static struct qb_attr_code code_fq_np_frm_cnt = QB_CODE(6, 0, 24);
++static struct qb_attr_code code_fq_np_byte_cnt = QB_CODE(7, 0, 32);
++
++int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
++			 struct qbman_attr *state)
++{
++	u32 *p;
++	u32 verb, rslt;
++	u32 *d = ATTR32(state);
++
++	qbman_fq_attr_clear(state);
++
++	p = qbman_swp_mc_start(s);
++	if (!p)
++		return -EBUSY;
++	qb_attr_code_encode(&code_fq_fqid, p, fqid);
++	p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
++
++	/* Decode the outcome */
++	verb =
qb_attr_code_decode(&code_generic_verb, p); ++ rslt = qb_attr_code_decode(&code_generic_rslt, p); ++ WARN_ON(verb != QBMAN_FQ_QUERY_NP); ++ ++ /* Determine success or failure */ ++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n", ++ fqid, rslt); ++ return -EIO; ++ } ++ word_copy(&d[0], &p[0], 16); ++ return 0; ++} ++ ++u32 qbman_fq_state_schedstate(const struct qbman_attr *state) ++{ ++ const u32 *p = ATTR32(state); ++ ++ return qb_attr_code_decode(&code_fq_np_state, p); ++} ++ ++int qbman_fq_state_force_eligible(const struct qbman_attr *state) ++{ ++ const u32 *p = ATTR32(state); ++ ++ return !!qb_attr_code_decode(&code_fq_np_fe, p); ++} ++ ++int qbman_fq_state_xoff(const struct qbman_attr *state) ++{ ++ const u32 *p = ATTR32(state); ++ ++ return !!qb_attr_code_decode(&code_fq_np_x, p); ++} ++ ++int qbman_fq_state_retirement_pending(const struct qbman_attr *state) ++{ ++ const u32 *p = ATTR32(state); ++ ++ return !!qb_attr_code_decode(&code_fq_np_r, p); ++} ++ ++int qbman_fq_state_overflow_error(const struct qbman_attr *state) ++{ ++ const u32 *p = ATTR32(state); ++ ++ return !!qb_attr_code_decode(&code_fq_np_oe, p); ++} ++ ++u32 qbman_fq_state_frame_count(const struct qbman_attr *state) ++{ ++ const u32 *p = ATTR32(state); ++ ++ return qb_attr_code_decode(&code_fq_np_frm_cnt, p); ++} ++ ++u32 qbman_fq_state_byte_count(const struct qbman_attr *state) ++{ ++ const u32 *p = ATTR32(state); ++ ++ return qb_attr_code_decode(&code_fq_np_byte_cnt, p); ++} ++ ++/* Query CGR */ ++static struct qb_attr_code code_cgr_cgid = QB_CODE(0, 16, 16); ++static struct qb_attr_code code_cgr_cscn_wq_en_enter = QB_CODE(2, 0, 1); ++static struct qb_attr_code code_cgr_cscn_wq_en_exit = QB_CODE(2, 1, 1); ++static struct qb_attr_code code_cgr_cscn_wq_icd = QB_CODE(2, 2, 1); ++static struct qb_attr_code code_cgr_mode = QB_CODE(3, 16, 2); ++static struct qb_attr_code code_cgr_rej_cnt_mode = QB_CODE(3, 18, 1); ++static struct qb_attr_code code_cgr_cscn_bdi = QB_CODE(3, 19, 1); ++static struct qb_attr_code code_cgr_cscn_wr_en_enter = QB_CODE(3, 24, 1); ++static struct qb_attr_code code_cgr_cscn_wr_en_exit = QB_CODE(3, 25, 1); ++static struct qb_attr_code code_cgr_cg_wr_ae = QB_CODE(3, 26, 1); ++static struct qb_attr_code code_cgr_cscn_dcp_en = QB_CODE(3, 27, 1); ++static struct qb_attr_code code_cgr_cg_wr_va = QB_CODE(3, 28, 1); ++static struct qb_attr_code code_cgr_i_cnt_wr_en = QB_CODE(4, 0, 1); ++static struct qb_attr_code code_cgr_i_cnt_wr_bnd = QB_CODE(4, 1, 5); ++static struct qb_attr_code code_cgr_td_en = QB_CODE(4, 8, 1); ++static struct qb_attr_code code_cgr_cs_thres = QB_CODE(4, 16, 13); ++static struct qb_attr_code code_cgr_cs_thres_x = QB_CODE(5, 0, 13); ++static struct qb_attr_code code_cgr_td_thres = QB_CODE(5, 16, 13); ++static struct qb_attr_code code_cgr_cscn_tdcp = QB_CODE(6, 0, 16); ++static struct qb_attr_code code_cgr_cscn_wqid = QB_CODE(6, 16, 16); ++static struct qb_attr_code code_cgr_cscn_vcgid = QB_CODE(7, 0, 16); ++static struct qb_attr_code code_cgr_cg_icid = QB_CODE(7, 16, 15); ++static struct qb_attr_code code_cgr_cg_pl = QB_CODE(7, 31, 1); ++static struct qb_attr_code code_cgr_cg_wr_addr_lo = QB_CODE(8, 0, 32); ++static struct qb_attr_code code_cgr_cg_wr_addr_hi = QB_CODE(9, 0, 32); ++static struct qb_attr_code code_cgr_cscn_ctx_lo = QB_CODE(10, 0, 32); ++static struct qb_attr_code code_cgr_cscn_ctx_hi = QB_CODE(11, 0, 32); ++ ++void qbman_cgr_attr_clear(struct qbman_attr *a) ++{ ++ memset(a, 0, sizeof(*a)); ++ attr_type_set(a, 
qbman_attr_usage_cgr);
++}
++
++int qbman_cgr_query(struct qbman_swp *s, u32 cgid, struct qbman_attr *attr)
++{
++	u32 *p;
++	u32 verb, rslt;
++	u32 *d[2];
++	int i;
++	u32 query_verb;
++
++	d[0] = ATTR32(attr);
++	d[1] = ATTR32_1(attr);
++
++	qbman_cgr_attr_clear(attr);
++
++	for (i = 0; i < 2; i++) {
++		p = qbman_swp_mc_start(s);
++		if (!p)
++			return -EBUSY;
++		query_verb = i ? QBMAN_WRED_QUERY : QBMAN_CGR_QUERY;
++
++		qb_attr_code_encode(&code_cgr_cgid, p, cgid);
++		p = qbman_swp_mc_complete(s, p, p[0] | query_verb);
++
++		/* Decode the outcome */
++		verb = qb_attr_code_decode(&code_generic_verb, p);
++		rslt = qb_attr_code_decode(&code_generic_rslt, p);
++		WARN_ON(verb != query_verb);
++
++		/* Determine success or failure */
++		if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++			pr_err("Query CGID 0x%x failed,", cgid);
++			pr_err(" verb=0x%02x, code=0x%02x\n", verb, rslt);
++			return -EIO;
++		}
++		/* For the configure, word[0] of the command contains only the
++		 * verb/cgid. For the query, word[0] of the result contains
++		 * only the verb/rslt fields. Skip word[0] in the latter case.
++		 */
++		word_copy(&d[i][1], &p[1], 15);
++	}
++	return 0;
++}
++
++void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter,
++			     int *cscn_wq_en_exit, int *cscn_wq_icd)
++{
++	u32 *p = ATTR32(d);
++	*cscn_wq_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_enter,
++						  p);
++	*cscn_wq_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_exit, p);
++	*cscn_wq_icd = !!qb_attr_code_decode(&code_cgr_cscn_wq_icd, p);
++}
++
++void qbman_cgr_attr_get_mode(struct qbman_attr *d, u32 *mode,
++			     int *rej_cnt_mode, int *cscn_bdi)
++{
++	u32 *p = ATTR32(d);
++	*mode = qb_attr_code_decode(&code_cgr_mode, p);
++	*rej_cnt_mode = !!qb_attr_code_decode(&code_cgr_rej_cnt_mode, p);
++	*cscn_bdi = !!qb_attr_code_decode(&code_cgr_cscn_bdi, p);
++}
++
++void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter,
++			     int *cscn_wr_en_exit, int *cg_wr_ae,
++			     int *cscn_dcp_en, int *cg_wr_va)
++{
++	u32 *p = ATTR32(d);
++	*cscn_wr_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_enter,
++						  p);
++	*cscn_wr_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_exit, p);
++	*cg_wr_ae = !!qb_attr_code_decode(&code_cgr_cg_wr_ae, p);
++	*cscn_dcp_en = !!qb_attr_code_decode(&code_cgr_cscn_dcp_en, p);
++	*cg_wr_va = !!qb_attr_code_decode(&code_cgr_cg_wr_va, p);
++}
++
++void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en,
++			    u32 *i_cnt_wr_bnd)
++{
++	u32 *p = ATTR32(d);
++	*i_cnt_wr_en = !!qb_attr_code_decode(&code_cgr_i_cnt_wr_en, p);
++	*i_cnt_wr_bnd = qb_attr_code_decode(&code_cgr_i_cnt_wr_bnd, p);
++}
++
++void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en)
++{
++	u32 *p = ATTR32(d);
++	*td_en = !!qb_attr_code_decode(&code_cgr_td_en, p);
++}
++
++void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, u32 *cs_thres)
++{
++	u32 *p = ATTR32(d);
++	*cs_thres = qbman_thresh_to_value(qb_attr_code_decode(
++						&code_cgr_cs_thres, p));
++}
++
++void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d,
++				   u32 *cs_thres_x)
++{
++	u32 *p = ATTR32(d);
++	*cs_thres_x = qbman_thresh_to_value(qb_attr_code_decode(
++						&code_cgr_cs_thres_x, p));
++}
++
++void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, u32 *td_thres)
++{
++	u32 *p = ATTR32(d);
++	*td_thres = qbman_thresh_to_value(qb_attr_code_decode(
++						&code_cgr_td_thres, p));
++}
++
++void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, u32 *cscn_tdcp)
++{
++	u32 *p = ATTR32(d);
++	*cscn_tdcp = qb_attr_code_decode(&code_cgr_cscn_tdcp, p);
++}
++
++void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, u32 *cscn_wqid) ++{ ++ u32 *p = ATTR32(d); ++ *cscn_wqid = qb_attr_code_decode(&code_cgr_cscn_wqid, p); ++} ++ ++void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d, ++ u32 *cscn_vcgid) ++{ ++ u32 *p = ATTR32(d); ++ *cscn_vcgid = qb_attr_code_decode(&code_cgr_cscn_vcgid, p); ++} ++ ++void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, u32 *icid, ++ int *pl) ++{ ++ u32 *p = ATTR32(d); ++ *icid = qb_attr_code_decode(&code_cgr_cg_icid, p); ++ *pl = !!qb_attr_code_decode(&code_cgr_cg_pl, p); ++} ++ ++void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d, ++ u64 *cg_wr_addr) ++{ ++ u32 *p = ATTR32(d); ++ *cg_wr_addr = ((u64)qb_attr_code_decode(&code_cgr_cg_wr_addr_hi, ++ p) << 32) | ++ (u64)qb_attr_code_decode(&code_cgr_cg_wr_addr_lo, ++ p); ++} ++ ++void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, u64 *cscn_ctx) ++{ ++ u32 *p = ATTR32(d); ++ *cscn_ctx = ((u64)qb_attr_code_decode(&code_cgr_cscn_ctx_hi, p) ++ << 32) | ++ (u64)qb_attr_code_decode(&code_cgr_cscn_ctx_lo, p); ++} ++ ++#define WRED_EDP_WORD(n) (18 + (n) / 4) ++#define WRED_EDP_OFFSET(n) (8 * ((n) % 4)) ++#define WRED_PARM_DP_WORD(n) ((n) + 20) ++#define WRED_WE_EDP(n) (16 + (n) * 2) ++#define WRED_WE_PARM_DP(n) (17 + (n) * 2) ++void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, u32 idx, ++ int *edp) ++{ ++ u32 *p = ATTR32(d); ++ struct qb_attr_code code_wred_edp = QB_CODE(WRED_EDP_WORD(idx), ++ WRED_EDP_OFFSET(idx), 8); ++ *edp = (int)qb_attr_code_decode(&code_wred_edp, p); ++} ++ ++void qbman_cgr_attr_wred_dp_decompose(u32 dp, u64 *minth, ++ u64 *maxth, u8 *maxp) ++{ ++ u8 ma, mn, step_i, step_s, pn; ++ ++ ma = (u8)(dp >> 24); ++ mn = (u8)(dp >> 19) & 0x1f; ++ step_i = (u8)(dp >> 11); ++ step_s = (u8)(dp >> 6) & 0x1f; ++ pn = (u8)dp & 0x3f; ++ ++ *maxp = ((pn << 2) * 100) / 256; ++ ++ if (mn == 0) ++ *maxth = ma; ++ else ++ *maxth = ((ma + 256) * (1 << (mn - 1))); ++ ++ if (step_s == 0) ++ *minth = *maxth - step_i; ++ else ++ *minth = *maxth - (256 + step_i) * (1 << (step_s - 1)); ++} ++ ++void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, u32 idx, ++ u32 *dp) ++{ ++ u32 *p = ATTR32(d); ++ struct qb_attr_code code_wred_parm_dp = QB_CODE(WRED_PARM_DP_WORD(idx), ++ 0, 8); ++ *dp = qb_attr_code_decode(&code_wred_parm_dp, p); ++} ++ ++/* Query CGR/CCGR/CQ statistics */ ++static struct qb_attr_code code_cgr_stat_ct = QB_CODE(4, 0, 32); ++static struct qb_attr_code code_cgr_stat_frame_cnt_lo = QB_CODE(4, 0, 32); ++static struct qb_attr_code code_cgr_stat_frame_cnt_hi = QB_CODE(5, 0, 8); ++static struct qb_attr_code code_cgr_stat_byte_cnt_lo = QB_CODE(6, 0, 32); ++static struct qb_attr_code code_cgr_stat_byte_cnt_hi = QB_CODE(7, 0, 16); ++static int qbman_cgr_statistics_query(struct qbman_swp *s, u32 cgid, ++ int clear, u32 command_type, ++ u64 *frame_cnt, u64 *byte_cnt) ++{ ++ u32 *p; ++ u32 verb, rslt; ++ u32 query_verb; ++ u32 hi, lo; ++ ++ p = qbman_swp_mc_start(s); ++ if (!p) ++ return -EBUSY; ++ ++ qb_attr_code_encode(&code_cgr_cgid, p, cgid); ++ if (command_type < 2) ++ qb_attr_code_encode(&code_cgr_stat_ct, p, command_type); ++ query_verb = clear ? 
++			QBMAN_CGR_STAT_QUERY_CLR : QBMAN_CGR_STAT_QUERY;
++	p = qbman_swp_mc_complete(s, p, p[0] | query_verb);
++
++	/* Decode the outcome */
++	verb = qb_attr_code_decode(&code_generic_verb, p);
++	rslt = qb_attr_code_decode(&code_generic_rslt, p);
++	WARN_ON(verb != query_verb);
++
++	/* Determine success or failure */
++	if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++		pr_err("Query statistics of CGID 0x%x failed,", cgid);
++		pr_err(" verb=0x%02x code=0x%02x\n", verb, rslt);
++		return -EIO;
++	}
++
++	if (frame_cnt) {
++		hi = qb_attr_code_decode(&code_cgr_stat_frame_cnt_hi, p);
++		lo = qb_attr_code_decode(&code_cgr_stat_frame_cnt_lo, p);
++		*frame_cnt = ((u64)hi << 32) | (u64)lo;
++	}
++	if (byte_cnt) {
++		hi = qb_attr_code_decode(&code_cgr_stat_byte_cnt_hi, p);
++		lo = qb_attr_code_decode(&code_cgr_stat_byte_cnt_lo, p);
++		*byte_cnt = ((u64)hi << 32) | (u64)lo;
++	}
++
++	return 0;
++}
++
++int qbman_cgr_reject_statistics(struct qbman_swp *s, u32 cgid, int clear,
++				u64 *frame_cnt, u64 *byte_cnt)
++{
++	return qbman_cgr_statistics_query(s, cgid, clear, 0xff,
++					  frame_cnt, byte_cnt);
++}
++
++int qbman_ccgr_reject_statistics(struct qbman_swp *s, u32 cgid, int clear,
++				 u64 *frame_cnt, u64 *byte_cnt)
++{
++	return qbman_cgr_statistics_query(s, cgid, clear, 1,
++					  frame_cnt, byte_cnt);
++}
++
++int qbman_cq_dequeue_statistics(struct qbman_swp *s, u32 cgid, int clear,
++				u64 *frame_cnt, u64 *byte_cnt)
++{
++	return qbman_cgr_statistics_query(s, cgid, clear, 0,
++					  frame_cnt, byte_cnt);
++}
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h
+@@ -0,0 +1,136 @@
++/* Copyright (C) 2015 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *     * Redistributions of source code must retain the above copyright
++ *       notice, this list of conditions and the following disclaimer.
++ *     * Redistributions in binary form must reproduce the above copyright
++ *       notice, this list of conditions and the following disclaimer in the
++ *       documentation and/or other materials provided with the distribution.
++ *     * Neither the name of Freescale Semiconductor nor the
++ *       names of its contributors may be used to endorse or promote products
++ *       derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++struct qbman_attr {
++	u32 dont_manipulate_directly[40];
++};
++
++/* Buffer pool query commands */
++int qbman_bp_query(struct qbman_swp *s, u32 bpid,
++		   struct qbman_attr *a);
++void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae);
++void qbman_bp_attr_get_swdet(struct qbman_attr *a, u32 *swdet);
++void qbman_bp_attr_get_swdxt(struct qbman_attr *a, u32 *swdxt);
++void qbman_bp_attr_get_hwdet(struct qbman_attr *a, u32 *hwdet);
++void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, u32 *hwdxt);
++void qbman_bp_attr_get_swset(struct qbman_attr *a, u32 *swset);
++void qbman_bp_attr_get_swsxt(struct qbman_attr *a, u32 *swsxt);
++void qbman_bp_attr_get_vbpid(struct qbman_attr *a, u32 *vbpid);
++void qbman_bp_attr_get_icid(struct qbman_attr *a, u32 *icid, int *pl);
++void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, u64 *bpscn_addr);
++void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, u64 *bpscn_ctx);
++void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, u32 *hw_targ);
++int qbman_bp_info_has_free_bufs(struct qbman_attr *a);
++int qbman_bp_info_is_depleted(struct qbman_attr *a);
++int qbman_bp_info_is_surplus(struct qbman_attr *a);
++u32 qbman_bp_info_num_free_bufs(struct qbman_attr *a);
++u32 qbman_bp_info_hdptr(struct qbman_attr *a);
++u32 qbman_bp_info_sdcnt(struct qbman_attr *a);
++u32 qbman_bp_info_hdcnt(struct qbman_attr *a);
++u32 qbman_bp_info_sscnt(struct qbman_attr *a);
++
++/* FQ query function for programmable fields */
++int qbman_fq_query(struct qbman_swp *s, u32 fqid,
++		   struct qbman_attr *desc);
++void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, u32 *fqctrl);
++void qbman_fq_attr_get_cgrid(struct qbman_attr *d, u32 *cgrid);
++void qbman_fq_attr_get_destwq(struct qbman_attr *d, u32 *destwq);
++void qbman_fq_attr_get_icscred(struct qbman_attr *d, u32 *icscred);
++void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, u32 *tdthresh);
++void qbman_fq_attr_get_oa(struct qbman_attr *d,
++			  int *oa_ics, int *oa_cgr, int32_t *oa_len);
++void qbman_fq_attr_get_mctl(struct qbman_attr *d,
++			    int *bdi, int *ff, int *va, int *ps);
++void qbman_fq_attr_get_ctx(struct qbman_attr *d, u32 *hi, u32 *lo);
++void qbman_fq_attr_get_icid(struct qbman_attr *d, u32 *icid, int *pl);
++void qbman_fq_attr_get_vfqid(struct qbman_attr *d, u32 *vfqid);
++void qbman_fq_attr_get_erfqid(struct qbman_attr *d, u32 *erfqid);
++
++/* FQ query command for non-programmable fields */
++enum qbman_fq_schedstate_e {
++	qbman_fq_schedstate_oos = 0,
++	qbman_fq_schedstate_retired,
++	qbman_fq_schedstate_tentatively_scheduled,
++	qbman_fq_schedstate_truly_scheduled,
++	qbman_fq_schedstate_parked,
++	qbman_fq_schedstate_held_active,
++};
++
++int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
++			 struct qbman_attr *state);
++u32 qbman_fq_state_schedstate(const struct qbman_attr *state);
++int qbman_fq_state_force_eligible(const struct qbman_attr *state);
++int qbman_fq_state_xoff(const struct qbman_attr *state);
++int qbman_fq_state_retirement_pending(const struct qbman_attr *state);
++int qbman_fq_state_overflow_error(const struct qbman_attr *state);
++u32 qbman_fq_state_frame_count(const struct qbman_attr *state);
++u32 qbman_fq_state_byte_count(const struct qbman_attr *state);
++
++/* CGR query */
++int qbman_cgr_query(struct qbman_swp *s, u32 cgid,
++		    struct qbman_attr *attr);
++void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter,
++			     int *cscn_wq_en_exit, int *cscn_wq_icd);
++void qbman_cgr_attr_get_mode(struct qbman_attr *d, u32 *mode,
++			     int
*rej_cnt_mode, int *cscn_bdi); ++void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter, ++ int *cscn_wr_en_exit, int *cg_wr_ae, ++ int *cscn_dcp_en, int *cg_wr_va); ++void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en, ++ u32 *i_cnt_wr_bnd); ++void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en); ++void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, u32 *cs_thres); ++void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d, ++ u32 *cs_thres_x); ++void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, u32 *td_thres); ++void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, u32 *cscn_tdcp); ++void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, u32 *cscn_wqid); ++void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d, ++ u32 *cscn_vcgid); ++void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, u32 *icid, ++ int *pl); ++void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d, ++ u64 *cg_wr_addr); ++void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, u64 *cscn_ctx); ++void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, u32 idx, ++ int *edp); ++void qbman_cgr_attr_wred_dp_decompose(u32 dp, u64 *minth, ++ u64 *maxth, u8 *maxp); ++void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, u32 idx, ++ u32 *dp); ++ ++/* CGR/CCGR/CQ statistics query */ ++int qbman_cgr_reject_statistics(struct qbman_swp *s, u32 cgid, int clear, ++ u64 *frame_cnt, u64 *byte_cnt); ++int qbman_ccgr_reject_statistics(struct qbman_swp *s, u32 cgid, int clear, ++ u64 *frame_cnt, u64 *byte_cnt); ++int qbman_cq_dequeue_statistics(struct qbman_swp *s, u32 cgid, int clear, ++ u64 *frame_cnt, u64 *byte_cnt); +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_private.h +@@ -0,0 +1,171 @@ ++/* Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/* Perform extra checking */
++#define QBMAN_CHECKING
++
++/* To maximise the amount of logic that is common between the Linux driver and
++ * other targets (such as the embedded MC firmware), we pivot here between the
++ * inclusion of two platform-specific headers.
++ *
++ * The first, qbman_sys_decl.h, includes any and all required system headers as
++ * well as providing any definitions for the purposes of compatibility. The
++ * second, qbman_sys.h, is where platform-specific routines go.
++ *
++ * The point of the split is that the platform-independent code (including this
++ * header) may depend on platform-specific declarations, yet other
++ * platform-specific routines may depend on platform-independent definitions.
++ */
++
++#define QMAN_REV_4000 0x04000000
++#define QMAN_REV_4100 0x04010000
++#define QMAN_REV_4101 0x04010001
++
++/* When things go wrong, it is a convenient trick to insert a few FOO()
++ * statements in the code to trace progress. TODO: remove this once we are
++ * hacking the code less actively.
++ */
++#define FOO() fsl_os_print("FOO: %s:%d\n", __FILE__, __LINE__)
++
++/* Any time there is a register interface which we poll on, this provides a
++ * "break after x iterations" scheme for it. It's handy for debugging, eg.
++ * where you don't want millions of lines of log output from a polling loop
++ * that won't terminate, because such things tend to drown out the earlier
++ * log output that might explain what caused the problem. (NB: put ";" after
++ * each macro!)
++ * TODO: we should probably remove this once we're done sanitising the
++ * simulator...
++ */
++#define DBG_POLL_START(loopvar) (loopvar = 1000)
++#define DBG_POLL_CHECK(loopvar) \
++	do { if (!((loopvar)--)) WARN_ON(1); } while (0)
++
++/* For CCSR or portal-CINH registers that contain fields at arbitrary offsets
++ * and widths, these macro-generated encode/decode/isolate/remove inlines can
++ * be used.
++ *
++ * Eg. to "d"ecode a 14-bit field out of a register (into a "u16" type),
++ * where the field is located 3 bits "up" from the least-significant bit of the
++ * register (ie. the field location within the 32-bit register corresponds to a
++ * mask of 0x0001fff8), you would do;
++ *	u16 field = d32_u16(3, 14, reg_value);
++ *
++ * Or to "e"ncode a 1-bit boolean value (input type is "int", zero is FALSE,
++ * non-zero is TRUE, so must convert all non-zero inputs to 1, hence the "!!"
++ * operator) into a register at bit location 0x00080000 (19 bits "in" from the
++ * LS bit), do;
++ *	reg_value |= e32_int(19, 1, !!field);
++ *
++ * If you wish to read-modify-write a register, such that you leave the 14-bit
++ * field as-is but have all other fields set to zero, then "i"solate the 14-bit
++ * value using;
++ *	reg_value = i32_u16(3, 14, reg_value);
++ *
++ * Alternatively, you could "r"emove the 1-bit boolean field (setting it to
++ * zero) but leaving all other fields as-is;
++ *	reg_value = r32_int(19, 1, reg_value);
++ *
++ */
++#define MAKE_MASK32(width) (width == 32 ?
0xffffffff : \ ++ (u32)((1 << width) - 1)) ++#define DECLARE_CODEC32(t) \ ++static inline u32 e32_##t(u32 lsoffset, u32 width, t val) \ ++{ \ ++ WARN_ON(width > (sizeof(t) * 8)); \ ++ return ((u32)val & MAKE_MASK32(width)) << lsoffset; \ ++} \ ++static inline t d32_##t(u32 lsoffset, u32 width, u32 val) \ ++{ \ ++ WARN_ON(width > (sizeof(t) * 8)); \ ++ return (t)((val >> lsoffset) & MAKE_MASK32(width)); \ ++} \ ++static inline u32 i32_##t(u32 lsoffset, u32 width, \ ++ u32 val) \ ++{ \ ++ WARN_ON(width > (sizeof(t) * 8)); \ ++ return e32_##t(lsoffset, width, d32_##t(lsoffset, width, val)); \ ++} \ ++static inline u32 r32_##t(u32 lsoffset, u32 width, \ ++ u32 val) \ ++{ \ ++ WARN_ON(width > (sizeof(t) * 8)); \ ++ return ~(MAKE_MASK32(width) << lsoffset) & val; \ ++} ++DECLARE_CODEC32(u32) ++DECLARE_CODEC32(u16) ++DECLARE_CODEC32(u8) ++DECLARE_CODEC32(int) ++ ++ /*********************/ ++ /* Debugging assists */ ++ /*********************/ ++ ++static inline void __hexdump(unsigned long start, unsigned long end, ++ unsigned long p, size_t sz, ++ const unsigned char *c) ++{ ++ while (start < end) { ++ unsigned int pos = 0; ++ char buf[64]; ++ int nl = 0; ++ ++ pos += sprintf(buf + pos, "%08lx: ", start); ++ do { ++ if ((start < p) || (start >= (p + sz))) ++ pos += sprintf(buf + pos, ".."); ++ else ++ pos += sprintf(buf + pos, "%02x", *(c++)); ++ if (!(++start & 15)) { ++ buf[pos++] = '\n'; ++ nl = 1; ++ } else { ++ nl = 0; ++ if (!(start & 1)) ++ buf[pos++] = ' '; ++ if (!(start & 3)) ++ buf[pos++] = ' '; ++ } ++ } while (start & 15); ++ if (!nl) ++ buf[pos++] = '\n'; ++ buf[pos] = '\0'; ++ pr_info("%s", buf); ++ } ++} ++ ++static inline void hexdump(const void *ptr, size_t sz) ++{ ++ unsigned long p = (unsigned long)ptr; ++ unsigned long start = p & ~15ul; ++ unsigned long end = (p + sz + 15) & ~15ul; ++ const unsigned char *c = ptr; ++ ++ __hexdump(start, end, p, sz, c); ++} +--- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h ++++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h +@@ -1,4 +1,5 @@ +-/* Copyright 2013-2016 Freescale Semiconductor Inc. ++/* ++ * Copyright 2013-2016 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: +@@ -11,7 +12,6 @@ + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * +- * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any +@@ -33,108 +33,24 @@ + #define _FSL_DPMCP_CMD_H + + /* Minimal supported DPMCP Version */ +-#define DPMCP_MIN_VER_MAJOR 3 +-#define DPMCP_MIN_VER_MINOR 0 +- +-/* Command IDs */ +-#define DPMCP_CMDID_CLOSE 0x800 +-#define DPMCP_CMDID_OPEN 0x80b +-#define DPMCP_CMDID_CREATE 0x90b +-#define DPMCP_CMDID_DESTROY 0x900 +- +-#define DPMCP_CMDID_GET_ATTR 0x004 +-#define DPMCP_CMDID_RESET 0x005 +- +-#define DPMCP_CMDID_SET_IRQ 0x010 +-#define DPMCP_CMDID_GET_IRQ 0x011 +-#define DPMCP_CMDID_SET_IRQ_ENABLE 0x012 +-#define DPMCP_CMDID_GET_IRQ_ENABLE 0x013 +-#define DPMCP_CMDID_SET_IRQ_MASK 0x014 +-#define DPMCP_CMDID_GET_IRQ_MASK 0x015 +-#define DPMCP_CMDID_GET_IRQ_STATUS 0x016 +- +-struct dpmcp_cmd_open { +- __le32 dpmcp_id; +-}; +- +-struct dpmcp_cmd_create { +- __le32 portal_id; +-}; +- +-struct dpmcp_cmd_set_irq { +- /* cmd word 0 */ +- u8 irq_index; +- u8 pad[3]; +- __le32 irq_val; +- /* cmd word 1 */ +- __le64 irq_addr; +- /* cmd word 2 */ +- __le32 irq_num; +-}; +- +-struct dpmcp_cmd_get_irq { +- __le32 pad; +- u8 irq_index; +-}; +- +-struct dpmcp_rsp_get_irq { +- /* cmd word 0 */ +- __le32 irq_val; +- __le32 pad; +- /* cmd word 1 */ +- __le64 irq_paddr; +- /* cmd word 2 */ +- __le32 irq_num; +- __le32 type; +-}; ++#define DPMCP_MIN_VER_MAJOR 3 ++#define DPMCP_MIN_VER_MINOR 0 + +-#define DPMCP_ENABLE 0x1 ++/* Command versioning */ ++#define DPMCP_CMD_BASE_VERSION 1 ++#define DPMCP_CMD_ID_OFFSET 4 + +-struct dpmcp_cmd_set_irq_enable { +- u8 enable; +- u8 pad[3]; +- u8 irq_index; +-}; ++#define DPMCP_CMD(id) ((id << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION) + +-struct dpmcp_cmd_get_irq_enable { +- __le32 pad; +- u8 irq_index; +-}; +- +-struct dpmcp_rsp_get_irq_enable { +- u8 enabled; +-}; +- +-struct dpmcp_cmd_set_irq_mask { +- __le32 mask; +- u8 irq_index; +-}; +- +-struct dpmcp_cmd_get_irq_mask { +- __le32 pad; +- u8 irq_index; +-}; +- +-struct dpmcp_rsp_get_irq_mask { +- __le32 mask; +-}; ++/* Command IDs */ ++#define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800) ++#define DPMCP_CMDID_OPEN DPMCP_CMD(0x80b) ++#define DPMCP_CMDID_GET_API_VERSION DPMCP_CMD(0xa0b) + +-struct dpmcp_cmd_get_irq_status { +- __le32 status; +- u8 irq_index; +-}; ++#define DPMCP_CMDID_RESET DPMCP_CMD(0x005) + +-struct dpmcp_rsp_get_irq_status { +- __le32 status; +-}; +- +-struct dpmcp_rsp_get_attributes { +- /* response word 0 */ +- __le32 pad; +- __le32 id; +- /* response word 1 */ +- __le16 version_major; +- __le16 version_minor; ++struct dpmcp_cmd_open { ++ __le32 dpmcp_id; + }; + + #endif /* _FSL_DPMCP_CMD_H */ +--- a/drivers/staging/fsl-mc/bus/dpmcp.c ++++ b/drivers/staging/fsl-mc/bus/dpmcp.c +@@ -1,4 +1,5 @@ +-/* Copyright 2013-2016 Freescale Semiconductor Inc. ++/* ++ * Copyright 2013-2016 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: +@@ -11,7 +12,6 @@ + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * +- * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any +@@ -104,76 +104,6 @@ int dpmcp_close(struct fsl_mc_io *mc_io, + } + + /** +- * dpmcp_create() - Create the DPMCP object. +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @cfg: Configuration structure +- * @token: Returned token; use in subsequent API calls +- * +- * Create the DPMCP object, allocate required resources and +- * perform required initialization. +- * +- * The object can be created either by declaring it in the +- * DPL file, or by calling this function. +- * This function returns a unique authentication token, +- * associated with the specific object ID and the specific MC +- * portal; this token must be used in all subsequent calls to +- * this specific object. For objects that are created using the +- * DPL file, call dpmcp_open function to get an authentication +- * token first. +- * +- * Return: '0' on Success; Error code otherwise. +- */ +-int dpmcp_create(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- const struct dpmcp_cfg *cfg, +- u16 *token) +-{ +- struct mc_command cmd = { 0 }; +- struct dpmcp_cmd_create *cmd_params; +- +- int err; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE, +- cmd_flags, 0); +- cmd_params = (struct dpmcp_cmd_create *)cmd.params; +- cmd_params->portal_id = cpu_to_le32(cfg->portal_id); +- +- /* send command to mc*/ +- err = mc_send_command(mc_io, &cmd); +- if (err) +- return err; +- +- /* retrieve response parameters */ +- *token = mc_cmd_hdr_read_token(&cmd); +- +- return 0; +-} +- +-/** +- * dpmcp_destroy() - Destroy the DPMCP object and release all its resources. +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPMCP object +- * +- * Return: '0' on Success; error code otherwise. +- */ +-int dpmcp_destroy(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token) +-{ +- struct mc_command cmd = { 0 }; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_DESTROY, +- cmd_flags, token); +- +- /* send command to mc*/ +- return mc_send_command(mc_io, &cmd); +-} +- +-/** + * dpmcp_reset() - Reset the DPMCP, returns the object to initial state. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +@@ -196,309 +126,33 @@ int dpmcp_reset(struct fsl_mc_io *mc_io, + } + + /** +- * dpmcp_set_irq() - Set IRQ information for the DPMCP to trigger an interrupt. +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPMCP object +- * @irq_index: Identifies the interrupt index to configure +- * @irq_cfg: IRQ configuration +- * +- * Return: '0' on Success; Error code otherwise. 
+- */
+-int dpmcp_set_irq(struct fsl_mc_io *mc_io,
+-		  u32 cmd_flags,
+-		  u16 token,
+-		  u8 irq_index,
+-		  struct dpmcp_irq_cfg *irq_cfg)
+-{
+-	struct mc_command cmd = { 0 };
+-	struct dpmcp_cmd_set_irq *cmd_params;
+-
+-	/* prepare command */
+-	cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ,
+-					  cmd_flags, token);
+-	cmd_params = (struct dpmcp_cmd_set_irq *)cmd.params;
+-	cmd_params->irq_index = irq_index;
+-	cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+-	cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+-	cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
+-
+-	/* send command to mc*/
+-	return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpmcp_get_irq() - Get IRQ information from the DPMCP.
+- * @mc_io: Pointer to MC portal's I/O object
++ * dpmcp_get_api_version - Get Data Path Management Command Portal API version
++ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: The interrupt index to configure
+- * @type: Interrupt type: 0 represents message interrupt
+- *	type (both irq_addr and irq_val are valid)
+- * @irq_cfg: IRQ attributes
++ * @major_ver: Major version of Data Path Management Command Portal API
++ * @minor_ver: Minor version of Data Path Management Command Portal API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+-int dpmcp_get_irq(struct fsl_mc_io *mc_io,
+-		  u32 cmd_flags,
+-		  u16 token,
+-		  u8 irq_index,
+-		  int *type,
+-		  struct dpmcp_irq_cfg *irq_cfg)
++int dpmcp_get_api_version(struct fsl_mc_io *mc_io,
++			  u32 cmd_flags,
++			  u16 *major_ver,
++			  u16 *minor_ver)
+ {
+ 	struct mc_command cmd = { 0 };
+-	struct dpmcp_cmd_get_irq *cmd_params;
+-	struct dpmcp_rsp_get_irq *rsp_params;
+ 	int err;
+ 
+ 	/* prepare command */
+-	cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ,
+-					  cmd_flags, token);
+-	cmd_params = (struct dpmcp_cmd_get_irq *)cmd.params;
+-	cmd_params->irq_index = irq_index;
+-
+-	/* send command to mc*/
+-	err = mc_send_command(mc_io, &cmd);
+-	if (err)
+-		return err;
+-
+-	/* retrieve response parameters */
+-	rsp_params = (struct dpmcp_rsp_get_irq *)cmd.params;
+-	irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
+-	irq_cfg->paddr = le64_to_cpu(rsp_params->irq_paddr);
+-	irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
+-	*type = le32_to_cpu(rsp_params->type);
+-	return 0;
+-}
+-
+-/**
+- * dpmcp_set_irq_enable() - Set overall interrupt state.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: The interrupt index to configure
+- * @en: Interrupt state - enable = 1, disable = 0
+- *
+- * Allows GPP software to control when interrupts are generated.
+- * Each interrupt can have up to 32 causes. The enable/disable control's the
+- * overall interrupt state. if the interrupt is disabled no causes will cause
+- * an interrupt.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */ +-int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u8 en) +-{ +- struct mc_command cmd = { 0 }; +- struct dpmcp_cmd_set_irq_enable *cmd_params; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_ENABLE, +- cmd_flags, token); +- cmd_params = (struct dpmcp_cmd_set_irq_enable *)cmd.params; +- cmd_params->enable = en & DPMCP_ENABLE; +- cmd_params->irq_index = irq_index; +- +- /* send command to mc*/ +- return mc_send_command(mc_io, &cmd); +-} +- +-/** +- * dpmcp_get_irq_enable() - Get overall interrupt state +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPMCP object +- * @irq_index: The interrupt index to configure +- * @en: Returned interrupt state - enable = 1, disable = 0 +- * +- * Return: '0' on Success; Error code otherwise. +- */ +-int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u8 *en) +-{ +- struct mc_command cmd = { 0 }; +- struct dpmcp_cmd_get_irq_enable *cmd_params; +- struct dpmcp_rsp_get_irq_enable *rsp_params; +- int err; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_ENABLE, +- cmd_flags, token); +- cmd_params = (struct dpmcp_cmd_get_irq_enable *)cmd.params; +- cmd_params->irq_index = irq_index; +- +- /* send command to mc*/ +- err = mc_send_command(mc_io, &cmd); +- if (err) +- return err; +- +- /* retrieve response parameters */ +- rsp_params = (struct dpmcp_rsp_get_irq_enable *)cmd.params; +- *en = rsp_params->enabled & DPMCP_ENABLE; +- return 0; +-} +- +-/** +- * dpmcp_set_irq_mask() - Set interrupt mask. +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPMCP object +- * @irq_index: The interrupt index to configure +- * @mask: Event mask to trigger interrupt; +- * each bit: +- * 0 = ignore event +- * 1 = consider event for asserting IRQ +- * +- * Every interrupt can have up to 32 causes and the interrupt model supports +- * masking/unmasking each cause independently +- * +- * Return: '0' on Success; Error code otherwise. +- */ +-int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u32 mask) +-{ +- struct mc_command cmd = { 0 }; +- struct dpmcp_cmd_set_irq_mask *cmd_params; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_MASK, +- cmd_flags, token); +- cmd_params = (struct dpmcp_cmd_set_irq_mask *)cmd.params; +- cmd_params->mask = cpu_to_le32(mask); +- cmd_params->irq_index = irq_index; +- +- /* send command to mc*/ +- return mc_send_command(mc_io, &cmd); +-} +- +-/** +- * dpmcp_get_irq_mask() - Get interrupt mask. +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPMCP object +- * @irq_index: The interrupt index to configure +- * @mask: Returned event mask to trigger interrupt +- * +- * Every interrupt can have up to 32 causes and the interrupt model supports +- * masking/unmasking each cause independently +- * +- * Return: '0' on Success; Error code otherwise. 
+- */ +-int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u32 *mask) +-{ +- struct mc_command cmd = { 0 }; +- struct dpmcp_cmd_get_irq_mask *cmd_params; +- struct dpmcp_rsp_get_irq_mask *rsp_params; +- +- int err; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_MASK, +- cmd_flags, token); +- cmd_params = (struct dpmcp_cmd_get_irq_mask *)cmd.params; +- cmd_params->irq_index = irq_index; +- +- /* send command to mc*/ +- err = mc_send_command(mc_io, &cmd); +- if (err) +- return err; +- +- /* retrieve response parameters */ +- rsp_params = (struct dpmcp_rsp_get_irq_mask *)cmd.params; +- *mask = le32_to_cpu(rsp_params->mask); +- +- return 0; +-} +- +-/** +- * dpmcp_get_irq_status() - Get the current status of any pending interrupts. +- * +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPMCP object +- * @irq_index: The interrupt index to configure +- * @status: Returned interrupts status - one bit per cause: +- * 0 = no interrupt pending +- * 1 = interrupt pending +- * +- * Return: '0' on Success; Error code otherwise. +- */ +-int dpmcp_get_irq_status(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u32 *status) +-{ +- struct mc_command cmd = { 0 }; +- struct dpmcp_cmd_get_irq_status *cmd_params; +- struct dpmcp_rsp_get_irq_status *rsp_params; +- int err; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_STATUS, +- cmd_flags, token); +- cmd_params = (struct dpmcp_cmd_get_irq_status *)cmd.params; +- cmd_params->status = cpu_to_le32(*status); +- cmd_params->irq_index = irq_index; +- +- /* send command to mc*/ +- err = mc_send_command(mc_io, &cmd); +- if (err) +- return err; +- +- /* retrieve response parameters */ +- rsp_params = (struct dpmcp_rsp_get_irq_status *)cmd.params; +- *status = le32_to_cpu(rsp_params->status); +- +- return 0; +-} +- +-/** +- * dpmcp_get_attributes - Retrieve DPMCP attributes. +- * +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPMCP object +- * @attr: Returned object's attributes +- * +- * Return: '0' on Success; Error code otherwise. +- */ +-int dpmcp_get_attributes(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- struct dpmcp_attr *attr) +-{ +- struct mc_command cmd = { 0 }; +- struct dpmcp_rsp_get_attributes *rsp_params; +- int err; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_ATTR, +- cmd_flags, token); ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_API_VERSION, ++ cmd_flags, 0); + +- /* send command to mc*/ ++ /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ +- rsp_params = (struct dpmcp_rsp_get_attributes *)cmd.params; +- attr->id = le32_to_cpu(rsp_params->id); +- attr->version.major = le16_to_cpu(rsp_params->version_major); +- attr->version.minor = le16_to_cpu(rsp_params->version_minor); ++ mc_cmd_read_api_version(&cmd, major_ver, minor_ver); + + return 0; + } +--- a/drivers/staging/fsl-mc/bus/dpmcp.h ++++ b/drivers/staging/fsl-mc/bus/dpmcp.h +@@ -1,4 +1,5 @@ +-/* Copyright 2013-2015 Freescale Semiconductor Inc. ++/* ++ * Copyright 2013-2016 Freescale Semiconductor Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: +@@ -11,7 +12,6 @@ + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * +- * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any +@@ -32,128 +32,29 @@ + #ifndef __FSL_DPMCP_H + #define __FSL_DPMCP_H + +-/* Data Path Management Command Portal API ++/* ++ * Data Path Management Command Portal API + * Contains initialization APIs and runtime control APIs for DPMCP + */ + + struct fsl_mc_io; + + int dpmcp_open(struct fsl_mc_io *mc_io, +- uint32_t cmd_flags, ++ u32 cmd_flags, + int dpmcp_id, +- uint16_t *token); +- +-/* Get portal ID from pool */ +-#define DPMCP_GET_PORTAL_ID_FROM_POOL (-1) ++ u16 *token); + + int dpmcp_close(struct fsl_mc_io *mc_io, +- uint32_t cmd_flags, +- uint16_t token); ++ u32 cmd_flags, ++ u16 token); + +-/** +- * struct dpmcp_cfg - Structure representing DPMCP configuration +- * @portal_id: Portal ID; 'DPMCP_GET_PORTAL_ID_FROM_POOL' to get the portal ID +- * from pool +- */ +-struct dpmcp_cfg { +- int portal_id; +-}; +- +-int dpmcp_create(struct fsl_mc_io *mc_io, +- uint32_t cmd_flags, +- const struct dpmcp_cfg *cfg, +- uint16_t *token); +- +-int dpmcp_destroy(struct fsl_mc_io *mc_io, +- uint32_t cmd_flags, +- uint16_t token); ++int dpmcp_get_api_version(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 *major_ver, ++ u16 *minor_ver); + + int dpmcp_reset(struct fsl_mc_io *mc_io, +- uint32_t cmd_flags, +- uint16_t token); +- +-/* IRQ */ +-/* IRQ Index */ +-#define DPMCP_IRQ_INDEX 0 +-/* irq event - Indicates that the link state changed */ +-#define DPMCP_IRQ_EVENT_CMD_DONE 0x00000001 +- +-/** +- * struct dpmcp_irq_cfg - IRQ configuration +- * @paddr: Address that must be written to signal a message-based interrupt +- * @val: Value to write into irq_addr address +- * @irq_num: A user defined number associated with this IRQ +- */ +-struct dpmcp_irq_cfg { +- uint64_t paddr; +- uint32_t val; +- int irq_num; +-}; +- +-int dpmcp_set_irq(struct fsl_mc_io *mc_io, +- uint32_t cmd_flags, +- uint16_t token, +- uint8_t irq_index, +- struct dpmcp_irq_cfg *irq_cfg); +- +-int dpmcp_get_irq(struct fsl_mc_io *mc_io, +- uint32_t cmd_flags, +- uint16_t token, +- uint8_t irq_index, +- int *type, +- struct dpmcp_irq_cfg *irq_cfg); +- +-int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io, +- uint32_t cmd_flags, +- uint16_t token, +- uint8_t irq_index, +- uint8_t en); +- +-int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io, +- uint32_t cmd_flags, +- uint16_t token, +- uint8_t irq_index, +- uint8_t *en); +- +-int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io, +- uint32_t cmd_flags, +- uint16_t token, +- uint8_t irq_index, +- uint32_t mask); +- +-int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io, +- uint32_t cmd_flags, +- uint16_t token, +- uint8_t irq_index, +- uint32_t *mask); +- +-int dpmcp_get_irq_status(struct fsl_mc_io *mc_io, +- uint32_t cmd_flags, +- uint16_t token, +- uint8_t irq_index, +- uint32_t *status); +- +-/** +- * struct dpmcp_attr - Structure representing DPMCP attributes +- * @id: DPMCP object ID +- * @version: DPMCP version +- */ +-struct dpmcp_attr { +- int id; +- /** +- * struct version - Structure representing DPMCP version +- * @major: DPMCP major version +- * @minor: 
DPMCP minor version +- */ +- struct { +- uint16_t major; +- uint16_t minor; +- } version; +-}; +- +-int dpmcp_get_attributes(struct fsl_mc_io *mc_io, +- uint32_t cmd_flags, +- uint16_t token, +- struct dpmcp_attr *attr); ++ u32 cmd_flags, ++ u16 token); + + #endif /* __FSL_DPMCP_H */ +--- a/drivers/staging/fsl-mc/bus/dpmng-cmd.h ++++ b/drivers/staging/fsl-mc/bus/dpmng-cmd.h +@@ -12,7 +12,6 @@ + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * +- * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any +@@ -41,13 +40,14 @@ + #ifndef __FSL_DPMNG_CMD_H + #define __FSL_DPMNG_CMD_H + +-/* Command IDs */ +-#define DPMNG_CMDID_GET_CONT_ID 0x830 +-#define DPMNG_CMDID_GET_VERSION 0x831 ++/* Command versioning */ ++#define DPMNG_CMD_BASE_VERSION 1 ++#define DPMNG_CMD_ID_OFFSET 4 + +-struct dpmng_rsp_get_container_id { +- __le32 container_id; +-}; ++#define DPMNG_CMD(id) ((id << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION) ++ ++/* Command IDs */ ++#define DPMNG_CMDID_GET_VERSION DPMNG_CMD(0x831) + + struct dpmng_rsp_get_version { + __le32 revision; +--- a/drivers/staging/fsl-mc/bus/dpmng.c ++++ b/drivers/staging/fsl-mc/bus/dpmng.c +@@ -1,4 +1,5 @@ +-/* Copyright 2013-2016 Freescale Semiconductor Inc. ++/* ++ * Copyright 2013-2016 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: +@@ -11,7 +12,6 @@ + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * +- * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any +@@ -72,36 +72,3 @@ int mc_get_version(struct fsl_mc_io *mc_ + } + EXPORT_SYMBOL(mc_get_version); + +-/** +- * dpmng_get_container_id() - Get container ID associated with a given portal. +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @container_id: Requested container ID +- * +- * Return: '0' on Success; Error code otherwise. +- */ +-int dpmng_get_container_id(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- int *container_id) +-{ +- struct mc_command cmd = { 0 }; +- struct dpmng_rsp_get_container_id *rsp_params; +- int err; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_CONT_ID, +- cmd_flags, +- 0); +- +- /* send command to mc*/ +- err = mc_send_command(mc_io, &cmd); +- if (err) +- return err; +- +- /* retrieve response parameters */ +- rsp_params = (struct dpmng_rsp_get_container_id *)cmd.params; +- *container_id = le32_to_cpu(rsp_params->container_id); +- +- return 0; +-} +- +--- a/drivers/staging/fsl-mc/bus/dprc-cmd.h ++++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h +@@ -12,7 +12,6 @@ + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
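The DPMNG_CMD() macro folds a command version into the low four bits of what used to be a bare command ID, so the firmware can dispatch on both. A worked expansion under the definitions above; note that the macro does not parenthesize its id argument, so it is only safe for plain integer constants:

#define DPMNG_CMD_BASE_VERSION	1
#define DPMNG_CMD_ID_OFFSET	4
#define DPMNG_CMD(id)	((id << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)

/* 0x831 << 4 = 0x8310; OR-ing in base version 1 gives 0x8311 */
_Static_assert(DPMNG_CMD(0x831) == 0x8311,
	       "GET_VERSION now encodes as 0x8311 on the wire");

The DPRC header below applies the same scheme through an identical DPRC_CMD() macro.
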
+ * +- * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any +@@ -42,48 +41,39 @@ + #define _FSL_DPRC_CMD_H + + /* Minimal supported DPRC Version */ +-#define DPRC_MIN_VER_MAJOR 5 ++#define DPRC_MIN_VER_MAJOR 6 + #define DPRC_MIN_VER_MINOR 0 + +-/* Command IDs */ +-#define DPRC_CMDID_CLOSE 0x800 +-#define DPRC_CMDID_OPEN 0x805 +-#define DPRC_CMDID_CREATE 0x905 +- +-#define DPRC_CMDID_GET_ATTR 0x004 +-#define DPRC_CMDID_RESET_CONT 0x005 +- +-#define DPRC_CMDID_SET_IRQ 0x010 +-#define DPRC_CMDID_GET_IRQ 0x011 +-#define DPRC_CMDID_SET_IRQ_ENABLE 0x012 +-#define DPRC_CMDID_GET_IRQ_ENABLE 0x013 +-#define DPRC_CMDID_SET_IRQ_MASK 0x014 +-#define DPRC_CMDID_GET_IRQ_MASK 0x015 +-#define DPRC_CMDID_GET_IRQ_STATUS 0x016 +-#define DPRC_CMDID_CLEAR_IRQ_STATUS 0x017 +- +-#define DPRC_CMDID_CREATE_CONT 0x151 +-#define DPRC_CMDID_DESTROY_CONT 0x152 +-#define DPRC_CMDID_SET_RES_QUOTA 0x155 +-#define DPRC_CMDID_GET_RES_QUOTA 0x156 +-#define DPRC_CMDID_ASSIGN 0x157 +-#define DPRC_CMDID_UNASSIGN 0x158 +-#define DPRC_CMDID_GET_OBJ_COUNT 0x159 +-#define DPRC_CMDID_GET_OBJ 0x15A +-#define DPRC_CMDID_GET_RES_COUNT 0x15B +-#define DPRC_CMDID_GET_RES_IDS 0x15C +-#define DPRC_CMDID_GET_OBJ_REG 0x15E +-#define DPRC_CMDID_SET_OBJ_IRQ 0x15F +-#define DPRC_CMDID_GET_OBJ_IRQ 0x160 +-#define DPRC_CMDID_SET_OBJ_LABEL 0x161 +-#define DPRC_CMDID_GET_OBJ_DESC 0x162 +- +-#define DPRC_CMDID_CONNECT 0x167 +-#define DPRC_CMDID_DISCONNECT 0x168 +-#define DPRC_CMDID_GET_POOL 0x169 +-#define DPRC_CMDID_GET_POOL_COUNT 0x16A ++/* Command versioning */ ++#define DPRC_CMD_BASE_VERSION 1 ++#define DPRC_CMD_ID_OFFSET 4 + +-#define DPRC_CMDID_GET_CONNECTION 0x16C ++#define DPRC_CMD(id) ((id << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION) ++ ++/* Command IDs */ ++#define DPRC_CMDID_CLOSE DPRC_CMD(0x800) ++#define DPRC_CMDID_OPEN DPRC_CMD(0x805) ++#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05) ++ ++#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004) ++#define DPRC_CMDID_RESET_CONT DPRC_CMD(0x005) ++ ++#define DPRC_CMDID_SET_IRQ DPRC_CMD(0x010) ++#define DPRC_CMDID_GET_IRQ DPRC_CMD(0x011) ++#define DPRC_CMDID_SET_IRQ_ENABLE DPRC_CMD(0x012) ++#define DPRC_CMDID_GET_IRQ_ENABLE DPRC_CMD(0x013) ++#define DPRC_CMDID_SET_IRQ_MASK DPRC_CMD(0x014) ++#define DPRC_CMDID_GET_IRQ_MASK DPRC_CMD(0x015) ++#define DPRC_CMDID_GET_IRQ_STATUS DPRC_CMD(0x016) ++#define DPRC_CMDID_CLEAR_IRQ_STATUS DPRC_CMD(0x017) ++ ++#define DPRC_CMDID_GET_CONT_ID DPRC_CMD(0x830) ++#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159) ++#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A) ++#define DPRC_CMDID_GET_RES_COUNT DPRC_CMD(0x15B) ++#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E) ++#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F) ++#define DPRC_CMDID_GET_OBJ_IRQ DPRC_CMD(0x160) + + struct dprc_cmd_open { + __le32 container_id; +@@ -199,9 +189,6 @@ struct dprc_rsp_get_attributes { + /* response word 1 */ + __le32 options; + __le32 portal_id; +- /* response word 2 */ +- __le16 version_major; +- __le16 version_minor; + }; + + struct dprc_cmd_set_res_quota { +@@ -367,11 +354,16 @@ struct dprc_cmd_get_obj_region { + + struct dprc_rsp_get_obj_region { + /* response word 0 */ +- __le64 pad; ++ __le64 pad0; + /* response word 1 */ +- __le64 base_addr; ++ __le32 base_addr; ++ __le32 pad1; + /* response word 2 */ + __le32 size; ++ u8 type; ++ u8 pad2[3]; ++ /* response word 3 */ ++ __le32 flags; + }; + + struct dprc_cmd_set_obj_label { 
+--- a/drivers/staging/fsl-mc/bus/dprc-driver.c ++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c +@@ -1,7 +1,7 @@ + /* + * Freescale data path resource container (DPRC) driver + * +- * Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. + * Author: German Rivera + * + * This file is licensed under the terms of the GNU General Public +@@ -160,6 +160,8 @@ static void check_plugged_state_change(s + * dprc_add_new_devices - Adds devices to the logical bus for a DPRC + * + * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object ++ * @driver_override: driver override to apply to new objects found in the ++ * DPRC, or NULL, if none. + * @obj_desc_array: array of device descriptors for child devices currently + * present in the physical DPRC. + * @num_child_objects_in_mc: number of entries in obj_desc_array +@@ -169,6 +171,7 @@ static void check_plugged_state_change(s + * in the physical DPRC. + */ + static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev, ++ const char *driver_override, + struct dprc_obj_desc *obj_desc_array, + int num_child_objects_in_mc) + { +@@ -188,11 +191,12 @@ static void dprc_add_new_devices(struct + child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev); + if (child_dev) { + check_plugged_state_change(child_dev, obj_desc); ++ put_device(&child_dev->dev); + continue; + } + + error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev, +- &child_dev); ++ driver_override, &child_dev); + if (error < 0) + continue; + } +@@ -202,6 +206,8 @@ static void dprc_add_new_devices(struct + * dprc_scan_objects - Discover objects in a DPRC + * + * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object ++ * @driver_override: driver override to apply to new objects found in the ++ * DPRC, or NULL, if none. + * @total_irq_count: total number of IRQs needed by objects in the DPRC. + * + * Detects objects added and removed from a DPRC and synchronizes the +@@ -217,6 +223,7 @@ static void dprc_add_new_devices(struct + * of the device drivers for the non-allocatable devices. 
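Two things change in this scan path. First, judging by the added put_device(), fsl_mc_device_lookup() hands back a referenced device, so the reference that previously leaked whenever a child was already known is now dropped. Second, a driver_override string is threaded down so objects discovered during a scan can be force-bound to one driver. A sketch of the two ways the new parameter is used; "vfio-fsl-mc" is a hypothetical override name:

unsigned int irq_count;
int err;

/* normal discovery: children bind through the driver match tables */
err = dprc_scan_objects(mc_bus_dev, NULL, &irq_count);

/* forced binding: every newly found object is bound to the named driver */
err = dprc_scan_objects(mc_bus_dev, "vfio-fsl-mc", &irq_count);
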
+ */ + int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev, ++ const char *driver_override, + unsigned int *total_irq_count) + { + int num_child_objects; +@@ -297,7 +304,7 @@ int dprc_scan_objects(struct fsl_mc_devi + dprc_remove_devices(mc_bus_dev, child_obj_desc_array, + num_child_objects); + +- dprc_add_new_devices(mc_bus_dev, child_obj_desc_array, ++ dprc_add_new_devices(mc_bus_dev, driver_override, child_obj_desc_array, + num_child_objects); + + if (child_obj_desc_array) +@@ -328,7 +335,7 @@ int dprc_scan_container(struct fsl_mc_de + * Discover objects in the DPRC: + */ + mutex_lock(&mc_bus->scan_mutex); +- error = dprc_scan_objects(mc_bus_dev, &irq_count); ++ error = dprc_scan_objects(mc_bus_dev, NULL, &irq_count); + mutex_unlock(&mc_bus->scan_mutex); + if (error < 0) + goto error; +@@ -415,7 +422,7 @@ static irqreturn_t dprc_irq0_handler_thr + DPRC_IRQ_EVENT_OBJ_CREATED)) { + unsigned int irq_count; + +- error = dprc_scan_objects(mc_dev, &irq_count); ++ error = dprc_scan_objects(mc_dev, NULL, &irq_count); + if (error < 0) { + /* + * If the error is -ENXIO, we ignore it, as it indicates +@@ -505,7 +512,7 @@ static int register_dprc_irq_handler(str + dprc_irq0_handler, + dprc_irq0_handler_thread, + IRQF_NO_SUSPEND | IRQF_ONESHOT, +- "FSL MC DPRC irq0", ++ dev_name(&mc_dev->dev), + &mc_dev->dev); + if (error < 0) { + dev_err(&mc_dev->dev, +@@ -597,6 +604,7 @@ static int dprc_probe(struct fsl_mc_devi + struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); + bool mc_io_created = false; + bool msi_domain_set = false; ++ u16 major_ver, minor_ver; + + if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0)) + return -EINVAL; +@@ -669,13 +677,21 @@ static int dprc_probe(struct fsl_mc_devi + goto error_cleanup_open; + } + +- if (mc_bus->dprc_attr.version.major < DPRC_MIN_VER_MAJOR || +- (mc_bus->dprc_attr.version.major == DPRC_MIN_VER_MAJOR && +- mc_bus->dprc_attr.version.minor < DPRC_MIN_VER_MINOR)) { ++ error = dprc_get_api_version(mc_dev->mc_io, 0, ++ &major_ver, ++ &minor_ver); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, "dprc_get_api_version() failed: %d\n", ++ error); ++ goto error_cleanup_open; ++ } ++ ++ if (major_ver < DPRC_MIN_VER_MAJOR || ++ (major_ver == DPRC_MIN_VER_MAJOR && ++ minor_ver < DPRC_MIN_VER_MINOR)) { + dev_err(&mc_dev->dev, + "ERROR: DPRC version %d.%d not supported\n", +- mc_bus->dprc_attr.version.major, +- mc_bus->dprc_attr.version.minor); ++ major_ver, minor_ver); + error = -ENOTSUPP; + goto error_cleanup_open; + } +--- a/drivers/staging/fsl-mc/bus/dprc.c ++++ b/drivers/staging/fsl-mc/bus/dprc.c +@@ -1,4 +1,5 @@ +-/* Copyright 2013-2016 Freescale Semiconductor Inc. ++/* ++ * Copyright 2013-2016 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: +@@ -11,7 +12,6 @@ + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
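dprc_probe() now queries the firmware for the DPRC API version instead of trusting a version cached in the attributes, and rejects anything older than DPRC_MIN_VER_MAJOR.DPRC_MIN_VER_MINOR (6.0 after this patch). The gate restated as a small predicate; the helper name is hypothetical:

static bool dprc_api_supported(u16 major, u16 minor)
{
	if (major != DPRC_MIN_VER_MAJOR)
		return major > DPRC_MIN_VER_MAJOR;
	return minor >= DPRC_MIN_VER_MINOR;
}
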
+ * +- * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any +@@ -100,93 +100,6 @@ int dprc_close(struct fsl_mc_io *mc_io, + EXPORT_SYMBOL(dprc_close); + + /** +- * dprc_create_container() - Create child container +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPRC object +- * @cfg: Child container configuration +- * @child_container_id: Returned child container ID +- * @child_portal_offset: Returned child portal offset from MC portal base +- * +- * Return: '0' on Success; Error code otherwise. +- */ +-int dprc_create_container(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- struct dprc_cfg *cfg, +- int *child_container_id, +- u64 *child_portal_offset) +-{ +- struct mc_command cmd = { 0 }; +- struct dprc_cmd_create_container *cmd_params; +- struct dprc_rsp_create_container *rsp_params; +- int err; +- +- /* prepare command */ +- cmd_params = (struct dprc_cmd_create_container *)cmd.params; +- cmd_params->options = cpu_to_le32(cfg->options); +- cmd_params->icid = cpu_to_le16(cfg->icid); +- cmd_params->portal_id = cpu_to_le32(cfg->portal_id); +- strncpy(cmd_params->label, cfg->label, 16); +- cmd_params->label[15] = '\0'; +- +- cmd.header = mc_encode_cmd_header(DPRC_CMDID_CREATE_CONT, +- cmd_flags, token); +- +- /* send command to mc*/ +- err = mc_send_command(mc_io, &cmd); +- if (err) +- return err; +- +- /* retrieve response parameters */ +- rsp_params = (struct dprc_rsp_create_container *)cmd.params; +- *child_container_id = le32_to_cpu(rsp_params->child_container_id); +- *child_portal_offset = le64_to_cpu(rsp_params->child_portal_addr); +- +- return 0; +-} +- +-/** +- * dprc_destroy_container() - Destroy child container. +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPRC object +- * @child_container_id: ID of the container to destroy +- * +- * This function terminates the child container, so following this call the +- * child container ID becomes invalid. +- * +- * Notes: +- * - All resources and objects of the destroyed container are returned to the +- * parent container or destroyed if were created be the destroyed container. +- * - This function destroy all the child containers of the specified +- * container prior to destroying the container itself. +- * +- * warning: Only the parent container is allowed to destroy a child policy +- * Container 0 can't be destroyed +- * +- * Return: '0' on Success; Error code otherwise. +- * +- */ +-int dprc_destroy_container(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- int child_container_id) +-{ +- struct mc_command cmd = { 0 }; +- struct dprc_cmd_destroy_container *cmd_params; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPRC_CMDID_DESTROY_CONT, +- cmd_flags, token); +- cmd_params = (struct dprc_cmd_destroy_container *)cmd.params; +- cmd_params->child_container_id = cpu_to_le32(child_container_id); +- +- /* send command to mc*/ +- return mc_send_command(mc_io, &cmd); +-} +- +-/** + * dprc_reset_container - Reset child container. 
+ * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +@@ -565,279 +478,6 @@ int dprc_get_attributes(struct fsl_mc_io + attr->icid = le16_to_cpu(rsp_params->icid); + attr->options = le32_to_cpu(rsp_params->options); + attr->portal_id = le32_to_cpu(rsp_params->portal_id); +- attr->version.major = le16_to_cpu(rsp_params->version_major); +- attr->version.minor = le16_to_cpu(rsp_params->version_minor); +- +- return 0; +-} +- +-/** +- * dprc_set_res_quota() - Set allocation policy for a specific resource/object +- * type in a child container +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPRC object +- * @child_container_id: ID of the child container +- * @type: Resource/object type +- * @quota: Sets the maximum number of resources of the selected type +- * that the child container is allowed to allocate from its parent; +- * when quota is set to -1, the policy is the same as container's +- * general policy. +- * +- * Allocation policy determines whether or not a container may allocate +- * resources from its parent. Each container has a 'global' allocation policy +- * that is set when the container is created. +- * +- * This function sets allocation policy for a specific resource type. +- * The default policy for all resource types matches the container's 'global' +- * allocation policy. +- * +- * Return: '0' on Success; Error code otherwise. +- * +- * @warning Only the parent container is allowed to change a child policy. +- */ +-int dprc_set_res_quota(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- int child_container_id, +- char *type, +- u16 quota) +-{ +- struct mc_command cmd = { 0 }; +- struct dprc_cmd_set_res_quota *cmd_params; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_RES_QUOTA, +- cmd_flags, token); +- cmd_params = (struct dprc_cmd_set_res_quota *)cmd.params; +- cmd_params->child_container_id = cpu_to_le32(child_container_id); +- cmd_params->quota = cpu_to_le16(quota); +- strncpy(cmd_params->type, type, 16); +- cmd_params->type[15] = '\0'; +- +- /* send command to mc*/ +- return mc_send_command(mc_io, &cmd); +-} +- +-/** +- * dprc_get_res_quota() - Gets the allocation policy of a specific +- * resource/object type in a child container +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPRC object +- * @child_container_id; ID of the child container +- * @type: resource/object type +- * @quota: Returnes the maximum number of resources of the selected type +- * that the child container is allowed to allocate from the parent; +- * when quota is set to -1, the policy is the same as container's +- * general policy. +- * +- * Return: '0' on Success; Error code otherwise. 
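All of these command marshalers use the same idiom for the fixed 16-byte type/label fields: strncpy() zero-pads short names, and the explicit terminator write covers the one case strncpy() leaves open, a name that fills all 16 bytes. The pattern in isolation, with "dpbp" standing in for any object type:

char type[16];

strncpy(type, "dpbp", sizeof(type));	/* zero-pads the unused tail */
type[sizeof(type) - 1] = '\0';		/* re-terminates a full-length name */
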
+- */ +-int dprc_get_res_quota(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- int child_container_id, +- char *type, +- u16 *quota) +-{ +- struct mc_command cmd = { 0 }; +- struct dprc_cmd_get_res_quota *cmd_params; +- struct dprc_rsp_get_res_quota *rsp_params; +- int err; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_QUOTA, +- cmd_flags, token); +- cmd_params = (struct dprc_cmd_get_res_quota *)cmd.params; +- cmd_params->child_container_id = cpu_to_le32(child_container_id); +- strncpy(cmd_params->type, type, 16); +- cmd_params->type[15] = '\0'; +- +- /* send command to mc*/ +- err = mc_send_command(mc_io, &cmd); +- if (err) +- return err; +- +- /* retrieve response parameters */ +- rsp_params = (struct dprc_rsp_get_res_quota *)cmd.params; +- *quota = le16_to_cpu(rsp_params->quota); +- +- return 0; +-} +- +-/** +- * dprc_assign() - Assigns objects or resource to a child container. +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPRC object +- * @container_id: ID of the child container +- * @res_req: Describes the type and amount of resources to +- * assign to the given container +- * +- * Assignment is usually done by a parent (this DPRC) to one of its child +- * containers. +- * +- * According to the DPRC allocation policy, the assigned resources may be taken +- * (allocated) from the container's ancestors, if not enough resources are +- * available in the container itself. +- * +- * The type of assignment depends on the dprc_res_req options, as follows: +- * - DPRC_RES_REQ_OPT_EXPLICIT: indicates that assigned resources should have +- * the explicit base ID specified at the id_base_align field of res_req. +- * - DPRC_RES_REQ_OPT_ALIGNED: indicates that the assigned resources should be +- * aligned to the value given at id_base_align field of res_req. +- * - DPRC_RES_REQ_OPT_PLUGGED: Relevant only for object assignment, +- * and indicates that the object must be set to the plugged state. +- * +- * A container may use this function with its own ID in order to change a +- * object state to plugged or unplugged. +- * +- * If IRQ information has been set in the child DPRC, it will signal an +- * interrupt following every change in its object assignment. +- * +- * Return: '0' on Success; Error code otherwise. +- */ +-int dprc_assign(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- int container_id, +- struct dprc_res_req *res_req) +-{ +- struct mc_command cmd = { 0 }; +- struct dprc_cmd_assign *cmd_params; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPRC_CMDID_ASSIGN, +- cmd_flags, token); +- cmd_params = (struct dprc_cmd_assign *)cmd.params; +- cmd_params->container_id = cpu_to_le32(container_id); +- cmd_params->options = cpu_to_le32(res_req->options); +- cmd_params->num = cpu_to_le32(res_req->num); +- cmd_params->id_base_align = cpu_to_le32(res_req->id_base_align); +- strncpy(cmd_params->type, res_req->type, 16); +- cmd_params->type[15] = '\0'; +- +- /* send command to mc*/ +- return mc_send_command(mc_io, &cmd); +-} +- +-/** +- * dprc_unassign() - Un-assigns objects or resources from a child container +- * and moves them into this (parent) DPRC. 
+- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPRC object +- * @child_container_id: ID of the child container +- * @res_req: Describes the type and amount of resources to un-assign from +- * the child container +- * +- * Un-assignment of objects can succeed only if the object is not in the +- * plugged or opened state. +- * +- * Return: '0' on Success; Error code otherwise. +- */ +-int dprc_unassign(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- int child_container_id, +- struct dprc_res_req *res_req) +-{ +- struct mc_command cmd = { 0 }; +- struct dprc_cmd_unassign *cmd_params; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPRC_CMDID_UNASSIGN, +- cmd_flags, +- token); +- cmd_params = (struct dprc_cmd_unassign *)cmd.params; +- cmd_params->child_container_id = cpu_to_le32(child_container_id); +- cmd_params->options = cpu_to_le32(res_req->options); +- cmd_params->num = cpu_to_le32(res_req->num); +- cmd_params->id_base_align = cpu_to_le32(res_req->id_base_align); +- strncpy(cmd_params->type, res_req->type, 16); +- cmd_params->type[15] = '\0'; +- +- /* send command to mc*/ +- return mc_send_command(mc_io, &cmd); +-} +- +-/** +- * dprc_get_pool_count() - Get the number of dprc's pools +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @mc_io: Pointer to MC portal's I/O object +- * @token: Token of DPRC object +- * @pool_count: Returned number of resource pools in the dprc +- * +- * Return: '0' on Success; Error code otherwise. +- */ +-int dprc_get_pool_count(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- int *pool_count) +-{ +- struct mc_command cmd = { 0 }; +- struct dprc_rsp_get_pool_count *rsp_params; +- int err; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL_COUNT, +- cmd_flags, token); +- +- /* send command to mc*/ +- err = mc_send_command(mc_io, &cmd); +- if (err) +- return err; +- +- /* retrieve response parameters */ +- rsp_params = (struct dprc_rsp_get_pool_count *)cmd.params; +- *pool_count = le32_to_cpu(rsp_params->pool_count); +- +- return 0; +-} +- +-/** +- * dprc_get_pool() - Get the type (string) of a certain dprc's pool +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPRC object +- * @pool_index; Index of the pool to be queried (< pool_count) +- * @type: The type of the pool +- * +- * The pool types retrieved one by one by incrementing +- * pool_index up to (not including) the value of pool_count returned +- * from dprc_get_pool_count(). dprc_get_pool_count() must +- * be called prior to dprc_get_pool(). +- * +- * Return: '0' on Success; Error code otherwise. 
+- */ +-int dprc_get_pool(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- int pool_index, +- char *type) +-{ +- struct mc_command cmd = { 0 }; +- struct dprc_cmd_get_pool *cmd_params; +- struct dprc_rsp_get_pool *rsp_params; +- int err; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL, +- cmd_flags, +- token); +- cmd_params = (struct dprc_cmd_get_pool *)cmd.params; +- cmd_params->pool_index = cpu_to_le32(pool_index); +- +- /* send command to mc*/ +- err = mc_send_command(mc_io, &cmd); +- if (err) +- return err; +- +- /* retrieve response parameters */ +- rsp_params = (struct dprc_rsp_get_pool *)cmd.params; +- strncpy(type, rsp_params->type, 16); +- type[15] = '\0'; + + return 0; + } +@@ -934,64 +574,6 @@ int dprc_get_obj(struct fsl_mc_io *mc_io + EXPORT_SYMBOL(dprc_get_obj); + + /** +- * dprc_get_obj_desc() - Get object descriptor. +- * +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPRC object +- * @obj_type: The type of the object to get its descriptor. +- * @obj_id: The id of the object to get its descriptor +- * @obj_desc: The returned descriptor to fill and return to the user +- * +- * Return: '0' on Success; Error code otherwise. +- * +- */ +-int dprc_get_obj_desc(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- char *obj_type, +- int obj_id, +- struct dprc_obj_desc *obj_desc) +-{ +- struct mc_command cmd = { 0 }; +- struct dprc_cmd_get_obj_desc *cmd_params; +- struct dprc_rsp_get_obj_desc *rsp_params; +- int err; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_DESC, +- cmd_flags, +- token); +- cmd_params = (struct dprc_cmd_get_obj_desc *)cmd.params; +- cmd_params->obj_id = cpu_to_le32(obj_id); +- strncpy(cmd_params->type, obj_type, 16); +- cmd_params->type[15] = '\0'; +- +- /* send command to mc*/ +- err = mc_send_command(mc_io, &cmd); +- if (err) +- return err; +- +- /* retrieve response parameters */ +- rsp_params = (struct dprc_rsp_get_obj_desc *)cmd.params; +- obj_desc->id = le32_to_cpu(rsp_params->id); +- obj_desc->vendor = le16_to_cpu(rsp_params->vendor); +- obj_desc->irq_count = rsp_params->irq_count; +- obj_desc->region_count = rsp_params->region_count; +- obj_desc->state = le32_to_cpu(rsp_params->state); +- obj_desc->ver_major = le16_to_cpu(rsp_params->version_major); +- obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor); +- obj_desc->flags = le16_to_cpu(rsp_params->flags); +- strncpy(obj_desc->type, rsp_params->type, 16); +- obj_desc->type[15] = '\0'; +- strncpy(obj_desc->label, rsp_params->label, 16); +- obj_desc->label[15] = '\0'; +- +- return 0; +-} +-EXPORT_SYMBOL(dprc_get_obj_desc); +- +-/** + * dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +@@ -1130,52 +712,6 @@ int dprc_get_res_count(struct fsl_mc_io + EXPORT_SYMBOL(dprc_get_res_count); + + /** +- * dprc_get_res_ids() - Obtains IDs of free resources in the container +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPRC object +- * @type: pool type +- * @range_desc: range descriptor +- * +- * Return: '0' on Success; Error code otherwise. 
+- */ +-int dprc_get_res_ids(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- char *type, +- struct dprc_res_ids_range_desc *range_desc) +-{ +- struct mc_command cmd = { 0 }; +- struct dprc_cmd_get_res_ids *cmd_params; +- struct dprc_rsp_get_res_ids *rsp_params; +- int err; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_IDS, +- cmd_flags, token); +- cmd_params = (struct dprc_cmd_get_res_ids *)cmd.params; +- cmd_params->iter_status = range_desc->iter_status; +- cmd_params->base_id = cpu_to_le32(range_desc->base_id); +- cmd_params->last_id = cpu_to_le32(range_desc->last_id); +- strncpy(cmd_params->type, type, 16); +- cmd_params->type[15] = '\0'; +- +- /* send command to mc*/ +- err = mc_send_command(mc_io, &cmd); +- if (err) +- return err; +- +- /* retrieve response parameters */ +- rsp_params = (struct dprc_rsp_get_res_ids *)cmd.params; +- range_desc->iter_status = rsp_params->iter_status; +- range_desc->base_id = le32_to_cpu(rsp_params->base_id); +- range_desc->last_id = le32_to_cpu(rsp_params->last_id); +- +- return 0; +-} +-EXPORT_SYMBOL(dprc_get_res_ids); +- +-/** + * dprc_get_obj_region() - Get region information for a specified object. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +@@ -1216,160 +752,66 @@ int dprc_get_obj_region(struct fsl_mc_io + + /* retrieve response parameters */ + rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params; +- region_desc->base_offset = le64_to_cpu(rsp_params->base_addr); ++ region_desc->base_offset = le32_to_cpu(rsp_params->base_addr); + region_desc->size = le32_to_cpu(rsp_params->size); ++ region_desc->type = rsp_params->type; ++ region_desc->flags = le32_to_cpu(rsp_params->flags); + + return 0; + } + EXPORT_SYMBOL(dprc_get_obj_region); + + /** +- * dprc_set_obj_label() - Set object label. +- * @mc_io: Pointer to MC portal's I/O object ++ * dprc_get_api_version - Get Data Path Resource Container API version ++ * @mc_io: Pointer to Mc portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPRC object +- * @obj_type: Object's type +- * @obj_id: Object's ID +- * @label: The required label. The maximum length is 16 chars. ++ * @major_ver: Major version of Data Path Resource Container API ++ * @minor_ver: Minor version of Data Path Resource Container API + * + * Return: '0' on Success; Error code otherwise. 
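The GET_OBJ_REGION parsing above matches the reshaped struct dprc_rsp_get_obj_region from dprc-cmd.h: the base address shrinks to the low 32 bits of response word 1, and words 2 and 3 gain a region type byte and a flags word. The layout, annotated with byte offsets into cmd.params; the meaning of the type and flags values is not spelled out in this patch:

struct dprc_rsp_get_obj_region {
	__le64 pad0;		/* bytes  0-7:  word 0, unused */
	__le32 base_addr;	/* bytes  8-11: region base offset */
	__le32 pad1;		/* bytes 12-15 */
	__le32 size;		/* bytes 16-19: region size */
	u8     type;		/* byte  20:    region type */
	u8     pad2[3];		/* bytes 21-23 */
	__le32 flags;		/* bytes 24-27: region flags */
};
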
+ */ +-int dprc_set_obj_label(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- char *obj_type, +- int obj_id, +- char *label) ++int dprc_get_api_version(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 *major_ver, ++ u16 *minor_ver) + { + struct mc_command cmd = { 0 }; +- struct dprc_cmd_set_obj_label *cmd_params; ++ int err; + + /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_LABEL, +- cmd_flags, +- token); +- cmd_params = (struct dprc_cmd_set_obj_label *)cmd.params; +- cmd_params->obj_id = cpu_to_le32(obj_id); +- strncpy(cmd_params->label, label, 16); +- cmd_params->label[15] = '\0'; +- strncpy(cmd_params->obj_type, obj_type, 16); +- cmd_params->obj_type[15] = '\0'; ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_API_VERSION, ++ cmd_flags, 0); + +- /* send command to mc*/ +- return mc_send_command(mc_io, &cmd); +-} +-EXPORT_SYMBOL(dprc_set_obj_label); +- +-/** +- * dprc_connect() - Connect two endpoints to create a network link between them +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPRC object +- * @endpoint1: Endpoint 1 configuration parameters +- * @endpoint2: Endpoint 2 configuration parameters +- * @cfg: Connection configuration. The connection configuration is ignored for +- * connections made to DPMAC objects, where rate is retrieved from the +- * MAC configuration. +- * +- * Return: '0' on Success; Error code otherwise. +- */ +-int dprc_connect(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- const struct dprc_endpoint *endpoint1, +- const struct dprc_endpoint *endpoint2, +- const struct dprc_connection_cfg *cfg) +-{ +- struct mc_command cmd = { 0 }; +- struct dprc_cmd_connect *cmd_params; ++ /* send command to mc */ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; + +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPRC_CMDID_CONNECT, +- cmd_flags, +- token); +- cmd_params = (struct dprc_cmd_connect *)cmd.params; +- cmd_params->ep1_id = cpu_to_le32(endpoint1->id); +- cmd_params->ep1_interface_id = cpu_to_le32(endpoint1->if_id); +- cmd_params->ep2_id = cpu_to_le32(endpoint2->id); +- cmd_params->ep2_interface_id = cpu_to_le32(endpoint2->if_id); +- strncpy(cmd_params->ep1_type, endpoint1->type, 16); +- cmd_params->ep1_type[15] = '\0'; +- cmd_params->max_rate = cpu_to_le32(cfg->max_rate); +- cmd_params->committed_rate = cpu_to_le32(cfg->committed_rate); +- strncpy(cmd_params->ep2_type, endpoint2->type, 16); +- cmd_params->ep2_type[15] = '\0'; ++ /* retrieve response parameters */ ++ mc_cmd_read_api_version(&cmd, major_ver, minor_ver); + +- /* send command to mc*/ +- return mc_send_command(mc_io, &cmd); ++ return 0; + } + + /** +- * dprc_disconnect() - Disconnect one endpoint to remove its network connection +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPRC object +- * @endpoint: Endpoint configuration parameters ++ * dprc_get_container_id - Get container ID associated with a given portal. ++ * @mc_io: Pointer to Mc portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @container_id: Requested container id + * + * Return: '0' on Success; Error code otherwise. 
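dprc_get_container_id() takes over from the deleted dpmng_get_container_id() and, like the *_get_api_version() calls, is sent with a zero token because the portal itself identifies the container. The bus probe path in fsl-mc-bus.c further below switches over to it:

int container_id;
int err;

err = dprc_get_container_id(mc_io, 0, &container_id);
if (err < 0)
	dev_err(&pdev->dev, "dprc_get_container_id() failed: %d\n", err);
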
+ */ +-int dprc_disconnect(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- const struct dprc_endpoint *endpoint) +-{ +- struct mc_command cmd = { 0 }; +- struct dprc_cmd_disconnect *cmd_params; +- +- /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPRC_CMDID_DISCONNECT, +- cmd_flags, +- token); +- cmd_params = (struct dprc_cmd_disconnect *)cmd.params; +- cmd_params->id = cpu_to_le32(endpoint->id); +- cmd_params->interface_id = cpu_to_le32(endpoint->if_id); +- strncpy(cmd_params->type, endpoint->type, 16); +- cmd_params->type[15] = '\0'; +- +- /* send command to mc*/ +- return mc_send_command(mc_io, &cmd); +-} +- +-/** +- * dprc_get_connection() - Get connected endpoint and link status if connection +- * exists. +- * @mc_io: Pointer to MC portal's I/O object +- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' +- * @token: Token of DPRC object +- * @endpoint1: Endpoint 1 configuration parameters +- * @endpoint2: Returned endpoint 2 configuration parameters +- * @state: Returned link state: +- * 1 - link is up; +- * 0 - link is down; +- * -1 - no connection (endpoint2 information is irrelevant) +- * +- * Return: '0' on Success; -ENAVAIL if connection does not exist. +- */ +-int dprc_get_connection(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- const struct dprc_endpoint *endpoint1, +- struct dprc_endpoint *endpoint2, +- int *state) ++int dprc_get_container_id(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ int *container_id) + { + struct mc_command cmd = { 0 }; +- struct dprc_cmd_get_connection *cmd_params; +- struct dprc_rsp_get_connection *rsp_params; + int err; + + /* prepare command */ +- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION, ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID, + cmd_flags, +- token); +- cmd_params = (struct dprc_cmd_get_connection *)cmd.params; +- cmd_params->ep1_id = cpu_to_le32(endpoint1->id); +- cmd_params->ep1_interface_id = cpu_to_le32(endpoint1->if_id); +- strncpy(cmd_params->ep1_type, endpoint1->type, 16); +- cmd_params->ep1_type[15] = '\0'; ++ 0); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); +@@ -1377,12 +819,7 @@ int dprc_get_connection(struct fsl_mc_io + return err; + + /* retrieve response parameters */ +- rsp_params = (struct dprc_rsp_get_connection *)cmd.params; +- endpoint2->id = le32_to_cpu(rsp_params->ep2_id); +- endpoint2->if_id = le32_to_cpu(rsp_params->ep2_interface_id); +- strncpy(endpoint2->type, rsp_params->ep2_type, 16); +- endpoint2->type[15] = '\0'; +- *state = le32_to_cpu(rsp_params->state); ++ *container_id = (int)mc_cmd_read_object_id(&cmd); + + return 0; + } +--- a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c ++++ b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c +@@ -1,7 +1,7 @@ + /* +- * Freescale MC object device allocator driver ++ * fsl-mc object allocator driver + * +- * Copyright (C) 2013 Freescale Semiconductor, Inc. ++ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. 
This program is licensed "as is" without any +@@ -12,9 +12,9 @@ + #include + #include "../include/mc-bus.h" + #include "../include/mc-sys.h" +-#include "../include/dpbp-cmd.h" +-#include "../include/dpcon-cmd.h" + ++#include "dpbp-cmd.h" ++#include "dpcon-cmd.h" + #include "fsl-mc-private.h" + + #define FSL_MC_IS_ALLOCATABLE(_obj_type) \ +@@ -23,15 +23,12 @@ + strcmp(_obj_type, "dpcon") == 0) + + /** +- * fsl_mc_resource_pool_add_device - add allocatable device to a resource +- * pool of a given MC bus ++ * fsl_mc_resource_pool_add_device - add allocatable object to a resource ++ * pool of a given fsl-mc bus + * +- * @mc_bus: pointer to the MC bus +- * @pool_type: MC bus pool type +- * @mc_dev: Pointer to allocatable MC object device +- * +- * It adds an allocatable MC object device to a container's resource pool of +- * the given resource type ++ * @mc_bus: pointer to the fsl-mc bus ++ * @pool_type: pool type ++ * @mc_dev: pointer to allocatable fsl-mc device + */ + static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus + *mc_bus, +@@ -95,10 +92,10 @@ out: + * fsl_mc_resource_pool_remove_device - remove an allocatable device from a + * resource pool + * +- * @mc_dev: Pointer to allocatable MC object device ++ * @mc_dev: pointer to allocatable fsl-mc device + * +- * It permanently removes an allocatable MC object device from the resource +- * pool, the device is currently in, as long as it is in the pool's free list. ++ * It permanently removes an allocatable fsl-mc device from the resource ++ * pool. It's an error if the device is in use. + */ + static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device + *mc_dev) +@@ -255,17 +252,18 @@ out_unlock: + EXPORT_SYMBOL_GPL(fsl_mc_resource_free); + + /** +- * fsl_mc_object_allocate - Allocates a MC object device of the given +- * pool type from a given MC bus ++ * fsl_mc_object_allocate - Allocates an fsl-mc object of the given ++ * pool type from a given fsl-mc bus instance + * +- * @mc_dev: MC device for which the MC object device is to be allocated +- * @pool_type: MC bus resource pool type +- * @new_mc_dev: Pointer to area where the pointer to the allocated +- * MC object device is to be returned ++ * @mc_dev: fsl-mc device which is used in conjunction with the ++ * allocated object ++ * @pool_type: pool type ++ * @new_mc_dev: pointer to area where the pointer to the allocated device ++ * is to be returned + * +- * This function allocates a MC object device from the device's parent DPRC, +- * from the corresponding MC bus' pool of allocatable MC object devices of +- * the given resource type. mc_dev cannot be a DPRC itself. ++ * Allocatable objects are always used in conjunction with some functional ++ * device. This function allocates an object of the specified type from ++ * the DPRC containing the functional device. + * + * NOTE: pool_type must be different from FSL_MC_POOL_MCP, since MC + * portals are allocated using fsl_mc_portal_allocate(), instead of +@@ -312,10 +310,9 @@ error: + EXPORT_SYMBOL_GPL(fsl_mc_object_allocate); + + /** +- * fsl_mc_object_free - Returns an allocatable MC object device to the +- * corresponding resource pool of a given MC bus. +- * +- * @mc_adev: Pointer to the MC object device ++ * fsl_mc_object_free - Returns an fsl-mc object to the resource ++ * pool where it came from. 
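The reworded comments describe the allocator contract: a functional driver never creates allocatable objects (dpbp, dpcon, ...) itself; it borrows them from the pool of the DPRC it lives in and must return them. A sketch of that lifecycle, assuming an FSL_MC_POOL_DPBP pool constant named after the FSL_MC_POOL_MCP constant mentioned above:

struct fsl_mc_device *dpbp_dev;
int err;

/* borrow a DPBP object from the parent container's pool */
err = fsl_mc_object_allocate(mc_dev, FSL_MC_POOL_DPBP, &dpbp_dev);
if (err)
	return err;

/* ... program the DPBP for this device's buffers ... */

/* return it to the pool on teardown */
fsl_mc_object_free(dpbp_dev);
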
++ * @mc_adev: Pointer to the fsl-mc device + */ + void fsl_mc_object_free(struct fsl_mc_device *mc_adev) + { +@@ -332,8 +329,14 @@ void fsl_mc_object_free(struct fsl_mc_de + EXPORT_SYMBOL_GPL(fsl_mc_object_free); + + /* +- * Initialize the interrupt pool associated with a MC bus. +- * It allocates a block of IRQs from the GIC-ITS ++ * A DPRC and the devices in the DPRC all share the same GIC-ITS device ++ * ID. A block of IRQs is pre-allocated and maintained in a pool ++ * from which devices can allocate them when needed. ++ */ ++ ++/* ++ * Initialize the interrupt pool associated with an fsl-mc bus. ++ * It allocates a block of IRQs from the GIC-ITS. + */ + int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus, + unsigned int irq_count) +@@ -395,7 +398,7 @@ cleanup_msi_irqs: + EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool); + + /** +- * Teardown the interrupt pool associated with an MC bus. ++ * Teardown the interrupt pool associated with an fsl-mc bus. + * It frees the IRQs that were allocated to the pool, back to the GIC-ITS. + */ + void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus) +@@ -422,11 +425,7 @@ void fsl_mc_cleanup_irq_pool(struct fsl_ + EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool); + + /** +- * It allocates the IRQs required by a given MC object device. The +- * IRQs are allocated from the interrupt pool associated with the +- * MC bus that contains the device, if the device is not a DPRC device. +- * Otherwise, the IRQs are allocated from the interrupt pool associated +- * with the MC bus that represents the DPRC device itself. ++ * Allocate the IRQs required by a given fsl-mc device. + */ + int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev) + { +@@ -495,8 +494,7 @@ error_resource_alloc: + EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs); + + /* +- * It frees the IRQs that were allocated for a MC object device, by +- * returning them to the corresponding interrupt pool. ++ * Frees the IRQs that were allocated for an fsl-mc device. + */ + void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev) + { +@@ -605,7 +603,7 @@ static int fsl_mc_allocator_probe(struct + return error; + + dev_dbg(&mc_dev->dev, +- "Allocatable MC object device bound to fsl_mc_allocator driver"); ++ "Allocatable fsl-mc device bound to fsl_mc_allocator driver"); + return 0; + } + +@@ -627,7 +625,7 @@ static int fsl_mc_allocator_remove(struc + } + + dev_dbg(&mc_dev->dev, +- "Allocatable MC object device unbound from fsl_mc_allocator driver"); ++ "Allocatable fsl-mc device unbound from fsl_mc_allocator driver"); + return 0; + } + +--- a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c ++++ b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c +@@ -1,7 +1,7 @@ + /* + * Freescale Management Complex (MC) bus driver + * +- * Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. + * Author: German Rivera + * + * This file is licensed under the terms of the GNU General Public +@@ -9,6 +9,8 @@ + * warranty of any kind, whether express or implied. 
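Before the fsl-mc-bus.c changes start, note that the allocator above also owns the container-wide interrupt pool: one GIC-ITS device ID covers the DPRC and everything inside it, so IRQs are reserved in a block and handed out per device. The lifecycle, using the entry points exported above, with error handling elided:

/* DPRC setup: reserve a block of GIC-ITS IRQs for the whole container */
err = fsl_mc_populate_irq_pool(mc_bus, irq_count);

/* child driver probe: draw this device's IRQs from the pool */
err = fsl_mc_allocate_irqs(mc_dev);

/* child driver remove: return them to the pool */
fsl_mc_free_irqs(mc_dev);

/* DPRC teardown: give the block back to the GIC-ITS */
fsl_mc_cleanup_irq_pool(mc_bus);
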
+ */ + ++#define pr_fmt(fmt) "fsl-mc: " fmt ++ + #include + #include + #include +@@ -25,8 +27,6 @@ + #include "fsl-mc-private.h" + #include "dprc-cmd.h" + +-static struct kmem_cache *mc_dev_cache; +- + /** + * Default DMA mask for devices on a fsl-mc bus + */ +@@ -34,7 +34,7 @@ static struct kmem_cache *mc_dev_cache; + + /** + * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device +- * @root_mc_bus_dev: MC object device representing the root DPRC ++ * @root_mc_bus_dev: fsl-mc device representing the root DPRC + * @num_translation_ranges: number of entries in addr_translation_ranges + * @translation_ranges: array of bus to system address translation ranges + */ +@@ -62,8 +62,8 @@ struct fsl_mc_addr_translation_range { + + /** + * fsl_mc_bus_match - device to driver matching callback +- * @dev: the MC object device structure to match against +- * @drv: the device driver to search for matching MC object device id ++ * @dev: the fsl-mc device to match against ++ * @drv: the device driver to search for matching fsl-mc object type + * structures + * + * Returns 1 on success, 0 otherwise. +@@ -75,8 +75,11 @@ static int fsl_mc_bus_match(struct devic + struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv); + bool found = false; + +- if (WARN_ON(!fsl_mc_bus_exists())) ++ /* When driver_override is set, only bind to the matching driver */ ++ if (mc_dev->driver_override) { ++ found = !strcmp(mc_dev->driver_override, mc_drv->driver.name); + goto out; ++ } + + if (!mc_drv->match_id_table) + goto out; +@@ -91,7 +94,7 @@ static int fsl_mc_bus_match(struct devic + + /* + * Traverse the match_id table of the given driver, trying to find +- * a matching for the given MC object device. ++ * a matching for the given device. + */ + for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) { + if (id->vendor == mc_dev->obj_desc.vendor && +@@ -132,23 +135,141 @@ static ssize_t modalias_show(struct devi + } + static DEVICE_ATTR_RO(modalias); + ++static ssize_t rescan_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ unsigned long val; ++ unsigned int irq_count; ++ struct fsl_mc_device *root_mc_dev; ++ struct fsl_mc_bus *root_mc_bus; ++ ++ if (!fsl_mc_is_root_dprc(dev)) ++ return -EINVAL; ++ ++ root_mc_dev = to_fsl_mc_device(dev); ++ root_mc_bus = to_fsl_mc_bus(root_mc_dev); ++ ++ if (kstrtoul(buf, 0, &val) < 0) ++ return -EINVAL; ++ ++ if (val) { ++ mutex_lock(&root_mc_bus->scan_mutex); ++ dprc_scan_objects(root_mc_dev, NULL, &irq_count); ++ mutex_unlock(&root_mc_bus->scan_mutex); ++ } ++ ++ return count; ++} ++static DEVICE_ATTR_WO(rescan); ++ ++static ssize_t driver_override_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); ++ const char *driver_override, *old = mc_dev->driver_override; ++ char *cp; ++ ++ if (WARN_ON(dev->bus != &fsl_mc_bus_type)) ++ return -EINVAL; ++ ++ if (count >= (PAGE_SIZE - 1)) ++ return -EINVAL; ++ ++ driver_override = kstrndup(buf, count, GFP_KERNEL); ++ if (!driver_override) ++ return -ENOMEM; ++ ++ cp = strchr(driver_override, '\n'); ++ if (cp) ++ *cp = '\0'; ++ ++ if (strlen(driver_override)) { ++ mc_dev->driver_override = driver_override; ++ } else { ++ kfree(driver_override); ++ mc_dev->driver_override = NULL; ++ } ++ ++ kfree(old); ++ ++ return count; ++} ++ ++static ssize_t driver_override_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct fsl_mc_device *mc_dev = 
to_fsl_mc_device(dev); ++ ++ return snprintf(buf, PAGE_SIZE, "%s\n", mc_dev->driver_override); ++} ++static DEVICE_ATTR_RW(driver_override); ++ + static struct attribute *fsl_mc_dev_attrs[] = { + &dev_attr_modalias.attr, ++ &dev_attr_rescan.attr, ++ &dev_attr_driver_override.attr, + NULL, + }; + + ATTRIBUTE_GROUPS(fsl_mc_dev); + ++static int scan_fsl_mc_bus(struct device *dev, void *data) ++{ ++ unsigned int irq_count; ++ struct fsl_mc_device *root_mc_dev; ++ struct fsl_mc_bus *root_mc_bus; ++ ++ if (fsl_mc_is_root_dprc(dev)) { ++ root_mc_dev = to_fsl_mc_device(dev); ++ root_mc_bus = to_fsl_mc_bus(root_mc_dev); ++ mutex_lock(&root_mc_bus->scan_mutex); ++ dprc_scan_objects(root_mc_dev, NULL, &irq_count); ++ mutex_unlock(&root_mc_bus->scan_mutex); ++ } ++ ++ return 0; ++} ++ ++static ssize_t bus_rescan_store(struct bus_type *bus, ++ const char *buf, size_t count) ++{ ++ unsigned long val; ++ ++ if (kstrtoul(buf, 0, &val) < 0) ++ return -EINVAL; ++ ++ if (val) ++ bus_for_each_dev(bus, NULL, NULL, scan_fsl_mc_bus); ++ ++ return count; ++} ++static BUS_ATTR(rescan, (S_IWUSR | S_IWGRP), NULL, bus_rescan_store); ++ ++static struct attribute *fsl_mc_bus_attrs[] = { ++ &bus_attr_rescan.attr, ++ NULL, ++}; ++ ++static const struct attribute_group fsl_mc_bus_group = { ++ .attrs = fsl_mc_bus_attrs, ++}; ++ ++static const struct attribute_group *fsl_mc_bus_groups[] = { ++ &fsl_mc_bus_group, ++ NULL, ++}; ++ + struct bus_type fsl_mc_bus_type = { + .name = "fsl-mc", + .match = fsl_mc_bus_match, + .uevent = fsl_mc_bus_uevent, + .dev_groups = fsl_mc_dev_groups, ++ .bus_groups = fsl_mc_bus_groups, + }; + EXPORT_SYMBOL_GPL(fsl_mc_bus_type); + +-static atomic_t root_dprc_count = ATOMIC_INIT(0); +- + static int fsl_mc_driver_probe(struct device *dev) + { + struct fsl_mc_driver *mc_drv; +@@ -164,8 +285,7 @@ static int fsl_mc_driver_probe(struct de + + error = mc_drv->probe(mc_dev); + if (error < 0) { +- dev_err(dev, "MC object device probe callback failed: %d\n", +- error); ++ dev_err(dev, "%s failed: %d\n", __func__, error); + return error; + } + +@@ -183,9 +303,7 @@ static int fsl_mc_driver_remove(struct d + + error = mc_drv->remove(mc_dev); + if (error < 0) { +- dev_err(dev, +- "MC object device remove callback failed: %d\n", +- error); ++ dev_err(dev, "%s failed: %d\n", __func__, error); + return error; + } + +@@ -232,8 +350,6 @@ int __fsl_mc_driver_register(struct fsl_ + return error; + } + +- pr_info("MC object device driver %s registered\n", +- mc_driver->driver.name); + return 0; + } + EXPORT_SYMBOL_GPL(__fsl_mc_driver_register); +@@ -249,15 +365,6 @@ void fsl_mc_driver_unregister(struct fsl + EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister); + + /** +- * fsl_mc_bus_exists - check if a root dprc exists +- */ +-bool fsl_mc_bus_exists(void) +-{ +- return atomic_read(&root_dprc_count) > 0; +-} +-EXPORT_SYMBOL_GPL(fsl_mc_bus_exists); +- +-/** + * fsl_mc_get_root_dprc - function to traverse to the root dprc + */ + void fsl_mc_get_root_dprc(struct device *dev, +@@ -315,21 +422,6 @@ static int get_dprc_icid(struct fsl_mc_i + return error; + } + +-static int get_dprc_version(struct fsl_mc_io *mc_io, +- int container_id, u16 *major, u16 *minor) +-{ +- struct dprc_attributes attr; +- int error; +- +- error = get_dprc_attr(mc_io, container_id, &attr); +- if (error == 0) { +- *major = attr.version.major; +- *minor = attr.version.minor; +- } +- +- return error; +-} +- + static int translate_mc_addr(struct fsl_mc_device *mc_dev, + enum dprc_region_type mc_region_type, + u64 mc_offset, phys_addr_t *phys_addr) +@@ -451,18 
+543,37 @@ bool fsl_mc_is_root_dprc(struct device * + return dev == root_dprc_dev; + } + ++static void fsl_mc_device_release(struct device *dev) ++{ ++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); ++ struct fsl_mc_bus *mc_bus = NULL; ++ ++ kfree(mc_dev->regions); ++ ++ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) ++ mc_bus = to_fsl_mc_bus(mc_dev); ++ ++ if (mc_bus) ++ kfree(mc_bus); ++ else ++ kfree(mc_dev); ++} ++ + /** +- * Add a newly discovered MC object device to be visible in Linux ++ * Add a newly discovered fsl-mc device to be visible in Linux + */ + int fsl_mc_device_add(struct dprc_obj_desc *obj_desc, + struct fsl_mc_io *mc_io, + struct device *parent_dev, ++ const char *driver_override, + struct fsl_mc_device **new_mc_dev) + { + int error; + struct fsl_mc_device *mc_dev = NULL; + struct fsl_mc_bus *mc_bus = NULL; + struct fsl_mc_device *parent_mc_dev; ++ struct device *fsl_mc_platform_dev; ++ struct device_node *fsl_mc_platform_node; + + if (dev_is_fsl_mc(parent_dev)) + parent_mc_dev = to_fsl_mc_device(parent_dev); +@@ -473,7 +584,7 @@ int fsl_mc_device_add(struct dprc_obj_de + /* + * Allocate an MC bus device object: + */ +- mc_bus = devm_kzalloc(parent_dev, sizeof(*mc_bus), GFP_KERNEL); ++ mc_bus = kzalloc(sizeof(*mc_bus), GFP_KERNEL); + if (!mc_bus) + return -ENOMEM; + +@@ -482,16 +593,30 @@ int fsl_mc_device_add(struct dprc_obj_de + /* + * Allocate a regular fsl_mc_device object: + */ +- mc_dev = kmem_cache_zalloc(mc_dev_cache, GFP_KERNEL); ++ mc_dev = kzalloc(sizeof(*mc_dev), GFP_KERNEL); + if (!mc_dev) + return -ENOMEM; + } + + mc_dev->obj_desc = *obj_desc; + mc_dev->mc_io = mc_io; ++ ++ if (driver_override) { ++ /* ++ * We trust driver_override, so we don't need to use ++ * kstrndup() here ++ */ ++ mc_dev->driver_override = kstrdup(driver_override, GFP_KERNEL); ++ if (!mc_dev->driver_override) { ++ error = -ENOMEM; ++ goto error_cleanup_dev; ++ } ++ } ++ + device_initialize(&mc_dev->dev); + mc_dev->dev.parent = parent_dev; + mc_dev->dev.bus = &fsl_mc_bus_type; ++ mc_dev->dev.release = fsl_mc_device_release; + dev_set_name(&mc_dev->dev, "%s.%d", obj_desc->type, obj_desc->id); + + if (strcmp(obj_desc->type, "dprc") == 0) { +@@ -524,8 +649,6 @@ int fsl_mc_device_add(struct dprc_obj_de + } + + mc_io2 = mc_io; +- +- atomic_inc(&root_dprc_count); + } + + error = get_dprc_icid(mc_io2, obj_desc->id, &mc_dev->icid); +@@ -533,8 +656,8 @@ int fsl_mc_device_add(struct dprc_obj_de + goto error_cleanup_dev; + } else { + /* +- * A non-DPRC MC object device has to be a child of another +- * MC object (specifically a DPRC object) ++ * A non-DPRC object has to be a child of a DPRC, use the ++ * parent's ICID and interrupt domain. + */ + mc_dev->icid = parent_mc_dev->icid; + mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK; +@@ -556,9 +679,14 @@ int fsl_mc_device_add(struct dprc_obj_de + goto error_cleanup_dev; + } + +- /* Objects are coherent, unless 'no shareability' flag set. */ +- if (!(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY)) +- arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true); ++ fsl_mc_platform_dev = &mc_dev->dev; ++ while (dev_is_fsl_mc(fsl_mc_platform_dev)) ++ fsl_mc_platform_dev = fsl_mc_platform_dev->parent; ++ fsl_mc_platform_node = fsl_mc_platform_dev->of_node; ++ ++ /* Set up the iommu configuration for the devices. 
*/ ++ fsl_mc_dma_configure(mc_dev, fsl_mc_platform_node, ++ !(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY)); + + /* + * The device-specific probe callback will get invoked by device_add() +@@ -571,9 +699,7 @@ int fsl_mc_device_add(struct dprc_obj_de + goto error_cleanup_dev; + } + +- (void)get_device(&mc_dev->dev); +- dev_dbg(parent_dev, "Added MC object device %s\n", +- dev_name(&mc_dev->dev)); ++ dev_dbg(parent_dev, "added %s\n", dev_name(&mc_dev->dev)); + + *new_mc_dev = mc_dev; + return 0; +@@ -581,47 +707,34 @@ int fsl_mc_device_add(struct dprc_obj_de + error_cleanup_dev: + kfree(mc_dev->regions); + if (mc_bus) +- devm_kfree(parent_dev, mc_bus); ++ kfree(mc_bus); + else +- kmem_cache_free(mc_dev_cache, mc_dev); ++ kfree(mc_dev); + + return error; + } + EXPORT_SYMBOL_GPL(fsl_mc_device_add); + + /** +- * fsl_mc_device_remove - Remove a MC object device from being visible to ++ * fsl_mc_device_remove - Remove an fsl-mc device from being visible to + * Linux + * +- * @mc_dev: Pointer to a MC object device object ++ * @mc_dev: Pointer to an fsl-mc device + */ + void fsl_mc_device_remove(struct fsl_mc_device *mc_dev) + { +- struct fsl_mc_bus *mc_bus = NULL; +- +- kfree(mc_dev->regions); ++ kfree(mc_dev->driver_override); ++ mc_dev->driver_override = NULL; + + /* + * The device-specific remove callback will get invoked by device_del() + */ + device_del(&mc_dev->dev); +- put_device(&mc_dev->dev); + +- if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) { +- mc_bus = to_fsl_mc_bus(mc_dev); +- +- if (fsl_mc_is_root_dprc(&mc_dev->dev)) { +- if (atomic_read(&root_dprc_count) > 0) +- atomic_dec(&root_dprc_count); +- else +- WARN_ON(1); +- } +- } ++ if (strcmp(mc_dev->obj_desc.type, "dprc") != 0) ++ mc_dev->dev.iommu_fwspec = NULL; + +- if (mc_bus) +- devm_kfree(mc_dev->dev.parent, mc_bus); +- else +- kmem_cache_free(mc_dev_cache, mc_dev); ++ put_device(&mc_dev->dev); + } + EXPORT_SYMBOL_GPL(fsl_mc_device_remove); + +@@ -629,8 +742,7 @@ static int parse_mc_ranges(struct device + int *paddr_cells, + int *mc_addr_cells, + int *mc_size_cells, +- const __be32 **ranges_start, +- u8 *num_ranges) ++ const __be32 **ranges_start) + { + const __be32 *prop; + int range_tuple_cell_count; +@@ -643,8 +755,6 @@ static int parse_mc_ranges(struct device + dev_warn(dev, + "missing or empty ranges property for device tree node '%s'\n", + mc_node->name); +- +- *num_ranges = 0; + return 0; + } + +@@ -671,8 +781,7 @@ static int parse_mc_ranges(struct device + return -EINVAL; + } + +- *num_ranges = ranges_len / tuple_len; +- return 0; ++ return ranges_len / tuple_len; + } + + static int get_mc_addr_translation_ranges(struct device *dev, +@@ -680,7 +789,7 @@ static int get_mc_addr_translation_range + **ranges, + u8 *num_ranges) + { +- int error; ++ int ret; + int paddr_cells; + int mc_addr_cells; + int mc_size_cells; +@@ -688,16 +797,16 @@ static int get_mc_addr_translation_range + const __be32 *ranges_start; + const __be32 *cell; + +- error = parse_mc_ranges(dev, ++ ret = parse_mc_ranges(dev, + &paddr_cells, + &mc_addr_cells, + &mc_size_cells, +- &ranges_start, +- num_ranges); +- if (error < 0) +- return error; ++ &ranges_start); ++ if (ret < 0) ++ return ret; + +- if (!(*num_ranges)) { ++ *num_ranges = ret; ++ if (!ret) { + /* + * Missing or empty ranges property ("ranges;") for the + * 'fsl,qoriq-mc' node. 
In this case, identity mapping +@@ -749,8 +858,6 @@ static int fsl_mc_bus_probe(struct platf + struct mc_version mc_version; + struct resource res; + +- dev_info(&pdev->dev, "Root MC bus device probed"); +- + mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL); + if (!mc) + return -ENOMEM; +@@ -783,8 +890,7 @@ static int fsl_mc_bus_probe(struct platf + goto error_cleanup_mc_io; + } + +- dev_info(&pdev->dev, +- "Freescale Management Complex Firmware version: %u.%u.%u\n", ++ dev_info(&pdev->dev, "MC firmware version: %u.%u.%u\n", + mc_version.major, mc_version.minor, mc_version.revision); + + error = get_mc_addr_translation_ranges(&pdev->dev, +@@ -793,16 +899,17 @@ static int fsl_mc_bus_probe(struct platf + if (error < 0) + goto error_cleanup_mc_io; + +- error = dpmng_get_container_id(mc_io, 0, &container_id); ++ error = dprc_get_container_id(mc_io, 0, &container_id); + if (error < 0) { + dev_err(&pdev->dev, +- "dpmng_get_container_id() failed: %d\n", error); ++ "dprc_get_container_id() failed: %d\n", error); + goto error_cleanup_mc_io; + } + + memset(&obj_desc, 0, sizeof(struct dprc_obj_desc)); +- error = get_dprc_version(mc_io, container_id, +- &obj_desc.ver_major, &obj_desc.ver_minor); ++ error = dprc_get_api_version(mc_io, 0, ++ &obj_desc.ver_major, ++ &obj_desc.ver_minor); + if (error < 0) + goto error_cleanup_mc_io; + +@@ -812,7 +919,8 @@ static int fsl_mc_bus_probe(struct platf + obj_desc.irq_count = 1; + obj_desc.region_count = 0; + +- error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, &mc_bus_dev); ++ error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, NULL, ++ &mc_bus_dev); + if (error < 0) + goto error_cleanup_mc_io; + +@@ -840,7 +948,6 @@ static int fsl_mc_bus_remove(struct plat + fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io); + mc->root_mc_bus_dev->mc_io = NULL; + +- dev_info(&pdev->dev, "Root MC bus device removed"); + return 0; + } + +@@ -865,22 +972,12 @@ static int __init fsl_mc_bus_driver_init + { + int error; + +- mc_dev_cache = kmem_cache_create("fsl_mc_device", +- sizeof(struct fsl_mc_device), 0, 0, +- NULL); +- if (!mc_dev_cache) { +- pr_err("Could not create fsl_mc_device cache\n"); +- return -ENOMEM; +- } +- + error = bus_register(&fsl_mc_bus_type); + if (error < 0) { +- pr_err("fsl-mc bus type registration failed: %d\n", error); ++ pr_err("bus type registration failed: %d\n", error); + goto error_cleanup_cache; + } + +- pr_info("fsl-mc bus type registered\n"); +- + error = platform_driver_register(&fsl_mc_bus_driver); + if (error < 0) { + pr_err("platform_driver_register() failed: %d\n", error); +@@ -914,7 +1011,6 @@ error_cleanup_bus: + bus_unregister(&fsl_mc_bus_type); + + error_cleanup_cache: +- kmem_cache_destroy(mc_dev_cache); + return error; + } + postcore_initcall(fsl_mc_bus_driver_init); +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/fsl-mc-iommu.c +@@ -0,0 +1,104 @@ ++/* ++ * Copyright 2016-17 NXP ++ * Author: Nipun Gupta ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. 
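The rescan and driver_override attributes introduced above are ordinary sysfs files, so they can be exercised from user space with plain writes. A minimal sketch for illustration only — the device name "dprc.1" and the driver name "vfio-fsl-mc" are placeholders, not names this patch guarantees to exist:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Write a short string to a sysfs attribute; returns 0 on success. */
static int write_sysfs(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* Ask every root DPRC to re-scan for objects added by the MC. */
	write_sysfs("/sys/bus/fsl-mc/rescan", "1");

	/* Pin a specific device to a driver before (re)binding it. */
	write_sysfs("/sys/bus/fsl-mc/devices/dprc.1/driver_override",
		    "vfio-fsl-mc");
	return 0;
}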
++ */ ++ ++#include ++#include ++#include ++#include "../include/mc.h" ++ ++/* Setup the IOMMU for the DPRC container */ ++static const struct iommu_ops ++*fsl_mc_iommu_configure(struct fsl_mc_device *mc_dev, ++ struct device_node *fsl_mc_platform_node) ++{ ++ struct of_phandle_args iommu_spec; ++ const struct iommu_ops *ops; ++ u32 iommu_phandle; ++ struct device_node *iommu_node; ++ const __be32 *map = NULL; ++ int iommu_cells, map_len, ret; ++ ++ map = of_get_property(fsl_mc_platform_node, "iommu-map", &map_len); ++ if (!map) ++ return NULL; ++ ++ ops = mc_dev->dev.bus->iommu_ops; ++ if (!ops || !ops->of_xlate) ++ return NULL; ++ ++ iommu_phandle = be32_to_cpup(map + 1); ++ iommu_node = of_find_node_by_phandle(iommu_phandle); ++ ++ if (of_property_read_u32(iommu_node, "#iommu-cells", &iommu_cells)) { ++ pr_err("%s: missing #iommu-cells property\n", iommu_node->name); ++ return NULL; ++ } ++ ++ /* Initialize the fwspec */ ++ ret = iommu_fwspec_init(&mc_dev->dev, &iommu_node->fwnode, ops); ++ if (ret) ++ return NULL; ++ ++ /* ++ * Fill in the required stream-id before calling the iommu's ++ * ops->xlate callback. ++ */ ++ iommu_spec.np = iommu_node; ++ iommu_spec.args[0] = mc_dev->icid; ++ iommu_spec.args_count = 1; ++ ++ ret = ops->of_xlate(&mc_dev->dev, &iommu_spec); ++ if (ret) ++ return NULL; ++ ++ of_node_put(iommu_spec.np); ++ ++ return ops; ++} ++ ++/* Set up DMA configuration for fsl-mc devices */ ++void fsl_mc_dma_configure(struct fsl_mc_device *mc_dev, ++ struct device_node *fsl_mc_platform_node, int coherent) ++{ ++ const struct iommu_ops *ops; ++ ++ ops = fsl_mc_iommu_configure(mc_dev, fsl_mc_platform_node); ++ ++ mc_dev->dev.coherent_dma_mask = DMA_BIT_MASK(48); ++ mc_dev->dev.dma_mask = &mc_dev->dev.coherent_dma_mask; ++ arch_setup_dma_ops(&mc_dev->dev, 0, ++ mc_dev->dev.coherent_dma_mask + 1, ops, coherent); ++} ++ ++/* Macro to get the container device of a MC device */ ++#define fsl_mc_cont_dev(_dev) ((to_fsl_mc_device(_dev)->flags & \ ++ FSL_MC_IS_DPRC) ? (_dev) : ((_dev)->parent)) ++ ++/* Macro to check if a device is a container device */ ++#define is_cont_dev(_dev) (to_fsl_mc_device(_dev)->flags & FSL_MC_IS_DPRC) ++ ++/* Get the IOMMU group for device on fsl-mc bus */ ++struct iommu_group *fsl_mc_device_group(struct device *dev) ++{ ++ struct device *cont_dev = fsl_mc_cont_dev(dev); ++ struct iommu_group *group; ++ ++ /* Container device is responsible for creating the iommu group */ ++ if (is_cont_dev(dev)) { ++ group = iommu_group_alloc(); ++ if (IS_ERR(group)) ++ return NULL; ++ } else { ++ get_device(cont_dev); ++ group = iommu_group_get(cont_dev); ++ put_device(cont_dev); ++ } ++ ++ return group; ++} +--- a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c ++++ b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c +@@ -1,7 +1,7 @@ + /* + * Freescale Management Complex (MC) bus driver MSI support + * +- * Copyright (C) 2015 Freescale Semiconductor, Inc. ++ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc. 
+ * Author: German Rivera
+ *
+ * This file is licensed under the terms of the GNU General Public
+@@ -17,6 +17,7 @@
+ #include
+ #include
+ #include "../include/mc-bus.h"
++#include "fsl-mc-private.h"
+
+ /*
+  * Generate a unique ID identifying the interrupt (only used within the MSI
+--- a/drivers/staging/fsl-mc/bus/fsl-mc-private.h
++++ b/drivers/staging/fsl-mc/bus/fsl-mc-private.h
+@@ -10,13 +10,15 @@
+ #ifndef _FSL_MC_PRIVATE_H_
+ #define _FSL_MC_PRIVATE_H_
+
++#include "../include/mc.h"
++#include "../include/mc-bus.h"
++
+ int __must_check fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ 				   struct fsl_mc_io *mc_io,
+ 				   struct device *parent_dev,
++				   const char *driver_override,
+ 				   struct fsl_mc_device **new_mc_dev);
+
+-void fsl_mc_device_remove(struct fsl_mc_device *mc_dev);
+-
+ int __init dprc_driver_init(void);
+
+ void dprc_driver_exit(void);
+--- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
++++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+@@ -1,7 +1,7 @@
+ /*
+  * Freescale Management Complex (MC) bus driver MSI support
+  *
+- * Copyright (C) 2015 Freescale Semiconductor, Inc.
++ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
+  * Author: German Rivera
+  *
+  * This file is licensed under the terms of the GNU General Public
+@@ -17,9 +17,10 @@
+ #include
+ #include
+ #include "../include/mc-bus.h"
++#include "fsl-mc-private.h"
+
+ static struct irq_chip its_msi_irq_chip = {
+-	.name = "fsl-mc-bus-msi",
++	.name = "ITS-fMSI",
+ 	.irq_mask = irq_chip_mask_parent,
+ 	.irq_unmask = irq_chip_unmask_parent,
+ 	.irq_eoi = irq_chip_eoi_parent,
+@@ -51,7 +52,7 @@ static int its_fsl_mc_msi_prepare(struct
+ 	return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
+ }
+
+-static struct msi_domain_ops its_fsl_mc_msi_ops = {
++static struct msi_domain_ops its_fsl_mc_msi_ops __ro_after_init = {
+ 	.msi_prepare = its_fsl_mc_msi_prepare,
+ };
+
+@@ -94,8 +95,8 @@ int __init its_fsl_mc_msi_init(void)
+ 			continue;
+ 		}
+
+-		WARN_ON(mc_msi_domain->
+-			host_data != &its_fsl_mc_msi_domain_info);
++		WARN_ON(mc_msi_domain->host_data !=
++			&its_fsl_mc_msi_domain_info);
+
+ 		pr_info("fsl-mc MSI: %s domain created\n", np->full_name);
+ 	}
+--- a/drivers/staging/fsl-mc/bus/mc-io.c
++++ b/drivers/staging/fsl-mc/bus/mc-io.c
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+  *
+  * Redistribution and use in source and binary forms, with or without
+  * modification, are permitted provided that the following conditions are met:
+@@ -11,7 +12,6 @@
+  * names of any contributors may be used to endorse or promote products
+  * derived from this software without specific prior written permission.
+  *
+- *
+  * ALTERNATIVELY, this software may be distributed under the terms of the
+  * GNU General Public License ("GPL") as published by the Free Software
+  * Foundation, either version 2 of that License or (at your option) any
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/mc-ioctl.h
+@@ -0,0 +1,22 @@
++/*
++ * Freescale Management Complex (MC) ioctl interface
++ *
++ * Copyright (C) 2014 Freescale Semiconductor, Inc.
++ * Author: Lijun Pan
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++#ifndef _FSL_MC_IOCTL_H_
++#define _FSL_MC_IOCTL_H_
++
++#include <linux/ioctl.h>
++#include "../include/mc-sys.h"
++
++#define RESTOOL_IOCTL_TYPE 'R'
++
++#define RESTOOL_SEND_MC_COMMAND \
++	_IOWR(RESTOOL_IOCTL_TYPE, 0xE0, struct mc_command)
++
++#endif /* _FSL_MC_IOCTL_H_ */
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/mc-restool.c
+@@ -0,0 +1,405 @@
++/*
++ * Freescale Management Complex (MC) restool driver
++ *
++ * Copyright (C) 2014 Freescale Semiconductor, Inc.
++ * Author: Lijun Pan
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include "../include/mc.h"
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include "mc-ioctl.h"
++#include "../include/mc-sys.h"
++#include "../include/mc-bus.h"
++#include "../include/mc-cmd.h"
++#include "../include/dpmng.h"
++
++/**
++ * Maximum number of DPRCs that can be opened at the same time
++ */
++#define MAX_DPRC_HANDLES 64
++
++/**
++ * restool_misc - information associated with the newly added miscdevice
++ * @misc: newly created miscdevice associated with root dprc
++ * @miscdevt: device id of this miscdevice
++ * @list: a linked list node representing this miscdevice
++ * @static_mc_io: pointer to the static MC I/O object used by the restool
++ * @dynamic_instance_count: number of dynamically created instances
++ * @static_instance_in_use: static instance is in use or not
++ * @mutex: mutex lock to serialize the open/release operations
++ * @dev: root dprc associated with this miscdevice
++ */
++struct restool_misc {
++	struct miscdevice misc;
++	dev_t miscdevt;
++	struct list_head list;
++	struct fsl_mc_io *static_mc_io;
++	u32 dynamic_instance_count;
++	bool static_instance_in_use;
++	struct mutex mutex; /* serialize the open/release operations */
++	struct device *dev;
++};
++
++/**
++ * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
++ * @root_mc_bus_dev: fsl-mc device representing the root DPRC
++ * @num_translation_ranges: number of entries in addr_translation_ranges
++ * @translation_ranges: array of bus to system address translation ranges
++ */
++struct fsl_mc {
++	struct fsl_mc_device *root_mc_bus_dev;
++	u8 num_translation_ranges;
++	struct fsl_mc_addr_translation_range *translation_ranges;
++};
++
++/*
++ * initialize a global list to link all
++ * the miscdevice nodes (struct restool_misc)
++ */
++static LIST_HEAD(misc_list);
++static DEFINE_MUTEX(misc_list_mutex);
++
++static int fsl_mc_restool_dev_open(struct inode *inode, struct file *filep)
++{
++	struct fsl_mc_device *root_mc_dev;
++	int error;
++	struct fsl_mc_io *dynamic_mc_io = NULL;
++	struct restool_misc *restool_misc = NULL;
++	struct restool_misc *restool_misc_cursor;
++
++	mutex_lock(&misc_list_mutex);
++
++	list_for_each_entry(restool_misc_cursor, &misc_list, list) {
++		if (restool_misc_cursor->miscdevt == inode->i_rdev) {
++			restool_misc = restool_misc_cursor;
++			break;
++		}
++	}
++
++	mutex_unlock(&misc_list_mutex);
++
++	if (!restool_misc)
++		return -EINVAL;
++
++	if (WARN_ON(!restool_misc->dev))
++		return -EINVAL;
++
++	mutex_lock(&restool_misc->mutex);
++
++	if (!restool_misc->static_instance_in_use) {
++		restool_misc->static_instance_in_use = true;
++		filep->private_data = restool_misc->static_mc_io;
++	} else {
++		dynamic_mc_io = kzalloc(sizeof(*dynamic_mc_io), GFP_KERNEL);
++		if (!dynamic_mc_io) {
++			error = -ENOMEM;
++			goto err_unlock;
++		}
++
++ root_mc_dev = to_fsl_mc_device(restool_misc->dev); ++ error = fsl_mc_portal_allocate(root_mc_dev, 0, &dynamic_mc_io); ++ if (error < 0) { ++ pr_err("Not able to allocate MC portal\n"); ++ goto free_dynamic_mc_io; ++ } ++ ++restool_misc->dynamic_instance_count; ++ filep->private_data = dynamic_mc_io; ++ } ++ ++ mutex_unlock(&restool_misc->mutex); ++ ++ return 0; ++ ++free_dynamic_mc_io: ++ kfree(dynamic_mc_io); ++err_unlock: ++ mutex_unlock(&restool_misc->mutex); ++ ++ return error; ++} ++ ++static int fsl_mc_restool_dev_release(struct inode *inode, struct file *filep) ++{ ++ struct fsl_mc_io *local_mc_io = filep->private_data; ++ struct restool_misc *restool_misc = NULL; ++ struct restool_misc *restool_misc_cursor; ++ ++ if (WARN_ON(!filep->private_data)) ++ return -EINVAL; ++ ++ mutex_lock(&misc_list_mutex); ++ ++ list_for_each_entry(restool_misc_cursor, &misc_list, list) { ++ if (restool_misc_cursor->miscdevt == inode->i_rdev) { ++ restool_misc = restool_misc_cursor; ++ break; ++ } ++ } ++ ++ mutex_unlock(&misc_list_mutex); ++ ++ if (!restool_misc) ++ return -EINVAL; ++ ++ mutex_lock(&restool_misc->mutex); ++ ++ if (WARN_ON(restool_misc->dynamic_instance_count == 0 && ++ !restool_misc->static_instance_in_use)) { ++ mutex_unlock(&restool_misc->mutex); ++ return -EINVAL; ++ } ++ ++ /* Globally clean up opened/untracked handles */ ++ fsl_mc_portal_reset(local_mc_io); ++ ++ /* ++ * must check ++ * whether local_mc_io is dynamic or static instance ++ * Otherwise it will free up the reserved portal by accident ++ * or even not free up the dynamic allocated portal ++ * if 2 or more instances running concurrently ++ */ ++ if (local_mc_io == restool_misc->static_mc_io) { ++ restool_misc->static_instance_in_use = false; ++ } else { ++ fsl_mc_portal_free(local_mc_io); ++ kfree(filep->private_data); ++ --restool_misc->dynamic_instance_count; ++ } ++ ++ filep->private_data = NULL; ++ mutex_unlock(&restool_misc->mutex); ++ ++ return 0; ++} ++ ++static int restool_send_mc_command(unsigned long arg, ++ struct fsl_mc_io *local_mc_io) ++{ ++ int error; ++ struct mc_command mc_cmd; ++ ++ if (copy_from_user(&mc_cmd, (void __user *)arg, sizeof(mc_cmd))) ++ return -EFAULT; ++ ++ /* ++ * Send MC command to the MC: ++ */ ++ error = mc_send_command(local_mc_io, &mc_cmd); ++ if (error < 0) ++ return error; ++ ++ if (copy_to_user((void __user *)arg, &mc_cmd, sizeof(mc_cmd))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static long ++fsl_mc_restool_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ++{ ++ int error; ++ ++ switch (cmd) { ++ case RESTOOL_SEND_MC_COMMAND: ++ error = restool_send_mc_command(arg, file->private_data); ++ break; ++ default: ++ pr_err("%s: unexpected ioctl call number\n", __func__); ++ error = -EINVAL; ++ } ++ ++ return error; ++} ++ ++static const struct file_operations fsl_mc_restool_dev_fops = { ++ .owner = THIS_MODULE, ++ .open = fsl_mc_restool_dev_open, ++ .release = fsl_mc_restool_dev_release, ++ .unlocked_ioctl = fsl_mc_restool_dev_ioctl, ++}; ++ ++static int restool_add_device_file(struct device *dev) ++{ ++ u32 name1 = 0; ++ char name2[20] = {0}; ++ int error; ++ struct fsl_mc_device *root_mc_dev; ++ struct restool_misc *restool_misc; ++ ++ if (dev->bus == &platform_bus_type && dev->driver_data) { ++ if (sscanf(dev_name(dev), "%x.%s", &name1, name2) != 2) ++ return -EINVAL; ++ ++ if (strcmp(name2, "fsl-mc") == 0) ++ pr_debug("platform's root dprc name is: %s\n", ++ dev_name(&(((struct fsl_mc *) ++ (dev->driver_data))->root_mc_bus_dev->dev))); ++ } ++ ++ if 
(!fsl_mc_is_root_dprc(dev)) ++ return 0; ++ ++ restool_misc = kzalloc(sizeof(*restool_misc), GFP_KERNEL); ++ if (!restool_misc) ++ return -ENOMEM; ++ ++ restool_misc->dev = dev; ++ root_mc_dev = to_fsl_mc_device(dev); ++ error = fsl_mc_portal_allocate(root_mc_dev, 0, ++ &restool_misc->static_mc_io); ++ if (error < 0) { ++ pr_err("Not able to allocate MC portal\n"); ++ goto free_restool_misc; ++ } ++ ++ restool_misc->misc.minor = MISC_DYNAMIC_MINOR; ++ restool_misc->misc.name = dev_name(dev); ++ restool_misc->misc.fops = &fsl_mc_restool_dev_fops; ++ ++ error = misc_register(&restool_misc->misc); ++ if (error < 0) { ++ pr_err("misc_register() failed: %d\n", error); ++ goto free_portal; ++ } ++ ++ restool_misc->miscdevt = restool_misc->misc.this_device->devt; ++ mutex_init(&restool_misc->mutex); ++ mutex_lock(&misc_list_mutex); ++ list_add(&restool_misc->list, &misc_list); ++ mutex_unlock(&misc_list_mutex); ++ ++ pr_info("/dev/%s driver registered\n", dev_name(dev)); ++ ++ return 0; ++ ++free_portal: ++ fsl_mc_portal_free(restool_misc->static_mc_io); ++free_restool_misc: ++ kfree(restool_misc); ++ ++ return error; ++} ++ ++static int restool_bus_notifier(struct notifier_block *nb, ++ unsigned long action, void *data) ++{ ++ int error; ++ struct device *dev = data; ++ ++ switch (action) { ++ case BUS_NOTIFY_ADD_DEVICE: ++ error = restool_add_device_file(dev); ++ if (error) ++ return error; ++ break; ++ case BUS_NOTIFY_DEL_DEVICE: ++ case BUS_NOTIFY_REMOVED_DEVICE: ++ case BUS_NOTIFY_BIND_DRIVER: ++ case BUS_NOTIFY_BOUND_DRIVER: ++ case BUS_NOTIFY_UNBIND_DRIVER: ++ case BUS_NOTIFY_UNBOUND_DRIVER: ++ break; ++ default: ++ pr_err("%s: unrecognized device action from %s\n", __func__, ++ dev_name(dev)); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int add_to_restool(struct device *dev, void *data) ++{ ++ return restool_add_device_file(dev); ++} ++ ++static int __init fsl_mc_restool_driver_init(void) ++{ ++ int error; ++ struct notifier_block *nb; ++ ++ nb = kzalloc(sizeof(*nb), GFP_KERNEL); ++ if (!nb) ++ return -ENOMEM; ++ ++ nb->notifier_call = restool_bus_notifier; ++ error = bus_register_notifier(&fsl_mc_bus_type, nb); ++ if (error) ++ goto free_nb; ++ ++ /* ++ * This driver runs after fsl-mc bus driver runs. ++ * Hence, many of the root dprcs are already attached to fsl-mc bus ++ * In order to make sure we find all the root dprcs, ++ * we need to scan the fsl_mc_bus_type. 
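The misc device registered here accepts a single ioctl, which round-trips one raw command to the MC. A hedged user-space sketch follows; the mc_command layout mirrors the kernel's mc-cmd.h (one 64-bit header plus seven 64-bit parameters — an assumption worth re-checking against the running kernel), and "/dev/dprc.1" is illustrative:

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Assumed mirror of the kernel's struct mc_command (see mc-cmd.h). */
struct mc_command {
	uint64_t header;
	uint64_t params[7];
};

#define RESTOOL_SEND_MC_COMMAND _IOWR('R', 0xE0, struct mc_command)

/* Send one pre-encoded command to the MC through the restool device. */
int send_mc_command(struct mc_command *cmd)
{
	int fd = open("/dev/dprc.1", O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, RESTOOL_SEND_MC_COMMAND, cmd);
	close(fd);
	return ret;
}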
++ */ ++ error = bus_for_each_dev(&fsl_mc_bus_type, NULL, NULL, add_to_restool); ++ if (error) { ++ bus_unregister_notifier(&fsl_mc_bus_type, nb); ++ kfree(nb); ++ pr_err("restool driver registration failure\n"); ++ return error; ++ } ++ ++ return 0; ++ ++free_nb: ++ kfree(nb); ++ return error; ++} ++ ++module_init(fsl_mc_restool_driver_init); ++ ++static void __exit fsl_mc_restool_driver_exit(void) ++{ ++ struct restool_misc *restool_misc; ++ struct restool_misc *restool_misc_tmp; ++ char name1[20] = {0}; ++ u32 name2 = 0; ++ ++ list_for_each_entry_safe(restool_misc, restool_misc_tmp, ++ &misc_list, list) { ++ if (sscanf(restool_misc->misc.name, "%4s.%u", name1, &name2) ++ != 2) ++ continue; ++ ++ pr_debug("name1=%s,name2=%u\n", name1, name2); ++ pr_debug("misc-device: %s\n", restool_misc->misc.name); ++ if (strcmp(name1, "dprc") != 0) ++ continue; ++ ++ if (WARN_ON(!restool_misc->static_mc_io)) ++ return; ++ ++ if (WARN_ON(restool_misc->dynamic_instance_count != 0)) ++ return; ++ ++ if (WARN_ON(restool_misc->static_instance_in_use)) ++ return; ++ ++ misc_deregister(&restool_misc->misc); ++ pr_info("/dev/%s driver unregistered\n", ++ restool_misc->misc.name); ++ fsl_mc_portal_free(restool_misc->static_mc_io); ++ list_del(&restool_misc->list); ++ kfree(restool_misc); ++ } ++} ++ ++module_exit(fsl_mc_restool_driver_exit); ++ ++MODULE_AUTHOR("Freescale Semiconductor Inc."); ++MODULE_DESCRIPTION("Freescale's MC restool driver"); ++MODULE_LICENSE("GPL"); +--- a/drivers/staging/fsl-mc/bus/mc-sys.c ++++ b/drivers/staging/fsl-mc/bus/mc-sys.c +@@ -1,4 +1,5 @@ +-/* Copyright 2013-2014 Freescale Semiconductor Inc. ++/* ++ * Copyright 2013-2016 Freescale Semiconductor Inc. + * + * I/O services to send MC commands to the MC hardware + * +@@ -13,7 +14,6 @@ + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * +- * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any +@@ -46,7 +46,7 @@ + /** + * Timeout in milliseconds to wait for the completion of an MC command + */ +-#define MC_CMD_COMPLETION_TIMEOUT_MS 500 ++#define MC_CMD_COMPLETION_TIMEOUT_MS 15000 + + /* + * usleep_range() min and max values used to throttle down polling +@@ -67,7 +67,7 @@ static u16 mc_cmd_hdr_read_cmdid(struct + struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header; + u16 cmd_id = le16_to_cpu(hdr->cmd_id); + +- return (cmd_id & MC_CMD_HDR_CMDID_MASK) >> MC_CMD_HDR_CMDID_SHIFT; ++ return cmd_id; + } + + static int mc_status_to_error(enum mc_cmd_status status) +@@ -200,7 +200,7 @@ static int mc_polling_wait_preemptible(s + + if (time_after_eq(jiffies, jiffies_until_timeout)) { + dev_dbg(mc_io->dev, +- "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", ++ "MC command timed out (portal: %#llx, dprc handle: %#x, command: %#x)\n", + mc_io->portal_phys_addr, + (unsigned int)mc_cmd_hdr_read_token(cmd), + (unsigned int)mc_cmd_hdr_read_cmdid(cmd)); +@@ -240,7 +240,7 @@ static int mc_polling_wait_atomic(struct + timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS; + if (timeout_usecs == 0) { + dev_dbg(mc_io->dev, +- "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", ++ "MC command timed out (portal: %#llx, dprc handle: %#x, command: %#x)\n", + mc_io->portal_phys_addr, + (unsigned int)mc_cmd_hdr_read_token(cmd), + (unsigned int)mc_cmd_hdr_read_cmdid(cmd)); +@@ -294,7 +294,7 @@ int mc_send_command(struct fsl_mc_io *mc + + if (status != MC_CMD_STATUS_OK) { + dev_dbg(mc_io->dev, +- "MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n", ++ "MC command failed: portal: %#llx, dprc handle: %#x, command: %#x, status: %s (%#x)\n", + mc_io->portal_phys_addr, + (unsigned int)mc_cmd_hdr_read_token(cmd), + (unsigned int)mc_cmd_hdr_read_cmdid(cmd), +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/dpaa2-fd.h +@@ -0,0 +1,706 @@ ++/* ++ * Copyright 2014-2016 Freescale Semiconductor Inc. ++ * Copyright 2016 NXP ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPAA2_FD_H ++#define __FSL_DPAA2_FD_H ++ ++#include ++ ++/** ++ * DOC: DPAA2 FD - Frame Descriptor APIs for DPAA2 ++ * ++ * Frame Descriptors (FDs) are used to describe frame data in the DPAA2. ++ * Frames can be enqueued and dequeued to Frame Queues (FQs) which are consumed ++ * by the various DPAA accelerators (WRIOP, SEC, PME, DCE) ++ * ++ * There are three types of frames: single, scatter gather, and frame lists. ++ * ++ * The set of APIs in this file must be used to create, manipulate and ++ * query Frame Descriptors. ++ */ ++ ++/** ++ * struct dpaa2_fd - Struct describing FDs ++ * @words: for easier/faster copying the whole FD structure ++ * @addr: address in the FD ++ * @len: length in the FD ++ * @bpid: buffer pool ID ++ * @format_offset: format, offset, and short-length fields ++ * @frc: frame context ++ * @ctrl: control bits...including dd, sc, va, err, etc ++ * @flc: flow context address ++ * ++ * This structure represents the basic Frame Descriptor used in the system. ++ */ ++struct dpaa2_fd { ++ union { ++ u32 words[8]; ++ struct dpaa2_fd_simple { ++ __le64 addr; ++ __le32 len; ++ __le16 bpid; ++ __le16 format_offset; ++ __le32 frc; ++ __le32 ctrl; ++ __le64 flc; ++ } simple; ++ }; ++}; ++ ++#define FD_SHORT_LEN_FLAG_MASK 0x1 ++#define FD_SHORT_LEN_FLAG_SHIFT 14 ++#define FD_SHORT_LEN_MASK 0x3FFFF ++#define FD_OFFSET_MASK 0x0FFF ++#define FD_FORMAT_MASK 0x3 ++#define FD_FORMAT_SHIFT 12 ++#define FD_BPID_MASK 0x3FFF ++#define SG_SHORT_LEN_FLAG_MASK 0x1 ++#define SG_SHORT_LEN_FLAG_SHIFT 14 ++#define SG_SHORT_LEN_MASK 0x1FFFF ++#define SG_OFFSET_MASK 0x0FFF ++#define SG_FORMAT_MASK 0x3 ++#define SG_FORMAT_SHIFT 12 ++#define SG_BPID_MASK 0x3FFF ++#define SG_FINAL_FLAG_MASK 0x1 ++#define SG_FINAL_FLAG_SHIFT 15 ++#define FL_SHORT_LEN_FLAG_MASK 0x1 ++#define FL_SHORT_LEN_FLAG_SHIFT 14 ++#define FL_SHORT_LEN_MASK 0x3FFFF ++#define FL_OFFSET_MASK 0x0FFF ++#define FL_FORMAT_MASK 0x3 ++#define FL_FORMAT_SHIFT 12 ++#define FL_BPID_MASK 0x3FFF ++#define FL_FINAL_FLAG_MASK 0x1 ++#define FL_FINAL_FLAG_SHIFT 15 ++ ++/* Error bits in FD CTRL */ ++#define FD_CTRL_ERR_MASK 0x000000FF ++#define FD_CTRL_UFD 0x00000004 ++#define FD_CTRL_SBE 0x00000008 ++#define FD_CTRL_FLC 0x00000010 ++#define FD_CTRL_FSE 0x00000020 ++#define FD_CTRL_FAERR 0x00000040 ++ ++/* Annotation bits in FD CTRL */ ++#define FD_CTRL_PTA 0x00800000 ++#define FD_CTRL_PTV1 0x00400000 ++ ++enum dpaa2_fd_format { ++ dpaa2_fd_single = 0, ++ dpaa2_fd_list, ++ dpaa2_fd_sg ++}; ++ ++/** ++ * dpaa2_fd_get_addr() - get the addr field of frame descriptor ++ * @fd: the given frame descriptor ++ * ++ * Return the address in the frame descriptor. 
++ */ ++static inline dma_addr_t dpaa2_fd_get_addr(const struct dpaa2_fd *fd) ++{ ++ return (dma_addr_t)le64_to_cpu(fd->simple.addr); ++} ++ ++/** ++ * dpaa2_fd_set_addr() - Set the addr field of frame descriptor ++ * @fd: the given frame descriptor ++ * @addr: the address needs to be set in frame descriptor ++ */ ++static inline void dpaa2_fd_set_addr(struct dpaa2_fd *fd, dma_addr_t addr) ++{ ++ fd->simple.addr = cpu_to_le64(addr); ++} ++ ++/** ++ * dpaa2_fd_get_frc() - Get the frame context in the frame descriptor ++ * @fd: the given frame descriptor ++ * ++ * Return the frame context field in the frame descriptor. ++ */ ++static inline u32 dpaa2_fd_get_frc(const struct dpaa2_fd *fd) ++{ ++ return le32_to_cpu(fd->simple.frc); ++} ++ ++/** ++ * dpaa2_fd_set_frc() - Set the frame context in the frame descriptor ++ * @fd: the given frame descriptor ++ * @frc: the frame context needs to be set in frame descriptor ++ */ ++static inline void dpaa2_fd_set_frc(struct dpaa2_fd *fd, u32 frc) ++{ ++ fd->simple.frc = cpu_to_le32(frc); ++} ++ ++/** ++ * dpaa2_fd_get_ctrl() - Get the control bits in the frame descriptor ++ * @fd: the given frame descriptor ++ * ++ * Return the control bits field in the frame descriptor. ++ */ ++static inline u32 dpaa2_fd_get_ctrl(const struct dpaa2_fd *fd) ++{ ++ return le32_to_cpu(fd->simple.ctrl); ++} ++ ++/** ++ * dpaa2_fd_set_ctrl() - Set the control bits in the frame descriptor ++ * @fd: the given frame descriptor ++ * @ctrl: the control bits to be set in the frame descriptor ++ */ ++static inline void dpaa2_fd_set_ctrl(struct dpaa2_fd *fd, u32 ctrl) ++{ ++ fd->simple.ctrl = cpu_to_le32(ctrl); ++} ++ ++/** ++ * dpaa2_fd_get_flc() - Get the flow context in the frame descriptor ++ * @fd: the given frame descriptor ++ * ++ * Return the flow context in the frame descriptor. ++ */ ++static inline dma_addr_t dpaa2_fd_get_flc(const struct dpaa2_fd *fd) ++{ ++ return (dma_addr_t)le64_to_cpu(fd->simple.flc); ++} ++ ++/** ++ * dpaa2_fd_set_flc() - Set the flow context field of frame descriptor ++ * @fd: the given frame descriptor ++ * @flc_addr: the flow context needs to be set in frame descriptor ++ */ ++static inline void dpaa2_fd_set_flc(struct dpaa2_fd *fd, dma_addr_t flc_addr) ++{ ++ fd->simple.flc = cpu_to_le64(flc_addr); ++} ++ ++static inline bool dpaa2_fd_short_len(const struct dpaa2_fd *fd) ++{ ++ return !!((le16_to_cpu(fd->simple.format_offset) >> ++ FD_SHORT_LEN_FLAG_SHIFT) & FD_SHORT_LEN_FLAG_MASK); ++} ++ ++/** ++ * dpaa2_fd_get_len() - Get the length in the frame descriptor ++ * @fd: the given frame descriptor ++ * ++ * Return the length field in the frame descriptor. ++ */ ++static inline u32 dpaa2_fd_get_len(const struct dpaa2_fd *fd) ++{ ++ if (dpaa2_fd_short_len(fd)) ++ return le32_to_cpu(fd->simple.len) & FD_SHORT_LEN_MASK; ++ ++ return le32_to_cpu(fd->simple.len); ++} ++ ++/** ++ * dpaa2_fd_set_len() - Set the length field of frame descriptor ++ * @fd: the given frame descriptor ++ * @len: the length needs to be set in frame descriptor ++ */ ++static inline void dpaa2_fd_set_len(struct dpaa2_fd *fd, u32 len) ++{ ++ fd->simple.len = cpu_to_le32(len); ++} ++ ++/** ++ * dpaa2_fd_get_offset() - Get the offset field in the frame descriptor ++ * @fd: the given frame descriptor ++ * ++ * Return the offset. 
++ */ ++static inline uint16_t dpaa2_fd_get_offset(const struct dpaa2_fd *fd) ++{ ++ return le16_to_cpu(fd->simple.format_offset) & FD_OFFSET_MASK; ++} ++ ++/** ++ * dpaa2_fd_set_offset() - Set the offset field of frame descriptor ++ * @fd: the given frame descriptor ++ * @offset: the offset needs to be set in frame descriptor ++ */ ++static inline void dpaa2_fd_set_offset(struct dpaa2_fd *fd, uint16_t offset) ++{ ++ fd->simple.format_offset &= cpu_to_le16(~FD_OFFSET_MASK); ++ fd->simple.format_offset |= cpu_to_le16(offset); ++} ++ ++/** ++ * dpaa2_fd_get_format() - Get the format field in the frame descriptor ++ * @fd: the given frame descriptor ++ * ++ * Return the format. ++ */ ++static inline enum dpaa2_fd_format dpaa2_fd_get_format( ++ const struct dpaa2_fd *fd) ++{ ++ return (enum dpaa2_fd_format)((le16_to_cpu(fd->simple.format_offset) ++ >> FD_FORMAT_SHIFT) & FD_FORMAT_MASK); ++} ++ ++/** ++ * dpaa2_fd_set_format() - Set the format field of frame descriptor ++ * @fd: the given frame descriptor ++ * @format: the format needs to be set in frame descriptor ++ */ ++static inline void dpaa2_fd_set_format(struct dpaa2_fd *fd, ++ enum dpaa2_fd_format format) ++{ ++ fd->simple.format_offset &= ++ cpu_to_le16(~(FD_FORMAT_MASK << FD_FORMAT_SHIFT)); ++ fd->simple.format_offset |= cpu_to_le16(format << FD_FORMAT_SHIFT); ++} ++ ++/** ++ * dpaa2_fd_get_bpid() - Get the bpid field in the frame descriptor ++ * @fd: the given frame descriptor ++ * ++ * Return the buffer pool id. ++ */ ++static inline uint16_t dpaa2_fd_get_bpid(const struct dpaa2_fd *fd) ++{ ++ return le16_to_cpu(fd->simple.bpid) & FD_BPID_MASK; ++} ++ ++/** ++ * dpaa2_fd_set_bpid() - Set the bpid field of frame descriptor ++ * @fd: the given frame descriptor ++ * @bpid: buffer pool id to be set ++ */ ++static inline void dpaa2_fd_set_bpid(struct dpaa2_fd *fd, uint16_t bpid) ++{ ++ fd->simple.bpid &= cpu_to_le16(~(FD_BPID_MASK)); ++ fd->simple.bpid |= cpu_to_le16(bpid); ++} ++ ++/** ++ * struct dpaa2_sg_entry - the scatter-gathering structure ++ * @addr: address of the sg entry ++ * @len: length in this sg entry ++ * @bpid: buffer pool id ++ * @format_offset: format and offset fields ++ */ ++struct dpaa2_sg_entry { ++ __le64 addr; ++ __le32 len; ++ __le16 bpid; ++ __le16 format_offset; ++}; ++ ++enum dpaa2_sg_format { ++ dpaa2_sg_single = 0, ++ dpaa2_sg_frame_data, ++ dpaa2_sg_sgt_ext ++}; ++ ++/* Accessors for SG entry fields */ ++ ++/** ++ * dpaa2_sg_get_addr() - Get the address from SG entry ++ * @sg: the given scatter-gathering object ++ * ++ * Return the address. ++ */ ++static inline dma_addr_t dpaa2_sg_get_addr(const struct dpaa2_sg_entry *sg) ++{ ++ return le64_to_cpu((dma_addr_t)sg->addr); ++} ++ ++/** ++ * dpaa2_sg_set_addr() - Set the address in SG entry ++ * @sg: the given scatter-gathering object ++ * @addr: the address to be set ++ */ ++static inline void dpaa2_sg_set_addr(struct dpaa2_sg_entry *sg, dma_addr_t addr) ++{ ++ sg->addr = cpu_to_le64(addr); ++} ++ ++static inline bool dpaa2_sg_short_len(const struct dpaa2_sg_entry *sg) ++{ ++ return !!((le16_to_cpu(sg->format_offset) >> SG_SHORT_LEN_FLAG_SHIFT) ++ & SG_SHORT_LEN_FLAG_MASK); ++} ++ ++/** ++ * dpaa2_sg_get_len() - Get the length in SG entry ++ * @sg: the given scatter-gathering object ++ * ++ * Return the length. 
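These accessors are the intended way to touch FD fields, since they fold in the little-endian conversions and the packed format/offset bits. A minimal kernel-side sketch of building a single-buffer FD, assuming the caller already owns a DMA-mapped buffer and a buffer pool id:

/* Sketch: fill in a single-buffer FD (caller supplies a mapped IOVA). */
static void example_build_fd(struct dpaa2_fd *fd, dma_addr_t iova,
			     u32 frame_len, u16 bpid)
{
	memset(fd, 0, sizeof(*fd));
	dpaa2_fd_set_addr(fd, iova);
	dpaa2_fd_set_offset(fd, 0);
	dpaa2_fd_set_len(fd, frame_len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_bpid(fd, bpid);
}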
++ */ ++static inline u32 dpaa2_sg_get_len(const struct dpaa2_sg_entry *sg) ++{ ++ if (dpaa2_sg_short_len(sg)) ++ return le32_to_cpu(sg->len) & SG_SHORT_LEN_MASK; ++ ++ return le32_to_cpu(sg->len); ++} ++ ++/** ++ * dpaa2_sg_set_len() - Set the length in SG entry ++ * @sg: the given scatter-gathering object ++ * @len: the length to be set ++ */ ++static inline void dpaa2_sg_set_len(struct dpaa2_sg_entry *sg, u32 len) ++{ ++ sg->len = cpu_to_le32(len); ++} ++ ++/** ++ * dpaa2_sg_get_offset() - Get the offset in SG entry ++ * @sg: the given scatter-gathering object ++ * ++ * Return the offset. ++ */ ++static inline u16 dpaa2_sg_get_offset(const struct dpaa2_sg_entry *sg) ++{ ++ return le16_to_cpu(sg->format_offset) & SG_OFFSET_MASK; ++} ++ ++/** ++ * dpaa2_sg_set_offset() - Set the offset in SG entry ++ * @sg: the given scatter-gathering object ++ * @offset: the offset to be set ++ */ ++static inline void dpaa2_sg_set_offset(struct dpaa2_sg_entry *sg, ++ u16 offset) ++{ ++ sg->format_offset &= cpu_to_le16(~SG_OFFSET_MASK); ++ sg->format_offset |= cpu_to_le16(offset); ++} ++ ++/** ++ * dpaa2_sg_get_format() - Get the SG format in SG entry ++ * @sg: the given scatter-gathering object ++ * ++ * Return the format. ++ */ ++static inline enum dpaa2_sg_format ++ dpaa2_sg_get_format(const struct dpaa2_sg_entry *sg) ++{ ++ return (enum dpaa2_sg_format)((le16_to_cpu(sg->format_offset) ++ >> SG_FORMAT_SHIFT) & SG_FORMAT_MASK); ++} ++ ++/** ++ * dpaa2_sg_set_format() - Set the SG format in SG entry ++ * @sg: the given scatter-gathering object ++ * @format: the format to be set ++ */ ++static inline void dpaa2_sg_set_format(struct dpaa2_sg_entry *sg, ++ enum dpaa2_sg_format format) ++{ ++ sg->format_offset &= cpu_to_le16(~(SG_FORMAT_MASK << SG_FORMAT_SHIFT)); ++ sg->format_offset |= cpu_to_le16(format << SG_FORMAT_SHIFT); ++} ++ ++/** ++ * dpaa2_sg_get_bpid() - Get the buffer pool id in SG entry ++ * @sg: the given scatter-gathering object ++ * ++ * Return the bpid. ++ */ ++static inline u16 dpaa2_sg_get_bpid(const struct dpaa2_sg_entry *sg) ++{ ++ return le16_to_cpu(sg->bpid) & SG_BPID_MASK; ++} ++ ++/** ++ * dpaa2_sg_set_bpid() - Set the buffer pool id in SG entry ++ * @sg: the given scatter-gathering object ++ * @bpid: the bpid to be set ++ */ ++static inline void dpaa2_sg_set_bpid(struct dpaa2_sg_entry *sg, u16 bpid) ++{ ++ sg->bpid &= cpu_to_le16(~(SG_BPID_MASK)); ++ sg->bpid |= cpu_to_le16(bpid); ++} ++ ++/** ++ * dpaa2_sg_is_final() - Check final bit in SG entry ++ * @sg: the given scatter-gathering object ++ * ++ * Return bool. ++ */ ++static inline bool dpaa2_sg_is_final(const struct dpaa2_sg_entry *sg) ++{ ++ return !!(le16_to_cpu(sg->format_offset) >> SG_FINAL_FLAG_SHIFT); ++} ++ ++/** ++ * dpaa2_sg_set_final() - Set the final bit in SG entry ++ * @sg: the given scatter-gathering object ++ * @final: the final boolean to be set ++ */ ++static inline void dpaa2_sg_set_final(struct dpaa2_sg_entry *sg, bool final) ++{ ++ sg->format_offset &= cpu_to_le16(~(SG_FINAL_FLAG_MASK ++ << SG_FINAL_FLAG_SHIFT)); ++ sg->format_offset |= cpu_to_le16(final << SG_FINAL_FLAG_SHIFT); ++} ++ ++/** ++ * struct dpaa2_fl_entry - structure for frame list entry. 
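The scatter/gather accessors follow the same pattern, with the final bit terminating a table walk. A short sketch, assuming the SG table has already been made CPU-visible:

/* Sketch: sum the lengths of an SG table, stopping at the final entry. */
static u32 example_sg_total_len(const struct dpaa2_sg_entry *sgt)
{
	const struct dpaa2_sg_entry *sge = sgt;
	u32 total = 0;

	do {
		total += dpaa2_sg_get_len(sge);
	} while (!dpaa2_sg_is_final(sge++));

	return total;
}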
++ * @addr: address in the FLE
++ * @len: length in the FLE
++ * @bpid: buffer pool ID
++ * @format_offset: format, offset, and short-length fields
++ * @frc: frame context
++ * @ctrl: control bits...including pta, pvt1, pvt2, err, etc
++ * @flc: flow context address
++ */
++struct dpaa2_fl_entry {
++	__le64 addr;
++	__le32 len;
++	__le16 bpid;
++	__le16 format_offset;
++	__le32 frc;
++	__le32 ctrl;
++	__le64 flc;
++};
++
++enum dpaa2_fl_format {
++	dpaa2_fl_single = 0,
++	dpaa2_fl_res,
++	dpaa2_fl_sg
++};
++
++/**
++ * dpaa2_fl_get_addr() - get the addr field of FLE
++ * @fle: the given frame list entry
++ *
++ * Return the address in the frame list entry.
++ */
++static inline dma_addr_t dpaa2_fl_get_addr(const struct dpaa2_fl_entry *fle)
++{
++	return (dma_addr_t)le64_to_cpu(fle->addr);
++}
++
++/**
++ * dpaa2_fl_set_addr() - Set the addr field of FLE
++ * @fle: the given frame list entry
++ * @addr: the address needs to be set in frame list entry
++ */
++static inline void dpaa2_fl_set_addr(struct dpaa2_fl_entry *fle,
++				     dma_addr_t addr)
++{
++	fle->addr = cpu_to_le64(addr);
++}
++
++/**
++ * dpaa2_fl_get_frc() - Get the frame context in the FLE
++ * @fle: the given frame list entry
++ *
++ * Return the frame context field in the frame list entry.
++ */
++static inline u32 dpaa2_fl_get_frc(const struct dpaa2_fl_entry *fle)
++{
++	return le32_to_cpu(fle->frc);
++}
++
++/**
++ * dpaa2_fl_set_frc() - Set the frame context in the FLE
++ * @fle: the given frame list entry
++ * @frc: the frame context needs to be set in frame list entry
++ */
++static inline void dpaa2_fl_set_frc(struct dpaa2_fl_entry *fle, u32 frc)
++{
++	fle->frc = cpu_to_le32(frc);
++}
++
++/**
++ * dpaa2_fl_get_ctrl() - Get the control bits in the FLE
++ * @fle: the given frame list entry
++ *
++ * Return the control bits field in the frame list entry.
++ */
++static inline u32 dpaa2_fl_get_ctrl(const struct dpaa2_fl_entry *fle)
++{
++	return le32_to_cpu(fle->ctrl);
++}
++
++/**
++ * dpaa2_fl_set_ctrl() - Set the control bits in the FLE
++ * @fle: the given frame list entry
++ * @ctrl: the control bits to be set in the frame list entry
++ */
++static inline void dpaa2_fl_set_ctrl(struct dpaa2_fl_entry *fle, u32 ctrl)
++{
++	fle->ctrl = cpu_to_le32(ctrl);
++}
++
++/**
++ * dpaa2_fl_get_flc() - Get the flow context in the FLE
++ * @fle: the given frame list entry
++ *
++ * Return the flow context in the frame list entry.
++ */
++static inline dma_addr_t dpaa2_fl_get_flc(const struct dpaa2_fl_entry *fle)
++{
++	return (dma_addr_t)le64_to_cpu(fle->flc);
++}
++
++/**
++ * dpaa2_fl_set_flc() - Set the flow context field of FLE
++ * @fle: the given frame list entry
++ * @flc_addr: the flow context needs to be set in frame list entry
++ */
++static inline void dpaa2_fl_set_flc(struct dpaa2_fl_entry *fle,
++				    dma_addr_t flc_addr)
++{
++	fle->flc = cpu_to_le64(flc_addr);
++}
++
++static inline bool dpaa2_fl_short_len(const struct dpaa2_fl_entry *fle)
++{
++	return !!((le16_to_cpu(fle->format_offset) >>
++		   FL_SHORT_LEN_FLAG_SHIFT) & FL_SHORT_LEN_FLAG_MASK);
++}
++
++/**
++ * dpaa2_fl_get_len() - Get the length in the FLE
++ * @fle: the given frame list entry
++ *
++ * Return the length field in the frame list entry.
++ */ ++static inline u32 dpaa2_fl_get_len(const struct dpaa2_fl_entry *fle) ++{ ++ if (dpaa2_fl_short_len(fle)) ++ return le32_to_cpu(fle->len) & FL_SHORT_LEN_MASK; ++ ++ return le32_to_cpu(fle->len); ++} ++ ++/** ++ * dpaa2_fl_set_len() - Set the length field of FLE ++ * @fle: the given frame list entry ++ * @len: the length needs to be set in frame list entry ++ */ ++static inline void dpaa2_fl_set_len(struct dpaa2_fl_entry *fle, u32 len) ++{ ++ fle->len = cpu_to_le32(len); ++} ++ ++/** ++ * dpaa2_fl_get_offset() - Get the offset field in the frame list entry ++ * @fle: the given frame list entry ++ * ++ * Return the offset. ++ */ ++static inline u16 dpaa2_fl_get_offset(const struct dpaa2_fl_entry *fle) ++{ ++ return le16_to_cpu(fle->format_offset) & FL_OFFSET_MASK; ++} ++ ++/** ++ * dpaa2_fl_set_offset() - Set the offset field of FLE ++ * @fle: the given frame list entry ++ * @offset: the offset needs to be set in frame list entry ++ */ ++static inline void dpaa2_fl_set_offset(struct dpaa2_fl_entry *fle, u16 offset) ++{ ++ fle->format_offset &= cpu_to_le16(~FL_OFFSET_MASK); ++ fle->format_offset |= cpu_to_le16(offset); ++} ++ ++/** ++ * dpaa2_fl_get_format() - Get the format field in the FLE ++ * @fle: the given frame list entry ++ * ++ * Return the format. ++ */ ++static inline enum dpaa2_fl_format dpaa2_fl_get_format( ++ const struct dpaa2_fl_entry *fle) ++{ ++ return (enum dpaa2_fl_format)((le16_to_cpu(fle->format_offset) >> ++ FL_FORMAT_SHIFT) & FL_FORMAT_MASK); ++} ++ ++/** ++ * dpaa2_fl_set_format() - Set the format field of FLE ++ * @fle: the given frame list entry ++ * @format: the format needs to be set in frame list entry ++ */ ++static inline void dpaa2_fl_set_format(struct dpaa2_fl_entry *fle, ++ enum dpaa2_fl_format format) ++{ ++ fle->format_offset &= cpu_to_le16(~(FL_FORMAT_MASK << FL_FORMAT_SHIFT)); ++ fle->format_offset |= cpu_to_le16(format << FL_FORMAT_SHIFT); ++} ++ ++/** ++ * dpaa2_fl_get_bpid() - Get the bpid field in the FLE ++ * @fle: the given frame list entry ++ * ++ * Return the buffer pool id. ++ */ ++static inline u16 dpaa2_fl_get_bpid(const struct dpaa2_fl_entry *fle) ++{ ++ return le16_to_cpu(fle->bpid) & FL_BPID_MASK; ++} ++ ++/** ++ * dpaa2_fl_set_bpid() - Set the bpid field of FLE ++ * @fle: the given frame list entry ++ * @bpid: buffer pool id to be set ++ */ ++static inline void dpaa2_fl_set_bpid(struct dpaa2_fl_entry *fle, u16 bpid) ++{ ++ fle->bpid &= cpu_to_le16(~(FL_BPID_MASK)); ++ fle->bpid |= cpu_to_le16(bpid); ++} ++ ++/** ++ * dpaa2_fl_is_final() - Check final bit in FLE ++ * @fle: the given frame list entry ++ * ++ * Return bool. ++ */ ++static inline bool dpaa2_fl_is_final(const struct dpaa2_fl_entry *fle) ++{ ++ return !!(le16_to_cpu(fle->format_offset) >> FL_FINAL_FLAG_SHIFT); ++} ++ ++/** ++ * dpaa2_fl_set_final() - Set the final bit in FLE ++ * @fle: the given frame list entry ++ * @final: the final boolean to be set ++ */ ++static inline void dpaa2_fl_set_final(struct dpaa2_fl_entry *fle, bool final) ++{ ++ fle->format_offset &= cpu_to_le16(~(FL_FINAL_FLAG_MASK << ++ FL_FINAL_FLAG_SHIFT)); ++ fle->format_offset |= cpu_to_le16(final << FL_FINAL_FLAG_SHIFT); ++} ++ ++#endif /* __FSL_DPAA2_FD_H */ +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/dpaa2-global.h +@@ -0,0 +1,202 @@ ++/* ++ * Copyright 2014-2016 Freescale Semiconductor Inc. 
++ * Copyright 2016 NXP ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPAA2_GLOBAL_H ++#define __FSL_DPAA2_GLOBAL_H ++ ++#include ++#include ++#include "dpaa2-fd.h" ++ ++struct dpaa2_dq { ++ union { ++ struct common { ++ u8 verb; ++ u8 reserved[63]; ++ } common; ++ struct dq { ++ u8 verb; ++ u8 stat; ++ __le16 seqnum; ++ __le16 oprid; ++ u8 reserved; ++ u8 tok; ++ __le32 fqid; ++ u32 reserved2; ++ __le32 fq_byte_cnt; ++ __le32 fq_frm_cnt; ++ __le64 fqd_ctx; ++ u8 fd[32]; ++ } dq; ++ struct scn { ++ u8 verb; ++ u8 stat; ++ u8 state; ++ u8 reserved; ++ __le32 rid_tok; ++ __le64 ctx; ++ } scn; ++ }; ++}; ++ ++/* Parsing frame dequeue results */ ++/* FQ empty */ ++#define DPAA2_DQ_STAT_FQEMPTY 0x80 ++/* FQ held active */ ++#define DPAA2_DQ_STAT_HELDACTIVE 0x40 ++/* FQ force eligible */ ++#define DPAA2_DQ_STAT_FORCEELIGIBLE 0x20 ++/* valid frame */ ++#define DPAA2_DQ_STAT_VALIDFRAME 0x10 ++/* FQ ODP enable */ ++#define DPAA2_DQ_STAT_ODPVALID 0x04 ++/* volatile dequeue */ ++#define DPAA2_DQ_STAT_VOLATILE 0x02 ++/* volatile dequeue command is expired */ ++#define DPAA2_DQ_STAT_EXPIRED 0x01 ++ ++#define DQ_FQID_MASK 0x00FFFFFF ++#define DQ_FRAME_COUNT_MASK 0x00FFFFFF ++ ++/** ++ * dpaa2_dq_flags() - Get the stat field of dequeue response ++ * @dq: the dequeue result. ++ */ ++static inline u32 dpaa2_dq_flags(const struct dpaa2_dq *dq) ++{ ++ return dq->dq.stat; ++} ++ ++/** ++ * dpaa2_dq_is_pull() - Check whether the dq response is from a pull ++ * command. ++ * @dq: the dequeue result ++ * ++ * Return 1 for volatile(pull) dequeue, 0 for static dequeue. ++ */ ++static inline int dpaa2_dq_is_pull(const struct dpaa2_dq *dq) ++{ ++ return (int)(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VOLATILE); ++} ++ ++/** ++ * dpaa2_dq_is_pull_complete() - Check whether the pull command is completed. 
++ * @dq: the dequeue result
++ *
++ * Return boolean.
++ */
++static inline bool dpaa2_dq_is_pull_complete(const struct dpaa2_dq *dq)
++{
++	return !!(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_EXPIRED);
++}
++
++/**
++ * dpaa2_dq_seqnum() - Get the seqnum field in dequeue response
++ * @dq: the dequeue result
++ *
++ * seqnum is valid only if VALIDFRAME flag is TRUE
++ *
++ * Return seqnum.
++ */
++static inline u16 dpaa2_dq_seqnum(const struct dpaa2_dq *dq)
++{
++	return le16_to_cpu(dq->dq.seqnum);
++}
++
++/**
++ * dpaa2_dq_odpid() - Get the odpid field in dequeue response
++ * @dq: the dequeue result
++ *
++ * odpid is valid only if ODPVALID flag is TRUE.
++ *
++ * Return odpid.
++ */
++static inline u16 dpaa2_dq_odpid(const struct dpaa2_dq *dq)
++{
++	return le16_to_cpu(dq->dq.oprid);
++}
++
++/**
++ * dpaa2_dq_fqid() - Get the fqid in dequeue response
++ * @dq: the dequeue result
++ *
++ * Return fqid.
++ */
++static inline u32 dpaa2_dq_fqid(const struct dpaa2_dq *dq)
++{
++	return le32_to_cpu(dq->dq.fqid) & DQ_FQID_MASK;
++}
++
++/**
++ * dpaa2_dq_byte_count() - Get the byte count in dequeue response
++ * @dq: the dequeue result
++ *
++ * Return the byte count remaining in the FQ.
++ */
++static inline u32 dpaa2_dq_byte_count(const struct dpaa2_dq *dq)
++{
++	return le32_to_cpu(dq->dq.fq_byte_cnt);
++}
++
++/**
++ * dpaa2_dq_frame_count() - Get the frame count in dequeue response
++ * @dq: the dequeue result
++ *
++ * Return the frame count remaining in the FQ.
++ */
++static inline u32 dpaa2_dq_frame_count(const struct dpaa2_dq *dq)
++{
++	return le32_to_cpu(dq->dq.fq_frm_cnt) & DQ_FRAME_COUNT_MASK;
++}
++
++/**
++ * dpaa2_dq_fqd_ctx() - Get the frame queue context in dequeue response
++ * @dq: the dequeue result
++ *
++ * Return the frame queue context.
++ */
++static inline u64 dpaa2_dq_fqd_ctx(const struct dpaa2_dq *dq)
++{
++	return le64_to_cpu(dq->dq.fqd_ctx);
++}
++
++/**
++ * dpaa2_dq_fd() - Get the frame descriptor in dequeue response
++ * @dq: the dequeue result
++ *
++ * Return the frame descriptor.
++ */
++static inline const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq)
++{
++	return (const struct dpaa2_fd *)&dq->dq.fd[0];
++}
++
++#endif /* __FSL_DPAA2_GLOBAL_H */
+--- /dev/null
++++ b/drivers/staging/fsl-mc/include/dpaa2-io.h
+@@ -0,0 +1,190 @@
++/*
++ * Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ *	notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ *	notice, this list of conditions and the following disclaimer in the
++ *	documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ *	names of its contributors may be used to endorse or promote products
++ *	derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
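Putting the dequeue-response accessors together: a consumer checks the status bits first and only then extracts the FD, since responses without VALIDFRAME (such as the terminal entry of a pull) carry no frame. A minimal sketch:

/* Sketch: extract the FD from one dequeue response, if it carries one. */
static const struct dpaa2_fd *example_dq_to_fd(const struct dpaa2_dq *dq)
{
	if (!(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VALIDFRAME))
		return NULL;	/* e.g. the terminal entry of a pull */

	return dpaa2_dq_fd(dq);
}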
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPAA2_IO_H
++#define __FSL_DPAA2_IO_H
++
++#include
++#include
++
++#include "dpaa2-fd.h"
++#include "dpaa2-global.h"
++
++struct dpaa2_io;
++struct dpaa2_io_store;
++struct device;
++
++/**
++ * DOC: DPIO Service
++ *
++ * The DPIO service provides APIs for users to interact with the datapath
++ * by enqueuing and dequeuing frame descriptors.
++ *
++ * The following set of APIs can be used to enqueue and dequeue frames
++ * as well as to produce notification callbacks when data is available
++ * for dequeue.
++ */
++
++/**
++ * struct dpaa2_io_desc - The DPIO descriptor
++ * @receives_notifications: Use notification mode. Non-zero if the DPIO
++ *			has a channel.
++ * @has_8prio:		Set to non-zero for channel with 8 priority WQs. Ignored
++ *			unless receives_notifications is TRUE.
++ * @cpu:		The cpu index that, at a minimum, interrupt handlers
++ *			will execute on.
++ * @regs_cena:		The cache enabled regs.
++ * @regs_cinh:		The cache inhibited regs
++ * @dpio_id:		The dpio index
++ * @qman_version:	The qman version
++ *
++ * Describes the attributes and features of the DPIO object.
++ */
++struct dpaa2_io_desc {
++	int receives_notifications;
++	int has_8prio;
++	int cpu;
++	void *regs_cena;
++	void *regs_cinh;
++	int dpio_id;
++	u32 qman_version;
++};
++
++struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc);
++
++void dpaa2_io_down(struct dpaa2_io *d);
++
++irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj);
++
++/**
++ * struct dpaa2_io_notification_ctx - The DPIO notification context structure
++ * @cb:		  The callback to be invoked when the notification arrives
++ * @is_cdan:	  Zero for FQDAN, non-zero for CDAN
++ * @id:		  FQID or channel ID, needed for rearm
++ * @desired_cpu:  The cpu on which the notifications will show up. -1 means
++ *		  any CPU.
++ * @dpio_id:	  The dpio index
++ * @qman64:	  The 64-bit context value that shows up in the FQDAN/CDAN.
++ * @node:	  The list node
++ * @dpio_private: The dpio object internal to dpio_service
++ *
++ * Used when a FQDAN/CDAN registration is made by drivers.
++ */ ++struct dpaa2_io_notification_ctx { ++ void (*cb)(struct dpaa2_io_notification_ctx *); ++ int is_cdan; ++ u32 id; ++ int desired_cpu; ++ int dpio_id; ++ u64 qman64; ++ struct list_head node; ++ void *dpio_private; ++}; ++ ++int dpaa2_io_service_register(struct dpaa2_io *service, ++ struct dpaa2_io_notification_ctx *ctx); ++void dpaa2_io_service_deregister(struct dpaa2_io *service, ++ struct dpaa2_io_notification_ctx *ctx); ++int dpaa2_io_service_rearm(struct dpaa2_io *service, ++ struct dpaa2_io_notification_ctx *ctx); ++ ++int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid, ++ struct dpaa2_io_store *s); ++int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid, ++ struct dpaa2_io_store *s); ++ ++int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid, ++ const struct dpaa2_fd *fd); ++int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio, ++ u16 qdbin, const struct dpaa2_fd *fd); ++int dpaa2_io_service_release(struct dpaa2_io *d, u32 bpid, ++ const u64 *buffers, unsigned int num_buffers); ++int dpaa2_io_service_acquire(struct dpaa2_io *d, u32 bpid, ++ u64 *buffers, unsigned int num_buffers); ++ ++struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames, ++ struct device *dev); ++void dpaa2_io_store_destroy(struct dpaa2_io_store *s); ++struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last); ++ ++#ifdef CONFIG_FSL_QBMAN_DEBUG ++int dpaa2_io_query_fq_count(struct dpaa2_io *d, uint32_t fqid, ++ uint32_t *fcnt, uint32_t *bcnt); ++int dpaa2_io_query_bp_count(struct dpaa2_io *d, uint32_t bpid, ++ uint32_t *num); ++#endif ++ ++ ++/***************/ ++/* CSCN */ ++/***************/ ++ ++/** ++ * struct dpaa2_cscn - The CSCN message format ++ * @verb: identifies the type of message (should be 0x27). ++ * @stat: status bits related to dequeuing response (not used) ++ * @state: bit 0 = 0/1 if CG is no/is congested ++ * @reserved: reserved byte ++ * @cgid: congest grp ID - the first 16 bits ++ * @ctx: context data ++ * ++ * Congestion management can be implemented in software through ++ * the use of Congestion State Change Notifications (CSCN). These ++ * are messages written by DPAA2 hardware to memory whenever the ++ * instantaneous count (I_CNT field in the CG) exceeds the ++ * Congestion State (CS) entrance threshold, signifying congestion ++ * entrance, or when the instantaneous count returns below exit ++ * threshold, signifying congestion exit. The format of the message ++ * is given by the dpaa2_cscn structure. Bit 0 of the state field ++ * represents congestion state written by the hardware. ++ */ ++struct dpaa2_cscn { ++ u8 verb; ++ u8 stat; ++ u8 state; ++ u8 reserved; ++ __le32 cgid; ++ __le64 ctx; ++}; ++ ++#define DPAA2_CSCN_SIZE 64 ++#define DPAA2_CSCN_ALIGN 16 ++ ++#define DPAA2_CSCN_STATE_MASK 0x1 ++#define DPAA2_CSCN_CONGESTED 1 ++ ++static inline bool dpaa2_cscn_state_congested(struct dpaa2_cscn *cscn) ++{ ++ return ((cscn->state & DPAA2_CSCN_STATE_MASK) == DPAA2_CSCN_CONGESTED); ++} ++ ++#endif /* __FSL_DPAA2_IO_H */ +--- a/drivers/staging/fsl-mc/include/dpbp-cmd.h ++++ /dev/null +@@ -1,185 +0,0 @@ +-/* Copyright 2013-2016 Freescale Semiconductor Inc. +- * +- * Redistribution and use in source and binary forms, with or without +- * modification, are permitted provided that the following conditions are met: +- * * Redistributions of source code must retain the above copyright +- * notice, this list of conditions and the following disclaimer. 
+- * * Redistributions in binary form must reproduce the above copyright +- * notice, this list of conditions and the following disclaimer in the +- * documentation and/or other materials provided with the distribution. +- * * Neither the name of the above-listed copyright holders nor the +- * names of any contributors may be used to endorse or promote products +- * derived from this software without specific prior written permission. +- * +- * +- * ALTERNATIVELY, this software may be distributed under the terms of the +- * GNU General Public License ("GPL") as published by the Free Software +- * Foundation, either version 2 of that License or (at your option) any +- * later version. +- * +- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE +- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +- * POSSIBILITY OF SUCH DAMAGE. +- */ +-#ifndef _FSL_DPBP_CMD_H +-#define _FSL_DPBP_CMD_H +- +-/* DPBP Version */ +-#define DPBP_VER_MAJOR 2 +-#define DPBP_VER_MINOR 2 +- +-/* Command IDs */ +-#define DPBP_CMDID_CLOSE 0x800 +-#define DPBP_CMDID_OPEN 0x804 +-#define DPBP_CMDID_CREATE 0x904 +-#define DPBP_CMDID_DESTROY 0x900 +- +-#define DPBP_CMDID_ENABLE 0x002 +-#define DPBP_CMDID_DISABLE 0x003 +-#define DPBP_CMDID_GET_ATTR 0x004 +-#define DPBP_CMDID_RESET 0x005 +-#define DPBP_CMDID_IS_ENABLED 0x006 +- +-#define DPBP_CMDID_SET_IRQ 0x010 +-#define DPBP_CMDID_GET_IRQ 0x011 +-#define DPBP_CMDID_SET_IRQ_ENABLE 0x012 +-#define DPBP_CMDID_GET_IRQ_ENABLE 0x013 +-#define DPBP_CMDID_SET_IRQ_MASK 0x014 +-#define DPBP_CMDID_GET_IRQ_MASK 0x015 +-#define DPBP_CMDID_GET_IRQ_STATUS 0x016 +-#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017 +- +-#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0 +-#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1 +- +-struct dpbp_cmd_open { +- __le32 dpbp_id; +-}; +- +-#define DPBP_ENABLE 0x1 +- +-struct dpbp_rsp_is_enabled { +- u8 enabled; +-}; +- +-struct dpbp_cmd_set_irq { +- /* cmd word 0 */ +- u8 irq_index; +- u8 pad[3]; +- __le32 irq_val; +- /* cmd word 1 */ +- __le64 irq_addr; +- /* cmd word 2 */ +- __le32 irq_num; +-}; +- +-struct dpbp_cmd_get_irq { +- __le32 pad; +- u8 irq_index; +-}; +- +-struct dpbp_rsp_get_irq { +- /* response word 0 */ +- __le32 irq_val; +- __le32 pad; +- /* response word 1 */ +- __le64 irq_addr; +- /* response word 2 */ +- __le32 irq_num; +- __le32 type; +-}; +- +-struct dpbp_cmd_set_irq_enable { +- u8 enable; +- u8 pad[3]; +- u8 irq_index; +-}; +- +-struct dpbp_cmd_get_irq_enable { +- __le32 pad; +- u8 irq_index; +-}; +- +-struct dpbp_rsp_get_irq_enable { +- u8 enabled; +-}; +- +-struct dpbp_cmd_set_irq_mask { +- __le32 mask; +- u8 irq_index; +-}; +- +-struct dpbp_cmd_get_irq_mask { +- __le32 pad; +- u8 irq_index; +-}; +- +-struct dpbp_rsp_get_irq_mask { +- __le32 mask; +-}; +- +-struct dpbp_cmd_get_irq_status { +- __le32 status; +- u8 irq_index; +-}; +- +-struct dpbp_rsp_get_irq_status { +- __le32 status; +-}; +- 
+-struct dpbp_cmd_clear_irq_status { +- __le32 status; +- u8 irq_index; +-}; +- +-struct dpbp_rsp_get_attributes { +- /* response word 0 */ +- __le16 pad; +- __le16 bpid; +- __le32 id; +- /* response word 1 */ +- __le16 version_major; +- __le16 version_minor; +-}; +- +-struct dpbp_cmd_set_notifications { +- /* cmd word 0 */ +- __le32 depletion_entry; +- __le32 depletion_exit; +- /* cmd word 1 */ +- __le32 surplus_entry; +- __le32 surplus_exit; +- /* cmd word 2 */ +- __le16 options; +- __le16 pad[3]; +- /* cmd word 3 */ +- __le64 message_ctx; +- /* cmd word 4 */ +- __le64 message_iova; +-}; +- +-struct dpbp_rsp_get_notifications { +- /* response word 0 */ +- __le32 depletion_entry; +- __le32 depletion_exit; +- /* response word 1 */ +- __le32 surplus_entry; +- __le32 surplus_exit; +- /* response word 2 */ +- __le16 options; +- __le16 pad[3]; +- /* response word 3 */ +- __le64 message_ctx; +- /* response word 4 */ +- __le64 message_iova; +-}; +- +-#endif /* _FSL_DPBP_CMD_H */ +--- a/drivers/staging/fsl-mc/include/dpbp.h ++++ b/drivers/staging/fsl-mc/include/dpbp.h +@@ -1,4 +1,5 @@ +-/* Copyright 2013-2015 Freescale Semiconductor Inc. ++/* ++ * Copyright 2013-2016 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: +@@ -32,7 +33,8 @@ + #ifndef __FSL_DPBP_H + #define __FSL_DPBP_H + +-/* Data Path Buffer Pool API ++/* ++ * Data Path Buffer Pool API + * Contains initialization APIs and runtime control APIs for DPBP + */ + +@@ -44,25 +46,8 @@ int dpbp_open(struct fsl_mc_io *mc_io, + u16 *token); + + int dpbp_close(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token); +- +-/** +- * struct dpbp_cfg - Structure representing DPBP configuration +- * @options: place holder +- */ +-struct dpbp_cfg { +- u32 options; +-}; +- +-int dpbp_create(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- const struct dpbp_cfg *cfg, +- u16 *token); +- +-int dpbp_destroy(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token); ++ u32 cmd_flags, ++ u16 token); + + int dpbp_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, +@@ -82,139 +67,24 @@ int dpbp_reset(struct fsl_mc_io *mc_io, + u16 token); + + /** +- * struct dpbp_irq_cfg - IRQ configuration +- * @addr: Address that must be written to signal a message-based interrupt +- * @val: Value to write into irq_addr address +- * @irq_num: A user defined number associated with this IRQ +- */ +-struct dpbp_irq_cfg { +- u64 addr; +- u32 val; +- int irq_num; +-}; +- +-int dpbp_set_irq(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- struct dpbp_irq_cfg *irq_cfg); +- +-int dpbp_get_irq(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- int *type, +- struct dpbp_irq_cfg *irq_cfg); +- +-int dpbp_set_irq_enable(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u8 en); +- +-int dpbp_get_irq_enable(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u8 *en); +- +-int dpbp_set_irq_mask(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u32 mask); +- +-int dpbp_get_irq_mask(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u32 *mask); +- +-int dpbp_get_irq_status(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u32 *status); +- +-int dpbp_clear_irq_status(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u32 status); +- +-/** + * struct dpbp_attr 
- Structure representing DPBP attributes + * @id: DPBP object ID +- * @version: DPBP version + * @bpid: Hardware buffer pool ID; should be used as an argument in + * acquire/release operations on buffers + */ + struct dpbp_attr { + int id; +- /** +- * struct version - Structure representing DPBP version +- * @major: DPBP major version +- * @minor: DPBP minor version +- */ +- struct { +- u16 major; +- u16 minor; +- } version; + u16 bpid; + }; + +-int dpbp_get_attributes(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- struct dpbp_attr *attr); +- +-/** +- * DPBP notifications options +- */ +- +-/** +- * BPSCN write will attempt to allocate into a cache (coherent write) +- */ +-#define DPBP_NOTIF_OPT_COHERENT_WRITE 0x00000001 +- +-/** +- * struct dpbp_notification_cfg - Structure representing DPBP notifications +- * towards software +- * @depletion_entry: below this threshold the pool is "depleted"; +- * set it to '0' to disable it +- * @depletion_exit: greater than or equal to this threshold the pool exit its +- * "depleted" state +- * @surplus_entry: above this threshold the pool is in "surplus" state; +- * set it to '0' to disable it +- * @surplus_exit: less than or equal to this threshold the pool exit its +- * "surplus" state +- * @message_iova: MUST be given if either 'depletion_entry' or 'surplus_entry' +- * is not '0' (enable); I/O virtual address (must be in DMA-able memory), +- * must be 16B aligned. +- * @message_ctx: The context that will be part of the BPSCN message and will +- * be written to 'message_iova' +- * @options: Mask of available options; use 'DPBP_NOTIF_OPT_' values +- */ +-struct dpbp_notification_cfg { +- u32 depletion_entry; +- u32 depletion_exit; +- u32 surplus_entry; +- u32 surplus_exit; +- u64 message_iova; +- u64 message_ctx; +- u16 options; +-}; +- +-int dpbp_set_notifications(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- struct dpbp_notification_cfg *cfg); +- +-int dpbp_get_notifications(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- struct dpbp_notification_cfg *cfg); +- +-/** @} */ ++int dpbp_get_attributes(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dpbp_attr *attr); ++ ++int dpbp_get_api_version(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 *major_ver, ++ u16 *minor_ver); + + #endif /* __FSL_DPBP_H */ +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/dpcon.h +@@ -0,0 +1,115 @@ ++/* Copyright 2013-2016 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPCON_H ++#define __FSL_DPCON_H ++ ++/* Data Path Concentrator API ++ * Contains initialization APIs and runtime control APIs for DPCON ++ */ ++ ++struct fsl_mc_io; ++ ++/** General DPCON macros */ ++ ++/** ++ * Use it to disable notifications; see dpcon_set_notification() ++ */ ++#define DPCON_INVALID_DPIO_ID (int)(-1) ++ ++int dpcon_open(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ int dpcon_id, ++ u16 *token); ++ ++int dpcon_close(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ ++int dpcon_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ ++int dpcon_disable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ ++int dpcon_is_enabled(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ int *en); ++ ++int dpcon_reset(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ ++/** ++ * struct dpcon_attr - Structure representing DPCON attributes ++ * @id: DPCON object ID ++ * @qbman_ch_id: Channel ID to be used by dequeue operation ++ * @num_priorities: Number of priorities for the DPCON channel (1-8) ++ */ ++struct dpcon_attr { ++ int id; ++ u16 qbman_ch_id; ++ u8 num_priorities; ++}; ++ ++int dpcon_get_attributes(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dpcon_attr *attr); ++ ++/** ++ * struct dpcon_notification_cfg - Structure representing notification params ++ * @dpio_id: DPIO object ID; must be configured with a notification channel; ++ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID'; ++ * @priority: Priority selection within the DPIO channel; valid values ++ * are 0-7, depending on the number of priorities in that channel ++ * @user_ctx: User context value provided with each CDAN message ++ */ ++struct dpcon_notification_cfg { ++ int dpio_id; ++ u8 priority; ++ u64 user_ctx; ++}; ++ ++int dpcon_set_notification(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dpcon_notification_cfg *cfg); ++ ++int dpcon_get_api_version(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 *major_ver, ++ u16 *minor_ver); ++ ++#endif /* __FSL_DPCON_H */ +--- a/drivers/staging/fsl-mc/include/dpmng.h ++++ b/drivers/staging/fsl-mc/include/dpmng.h +@@ -1,4 +1,5 @@ +-/* Copyright 2013-2015 Freescale Semiconductor Inc. ++/* ++ * Copyright 2013-2016 Freescale Semiconductor Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: +@@ -32,7 +33,8 @@ + #ifndef __FSL_DPMNG_H + #define __FSL_DPMNG_H + +-/* Management Complex General API ++/* ++ * Management Complex General API + * Contains general API for the Management Complex firmware + */ + +@@ -58,12 +60,8 @@ struct mc_version { + u32 revision; + }; + +-int mc_get_version(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- struct mc_version *mc_ver_info); +- +-int dpmng_get_container_id(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- int *container_id); ++int mc_get_version(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ struct mc_version *mc_ver_info); + + #endif /* __FSL_DPMNG_H */ +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/dpopr.h +@@ -0,0 +1,110 @@ ++/* ++ * Copyright 2017 NXP ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPOPR_H_ ++#define __FSL_DPOPR_H_ ++ ++/* Data Path Order Restoration API ++ * Contains initialization APIs and runtime APIs for the Order Restoration ++ */ ++ ++/** Order Restoration properties */ ++ ++/** ++ * Create a new Order Point Record option ++ */ ++#define OPR_OPT_CREATE 0x1 ++/** ++ * Retire an existing Order Point Record option ++ */ ++#define OPR_OPT_RETIRE 0x2 ++ ++/** ++ * struct opr_cfg - Structure representing OPR configuration ++ * @oprrws: Order point record (OPR) restoration window size (0 to 5) ++ * 0 - Window size is 32 frames. ++ * 1 - Window size is 64 frames. ++ * 2 - Window size is 128 frames. ++ * 3 - Window size is 256 frames. ++ * 4 - Window size is 512 frames. ++ * 5 - Window size is 1024 frames. 
++ * @oa: OPR auto advance NESN window size (0 disabled, 1 enabled) ++ * @olws: OPR acceptable late arrival window size (0 to 3) ++ * 0 - Disabled. Late arrivals are always rejected. ++ * 1 - Window size is 32 frames. ++ * 2 - Window size is the same as the OPR restoration ++ * window size configured in the OPRRWS field. ++ * 3 - Window size is 8192 frames. Late arrivals are ++ * always accepted. ++ * @oeane: Order restoration list (ORL) resource exhaustion ++ * advance NESN enable (0 disabled, 1 enabled) ++ * @oloe: OPR loose ordering enable (0 disabled, 1 enabled) ++ */ ++struct opr_cfg { ++ u8 oprrws; ++ u8 oa; ++ u8 olws; ++ u8 oeane; ++ u8 oloe; ++}; ++ ++/** ++ * struct opr_qry - Structure representing OPR configuration ++ * @enable: Enabled state ++ * @rip: Retirement In Progress ++ * @ndsn: Next dispensed sequence number ++ * @nesn: Next expected sequence number ++ * @ea_hseq: Early arrival head sequence number ++ * @hseq_nlis: HSEQ not last in sequence ++ * @ea_tseq: Early arrival tail sequence number ++ * @tseq_nlis: TSEQ not last in sequence ++ * @ea_tptr: Early arrival tail pointer ++ * @ea_hptr: Early arrival head pointer ++ * @opr_id: Order Point Record ID ++ * @opr_vid: Order Point Record Virtual ID ++ */ ++struct opr_qry { ++ char enable; ++ char rip; ++ u16 ndsn; ++ u16 nesn; ++ u16 ea_hseq; ++ char hseq_nlis; ++ u16 ea_tseq; ++ char tseq_nlis; ++ u16 ea_tptr; ++ u16 ea_hptr; ++ u16 opr_id; ++ u16 opr_vid; ++}; ++ ++#endif /* __FSL_DPOPR_H_ */ +--- a/drivers/staging/fsl-mc/include/dprc.h ++++ b/drivers/staging/fsl-mc/include/dprc.h +@@ -1,4 +1,5 @@ +-/* Copyright 2013-2015 Freescale Semiconductor Inc. ++/* ++ * Copyright 2013-2016 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: +@@ -34,26 +35,13 @@ + + #include "mc-cmd.h" + +-/* Data Path Resource Container API ++/* ++ * Data Path Resource Container API + * Contains DPRC API for managing and querying DPAA resources + */ + + struct fsl_mc_io; + +-/** +- * Set this value as the icid value in dprc_cfg structure when creating a +- * container, in case the ICID is not selected by the user and should be +- * allocated by the DPRC from the pool of ICIDs. +- */ +-#define DPRC_GET_ICID_FROM_POOL (u16)(~(0)) +- +-/** +- * Set this value as the portal_id value in dprc_cfg structure when creating a +- * container, in case the portal ID is not specifically selected by the +- * user and should be allocated by the DPRC from the pool of portal ids. +- */ +-#define DPRC_GET_PORTAL_ID_FROM_POOL (int)(~(0)) +- + int dprc_open(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int container_id, +@@ -63,75 +51,6 @@ int dprc_close(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +-/** +- * Container general options +- * +- * These options may be selected at container creation by the container creator +- * and can be retrieved using dprc_get_attributes() +- */ +- +-/* Spawn Policy Option allowed - Indicates that the new container is allowed +- * to spawn and have its own child containers. +- */ +-#define DPRC_CFG_OPT_SPAWN_ALLOWED 0x00000001 +- +-/* General Container allocation policy - Indicates that the new container is +- * allowed to allocate requested resources from its parent container; if not +- * set, the container is only allowed to use resources in its own pools; Note +- * that this is a container's global policy, but the parent container may +- * override it and set specific quota per resource type. 
+- */ +-#define DPRC_CFG_OPT_ALLOC_ALLOWED 0x00000002 +- +-/* Object initialization allowed - software context associated with this +- * container is allowed to invoke object initialization operations. +- */ +-#define DPRC_CFG_OPT_OBJ_CREATE_ALLOWED 0x00000004 +- +-/* Topology change allowed - software context associated with this +- * container is allowed to invoke topology operations, such as attach/detach +- * of network objects. +- */ +-#define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008 +- +-/* AIOP - Indicates that container belongs to AIOP. */ +-#define DPRC_CFG_OPT_AIOP 0x00000020 +- +-/* IRQ Config - Indicates that the container allowed to configure its IRQs. */ +-#define DPRC_CFG_OPT_IRQ_CFG_ALLOWED 0x00000040 +- +-/** +- * struct dprc_cfg - Container configuration options +- * @icid: Container's ICID; if set to 'DPRC_GET_ICID_FROM_POOL', a free +- * ICID value is allocated by the DPRC +- * @portal_id: Portal ID; if set to 'DPRC_GET_PORTAL_ID_FROM_POOL', a free +- * portal ID is allocated by the DPRC +- * @options: Combination of 'DPRC_CFG_OPT_' options +- * @label: Object's label +- */ +-struct dprc_cfg { +- u16 icid; +- int portal_id; +- u64 options; +- char label[16]; +-}; +- +-int dprc_create_container(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- struct dprc_cfg *cfg, +- int *child_container_id, +- u64 *child_portal_offset); +- +-int dprc_destroy_container(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- int child_container_id); +- +-int dprc_reset_container(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- int child_container_id); + + /* IRQ */ + +@@ -139,7 +58,7 @@ int dprc_reset_container(struct fsl_mc_i + #define DPRC_IRQ_INDEX 0 + + /* Number of dprc's IRQs */ +-#define DPRC_NUM_OF_IRQS 1 ++#define DPRC_NUM_OF_IRQS 1 + + /* DPRC IRQ events */ + +@@ -151,12 +70,14 @@ int dprc_reset_container(struct fsl_mc_i + #define DPRC_IRQ_EVENT_RES_ADDED 0x00000004 + /* IRQ event - Indicates that resources removed from the container */ + #define DPRC_IRQ_EVENT_RES_REMOVED 0x00000008 +-/* IRQ event - Indicates that one of the descendant containers that opened by ++/* ++ * IRQ event - Indicates that one of the descendant containers that opened by + * this container is destroyed + */ + #define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010 + +-/* IRQ event - Indicates that on one of the container's opened object is ++/* ++ * IRQ event - Indicates that on one of the container's opened object is + * destroyed + */ + #define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020 +@@ -171,59 +92,59 @@ int dprc_reset_container(struct fsl_mc_i + * @irq_num: A user defined number associated with this IRQ + */ + struct dprc_irq_cfg { +- phys_addr_t paddr; +- u32 val; +- int irq_num; +-}; +- +-int dprc_set_irq(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- struct dprc_irq_cfg *irq_cfg); +- +-int dprc_get_irq(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- int *type, +- struct dprc_irq_cfg *irq_cfg); +- +-int dprc_set_irq_enable(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u8 en); +- +-int dprc_get_irq_enable(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u8 *en); +- +-int dprc_set_irq_mask(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u32 mask); +- +-int dprc_get_irq_mask(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u32 *mask); +- +-int dprc_get_irq_status(struct fsl_mc_io *mc_io, +- u32 
cmd_flags, +- u16 token, +- u8 irq_index, +- u32 *status); +- +-int dprc_clear_irq_status(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- u8 irq_index, +- u32 status); ++ phys_addr_t paddr; ++ u32 val; ++ int irq_num; ++}; ++ ++int dprc_set_irq(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ struct dprc_irq_cfg *irq_cfg); ++ ++int dprc_get_irq(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ int *type, ++ struct dprc_irq_cfg *irq_cfg); ++ ++int dprc_set_irq_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u8 en); ++ ++int dprc_get_irq_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u8 *en); ++ ++int dprc_set_irq_mask(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 mask); ++ ++int dprc_get_irq_mask(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 *mask); ++ ++int dprc_get_irq_status(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 *status); ++ ++int dprc_clear_irq_status(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 status); + + /** + * struct dprc_attributes - Container attributes +@@ -231,114 +152,23 @@ int dprc_clear_irq_status(struct fsl_mc_ + * @icid: Container's ICID + * @portal_id: Container's portal ID + * @options: Container's options as set at container's creation +- * @version: DPRC version + */ + struct dprc_attributes { + int container_id; + u16 icid; + int portal_id; + u64 options; +- /** +- * struct version - DPRC version +- * @major: DPRC major version +- * @minor: DPRC minor version +- */ +- struct { +- u16 major; +- u16 minor; +- } version; + }; + +-int dprc_get_attributes(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- struct dprc_attributes *attributes); +- +-int dprc_set_res_quota(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- int child_container_id, +- char *type, +- u16 quota); +- +-int dprc_get_res_quota(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- int child_container_id, +- char *type, +- u16 *quota); +- +-/* Resource request options */ +- +-/* Explicit resource ID request - The requested objects/resources +- * are explicit and sequential (in case of resources). +- * The base ID is given at res_req at base_align field +- */ +-#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001 +- +-/* Aligned resources request - Relevant only for resources +- * request (and not objects). Indicates that resources base ID should be +- * sequential and aligned to the value given at dprc_res_req base_align field +- */ +-#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002 +- +-/* Plugged Flag - Relevant only for object assignment request. +- * Indicates that after all objects assigned. An interrupt will be invoked at +- * the relevant GPP. The assigned object will be marked as plugged. +- * plugged objects can't be assigned from their container +- */ +-#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004 +- +-/** +- * struct dprc_res_req - Resource request descriptor, to be used in assignment +- * or un-assignment of resources and objects. +- * @type: Resource/object type: Represent as a NULL terminated string. 
+- * This string may received by using dprc_get_pool() to get resource +- * type and dprc_get_obj() to get object type; +- * Note: it is not possible to assign/un-assign DPRC objects +- * @num: Number of resources +- * @options: Request options: combination of DPRC_RES_REQ_OPT_ options +- * @id_base_align: In case of explicit assignment (DPRC_RES_REQ_OPT_EXPLICIT +- * is set at option), this field represents the required base ID +- * for resource allocation; In case of aligned assignment +- * (DPRC_RES_REQ_OPT_ALIGNED is set at option), this field +- * indicates the required alignment for the resource ID(s) - +- * use 0 if there is no alignment or explicit ID requirements +- */ +-struct dprc_res_req { +- char type[16]; +- u32 num; +- u32 options; +- int id_base_align; +-}; +- +-int dprc_assign(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- int container_id, +- struct dprc_res_req *res_req); +- +-int dprc_unassign(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- int child_container_id, +- struct dprc_res_req *res_req); +- +-int dprc_get_pool_count(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- int *pool_count); +- +-int dprc_get_pool(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- int pool_index, +- char *type); ++int dprc_get_attributes(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dprc_attributes *attributes); + + int dprc_get_obj_count(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- int *obj_count); ++ u32 cmd_flags, ++ u16 token, ++ int *obj_count); + + /* Objects Attributes Flags */ + +@@ -353,7 +183,7 @@ int dprc_get_obj_count(struct fsl_mc_io + * masters; + * user is responsible for proper memory handling through IOMMU configuration. + */ +-#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001 ++#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001 + + /** + * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj() +@@ -381,41 +211,41 @@ struct dprc_obj_desc { + u16 flags; + }; + +-int dprc_get_obj(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- int obj_index, +- struct dprc_obj_desc *obj_desc); +- +-int dprc_get_obj_desc(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- char *obj_type, +- int obj_id, +- struct dprc_obj_desc *obj_desc); +- +-int dprc_set_obj_irq(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- char *obj_type, +- int obj_id, +- u8 irq_index, +- struct dprc_irq_cfg *irq_cfg); +- +-int dprc_get_obj_irq(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- char *obj_type, +- int obj_id, +- u8 irq_index, +- int *type, +- struct dprc_irq_cfg *irq_cfg); +- +-int dprc_get_res_count(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- char *type, +- int *res_count); ++int dprc_get_obj(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ int obj_index, ++ struct dprc_obj_desc *obj_desc); ++ ++int dprc_get_obj_desc(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ char *obj_type, ++ int obj_id, ++ struct dprc_obj_desc *obj_desc); ++ ++int dprc_set_obj_irq(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ char *obj_type, ++ int obj_id, ++ u8 irq_index, ++ struct dprc_irq_cfg *irq_cfg); ++ ++int dprc_get_obj_irq(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ char *obj_type, ++ int obj_id, ++ u8 irq_index, ++ int *type, ++ struct dprc_irq_cfg *irq_cfg); ++ ++int dprc_get_res_count(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ char *type, ++ int *res_count); + + /** + * enum dprc_iter_status - 
Iteration status +@@ -429,27 +259,6 @@ enum dprc_iter_status { + DPRC_ITER_STATUS_LAST = 2 + }; + +-/** +- * struct dprc_res_ids_range_desc - Resource ID range descriptor +- * @base_id: Base resource ID of this range +- * @last_id: Last resource ID of this range +- * @iter_status: Iteration status - should be set to DPRC_ITER_STATUS_FIRST at +- * first iteration; while the returned marker is DPRC_ITER_STATUS_MORE, +- * additional iterations are needed, until the returned marker is +- * DPRC_ITER_STATUS_LAST +- */ +-struct dprc_res_ids_range_desc { +- int base_id; +- int last_id; +- enum dprc_iter_status iter_status; +-}; +- +-int dprc_get_res_ids(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- char *type, +- struct dprc_res_ids_range_desc *range_desc); +- + /* Region flags */ + /* Cacheable - Indicates that region should be mapped as cacheable */ + #define DPRC_REGION_CACHEABLE 0x00000001 +@@ -481,64 +290,27 @@ struct dprc_region_desc { + enum dprc_region_type type; + }; + +-int dprc_get_obj_region(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- char *obj_type, +- int obj_id, +- u8 region_index, +- struct dprc_region_desc *region_desc); +- +-int dprc_set_obj_label(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- char *obj_type, +- int obj_id, +- char *label); ++int dprc_get_obj_region(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ char *obj_type, ++ int obj_id, ++ u8 region_index, ++ struct dprc_region_desc *region_desc); + +-/** +- * struct dprc_endpoint - Endpoint description for link connect/disconnect +- * operations +- * @type: Endpoint object type: NULL terminated string +- * @id: Endpoint object ID +- * @if_id: Interface ID; should be set for endpoints with multiple +- * interfaces ("dpsw", "dpdmux"); for others, always set to 0 +- */ +-struct dprc_endpoint { +- char type[16]; +- int id; +- int if_id; +-}; ++int dprc_get_api_version(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 *major_ver, ++ u16 *minor_ver); + +-/** +- * struct dprc_connection_cfg - Connection configuration. +- * Used for virtual connections only +- * @committed_rate: Committed rate (Mbits/s) +- * @max_rate: Maximum rate (Mbits/s) +- */ +-struct dprc_connection_cfg { +- u32 committed_rate; +- u32 max_rate; +-}; ++int dprc_get_container_id(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ int *container_id); + +-int dprc_connect(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- const struct dprc_endpoint *endpoint1, +- const struct dprc_endpoint *endpoint2, +- const struct dprc_connection_cfg *cfg); +- +-int dprc_disconnect(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- const struct dprc_endpoint *endpoint); +- +-int dprc_get_connection(struct fsl_mc_io *mc_io, +- u32 cmd_flags, +- u16 token, +- const struct dprc_endpoint *endpoint1, +- struct dprc_endpoint *endpoint2, +- int *state); ++int dprc_reset_container(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ int child_container_id); + + #endif /* _FSL_DPRC_H */ + +--- a/drivers/staging/fsl-mc/include/mc-bus.h ++++ b/drivers/staging/fsl-mc/include/mc-bus.h +@@ -1,7 +1,7 @@ + /* + * Freescale Management Complex (MC) bus declarations + * +- * Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. 
+ * Author: German Rivera + * + * This file is licensed under the terms of the GNU General Public +@@ -42,8 +42,8 @@ struct msi_domain_info; + */ + struct fsl_mc_resource_pool { + enum fsl_mc_pool_type type; +- int16_t max_count; +- int16_t free_count; ++ int max_count; ++ int free_count; + struct mutex mutex; /* serializes access to free_list */ + struct list_head free_list; + struct fsl_mc_bus *mc_bus; +@@ -73,6 +73,7 @@ struct fsl_mc_bus { + int dprc_scan_container(struct fsl_mc_device *mc_bus_dev); + + int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev, ++ const char *driver_override, + unsigned int *total_irq_count); + + int __init dprc_driver_init(void); +--- a/drivers/staging/fsl-mc/include/mc-cmd.h ++++ b/drivers/staging/fsl-mc/include/mc-cmd.h +@@ -1,4 +1,5 @@ +-/* Copyright 2013-2015 Freescale Semiconductor Inc. ++/* ++ * Copyright 2013-2016 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: +@@ -48,6 +49,15 @@ struct mc_command { + u64 params[MC_CMD_NUM_OF_PARAMS]; + }; + ++struct mc_rsp_create { ++ __le32 object_id; ++}; ++ ++struct mc_rsp_api_ver { ++ __le16 major_ver; ++ __le16 minor_ver; ++}; ++ + enum mc_cmd_status { + MC_CMD_STATUS_OK = 0x0, /* Completed successfully */ + MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */ +@@ -72,11 +82,6 @@ enum mc_cmd_status { + /* Command completion flag */ + #define MC_CMD_FLAG_INTR_DIS 0x01 + +-#define MC_CMD_HDR_CMDID_MASK 0xFFF0 +-#define MC_CMD_HDR_CMDID_SHIFT 4 +-#define MC_CMD_HDR_TOKEN_MASK 0xFFC0 +-#define MC_CMD_HDR_TOKEN_SHIFT 6 +- + static inline u64 mc_encode_cmd_header(u16 cmd_id, + u32 cmd_flags, + u16 token) +@@ -84,10 +89,8 @@ static inline u64 mc_encode_cmd_header(u + u64 header = 0; + struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header; + +- hdr->cmd_id = cpu_to_le16((cmd_id << MC_CMD_HDR_CMDID_SHIFT) & +- MC_CMD_HDR_CMDID_MASK); +- hdr->token = cpu_to_le16((token << MC_CMD_HDR_TOKEN_SHIFT) & +- MC_CMD_HDR_TOKEN_MASK); ++ hdr->cmd_id = cpu_to_le16(cmd_id); ++ hdr->token = cpu_to_le16(token); + hdr->status = MC_CMD_STATUS_READY; + if (cmd_flags & MC_CMD_FLAG_PRI) + hdr->flags_hw = MC_CMD_FLAG_PRI; +@@ -102,7 +105,26 @@ static inline u16 mc_cmd_hdr_read_token( + struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header; + u16 token = le16_to_cpu(hdr->token); + +- return (token & MC_CMD_HDR_TOKEN_MASK) >> MC_CMD_HDR_TOKEN_SHIFT; ++ return token; ++} ++ ++static inline u32 mc_cmd_read_object_id(struct mc_command *cmd) ++{ ++ struct mc_rsp_create *rsp_params; ++ ++ rsp_params = (struct mc_rsp_create *)cmd->params; ++ return le32_to_cpu(rsp_params->object_id); ++} ++ ++static inline void mc_cmd_read_api_version(struct mc_command *cmd, ++ u16 *major_ver, ++ u16 *minor_ver) ++{ ++ struct mc_rsp_api_ver *rsp_params; ++ ++ rsp_params = (struct mc_rsp_api_ver *)cmd->params; ++ *major_ver = le16_to_cpu(rsp_params->major_ver); ++ *minor_ver = le16_to_cpu(rsp_params->minor_ver); + } + + #endif /* __FSL_MC_CMD_H */ +--- a/drivers/staging/fsl-mc/include/mc-sys.h ++++ b/drivers/staging/fsl-mc/include/mc-sys.h +@@ -1,4 +1,5 @@ +-/* Copyright 2013-2014 Freescale Semiconductor Inc. ++/* ++ * Copyright 2013-2016 Freescale Semiconductor Inc. 
+ *
+ * Interface of the I/O services to send MC commands to the MC hardware
+ *
+--- a/drivers/staging/fsl-mc/include/mc.h
++++ b/drivers/staging/fsl-mc/include/mc.h
+@@ -1,7 +1,7 @@
+ /*
+  * Freescale Management Complex (MC) bus public interface
+  *
+- * Copyright (C) 2014 Freescale Semiconductor, Inc.
++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+  * Author: German Rivera 
+  *
+  * This file is licensed under the terms of the GNU General Public
+@@ -81,7 +81,7 @@ enum fsl_mc_pool_type {
+  */
+ struct fsl_mc_resource {
+ 	enum fsl_mc_pool_type type;
+-	int32_t id;
++	s32 id;
+ 	void *data;
+ 	struct fsl_mc_resource_pool *parent_pool;
+ 	struct list_head node;
+@@ -122,6 +122,7 @@ struct fsl_mc_device_irq {
+  * @regions: pointer to array of MMIO region entries
+  * @irqs: pointer to array of pointers to interrupts allocated to this device
+  * @resource: generic resource associated with this MC object device, if any.
++ * @driver_override: Driver name to force a match
+  *
+  * Generic device object for MC object devices that are "attached" to a
+  * MC bus.
+@@ -154,6 +155,7 @@ struct fsl_mc_device {
+ 	struct resource *regions;
+ 	struct fsl_mc_device_irq **irqs;
+ 	struct fsl_mc_resource *resource;
++	const char *driver_override;
+ };
+ 
+ #define to_fsl_mc_device(_dev) \
+@@ -175,6 +177,8 @@ struct fsl_mc_device {
+ #define fsl_mc_driver_register(drv) \
+ 	__fsl_mc_driver_register(drv, THIS_MODULE)
+ 
++void fsl_mc_device_remove(struct fsl_mc_device *mc_dev);
++
+ int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver,
+ 					  struct module *owner);
+ 
+@@ -198,4 +202,13 @@ int __must_check fsl_mc_allocate_irqs(st
+ 
+ void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
+ 
++void fsl_mc_dma_configure(struct fsl_mc_device *mc_dev,
++			  struct device_node *fsl_mc_platform_node, int coherent);
++
++#ifdef CONFIG_FSL_MC_BUS
++struct iommu_group *fsl_mc_device_group(struct device *dev);
++#else
++#define fsl_mc_device_group(__dev) NULL
++#endif
++
+ #endif /* _FSL_MC_H_ */
diff --git a/target/linux/layerscape/patches-4.9/705-dpaa2-support-layerscape.patch b/target/linux/layerscape/patches-4.9/705-dpaa2-support-layerscape.patch
new file mode 100644
index 000000000..51abc0325
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/705-dpaa2-support-layerscape.patch
@@ -0,0 +1,22907 @@
+From 3a302437605308079db398b67000a77a4fe92da8 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu 
+Date: Mon, 25 Sep 2017 12:07:58 +0800
+Subject: [PATCH] dpaa2: support layerscape
+
+This is an integrated patch for Layerscape DPAA2 support.
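+
+As a rough map for reviewers, the DPIO service API added under
+drivers/staging/fsl-mc/include/dpaa2-io.h is consumed by the Ethernet
+driver along these lines (a minimal sketch: error handling and the
+bounded-wait logic of the real driver are trimmed, and pull_and_drain()
+and process_fd() are illustrative names, not part of the API):
+
+	#include "dpaa2-io.h"
+
+	static void process_fd(const struct dpaa2_fd *fd); /* consumer hook */
+
+	static int pull_and_drain(struct dpaa2_io *io, u32 fqid,
+				  struct device *dev)
+	{
+		struct dpaa2_io_store *s;
+		struct dpaa2_dq *dq;
+		int is_last = 0;
+		int err;
+
+		/* room for up to 16 dequeue results written back by QBMan */
+		s = dpaa2_io_store_create(16, dev);
+		if (!s)
+			return -ENOMEM;
+
+		/* issue a pull command against the frame queue */
+		err = dpaa2_io_service_pull_fq(io, fqid, s);
+		if (err)
+			goto out;
+
+		do {
+			/* NULL: the next result has not landed in the store */
+			dq = dpaa2_io_store_next(s, &is_last);
+			if (!dq)
+				continue;
+			process_fd(dpaa2_dq_fd(dq));
+		} while (!is_last);
+	out:
+		dpaa2_io_store_destroy(s);
+		return err;
+	}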
+ +Signed-off-by: Bogdan Purcareata +Signed-off-by: Ioana Radulescu +Signed-off-by: Razvan Stefanescu +Signed-off-by: costi +Signed-off-by: Catalin Horghidan +Signed-off-by: Yangbo Lu +--- + drivers/soc/fsl/ls2-console/Kconfig | 4 + + drivers/soc/fsl/ls2-console/Makefile | 1 + + drivers/soc/fsl/ls2-console/ls2-console.c | 284 ++ + drivers/staging/fsl-dpaa2/ethernet/Makefile | 11 + + drivers/staging/fsl-dpaa2/ethernet/README | 186 ++ + .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 350 +++ + .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 60 + + .../staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 184 ++ + drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 3155 ++++++++++++++++++++ + drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 460 +++ + drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 856 ++++++ + drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 176 ++ + drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 600 ++++ + drivers/staging/fsl-dpaa2/ethernet/dpni.c | 1770 +++++++++++ + drivers/staging/fsl-dpaa2/ethernet/dpni.h | 989 ++++++ + drivers/staging/fsl-dpaa2/ethernet/net.h | 480 +++ + drivers/staging/fsl-dpaa2/ethsw/Kconfig | 6 + + drivers/staging/fsl-dpaa2/ethsw/Makefile | 10 + + drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 851 ++++++ + drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 2762 +++++++++++++++++ + drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 1269 ++++++++ + drivers/staging/fsl-dpaa2/ethsw/switch.c | 1857 ++++++++++++ + drivers/staging/fsl-dpaa2/evb/Kconfig | 7 + + drivers/staging/fsl-dpaa2/evb/Makefile | 10 + + drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h | 279 ++ + drivers/staging/fsl-dpaa2/evb/dpdmux.c | 1112 +++++++ + drivers/staging/fsl-dpaa2/evb/dpdmux.h | 453 +++ + drivers/staging/fsl-dpaa2/evb/evb.c | 1350 +++++++++ + drivers/staging/fsl-dpaa2/mac/Kconfig | 23 + + drivers/staging/fsl-dpaa2/mac/Makefile | 10 + + drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 172 ++ + drivers/staging/fsl-dpaa2/mac/dpmac.c | 620 ++++ + drivers/staging/fsl-dpaa2/mac/dpmac.h | 342 +++ + drivers/staging/fsl-dpaa2/mac/mac.c | 666 +++++ + drivers/staging/fsl-dpaa2/rtc/Makefile | 10 + + drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h | 160 + + drivers/staging/fsl-dpaa2/rtc/dprtc.c | 746 +++++ + drivers/staging/fsl-dpaa2/rtc/dprtc.h | 172 ++ + drivers/staging/fsl-dpaa2/rtc/rtc.c | 243 ++ + 39 files changed, 22696 insertions(+) + create mode 100644 drivers/soc/fsl/ls2-console/Kconfig + create mode 100644 drivers/soc/fsl/ls2-console/Makefile + create mode 100644 drivers/soc/fsl/ls2-console/ls2-console.c + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Makefile + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/README + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpkg.h + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.c + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.h + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/net.h + create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Kconfig + create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Makefile + create mode 100644 
drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h + create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.c + create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.h + create mode 100644 drivers/staging/fsl-dpaa2/ethsw/switch.c + create mode 100644 drivers/staging/fsl-dpaa2/evb/Kconfig + create mode 100644 drivers/staging/fsl-dpaa2/evb/Makefile + create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h + create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.c + create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.h + create mode 100644 drivers/staging/fsl-dpaa2/evb/evb.c + create mode 100644 drivers/staging/fsl-dpaa2/mac/Kconfig + create mode 100644 drivers/staging/fsl-dpaa2/mac/Makefile + create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h + create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.c + create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.h + create mode 100644 drivers/staging/fsl-dpaa2/mac/mac.c + create mode 100644 drivers/staging/fsl-dpaa2/rtc/Makefile + create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h + create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc.c + create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc.h + create mode 100644 drivers/staging/fsl-dpaa2/rtc/rtc.c + +--- /dev/null ++++ b/drivers/soc/fsl/ls2-console/Kconfig +@@ -0,0 +1,4 @@ ++config FSL_LS2_CONSOLE ++ tristate "Layerscape MC and AIOP console support" ++ depends on ARCH_LAYERSCAPE ++ default y +--- /dev/null ++++ b/drivers/soc/fsl/ls2-console/Makefile +@@ -0,0 +1 @@ ++obj-$(CONFIG_FSL_LS2_CONSOLE) += ls2-console.o +--- /dev/null ++++ b/drivers/soc/fsl/ls2-console/ls2-console.c +@@ -0,0 +1,284 @@ ++/* Copyright 2015-2016 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/miscdevice.h>
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++#include <linux/io.h>
++
++/* SoC address for the MC firmware base low/high registers */
++#define SOC_CCSR_MC_FW_BASE_ADDR_REGS 0x8340020
++#define SOC_CCSR_MC_FW_BASE_ADDR_REGS_SIZE 2
++/* MC firmware base low/high registers indexes */
++#define MCFBALR_OFFSET 0
++#define MCFBAHR_OFFSET 1
++
++/* Bit mask used to obtain the most significant part of the MC base address */
++#define MC_FW_HIGH_ADDR_MASK 0x1FFFF
++/* Bit mask used to obtain the least significant part of the MC base address */
++#define MC_FW_LOW_ADDR_MASK 0xE0000000
++
++#define MC_BUFFER_OFFSET 0x01000000
++#define MC_BUFFER_SIZE (1024*1024*16)
++#define MC_OFFSET_DELTA (MC_BUFFER_OFFSET)
++
++#define AIOP_BUFFER_OFFSET 0x06000000
++#define AIOP_BUFFER_SIZE (1024*1024*16)
++#define AIOP_OFFSET_DELTA (0)
++
++struct log_header {
++	char magic_word[8]; /* magic word */
++	uint32_t buf_start; /* holds the 32-bit little-endian
++			     * offset of the start of the buffer
++			     */
++	uint32_t buf_length; /* holds the 32-bit little-endian
++			      * length of the buffer
++			      */
++	uint32_t last_byte; /* holds the 32-bit little-endian offset
++			     * of the byte after the last byte that
++			     * was written
++			     */
++	char reserved[44];
++};
++
++#define LOG_HEADER_FLAG_BUFFER_WRAPAROUND 0x80000000
++#define LOG_VERSION_MAJOR 1
++#define LOG_VERSION_MINOR 0
++
++
++#define invalidate(p) { asm volatile("dc ivac, %0" : : "r" (p) : "memory"); }
++
++struct console_data {
++	char *map_addr;
++	struct log_header *hdr;
++	char *start_addr; /* Start of buffer */
++	char *end_addr; /* End of buffer */
++	char *end_of_data; /* Current end of data */
++	char *cur_ptr; /* Last data sent to console */
++};
++
++#define LAST_BYTE(a) ((a) & ~(LOG_HEADER_FLAG_BUFFER_WRAPAROUND))
++
++static inline void __adjust_end(struct console_data *cd)
++{
++	cd->end_of_data = cd->start_addr
++				+ LAST_BYTE(le32_to_cpu(cd->hdr->last_byte));
++}
++
++static inline void adjust_end(struct console_data *cd)
++{
++	invalidate(cd->hdr);
++	__adjust_end(cd);
++}
++
++static inline uint64_t get_mc_fw_base_address(void)
++{
++	u32 *mcfbaregs = (u32 *) ioremap(SOC_CCSR_MC_FW_BASE_ADDR_REGS,
++					 SOC_CCSR_MC_FW_BASE_ADDR_REGS_SIZE);
++	u64 mcfwbase = 0ULL;
++
++	mcfwbase = readl(mcfbaregs + MCFBAHR_OFFSET) & MC_FW_HIGH_ADDR_MASK;
++	mcfwbase <<= 32;
++	mcfwbase |= readl(mcfbaregs + MCFBALR_OFFSET) & MC_FW_LOW_ADDR_MASK;
++	iounmap(mcfbaregs);
++	pr_info("fsl-ls2-console: MC base address at 0x%016llx\n", mcfwbase);
++	return mcfwbase;
++}
++
++static int fsl_ls2_generic_console_open(struct inode *node, struct file *fp,
++					u64 offset, u64 size,
++					uint8_t *emagic, uint8_t magic_len,
++					u32 offset_delta)
++{
++	struct console_data *cd;
++	uint8_t *magic;
++	uint32_t wrapped;
++
++	cd = kmalloc(sizeof(*cd), GFP_KERNEL);
++	if (cd == NULL)
++		return -ENOMEM;
++	fp->private_data = cd;
++	cd->map_addr = ioremap(get_mc_fw_base_address() + offset, size);
++
++	cd->hdr = (struct log_header *) cd->map_addr;
++	invalidate(cd->hdr);
++
++	magic = cd->hdr->magic_word;
++	if (memcmp(magic, emagic, magic_len)) {
++		pr_info("magic didn't match!\n");
++		pr_info("expected: %02x %02x %02x %02x %02x %02x %02x %02x\n",
++			emagic[0], emagic[1], emagic[2], emagic[3],
++			emagic[4], emagic[5], emagic[6], emagic[7]);
++		pr_info("    seen: %02x %02x %02x %02x %02x %02x %02x %02x\n",
++			magic[0], magic[1], magic[2], magic[3],
++			magic[4], magic[5], magic[6], magic[7]);
++		iounmap(cd->map_addr);
++		kfree(cd);
++		return -EIO;
++	}
++
++	cd->start_addr = cd->map_addr
++ + le32_to_cpu(cd->hdr->buf_start) - offset_delta; ++ cd->end_addr = cd->start_addr + le32_to_cpu(cd->hdr->buf_length); ++ ++ wrapped = le32_to_cpu(cd->hdr->last_byte) ++ & LOG_HEADER_FLAG_BUFFER_WRAPAROUND; ++ ++ __adjust_end(cd); ++ if (wrapped && (cd->end_of_data != cd->end_addr)) ++ cd->cur_ptr = cd->end_of_data+1; ++ else ++ cd->cur_ptr = cd->start_addr; ++ ++ return 0; ++} ++ ++static int fsl_ls2_mc_console_open(struct inode *node, struct file *fp) ++{ ++ uint8_t magic_word[] = { 0, 1, 'C', 'M' }; ++ ++ return fsl_ls2_generic_console_open(node, fp, ++ MC_BUFFER_OFFSET, MC_BUFFER_SIZE, ++ magic_word, sizeof(magic_word), ++ MC_OFFSET_DELTA); ++} ++ ++static int fsl_ls2_aiop_console_open(struct inode *node, struct file *fp) ++{ ++ uint8_t magic_word[] = { 'P', 'O', 'I', 'A' }; ++ ++ return fsl_ls2_generic_console_open(node, fp, ++ AIOP_BUFFER_OFFSET, AIOP_BUFFER_SIZE, ++ magic_word, sizeof(magic_word), ++ AIOP_OFFSET_DELTA); ++} ++ ++static int fsl_ls2_console_close(struct inode *node, struct file *fp) ++{ ++ struct console_data *cd = fp->private_data; ++ ++ iounmap(cd->map_addr); ++ kfree(cd); ++ return 0; ++} ++ ++ssize_t fsl_ls2_console_read(struct file *fp, char __user *buf, size_t count, ++ loff_t *f_pos) ++{ ++ struct console_data *cd = fp->private_data; ++ size_t bytes = 0; ++ char data; ++ ++ /* Check if we need to adjust the end of data addr */ ++ adjust_end(cd); ++ ++ while ((count != bytes) && (cd->end_of_data != cd->cur_ptr)) { ++ if (((u64)cd->cur_ptr) % 64 == 0) ++ invalidate(cd->cur_ptr); ++ ++ data = *(cd->cur_ptr); ++ if (copy_to_user(&buf[bytes], &data, 1)) ++ return -EFAULT; ++ cd->cur_ptr++; ++ if (cd->cur_ptr >= cd->end_addr) ++ cd->cur_ptr = cd->start_addr; ++ ++bytes; ++ } ++ return bytes; ++} ++ ++static const struct file_operations fsl_ls2_mc_console_fops = { ++ .owner = THIS_MODULE, ++ .open = fsl_ls2_mc_console_open, ++ .release = fsl_ls2_console_close, ++ .read = fsl_ls2_console_read, ++}; ++ ++static struct miscdevice fsl_ls2_mc_console_dev = { ++ .minor = MISC_DYNAMIC_MINOR, ++ .name = "fsl_mc_console", ++ .fops = &fsl_ls2_mc_console_fops ++}; ++ ++static const struct file_operations fsl_ls2_aiop_console_fops = { ++ .owner = THIS_MODULE, ++ .open = fsl_ls2_aiop_console_open, ++ .release = fsl_ls2_console_close, ++ .read = fsl_ls2_console_read, ++}; ++ ++static struct miscdevice fsl_ls2_aiop_console_dev = { ++ .minor = MISC_DYNAMIC_MINOR, ++ .name = "fsl_aiop_console", ++ .fops = &fsl_ls2_aiop_console_fops ++}; ++ ++static int __init fsl_ls2_console_init(void) ++{ ++ int err = 0; ++ ++ pr_info("Freescale LS2 console driver\n"); ++ err = misc_register(&fsl_ls2_mc_console_dev); ++ if (err) { ++ pr_err("fsl_mc_console: cannot register device\n"); ++ return err; ++ } ++ pr_info("fsl-ls2-console: device %s registered\n", ++ fsl_ls2_mc_console_dev.name); ++ ++ err = misc_register(&fsl_ls2_aiop_console_dev); ++ if (err) { ++ pr_err("fsl_aiop_console: cannot register device\n"); ++ return err; ++ } ++ pr_info("fsl-ls2-console: device %s registered\n", ++ fsl_ls2_aiop_console_dev.name); ++ ++ return 0; ++} ++ ++static void __exit fsl_ls2_console_exit(void) ++{ ++ misc_deregister(&fsl_ls2_mc_console_dev); ++ ++ misc_deregister(&fsl_ls2_aiop_console_dev); ++} ++ ++module_init(fsl_ls2_console_init); ++module_exit(fsl_ls2_console_exit); ++ ++MODULE_AUTHOR("Roy Pledge "); ++MODULE_LICENSE("Dual BSD/GPL"); ++MODULE_DESCRIPTION("Freescale LS2 console driver"); +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile +@@ -0,0 +1,11 @@ ++# ++# Makefile for the 
++#
++
++obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
++
++fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
++fsl-dpaa2-eth-$(CONFIG_FSL_DPAA2_ETH_DEBUGFS) += dpaa2-eth-debugfs.o
++
++# Needed by the tracing framework
++CFLAGS_dpaa2-eth.o := -I$(src)
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/README
+@@ -0,0 +1,186 @@
++Freescale DPAA2 Ethernet driver
++===============================
++
++This file provides documentation for the Freescale DPAA2 Ethernet driver.
++
++
++Contents
++========
++        Supported Platforms
++        Architecture Overview
++        Creating a Network Interface
++        Features & Offloads
++
++
++Supported Platforms
++===================
++This driver provides networking support for Freescale DPAA2 SoCs, e.g.
++LS2080A, LS2088A, LS1088A.
++
++
++Architecture Overview
++=====================
++Unlike regular NICs, in the DPAA2 architecture there is no single hardware
++block representing network interfaces; instead, several distinct hardware
++resources work together to provide the networking functionality:
++        - network interfaces
++        - queues, channels
++        - buffer pools
++        - MAC/PHY
++
++All hardware resources are allocated and configured through the Management
++Complex (MC) portals. The MC abstracts most of these resources as DPAA2
++objects and exposes ABIs through which they can be configured and
++controlled. A few hardware resources, like queues, do not have a
++corresponding MC object and are treated as internal resources of other
++objects.
++
++For a more detailed description of the DPAA2 architecture and its object
++abstractions see:
++        drivers/staging/fsl-mc/README.txt
++
++Each Linux net device is built on top of a Datapath Network Interface (DPNI)
++object and uses Buffer Pools (DPBPs), I/O Portals (DPIOs) and Concentrators
++(DPCONs).
++
++Configuration interface:
++
++             -----------------------
++            | DPAA2 Ethernet Driver |
++             -----------------------
++              .          .        .
++              .          .        .
++      . . . . .          .        . . . . . .
++      .                  .                  .
++      .                  .                  .
++  ----------         ----------        -----------
++ | DPBP API |       | DPNI API |      | DPCON API |
++  ----------         ----------        -----------
++      .                  .                  .            software
++ ========== . ========== . ============ . ======================
++      .                  .                  .            hardware
++  ------------------------------------------
++ |            MC hardware portals           |
++  ------------------------------------------
++      .                  .                  .
++      .                  .                  .
++   ------             ------             -------
++  | DPBP |           | DPNI |           | DPCON |
++   ------             ------             -------
++
++The DPNIs are network interfaces without a direct one-to-one mapping to
++PHYs. DPBPs represent hardware buffer pools. Packet I/O is performed in the
++context of DPCON objects, using DPIO portals for managing and communicating
++with the hardware resources.
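++
++As an illustration of this split, the usual MC interaction pattern is: open
++an object to obtain a command token, issue commands against that token,
++then close it. The following is a minimal sketch, not driver code; it
++assumes an fsl_mc_io portal handle and a DPNI id already known from the
++container, and uses the dpni_open()/dpni_get_link_state()/dpni_close()
++calls this driver relies on:
++
++        static int query_dpni_link(struct fsl_mc_io *mc_io, int dpni_id)
++        {
++                struct dpni_link_state state = { 0 };
++                u16 token;
++                int err;
++
++                /* bind a command token for this DPNI object */
++                err = dpni_open(mc_io, 0, dpni_id, &token);
++                if (err)
++                        return err;
++
++                err = dpni_get_link_state(mc_io, 0, token, &state);
++                if (!err)
++                        pr_info("dpni.%d link is %s\n", dpni_id,
++                                state.up ? "up" : "down");
++
++                dpni_close(mc_io, 0, token);
++                return err;
++        }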
++
++Datapath (I/O) interface:
++
++         -----------------------------------------------
++        |              DPAA2 Ethernet Driver            |
++         -----------------------------------------------
++          |          ^        ^         |            |
++          |          |        |         |            |
++   enqueue|   dequeue|    data|  dequeue|        seed|
++    (Tx)  | (Rx, TxC)|  avail.|  request|     buffers|
++          |          |  notify|         |            |
++          |          |        |         |            |
++          V          |        |         V            V
++         -----------------------------------------------
++        |                  DPIO Driver                  |
++         -----------------------------------------------
++          |          |        |         |            |        software
++          |          |        |         |            |     ================
++          |          |        |         |            |        hardware
++         -----------------------------------------------
++        |              I/O hardware portals             |
++         -----------------------------------------------
++          |          ^        ^         |            |
++          |          |        |         |            |
++          |          |        |         V            |
++          V          |   ================            V
++         ----------------------    |          -------------
++ queues  ----------------------    |         | Buffer pool |
++         ----------------------    |          -------------
++                =======================
++                        Channel
++
++Datapath I/O (DPIO) portals provide enqueue and dequeue services, data
++availability notifications and buffer pool management. DPIOs are shared
++between all DPAA2 objects (and implicitly all DPAA2 kernel drivers) that
++work with data frames, but must be affine to the CPUs for the purpose of
++traffic distribution.
++
++Frames are transmitted and received through hardware frame queues, which
++can be grouped in channels for the purpose of hardware scheduling. The
++Ethernet driver enqueues Tx frames on egress queues and, after transmission
++completes, a Tx confirmation frame is sent back to the CPU.
++
++When frames are available on ingress queues, a data availability
++notification is sent to the CPU; notifications are raised per channel, so
++even if multiple queues in the same channel have available frames, only one
++notification is sent. After a channel fires a notification, it must be
++explicitly rearmed.
++
++Each network interface can have multiple Rx, Tx and confirmation queues
++affined to CPUs, and one channel (DPCON) for each CPU that services at
++least one queue. DPCONs are used to distribute ingress traffic to different
++CPUs via the cores' affine DPIOs.
++
++The role of hardware buffer pools is storage of ingress frame data. Each
++network interface has a privately owned buffer pool which it seeds with
++kernel-allocated buffers.
++
++DPNIs are decoupled from PHYs; a DPNI can be connected to a PHY through a
++DPMAC object or to another DPNI through an internal link, but the
++connection is managed by MC and completely transparent to the Ethernet
++driver.
++
++         ---------      ---------      ---------
++        | eth if1 |    | eth if2 |    | eth ifn |
++         ---------      ---------      ---------
++             .              .              .
++             .              .              .
++             .              .              .
++          ---------------------------
++         |   DPAA2 Ethernet Driver   |
++          ---------------------------
++            .            .         .
++            .            .         .
++            .            .         .
++         ------       ------     ------      -------
++        | DPNI |     | DPNI |   | DPNI |    | DPMAC |----+
++         ------       ------     ------      -------     |
++           |             |          |           |        |
++           |             |          |           |      -----
++            ===========   ==================          | PHY |
++                                                        -----
++
++Creating a Network Interface
++============================
++A net device is created for each DPNI object probed on the MC bus. Each
++DPNI has a number of properties which determine the network interface
++configuration options and associated hardware resources.
++
++DPNI objects (and the other DPAA2 objects needed for a network interface)
++can be added to a container on the MC bus in one of two ways: statically,
++through a Datapath Layout Binary file (DPL) that is parsed by MC at boot
++time; or created dynamically at runtime, via the DPAA2 objects APIs.
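++
++Either way, once a DPNI shows up in the container it is probed like any
++other bus device. The sketch below shows what the registration side of such
++a driver typically looks like; it assumes the fsl-mc bus driver model
++(struct fsl_mc_driver, FSL_MC_VENDOR_FREESCALE, module_fsl_mc_driver()) and
++uses hypothetical sketch_probe()/sketch_remove() callbacks whose bodies are
++elided:
++
++        static const struct fsl_mc_device_id sketch_match_id_table[] = {
++                {
++                        .vendor = FSL_MC_VENDOR_FREESCALE,
++                        .obj_type = "dpni",
++                },
++                { .vendor = 0x0 }       /* sentinel */
++        };
++
++        static struct fsl_mc_driver sketch_dpni_driver = {
++                .driver = {
++                        .name = "sketch-dpaa2-eth",
++                        .owner = THIS_MODULE,
++                },
++                .probe = sketch_probe,   /* alloc netdev, open DPNI, set up queues */
++                .remove = sketch_remove, /* tear down in reverse order */
++                .match_id_table = sketch_match_id_table,
++        };
++
++        module_fsl_mc_driver(sketch_dpni_driver);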
++ ++ ++Features & Offloads ++=================== ++Hardware checksum offloading is supported for TCP and UDP over IPv4/6 frames. ++The checksum offloads can be independently configured on RX and TX through ++ethtool. ++ ++Hardware offload of unicast and multicast MAC filtering is supported on the ++ingress path and permanently enabled. ++ ++Scatter-gather frames are supported on both RX and TX paths. On TX, SG support ++is configurable via ethtool; on RX it is always enabled. ++ ++The DPAA2 hardware can process jumbo Ethernet frames of up to 10K bytes. ++ ++The Ethernet driver defines a static flow hashing scheme that distributes ++traffic based on a 5-tuple key: src IP, dst IP, IP proto, L4 src port, ++L4 dst port. No user configuration is supported for now. ++ ++Hardware specific statistics for the network interface as well as some ++non-standard driver stats can be consulted through ethtool -S option. +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c +@@ -0,0 +1,350 @@ ++ ++/* Copyright 2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */
++
++#include <linux/module.h>
++#include <linux/debugfs.h>
++#include "dpaa2-eth.h"
++#include "dpaa2-eth-debugfs.h"
++
++#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
++
++static struct dentry *dpaa2_dbg_root;
++
++static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
++{
++        struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
++        struct rtnl_link_stats64 *stats;
++        struct dpaa2_eth_drv_stats *extras;
++        int i;
++
++        seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
++        seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s\n",
++                   "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
++                   "Tx SG", "Enq busy");
++
++        for_each_online_cpu(i) {
++                stats = per_cpu_ptr(priv->percpu_stats, i);
++                extras = per_cpu_ptr(priv->percpu_extras, i);
++                seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
++                           i,
++                           stats->rx_packets,
++                           stats->rx_errors,
++                           extras->rx_sg_frames,
++                           stats->tx_packets,
++                           stats->tx_errors,
++                           extras->tx_conf_frames,
++                           extras->tx_sg_frames,
++                           extras->tx_portal_busy);
++        }
++
++        return 0;
++}
++
++static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
++{
++        int err;
++        struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
++
++        err = single_open(file, dpaa2_dbg_cpu_show, priv);
++        if (err < 0)
++                netdev_err(priv->net_dev, "single_open() failed\n");
++
++        return err;
++}
++
++static const struct file_operations dpaa2_dbg_cpu_ops = {
++        .open = dpaa2_dbg_cpu_open,
++        .read = seq_read,
++        .llseek = seq_lseek,
++        .release = single_release,
++};
++
++static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
++{
++        switch (fq->type) {
++        case DPAA2_RX_FQ:
++                return "Rx";
++        case DPAA2_TX_CONF_FQ:
++                return "Tx conf";
++        case DPAA2_RX_ERR_FQ:
++                return "Rx err";
++        default:
++                return "N/A";
++        }
++}
++
++static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
++{
++        struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
++        struct dpaa2_eth_fq *fq;
++        u32 fcnt, bcnt;
++        int i, err;
++
++        seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
++        seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
++                   "VFQID", "CPU", "Type", "Frames", "Pending frames",
++                   "Congestion");
++
++        for (i = 0; i < priv->num_fqs; i++) {
++                fq = &priv->fq[i];
++                err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
++                if (err)
++                        fcnt = 0;
++
++                seq_printf(file, "%5d%16d%16s%16llu%16u%16llu\n",
++                           fq->fqid,
++                           fq->target_cpu,
++                           fq_type_to_str(fq),
++                           fq->stats.frames,
++                           fcnt,
++                           fq->stats.congestion_entry);
++        }
++
++        return 0;
++}
++
++static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
++{
++        int err;
++        struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
++
++        err = single_open(file, dpaa2_dbg_fqs_show, priv);
++        if (err < 0)
++                netdev_err(priv->net_dev, "single_open() failed\n");
++
++        return err;
++}
++
++static const struct file_operations dpaa2_dbg_fq_ops = {
++        .open = dpaa2_dbg_fqs_open,
++        .read = seq_read,
++        .llseek = seq_lseek,
++        .release = single_release,
++};
++
++static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
++{
++        struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
++        struct dpaa2_eth_channel *ch;
++        int i;
++
++        seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
++        seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
++                   "CHID", "CPU", "Deq busy", "Frames", "CDANs",
++                   "Avg frm/CDAN");
++
++        for (i = 0; i < priv->num_channels; i++) {
++                ch = priv->channel[i];
++                seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu\n",
++                           ch->ch_id,
++                           ch->nctx.desired_cpu,
++                           ch->stats.dequeue_portal_busy,
++                           ch->stats.frames,
++                           ch->stats.cdan,
++                           /* guard against division by zero when no CDANs
++                            * have fired yet
++                            */
++                           ch->stats.cdan ?
++                                ch->stats.frames / ch->stats.cdan : 0);
++        }
++
++        return 0;
++}
++
++static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
++{
++        int err;
++        struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
++
++        err = single_open(file, dpaa2_dbg_ch_show, priv);
++        if (err < 0)
++                netdev_err(priv->net_dev, "single_open() failed\n");
++
++        return err;
++}
++
++static const struct file_operations dpaa2_dbg_ch_ops = {
++        .open = dpaa2_dbg_ch_open,
++        .read = seq_read,
++        .llseek = seq_lseek,
++        .release = single_release,
++};
++
++static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf,
++                                     size_t count, loff_t *offset)
++{
++        struct dpaa2_eth_priv *priv = file->private_data;
++        struct rtnl_link_stats64 *percpu_stats;
++        struct dpaa2_eth_drv_stats *percpu_extras;
++        struct dpaa2_eth_fq *fq;
++        struct dpaa2_eth_channel *ch;
++        int i;
++
++        for_each_online_cpu(i) {
++                percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
++                memset(percpu_stats, 0, sizeof(*percpu_stats));
++
++                percpu_extras = per_cpu_ptr(priv->percpu_extras, i);
++                memset(percpu_extras, 0, sizeof(*percpu_extras));
++        }
++
++        for (i = 0; i < priv->num_fqs; i++) {
++                fq = &priv->fq[i];
++                memset(&fq->stats, 0, sizeof(fq->stats));
++        }
++
++        for (i = 0; i < priv->num_channels; i++) {
++                ch = priv->channel[i];
++                memset(&ch->stats, 0, sizeof(ch->stats));
++        }
++
++        return count;
++}
++
++static const struct file_operations dpaa2_dbg_reset_ops = {
++        .open = simple_open,
++        .write = dpaa2_dbg_reset_write,
++};
++
++static ssize_t dpaa2_dbg_reset_mc_write(struct file *file,
++                                        const char __user *buf,
++                                        size_t count, loff_t *offset)
++{
++        struct dpaa2_eth_priv *priv = file->private_data;
++        int err;
++
++        err = dpni_reset_statistics(priv->mc_io, 0, priv->mc_token);
++        if (err)
++                netdev_err(priv->net_dev,
++                           "dpni_reset_statistics() failed %d\n", err);
++
++        return count;
++}
++
++static const struct file_operations dpaa2_dbg_reset_mc_ops = {
++        .open = simple_open,
++        .write = dpaa2_dbg_reset_mc_write,
++};
++
++void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
++{
++        if (!dpaa2_dbg_root)
++                return;
++
++        /* Create a directory for the interface */
++        priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
++                                           dpaa2_dbg_root);
++        if (!priv->dbg.dir) {
++                netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
++                return;
++        }
++
++        /* per-cpu stats file */
++        priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
++                                                  priv->dbg.dir, priv,
++                                                  &dpaa2_dbg_cpu_ops);
++        if (!priv->dbg.cpu_stats) {
++                netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++                goto err_cpu_stats;
++        }
++
++        /* per-fq stats file */
++        priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
++                                                 priv->dbg.dir, priv,
++                                                 &dpaa2_dbg_fq_ops);
++        if (!priv->dbg.fq_stats) {
++                netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++                goto err_fq_stats;
++        }
++
++        /* per-channel stats file */
++        priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
++                                                 priv->dbg.dir, priv,
++                                                 &dpaa2_dbg_ch_ops);
++        /* check the file we just created, not fq_stats again */
++        if (!priv->dbg.ch_stats) {
++                netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++                goto err_ch_stats;
++        }
++
++        /* reset stats */
++        priv->dbg.reset_stats = debugfs_create_file("reset_stats", 0200,
++                                                    priv->dbg.dir, priv,
++                                                    &dpaa2_dbg_reset_ops);
++        if (!priv->dbg.reset_stats) {
++                netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++                goto err_reset_stats;
++        }
++
++        /* reset MC
stats */ ++ priv->dbg.reset_mc_stats = debugfs_create_file("reset_mc_stats", ++ 0222, priv->dbg.dir, priv, ++ &dpaa2_dbg_reset_mc_ops); ++ if (!priv->dbg.reset_mc_stats) { ++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); ++ goto err_reset_mc_stats; ++ } ++ ++ return; ++ ++err_reset_mc_stats: ++ debugfs_remove(priv->dbg.reset_stats); ++err_reset_stats: ++ debugfs_remove(priv->dbg.ch_stats); ++err_ch_stats: ++ debugfs_remove(priv->dbg.fq_stats); ++err_fq_stats: ++ debugfs_remove(priv->dbg.cpu_stats); ++err_cpu_stats: ++ debugfs_remove(priv->dbg.dir); ++} ++ ++void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) ++{ ++ debugfs_remove(priv->dbg.reset_mc_stats); ++ debugfs_remove(priv->dbg.reset_stats); ++ debugfs_remove(priv->dbg.fq_stats); ++ debugfs_remove(priv->dbg.ch_stats); ++ debugfs_remove(priv->dbg.cpu_stats); ++ debugfs_remove(priv->dbg.dir); ++} ++ ++void dpaa2_eth_dbg_init(void) ++{ ++ dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL); ++ if (!dpaa2_dbg_root) { ++ pr_err("DPAA2-ETH: debugfs create failed\n"); ++ return; ++ } ++ ++ pr_info("DPAA2-ETH: debugfs created\n"); ++} ++ ++void __exit dpaa2_eth_dbg_exit(void) ++{ ++ debugfs_remove(dpaa2_dbg_root); ++} +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h +@@ -0,0 +1,60 @@ ++/* Copyright 2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#ifndef DPAA2_ETH_DEBUGFS_H ++#define DPAA2_ETH_DEBUGFS_H ++ ++#include ++ ++struct dpaa2_eth_priv; ++ ++struct dpaa2_debugfs { ++ struct dentry *dir; ++ struct dentry *fq_stats; ++ struct dentry *ch_stats; ++ struct dentry *cpu_stats; ++ struct dentry *reset_stats; ++ struct dentry *reset_mc_stats; ++}; ++ ++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS ++void dpaa2_eth_dbg_init(void); ++void dpaa2_eth_dbg_exit(void); ++void dpaa2_dbg_add(struct dpaa2_eth_priv *priv); ++void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv); ++#else ++static inline void dpaa2_eth_dbg_init(void) {} ++static inline void dpaa2_eth_dbg_exit(void) {} ++static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {} ++static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {} ++#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */ ++ ++#endif /* DPAA2_ETH_DEBUGFS_H */ +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h +@@ -0,0 +1,184 @@ ++/* Copyright 2014-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#undef TRACE_SYSTEM ++#define TRACE_SYSTEM dpaa2_eth ++ ++#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) ++#define _DPAA2_ETH_TRACE_H ++ ++#include ++#include ++#include ++ ++#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u" ++/* trace_printk format for raw buffer event class */ ++#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d" ++ ++/* This is used to declare a class of events. ++ * individual events of this type will be defined below. 
++ */ ++ ++/* Store details about a frame descriptor */ ++DECLARE_EVENT_CLASS(dpaa2_eth_fd, ++ /* Trace function prototype */ ++ TP_PROTO(struct net_device *netdev, ++ const struct dpaa2_fd *fd), ++ ++ /* Repeat argument list here */ ++ TP_ARGS(netdev, fd), ++ ++ /* A structure containing the relevant information we want ++ * to record. Declare name and type for each normal element, ++ * name, type and size for arrays. Use __string for variable ++ * length strings. ++ */ ++ TP_STRUCT__entry( ++ __field(u64, fd_addr) ++ __field(u32, fd_len) ++ __field(u16, fd_offset) ++ __string(name, netdev->name) ++ ), ++ ++ /* The function that assigns values to the above declared ++ * fields ++ */ ++ TP_fast_assign( ++ __entry->fd_addr = dpaa2_fd_get_addr(fd); ++ __entry->fd_len = dpaa2_fd_get_len(fd); ++ __entry->fd_offset = dpaa2_fd_get_offset(fd); ++ __assign_str(name, netdev->name); ++ ), ++ ++ /* This is what gets printed when the trace event is ++ * triggered. ++ */ ++ TP_printk(TR_FMT, ++ __get_str(name), ++ __entry->fd_addr, ++ __entry->fd_len, ++ __entry->fd_offset) ++); ++ ++/* Now declare events of the above type. Format is: ++ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class ++ */ ++ ++/* Tx (egress) fd */ ++DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd, ++ TP_PROTO(struct net_device *netdev, ++ const struct dpaa2_fd *fd), ++ ++ TP_ARGS(netdev, fd) ++); ++ ++/* Rx fd */ ++DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd, ++ TP_PROTO(struct net_device *netdev, ++ const struct dpaa2_fd *fd), ++ ++ TP_ARGS(netdev, fd) ++); ++ ++/* Tx confirmation fd */ ++DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd, ++ TP_PROTO(struct net_device *netdev, ++ const struct dpaa2_fd *fd), ++ ++ TP_ARGS(netdev, fd) ++); ++ ++/* Log data about raw buffers. Useful for tracing DPBP content. */ ++TRACE_EVENT(dpaa2_eth_buf_seed, ++ /* Trace function prototype */ ++ TP_PROTO(struct net_device *netdev, ++ /* virtual address and size */ ++ void *vaddr, ++ size_t size, ++ /* dma map address and size */ ++ dma_addr_t dma_addr, ++ size_t map_size, ++ /* buffer pool id, if relevant */ ++ u16 bpid), ++ ++ /* Repeat argument list here */ ++ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid), ++ ++ /* A structure containing the relevant information we want ++ * to record. Declare name and type for each normal element, ++ * name, type and size for arrays. Use __string for variable ++ * length strings. ++ */ ++ TP_STRUCT__entry( ++ __field(void *, vaddr) ++ __field(size_t, size) ++ __field(dma_addr_t, dma_addr) ++ __field(size_t, map_size) ++ __field(u16, bpid) ++ __string(name, netdev->name) ++ ), ++ ++ /* The function that assigns values to the above declared ++ * fields ++ */ ++ TP_fast_assign( ++ __entry->vaddr = vaddr; ++ __entry->size = size; ++ __entry->dma_addr = dma_addr; ++ __entry->map_size = map_size; ++ __entry->bpid = bpid; ++ __assign_str(name, netdev->name); ++ ), ++ ++ /* This is what gets printed when the trace event is ++ * triggered. ++ */ ++ TP_printk(TR_BUF_FMT, ++ __get_str(name), ++ __entry->vaddr, ++ __entry->size, ++ &__entry->dma_addr, ++ __entry->map_size, ++ __entry->bpid) ++); ++ ++/* If only one event of a certain type needs to be declared, use TRACE_EVENT(). ++ * The syntax is the same as for DECLARE_EVENT_CLASS(). ++ */ ++ ++#endif /* _DPAA2_ETH_TRACE_H */ ++ ++/* This must be outside ifdef _DPAA2_ETH_TRACE_H */ ++#undef TRACE_INCLUDE_PATH ++#define TRACE_INCLUDE_PATH . 
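++/* Usage sketch: with TRACE_SYSTEM dpaa2_eth compiled in, these events can
++ * be captured at runtime through the standard ftrace interface (assuming
++ * debugfs is mounted at /sys/kernel/debug):
++ *   echo 1 > /sys/kernel/debug/tracing/events/dpaa2_eth/enable
++ *   cat /sys/kernel/debug/tracing/trace_pipe
++ */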
++#undef TRACE_INCLUDE_FILE ++#define TRACE_INCLUDE_FILE dpaa2-eth-trace ++#include +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c +@@ -0,0 +1,3155 @@ ++/* Copyright 2014-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "../../fsl-mc/include/dpbp.h" ++#include "../../fsl-mc/include/dpcon.h" ++#include "../../fsl-mc/include/mc.h" ++#include "../../fsl-mc/include/mc-sys.h" ++#include "dpaa2-eth.h" ++#include "dpkg.h" ++ ++/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files ++ * using trace events only need to #include ++ */ ++#define CREATE_TRACE_POINTS ++#include "dpaa2-eth-trace.h" ++ ++MODULE_LICENSE("Dual BSD/GPL"); ++MODULE_AUTHOR("Freescale Semiconductor, Inc"); ++MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver"); ++ ++const char dpaa2_eth_drv_version[] = "0.1"; ++ ++void *dpaa2_eth_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr) ++{ ++ phys_addr_t phys_addr; ++ ++ phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr; ++ ++ return phys_to_virt(phys_addr); ++} ++ ++static void validate_rx_csum(struct dpaa2_eth_priv *priv, ++ u32 fd_status, ++ struct sk_buff *skb) ++{ ++ skb_checksum_none_assert(skb); ++ ++ /* HW checksum validation is disabled, nothing to do here */ ++ if (!(priv->net_dev->features & NETIF_F_RXCSUM)) ++ return; ++ ++ /* Read checksum validation bits */ ++ if (!((fd_status & DPAA2_FAS_L3CV) && ++ (fd_status & DPAA2_FAS_L4CV))) ++ return; ++ ++ /* Inform the stack there's no need to compute L3/L4 csum anymore */ ++ skb->ip_summed = CHECKSUM_UNNECESSARY; ++} ++ ++/* Free a received FD. ++ * Not to be used for Tx conf FDs or on any other paths. 
++ */ ++static void free_rx_fd(struct dpaa2_eth_priv *priv, ++ const struct dpaa2_fd *fd, ++ void *vaddr) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ dma_addr_t addr = dpaa2_fd_get_addr(fd); ++ u8 fd_format = dpaa2_fd_get_format(fd); ++ struct dpaa2_sg_entry *sgt; ++ void *sg_vaddr; ++ int i; ++ ++ /* If single buffer frame, just free the data buffer */ ++ if (fd_format == dpaa2_fd_single) ++ goto free_buf; ++ else if (fd_format != dpaa2_fd_sg) ++ /* we don't support any other format */ ++ return; ++ ++ /* For S/G frames, we first need to free all SG entries */ ++ sgt = vaddr + dpaa2_fd_get_offset(fd); ++ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { ++ addr = dpaa2_sg_get_addr(&sgt[i]); ++ sg_vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr); ++ ++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, ++ DMA_FROM_DEVICE); ++ ++ put_page(virt_to_head_page(sg_vaddr)); ++ ++ if (dpaa2_sg_is_final(&sgt[i])) ++ break; ++ } ++ ++free_buf: ++ put_page(virt_to_head_page(vaddr)); ++} ++ ++/* Build a linear skb based on a single-buffer frame descriptor */ ++static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *ch, ++ const struct dpaa2_fd *fd, ++ void *fd_vaddr) ++{ ++ struct sk_buff *skb = NULL; ++ u16 fd_offset = dpaa2_fd_get_offset(fd); ++ u32 fd_length = dpaa2_fd_get_len(fd); ++ ++ skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE); ++ if (unlikely(!skb)) ++ return NULL; ++ ++ skb_reserve(skb, fd_offset); ++ skb_put(skb, fd_length); ++ ++ ch->buf_count--; ++ ++ return skb; ++} ++ ++/* Build a non linear (fragmented) skb based on a S/G table */ ++static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *ch, ++ struct dpaa2_sg_entry *sgt) ++{ ++ struct sk_buff *skb = NULL; ++ struct device *dev = priv->net_dev->dev.parent; ++ void *sg_vaddr; ++ dma_addr_t sg_addr; ++ u16 sg_offset; ++ u32 sg_length; ++ struct page *page, *head_page; ++ int page_offset; ++ int i; ++ ++ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { ++ struct dpaa2_sg_entry *sge = &sgt[i]; ++ ++ /* NOTE: We only support SG entries in dpaa2_sg_single format, ++ * but this is the only format we may receive from HW anyway ++ */ ++ ++ /* Get the address and length from the S/G entry */ ++ sg_addr = dpaa2_sg_get_addr(sge); ++ sg_vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, sg_addr); ++ dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, ++ DMA_FROM_DEVICE); ++ ++ sg_length = dpaa2_sg_get_len(sge); ++ ++ if (i == 0) { ++ /* We build the skb around the first data buffer */ ++ skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE); ++ if (unlikely(!skb)) ++ return NULL; ++ ++ sg_offset = dpaa2_sg_get_offset(sge); ++ skb_reserve(skb, sg_offset); ++ skb_put(skb, sg_length); ++ } else { ++ /* Rest of the data buffers are stored as skb frags */ ++ page = virt_to_page(sg_vaddr); ++ head_page = virt_to_head_page(sg_vaddr); ++ ++ /* Offset in page (which may be compound). ++ * Data in subsequent SG entries is stored from the ++ * beginning of the buffer, so we don't need to add the ++ * sg_offset. 
++ */ ++ page_offset = ((unsigned long)sg_vaddr & ++ (PAGE_SIZE - 1)) + ++ (page_address(page) - page_address(head_page)); ++ ++ skb_add_rx_frag(skb, i - 1, head_page, page_offset, ++ sg_length, DPAA2_ETH_RX_BUF_SIZE); ++ } ++ ++ if (dpaa2_sg_is_final(sge)) ++ break; ++ } ++ ++ /* Count all data buffers + SG table buffer */ ++ ch->buf_count -= i + 2; ++ ++ return skb; ++} ++ ++/* Main Rx frame processing routine */ ++static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *ch, ++ const struct dpaa2_fd *fd, ++ struct napi_struct *napi, ++ u16 queue_id) ++{ ++ dma_addr_t addr = dpaa2_fd_get_addr(fd); ++ u8 fd_format = dpaa2_fd_get_format(fd); ++ void *vaddr; ++ struct sk_buff *skb; ++ struct rtnl_link_stats64 *percpu_stats; ++ struct dpaa2_eth_drv_stats *percpu_extras; ++ struct device *dev = priv->net_dev->dev.parent; ++ struct dpaa2_fas *fas; ++ void *buf_data; ++ u32 status = 0; ++ ++ /* Tracing point */ ++ trace_dpaa2_rx_fd(priv->net_dev, fd); ++ ++ vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr); ++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE); ++ ++ /* HWA - FAS, timestamp */ ++ fas = dpaa2_eth_get_fas(vaddr); ++ prefetch(fas); ++ /* data / SG table */ ++ buf_data = vaddr + dpaa2_fd_get_offset(fd); ++ prefetch(buf_data); ++ ++ percpu_stats = this_cpu_ptr(priv->percpu_stats); ++ percpu_extras = this_cpu_ptr(priv->percpu_extras); ++ ++ switch (fd_format) { ++ case dpaa2_fd_single: ++ skb = build_linear_skb(priv, ch, fd, vaddr); ++ break; ++ case dpaa2_fd_sg: ++ skb = build_frag_skb(priv, ch, buf_data); ++ put_page(virt_to_head_page(vaddr)); ++ percpu_extras->rx_sg_frames++; ++ percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); ++ break; ++ default: ++ /* We don't support any other format */ ++ goto err_frame_format; ++ } ++ ++ if (unlikely(!skb)) ++ goto err_build_skb; ++ ++ prefetch(skb->data); ++ ++ /* Get the timestamp value */ ++ if (priv->ts_rx_en) { ++ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); ++ u64 *ns = (u64 *)dpaa2_eth_get_ts(vaddr); ++ ++ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns); ++ memset(shhwtstamps, 0, sizeof(*shhwtstamps)); ++ shhwtstamps->hwtstamp = ns_to_ktime(*ns); ++ } ++ ++ /* Check if we need to validate the L4 csum */ ++ if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) { ++ status = le32_to_cpu(fas->status); ++ validate_rx_csum(priv, status, skb); ++ } ++ ++ skb->protocol = eth_type_trans(skb, priv->net_dev); ++ ++ /* Record Rx queue - this will be used when picking a Tx queue to ++ * forward the frames. We're keeping flow affinity through the ++ * network stack. 
++ */ ++ skb_record_rx_queue(skb, queue_id); ++ ++ percpu_stats->rx_packets++; ++ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); ++ ++ napi_gro_receive(napi, skb); ++ ++ return; ++ ++err_build_skb: ++ free_rx_fd(priv, fd, vaddr); ++err_frame_format: ++ percpu_stats->rx_dropped++; ++} ++ ++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE ++/* Processing of Rx frames received on the error FQ ++ * We check and print the error bits and then free the frame ++ */ ++static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *ch, ++ const struct dpaa2_fd *fd, ++ struct napi_struct *napi __always_unused, ++ u16 queue_id __always_unused) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ dma_addr_t addr = dpaa2_fd_get_addr(fd); ++ void *vaddr; ++ struct rtnl_link_stats64 *percpu_stats; ++ struct dpaa2_fas *fas; ++ u32 status = 0; ++ bool check_fas_errors = false; ++ ++ vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr); ++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE); ++ ++ /* check frame errors in the FD field */ ++ if (fd->simple.ctrl & DPAA2_FD_RX_ERR_MASK) { ++ check_fas_errors = !!(fd->simple.ctrl & FD_CTRL_FAERR) && ++ !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV); ++ if (net_ratelimit()) ++ netdev_dbg(priv->net_dev, "Rx frame FD err: %x08\n", ++ fd->simple.ctrl & DPAA2_FD_RX_ERR_MASK); ++ } ++ ++ /* check frame errors in the FAS field */ ++ if (check_fas_errors) { ++ fas = dpaa2_eth_get_fas(vaddr); ++ status = le32_to_cpu(fas->status); ++ if (net_ratelimit()) ++ netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n", ++ status & DPAA2_FAS_RX_ERR_MASK); ++ } ++ free_rx_fd(priv, fd, vaddr); ++ ++ percpu_stats = this_cpu_ptr(priv->percpu_stats); ++ percpu_stats->rx_errors++; ++} ++#endif ++ ++/* Consume all frames pull-dequeued into the store. This is the simplest way to ++ * make sure we don't accidentally issue another volatile dequeue which would ++ * overwrite (leak) frames already in the store. ++ * ++ * The number of frames is returned using the last 2 output arguments, ++ * separately for Rx and Tx confirmations. ++ * ++ * Observance of NAPI budget is not our concern, leaving that to the caller. 
++ */
++static bool consume_frames(struct dpaa2_eth_channel *ch, int *rx_cleaned,
++                           int *tx_conf_cleaned)
++{
++        struct dpaa2_eth_priv *priv = ch->priv;
++        struct dpaa2_eth_fq *fq = NULL;
++        struct dpaa2_dq *dq;
++        const struct dpaa2_fd *fd;
++        int cleaned = 0;
++        int is_last;
++
++        do {
++                dq = dpaa2_io_store_next(ch->store, &is_last);
++                if (unlikely(!dq)) {
++                        /* If we're here, we *must* have placed a
++                         * volatile dequeue command, so keep reading through
++                         * the store until we get some sort of valid response
++                         * token (either a valid frame or an "empty dequeue")
++                         */
++                        continue;
++                }
++
++                fd = dpaa2_dq_fd(dq);
++
++                /* prefetch the frame descriptor */
++                prefetch(fd);
++
++                fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
++                fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
++                cleaned++;
++        } while (!is_last);
++
++        if (!cleaned)
++                return false;
++
++        /* All frames brought in store by a volatile dequeue
++         * come from the same queue
++         */
++        if (fq->type == DPAA2_TX_CONF_FQ)
++                *tx_conf_cleaned += cleaned;
++        else
++                *rx_cleaned += cleaned;
++
++        fq->stats.frames += cleaned;
++        ch->stats.frames += cleaned;
++
++        return true;
++}
++
++/* Configure the egress frame annotation for timestamp update */
++static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
++{
++        struct dpaa2_faead *faead;
++        u32 ctrl;
++        u32 frc;
++
++        /* Mark the egress frame annotation area as valid */
++        frc = dpaa2_fd_get_frc(fd);
++        dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
++
++        /* enable UPD (update prepended data) bit in FAEAD field of
++         * hardware frame annotation area
++         */
++        ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
++        faead = dpaa2_eth_get_faead(buf_start);
++        faead->ctrl = cpu_to_le32(ctrl);
++}
++
++/* Create a frame descriptor based on a fragmented skb */
++static int build_sg_fd(struct dpaa2_eth_priv *priv,
++                       struct sk_buff *skb,
++                       struct dpaa2_fd *fd)
++{
++        struct device *dev = priv->net_dev->dev.parent;
++        void *sgt_buf = NULL;
++        dma_addr_t addr;
++        int nr_frags = skb_shinfo(skb)->nr_frags;
++        struct dpaa2_sg_entry *sgt;
++        int i, err;
++        int sgt_buf_size;
++        struct scatterlist *scl, *crt_scl;
++        int num_sg;
++        int num_dma_bufs;
++        struct dpaa2_fas *fas;
++        struct dpaa2_eth_swa *swa;
++
++        /* Create and map scatterlist.
++         * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not
++         * have to go beyond nr_frags+1.
++         * Note: We don't support chained scatterlists
++         */
++        if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
++                return -EINVAL;
++
++        scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
++        if (unlikely(!scl))
++                return -ENOMEM;
++
++        sg_init_table(scl, nr_frags + 1);
++        num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
++        num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE);
++        if (unlikely(!num_dma_bufs)) {
++                err = -ENOMEM;
++                goto dma_map_sg_failed;
++        }
++
++        /* Prepare the HW SGT structure */
++        sgt_buf_size = priv->tx_data_offset +
++                       sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
++        sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
++        if (unlikely(!sgt_buf)) {
++                err = -ENOMEM;
++                goto sgt_buf_alloc_failed;
++        }
++        sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
++
++        /* PTA from egress side is passed as is to the confirmation side so
++         * we need to clear some fields here in order to find consistent
++         * values on TX confirmation.
We are clearing FAS (Frame Annotation Status) ++ * field from the hardware annotation area ++ */ ++ fas = dpaa2_eth_get_fas(sgt_buf); ++ memset(fas, 0, DPAA2_FAS_SIZE); ++ ++ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); ++ ++ /* Fill in the HW SGT structure. ++ * ++ * sgt_buf is zeroed out, so the following fields are implicit ++ * in all sgt entries: ++ * - offset is 0 ++ * - format is 'dpaa2_sg_single' ++ */ ++ for_each_sg(scl, crt_scl, num_dma_bufs, i) { ++ dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl)); ++ dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl)); ++ } ++ dpaa2_sg_set_final(&sgt[i - 1], true); ++ ++ /* Store the skb backpointer in the SGT buffer. ++ * Fit the scatterlist and the number of buffers alongside the ++ * skb backpointer in the software annotation area. We'll need ++ * all of them on Tx Conf. ++ */ ++ swa = (struct dpaa2_eth_swa *)sgt_buf; ++ swa->skb = skb; ++ swa->scl = scl; ++ swa->num_sg = num_sg; ++ swa->num_dma_bufs = num_dma_bufs; ++ ++ /* Separately map the SGT buffer */ ++ addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); ++ if (unlikely(dma_mapping_error(dev, addr))) { ++ err = -ENOMEM; ++ goto dma_map_single_failed; ++ } ++ dpaa2_fd_set_offset(fd, priv->tx_data_offset); ++ dpaa2_fd_set_format(fd, dpaa2_fd_sg); ++ dpaa2_fd_set_addr(fd, addr); ++ dpaa2_fd_set_len(fd, skb->len); ++ ++ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA | FD_CTRL_PTV1; ++ ++ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ++ enable_tx_tstamp(fd, sgt_buf); ++ ++ return 0; ++ ++dma_map_single_failed: ++ kfree(sgt_buf); ++sgt_buf_alloc_failed: ++ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE); ++dma_map_sg_failed: ++ kfree(scl); ++ return err; ++} ++ ++/* Create a frame descriptor based on a linear skb */ ++static int build_single_fd(struct dpaa2_eth_priv *priv, ++ struct sk_buff *skb, ++ struct dpaa2_fd *fd) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ u8 *buffer_start; ++ struct sk_buff **skbh; ++ dma_addr_t addr; ++ struct dpaa2_fas *fas; ++ ++ buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset - ++ DPAA2_ETH_TX_BUF_ALIGN, ++ DPAA2_ETH_TX_BUF_ALIGN); ++ ++ /* PTA from egress side is passed as is to the confirmation side so ++ * we need to clear some fields here in order to find consistent values ++ * on TX confirmation. We are clearing FAS (Frame Annotation Status) ++ * field from the hardware annotation area ++ */ ++ fas = dpaa2_eth_get_fas(buffer_start); ++ memset(fas, 0, DPAA2_FAS_SIZE); ++ ++ /* Store a backpointer to the skb at the beginning of the buffer ++ * (in the private data area) such that we can release it ++ * on Tx confirm ++ */ ++ skbh = (struct sk_buff **)buffer_start; ++ *skbh = skb; ++ ++ addr = dma_map_single(dev, buffer_start, ++ skb_tail_pointer(skb) - buffer_start, ++ DMA_BIDIRECTIONAL); ++ if (unlikely(dma_mapping_error(dev, addr))) ++ return -ENOMEM; ++ ++ dpaa2_fd_set_addr(fd, addr); ++ dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start)); ++ dpaa2_fd_set_len(fd, skb->len); ++ dpaa2_fd_set_format(fd, dpaa2_fd_single); ++ ++ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA | FD_CTRL_PTV1; ++ ++ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ++ enable_tx_tstamp(fd, buffer_start); ++ ++ return 0; ++} ++ ++/* FD freeing routine on the Tx path ++ * ++ * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb ++ * back-pointed to is also freed. 
++ * This can be called either from dpaa2_eth_tx_conf() or on the error path of ++ * dpaa2_eth_tx(). ++ * Optionally, return the frame annotation status word (FAS), which needs ++ * to be checked if we're on the confirmation path. ++ */ ++static void free_tx_fd(const struct dpaa2_eth_priv *priv, ++ const struct dpaa2_fd *fd, ++ u32 *status) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ dma_addr_t fd_addr; ++ struct sk_buff **skbh, *skb; ++ unsigned char *buffer_start; ++ int unmap_size; ++ struct scatterlist *scl; ++ int num_sg, num_dma_bufs; ++ struct dpaa2_eth_swa *swa; ++ u8 fd_format = dpaa2_fd_get_format(fd); ++ struct dpaa2_fas *fas; ++ ++ fd_addr = dpaa2_fd_get_addr(fd); ++ skbh = dpaa2_eth_iova_to_virt(priv->iommu_domain, fd_addr); ++ ++ /* HWA - FAS, timestamp (for Tx confirmation frames) */ ++ fas = dpaa2_eth_get_fas(skbh); ++ prefetch(fas); ++ ++ switch (fd_format) { ++ case dpaa2_fd_single: ++ skb = *skbh; ++ buffer_start = (unsigned char *)skbh; ++ /* Accessing the skb buffer is safe before dma unmap, because ++ * we didn't map the actual skb shell. ++ */ ++ dma_unmap_single(dev, fd_addr, ++ skb_tail_pointer(skb) - buffer_start, ++ DMA_BIDIRECTIONAL); ++ break; ++ case dpaa2_fd_sg: ++ swa = (struct dpaa2_eth_swa *)skbh; ++ skb = swa->skb; ++ scl = swa->scl; ++ num_sg = swa->num_sg; ++ num_dma_bufs = swa->num_dma_bufs; ++ ++ /* Unmap the scatterlist */ ++ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE); ++ kfree(scl); ++ ++ /* Unmap the SGT buffer */ ++ unmap_size = priv->tx_data_offset + ++ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs); ++ dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL); ++ break; ++ default: ++ /* Unsupported format, mark it as errored and give up */ ++ if (status) ++ *status = ~0; ++ return; ++ } ++ ++ /* Get the timestamp value */ ++ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { ++ struct skb_shared_hwtstamps shhwtstamps; ++ u64 *ns; ++ ++ memset(&shhwtstamps, 0, sizeof(shhwtstamps)); ++ ++ ns = (u64 *)dpaa2_eth_get_ts(skbh); ++ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns); ++ shhwtstamps.hwtstamp = ns_to_ktime(*ns); ++ skb_tstamp_tx(skb, &shhwtstamps); ++ } ++ ++ /* Read the status from the Frame Annotation after we unmap the first ++ * buffer but before we free it. The caller function is responsible ++ * for checking the status value. 
++ */ ++ if (status) ++ *status = le32_to_cpu(fas->status); ++ ++ /* Free SGT buffer kmalloc'ed on tx */ ++ if (fd_format != dpaa2_fd_single) ++ kfree(skbh); ++ ++ /* Move on with skb release */ ++ dev_kfree_skb(skb); ++} ++ ++static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ struct device *dev = net_dev->dev.parent; ++ struct dpaa2_fd fd; ++ struct rtnl_link_stats64 *percpu_stats; ++ struct dpaa2_eth_drv_stats *percpu_extras; ++ struct dpaa2_eth_fq *fq; ++ u16 queue_mapping = skb_get_queue_mapping(skb); ++ int err, i; ++ ++ /* If we're congested, stop this tx queue; transmission of the ++ * current skb happens regardless of congestion state ++ */ ++ fq = &priv->fq[queue_mapping]; ++ ++ dma_sync_single_for_cpu(dev, priv->cscn_dma, ++ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); ++ if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) { ++ netif_stop_subqueue(net_dev, queue_mapping); ++ fq->stats.congestion_entry++; ++ } ++ ++ percpu_stats = this_cpu_ptr(priv->percpu_stats); ++ percpu_extras = this_cpu_ptr(priv->percpu_extras); ++ ++ if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) { ++ struct sk_buff *ns; ++ ++ ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv)); ++ if (unlikely(!ns)) { ++ percpu_stats->tx_dropped++; ++ goto err_alloc_headroom; ++ } ++ dev_kfree_skb(skb); ++ skb = ns; ++ } ++ ++ /* We'll be holding a back-reference to the skb until Tx Confirmation; ++ * we don't want that overwritten by a concurrent Tx with a cloned skb. ++ */ ++ skb = skb_unshare(skb, GFP_ATOMIC); ++ if (unlikely(!skb)) { ++ /* skb_unshare() has already freed the skb */ ++ percpu_stats->tx_dropped++; ++ return NETDEV_TX_OK; ++ } ++ ++ /* Setup the FD fields */ ++ memset(&fd, 0, sizeof(fd)); ++ ++ if (skb_is_nonlinear(skb)) { ++ err = build_sg_fd(priv, skb, &fd); ++ percpu_extras->tx_sg_frames++; ++ percpu_extras->tx_sg_bytes += skb->len; ++ } else { ++ err = build_single_fd(priv, skb, &fd); ++ } ++ ++ if (unlikely(err)) { ++ percpu_stats->tx_dropped++; ++ goto err_build_fd; ++ } ++ ++ /* Tracing point */ ++ trace_dpaa2_tx_fd(net_dev, &fd); ++ ++ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { ++ err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0, ++ fq->tx_qdbin, &fd); ++ /* TODO: This doesn't work. Check on simulator. 
++ * err = dpaa2_io_service_enqueue_fq(NULL, ++ * priv->fq[0].fqid_tx, &fd); ++ */ ++ if (err != -EBUSY) ++ break; ++ } ++ percpu_extras->tx_portal_busy += i; ++ if (unlikely(err < 0)) { ++ percpu_stats->tx_errors++; ++ /* Clean up everything, including freeing the skb */ ++ free_tx_fd(priv, &fd, NULL); ++ } else { ++ percpu_stats->tx_packets++; ++ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd); ++ } ++ ++ return NETDEV_TX_OK; ++ ++err_build_fd: ++err_alloc_headroom: ++ dev_kfree_skb(skb); ++ ++ return NETDEV_TX_OK; ++} ++ ++/* Tx confirmation frame processing routine */ ++static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *ch, ++ const struct dpaa2_fd *fd, ++ struct napi_struct *napi __always_unused, ++ u16 queue_id) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ struct rtnl_link_stats64 *percpu_stats; ++ struct dpaa2_eth_drv_stats *percpu_extras; ++ u32 status = 0; ++ bool errors = !!(fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK); ++ bool check_fas_errors = false; ++ ++ /* Tracing point */ ++ trace_dpaa2_tx_conf_fd(priv->net_dev, fd); ++ ++ percpu_extras = this_cpu_ptr(priv->percpu_extras); ++ percpu_extras->tx_conf_frames++; ++ percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd); ++ ++ /* Check congestion state and wake all queues if necessary */ ++ if (unlikely(__netif_subqueue_stopped(priv->net_dev, queue_id))) { ++ dma_sync_single_for_cpu(dev, priv->cscn_dma, ++ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); ++ if (!dpaa2_cscn_state_congested(priv->cscn_mem)) ++ netif_tx_wake_all_queues(priv->net_dev); ++ } ++ ++ /* check frame errors in the FD field */ ++ if (unlikely(errors)) { ++ check_fas_errors = !!(fd->simple.ctrl & FD_CTRL_FAERR) && ++ !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV); ++ if (net_ratelimit()) ++ netdev_dbg(priv->net_dev, "Tx frame FD err: %x08\n", ++ fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK); ++ } ++ ++ free_tx_fd(priv, fd, check_fas_errors ? &status : NULL); ++ ++ /* if there are no errors, we're done */ ++ if (likely(!errors)) ++ return; ++ ++ percpu_stats = this_cpu_ptr(priv->percpu_stats); ++ /* Tx-conf logically pertains to the egress path. 
*/
++        percpu_stats->tx_errors++;
++
++        if (net_ratelimit())
++                netdev_dbg(priv->net_dev, "Tx frame FAS err: 0x%08x\n",
++                           status & DPAA2_FAS_TX_ERR_MASK);
++}
++
++static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
++{
++        int err;
++
++        err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
++                               DPNI_OFF_RX_L3_CSUM, enable);
++        if (err) {
++                netdev_err(priv->net_dev,
++                           "dpni_set_offload() DPNI_OFF_RX_L3_CSUM failed\n");
++                return err;
++        }
++
++        err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
++                               DPNI_OFF_RX_L4_CSUM, enable);
++        if (err) {
++                netdev_err(priv->net_dev,
++                           "dpni_set_offload() DPNI_OFF_RX_L4_CSUM failed\n");
++                return err;
++        }
++
++        return 0;
++}
++
++static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
++{
++        int err;
++
++        err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
++                               DPNI_OFF_TX_L3_CSUM, enable);
++        if (err) {
++                /* report the TX flag we actually tried to set */
++                netdev_err(priv->net_dev,
++                           "dpni_set_offload() DPNI_OFF_TX_L3_CSUM failed\n");
++                return err;
++        }
++
++        err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
++                               DPNI_OFF_TX_L4_CSUM, enable);
++        if (err) {
++                netdev_err(priv->net_dev,
++                           "dpni_set_offload() DPNI_OFF_TX_L4_CSUM failed\n");
++                return err;
++        }
++
++        return 0;
++}
++
++/* Perform a single release command to add buffers
++ * to the specified buffer pool
++ */
++static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
++{
++        struct device *dev = priv->net_dev->dev.parent;
++        u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
++        void *buf;
++        dma_addr_t addr;
++        int i;
++
++        for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
++                /* Allocate buffer visible to WRIOP + skb shared info +
++                 * alignment padding.
++                 */
++                buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE(priv));
++                if (unlikely(!buf))
++                        goto err_alloc;
++
++                buf = PTR_ALIGN(buf, priv->rx_buf_align);
++
++                addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
++                                      DMA_FROM_DEVICE);
++                if (unlikely(dma_mapping_error(dev, addr)))
++                        goto err_map;
++
++                buf_array[i] = addr;
++
++                /* tracing point */
++                trace_dpaa2_eth_buf_seed(priv->net_dev,
++                                         buf, DPAA2_ETH_BUF_RAW_SIZE(priv),
++                                         addr, DPAA2_ETH_RX_BUF_SIZE,
++                                         bpid);
++        }
++
++release_bufs:
++        /* In case the portal is busy, retry until successful.
++         * The buffer release function would only fail if the QBMan portal
++         * was busy, which implies portal contention (i.e. more CPUs than
++         * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
++         * there is little we can realistically do, short of giving up -
++         * in which case we'd risk depleting the buffer pool and never again
++         * receiving the Rx interrupt which would kick-start the refill
++         * logic. So just keep retrying, at the risk of being moved to
++         * ksoftirqd.
++         */
++        while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
++                cpu_relax();
++        return i;
++
++err_map:
++        put_page(virt_to_head_page(buf));
++err_alloc:
++        if (i)
++                goto release_bufs;
++
++        return 0;
++}
++
++static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
++{
++        int i, j;
++        int new_count;
++
++        /* This is the lazy seeding of Rx buffer pools.
++         * add_bufs() is also used on the Rx hotpath and calls
++         * napi_alloc_frag(). The trouble with that is that it in turn ends
++         * up calling this_cpu_ptr(), which mandates execution in atomic
++         * context.
++         * Rather than splitting up the code, do a one-off preempt disable.
++ */ ++ preempt_disable(); ++ for (j = 0; j < priv->num_channels; j++) { ++ priv->channel[j]->buf_count = 0; ++ for (i = 0; i < priv->num_bufs; ++ i += DPAA2_ETH_BUFS_PER_CMD) { ++ new_count = add_bufs(priv, bpid); ++ priv->channel[j]->buf_count += new_count; ++ ++ if (new_count < DPAA2_ETH_BUFS_PER_CMD) { ++ preempt_enable(); ++ return -ENOMEM; ++ } ++ } ++ } ++ preempt_enable(); ++ ++ return 0; ++} ++ ++/** ++ * Drain the specified number of buffers from the DPNI's private buffer pool. ++ * @count must not exceeed DPAA2_ETH_BUFS_PER_CMD ++ */ ++static void drain_bufs(struct dpaa2_eth_priv *priv, int count) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; ++ void *vaddr; ++ int ret, i; ++ ++ do { ++ ret = dpaa2_io_service_acquire(NULL, priv->bpid, ++ buf_array, count); ++ if (ret < 0) { ++ netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); ++ return; ++ } ++ for (i = 0; i < ret; i++) { ++ /* Same logic as on regular Rx path */ ++ vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, ++ buf_array[i]); ++ dma_unmap_single(dev, buf_array[i], ++ DPAA2_ETH_RX_BUF_SIZE, ++ DMA_FROM_DEVICE); ++ put_page(virt_to_head_page(vaddr)); ++ } ++ } while (ret); ++} ++ ++static void drain_pool(struct dpaa2_eth_priv *priv) ++{ ++ preempt_disable(); ++ drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); ++ drain_bufs(priv, 1); ++ preempt_enable(); ++} ++ ++/* Function is called from softirq context only, so we don't need to guard ++ * the access to percpu count ++ */ ++static int refill_pool(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *ch, ++ u16 bpid) ++{ ++ int new_count; ++ ++ if (likely(ch->buf_count >= priv->refill_thresh)) ++ return 0; ++ ++ do { ++ new_count = add_bufs(priv, bpid); ++ if (unlikely(!new_count)) { ++ /* Out of memory; abort for now, we'll try later on */ ++ break; ++ } ++ ch->buf_count += new_count; ++ } while (ch->buf_count < priv->num_bufs); ++ ++ if (unlikely(ch->buf_count < priv->num_bufs)) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++static int pull_channel(struct dpaa2_eth_channel *ch) ++{ ++ int err; ++ int dequeues = -1; ++ ++ /* Retry while portal is busy */ ++ do { ++ err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store); ++ dequeues++; ++ cpu_relax(); ++ } while (err == -EBUSY); ++ ++ ch->stats.dequeue_portal_busy += dequeues; ++ if (unlikely(err)) ++ ch->stats.pull_err++; ++ ++ return err; ++} ++ ++/* NAPI poll routine ++ * ++ * Frames are dequeued from the QMan channel associated with this NAPI context. ++ * Rx and (if configured) Rx error frames count towards the NAPI budget. Tx ++ * confirmation frames are limited by a threshold per NAPI poll cycle. ++ */ ++static int dpaa2_eth_poll(struct napi_struct *napi, int budget) ++{ ++ struct dpaa2_eth_channel *ch; ++ int rx_cleaned = 0, tx_conf_cleaned = 0; ++ bool store_cleaned; ++ struct dpaa2_eth_priv *priv; ++ int err; ++ ++ ch = container_of(napi, struct dpaa2_eth_channel, napi); ++ priv = ch->priv; ++ ++ do { ++ err = pull_channel(ch); ++ if (unlikely(err)) ++ break; ++ ++ /* Refill pool if appropriate */ ++ refill_pool(priv, ch, priv->bpid); ++ ++ store_cleaned = consume_frames(ch, &rx_cleaned, ++ &tx_conf_cleaned); ++ ++ /* If we've either consumed the budget with Rx frames, ++ * or reached the Tx conf threshold, we're done. 
++ */ ++ if (rx_cleaned >= budget || ++ tx_conf_cleaned >= TX_CONF_PER_NAPI_POLL) ++ return budget; ++ } while (store_cleaned); ++ ++ /* We didn't consume the entire budget, finish napi and ++ * re-enable data availability notifications. ++ */ ++ napi_complete(napi); ++ do { ++ err = dpaa2_io_service_rearm(NULL, &ch->nctx); ++ cpu_relax(); ++ } while (err == -EBUSY); ++ ++ return max(rx_cleaned, 1); ++} ++ ++static void enable_ch_napi(struct dpaa2_eth_priv *priv) ++{ ++ struct dpaa2_eth_channel *ch; ++ int i; ++ ++ for (i = 0; i < priv->num_channels; i++) { ++ ch = priv->channel[i]; ++ napi_enable(&ch->napi); ++ } ++} ++ ++static void disable_ch_napi(struct dpaa2_eth_priv *priv) ++{ ++ struct dpaa2_eth_channel *ch; ++ int i; ++ ++ for (i = 0; i < priv->num_channels; i++) { ++ ch = priv->channel[i]; ++ napi_disable(&ch->napi); ++ } ++} ++ ++static int link_state_update(struct dpaa2_eth_priv *priv) ++{ ++ struct dpni_link_state state; ++ int err; ++ ++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); ++ if (unlikely(err)) { ++ netdev_err(priv->net_dev, ++ "dpni_get_link_state() failed\n"); ++ return err; ++ } ++ ++ /* Check link state; speed / duplex changes are not treated yet */ ++ if (priv->link_state.up == state.up) ++ return 0; ++ ++ priv->link_state = state; ++ if (state.up) { ++ netif_carrier_on(priv->net_dev); ++ netif_tx_start_all_queues(priv->net_dev); ++ } else { ++ netif_tx_stop_all_queues(priv->net_dev); ++ netif_carrier_off(priv->net_dev); ++ } ++ ++ netdev_info(priv->net_dev, "Link Event: state %s\n", ++ state.up ? "up" : "down"); ++ ++ return 0; ++} ++ ++static int dpaa2_eth_open(struct net_device *net_dev) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int err; ++ ++ /* We'll only start the txqs when the link is actually ready; make sure ++ * we don't race against the link up notification, which may come ++ * immediately after dpni_enable(); ++ */ ++ netif_tx_stop_all_queues(net_dev); ++ ++ /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will ++ * return true and cause 'ip link show' to report the LOWER_UP flag, ++ * even though the link notification wasn't even received. ++ */ ++ netif_carrier_off(net_dev); ++ ++ err = seed_pool(priv, priv->bpid); ++ if (err) { ++ /* Not much to do; the buffer pool, though not filled up, ++ * may still contain some buffers which would enable us ++ * to limp on. ++ */ ++ netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", ++ priv->dpbp_dev->obj_desc.id, priv->bpid); ++ } ++ ++ if (priv->tx_pause_frames) ++ priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD; ++ else ++ priv->refill_thresh = DPAA2_ETH_REFILL_THRESH_TD; ++ ++ err = dpni_enable(priv->mc_io, 0, priv->mc_token); ++ if (err < 0) { ++ netdev_err(net_dev, "dpni_enable() failed\n"); ++ goto enable_err; ++ } ++ ++ /* If the DPMAC object has already processed the link up interrupt, ++ * we have to learn the link state ourselves.
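++ * (The interrupt only fires on state changes, so a link that came up ++ * before this point would otherwise go unnoticed.)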
++ */ ++ err = link_state_update(priv); ++ if (err < 0) { ++ netdev_err(net_dev, "Can't update link state\n"); ++ goto link_state_err; ++ } ++ ++ return 0; ++ ++link_state_err: ++enable_err: ++ priv->refill_thresh = 0; ++ drain_pool(priv); ++ return err; ++} ++ ++static int dpaa2_eth_stop(struct net_device *net_dev) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int dpni_enabled; ++ int retries = 10, i; ++ ++ netif_tx_stop_all_queues(net_dev); ++ netif_carrier_off(net_dev); ++ ++ /* Loop while dpni_disable() attempts to drain the egress FQs ++ * and confirm them back to us. ++ */ ++ do { ++ dpni_disable(priv->mc_io, 0, priv->mc_token); ++ dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); ++ if (dpni_enabled) ++ /* Allow the MC some slack */ ++ msleep(100); ++ } while (dpni_enabled && --retries); ++ if (!retries) { ++ netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n"); ++ /* Must go on and disable NAPI nonetheless, so we don't crash at ++ * the next "ifconfig up" ++ */ ++ } ++ ++ priv->refill_thresh = 0; ++ ++ /* Wait for all running napi poll routines to finish, so that no ++ * new refill operations are started. ++ */ ++ for (i = 0; i < priv->num_channels; i++) ++ napi_synchronize(&priv->channel[i]->napi); ++ ++ /* Empty the buffer pool */ ++ drain_pool(priv); ++ ++ return 0; ++} ++ ++static int dpaa2_eth_init(struct net_device *net_dev) ++{ ++ u64 supported = 0; ++ u64 not_supported = 0; ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ u32 options = priv->dpni_attrs.options; ++ ++ /* Capabilities listing */ ++ supported |= IFF_LIVE_ADDR_CHANGE; ++ ++ if (options & DPNI_OPT_NO_MAC_FILTER) ++ not_supported |= IFF_UNICAST_FLT; ++ else ++ supported |= IFF_UNICAST_FLT; ++ ++ net_dev->priv_flags |= supported; ++ net_dev->priv_flags &= ~not_supported; ++ ++ /* Features */ ++ net_dev->features = NETIF_F_RXCSUM | ++ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | ++ NETIF_F_SG | NETIF_F_HIGHDMA | ++ NETIF_F_LLTX; ++ net_dev->hw_features = net_dev->features; ++ ++ return 0; ++} ++ ++static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ struct device *dev = net_dev->dev.parent; ++ int err; ++ ++ err = eth_mac_addr(net_dev, addr); ++ if (err < 0) { ++ dev_err(dev, "eth_mac_addr() failed (%d)\n", err); ++ return err; ++ } ++ ++ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, ++ net_dev->dev_addr); ++ if (err) { ++ dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err); ++ return err; ++ } ++ ++ return 0; ++} ++ ++/** Fill in counters maintained by the GPP driver. These may be different from ++ * the hardware counters obtained by ethtool. 
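++ * The per-CPU counters are summed without locking; a momentarily ++ * inconsistent snapshot is acceptable for interface statistics.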
++ */ ++static void dpaa2_eth_get_stats(struct net_device *net_dev, ++ struct rtnl_link_stats64 *stats) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ struct rtnl_link_stats64 *percpu_stats; ++ u64 *cpustats; ++ u64 *netstats = (u64 *)stats; ++ int i, j; ++ int num = sizeof(struct rtnl_link_stats64) / sizeof(u64); ++ ++ for_each_possible_cpu(i) { ++ percpu_stats = per_cpu_ptr(priv->percpu_stats, i); ++ cpustats = (u64 *)percpu_stats; ++ for (j = 0; j < num; j++) ++ netstats[j] += cpustats[j]; ++ } ++} ++ ++static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int err; ++ ++ /* Set the maximum Rx frame length to match the transmit side; ++ * account for L2 headers when computing the MFL ++ */ ++ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, ++ (u16)DPAA2_ETH_L2_MAX_FRM(mtu)); ++ if (err) { ++ netdev_err(net_dev, "dpni_set_max_frame_length() failed\n"); ++ return err; ++ } ++ ++ net_dev->mtu = mtu; ++ return 0; ++} ++ ++/* Copy mac unicast addresses from @net_dev to @priv. ++ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. ++ */ ++static void add_uc_hw_addr(const struct net_device *net_dev, ++ struct dpaa2_eth_priv *priv) ++{ ++ struct netdev_hw_addr *ha; ++ int err; ++ ++ netdev_for_each_uc_addr(ha, net_dev) { ++ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, ++ ha->addr); ++ if (err) ++ netdev_warn(priv->net_dev, ++ "Could not add ucast MAC %pM to the filtering table (err %d)\n", ++ ha->addr, err); ++ } ++} ++ ++/* Copy mac multicast addresses from @net_dev to @priv ++ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. ++ */ ++static void add_mc_hw_addr(const struct net_device *net_dev, ++ struct dpaa2_eth_priv *priv) ++{ ++ struct netdev_hw_addr *ha; ++ int err; ++ ++ netdev_for_each_mc_addr(ha, net_dev) { ++ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, ++ ha->addr); ++ if (err) ++ netdev_warn(priv->net_dev, ++ "Could not add mcast MAC %pM to the filtering table (err %d)\n", ++ ha->addr, err); ++ } ++} ++ ++static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int uc_count = netdev_uc_count(net_dev); ++ int mc_count = netdev_mc_count(net_dev); ++ u8 max_mac = priv->dpni_attrs.mac_filter_entries; ++ u32 options = priv->dpni_attrs.options; ++ u16 mc_token = priv->mc_token; ++ struct fsl_mc_io *mc_io = priv->mc_io; ++ int err; ++ ++ /* Basic sanity checks; these probably indicate a misconfiguration */ ++ if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0) ++ netdev_info(net_dev, ++ "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n", ++ max_mac); ++ ++ /* Force promiscuous if the uc or mc counts exceed our capabilities. */ ++ if (uc_count > max_mac) { ++ netdev_info(net_dev, ++ "Unicast addr count reached %d, max allowed is %d; forcing promisc\n", ++ uc_count, max_mac); ++ goto force_promisc; ++ } ++ if (mc_count + uc_count > max_mac) { ++ netdev_info(net_dev, ++ "Unicast + Multicast addr count reached %d, max allowed is %d; forcing promisc\n", ++ uc_count + mc_count, max_mac); ++ goto force_mc_promisc; ++ } ++ ++ /* Adjust promisc settings due to flag combinations */ ++ if (net_dev->flags & IFF_PROMISC) ++ goto force_promisc; ++ if (net_dev->flags & IFF_ALLMULTI) { ++ /* First, rebuild unicast filtering table. This should be done ++ * in promisc mode, in order to avoid frame loss while we ++ * progressively add entries to the table. 
++ * We don't know whether we had been in promisc already, and ++ * making an MC call to find out is expensive; so set uc promisc ++ * nonetheless. ++ */ ++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); ++ if (err) ++ netdev_warn(net_dev, "Can't set uc promisc\n"); ++ ++ /* Actual uc table reconstruction. */ ++ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); ++ if (err) ++ netdev_warn(net_dev, "Can't clear uc filters\n"); ++ add_uc_hw_addr(net_dev, priv); ++ ++ /* Finally, clear uc promisc and set mc promisc as requested. */ ++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); ++ if (err) ++ netdev_warn(net_dev, "Can't clear uc promisc\n"); ++ goto force_mc_promisc; ++ } ++ ++ /* Neither unicast nor multicast promisc will be on... eventually. ++ * For now, rebuild mac filtering tables while forcing both of them on. ++ */ ++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); ++ if (err) ++ netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err); ++ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); ++ if (err) ++ netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err); ++ ++ /* Actual mac filtering tables reconstruction */ ++ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); ++ if (err) ++ netdev_warn(net_dev, "Can't clear mac filters\n"); ++ add_mc_hw_addr(net_dev, priv); ++ add_uc_hw_addr(net_dev, priv); ++ ++ /* Now we can clear both ucast and mcast promisc, without the risk ++ * of dropping legitimate frames anymore. ++ */ ++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); ++ if (err) ++ netdev_warn(net_dev, "Can't clear ucast promisc\n"); ++ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0); ++ if (err) ++ netdev_warn(net_dev, "Can't clear mcast promisc\n"); ++ ++ return; ++ ++force_promisc: ++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); ++ if (err) ++ netdev_warn(net_dev, "Can't set ucast promisc\n"); ++force_mc_promisc: ++ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); ++ if (err) ++ netdev_warn(net_dev, "Can't set mcast promisc\n"); ++} ++ ++static int dpaa2_eth_set_features(struct net_device *net_dev, ++ netdev_features_t features) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ netdev_features_t changed = features ^ net_dev->features; ++ bool enable; ++ int err; ++ ++ if (changed & NETIF_F_RXCSUM) { ++ enable = !!(features & NETIF_F_RXCSUM); ++ err = set_rx_csum(priv, enable); ++ if (err) ++ return err; ++ } ++ ++ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { ++ enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); ++ err = set_tx_csum(priv, enable); ++ if (err) ++ return err; ++ } ++ ++ return 0; ++} ++ ++static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(dev); ++ struct hwtstamp_config config; ++ ++ if (copy_from_user(&config, rq->ifr_data, sizeof(config))) ++ return -EFAULT; ++ ++ switch (config.tx_type) { ++ case HWTSTAMP_TX_OFF: ++ priv->ts_tx_en = false; ++ break; ++ case HWTSTAMP_TX_ON: ++ priv->ts_tx_en = true; ++ break; ++ default: ++ return -ERANGE; ++ } ++ ++ if (config.rx_filter == HWTSTAMP_FILTER_NONE) { ++ priv->ts_rx_en = false; ++ } else { ++ priv->ts_rx_en = true; ++ /* TS is set for all frame types, not only those requested */ ++ config.rx_filter = HWTSTAMP_FILTER_ALL; ++ } ++ ++ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
++ -EFAULT : 0; ++} ++ ++static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) ++{ ++ if (cmd == SIOCSHWTSTAMP) ++ return dpaa2_eth_ts_ioctl(dev, rq, cmd); ++ ++ return -EINVAL; ++} ++ ++static const struct net_device_ops dpaa2_eth_ops = { ++ .ndo_open = dpaa2_eth_open, ++ .ndo_start_xmit = dpaa2_eth_tx, ++ .ndo_stop = dpaa2_eth_stop, ++ .ndo_init = dpaa2_eth_init, ++ .ndo_set_mac_address = dpaa2_eth_set_addr, ++ .ndo_get_stats64 = dpaa2_eth_get_stats, ++ .ndo_change_mtu = dpaa2_eth_change_mtu, ++ .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, ++ .ndo_set_features = dpaa2_eth_set_features, ++ .ndo_do_ioctl = dpaa2_eth_ioctl, ++}; ++ ++static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) ++{ ++ struct dpaa2_eth_channel *ch; ++ ++ ch = container_of(ctx, struct dpaa2_eth_channel, nctx); ++ ++ /* Update NAPI statistics */ ++ ch->stats.cdan++; ++ ++ napi_schedule_irqoff(&ch->napi); ++} ++ ++/* Allocate and configure a DPCON object */ ++static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) ++{ ++ struct fsl_mc_device *dpcon; ++ struct device *dev = priv->net_dev->dev.parent; ++ struct dpcon_attr attrs; ++ int err; ++ ++ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), ++ FSL_MC_POOL_DPCON, &dpcon); ++ if (err) { ++ dev_info(dev, "Not enough DPCONs, will go on as-is\n"); ++ return NULL; ++ } ++ ++ err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); ++ if (err) { ++ dev_err(dev, "dpcon_open() failed\n"); ++ goto err_open; ++ } ++ ++ err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle); ++ if (err) { ++ dev_err(dev, "dpcon_reset() failed\n"); ++ goto err_reset; ++ } ++ ++ err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs); ++ if (err) { ++ dev_err(dev, "dpcon_get_attributes() failed\n"); ++ goto err_get_attr; ++ } ++ ++ err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); ++ if (err) { ++ dev_err(dev, "dpcon_enable() failed\n"); ++ goto err_enable; ++ } ++ ++ return dpcon; ++ ++err_enable: ++err_get_attr: ++err_reset: ++ dpcon_close(priv->mc_io, 0, dpcon->mc_handle); ++err_open: ++ fsl_mc_object_free(dpcon); ++ ++ return NULL; ++} ++ ++static void free_dpcon(struct dpaa2_eth_priv *priv, ++ struct fsl_mc_device *dpcon) ++{ ++ dpcon_disable(priv->mc_io, 0, dpcon->mc_handle); ++ dpcon_close(priv->mc_io, 0, dpcon->mc_handle); ++ fsl_mc_object_free(dpcon); ++} ++ ++static struct dpaa2_eth_channel * ++alloc_channel(struct dpaa2_eth_priv *priv) ++{ ++ struct dpaa2_eth_channel *channel; ++ struct dpcon_attr attr; ++ struct device *dev = priv->net_dev->dev.parent; ++ int err; ++ ++ channel = kzalloc(sizeof(*channel), GFP_KERNEL); ++ if (!channel) ++ return NULL; ++ ++ channel->dpcon = setup_dpcon(priv); ++ if (!channel->dpcon) ++ goto err_setup; ++ ++ err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle, ++ &attr); ++ if (err) { ++ dev_err(dev, "dpcon_get_attributes() failed\n"); ++ goto err_get_attr; ++ } ++ ++ channel->dpcon_id = attr.id; ++ channel->ch_id = attr.qbman_ch_id; ++ channel->priv = priv; ++ ++ return channel; ++ ++err_get_attr: ++ free_dpcon(priv, channel->dpcon); ++err_setup: ++ kfree(channel); ++ return NULL; ++} ++ ++static void free_channel(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *channel) ++{ ++ free_dpcon(priv, channel->dpcon); ++ kfree(channel); ++} ++ ++/* DPIO setup: allocate and configure QBMan channels, setup core affinity ++ * and register data availability notifications ++ */ ++static int setup_dpio(struct dpaa2_eth_priv *priv) ++{ ++ struct 
dpaa2_io_notification_ctx *nctx; ++ struct dpaa2_eth_channel *channel; ++ struct dpcon_notification_cfg dpcon_notif_cfg; ++ struct device *dev = priv->net_dev->dev.parent; ++ int i, err; ++ ++ /* We want the ability to spread ingress traffic (RX, TX conf) to as ++ * many cores as possible, so we need one channel for each core ++ * (unless there are fewer queues than cores, in which case the extra ++ * channels would be wasted). ++ * Allocate one channel per core and register it to the core's ++ * affine DPIO. If not enough channels are available for all cores ++ * or if some cores don't have an affine DPIO, there will be no ++ * ingress frame processing on those cores. ++ */ ++ cpumask_clear(&priv->dpio_cpumask); ++ for_each_online_cpu(i) { ++ /* Try to allocate a channel */ ++ channel = alloc_channel(priv); ++ if (!channel) { ++ dev_info(dev, ++ "No affine channel for cpu %d and above\n", i); ++ goto err_alloc_ch; ++ } ++ ++ priv->channel[priv->num_channels] = channel; ++ ++ nctx = &channel->nctx; ++ nctx->is_cdan = 1; ++ nctx->cb = cdan_cb; ++ nctx->id = channel->ch_id; ++ nctx->desired_cpu = i; ++ ++ /* Register the new context */ ++ err = dpaa2_io_service_register(NULL, nctx); ++ if (err) { ++ dev_dbg(dev, "No affine DPIO for cpu %d\n", i); ++ /* If no affine DPIO for this core, there's probably ++ * none available for next cores either. ++ */ ++ goto err_service_reg; ++ } ++ ++ /* Register DPCON notification with MC */ ++ dpcon_notif_cfg.dpio_id = nctx->dpio_id; ++ dpcon_notif_cfg.priority = 0; ++ dpcon_notif_cfg.user_ctx = nctx->qman64; ++ err = dpcon_set_notification(priv->mc_io, 0, ++ channel->dpcon->mc_handle, ++ &dpcon_notif_cfg); ++ if (err) { ++ dev_err(dev, "dpcon_set_notification() failed\n"); ++ goto err_set_cdan; ++ } ++ ++ /* If we managed to allocate a channel and also found an affine ++ * DPIO for this core, add it to the final mask ++ */ ++ cpumask_set_cpu(i, &priv->dpio_cpumask); ++ priv->num_channels++; ++ ++ /* Stop if we already have enough channels to accommodate all ++ * RX and TX conf queues ++ */ ++ if (priv->num_channels == dpaa2_eth_queue_count(priv)) ++ break; ++ } ++ ++ /* Tx confirmation queues can only be serviced by cpus ++ * with an affine DPIO/channel ++ */ ++ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask); ++ ++ return 0; ++ ++err_set_cdan: ++ dpaa2_io_service_deregister(NULL, nctx); ++err_service_reg: ++ free_channel(priv, channel); ++err_alloc_ch: ++ if (cpumask_empty(&priv->dpio_cpumask)) { ++ dev_dbg(dev, "No cpu with an affine DPIO/DPCON\n"); ++ return -ENODEV; ++ } ++ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask); ++ ++ dev_info(dev, "Cores %*pbl available for processing ingress traffic\n", ++ cpumask_pr_args(&priv->dpio_cpumask)); ++ ++ return 0; ++} ++ ++static void free_dpio(struct dpaa2_eth_priv *priv) ++{ ++ int i; ++ struct dpaa2_eth_channel *ch; ++ ++ /* deregister CDAN notifications and free channels */ ++ for (i = 0; i < priv->num_channels; i++) { ++ ch = priv->channel[i]; ++ dpaa2_io_service_deregister(NULL, &ch->nctx); ++ free_channel(priv, ch); ++ } ++} ++ ++static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv, ++ int cpu) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ int i; ++ ++ for (i = 0; i < priv->num_channels; i++) ++ if (priv->channel[i]->nctx.desired_cpu == cpu) ++ return priv->channel[i]; ++ ++ /* We should never get here.
Issue a warning and return ++ * the first channel, because it's still better than nothing ++ */ ++ dev_warn(dev, "No affine channel found for cpu %d\n", cpu); ++ ++ return priv->channel[0]; ++} ++ ++static void set_fq_affinity(struct dpaa2_eth_priv *priv) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ struct cpumask xps_mask = CPU_MASK_NONE; ++ struct dpaa2_eth_fq *fq; ++ int rx_cpu, txc_cpu; ++ int i, err; ++ ++ /* For each FQ, pick one channel/CPU to deliver frames to. ++ * This may well change at runtime, either through irqbalance or ++ * through direct user intervention. ++ */ ++ rx_cpu = cpumask_first(&priv->dpio_cpumask); ++ txc_cpu = cpumask_first(&priv->txconf_cpumask); ++ ++ for (i = 0; i < priv->num_fqs; i++) { ++ fq = &priv->fq[i]; ++ switch (fq->type) { ++ case DPAA2_RX_FQ: ++ case DPAA2_RX_ERR_FQ: ++ fq->target_cpu = rx_cpu; ++ rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask); ++ if (rx_cpu >= nr_cpu_ids) ++ rx_cpu = cpumask_first(&priv->dpio_cpumask); ++ break; ++ case DPAA2_TX_CONF_FQ: ++ fq->target_cpu = txc_cpu; ++ ++ /* register txc_cpu to XPS */ ++ cpumask_set_cpu(txc_cpu, &xps_mask); ++ err = netif_set_xps_queue(priv->net_dev, &xps_mask, ++ fq->flowid); ++ if (err) ++ dev_info_once(dev, ++ "Tx: error setting XPS queue\n"); ++ cpumask_clear_cpu(txc_cpu, &xps_mask); ++ ++ txc_cpu = cpumask_next(txc_cpu, &priv->txconf_cpumask); ++ if (txc_cpu >= nr_cpu_ids) ++ txc_cpu = cpumask_first(&priv->txconf_cpumask); ++ break; ++ default: ++ dev_err(dev, "Unknown FQ type: %d\n", fq->type); ++ } ++ fq->channel = get_affine_channel(priv, fq->target_cpu); ++ } ++} ++ ++static void setup_fqs(struct dpaa2_eth_priv *priv) ++{ ++ int i; ++ ++ /* We have one TxConf FQ per Tx flow. Tx queues MUST be at the ++ * beginning of the queue array. ++ * The number of Rx and Tx queues is the same. ++ * We only support one traffic class for now.
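++ * For example, with two queue pairs the array is laid out as: ++ * fq[0] = TxConf0, fq[1] = TxConf1, fq[2] = Rx0, fq[3] = Rx1, plus one ++ * trailing RxErr entry when the Rx error queue is compiled in.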
++ */ ++ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { ++ priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ; ++ priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf; ++ priv->fq[priv->num_fqs++].flowid = (u16)i; ++ } ++ ++ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { ++ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; ++ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; ++ priv->fq[priv->num_fqs++].flowid = (u16)i; ++ } ++ ++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE ++ /* We have exactly one Rx error queue per DPNI */ ++ priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ; ++ priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err; ++#endif ++ ++ /* For each FQ, decide on which core to process incoming frames */ ++ set_fq_affinity(priv); ++} ++ ++/* Allocate and configure one buffer pool for each interface */ ++static int setup_dpbp(struct dpaa2_eth_priv *priv) ++{ ++ int err; ++ struct fsl_mc_device *dpbp_dev; ++ struct dpbp_attr dpbp_attrs; ++ struct device *dev = priv->net_dev->dev.parent; ++ ++ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, ++ &dpbp_dev); ++ if (err) { ++ dev_err(dev, "DPBP device allocation failed\n"); ++ return err; ++ } ++ ++ priv->dpbp_dev = dpbp_dev; ++ ++ err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id, ++ &dpbp_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dpbp_open() failed\n"); ++ goto err_open; ++ } ++ ++ err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dpbp_reset() failed\n"); ++ goto err_reset; ++ } ++ ++ err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dpbp_enable() failed\n"); ++ goto err_enable; ++ } ++ ++ err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle, ++ &dpbp_attrs); ++ if (err) { ++ dev_err(dev, "dpbp_get_attributes() failed\n"); ++ goto err_get_attr; ++ } ++ ++ priv->bpid = dpbp_attrs.bpid; ++ priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC / priv->num_channels; ++ ++ return 0; ++ ++err_get_attr: ++ dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); ++err_enable: ++err_reset: ++ dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); ++err_open: ++ fsl_mc_object_free(dpbp_dev); ++ ++ return err; ++} ++ ++static void free_dpbp(struct dpaa2_eth_priv *priv) ++{ ++ drain_pool(priv); ++ dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); ++ dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); ++ fsl_mc_object_free(priv->dpbp_dev); ++} ++ ++static int setup_tx_congestion(struct dpaa2_eth_priv *priv) ++{ ++ struct dpni_congestion_notification_cfg cong_notif_cfg = { 0 }; ++ struct device *dev = priv->net_dev->dev.parent; ++ int err; ++ ++ priv->cscn_unaligned = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN, ++ GFP_KERNEL); ++ if (!priv->cscn_unaligned) ++ return -ENOMEM; ++ ++ priv->cscn_mem = PTR_ALIGN(priv->cscn_unaligned, DPAA2_CSCN_ALIGN); ++ priv->cscn_dma = dma_map_single(dev, priv->cscn_mem, DPAA2_CSCN_SIZE, ++ DMA_FROM_DEVICE); ++ if (dma_mapping_error(dev, priv->cscn_dma)) { ++ dev_err(dev, "Error mapping CSCN memory area\n"); ++ err = -ENOMEM; ++ goto err_dma_map; ++ } ++ ++ cong_notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES; ++ cong_notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH; ++ cong_notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH; ++ cong_notif_cfg.message_ctx = (u64)priv; ++ cong_notif_cfg.message_iova = priv->cscn_dma; ++ cong_notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER | ++ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT | ++ DPNI_CONG_OPT_COHERENT_WRITE; ++ err = 
dpni_set_congestion_notification(priv->mc_io, 0, priv->mc_token, ++ DPNI_QUEUE_TX, 0, ++ &cong_notif_cfg); ++ if (err) { ++ dev_err(dev, "dpni_set_congestion_notification failed\n"); ++ goto err_set_cong; ++ } ++ ++ return 0; ++ ++err_set_cong: ++ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); ++err_dma_map: ++ kfree(priv->cscn_unaligned); ++ ++ return err; ++} ++ ++/* Configure the DPNI object this interface is associated with */ ++static int setup_dpni(struct fsl_mc_device *ls_dev) ++{ ++ struct device *dev = &ls_dev->dev; ++ struct dpaa2_eth_priv *priv; ++ struct net_device *net_dev; ++ struct dpni_buffer_layout buf_layout; ++ struct dpni_link_cfg cfg = {0}; ++ int err; ++ ++ net_dev = dev_get_drvdata(dev); ++ priv = netdev_priv(net_dev); ++ ++ priv->dpni_id = ls_dev->obj_desc.id; ++ ++ /* get a handle for the DPNI object */ ++ err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token); ++ if (err) { ++ dev_err(dev, "dpni_open() failed\n"); ++ goto err_open; ++ } ++ ++ ls_dev->mc_io = priv->mc_io; ++ ls_dev->mc_handle = priv->mc_token; ++ ++ err = dpni_reset(priv->mc_io, 0, priv->mc_token); ++ if (err) { ++ dev_err(dev, "dpni_reset() failed\n"); ++ goto err_reset; ++ } ++ ++ err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, ++ &priv->dpni_attrs); ++ ++ if (err) { ++ dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err); ++ goto err_get_attr; ++ } ++ ++ /* due to a limitation in WRIOP 1.0.0 (ERR009354), the Rx buf ++ * align value must be a multiple of 256. ++ */ ++ priv->rx_buf_align = ++ priv->dpni_attrs.wriop_version & 0x3ff ? ++ DPAA2_ETH_RX_BUF_ALIGN : DPAA2_ETH_RX_BUF_ALIGN_V1; ++ ++ /* Update number of logical FQs in netdev */ ++ err = netif_set_real_num_tx_queues(net_dev, ++ dpaa2_eth_queue_count(priv)); ++ if (err) { ++ dev_err(dev, "netif_set_real_num_tx_queues failed (%d)\n", err); ++ goto err_set_tx_queues; ++ } ++ ++ err = netif_set_real_num_rx_queues(net_dev, ++ dpaa2_eth_queue_count(priv)); ++ if (err) { ++ dev_err(dev, "netif_set_real_num_rx_queues failed (%d)\n", err); ++ goto err_set_rx_queues; ++ } ++ ++ /* Configure buffer layouts */ ++ /* rx buffer */ ++ buf_layout.pass_parser_result = true; ++ buf_layout.pass_frame_status = true; ++ buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; ++ buf_layout.data_align = priv->rx_buf_align; ++ buf_layout.data_head_room = DPAA2_ETH_RX_HEAD_ROOM; ++ buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | ++ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | ++ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE | ++ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | ++ DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM; ++ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, ++ DPNI_QUEUE_RX, &buf_layout); ++ if (err) { ++ dev_err(dev, ++ "dpni_set_buffer_layout(RX) failed\n"); ++ goto err_buf_layout; ++ } ++ ++ /* tx buffer */ ++ buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | ++ DPNI_BUF_LAYOUT_OPT_TIMESTAMP | ++ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE; ++ buf_layout.pass_timestamp = true; ++ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, ++ DPNI_QUEUE_TX, &buf_layout); ++ if (err) { ++ dev_err(dev, ++ "dpni_set_buffer_layout(TX) failed\n"); ++ goto err_buf_layout; ++ } ++ ++ /* tx-confirm buffer */ ++ buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | ++ DPNI_BUF_LAYOUT_OPT_TIMESTAMP; ++ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, ++ DPNI_QUEUE_TX_CONFIRM, &buf_layout); ++ if (err) { ++ dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n"); ++ goto err_buf_layout; ++ } ++ ++ /* Now that 
we've set our tx buffer layout, retrieve the minimum ++ * required tx data offset. ++ */ ++ err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token, ++ &priv->tx_data_offset); ++ if (err) { ++ dev_err(dev, "dpni_get_tx_data_offset() failed (%d)\n", err); ++ goto err_data_offset; ++ } ++ ++ if ((priv->tx_data_offset % 64) != 0) ++ dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n", ++ priv->tx_data_offset); ++ ++ /* Accommodate software annotation space (SWA) */ ++ priv->tx_data_offset += DPAA2_ETH_SWA_SIZE; ++ ++ /* Enable congestion notifications for Tx queues */ ++ err = setup_tx_congestion(priv); ++ if (err) ++ goto err_tx_cong; ++ ++ /* allocate classification rule space */ ++ priv->cls_rule = kzalloc(sizeof(*priv->cls_rule) * ++ dpaa2_eth_fs_count(priv), GFP_KERNEL); ++ if (!priv->cls_rule) { ++ err = -ENOMEM; ++ goto err_cls_rule; ++ } ++ ++ /* Enable flow control */ ++ cfg.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE; ++ priv->tx_pause_frames = 1; ++ ++ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); ++ if (err) { ++ netdev_err(net_dev, "ERROR %d setting link cfg\n", err); ++ goto err_set_link_cfg; ++ } ++ ++ return 0; ++ ++err_set_link_cfg: ++err_cls_rule: ++err_tx_cong: ++err_data_offset: ++err_buf_layout: ++err_set_rx_queues: ++err_set_tx_queues: ++err_get_attr: ++err_reset: ++ dpni_close(priv->mc_io, 0, priv->mc_token); ++err_open: ++ return err; ++} ++ ++static void free_dpni(struct dpaa2_eth_priv *priv) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ int err; ++ ++ err = dpni_reset(priv->mc_io, 0, priv->mc_token); ++ if (err) ++ netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n", ++ err); ++ ++ dpni_close(priv->mc_io, 0, priv->mc_token); ++ ++ kfree(priv->cls_rule); ++ ++ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); ++ kfree(priv->cscn_unaligned); ++} ++ ++int setup_fqs_taildrop(struct dpaa2_eth_priv *priv, ++ bool enable) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ struct dpni_taildrop td; ++ int err = 0, i; ++ ++ td.enable = enable; ++ td.threshold = DPAA2_ETH_TAILDROP_THRESH; ++ ++ if (enable) { ++ priv->num_bufs = DPAA2_ETH_NUM_BUFS_TD; ++ priv->refill_thresh = DPAA2_ETH_REFILL_THRESH_TD; ++ } else { ++ priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC / ++ priv->num_channels; ++ priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD; ++ } ++ ++ for (i = 0; i < priv->num_fqs; i++) { ++ if (priv->fq[i].type != DPAA2_RX_FQ) ++ continue; ++ ++ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, ++ DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0, ++ priv->fq[i].flowid, &td); ++ if (err) { ++ dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err); ++ break; ++ } ++ } ++ ++ return err; ++} ++ ++static int setup_rx_flow(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_fq *fq) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ struct dpni_queue q = { { 0 } }; ++ struct dpni_queue_id qid; ++ u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST; ++ int err; ++ ++ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, ++ DPNI_QUEUE_RX, 0, fq->flowid, &q, &qid); ++ if (err) { ++ dev_err(dev, "dpni_get_queue() failed (%d)\n", err); ++ return err; ++ } ++ ++ fq->fqid = qid.fqid; ++ ++ q.destination.id = fq->channel->dpcon_id; ++ q.destination.type = DPNI_DEST_DPCON; ++ q.destination.priority = 1; ++ q.user_context = (u64)fq; ++ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, ++ DPNI_QUEUE_RX, 0, fq->flowid, q_opt, &q); ++ if (err) { ++ dev_err(dev, "dpni_set_queue() failed (%d)\n", err); ++ return err; ++ } ++ ++ return
0; ++} ++ ++static int setup_tx_flow(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_fq *fq) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ struct dpni_queue q = { { 0 } }; ++ struct dpni_queue_id qid; ++ u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST; ++ int err; ++ ++ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, ++ DPNI_QUEUE_TX, 0, fq->flowid, &q, &qid); ++ if (err) { ++ dev_err(dev, "dpni_get_queue() failed (%d)\n", err); ++ return err; ++ } ++ ++ fq->tx_qdbin = qid.qdbin; ++ ++ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, ++ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, &q, &qid); ++ if (err) { ++ dev_err(dev, "dpni_get_queue() failed (%d)\n", err); ++ return err; ++ } ++ ++ fq->fqid = qid.fqid; ++ ++ q.destination.id = fq->channel->dpcon_id; ++ q.destination.type = DPNI_DEST_DPCON; ++ q.destination.priority = 0; ++ q.user_context = (u64)fq; ++ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, ++ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, q_opt, &q); ++ if (err) { ++ dev_err(dev, "dpni_set_queue() failed (%d)\n", err); ++ return err; ++ } ++ ++ return 0; ++} ++ ++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE ++static int setup_rx_err_flow(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_fq *fq) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ struct dpni_queue q = { { 0 } }; ++ struct dpni_queue_id qid; ++ u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST; ++ int err; ++ ++ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, ++ DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid); ++ if (err) { ++ dev_err(dev, "dpni_get_queue() failed (%d)\n", err); ++ return err; ++ } ++ ++ fq->fqid = qid.fqid; ++ ++ q.destination.id = fq->channel->dpcon_id; ++ q.destination.type = DPNI_DEST_DPCON; ++ q.destination.priority = 1; ++ q.user_context = (u64)fq; ++ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, ++ DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q); ++ if (err) { ++ dev_err(dev, "dpni_set_queue() failed (%d)\n", err); ++ return err; ++ } ++ ++ return 0; ++} ++#endif ++ ++/* default hash key fields */ ++static struct dpaa2_eth_hash_fields default_hash_fields[] = { ++ { ++ /* L2 header */ ++ .rxnfc_field = RXH_L2DA, ++ .cls_prot = NET_PROT_ETH, ++ .cls_field = NH_FLD_ETH_DA, ++ .size = 6, ++ }, { ++ .cls_prot = NET_PROT_ETH, ++ .cls_field = NH_FLD_ETH_SA, ++ .size = 6, ++ }, { ++ /* This is the last ethertype field parsed: ++ * depending on frame format, it can be the MAC ethertype ++ * or the VLAN etype. ++ */ ++ .cls_prot = NET_PROT_ETH, ++ .cls_field = NH_FLD_ETH_TYPE, ++ .size = 2, ++ }, { ++ /* VLAN header */ ++ .rxnfc_field = RXH_VLAN, ++ .cls_prot = NET_PROT_VLAN, ++ .cls_field = NH_FLD_VLAN_TCI, ++ .size = 2, ++ }, { ++ /* IP header */ ++ .rxnfc_field = RXH_IP_SRC, ++ .cls_prot = NET_PROT_IP, ++ .cls_field = NH_FLD_IP_SRC, ++ .size = 4, ++ }, { ++ .rxnfc_field = RXH_IP_DST, ++ .cls_prot = NET_PROT_IP, ++ .cls_field = NH_FLD_IP_DST, ++ .size = 4, ++ }, { ++ .rxnfc_field = RXH_L3_PROTO, ++ .cls_prot = NET_PROT_IP, ++ .cls_field = NH_FLD_IP_PROTO, ++ .size = 1, ++ }, { ++ /* Using UDP ports, this is functionally equivalent to raw ++ * byte pairs from L4 header.
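++ * (TCP keeps its port fields at the same L4 offsets, so TCP flows ++ * are hashed correctly as well.)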
++ */ ++ .rxnfc_field = RXH_L4_B_0_1, ++ .cls_prot = NET_PROT_UDP, ++ .cls_field = NH_FLD_UDP_PORT_SRC, ++ .size = 2, ++ }, { ++ .rxnfc_field = RXH_L4_B_2_3, ++ .cls_prot = NET_PROT_UDP, ++ .cls_field = NH_FLD_UDP_PORT_DST, ++ .size = 2, ++ }, ++}; ++ ++/* Set RX hash options */ ++static int set_hash(struct dpaa2_eth_priv *priv) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ struct dpkg_profile_cfg cls_cfg; ++ struct dpni_rx_tc_dist_cfg dist_cfg; ++ u8 *dma_mem; ++ int i; ++ int err = 0; ++ ++ memset(&cls_cfg, 0, sizeof(cls_cfg)); ++ ++ for (i = 0; i < priv->num_hash_fields; i++) { ++ struct dpkg_extract *key = ++ &cls_cfg.extracts[cls_cfg.num_extracts]; ++ ++ key->type = DPKG_EXTRACT_FROM_HDR; ++ key->extract.from_hdr.prot = priv->hash_fields[i].cls_prot; ++ key->extract.from_hdr.type = DPKG_FULL_FIELD; ++ key->extract.from_hdr.field = priv->hash_fields[i].cls_field; ++ cls_cfg.num_extracts++; ++ ++ priv->rx_flow_hash |= priv->hash_fields[i].rxnfc_field; ++ } ++ ++ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL); ++ if (!dma_mem) ++ return -ENOMEM; ++ ++ err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); ++ if (err) { ++ dev_err(dev, "dpni_prepare_key_cfg() failed (%d)", err); ++ goto err_prep_key; ++ } ++ ++ memset(&dist_cfg, 0, sizeof(dist_cfg)); ++ ++ /* Prepare for setting the rx dist */ ++ dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem, ++ DPAA2_CLASSIFIER_DMA_SIZE, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) { ++ dev_err(dev, "DMA mapping failed\n"); ++ err = -ENOMEM; ++ goto err_dma_map; ++ } ++ ++ dist_cfg.dist_size = dpaa2_eth_queue_count(priv); ++ if (dpaa2_eth_fs_enabled(priv)) { ++ dist_cfg.dist_mode = DPNI_DIST_MODE_FS; ++ dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH; ++ } else { ++ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; ++ } ++ ++ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); ++ dma_unmap_single(dev, dist_cfg.key_cfg_iova, ++ DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); ++ if (err) ++ dev_err(dev, "dpni_set_rx_tc_dist() failed (%d)\n", err); ++ ++err_dma_map: ++err_prep_key: ++ kfree(dma_mem); ++ return err; ++} ++ ++/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, ++ * frame queues and channels ++ */ ++static int bind_dpni(struct dpaa2_eth_priv *priv) ++{ ++ struct net_device *net_dev = priv->net_dev; ++ struct device *dev = net_dev->dev.parent; ++ struct dpni_pools_cfg pools_params; ++ struct dpni_error_cfg err_cfg; ++ int err = 0; ++ int i; ++ ++ pools_params.num_dpbp = 1; ++ pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; ++ pools_params.pools[0].backup_pool = 0; ++ pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE; ++ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); ++ if (err) { ++ dev_err(dev, "dpni_set_pools() failed\n"); ++ return err; ++ } ++ ++ /* Verify classification options and disable hashing and/or ++ * flow steering support in case of invalid configuration values ++ */ ++ priv->hash_fields = default_hash_fields; ++ priv->num_hash_fields = ARRAY_SIZE(default_hash_fields); ++ check_cls_support(priv); ++ ++ /* have the interface implicitly distribute traffic based on ++ * a static hash key ++ */ ++ if (dpaa2_eth_hash_enabled(priv)) { ++ err = set_hash(priv); ++ if (err) { ++ dev_err(dev, "Hashing configuration failed\n"); ++ return err; ++ } ++ } ++ ++ /* Configure handling of error frames */ ++ err_cfg.errors = DPAA2_FAS_RX_ERR_MASK; ++ err_cfg.set_frame_annotation = 1; ++#ifdef 
CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE ++ err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE; ++#else ++ err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD; ++#endif ++ err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, ++ &err_cfg); ++ if (err) { ++ dev_err(dev, "dpni_set_errors_behavior() failed (%d)\n", err); ++ return err; ++ } ++ ++ /* Configure Rx and Tx conf queues to generate CDANs */ ++ for (i = 0; i < priv->num_fqs; i++) { ++ switch (priv->fq[i].type) { ++ case DPAA2_RX_FQ: ++ err = setup_rx_flow(priv, &priv->fq[i]); ++ break; ++ case DPAA2_TX_CONF_FQ: ++ err = setup_tx_flow(priv, &priv->fq[i]); ++ break; ++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE ++ case DPAA2_RX_ERR_FQ: ++ err = setup_rx_err_flow(priv, &priv->fq[i]); ++ break; ++#endif ++ default: ++ dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type); ++ return -EINVAL; ++ } ++ if (err) ++ return err; ++ } ++ ++ err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, DPNI_QUEUE_TX, ++ &priv->tx_qdid); ++ if (err) { ++ dev_err(dev, "dpni_get_qdid() failed\n"); ++ return err; ++ } ++ ++ return 0; ++} ++ ++/* Allocate rings for storing incoming frame descriptors */ ++static int alloc_rings(struct dpaa2_eth_priv *priv) ++{ ++ struct net_device *net_dev = priv->net_dev; ++ struct device *dev = net_dev->dev.parent; ++ int i; ++ ++ for (i = 0; i < priv->num_channels; i++) { ++ priv->channel[i]->store = ++ dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev); ++ if (!priv->channel[i]->store) { ++ netdev_err(net_dev, "dpaa2_io_store_create() failed\n"); ++ goto err_ring; ++ } ++ } ++ ++ return 0; ++ ++err_ring: ++ for (i = 0; i < priv->num_channels; i++) { ++ if (!priv->channel[i]->store) ++ break; ++ dpaa2_io_store_destroy(priv->channel[i]->store); ++ } ++ ++ return -ENOMEM; ++} ++ ++static void free_rings(struct dpaa2_eth_priv *priv) ++{ ++ int i; ++ ++ for (i = 0; i < priv->num_channels; i++) ++ dpaa2_io_store_destroy(priv->channel[i]->store); ++} ++ ++static int netdev_init(struct net_device *net_dev) ++{ ++ int err; ++ struct device *dev = net_dev->dev.parent; ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN]; ++ u8 bcast_addr[ETH_ALEN]; ++ u16 rx_headroom, rx_req_headroom; ++ ++ net_dev->netdev_ops = &dpaa2_eth_ops; ++ ++ /* Get firmware address, if any */ ++ err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr); ++ if (err) { ++ dev_err(dev, "dpni_get_port_mac_addr() failed (%d)\n", err); ++ return err; ++ } ++ ++ /* Get the MAC address currently configured on the DPNI, if any */ ++ err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token, ++ dpni_mac_addr); ++ if (err) { ++ dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)\n", err); ++ return err; ++ } ++ ++ /* First check if firmware has any address configured by bootloader */ ++ if (!is_zero_ether_addr(mac_addr)) { ++ /* If the DPMAC addr != the DPNI addr, update it */ ++ if (!ether_addr_equal(mac_addr, dpni_mac_addr)) { ++ err = dpni_set_primary_mac_addr(priv->mc_io, 0, ++ priv->mc_token, ++ mac_addr); ++ if (err) { ++ dev_err(dev, ++ "dpni_set_primary_mac_addr() failed (%d)\n", ++ err); ++ return err; ++ } ++ } ++ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); ++ } else if (is_zero_ether_addr(dpni_mac_addr)) { ++ /* Fills in net_dev->dev_addr, as required by ++ * register_netdevice() ++ */ ++ eth_hw_addr_random(net_dev); ++ /* Make the user aware, without cluttering the boot log */ ++ dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n"); ++ err =
dpni_set_primary_mac_addr(priv->mc_io, 0, ++ priv->mc_token, net_dev->dev_addr); ++ if (err) { ++ dev_err(dev, ++ "dpni_set_primary_mac_addr() failed (%d)\n", err); ++ return err; ++ } ++ /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all ++ * practical purposes, this will be our "permanent" mac address, ++ * at least until the next reboot. This move will also permit ++ * register_netdevice() to properly fill up net_dev->perm_addr. ++ */ ++ net_dev->addr_assign_type = NET_ADDR_PERM; ++ /* If DPMAC address is non-zero, use that one */ ++ } else { ++ /* NET_ADDR_PERM is default, all we have to do is ++ * fill in the device addr. ++ */ ++ memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len); ++ } ++ ++ /* Explicitly add the broadcast address to the MAC filtering table; ++ * the MC won't do that for us. ++ */ ++ eth_broadcast_addr(bcast_addr); ++ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr); ++ if (err) { ++ dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err); ++ /* Won't return an error; at least, we'd have egress traffic */ ++ } ++ ++ /* Reserve enough space to align buffer as per hardware requirement; ++ * NOTE: priv->tx_data_offset MUST be initialized at this point. ++ */ ++ net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv); ++ ++ /* Set MTU limits */ ++ net_dev->min_mtu = 68; ++ net_dev->max_mtu = DPAA2_ETH_MAX_MTU; ++ ++ /* Required headroom for Rx skbs, to avoid reallocation on ++ * forwarding path. ++ */ ++ rx_req_headroom = LL_RESERVED_SPACE(net_dev) - ETH_HLEN; ++ rx_headroom = ALIGN(DPAA2_ETH_RX_HWA_SIZE + DPAA2_ETH_SWA_SIZE + ++ DPAA2_ETH_RX_HEAD_ROOM, priv->rx_buf_align); ++ if (rx_req_headroom > rx_headroom) ++ dev_info_once(dev, ++ "Required headroom (%d) greater than available (%d).\n" ++ "This will impact performance due to reallocations.\n", ++ rx_req_headroom, rx_headroom); ++ ++ /* Our .ndo_init will be called herein */ ++ err = register_netdev(net_dev); ++ if (err < 0) { ++ dev_err(dev, "register_netdev() failed (%d)\n", err); ++ return err; ++ } ++ ++ return 0; ++} ++ ++static int poll_link_state(void *arg) ++{ ++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg; ++ int err; ++ ++ while (!kthread_should_stop()) { ++ err = link_state_update(priv); ++ if (unlikely(err)) ++ return err; ++ ++ msleep(DPAA2_ETH_LINK_STATE_REFRESH); ++ } ++ ++ return 0; ++} ++ ++static irqreturn_t dpni_irq0_handler(int irq_num, void *arg) ++{ ++ return IRQ_WAKE_THREAD; ++} ++ ++static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) ++{ ++ u32 status = 0, clear = 0; ++ struct device *dev = (struct device *)arg; ++ struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev); ++ struct net_device *net_dev = dev_get_drvdata(dev); ++ int err; ++ ++ err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, ++ DPNI_IRQ_INDEX, &status); ++ if (unlikely(err)) { ++ netdev_err(net_dev, "Can't get irq status (err %d)", err); ++ clear = 0xffffffff; ++ goto out; ++ } ++ ++ if (status & DPNI_IRQ_EVENT_LINK_CHANGED) { ++ clear |= DPNI_IRQ_EVENT_LINK_CHANGED; ++ link_state_update(netdev_priv(net_dev)); ++ } ++ ++out: ++ dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, ++ DPNI_IRQ_INDEX, clear); ++ return IRQ_HANDLED; ++} ++ ++static int setup_irqs(struct fsl_mc_device *ls_dev) ++{ ++ int err = 0; ++ struct fsl_mc_device_irq *irq; ++ ++ err = fsl_mc_allocate_irqs(ls_dev); ++ if (err) { ++ dev_err(&ls_dev->dev, "MC irqs allocation failed\n"); ++ return err; ++ } ++ ++ irq = ls_dev->irqs[0]; ++ err = 
devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq, ++ dpni_irq0_handler, ++ dpni_irq0_handler_thread, ++ IRQF_NO_SUSPEND | IRQF_ONESHOT, ++ dev_name(&ls_dev->dev), &ls_dev->dev); ++ if (err < 0) { ++ dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d", err); ++ goto free_mc_irq; ++ } ++ ++ err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle, ++ DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED); ++ if (err < 0) { ++ dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d", err); ++ goto free_irq; ++ } ++ ++ err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle, ++ DPNI_IRQ_INDEX, 1); ++ if (err < 0) { ++ dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d", err); ++ goto free_irq; ++ } ++ ++ return 0; ++ ++free_irq: ++ devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev); ++free_mc_irq: ++ fsl_mc_free_irqs(ls_dev); ++ ++ return err; ++} ++ ++static void add_ch_napi(struct dpaa2_eth_priv *priv) ++{ ++ int i; ++ struct dpaa2_eth_channel *ch; ++ ++ for (i = 0; i < priv->num_channels; i++) { ++ ch = priv->channel[i]; ++ /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */ ++ netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll, ++ NAPI_POLL_WEIGHT); ++ } ++} ++ ++static void del_ch_napi(struct dpaa2_eth_priv *priv) ++{ ++ int i; ++ struct dpaa2_eth_channel *ch; ++ ++ for (i = 0; i < priv->num_channels; i++) { ++ ch = priv->channel[i]; ++ netif_napi_del(&ch->napi); ++ } ++} ++ ++/* SysFS support */ ++static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); ++ /* No MC API for getting the shaping config. We're stateful. */ ++ struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg; ++ ++ return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size); ++} ++ ++static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, ++ size_t count) ++{ ++ int err, items; ++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); ++ struct dpni_tx_shaping_cfg scfg; ++ ++ items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size); ++ if (items != 2) { ++ pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n"); ++ return -EINVAL; ++ } ++ /* Size restriction as per MC API documentation */ ++ if (scfg.max_burst_size > DPAA2_ETH_MAX_BURST_SIZE) { ++ pr_err("max_burst_size must be <= %d\n", ++ DPAA2_ETH_MAX_BURST_SIZE); ++ return -EINVAL; ++ } ++ ++ err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg); ++ if (err) { ++ dev_err(dev, "dpni_set_tx_shaping() failed\n"); ++ return -EPERM; ++ } ++ /* If successful, save the current configuration for future inquiries */ ++ priv->shaping_cfg = scfg; ++ ++ return count; ++} ++ ++static ssize_t dpaa2_eth_show_txconf_cpumask(struct device *dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); ++ ++ return cpumap_print_to_pagebuf(1, buf, &priv->txconf_cpumask); ++} ++ ++static ssize_t dpaa2_eth_write_txconf_cpumask(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, ++ size_t count) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); ++ struct dpaa2_eth_fq *fq; ++ bool running = netif_running(priv->net_dev); ++ int i, err; ++ ++ err = cpulist_parse(buf, &priv->txconf_cpumask); ++ if (err) ++ return err; ++ ++ /* Only accept CPUs that have an affine DPIO */ ++ if (!cpumask_subset(&priv->txconf_cpumask, &priv->dpio_cpumask)) 
{ ++ netdev_info(priv->net_dev, ++ "cpumask must be a subset of 0x%lx\n", ++ *cpumask_bits(&priv->dpio_cpumask)); ++ cpumask_and(&priv->txconf_cpumask, &priv->dpio_cpumask, ++ &priv->txconf_cpumask); ++ } ++ ++ /* Rewiring the TxConf FQs requires interface shutdown. ++ */ ++ if (running) { ++ err = dpaa2_eth_stop(priv->net_dev); ++ if (err) ++ return -ENODEV; ++ } ++ ++ /* Set the new TxConf FQ affinities */ ++ set_fq_affinity(priv); ++ ++ /* dpaa2_eth_open() below will *stop* the Tx queues until an explicit ++ * link up notification is received. Give the polling thread enough time ++ * to detect the link state change, or else we'll end up with the ++ * transmission side forever shut down. ++ */ ++ if (priv->do_link_poll) ++ msleep(2 * DPAA2_ETH_LINK_STATE_REFRESH); ++ ++ for (i = 0; i < priv->num_fqs; i++) { ++ fq = &priv->fq[i]; ++ if (fq->type != DPAA2_TX_CONF_FQ) ++ continue; ++ setup_tx_flow(priv, fq); ++ } ++ ++ if (running) { ++ err = dpaa2_eth_open(priv->net_dev); ++ if (err) ++ return -ENODEV; ++ } ++ ++ return count; ++} ++ ++static struct device_attribute dpaa2_eth_attrs[] = { ++ __ATTR(txconf_cpumask, ++ 0600, ++ dpaa2_eth_show_txconf_cpumask, ++ dpaa2_eth_write_txconf_cpumask), ++ ++ __ATTR(tx_shaping, ++ 0600, ++ dpaa2_eth_show_tx_shaping, ++ dpaa2_eth_write_tx_shaping), ++}; ++ ++static void dpaa2_eth_sysfs_init(struct device *dev) ++{ ++ int i, err; ++ ++ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) { ++ err = device_create_file(dev, &dpaa2_eth_attrs[i]); ++ if (err) { ++ dev_err(dev, "ERROR creating sysfs file\n"); ++ goto undo; ++ } ++ } ++ return; ++ ++undo: ++ while (i > 0) ++ device_remove_file(dev, &dpaa2_eth_attrs[--i]); ++} ++ ++static void dpaa2_eth_sysfs_remove(struct device *dev) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) ++ device_remove_file(dev, &dpaa2_eth_attrs[i]); ++} ++ ++static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) ++{ ++ struct device *dev; ++ struct net_device *net_dev = NULL; ++ struct dpaa2_eth_priv *priv = NULL; ++ int err = 0; ++ ++ dev = &dpni_dev->dev; ++ ++ /* Net device */ ++ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES); ++ if (!net_dev) { ++ dev_err(dev, "alloc_etherdev_mq() failed\n"); ++ return -ENOMEM; ++ } ++ ++ SET_NETDEV_DEV(net_dev, dev); ++ dev_set_drvdata(dev, net_dev); ++ ++ priv = netdev_priv(net_dev); ++ priv->net_dev = net_dev; ++ ++ priv->iommu_domain = iommu_get_domain_for_dev(dev); ++ ++ /* Obtain a MC portal */ ++ err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, ++ &priv->mc_io); ++ if (err) { ++ dev_err(dev, "MC portal allocation failed\n"); ++ goto err_portal_alloc; ++ } ++ ++ /* MC objects initialization and configuration */ ++ err = setup_dpni(dpni_dev); ++ if (err) ++ goto err_dpni_setup; ++ ++ err = setup_dpio(priv); ++ if (err) { ++ dev_info(dev, "Defer probing as no DPIO available\n"); ++ err = -EPROBE_DEFER; ++ goto err_dpio_setup; ++ } ++ ++ setup_fqs(priv); ++ ++ err = setup_dpbp(priv); ++ if (err) ++ goto err_dpbp_setup; ++ ++ err = bind_dpni(priv); ++ if (err) ++ goto err_bind; ++ ++ /* Add a NAPI context for each channel */ ++ add_ch_napi(priv); ++ enable_ch_napi(priv); ++ ++ /* Percpu statistics */ ++ priv->percpu_stats = alloc_percpu(*priv->percpu_stats); ++ if (!priv->percpu_stats) { ++ dev_err(dev, "alloc_percpu(percpu_stats) failed\n"); ++ err = -ENOMEM; ++ goto err_alloc_percpu_stats; ++ } ++ priv->percpu_extras = alloc_percpu(*priv->percpu_extras); ++ if (!priv->percpu_extras) { ++ dev_err(dev, 
"alloc_percpu(percpu_extras) failed\n"); ++ err = -ENOMEM; ++ goto err_alloc_percpu_extras; ++ } ++ ++ snprintf(net_dev->name, IFNAMSIZ, "ni%d", dpni_dev->obj_desc.id); ++ if (!dev_valid_name(net_dev->name)) { ++ dev_warn(&net_dev->dev, ++ "netdevice name \"%s\" cannot be used, reverting to default..\n", ++ net_dev->name); ++ dev_alloc_name(net_dev, "eth%d"); ++ dev_warn(&net_dev->dev, "using name \"%s\"\n", net_dev->name); ++ } ++ ++ err = netdev_init(net_dev); ++ if (err) ++ goto err_netdev_init; ++ ++ /* Configure checksum offload based on current interface flags */ ++ err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM)); ++ if (err) ++ goto err_csum; ++ ++ err = set_tx_csum(priv, !!(net_dev->features & ++ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))); ++ if (err) ++ goto err_csum; ++ ++ err = alloc_rings(priv); ++ if (err) ++ goto err_alloc_rings; ++ ++ net_dev->ethtool_ops = &dpaa2_ethtool_ops; ++ ++ err = setup_irqs(dpni_dev); ++ if (err) { ++ netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n"); ++ priv->poll_thread = kthread_run(poll_link_state, priv, ++ "%s_poll_link", net_dev->name); ++ if (IS_ERR(priv->poll_thread)) { ++ netdev_err(net_dev, "Error starting polling thread\n"); ++ goto err_poll_thread; ++ } ++ priv->do_link_poll = true; ++ } ++ ++ dpaa2_eth_sysfs_init(&net_dev->dev); ++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS ++ dpaa2_dbg_add(priv); ++#endif ++ ++ dev_info(dev, "Probed interface %s\n", net_dev->name); ++ return 0; ++ ++err_poll_thread: ++ free_rings(priv); ++err_alloc_rings: ++err_csum: ++ unregister_netdev(net_dev); ++err_netdev_init: ++ free_percpu(priv->percpu_extras); ++err_alloc_percpu_extras: ++ free_percpu(priv->percpu_stats); ++err_alloc_percpu_stats: ++ disable_ch_napi(priv); ++ del_ch_napi(priv); ++err_bind: ++ free_dpbp(priv); ++err_dpbp_setup: ++ free_dpio(priv); ++err_dpio_setup: ++ free_dpni(priv); ++err_dpni_setup: ++ fsl_mc_portal_free(priv->mc_io); ++err_portal_alloc: ++ dev_set_drvdata(dev, NULL); ++ free_netdev(net_dev); ++ ++ return err; ++} ++ ++static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) ++{ ++ struct device *dev; ++ struct net_device *net_dev; ++ struct dpaa2_eth_priv *priv; ++ ++ dev = &ls_dev->dev; ++ net_dev = dev_get_drvdata(dev); ++ priv = netdev_priv(net_dev); ++ ++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS ++ dpaa2_dbg_remove(priv); ++#endif ++ dpaa2_eth_sysfs_remove(&net_dev->dev); ++ ++ unregister_netdev(net_dev); ++ dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); ++ ++ if (priv->do_link_poll) ++ kthread_stop(priv->poll_thread); ++ else ++ fsl_mc_free_irqs(ls_dev); ++ ++ free_rings(priv); ++ free_percpu(priv->percpu_stats); ++ free_percpu(priv->percpu_extras); ++ ++ disable_ch_napi(priv); ++ del_ch_napi(priv); ++ free_dpbp(priv); ++ free_dpio(priv); ++ free_dpni(priv); ++ ++ fsl_mc_portal_free(priv->mc_io); ++ ++ dev_set_drvdata(dev, NULL); ++ free_netdev(net_dev); ++ ++ return 0; ++} ++ ++static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = { ++ { ++ .vendor = FSL_MC_VENDOR_FREESCALE, ++ .obj_type = "dpni", ++ }, ++ { .vendor = 0x0 } ++}; ++MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table); ++ ++static struct fsl_mc_driver dpaa2_eth_driver = { ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .owner = THIS_MODULE, ++ }, ++ .probe = dpaa2_eth_probe, ++ .remove = dpaa2_eth_remove, ++ .match_id_table = dpaa2_eth_match_id_table ++}; ++ ++static int __init dpaa2_eth_driver_init(void) ++{ ++ int err; ++ ++ dpaa2_eth_dbg_init(); ++ err = 
fsl_mc_driver_register(&dpaa2_eth_driver); ++ if (err) { ++ dpaa2_eth_dbg_exit(); ++ return err; ++ } ++ ++ return 0; ++} ++ ++static void __exit dpaa2_eth_driver_exit(void) ++{ ++ dpaa2_eth_dbg_exit(); ++ fsl_mc_driver_unregister(&dpaa2_eth_driver); ++} ++ ++module_init(dpaa2_eth_driver_init); ++module_exit(dpaa2_eth_driver_exit); +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h +@@ -0,0 +1,460 @@ ++/* Copyright 2014-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifndef __DPAA2_ETH_H ++#define __DPAA2_ETH_H ++ ++#include ++#include ++#include ++#include "../../fsl-mc/include/dpaa2-io.h" ++#include "dpni.h" ++#include "net.h" ++ ++#include "dpaa2-eth-debugfs.h" ++ ++#define DPAA2_ETH_STORE_SIZE 16 ++ ++/* We set a max threshold for how many Tx confirmations we should process ++ * on a NAPI poll call, since they take less processing time than Rx frames. ++ */ ++#define TX_CONF_PER_NAPI_POLL 256 ++ ++/* Maximum number of scatter-gather entries in an ingress frame, ++ * considering the maximum receive frame size is 64K ++ */ ++#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE) ++ ++/* Maximum acceptable MTU value. It is in direct relation with the hardware ++ * enforced Max Frame Length (currently 10k).
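++ * MTU is an L3 value, while the MFL is measured at L2; hence the ++ * VLAN_ETH_HLEN (18 bytes) adjustment below: 10240 - 18 = 10222 bytes max MTU.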
++ */ ++#define DPAA2_ETH_MFL (10 * 1024) ++#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN) ++/* Convert L3 MTU to L2 MFL */ ++#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN) ++ ++/* Maximum burst size value for Tx shaping */ ++#define DPAA2_ETH_MAX_BURST_SIZE 0xF7FF ++ ++/* Maximum number of buffers that can be acquired/released through a single ++ * QBMan command ++ */ ++#define DPAA2_ETH_BUFS_PER_CMD 7 ++ ++/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo ++ * frames in the Rx queues (length of the current frame is not ++ * taken into account when making the taildrop decision) ++ */ ++#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024) ++ ++/* Buffer quota per queue. Must be large enough such that for minimum sized ++ * frames taildrop kicks in before the bpool gets depleted, so we compute ++ * how many 64B frames fit inside the taildrop threshold and add a margin ++ * to accommodate the buffer refill delay. ++ */ ++#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64) ++#define DPAA2_ETH_NUM_BUFS_TD (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256) ++#define DPAA2_ETH_REFILL_THRESH_TD \ ++ (DPAA2_ETH_NUM_BUFS_TD - DPAA2_ETH_BUFS_PER_CMD) ++ ++/* Buffer quota per queue to use when flow control is active. */ ++#define DPAA2_ETH_NUM_BUFS_FC 256 ++ ++/* Hardware requires alignment for ingress/egress buffer addresses ++ * and ingress buffer lengths. ++ */ ++#define DPAA2_ETH_RX_BUF_SIZE 2048 ++#define DPAA2_ETH_TX_BUF_ALIGN 64 ++#define DPAA2_ETH_RX_BUF_ALIGN 64 ++#define DPAA2_ETH_RX_BUF_ALIGN_V1 256 ++#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \ ++ ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN) ++ ++/* rx_extra_head prevents reallocations in L3 processing. */ ++#define DPAA2_ETH_SKB_SIZE \ ++ (DPAA2_ETH_RX_BUF_SIZE + \ ++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) ++ ++/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress ++ * buffers large enough to allow building an skb around them and also account ++ * for alignment restrictions. ++ */ ++#define DPAA2_ETH_BUF_RAW_SIZE(p_priv) \ ++ (DPAA2_ETH_SKB_SIZE + \ ++ (p_priv)->rx_buf_align) ++ ++/* PTP nominal frequency 1GHz */ ++#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1 ++ ++/* Leave enough extra space in the headroom to make sure the skb is ++ * not realloc'd in forwarding scenarios. ++ */ ++#define DPAA2_ETH_RX_HEAD_ROOM 192 ++ ++/* We are accommodating a skb backpointer and some S/G info ++ * in the frame's software annotation. The hardware ++ * options are either 0 or 64, so we choose the latter. 
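++ * The hardware annotation area then starts right after the SWA in the
++ * buffer, which is what the dpaa2_eth_get_hwa() accessor below assumes.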
++ */ ++#define DPAA2_ETH_SWA_SIZE 64 ++ ++/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */ ++struct dpaa2_eth_swa { ++ struct sk_buff *skb; ++ struct scatterlist *scl; ++ int num_sg; ++ int num_dma_bufs; ++}; ++ ++/* Annotation valid bits in FD FRC */ ++#define DPAA2_FD_FRC_FASV 0x8000 ++#define DPAA2_FD_FRC_FAEADV 0x4000 ++#define DPAA2_FD_FRC_FAPRV 0x2000 ++#define DPAA2_FD_FRC_FAIADV 0x1000 ++#define DPAA2_FD_FRC_FASWOV 0x0800 ++#define DPAA2_FD_FRC_FAICFDV 0x0400 ++ ++#define DPAA2_FD_RX_ERR_MASK (FD_CTRL_SBE | FD_CTRL_FAERR) ++#define DPAA2_FD_TX_ERR_MASK (FD_CTRL_UFD | \ ++ FD_CTRL_SBE | \ ++ FD_CTRL_FSE | \ ++ FD_CTRL_FAERR) ++ ++/* Annotation bits in FD CTRL */ ++#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */ ++ ++/* Size of hardware annotation area based on the current buffer layout ++ * configuration ++ */ ++#define DPAA2_ETH_RX_HWA_SIZE 64 ++#define DPAA2_ETH_TX_HWA_SIZE 128 ++ ++/* Frame annotation status */ ++struct dpaa2_fas { ++ u8 reserved; ++ u8 ppid; ++ __le16 ifpid; ++ __le32 status; ++} __packed; ++ ++/* Frame annotation status word is located in the first 8 bytes ++ * of the buffer's hardware annotation area ++ */ ++#define DPAA2_FAS_OFFSET 0 ++#define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas)) ++ ++/* Timestamp is located in the next 8 bytes of the buffer's ++ * hardware annotation area ++ */ ++#define DPAA2_TS_OFFSET 0x8 ++ ++/* Frame annotation egress action descriptor */ ++#define DPAA2_FAEAD_OFFSET 0x58 ++ ++struct dpaa2_faead { ++ __le32 conf_fqid; ++ __le32 ctrl; ++}; ++ ++#define DPAA2_FAEAD_A2V 0x20000000 ++#define DPAA2_FAEAD_UPDV 0x00001000 ++#define DPAA2_FAEAD_UPD 0x00000010 ++ ++/* accessors for the hardware annotation fields that we use */ ++#define dpaa2_eth_get_hwa(buf_addr) \ ++ ((void *)(buf_addr) + DPAA2_ETH_SWA_SIZE) ++ ++#define dpaa2_eth_get_fas(buf_addr) \ ++ (struct dpaa2_fas *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_FAS_OFFSET) ++ ++#define dpaa2_eth_get_ts(buf_addr) \ ++ (u64 *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_TS_OFFSET) ++ ++#define dpaa2_eth_get_faead(buf_addr) \ ++ (struct dpaa2_faead *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_FAEAD_OFFSET) ++ ++/* Error and status bits in the frame annotation status word */ ++/* Debug frame, otherwise supposed to be discarded */ ++#define DPAA2_FAS_DISC 0x80000000 ++/* MACSEC frame */ ++#define DPAA2_FAS_MS 0x40000000 ++#define DPAA2_FAS_PTP 0x08000000 ++/* Ethernet multicast frame */ ++#define DPAA2_FAS_MC 0x04000000 ++/* Ethernet broadcast frame */ ++#define DPAA2_FAS_BC 0x02000000 ++#define DPAA2_FAS_KSE 0x00040000 ++#define DPAA2_FAS_EOFHE 0x00020000 ++#define DPAA2_FAS_MNLE 0x00010000 ++#define DPAA2_FAS_TIDE 0x00008000 ++#define DPAA2_FAS_PIEE 0x00004000 ++/* Frame length error */ ++#define DPAA2_FAS_FLE 0x00002000 ++/* Frame physical error */ ++#define DPAA2_FAS_FPE 0x00001000 ++#define DPAA2_FAS_PTE 0x00000080 ++#define DPAA2_FAS_ISP 0x00000040 ++#define DPAA2_FAS_PHE 0x00000020 ++#define DPAA2_FAS_BLE 0x00000010 ++/* L3 csum validation performed */ ++#define DPAA2_FAS_L3CV 0x00000008 ++/* L3 csum error */ ++#define DPAA2_FAS_L3CE 0x00000004 ++/* L4 csum validation performed */ ++#define DPAA2_FAS_L4CV 0x00000002 ++/* L4 csum error */ ++#define DPAA2_FAS_L4CE 0x00000001 ++/* Possible errors on the ingress path */ ++#define DPAA2_FAS_RX_ERR_MASK ((DPAA2_FAS_KSE) | \ ++ (DPAA2_FAS_EOFHE) | \ ++ (DPAA2_FAS_MNLE) | \ ++ (DPAA2_FAS_TIDE) | \ ++ (DPAA2_FAS_PIEE) | \ ++ (DPAA2_FAS_FLE) | \ ++ (DPAA2_FAS_FPE) | \ ++ (DPAA2_FAS_PTE) | \ ++ (DPAA2_FAS_ISP) | \ ++ (DPAA2_FAS_PHE) | \ ++ 
(DPAA2_FAS_BLE) | \ ++ (DPAA2_FAS_L3CE) | \ ++ (DPAA2_FAS_L4CE)) ++/* Tx errors */ ++#define DPAA2_FAS_TX_ERR_MASK ((DPAA2_FAS_KSE) | \ ++ (DPAA2_FAS_EOFHE) | \ ++ (DPAA2_FAS_MNLE) | \ ++ (DPAA2_FAS_TIDE)) ++ ++/* Time in milliseconds between link state updates */ ++#define DPAA2_ETH_LINK_STATE_REFRESH 1000 ++ ++/* Number of times to retry a frame enqueue before giving up. ++ * Value determined empirically, in order to minimize the number ++ * of frames dropped on Tx ++ */ ++#define DPAA2_ETH_ENQUEUE_RETRIES 10 ++ ++/* Tx congestion entry & exit thresholds, in number of bytes. ++ * We allow a maximum of 512KB worth of frames pending processing on the Tx ++ * queues of an interface ++ */ ++#define DPAA2_ETH_TX_CONG_ENTRY_THRESH (512 * 1024) ++#define DPAA2_ETH_TX_CONG_EXIT_THRESH (DPAA2_ETH_TX_CONG_ENTRY_THRESH * 9/10) ++ ++/* Driver statistics, other than those in struct rtnl_link_stats64. ++ * These are usually collected per-CPU and aggregated by ethtool. ++ */ ++struct dpaa2_eth_drv_stats { ++ __u64 tx_conf_frames; ++ __u64 tx_conf_bytes; ++ __u64 tx_sg_frames; ++ __u64 tx_sg_bytes; ++ __u64 rx_sg_frames; ++ __u64 rx_sg_bytes; ++ /* Enqueues retried due to portal busy */ ++ __u64 tx_portal_busy; ++}; ++ ++/* Per-FQ statistics */ ++struct dpaa2_eth_fq_stats { ++ /* Number of frames received on this queue */ ++ __u64 frames; ++ /* Number of times this queue entered congestion */ ++ __u64 congestion_entry; ++}; ++ ++/* Per-channel statistics */ ++struct dpaa2_eth_ch_stats { ++ /* Volatile dequeues retried due to portal busy */ ++ __u64 dequeue_portal_busy; ++ /* Number of CDANs; useful to estimate avg NAPI len */ ++ __u64 cdan; ++ /* Number of frames received on queues from this channel */ ++ __u64 frames; ++ /* Pull errors */ ++ __u64 pull_err; ++}; ++ ++/* Maximum number of queues associated with a DPNI */ ++#define DPAA2_ETH_MAX_RX_QUEUES 16 ++#define DPAA2_ETH_MAX_TX_QUEUES NR_CPUS ++#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1 ++#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \ ++ DPAA2_ETH_MAX_TX_QUEUES + \ ++ DPAA2_ETH_MAX_RX_ERR_QUEUES) ++ ++#define DPAA2_ETH_MAX_DPCONS NR_CPUS ++ ++enum dpaa2_eth_fq_type { ++ DPAA2_RX_FQ = 0, ++ DPAA2_TX_CONF_FQ, ++ DPAA2_RX_ERR_FQ ++}; ++ ++struct dpaa2_eth_priv; ++ ++struct dpaa2_eth_fq { ++ u32 fqid; ++ u32 tx_qdbin; ++ u16 flowid; ++ int target_cpu; ++ struct dpaa2_eth_channel *channel; ++ enum dpaa2_eth_fq_type type; ++ ++ void (*consume)(struct dpaa2_eth_priv *, ++ struct dpaa2_eth_channel *, ++ const struct dpaa2_fd *, ++ struct napi_struct *, ++ u16 queue_id); ++ struct dpaa2_eth_fq_stats stats; ++}; ++ ++struct dpaa2_eth_channel { ++ struct dpaa2_io_notification_ctx nctx; ++ struct fsl_mc_device *dpcon; ++ int dpcon_id; ++ int ch_id; ++ int dpio_id; ++ struct napi_struct napi; ++ struct dpaa2_io_store *store; ++ struct dpaa2_eth_priv *priv; ++ int buf_count; ++ struct dpaa2_eth_ch_stats stats; ++}; ++ ++struct dpaa2_eth_cls_rule { ++ struct ethtool_rx_flow_spec fs; ++ bool in_use; ++}; ++ ++struct dpaa2_eth_hash_fields { ++ u64 rxnfc_field; ++ enum net_prot cls_prot; ++ int cls_field; ++ int offset; ++ int size; ++}; ++ ++/* Driver private data */ ++struct dpaa2_eth_priv { ++ struct net_device *net_dev; ++ ++ /* Standard statistics */ ++ struct rtnl_link_stats64 __percpu *percpu_stats; ++ /* Extra stats, in addition to the ones known by the kernel */ ++ struct dpaa2_eth_drv_stats __percpu *percpu_extras; ++ struct iommu_domain *iommu_domain; ++ ++ bool ts_tx_en; /* Tx timestamping enabled */ ++ bool ts_rx_en; /* Rx timestamping enabled */ 
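++
++	/* Ingress/egress buffer layout: tx_data_offset is reported by the
++	 * DPNI, while rx_buf_align depends on the WRIOP hardware revision
++	 */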
++	u16 tx_data_offset;
++	u16 rx_buf_align;
++
++	u16 bpid;
++	u16 tx_qdid;
++
++	int tx_pause_frames;
++	int num_bufs;
++	int refill_thresh;
++
++	/* Tx congestion notifications are written here */
++	void *cscn_mem;
++	void *cscn_unaligned;
++	dma_addr_t cscn_dma;
++
++	u8 num_fqs;
++	/* Tx queues are at the beginning of the array */
++	struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
++
++	u8 num_channels;
++	struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
++
++	int dpni_id;
++	struct dpni_attr dpni_attrs;
++	struct fsl_mc_device *dpbp_dev;
++
++	struct fsl_mc_io *mc_io;
++	/* SysFS-controlled affinity mask for TxConf FQs */
++	struct cpumask txconf_cpumask;
++	/* Cores which have an affine DPIO/DPCON.
++	 * This is the cpu set on which Rx frames are processed;
++	 * Tx confirmation frames are processed on a subset of this,
++	 * depending on user settings.
++	 */
++	struct cpumask dpio_cpumask;
++
++	u16 mc_token;
++
++	struct dpni_link_state link_state;
++	bool do_link_poll;
++	struct task_struct *poll_thread;
++
++	struct dpaa2_eth_hash_fields *hash_fields;
++	u8 num_hash_fields;
++	/* enabled ethtool hashing bits */
++	u64 rx_flow_hash;
++
++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
++	struct dpaa2_debugfs dbg;
++#endif
++
++	/* array of classification rules */
++	struct dpaa2_eth_cls_rule *cls_rule;
++
++	struct dpni_tx_shaping_cfg shaping_cfg;
++};
++
++#define dpaa2_eth_hash_enabled(priv) \
++	((priv)->dpni_attrs.num_queues > 1)
++
++#define dpaa2_eth_fs_enabled(priv) \
++	(!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
++
++#define dpaa2_eth_fs_mask_enabled(priv) \
++	((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
++
++#define dpaa2_eth_fs_count(priv) \
++	((priv)->dpni_attrs.fs_entries)
++
++/* size of DMA memory used to pass configuration to classifier, in bytes */
++#define DPAA2_CLASSIFIER_DMA_SIZE 256
++
++extern const struct ethtool_ops dpaa2_ethtool_ops;
++extern const char dpaa2_eth_drv_version[];
++
++static inline int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
++{
++	return priv->dpni_attrs.num_queues;
++}
++
++void check_cls_support(struct dpaa2_eth_priv *priv);
++
++int setup_fqs_taildrop(struct dpaa2_eth_priv *priv, bool enable);
++#endif /* __DPAA2_ETH_H */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+@@ -0,0 +1,856 @@
++/* Copyright 2014-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "dpni.h" /* DPNI_LINK_OPT_* */ ++#include "dpaa2-eth.h" ++ ++/* To be kept in sync with dpni_statistics */ ++static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = { ++ "rx frames", ++ "rx bytes", ++ "rx mcast frames", ++ "rx mcast bytes", ++ "rx bcast frames", ++ "rx bcast bytes", ++ "tx frames", ++ "tx bytes", ++ "tx mcast frames", ++ "tx mcast bytes", ++ "tx bcast frames", ++ "tx bcast bytes", ++ "rx filtered frames", ++ "rx discarded frames", ++ "rx nobuffer discards", ++ "tx discarded frames", ++ "tx confirmed frames", ++}; ++ ++#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats) ++ ++/* To be kept in sync with 'struct dpaa2_eth_drv_stats' */ ++static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = { ++ /* per-cpu stats */ ++ ++ "tx conf frames", ++ "tx conf bytes", ++ "tx sg frames", ++ "tx sg bytes", ++ "rx sg frames", ++ "rx sg bytes", ++ /* how many times we had to retry the enqueue command */ ++ "enqueue portal busy", ++ ++ /* Channel stats */ ++ /* How many times we had to retry the volatile dequeue command */ ++ "dequeue portal busy", ++ "channel pull errors", ++ /* Number of notifications received */ ++ "cdan", ++ "tx congestion state", ++#ifdef CONFIG_FSL_QBMAN_DEBUG ++ /* FQ stats */ ++ "rx pending frames", ++ "rx pending bytes", ++ "tx conf pending frames", ++ "tx conf pending bytes", ++ "buffer count" ++#endif ++}; ++ ++#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras) ++ ++static void dpaa2_eth_get_drvinfo(struct net_device *net_dev, ++ struct ethtool_drvinfo *drvinfo) ++{ ++ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); ++ strlcpy(drvinfo->version, dpaa2_eth_drv_version, ++ sizeof(drvinfo->version)); ++ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); ++ strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), ++ sizeof(drvinfo->bus_info)); ++} ++ ++static int dpaa2_eth_get_settings(struct net_device *net_dev, ++ struct ethtool_cmd *cmd) ++{ ++ struct dpni_link_state state = {0}; ++ int err = 0; ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ ++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); ++ if (err) { ++ netdev_err(net_dev, "ERROR %d getting link state", err); ++ goto out; ++ } ++ ++ /* At the moment, we have no way of interrogating the DPMAC ++ * from the DPNI side - and for that matter there may exist ++ * no DPMAC at all. So for now we just don't report anything ++ * beyond the DPNI attributes. 
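++ * What we do report (rate, duplex, autoneg) is taken directly from
++ * the dpni_get_link_state() response above.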
++ */ ++ if (state.options & DPNI_LINK_OPT_AUTONEG) ++ cmd->autoneg = AUTONEG_ENABLE; ++ if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX)) ++ cmd->duplex = DUPLEX_FULL; ++ ethtool_cmd_speed_set(cmd, state.rate); ++ ++out: ++ return err; ++} ++ ++static int dpaa2_eth_set_settings(struct net_device *net_dev, ++ struct ethtool_cmd *cmd) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ struct dpni_link_state state = {0}; ++ struct dpni_link_cfg cfg = {0}; ++ int err = 0; ++ ++ netdev_dbg(net_dev, "Setting link parameters..."); ++ ++ /* Need to interrogate on link state to get flow control params */ ++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); ++ if (err) { ++ netdev_err(net_dev, "ERROR %d getting link state", err); ++ goto out; ++ } ++ ++ cfg.options = state.options; ++ cfg.rate = ethtool_cmd_speed(cmd); ++ if (cmd->autoneg == AUTONEG_ENABLE) ++ cfg.options |= DPNI_LINK_OPT_AUTONEG; ++ else ++ cfg.options &= ~DPNI_LINK_OPT_AUTONEG; ++ if (cmd->duplex == DUPLEX_HALF) ++ cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX; ++ else ++ cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX; ++ ++ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); ++ if (err) ++ /* ethtool will be loud enough if we return an error; no point ++ * in putting our own error message on the console by default ++ */ ++ netdev_dbg(net_dev, "ERROR %d setting link cfg", err); ++ ++out: ++ return err; ++} ++ ++static void dpaa2_eth_get_pauseparam(struct net_device *net_dev, ++ struct ethtool_pauseparam *pause) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ struct dpni_link_state state = {0}; ++ int err; ++ ++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); ++ if (err) ++ netdev_dbg(net_dev, "ERROR %d getting link state", err); ++ ++ /* for now, pause frames autonegotiation is not separate */ ++ pause->autoneg = !!(state.options & DPNI_LINK_OPT_AUTONEG); ++ pause->rx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE); ++ pause->tx_pause = pause->rx_pause ^ ++ !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE); ++} ++ ++static int dpaa2_eth_set_pauseparam(struct net_device *net_dev, ++ struct ethtool_pauseparam *pause) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ struct dpni_link_state state = {0}; ++ struct dpni_link_cfg cfg = {0}; ++ u32 current_tx_pause; ++ int err = 0; ++ ++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); ++ if (err) { ++ netdev_dbg(net_dev, "ERROR %d getting link state", err); ++ goto out; ++ } ++ ++ cfg.rate = state.rate; ++ cfg.options = state.options; ++ current_tx_pause = !!(cfg.options & DPNI_LINK_OPT_PAUSE) ^ ++ !!(cfg.options & DPNI_LINK_OPT_ASYM_PAUSE); ++ ++ if (pause->autoneg != !!(state.options & DPNI_LINK_OPT_AUTONEG)) ++ netdev_warn(net_dev, ++ "WARN: Can't change pause frames autoneg separately\n"); ++ ++ if (pause->rx_pause) ++ cfg.options |= DPNI_LINK_OPT_PAUSE; ++ else ++ cfg.options &= ~DPNI_LINK_OPT_PAUSE; ++ ++ if (pause->rx_pause ^ pause->tx_pause) ++ cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; ++ else ++ cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; ++ ++ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); ++ if (err) { ++ /* ethtool will be loud enough if we return an error; no point ++ * in putting our own error message on the console by default ++ */ ++ netdev_dbg(net_dev, "ERROR %d setting link cfg", err); ++ goto out; ++ } ++ ++ /* Enable / disable taildrops if Tx pause frames have changed */ ++ if (current_tx_pause == pause->tx_pause) ++ goto out; ++ ++ err = setup_fqs_taildrop(priv, 
!pause->tx_pause); ++ if (err) ++ netdev_dbg(net_dev, "ERROR %d configuring taildrop", err); ++ ++ priv->tx_pause_frames = pause->tx_pause; ++out: ++ return err; ++} ++ ++static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset, ++ u8 *data) ++{ ++ u8 *p = data; ++ int i; ++ ++ switch (stringset) { ++ case ETH_SS_STATS: ++ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) { ++ strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN); ++ p += ETH_GSTRING_LEN; ++ } ++ for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) { ++ strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN); ++ p += ETH_GSTRING_LEN; ++ } ++ break; ++ } ++} ++ ++static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset) ++{ ++ switch (sset) { ++ case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */ ++ return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS; ++ default: ++ return -EOPNOTSUPP; ++ } ++} ++ ++/** Fill in hardware counters, as returned by MC. ++ */ ++static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, ++ struct ethtool_stats *stats, ++ u64 *data) ++{ ++ int i = 0; /* Current index in the data array */ ++ int j = 0, k, err; ++ union dpni_statistics dpni_stats; ++ ++#ifdef CONFIG_FSL_QBMAN_DEBUG ++ u32 fcnt, bcnt; ++ u32 fcnt_rx_total = 0, fcnt_tx_total = 0; ++ u32 bcnt_rx_total = 0, bcnt_tx_total = 0; ++ u32 buf_cnt; ++#endif ++ u64 cdan = 0; ++ u64 portal_busy = 0, pull_err = 0; ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ struct dpaa2_eth_drv_stats *extras; ++ struct dpaa2_eth_ch_stats *ch_stats; ++ ++ memset(data, 0, ++ sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS)); ++ ++ /* Print standard counters, from DPNI statistics */ ++ for (j = 0; j <= 2; j++) { ++ err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, ++ j, &dpni_stats); ++ if (err != 0) ++ netdev_warn(net_dev, "Err %d getting DPNI stats page %d", ++ err, j); ++ ++ switch (j) { ++ case 0: ++ *(data + i++) = dpni_stats.page_0.ingress_all_frames; ++ *(data + i++) = dpni_stats.page_0.ingress_all_bytes; ++ *(data + i++) = dpni_stats.page_0.ingress_multicast_frames; ++ *(data + i++) = dpni_stats.page_0.ingress_multicast_bytes; ++ *(data + i++) = dpni_stats.page_0.ingress_broadcast_frames; ++ *(data + i++) = dpni_stats.page_0.ingress_broadcast_bytes; ++ break; ++ case 1: ++ *(data + i++) = dpni_stats.page_1.egress_all_frames; ++ *(data + i++) = dpni_stats.page_1.egress_all_bytes; ++ *(data + i++) = dpni_stats.page_1.egress_multicast_frames; ++ *(data + i++) = dpni_stats.page_1.egress_multicast_bytes; ++ *(data + i++) = dpni_stats.page_1.egress_broadcast_frames; ++ *(data + i++) = dpni_stats.page_1.egress_broadcast_bytes; ++ break; ++ case 2: ++ *(data + i++) = dpni_stats.page_2.ingress_filtered_frames; ++ *(data + i++) = dpni_stats.page_2.ingress_discarded_frames; ++ *(data + i++) = dpni_stats.page_2.ingress_nobuffer_discards; ++ *(data + i++) = dpni_stats.page_2.egress_discarded_frames; ++ *(data + i++) = dpni_stats.page_2.egress_confirmed_frames; ++ break; ++ default: ++ break; ++ } ++ } ++ ++ /* Print per-cpu extra stats */ ++ for_each_online_cpu(k) { ++ extras = per_cpu_ptr(priv->percpu_extras, k); ++ for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++) ++ *((__u64 *)data + i + j) += *((__u64 *)extras + j); ++ } ++ ++ i += j; ++ ++ /* We may be using fewer DPIOs than actual CPUs */ ++ for (j = 0; j < priv->num_channels; j++) { ++ ch_stats = &priv->channel[j]->stats; ++ cdan += ch_stats->cdan; ++ portal_busy += ch_stats->dequeue_portal_busy; ++ pull_err += ch_stats->pull_err; 
++ } ++ ++ *(data + i++) = portal_busy; ++ *(data + i++) = pull_err; ++ *(data + i++) = cdan; ++ ++ *(data + i++) = dpaa2_cscn_state_congested(priv->cscn_mem); ++ ++#ifdef CONFIG_FSL_QBMAN_DEBUG ++ for (j = 0; j < priv->num_fqs; j++) { ++ /* Print FQ instantaneous counts */ ++ err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid, ++ &fcnt, &bcnt); ++ if (err) { ++ netdev_warn(net_dev, "FQ query error %d", err); ++ return; ++ } ++ ++ if (priv->fq[j].type == DPAA2_TX_CONF_FQ) { ++ fcnt_tx_total += fcnt; ++ bcnt_tx_total += bcnt; ++ } else { ++ fcnt_rx_total += fcnt; ++ bcnt_rx_total += bcnt; ++ } ++ } ++ ++ *(data + i++) = fcnt_rx_total; ++ *(data + i++) = bcnt_rx_total; ++ *(data + i++) = fcnt_tx_total; ++ *(data + i++) = bcnt_tx_total; ++ ++ err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt); ++ if (err) { ++ netdev_warn(net_dev, "Buffer count query error %d\n", err); ++ return; ++ } ++ *(data + i++) = buf_cnt; ++#endif ++} ++ ++static int cls_key_off(struct dpaa2_eth_priv *priv, int prot, int field) ++{ ++ int i, off = 0; ++ ++ for (i = 0; i < priv->num_hash_fields; i++) { ++ if (priv->hash_fields[i].cls_prot == prot && ++ priv->hash_fields[i].cls_field == field) ++ return off; ++ off += priv->hash_fields[i].size; ++ } ++ ++ return -1; ++} ++ ++static u8 cls_key_size(struct dpaa2_eth_priv *priv) ++{ ++ u8 i, size = 0; ++ ++ for (i = 0; i < priv->num_hash_fields; i++) ++ size += priv->hash_fields[i].size; ++ ++ return size; ++} ++ ++void check_cls_support(struct dpaa2_eth_priv *priv) ++{ ++ u8 key_size = cls_key_size(priv); ++ struct device *dev = priv->net_dev->dev.parent; ++ ++ if (dpaa2_eth_hash_enabled(priv)) { ++ if (priv->dpni_attrs.fs_key_size < key_size) { ++ dev_info(dev, "max_dist_key_size = %d, expected %d. Hashing and steering are disabled\n", ++ priv->dpni_attrs.fs_key_size, ++ key_size); ++ goto disable_fs; ++ } ++ if (priv->num_hash_fields > DPKG_MAX_NUM_OF_EXTRACTS) { ++ dev_info(dev, "Too many key fields (max = %d). Hashing and steering are disabled\n", ++ DPKG_MAX_NUM_OF_EXTRACTS); ++ goto disable_fs; ++ } ++ } ++ ++ if (dpaa2_eth_fs_enabled(priv)) { ++ if (!dpaa2_eth_hash_enabled(priv)) { ++ dev_info(dev, "Insufficient queues. Steering is disabled\n"); ++ goto disable_fs; ++ } ++ ++ if (!dpaa2_eth_fs_mask_enabled(priv)) { ++ dev_info(dev, "Key masks not supported. 
Steering is disabled\n"); ++ goto disable_fs; ++ } ++ } ++ ++ return; ++ ++disable_fs: ++ priv->dpni_attrs.options |= DPNI_OPT_NO_FS; ++ priv->dpni_attrs.options &= ~DPNI_OPT_HAS_KEY_MASKING; ++} ++ ++static int prep_l4_rule(struct dpaa2_eth_priv *priv, ++ struct ethtool_tcpip4_spec *l4_value, ++ struct ethtool_tcpip4_spec *l4_mask, ++ void *key, void *mask, u8 l4_proto) ++{ ++ int offset; ++ ++ if (l4_mask->tos) { ++ netdev_err(priv->net_dev, "ToS is not supported for IPv4 L4\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ if (l4_mask->ip4src) { ++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC); ++ *(u32 *)(key + offset) = l4_value->ip4src; ++ *(u32 *)(mask + offset) = l4_mask->ip4src; ++ } ++ ++ if (l4_mask->ip4dst) { ++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST); ++ *(u32 *)(key + offset) = l4_value->ip4dst; ++ *(u32 *)(mask + offset) = l4_mask->ip4dst; ++ } ++ ++ if (l4_mask->psrc) { ++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); ++ *(u32 *)(key + offset) = l4_value->psrc; ++ *(u32 *)(mask + offset) = l4_mask->psrc; ++ } ++ ++ if (l4_mask->pdst) { ++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST); ++ *(u32 *)(key + offset) = l4_value->pdst; ++ *(u32 *)(mask + offset) = l4_mask->pdst; ++ } ++ ++ /* Only apply the rule for the user-specified L4 protocol ++ * and if ethertype matches IPv4 ++ */ ++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE); ++ *(u16 *)(key + offset) = htons(ETH_P_IP); ++ *(u16 *)(mask + offset) = 0xFFFF; ++ ++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO); ++ *(u8 *)(key + offset) = l4_proto; ++ *(u8 *)(mask + offset) = 0xFF; ++ ++ /* TODO: check IP version */ ++ ++ return 0; ++} ++ ++static int prep_eth_rule(struct dpaa2_eth_priv *priv, ++ struct ethhdr *eth_value, struct ethhdr *eth_mask, ++ void *key, void *mask) ++{ ++ int offset; ++ ++ if (eth_mask->h_proto) { ++ netdev_err(priv->net_dev, "Ethertype is not supported!\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ if (!is_zero_ether_addr(eth_mask->h_source)) { ++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_SA); ++ ether_addr_copy(key + offset, eth_value->h_source); ++ ether_addr_copy(mask + offset, eth_mask->h_source); ++ } ++ ++ if (!is_zero_ether_addr(eth_mask->h_dest)) { ++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA); ++ ether_addr_copy(key + offset, eth_value->h_dest); ++ ether_addr_copy(mask + offset, eth_mask->h_dest); ++ } ++ ++ return 0; ++} ++ ++static int prep_user_ip_rule(struct dpaa2_eth_priv *priv, ++ struct ethtool_usrip4_spec *uip_value, ++ struct ethtool_usrip4_spec *uip_mask, ++ void *key, void *mask) ++{ ++ int offset; ++ ++ if (uip_mask->tos) ++ return -EOPNOTSUPP; ++ ++ if (uip_mask->ip4src) { ++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC); ++ *(u32 *)(key + offset) = uip_value->ip4src; ++ *(u32 *)(mask + offset) = uip_mask->ip4src; ++ } ++ ++ if (uip_mask->ip4dst) { ++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST); ++ *(u32 *)(key + offset) = uip_value->ip4dst; ++ *(u32 *)(mask + offset) = uip_mask->ip4dst; ++ } ++ ++ if (uip_mask->proto) { ++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO); ++ *(u32 *)(key + offset) = uip_value->proto; ++ *(u32 *)(mask + offset) = uip_mask->proto; ++ } ++ if (uip_mask->l4_4_bytes) { ++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); ++ *(u16 *)(key + offset) = uip_value->l4_4_bytes << 16; ++ *(u16 *)(mask + offset) = uip_mask->l4_4_bytes << 16; ++ ++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST); ++ *(u16 *)(key + offset) 
= uip_value->l4_4_bytes & 0xFFFF;
++		*(u16 *)(mask + offset) = uip_mask->l4_4_bytes & 0xFFFF;
++	}
++
++	/* Ethertype must be IP */
++	offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
++	*(u16 *)(key + offset) = htons(ETH_P_IP);
++	*(u16 *)(mask + offset) = 0xFFFF;
++
++	return 0;
++}
++
++static int prep_ext_rule(struct dpaa2_eth_priv *priv,
++			 struct ethtool_flow_ext *ext_value,
++			 struct ethtool_flow_ext *ext_mask,
++			 void *key, void *mask)
++{
++	int offset;
++
++	if (ext_mask->vlan_etype)
++		return -EOPNOTSUPP;
++
++	if (ext_mask->vlan_tci) {
++		offset = cls_key_off(priv, NET_PROT_VLAN, NH_FLD_VLAN_TCI);
++		*(u16 *)(key + offset) = ext_value->vlan_tci;
++		*(u16 *)(mask + offset) = ext_mask->vlan_tci;
++	}
++
++	return 0;
++}
++
++static int prep_mac_ext_rule(struct dpaa2_eth_priv *priv,
++			     struct ethtool_flow_ext *ext_value,
++			     struct ethtool_flow_ext *ext_mask,
++			     void *key, void *mask)
++{
++	int offset;
++
++	if (!is_zero_ether_addr(ext_mask->h_dest)) {
++		offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
++		ether_addr_copy(key + offset, ext_value->h_dest);
++		ether_addr_copy(mask + offset, ext_mask->h_dest);
++	}
++
++	return 0;
++}
++
++static int prep_cls_rule(struct net_device *net_dev,
++			 struct ethtool_rx_flow_spec *fs,
++			 void *key)
++{
++	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++	const u8 key_size = cls_key_size(priv);
++	void *msk = key + key_size;
++	int err;
++
++	memset(key, 0, key_size * 2);
++
++	switch (fs->flow_type & 0xff) {
++	case TCP_V4_FLOW:
++		err = prep_l4_rule(priv, &fs->h_u.tcp_ip4_spec,
++				   &fs->m_u.tcp_ip4_spec, key, msk,
++				   IPPROTO_TCP);
++		break;
++	case UDP_V4_FLOW:
++		err = prep_l4_rule(priv, &fs->h_u.udp_ip4_spec,
++				   &fs->m_u.udp_ip4_spec, key, msk,
++				   IPPROTO_UDP);
++		break;
++	case SCTP_V4_FLOW:
++		err = prep_l4_rule(priv, &fs->h_u.sctp_ip4_spec,
++				   &fs->m_u.sctp_ip4_spec, key, msk,
++				   IPPROTO_SCTP);
++		break;
++	case ETHER_FLOW:
++		err = prep_eth_rule(priv, &fs->h_u.ether_spec,
++				    &fs->m_u.ether_spec, key, msk);
++		break;
++	case IP_USER_FLOW:
++		err = prep_user_ip_rule(priv, &fs->h_u.usr_ip4_spec,
++					&fs->m_u.usr_ip4_spec, key, msk);
++		break;
++	default:
++		/* TODO: AH, ESP */
++		return -EOPNOTSUPP;
++	}
++	if (err)
++		return err;
++
++	if (fs->flow_type & FLOW_EXT) {
++		err = prep_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
++		if (err)
++			return err;
++	}
++
++	if (fs->flow_type & FLOW_MAC_EXT) {
++		err = prep_mac_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
++		if (err)
++			return err;
++	}
++
++	return 0;
++}
++
++static int del_cls(struct net_device *net_dev, int location);
++
++static int do_cls(struct net_device *net_dev,
++		  struct ethtool_rx_flow_spec *fs,
++		  bool add)
++{
++	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++	struct device *dev = net_dev->dev.parent;
++	const int rule_cnt = dpaa2_eth_fs_count(priv);
++	struct dpni_rule_cfg rule_cfg;
++	struct dpni_fs_action_cfg fs_act = { 0 };
++	void *dma_mem;
++	int err = 0;
++
++	if (!dpaa2_eth_fs_enabled(priv)) {
++		netdev_err(net_dev, "dev does not support steering!\n");
++		/* dev doesn't support steering */
++		return -EOPNOTSUPP;
++	}
++
++	if ((fs->ring_cookie != RX_CLS_FLOW_DISC &&
++	     fs->ring_cookie >= dpaa2_eth_queue_count(priv)) ||
++	    fs->location >= rule_cnt)
++		return -EINVAL;
++
++	/* When adding a new rule, check if the location is available,
++	 * and if not free the existing table entry before inserting
++	 * the new one
++	 */
++	if (add && (priv->cls_rule[fs->location].in_use == true))
++		del_cls(net_dev, fs->location);
++
++	
memset(&rule_cfg, 0, sizeof(rule_cfg)); ++ rule_cfg.key_size = cls_key_size(priv); ++ ++ /* allocate twice the key size, for the actual key and for mask */ ++ dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL); ++ if (!dma_mem) ++ return -ENOMEM; ++ ++ err = prep_cls_rule(net_dev, fs, dma_mem); ++ if (err) ++ goto err_free_mem; ++ ++ rule_cfg.key_iova = dma_map_single(dev, dma_mem, ++ rule_cfg.key_size * 2, ++ DMA_TO_DEVICE); ++ ++ rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size; ++ ++ if (fs->ring_cookie == RX_CLS_FLOW_DISC) ++ fs_act.options |= DPNI_FS_OPT_DISCARD; ++ else ++ fs_act.flow_id = fs->ring_cookie; ++ ++ if (add) ++ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, ++ 0, fs->location, &rule_cfg, &fs_act); ++ else ++ err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, ++ 0, &rule_cfg); ++ ++ dma_unmap_single(dev, rule_cfg.key_iova, ++ rule_cfg.key_size * 2, DMA_TO_DEVICE); ++ ++ if (err) ++ netdev_err(net_dev, "dpaa2_add/remove_cls() error %d\n", err); ++ ++err_free_mem: ++ kfree(dma_mem); ++ ++ return err; ++} ++ ++static int add_cls(struct net_device *net_dev, ++ struct ethtool_rx_flow_spec *fs) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int err; ++ ++ err = do_cls(net_dev, fs, true); ++ if (err) ++ return err; ++ ++ priv->cls_rule[fs->location].in_use = true; ++ priv->cls_rule[fs->location].fs = *fs; ++ ++ return 0; ++} ++ ++static int del_cls(struct net_device *net_dev, int location) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int err; ++ ++ err = do_cls(net_dev, &priv->cls_rule[location].fs, false); ++ if (err) ++ return err; ++ ++ priv->cls_rule[location].in_use = false; ++ ++ return 0; ++} ++ ++static int dpaa2_eth_set_rxnfc(struct net_device *net_dev, ++ struct ethtool_rxnfc *rxnfc) ++{ ++ int err = 0; ++ ++ switch (rxnfc->cmd) { ++ case ETHTOOL_SRXCLSRLINS: ++ err = add_cls(net_dev, &rxnfc->fs); ++ break; ++ ++ case ETHTOOL_SRXCLSRLDEL: ++ err = del_cls(net_dev, rxnfc->fs.location); ++ break; ++ ++ default: ++ err = -EOPNOTSUPP; ++ } ++ ++ return err; ++} ++ ++static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, ++ struct ethtool_rxnfc *rxnfc, u32 *rule_locs) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ const int rule_cnt = dpaa2_eth_fs_count(priv); ++ int i, j; ++ ++ switch (rxnfc->cmd) { ++ case ETHTOOL_GRXFH: ++ /* we purposely ignore cmd->flow_type, because the hashing key ++ * is the same (and fixed) for all protocols ++ */ ++ rxnfc->data = priv->rx_flow_hash; ++ break; ++ ++ case ETHTOOL_GRXRINGS: ++ rxnfc->data = dpaa2_eth_queue_count(priv); ++ break; ++ ++ case ETHTOOL_GRXCLSRLCNT: ++ for (i = 0, rxnfc->rule_cnt = 0; i < rule_cnt; i++) ++ if (priv->cls_rule[i].in_use) ++ rxnfc->rule_cnt++; ++ rxnfc->data = rule_cnt; ++ break; ++ ++ case ETHTOOL_GRXCLSRULE: ++ if (!priv->cls_rule[rxnfc->fs.location].in_use) ++ return -EINVAL; ++ ++ rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs; ++ break; ++ ++ case ETHTOOL_GRXCLSRLALL: ++ for (i = 0, j = 0; i < rule_cnt; i++) { ++ if (!priv->cls_rule[i].in_use) ++ continue; ++ if (j == rxnfc->rule_cnt) ++ return -EMSGSIZE; ++ rule_locs[j++] = i; ++ } ++ rxnfc->rule_cnt = j; ++ rxnfc->data = rule_cnt; ++ break; ++ ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ return 0; ++} ++ ++const struct ethtool_ops dpaa2_ethtool_ops = { ++ .get_drvinfo = dpaa2_eth_get_drvinfo, ++ .get_link = ethtool_op_get_link, ++ .get_settings = dpaa2_eth_get_settings, ++ .set_settings = dpaa2_eth_set_settings, ++ .get_pauseparam = dpaa2_eth_get_pauseparam, 
++	.set_pauseparam = dpaa2_eth_set_pauseparam,
++	.get_sset_count = dpaa2_eth_get_sset_count,
++	.get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
++	.get_strings = dpaa2_eth_get_strings,
++	.get_rxnfc = dpaa2_eth_get_rxnfc,
++	.set_rxnfc = dpaa2_eth_set_rxnfc,
++};
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
+@@ -0,0 +1,176 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPKG_H_
++#define __FSL_DPKG_H_
++
++#include <linux/types.h>
++#include "net.h"
++
++/* Data Path Key Generator API
++ * Contains initialization APIs and runtime APIs for the Key Generator
++ */
++
++/** Key Generator properties */
++
++/**
++ * Number of masks per key extraction
++ */
++#define DPKG_NUM_OF_MASKS 4
++/**
++ * Number of extractions per key profile
++ */
++#define DPKG_MAX_NUM_OF_EXTRACTS 10
++
++/**
++ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
++ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
++ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
++ * @DPKG_FULL_FIELD: Extract a full field
++ */
++enum dpkg_extract_from_hdr_type {
++	DPKG_FROM_HDR = 0,
++	DPKG_FROM_FIELD = 1,
++	DPKG_FULL_FIELD = 2
++};
++
++/**
++ * enum dpkg_extract_type - Enumeration for selecting extraction type
++ * @DPKG_EXTRACT_FROM_HDR: Extract from the header
++ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
++ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result;
++ * e.g.
can be used to extract header existence; ++ * please refer to 'Parse Result definition' section in the parser BG ++ */ ++enum dpkg_extract_type { ++ DPKG_EXTRACT_FROM_HDR = 0, ++ DPKG_EXTRACT_FROM_DATA = 1, ++ DPKG_EXTRACT_FROM_PARSE = 3 ++}; ++ ++/** ++ * struct dpkg_mask - A structure for defining a single extraction mask ++ * @mask: Byte mask for the extracted content ++ * @offset: Offset within the extracted content ++ */ ++struct dpkg_mask { ++ u8 mask; ++ u8 offset; ++}; ++ ++/** ++ * struct dpkg_extract - A structure for defining a single extraction ++ * @type: Determines how the union below is interpreted: ++ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr'; ++ * DPKG_EXTRACT_FROM_DATA: selects 'from_data'; ++ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse' ++ * @extract: Selects extraction method ++ * @num_of_byte_masks: Defines the number of valid entries in the array below; ++ * This is also the number of bytes to be used as masks ++ * @masks: Masks parameters ++ */ ++struct dpkg_extract { ++ enum dpkg_extract_type type; ++ /** ++ * union extract - Selects extraction method ++ * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR' ++ * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA' ++ * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE' ++ */ ++ union { ++ /** ++ * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR' ++ * @prot: Any of the supported headers ++ * @type: Defines the type of header extraction: ++ * DPKG_FROM_HDR: use size & offset below; ++ * DPKG_FROM_FIELD: use field, size and offset below; ++ * DPKG_FULL_FIELD: use field below ++ * @field: One of the supported fields (NH_FLD_) ++ * ++ * @size: Size in bytes ++ * @offset: Byte offset ++ * @hdr_index: Clear for cases not listed below; ++ * Used for protocols that may have more than a single ++ * header, 0 indicates an outer header; ++ * Supported protocols (possible values): ++ * NET_PROT_VLAN (0, HDR_INDEX_LAST); ++ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST); ++ * NET_PROT_IP(0, HDR_INDEX_LAST); ++ * NET_PROT_IPv4(0, HDR_INDEX_LAST); ++ * NET_PROT_IPv6(0, HDR_INDEX_LAST); ++ */ ++ ++ struct { ++ enum net_prot prot; ++ enum dpkg_extract_from_hdr_type type; ++ u32 field; ++ u8 size; ++ u8 offset; ++ u8 hdr_index; ++ } from_hdr; ++ /** ++ * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA' ++ * @size: Size in bytes ++ * @offset: Byte offset ++ */ ++ struct { ++ u8 size; ++ u8 offset; ++ } from_data; ++ ++ /** ++ * struct from_parse - Used when ++ * 'type = DPKG_EXTRACT_FROM_PARSE' ++ * @size: Size in bytes ++ * @offset: Byte offset ++ */ ++ struct { ++ u8 size; ++ u8 offset; ++ } from_parse; ++ } extract; ++ ++ u8 num_of_byte_masks; ++ struct dpkg_mask masks[DPKG_NUM_OF_MASKS]; ++}; ++ ++/** ++ * struct dpkg_profile_cfg - A structure for defining a full Key Generation ++ * profile (rule) ++ * @num_extracts: Defines the number of valid entries in the array below ++ * @extracts: Array of required extractions ++ */ ++struct dpkg_profile_cfg { ++ u8 num_extracts; ++ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS]; ++}; ++ ++#endif /* __FSL_DPKG_H_ */ +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h +@@ -0,0 +1,600 @@ ++/* Copyright 2013-2016 Freescale Semiconductor Inc. 
++ * Copyright 2016 NXP ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++#ifndef _FSL_DPNI_CMD_H ++#define _FSL_DPNI_CMD_H ++ ++/* DPNI Version */ ++#define DPNI_VER_MAJOR 7 ++#define DPNI_VER_MINOR 0 ++#define DPNI_CMD_BASE_VERSION 1 ++#define DPNI_CMD_ID_OFFSET 4 ++ ++#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION) ++ ++#define DPNI_CMDID_OPEN DPNI_CMD(0x801) ++#define DPNI_CMDID_CLOSE DPNI_CMD(0x800) ++#define DPNI_CMDID_CREATE DPNI_CMD(0x901) ++#define DPNI_CMDID_DESTROY DPNI_CMD(0x900) ++#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01) ++ ++#define DPNI_CMDID_ENABLE DPNI_CMD(0x002) ++#define DPNI_CMDID_DISABLE DPNI_CMD(0x003) ++#define DPNI_CMDID_GET_ATTR DPNI_CMD(0x004) ++#define DPNI_CMDID_RESET DPNI_CMD(0x005) ++#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006) ++ ++#define DPNI_CMDID_SET_IRQ DPNI_CMD(0x010) ++#define DPNI_CMDID_GET_IRQ DPNI_CMD(0x011) ++#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012) ++#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013) ++#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014) ++#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015) ++#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016) ++#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017) ++ ++#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200) ++#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B) ++ ++#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210) ++#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212) ++#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215) ++#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216) ++#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217) ++#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A) ++#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD(0x21B) ++ ++#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220) ++#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221) ++#define DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222) ++#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223) ++#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224) ++#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225) ++#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD(0x226) ++#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227) ++#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228) ++ ++#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235) ++ ++#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244) ++#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245) ++#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246) ++ ++#define DPNI_CMDID_GET_STATISTICS DPNI_CMD(0x25D) ++#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E) ++#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F) ++#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260) ++#define DPNI_CMDID_GET_TAILDROP DPNI_CMD(0x261) ++#define DPNI_CMDID_SET_TAILDROP DPNI_CMD(0x262) ++ ++#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263) ++ ++#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD(0x264) ++#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD(0x265) ++ ++#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266) ++#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD(0x267) ++#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD(0x268) ++#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD(0x269) ++#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD(0x26A) ++#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B) ++#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C) ++ ++/* Macros for accessing command fields smaller than 1byte */ ++#define DPNI_MASK(field) \ ++ GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \ ++ DPNI_##field##_SHIFT) ++ ++#define dpni_set_field(var, field, val) \ ++ ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field))) ++#define dpni_get_field(var, field) \ ++ (((var) & DPNI_MASK(field)) >> 
DPNI_##field##_SHIFT) ++ ++struct dpni_cmd_open { ++ __le32 dpni_id; ++}; ++ ++#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order)) ++struct dpni_cmd_set_pools { ++ /* cmd word 0 */ ++ u8 num_dpbp; ++ u8 backup_pool_mask; ++ __le16 pad; ++ /* cmd word 0..4 */ ++ __le32 dpbp_id[DPNI_MAX_DPBP]; ++ /* cmd word 4..6 */ ++ __le16 buffer_size[DPNI_MAX_DPBP]; ++}; ++ ++/* The enable indication is always the least significant bit */ ++#define DPNI_ENABLE_SHIFT 0 ++#define DPNI_ENABLE_SIZE 1 ++ ++struct dpni_rsp_is_enabled { ++ u8 enabled; ++}; ++ ++struct dpni_rsp_get_irq { ++ /* response word 0 */ ++ __le32 irq_val; ++ __le32 pad; ++ /* response word 1 */ ++ __le64 irq_addr; ++ /* response word 2 */ ++ __le32 irq_num; ++ __le32 type; ++}; ++ ++struct dpni_cmd_set_irq_enable { ++ u8 enable; ++ u8 pad[3]; ++ u8 irq_index; ++}; ++ ++struct dpni_cmd_get_irq_enable { ++ __le32 pad; ++ u8 irq_index; ++}; ++ ++struct dpni_rsp_get_irq_enable { ++ u8 enabled; ++}; ++ ++struct dpni_cmd_set_irq_mask { ++ __le32 mask; ++ u8 irq_index; ++}; ++ ++struct dpni_cmd_get_irq_mask { ++ __le32 pad; ++ u8 irq_index; ++}; ++ ++struct dpni_rsp_get_irq_mask { ++ __le32 mask; ++}; ++ ++struct dpni_cmd_get_irq_status { ++ __le32 status; ++ u8 irq_index; ++}; ++ ++struct dpni_rsp_get_irq_status { ++ __le32 status; ++}; ++ ++struct dpni_cmd_clear_irq_status { ++ __le32 status; ++ u8 irq_index; ++}; ++ ++struct dpni_rsp_get_attr { ++ /* response word 0 */ ++ __le32 options; ++ u8 num_queues; ++ u8 num_tcs; ++ u8 mac_filter_entries; ++ u8 pad0; ++ /* response word 1 */ ++ u8 vlan_filter_entries; ++ u8 pad1; ++ u8 qos_entries; ++ u8 pad2; ++ __le16 fs_entries; ++ __le16 pad3; ++ /* response word 2 */ ++ u8 qos_key_size; ++ u8 fs_key_size; ++ __le16 wriop_version; ++}; ++ ++#define DPNI_ERROR_ACTION_SHIFT 0 ++#define DPNI_ERROR_ACTION_SIZE 4 ++#define DPNI_FRAME_ANN_SHIFT 4 ++#define DPNI_FRAME_ANN_SIZE 1 ++ ++struct dpni_cmd_set_errors_behavior { ++ __le32 errors; ++ /* from least significant bit: error_action:4, set_frame_annotation:1 */ ++ u8 flags; ++}; ++ ++/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation ++ * buffer layouts, but they all share the same parameters. ++ * If one of the functions changes, below structure needs to be split. 
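++ * Until then, the same layout below serves all three queue types,
++ * selected through the qtype field.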
++ */ ++ ++#define DPNI_PASS_TS_SHIFT 0 ++#define DPNI_PASS_TS_SIZE 1 ++#define DPNI_PASS_PR_SHIFT 1 ++#define DPNI_PASS_PR_SIZE 1 ++#define DPNI_PASS_FS_SHIFT 2 ++#define DPNI_PASS_FS_SIZE 1 ++ ++struct dpni_cmd_get_buffer_layout { ++ u8 qtype; ++}; ++ ++struct dpni_rsp_get_buffer_layout { ++ /* response word 0 */ ++ u8 pad0[6]; ++ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */ ++ u8 flags; ++ u8 pad1; ++ /* response word 1 */ ++ __le16 private_data_size; ++ __le16 data_align; ++ __le16 head_room; ++ __le16 tail_room; ++}; ++ ++struct dpni_cmd_set_buffer_layout { ++ /* cmd word 0 */ ++ u8 qtype; ++ u8 pad0[3]; ++ __le16 options; ++ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */ ++ u8 flags; ++ u8 pad1; ++ /* cmd word 1 */ ++ __le16 private_data_size; ++ __le16 data_align; ++ __le16 head_room; ++ __le16 tail_room; ++}; ++ ++struct dpni_cmd_set_offload { ++ u8 pad[3]; ++ u8 dpni_offload; ++ __le32 config; ++}; ++ ++struct dpni_cmd_get_offload { ++ u8 pad[3]; ++ u8 dpni_offload; ++}; ++ ++struct dpni_rsp_get_offload { ++ __le32 pad; ++ __le32 config; ++}; ++ ++struct dpni_cmd_get_qdid { ++ u8 qtype; ++}; ++ ++struct dpni_rsp_get_qdid { ++ __le16 qdid; ++}; ++ ++struct dpni_rsp_get_tx_data_offset { ++ __le16 data_offset; ++}; ++ ++struct dpni_cmd_get_statistics { ++ u8 page_number; ++}; ++ ++struct dpni_rsp_get_statistics { ++ __le64 counter[DPNI_STATISTICS_CNT]; ++}; ++ ++struct dpni_cmd_set_link_cfg { ++ /* cmd word 0 */ ++ __le64 pad0; ++ /* cmd word 1 */ ++ __le32 rate; ++ __le32 pad1; ++ /* cmd word 2 */ ++ __le64 options; ++}; ++ ++#define DPNI_LINK_STATE_SHIFT 0 ++#define DPNI_LINK_STATE_SIZE 1 ++ ++struct dpni_rsp_get_link_state { ++ /* response word 0 */ ++ __le32 pad0; ++ /* from LSB: up:1 */ ++ u8 flags; ++ u8 pad1[3]; ++ /* response word 1 */ ++ __le32 rate; ++ __le32 pad2; ++ /* response word 2 */ ++ __le64 options; ++}; ++ ++struct dpni_cmd_set_tx_shaping { ++ /* cmd word 0 */ ++ __le16 max_burst_size; ++ __le16 pad0[3]; ++ /* cmd word 1 */ ++ __le32 rate_limit; ++}; ++ ++struct dpni_cmd_set_max_frame_length { ++ __le16 max_frame_length; ++}; ++ ++struct dpni_rsp_get_max_frame_length { ++ __le16 max_frame_length; ++}; ++ ++struct dpni_cmd_set_multicast_promisc { ++ u8 enable; ++}; ++ ++struct dpni_rsp_get_multicast_promisc { ++ u8 enabled; ++}; ++ ++struct dpni_cmd_set_unicast_promisc { ++ u8 enable; ++}; ++ ++struct dpni_rsp_get_unicast_promisc { ++ u8 enabled; ++}; ++ ++struct dpni_cmd_set_primary_mac_addr { ++ __le16 pad; ++ u8 mac_addr[6]; ++}; ++ ++struct dpni_rsp_get_primary_mac_addr { ++ __le16 pad; ++ u8 mac_addr[6]; ++}; ++ ++struct dpni_rsp_get_port_mac_addr { ++ __le16 pad; ++ u8 mac_addr[6]; ++}; ++ ++struct dpni_cmd_add_mac_addr { ++ __le16 pad; ++ u8 mac_addr[6]; ++}; ++ ++struct dpni_cmd_remove_mac_addr { ++ __le16 pad; ++ u8 mac_addr[6]; ++}; ++ ++#define DPNI_UNICAST_FILTERS_SHIFT 0 ++#define DPNI_UNICAST_FILTERS_SIZE 1 ++#define DPNI_MULTICAST_FILTERS_SHIFT 1 ++#define DPNI_MULTICAST_FILTERS_SIZE 1 ++ ++struct dpni_cmd_clear_mac_filters { ++ /* from LSB: unicast:1, multicast:1 */ ++ u8 flags; ++}; ++ ++#define DPNI_DIST_MODE_SHIFT 0 ++#define DPNI_DIST_MODE_SIZE 4 ++#define DPNI_MISS_ACTION_SHIFT 4 ++#define DPNI_MISS_ACTION_SIZE 4 ++ ++struct dpni_cmd_set_rx_tc_dist { ++ /* cmd word 0 */ ++ __le16 dist_size; ++ u8 tc_id; ++ /* from LSB: dist_mode:4, miss_action:4 */ ++ u8 flags; ++ __le16 pad0; ++ __le16 default_flow_id; ++ /* cmd word 1..5 */ ++ __le64 pad1[5]; ++ /* cmd word 6 */ ++ __le64 key_cfg_iova; ++}; ++ ++/* 
dpni_set_rx_tc_dist extension (structure of the DMA-able memory at ++ * key_cfg_iova) ++ */ ++struct dpni_mask_cfg { ++ u8 mask; ++ u8 offset; ++}; ++ ++#define DPNI_EFH_TYPE_SHIFT 0 ++#define DPNI_EFH_TYPE_SIZE 4 ++#define DPNI_EXTRACT_TYPE_SHIFT 0 ++#define DPNI_EXTRACT_TYPE_SIZE 4 ++ ++struct dpni_dist_extract { ++ /* word 0 */ ++ u8 prot; ++ /* EFH type stored in the 4 least significant bits */ ++ u8 efh_type; ++ u8 size; ++ u8 offset; ++ __le32 field; ++ /* word 1 */ ++ u8 hdr_index; ++ u8 constant; ++ u8 num_of_repeats; ++ u8 num_of_byte_masks; ++ /* Extraction type is stored in the 4 LSBs */ ++ u8 extract_type; ++ u8 pad[3]; ++ /* word 2 */ ++ struct dpni_mask_cfg masks[4]; ++}; ++ ++struct dpni_ext_set_rx_tc_dist { ++ /* extension word 0 */ ++ u8 num_extracts; ++ u8 pad[7]; ++ /* words 1..25 */ ++ struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS]; ++}; ++ ++struct dpni_cmd_get_queue { ++ u8 qtype; ++ u8 tc; ++ u8 index; ++}; ++ ++#define DPNI_DEST_TYPE_SHIFT 0 ++#define DPNI_DEST_TYPE_SIZE 4 ++#define DPNI_STASH_CTRL_SHIFT 6 ++#define DPNI_STASH_CTRL_SIZE 1 ++#define DPNI_HOLD_ACTIVE_SHIFT 7 ++#define DPNI_HOLD_ACTIVE_SIZE 1 ++ ++struct dpni_rsp_get_queue { ++ /* response word 0 */ ++ __le64 pad0; ++ /* response word 1 */ ++ __le32 dest_id; ++ __le16 pad1; ++ u8 dest_prio; ++ /* From LSB: dest_type:4, pad:2, flc_stash_ctrl:1, hold_active:1 */ ++ u8 flags; ++ /* response word 2 */ ++ __le64 flc; ++ /* response word 3 */ ++ __le64 user_context; ++ /* response word 4 */ ++ __le32 fqid; ++ __le16 qdbin; ++}; ++ ++struct dpni_cmd_set_queue { ++ /* cmd word 0 */ ++ u8 qtype; ++ u8 tc; ++ u8 index; ++ u8 options; ++ __le32 pad0; ++ /* cmd word 1 */ ++ __le32 dest_id; ++ __le16 pad1; ++ u8 dest_prio; ++ u8 flags; ++ /* cmd word 2 */ ++ __le64 flc; ++ /* cmd word 3 */ ++ __le64 user_context; ++}; ++ ++struct dpni_cmd_add_fs_entry { ++ /* cmd word 0 */ ++ u16 options; ++ u8 tc_id; ++ u8 key_size; ++ u16 index; ++ u16 flow_id; ++ /* cmd word 1 */ ++ u64 key_iova; ++ /* cmd word 2 */ ++ u64 mask_iova; ++ /* cmd word 3 */ ++ u64 flc; ++}; ++ ++struct dpni_cmd_remove_fs_entry { ++ /* cmd word 0 */ ++ __le16 pad0; ++ u8 tc_id; ++ u8 key_size; ++ __le32 pad1; ++ /* cmd word 1 */ ++ u64 key_iova; ++ /* cmd word 2 */ ++ u64 mask_iova; ++}; ++ ++struct dpni_cmd_set_taildrop { ++ /* cmd word 0 */ ++ u8 congestion_point; ++ u8 qtype; ++ u8 tc; ++ u8 index; ++ __le32 pad0; ++ /* cmd word 1 */ ++ /* Only least significant bit is relevant */ ++ u8 enable; ++ u8 pad1; ++ u8 units; ++ u8 pad2; ++ __le32 threshold; ++}; ++ ++struct dpni_cmd_get_taildrop { ++ u8 congestion_point; ++ u8 qtype; ++ u8 tc; ++ u8 index; ++}; ++ ++struct dpni_rsp_get_taildrop { ++ /* cmd word 0 */ ++ __le64 pad0; ++ /* cmd word 1 */ ++ /* only least significant bit is relevant */ ++ u8 enable; ++ u8 pad1; ++ u8 units; ++ u8 pad2; ++ __le32 threshold; ++}; ++ ++#define DPNI_DEST_TYPE_SHIFT 0 ++#define DPNI_DEST_TYPE_SIZE 4 ++#define DPNI_CONG_UNITS_SHIFT 4 ++#define DPNI_CONG_UNITS_SIZE 2 ++ ++struct dpni_cmd_set_congestion_notification { ++ /* cmd word 0 */ ++ u8 qtype; ++ u8 tc; ++ u8 pad[6]; ++ /* cmd word 1 */ ++ u32 dest_id; ++ u16 notification_mode; ++ u8 dest_priority; ++ /* from LSB: dest_type: 4 units:2 */ ++ u8 type_units; ++ /* cmd word 2 */ ++ u64 message_iova; ++ /* cmd word 3 */ ++ u64 message_ctx; ++ /* cmd word 4 */ ++ u32 threshold_entry; ++ u32 threshold_exit; ++}; ++ ++#endif /* _FSL_DPNI_CMD_H */ +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c +@@ -0,0 +1,1770 @@ ++/* Copyright 2013-2016 
Freescale Semiconductor Inc. ++ * Copyright 2016 NXP ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */
++#include "../../fsl-mc/include/mc-sys.h"
++#include "../../fsl-mc/include/mc-cmd.h"
++#include "dpni.h"
++#include "dpni-cmd.h"
++
++/**
++ * dpni_prepare_key_cfg() - prepare the extract parameters
++ * @cfg: Key Generation profile (rule) definition
++ * @key_cfg_buf: Zeroed 256-byte buffer; filled in here, to be mapped to
++ *		 DMA by the caller afterwards
++ *
++ * This function has to be called before the following functions:
++ *	- dpni_set_rx_tc_dist()
++ *	- dpni_set_qos_table()
++ */
++int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf)
++{
++	int i, j;
++	struct dpni_ext_set_rx_tc_dist *dpni_ext;
++	struct dpni_dist_extract *extr;
++
++	if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
++		return -EINVAL;
++
++	dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf;
++	dpni_ext->num_extracts = cfg->num_extracts;
++
++	for (i = 0; i < cfg->num_extracts; i++) {
++		extr = &dpni_ext->extracts[i];
++
++		switch (cfg->extracts[i].type) {
++		case DPKG_EXTRACT_FROM_HDR:
++			extr->prot = cfg->extracts[i].extract.from_hdr.prot;
++			dpni_set_field(extr->efh_type, EFH_TYPE,
++				       cfg->extracts[i].extract.from_hdr.type);
++			extr->size = cfg->extracts[i].extract.from_hdr.size;
++			extr->offset = cfg->extracts[i].extract.from_hdr.offset;
++			extr->field = cpu_to_le32(
++				cfg->extracts[i].extract.from_hdr.field);
++			extr->hdr_index =
++				cfg->extracts[i].extract.from_hdr.hdr_index;
++			break;
++		case DPKG_EXTRACT_FROM_DATA:
++			extr->size = cfg->extracts[i].extract.from_data.size;
++			extr->offset =
++				cfg->extracts[i].extract.from_data.offset;
++			break;
++		case DPKG_EXTRACT_FROM_PARSE:
++			extr->size = cfg->extracts[i].extract.from_parse.size;
++			extr->offset =
++				cfg->extracts[i].extract.from_parse.offset;
++			break;
++		default:
++			return -EINVAL;
++		}
++
++		extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
++		dpni_set_field(extr->extract_type, EXTRACT_TYPE,
++			       cfg->extracts[i].type);
++
++		for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
++			extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
++			extr->masks[j].offset =
++				cfg->extracts[i].masks[j].offset;
++		}
++	}
++
++	return 0;
++}
++
++/**
++ * dpni_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpni_id: DPNI unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpni_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
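++ *
++ * For illustration only (not part of the MC firmware documentation), a
++ * minimal open/use/close sketch, assuming 'mc_io' is a valid MC portal
++ * and 'dpni_id' comes from the DPNI's fsl-mc device description:
++ *
++ *	u16 token;
++ *	int err;
++ *
++ *	err = dpni_open(mc_io, 0, dpni_id, &token);
++ *	if (err)
++ *		return err;
++ *	... issue DPNI commands using 'token' ...
++ *	err = dpni_close(mc_io, 0, token);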
++ */
++int dpni_open(struct fsl_mc_io *mc_io,
++	      u32 cmd_flags,
++	      int dpni_id,
++	      u16 *token)
++{
++	struct mc_command cmd = { 0 };
++	struct dpni_cmd_open *cmd_params;
++	int err;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
++					  cmd_flags,
++					  0);
++	cmd_params = (struct dpni_cmd_open *)cmd.params;
++	cmd_params->dpni_id = cpu_to_le32(dpni_id);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	*token = mc_cmd_hdr_read_token(&cmd);
++
++	return 0;
++}
++
++/**
++ * dpni_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_close(struct fsl_mc_io *mc_io,
++	       u32 cmd_flags,
++	       u16 token)
++{
++	struct mc_command cmd = { 0 };
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
++					  cmd_flags,
++					  token);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_set_pools() - Set buffer pools configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Buffer pools configuration
++ *
++ * This function is mandatory for DPNI operation.
++ * Warning: Allowed only when DPNI is disabled
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_pools(struct fsl_mc_io *mc_io,
++		   u32 cmd_flags,
++		   u16 token,
++		   const struct dpni_pools_cfg *cfg)
++{
++	struct mc_command cmd = { 0 };
++	struct dpni_cmd_set_pools *cmd_params;
++	int i;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
++	cmd_params->num_dpbp = cfg->num_dpbp;
++	for (i = 0; i < DPNI_MAX_DPBP; i++) {
++		cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
++		cmd_params->buffer_size[i] =
++			cpu_to_le16(cfg->pools[i].buffer_size);
++		cmd_params->backup_pool_mask |=
++			DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);
++	}
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_enable(struct fsl_mc_io *mc_io,
++		u32 cmd_flags,
++		u16 token)
++{
++	struct mc_command cmd = { 0 };
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
++					  cmd_flags,
++					  token);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
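++ *
++ * A possible reconfiguration sequence, sketched for illustration only
++ * (dpni_set_pools() is allowed only while the DPNI is disabled, and
++ * 'pools_cfg' is a caller-prepared struct dpni_pools_cfg):
++ *
++ *	err = dpni_disable(mc_io, 0, token);
++ *	if (!err)
++ *		err = dpni_set_pools(mc_io, 0, token, &pools_cfg);
++ *	if (!err)
++ *		err = dpni_enable(mc_io, 0, token);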
++ */
++int dpni_disable(struct fsl_mc_io *mc_io,
++		 u32 cmd_flags,
++		 u16 token)
++{
++	struct mc_command cmd = { 0 };
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
++					  cmd_flags,
++					  token);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_is_enabled() - Check if the DPNI is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_is_enabled(struct fsl_mc_io *mc_io,
++		    u32 cmd_flags,
++		    u16 token,
++		    int *en)
++{
++	struct mc_command cmd = { 0 };
++	struct dpni_rsp_is_enabled *rsp_params;
++	int err;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED,
++					  cmd_flags,
++					  token);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	rsp_params = (struct dpni_rsp_is_enabled *)cmd.params;
++	*en = dpni_get_field(rsp_params->enabled, ENABLE);
++
++	return 0;
++}
++
++/**
++ * dpni_reset() - Reset the DPNI, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_reset(struct fsl_mc_io *mc_io,
++	       u32 cmd_flags,
++	       u16 token)
++{
++	struct mc_command cmd = { 0 };
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
++					  cmd_flags,
++					  token);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state: enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable setting
++ * controls the overall interrupt state: if the interrupt is disabled,
++ * no cause can trigger an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
++			u32 cmd_flags,
++			u16 token,
++			u8 irq_index,
++			u8 en)
++{
++	struct mc_command cmd = { 0 };
++	struct dpni_cmd_set_irq_enable *cmd_params;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params;
++	dpni_set_field(cmd_params->enable, ENABLE, en);
++	cmd_params->irq_index = irq_index;
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
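++ *
++ * A minimal sketch of the usual arming sequence (illustrative only;
++ * assumes the caller is interested solely in link-change events, using
++ * DPNI_IRQ_INDEX and DPNI_IRQ_EVENT_LINK_CHANGED from dpni.h):
++ *
++ *	err = dpni_set_irq_mask(mc_io, 0, token, DPNI_IRQ_INDEX,
++ *				DPNI_IRQ_EVENT_LINK_CHANGED);
++ *	if (!err)
++ *		err = dpni_set_irq_enable(mc_io, 0, token,
++ *					  DPNI_IRQ_INDEX, 1);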
++ */ ++int dpni_get_irq_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u8 *en) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_get_irq_enable *cmd_params; ++ struct dpni_rsp_get_irq_enable *rsp_params; ++ ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params; ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params; ++ *en = dpni_get_field(rsp_params->enabled, ENABLE); ++ ++ return 0; ++} ++ ++/** ++ * dpni_set_irq_mask() - Set interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @irq_index: The interrupt index to configure ++ * @mask: event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting IRQ ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_irq_mask(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 mask) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_set_irq_mask *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params; ++ cmd_params->mask = cpu_to_le32(mask); ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpni_get_irq_mask() - Get interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @irq_index: The interrupt index to configure ++ * @mask: Returned event mask to trigger interrupt ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_irq_mask(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 *mask) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_get_irq_mask *cmd_params; ++ struct dpni_rsp_get_irq_mask *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params; ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params; ++ *mask = le32_to_cpu(rsp_params->mask); ++ ++ return 0; ++} ++ ++/** ++ * dpni_get_irq_status() - Get the current status of any pending interrupts. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise. 
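++ *
++ * Illustrative sketch; note that @status is also an input, selecting the
++ * causes to query, and that the status register is write-1-to-clear:
++ *
++ *	u32 status = DPNI_IRQ_EVENT_LINK_CHANGED;
++ *
++ *	err = dpni_get_irq_status(mc_io, 0, token, DPNI_IRQ_INDEX, &status);
++ *	if (!err && (status & DPNI_IRQ_EVENT_LINK_CHANGED)) {
++ *		... refresh link state, e.g. via dpni_get_link_state() ...
++ *		dpni_clear_irq_status(mc_io, 0, token, DPNI_IRQ_INDEX,
++ *				      DPNI_IRQ_EVENT_LINK_CHANGED);
++ *	}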
++ */ ++int dpni_get_irq_status(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 *status) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_get_irq_status *cmd_params; ++ struct dpni_rsp_get_irq_status *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params; ++ cmd_params->status = cpu_to_le32(*status); ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params; ++ *status = le32_to_cpu(rsp_params->status); ++ ++ return 0; ++} ++ ++/** ++ * dpni_clear_irq_status() - Clear a pending interrupt's status ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @irq_index: The interrupt index to configure ++ * @status: bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_clear_irq_status(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 status) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_clear_irq_status *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params; ++ cmd_params->irq_index = irq_index; ++ cmd_params->status = cpu_to_le32(status); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpni_get_attributes() - Retrieve DPNI attributes. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @attr: Object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_attributes(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dpni_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_rsp_get_attr *rsp_params; ++ ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpni_rsp_get_attr *)cmd.params; ++ attr->options = le32_to_cpu(rsp_params->options); ++ attr->num_queues = rsp_params->num_queues; ++ attr->num_tcs = rsp_params->num_tcs; ++ attr->mac_filter_entries = rsp_params->mac_filter_entries; ++ attr->vlan_filter_entries = rsp_params->vlan_filter_entries; ++ attr->qos_entries = rsp_params->qos_entries; ++ attr->fs_entries = le16_to_cpu(rsp_params->fs_entries); ++ attr->qos_key_size = rsp_params->qos_key_size; ++ attr->fs_key_size = rsp_params->fs_key_size; ++ attr->wriop_version = le16_to_cpu(rsp_params->wriop_version); ++ ++ return 0; ++} ++ ++/** ++ * dpni_set_errors_behavior() - Set errors behavior ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @cfg: Errors configuration ++ * ++ * this function may be called numerous times with different ++ * error masks ++ * ++ * Return: '0' on Success; Error code otherwise. 
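++ *
++ * A minimal sketch with illustrative values: let frames with L3/L4
++ * checksum errors continue on the regular path but have them flagged in
++ * the frame annotation status, using the DPNI_ERROR_* masks declared in
++ * dpni.h:
++ *
++ *	struct dpni_error_cfg err_cfg = {
++ *		.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE,
++ *		.error_action = DPNI_ERROR_ACTION_CONTINUE,
++ *		.set_frame_annotation = 1,
++ *	};
++ *
++ *	err = dpni_set_errors_behavior(mc_io, 0, token, &err_cfg);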
++ */ ++int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dpni_error_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_set_errors_behavior *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params; ++ cmd_params->errors = cpu_to_le32(cfg->errors); ++ dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action); ++ dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpni_get_buffer_layout() - Retrieve buffer layout attributes. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @qtype: Type of queue to retrieve configuration for ++ * @layout: Returns buffer layout attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_buffer_layout(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ enum dpni_queue_type qtype, ++ struct dpni_buffer_layout *layout) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_get_buffer_layout *cmd_params; ++ struct dpni_rsp_get_buffer_layout *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params; ++ cmd_params->qtype = qtype; ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params; ++ layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS); ++ layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR); ++ layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS); ++ layout->private_data_size = le16_to_cpu(rsp_params->private_data_size); ++ layout->data_align = le16_to_cpu(rsp_params->data_align); ++ layout->data_head_room = le16_to_cpu(rsp_params->head_room); ++ layout->data_tail_room = le16_to_cpu(rsp_params->tail_room); ++ ++ return 0; ++} ++ ++/** ++ * dpni_set_buffer_layout() - Set buffer layout configuration. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @qtype: Type of queue this configuration applies to ++ * @layout: Buffer layout configuration ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ * ++ * @warning Allowed only when DPNI is disabled ++ */ ++int dpni_set_buffer_layout(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ enum dpni_queue_type qtype, ++ const struct dpni_buffer_layout *layout) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_set_buffer_layout *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params; ++ cmd_params->qtype = qtype; ++ cmd_params->options = cpu_to_le16(layout->options); ++ dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp); ++ dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result); ++ dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status); ++ cmd_params->private_data_size = cpu_to_le16(layout->private_data_size); ++ cmd_params->data_align = cpu_to_le16(layout->data_align); ++ cmd_params->head_room = cpu_to_le16(layout->data_head_room); ++ cmd_params->tail_room = cpu_to_le16(layout->data_tail_room); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpni_set_offload() - Set DPNI offload configuration. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @type: Type of DPNI offload ++ * @config: Offload configuration. ++ * For checksum offloads, non-zero value enables the offload ++ * ++ * Return: '0' on Success; Error code otherwise. ++ * ++ * @warning Allowed only when DPNI is disabled ++ */ ++ ++int dpni_set_offload(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ enum dpni_offload type, ++ u32 config) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_set_offload *cmd_params; ++ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_set_offload *)cmd.params; ++ cmd_params->dpni_offload = type; ++ cmd_params->config = cpu_to_le32(config); ++ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_offload(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ enum dpni_offload type, ++ u32 *config) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_get_offload *cmd_params; ++ struct dpni_rsp_get_offload *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_get_offload *)cmd.params; ++ cmd_params->dpni_offload = type; ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpni_rsp_get_offload *)cmd.params; ++ *config = le32_to_cpu(rsp_params->config); ++ ++ return 0; ++} ++ ++/** ++ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used ++ * for enqueue operations ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @qtype: Type of queue to receive QDID for ++ * @qdid: Returned virtual QDID value that should be used as an argument ++ * in all enqueue operations ++ * ++ * Return: '0' on Success; Error code otherwise. 
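++ *
++ * Illustrative sketch: the QDID is normally fetched once, after the DPNI
++ * is configured, and cached for building enqueue descriptors
++ * (DPNI_QUEUE_TX is assumed to be the Tx member of enum dpni_queue_type):
++ *
++ *	u16 tx_qdid;
++ *
++ *	err = dpni_get_qdid(mc_io, 0, token, DPNI_QUEUE_TX, &tx_qdid);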
++ */ ++int dpni_get_qdid(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ enum dpni_queue_type qtype, ++ u16 *qdid) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_get_qdid *cmd_params; ++ struct dpni_rsp_get_qdid *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_get_qdid *)cmd.params; ++ cmd_params->qtype = qtype; ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpni_rsp_get_qdid *)cmd.params; ++ *qdid = le16_to_cpu(rsp_params->qdid); ++ ++ return 0; ++} ++ ++/** ++ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer) ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @data_offset: Tx data offset (from start of buffer) ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 *data_offset) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_rsp_get_tx_data_offset *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params; ++ *data_offset = le16_to_cpu(rsp_params->data_offset); ++ ++ return 0; ++} ++ ++/** ++ * dpni_set_link_cfg() - set the link configuration. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @cfg: Link configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_link_cfg(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ const struct dpni_link_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_set_link_cfg *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params; ++ cmd_params->rate = cpu_to_le32(cfg->rate); ++ cmd_params->options = cpu_to_le64(cfg->options); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpni_get_link_state() - Return the link state (either up or down) ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @state: Returned link state; ++ * ++ * Return: '0' on Success; Error code otherwise. 
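++ *
++ * Illustrative sketch, e.g. from a link-change interrupt handler or a
++ * periodic worker:
++ *
++ *	struct dpni_link_state state = { 0 };
++ *
++ *	err = dpni_get_link_state(mc_io, 0, token, &state);
++ *	if (!err && state.up)
++ *		... report carrier on; 'state.rate' and 'state.options'
++ *		    describe the negotiated link ...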
++ */ ++int dpni_get_link_state(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dpni_link_state *state) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_rsp_get_link_state *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpni_rsp_get_link_state *)cmd.params; ++ state->up = dpni_get_field(rsp_params->flags, LINK_STATE); ++ state->rate = le32_to_cpu(rsp_params->rate); ++ state->options = le64_to_cpu(rsp_params->options); ++ ++ return 0; ++} ++ ++/** ++ * dpni_set_tx_shaping() - Set the transmit shaping ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tx_shaper: tx shaping configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_tx_shaping(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ const struct dpni_tx_shaping_cfg *tx_shaper) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_set_tx_shaping *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params; ++ cmd_params->max_burst_size = cpu_to_le16(tx_shaper->max_burst_size); ++ cmd_params->rate_limit = cpu_to_le32(tx_shaper->rate_limit); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpni_set_max_frame_length() - Set the maximum received frame length. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @max_frame_length: Maximum received frame length (in ++ * bytes); frame is discarded if its ++ * length exceeds this value ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 max_frame_length) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_set_max_frame_length *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params; ++ cmd_params->max_frame_length = cpu_to_le16(max_frame_length); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpni_get_max_frame_length() - Get the maximum received frame length. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @max_frame_length: Maximum received frame length (in ++ * bytes); frame is discarded if its ++ * length exceeds this value ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 *max_frame_length) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_rsp_get_max_frame_length *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params; ++ *max_frame_length = le16_to_cpu(rsp_params->max_frame_length); ++ ++ return 0; ++} ++ ++/** ++ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Set to '1' to enable; '0' to disable ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_set_multicast_promisc *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params; ++ dpni_set_field(cmd_params->enable, ENABLE, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpni_get_multicast_promisc() - Get multicast promiscuous mode ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Returns '1' if enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_rsp_get_multicast_promisc *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params; ++ *en = dpni_get_field(rsp_params->enabled, ENABLE); ++ ++ return 0; ++} ++ ++/** ++ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Set to '1' to enable; '0' to disable ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_set_unicast_promisc *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params; ++ dpni_set_field(cmd_params->enable, ENABLE, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpni_get_unicast_promisc() - Get unicast promiscuous mode ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Returns '1' if enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_rsp_get_unicast_promisc *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params; ++ *en = dpni_get_field(rsp_params->enabled, ENABLE); ++ ++ return 0; ++} ++ ++/** ++ * dpni_set_primary_mac_addr() - Set the primary MAC address ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @mac_addr: MAC address to set as primary address ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ const u8 mac_addr[6]) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_set_primary_mac_addr *cmd_params; ++ int i; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params; ++ for (i = 0; i < 6; i++) ++ cmd_params->mac_addr[i] = mac_addr[5 - i]; ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpni_get_primary_mac_addr() - Get the primary MAC address ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @mac_addr: Returned MAC address ++ * ++ * Return: '0' on Success; Error code otherwise. 
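++ *
++ * Illustrative sketch; the byte-order swap between the MC command format
++ * and the usual Linux representation is handled inside this function, so
++ * the returned buffer can be used directly ('net_dev' is hypothetical):
++ *
++ *	u8 mac[6];
++ *
++ *	err = dpni_get_primary_mac_addr(mc_io, 0, token, mac);
++ *	if (!err)
++ *		ether_addr_copy(net_dev->dev_addr, mac);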
++ */ ++int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 mac_addr[6]) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_rsp_get_primary_mac_addr *rsp_params; ++ int i, err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params; ++ for (i = 0; i < 6; i++) ++ mac_addr[5 - i] = rsp_params->mac_addr[i]; ++ ++ return 0; ++} ++ ++/** ++ * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical ++ * port the DPNI is attached to ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @mac_addr: MAC address of the physical port, if any, otherwise 0 ++ * ++ * The primary MAC address is not cleared by this operation. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 mac_addr[6]) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_rsp_get_port_mac_addr *rsp_params; ++ int i, err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params; ++ for (i = 0; i < 6; i++) ++ mac_addr[5 - i] = rsp_params->mac_addr[i]; ++ ++ return 0; ++} ++ ++/** ++ * dpni_add_mac_addr() - Add MAC address filter ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @mac_addr: MAC address to add ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_add_mac_addr(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ const u8 mac_addr[6]) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_add_mac_addr *cmd_params; ++ int i; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params; ++ for (i = 0; i < 6; i++) ++ cmd_params->mac_addr[i] = mac_addr[5 - i]; ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpni_remove_mac_addr() - Remove MAC address filter ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @mac_addr: MAC address to remove ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ const u8 mac_addr[6]) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_remove_mac_addr *cmd_params; ++ int i; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params; ++ for (i = 0; i < 6; i++) ++ cmd_params->mac_addr[i] = mac_addr[5 - i]; ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @unicast: Set to '1' to clear unicast addresses ++ * @multicast: Set to '1' to clear multicast addresses ++ * ++ * The primary MAC address is not cleared by this operation. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ int unicast, ++ int multicast) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_clear_mac_filters *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params; ++ dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast); ++ dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @cfg: Traffic class distribution configuration ++ * ++ * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg() ++ * first to prepare the key_cfg_iova parameter ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 tc_id, ++ const struct dpni_rx_tc_dist_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_set_rx_tc_dist *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params; ++ cmd_params->dist_size = cpu_to_le16(cfg->dist_size); ++ cmd_params->tc_id = tc_id; ++ dpni_set_field(cmd_params->flags, DIST_MODE, cfg->dist_mode); ++ dpni_set_field(cmd_params->flags, MISS_ACTION, cfg->fs_cfg.miss_action); ++ cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id); ++ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class ++ * (to select a flow ID) ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @index: Location in the QoS table where to insert the entry. ++ * Only relevant if MASKING is enabled for QoS ++ * classification on this DPNI, it is ignored for exact match. 
++ * @cfg: Flow steering rule to add ++ * @action: Action to be taken as result of a classification hit ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_add_fs_entry(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 tc_id, ++ u16 index, ++ const struct dpni_rule_cfg *cfg, ++ const struct dpni_fs_action_cfg *action) ++{ ++ struct dpni_cmd_add_fs_entry *cmd_params; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params; ++ cmd_params->tc_id = tc_id; ++ cmd_params->key_size = cfg->key_size; ++ cmd_params->index = cpu_to_le16(index); ++ cmd_params->key_iova = cpu_to_le64(cfg->key_iova); ++ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova); ++ cmd_params->options = cpu_to_le16(action->options); ++ cmd_params->flow_id = cpu_to_le16(action->flow_id); ++ cmd_params->flc = cpu_to_le64(action->flc); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific ++ * traffic class ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @cfg: Flow steering rule to remove ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 tc_id, ++ const struct dpni_rule_cfg *cfg) ++{ ++ struct dpni_cmd_remove_fs_entry *cmd_params; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params; ++ cmd_params->tc_id = tc_id; ++ cmd_params->key_size = cfg->key_size; ++ cmd_params->key_iova = cpu_to_le64(cfg->key_iova); ++ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpni_set_congestion_notification() - Set traffic class congestion ++ * notification configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported ++ * @tc_id: Traffic class selection (0-7) ++ * @cfg: congestion notification configuration ++ * ++ * Return: '0' on Success; error code otherwise. 
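++ *
++ * A configuration sketch with purely illustrative values; the
++ * DPNI_CONGESTION_UNIT_BYTES enumerator is an assumption based on the
++ * configuration structure, and 'msg_iova' is a caller-provided DMA
++ * address for the notification message area:
++ *
++ *	struct dpni_congestion_notification_cfg cn_cfg = { 0 };
++ *
++ *	cn_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
++ *	cn_cfg.threshold_entry = 64 * 1024;
++ *	cn_cfg.threshold_exit = 32 * 1024;
++ *	cn_cfg.message_iova = msg_iova;
++ *	err = dpni_set_congestion_notification(mc_io, 0, token,
++ *					       DPNI_QUEUE_TX, 0, &cn_cfg);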
++ */ ++int dpni_set_congestion_notification(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ enum dpni_queue_type qtype, ++ u8 tc_id, ++ const struct dpni_congestion_notification_cfg *cfg) ++{ ++ struct dpni_cmd_set_congestion_notification *cmd_params; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header( ++ DPNI_CMDID_SET_CONGESTION_NOTIFICATION, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params; ++ cmd_params->qtype = qtype; ++ cmd_params->tc = tc_id; ++ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); ++ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode); ++ cmd_params->dest_priority = cfg->dest_cfg.priority; ++ dpni_set_field(cmd_params->type_units, DEST_TYPE, ++ cfg->dest_cfg.dest_type); ++ dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units); ++ cmd_params->message_iova = cpu_to_le64(cfg->message_iova); ++ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx); ++ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry); ++ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpni_set_queue() - Set queue parameters ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @qtype: Type of queue - all queue types are supported, although ++ * the command is ignored for Tx ++ * @tc: Traffic class, in range 0 to NUM_TCS - 1 ++ * @index: Selects the specific queue out of the set allocated for the ++ * same TC. Value must be in range 0 to NUM_QUEUES - 1 ++ * @options: A combination of DPNI_QUEUE_OPT_ values that control what ++ * configuration options are set on the queue ++ * @queue: Queue structure ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_queue(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ enum dpni_queue_type qtype, ++ u8 tc, ++ u8 index, ++ u8 options, ++ const struct dpni_queue *queue) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_set_queue *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_set_queue *)cmd.params; ++ cmd_params->qtype = qtype; ++ cmd_params->tc = tc; ++ cmd_params->index = index; ++ cmd_params->options = options; ++ cmd_params->dest_id = cpu_to_le32(queue->destination.id); ++ cmd_params->dest_prio = queue->destination.priority; ++ dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type); ++ dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control); ++ dpni_set_field(cmd_params->flags, HOLD_ACTIVE, ++ queue->destination.hold_active); ++ cmd_params->flc = cpu_to_le64(queue->flc.value); ++ cmd_params->user_context = cpu_to_le64(queue->user_context); ++ ++ /* send command to mc */ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpni_get_queue() - Get queue parameters ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @qtype: Type of queue - all queue types are supported ++ * @tc: Traffic class, in range 0 to NUM_TCS - 1 ++ * @index: Selects the specific queue out of the set allocated for the ++ * same TC. 
Value must be in range 0 to NUM_QUEUES - 1 ++ * @queue: Queue configuration structure ++ * @qid: Queue identification ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_queue(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ enum dpni_queue_type qtype, ++ u8 tc, ++ u8 index, ++ struct dpni_queue *queue, ++ struct dpni_queue_id *qid) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_get_queue *cmd_params; ++ struct dpni_rsp_get_queue *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_get_queue *)cmd.params; ++ cmd_params->qtype = qtype; ++ cmd_params->tc = tc; ++ cmd_params->index = index; ++ ++ /* send command to mc */ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpni_rsp_get_queue *)cmd.params; ++ queue->destination.id = le32_to_cpu(rsp_params->dest_id); ++ queue->destination.priority = rsp_params->dest_prio; ++ queue->destination.type = dpni_get_field(rsp_params->flags, ++ DEST_TYPE); ++ queue->flc.stash_control = dpni_get_field(rsp_params->flags, ++ STASH_CTRL); ++ queue->destination.hold_active = dpni_get_field(rsp_params->flags, ++ HOLD_ACTIVE); ++ queue->flc.value = le64_to_cpu(rsp_params->flc); ++ queue->user_context = le64_to_cpu(rsp_params->user_context); ++ qid->fqid = le32_to_cpu(rsp_params->fqid); ++ qid->qdbin = le16_to_cpu(rsp_params->qdbin); ++ ++ return 0; ++} ++ ++/** ++ * dpni_get_statistics() - Get DPNI statistics ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @page: Selects the statistics page to retrieve, see ++ * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2. ++ * @stat: Structure containing the statistics ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_statistics(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 page, ++ union dpni_statistics *stat) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_get_statistics *cmd_params; ++ struct dpni_rsp_get_statistics *rsp_params; ++ int i, err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_get_statistics *)cmd.params; ++ cmd_params->page_number = page; ++ ++ /* send command to mc */ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpni_rsp_get_statistics *)cmd.params; ++ for (i = 0; i < DPNI_STATISTICS_CNT; i++) ++ stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]); ++ ++ return 0; ++} ++ ++/** ++ * dpni_reset_statistics() - Clears DPNI statistics ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * ++ * Return: '0' on Success; Error code otherwise. 
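++ *
++ * Illustrative sketch: clear the counters, then gather all pages later
++ * via dpni_get_statistics(), consuming each page before requesting the
++ * next one:
++ *
++ *	union dpni_statistics stats;
++ *	int page;
++ *
++ *	err = dpni_reset_statistics(mc_io, 0, token);
++ *	...
++ *	for (page = 0; page < 3; page++)
++ *		err = dpni_get_statistics(mc_io, 0, token, page, &stats);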
++ */
++int dpni_reset_statistics(struct fsl_mc_io *mc_io,
++			  u32 cmd_flags,
++			  u16 token)
++{
++	struct mc_command cmd = { 0 };
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS,
++					  cmd_flags,
++					  token);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_set_taildrop() - Set taildrop per queue or TC
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cg_point: Congestion point
++ * @qtype: Queue type on which the taildrop is configured.
++ *	   Only Rx queues are supported for now
++ * @tc: Traffic class to apply this taildrop to
++ * @index: Index of the queue if the DPNI supports multiple queues for
++ *	   traffic distribution. Ignored if @cg_point is not 0.
++ * @taildrop: Taildrop structure
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_taildrop(struct fsl_mc_io *mc_io,
++		      u32 cmd_flags,
++		      u16 token,
++		      enum dpni_congestion_point cg_point,
++		      enum dpni_queue_type qtype,
++		      u8 tc,
++		      u8 index,
++		      struct dpni_taildrop *taildrop)
++{
++	struct mc_command cmd = { 0 };
++	struct dpni_cmd_set_taildrop *cmd_params;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params;
++	cmd_params->congestion_point = cg_point;
++	cmd_params->qtype = qtype;
++	cmd_params->tc = tc;
++	cmd_params->index = index;
++	dpni_set_field(cmd_params->enable, ENABLE, taildrop->enable);
++	cmd_params->units = taildrop->units;
++	cmd_params->threshold = cpu_to_le32(taildrop->threshold);
++
++	/* send command to mc */
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_get_taildrop() - Get taildrop information
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cg_point: Congestion point
++ * @qtype: Queue type on which the taildrop is configured.
++ *	   Only Rx queues are supported for now
++ * @tc: Traffic class to apply this taildrop to
++ * @index: Index of the queue if the DPNI supports multiple queues for
++ *	   traffic distribution. Ignored if @cg_point is not 0.
++ * @taildrop: Taildrop structure
++ *
++ * Return: '0' on Success; Error code otherwise.
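++ *
++ * Illustrative read-back sketch mirroring dpni_set_taildrop(); it assumes
++ * that a @cg_point value of 0 addresses a single Rx queue and that
++ * DPNI_QUEUE_RX is the Rx member of enum dpni_queue_type:
++ *
++ *	struct dpni_taildrop td = { 0 };
++ *
++ *	err = dpni_get_taildrop(mc_io, 0, token, 0, DPNI_QUEUE_RX,
++ *				tc, q_idx, &td);
++ *	if (!err && td.enable)
++ *		... active threshold is td.threshold, in td.units ...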
++ */ ++int dpni_get_taildrop(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ enum dpni_congestion_point cg_point, ++ enum dpni_queue_type qtype, ++ u8 tc, ++ u8 index, ++ struct dpni_taildrop *taildrop) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpni_cmd_get_taildrop *cmd_params; ++ struct dpni_rsp_get_taildrop *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params; ++ cmd_params->congestion_point = cg_point; ++ cmd_params->qtype = qtype; ++ cmd_params->tc = tc; ++ cmd_params->index = index; ++ ++ /* send command to mc */ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params; ++ taildrop->enable = dpni_get_field(rsp_params->enable, ENABLE); ++ taildrop->units = rsp_params->units; ++ taildrop->threshold = le32_to_cpu(rsp_params->threshold); ++ ++ return 0; ++} +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h +@@ -0,0 +1,989 @@ ++/* Copyright 2013-2016 Freescale Semiconductor Inc. ++ * Copyright 2016 NXP ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */
++#ifndef __FSL_DPNI_H
++#define __FSL_DPNI_H
++
++#include "dpkg.h"
++
++struct fsl_mc_io;
++
++/**
++ * Data Path Network Interface API
++ * Contains initialization APIs and runtime control APIs for DPNI
++ */
++
++/** General DPNI macros */
++
++/**
++ * Maximum number of traffic classes
++ */
++#define DPNI_MAX_TC				8
++/**
++ * Maximum number of buffer pools per DPNI
++ */
++#define DPNI_MAX_DPBP				8
++
++/**
++ * All traffic classes considered; see dpni_set_queue()
++ */
++#define DPNI_ALL_TCS				(u8)(-1)
++/**
++ * All flows within traffic class considered; see dpni_set_queue()
++ */
++#define DPNI_ALL_TC_FLOWS			(u16)(-1)
++/**
++ * Generate new flow ID; see dpni_set_queue()
++ */
++#define DPNI_NEW_FLOW_ID			(u16)(-1)
++
++/**
++ * Tx traffic is always released to a buffer pool on transmit; there are no
++ * resources allocated to have the frames confirmed back to the source after
++ * transmission.
++ */
++#define DPNI_OPT_TX_FRM_RELEASE			0x000001
++/**
++ * Disables support for MAC address filtering for addresses other than the
++ * primary MAC address. This affects both unicast and multicast. Promiscuous
++ * mode can still be enabled/disabled for both unicast and multicast. If
++ * promiscuous mode is disabled, only traffic matching the primary MAC
++ * address will be accepted.
++ */
++#define DPNI_OPT_NO_MAC_FILTER			0x000002
++/**
++ * Allocate policers for this DPNI. They can be used to rate-limit traffic on
++ * a per traffic class (TC) basis.
++ */
++#define DPNI_OPT_HAS_POLICING			0x000004
++/**
++ * Congestion can be managed in several ways, allowing the buffer pool to
++ * deplete on ingress, applying taildrop on each queue, or using congestion
++ * groups for sets of queues. If set, this option configures a single
++ * congestion group across all TCs. If reset, a congestion group is allocated
++ * for each TC. Only relevant if the DPNI has multiple traffic classes.
++ */
++#define DPNI_OPT_SHARED_CONGESTION		0x000008
++/**
++ * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all
++ * look-ups are exact match. Note that TCAM is not available on LS1088 and its
++ * variants. Setting this bit on these SoCs will trigger an error.
++ */
++#define DPNI_OPT_HAS_KEY_MASKING		0x000010
++/**
++ * Disables the flow steering table.
++ */ ++#define DPNI_OPT_NO_FS 0x000020 ++ ++int dpni_open(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ int dpni_id, ++ u16 *token); ++ ++int dpni_close(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ ++/** ++ * struct dpni_pools_cfg - Structure representing buffer pools configuration ++ * @num_dpbp: Number of DPBPs ++ * @pools: Array of buffer pools parameters; The number of valid entries ++ * must match 'num_dpbp' value ++ */ ++struct dpni_pools_cfg { ++ u8 num_dpbp; ++ /** ++ * struct pools - Buffer pools parameters ++ * @dpbp_id: DPBP object ID ++ * @buffer_size: Buffer size ++ * @backup_pool: Backup pool ++ */ ++ struct { ++ int dpbp_id; ++ u16 buffer_size; ++ int backup_pool; ++ } pools[DPNI_MAX_DPBP]; ++}; ++ ++int dpni_set_pools(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ const struct dpni_pools_cfg *cfg); ++ ++int dpni_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ ++int dpni_disable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ ++int dpni_is_enabled(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ int *en); ++ ++int dpni_reset(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ ++/** ++ * DPNI IRQ Index and Events ++ */ ++ ++/** ++ * IRQ index ++ */ ++#define DPNI_IRQ_INDEX 0 ++/** ++ * IRQ event - indicates a change in link state ++ */ ++#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001 ++ ++int dpni_set_irq_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u8 en); ++ ++int dpni_get_irq_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u8 *en); ++ ++int dpni_set_irq_mask(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 mask); ++ ++int dpni_get_irq_mask(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 *mask); ++ ++int dpni_get_irq_status(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 *status); ++ ++int dpni_clear_irq_status(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 status); ++ ++/** ++ * struct dpni_attr - Structure representing DPNI attributes ++ * @options: Any combination of the following options: ++ * DPNI_OPT_TX_FRM_RELEASE ++ * DPNI_OPT_NO_MAC_FILTER ++ * DPNI_OPT_HAS_POLICING ++ * DPNI_OPT_SHARED_CONGESTION ++ * DPNI_OPT_HAS_KEY_MASKING ++ * DPNI_OPT_NO_FS ++ * @num_queues: Number of Tx and Rx queues used for traffic distribution. ++ * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI. ++ * @mac_filter_entries: Number of entries in the MAC address filtering table. ++ * @vlan_filter_entries: Number of entries in the VLAN address filtering table. ++ * @qos_entries: Number of entries in the QoS classification table. ++ * @fs_entries: Number of entries in the flow steering table. ++ * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger ++ * than this when adding QoS entries will result in an error. ++ * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a ++ * key larger than this when composing the hash + FS key will ++ * result in an error. ++ * @wriop_version: Version of WRIOP HW block. The 3 version values are stored ++ * on 6, 5, 5 bits respectively. 
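++ *	(For example, assuming the major version occupies the most
++ *	significant bits of this 6/5/5 layout, a raw wriop_version of
++ *	0x0441 would decode as major 1, minor 2, revision 1; illustrative
++ *	decoding only.)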
++ */
++struct dpni_attr {
++	u32 options;
++	u8 num_queues;
++	u8 num_tcs;
++	u8 mac_filter_entries;
++	u8 vlan_filter_entries;
++	u8 qos_entries;
++	u16 fs_entries;
++	u8 qos_key_size;
++	u8 fs_key_size;
++	u16 wriop_version;
++};
++
++int dpni_get_attributes(struct fsl_mc_io *mc_io,
++			u32 cmd_flags,
++			u16 token,
++			struct dpni_attr *attr);
++
++/**
++ * DPNI errors
++ */
++
++/**
++ * Extract out of frame header error
++ */
++#define DPNI_ERROR_EOFHE	0x00020000
++/**
++ * Frame length error
++ */
++#define DPNI_ERROR_FLE		0x00002000
++/**
++ * Frame physical error
++ */
++#define DPNI_ERROR_FPE		0x00001000
++/**
++ * Parsing header error
++ */
++#define DPNI_ERROR_PHE		0x00000020
++/**
++ * Parser L3 checksum error
++ */
++#define DPNI_ERROR_L3CE		0x00000004
++/**
++ * Parser L4 checksum error
++ */
++#define DPNI_ERROR_L4CE		0x00000001
++
++/**
++ * enum dpni_error_action - Defines DPNI behavior for errors
++ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
++ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
++ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
++ */
++enum dpni_error_action {
++	DPNI_ERROR_ACTION_DISCARD = 0,
++	DPNI_ERROR_ACTION_CONTINUE = 1,
++	DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
++};
++
++/**
++ * struct dpni_error_cfg - Structure representing DPNI errors treatment
++ * @errors: Errors mask; use any combination of 'DPNI_ERROR_' values
++ * @error_action: The desired action for the errors mask
++ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
++ *	status (FAS); relevant only for the non-discard action
++ */
++struct dpni_error_cfg {
++	u32 errors;
++	enum dpni_error_action error_action;
++	int set_frame_annotation;
++};
++
++int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
++			     u32 cmd_flags,
++			     u16 token,
++			     struct dpni_error_cfg *cfg);
++
++/**
++ * DPNI buffer layout modification options
++ */
++
++/**
++ * Select to modify the time-stamp setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP		0x00000001
++/**
++ * Select to modify the parser-result setting; not applicable for Tx
++ */
++#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT	0x00000002
++/**
++ * Select to modify the frame-status setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS	0x00000004
++/**
++ * Select to modify the private-data-size setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE	0x00000008
++/**
++ * Select to modify the data-alignment setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN		0x00000010
++/**
++ * Select to modify the data-head-room setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM	0x00000020
++/**
++ * Select to modify the data-tail-room setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM	0x00000040
++
++/**
++ * struct dpni_buffer_layout - Structure representing DPNI buffer layout
++ * @options: Flags representing the suggested modifications to the buffer
++ *	layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_' flags
++ * @pass_timestamp: Pass timestamp value
++ * @pass_parser_result: Pass parser results
++ * @pass_frame_status: Pass frame status
++ * @private_data_size: Size kept for private data (in bytes)
++ * @data_align: Data alignment
++ * @data_head_room: Data head room
++ * @data_tail_room: Data tail room
++ */
++struct dpni_buffer_layout {
++	u32 options;
++	int pass_timestamp;
++	int pass_parser_result;
++	int pass_frame_status;
++	u16 private_data_size;
++	u16 data_align;
++	u16 data_head_room;
++	u16 data_tail_room;
++};
++
++/**
++ * enum dpni_queue_type - Identifies a type of queue targeted by the command
++ * @DPNI_QUEUE_RX: Rx queue
++ * @DPNI_QUEUE_TX: Tx queue
++ * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
++ * @DPNI_QUEUE_RX_ERR: Rx error queue
++ */
++enum dpni_queue_type {
++	DPNI_QUEUE_RX,
++	DPNI_QUEUE_TX,
++	DPNI_QUEUE_TX_CONFIRM,
++	DPNI_QUEUE_RX_ERR,
++};
++
++int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
++			   u32 cmd_flags,
++			   u16 token,
++			   enum dpni_queue_type qtype,
++			   struct dpni_buffer_layout *layout);
++
++int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
++			   u32 cmd_flags,
++			   u16 token,
++			   enum dpni_queue_type qtype,
++			   const struct dpni_buffer_layout *layout);
++
++/**
++ * enum dpni_offload - Identifies a type of offload targeted by the command
++ * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation
++ * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation
++ * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation
++ * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation
++ */
++enum dpni_offload {
++	DPNI_OFF_RX_L3_CSUM,
++	DPNI_OFF_RX_L4_CSUM,
++	DPNI_OFF_TX_L3_CSUM,
++	DPNI_OFF_TX_L4_CSUM,
++};
++
++int dpni_set_offload(struct fsl_mc_io *mc_io,
++		     u32 cmd_flags,
++		     u16 token,
++		     enum dpni_offload type,
++		     u32 config);
++
++int dpni_get_offload(struct fsl_mc_io *mc_io,
++		     u32 cmd_flags,
++		     u16 token,
++		     enum dpni_offload type,
++		     u32 *config);
++
++int dpni_get_qdid(struct fsl_mc_io *mc_io,
++		  u32 cmd_flags,
++		  u16 token,
++		  enum dpni_queue_type qtype,
++		  u16 *qdid);
++
++int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
++			    u32 cmd_flags,
++			    u16 token,
++			    u16 *data_offset);
++
++#define DPNI_STATISTICS_CNT		7
++
++union dpni_statistics {
++	/**
++	 * struct page_0 - Page_0 statistics structure
++	 * @ingress_all_frames: Ingress frame count
++	 * @ingress_all_bytes: Ingress byte count
++	 * @ingress_multicast_frames: Ingress multicast frame count
++	 * @ingress_multicast_bytes: Ingress multicast byte count
++	 * @ingress_broadcast_frames: Ingress broadcast frame count
++	 * @ingress_broadcast_bytes: Ingress broadcast byte count
++	 */
++	struct {
++		u64 ingress_all_frames;
++		u64 ingress_all_bytes;
++		u64 ingress_multicast_frames;
++		u64 ingress_multicast_bytes;
++		u64 ingress_broadcast_frames;
++		u64 ingress_broadcast_bytes;
++	} page_0;
++	/**
++	 * struct page_1 - Page_1 statistics structure
++	 * @egress_all_frames: Egress frame count
++	 * @egress_all_bytes: Egress byte count
++	 * @egress_multicast_frames: Egress multicast frame count
++	 * @egress_multicast_bytes: Egress multicast byte count
++	 * @egress_broadcast_frames: Egress broadcast frame count
++	 * @egress_broadcast_bytes: Egress broadcast byte count
++	 */
++	struct {
++		u64 egress_all_frames;
++		u64 egress_all_bytes;
++		u64 egress_multicast_frames;
++		u64 egress_multicast_bytes;
++		u64 egress_broadcast_frames;
++		u64 egress_broadcast_bytes;
++	} page_1;
++	/**
++	 * struct page_2 - Page_2 statistics structure
++	 * @ingress_filtered_frames: Ingress filtered frame count
++	 * @ingress_discarded_frames: Ingress discarded frame count
++	 * @ingress_nobuffer_discards: Ingress discarded frame count
++	 *	due to lack of buffers
++	 * @egress_discarded_frames: Egress discarded frame count
++	 * @egress_confirmed_frames: Egress confirmed frame count
++	 */
++	struct {
++		u64 ingress_filtered_frames;
++		u64 ingress_discarded_frames;
++		u64 ingress_nobuffer_discards;
++		u64 egress_discarded_frames;
++		u64 egress_confirmed_frames;
++	} page_2;
++	/**
++	 * struct raw - raw statistics structure
++	 */
++	struct {
++		u64 counter[DPNI_STATISTICS_CNT];
++	} raw;
++};
++
++int dpni_get_statistics(struct fsl_mc_io *mc_io,
++			u32 cmd_flags,
++			u16 token,
++			u8 page,
++			union dpni_statistics *stat);
++
++int dpni_reset_statistics(struct fsl_mc_io *mc_io,
++			  u32 cmd_flags,
++			  u16 token);
++
++/**
++ * Enable auto-negotiation
++ */
++#define DPNI_LINK_OPT_AUTONEG		0x0000000000000001ULL
++/**
++ * Enable half-duplex mode
++ */
++#define DPNI_LINK_OPT_HALF_DUPLEX	0x0000000000000002ULL
++/**
++ * Enable pause frames
++ */
++#define DPNI_LINK_OPT_PAUSE		0x0000000000000004ULL
++/**
++ * Enable asymmetric pause frames
++ */
++#define DPNI_LINK_OPT_ASYM_PAUSE	0x0000000000000008ULL
++
++/**
++ * struct dpni_link_cfg - Structure representing DPNI link configuration
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPNI_LINK_OPT_' values
++ */
++struct dpni_link_cfg {
++	u32 rate;
++	u64 options;
++};
++
++int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
++		      u32 cmd_flags,
++		      u16 token,
++		      const struct dpni_link_cfg *cfg);
++
++/**
++ * struct dpni_link_state - Structure representing DPNI link state
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPNI_LINK_OPT_' values
++ * @up: Link state; '0' for down, '1' for up
++ */
++struct dpni_link_state {
++	u32 rate;
++	u64 options;
++	int up;
++};
++
++int dpni_get_link_state(struct fsl_mc_io *mc_io,
++			u32 cmd_flags,
++			u16 token,
++			struct dpni_link_state *state);
++
++/**
++ * struct dpni_tx_shaping_cfg - Structure representing DPNI tx shaping
++ *	configuration
++ * @rate_limit: rate in Mbps
++ * @max_burst_size: burst size in bytes (up to 64KB)
++ */
++struct dpni_tx_shaping_cfg {
++	u32 rate_limit;
++	u16 max_burst_size;
++};
++
++int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
++			u32 cmd_flags,
++			u16 token,
++			const struct dpni_tx_shaping_cfg *tx_shaper);
++
++int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
++			      u32 cmd_flags,
++			      u16 token,
++			      u16 max_frame_length);
++
++int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
++			      u32 cmd_flags,
++			      u16 token,
++			      u16 *max_frame_length);
++
++int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
++			       u32 cmd_flags,
++			       u16 token,
++			       int en);
++
++int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
++			       u32 cmd_flags,
++			       u16 token,
++			       int *en);
++
++int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
++			     u32 cmd_flags,
++			     u16 token,
++			     int en);
++
++int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
++			     u32 cmd_flags,
++			     u16 token,
++			     int *en);
++
++int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
++			      u32 cmd_flags,
++			      u16 token,
++			      const u8 mac_addr[6]);
++
++int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
++			      u32 cmd_flags,
++			      u16 token,
++			      u8 mac_addr[6]);
++
++int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
++			   u32 cmd_flags,
++			   u16 token,
++			   u8 mac_addr[6]);
++
++int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
++		      u32 cmd_flags,
++		      u16 token,
++		      const u8 mac_addr[6]);
++
++int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
++			 u32 cmd_flags,
++			 u16 token,
++			 const u8 mac_addr[6]);
++
++int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
++			   u32 cmd_flags,
++			   u16 token,
++			   int unicast,
++			   int multicast);
++
++/**
++ * enum dpni_dist_mode - DPNI distribution mode
++ * @DPNI_DIST_MODE_NONE: No distribution
++ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
++ *	the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
++ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
++ *	the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
++ */
++enum dpni_dist_mode {
++	DPNI_DIST_MODE_NONE = 0,
++	DPNI_DIST_MODE_HASH = 1,
++	DPNI_DIST_MODE_FS = 2
++};
++
++/**
++ * enum dpni_fs_miss_action - DPNI
Flow Steering miss action ++ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame ++ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id ++ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash ++ */ ++enum dpni_fs_miss_action { ++ DPNI_FS_MISS_DROP = 0, ++ DPNI_FS_MISS_EXPLICIT_FLOWID = 1, ++ DPNI_FS_MISS_HASH = 2 ++}; ++ ++/** ++ * struct dpni_fs_tbl_cfg - Flow Steering table configuration ++ * @miss_action: Miss action selection ++ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID' ++ */ ++struct dpni_fs_tbl_cfg { ++ enum dpni_fs_miss_action miss_action; ++ u16 default_flow_id; ++}; ++ ++int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, ++ u8 *key_cfg_buf); ++ ++/** ++ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration ++ * @dist_size: Set the distribution size; ++ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96, ++ * 112,128,192,224,256,384,448,512,768,896,1024 ++ * @dist_mode: Distribution mode ++ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with ++ * the extractions to be used for the distribution key by calling ++ * dpni_prepare_key_cfg() relevant only when ++ * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0' ++ * @fs_cfg: Flow Steering table configuration; only relevant if ++ * 'dist_mode = DPNI_DIST_MODE_FS' ++ */ ++struct dpni_rx_tc_dist_cfg { ++ u16 dist_size; ++ enum dpni_dist_mode dist_mode; ++ u64 key_cfg_iova; ++ struct dpni_fs_tbl_cfg fs_cfg; ++}; ++ ++int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 tc_id, ++ const struct dpni_rx_tc_dist_cfg *cfg); ++ ++/** ++ * enum dpni_dest - DPNI destination types ++ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and ++ * does not generate FQDAN notifications; user is expected to ++ * dequeue from the queue based on polling or other user-defined ++ * method ++ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN ++ * notifications to the specified DPIO; user is expected to dequeue ++ * from the queue only after notification is received ++ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate ++ * FQDAN notifications, but is connected to the specified DPCON ++ * object; user is expected to dequeue from the DPCON channel ++ */ ++enum dpni_dest { ++ DPNI_DEST_NONE = 0, ++ DPNI_DEST_DPIO = 1, ++ DPNI_DEST_DPCON = 2 ++}; ++ ++/** ++ * struct dpni_queue - Queue structure ++ * @user_context: User data, presented to the user along with any frames from ++ * this queue. Not relevant for Tx queues. ++ */ ++struct dpni_queue { ++/** ++ * struct destination - Destination structure ++ * @id: ID of the destination, only relevant if DEST_TYPE is > 0. ++ * Identifies either a DPIO or a DPCON object. Not relevant for ++ * Tx queues. ++ * @type: May be one of the following: ++ * 0 - No destination, queue can be manually queried, but will not ++ * push traffic or notifications to a DPIO; ++ * 1 - The destination is a DPIO. When traffic becomes available in ++ * the queue a FQDAN (FQ data available notification) will be ++ * generated to selected DPIO; ++ * 2 - The destination is a DPCON. The queue is associated with a ++ * DPCON object for the purpose of scheduling between multiple ++ * queues. The DPCON may be independently configured to ++ * generate notifications. Not relevant for Tx queues. 
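++ *	These numeric values correspond to enum dpni_dest above
++ *	(DPNI_DEST_NONE, DPNI_DEST_DPIO and DPNI_DEST_DPCON, respectively).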
++ * @hold_active: Hold active, maintains a queue scheduled for longer ++ * in a DPIO during dequeue to reduce spread of traffic. ++ * Only relevant if queues are not affined to a single DPIO. ++ */ ++ struct { ++ u16 id; ++ enum dpni_dest type; ++ char hold_active; ++ u8 priority; ++ } destination; ++ u64 user_context; ++ struct { ++ u64 value; ++ char stash_control; ++ } flc; ++}; ++ ++/** ++ * struct dpni_queue_id - Queue identification, used for enqueue commands ++ * or queue control ++ * @fqid: FQID used for enqueueing to and/or configuration of this specific FQ ++ * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI. Only relevant ++ * for Tx queues. ++ */ ++struct dpni_queue_id { ++ u32 fqid; ++ u16 qdbin; ++}; ++ ++/** ++ * Set User Context ++ */ ++#define DPNI_QUEUE_OPT_USER_CTX 0x00000001 ++#define DPNI_QUEUE_OPT_DEST 0x00000002 ++#define DPNI_QUEUE_OPT_FLC 0x00000004 ++#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008 ++ ++int dpni_set_queue(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ enum dpni_queue_type qtype, ++ u8 tc, ++ u8 index, ++ u8 options, ++ const struct dpni_queue *queue); ++ ++int dpni_get_queue(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ enum dpni_queue_type qtype, ++ u8 tc, ++ u8 index, ++ struct dpni_queue *queue, ++ struct dpni_queue_id *qid); ++ ++/** ++ * enum dpni_congestion_unit - DPNI congestion units ++ * @DPNI_CONGESTION_UNIT_BYTES: bytes units ++ * @DPNI_CONGESTION_UNIT_FRAMES: frames units ++ */ ++enum dpni_congestion_unit { ++ DPNI_CONGESTION_UNIT_BYTES = 0, ++ DPNI_CONGESTION_UNIT_FRAMES ++}; ++ ++/** ++ * enum dpni_congestion_point - Structure representing congestion point ++ * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and ++ * QUEUE_INDEX ++ * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used to ++ * define the DPNI this can be either per TC (default) or per ++ * interface (DPNI_OPT_SHARED_CONGESTION set at DPNI create). ++ * QUEUE_INDEX is ignored if this type is used. 
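++ *	(E.g. passing DPNI_CP_GROUP to dpni_set_taildrop() on a DPNI
++ *	created without DPNI_OPT_SHARED_CONGESTION applies one threshold
++ *	per TC group; illustrative usage.)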
++ */
++enum dpni_congestion_point {
++	DPNI_CP_QUEUE,
++	DPNI_CP_GROUP,
++};
++
++/**
++ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
++ * @dest_type: Destination type
++ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
++ * @priority: Priority selection within the DPIO or DPCON channel; valid
++ *	values are 0-1 or 0-7, depending on the number of priorities in that
++ *	channel; not relevant for 'DPNI_DEST_NONE' option
++ */
++struct dpni_dest_cfg {
++	enum dpni_dest dest_type;
++	int dest_id;
++	u8 priority;
++};
++
++/* DPNI congestion options */
++
++/**
++ * CSCN message is written to message_iova once entering a
++ * congestion state (see 'threshold_entry')
++ */
++#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER	0x00000001
++/**
++ * CSCN message is written to message_iova once exiting a
++ * congestion state (see 'threshold_exit')
++ */
++#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT		0x00000002
++/**
++ * CSCN write will attempt to allocate into a cache (coherent write);
++ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_' is selected
++ */
++#define DPNI_CONG_OPT_COHERENT_WRITE		0x00000004
++/**
++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
++ * DPIO/DPCON's WQ channel once entering a congestion state
++ * (see 'threshold_entry')
++ */
++#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER	0x00000008
++/**
++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
++ * DPIO/DPCON's WQ channel once exiting a congestion state
++ * (see 'threshold_exit')
++ */
++#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT	0x00000010
++/**
++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
++ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
++ */
++#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED	0x00000020
++
++/**
++ * struct dpni_congestion_notification_cfg - congestion notification
++ *	configuration
++ * @units: units type
++ * @threshold_entry: above this threshold we enter a congestion state;
++ *	set it to '0' to disable it
++ * @threshold_exit: below this threshold we exit the congestion state
++ * @message_ctx: The context that will be part of the CSCN message
++ * @message_iova: I/O virtual address (must be in DMA-able memory),
++ *	must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_' is
++ *	contained in 'options'
++ * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
++ * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_' values
++ */
++struct dpni_congestion_notification_cfg {
++	enum dpni_congestion_unit units;
++	u32 threshold_entry;
++	u32 threshold_exit;
++	u64 message_ctx;
++	u64 message_iova;
++	struct dpni_dest_cfg dest_cfg;
++	u16 notification_mode;
++};
++
++int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
++				     u32 cmd_flags,
++				     u16 token,
++				     enum dpni_queue_type qtype,
++				     u8 tc_id,
++			const struct dpni_congestion_notification_cfg *cfg);
++
++/**
++ * struct dpni_taildrop - Structure representing the taildrop
++ * @enable: Indicates whether the taildrop is active or not.
++ * @units: Indicates the unit of THRESHOLD. Queue taildrop only supports
++ *	byte units; this field is ignored and assumed = 0 if
++ *	CONGESTION_POINT is 0.
++ * @threshold: Threshold value, in units identified by UNITS field. Value 0
++ *	cannot be used as a valid taildrop threshold; THRESHOLD must
++ *	be > 0 if the taildrop is enabled.
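++ *
++ * A minimal call sequence (illustrative only; assumes an opened DPNI
++ * 'token' and zero 'cmd_flags') capping a single Rx queue at 64 KiB:
++ *
++ *	struct dpni_taildrop td = {
++ *		.enable = 1,
++ *		.units = DPNI_CONGESTION_UNIT_BYTES,
++ *		.threshold = 64 * 1024,
++ *	};
++ *	err = dpni_set_taildrop(mc_io, 0, token, DPNI_CP_QUEUE,
++ *				DPNI_QUEUE_RX, 0, 0, &td);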
++ */
++struct dpni_taildrop {
++	char enable;
++	enum dpni_congestion_unit units;
++	u32 threshold;
++};
++
++int dpni_set_taildrop(struct fsl_mc_io *mc_io,
++		      u32 cmd_flags,
++		      u16 token,
++		      enum dpni_congestion_point cg_point,
++		      enum dpni_queue_type q_type,
++		      u8 tc,
++		      u8 q_index,
++		      struct dpni_taildrop *taildrop);
++
++int dpni_get_taildrop(struct fsl_mc_io *mc_io,
++		      u32 cmd_flags,
++		      u16 token,
++		      enum dpni_congestion_point cg_point,
++		      enum dpni_queue_type q_type,
++		      u8 tc,
++		      u8 q_index,
++		      struct dpni_taildrop *taildrop);
++
++/**
++ * struct dpni_rule_cfg - Rule configuration for table lookup
++ * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
++ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
++ * @key_size: key and mask size (in bytes)
++ */
++struct dpni_rule_cfg {
++	u64 key_iova;
++	u64 mask_iova;
++	u8 key_size;
++};
++
++/**
++ * Discard matching traffic. If set, this takes precedence over any other
++ * configuration and matching traffic is always discarded.
++ */
++#define DPNI_FS_OPT_DISCARD	0x1
++
++/**
++ * Set FLC value. If set, the flc member of struct dpni_fs_action_cfg is used
++ * to override the FLC value set per queue.
++ * For more details check the Frame Descriptor section in the hardware
++ * documentation.
++ */
++#define DPNI_FS_OPT_SET_FLC	0x2
++
++/**
++ * Indicates whether the 6 least significant bits of FLC are used for stash
++ * control. If set, the 6 least significant bits in value are interpreted as
++ * follows:
++ *	- bits 0-1: indicates the number of 64 byte units of context that are
++ *	stashed. FLC value is interpreted as a memory address in this case,
++ *	excluding the 6 LS bits.
++ *	- bits 2-3: indicates the number of 64 byte units of frame annotation
++ *	to be stashed. Annotation is placed at FD[ADDR].
++ *	- bits 4-5: indicates the number of 64 byte units of frame data to be
++ *	stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
++ * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
++ */
++#define DPNI_FS_OPT_SET_STASH_CONTROL	0x4
++
++/**
++ * struct dpni_fs_action_cfg - Action configuration for table look-up
++ * @flc: FLC value for traffic matching this rule. Please check the Frame
++ *	Descriptor section in the hardware documentation for more information.
++ * @flow_id: Identifies the Rx queue used for matching traffic. Supported
++ *	values are in range 0 to num_queues-1.
++ * @options: Any combination of DPNI_FS_OPT_ values.
++ */
++struct dpni_fs_action_cfg {
++	u64 flc;
++	u16 flow_id;
++	u16 options;
++};
++
++int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
++		      u32 cmd_flags,
++		      u16 token,
++		      u8 tc_id,
++		      u16 index,
++		      const struct dpni_rule_cfg *cfg,
++		      const struct dpni_fs_action_cfg *action);
++
++int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
++			 u32 cmd_flags,
++			 u16 token,
++			 u8 tc_id,
++			 const struct dpni_rule_cfg *cfg);
++
++#endif /* __FSL_DPNI_H */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/net.h
+@@ -0,0 +1,480 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_NET_H ++#define __FSL_NET_H ++ ++#define LAST_HDR_INDEX 0xFFFFFFFF ++ ++/*****************************************************************************/ ++/* Protocol fields */ ++/*****************************************************************************/ ++ ++/************************* Ethernet fields *********************************/ ++#define NH_FLD_ETH_DA (1) ++#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1) ++#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2) ++#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3) ++#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4) ++#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5) ++#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1) ++ ++#define NH_FLD_ETH_ADDR_SIZE 6 ++ ++/*************************** VLAN fields ***********************************/ ++#define NH_FLD_VLAN_VPRI (1) ++#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1) ++#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2) ++#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3) ++#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4) ++#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1) ++ ++#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \ ++ NH_FLD_VLAN_CFI | \ ++ NH_FLD_VLAN_VID) ++ ++/************************ IP (generic) fields ******************************/ ++#define NH_FLD_IP_VER (1) ++#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2) ++#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3) ++#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4) ++#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5) ++#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6) ++#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7) ++#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8) ++#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1) ++ ++#define NH_FLD_IP_PROTO_SIZE 1 ++ ++/***************************** IPV4 fields *********************************/ ++#define NH_FLD_IPV4_VER (1) ++#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1) ++#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2) ++#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3) ++#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER 
<< 4) ++#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5) ++#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6) ++#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7) ++#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8) ++#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9) ++#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10) ++#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11) ++#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12) ++#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13) ++#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14) ++#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1) ++ ++#define NH_FLD_IPV4_ADDR_SIZE 4 ++#define NH_FLD_IPV4_PROTO_SIZE 1 ++ ++/***************************** IPV6 fields *********************************/ ++#define NH_FLD_IPV6_VER (1) ++#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1) ++#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2) ++#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3) ++#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4) ++#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5) ++#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6) ++#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7) ++#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1) ++ ++#define NH_FLD_IPV6_ADDR_SIZE 16 ++#define NH_FLD_IPV6_NEXT_HDR_SIZE 1 ++ ++/***************************** ICMP fields *********************************/ ++#define NH_FLD_ICMP_TYPE (1) ++#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1) ++#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2) ++#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3) ++#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4) ++#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1) ++ ++#define NH_FLD_ICMP_CODE_SIZE 1 ++#define NH_FLD_ICMP_TYPE_SIZE 1 ++ ++/***************************** IGMP fields *********************************/ ++#define NH_FLD_IGMP_VERSION (1) ++#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1) ++#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2) ++#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3) ++#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1) ++ ++/***************************** TCP fields **********************************/ ++#define NH_FLD_TCP_PORT_SRC (1) ++#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1) ++#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2) ++#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3) ++#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4) ++#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5) ++#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6) ++#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7) ++#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8) ++#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9) ++#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10) ++#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1) ++ ++#define NH_FLD_TCP_PORT_SIZE 2 ++ ++/***************************** UDP fields **********************************/ ++#define NH_FLD_UDP_PORT_SRC (1) ++#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1) ++#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2) ++#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3) ++#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1) ++ ++#define NH_FLD_UDP_PORT_SIZE 2 ++ ++/*************************** UDP-lite fields *******************************/ ++#define NH_FLD_UDP_LITE_PORT_SRC (1) ++#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1) ++#define NH_FLD_UDP_LITE_ALL_FIELDS \ ++ ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1) ++ ++#define NH_FLD_UDP_LITE_PORT_SIZE 2 ++ 
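++/* Note: within each protocol section, the NH_FLD_* masks follow the same
++ * pattern: the first field is bit 0 and each subsequent field shifts left
++ * by one, so e.g. NH_FLD_UDP_ALL_FIELDS == (NH_FLD_UDP_PORT_SRC << 4) - 1
++ * == 0xf covers the four UDP fields above. (Explanatory note; the values
++ * follow directly from the macro definitions.)
++ */
++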
++/*************************** UDP-encap-ESP fields **************************/ ++#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1) ++#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1) ++#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2) ++#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3) ++#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4) ++#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5) ++#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \ ++ ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1) ++ ++#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2 ++#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4 ++ ++/***************************** SCTP fields *********************************/ ++#define NH_FLD_SCTP_PORT_SRC (1) ++#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1) ++#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2) ++#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3) ++#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1) ++ ++#define NH_FLD_SCTP_PORT_SIZE 2 ++ ++/***************************** DCCP fields *********************************/ ++#define NH_FLD_DCCP_PORT_SRC (1) ++#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1) ++#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1) ++ ++#define NH_FLD_DCCP_PORT_SIZE 2 ++ ++/***************************** IPHC fields *********************************/ ++#define NH_FLD_IPHC_CID (1) ++#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1) ++#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2) ++#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3) ++#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4) ++#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1) ++ ++/***************************** SCTP fields *********************************/ ++#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1) ++#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1) ++#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2) ++#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3) ++#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4) ++#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5) ++#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6) ++#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7) ++#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8) ++#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9) ++#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \ ++ ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1) ++ ++/*************************** L2TPV2 fields *********************************/ ++#define NH_FLD_L2TPV2_TYPE_BIT (1) ++#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1) ++#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2) ++#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3) ++#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4) ++#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5) ++#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6) ++#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7) ++#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8) ++#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9) ++#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10) ++#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11) ++#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12) ++#define NH_FLD_L2TPV2_ALL_FIELDS \ ++ ((NH_FLD_L2TPV2_TYPE_BIT 
<< 13) - 1) ++ ++/*************************** L2TPV3 fields *********************************/ ++#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1) ++#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1) ++#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2) ++#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3) ++#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4) ++#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5) ++#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6) ++#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7) ++#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8) ++#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \ ++ ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1) ++ ++#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1) ++#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1) ++#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2) ++#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3) ++#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \ ++ ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1) ++ ++/**************************** PPP fields ***********************************/ ++#define NH_FLD_PPP_PID (1) ++#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1) ++#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1) ++ ++/************************** PPPoE fields ***********************************/ ++#define NH_FLD_PPPOE_VER (1) ++#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1) ++#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2) ++#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3) ++#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4) ++#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5) ++#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6) ++#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1) ++ ++/************************* PPP-Mux fields **********************************/ ++#define NH_FLD_PPPMUX_PID (1) ++#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1) ++#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2) ++#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1) ++ ++/*********************** PPP-Mux sub-frame fields **************************/ ++#define NH_FLD_PPPMUX_SUBFRM_PFF (1) ++#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1) ++#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2) ++#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3) ++#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4) ++#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \ ++ ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1) ++ ++/*************************** LLC fields ************************************/ ++#define NH_FLD_LLC_DSAP (1) ++#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1) ++#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2) ++#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1) ++ ++/*************************** NLPID fields **********************************/ ++#define NH_FLD_NLPID_NLPID (1) ++#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1) ++ ++/*************************** SNAP fields ***********************************/ ++#define NH_FLD_SNAP_OUI (1) ++#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1) ++#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1) ++ ++/*************************** LLC SNAP fields *******************************/ ++#define NH_FLD_LLC_SNAP_TYPE (1) ++#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1) ++ ++#define NH_FLD_ARP_HTYPE (1) ++#define 
NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1) ++#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2) ++#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3) ++#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4) ++#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5) ++#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6) ++#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7) ++#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8) ++#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1) ++ ++/*************************** RFC2684 fields ********************************/ ++#define NH_FLD_RFC2684_LLC (1) ++#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1) ++#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2) ++#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3) ++#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4) ++#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5) ++#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1) ++ ++/*************************** User defined fields ***************************/ ++#define NH_FLD_USER_DEFINED_SRCPORT (1) ++#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1) ++#define NH_FLD_USER_DEFINED_ALL_FIELDS \ ++ ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1) ++ ++/*************************** Payload fields ********************************/ ++#define NH_FLD_PAYLOAD_BUFFER (1) ++#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1) ++#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2) ++#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3) ++#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4) ++#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5) ++#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1) ++ ++/*************************** GRE fields ************************************/ ++#define NH_FLD_GRE_TYPE (1) ++#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1) ++ ++/*************************** MINENCAP fields *******************************/ ++#define NH_FLD_MINENCAP_SRC_IP (1) ++#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1) ++#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2) ++#define NH_FLD_MINENCAP_ALL_FIELDS \ ++ ((NH_FLD_MINENCAP_SRC_IP << 3) - 1) ++ ++/*************************** IPSEC AH fields *******************************/ ++#define NH_FLD_IPSEC_AH_SPI (1) ++#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1) ++#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1) ++ ++/*************************** IPSEC ESP fields ******************************/ ++#define NH_FLD_IPSEC_ESP_SPI (1) ++#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1) ++#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1) ++ ++#define NH_FLD_IPSEC_ESP_SPI_SIZE 4 ++ ++/*************************** MPLS fields ***********************************/ ++#define NH_FLD_MPLS_LABEL_STACK (1) ++#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \ ++ ((NH_FLD_MPLS_LABEL_STACK << 1) - 1) ++ ++/*************************** MACSEC fields *********************************/ ++#define NH_FLD_MACSEC_SECTAG (1) ++#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1) ++ ++/*************************** GTP fields ************************************/ ++#define NH_FLD_GTP_TEID (1) ++ ++/* Protocol options */ ++ ++/* Ethernet options */ ++#define NH_OPT_ETH_BROADCAST 1 ++#define NH_OPT_ETH_MULTICAST 2 ++#define NH_OPT_ETH_UNICAST 3 ++#define NH_OPT_ETH_BPDU 4 ++ ++#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01) ++/* also applicable for broadcast */ ++ ++/* VLAN options */ ++#define 
NH_OPT_VLAN_CFI 1 ++ ++/* IPV4 options */ ++#define NH_OPT_IPV4_UNICAST 1 ++#define NH_OPT_IPV4_MULTICAST 2 ++#define NH_OPT_IPV4_BROADCAST 3 ++#define NH_OPT_IPV4_OPTION 4 ++#define NH_OPT_IPV4_FRAG 5 ++#define NH_OPT_IPV4_INITIAL_FRAG 6 ++ ++/* IPV6 options */ ++#define NH_OPT_IPV6_UNICAST 1 ++#define NH_OPT_IPV6_MULTICAST 2 ++#define NH_OPT_IPV6_OPTION 3 ++#define NH_OPT_IPV6_FRAG 4 ++#define NH_OPT_IPV6_INITIAL_FRAG 5 ++ ++/* General IP options (may be used for any version) */ ++#define NH_OPT_IP_FRAG 1 ++#define NH_OPT_IP_INITIAL_FRAG 2 ++#define NH_OPT_IP_OPTION 3 ++ ++/* Minenc. options */ ++#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1 ++ ++/* GRE. options */ ++#define NH_OPT_GRE_ROUTING_PRESENT 1 ++ ++/* TCP options */ ++#define NH_OPT_TCP_OPTIONS 1 ++#define NH_OPT_TCP_CONTROL_HIGH_BITS 2 ++#define NH_OPT_TCP_CONTROL_LOW_BITS 3 ++ ++/* CAPWAP options */ ++#define NH_OPT_CAPWAP_DTLS 1 ++ ++enum net_prot { ++ NET_PROT_NONE = 0, ++ NET_PROT_PAYLOAD, ++ NET_PROT_ETH, ++ NET_PROT_VLAN, ++ NET_PROT_IPV4, ++ NET_PROT_IPV6, ++ NET_PROT_IP, ++ NET_PROT_TCP, ++ NET_PROT_UDP, ++ NET_PROT_UDP_LITE, ++ NET_PROT_IPHC, ++ NET_PROT_SCTP, ++ NET_PROT_SCTP_CHUNK_DATA, ++ NET_PROT_PPPOE, ++ NET_PROT_PPP, ++ NET_PROT_PPPMUX, ++ NET_PROT_PPPMUX_SUBFRM, ++ NET_PROT_L2TPV2, ++ NET_PROT_L2TPV3_CTRL, ++ NET_PROT_L2TPV3_SESS, ++ NET_PROT_LLC, ++ NET_PROT_LLC_SNAP, ++ NET_PROT_NLPID, ++ NET_PROT_SNAP, ++ NET_PROT_MPLS, ++ NET_PROT_IPSEC_AH, ++ NET_PROT_IPSEC_ESP, ++ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */ ++ NET_PROT_MACSEC, ++ NET_PROT_GRE, ++ NET_PROT_MINENCAP, ++ NET_PROT_DCCP, ++ NET_PROT_ICMP, ++ NET_PROT_IGMP, ++ NET_PROT_ARP, ++ NET_PROT_CAPWAP_DATA, ++ NET_PROT_CAPWAP_CTRL, ++ NET_PROT_RFC2684, ++ NET_PROT_ICMPV6, ++ NET_PROT_FCOE, ++ NET_PROT_FIP, ++ NET_PROT_ISCSI, ++ NET_PROT_GTP, ++ NET_PROT_USER_DEFINED_L2, ++ NET_PROT_USER_DEFINED_L3, ++ NET_PROT_USER_DEFINED_L4, ++ NET_PROT_USER_DEFINED_L5, ++ NET_PROT_USER_DEFINED_SHIM1, ++ NET_PROT_USER_DEFINED_SHIM2, ++ ++ NET_PROT_DUMMY_LAST ++}; ++ ++/*! IEEE8021.Q */ ++#define NH_IEEE8021Q_ETYPE 0x8100 ++#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \ ++ ((((u32)((etype) & 0xFFFF)) << 16) | \ ++ (((u32)((pcp) & 0x07)) << 13) | \ ++ (((u32)((dei) & 0x01)) << 12) | \ ++ (((u32)((vlan_id) & 0xFFF)))) ++ ++#endif /* __FSL_NET_H */ +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethsw/Kconfig +@@ -0,0 +1,6 @@ ++config FSL_DPAA2_ETHSW ++ tristate "DPAA2 Ethernet Switch" ++ depends on FSL_MC_BUS && FSL_DPAA2 ++ default y ++ ---help--- ++ Prototype driver for DPAA2 Ethernet Switch. +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethsw/Makefile +@@ -0,0 +1,10 @@ ++ ++obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o ++ ++dpaa2-ethsw-objs := switch.o dpsw.o ++ ++all: ++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules ++ ++clean: ++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h +@@ -0,0 +1,851 @@ ++/* Copyright 2013-2016 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. 
++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPSW_CMD_H ++#define __FSL_DPSW_CMD_H ++ ++/* DPSW Version */ ++#define DPSW_VER_MAJOR 8 ++#define DPSW_VER_MINOR 0 ++ ++#define DPSW_CMD_BASE_VERSION 1 ++#define DPSW_CMD_ID_OFFSET 4 ++ ++#define DPSW_CMD_ID(id) (((id) << DPSW_CMD_ID_OFFSET) | DPSW_CMD_BASE_VERSION) ++ ++/* Command IDs */ ++#define DPSW_CMDID_CLOSE DPSW_CMD_ID(0x800) ++#define DPSW_CMDID_OPEN DPSW_CMD_ID(0x802) ++ ++#define DPSW_CMDID_GET_API_VERSION DPSW_CMD_ID(0xa02) ++ ++#define DPSW_CMDID_ENABLE DPSW_CMD_ID(0x002) ++#define DPSW_CMDID_DISABLE DPSW_CMD_ID(0x003) ++#define DPSW_CMDID_GET_ATTR DPSW_CMD_ID(0x004) ++#define DPSW_CMDID_RESET DPSW_CMD_ID(0x005) ++#define DPSW_CMDID_IS_ENABLED DPSW_CMD_ID(0x006) ++ ++#define DPSW_CMDID_SET_IRQ DPSW_CMD_ID(0x010) ++#define DPSW_CMDID_GET_IRQ DPSW_CMD_ID(0x011) ++#define DPSW_CMDID_SET_IRQ_ENABLE DPSW_CMD_ID(0x012) ++#define DPSW_CMDID_GET_IRQ_ENABLE DPSW_CMD_ID(0x013) ++#define DPSW_CMDID_SET_IRQ_MASK DPSW_CMD_ID(0x014) ++#define DPSW_CMDID_GET_IRQ_MASK DPSW_CMD_ID(0x015) ++#define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016) ++#define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017) ++ ++#define DPSW_CMDID_SET_REFLECTION_IF DPSW_CMD_ID(0x022) ++ ++#define DPSW_CMDID_ADD_CUSTOM_TPID DPSW_CMD_ID(0x024) ++ ++#define DPSW_CMDID_REMOVE_CUSTOM_TPID DPSW_CMD_ID(0x026) ++ ++#define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030) ++#define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031) ++#define DPSW_CMDID_IF_SET_ACCEPTED_FRAMES DPSW_CMD_ID(0x032) ++#define DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN DPSW_CMD_ID(0x033) ++#define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_ID(0x034) ++#define DPSW_CMDID_IF_SET_COUNTER DPSW_CMD_ID(0x035) ++#define DPSW_CMDID_IF_SET_TX_SELECTION DPSW_CMD_ID(0x036) ++#define DPSW_CMDID_IF_ADD_REFLECTION DPSW_CMD_ID(0x037) ++#define DPSW_CMDID_IF_REMOVE_REFLECTION DPSW_CMD_ID(0x038) ++#define DPSW_CMDID_IF_SET_FLOODING_METERING DPSW_CMD_ID(0x039) ++#define DPSW_CMDID_IF_SET_METERING DPSW_CMD_ID(0x03A) ++#define DPSW_CMDID_IF_SET_EARLY_DROP DPSW_CMD_ID(0x03B) ++ ++#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D) ++#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E) ++ ++#define DPSW_CMDID_IF_GET_ATTR DPSW_CMD_ID(0x042) ++ ++#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x044) ++#define 
DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH	DPSW_CMD_ID(0x045)
++#define DPSW_CMDID_IF_GET_LINK_STATE	DPSW_CMD_ID(0x046)
++#define DPSW_CMDID_IF_SET_FLOODING	DPSW_CMD_ID(0x047)
++#define DPSW_CMDID_IF_SET_BROADCAST	DPSW_CMD_ID(0x048)
++#define DPSW_CMDID_IF_SET_MULTICAST	DPSW_CMD_ID(0x049)
++#define DPSW_CMDID_IF_GET_TCI		DPSW_CMD_ID(0x04A)
++
++#define DPSW_CMDID_IF_SET_LINK_CFG	DPSW_CMD_ID(0x04C)
++
++#define DPSW_CMDID_VLAN_ADD		DPSW_CMD_ID(0x060)
++#define DPSW_CMDID_VLAN_ADD_IF		DPSW_CMD_ID(0x061)
++#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED	DPSW_CMD_ID(0x062)
++#define DPSW_CMDID_VLAN_ADD_IF_FLOODING	DPSW_CMD_ID(0x063)
++#define DPSW_CMDID_VLAN_REMOVE_IF	DPSW_CMD_ID(0x064)
++#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED	DPSW_CMD_ID(0x065)
++#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING	DPSW_CMD_ID(0x066)
++#define DPSW_CMDID_VLAN_REMOVE		DPSW_CMD_ID(0x067)
++#define DPSW_CMDID_VLAN_GET_IF		DPSW_CMD_ID(0x068)
++#define DPSW_CMDID_VLAN_GET_IF_FLOODING	DPSW_CMD_ID(0x069)
++#define DPSW_CMDID_VLAN_GET_IF_UNTAGGED	DPSW_CMD_ID(0x06A)
++#define DPSW_CMDID_VLAN_GET_ATTRIBUTES	DPSW_CMD_ID(0x06B)
++
++#define DPSW_CMDID_FDB_GET_MULTICAST	DPSW_CMD_ID(0x080)
++#define DPSW_CMDID_FDB_GET_UNICAST	DPSW_CMD_ID(0x081)
++#define DPSW_CMDID_FDB_ADD		DPSW_CMD_ID(0x082)
++#define DPSW_CMDID_FDB_REMOVE		DPSW_CMD_ID(0x083)
++#define DPSW_CMDID_FDB_ADD_UNICAST	DPSW_CMD_ID(0x084)
++#define DPSW_CMDID_FDB_REMOVE_UNICAST	DPSW_CMD_ID(0x085)
++#define DPSW_CMDID_FDB_ADD_MULTICAST	DPSW_CMD_ID(0x086)
++#define DPSW_CMDID_FDB_REMOVE_MULTICAST	DPSW_CMD_ID(0x087)
++#define DPSW_CMDID_FDB_SET_LEARNING_MODE	DPSW_CMD_ID(0x088)
++#define DPSW_CMDID_FDB_GET_ATTR		DPSW_CMD_ID(0x089)
++
++#define DPSW_CMDID_ACL_ADD		DPSW_CMD_ID(0x090)
++#define DPSW_CMDID_ACL_REMOVE		DPSW_CMD_ID(0x091)
++#define DPSW_CMDID_ACL_ADD_ENTRY	DPSW_CMD_ID(0x092)
++#define DPSW_CMDID_ACL_REMOVE_ENTRY	DPSW_CMD_ID(0x093)
++#define DPSW_CMDID_ACL_ADD_IF		DPSW_CMD_ID(0x094)
++#define DPSW_CMDID_ACL_REMOVE_IF	DPSW_CMD_ID(0x095)
++#define DPSW_CMDID_ACL_GET_ATTR		DPSW_CMD_ID(0x096)
++
++#define DPSW_CMDID_CTRL_IF_GET_ATTR	DPSW_CMD_ID(0x0A0)
++#define DPSW_CMDID_CTRL_IF_SET_POOLS	DPSW_CMD_ID(0x0A1)
++#define DPSW_CMDID_CTRL_IF_ENABLE	DPSW_CMD_ID(0x0A2)
++#define DPSW_CMDID_CTRL_IF_DISABLE	DPSW_CMD_ID(0x0A3)
++
++/* Macros for accessing command fields smaller than 1 byte */
++#define DPSW_MASK(field) \
++	GENMASK(DPSW_##field##_SHIFT + DPSW_##field##_SIZE - 1, \
++		DPSW_##field##_SHIFT)
++#define dpsw_set_field(var, field, val) \
++	((var) |= (((val) << DPSW_##field##_SHIFT) & DPSW_MASK(field)))
++#define dpsw_get_field(var, field) \
++	(((var) & DPSW_MASK(field)) >> DPSW_##field##_SHIFT)
++#define dpsw_get_bit(var, bit) \
++	(((var) >> (bit)) & GENMASK(0, 0))
++
++static inline u64 dpsw_set_bit(u64 var, unsigned int bit, u8 val)
++{
++	var |= (u64)val << bit & GENMASK(bit, bit);
++	return var;
++}
++
++struct dpsw_cmd_open {
++	__le32 dpsw_id;
++};
++
++#define DPSW_COMPONENT_TYPE_SHIFT	0
++#define DPSW_COMPONENT_TYPE_SIZE	4
++
++struct dpsw_cmd_create {
++	/* cmd word 0 */
++	__le16 num_ifs;
++	u8 max_fdbs;
++	u8 max_meters_per_if;
++	/* from LSB: only the first 4 bits */
++	u8 component_type;
++	u8 pad[3];
++	/* cmd word 1 */
++	__le16 max_vlans;
++	__le16 max_fdb_entries;
++	__le16 fdb_aging_time;
++	__le16 max_fdb_mc_groups;
++	/* cmd word 2 */
++	__le64 options;
++};
++
++struct dpsw_cmd_destroy {
++	__le32 dpsw_id;
++};
++
++#define DPSW_ENABLE_SHIFT	0
++#define DPSW_ENABLE_SIZE	1
++
++struct dpsw_rsp_is_enabled {
++	/* from LSB: enable:1 */
++	u8 enabled;
++};
++
++struct dpsw_cmd_set_irq {
++	/* cmd word 0 */
++	u8 irq_index;
++	u8 pad[3];
++	__le32 irq_val;
++	/* cmd word 1 */
++	__le64 irq_addr;
++	/* cmd word 2 */
++	__le32 irq_num;
++};
++
++struct dpsw_cmd_get_irq {
++	__le32 pad;
++	u8 irq_index;
++};
++
++struct dpsw_rsp_get_irq {
++	/* cmd word 0 */
++	__le32 irq_val;
++	__le32 pad;
++	/* cmd word 1 */
++	__le64 irq_addr;
++	/* cmd word 2 */
++	__le32 irq_num;
++	__le32 irq_type;
++};
++
++struct dpsw_cmd_set_irq_enable {
++	u8 enable_state;
++	u8 pad[3];
++	u8 irq_index;
++};
++
++struct dpsw_cmd_get_irq_enable {
++	__le32 pad;
++	u8 irq_index;
++};
++
++struct dpsw_rsp_get_irq_enable {
++	u8 enable_state;
++};
++
++struct dpsw_cmd_set_irq_mask {
++	__le32 mask;
++	u8 irq_index;
++};
++
++struct dpsw_cmd_get_irq_mask {
++	__le32 pad;
++	u8 irq_index;
++};
++
++struct dpsw_rsp_get_irq_mask {
++	__le32 mask;
++};
++
++struct dpsw_cmd_get_irq_status {
++	__le32 status;
++	u8 irq_index;
++};
++
++struct dpsw_rsp_get_irq_status {
++	__le32 status;
++};
++
++struct dpsw_cmd_clear_irq_status {
++	__le32 status;
++	u8 irq_index;
++};
++
++#define DPSW_COMPONENT_TYPE_SHIFT	0
++#define DPSW_COMPONENT_TYPE_SIZE	4
++
++struct dpsw_rsp_get_attr {
++	/* cmd word 0 */
++	__le16 num_ifs;
++	u8 max_fdbs;
++	u8 num_fdbs;
++	__le16 max_vlans;
++	__le16 num_vlans;
++	/* cmd word 1 */
++	__le16 max_fdb_entries;
++	__le16 fdb_aging_time;
++	__le32 dpsw_id;
++	/* cmd word 2 */
++	__le16 mem_size;
++	__le16 max_fdb_mc_groups;
++	u8 max_meters_per_if;
++	/* from LSB only the first 4 bits */
++	u8 component_type;
++	__le16 pad;
++	/* cmd word 3 */
++	__le64 options;
++};
++
++struct dpsw_cmd_set_reflection_if {
++	__le16 if_id;
++};
++
++struct dpsw_cmd_if_set_flooding {
++	__le16 if_id;
++	/* from LSB: enable:1 */
++	u8 enable;
++};
++
++struct dpsw_cmd_if_set_broadcast {
++	__le16 if_id;
++	/* from LSB: enable:1 */
++	u8 enable;
++};
++
++struct dpsw_cmd_if_set_multicast {
++	__le16 if_id;
++	/* from LSB: enable:1 */
++	u8 enable;
++};
++
++#define DPSW_VLAN_ID_SHIFT	0
++#define DPSW_VLAN_ID_SIZE	12
++#define DPSW_DEI_SHIFT		12
++#define DPSW_DEI_SIZE		1
++#define DPSW_PCP_SHIFT		13
++#define DPSW_PCP_SIZE		3
++
++struct dpsw_cmd_if_set_tci {
++	__le16 if_id;
++	/* from LSB: VLAN_ID:12 DEI:1 PCP:3 */
++	__le16 conf;
++};
++
++struct dpsw_cmd_if_get_tci {
++	__le16 if_id;
++};
++
++struct dpsw_rsp_if_get_tci {
++	__le16 pad;
++	__le16 vlan_id;
++	u8 dei;
++	u8 pcp;
++};
++
++#define DPSW_STATE_SHIFT	0
++#define DPSW_STATE_SIZE		4
++
++struct dpsw_cmd_if_set_stp {
++	__le16 if_id;
++	__le16 vlan_id;
++	/* only the first LSB 4 bits */
++	u8 state;
++};
++
++#define DPSW_FRAME_TYPE_SHIFT		0
++#define DPSW_FRAME_TYPE_SIZE		4
++#define DPSW_UNACCEPTED_ACT_SHIFT	4
++#define DPSW_UNACCEPTED_ACT_SIZE	4
++
++struct dpsw_cmd_if_set_accepted_frames {
++	__le16 if_id;
++	/* from LSB: type:4 unaccepted_act:4 */
++	u8 unaccepted;
++};
++
++#define DPSW_ACCEPT_ALL_SHIFT	0
++#define DPSW_ACCEPT_ALL_SIZE	1
++
++struct dpsw_cmd_if_set_accept_all_vlan {
++	__le16 if_id;
++	/* only the least significant bit */
++	u8 accept_all;
++};
++
++#define DPSW_COUNTER_TYPE_SHIFT	0
++#define DPSW_COUNTER_TYPE_SIZE	5
++
++struct dpsw_cmd_if_get_counter {
++	__le16 if_id;
++	/* from LSB: type:5 */
++	u8 type;
++};
++
++struct dpsw_rsp_if_get_counter {
++	__le64 pad;
++	__le64 counter;
++};
++
++struct dpsw_cmd_if_set_counter {
++	/* cmd word 0 */
++	__le16 if_id;
++	/* from LSB: type:5 */
++	u8 type;
++	/* cmd word 1 */
++	__le64 counter;
++};
++
++#define DPSW_PRIORITY_SELECTOR_SHIFT	0
++#define
DPSW_PRIORITY_SELECTOR_SIZE 3 ++#define DPSW_SCHED_MODE_SHIFT 0 ++#define DPSW_SCHED_MODE_SIZE 4 ++ ++struct dpsw_cmd_if_set_tx_selection { ++ __le16 if_id; ++ /* from LSB: priority_selector:3 */ ++ u8 priority_selector; ++ u8 pad[5]; ++ u8 tc_id[8]; ++ ++ struct dpsw_tc_sched { ++ __le16 delta_bandwidth; ++ u8 mode; ++ u8 pad; ++ } tc_sched[8]; ++}; ++ ++#define DPSW_FILTER_SHIFT 0 ++#define DPSW_FILTER_SIZE 2 ++ ++struct dpsw_cmd_if_reflection { ++ __le16 if_id; ++ __le16 vlan_id; ++ /* only 2 bits from the LSB */ ++ u8 filter; ++}; ++ ++#define DPSW_MODE_SHIFT 0 ++#define DPSW_MODE_SIZE 4 ++#define DPSW_UNITS_SHIFT 4 ++#define DPSW_UNITS_SIZE 4 ++ ++struct dpsw_cmd_if_set_flooding_metering { ++ /* cmd word 0 */ ++ __le16 if_id; ++ u8 pad; ++ /* from LSB: mode:4 units:4 */ ++ u8 mode_units; ++ __le32 cir; ++ /* cmd word 1 */ ++ __le32 eir; ++ __le32 cbs; ++ /* cmd word 2 */ ++ __le32 ebs; ++}; ++ ++struct dpsw_cmd_if_set_metering { ++ /* cmd word 0 */ ++ __le16 if_id; ++ u8 tc_id; ++ /* from LSB: mode:4 units:4 */ ++ u8 mode_units; ++ __le32 cir; ++ /* cmd word 1 */ ++ __le32 eir; ++ __le32 cbs; ++ /* cmd word 2 */ ++ __le32 ebs; ++}; ++ ++#define DPSW_EARLY_DROP_MODE_SHIFT 0 ++#define DPSW_EARLY_DROP_MODE_SIZE 2 ++#define DPSW_EARLY_DROP_UNIT_SHIFT 2 ++#define DPSW_EARLY_DROP_UNIT_SIZE 2 ++ ++struct dpsw_prep_early_drop { ++ /* from LSB: mode:2 units:2 */ ++ u8 conf; ++ u8 pad0[3]; ++ __le32 tail_drop_threshold; ++ u8 green_drop_probability; ++ u8 pad1[7]; ++ __le64 green_max_threshold; ++ __le64 green_min_threshold; ++ __le64 pad2; ++ u8 yellow_drop_probability; ++ u8 pad3[7]; ++ __le64 yellow_max_threshold; ++ __le64 yellow_min_threshold; ++}; ++ ++struct dpsw_cmd_if_set_early_drop { ++ /* cmd word 0 */ ++ u8 pad0; ++ u8 tc_id; ++ __le16 if_id; ++ __le32 pad1; ++ /* cmd word 1 */ ++ __le64 early_drop_iova; ++}; ++ ++struct dpsw_cmd_custom_tpid { ++ __le16 pad; ++ __le16 tpid; ++}; ++ ++struct dpsw_cmd_if { ++ __le16 if_id; ++}; ++ ++#define DPSW_ADMIT_UNTAGGED_SHIFT 0 ++#define DPSW_ADMIT_UNTAGGED_SIZE 4 ++#define DPSW_ENABLED_SHIFT 5 ++#define DPSW_ENABLED_SIZE 1 ++#define DPSW_ACCEPT_ALL_VLAN_SHIFT 6 ++#define DPSW_ACCEPT_ALL_VLAN_SIZE 1 ++ ++struct dpsw_rsp_if_get_attr { ++ /* cmd word 0 */ ++ /* from LSB: admit_untagged:4 enabled:1 accept_all_vlan:1 */ ++ u8 conf; ++ u8 pad1; ++ u8 num_tcs; ++ u8 pad2; ++ __le16 qdid; ++ /* cmd word 1 */ ++ __le32 options; ++ __le32 pad3; ++ /* cmd word 2 */ ++ __le32 rate; ++}; ++ ++struct dpsw_cmd_if_set_max_frame_length { ++ __le16 if_id; ++ __le16 frame_length; ++}; ++ ++struct dpsw_cmd_if_get_max_frame_length { ++ __le16 if_id; ++}; ++ ++struct dpsw_rsp_if_get_max_frame_length { ++ __le16 pad; ++ __le16 frame_length; ++}; ++ ++struct dpsw_cmd_if_set_link_cfg { ++ /* cmd word 0 */ ++ __le16 if_id; ++ u8 pad[6]; ++ /* cmd word 1 */ ++ __le32 rate; ++ __le32 pad1; ++ /* cmd word 2 */ ++ __le64 options; ++}; ++ ++struct dpsw_cmd_if_get_link_state { ++ __le16 if_id; ++}; ++ ++#define DPSW_UP_SHIFT 0 ++#define DPSW_UP_SIZE 1 ++ ++struct dpsw_rsp_if_get_link_state { ++ /* cmd word 0 */ ++ __le32 pad0; ++ u8 up; ++ u8 pad1[3]; ++ /* cmd word 1 */ ++ __le32 rate; ++ __le32 pad2; ++ /* cmd word 2 */ ++ __le64 options; ++}; ++ ++struct dpsw_vlan_add { ++ __le16 fdb_id; ++ __le16 vlan_id; ++}; ++ ++struct dpsw_cmd_vlan_manage_if { ++ /* cmd word 0 */ ++ __le16 pad0; ++ __le16 vlan_id; ++ __le32 pad1; ++ /* cmd word 1 */ ++ __le64 if_id[4]; ++}; ++ ++struct dpsw_cmd_vlan_remove { ++ __le16 pad; ++ __le16 vlan_id; ++}; ++ ++struct dpsw_cmd_vlan_get_attr { 
++ __le16 vlan_id; ++}; ++ ++struct dpsw_rsp_vlan_get_attr { ++ /* cmd word 0 */ ++ __le64 pad; ++ /* cmd word 1 */ ++ __le16 fdb_id; ++ __le16 num_ifs; ++ __le16 num_untagged_ifs; ++ __le16 num_flooding_ifs; ++}; ++ ++struct dpsw_cmd_vlan_get_if { ++ __le16 vlan_id; ++}; ++ ++struct dpsw_rsp_vlan_get_if { ++ /* cmd word 0 */ ++ __le16 pad0; ++ __le16 num_ifs; ++ u8 pad1[4]; ++ /* cmd word 1 */ ++ __le64 if_id[4]; ++}; ++ ++struct dpsw_cmd_vlan_get_if_untagged { ++ __le16 vlan_id; ++}; ++ ++struct dpsw_rsp_vlan_get_if_untagged { ++ /* cmd word 0 */ ++ __le16 pad0; ++ __le16 num_ifs; ++ u8 pad1[4]; ++ /* cmd word 1 */ ++ __le64 if_id[4]; ++}; ++ ++struct dpsw_cmd_vlan_get_if_flooding { ++ __le16 vlan_id; ++}; ++ ++struct dpsw_rsp_vlan_get_if_flooding { ++ /* cmd word 0 */ ++ __le16 pad0; ++ __le16 num_ifs; ++ u8 pad1[4]; ++ /* cmd word 1 */ ++ __le64 if_id[4]; ++}; ++ ++struct dpsw_cmd_fdb_add { ++ __le32 pad; ++ __le16 fdb_aging_time; ++ __le16 num_fdb_entries; ++}; ++ ++struct dpsw_rsp_fdb_add { ++ __le16 fdb_id; ++}; ++ ++struct dpsw_cmd_fdb_remove { ++ __le16 fdb_id; ++}; ++ ++#define DPSW_ENTRY_TYPE_SHIFT 0 ++#define DPSW_ENTRY_TYPE_SIZE 4 ++ ++struct dpsw_cmd_fdb_add_unicast { ++ /* cmd word 0 */ ++ __le16 fdb_id; ++ u8 mac_addr[6]; ++ /* cmd word 1 */ ++ u8 if_egress; ++ u8 pad; ++ /* only the first 4 bits from LSB */ ++ u8 type; ++}; ++ ++struct dpsw_cmd_fdb_get_unicast { ++ __le16 fdb_id; ++ u8 mac_addr[6]; ++}; ++ ++struct dpsw_rsp_fdb_get_unicast { ++ __le64 pad; ++ __le16 if_egress; ++ /* only first 4 bits from LSB */ ++ u8 type; ++}; ++ ++struct dpsw_cmd_fdb_remove_unicast { ++ /* cmd word 0 */ ++ __le16 fdb_id; ++ u8 mac_addr[6]; ++ /* cmd word 1 */ ++ __le16 if_egress; ++ /* only the first 4 bits from LSB */ ++ u8 type; ++}; ++ ++struct dpsw_cmd_fdb_add_multicast { ++ /* cmd word 0 */ ++ __le16 fdb_id; ++ __le16 num_ifs; ++ /* only the first 4 bits from LSB */ ++ u8 type; ++ u8 pad[3]; ++ /* cmd word 1 */ ++ u8 mac_addr[6]; ++ __le16 pad2; ++ /* cmd word 2 */ ++ __le64 if_id[4]; ++}; ++ ++struct dpsw_cmd_fdb_get_multicast { ++ __le16 fdb_id; ++ u8 mac_addr[6]; ++}; ++ ++struct dpsw_rsp_fdb_get_multicast { ++ /* cmd word 0 */ ++ __le64 pad0; ++ /* cmd word 1 */ ++ __le16 num_ifs; ++ /* only the first 4 bits from LSB */ ++ u8 type; ++ u8 pad1[5]; ++ /* cmd word 2 */ ++ __le64 if_id[4]; ++}; ++ ++struct dpsw_cmd_fdb_remove_multicast { ++ /* cmd word 0 */ ++ __le16 fdb_id; ++ __le16 num_ifs; ++ /* only the first 4 bits from LSB */ ++ u8 type; ++ u8 pad[3]; ++ /* cmd word 1 */ ++ u8 mac_addr[6]; ++ __le16 pad2; ++ /* cmd word 2 */ ++ __le64 if_id[4]; ++}; ++ ++#define DPSW_LEARNING_MODE_SHIFT 0 ++#define DPSW_LEARNING_MODE_SIZE 4 ++ ++struct dpsw_cmd_fdb_set_learning_mode { ++ __le16 fdb_id; ++ /* only the first 4 bits from LSB */ ++ u8 mode; ++}; ++ ++struct dpsw_cmd_fdb_get_attr { ++ __le16 fdb_id; ++}; ++ ++struct dpsw_rsp_fdb_get_attr { ++ /* cmd word 0 */ ++ __le16 pad; ++ __le16 max_fdb_entries; ++ __le16 fdb_aging_time; ++ __le16 num_fdb_mc_groups; ++ /* cmd word 1 */ ++ __le16 max_fdb_mc_groups; ++ /* only the first 4 bits from LSB */ ++ u8 learning_mode; ++}; ++ ++struct dpsw_cmd_acl_add { ++ __le16 pad; ++ __le16 max_entries; ++}; ++ ++struct dpsw_rsp_acl_add { ++ __le16 acl_id; ++}; ++ ++struct dpsw_cmd_acl_remove { ++ __le16 acl_id; ++}; ++ ++struct dpsw_prep_acl_entry { ++ u8 match_l2_dest_mac[6]; ++ __le16 match_l2_tpid; ++ ++ u8 match_l2_source_mac[6]; ++ __le16 match_l2_vlan_id; ++ ++ __le32 match_l3_dest_ip; ++ __le32 match_l3_source_ip; ++ ++ __le16 
match_l4_dest_port; ++ __le16 match_l4_source_port; ++ __le16 match_l2_ether_type; ++ u8 match_l2_pcp_dei; ++ u8 match_l3_dscp; ++ ++ u8 mask_l2_dest_mac[6]; ++ __le16 mask_l2_tpid; ++ ++ u8 mask_l2_source_mac[6]; ++ __le16 mask_l2_vlan_id; ++ ++ __le32 mask_l3_dest_ip; ++ __le32 mask_l3_source_ip; ++ ++ __le16 mask_l4_dest_port; ++ __le16 mask_l4_source_port; ++ __le16 mask_l2_ether_type; ++ u8 mask_l2_pcp_dei; ++ u8 mask_l3_dscp; ++ ++ u8 match_l3_protocol; ++ u8 mask_l3_protocol; ++}; ++ ++#define DPSW_RESULT_ACTION_SHIFT 0 ++#define DPSW_RESULT_ACTION_SIZE 4 ++ ++struct dpsw_cmd_acl_entry { ++ __le16 acl_id; ++ __le16 result_if_id; ++ __le32 precedence; ++ /* from LSB only the first 4 bits */ ++ u8 result_action; ++ u8 pad[7]; ++ __le64 pad2[4]; ++ __le64 key_iova; ++}; ++ ++struct dpsw_cmd_acl_if { ++ /* cmd word 0 */ ++ __le16 acl_id; ++ __le16 num_ifs; ++ __le32 pad; ++ /* cmd word 1 */ ++ __le64 if_id[4]; ++}; ++ ++struct dpsw_cmd_acl_get_attr { ++ __le16 acl_id; ++}; ++ ++struct dpsw_rsp_acl_get_attr { ++ /* cmd word 0 */ ++ __le64 pad; ++ /* cmd word 1 */ ++ __le16 max_entries; ++ __le16 num_entries; ++ __le16 num_ifs; ++}; ++ ++struct dpsw_rsp_ctrl_if_get_attr { ++ /* cmd word 0 */ ++ __le64 pad; ++ /* cmd word 1 */ ++ __le32 rx_fqid; ++ __le32 rx_err_fqid; ++ /* cmd word 2 */ ++ __le32 tx_err_conf_fqid; ++}; ++ ++struct dpsw_cmd_ctrl_if_set_pools { ++ u8 num_dpbp; ++ /* from LSB: POOL0_BACKUP_POOL:1 ... POOL7_BACKUP_POOL */ ++ u8 backup_pool; ++ __le16 pad; ++ __le32 dpbp_id[8]; ++ __le16 buffer_size[8]; ++}; ++ ++struct dpsw_rsp_get_api_version { ++ __le16 version_major; ++ __le16 version_minor; ++}; ++ ++#endif /* __FSL_DPSW_CMD_H */ +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c +@@ -0,0 +1,2762 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#include "../../fsl-mc/include/mc-sys.h" ++#include "../../fsl-mc/include/mc-cmd.h" ++#include "dpsw.h" ++#include "dpsw-cmd.h" ++ ++static void build_if_id_bitmap(__le64 *bmap, ++ const u16 *id, ++ const u16 num_ifs) { ++ int i; ++ ++ for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++) ++ bmap[id[i] / 64] = dpsw_set_bit(bmap[id[i] / 64], ++ (id[i] % 64), ++ 1); ++} ++ ++static void read_if_id_bitmap(u16 *if_id, ++ u16 *num_ifs, ++ __le64 *bmap) { ++ int bitmap[DPSW_MAX_IF] = { 0 }; ++ int i, j = 0; ++ int count = 0; ++ ++ for (i = 0; i < DPSW_MAX_IF; i++) { ++ bitmap[i] = dpsw_get_bit(le64_to_cpu(bmap[i / 64]), ++ i % 64); ++ count += bitmap[i]; ++ } ++ ++ *num_ifs = (u16)count; ++ ++ for (i = 0; (i < DPSW_MAX_IF) && (j < count); i++) { ++ if (bitmap[i]) { ++ if_id[j] = (u16)i; ++ j++; ++ } ++ } ++} ++ ++/** ++ * dpsw_open() - Open a control session for the specified object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @dpsw_id: DPSW unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an ++ * already created object; an object may have been declared in ++ * the DPL or by calling the dpsw_create() function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent commands for ++ * this specific object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpsw_open(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ int dpsw_id, ++ u16 *token) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_open *cmd_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN, ++ cmd_flags, ++ 0); ++ cmd_params = (struct dpsw_cmd_open *)cmd.params; ++ cmd_params->dpsw_id = cpu_to_le32(dpsw_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = mc_cmd_hdr_read_token(&cmd); ++ ++ return 0; ++} ++ ++/** ++ * dpsw_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * ++ * After this function is called, no further operations are ++ * allowed on the object without opening a new control session. ++ * ++ * Return: '0' on Success; Error code otherwise. 
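++ *
++ * A minimal open/use/close sketch (illustrative only; assumes an
++ * already-initialized MC portal in 'mc_io' and a DPL-defined object
++ * id in 'dpsw_id'):
++ *
++ *    u16 token;
++ *    int err;
++ *
++ *    err = dpsw_open(mc_io, 0, dpsw_id, &token);
++ *    if (!err) {
++ *        err = dpsw_enable(mc_io, 0, token);
++ *        dpsw_close(mc_io, 0, token);
++ *    }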
++ */ ++int dpsw_close(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_enable() - Enable DPSW functionality ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_disable() - Disable DPSW functionality ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_disable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_is_enabled() - Check if the DPSW is enabled ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @en: Returns '1' if object is enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpsw_is_enabled(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_rsp_is_enabled *cmd_rsp; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IS_ENABLED, cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ cmd_rsp = (struct dpsw_rsp_is_enabled *)cmd.params; ++ *en = dpsw_get_field(cmd_rsp->enabled, ENABLE); ++ ++ return 0; ++} ++ ++/** ++ * dpsw_reset() - Reset the DPSW, returns the object to initial state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpsw_reset(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_set_irq() - Set IRQ information for the DPSW to trigger an interrupt. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @irq_index: Identifies the interrupt index to configure ++ * @irq_cfg: IRQ configuration ++ * ++ * Return: '0' on Success; Error code otherwise. 
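++ *
++ * A minimal usage sketch (illustrative only; 'msg_addr' and 'msg_val'
++ * are placeholders for values normally obtained from the MSI setup):
++ *
++ *    struct dpsw_irq_cfg irq_cfg = {
++ *        .addr = msg_addr,
++ *        .val = msg_val,
++ *        .irq_num = 0,
++ *    };
++ *
++ *    err = dpsw_set_irq(mc_io, 0, token, 0, &irq_cfg);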
++ */ ++int dpsw_set_irq(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ struct dpsw_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_set_irq *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_set_irq *)cmd.params; ++ cmd_params->irq_index = irq_index; ++ cmd_params->irq_val = cpu_to_le32(irq_cfg->val); ++ cmd_params->irq_addr = cpu_to_le64(irq_cfg->addr); ++ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_get_irq() - Get IRQ information from the DPSW ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @irq_index: The interrupt index to configure ++ * @type: Interrupt type: 0 represents message interrupt ++ * type (both irq_addr and irq_val are valid) ++ * @irq_cfg: IRQ attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpsw_get_irq(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ int *type, ++ struct dpsw_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_get_irq *cmd_params; ++ struct dpsw_rsp_get_irq *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_get_irq *)cmd.params; ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpsw_rsp_get_irq *)cmd.params; ++ irq_cfg->addr = le64_to_cpu(rsp_params->irq_addr); ++ irq_cfg->val = le32_to_cpu(rsp_params->irq_val); ++ irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num); ++ *type = le32_to_cpu(rsp_params->irq_type); ++ ++ return 0; ++} ++ ++/** ++ * dpsw_set_irq_enable() - Set overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @irq_index: The interrupt index to configure ++ * @en: Interrupt state - enable = 1, disable = 0 ++ * ++ * Allows GPP software to control when interrupts are generated. ++ * Each interrupt can have up to 32 causes. The enable/disable controls the ++ * overall interrupt state. If the interrupt is disabled, no causes will ++ * trigger an interrupt ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u8 en) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_set_irq_enable *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_set_irq_enable *)cmd.params; ++ dpsw_set_field(cmd_params->enable_state, ENABLE, en); ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @irq_index: The interrupt index to configure ++ * @mask: Event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting IRQ ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 mask) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_set_irq_mask *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_set_irq_mask *)cmd.params; ++ cmd_params->mask = cpu_to_le32(mask); ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_get_irq_status() - Get the current status of any pending interrupts ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupt status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpsw_get_irq_status(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 *status) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_get_irq_status *cmd_params; ++ struct dpsw_rsp_get_irq_status *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_get_irq_status *)cmd.params; ++ cmd_params->status = cpu_to_le32(*status); ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpsw_rsp_get_irq_status *)cmd.params; ++ *status = le32_to_cpu(rsp_params->status); ++ ++ return 0; ++} ++ ++/** ++ * dpsw_clear_irq_status() - Clear a pending interrupt's status ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @irq_index: The interrupt index to configure ++ * @status: bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 status) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_clear_irq_status *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_clear_irq_status *)cmd.params; ++ cmd_params->status = cpu_to_le32(status); ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_get_attributes() - Retrieve DPSW attributes ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @attr: Returned DPSW attributes ++ * ++ * Return: Completion status.
'0' on Success; Error code otherwise. ++ */ ++int dpsw_get_attributes(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dpsw_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_rsp_get_attr *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpsw_rsp_get_attr *)cmd.params; ++ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs); ++ attr->max_fdbs = rsp_params->max_fdbs; ++ attr->num_fdbs = rsp_params->num_fdbs; ++ attr->max_vlans = le16_to_cpu(rsp_params->max_vlans); ++ attr->num_vlans = le16_to_cpu(rsp_params->num_vlans); ++ attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries); ++ attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time); ++ attr->id = le32_to_cpu(rsp_params->dpsw_id); ++ attr->mem_size = le16_to_cpu(rsp_params->mem_size); ++ attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups); ++ attr->max_meters_per_if = rsp_params->max_meters_per_if; ++ attr->options = le64_to_cpu(rsp_params->options); ++ attr->component_type = dpsw_get_field(rsp_params->component_type, ++ COMPONENT_TYPE); ++ ++ return 0; ++} ++ ++/** ++ * dpsw_set_reflection_if() - Set target interface for reflected interfaces. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface Id ++ * ++ * Only one reflection receive interface is allowed per switch ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_set_reflection_if *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_set_reflection_if *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_if_set_link_cfg() - Set the link configuration. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface id ++ * @cfg: Link configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ struct dpsw_link_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_if_set_link_cfg *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if_set_link_cfg *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ cmd_params->rate = cpu_to_le32(cfg->rate); ++ cmd_params->options = cpu_to_le64(cfg->options); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_if_get_link_state() - Return the link state ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface id ++ * @state: Link state: 1 - link up, 0 - link down or disconnected ++ * ++ * Return: '0' on Success; Error code otherwise.
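++ *
++ * A minimal polling sketch (illustrative only; the log call is just an
++ * example consumer of the returned state):
++ *
++ *    struct dpsw_link_state state = { 0 };
++ *
++ *    err = dpsw_if_get_link_state(mc_io, 0, token, if_id, &state);
++ *    if (!err)
++ *        pr_info("if %u link %s, rate %u\n", if_id,
++ *                state.up ? "up" : "down", state.rate);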
++ */ ++int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ struct dpsw_link_state *state) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_if_get_link_state *cmd_params; ++ struct dpsw_rsp_if_get_link_state *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if_get_link_state *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpsw_rsp_if_get_link_state *)cmd.params; ++ state->rate = le32_to_cpu(rsp_params->rate); ++ state->options = le64_to_cpu(rsp_params->options); ++ state->up = dpsw_get_field(rsp_params->up, UP); ++ ++ return 0; ++} ++ ++/** ++ * dpsw_if_set_flooding() - Enable/disable flooding for a particular interface ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface Identifier ++ * @en: 1 - enable, 0 - disable ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_if_set_flooding(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_if_set_flooding *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if_set_flooding *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ dpsw_set_field(cmd_params->enable, ENABLE, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_if_set_broadcast() - Enable/disable broadcast for a particular interface ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface Identifier ++ * @en: 1 - enable, 0 - disable ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_if_set_broadcast *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if_set_broadcast *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ dpsw_set_field(cmd_params->enable, ENABLE, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_if_set_multicast() - Enable/disable multicast for a particular interface ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface Identifier ++ * @en: 1 - enable, 0 - disable ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise.
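++ *
++ * The flooding/broadcast/multicast toggles are typically applied as a
++ * group when configuring a port; a minimal sketch (illustrative only):
++ *
++ *    err = dpsw_if_set_flooding(mc_io, 0, token, if_id, 1);
++ *    if (!err)
++ *        err = dpsw_if_set_broadcast(mc_io, 0, token, if_id, 1);
++ *    if (!err)
++ *        err = dpsw_if_set_multicast(mc_io, 0, token, if_id, 1);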
++ */ ++int dpsw_if_set_multicast(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_if_set_multicast *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MULTICAST, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if_set_multicast *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ dpsw_set_field(cmd_params->enable, ENABLE, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI) ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface Identifier ++ * @cfg: Tag Control Information Configuration ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_if_set_tci(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ const struct dpsw_tci_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_if_set_tci *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if_set_tci *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ dpsw_set_field(cmd_params->conf, VLAN_ID, cfg->vlan_id); ++ dpsw_set_field(cmd_params->conf, DEI, cfg->dei); ++ dpsw_set_field(cmd_params->conf, PCP, cfg->pcp); ++ cmd_params->conf = cpu_to_le16(cmd_params->conf); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI) ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface Identifier ++ * @cfg: Tag Control Information Configuration ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_if_get_tci(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ struct dpsw_tci_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_if_get_tci *cmd_params; ++ struct dpsw_rsp_if_get_tci *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if_get_tci *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpsw_rsp_if_get_tci *)cmd.params; ++ cfg->pcp = rsp_params->pcp; ++ cfg->dei = rsp_params->dei; ++ cfg->vlan_id = le16_to_cpu(rsp_params->vlan_id); ++ ++ return 0; ++} ++ ++/** ++ * dpsw_if_set_stp() - Function sets Spanning Tree Protocol (STP) state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface Identifier ++ * @cfg: STP State configuration parameters ++ * ++ * The following STP states are supported - ++ * blocking, listening, learning, forwarding and disabled. ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. 
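++ *
++ * A minimal sketch (illustrative only; DPSW_STP_STATE_FORWARDING is
++ * assumed to be one of the enum dpsw_stp_state values from dpsw.h):
++ *
++ *    struct dpsw_stp_cfg stp_cfg = {
++ *        .vlan_id = 1,
++ *        .state = DPSW_STP_STATE_FORWARDING,
++ *    };
++ *
++ *    err = dpsw_if_set_stp(mc_io, 0, token, if_id, &stp_cfg);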
++ */ ++int dpsw_if_set_stp(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ const struct dpsw_stp_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_if_set_stp *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if_set_stp *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id); ++ dpsw_set_field(cmd_params->state, STATE, cfg->state); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_if_set_accepted_frames() ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface Identifier ++ * @cfg: Frame types configuration ++ * ++ * When admit_only_vlan_tagged is set, the device will discard untagged ++ * frames or priority-tagged frames received on this interface. ++ * When admit_only_untagged is set, untagged frames or priority-tagged ++ * frames received on this interface will be accepted and assigned ++ * to a VID based on the PVID and VID Set for this interface. ++ * When admit_all is set, the device will accept VLAN-tagged, untagged ++ * and priority-tagged frames. ++ * The default is admit_all ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ const struct dpsw_accepted_frames_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_if_set_accepted_frames *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_ACCEPTED_FRAMES, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if_set_accepted_frames *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ dpsw_set_field(cmd_params->unaccepted, FRAME_TYPE, cfg->type); ++ dpsw_set_field(cmd_params->unaccepted, UNACCEPTED_ACT, ++ cfg->unaccept_act); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_if_set_accept_all_vlan() ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface Identifier ++ * @accept_all: Accept or drop frames having different VLAN ++ * ++ * When accept_all is FALSE, the device will discard incoming ++ * frames for VLANs that do not include this interface in their ++ * member set. When accept_all is TRUE, the interface will accept all incoming frames ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise.
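++ *
++ * A minimal sketch that enforces strict VLAN membership filtering on an
++ * interface (illustrative only):
++ *
++ *    err = dpsw_if_set_accept_all_vlan(mc_io, 0, token, if_id, 0);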
++ */ ++int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ int accept_all) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_if_set_accept_all_vlan *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if_set_accept_all_vlan *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ dpsw_set_field(cmd_params->accept_all, ACCEPT_ALL, accept_all); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_if_get_counter() - Get a specific counter of a particular interface ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface Identifier ++ * @type: Counter type ++ * @counter: Returned counter value ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_if_get_counter(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ enum dpsw_counter type, ++ u64 *counter) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_if_get_counter *cmd_params; ++ struct dpsw_rsp_if_get_counter *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if_get_counter *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ dpsw_set_field(cmd_params->type, COUNTER_TYPE, type); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpsw_rsp_if_get_counter *)cmd.params; ++ *counter = le64_to_cpu(rsp_params->counter); ++ ++ return 0; ++} ++ ++/** ++ * dpsw_if_set_counter() - Set a specific counter of a particular interface ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface Identifier ++ * @type: Counter type ++ * @counter: New counter value ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_if_set_counter(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ enum dpsw_counter type, ++ u64 counter) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_if_set_counter *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_COUNTER, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if_set_counter *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ cmd_params->counter = cpu_to_le64(counter); ++ dpsw_set_field(cmd_params->type, COUNTER_TYPE, type); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_if_set_tx_selection() - Function is used for mapping a variety ++ * of frame fields ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface Identifier ++ * @cfg: Traffic class mapping configuration ++ * ++ * Function is used for mapping a variety of frame fields (DSCP, PCP) ++ * to Traffic Class. Traffic class is a number ++ * in the range from 0 to 7 ++ * ++ * Return: Completion status.
++ */ ++int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ const struct dpsw_tx_selection_cfg *cfg) ++{ ++ struct dpsw_cmd_if_set_tx_selection *cmd_params; ++ struct mc_command cmd = { 0 }; ++ int i; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TX_SELECTION, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if_set_tx_selection *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ dpsw_set_field(cmd_params->priority_selector, PRIORITY_SELECTOR, ++ cfg->priority_selector); ++ ++ for (i = 0; i < 8; i++) { ++ cmd_params->tc_sched[i].delta_bandwidth = ++ cpu_to_le16(cfg->tc_sched[i].delta_bandwidth); ++ dpsw_set_field(cmd_params->tc_sched[i].mode, SCHED_MODE, ++ cfg->tc_sched[i].mode); ++ cmd_params->tc_id[i] = cfg->tc_id[i]; ++ } ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_if_add_reflection() - Identify interface to be reflected or mirrored ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface Identifier ++ * @cfg: Reflection configuration ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ const struct dpsw_reflection_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_if_reflection *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id); ++ dpsw_set_field(cmd_params->filter, FILTER, cfg->filter); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_if_remove_reflection() - Remove interface to be reflected or mirrored ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface Identifier ++ * @cfg: Reflection configuration ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ const struct dpsw_reflection_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_if_reflection *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id); ++ dpsw_set_field(cmd_params->filter, FILTER, cfg->filter); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_if_set_flooding_metering() - Set flooding metering ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface Identifier ++ * @cfg: Metering parameters ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. 
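++ *
++ * A minimal sketch (illustrative only; the mode/units constants are
++ * assumed to come from the metering enums in dpsw.h and the numbers
++ * are placeholders):
++ *
++ *    struct dpsw_metering_cfg met_cfg = {
++ *        .mode = DPSW_METERING_MODE_RFC2698,
++ *        .units = DPSW_METERING_UNIT_BYTES,
++ *        .cir = 1000,
++ *        .eir = 1000,
++ *        .cbs = 2048,
++ *        .ebs = 2048,
++ *    };
++ *
++ *    err = dpsw_if_set_flooding_metering(mc_io, 0, token, if_id, &met_cfg);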
++ */ ++int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ const struct dpsw_metering_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_if_set_flooding_metering *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING_METERING, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if_set_flooding_metering *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ dpsw_set_field(cmd_params->mode_units, MODE, cfg->mode); ++ dpsw_set_field(cmd_params->mode_units, UNITS, cfg->units); ++ cmd_params->cir = cpu_to_le32(cfg->cir); ++ cmd_params->eir = cpu_to_le32(cfg->eir); ++ cmd_params->cbs = cpu_to_le32(cfg->cbs); ++ cmd_params->ebs = cpu_to_le32(cfg->ebs); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_if_set_metering() - Set interface metering for a traffic class ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface Identifier ++ * @tc_id: Traffic class ID ++ * @cfg: Metering parameters ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_if_set_metering(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ u8 tc_id, ++ const struct dpsw_metering_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_if_set_metering *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_METERING, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if_set_metering *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ cmd_params->tc_id = tc_id; ++ dpsw_set_field(cmd_params->mode_units, MODE, cfg->mode); ++ dpsw_set_field(cmd_params->mode_units, UNITS, cfg->units); ++ cmd_params->cir = cpu_to_le32(cfg->cir); ++ cmd_params->eir = cpu_to_le32(cfg->eir); ++ cmd_params->cbs = cpu_to_le32(cfg->cbs); ++ cmd_params->ebs = cpu_to_le32(cfg->ebs); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_prepare_early_drop() - Prepare an early-drop configuration for setting on an interface ++ * @cfg: Early-drop configuration ++ * @early_drop_buf: Zeroed 256 bytes of memory, to be mapped to DMA afterwards ++ * ++ * This function has to be called before dpsw_if_set_early_drop() ++ * ++ */ ++void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg, ++ u8 *early_drop_buf) ++{ ++ struct dpsw_prep_early_drop *ext_params; ++ ++ ext_params = (struct dpsw_prep_early_drop *)early_drop_buf; ++ dpsw_set_field(ext_params->conf, EARLY_DROP_MODE, cfg->drop_mode); ++ dpsw_set_field(ext_params->conf, EARLY_DROP_UNIT, cfg->units); ++ ext_params->tail_drop_threshold = cpu_to_le32(cfg->tail_drop_threshold); ++ ext_params->green_drop_probability = cfg->green.drop_probability; ++ ext_params->green_max_threshold = cpu_to_le64(cfg->green.max_threshold); ++ ext_params->green_min_threshold = cpu_to_le64(cfg->green.min_threshold); ++ ext_params->yellow_drop_probability = cfg->yellow.drop_probability; ++ ext_params->yellow_max_threshold = ++ cpu_to_le64(cfg->yellow.max_threshold); ++ ext_params->yellow_min_threshold = ++ cpu_to_le64(cfg->yellow.min_threshold); ++} ++ ++/** ++ * dpsw_if_set_early_drop() - Set interface traffic class early-drop ++ * configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface
Identifier ++ * @tc_id: Traffic class selection (0-7) ++ * @early_drop_iova: I/O virtual address of 64 bytes; ++ * Must be cacheline-aligned and DMA-able memory ++ * ++ * Warning: Before calling this function, call dpsw_prepare_early_drop() ++ * to prepare the early_drop_iova parameter ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ u8 tc_id, ++ u64 early_drop_iova) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_if_set_early_drop *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_EARLY_DROP, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if_set_early_drop *)cmd.params; ++ cmd_params->tc_id = tc_id; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ cmd_params->early_drop_iova = cpu_to_le64(early_drop_iova); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_add_custom_tpid() - Configure a distinct Ethernet type value ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @cfg: Tag Protocol identifier ++ * ++ * Configures a distinct Ethernet type value (or TPID value) ++ * to indicate a VLAN tag in addition to the common ++ * TPID values 0x8100 and 0x88A8. ++ * Two additional TPIDs are supported ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ const struct dpsw_custom_tpid_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_custom_tpid *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ADD_CUSTOM_TPID, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_custom_tpid *)cmd.params; ++ cmd_params->tpid = cpu_to_le16(cfg->tpid); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_remove_custom_tpid() - Remove a distinct Ethernet type value ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @cfg: Tag Protocol identifier ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ const struct dpsw_custom_tpid_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_custom_tpid *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_REMOVE_CUSTOM_TPID, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_custom_tpid *)cmd.params; ++ cmd_params->tpid = cpu_to_le16(cfg->tpid); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_if_enable() - Enable Interface ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface Identifier ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */ ++int dpsw_if_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_if *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_if_disable() - Disable Interface ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface Identifier ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_if_disable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_if *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_if_get_attributes() - Function obtains attributes of interface ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface Identifier ++ * @attr: Returned interface attributes ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_if_get_attributes(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ struct dpsw_if_attr *attr) ++{ ++ struct dpsw_rsp_if_get_attr *rsp_params; ++ struct dpsw_cmd_if *cmd_params; ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpsw_rsp_if_get_attr *)cmd.params; ++ attr->num_tcs = rsp_params->num_tcs; ++ attr->rate = le32_to_cpu(rsp_params->rate); ++ attr->options = le32_to_cpu(rsp_params->options); ++ attr->enabled = dpsw_get_field(rsp_params->conf, ENABLED); ++ attr->accept_all_vlan = dpsw_get_field(rsp_params->conf, ++ ACCEPT_ALL_VLAN); ++ attr->admit_untagged = dpsw_get_field(rsp_params->conf, ADMIT_UNTAGGED); ++ attr->qdid = le16_to_cpu(rsp_params->qdid); ++ ++ return 0; ++} ++ ++/** ++ * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface Identifier ++ * @frame_length: Maximum Frame Length ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. 
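++ *
++ * A minimal set-and-verify sketch (illustrative only; 1522 covers a
++ * 1500-byte MTU plus Ethernet header, FCS and one VLAN tag):
++ *
++ *    u16 mfl = 0;
++ *
++ *    err = dpsw_if_set_max_frame_length(mc_io, 0, token, if_id, 1522);
++ *    if (!err)
++ *        err = dpsw_if_get_max_frame_length(mc_io, 0, token, if_id, &mfl);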
++ */ ++int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ u16 frame_length) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_if_set_max_frame_length *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if_set_max_frame_length *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ cmd_params->frame_length = cpu_to_le16(frame_length); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_if_get_max_frame_length() - Get Maximum Receive frame length. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @if_id: Interface Identifier ++ * @frame_length: Returned maximum Frame Length ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ u16 *frame_length) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_if_get_max_frame_length *cmd_params; ++ struct dpsw_rsp_if_get_max_frame_length *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_if_get_max_frame_length *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ rsp_params = (struct dpsw_rsp_if_get_max_frame_length *)cmd.params; ++ *frame_length = le16_to_cpu(rsp_params->frame_length); ++ ++ return 0; ++} ++ ++/** ++ * dpsw_vlan_add() - Add a new VLAN to the DPSW. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @vlan_id: VLAN Identifier ++ * @cfg: VLAN configuration ++ * ++ * Only VLAN ID and FDB ID are required parameters here. ++ * The 12-bit VLAN ID is defined in IEEE 802.1Q. ++ * Adding a duplicate VLAN ID is not allowed. ++ * FDB ID can be shared across multiple VLANs. Shared learning ++ * is obtained by calling dpsw_vlan_add for multiple VLAN IDs ++ * with same fdb_id ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_vlan_add(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 vlan_id, ++ const struct dpsw_vlan_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_vlan_add *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_vlan_add *)cmd.params; ++ cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id); ++ cmd_params->vlan_id = cpu_to_le16(vlan_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_vlan_add_if() - Add a set of interfaces to an existing VLAN. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @vlan_id: VLAN Identifier ++ * @cfg: Set of interfaces to add ++ * ++ * It adds only interfaces that do not yet belong to this VLAN; ++ * otherwise an error is generated and the entire command is ++ * ignored. This function can be called repeatedly, each time ++ * providing only the delta of interfaces to add. ++ * ++ * Return: Completion status.
'0' on Success; Error code otherwise. ++ */ ++int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 vlan_id, ++ const struct dpsw_vlan_if_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_vlan_manage_if *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params; ++ cmd_params->vlan_id = cpu_to_le16(vlan_id); ++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_vlan_add_if_untagged() - Define a set of interfaces that should be ++ * transmitted as untagged. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @vlan_id: VLAN Identifier ++ * @cfg: Set of interfaces that should be transmitted as untagged ++ * ++ * These interfaces should already belong to this VLAN. ++ * By default all interfaces are transmitted as tagged. ++ * Providing a nonexistent interface, or an interface that is ++ * already configured as untagged, generates an error and the entire ++ * command is ignored. ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 vlan_id, ++ const struct dpsw_vlan_if_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_vlan_manage_if *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params; ++ cmd_params->vlan_id = cpu_to_le16(vlan_id); ++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_vlan_add_if_flooding() - Define a set of interfaces that should be ++ * included in flooding when a frame with an unknown unicast ++ * destination MAC arrives. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @vlan_id: VLAN Identifier ++ * @cfg: Set of interfaces that should be used for flooding ++ * ++ * These interfaces should belong to this VLAN. By default all ++ * interfaces are included in the flooding list. Providing ++ * a nonexistent interface, or an interface that is already in the ++ * flooding list, generates an error and the entire command is ++ * ignored. ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 vlan_id, ++ const struct dpsw_vlan_if_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_vlan_manage_if *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_FLOODING, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params; ++ cmd_params->vlan_id = cpu_to_le16(vlan_id); ++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @vlan_id:	VLAN Identifier
++ * @cfg:	Set of interfaces that should be removed
++ *
++ * Interfaces must belong to this VLAN, otherwise an error
++ * is returned and the command is ignored
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
++			u32 cmd_flags,
++			u16 token,
++			u16 vlan_id,
++			const struct dpsw_vlan_if_cfg *cfg)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_cmd_vlan_manage_if *cmd_params;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
++	cmd_params->vlan_id = cpu_to_le16(vlan_id);
++	build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should be
++ *		converted from transmitting as untagged to transmitting
++ *		as tagged.
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @vlan_id:	VLAN Identifier
++ * @cfg:	Set of interfaces that should be removed
++ *
++ * Interfaces provided to this API have to belong to this VLAN
++ * and be configured as untagged; otherwise an error is returned
++ * and the command is ignored
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
++				 u32 cmd_flags,
++				 u16 token,
++				 u16 vlan_id,
++				 const struct dpsw_vlan_if_cfg *cfg)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_cmd_vlan_manage_if *cmd_params;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
++	cmd_params->vlan_id = cpu_to_le16(vlan_id);
++	build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_vlan_remove_if_flooding() - Define a set of interfaces that should be
++ *			removed from the flooding list.
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @vlan_id:	VLAN Identifier
++ * @cfg:	Set of interfaces used for flooding
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
++				 u32 cmd_flags,
++				 u16 token,
++				 u16 vlan_id,
++				 const struct dpsw_vlan_if_cfg *cfg)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_cmd_vlan_manage_if *cmd_params;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_FLOODING,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
++	cmd_params->vlan_id = cpu_to_le16(vlan_id);
++	build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_vlan_remove() - Remove an entire VLAN
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @vlan_id:	VLAN Identifier
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
++		     u32 cmd_flags,
++		     u16 token,
++		     u16 vlan_id)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_cmd_vlan_remove *cmd_params;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpsw_cmd_vlan_remove *)cmd.params;
++	cmd_params->vlan_id = cpu_to_le16(vlan_id);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_vlan_get_attributes() - Get VLAN attributes
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @vlan_id:	VLAN Identifier
++ * @attr:	Returned VLAN attributes
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
++			     u32 cmd_flags,
++			     u16 token,
++			     u16 vlan_id,
++			     struct dpsw_vlan_attr *attr)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_cmd_vlan_get_attr *cmd_params;
++	struct dpsw_rsp_vlan_get_attr *rsp_params;
++	int err;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_ATTRIBUTES,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpsw_cmd_vlan_get_attr *)cmd.params;
++	cmd_params->vlan_id = cpu_to_le16(vlan_id);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	rsp_params = (struct dpsw_rsp_vlan_get_attr *)cmd.params;
++	attr->fdb_id = le16_to_cpu(rsp_params->fdb_id);
++	attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
++	attr->num_untagged_ifs = le16_to_cpu(rsp_params->num_untagged_ifs);
++	attr->num_flooding_ifs = le16_to_cpu(rsp_params->num_flooding_ifs);
++
++	return 0;
++}
++
++/**
++ * dpsw_vlan_get_if() - Get interfaces belonging to this VLAN
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @vlan_id:	VLAN Identifier
++ * @cfg:	Returned set of interfaces belonging to this VLAN
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
++		     u32 cmd_flags,
++		     u16 token,
++		     u16 vlan_id,
++		     struct dpsw_vlan_if_cfg *cfg)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_cmd_vlan_get_if *cmd_params;
++	struct dpsw_rsp_vlan_get_if *rsp_params;
++	int err;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpsw_cmd_vlan_get_if *)cmd.params;
++	cmd_params->vlan_id = cpu_to_le16(vlan_id);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	rsp_params = (struct dpsw_rsp_vlan_get_if *)cmd.params;
++	cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
++	read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
++
++	return 0;
++}
++
++/**
++ * dpsw_vlan_get_if_flooding() - Get interfaces used in flooding for this VLAN
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @vlan_id:	VLAN Identifier
++ * @cfg:	Returned set of flooding interfaces
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
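++ *
++ * A minimal usage sketch (the vlan_id value is illustrative and
++ * cmd_flags of 0 means no 'MC_CMD_FLAG_' options):
++ *
++ *	struct dpsw_vlan_if_cfg cfg;
++ *	int err = dpsw_vlan_get_if_flooding(mc_io, 0, token, 5, &cfg);
++ *	if (!err)
++ *		pr_info("VLAN 5 floods to %u interface(s)\n", cfg.num_ifs);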
++ */
++
++int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
++			      u32 cmd_flags,
++			      u16 token,
++			      u16 vlan_id,
++			      struct dpsw_vlan_if_cfg *cfg)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_cmd_vlan_get_if_flooding *cmd_params;
++	struct dpsw_rsp_vlan_get_if_flooding *rsp_params;
++	int err;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_FLOODING,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpsw_cmd_vlan_get_if_flooding *)cmd.params;
++	cmd_params->vlan_id = cpu_to_le16(vlan_id);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	rsp_params = (struct dpsw_rsp_vlan_get_if_flooding *)cmd.params;
++	cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
++	read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
++
++	return 0;
++}
++
++/**
++ * dpsw_vlan_get_if_untagged() - Get interfaces that should be transmitted as
++ *				untagged
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @vlan_id:	VLAN Identifier
++ * @cfg:	Returned set of untagged interfaces
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
++			      u32 cmd_flags,
++			      u16 token,
++			      u16 vlan_id,
++			      struct dpsw_vlan_if_cfg *cfg)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_cmd_vlan_get_if_untagged *cmd_params;
++	struct dpsw_rsp_vlan_get_if_untagged *rsp_params;
++	int err;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_UNTAGGED,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpsw_cmd_vlan_get_if_untagged *)cmd.params;
++	cmd_params->vlan_id = cpu_to_le16(vlan_id);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	rsp_params = (struct dpsw_rsp_vlan_get_if_untagged *)cmd.params;
++	cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
++	read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
++
++	return 0;
++}
++
++/**
++ * dpsw_fdb_add() - Add an FDB to the switch and return a handle to it
++ *		for future reference
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @fdb_id:	Returned Forwarding Database Identifier
++ * @cfg:	FDB Configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
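++ *
++ * A minimal usage sketch (the cfg values are illustrative; aging
++ * time is in seconds):
++ *
++ *	struct dpsw_fdb_cfg fdb_cfg = { .num_fdb_entries = 1024,
++ *					.fdb_aging_time = 300 };
++ *	u16 fdb_id;
++ *	int err = dpsw_fdb_add(mc_io, 0, token, &fdb_id, &fdb_cfg);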
++ */
++int dpsw_fdb_add(struct fsl_mc_io *mc_io,
++		 u32 cmd_flags,
++		 u16 token,
++		 u16 *fdb_id,
++		 const struct dpsw_fdb_cfg *cfg)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_cmd_fdb_add *cmd_params;
++	struct dpsw_rsp_fdb_add *rsp_params;
++	int err;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpsw_cmd_fdb_add *)cmd.params;
++	cmd_params->fdb_aging_time = cpu_to_le16(cfg->fdb_aging_time);
++	cmd_params->num_fdb_entries = cpu_to_le16(cfg->num_fdb_entries);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	rsp_params = (struct dpsw_rsp_fdb_add *)cmd.params;
++	*fdb_id = le16_to_cpu(rsp_params->fdb_id);
++
++	return 0;
++}
++
++/**
++ * dpsw_fdb_remove() - Remove FDB from switch
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @fdb_id:	Forwarding Database Identifier
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
++		    u32 cmd_flags,
++		    u16 token,
++		    u16 fdb_id)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_cmd_fdb_remove *cmd_params;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpsw_cmd_fdb_remove *)cmd.params;
++	cmd_params->fdb_id = cpu_to_le16(fdb_id);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_fdb_add_unicast() - Add a unicast entry to the MAC lookup table
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @fdb_id:	Forwarding Database Identifier
++ * @cfg:	Unicast entry configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
++			 u32 cmd_flags,
++			 u16 token,
++			 u16 fdb_id,
++			 const struct dpsw_fdb_unicast_cfg *cfg)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_cmd_fdb_add_unicast *cmd_params;
++	int i;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpsw_cmd_fdb_add_unicast *)cmd.params;
++	cmd_params->fdb_id = cpu_to_le16(fdb_id);
++	cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
++	for (i = 0; i < 6; i++)
++		cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++	dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_fdb_get_unicast() - Get unicast entry from MAC lookup table by
++ *			unicast Ethernet address
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @fdb_id:	Forwarding Database Identifier
++ * @cfg:	Returned unicast entry configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
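++ *
++ * Note that @cfg->mac_addr carries the address to look up on entry;
++ * the egress interface and entry type are filled in on return.
++ * A sketch with an illustrative address:
++ *
++ *	struct dpsw_fdb_unicast_cfg cfg = {
++ *		.mac_addr = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x01 },
++ *	};
++ *	int err = dpsw_fdb_get_unicast(mc_io, 0, token, fdb_id, &cfg);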
++ */
++int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
++			 u32 cmd_flags,
++			 u16 token,
++			 u16 fdb_id,
++			 struct dpsw_fdb_unicast_cfg *cfg)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_cmd_fdb_get_unicast *cmd_params;
++	struct dpsw_rsp_fdb_get_unicast *rsp_params;
++	int err, i;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_UNICAST,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpsw_cmd_fdb_get_unicast *)cmd.params;
++	cmd_params->fdb_id = cpu_to_le16(fdb_id);
++	for (i = 0; i < 6; i++)
++		cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	rsp_params = (struct dpsw_rsp_fdb_get_unicast *)cmd.params;
++	cfg->if_egress = le16_to_cpu(rsp_params->if_egress);
++	cfg->type = dpsw_get_field(rsp_params->type, ENTRY_TYPE);
++
++	return 0;
++}
++
++/**
++ * dpsw_fdb_remove_unicast() - Remove an entry from the MAC lookup table
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @fdb_id:	Forwarding Database Identifier
++ * @cfg:	Unicast entry configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
++			    u32 cmd_flags,
++			    u16 token,
++			    u16 fdb_id,
++			    const struct dpsw_fdb_unicast_cfg *cfg)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_cmd_fdb_remove_unicast *cmd_params;
++	int i;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpsw_cmd_fdb_remove_unicast *)cmd.params;
++	cmd_params->fdb_id = cpu_to_le16(fdb_id);
++	for (i = 0; i < 6; i++)
++		cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++	cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
++	dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_fdb_add_multicast() - Add a set of egress interfaces to a multicast
++ *			group
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @fdb_id:	Forwarding Database Identifier
++ * @cfg:	Multicast entry configuration
++ *
++ * If the group does not exist, it is created. Only interfaces
++ * that do not yet belong to this multicast group are added;
++ * otherwise an error is generated and the command is ignored.
++ * This function may be called numerous times, each time providing
++ * the required interface delta.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
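++ *
++ * A minimal sketch (the address and interface ids are illustrative):
++ *
++ *	struct dpsw_fdb_multicast_cfg mc_cfg = {
++ *		.type = DPSW_FDB_ENTRY_STATIC,
++ *		.mac_addr = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
++ *		.num_ifs = 2,
++ *		.if_id = { 1, 2 },
++ *	};
++ *	int err = dpsw_fdb_add_multicast(mc_io, 0, token, fdb_id, &mc_cfg);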
++ */
++int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
++			   u32 cmd_flags,
++			   u16 token,
++			   u16 fdb_id,
++			   const struct dpsw_fdb_multicast_cfg *cfg)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_cmd_fdb_add_multicast *cmd_params;
++	int i;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpsw_cmd_fdb_add_multicast *)cmd.params;
++	cmd_params->fdb_id = cpu_to_le16(fdb_id);
++	cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
++	dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
++	build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++	for (i = 0; i < 6; i++)
++		cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_fdb_get_multicast() - Read a multicast group by its multicast Ethernet
++ *			address.
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @fdb_id:	Forwarding Database Identifier
++ * @cfg:	Returned multicast entry configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
++			   u32 cmd_flags,
++			   u16 token,
++			   u16 fdb_id,
++			   struct dpsw_fdb_multicast_cfg *cfg)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_cmd_fdb_get_multicast *cmd_params;
++	struct dpsw_rsp_fdb_get_multicast *rsp_params;
++	int err, i;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_MULTICAST,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpsw_cmd_fdb_get_multicast *)cmd.params;
++	cmd_params->fdb_id = cpu_to_le16(fdb_id);
++	for (i = 0; i < 6; i++)
++		cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	rsp_params = (struct dpsw_rsp_fdb_get_multicast *)cmd.params;
++	cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
++	cfg->type = dpsw_get_field(rsp_params->type, ENTRY_TYPE);
++	read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
++
++	return 0;
++}
++
++/**
++ * dpsw_fdb_remove_multicast() - Remove interfaces from an existing multicast
++ *			group.
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @fdb_id:	Forwarding Database Identifier
++ * @cfg:	Multicast entry configuration
++ *
++ * Interfaces provided by this API have to exist in the group,
++ * otherwise an error is returned and the entire command is
++ * ignored. If no interface is left in the group,
++ * the entire group is deleted
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
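++ *
++ * A minimal sketch, removing one interface from the illustrative
++ * group built in the dpsw_fdb_add_multicast() example above:
++ *
++ *	mc_cfg.num_ifs = 1;
++ *	mc_cfg.if_id[0] = 2;
++ *	err = dpsw_fdb_remove_multicast(mc_io, 0, token, fdb_id, &mc_cfg);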
++ */ ++int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 fdb_id, ++ const struct dpsw_fdb_multicast_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_fdb_remove_multicast *cmd_params; ++ int i; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_fdb_remove_multicast *)cmd.params; ++ cmd_params->fdb_id = cpu_to_le16(fdb_id); ++ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs); ++ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type); ++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs); ++ for (i = 0; i < 6; i++) ++ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i]; ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_fdb_set_learning_mode() - Define FDB learning mode ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @fdb_id: Forwarding Database Identifier ++ * @mode: Learning mode ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 fdb_id, ++ enum dpsw_fdb_learning_mode mode) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_fdb_set_learning_mode *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_fdb_set_learning_mode *)cmd.params; ++ cmd_params->fdb_id = cpu_to_le16(fdb_id); ++ dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_fdb_get_attributes() - Get FDB attributes ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @fdb_id: Forwarding Database Identifier ++ * @attr: Returned FDB attributes ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 fdb_id, ++ struct dpsw_fdb_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_fdb_get_attr *cmd_params; ++ struct dpsw_rsp_fdb_get_attr *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_ATTR, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_fdb_get_attr *)cmd.params; ++ cmd_params->fdb_id = cpu_to_le16(fdb_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpsw_rsp_fdb_get_attr *)cmd.params; ++ attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries); ++ attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time); ++ attr->learning_mode = dpsw_get_field(rsp_params->learning_mode, ++ LEARNING_MODE); ++ attr->num_fdb_mc_groups = le16_to_cpu(rsp_params->num_fdb_mc_groups); ++ attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups); ++ ++ return 0; ++} ++ ++/** ++ * dpsw_acl_add() - Adds ACL to L2 switch. 
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @acl_id: Returned ACL ID, for the future reference ++ * @cfg: ACL configuration ++ * ++ * Create Access Control List. Multiple ACLs can be created and ++ * co-exist in L2 switch ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpsw_acl_add(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 *acl_id, ++ const struct dpsw_acl_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_acl_add *cmd_params; ++ struct dpsw_rsp_acl_add *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_acl_add *)cmd.params; ++ cmd_params->max_entries = cpu_to_le16(cfg->max_entries); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpsw_rsp_acl_add *)cmd.params; ++ *acl_id = le16_to_cpu(rsp_params->acl_id); ++ ++ return 0; ++} ++ ++/** ++ * dpsw_acl_remove() - Removes ACL from L2 switch. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * @acl_id: ACL ID ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpsw_acl_remove(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 acl_id) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_cmd_acl_remove *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpsw_cmd_acl_remove *)cmd.params; ++ cmd_params->acl_id = cpu_to_le16(acl_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_acl_prepare_entry_cfg() - Set an entry to ACL. 
++ * @key:	Key
++ * @entry_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
++ *
++ * This function has to be called before adding or removing an ACL entry
++ *
++ */
++void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
++				u8 *entry_cfg_buf)
++{
++	struct dpsw_prep_acl_entry *ext_params;
++	int i;
++
++	ext_params = (struct dpsw_prep_acl_entry *)entry_cfg_buf;
++
++	for (i = 0; i < 6; i++) {
++		ext_params->match_l2_dest_mac[i] =
++			key->match.l2_dest_mac[5 - i];
++		ext_params->match_l2_source_mac[i] =
++			key->match.l2_source_mac[5 - i];
++		ext_params->mask_l2_dest_mac[i] =
++			key->mask.l2_dest_mac[5 - i];
++		ext_params->mask_l2_source_mac[i] =
++			key->mask.l2_source_mac[5 - i];
++	}
++
++	ext_params->match_l2_tpid = cpu_to_le16(key->match.l2_tpid);
++	ext_params->match_l2_vlan_id = cpu_to_le16(key->match.l2_vlan_id);
++	ext_params->match_l3_dest_ip = cpu_to_le32(key->match.l3_dest_ip);
++	ext_params->match_l3_source_ip = cpu_to_le32(key->match.l3_source_ip);
++	ext_params->match_l4_dest_port = cpu_to_le16(key->match.l4_dest_port);
++	ext_params->match_l2_ether_type = cpu_to_le16(key->match.l2_ether_type);
++	ext_params->match_l2_pcp_dei = key->match.l2_pcp_dei;
++	ext_params->match_l3_dscp = key->match.l3_dscp;
++	ext_params->match_l4_source_port =
++		cpu_to_le16(key->match.l4_source_port);
++
++	ext_params->mask_l2_tpid = cpu_to_le16(key->mask.l2_tpid);
++	ext_params->mask_l2_vlan_id = cpu_to_le16(key->mask.l2_vlan_id);
++	ext_params->mask_l3_dest_ip = cpu_to_le32(key->mask.l3_dest_ip);
++	ext_params->mask_l3_source_ip = cpu_to_le32(key->mask.l3_source_ip);
++	ext_params->mask_l4_dest_port = cpu_to_le16(key->mask.l4_dest_port);
++	ext_params->mask_l4_source_port = cpu_to_le16(key->mask.l4_source_port);
++	ext_params->mask_l2_ether_type = cpu_to_le16(key->mask.l2_ether_type);
++	ext_params->mask_l2_pcp_dei = key->mask.l2_pcp_dei;
++	ext_params->mask_l3_dscp = key->mask.l3_dscp;
++	ext_params->match_l3_protocol = key->match.l3_protocol;
++	ext_params->mask_l3_protocol = key->mask.l3_protocol;
++}
++
++/**
++ * dpsw_acl_add_entry() - Add an entry to the ACL.
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @acl_id:	ACL ID
++ * @cfg:	Entry configuration
++ *
++ * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
++		       u32 cmd_flags,
++		       u16 token,
++		       u16 acl_id,
++		       const struct dpsw_acl_entry_cfg *cfg)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_cmd_acl_entry *cmd_params;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_ENTRY,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
++	cmd_params->acl_id = cpu_to_le16(acl_id);
++	cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
++	cmd_params->precedence = cpu_to_le32(cfg->precedence);
++	dpsw_set_field(cmd_params->result_action, RESULT_ACTION,
++		       cfg->result.action);
++	cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_acl_remove_entry() - Remove an entry from the ACL.
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @acl_id:	ACL ID
++ * @cfg:	Entry configuration
++ *
++ * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
++			  u32 cmd_flags,
++			  u16 token,
++			  u16 acl_id,
++			  const struct dpsw_acl_entry_cfg *cfg)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_cmd_acl_entry *cmd_params;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
++	cmd_params->acl_id = cpu_to_le16(acl_id);
++	cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
++	cmd_params->precedence = cpu_to_le32(cfg->precedence);
++	dpsw_set_field(cmd_params->result_action, RESULT_ACTION,
++		       cfg->result.action);
++	cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_acl_add_if() - Associate interface(s) with an ACL.
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @acl_id:	ACL ID
++ * @cfg:	Interfaces list
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
++		    u32 cmd_flags,
++		    u16 token,
++		    u16 acl_id,
++		    const struct dpsw_acl_if_cfg *cfg)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_cmd_acl_if *cmd_params;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_IF,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
++	cmd_params->acl_id = cpu_to_le16(acl_id);
++	cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
++	build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_acl_remove_if() - Disassociate interface(s) from an ACL.
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @acl_id:	ACL ID
++ * @cfg:	Interfaces list
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
++		       u32 cmd_flags,
++		       u16 token,
++		       u16 acl_id,
++		       const struct dpsw_acl_if_cfg *cfg)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_cmd_acl_if *cmd_params;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_IF,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
++	cmd_params->acl_id = cpu_to_le16(acl_id);
++	cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
++	build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_acl_get_attributes() - Get ACL attributes
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @acl_id:	ACL Identifier
++ * @attr:	Returned ACL attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
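++ *
++ * A minimal sketch (acl_id as obtained from dpsw_acl_add()):
++ *
++ *	struct dpsw_acl_attr attr;
++ *	int err = dpsw_acl_get_attributes(mc_io, 0, token, acl_id, &attr);
++ *	if (!err)
++ *		pr_info("ACL %u: %u of %u entries used\n",
++ *			acl_id, attr.num_entries, attr.max_entries);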
++ */
++int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
++			    u32 cmd_flags,
++			    u16 token,
++			    u16 acl_id,
++			    struct dpsw_acl_attr *attr)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_cmd_acl_get_attr *cmd_params;
++	struct dpsw_rsp_acl_get_attr *rsp_params;
++	int err;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_GET_ATTR,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpsw_cmd_acl_get_attr *)cmd.params;
++	cmd_params->acl_id = cpu_to_le16(acl_id);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	rsp_params = (struct dpsw_rsp_acl_get_attr *)cmd.params;
++	attr->max_entries = le16_to_cpu(rsp_params->max_entries);
++	attr->num_entries = le16_to_cpu(rsp_params->num_entries);
++	attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
++
++	return 0;
++}
++
++/**
++ * dpsw_ctrl_if_get_attributes() - Obtain control interface attributes
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @attr:	Returned control interface attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
++				u32 cmd_flags,
++				u16 token,
++				struct dpsw_ctrl_if_attr *attr)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_rsp_ctrl_if_get_attr *rsp_params;
++	int err;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR,
++					  cmd_flags,
++					  token);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	rsp_params = (struct dpsw_rsp_ctrl_if_get_attr *)cmd.params;
++	attr->rx_fqid = le32_to_cpu(rsp_params->rx_fqid);
++	attr->rx_err_fqid = le32_to_cpu(rsp_params->rx_err_fqid);
++	attr->tx_err_conf_fqid = le32_to_cpu(rsp_params->tx_err_conf_fqid);
++
++	return 0;
++}
++
++/**
++ * dpsw_ctrl_if_set_pools() - Set control interface buffer pools
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ * @pools:	Buffer pools configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
++			   u32 cmd_flags,
++			   u16 token,
++			   const struct dpsw_ctrl_if_pools_cfg *pools)
++{
++	struct mc_command cmd = { 0 };
++	struct dpsw_cmd_ctrl_if_set_pools *cmd_params;
++	int i;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpsw_cmd_ctrl_if_set_pools *)cmd.params;
++	cmd_params->num_dpbp = pools->num_dpbp;
++	for (i = 0; i < 8; i++) {
++		cmd_params->backup_pool = dpsw_set_bit(cmd_params->backup_pool,
++						       i,
++						       pools->pools[i].backup_pool);
++		cmd_params->buffer_size[i] =
++			cpu_to_le16(pools->pools[i].buffer_size);
++		cmd_params->dpbp_id[i] =
++			cpu_to_le32(pools->pools[i].dpbp_id);
++	}
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_ctrl_if_enable() - Enable control interface
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ *
++ * Return: '0' on Success; Error code otherwise.
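++ *
++ * Typically called once the buffer pools have been configured; a
++ * minimal sketch (pools_cfg is an illustrative, caller-provided
++ * struct dpsw_ctrl_if_pools_cfg):
++ *
++ *	int err = dpsw_ctrl_if_set_pools(mc_io, 0, token, &pools_cfg);
++ *	if (!err)
++ *		err = dpsw_ctrl_if_enable(mc_io, 0, token);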
++ */ ++int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_ctrl_if_disable() - Function disables control interface ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSW object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpsw_get_api_version() - Get Data Path Switch API version ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @major_ver: Major version of data path switch API ++ * @minor_ver: Minor version of data path switch API ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpsw_get_api_version(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 *major_ver, ++ u16 *minor_ver) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpsw_rsp_get_api_version *rsp_params; ++ int err; ++ ++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_API_VERSION, ++ cmd_flags, ++ 0); ++ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ rsp_params = (struct dpsw_rsp_get_api_version *)cmd.params; ++ *major_ver = le16_to_cpu(rsp_params->version_major); ++ *minor_ver = le16_to_cpu(rsp_params->version_minor); ++ ++ return 0; ++} +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h +@@ -0,0 +1,1269 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPSW_H
++#define __FSL_DPSW_H
++
++/* Data Path L2-Switch API
++ * Contains API for handling DPSW topology and functionality
++ */
++
++struct fsl_mc_io;
++
++/**
++ * DPSW general definitions
++ */
++
++/**
++ * Maximum number of traffic class priorities
++ */
++#define DPSW_MAX_PRIORITIES	8
++/**
++ * Maximum number of interfaces
++ */
++#define DPSW_MAX_IF		64
++
++int dpsw_open(struct fsl_mc_io *mc_io,
++	      u32 cmd_flags,
++	      int dpsw_id,
++	      u16 *token);
++
++int dpsw_close(struct fsl_mc_io *mc_io,
++	       u32 cmd_flags,
++	       u16 token);
++
++/**
++ * DPSW options
++ */
++
++/**
++ * Disable flooding
++ */
++#define DPSW_OPT_FLOODING_DIS		0x0000000000000001ULL
++/**
++ * Disable Multicast
++ */
++#define DPSW_OPT_MULTICAST_DIS		0x0000000000000004ULL
++/**
++ * Disable control interface support
++ */
++#define DPSW_OPT_CTRL_IF_DIS		0x0000000000000010ULL
++/**
++ * Disable flooding metering
++ */
++#define DPSW_OPT_FLOODING_METERING_DIS	0x0000000000000020ULL
++/**
++ * Enable metering
++ */
++#define DPSW_OPT_METERING_EN		0x0000000000000040ULL
++
++/**
++ * enum dpsw_component_type - component type of a bridge
++ * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an
++ *	enterprise VLAN bridge or of a Provider Bridge used
++ *	to process C-tagged frames
++ * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a
++ *	Provider Bridge
++ *
++ */
++enum dpsw_component_type {
++	DPSW_COMPONENT_TYPE_C_VLAN = 0,
++	DPSW_COMPONENT_TYPE_S_VLAN
++};
++
++/**
++ * struct dpsw_cfg - DPSW configuration
++ * @num_ifs: Number of external and internal interfaces
++ * @adv: Advanced parameters; default is all zeros;
++ *	use this structure to change default settings
++ */
++struct dpsw_cfg {
++	u16 num_ifs;
++	/**
++	 * struct adv - Advanced parameters
++	 * @options: Enable/Disable DPSW features (bitmap)
++	 * @max_vlans: Maximum number of VLANs; 0 - indicates default 16
++	 * @max_meters_per_if: Number of meters per interface
++	 * @max_fdbs: Maximum number of FDBs; 0 - indicates default 16
++	 * @max_fdb_entries: Number of FDB entries for default FDB table;
++	 *	0 - indicates default 1024 entries.
++ * @fdb_aging_time: Default FDB aging time for default FDB table; ++ * 0 - indicates default 300 seconds ++ * @max_fdb_mc_groups: Number of multicast groups in each FDB table; ++ * 0 - indicates default 32 ++ * @component_type: Indicates the component type of this bridge ++ */ ++ struct { ++ u64 options; ++ u16 max_vlans; ++ u8 max_meters_per_if; ++ u8 max_fdbs; ++ u16 max_fdb_entries; ++ u16 fdb_aging_time; ++ u16 max_fdb_mc_groups; ++ enum dpsw_component_type component_type; ++ } adv; ++}; ++ ++int dpsw_create(struct fsl_mc_io *mc_io, ++ u16 dprc_token, ++ u32 cmd_flags, ++ const struct dpsw_cfg *cfg, ++ u32 *obj_id); ++ ++int dpsw_destroy(struct fsl_mc_io *mc_io, ++ u16 dprc_token, ++ u32 cmd_flags, ++ u32 object_id); ++ ++int dpsw_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ ++int dpsw_disable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ ++int dpsw_is_enabled(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ int *en); ++ ++int dpsw_reset(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ ++/** ++ * DPSW IRQ Index and Events ++ */ ++ ++#define DPSW_IRQ_INDEX_IF 0x0000 ++#define DPSW_IRQ_INDEX_L2SW 0x0001 ++ ++/** ++ * IRQ event - Indicates that the link state changed ++ */ ++#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001 ++ ++/** ++ * struct dpsw_irq_cfg - IRQ configuration ++ * @addr: Address that must be written to signal a message-based interrupt ++ * @val: Value to write into irq_addr address ++ * @irq_num: A user defined number associated with this IRQ ++ */ ++struct dpsw_irq_cfg { ++ u64 addr; ++ u32 val; ++ int irq_num; ++}; ++ ++int dpsw_set_irq(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ struct dpsw_irq_cfg *irq_cfg); ++ ++int dpsw_get_irq(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ int *type, ++ struct dpsw_irq_cfg *irq_cfg); ++ ++int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u8 en); ++ ++int dpsw_get_irq_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u8 *en); ++ ++int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 mask); ++ ++int dpsw_get_irq_mask(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 *mask); ++ ++int dpsw_get_irq_status(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 *status); ++ ++int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 status); ++ ++/** ++ * struct dpsw_attr - Structure representing DPSW attributes ++ * @id: DPSW object ID ++ * @options: Enable/Disable DPSW features ++ * @max_vlans: Maximum Number of VLANs ++ * @max_meters_per_if: Number of meters per interface ++ * @max_fdbs: Maximum Number of FDBs ++ * @max_fdb_entries: Number of FDB entries for default FDB table; ++ * 0 - indicates default 1024 entries. 
++ * @fdb_aging_time: Default FDB aging time for default FDB table;
++ *	0 - indicates default 300 seconds
++ * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
++ *	0 - indicates default 32
++ * @mem_size: DPSW frame storage memory size
++ * @num_ifs: Number of interfaces
++ * @num_vlans: Current number of VLANs
++ * @num_fdbs: Current number of FDBs
++ * @component_type: Component type of this bridge
++ */
++struct dpsw_attr {
++	int id;
++	u64 options;
++	u16 max_vlans;
++	u8 max_meters_per_if;
++	u8 max_fdbs;
++	u16 max_fdb_entries;
++	u16 fdb_aging_time;
++	u16 max_fdb_mc_groups;
++	u16 num_ifs;
++	u16 mem_size;
++	u16 num_vlans;
++	u8 num_fdbs;
++	enum dpsw_component_type component_type;
++};
++
++int dpsw_get_attributes(struct fsl_mc_io *mc_io,
++			u32 cmd_flags,
++			u16 token,
++			struct dpsw_attr *attr);
++
++int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
++			   u32 cmd_flags,
++			   u16 token,
++			   u16 if_id);
++
++/**
++ * enum dpsw_action - Action selection for special/control frames
++ * @DPSW_ACTION_DROP: Drop frame
++ * @DPSW_ACTION_REDIRECT: Redirect frame to control port
++ */
++enum dpsw_action {
++	DPSW_ACTION_DROP = 0,
++	DPSW_ACTION_REDIRECT = 1
++};
++
++/**
++ * Enable auto-negotiation
++ */
++#define DPSW_LINK_OPT_AUTONEG		0x0000000000000001ULL
++/**
++ * Enable half-duplex mode
++ */
++#define DPSW_LINK_OPT_HALF_DUPLEX	0x0000000000000002ULL
++/**
++ * Enable pause frames
++ */
++#define DPSW_LINK_OPT_PAUSE		0x0000000000000004ULL
++/**
++ * Enable asymmetric pause frames
++ */
++#define DPSW_LINK_OPT_ASYM_PAUSE	0x0000000000000008ULL
++
++/**
++ * struct dpsw_link_cfg - Structure representing DPSW link configuration
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPSW_LINK_OPT_' values
++ */
++struct dpsw_link_cfg {
++	u32 rate;
++	u64 options;
++};
++
++int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
++			 u32 cmd_flags,
++			 u16 token,
++			 u16 if_id,
++			 struct dpsw_link_cfg *cfg);
++/**
++ * struct dpsw_link_state - Structure representing DPSW link state
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPSW_LINK_OPT_' values
++ * @up: 0 - covers two cases: down and disconnected, 1 - up
++ */
++struct dpsw_link_state {
++	u32 rate;
++	u64 options;
++	int up;
++};
++
++int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
++			   u32 cmd_flags,
++			   u16 token,
++			   u16 if_id,
++			   struct dpsw_link_state *state);
++
++int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
++			 u32 cmd_flags,
++			 u16 token,
++			 u16 if_id,
++			 int en);
++
++int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
++			  u32 cmd_flags,
++			  u16 token,
++			  u16 if_id,
++			  int en);
++
++int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
++			  u32 cmd_flags,
++			  u16 token,
++			  u16 if_id,
++			  int en);
++
++/**
++ * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration
++ * @pcp: Priority Code Point (PCP): a 3-bit field which refers
++ *	to the IEEE 802.1p priority
++ * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used
++ *	separately or in conjunction with PCP to indicate frames
++ *	eligible to be dropped in the presence of congestion
++ * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN
++ *	to which the frame belongs. The hexadecimal values
++ *	of 0x000 and 0xFFF are reserved;
++ *	all other values may be used as VLAN identifiers,
++ *	allowing up to 4,094 VLANs
++ */
++struct dpsw_tci_cfg {
++	u8 pcp;
++	u8 dei;
++	u16 vlan_id;
++};
++
++int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
++		    u32 cmd_flags,
++		    u16 token,
++		    u16 if_id,
++		    const struct dpsw_tci_cfg *cfg);
++
++int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
++		    u32 cmd_flags,
++		    u16 token,
++		    u16 if_id,
++		    struct dpsw_tci_cfg *cfg);
++
++/**
++ * enum dpsw_stp_state - Spanning Tree Protocol (STP) states
++ * @DPSW_STP_STATE_BLOCKING: Blocking state
++ * @DPSW_STP_STATE_LISTENING: Listening state
++ * @DPSW_STP_STATE_LEARNING: Learning state
++ * @DPSW_STP_STATE_FORWARDING: Forwarding state
++ *
++ */
++enum dpsw_stp_state {
++	DPSW_STP_STATE_BLOCKING = 0,
++	DPSW_STP_STATE_LISTENING = 1,
++	DPSW_STP_STATE_LEARNING = 2,
++	DPSW_STP_STATE_FORWARDING = 3
++};
++
++/**
++ * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration
++ * @vlan_id: VLAN ID STP state
++ * @state: STP state
++ */
++struct dpsw_stp_cfg {
++	u16 vlan_id;
++	enum dpsw_stp_state state;
++};
++
++int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
++		    u32 cmd_flags,
++		    u16 token,
++		    u16 if_id,
++		    const struct dpsw_stp_cfg *cfg);
++
++/**
++ * enum dpsw_accepted_frames - Types of frames to accept
++ * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and
++ *	priority tagged frames
++ * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
++ *	Priority-Tagged frames received on this interface.
++ *
++ */
++enum dpsw_accepted_frames {
++	DPSW_ADMIT_ALL = 1,
++	DPSW_ADMIT_ONLY_VLAN_TAGGED = 3
++};
++
++/**
++ * struct dpsw_accepted_frames_cfg - Types of frames to accept configuration
++ * @type: Defines ingress accepted frames
++ * @unaccept_act: When a frame is not accepted, it may be discarded or
++ *	redirected to control interface depending on this mode
++ */
++struct dpsw_accepted_frames_cfg {
++	enum dpsw_accepted_frames type;
++	enum dpsw_action unaccept_act;
++};
++
++int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
++				u32 cmd_flags,
++				u16 token,
++				u16 if_id,
++				const struct dpsw_accepted_frames_cfg *cfg);
++
++int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
++				u32 cmd_flags,
++				u16 token,
++				u16 if_id,
++				int accept_all);
++
++/**
++ * enum dpsw_counter - Counters types
++ * @DPSW_CNT_ING_FRAME: Counts ingress frames
++ * @DPSW_CNT_ING_BYTE: Counts ingress bytes
++ * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
++ * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
++ * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
++ * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
++ * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
++ * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
++ * @DPSW_CNT_EGR_FRAME: Counts egress frames
++ * @DPSW_CNT_EGR_BYTE: Counts egress bytes
++ * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
++ * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames
++ */
++enum dpsw_counter {
++	DPSW_CNT_ING_FRAME = 0x0,
++	DPSW_CNT_ING_BYTE = 0x1,
++	DPSW_CNT_ING_FLTR_FRAME = 0x2,
++	DPSW_CNT_ING_FRAME_DISCARD = 0x3,
++	DPSW_CNT_ING_MCAST_FRAME = 0x4,
++	DPSW_CNT_ING_MCAST_BYTE = 0x5,
++	DPSW_CNT_ING_BCAST_FRAME = 0x6,
++	DPSW_CNT_ING_BCAST_BYTES = 0x7,
++	DPSW_CNT_EGR_FRAME = 0x8,
++	DPSW_CNT_EGR_BYTE = 0x9,
++	DPSW_CNT_EGR_FRAME_DISCARD = 0xa,
++	DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb
++};
++
++int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
++			u32 cmd_flags,
++			u16 token,
++			u16 if_id,
++			enum dpsw_counter type,
++			u64 *counter);
++
++int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
++			u32 cmd_flags,
++			u16 token,
++			u16 if_id,
++			enum dpsw_counter type,
++			u64 counter);
++
++/**
++ * Maximum number of TC
++ */
++#define DPSW_MAX_TC	8
++
++/**
++ * enum dpsw_priority_selector - User priority
++ * @DPSW_UP_PCP: Priority Code Point (PCP): a 3-bit field which
++ *	refers to the IEEE 802.1p priority.
++ * @DPSW_UP_DSCP: Differentiated services Code Point (DSCP): 6 bit
++ *	field from IP header
++ *
++ */
++enum dpsw_priority_selector {
++	DPSW_UP_PCP = 0,
++	DPSW_UP_DSCP = 1
++};
++
++/**
++ * enum dpsw_schedule_mode - Traffic classes scheduling
++ * @DPSW_SCHED_STRICT_PRIORITY: schedule strict priority
++ * @DPSW_SCHED_WEIGHTED: weighted scheduling based on a token bucket
++ *	algorithm
++ */
++enum dpsw_schedule_mode {
++	DPSW_SCHED_STRICT_PRIORITY,
++	DPSW_SCHED_WEIGHTED
++};
++
++/**
++ * struct dpsw_tx_schedule_cfg - traffic class configuration
++ * @mode: Strict or weight-based scheduling
++ * @delta_bandwidth: Weighted bandwidth in range from 100 to 10000
++ */
++struct dpsw_tx_schedule_cfg {
++	enum dpsw_schedule_mode mode;
++	u16 delta_bandwidth;
++};
++
++/**
++ * struct dpsw_tx_selection_cfg - Mapping user priority into traffic
++ *	class configuration
++ * @priority_selector: Source for user priority regeneration
++ * @tc_id: The regenerated user priority that the incoming
++ *	user priority is mapped to for this interface
++ * @tc_sched: Traffic classes configuration
++ */
++struct dpsw_tx_selection_cfg {
++	enum dpsw_priority_selector priority_selector;
++	u8 tc_id[DPSW_MAX_PRIORITIES];
++	struct dpsw_tx_schedule_cfg tc_sched[DPSW_MAX_TC];
++};
++
++int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
++			     u32 cmd_flags,
++			     u16 token,
++			     u16 if_id,
++			     const struct dpsw_tx_selection_cfg *cfg);
++
++/**
++ * enum dpsw_reflection_filter - Filter type for frames to reflect
++ * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames
++ * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames belonging to
++ *	the particular VLAN defined by the vid parameter
++ *
++ */
++enum dpsw_reflection_filter {
++	DPSW_REFLECTION_FILTER_INGRESS_ALL = 0,
++	DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1
++};
++
++/**
++ * struct dpsw_reflection_cfg - Structure representing reflection information
++ * @filter: Filter type for frames to reflect
++ * @vlan_id: VLAN ID to reflect; valid only when filter type is
++ *	DPSW_INGRESS_VLAN
++ */
++struct dpsw_reflection_cfg {
++	enum dpsw_reflection_filter filter;
++	u16 vlan_id;
++};
++
++int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
++			   u32 cmd_flags,
++			   u16 token,
++			   u16 if_id,
++			   const struct dpsw_reflection_cfg *cfg);
++
++int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
++			      u32 cmd_flags,
++			      u16 token,
++			      u16 if_id,
++			      const struct dpsw_reflection_cfg *cfg);
++
++/**
++ * enum dpsw_metering_mode - Metering modes
++ * @DPSW_METERING_MODE_NONE: metering disabled
++ * @DPSW_METERING_MODE_RFC2698: RFC 2698
++ * @DPSW_METERING_MODE_RFC4115: RFC 4115
++ */
++enum dpsw_metering_mode {
++	DPSW_METERING_MODE_NONE = 0,
++	DPSW_METERING_MODE_RFC2698,
++	DPSW_METERING_MODE_RFC4115
++};
++
++/**
++ * enum dpsw_metering_unit - Metering count
++ * @DPSW_METERING_UNIT_BYTES: count bytes
++ * @DPSW_METERING_UNIT_FRAMES: count frames
++ */
++enum dpsw_metering_unit {
++	DPSW_METERING_UNIT_BYTES = 0,
++	DPSW_METERING_UNIT_FRAMES
++};
++
++/**
++ * struct dpsw_metering_cfg - Metering configuration
++ * @mode: metering modes
++ * @units: Bytes or frame units
++ * @cir: Committed information rate (CIR) in Kbits/s
++ * @eir: Peak information rate (PIR) Kbit/s rfc2698
++ *	Excess information rate (EIR) Kbit/s rfc4115
++ * @cbs: Committed burst size (CBS) in bytes
++ * @ebs: Peak burst size (PBS) in bytes for rfc2698
++ *	Excess burst size (EBS) in bytes rfc4115
++ *
++ */
++struct dpsw_metering_cfg {
++	enum dpsw_metering_mode mode;
++	enum dpsw_metering_unit units;
++	u32 cir;
++	u32 eir;
++	u32 cbs;
++	u32 ebs;
++};
++
++int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
++				  u32 cmd_flags,
++				  u16 token,
++				  u16 if_id,
++				  const struct dpsw_metering_cfg *cfg);
++
++int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
++			 u32 cmd_flags,
++			 u16 token,
++			 u16 if_id,
++			 u8 tc_id,
++			 const struct dpsw_metering_cfg *cfg);
++
++/**
++ * enum dpsw_early_drop_unit - DPSW early drop unit
++ * @DPSW_EARLY_DROP_UNIT_BYTE: count bytes
++ * @DPSW_EARLY_DROP_UNIT_FRAMES: count frames
++ */
++enum dpsw_early_drop_unit {
++	DPSW_EARLY_DROP_UNIT_BYTE = 0,
++	DPSW_EARLY_DROP_UNIT_FRAMES
++};
++
++/**
++ * enum dpsw_early_drop_mode - DPSW early drop mode
++ * @DPSW_EARLY_DROP_MODE_NONE: early drop is disabled
++ * @DPSW_EARLY_DROP_MODE_TAIL: early drop in taildrop mode
++ * @DPSW_EARLY_DROP_MODE_WRED: early drop in WRED mode
++ */
++enum dpsw_early_drop_mode {
++	DPSW_EARLY_DROP_MODE_NONE = 0,
++	DPSW_EARLY_DROP_MODE_TAIL,
++	DPSW_EARLY_DROP_MODE_WRED
++};
++
++/**
++ * struct dpsw_wred_cfg - WRED configuration
++ * @max_threshold: maximum threshold that packets may be discarded. Above this
++ *	threshold all packets are discarded; must be less than 2^39;
++ *	approximated to be expressed as (x+256)*2^(y-1) due to HW
++ *	implementation.
++ * @min_threshold: minimum threshold that packets may be discarded at
++ * @drop_probability: probability that a packet will be discarded (1-100,
++ *	associated with the maximum threshold)
++ */
++struct dpsw_wred_cfg {
++	u64 min_threshold;
++	u64 max_threshold;
++	u8 drop_probability;
++};
++
++/**
++ * struct dpsw_early_drop_cfg - early-drop configuration
++ * @drop_mode: drop mode
++ * @units: count units
++ * @yellow: WRED - 'yellow' configuration
++ * @green: WRED - 'green' configuration
++ * @tail_drop_threshold: tail drop threshold
++ */
++struct dpsw_early_drop_cfg {
++	enum dpsw_early_drop_mode drop_mode;
++	enum dpsw_early_drop_unit units;
++	struct dpsw_wred_cfg yellow;
++	struct dpsw_wred_cfg green;
++	u32 tail_drop_threshold;
++};
++
++void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
++			     u8 *early_drop_buf);
++
++int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
++			   u32 cmd_flags,
++			   u16 token,
++			   u16 if_id,
++			   u8 tc_id,
++			   u64 early_drop_iova);
++
++/**
++ * struct dpsw_custom_tpid_cfg - Structure representing a Tag Protocol
++ *	Identifier
++ * @tpid: An additional tag protocol identifier
++ */
++struct dpsw_custom_tpid_cfg {
++	u16 tpid;
++};
++
++int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
++			 u32 cmd_flags,
++			 u16 token,
++			 const struct dpsw_custom_tpid_cfg *cfg);
++
++int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
++			    u32 cmd_flags,
++			    u16 token,
++			    const struct dpsw_custom_tpid_cfg *cfg);
++
++int dpsw_if_enable(struct fsl_mc_io *mc_io,
++		   u32 cmd_flags,
++		   u16 token,
++		   u16 if_id);
++
++int dpsw_if_disable(struct fsl_mc_io *mc_io,
++		    u32 cmd_flags,
++		    u16 token,
++		    u16 if_id);
++
++/**
++ * struct dpsw_if_attr - Structure representing DPSW interface attributes
++ * @num_tcs: Number of traffic classes
++ * @rate: Transmit rate in bits per
second ++ * @options: Interface configuration options (bitmap) ++ * @enabled: Indicates if interface is enabled ++ * @accept_all_vlan: The device discards/accepts incoming frames ++ * for VLANs that do not include this interface ++ * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device ++ * discards untagged frames or priority-tagged frames received on ++ * this interface; ++ * When set to 'DPSW_ADMIT_ALL', untagged frames or priority- ++ * tagged frames received on this interface are accepted ++ * @qdid: control frames transmit qdid ++ */ ++struct dpsw_if_attr { ++ u8 num_tcs; ++ u32 rate; ++ u32 options; ++ int enabled; ++ int accept_all_vlan; ++ enum dpsw_accepted_frames admit_untagged; ++ u16 qdid; ++}; ++ ++int dpsw_if_get_attributes(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ struct dpsw_if_attr *attr); ++ ++int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ u16 frame_length); ++ ++int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ u16 *frame_length); ++ ++/** ++ * struct dpsw_vlan_cfg - VLAN Configuration ++ * @fdb_id: Forwarding Data Base ++ */ ++struct dpsw_vlan_cfg { ++ u16 fdb_id; ++}; ++ ++int dpsw_vlan_add(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 vlan_id, ++ const struct dpsw_vlan_cfg *cfg); ++ ++/** ++ * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces ++ * @num_ifs: The number of interfaces that are assigned to the egress ++ * list for this VLAN ++ * @if_id: The set of interfaces that are ++ * assigned to the egress list for this VLAN ++ */ ++struct dpsw_vlan_if_cfg { ++ u16 num_ifs; ++ u16 if_id[DPSW_MAX_IF]; ++}; ++ ++int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 vlan_id, ++ const struct dpsw_vlan_if_cfg *cfg); ++ ++int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 vlan_id, ++ const struct dpsw_vlan_if_cfg *cfg); ++ ++int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 vlan_id, ++ const struct dpsw_vlan_if_cfg *cfg); ++ ++int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 vlan_id, ++ const struct dpsw_vlan_if_cfg *cfg); ++ ++int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 vlan_id, ++ const struct dpsw_vlan_if_cfg *cfg); ++ ++int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 vlan_id, ++ const struct dpsw_vlan_if_cfg *cfg); ++ ++int dpsw_vlan_remove(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 vlan_id); ++ ++/** ++ * struct dpsw_vlan_attr - VLAN attributes ++ * @fdb_id: Associated FDB ID ++ * @num_ifs: Number of interfaces ++ * @num_untagged_ifs: Number of untagged interfaces ++ * @num_flooding_ifs: Number of flooding interfaces ++ */ ++struct dpsw_vlan_attr { ++ u16 fdb_id; ++ u16 num_ifs; ++ u16 num_untagged_ifs; ++ u16 num_flooding_ifs; ++}; ++ ++int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 vlan_id, ++ struct dpsw_vlan_attr *attr); ++ ++int dpsw_vlan_get_if(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 vlan_id, ++ struct dpsw_vlan_if_cfg *cfg); ++ ++int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 vlan_id, ++ struct dpsw_vlan_if_cfg *cfg); ++ ++int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io, ++ u32 
cmd_flags, ++ u16 token, ++ u16 vlan_id, ++ struct dpsw_vlan_if_cfg *cfg); ++ ++/** ++ * struct dpsw_fdb_cfg - FDB Configuration ++ * @num_fdb_entries: Number of FDB entries ++ * @fdb_aging_time: Aging time in seconds ++ */ ++struct dpsw_fdb_cfg { ++ u16 num_fdb_entries; ++ u16 fdb_aging_time; ++}; ++ ++int dpsw_fdb_add(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 *fdb_id, ++ const struct dpsw_fdb_cfg *cfg); ++ ++int dpsw_fdb_remove(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 fdb_id); ++ ++/** ++ * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic ++ * @DPSW_FDB_ENTRY_STATIC: Static entry ++ * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry ++ */ ++enum dpsw_fdb_entry_type { ++ DPSW_FDB_ENTRY_STATIC = 0, ++ DPSW_FDB_ENTRY_DINAMIC = 1 ++}; ++ ++/** ++ * struct dpsw_fdb_unicast_cfg - Unicast entry configuration ++ * @type: Select static or dynamic entry ++ * @mac_addr: MAC address ++ * @if_egress: Egress interface ID ++ */ ++struct dpsw_fdb_unicast_cfg { ++ enum dpsw_fdb_entry_type type; ++ u8 mac_addr[6]; ++ u16 if_egress; ++}; ++ ++int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 fdb_id, ++ const struct dpsw_fdb_unicast_cfg *cfg); ++ ++int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 fdb_id, ++ struct dpsw_fdb_unicast_cfg *cfg); ++ ++int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 fdb_id, ++ const struct dpsw_fdb_unicast_cfg *cfg); ++ ++/** ++ * struct dpsw_fdb_multicast_cfg - Multicast entry configuration ++ * @type: Select static or dynamic entry ++ * @mac_addr: MAC address ++ * @num_ifs: Number of external and internal interfaces ++ * @if_id: Egress interface IDs ++ */ ++struct dpsw_fdb_multicast_cfg { ++ enum dpsw_fdb_entry_type type; ++ u8 mac_addr[6]; ++ u16 num_ifs; ++ u16 if_id[DPSW_MAX_IF]; ++}; ++ ++int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 fdb_id, ++ const struct dpsw_fdb_multicast_cfg *cfg); ++ ++int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 fdb_id, ++ struct dpsw_fdb_multicast_cfg *cfg); ++ ++int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 fdb_id, ++ const struct dpsw_fdb_multicast_cfg *cfg); ++ ++/** ++ * enum dpsw_fdb_learning_mode - Auto-learning modes ++ * @DPSW_FDB_LEARNING_MODE_DIS: Disable Auto-learning ++ * @DPSW_FDB_LEARNING_MODE_HW: Enable HW auto-learning ++ * @DPSW_FDB_LEARNING_MODE_NON_SECURE: Enable non-secure learning by CPU ++ * @DPSW_FDB_LEARNING_MODE_SECURE: Enable secure learning by CPU ++ * ++ * NON-SECURE LEARNING ++ * SMAC found DMAC found CTLU Action ++ * v v Forward frame to ++ * 1. DMAC destination ++ * - v Forward frame to ++ * 1. DMAC destination ++ * 2. Control interface ++ * v - Forward frame to ++ * 1. Flooding list of interfaces ++ * - - Forward frame to ++ * 1. Flooding list of interfaces ++ * 2. Control interface ++ * SECURE LEARNING ++ * SMAC found DMAC found CTLU Action ++ * v v Forward frame to ++ * 1. DMAC destination ++ * - v Forward frame to ++ * 1. Control interface ++ * v - Forward frame to ++ * 1. Flooding list of interfaces ++ * - - Forward frame to ++ * 1.
Control interface ++ */ ++enum dpsw_fdb_learning_mode { ++ DPSW_FDB_LEARNING_MODE_DIS = 0, ++ DPSW_FDB_LEARNING_MODE_HW = 1, ++ DPSW_FDB_LEARNING_MODE_NON_SECURE = 2, ++ DPSW_FDB_LEARNING_MODE_SECURE = 3 ++}; ++ ++int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 fdb_id, ++ enum dpsw_fdb_learning_mode mode); ++ ++/** ++ * struct dpsw_fdb_attr - FDB Attributes ++ * @max_fdb_entries: Number of FDB entries ++ * @fdb_aging_time: Aging time in seconds ++ * @learning_mode: Learning mode ++ * @num_fdb_mc_groups: Current number of multicast groups ++ * @max_fdb_mc_groups: Maximum number of multicast groups ++ */ ++struct dpsw_fdb_attr { ++ u16 max_fdb_entries; ++ u16 fdb_aging_time; ++ enum dpsw_fdb_learning_mode learning_mode; ++ u16 num_fdb_mc_groups; ++ u16 max_fdb_mc_groups; ++}; ++ ++int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 fdb_id, ++ struct dpsw_fdb_attr *attr); ++ ++/** ++ * struct dpsw_acl_cfg - ACL Configuration ++ * @max_entries: Number of FDB entries ++ */ ++struct dpsw_acl_cfg { ++ u16 max_entries; ++}; ++ ++/** ++ * struct dpsw_acl_fields - ACL fields. ++ * @l2_dest_mac: Destination MAC address: BPDU, Multicast, Broadcast, Unicast, ++ * slow protocols, MVRP, STP ++ * @l2_source_mac: Source MAC address ++ * @l2_tpid: Layer 2 (Ethernet) protocol type, used to identify the following ++ * protocols: MPLS, PTP, PFC, ARP, Jumbo frames, LLDP, IEEE802.1ae, ++ * Q-in-Q, IPv4, IPv6, PPPoE ++ * @l2_pcp_dei: indicate which protocol is encapsulated in the payload ++ * @l2_vlan_id: layer 2 VLAN ID ++ * @l2_ether_type: layer 2 Ethernet type ++ * @l3_dscp: Layer 3 differentiated services code point ++ * @l3_protocol: Tells the Network layer at the destination host, to which ++ * Protocol this packet belongs to. The following protocol are ++ * supported: ICMP, IGMP, IPv4 (encapsulation), TCP, IPv6 ++ * (encapsulation), GRE, PTP ++ * @l3_source_ip: Source IPv4 IP ++ * @l3_dest_ip: Destination IPv4 IP ++ * @l4_source_port: Source TCP/UDP Port ++ * @l4_dest_port: Destination TCP/UDP Port ++ */ ++struct dpsw_acl_fields { ++ u8 l2_dest_mac[6]; ++ u8 l2_source_mac[6]; ++ u16 l2_tpid; ++ u8 l2_pcp_dei; ++ u16 l2_vlan_id; ++ u16 l2_ether_type; ++ u8 l3_dscp; ++ u8 l3_protocol; ++ u32 l3_source_ip; ++ u32 l3_dest_ip; ++ u16 l4_source_port; ++ u16 l4_dest_port; ++}; ++ ++/** ++ * struct dpsw_acl_key - ACL key ++ * @match: Match fields ++ * @mask: Mask: b'1 - valid, b'0 don't care ++ */ ++struct dpsw_acl_key { ++ struct dpsw_acl_fields match; ++ struct dpsw_acl_fields mask; ++}; ++ ++/** ++ * enum dpsw_acl_action ++ * @DPSW_ACL_ACTION_DROP: Drop frame ++ * @DPSW_ACL_ACTION_REDIRECT: Redirect to certain port ++ * @DPSW_ACL_ACTION_ACCEPT: Accept frame ++ * @DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF: Redirect to control interface ++ */ ++enum dpsw_acl_action { ++ DPSW_ACL_ACTION_DROP, ++ DPSW_ACL_ACTION_REDIRECT, ++ DPSW_ACL_ACTION_ACCEPT, ++ DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF ++}; ++ ++/** ++ * struct dpsw_acl_result - ACL action ++ * @action: Action should be taken when ACL entry hit ++ * @if_id: Interface IDs to redirect frame. 
Valid only if redirect selected for ++ * action ++ */ ++struct dpsw_acl_result { ++ enum dpsw_acl_action action; ++ u16 if_id; ++}; ++ ++/** ++ * struct dpsw_acl_entry_cfg - ACL entry ++ * @key_iova: I/O virtual address of DMA-able memory filled with key after call ++ * to dpsw_acl_prepare_entry_cfg() ++ * @result: Required action when entry hit occurs ++ * @precedence: Precedence inside ACL 0 is lowest; This priority can not change ++ * during the lifetime of a Policy. It is user responsibility to ++ * space the priorities according to consequent rule additions. ++ */ ++struct dpsw_acl_entry_cfg { ++ u64 key_iova; ++ struct dpsw_acl_result result; ++ int precedence; ++}; ++ ++int dpsw_acl_add(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 *acl_id, ++ const struct dpsw_acl_cfg *cfg); ++ ++int dpsw_acl_remove(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 acl_id); ++ ++void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key, ++ uint8_t *entry_cfg_buf); ++ ++int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 acl_id, ++ const struct dpsw_acl_entry_cfg *cfg); ++ ++int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 acl_id, ++ const struct dpsw_acl_entry_cfg *cfg); ++ ++/** ++ * struct dpsw_acl_if_cfg - List of interfaces to Associate with ACL ++ * @num_ifs: Number of interfaces ++ * @if_id: List of interfaces ++ */ ++struct dpsw_acl_if_cfg { ++ u16 num_ifs; ++ u16 if_id[DPSW_MAX_IF]; ++}; ++ ++int dpsw_acl_add_if(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 acl_id, ++ const struct dpsw_acl_if_cfg *cfg); ++ ++int dpsw_acl_remove_if(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 acl_id, ++ const struct dpsw_acl_if_cfg *cfg); ++ ++/** ++ * struct dpsw_acl_attr - ACL Attributes ++ * @max_entries: Max number of ACL entries ++ * @num_entries: Number of used ACL entries ++ * @num_ifs: Number of interfaces associated with ACL ++ */ ++struct dpsw_acl_attr { ++ u16 max_entries; ++ u16 num_entries; ++ u16 num_ifs; ++}; ++ ++int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 acl_id, ++ struct dpsw_acl_attr *attr); ++/** ++ * struct dpsw_ctrl_if_attr - Control interface attributes ++ * @rx_fqid: Receive FQID ++ * @rx_err_fqid: Receive error FQID ++ * @tx_err_conf_fqid: Transmit error and confirmation FQID ++ */ ++struct dpsw_ctrl_if_attr { ++ u32 rx_fqid; ++ u32 rx_err_fqid; ++ u32 tx_err_conf_fqid; ++}; ++ ++int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dpsw_ctrl_if_attr *attr); ++ ++/** ++ * Maximum number of DPBP ++ */ ++#define DPSW_MAX_DPBP 8 ++ ++/** ++ * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration ++ * @num_dpbp: Number of DPBPs ++ * @pools: Array of buffer pools parameters; The number of valid entries ++ * must match 'num_dpbp' value ++ */ ++struct dpsw_ctrl_if_pools_cfg { ++ u8 num_dpbp; ++ /** ++ * struct pools - Buffer pools parameters ++ * @dpbp_id: DPBP object ID ++ * @buffer_size: Buffer size ++ * @backup_pool: Backup pool ++ */ ++ struct { ++ int dpbp_id; ++ u16 buffer_size; ++ int backup_pool; ++ } pools[DPSW_MAX_DPBP]; ++}; ++ ++int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ const struct dpsw_ctrl_if_pools_cfg *cfg); ++ ++int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ ++int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ 
u16 token); ++ ++int dpsw_get_api_version(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 *major_ver, ++ u16 *minor_ver); ++ ++#endif /* __FSL_DPSW_H */ +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethsw/switch.c +@@ -0,0 +1,1857 @@ ++/* Copyright 2014-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include <linux/module.h> ++#include <linux/msi.h> ++ ++#include <linux/netdevice.h> ++#include <linux/etherdevice.h> ++#include <linux/rtnetlink.h> ++#include <linux/if_vlan.h> ++ ++#include <uapi/linux/if_bridge.h> ++#include <net/netlink.h> ++ ++#include "../../fsl-mc/include/mc.h" ++#include "dpsw.h" ++#include "dpsw-cmd.h" ++ ++static const char ethsw_drv_version[] = "0.1"; ++ ++/* Minimal supported DPSW version */ ++#define DPSW_MIN_VER_MAJOR 8 ++#define DPSW_MIN_VER_MINOR 0 ++ ++/* Number of IRQs */ ++#define DPSW_MAX_IRQ_NUM 2 ++ ++#define ETHSW_VLAN_MEMBER 1 ++#define ETHSW_VLAN_UNTAGGED 2 ++#define ETHSW_VLAN_PVID 4 ++#define ETHSW_VLAN_GLOBAL 8 ++ ++/* Maximum Frame Length supported by HW (currently 10k) */ ++#define DPAA2_MFL (10 * 1024) ++#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN) ++#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN) ++ ++struct ethsw_port_priv { ++ struct net_device *netdev; ++ struct list_head list; ++ u16 port_index; ++ struct ethsw_dev_priv *ethsw_priv; ++ u8 stp_state; ++ ++ char vlans[VLAN_VID_MASK + 1]; ++ ++}; ++ ++struct ethsw_dev_priv { ++ struct net_device *netdev; ++ struct fsl_mc_io *mc_io; ++ u16 dpsw_handle; ++ struct dpsw_attr sw_attr; ++ int dev_id; ++ /* TODO: redundant, we can use the slave dev list */ ++ struct list_head port_list; ++ ++ bool flood; ++ bool learning; ++ ++ char vlans[VLAN_VID_MASK + 1]; ++}; ++ ++static int ethsw_port_stop(struct net_device *netdev); ++static int ethsw_port_open(struct net_device *netdev); ++ ++static inline void __get_priv(struct net_device *netdev, ++ struct ethsw_dev_priv **priv, ++ struct ethsw_port_priv **port_priv) ++{ ++ struct ethsw_dev_priv *_priv = NULL; ++ struct ethsw_port_priv *_port_priv = NULL; ++ ++ if (netdev->flags & IFF_MASTER) { ++ _priv = netdev_priv(netdev); ++ } else { ++ _port_priv = netdev_priv(netdev); ++ _priv = _port_priv->ethsw_priv; ++ } ++ ++ if (priv) ++ *priv = _priv; ++ if (port_priv) ++ *port_priv = _port_priv; ++} ++ ++/* -------------------------------------------------------------------------- */ ++/* ethsw netdevice ops */ ++ ++static netdev_tx_t ethsw_dropframe(struct sk_buff *skb, struct net_device *dev) ++{ ++ /* we don't support I/O for now, drop the frame */ ++ dev_kfree_skb_any(skb); ++ return NETDEV_TX_OK; ++} ++ ++static int ethsw_open(struct net_device *netdev) ++{ ++ struct ethsw_dev_priv *priv = netdev_priv(netdev); ++ struct list_head *pos; ++ struct ethsw_port_priv *port_priv = NULL; ++ int err; ++ ++ err = dpsw_enable(priv->mc_io, 0, priv->dpsw_handle); ++ if (err) { ++ netdev_err(netdev, "dpsw_enable err %d\n", err); ++ return err; ++ } ++ ++ list_for_each(pos, &priv->port_list) { ++ port_priv = list_entry(pos, struct ethsw_port_priv, list); ++ err = dev_open(port_priv->netdev); ++ if (err) ++ netdev_err(port_priv->netdev, "dev_open err %d\n", err); ++ } ++ ++ return 0; ++} ++ ++static int ethsw_stop(struct net_device *netdev) ++{ ++ struct ethsw_dev_priv *priv = netdev_priv(netdev); ++ struct list_head *pos; ++ struct ethsw_port_priv *port_priv = NULL; ++ int err; ++ ++ err = dpsw_disable(priv->mc_io, 0, priv->dpsw_handle); ++ if (err) { ++ netdev_err(netdev, "dpsw_disable err %d\n", err); ++ return err; ++ } ++ ++ list_for_each(pos, &priv->port_list) { ++ port_priv = list_entry(pos, struct ethsw_port_priv, list); ++ err = dev_close(port_priv->netdev); ++ if (err) ++ netdev_err(port_priv->netdev, ++ "dev_close err %d\n", err); ++ } ++ ++ return 0; ++} ++ ++static int ethsw_add_vlan(struct net_device *netdev, u16 vid) ++{ ++ struct ethsw_dev_priv *priv = netdev_priv(netdev); ++ int err; ++ ++ struct dpsw_vlan_cfg vcfg = { ++ /* TODO: add support for
VLAN private FDBs */ ++ .fdb_id = 0, ++ }; ++ if (priv->vlans[vid]) { ++ netdev_err(netdev, "VLAN already configured\n"); ++ return -EEXIST; ++ } ++ ++ err = dpsw_vlan_add(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg); ++ if (err) { ++ netdev_err(netdev, "dpsw_vlan_add err %d\n", err); ++ return err; ++ } ++ priv->vlans[vid] = ETHSW_VLAN_MEMBER; ++ ++ return 0; ++} ++ ++static int ethsw_port_add_vlan(struct net_device *netdev, u16 vid, u16 flags) ++{ ++ struct ethsw_port_priv *port_priv = netdev_priv(netdev); ++ struct ethsw_dev_priv *priv = port_priv->ethsw_priv; ++ int err; ++ ++ struct dpsw_vlan_if_cfg vcfg = { ++ .num_ifs = 1, ++ .if_id[0] = port_priv->port_index, ++ }; ++ ++ if (port_priv->vlans[vid]) { ++ netdev_err(netdev, "VLAN already configured\n"); ++ return -EEXIST; ++ } ++ ++ if (flags & BRIDGE_VLAN_INFO_PVID && netif_oper_up(netdev)) { ++ netdev_err(netdev, "interface must be down to change PVID!\n"); ++ return -EBUSY; ++ } ++ ++ err = dpsw_vlan_add_if(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg); ++ if (err) { ++ netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err); ++ return err; ++ } ++ port_priv->vlans[vid] = ETHSW_VLAN_MEMBER; ++ ++ if (flags & BRIDGE_VLAN_INFO_UNTAGGED) { ++ err = dpsw_vlan_add_if_untagged(priv->mc_io, 0, ++ priv->dpsw_handle, vid, &vcfg); ++ if (err) { ++ netdev_err(netdev, "dpsw_vlan_add_if_untagged err %d\n", ++ err); ++ return err; ++ } ++ port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED; ++ } ++ ++ if (flags & BRIDGE_VLAN_INFO_PVID) { ++ struct dpsw_tci_cfg tci_cfg = { ++ /* TODO: at least add better defaults if these cannot ++ * be configured ++ */ ++ .pcp = 0, ++ .dei = 0, ++ .vlan_id = vid, ++ }; ++ ++ err = dpsw_if_set_tci(priv->mc_io, 0, priv->dpsw_handle, ++ port_priv->port_index, &tci_cfg); ++ if (err) { ++ netdev_err(netdev, "dpsw_if_set_tci err %d\n", err); ++ return err; ++ } ++ port_priv->vlans[vid] |= ETHSW_VLAN_PVID; ++ } ++ ++ return 0; ++} ++ ++static const struct nla_policy ifla_br_policy[IFLA_MAX + 1] = { ++ [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 }, ++ [IFLA_BRIDGE_MODE] = { .type = NLA_U16 }, ++ [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY, ++ .len = sizeof(struct bridge_vlan_info), }, ++}; ++ ++static int ethsw_setlink_af_spec(struct net_device *netdev, ++ struct nlattr **tb) ++{ ++ struct bridge_vlan_info *vinfo; ++ struct ethsw_dev_priv *priv = NULL; ++ struct ethsw_port_priv *port_priv = NULL; ++ int err = 0; ++ ++ if (!tb[IFLA_BRIDGE_VLAN_INFO]) { ++ netdev_err(netdev, "no VLAN INFO in nlmsg\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]); ++ ++ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK) ++ return -EINVAL; ++ ++ __get_priv(netdev, &priv, &port_priv); ++ ++ if (!port_priv || !priv->vlans[vinfo->vid]) { ++ /* command targets switch device or this is a new VLAN */ ++ err = ethsw_add_vlan(priv->netdev, vinfo->vid); ++ if (err) ++ return err; ++ ++ /* command targets switch device; mark it*/ ++ if (!port_priv) ++ priv->vlans[vinfo->vid] |= ETHSW_VLAN_GLOBAL; ++ } ++ ++ if (port_priv) { ++ /* command targets switch port */ ++ err = ethsw_port_add_vlan(netdev, vinfo->vid, vinfo->flags); ++ if (err) ++ return err; ++ } ++ ++ return 0; ++} ++ ++static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = { ++ [IFLA_BRPORT_STATE] = { .type = NLA_U8 }, ++ [IFLA_BRPORT_COST] = { .type = NLA_U32 }, ++ [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 }, ++ [IFLA_BRPORT_MODE] = { .type = NLA_U8 }, ++ [IFLA_BRPORT_GUARD] = { .type = NLA_U8 }, ++ [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 }, ++ 
[IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, ++ [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, ++}; ++ ++static int ethsw_set_learning(struct net_device *netdev, u8 flag) ++{ ++ struct ethsw_dev_priv *priv = netdev_priv(netdev); ++ enum dpsw_fdb_learning_mode learn_mode; ++ int err; ++ ++ if (flag) ++ learn_mode = DPSW_FDB_LEARNING_MODE_HW; ++ else ++ learn_mode = DPSW_FDB_LEARNING_MODE_DIS; ++ ++ err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle, ++ 0, learn_mode); ++ if (err) { ++ netdev_err(netdev, "dpsw_fdb_set_learning_mode err %d\n", err); ++ return err; ++ } ++ priv->learning = !!flag; ++ ++ return 0; ++} ++ ++static int ethsw_port_set_flood(struct net_device *netdev, u8 flag) ++{ ++ struct ethsw_port_priv *port_priv = netdev_priv(netdev); ++ struct ethsw_dev_priv *priv = port_priv->ethsw_priv; ++ int err; ++ ++ err = dpsw_if_set_flooding(priv->mc_io, 0, priv->dpsw_handle, ++ port_priv->port_index, (int)flag); ++ if (err) { ++ netdev_err(netdev, "dpsw_if_set_flooding err %d\n", err); ++ return err; ++ } ++ priv->flood = !!flag; ++ ++ return 0; ++} ++ ++static int ethsw_port_set_state(struct net_device *netdev, u8 state) ++{ ++ struct ethsw_port_priv *port_priv = netdev_priv(netdev); ++ struct ethsw_dev_priv *priv = port_priv->ethsw_priv; ++ u8 old_state = port_priv->stp_state; ++ int err; ++ ++ struct dpsw_stp_cfg stp_cfg = { ++ .vlan_id = 1, ++ .state = state, ++ }; ++ /* TODO: check port state, interface may be down */ ++ ++ if (state > BR_STATE_BLOCKING) ++ return -EINVAL; ++ ++ if (state == port_priv->stp_state) ++ return 0; ++ ++ if (state == BR_STATE_DISABLED) { ++ port_priv->stp_state = state; ++ ++ err = ethsw_port_stop(netdev); ++ if (err) ++ goto error; ++ } else { ++ err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle, ++ port_priv->port_index, &stp_cfg); ++ if (err) { ++ netdev_err(netdev, "dpsw_if_set_stp err %d\n", err); ++ return err; ++ } ++ ++ port_priv->stp_state = state; ++ ++ if (old_state == BR_STATE_DISABLED) { ++ err = ethsw_port_open(netdev); ++ if (err) ++ goto error; ++ } ++ } ++ ++ return 0; ++error: ++ port_priv->stp_state = old_state; ++ return err; ++} ++ ++static int ethsw_setlink_protinfo(struct net_device *netdev, ++ struct nlattr **tb) ++{ ++ struct ethsw_dev_priv *priv; ++ struct ethsw_port_priv *port_priv = NULL; ++ int err = 0; ++ ++ __get_priv(netdev, &priv, &port_priv); ++ ++ if (tb[IFLA_BRPORT_LEARNING]) { ++ u8 flag = nla_get_u8(tb[IFLA_BRPORT_LEARNING]); ++ ++ if (port_priv) ++ netdev_warn(netdev, ++ "learning set on whole switch dev\n"); ++ ++ err = ethsw_set_learning(priv->netdev, flag); ++ if (err) ++ return err; ++ ++ } else if (tb[IFLA_BRPORT_UNICAST_FLOOD] && port_priv) { ++ u8 flag = nla_get_u8(tb[IFLA_BRPORT_UNICAST_FLOOD]); ++ ++ err = ethsw_port_set_flood(port_priv->netdev, flag); ++ if (err) ++ return err; ++ ++ } else if (tb[IFLA_BRPORT_STATE] && port_priv) { ++ u8 state = nla_get_u8(tb[IFLA_BRPORT_STATE]); ++ ++ err = ethsw_port_set_state(port_priv->netdev, state); ++ if (err) ++ return err; ++ ++ } else { ++ return -EOPNOTSUPP; ++ } ++ ++ return 0; ++} ++ ++static int ethsw_setlink(struct net_device *netdev, ++ struct nlmsghdr *nlh, ++ u16 flags) ++{ ++ struct nlattr *attr; ++ struct nlattr *tb[(IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX) ?
++ IFLA_BRIDGE_MAX + 1 : IFLA_BRPORT_MAX + 1]; ++ int err = 0; ++ ++ attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); ++ if (attr) { ++ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr, ++ ifla_br_policy); ++ if (err) { ++ netdev_err(netdev, ++ "nla_parse_nested for br_policy err %d\n", ++ err); ++ return err; ++ } ++ ++ err = ethsw_setlink_af_spec(netdev, tb); ++ return err; ++ } ++ ++ attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO); ++ if (attr) { ++ err = nla_parse_nested(tb, IFLA_BRPORT_MAX, attr, ++ ifla_brport_policy); ++ if (err) { ++ netdev_err(netdev, ++ "nla_parse_nested for brport_policy err %d\n", ++ err); ++ return err; ++ } ++ ++ err = ethsw_setlink_protinfo(netdev, tb); ++ return err; ++ } ++ ++ netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC/PROTINFO\n"); ++ return -EOPNOTSUPP; ++} ++ ++static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev, ++ struct ethsw_dev_priv *priv) ++{ ++ u8 operstate = netif_running(netdev) ? netdev->operstate : IF_OPER_DOWN; ++ int iflink; ++ int err; ++ ++ err = nla_put_string(skb, IFLA_IFNAME, netdev->name); ++ if (err) ++ goto nla_put_err; ++ err = nla_put_u32(skb, IFLA_MASTER, priv->netdev->ifindex); ++ if (err) ++ goto nla_put_err; ++ err = nla_put_u32(skb, IFLA_MTU, netdev->mtu); ++ if (err) ++ goto nla_put_err; ++ err = nla_put_u8(skb, IFLA_OPERSTATE, operstate); ++ if (err) ++ goto nla_put_err; ++ if (netdev->addr_len) { ++ err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len, ++ netdev->dev_addr); ++ if (err) ++ goto nla_put_err; ++ } ++ ++ iflink = dev_get_iflink(netdev); ++ if (netdev->ifindex != iflink) { ++ err = nla_put_u32(skb, IFLA_LINK, iflink); ++ if (err) ++ goto nla_put_err; ++ } ++ ++ return 0; ++ ++nla_put_err: ++ netdev_err(netdev, "nla_put_ err %d\n", err); ++ return err; ++} ++ ++static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev, ++ struct ethsw_port_priv *port_priv) ++{ ++ struct nlattr *nest; ++ int err; ++ ++ u8 stp_state = port_priv->stp_state; ++ ++ if (port_priv->stp_state == DPSW_STP_STATE_BLOCKING) ++ stp_state = BR_STATE_BLOCKING; ++ ++ nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED); ++ if (!nest) { ++ netdev_err(netdev, "nla_nest_start failed\n"); ++ return -ENOMEM; ++ } ++ ++ err = nla_put_u8(skb, IFLA_BRPORT_STATE, stp_state); ++ if (err) ++ goto nla_put_err; ++ err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0); ++ if (err) ++ goto nla_put_err; ++ err = nla_put_u32(skb, IFLA_BRPORT_COST, 0); ++ if (err) ++ goto nla_put_err; ++ err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0); ++ if (err) ++ goto nla_put_err; ++ err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0); ++ if (err) ++ goto nla_put_err; ++ err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0); ++ if (err) ++ goto nla_put_err; ++ err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0); ++ if (err) ++ goto nla_put_err; ++ err = nla_put_u8(skb, IFLA_BRPORT_LEARNING, ++ port_priv->ethsw_priv->learning); ++ if (err) ++ goto nla_put_err; ++ err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, ++ port_priv->ethsw_priv->flood); ++ if (err) ++ goto nla_put_err; ++ nla_nest_end(skb, nest); ++ ++ return 0; ++ ++nla_put_err: ++ netdev_err(netdev, "nla_put_ err %d\n", err); ++ nla_nest_cancel(skb, nest); ++ return err; ++} ++ ++static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev, ++ struct ethsw_dev_priv *priv, ++ struct ethsw_port_priv *port_priv) ++{ ++ struct nlattr *nest; ++ struct bridge_vlan_info vinfo; ++ const char *vlans; ++ u16 i; ++ int err; ++ ++ nest = nla_nest_start(skb,
IFLA_AF_SPEC); ++ if (!nest) { ++ netdev_err(netdev, "nla_nest_start failed"); ++ return -ENOMEM; ++ } ++ ++ if (port_priv) ++ vlans = port_priv->vlans; ++ else ++ vlans = priv->vlans; ++ ++ for (i = 0; i < VLAN_VID_MASK + 1; i++) { ++ vinfo.flags = 0; ++ vinfo.vid = i; ++ ++ if (vlans[i] & ETHSW_VLAN_UNTAGGED) ++ vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED; ++ ++ if (vlans[i] & ETHSW_VLAN_PVID) ++ vinfo.flags |= BRIDGE_VLAN_INFO_PVID; ++ ++ if (vlans[i] & ETHSW_VLAN_MEMBER) { ++ err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO, ++ sizeof(vinfo), &vinfo); ++ if (err) ++ goto nla_put_err; ++ } ++ } ++ ++ nla_nest_end(skb, nest); ++ ++ return 0; ++nla_put_err: ++ netdev_err(netdev, "nla_put_ err %d\n", err); ++ nla_nest_cancel(skb, nest); ++ return err; ++} ++ ++static int ethsw_getlink(struct sk_buff *skb, u32 pid, u32 seq, ++ struct net_device *netdev, u32 filter_mask, ++ int nlflags) ++{ ++ struct ethsw_dev_priv *priv; ++ struct ethsw_port_priv *port_priv = NULL; ++ struct ifinfomsg *hdr; ++ struct nlmsghdr *nlh; ++ int err; ++ ++ __get_priv(netdev, &priv, &port_priv); ++ ++ nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI); ++ if (!nlh) ++ return -EMSGSIZE; ++ ++ hdr = nlmsg_data(nlh); ++ memset(hdr, 0, sizeof(*hdr)); ++ hdr->ifi_family = AF_BRIDGE; ++ hdr->ifi_type = netdev->type; ++ hdr->ifi_index = netdev->ifindex; ++ hdr->ifi_flags = dev_get_flags(netdev); ++ ++ err = __nla_put_netdev(skb, netdev, priv); ++ if (err) ++ goto nla_put_err; ++ ++ if (port_priv) { ++ err = __nla_put_port(skb, netdev, port_priv); ++ if (err) ++ goto nla_put_err; ++ } ++ ++ /* Check if the VID information is requested */ ++ if (filter_mask & RTEXT_FILTER_BRVLAN) { ++ err = __nla_put_vlan(skb, netdev, priv, port_priv); ++ if (err) ++ goto nla_put_err; ++ } ++ ++ nlmsg_end(skb, nlh); ++ return skb->len; ++ ++nla_put_err: ++ nlmsg_cancel(skb, nlh); ++ return -EMSGSIZE; ++} ++ ++static int ethsw_dellink_switch(struct ethsw_dev_priv *priv, u16 vid) ++{ ++ struct list_head *pos; ++ struct ethsw_port_priv *ppriv_local = NULL; ++ int err = 0; ++ ++ if (!priv->vlans[vid]) ++ return -ENOENT; ++ ++ err = dpsw_vlan_remove(priv->mc_io, 0, priv->dpsw_handle, vid); ++ if (err) { ++ netdev_err(priv->netdev, "dpsw_vlan_remove err %d\n", err); ++ return err; ++ } ++ priv->vlans[vid] = 0; ++ ++ list_for_each(pos, &priv->port_list) { ++ ppriv_local = list_entry(pos, struct ethsw_port_priv, ++ list); ++ ppriv_local->vlans[vid] = 0; ++ } ++ ++ return 0; ++} ++ ++static int ethsw_dellink_port(struct ethsw_dev_priv *priv, ++ struct ethsw_port_priv *port_priv, ++ u16 vid) ++{ ++ struct list_head *pos; ++ struct ethsw_port_priv *ppriv_local = NULL; ++ struct dpsw_vlan_if_cfg vcfg = { ++ .num_ifs = 1, ++ .if_id[0] = port_priv->port_index, ++ }; ++ unsigned int count = 0; ++ int err = 0; ++ ++ if (!port_priv->vlans[vid]) ++ return -ENOENT; ++ ++ /* VLAN will be deleted from switch if global flag is not set ++ * and is configured on only one port ++ */ ++ if (!(priv->vlans[vid] & ETHSW_VLAN_GLOBAL)) { ++ list_for_each(pos, &priv->port_list) { ++ ppriv_local = list_entry(pos, struct ethsw_port_priv, ++ list); ++ if (ppriv_local->vlans[vid] & ETHSW_VLAN_MEMBER) ++ count++; ++ } ++ ++ if (count == 1) ++ return ethsw_dellink_switch(priv, vid); ++ } ++ ++ err = dpsw_vlan_remove_if(priv->mc_io, 0, priv->dpsw_handle, ++ vid, &vcfg); ++ if (err) { ++ netdev_err(priv->netdev, "dpsw_vlan_remove_if err %d\n", err); ++ return err; ++ } ++ port_priv->vlans[vid] = 0; ++ return 0; ++} ++ ++static int ethsw_dellink(struct net_device *netdev, 
++ struct nlmsghdr *nlh, ++ u16 flags) ++{ ++ struct nlattr *tb[IFLA_BRIDGE_MAX + 1]; ++ struct nlattr *spec; ++ struct bridge_vlan_info *vinfo; ++ struct ethsw_dev_priv *priv; ++ struct ethsw_port_priv *port_priv = NULL; ++ int err = 0; ++ ++ spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); ++ if (!spec) ++ return 0; ++ ++ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy); ++ if (err) ++ return err; ++ ++ if (!tb[IFLA_BRIDGE_VLAN_INFO]) ++ return -EOPNOTSUPP; ++ ++ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]); ++ ++ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK) ++ return -EINVAL; ++ ++ __get_priv(netdev, &priv, &port_priv); ++ ++ /* decide if command targets switch device or port */ ++ if (!port_priv) ++ err = ethsw_dellink_switch(priv, vinfo->vid); ++ else ++ err = ethsw_dellink_port(priv, port_priv, vinfo->vid); ++ ++ return err; ++} ++ ++static const struct net_device_ops ethsw_ops = { ++ .ndo_open = ðsw_open, ++ .ndo_stop = ðsw_stop, ++ ++ .ndo_bridge_setlink = ðsw_setlink, ++ .ndo_bridge_getlink = ðsw_getlink, ++ .ndo_bridge_dellink = ðsw_dellink, ++ ++ .ndo_start_xmit = ðsw_dropframe, ++}; ++ ++/*--------------------------------------------------------------------------- */ ++/* switch port netdevice ops */ ++ ++static int _ethsw_port_carrier_state_sync(struct net_device *netdev) ++{ ++ struct ethsw_port_priv *port_priv = netdev_priv(netdev); ++ struct dpsw_link_state state; ++ int err; ++ ++ err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0, ++ port_priv->ethsw_priv->dpsw_handle, ++ port_priv->port_index, &state); ++ if (unlikely(err)) { ++ netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err); ++ return err; ++ } ++ ++ WARN_ONCE(state.up > 1, "Garbage read into link_state"); ++ ++ if (state.up) ++ netif_carrier_on(port_priv->netdev); ++ else ++ netif_carrier_off(port_priv->netdev); ++ ++ return 0; ++} ++ ++static int ethsw_port_open(struct net_device *netdev) ++{ ++ struct ethsw_port_priv *port_priv = netdev_priv(netdev); ++ int err; ++ ++ err = dpsw_if_enable(port_priv->ethsw_priv->mc_io, 0, ++ port_priv->ethsw_priv->dpsw_handle, ++ port_priv->port_index); ++ if (err) { ++ netdev_err(netdev, "dpsw_if_enable err %d\n", err); ++ return err; ++ } ++ ++ /* sync carrier state */ ++ err = _ethsw_port_carrier_state_sync(netdev); ++ if (err) { ++ netdev_err(netdev, "_ethsw_port_carrier_state_sync err %d\n", ++ err); ++ goto err_carrier_sync; ++ } ++ ++ return 0; ++ ++err_carrier_sync: ++ dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0, ++ port_priv->ethsw_priv->dpsw_handle, ++ port_priv->port_index); ++ return err; ++} ++ ++static int ethsw_port_stop(struct net_device *netdev) ++{ ++ struct ethsw_port_priv *port_priv = netdev_priv(netdev); ++ int err; ++ ++ err = dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0, ++ port_priv->ethsw_priv->dpsw_handle, ++ port_priv->port_index); ++ if (err) { ++ netdev_err(netdev, "dpsw_if_disable err %d\n", err); ++ return err; ++ } ++ ++ return 0; ++} ++ ++static int ethsw_port_fdb_add_uc(struct net_device *netdev, ++ const unsigned char *addr) ++{ ++ struct ethsw_port_priv *port_priv = netdev_priv(netdev); ++ struct dpsw_fdb_unicast_cfg entry = {0}; ++ int err; ++ ++ entry.if_egress = port_priv->port_index; ++ entry.type = DPSW_FDB_ENTRY_STATIC; ++ ether_addr_copy(entry.mac_addr, addr); ++ ++ err = dpsw_fdb_add_unicast(port_priv->ethsw_priv->mc_io, 0, ++ port_priv->ethsw_priv->dpsw_handle, ++ 0, &entry); ++ if (err) ++ netdev_err(netdev, "dpsw_fdb_add_unicast err %d\n", err); ++ return err; ++} ++ 
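++/*
++ * Usage sketch (illustrative only, not part of the driver logic): the
++ * ethsw_port_fdb_{add,del}_{uc,mc} helpers in this block back the standard
++ * bridge netlink FDB operations, so static entries can be driven from
++ * userspace with stock iproute2, e.g. on a port named "sw0p0" (ports are
++ * named "<switch>p<N>" by ethsw_probe; the MAC below is an arbitrary
++ * locally-administered example):
++ *
++ *   bridge fdb add 02:00:c0:a8:51:01 dev sw0p0
++ *   bridge fdb del 02:00:c0:a8:51:01 dev sw0p0
++ *
++ * ethsw_port_fdb_add()/ethsw_port_fdb_del() further below pick the _uc or
++ * _mc variant via is_unicast_ether_addr() and mirror the address into the
++ * port netdev's uc/mc lists with dev_uc_add()/dev_mc_add().
++ */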
++static int ethsw_port_fdb_del_uc(struct net_device *netdev, ++ const unsigned char *addr) ++{ ++ struct ethsw_port_priv *port_priv = netdev_priv(netdev); ++ struct dpsw_fdb_unicast_cfg entry = {0}; ++ int err; ++ ++ entry.if_egress = port_priv->port_index; ++ entry.type = DPSW_FDB_ENTRY_STATIC; ++ ether_addr_copy(entry.mac_addr, addr); ++ ++ err = dpsw_fdb_remove_unicast(port_priv->ethsw_priv->mc_io, 0, ++ port_priv->ethsw_priv->dpsw_handle, ++ 0, &entry); ++ if (err) ++ netdev_err(netdev, "dpsw_fdb_remove_unicast err %d\n", err); ++ return err; ++} ++ ++static int ethsw_port_fdb_add_mc(struct net_device *netdev, ++ const unsigned char *addr) ++{ ++ struct ethsw_port_priv *port_priv = netdev_priv(netdev); ++ struct dpsw_fdb_multicast_cfg entry = {0}; ++ int err; ++ ++ ether_addr_copy(entry.mac_addr, addr); ++ entry.type = DPSW_FDB_ENTRY_STATIC; ++ entry.num_ifs = 1; ++ entry.if_id[0] = port_priv->port_index; ++ ++ err = dpsw_fdb_add_multicast(port_priv->ethsw_priv->mc_io, 0, ++ port_priv->ethsw_priv->dpsw_handle, ++ 0, &entry); ++ if (err) ++ netdev_err(netdev, "dpsw_fdb_add_multicast err %d\n", err); ++ return err; ++} ++ ++static int ethsw_port_fdb_del_mc(struct net_device *netdev, ++ const unsigned char *addr) ++{ ++ struct ethsw_port_priv *port_priv = netdev_priv(netdev); ++ struct dpsw_fdb_multicast_cfg entry = {0}; ++ int err; ++ ++ ether_addr_copy(entry.mac_addr, addr); ++ entry.type = DPSW_FDB_ENTRY_STATIC; ++ entry.num_ifs = 1; ++ entry.if_id[0] = port_priv->port_index; ++ ++ err = dpsw_fdb_remove_multicast(port_priv->ethsw_priv->mc_io, 0, ++ port_priv->ethsw_priv->dpsw_handle, ++ 0, &entry); ++ if (err) ++ netdev_err(netdev, "dpsw_fdb_remove_multicast err %d\n", err); ++ return err; ++} ++ ++static int _lookup_address(struct net_device *netdev, int is_uc, ++ const unsigned char *addr) ++{ ++ struct netdev_hw_addr *ha; ++ struct netdev_hw_addr_list *list = (is_uc) ? 
&netdev->uc : &netdev->mc; ++ ++ netif_addr_lock_bh(netdev); ++ list_for_each_entry(ha, &list->list, list) { ++ if (ether_addr_equal(ha->addr, addr)) { ++ netif_addr_unlock_bh(netdev); ++ return 1; ++ } ++ } ++ netif_addr_unlock_bh(netdev); ++ return 0; ++} ++ ++static int ethsw_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], ++ struct net_device *netdev, ++ const unsigned char *addr, u16 vid, ++ u16 flags) ++{ ++ struct list_head *pos; ++ struct ethsw_port_priv *port_priv = netdev_priv(netdev); ++ struct ethsw_dev_priv *priv = port_priv->ethsw_priv; ++ int err; ++ ++ /* TODO: add replace support when added to iproute bridge */ ++ if (!(flags & NLM_F_REQUEST)) { ++ netdev_err(netdev, ++ "ethsw_port_fdb_add unexpected flags value %08x\n", ++ flags); ++ return -EINVAL; ++ } ++ ++ if (is_unicast_ether_addr(addr)) { ++ /* if entry cannot be replaced, return error if exists */ ++ if (flags & NLM_F_EXCL || flags & NLM_F_APPEND) { ++ list_for_each(pos, &priv->port_list) { ++ port_priv = list_entry(pos, ++ struct ethsw_port_priv, ++ list); ++ if (_lookup_address(port_priv->netdev, ++ 1, addr)) ++ return -EEXIST; ++ } ++ } ++ ++ err = ethsw_port_fdb_add_uc(netdev, addr); ++ if (err) { ++ netdev_err(netdev, "ethsw_port_fdb_add_uc err %d\n", ++ err); ++ return err; ++ } ++ ++ /* we might have replaced an existing entry for a different ++ * switch port, make sure the address doesn't linger in any ++ * port address list ++ */ ++ list_for_each(pos, &priv->port_list) { ++ port_priv = list_entry(pos, struct ethsw_port_priv, ++ list); ++ dev_uc_del(port_priv->netdev, addr); ++ } ++ ++ err = dev_uc_add(netdev, addr); ++ if (err) { ++ netdev_err(netdev, "dev_uc_add err %d\n", err); ++ return err; ++ } ++ } else { ++ struct dpsw_fdb_multicast_cfg entry = { ++ .type = DPSW_FDB_ENTRY_STATIC, ++ .num_ifs = 0, ++ }; ++ ++ /* check if address is already set on this port */ ++ if (_lookup_address(netdev, 0, addr)) ++ return -EEXIST; ++ ++ /* check if the address exists on other port */ ++ ether_addr_copy(entry.mac_addr, addr); ++ err = dpsw_fdb_get_multicast(priv->mc_io, 0, priv->dpsw_handle, ++ 0, &entry); ++ if (!err) { ++ /* entry exists, can we replace it? 
*/ ++ if (flags & NLM_F_EXCL) ++ return -EEXIST; ++ } else if (err != -ENAVAIL) { ++ netdev_err(netdev, "dpsw_fdb_get_multicast err %d\n", ++ err); ++ return err; ++ } ++ ++ err = ethsw_port_fdb_add_mc(netdev, addr); ++ if (err) { ++ netdev_err(netdev, "ethsw_port_fdb_add_mc err %d\n", ++ err); ++ return err; ++ } ++ ++ err = dev_mc_add(netdev, addr); ++ if (err) { ++ netdev_err(netdev, "dev_mc_add err %d\n", err); ++ return err; ++ } ++ } ++ ++ return 0; ++} ++ ++static int ethsw_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], ++ struct net_device *netdev, ++ const unsigned char *addr, u16 vid) ++{ ++ int err; ++ ++ if (is_unicast_ether_addr(addr)) { ++ err = ethsw_port_fdb_del_uc(netdev, addr); ++ if (err) { ++ netdev_err(netdev, "ethsw_port_fdb_del_uc err %d\n", ++ err); ++ return err; ++ } ++ ++ /* also delete if configured on port */ ++ err = dev_uc_del(netdev, addr); ++ if (err && err != -ENOENT) { ++ netdev_err(netdev, "dev_uc_del err %d\n", err); ++ return err; ++ } ++ } else { ++ if (!_lookup_address(netdev, 0, addr)) ++ return -ENOENT; ++ ++ err = dev_mc_del(netdev, addr); ++ if (err) { ++ netdev_err(netdev, "dev_mc_del err %d\n", err); ++ return err; ++ } ++ ++ err = ethsw_port_fdb_del_mc(netdev, addr); ++ if (err) { ++ netdev_err(netdev, "ethsw_port_fdb_del_mc err %d\n", ++ err); ++ return err; ++ } ++ } ++ ++ return 0; ++} ++ ++static struct rtnl_link_stats64 * ++ethsw_port_get_stats(struct net_device *netdev, ++ struct rtnl_link_stats64 *storage) ++{ ++ struct ethsw_port_priv *port_priv = netdev_priv(netdev); ++ u64 tmp; ++ int err; ++ ++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0, ++ port_priv->ethsw_priv->dpsw_handle, ++ port_priv->port_index, ++ DPSW_CNT_ING_FRAME, &storage->rx_packets); ++ if (err) ++ goto error; ++ ++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0, ++ port_priv->ethsw_priv->dpsw_handle, ++ port_priv->port_index, ++ DPSW_CNT_EGR_FRAME, &storage->tx_packets); ++ if (err) ++ goto error; ++ ++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0, ++ port_priv->ethsw_priv->dpsw_handle, ++ port_priv->port_index, ++ DPSW_CNT_ING_BYTE, &storage->rx_bytes); ++ if (err) ++ goto error; ++ ++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0, ++ port_priv->ethsw_priv->dpsw_handle, ++ port_priv->port_index, ++ DPSW_CNT_EGR_BYTE, &storage->tx_bytes); ++ if (err) ++ goto error; ++ ++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0, ++ port_priv->ethsw_priv->dpsw_handle, ++ port_priv->port_index, ++ DPSW_CNT_ING_FRAME_DISCARD, ++ &storage->rx_dropped); ++ if (err) ++ goto error; ++ ++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0, ++ port_priv->ethsw_priv->dpsw_handle, ++ port_priv->port_index, ++ DPSW_CNT_ING_FLTR_FRAME, ++ &tmp); ++ if (err) ++ goto error; ++ storage->rx_dropped += tmp; ++ ++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0, ++ port_priv->ethsw_priv->dpsw_handle, ++ port_priv->port_index, ++ DPSW_CNT_EGR_FRAME_DISCARD, ++ &storage->tx_dropped); ++ if (err) ++ goto error; ++ ++ return storage; ++ ++error: ++ netdev_err(netdev, "dpsw_if_get_counter err %d\n", err); ++ return storage; ++} ++ ++static int ethsw_port_change_mtu(struct net_device *netdev, int mtu) ++{ ++ struct ethsw_port_priv *port_priv = netdev_priv(netdev); ++ int err; ++ ++ if (mtu < ETH_ZLEN || mtu > ETHSW_MAX_FRAME_LENGTH) { ++ netdev_err(netdev, "Invalid MTU %d. 
Valid range is: %d..%d\n", ++ mtu, ETH_ZLEN, ETHSW_MAX_FRAME_LENGTH); ++ return -EINVAL; ++ } ++ ++ err = dpsw_if_set_max_frame_length(port_priv->ethsw_priv->mc_io, ++ 0, ++ port_priv->ethsw_priv->dpsw_handle, ++ port_priv->port_index, ++ (u16)ETHSW_L2_MAX_FRM(mtu)); ++ if (err) { ++ netdev_err(netdev, ++ "dpsw_if_set_max_frame_length() err %d\n", err); ++ return err; ++ } ++ ++ netdev->mtu = mtu; ++ return 0; ++} ++ ++static const struct net_device_ops ethsw_port_ops = { ++ .ndo_open = ðsw_port_open, ++ .ndo_stop = ðsw_port_stop, ++ ++ .ndo_fdb_add = ðsw_port_fdb_add, ++ .ndo_fdb_del = ðsw_port_fdb_del, ++ .ndo_fdb_dump = &ndo_dflt_fdb_dump, ++ ++ .ndo_get_stats64 = ðsw_port_get_stats, ++ .ndo_change_mtu = ðsw_port_change_mtu, ++ ++ .ndo_start_xmit = ðsw_dropframe, ++}; ++ ++static void ethsw_get_drvinfo(struct net_device *netdev, ++ struct ethtool_drvinfo *drvinfo) ++{ ++ struct ethsw_port_priv *port_priv = netdev_priv(netdev); ++ u16 version_major, version_minor; ++ int err; ++ ++ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); ++ strlcpy(drvinfo->version, ethsw_drv_version, sizeof(drvinfo->version)); ++ ++ err = dpsw_get_api_version(port_priv->ethsw_priv->mc_io, 0, ++ &version_major, ++ &version_minor); ++ if (err) ++ strlcpy(drvinfo->fw_version, "N/A", ++ sizeof(drvinfo->fw_version)); ++ else ++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), ++ "%u.%u", version_major, version_minor); ++ ++ strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent), ++ sizeof(drvinfo->bus_info)); ++} ++ ++static int ethsw_get_settings(struct net_device *netdev, ++ struct ethtool_cmd *cmd) ++{ ++ struct ethsw_port_priv *port_priv = netdev_priv(netdev); ++ struct dpsw_link_state state = {0}; ++ int err = 0; ++ ++ err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0, ++ port_priv->ethsw_priv->dpsw_handle, ++ port_priv->port_index, ++ &state); ++ if (err) { ++ netdev_err(netdev, "ERROR %d getting link state", err); ++ goto out; ++ } ++ ++ /* At the moment, we have no way of interrogating the DPMAC ++ * from the DPSW side or there may not exist a DPMAC at all. ++ * Report only autoneg state, duplexity and speed. ++ */ ++ if (state.options & DPSW_LINK_OPT_AUTONEG) ++ cmd->autoneg = AUTONEG_ENABLE; ++ if (!(state.options & DPSW_LINK_OPT_HALF_DUPLEX)) ++ cmd->autoneg = DUPLEX_FULL; ++ ethtool_cmd_speed_set(cmd, state.rate); ++ ++out: ++ return err; ++} ++ ++static int ethsw_set_settings(struct net_device *netdev, ++ struct ethtool_cmd *cmd) ++{ ++ struct ethsw_port_priv *port_priv = netdev_priv(netdev); ++ struct dpsw_link_state state = {0}; ++ struct dpsw_link_cfg cfg = {0}; ++ int err = 0; ++ ++ netdev_dbg(netdev, "Setting link parameters..."); ++ ++ err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0, ++ port_priv->ethsw_priv->dpsw_handle, ++ port_priv->port_index, ++ &state); ++ if (err) { ++ netdev_err(netdev, "ERROR %d getting link state", err); ++ goto out; ++ } ++ ++ /* Due to a temporary MC limitation, the DPSW port must be down ++ * in order to be able to change link settings. Taking steps to let ++ * the user know that. 
++ */ ++ if (netif_running(netdev)) { ++ netdev_info(netdev, ++ "Sorry, interface must be brought down first.\n"); ++ return -EACCES; ++ } ++ ++ cfg.options = state.options; ++ cfg.rate = ethtool_cmd_speed(cmd); ++ if (cmd->autoneg == AUTONEG_ENABLE) ++ cfg.options |= DPSW_LINK_OPT_AUTONEG; ++ else ++ cfg.options &= ~DPSW_LINK_OPT_AUTONEG; ++ if (cmd->duplex == DUPLEX_HALF) ++ cfg.options |= DPSW_LINK_OPT_HALF_DUPLEX; ++ else ++ cfg.options &= ~DPSW_LINK_OPT_HALF_DUPLEX; ++ ++ err = dpsw_if_set_link_cfg(port_priv->ethsw_priv->mc_io, 0, ++ port_priv->ethsw_priv->dpsw_handle, ++ port_priv->port_index, ++ &cfg); ++ if (err) ++ /* ethtool will be loud enough if we return an error; no point ++ * in putting our own error message on the console by default ++ */ ++ netdev_dbg(netdev, "ERROR %d setting link cfg", err); ++ ++out: ++ return err; ++} ++ ++static struct { ++ enum dpsw_counter id; ++ char name[ETH_GSTRING_LEN]; ++} ethsw_ethtool_counters[] = { ++ {DPSW_CNT_ING_FRAME, "rx frames"}, ++ {DPSW_CNT_ING_BYTE, "rx bytes"}, ++ {DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"}, ++ {DPSW_CNT_ING_FRAME_DISCARD, "rx discarded frames"}, ++ {DPSW_CNT_ING_BCAST_FRAME, "rx b-cast frames"}, ++ {DPSW_CNT_ING_BCAST_BYTES, "rx b-cast bytes"}, ++ {DPSW_CNT_ING_MCAST_FRAME, "rx m-cast frames"}, ++ {DPSW_CNT_ING_MCAST_BYTE, "rx m-cast bytes"}, ++ {DPSW_CNT_EGR_FRAME, "tx frames"}, ++ {DPSW_CNT_EGR_BYTE, "tx bytes"}, ++ {DPSW_CNT_EGR_FRAME_DISCARD, "tx discarded frames"}, ++ ++}; ++ ++static int ethsw_ethtool_get_sset_count(struct net_device *dev, int sset) ++{ ++ switch (sset) { ++ case ETH_SS_STATS: ++ return ARRAY_SIZE(ethsw_ethtool_counters); ++ default: ++ return -EOPNOTSUPP; ++ } ++} ++ ++static void ethsw_ethtool_get_strings(struct net_device *netdev, ++ u32 stringset, u8 *data) ++{ ++ u32 i; ++ ++ switch (stringset) { ++ case ETH_SS_STATS: ++ for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++) ++ memcpy(data + i * ETH_GSTRING_LEN, ++ ethsw_ethtool_counters[i].name, ETH_GSTRING_LEN); ++ break; ++ } ++} ++ ++static void ethsw_ethtool_get_stats(struct net_device *netdev, ++ struct ethtool_stats *stats, ++ u64 *data) ++{ ++ struct ethsw_port_priv *port_priv = netdev_priv(netdev); ++ u32 i; ++ int err; ++ ++ for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++) { ++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0, ++ port_priv->ethsw_priv->dpsw_handle, ++ port_priv->port_index, ++ ethsw_ethtool_counters[i].id, ++ &data[i]); ++ if (err) ++ netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n", ++ ethsw_ethtool_counters[i].name, err); ++ } ++} ++ ++static const struct ethtool_ops ethsw_port_ethtool_ops = { ++ .get_drvinfo = ðsw_get_drvinfo, ++ .get_link = ðtool_op_get_link, ++ .get_settings = ðsw_get_settings, ++ .set_settings = ðsw_set_settings, ++ .get_strings = ðsw_ethtool_get_strings, ++ .get_ethtool_stats = ðsw_ethtool_get_stats, ++ .get_sset_count = ðsw_ethtool_get_sset_count, ++}; ++ ++/* -------------------------------------------------------------------------- */ ++/* ethsw driver functions */ ++ ++static int ethsw_links_state_update(struct ethsw_dev_priv *priv) ++{ ++ struct list_head *pos; ++ struct ethsw_port_priv *port_priv; ++ int err; ++ ++ list_for_each(pos, &priv->port_list) { ++ port_priv = list_entry(pos, struct ethsw_port_priv, ++ list); ++ ++ err = _ethsw_port_carrier_state_sync(port_priv->netdev); ++ if (err) ++ netdev_err(port_priv->netdev, ++ "_ethsw_port_carrier_state_sync err %d\n", ++ err); ++ } ++ ++ return 0; ++} ++ ++static irqreturn_t ethsw_irq0_handler(int 
irq_num, void *arg) ++{ ++ return IRQ_WAKE_THREAD; ++} ++ ++static irqreturn_t _ethsw_irq0_handler_thread(int irq_num, void *arg) ++{ ++ struct device *dev = (struct device *)arg; ++ struct net_device *netdev = dev_get_drvdata(dev); ++ struct ethsw_dev_priv *priv = netdev_priv(netdev); ++ ++ struct fsl_mc_io *io = priv->mc_io; ++ u16 token = priv->dpsw_handle; ++ int irq_index = DPSW_IRQ_INDEX_IF; ++ ++ /* Mask the events and the if_id reserved bits to be cleared on read */ ++ u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000; ++ int err; ++ ++ err = dpsw_get_irq_status(io, 0, token, irq_index, &status); ++ if (unlikely(err)) { ++ netdev_err(netdev, "Can't get irq status (err %d)", err); ++ ++ err = dpsw_clear_irq_status(io, 0, token, irq_index, ++ 0xFFFFFFFF); ++ if (unlikely(err)) ++ netdev_err(netdev, "Can't clear irq status (err %d)", ++ err); ++ goto out; ++ } ++ ++ if (status & DPSW_IRQ_EVENT_LINK_CHANGED) { ++ err = ethsw_links_state_update(priv); ++ if (unlikely(err)) ++ goto out; ++ } ++ ++out: ++ return IRQ_HANDLED; ++} ++ ++static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev) ++{ ++ struct device *dev = &sw_dev->dev; ++ struct net_device *netdev = dev_get_drvdata(dev); ++ struct ethsw_dev_priv *priv = netdev_priv(netdev); ++ int err = 0; ++ struct fsl_mc_device_irq *irq; ++ const int irq_index = DPSW_IRQ_INDEX_IF; ++ u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED; ++ ++ err = fsl_mc_allocate_irqs(sw_dev); ++ if (unlikely(err)) { ++ dev_err(dev, "MC irqs allocation failed\n"); ++ return err; ++ } ++ ++ if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_MAX_IRQ_NUM)) { ++ err = -EINVAL; ++ goto free_irq; ++ } ++ ++ err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle, ++ irq_index, 0); ++ if (unlikely(err)) { ++ dev_err(dev, "dpsw_set_irq_enable err %d\n", err); ++ goto free_irq; ++ } ++ ++ irq = sw_dev->irqs[irq_index]; ++ ++ err = devm_request_threaded_irq(dev, irq->msi_desc->irq, ++ ethsw_irq0_handler, ++ _ethsw_irq0_handler_thread, ++ IRQF_NO_SUSPEND | IRQF_ONESHOT, ++ dev_name(dev), dev); ++ if (unlikely(err)) { ++ dev_err(dev, "devm_request_threaded_irq(): %d", err); ++ goto free_irq; ++ } ++ ++ err = dpsw_set_irq_mask(priv->mc_io, 0, priv->dpsw_handle, ++ irq_index, mask); ++ if (unlikely(err)) { ++ dev_err(dev, "dpsw_set_irq_mask(): %d", err); ++ goto free_devm_irq; ++ } ++ ++ err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle, ++ irq_index, 1); ++ if (unlikely(err)) { ++ dev_err(dev, "dpsw_set_irq_enable(): %d", err); ++ goto free_devm_irq; ++ } ++ ++ return 0; ++ ++free_devm_irq: ++ devm_free_irq(dev, irq->msi_desc->irq, dev); ++free_irq: ++ fsl_mc_free_irqs(sw_dev); ++ return err; ++} ++ ++static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev) ++{ ++ struct device *dev = &sw_dev->dev; ++ struct net_device *netdev = dev_get_drvdata(dev); ++ struct ethsw_dev_priv *priv = netdev_priv(netdev); ++ ++ dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle, ++ DPSW_IRQ_INDEX_IF, 0); ++ devm_free_irq(dev, ++ sw_dev->irqs[DPSW_IRQ_INDEX_IF]->msi_desc->irq, ++ dev); ++ fsl_mc_free_irqs(sw_dev); ++} ++ ++static int __cold ++ethsw_init(struct fsl_mc_device *sw_dev) ++{ ++ struct device *dev = &sw_dev->dev; ++ struct ethsw_dev_priv *priv; ++ struct net_device *netdev; ++ int err = 0; ++ u16 i; ++ u16 version_major, version_minor; ++ const struct dpsw_stp_cfg stp_cfg = { ++ .vlan_id = 1, ++ .state = DPSW_STP_STATE_FORWARDING, ++ }; ++ ++ netdev = dev_get_drvdata(dev); ++ priv = netdev_priv(netdev); ++ ++ priv->dev_id = sw_dev->obj_desc.id; ++ ++ err = 
dpsw_open(priv->mc_io, 0, priv->dev_id, &priv->dpsw_handle); ++ if (err) { ++ dev_err(dev, "dpsw_open err %d\n", err); ++ goto err_exit; ++ } ++ if (!priv->dpsw_handle) { ++ dev_err(dev, "dpsw_open returned null handle but no error\n"); ++ err = -EFAULT; ++ goto err_exit; ++ } ++ ++ err = dpsw_get_attributes(priv->mc_io, 0, priv->dpsw_handle, ++ &priv->sw_attr); ++ if (err) { ++ dev_err(dev, "dpsw_get_attributes err %d\n", err); ++ goto err_close; ++ } ++ ++ err = dpsw_get_api_version(priv->mc_io, 0, ++ &version_major, ++ &version_minor); ++ if (err) { ++ dev_err(dev, "dpsw_get_api_version err %d\n", err); ++ goto err_close; ++ } ++ ++ /* Minimum supported DPSW version check */ ++ if (version_major < DPSW_MIN_VER_MAJOR || ++ (version_major == DPSW_MIN_VER_MAJOR && ++ version_minor < DPSW_MIN_VER_MINOR)) { ++ dev_err(dev, "DPSW version %d:%d not supported. Use %d.%d or greater.\n", ++ version_major, ++ version_minor, ++ DPSW_MIN_VER_MAJOR, DPSW_MIN_VER_MINOR); ++ err = -ENOTSUPP; ++ goto err_close; ++ } ++ ++ err = dpsw_reset(priv->mc_io, 0, priv->dpsw_handle); ++ if (err) { ++ dev_err(dev, "dpsw_reset err %d\n", err); ++ goto err_close; ++ } ++ ++ err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle, 0, ++ DPSW_FDB_LEARNING_MODE_HW); ++ if (err) { ++ dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err); ++ goto err_close; ++ } ++ ++ for (i = 0; i < priv->sw_attr.num_ifs; i++) { ++ err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle, i, ++ &stp_cfg); ++ if (err) { ++ dev_err(dev, "dpsw_if_set_stp err %d for port %d\n", ++ err, i); ++ goto err_close; ++ } ++ ++ err = dpsw_if_set_broadcast(priv->mc_io, 0, ++ priv->dpsw_handle, i, 1); ++ if (err) { ++ dev_err(dev, ++ "dpsw_if_set_broadcast err %d for port %d\n", ++ err, i); ++ goto err_close; ++ } ++ } ++ ++ return 0; ++ ++err_close: ++ dpsw_close(priv->mc_io, 0, priv->dpsw_handle); ++err_exit: ++ return err; ++} ++ ++static int __cold ++ethsw_takedown(struct fsl_mc_device *sw_dev) ++{ ++ struct device *dev = &sw_dev->dev; ++ struct net_device *netdev; ++ struct ethsw_dev_priv *priv; ++ int err; ++ ++ netdev = dev_get_drvdata(dev); ++ priv = netdev_priv(netdev); ++ ++ err = dpsw_close(priv->mc_io, 0, priv->dpsw_handle); ++ if (err) ++ dev_warn(dev, "dpsw_close err %d\n", err); ++ ++ return 0; ++} ++ ++static int __cold ++ethsw_remove(struct fsl_mc_device *sw_dev) ++{ ++ struct device *dev; ++ struct net_device *netdev; ++ struct ethsw_dev_priv *priv; ++ struct ethsw_port_priv *port_priv; ++ struct list_head *pos; ++ ++ dev = &sw_dev->dev; ++ netdev = dev_get_drvdata(dev); ++ priv = netdev_priv(netdev); ++ ++ list_for_each(pos, &priv->port_list) { ++ port_priv = list_entry(pos, struct ethsw_port_priv, list); ++ ++ rtnl_lock(); ++ netdev_upper_dev_unlink(port_priv->netdev, netdev); ++ rtnl_unlock(); ++ ++ unregister_netdev(port_priv->netdev); ++ free_netdev(port_priv->netdev); ++ } ++ ++ ethsw_teardown_irqs(sw_dev); ++ ++ unregister_netdev(netdev); ++ ++ ethsw_takedown(sw_dev); ++ fsl_mc_portal_free(priv->mc_io); ++ ++ dev_set_drvdata(dev, NULL); ++ free_netdev(netdev); ++ ++ return 0; ++} ++ ++static int __cold ++ethsw_probe(struct fsl_mc_device *sw_dev) ++{ ++ struct device *dev; ++ struct net_device *netdev = NULL; ++ struct ethsw_dev_priv *priv = NULL; ++ int err = 0; ++ u16 i; ++ const char def_mcast[ETH_ALEN] = { ++ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01, ++ }; ++ char port_name[IFNAMSIZ]; ++ ++ dev = &sw_dev->dev; ++ ++ /* register switch device, it's for management only - no I/O */ ++ netdev = 
alloc_etherdev(sizeof(*priv)); ++ if (!netdev) { ++ dev_err(dev, "alloc_etherdev error\n"); ++ return -ENOMEM; ++ } ++ netdev->netdev_ops = &ethsw_ops; ++ ++ SET_NETDEV_DEV(netdev, dev); ++ dev_set_drvdata(dev, netdev); ++ ++ priv = netdev_priv(netdev); ++ priv->netdev = netdev; ++ ++ err = fsl_mc_portal_allocate(sw_dev, 0, &priv->mc_io); ++ if (err) { ++ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); ++ goto err_free_netdev; ++ } ++ if (!priv->mc_io) { ++ dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n"); ++ err = -EFAULT; ++ goto err_free_netdev; ++ } ++ ++ err = ethsw_init(sw_dev); ++ if (err) { ++ dev_err(dev, "switch init err %d\n", err); ++ goto err_free_cmdport; ++ } ++ ++ netdev->flags = netdev->flags | IFF_PROMISC | IFF_MASTER; ++ ++ /* TODO: should we hold rtnl_lock here? We can't register_netdev under ++ * lock ++ */ ++ dev_alloc_name(netdev, "sw%d"); ++ err = register_netdev(netdev); ++ if (err < 0) { ++ dev_err(dev, "register_netdev error %d\n", err); ++ goto err_takedown; ++ } ++ if (err) ++ dev_info(dev, "register_netdev res %d\n", err); ++ ++ /* VLAN 1 is implicitly configured on the switch */ ++ priv->vlans[1] = ETHSW_VLAN_MEMBER; ++ /* Flooding, learning are implicitly enabled */ ++ priv->learning = true; ++ priv->flood = true; ++ ++ /* register switch ports */ ++ snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name); ++ ++ INIT_LIST_HEAD(&priv->port_list); ++ for (i = 0; i < priv->sw_attr.num_ifs; i++) { ++ struct net_device *port_netdev; ++ struct ethsw_port_priv *port_priv; ++ ++ port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv)); ++ if (!port_netdev) { ++ dev_err(dev, "alloc_etherdev error\n"); ++ err = -ENOMEM; ++ goto err_takedown; ++ } ++ ++ port_priv = netdev_priv(port_netdev); ++ port_priv->netdev = port_netdev; ++ port_priv->ethsw_priv = priv; ++ ++ port_priv->port_index = i; ++ port_priv->stp_state = BR_STATE_FORWARDING; ++ /* VLAN 1 is configured by default on all switch ports */ ++ port_priv->vlans[1] = ETHSW_VLAN_MEMBER | ETHSW_VLAN_UNTAGGED | ++ ETHSW_VLAN_PVID; ++ ++ SET_NETDEV_DEV(port_netdev, dev); ++ port_netdev->netdev_ops = &ethsw_port_ops; ++ port_netdev->ethtool_ops = &ethsw_port_ethtool_ops; ++ ++ port_netdev->flags = port_netdev->flags | ++ IFF_PROMISC | IFF_SLAVE; ++ ++ dev_alloc_name(port_netdev, port_name); ++ err = register_netdev(port_netdev); ++ if (err < 0) { ++ dev_err(dev, "register_netdev error %d\n", err); ++ free_netdev(port_netdev); ++ goto err_takedown; ++ } ++ ++ rtnl_lock(); ++ ++ err = netdev_master_upper_dev_link(port_netdev, netdev, ++ NULL, NULL); ++ if (err) { ++ dev_err(dev, "netdev_master_upper_dev_link error %d\n", ++ err); ++ unregister_netdev(port_netdev); ++ free_netdev(port_netdev); ++ rtnl_unlock(); ++ goto err_takedown; ++ } ++ ++ rtmsg_ifinfo(RTM_NEWLINK, port_netdev, IFF_SLAVE, GFP_KERNEL); ++ ++ rtnl_unlock(); ++ ++ list_add(&port_priv->list, &priv->port_list); ++ ++ /* TODO: implement set_rm_mode instead of this */ ++ err = ethsw_port_fdb_add_mc(port_netdev, def_mcast); ++ if (err) ++ dev_warn(&netdev->dev, ++ "ethsw_port_fdb_add_mc err %d\n", err); ++ } ++ ++ /* the switch starts up enabled */ ++ rtnl_lock(); ++ err = dev_open(netdev); ++ rtnl_unlock(); ++ if (err) ++ dev_warn(dev, "dev_open err %d\n", err); ++ ++ /* setup irqs */ ++ err = ethsw_setup_irqs(sw_dev); ++ if (unlikely(err)) { ++ dev_warn(dev, "ethsw_setup_irqs err %d\n", err); ++ goto err_takedown; ++ } ++ ++ dev_info(&netdev->dev, ++ "probed %d port switch\n", priv->sw_attr.num_ifs); ++ return 0; ++ ++err_takedown: ++ 
ethsw_remove(sw_dev); ++err_free_cmdport: ++ fsl_mc_portal_free(priv->mc_io); ++err_free_netdev: ++ dev_set_drvdata(dev, NULL); ++ free_netdev(netdev); ++ ++ return err; ++} ++ ++static const struct fsl_mc_device_id ethsw_match_id_table[] = { ++ { ++ .vendor = FSL_MC_VENDOR_FREESCALE, ++ .obj_type = "dpsw", ++ }, ++ {} ++}; ++ ++static struct fsl_mc_driver eth_sw_drv = { ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .owner = THIS_MODULE, ++ }, ++ .probe = ethsw_probe, ++ .remove = ethsw_remove, ++ .match_id_table = ethsw_match_id_table, ++}; ++ ++module_fsl_mc_driver(eth_sw_drv); ++ ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver (prototype)"); +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/evb/Kconfig +@@ -0,0 +1,7 @@ ++config FSL_DPAA2_EVB ++ tristate "DPAA2 Edge Virtual Bridge" ++ depends on FSL_MC_BUS && FSL_DPAA2 ++ select VLAN_8021Q ++ default y ++ ---help--- ++ Prototype driver for DPAA2 Edge Virtual Bridge. +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/evb/Makefile +@@ -0,0 +1,10 @@ ++ ++obj-$(CONFIG_FSL_DPAA2_EVB) += dpaa2-evb.o ++ ++dpaa2-evb-objs := evb.o dpdmux.o ++ ++all: ++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules ++ ++clean: ++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h +@@ -0,0 +1,279 @@ ++/* Copyright 2013-2016 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++#ifndef _FSL_DPDMUX_CMD_H ++#define _FSL_DPDMUX_CMD_H ++ ++/* DPDMUX Version */ ++#define DPDMUX_VER_MAJOR 6 ++#define DPDMUX_VER_MINOR 1 ++ ++#define DPDMUX_CMD_BASE_VER 1 ++#define DPDMUX_CMD_ID_OFFSET 4 ++ ++#define DPDMUX_CMD(id) (((id) << DPDMUX_CMD_ID_OFFSET) | DPDMUX_CMD_BASE_VER) ++ ++/* Command IDs */ ++#define DPDMUX_CMDID_CLOSE DPDMUX_CMD(0x800) ++#define DPDMUX_CMDID_OPEN DPDMUX_CMD(0x806) ++#define DPDMUX_CMDID_CREATE DPDMUX_CMD(0x906) ++#define DPDMUX_CMDID_DESTROY DPDMUX_CMD(0x986) ++#define DPDMUX_CMDID_GET_API_VERSION DPDMUX_CMD(0xa06) ++ ++#define DPDMUX_CMDID_ENABLE DPDMUX_CMD(0x002) ++#define DPDMUX_CMDID_DISABLE DPDMUX_CMD(0x003) ++#define DPDMUX_CMDID_GET_ATTR DPDMUX_CMD(0x004) ++#define DPDMUX_CMDID_RESET DPDMUX_CMD(0x005) ++#define DPDMUX_CMDID_IS_ENABLED DPDMUX_CMD(0x006) ++ ++#define DPDMUX_CMDID_SET_IRQ_ENABLE DPDMUX_CMD(0x012) ++#define DPDMUX_CMDID_GET_IRQ_ENABLE DPDMUX_CMD(0x013) ++#define DPDMUX_CMDID_SET_IRQ_MASK DPDMUX_CMD(0x014) ++#define DPDMUX_CMDID_GET_IRQ_MASK DPDMUX_CMD(0x015) ++#define DPDMUX_CMDID_GET_IRQ_STATUS DPDMUX_CMD(0x016) ++#define DPDMUX_CMDID_CLEAR_IRQ_STATUS DPDMUX_CMD(0x017) ++ ++#define DPDMUX_CMDID_SET_MAX_FRAME_LENGTH DPDMUX_CMD(0x0a1) ++ ++#define DPDMUX_CMDID_UL_RESET_COUNTERS DPDMUX_CMD(0x0a3) ++ ++#define DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES DPDMUX_CMD(0x0a7) ++#define DPDMUX_CMDID_IF_GET_ATTR DPDMUX_CMD(0x0a8) ++#define DPDMUX_CMDID_IF_ENABLE DPDMUX_CMD(0x0a9) ++#define DPDMUX_CMDID_IF_DISABLE DPDMUX_CMD(0x0aa) ++ ++#define DPDMUX_CMDID_IF_ADD_L2_RULE DPDMUX_CMD(0x0b0) ++#define DPDMUX_CMDID_IF_REMOVE_L2_RULE DPDMUX_CMD(0x0b1) ++#define DPDMUX_CMDID_IF_GET_COUNTER DPDMUX_CMD(0x0b2) ++#define DPDMUX_CMDID_IF_SET_LINK_CFG DPDMUX_CMD(0x0b3) ++#define DPDMUX_CMDID_IF_GET_LINK_STATE DPDMUX_CMD(0x0b4) ++ ++#define DPDMUX_CMDID_SET_CUSTOM_KEY DPDMUX_CMD(0x0b5) ++#define DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b6) ++#define DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b7) ++ ++#define DPDMUX_MASK(field) \ ++ GENMASK(DPDMUX_##field##_SHIFT + DPDMUX_##field##_SIZE - 1, \ ++ DPDMUX_##field##_SHIFT) ++#define dpdmux_set_field(var, field, val) \ ++ ((var) |= (((val) << DPDMUX_##field##_SHIFT) & DPDMUX_MASK(field))) ++#define dpdmux_get_field(var, field) \ ++ (((var) & DPDMUX_MASK(field)) >> DPDMUX_##field##_SHIFT) ++ ++struct dpdmux_cmd_open { ++ u32 dpdmux_id; ++}; ++ ++struct dpdmux_cmd_create { ++ u8 method; ++ u8 manip; ++ u16 num_ifs; ++ u32 pad; ++ ++ u16 adv_max_dmat_entries; ++ u16 adv_max_mc_groups; ++ u16 adv_max_vlan_ids; ++ u16 pad1; ++ ++ u64 options; ++}; ++ ++struct dpdmux_cmd_destroy { ++ u32 dpdmux_id; ++}; ++ ++#define DPDMUX_ENABLE_SHIFT 0 ++#define DPDMUX_ENABLE_SIZE 1 ++ ++struct dpdmux_rsp_is_enabled { ++ u8 en; ++}; ++ ++struct dpdmux_cmd_set_irq_enable { ++ u8 enable; ++ u8 pad[3]; ++ u8 irq_index; ++}; ++ ++struct dpdmux_cmd_get_irq_enable { ++ u32 pad; ++ u8 irq_index; ++}; ++ ++struct dpdmux_rsp_get_irq_enable { ++ u8 enable; ++}; ++ ++struct dpdmux_cmd_set_irq_mask { ++ u32 mask; ++ u8 irq_index; ++}; ++ ++struct dpdmux_cmd_get_irq_mask { ++ u32 pad; ++ u8 irq_index; ++}; ++ ++struct dpdmux_rsp_get_irq_mask { ++ u32 mask; ++}; ++ ++struct dpdmux_cmd_get_irq_status { ++ u32 status; ++ u8 irq_index; ++}; ++ ++struct dpdmux_rsp_get_irq_status { ++ u32 status; ++}; ++ ++struct dpdmux_cmd_clear_irq_status { ++ u32 status; ++ u8 irq_index; ++}; ++ ++struct dpdmux_rsp_get_attr { ++ u8 method; ++ u8 manip; ++ u16 num_ifs; ++ u16 mem_size; ++ u16 pad; ++ ++ u64 pad1; ++ ++ u32 id; ++ u32 pad2; ++ ++ u64 
options; ++}; ++ ++struct dpdmux_cmd_set_max_frame_length { ++ u16 max_frame_length; ++}; ++ ++#define DPDMUX_ACCEPTED_FRAMES_TYPE_SHIFT 0 ++#define DPDMUX_ACCEPTED_FRAMES_TYPE_SIZE 4 ++#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SHIFT 4 ++#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SIZE 4 ++ ++struct dpdmux_cmd_if_set_accepted_frames { ++ u16 if_id; ++ u8 frames_options; ++}; ++ ++struct dpdmux_cmd_if { ++ u16 if_id; ++}; ++ ++struct dpdmux_rsp_if_get_attr { ++ u8 pad[3]; ++ u8 enabled; ++ u8 pad1[3]; ++ u8 accepted_frames_type; ++ u32 rate; ++}; ++ ++struct dpdmux_cmd_if_l2_rule { ++ u16 if_id; ++ u8 mac_addr5; ++ u8 mac_addr4; ++ u8 mac_addr3; ++ u8 mac_addr2; ++ u8 mac_addr1; ++ u8 mac_addr0; ++ ++ u32 pad; ++ u16 vlan_id; ++}; ++ ++struct dpdmux_cmd_if_get_counter { ++ u16 if_id; ++ u8 counter_type; ++}; ++ ++struct dpdmux_rsp_if_get_counter { ++ u64 pad; ++ u64 counter; ++}; ++ ++struct dpdmux_cmd_if_set_link_cfg { ++ u16 if_id; ++ u16 pad[3]; ++ ++ u32 rate; ++ u32 pad1; ++ ++ u64 options; ++}; ++ ++struct dpdmux_cmd_if_get_link_state { ++ u16 if_id; ++}; ++ ++struct dpdmux_rsp_if_get_link_state { ++ u32 pad; ++ u8 up; ++ u8 pad1[3]; ++ ++ u32 rate; ++ u32 pad2; ++ ++ u64 options; ++}; ++ ++struct dpdmux_rsp_get_api_version { ++ u16 major; ++ u16 minor; ++}; ++ ++struct dpdmux_set_custom_key { ++ u64 pad[6]; ++ u64 key_cfg_iova; ++}; ++ ++struct dpdmux_cmd_add_custom_cls_entry { ++ u8 pad[3]; ++ u8 key_size; ++ u16 pad1; ++ u16 dest_if; ++ u64 key_iova; ++ u64 mask_iova; ++}; ++ ++struct dpdmux_cmd_remove_custom_cls_entry { ++ u8 pad[3]; ++ u8 key_size; ++ u32 pad1; ++ u64 key_iova; ++ u64 mask_iova; ++}; ++ ++#endif /* _FSL_DPDMUX_CMD_H */ +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.c +@@ -0,0 +1,1112 @@ ++/* Copyright 2013-2016 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#include "../../fsl-mc/include/mc-sys.h" ++#include "../../fsl-mc/include/mc-cmd.h" ++#include "dpdmux.h" ++#include "dpdmux-cmd.h" ++ ++/** ++ * dpdmux_open() - Open a control session for the specified object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @dpdmux_id: DPDMUX unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an ++ * already created object; an object may have been declared in ++ * the DPL or by calling the dpdmux_create() function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent commands for ++ * this specific object. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmux_open(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ int dpdmux_id, ++ u16 *token) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpdmux_cmd_open *cmd_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_OPEN, ++ cmd_flags, ++ 0); ++ cmd_params = (struct dpdmux_cmd_open *)cmd.params; ++ cmd_params->dpdmux_id = cpu_to_le32(dpdmux_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = mc_cmd_hdr_read_token(&cmd); ++ ++ return 0; ++} ++ ++/** ++ * dpdmux_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * ++ * After this function is called, no further operations are ++ * allowed on the object without opening a new control session. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmux_close(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpdmux_create() - Create the DPDMUX object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @dprc_token: Parent container token; '0' for default container ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @cfg: Configuration structure ++ * @obj_id: returned object id ++ * ++ * Create the DPDMUX object, allocate required resources and ++ * perform required initialization. ++ * ++ * The object can be created either by declaring it in the ++ * DPL file, or by calling this function. ++ * ++ * The function accepts an authentication token of a parent ++ * container that this object should be assigned to. The token ++ * can be '0' so the object will be assigned to the default container. 
++ * The newly created object can be opened with the returned ++ * object id and using the container's associated tokens and MC portals. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmux_create(struct fsl_mc_io *mc_io, ++ u16 dprc_token, ++ u32 cmd_flags, ++ const struct dpdmux_cfg *cfg, ++ u32 *obj_id) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpdmux_cmd_create *cmd_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CREATE, ++ cmd_flags, ++ dprc_token); ++ cmd_params = (struct dpdmux_cmd_create *)cmd.params; ++ cmd_params->method = cfg->method; ++ cmd_params->manip = cfg->manip; ++ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs); ++ cmd_params->adv_max_dmat_entries = ++ cpu_to_le16(cfg->adv.max_dmat_entries); ++ cmd_params->adv_max_mc_groups = cpu_to_le16(cfg->adv.max_mc_groups); ++ cmd_params->adv_max_vlan_ids = cpu_to_le16(cfg->adv.max_vlan_ids); ++ cmd_params->options = cpu_to_le64(cfg->adv.options); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *obj_id = mc_cmd_hdr_read_token(&cmd); ++ ++ return 0; ++} ++ ++/** ++ * dpdmux_destroy() - Destroy the DPDMUX object and release all its resources. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @dprc_token: Parent container token; '0' for default container ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @object_id: The object id; it must be a valid id within the container that ++ * created this object; ++ * ++ * The function accepts the authentication token of the parent container that ++ * created the object (not the one that currently owns the object). The object ++ * is searched within parent using the provided 'object_id'. ++ * All tokens to the object must be closed before calling destroy. ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpdmux_destroy(struct fsl_mc_io *mc_io, ++ u16 dprc_token, ++ u32 cmd_flags, ++ u32 object_id) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpdmux_cmd_destroy *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DESTROY, ++ cmd_flags, ++ dprc_token); ++ cmd_params = (struct dpdmux_cmd_destroy *)cmd.params; ++ cmd_params->dpdmux_id = cpu_to_le32(object_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpdmux_enable() - Enable DPDMUX functionality ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmux_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpdmux_disable() - Disable DPDMUX functionality ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * ++ * Return: '0' on Success; Error code otherwise. 
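++ *
++ * A disable/enable pair can bracket reconfiguration of the object; an
++ * illustrative sketch, with error handling omitted:
++ *
++ *	dpdmux_disable(mc_io, 0, token);
++ *	... apply configuration changes ...
++ *	dpdmux_enable(mc_io, 0, token);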
++ */ ++int dpdmux_disable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpdmux_is_enabled() - Check if the DPDMUX is enabled. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * @en: Returns '1' if object is enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmux_is_enabled(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpdmux_rsp_is_enabled *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IS_ENABLED, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpdmux_rsp_is_enabled *)cmd.params; ++ *en = dpdmux_get_field(rsp_params->en, ENABLE); ++ ++ return 0; ++} ++ ++/** ++ * dpdmux_reset() - Reset the DPDMUX, returns the object to initial state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmux_reset(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpdmux_set_irq_enable() - Set overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * @irq_index: The interrupt index to configure ++ * @en: Interrupt state - enable = 1, disable = 0 ++ * ++ * Allows GPP software to control when interrupts are generated. ++ * Each interrupt can have up to 32 causes. The enable/disable controls the ++ * overall interrupt state. If the interrupt is disabled no causes will cause ++ * an interrupt. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u8 en) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpdmux_cmd_set_irq_enable *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpdmux_cmd_set_irq_enable *)cmd.params; ++ cmd_params->enable = en; ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpdmux_get_irq_enable() - Get overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * @irq_index: The interrupt index to configure ++ * @en: Returned interrupt state - enable = 1, disable = 0 ++ * ++ * Return: '0' on Success; Error code otherwise.
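++ *
++ * This reads back the overall gate set by dpdmux_set_irq_enable(). The
++ * usual arming sequence pairs a cause mask with that gate, as
++ * evb_setup_irqs() in evb.c does (sketch):
++ *
++ *	dpdmux_set_irq_mask(mc_io, 0, token, irq_index,
++ *			    DPDMUX_IRQ_EVENT_LINK_CHANGED);
++ *	dpdmux_set_irq_enable(mc_io, 0, token, irq_index, 1);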
++ */ ++int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u8 *en) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpdmux_cmd_get_irq_enable *cmd_params; ++ struct dpdmux_rsp_get_irq_enable *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpdmux_cmd_get_irq_enable *)cmd.params; ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpdmux_rsp_get_irq_enable *)cmd.params; ++ *en = rsp_params->enable; ++ ++ return 0; ++} ++ ++/** ++ * dpdmux_set_irq_mask() - Set interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * @irq_index: The interrupt index to configure ++ * @mask: event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting IRQ ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 mask) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpdmux_cmd_set_irq_mask *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_MASK, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpdmux_cmd_set_irq_mask *)cmd.params; ++ cmd_params->mask = cpu_to_le32(mask); ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpdmux_get_irq_mask() - Get interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * @irq_index: The interrupt index to configure ++ * @mask: Returned event mask to trigger interrupt ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 *mask) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpdmux_cmd_get_irq_mask *cmd_params; ++ struct dpdmux_rsp_get_irq_mask *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_MASK, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpdmux_cmd_get_irq_mask *)cmd.params; ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpdmux_rsp_get_irq_mask *)cmd.params; ++ *mask = le32_to_cpu(rsp_params->mask); ++ ++ return 0; ++} ++ ++/** ++ * dpdmux_get_irq_status() - Get the current status of any pending interrupts. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise. 
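++ *
++ * The value passed in through @status seeds the command, so the caller
++ * can pre-select which bits are sampled; the EVB interrupt handler in
++ * evb.c uses this for clear-on-read (sketch):
++ *
++ *	u32 status = DPDMUX_IRQ_EVENT_LINK_CHANGED;
++ *	err = dpdmux_get_irq_status(io, 0, token, irq_index, &status);
++ *	if (!err && (status & DPDMUX_IRQ_EVENT_LINK_CHANGED))
++ *		evb_links_state_update(priv);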
++ */ ++int dpdmux_get_irq_status(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 *status) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpdmux_cmd_get_irq_status *cmd_params; ++ struct dpdmux_rsp_get_irq_status *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_STATUS, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpdmux_cmd_get_irq_status *)cmd.params; ++ cmd_params->status = cpu_to_le32(*status); ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpdmux_rsp_get_irq_status *)cmd.params; ++ *status = le32_to_cpu(rsp_params->status); ++ ++ return 0; ++} ++ ++/** ++ * dpdmux_clear_irq_status() - Clear a pending interrupt's status ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * @irq_index: The interrupt index to configure ++ * @status: bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 status) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpdmux_cmd_clear_irq_status *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLEAR_IRQ_STATUS, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpdmux_cmd_clear_irq_status *)cmd.params; ++ cmd_params->status = cpu_to_le32(status); ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpdmux_get_attributes() - Retrieve DPDMUX attributes ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * @attr: Returned object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmux_get_attributes(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dpdmux_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpdmux_rsp_get_attr *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpdmux_rsp_get_attr *)cmd.params; ++ attr->id = le32_to_cpu(rsp_params->id); ++ attr->options = le64_to_cpu(rsp_params->options); ++ attr->method = rsp_params->method; ++ attr->manip = rsp_params->manip; ++ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs); ++ attr->mem_size = le16_to_cpu(rsp_params->mem_size); ++ ++ return 0; ++} ++ ++/** ++ * dpdmux_if_enable() - Enable Interface ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * @if_id: Interface Identifier ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. 
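++ *
++ * Interface numbering follows the convention documented for the other
++ * per-interface calls in this file: 0 is the uplink, 1..num_ifs are the
++ * downlink interfaces. Enabling every downlink might look like (sketch):
++ *
++ *	for (i = 1; i <= attr.num_ifs; i++)
++ *		err = dpdmux_if_enable(mc_io, 0, token, i);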
++ */ ++int dpdmux_if_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id) ++{ ++ struct dpdmux_cmd_if *cmd_params; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ENABLE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpdmux_cmd_if *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpdmux_if_disable() - Disable Interface ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * @if_id: Interface Identifier ++ * ++ * Return: Completion status. '0' on Success; Error code otherwise. ++ */ ++int dpdmux_if_disable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id) ++{ ++ struct dpdmux_cmd_if *cmd_params; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_DISABLE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpdmux_cmd_if *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpdmux_set_max_frame_length() - Set the maximum frame length in DPDMUX ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * @max_frame_length: The required maximum frame length ++ * ++ * Update the maximum frame length on all DMUX interfaces. ++ * In case of VEPA, the maximum frame length on all dmux interfaces ++ * will be updated with the minimum value of the mfls of the connected ++ * dpnis and the actual value of dmux mfl. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 max_frame_length) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpdmux_cmd_set_max_frame_length *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_MAX_FRAME_LENGTH, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpdmux_cmd_set_max_frame_length *)cmd.params; ++ cmd_params->max_frame_length = cpu_to_le16(max_frame_length); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpdmux_ul_reset_counters() - Function resets the uplink counter ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpdmux_if_set_accepted_frames() - Set the accepted frame types ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * @if_id: Interface ID (0 for uplink, or 1-num_ifs); ++ * @cfg: Frame types configuration ++ * ++ * if 'DPDMUX_ADMIT_ONLY_VLAN_TAGGED' is set - untagged frames or ++ * priority-tagged frames are discarded. ++ * if 'DPDMUX_ADMIT_ONLY_UNTAGGED' is set - untagged frames or ++ * priority-tagged frames are accepted. 
++ * if 'DPDMUX_ADMIT_ALL' is set (default mode) - all VLAN tagged, ++ * untagged and priority-tagged frames are accepted; ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ const struct dpdmux_accepted_frames *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpdmux_cmd_if_set_accepted_frames *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpdmux_cmd_if_set_accepted_frames *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ dpdmux_set_field(cmd_params->frames_options, ACCEPTED_FRAMES_TYPE, ++ cfg->type); ++ dpdmux_set_field(cmd_params->frames_options, UNACCEPTED_FRAMES_ACTION, ++ cfg->unaccept_act); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpdmux_if_get_attributes() - Obtain DPDMUX interface attributes ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * @if_id: Interface ID (0 for uplink, or 1-num_ifs); ++ * @attr: Interface attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ struct dpdmux_if_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpdmux_cmd_if *cmd_params; ++ struct dpdmux_rsp_if_get_attr *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_ATTR, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpdmux_cmd_if *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpdmux_rsp_if_get_attr *)cmd.params; ++ attr->rate = le32_to_cpu(rsp_params->rate); ++ attr->enabled = dpdmux_get_field(rsp_params->enabled, ENABLE); ++ attr->accept_frame_type = ++ dpdmux_get_field(rsp_params->accepted_frames_type, ++ ACCEPTED_FRAMES_TYPE); ++ ++ return 0; ++} ++ ++/** ++ * dpdmux_if_remove_l2_rule() - Remove L2 rule from DPDMUX table ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * @if_id: Destination interface ID ++ * @rule: L2 rule ++ * ++ * Function removes an L2 rule from the DPDMUX table ++ * or removes an interface from an existing multicast address ++ * ++ * Return: '0' on Success; Error code otherwise.
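++ *
++ * A rule pairs a MAC address with a VLAN id; the EVB driver builds one as
++ * follows before calling into this API (see evb_port_del_rule() in evb.c):
++ *
++ *	struct dpdmux_l2_rule rule = { .vlan_id = vid };
++ *	ether_addr_copy(rule.mac_addr, addr);
++ *	err = dpdmux_if_remove_l2_rule(mc_io, 0, token, if_id, &rule);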
++ */ ++int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ const struct dpdmux_l2_rule *rule) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpdmux_cmd_if_l2_rule *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_REMOVE_L2_RULE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ cmd_params->vlan_id = cpu_to_le16(rule->vlan_id); ++ cmd_params->mac_addr5 = rule->mac_addr[5]; ++ cmd_params->mac_addr4 = rule->mac_addr[4]; ++ cmd_params->mac_addr3 = rule->mac_addr[3]; ++ cmd_params->mac_addr2 = rule->mac_addr[2]; ++ cmd_params->mac_addr1 = rule->mac_addr[1]; ++ cmd_params->mac_addr0 = rule->mac_addr[0]; ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpdmux_if_add_l2_rule() - Add L2 rule into DPDMUX table ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * @if_id: Destination interface ID ++ * @rule: L2 rule ++ * ++ * Function adds an L2 rule into the DPDMUX table ++ * or adds an interface to an existing multicast address ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ const struct dpdmux_l2_rule *rule) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpdmux_cmd_if_l2_rule *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ADD_L2_RULE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ cmd_params->vlan_id = cpu_to_le16(rule->vlan_id); ++ cmd_params->mac_addr5 = rule->mac_addr[5]; ++ cmd_params->mac_addr4 = rule->mac_addr[4]; ++ cmd_params->mac_addr3 = rule->mac_addr[3]; ++ cmd_params->mac_addr2 = rule->mac_addr[2]; ++ cmd_params->mac_addr1 = rule->mac_addr[1]; ++ cmd_params->mac_addr0 = rule->mac_addr[0]; ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpdmux_if_get_counter() - Function obtains a specific counter of an interface ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * @if_id: Interface Id ++ * @counter_type: counter type ++ * @counter: Returned specific counter information ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmux_if_get_counter(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ enum dpdmux_counter_type counter_type, ++ u64 *counter) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpdmux_cmd_if_get_counter *cmd_params; ++ struct dpdmux_rsp_if_get_counter *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_COUNTER, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpdmux_cmd_if_get_counter *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ cmd_params->counter_type = counter_type; ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpdmux_rsp_if_get_counter *)cmd.params; ++ *counter = le64_to_cpu(rsp_params->counter); ++ ++ return 0; ++} ++ ++/** ++ * dpdmux_if_set_link_cfg() - Set the link configuration.
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * @if_id: interface id ++ * @cfg: Link configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ struct dpdmux_link_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpdmux_cmd_if_set_link_cfg *cmd_params; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_LINK_CFG, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpdmux_cmd_if_set_link_cfg *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ cmd_params->rate = cpu_to_le32(cfg->rate); ++ cmd_params->options = cpu_to_le64(cfg->options); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpdmux_if_get_link_state - Return the link state ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * @if_id: interface id ++ * @state: link state ++ * ++ * @returns '0' on Success; Error code otherwise. ++ */ ++int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ struct dpdmux_link_state *state) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpdmux_cmd_if_get_link_state *cmd_params; ++ struct dpdmux_rsp_if_get_link_state *rsp_params; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_LINK_STATE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpdmux_cmd_if_get_link_state *)cmd.params; ++ cmd_params->if_id = cpu_to_le16(if_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpdmux_rsp_if_get_link_state *)cmd.params; ++ state->rate = le32_to_cpu(rsp_params->rate); ++ state->options = le64_to_cpu(rsp_params->options); ++ state->up = dpdmux_get_field(rsp_params->up, ENABLE); ++ ++ return 0; ++} ++ ++/** ++ * dpdmux_set_custom_key - Set a custom classification key. ++ * ++ * This API is only available for DPDMUX instances created with ++ * DPDMUX_METHOD_CUSTOM. This API must be called before populating the ++ * classification table using dpdmux_add_custom_cls_entry. ++ * ++ * Calls to dpdmux_set_custom_key remove all existing classification entries ++ * that may have been added previously using dpdmux_add_custom_cls_entry. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * @key_cfg_iova: DMA address of a configuration structure set up using ++ * dpkg_prepare_key_cfg. Maximum key size is 24 bytes. ++ * ++ * @returns '0' on Success; Error code otherwise. ++ */ ++int dpdmux_set_custom_key(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u64 key_cfg_iova) ++{ ++ struct dpdmux_set_custom_key *cmd_params; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_CUSTOM_KEY, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpdmux_set_custom_key *)cmd.params; ++ cmd_params->key_cfg_iova = cpu_to_le64(key_cfg_iova); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpdmux_add_custom_cls_entry - Adds a custom classification entry.
++ * ++ * This API is only available for DPDMUX instances created with ++ * DPDMUX_METHOD_CUSTOM. Before calling this function a classification key ++ * composition rule must be set up using dpdmux_set_custom_key. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * @rule: Classification rule to insert. Rules cannot be duplicated; if a ++ * matching rule already exists, its action will be replaced. ++ * @action: Action to perform for matching traffic. ++ * ++ * @returns '0' on Success; Error code otherwise. ++ */ ++int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dpdmux_rule_cfg *rule, ++ struct dpdmux_cls_action *action) ++{ ++ struct dpdmux_cmd_add_custom_cls_entry *cmd_params; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY, ++ cmd_flags, ++ token); ++ ++ cmd_params = (struct dpdmux_cmd_add_custom_cls_entry *)cmd.params; ++ cmd_params->key_size = rule->key_size; ++ cmd_params->dest_if = cpu_to_le16(action->dest_if); ++ cmd_params->key_iova = cpu_to_le64(rule->key_iova); ++ cmd_params->mask_iova = cpu_to_le64(rule->mask_iova); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpdmux_remove_custom_cls_entry - Removes a custom classification entry. ++ * ++ * This API is only available for DPDMUX instances created with ++ * DPDMUX_METHOD_CUSTOM. The API can be used to remove classification ++ * entries previously inserted using dpdmux_add_custom_cls_entry. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMUX object ++ * @rule: Classification rule to remove ++ * ++ * @returns '0' on Success; Error code otherwise. ++ */ ++int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dpdmux_rule_cfg *rule) ++{ ++ struct dpdmux_cmd_remove_custom_cls_entry *cmd_params; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpdmux_cmd_remove_custom_cls_entry *)cmd.params; ++ cmd_params->key_size = rule->key_size; ++ cmd_params->key_iova = cpu_to_le64(rule->key_iova); ++ cmd_params->mask_iova = cpu_to_le64(rule->mask_iova); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpdmux_get_api_version() - Get Data Path Demux API version ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @major_ver: Major version of data path demux API ++ * @minor_ver: Minor version of data path demux API ++ * ++ * Return: '0' on Success; Error code otherwise.
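++ *
++ * Callers typically gate on a minimum supported version, mirroring the
++ * DPSW check in the ethsw driver (sketch; DPDMUX_MIN_VER_* come from
++ * evb.c):
++ *
++ *	u16 major, minor;
++ *	err = dpdmux_get_api_version(mc_io, 0, &major, &minor);
++ *	if (!err && (major < DPDMUX_MIN_VER_MAJOR ||
++ *		     (major == DPDMUX_MIN_VER_MAJOR &&
++ *		      minor < DPDMUX_MIN_VER_MINOR)))
++ *		return -ENOTSUPP;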
++ */ ++int dpdmux_get_api_version(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 *major_ver, ++ u16 *minor_ver) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpdmux_rsp_get_api_version *rsp_params; ++ int err; ++ ++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_API_VERSION, ++ cmd_flags, ++ 0); ++ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ rsp_params = (struct dpdmux_rsp_get_api_version *)cmd.params; ++ *major_ver = le16_to_cpu(rsp_params->major); ++ *minor_ver = le16_to_cpu(rsp_params->minor); ++ ++ return 0; ++} +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.h +@@ -0,0 +1,453 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++#ifndef __FSL_DPDMUX_H ++#define __FSL_DPDMUX_H ++ ++struct fsl_mc_io; ++ ++/* Data Path Demux API ++ * Contains API for handling DPDMUX topology and functionality ++ */ ++ ++int dpdmux_open(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ int dpdmux_id, ++ u16 *token); ++ ++int dpdmux_close(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ ++/** ++ * DPDMUX general options ++ */ ++ ++/** ++ * Enable bridging between internal interfaces ++ */ ++#define DPDMUX_OPT_BRIDGE_EN 0x0000000000000002ULL ++ ++/** ++ * Mask support for classification ++ */ ++#define DPDMUX_OPT_CLS_MASK_SUPPORT 0x0000000000000020ULL ++ ++#define DPDMUX_IRQ_INDEX_IF 0x0000 ++#define DPDMUX_IRQ_INDEX 0x0001 ++ ++/** ++ * IRQ event - Indicates that the link state changed ++ */ ++#define DPDMUX_IRQ_EVENT_LINK_CHANGED 0x0001 ++ ++/** ++ * enum dpdmux_manip - DPDMUX manipulation operations ++ * @DPDMUX_MANIP_NONE: No manipulation on frames ++ * @DPDMUX_MANIP_ADD_REMOVE_S_VLAN: Add S-VLAN on egress, remove it on ingress ++ */ ++enum dpdmux_manip { ++ DPDMUX_MANIP_NONE = 0x0, ++ DPDMUX_MANIP_ADD_REMOVE_S_VLAN = 0x1 ++}; ++ ++/** ++ * enum dpdmux_method - DPDMUX method options ++ * @DPDMUX_METHOD_NONE: no DPDMUX method ++ * @DPDMUX_METHOD_C_VLAN_MAC: DPDMUX based on C-VLAN and MAC address ++ * @DPDMUX_METHOD_MAC: DPDMUX based on MAC address ++ * @DPDMUX_METHOD_C_VLAN: DPDMUX based on C-VLAN ++ * @DPDMUX_METHOD_S_VLAN: DPDMUX based on S-VLAN ++ * @DPDMUX_METHOD_CUSTOM: DPDMUX based on a custom classification key ++ */ ++enum dpdmux_method { ++ DPDMUX_METHOD_NONE = 0x0, ++ DPDMUX_METHOD_C_VLAN_MAC = 0x1, ++ DPDMUX_METHOD_MAC = 0x2, ++ DPDMUX_METHOD_C_VLAN = 0x3, ++ DPDMUX_METHOD_S_VLAN = 0x4, ++ DPDMUX_METHOD_CUSTOM = 0x5 ++}; ++ ++/** ++ * struct dpdmux_cfg - DPDMUX configuration parameters ++ * @method: Defines the operation method for the DPDMUX address table ++ * @manip: Required manipulation operation ++ * @num_ifs: Number of interfaces (excluding the uplink interface) ++ * @adv: Advanced parameters; default is all zeros; ++ * use this structure to change default settings ++ */ ++struct dpdmux_cfg { ++ enum dpdmux_method method; ++ enum dpdmux_manip manip; ++ u16 num_ifs; ++ /** ++ * struct adv - Advanced parameters ++ * @options: DPDMUX options - combination of 'DPDMUX_OPT_' flags ++ * @max_dmat_entries: Maximum entries in DPDMUX address table ++ * 0 - indicates default: 64 entries per interface. ++ * @max_mc_groups: Number of multicast groups in DPDMUX table ++ * 0 - indicates default: 32 multicast groups ++ * @max_vlan_ids: max vlan ids allowed in the system - ++ * relevant only in case of working in mac+vlan method. ++ * 0 - indicates default 16 vlan ids.
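++ *
++ * Leaving this struct zeroed selects the defaults above; a full
++ * configuration might look like (illustrative values):
++ *
++ *	struct dpdmux_cfg cfg = {
++ *		.method = DPDMUX_METHOD_MAC,
++ *		.manip = DPDMUX_MANIP_NONE,
++ *		.num_ifs = 2,
++ *	};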
++ */ ++ struct { ++ u64 options; ++ u16 max_dmat_entries; ++ u16 max_mc_groups; ++ u16 max_vlan_ids; ++ } adv; ++}; ++ ++int dpdmux_create(struct fsl_mc_io *mc_io, ++ u16 dprc_token, ++ u32 cmd_flags, ++ const struct dpdmux_cfg *cfg, ++ u32 *obj_id); ++ ++int dpdmux_destroy(struct fsl_mc_io *mc_io, ++ u16 dprc_token, ++ u32 cmd_flags, ++ u32 object_id); ++ ++int dpdmux_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ ++int dpdmux_disable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ ++int dpdmux_is_enabled(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ int *en); ++ ++int dpdmux_reset(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ ++int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u8 en); ++ ++int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u8 *en); ++ ++int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 mask); ++ ++int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 *mask); ++ ++int dpdmux_get_irq_status(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 *status); ++ ++int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 status); ++ ++/** ++ * struct dpdmux_attr - Structure representing DPDMUX attributes ++ * @id: DPDMUX object ID ++ * @options: Configuration options (bitmap) ++ * @method: DPDMUX address table method ++ * @manip: DPDMUX manipulation type ++ * @num_ifs: Number of interfaces (excluding the uplink interface) ++ * @mem_size: DPDMUX frame storage memory size ++ */ ++struct dpdmux_attr { ++ int id; ++ u64 options; ++ enum dpdmux_method method; ++ enum dpdmux_manip manip; ++ u16 num_ifs; ++ u16 mem_size; ++}; ++ ++int dpdmux_get_attributes(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dpdmux_attr *attr); ++ ++int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 max_frame_length); ++ ++/** ++ * enum dpdmux_counter_type - Counter types ++ * @DPDMUX_CNT_ING_FRAME: Counts ingress frames ++ * @DPDMUX_CNT_ING_BYTE: Counts ingress bytes ++ * @DPDMUX_CNT_ING_FLTR_FRAME: Counts filtered ingress frames ++ * @DPDMUX_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames ++ * @DPDMUX_CNT_ING_MCAST_FRAME: Counts ingress multicast frames ++ * @DPDMUX_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes ++ * @DPDMUX_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames ++ * @DPDMUX_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes ++ * @DPDMUX_CNT_EGR_FRAME: Counts egress frames ++ * @DPDMUX_CNT_EGR_BYTE: Counts egress bytes ++ * @DPDMUX_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames ++ */ ++enum dpdmux_counter_type { ++ DPDMUX_CNT_ING_FRAME = 0x0, ++ DPDMUX_CNT_ING_BYTE = 0x1, ++ DPDMUX_CNT_ING_FLTR_FRAME = 0x2, ++ DPDMUX_CNT_ING_FRAME_DISCARD = 0x3, ++ DPDMUX_CNT_ING_MCAST_FRAME = 0x4, ++ DPDMUX_CNT_ING_MCAST_BYTE = 0x5, ++ DPDMUX_CNT_ING_BCAST_FRAME = 0x6, ++ DPDMUX_CNT_ING_BCAST_BYTES = 0x7, ++ DPDMUX_CNT_EGR_FRAME = 0x8, ++ DPDMUX_CNT_EGR_BYTE = 0x9, ++ DPDMUX_CNT_EGR_FRAME_DISCARD = 0xa ++}; ++ ++/** ++ * enum dpdmux_accepted_frames_type - DPDMUX frame types ++ * @DPDMUX_ADMIT_ALL: The device accepts VLAN tagged, untagged and ++ * priority-tagged frames ++ * @DPDMUX_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or ++ * priority-tagged frames that 
are received on this ++ * interface ++ * @DPDMUX_ADMIT_ONLY_UNTAGGED: Untagged frames or priority-tagged frames ++ * received on this interface are accepted ++ */ ++enum dpdmux_accepted_frames_type { ++ DPDMUX_ADMIT_ALL = 0, ++ DPDMUX_ADMIT_ONLY_VLAN_TAGGED = 1, ++ DPDMUX_ADMIT_ONLY_UNTAGGED = 2 ++}; ++ ++/** ++ * enum dpdmux_action - DPDMUX action for un-accepted frames ++ * @DPDMUX_ACTION_DROP: Drop un-accepted frames ++ * @DPDMUX_ACTION_REDIRECT_TO_CTRL: Redirect un-accepted frames to the ++ * control interface ++ */ ++enum dpdmux_action { ++ DPDMUX_ACTION_DROP = 0, ++ DPDMUX_ACTION_REDIRECT_TO_CTRL = 1 ++}; ++ ++/** ++ * struct dpdmux_accepted_frames - Frame types configuration ++ * @type: Defines ingress accepted frames ++ * @unaccept_act: Defines action on frames not accepted ++ */ ++struct dpdmux_accepted_frames { ++ enum dpdmux_accepted_frames_type type; ++ enum dpdmux_action unaccept_act; ++}; ++ ++int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ const struct dpdmux_accepted_frames *cfg); ++ ++/** ++ * struct dpdmux_if_attr - Structure representing frame types configuration ++ * @rate: Configured interface rate (in bits per second) ++ * @enabled: Indicates if interface is enabled ++ * @accept_frame_type: Indicates type of accepted frames for the interface ++ */ ++struct dpdmux_if_attr { ++ u32 rate; ++ int enabled; ++ enum dpdmux_accepted_frames_type accept_frame_type; ++}; ++ ++int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ struct dpdmux_if_attr *attr); ++ ++int dpdmux_if_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id); ++ ++int dpdmux_if_disable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id); ++ ++/** ++ * struct dpdmux_l2_rule - Structure representing L2 rule ++ * @mac_addr: MAC address ++ * @vlan_id: VLAN ID ++ */ ++struct dpdmux_l2_rule { ++ u8 mac_addr[6]; ++ u16 vlan_id; ++}; ++ ++int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ const struct dpdmux_l2_rule *rule); ++ ++int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ const struct dpdmux_l2_rule *rule); ++ ++int dpdmux_if_get_counter(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ enum dpdmux_counter_type counter_type, ++ u64 *counter); ++ ++int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ ++/** ++ * Enable auto-negotiation ++ */ ++#define DPDMUX_LINK_OPT_AUTONEG 0x0000000000000001ULL ++/** ++ * Enable half-duplex mode ++ */ ++#define DPDMUX_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL ++/** ++ * Enable pause frames ++ */ ++#define DPDMUX_LINK_OPT_PAUSE 0x0000000000000004ULL ++/** ++ * Enable asymmetric pause frames ++ */ ++#define DPDMUX_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL ++ ++/** ++ * struct dpdmux_link_cfg - Structure representing DPDMUX link configuration ++ * @rate: Rate ++ * @options: Mask of available options; use 'DPDMUX_LINK_OPT_' values ++ */ ++struct dpdmux_link_cfg { ++ u32 rate; ++ u64 options; ++}; ++ ++int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ struct dpdmux_link_cfg *cfg); ++/** ++ * struct dpdmux_link_state - Structure representing DPDMUX link state ++ * @rate: Rate ++ * @options: Mask of available options; use 'DPDMUX_LINK_OPT_' values ++ * @up: 0 - down, 1 - up ++ */ ++struct dpdmux_link_state { ++ u32 rate;
++ u64 options; ++ int up; ++}; ++ ++int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u16 if_id, ++ struct dpdmux_link_state *state); ++ ++int dpdmux_set_custom_key(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u64 key_cfg_iova); ++ ++/** ++ * struct dpdmux_rule_cfg - Custom classification rule. ++ * ++ * @key_iova: DMA address of buffer storing the look-up value ++ * @mask_iova: DMA address of the mask used for TCAM classification ++ * @key_size: size, in bytes, of the look-up value. This must match the size ++ * of the look-up key defined using dpdmux_set_custom_key, otherwise the ++ * entry will never be hit ++ */ ++struct dpdmux_rule_cfg { ++ u64 key_iova; ++ u64 mask_iova; ++ u8 key_size; ++}; ++ ++/** ++ * struct dpdmux_cls_action - Action to execute for frames matching the ++ * classification entry ++ * ++ * @dest_if: Interface to forward the frames to. Port numbering is similar to ++ * the one used to connect interfaces: ++ * - 0 is the uplink port, ++ * - all others are downlink ports. ++ */ ++struct dpdmux_cls_action { ++ u16 dest_if; ++}; ++ ++int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dpdmux_rule_cfg *rule, ++ struct dpdmux_cls_action *action); ++ ++int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dpdmux_rule_cfg *rule); ++ ++int dpdmux_get_api_version(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 *major_ver, ++ u16 *minor_ver); ++ ++#endif /* __FSL_DPDMUX_H */ +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/evb/evb.c +@@ -0,0 +1,1350 @@ ++/* Copyright 2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */
++#include <linux/module.h>
++#include <linux/msi.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/rtnetlink.h>
++#include <linux/if_vlan.h>
++
++#include <uapi/linux/if_bridge.h>
++#include <net/netlink.h>
++
++#include "../../fsl-mc/include/mc.h"
++
++#include "dpdmux.h"
++#include "dpdmux-cmd.h"
++
++static const char evb_drv_version[] = "0.1";
++
++/* Minimal supported DPDMUX version */
++#define DPDMUX_MIN_VER_MAJOR	6
++#define DPDMUX_MIN_VER_MINOR	0
++
++/* IRQ index */
++#define DPDMUX_MAX_IRQ_NUM	2
++
++/* MAX FRAME LENGTH (currently 10k) */
++#define EVB_MAX_FRAME_LENGTH	(10 * 1024)
++/* MIN FRAME LENGTH (64 bytes + 4 bytes CRC) */
++#define EVB_MIN_FRAME_LENGTH	68
++
++struct evb_port_priv {
++	struct net_device	*netdev;
++	struct list_head	list;
++	u16			port_index;
++	struct evb_priv		*evb_priv;
++	u8			vlans[VLAN_VID_MASK + 1];
++};
++
++struct evb_priv {
++	/* keep first */
++	struct evb_port_priv	uplink;
++
++	struct fsl_mc_io	*mc_io;
++	struct list_head	port_list;
++	struct dpdmux_attr	attr;
++	u16			mux_handle;
++	int			dev_id;
++};
++
++static int _evb_port_carrier_state_sync(struct net_device *netdev)
++{
++	struct evb_port_priv *port_priv = netdev_priv(netdev);
++	struct dpdmux_link_state state;
++	int err;
++
++	err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
++				       port_priv->evb_priv->mux_handle,
++				       port_priv->port_index, &state);
++	if (unlikely(err)) {
++		netdev_err(netdev, "dpdmux_if_get_link_state() err %d\n", err);
++		return err;
++	}
++
++	WARN_ONCE(state.up > 1, "Garbage read into link_state");
++
++	if (state.up)
++		netif_carrier_on(port_priv->netdev);
++	else
++		netif_carrier_off(port_priv->netdev);
++
++	return 0;
++}
++
++static int evb_port_open(struct net_device *netdev)
++{
++	int err;
++
++	/* FIXME: enable port when support added */
++
++	err = _evb_port_carrier_state_sync(netdev);
++	if (err) {
++		netdev_err(netdev, "_evb_port_carrier_state_sync err %d\n",
++			   err);
++		return err;
++	}
++
++	return 0;
++}
++
++static netdev_tx_t evb_dropframe(struct sk_buff *skb, struct net_device *dev)
++{
++	/* we don't support I/O for now, drop the frame */
++	dev_kfree_skb_any(skb);
++	return NETDEV_TX_OK;
++}
++
++static int evb_links_state_update(struct evb_priv *priv)
++{
++	struct evb_port_priv *port_priv;
++	struct list_head *pos;
++	int err;
++
++	list_for_each(pos, &priv->port_list) {
++		port_priv = list_entry(pos, struct evb_port_priv, list);
++
++		err = _evb_port_carrier_state_sync(port_priv->netdev);
++		if (err)
++			netdev_err(port_priv->netdev,
++				   "_evb_port_carrier_state_sync err %d\n",
++				   err);
++	}
++
++	return 0;
++}
++
++static irqreturn_t evb_irq0_handler(int irq_num, void *arg)
++{
++	return IRQ_WAKE_THREAD;
++}
++
++static irqreturn_t _evb_irq0_handler_thread(int irq_num, void *arg)
++{
++	struct device *dev = (struct device *)arg;
++	struct fsl_mc_device *evb_dev = to_fsl_mc_device(dev);
++	struct net_device *netdev = dev_get_drvdata(dev);
++	struct evb_priv *priv = netdev_priv(netdev);
++	struct fsl_mc_io *io = priv->mc_io;
++	u16 token = priv->mux_handle;
++	int irq_index = DPDMUX_IRQ_INDEX_IF;
++
++	/* Mask the events and the if_id reserved bits to be cleared on read */
++	u32 status = DPDMUX_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
++	int err;
++
++	/* Sanity check */
++	if (WARN_ON(!evb_dev || !evb_dev->irqs || !evb_dev->irqs[irq_index]))
++		goto out;
++	if (WARN_ON(evb_dev->irqs[irq_index]->msi_desc->irq != (u32)irq_num))
++		goto out;
++
++	err = dpdmux_get_irq_status(io, 0, token, irq_index, &status);
++	if (unlikely(err)) {
++		netdev_err(netdev, "Can't get irq status (err %d)", err);
++		err = dpdmux_clear_irq_status(io, 0, token, irq_index,
++
0xFFFFFFFF); ++ if (unlikely(err)) ++ netdev_err(netdev, "Can't clear irq status (err %d)", ++ err); ++ goto out; ++ } ++ ++ if (status & DPDMUX_IRQ_EVENT_LINK_CHANGED) { ++ err = evb_links_state_update(priv); ++ if (unlikely(err)) ++ goto out; ++ } ++ ++out: ++ return IRQ_HANDLED; ++} ++ ++static int evb_setup_irqs(struct fsl_mc_device *evb_dev) ++{ ++ struct device *dev = &evb_dev->dev; ++ struct net_device *netdev = dev_get_drvdata(dev); ++ struct evb_priv *priv = netdev_priv(netdev); ++ int err = 0; ++ struct fsl_mc_device_irq *irq; ++ const int irq_index = DPDMUX_IRQ_INDEX_IF; ++ u32 mask = DPDMUX_IRQ_EVENT_LINK_CHANGED; ++ ++ err = fsl_mc_allocate_irqs(evb_dev); ++ if (unlikely(err)) { ++ dev_err(dev, "MC irqs allocation failed\n"); ++ return err; ++ } ++ ++ if (WARN_ON(evb_dev->obj_desc.irq_count != DPDMUX_MAX_IRQ_NUM)) { ++ err = -EINVAL; ++ goto free_irq; ++ } ++ ++ err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle, ++ irq_index, 0); ++ if (unlikely(err)) { ++ dev_err(dev, "dpdmux_set_irq_enable err %d\n", err); ++ goto free_irq; ++ } ++ ++ irq = evb_dev->irqs[irq_index]; ++ ++ err = devm_request_threaded_irq(dev, irq->msi_desc->irq, ++ evb_irq0_handler, ++ _evb_irq0_handler_thread, ++ IRQF_NO_SUSPEND | IRQF_ONESHOT, ++ dev_name(dev), dev); ++ if (unlikely(err)) { ++ dev_err(dev, "devm_request_threaded_irq(): %d", err); ++ goto free_irq; ++ } ++ ++ err = dpdmux_set_irq_mask(priv->mc_io, 0, priv->mux_handle, ++ irq_index, mask); ++ if (unlikely(err)) { ++ dev_err(dev, "dpdmux_set_irq_mask(): %d", err); ++ goto free_devm_irq; ++ } ++ ++ err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle, ++ irq_index, 1); ++ if (unlikely(err)) { ++ dev_err(dev, "dpdmux_set_irq_enable(): %d", err); ++ goto free_devm_irq; ++ } ++ ++ return 0; ++ ++free_devm_irq: ++ devm_free_irq(dev, irq->msi_desc->irq, dev); ++free_irq: ++ fsl_mc_free_irqs(evb_dev); ++ return err; ++} ++ ++static void evb_teardown_irqs(struct fsl_mc_device *evb_dev) ++{ ++ struct device *dev = &evb_dev->dev; ++ struct net_device *netdev = dev_get_drvdata(dev); ++ struct evb_priv *priv = netdev_priv(netdev); ++ ++ dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle, ++ DPDMUX_IRQ_INDEX_IF, 0); ++ ++ devm_free_irq(dev, ++ evb_dev->irqs[DPDMUX_IRQ_INDEX_IF]->msi_desc->irq, ++ dev); ++ fsl_mc_free_irqs(evb_dev); ++} ++ ++static int evb_port_add_rule(struct net_device *netdev, ++ const unsigned char *addr, u16 vid) ++{ ++ struct evb_port_priv *port_priv = netdev_priv(netdev); ++ struct dpdmux_l2_rule rule = { .vlan_id = vid }; ++ int err; ++ ++ if (addr) ++ ether_addr_copy(rule.mac_addr, addr); ++ ++ err = dpdmux_if_add_l2_rule(port_priv->evb_priv->mc_io, ++ 0, ++ port_priv->evb_priv->mux_handle, ++ port_priv->port_index, &rule); ++ if (unlikely(err)) ++ netdev_err(netdev, "dpdmux_if_add_l2_rule err %d\n", err); ++ return err; ++} ++ ++static int evb_port_del_rule(struct net_device *netdev, ++ const unsigned char *addr, u16 vid) ++{ ++ struct evb_port_priv *port_priv = netdev_priv(netdev); ++ struct dpdmux_l2_rule rule = { .vlan_id = vid }; ++ int err; ++ ++ if (addr) ++ ether_addr_copy(rule.mac_addr, addr); ++ ++ err = dpdmux_if_remove_l2_rule(port_priv->evb_priv->mc_io, ++ 0, ++ port_priv->evb_priv->mux_handle, ++ port_priv->port_index, &rule); ++ if (unlikely(err)) ++ netdev_err(netdev, "dpdmux_if_remove_l2_rule err %d\n", err); ++ return err; ++} ++ ++static bool _lookup_address(struct net_device *netdev, ++ const unsigned char *addr) ++{ ++ struct netdev_hw_addr *ha; ++ struct netdev_hw_addr_list *list = 
(is_unicast_ether_addr(addr)) ? ++ &netdev->uc : &netdev->mc; ++ ++ netif_addr_lock_bh(netdev); ++ list_for_each_entry(ha, &list->list, list) { ++ if (ether_addr_equal(ha->addr, addr)) { ++ netif_addr_unlock_bh(netdev); ++ return true; ++ } ++ } ++ netif_addr_unlock_bh(netdev); ++ return false; ++} ++ ++static inline int evb_port_fdb_prep(struct nlattr *tb[], ++ struct net_device *netdev, ++ const unsigned char *addr, u16 *vid, ++ bool del) ++{ ++ struct evb_port_priv *port_priv = netdev_priv(netdev); ++ struct evb_priv *evb_priv = port_priv->evb_priv; ++ ++ *vid = 0; ++ ++ if (evb_priv->attr.method != DPDMUX_METHOD_MAC && ++ evb_priv->attr.method != DPDMUX_METHOD_C_VLAN_MAC) { ++ netdev_err(netdev, ++ "EVB mode does not support MAC classification\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ /* check if the address is configured on this port */ ++ if (_lookup_address(netdev, addr)) { ++ if (!del) ++ return -EEXIST; ++ } else { ++ if (del) ++ return -ENOENT; ++ } ++ ++ if (tb[NDA_VLAN] && evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) { ++ if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) { ++ netdev_err(netdev, "invalid vlan size %d\n", ++ nla_len(tb[NDA_VLAN])); ++ return -EINVAL; ++ } ++ ++ *vid = nla_get_u16(tb[NDA_VLAN]); ++ ++ if (!*vid || *vid >= VLAN_VID_MASK) { ++ netdev_err(netdev, "invalid vid value 0x%04x\n", *vid); ++ return -EINVAL; ++ } ++ } else if (evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) { ++ netdev_err(netdev, ++ "EVB mode requires explicit VLAN configuration\n"); ++ return -EINVAL; ++ } else if (tb[NDA_VLAN]) { ++ netdev_warn(netdev, "VLAN not supported, argument ignored\n"); ++ } ++ ++ return 0; ++} ++ ++static int evb_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], ++ struct net_device *netdev, ++ const unsigned char *addr, u16 vid, u16 flags) ++{ ++ u16 _vid; ++ int err; ++ ++ /* TODO: add replace support when added to iproute bridge */ ++ if (!(flags & NLM_F_REQUEST)) { ++ netdev_err(netdev, ++ "evb_port_fdb_add unexpected flags value %08x\n", ++ flags); ++ return -EINVAL; ++ } ++ ++ err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 0); ++ if (unlikely(err)) ++ return err; ++ ++ err = evb_port_add_rule(netdev, addr, _vid); ++ if (unlikely(err)) ++ return err; ++ ++ if (is_unicast_ether_addr(addr)) { ++ err = dev_uc_add(netdev, addr); ++ if (unlikely(err)) { ++ netdev_err(netdev, "dev_uc_add err %d\n", err); ++ return err; ++ } ++ } else { ++ err = dev_mc_add(netdev, addr); ++ if (unlikely(err)) { ++ netdev_err(netdev, "dev_mc_add err %d\n", err); ++ return err; ++ } ++ } ++ ++ return 0; ++} ++ ++static int evb_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], ++ struct net_device *netdev, ++ const unsigned char *addr, u16 vid) ++{ ++ u16 _vid; ++ int err; ++ ++ err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 1); ++ if (unlikely(err)) ++ return err; ++ ++ err = evb_port_del_rule(netdev, addr, _vid); ++ if (unlikely(err)) ++ return err; ++ ++ if (is_unicast_ether_addr(addr)) { ++ err = dev_uc_del(netdev, addr); ++ if (unlikely(err)) { ++ netdev_err(netdev, "dev_uc_del err %d\n", err); ++ return err; ++ } ++ } else { ++ err = dev_mc_del(netdev, addr); ++ if (unlikely(err)) { ++ netdev_err(netdev, "dev_mc_del err %d\n", err); ++ return err; ++ } ++ } ++ ++ return 0; ++} ++ ++static int evb_change_mtu(struct net_device *netdev, ++ int mtu) ++{ ++ struct evb_port_priv *port_priv = netdev_priv(netdev); ++ struct evb_priv *evb_priv = port_priv->evb_priv; ++ struct list_head *pos; ++ int err = 0; ++ ++ /* This operation is not permitted on downlinks */ ++ 
if (port_priv->port_index > 0)
++		return -EPERM;
++
++	if (mtu < EVB_MIN_FRAME_LENGTH || mtu > EVB_MAX_FRAME_LENGTH) {
++		netdev_err(netdev, "Invalid MTU %d. Valid range is: %d..%d\n",
++			   mtu, EVB_MIN_FRAME_LENGTH, EVB_MAX_FRAME_LENGTH);
++		return -EINVAL;
++	}
++
++	err = dpdmux_set_max_frame_length(evb_priv->mc_io,
++					  0,
++					  evb_priv->mux_handle,
++					  (uint16_t)mtu);
++
++	if (unlikely(err)) {
++		netdev_err(netdev, "dpdmux_set_max_frame_length err %d\n",
++			   err);
++		return err;
++	}
++
++	/* Update the max frame length for downlinks */
++	list_for_each(pos, &evb_priv->port_list) {
++		port_priv = list_entry(pos, struct evb_port_priv, list);
++		port_priv->netdev->mtu = mtu;
++	}
++
++	netdev->mtu = mtu;
++	return 0;
++}
++
++static const struct nla_policy ifla_br_policy[IFLA_MAX + 1] = {
++	[IFLA_BRIDGE_FLAGS]	= { .type = NLA_U16 },
++	[IFLA_BRIDGE_MODE]	= { .type = NLA_U16 },
++	[IFLA_BRIDGE_VLAN_INFO]	= { .type = NLA_BINARY,
++				    .len = sizeof(struct bridge_vlan_info), },
++};
++
++static int evb_setlink_af_spec(struct net_device *netdev,
++			       struct nlattr **tb)
++{
++	struct bridge_vlan_info *vinfo;
++	struct evb_port_priv *port_priv = netdev_priv(netdev);
++	int err = 0;
++
++	if (!tb[IFLA_BRIDGE_VLAN_INFO]) {
++		netdev_err(netdev, "no VLAN INFO in nlmsg\n");
++		return -EOPNOTSUPP;
++	}
++
++	vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
++
++	if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
++		return -EINVAL;
++
++	err = evb_port_add_rule(netdev, NULL, vinfo->vid);
++	if (unlikely(err))
++		return err;
++
++	port_priv->vlans[vinfo->vid] = 1;
++
++	return 0;
++}
++
++static int evb_setlink(struct net_device *netdev,
++		       struct nlmsghdr *nlh,
++		       u16 flags)
++{
++	struct evb_port_priv *port_priv = netdev_priv(netdev);
++	struct evb_priv *evb_priv = port_priv->evb_priv;
++	struct nlattr *attr;
++	struct nlattr *tb[(IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX ?
++			   IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX) + 1];
++	int err = 0;
++
++	if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN &&
++	    evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) {
++		netdev_err(netdev,
++			   "EVB mode does not support VLAN only classification\n");
++		return -EOPNOTSUPP;
++	}
++
++	attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
++	if (attr) {
++		err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr,
++				       ifla_br_policy);
++		if (unlikely(err)) {
++			netdev_err(netdev,
++				   "nla_parse_nested for br_policy err %d\n",
++				   err);
++			return err;
++		}
++
++		err = evb_setlink_af_spec(netdev, tb);
++		return err;
++	}
++
++	netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC\n");
++	return -EOPNOTSUPP;
++}
++
++static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev)
++{
++	struct evb_port_priv *port_priv = netdev_priv(netdev);
++	struct evb_priv *evb_priv = port_priv->evb_priv;
++	u8 operstate = netif_running(netdev) ?
++ netdev->operstate : IF_OPER_DOWN; ++ int iflink; ++ int err; ++ ++ err = nla_put_string(skb, IFLA_IFNAME, netdev->name); ++ if (unlikely(err)) ++ goto nla_put_err; ++ err = nla_put_u32(skb, IFLA_MASTER, evb_priv->uplink.netdev->ifindex); ++ if (unlikely(err)) ++ goto nla_put_err; ++ err = nla_put_u32(skb, IFLA_MTU, netdev->mtu); ++ if (unlikely(err)) ++ goto nla_put_err; ++ err = nla_put_u8(skb, IFLA_OPERSTATE, operstate); ++ if (unlikely(err)) ++ goto nla_put_err; ++ if (netdev->addr_len) { ++ err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len, ++ netdev->dev_addr); ++ if (unlikely(err)) ++ goto nla_put_err; ++ } ++ ++ iflink = dev_get_iflink(netdev); ++ if (netdev->ifindex != iflink) { ++ err = nla_put_u32(skb, IFLA_LINK, iflink); ++ if (unlikely(err)) ++ goto nla_put_err; ++ } ++ ++ return 0; ++ ++nla_put_err: ++ netdev_err(netdev, "nla_put_ err %d\n", err); ++ return err; ++} ++ ++static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev) ++{ ++ struct nlattr *nest; ++ int err; ++ ++ nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED); ++ if (!nest) { ++ netdev_err(netdev, "nla_nest_start failed\n"); ++ return -ENOMEM; ++ } ++ ++ err = nla_put_u8(skb, IFLA_BRPORT_STATE, BR_STATE_FORWARDING); ++ if (unlikely(err)) ++ goto nla_put_err; ++ err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0); ++ if (unlikely(err)) ++ goto nla_put_err; ++ err = nla_put_u32(skb, IFLA_BRPORT_COST, 0); ++ if (unlikely(err)) ++ goto nla_put_err; ++ err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0); ++ if (unlikely(err)) ++ goto nla_put_err; ++ err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0); ++ if (unlikely(err)) ++ goto nla_put_err; ++ err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0); ++ if (unlikely(err)) ++ goto nla_put_err; ++ err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0); ++ if (unlikely(err)) ++ goto nla_put_err; ++ err = nla_put_u8(skb, IFLA_BRPORT_LEARNING, 0); ++ if (unlikely(err)) ++ goto nla_put_err; ++ err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, 1); ++ if (unlikely(err)) ++ goto nla_put_err; ++ nla_nest_end(skb, nest); ++ ++ return 0; ++ ++nla_put_err: ++ netdev_err(netdev, "nla_put_ err %d\n", err); ++ nla_nest_cancel(skb, nest); ++ return err; ++} ++ ++static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev) ++{ ++ struct evb_port_priv *port_priv = netdev_priv(netdev); ++ struct nlattr *nest; ++ struct bridge_vlan_info vinfo; ++ const u8 *vlans = port_priv->vlans; ++ u16 i; ++ int err; ++ ++ nest = nla_nest_start(skb, IFLA_AF_SPEC); ++ if (!nest) { ++ netdev_err(netdev, "nla_nest_start failed"); ++ return -ENOMEM; ++ } ++ ++ for (i = 0; i < VLAN_VID_MASK + 1; i++) { ++ if (!vlans[i]) ++ continue; ++ ++ vinfo.flags = 0; ++ vinfo.vid = i; ++ ++ err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO, ++ sizeof(vinfo), &vinfo); ++ if (unlikely(err)) ++ goto nla_put_err; ++ } ++ ++ nla_nest_end(skb, nest); ++ ++ return 0; ++ ++nla_put_err: ++ netdev_err(netdev, "nla_put_ err %d\n", err); ++ nla_nest_cancel(skb, nest); ++ return err; ++} ++ ++static int evb_getlink(struct sk_buff *skb, u32 pid, u32 seq, ++ struct net_device *netdev, u32 filter_mask, int nlflags) ++{ ++ struct evb_port_priv *port_priv = netdev_priv(netdev); ++ struct evb_priv *evb_priv = port_priv->evb_priv; ++ struct ifinfomsg *hdr; ++ struct nlmsghdr *nlh; ++ int err; ++ ++ if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN && ++ evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) { ++ return 0; ++ } ++ ++ nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI); ++ if (!nlh) ++ return -EMSGSIZE; ++ ++ hdr = 
nlmsg_data(nlh); ++ memset(hdr, 0, sizeof(*hdr)); ++ hdr->ifi_family = AF_BRIDGE; ++ hdr->ifi_type = netdev->type; ++ hdr->ifi_index = netdev->ifindex; ++ hdr->ifi_flags = dev_get_flags(netdev); ++ ++ err = __nla_put_netdev(skb, netdev); ++ if (unlikely(err)) ++ goto nla_put_err; ++ ++ err = __nla_put_port(skb, netdev); ++ if (unlikely(err)) ++ goto nla_put_err; ++ ++ /* Check if the VID information is requested */ ++ if (filter_mask & RTEXT_FILTER_BRVLAN) { ++ err = __nla_put_vlan(skb, netdev); ++ if (unlikely(err)) ++ goto nla_put_err; ++ } ++ ++ nlmsg_end(skb, nlh); ++ return skb->len; ++ ++nla_put_err: ++ nlmsg_cancel(skb, nlh); ++ return -EMSGSIZE; ++} ++ ++static int evb_dellink(struct net_device *netdev, ++ struct nlmsghdr *nlh, ++ u16 flags) ++{ ++ struct nlattr *tb[IFLA_BRIDGE_MAX + 1]; ++ struct nlattr *spec; ++ struct bridge_vlan_info *vinfo; ++ struct evb_port_priv *port_priv = netdev_priv(netdev); ++ int err = 0; ++ ++ spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); ++ if (!spec) ++ return 0; ++ ++ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy); ++ if (unlikely(err)) ++ return err; ++ ++ if (!tb[IFLA_BRIDGE_VLAN_INFO]) ++ return -EOPNOTSUPP; ++ ++ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]); ++ ++ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK) ++ return -EINVAL; ++ ++ err = evb_port_del_rule(netdev, NULL, vinfo->vid); ++ if (unlikely(err)) { ++ netdev_err(netdev, "evb_port_del_rule err %d\n", err); ++ return err; ++ } ++ port_priv->vlans[vinfo->vid] = 0; ++ ++ return 0; ++} ++ ++void evb_port_get_stats(struct net_device *netdev, ++ struct rtnl_link_stats64 *storage) ++{ ++ struct evb_port_priv *port_priv = netdev_priv(netdev); ++ u64 tmp; ++ int err; ++ ++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io, ++ 0, ++ port_priv->evb_priv->mux_handle, ++ port_priv->port_index, ++ DPDMUX_CNT_ING_FRAME, &storage->rx_packets); ++ if (unlikely(err)) ++ goto error; ++ ++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io, ++ 0, ++ port_priv->evb_priv->mux_handle, ++ port_priv->port_index, ++ DPDMUX_CNT_ING_BYTE, &storage->rx_bytes); ++ if (unlikely(err)) ++ goto error; ++ ++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io, ++ 0, ++ port_priv->evb_priv->mux_handle, ++ port_priv->port_index, ++ DPDMUX_CNT_ING_FLTR_FRAME, &tmp); ++ if (unlikely(err)) ++ goto error; ++ ++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io, ++ 0, ++ port_priv->evb_priv->mux_handle, ++ port_priv->port_index, ++ DPDMUX_CNT_ING_FRAME_DISCARD, ++ &storage->rx_dropped); ++ if (unlikely(err)) { ++ storage->rx_dropped = tmp; ++ goto error; ++ } ++ storage->rx_dropped += tmp; ++ ++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io, ++ 0, ++ port_priv->evb_priv->mux_handle, ++ port_priv->port_index, ++ DPDMUX_CNT_ING_MCAST_FRAME, ++ &storage->multicast); ++ if (unlikely(err)) ++ goto error; ++ ++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io, ++ 0, ++ port_priv->evb_priv->mux_handle, ++ port_priv->port_index, ++ DPDMUX_CNT_EGR_FRAME, &storage->tx_packets); ++ if (unlikely(err)) ++ goto error; ++ ++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io, ++ 0, ++ port_priv->evb_priv->mux_handle, ++ port_priv->port_index, ++ DPDMUX_CNT_EGR_BYTE, &storage->tx_bytes); ++ if (unlikely(err)) ++ goto error; ++ ++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io, ++ 0, ++ port_priv->evb_priv->mux_handle, ++ port_priv->port_index, ++ DPDMUX_CNT_EGR_FRAME_DISCARD, ++ &storage->tx_dropped); ++ if (unlikely(err)) ++ goto error; ++ ++ return; ++ 
++error:
++	netdev_err(netdev, "dpdmux_if_get_counter err %d\n", err);
++}
++
++static const struct net_device_ops evb_port_ops = {
++	.ndo_open		= &evb_port_open,
++
++	.ndo_start_xmit		= &evb_dropframe,
++
++	.ndo_fdb_add		= &evb_port_fdb_add,
++	.ndo_fdb_del		= &evb_port_fdb_del,
++
++	.ndo_get_stats64	= &evb_port_get_stats,
++	.ndo_change_mtu		= &evb_change_mtu,
++};
++
++static void evb_get_drvinfo(struct net_device *netdev,
++			    struct ethtool_drvinfo *drvinfo)
++{
++	struct evb_port_priv *port_priv = netdev_priv(netdev);
++	u16 version_major, version_minor;
++	int err;
++
++	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
++	strlcpy(drvinfo->version, evb_drv_version, sizeof(drvinfo->version));
++
++	err = dpdmux_get_api_version(port_priv->evb_priv->mc_io, 0,
++				     &version_major,
++				     &version_minor);
++	if (err)
++		strlcpy(drvinfo->fw_version, "N/A",
++			sizeof(drvinfo->fw_version));
++	else
++		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++			 "%u.%u", version_major, version_minor);
++
++	strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
++		sizeof(drvinfo->bus_info));
++}
++
++static int evb_get_settings(struct net_device *netdev,
++			    struct ethtool_cmd *cmd)
++{
++	struct evb_port_priv *port_priv = netdev_priv(netdev);
++	struct dpdmux_link_state state = {0};
++	int err = 0;
++
++	err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
++				       port_priv->evb_priv->mux_handle,
++				       port_priv->port_index,
++				       &state);
++	if (err) {
++		netdev_err(netdev, "ERROR %d getting link state", err);
++		goto out;
++	}
++
++	/* At the moment, we have no way of interrogating the DPMAC
++	 * from the DPDMUX side, and there may not exist a DPMAC at all.
++	 * Report only the autoneg state, duplex mode and speed.
++	 */
++	if (state.options & DPDMUX_LINK_OPT_AUTONEG)
++		cmd->autoneg = AUTONEG_ENABLE;
++	if (!(state.options & DPDMUX_LINK_OPT_HALF_DUPLEX))
++		cmd->duplex = DUPLEX_FULL;
++	ethtool_cmd_speed_set(cmd, state.rate);
++
++out:
++	return err;
++}
++
++static int evb_set_settings(struct net_device *netdev,
++			    struct ethtool_cmd *cmd)
++{
++	struct evb_port_priv *port_priv = netdev_priv(netdev);
++	struct dpdmux_link_state state = {0};
++	struct dpdmux_link_cfg cfg = {0};
++	int err = 0;
++
++	netdev_dbg(netdev, "Setting link parameters...");
++
++	err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
++				       port_priv->evb_priv->mux_handle,
++				       port_priv->port_index,
++				       &state);
++	if (err) {
++		netdev_err(netdev, "ERROR %d getting link state", err);
++		goto out;
++	}
++
++	/* Due to a temporary MC limitation, the DPDMUX port must be down
++	 * in order to change link settings, so let the user know about
++	 * this restriction.
++ */ ++ if (netif_running(netdev)) { ++ netdev_info(netdev, ++ "Sorry, interface must be brought down first.\n"); ++ return -EACCES; ++ } ++ ++ cfg.options = state.options; ++ cfg.rate = ethtool_cmd_speed(cmd); ++ if (cmd->autoneg == AUTONEG_ENABLE) ++ cfg.options |= DPDMUX_LINK_OPT_AUTONEG; ++ else ++ cfg.options &= ~DPDMUX_LINK_OPT_AUTONEG; ++ if (cmd->duplex == DUPLEX_HALF) ++ cfg.options |= DPDMUX_LINK_OPT_HALF_DUPLEX; ++ else ++ cfg.options &= ~DPDMUX_LINK_OPT_HALF_DUPLEX; ++ ++ err = dpdmux_if_set_link_cfg(port_priv->evb_priv->mc_io, 0, ++ port_priv->evb_priv->mux_handle, ++ port_priv->port_index, ++ &cfg); ++ if (err) ++ /* ethtool will be loud enough if we return an error; no point ++ * in putting our own error message on the console by default ++ */ ++ netdev_dbg(netdev, "ERROR %d setting link cfg", err); ++ ++out: ++ return err; ++} ++ ++static struct { ++ enum dpdmux_counter_type id; ++ char name[ETH_GSTRING_LEN]; ++} evb_ethtool_counters[] = { ++ {DPDMUX_CNT_ING_FRAME, "rx frames"}, ++ {DPDMUX_CNT_ING_BYTE, "rx bytes"}, ++ {DPDMUX_CNT_ING_FLTR_FRAME, "rx filtered frames"}, ++ {DPDMUX_CNT_ING_FRAME_DISCARD, "rx discarded frames"}, ++ {DPDMUX_CNT_ING_BCAST_FRAME, "rx b-cast frames"}, ++ {DPDMUX_CNT_ING_BCAST_BYTES, "rx b-cast bytes"}, ++ {DPDMUX_CNT_ING_MCAST_FRAME, "rx m-cast frames"}, ++ {DPDMUX_CNT_ING_MCAST_BYTE, "rx m-cast bytes"}, ++ {DPDMUX_CNT_EGR_FRAME, "tx frames"}, ++ {DPDMUX_CNT_EGR_BYTE, "tx bytes"}, ++ {DPDMUX_CNT_EGR_FRAME_DISCARD, "tx discarded frames"}, ++}; ++ ++static int evb_ethtool_get_sset_count(struct net_device *dev, int sset) ++{ ++ switch (sset) { ++ case ETH_SS_STATS: ++ return ARRAY_SIZE(evb_ethtool_counters); ++ default: ++ return -EOPNOTSUPP; ++ } ++} ++ ++static void evb_ethtool_get_strings(struct net_device *netdev, ++ u32 stringset, u8 *data) ++{ ++ u32 i; ++ ++ switch (stringset) { ++ case ETH_SS_STATS: ++ for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++) ++ memcpy(data + i * ETH_GSTRING_LEN, ++ evb_ethtool_counters[i].name, ETH_GSTRING_LEN); ++ break; ++ } ++} ++ ++static void evb_ethtool_get_stats(struct net_device *netdev, ++ struct ethtool_stats *stats, ++ u64 *data) ++{ ++ struct evb_port_priv *port_priv = netdev_priv(netdev); ++ u32 i; ++ int err; ++ ++ for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++) { ++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io, ++ 0, ++ port_priv->evb_priv->mux_handle, ++ port_priv->port_index, ++ evb_ethtool_counters[i].id, ++ &data[i]); ++ if (err) ++ netdev_err(netdev, "dpdmux_if_get_counter[%s] err %d\n", ++ evb_ethtool_counters[i].name, err); ++ } ++} ++ ++static const struct ethtool_ops evb_port_ethtool_ops = { ++ .get_drvinfo = &evb_get_drvinfo, ++ .get_link = ðtool_op_get_link, ++ .get_settings = &evb_get_settings, ++ .set_settings = &evb_set_settings, ++ .get_strings = &evb_ethtool_get_strings, ++ .get_ethtool_stats = &evb_ethtool_get_stats, ++ .get_sset_count = &evb_ethtool_get_sset_count, ++}; ++ ++static int evb_open(struct net_device *netdev) ++{ ++ struct evb_priv *priv = netdev_priv(netdev); ++ int err = 0; ++ ++ err = dpdmux_enable(priv->mc_io, 0, priv->mux_handle); ++ if (unlikely(err)) ++ netdev_err(netdev, "dpdmux_enable err %d\n", err); ++ ++ return err; ++} ++ ++static int evb_close(struct net_device *netdev) ++{ ++ struct evb_priv *priv = netdev_priv(netdev); ++ int err = 0; ++ ++ err = dpdmux_disable(priv->mc_io, 0, priv->mux_handle); ++ if (unlikely(err)) ++ netdev_err(netdev, "dpdmux_disable err %d\n", err); ++ ++ return err; ++} ++ ++static const struct net_device_ops 
evb_ops = { ++ .ndo_start_xmit = &evb_dropframe, ++ .ndo_open = &evb_open, ++ .ndo_stop = &evb_close, ++ ++ .ndo_bridge_setlink = &evb_setlink, ++ .ndo_bridge_getlink = &evb_getlink, ++ .ndo_bridge_dellink = &evb_dellink, ++ ++ .ndo_get_stats64 = &evb_port_get_stats, ++ .ndo_change_mtu = &evb_change_mtu, ++}; ++ ++static int evb_takedown(struct fsl_mc_device *evb_dev) ++{ ++ struct device *dev = &evb_dev->dev; ++ struct net_device *netdev = dev_get_drvdata(dev); ++ struct evb_priv *priv = netdev_priv(netdev); ++ int err; ++ ++ err = dpdmux_close(priv->mc_io, 0, priv->mux_handle); ++ if (unlikely(err)) ++ dev_warn(dev, "dpdmux_close err %d\n", err); ++ ++ return 0; ++} ++ ++static int evb_init(struct fsl_mc_device *evb_dev) ++{ ++ struct device *dev = &evb_dev->dev; ++ struct net_device *netdev = dev_get_drvdata(dev); ++ struct evb_priv *priv = netdev_priv(netdev); ++ u16 version_major; ++ u16 version_minor; ++ int err = 0; ++ ++ priv->dev_id = evb_dev->obj_desc.id; ++ ++ err = dpdmux_open(priv->mc_io, 0, priv->dev_id, &priv->mux_handle); ++ if (unlikely(err)) { ++ dev_err(dev, "dpdmux_open err %d\n", err); ++ goto err_exit; ++ } ++ if (!priv->mux_handle) { ++ dev_err(dev, "dpdmux_open returned null handle but no error\n"); ++ err = -EFAULT; ++ goto err_exit; ++ } ++ ++ err = dpdmux_get_attributes(priv->mc_io, 0, priv->mux_handle, ++ &priv->attr); ++ if (unlikely(err)) { ++ dev_err(dev, "dpdmux_get_attributes err %d\n", err); ++ goto err_close; ++ } ++ ++ err = dpdmux_get_api_version(priv->mc_io, 0, ++ &version_major, ++ &version_minor); ++ if (unlikely(err)) { ++ dev_err(dev, "dpdmux_get_api_version err %d\n", err); ++ goto err_close; ++ } ++ ++ /* Minimum supported DPDMUX version check */ ++ if (version_major < DPDMUX_MIN_VER_MAJOR || ++ (version_major == DPDMUX_MIN_VER_MAJOR && ++ version_minor < DPDMUX_MIN_VER_MINOR)) { ++ dev_err(dev, "DPDMUX version %d.%d not supported. 
Use %d.%d or greater.\n",
++			version_major, version_minor,
++			DPDMUX_MIN_VER_MAJOR, DPDMUX_MIN_VER_MINOR);
++		err = -ENOTSUPP;
++		goto err_close;
++	}
++
++	err = dpdmux_reset(priv->mc_io, 0, priv->mux_handle);
++	if (unlikely(err)) {
++		dev_err(dev, "dpdmux_reset err %d\n", err);
++		goto err_close;
++	}
++
++	return 0;
++
++err_close:
++	dpdmux_close(priv->mc_io, 0, priv->mux_handle);
++err_exit:
++	return err;
++}
++
++static int evb_remove(struct fsl_mc_device *evb_dev)
++{
++	struct device *dev = &evb_dev->dev;
++	struct net_device *netdev = dev_get_drvdata(dev);
++	struct evb_priv *priv = netdev_priv(netdev);
++	struct evb_port_priv *port_priv;
++	struct list_head *pos;
++
++	list_for_each(pos, &priv->port_list) {
++		port_priv = list_entry(pos, struct evb_port_priv, list);
++
++		rtnl_lock();
++		netdev_upper_dev_unlink(port_priv->netdev, netdev);
++		rtnl_unlock();
++
++		unregister_netdev(port_priv->netdev);
++		free_netdev(port_priv->netdev);
++	}
++
++	evb_teardown_irqs(evb_dev);
++
++	unregister_netdev(netdev);
++
++	evb_takedown(evb_dev);
++	fsl_mc_portal_free(priv->mc_io);
++
++	dev_set_drvdata(dev, NULL);
++	free_netdev(netdev);
++
++	return 0;
++}
++
++static int evb_probe(struct fsl_mc_device *evb_dev)
++{
++	struct device *dev;
++	struct evb_priv *priv = NULL;
++	struct net_device *netdev = NULL;
++	char port_name[IFNAMSIZ];
++	int i;
++	int err = 0;
++
++	dev = &evb_dev->dev;
++
++	/* register switch device, it's for management only - no I/O */
++	netdev = alloc_etherdev(sizeof(*priv));
++	if (!netdev) {
++		dev_err(dev, "alloc_etherdev error\n");
++		return -ENOMEM;
++	}
++	netdev->netdev_ops = &evb_ops;
++
++	dev_set_drvdata(dev, netdev);
++
++	priv = netdev_priv(netdev);
++
++	err = fsl_mc_portal_allocate(evb_dev, 0, &priv->mc_io);
++	if (unlikely(err)) {
++		dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
++		goto err_free_netdev;
++	}
++	if (!priv->mc_io) {
++		dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
++		err = -EFAULT;
++		goto err_free_netdev;
++	}
++
++	err = evb_init(evb_dev);
++	if (unlikely(err)) {
++		dev_err(dev, "evb init err %d\n", err);
++		goto err_free_cmdport;
++	}
++
++	INIT_LIST_HEAD(&priv->port_list);
++	netdev->flags |= IFF_PROMISC | IFF_MASTER;
++
++	dev_alloc_name(netdev, "evb%d");
++
++	/* register switch ports */
++	snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name);
++
++	/* only register downlinks? */
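++	/* if_id 0 is the uplink and reuses the master netdev allocated
++	 * above; every other if_id gets its own downlink netdev (named
++	 * "<master>pN") which is registered and linked under the master.
++	 */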
++	for (i = 0; i < priv->attr.num_ifs + 1; i++) {
++		struct net_device *port_netdev;
++		struct evb_port_priv *port_priv;
++
++		if (i) {
++			port_netdev =
++				alloc_etherdev(sizeof(struct evb_port_priv));
++			if (!port_netdev) {
++				dev_err(dev, "alloc_etherdev error\n");
++				err = -ENOMEM;
++				goto err_takedown;
++			}
++
++			port_priv = netdev_priv(port_netdev);
++
++			port_netdev->flags |= IFF_PROMISC | IFF_SLAVE;
++
++			dev_alloc_name(port_netdev, port_name);
++		} else {
++			port_netdev = netdev;
++			port_priv = &priv->uplink;
++		}
++
++		port_priv->netdev = port_netdev;
++		port_priv->evb_priv = priv;
++		port_priv->port_index = i;
++
++		SET_NETDEV_DEV(port_netdev, dev);
++
++		if (i) {
++			port_netdev->netdev_ops = &evb_port_ops;
++
++			err = register_netdev(port_netdev);
++			if (err < 0) {
++				dev_err(dev, "register_netdev err %d\n", err);
++				free_netdev(port_netdev);
++				goto err_takedown;
++			}
++
++			rtnl_lock();
++			err = netdev_master_upper_dev_link(port_netdev, netdev,
++							   NULL, NULL);
++			if (unlikely(err)) {
++				dev_err(dev, "netdev_master_upper_dev_link err %d\n",
++					err);
++				unregister_netdev(port_netdev);
++				free_netdev(port_netdev);
++				rtnl_unlock();
++				goto err_takedown;
++			}
++			rtmsg_ifinfo(RTM_NEWLINK, port_netdev,
++				     IFF_SLAVE, GFP_KERNEL);
++			rtnl_unlock();
++
++			list_add(&port_priv->list, &priv->port_list);
++		} else {
++			err = register_netdev(netdev);
++
++			if (err < 0) {
++				dev_err(dev, "register_netdev error %d\n", err);
++				goto err_takedown;
++			}
++		}
++
++		port_netdev->ethtool_ops = &evb_port_ethtool_ops;
++
++		/* ports are up from init */
++		rtnl_lock();
++		err = dev_open(port_netdev);
++		rtnl_unlock();
++		if (unlikely(err))
++			dev_warn(dev, "dev_open err %d\n", err);
++	}
++
++	/* setup irqs */
++	err = evb_setup_irqs(evb_dev);
++	if (unlikely(err)) {
++		dev_warn(dev, "evb_setup_irqs err %d\n", err);
++		goto err_takedown;
++	}
++
++	dev_info(dev, "probed evb device with %d ports\n",
++		 priv->attr.num_ifs);
++	return 0;
++
++err_takedown:
++	evb_remove(evb_dev);
++	return err;
++err_free_cmdport:
++	fsl_mc_portal_free(priv->mc_io);
++err_free_netdev:
++	return err;
++}
++
++static const struct fsl_mc_device_id evb_match_id_table[] = {
++	{
++		.vendor = FSL_MC_VENDOR_FREESCALE,
++		.obj_type = "dpdmux",
++	},
++	{}
++};
++
++static struct fsl_mc_driver evb_drv = {
++	.driver = {
++		.name = KBUILD_MODNAME,
++		.owner = THIS_MODULE,
++	},
++	.probe = evb_probe,
++	.remove = evb_remove,
++	.match_id_table = evb_match_id_table,
++};
++
++module_fsl_mc_driver(evb_drv);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Layerscape DPAA Edge Virtual Bridge driver (prototype)");
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/mac/Kconfig
+@@ -0,0 +1,23 @@
++config FSL_DPAA2_MAC
++	tristate "DPAA2 MAC / PHY interface"
++	depends on FSL_MC_BUS && FSL_DPAA2
++	select MDIO_BUS_MUX_MMIOREG
++	select FSL_XGMAC_MDIO
++	select FIXED_PHY
++	---help---
++	  Prototype driver for the DPAA2 MAC / PHY interface object.
++	  This driver works as a proxy between phylib (including PHY drivers)
++	  and the MC firmware. It forwards link state updates from phylib to
++	  the MC and receives an interrupt from the MC whenever a request is
++	  made to change the link state.
++
++
++config FSL_DPAA2_MAC_NETDEVS
++	bool "Expose net interfaces for PHYs"
++	default n
++	depends on FSL_DPAA2_MAC
++	---help---
++	  Exposes macX net interfaces which allow direct control over MACs and
++	  PHYs.
++
++	  Leave disabled if unsure.
+--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/mac/Makefile +@@ -0,0 +1,10 @@ ++ ++obj-$(CONFIG_FSL_DPAA2_MAC) += dpaa2-mac.o ++ ++dpaa2-mac-objs := mac.o dpmac.o ++ ++all: ++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules ++ ++clean: ++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h +@@ -0,0 +1,172 @@ ++/* Copyright 2013-2016 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++#ifndef _FSL_DPMAC_CMD_H ++#define _FSL_DPMAC_CMD_H ++ ++/* DPMAC Version */ ++#define DPMAC_VER_MAJOR 4 ++#define DPMAC_VER_MINOR 2 ++#define DPMAC_CMD_BASE_VERSION 1 ++#define DPMAC_CMD_ID_OFFSET 4 ++ ++#define DPMAC_CMD(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_BASE_VERSION) ++ ++/* Command IDs */ ++#define DPMAC_CMDID_CLOSE DPMAC_CMD(0x800) ++#define DPMAC_CMDID_OPEN DPMAC_CMD(0x80c) ++#define DPMAC_CMDID_CREATE DPMAC_CMD(0x90c) ++#define DPMAC_CMDID_DESTROY DPMAC_CMD(0x98c) ++#define DPMAC_CMDID_GET_API_VERSION DPMAC_CMD(0xa0c) ++ ++#define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004) ++#define DPMAC_CMDID_RESET DPMAC_CMD(0x005) ++ ++#define DPMAC_CMDID_SET_IRQ_ENABLE DPMAC_CMD(0x012) ++#define DPMAC_CMDID_GET_IRQ_ENABLE DPMAC_CMD(0x013) ++#define DPMAC_CMDID_SET_IRQ_MASK DPMAC_CMD(0x014) ++#define DPMAC_CMDID_GET_IRQ_MASK DPMAC_CMD(0x015) ++#define DPMAC_CMDID_GET_IRQ_STATUS DPMAC_CMD(0x016) ++#define DPMAC_CMDID_CLEAR_IRQ_STATUS DPMAC_CMD(0x017) ++ ++#define DPMAC_CMDID_GET_LINK_CFG DPMAC_CMD(0x0c2) ++#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD(0x0c3) ++#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4) ++ ++#define DPMAC_CMDID_SET_PORT_MAC_ADDR DPMAC_CMD(0x0c5) ++ ++/* Macros for accessing command fields smaller than 1byte */ ++#define DPMAC_MASK(field) \ ++ GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \ ++ DPMAC_##field##_SHIFT) ++#define dpmac_set_field(var, field, val) \ ++ ((var) |= (((val) << DPMAC_##field##_SHIFT) & DPMAC_MASK(field))) ++#define dpmac_get_field(var, field) \ ++ (((var) & DPMAC_MASK(field)) >> DPMAC_##field##_SHIFT) ++ ++struct dpmac_cmd_open { ++ u32 dpmac_id; ++}; ++ ++struct dpmac_cmd_create { ++ u32 mac_id; ++}; ++ ++struct dpmac_cmd_destroy { ++ u32 dpmac_id; ++}; ++ ++struct dpmac_cmd_set_irq_enable { ++ u8 enable; ++ u8 pad[3]; ++ u8 irq_index; ++}; ++ ++struct dpmac_cmd_get_irq_enable { ++ u32 pad; ++ u8 irq_index; ++}; ++ ++struct dpmac_rsp_get_irq_enable { ++ u8 enabled; ++}; ++ ++struct dpmac_cmd_set_irq_mask { ++ u32 mask; ++ u8 irq_index; ++}; ++ ++struct dpmac_cmd_get_irq_mask { ++ u32 pad; ++ u8 irq_index; ++}; ++ ++struct dpmac_rsp_get_irq_mask { ++ u32 mask; ++}; ++ ++struct dpmac_cmd_get_irq_status { ++ u32 status; ++ u8 irq_index; ++}; ++ ++struct dpmac_rsp_get_irq_status { ++ u32 status; ++}; ++ ++struct dpmac_cmd_clear_irq_status { ++ u32 status; ++ u8 irq_index; ++}; ++ ++struct dpmac_rsp_get_attributes { ++ u8 eth_if; ++ u8 link_type; ++ u16 id; ++ u32 max_rate; ++}; ++ ++struct dpmac_rsp_get_link_cfg { ++ u64 options; ++ u32 rate; ++}; ++ ++#define DPMAC_STATE_SIZE 1 ++#define DPMAC_STATE_SHIFT 0 ++ ++struct dpmac_cmd_set_link_state { ++ u64 options; ++ u32 rate; ++ u32 pad; ++ /* only least significant bit is valid */ ++ u8 up; ++}; ++ ++struct dpmac_cmd_get_counter { ++ u8 type; ++}; ++ ++struct dpmac_rsp_get_counter { ++ u64 pad; ++ u64 counter; ++}; ++ ++struct dpmac_rsp_get_api_version { ++ u16 major; ++ u16 minor; ++}; ++ ++struct dpmac_cmd_set_port_mac_addr { ++ u8 pad[2]; ++ u8 addr[6]; ++}; ++ ++#endif /* _FSL_DPMAC_CMD_H */ +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/mac/dpmac.c +@@ -0,0 +1,620 @@ ++/* Copyright 2013-2016 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#include "../../fsl-mc/include/mc-sys.h" ++#include "../../fsl-mc/include/mc-cmd.h" ++#include "dpmac.h" ++#include "dpmac-cmd.h" ++ ++/** ++ * dpmac_open() - Open a control session for the specified object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @dpmac_id: DPMAC unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an ++ * already created object; an object may have been declared in ++ * the DPL or by calling the dpmac_create function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent commands for ++ * this specific object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_open(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ int dpmac_id, ++ u16 *token) ++{ ++ struct dpmac_cmd_open *cmd_params; ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN, ++ cmd_flags, ++ 0); ++ cmd_params = (struct dpmac_cmd_open *)cmd.params; ++ cmd_params->dpmac_id = cpu_to_le32(dpmac_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = mc_cmd_hdr_read_token(&cmd); ++ ++ return err; ++} ++ ++/** ++ * dpmac_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * ++ * After this function is called, no further operations are ++ * allowed on the object without opening a new control session. ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */
++int dpmac_close(struct fsl_mc_io *mc_io,
++		u32 cmd_flags,
++		u16 token)
++{
++	struct mc_command cmd = { 0 };
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags,
++					  token);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpmac_create() - Create the DPMAC object.
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @dprc_token:	Parent container token; '0' for default container
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg:	Configuration structure
++ * @obj_id:	Returned object id
++ *
++ * Create the DPMAC object, allocate required resources and
++ * perform required initialization.
++ *
++ * The function accepts an authentication token of a parent
++ * container that this object should be assigned to. The token
++ * can be '0' so the object will be assigned to the default container.
++ * The newly created object can be opened with the returned
++ * object id and using the container's associated tokens and MC portals.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_create(struct fsl_mc_io *mc_io,
++		 u16 dprc_token,
++		 u32 cmd_flags,
++		 const struct dpmac_cfg *cfg,
++		 u32 *obj_id)
++{
++	struct dpmac_cmd_create *cmd_params;
++	struct mc_command cmd = { 0 };
++	int err;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CREATE,
++					  cmd_flags,
++					  dprc_token);
++	cmd_params = (struct dpmac_cmd_create *)cmd.params;
++	cmd_params->mac_id = cpu_to_le32(cfg->mac_id);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	*obj_id = mc_cmd_read_object_id(&cmd);
++
++	return 0;
++}
++
++/**
++ * dpmac_destroy() - Destroy the DPMAC object and release all its resources.
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @dprc_token:	Parent container token; '0' for default container
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @object_id:	The object id; it must be a valid id within the container that
++ *		created this object.
++ *
++ * The function accepts the authentication token of the parent container that
++ * created the object (not the one that currently owns the object). The object
++ * is searched within parent using the provided 'object_id'.
++ * All tokens to the object must be closed before calling destroy.
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpmac_destroy(struct fsl_mc_io *mc_io,
++		  u16 dprc_token,
++		  u32 cmd_flags,
++		  u32 object_id)
++{
++	struct dpmac_cmd_destroy *cmd_params;
++	struct mc_command cmd = { 0 };
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY,
++					  cmd_flags,
++					  dprc_token);
++	cmd_params = (struct dpmac_cmd_destroy *)cmd.params;
++	cmd_params->dpmac_id = cpu_to_le32(object_id);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpmac_set_irq_enable() - Set overall interrupt state.
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPMAC object
++ * @irq_index:	The interrupt index to configure
++ * @en:	Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls
++ * the overall interrupt state: if the interrupt is disabled, no cause
++ * will trigger an interrupt.
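++ * A typical sequence is to first select the causes of interest with
++ * dpmac_set_irq_mask() and only then enable the interrupt with en = 1.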
++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_set_irq_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u8 en) ++{ ++ struct dpmac_cmd_set_irq_enable *cmd_params; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpmac_cmd_set_irq_enable *)cmd.params; ++ cmd_params->irq_index = irq_index; ++ cmd_params->enable = en; ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpmac_get_irq_enable() - Get overall interrupt state ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @en: Returned interrupt state - enable = 1, disable = 0 ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_irq_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u8 *en) ++{ ++ struct dpmac_cmd_get_irq_enable *cmd_params; ++ struct dpmac_rsp_get_irq_enable *rsp_params; ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpmac_cmd_get_irq_enable *)cmd.params; ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpmac_rsp_get_irq_enable *)cmd.params; ++ *en = rsp_params->enabled; ++ ++ return 0; ++} ++ ++/** ++ * dpmac_set_irq_mask() - Set interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @mask: Event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting IRQ ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_set_irq_mask(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 mask) ++{ ++ struct dpmac_cmd_set_irq_mask *cmd_params; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpmac_cmd_set_irq_mask *)cmd.params; ++ cmd_params->mask = cpu_to_le32(mask); ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpmac_get_irq_mask() - Get interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @mask: Returned event mask to trigger interrupt ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpmac_get_irq_mask(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 *mask) ++{ ++ struct dpmac_cmd_get_irq_mask *cmd_params; ++ struct dpmac_rsp_get_irq_mask *rsp_params; ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_MASK, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpmac_cmd_get_irq_mask *)cmd.params; ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpmac_rsp_get_irq_mask *)cmd.params; ++ *mask = le32_to_cpu(rsp_params->mask); ++ ++ return 0; ++} ++ ++/** ++ * dpmac_get_irq_status() - Get the current status of any pending interrupts. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_irq_status(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 *status) ++{ ++ struct dpmac_cmd_get_irq_status *cmd_params; ++ struct dpmac_rsp_get_irq_status *rsp_params; ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_STATUS, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpmac_cmd_get_irq_status *)cmd.params; ++ cmd_params->status = cpu_to_le32(*status); ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dpmac_rsp_get_irq_status *)cmd.params; ++ *status = le32_to_cpu(rsp_params->status); ++ ++ return 0; ++} ++ ++/** ++ * dpmac_clear_irq_status() - Clear a pending interrupt's status ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @status: Bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_clear_irq_status(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 status) ++{ ++ struct dpmac_cmd_clear_irq_status *cmd_params; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpmac_cmd_clear_irq_status *)cmd.params; ++ cmd_params->status = cpu_to_le32(status); ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpmac_get_attributes - Retrieve DPMAC attributes. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @attr: Returned object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */
++int dpmac_get_attributes(struct fsl_mc_io *mc_io,
++			 u32 cmd_flags,
++			 u16 token,
++			 struct dpmac_attr *attr)
++{
++	struct dpmac_rsp_get_attributes *rsp_params;
++	struct mc_command cmd = { 0 };
++	int err;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR,
++					  cmd_flags,
++					  token);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	rsp_params = (struct dpmac_rsp_get_attributes *)cmd.params;
++	attr->eth_if = rsp_params->eth_if;
++	attr->link_type = rsp_params->link_type;
++	attr->id = le16_to_cpu(rsp_params->id);
++	attr->max_rate = le32_to_cpu(rsp_params->max_rate);
++
++	return 0;
++}
++
++/**
++ * dpmac_get_link_cfg() - Get Ethernet link configuration
++ * @mc_io:	Pointer to opaque I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPMAC object
++ * @cfg:	Returned structure with the link configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
++		       u32 cmd_flags,
++		       u16 token,
++		       struct dpmac_link_cfg *cfg)
++{
++	struct dpmac_rsp_get_link_cfg *rsp_params;
++	struct mc_command cmd = { 0 };
++	int err = 0;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG,
++					  cmd_flags,
++					  token);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	rsp_params = (struct dpmac_rsp_get_link_cfg *)cmd.params;
++	cfg->options = le64_to_cpu(rsp_params->options);
++	cfg->rate = le32_to_cpu(rsp_params->rate);
++
++	return 0;
++}
++
++/**
++ * dpmac_set_link_state() - Set the Ethernet link status
++ * @mc_io:	Pointer to opaque I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPMAC object
++ * @link_state:	Link state configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_set_link_state(struct fsl_mc_io *mc_io,
++			 u32 cmd_flags,
++			 u16 token,
++			 struct dpmac_link_state *link_state)
++{
++	struct dpmac_cmd_set_link_state *cmd_params;
++	struct mc_command cmd = { 0 };
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpmac_cmd_set_link_state *)cmd.params;
++	cmd_params->options = cpu_to_le64(link_state->options);
++	cmd_params->rate = cpu_to_le32(link_state->rate);
++	cmd_params->up = dpmac_get_field(link_state->up, STATE);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpmac_get_counter() - Read a specific DPMAC counter
++ * @mc_io:	Pointer to opaque I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPMAC object
++ * @type:	The requested counter
++ * @counter:	Returned counter value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */ ++int dpmac_get_counter(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ enum dpmac_counter type, ++ u64 *counter) ++{ ++ struct dpmac_cmd_get_counter *dpmac_cmd; ++ struct dpmac_rsp_get_counter *dpmac_rsp; ++ struct mc_command cmd = { 0 }; ++ int err = 0; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER, ++ cmd_flags, ++ token); ++ dpmac_cmd = (struct dpmac_cmd_get_counter *)cmd.params; ++ dpmac_cmd->type = type; ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ dpmac_rsp = (struct dpmac_rsp_get_counter *)cmd.params; ++ *counter = le64_to_cpu(dpmac_rsp->counter); ++ ++ return 0; ++} ++ ++/* untested */ ++int dpmac_set_port_mac_addr(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ const u8 addr[6]) ++{ ++ struct dpmac_cmd_set_port_mac_addr *dpmac_cmd; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_PORT_MAC_ADDR, ++ cmd_flags, ++ token); ++ dpmac_cmd = (struct dpmac_cmd_set_port_mac_addr *)cmd.params; ++ dpmac_cmd->addr[0] = addr[5]; ++ dpmac_cmd->addr[1] = addr[4]; ++ dpmac_cmd->addr[2] = addr[3]; ++ dpmac_cmd->addr[3] = addr[2]; ++ dpmac_cmd->addr[4] = addr[1]; ++ dpmac_cmd->addr[5] = addr[0]; ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpmac_get_api_version() - Get Data Path MAC version ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @major_ver: Major version of data path mac API ++ * @minor_ver: Minor version of data path mac API ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_api_version(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 *major_ver, ++ u16 *minor_ver) ++{ ++ struct dpmac_rsp_get_api_version *rsp_params; ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_API_VERSION, ++ cmd_flags, ++ 0); ++ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ rsp_params = (struct dpmac_rsp_get_api_version *)cmd.params; ++ *major_ver = le16_to_cpu(rsp_params->major); ++ *minor_ver = le16_to_cpu(rsp_params->minor); ++ ++ return 0; ++} +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/mac/dpmac.h +@@ -0,0 +1,342 @@ ++/* Copyright 2013-2016 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPMAC_H ++#define __FSL_DPMAC_H ++ ++/* Data Path MAC API ++ * Contains initialization APIs and runtime control APIs for DPMAC ++ */ ++ ++struct fsl_mc_io; ++ ++int dpmac_open(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ int dpmac_id, ++ u16 *token); ++ ++int dpmac_close(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ ++/** ++ * enum dpmac_link_type - DPMAC link type ++ * @DPMAC_LINK_TYPE_NONE: No link ++ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type ++ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID ++ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type ++ */ ++enum dpmac_link_type { ++ DPMAC_LINK_TYPE_NONE, ++ DPMAC_LINK_TYPE_FIXED, ++ DPMAC_LINK_TYPE_PHY, ++ DPMAC_LINK_TYPE_BACKPLANE ++}; ++ ++/** ++ * enum dpmac_eth_if - DPMAC Ethernet interface ++ * @DPMAC_ETH_IF_MII: MII interface ++ * @DPMAC_ETH_IF_RMII: RMII interface ++ * @DPMAC_ETH_IF_SMII: SMII interface ++ * @DPMAC_ETH_IF_GMII: GMII interface ++ * @DPMAC_ETH_IF_RGMII: RGMII interface ++ * @DPMAC_ETH_IF_SGMII: SGMII interface ++ * @DPMAC_ETH_IF_QSGMII: QSGMII interface ++ * @DPMAC_ETH_IF_XAUI: XAUI interface ++ * @DPMAC_ETH_IF_XFI: XFI interface ++ */ ++enum dpmac_eth_if { ++ DPMAC_ETH_IF_MII, ++ DPMAC_ETH_IF_RMII, ++ DPMAC_ETH_IF_SMII, ++ DPMAC_ETH_IF_GMII, ++ DPMAC_ETH_IF_RGMII, ++ DPMAC_ETH_IF_SGMII, ++ DPMAC_ETH_IF_QSGMII, ++ DPMAC_ETH_IF_XAUI, ++ DPMAC_ETH_IF_XFI ++}; ++ ++/** ++ * struct dpmac_cfg - Structure representing DPMAC configuration ++ * @mac_id: Represents the Hardware MAC ID; in case of multiple WRIOP, ++ * the MAC IDs are contiguous. ++ * For example: 2 WRIOPs, 16 MACs in each: ++ * MAC IDs for the 1st WRIOP: 1-16, ++ * MAC IDs for the 2nd WRIOP: 17-32.
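++ * ++ * Example (illustrative sketch only): creating the first MAC of the ++ * 2nd WRIOP above, given a parent DPRC token: ++ * struct dpmac_cfg cfg = { .mac_id = 17 }; ++ * err = dpmac_create(mc_io, dprc_token, 0, &cfg, &obj_id);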
++ */ ++struct dpmac_cfg { ++ u16 mac_id; ++}; ++ ++int dpmac_create(struct fsl_mc_io *mc_io, ++ u16 dprc_token, ++ u32 cmd_flags, ++ const struct dpmac_cfg *cfg, ++ u32 *obj_id); ++ ++int dpmac_destroy(struct fsl_mc_io *mc_io, ++ u16 dprc_token, ++ u32 cmd_flags, ++ u32 object_id); ++ ++/** ++ * DPMAC IRQ Index and Events ++ */ ++ ++/** ++ * IRQ index ++ */ ++#define DPMAC_IRQ_INDEX 0 ++/** ++ * IRQ event - indicates a link configuration request (e.g. from a DPNI) ++ */ ++#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001 ++/** ++ * IRQ event - indicates that the link state changed ++ */ ++#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002 ++ ++int dpmac_set_irq_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u8 en); ++ ++int dpmac_get_irq_enable(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u8 *en); ++ ++int dpmac_set_irq_mask(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 mask); ++ ++int dpmac_get_irq_mask(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 *mask); ++ ++int dpmac_get_irq_status(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 *status); ++ ++int dpmac_clear_irq_status(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ u8 irq_index, ++ u32 status); ++ ++/** ++ * struct dpmac_attr - Structure representing DPMAC attributes ++ * @id: DPMAC object ID ++ * @max_rate: Maximum supported rate - in Mbps ++ * @eth_if: Ethernet interface ++ * @link_type: link type ++ */ ++struct dpmac_attr { ++ u16 id; ++ u32 max_rate; ++ enum dpmac_eth_if eth_if; ++ enum dpmac_link_type link_type; ++}; ++ ++int dpmac_get_attributes(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dpmac_attr *attr); ++ ++/** ++ * DPMAC link configuration/state options ++ */ ++ ++/** ++ * Enable auto-negotiation ++ */ ++#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL ++/** ++ * Enable half-duplex mode ++ */ ++#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL ++/** ++ * Enable pause frames ++ */ ++#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL ++/** ++ * Enable asymmetric pause frames ++ */ ++#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL ++ ++/** ++ * struct dpmac_link_cfg - Structure representing DPMAC link configuration ++ * @rate: Link's rate - in Mbps ++ * @options: Enable/Disable DPMAC link cfg features (bitmap) ++ */ ++struct dpmac_link_cfg { ++ u32 rate; ++ u64 options; ++}; ++ ++int dpmac_get_link_cfg(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dpmac_link_cfg *cfg); ++ ++/** ++ * struct dpmac_link_state - DPMAC link configuration request ++ * @rate: Rate in Mbps ++ * @options: Enable/Disable DPMAC link cfg features (bitmap) ++ * @up: Link state ++ */ ++struct dpmac_link_state { ++ u32 rate; ++ u64 options; ++ int up; ++}; ++ ++int dpmac_set_link_state(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ struct dpmac_link_state *link_state); ++ ++/** ++ * enum dpmac_counter - DPMAC counter types ++ * @DPMAC_CNT_ING_FRAME_64: counts 64-bytes frames, good or bad. ++ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-bytes frames, good or bad. ++ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-bytes frames, good or bad. ++ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-bytes frames, good or bad. ++ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-bytes frames, good or bad. ++ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-bytes frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-bytes frames and larger ++ * (up to max frame length specified), ++ * good or bad. ++ * @DPMAC_CNT_ING_FRAG: counts frames which are shorter than 64 bytes received ++ * with a wrong CRC. ++ * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length ++ * specified, with a bad frame check sequence. ++ * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors. ++ * Occurs when a receive FIFO overflows. ++ * Includes also frames truncated as a result of ++ * the receive FIFO overflow. ++ * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error ++ * (optionally used for wrong SFD). ++ * @DPMAC_CNT_EGR_UNDERSIZED: counts frames transmitted that were less than 64 ++ * bytes long with a good CRC. ++ * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length ++ * specified, with a good frame check sequence. ++ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC). ++ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted ++ * (regular and PFC). ++ * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid ++ * frames and valid pause frames. ++ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames. ++ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames. ++ * @DPMAC_CNT_ING_ALL_FRAME: counts all frames received, good or bad. ++ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames. ++ * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error ++ * (except for undersized/fragment frame). ++ * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid ++ * frames and valid pause frames transmitted. ++ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames. ++ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames. ++ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames. ++ * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error. ++ * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including ++ * pause frames. ++ * @DPMAC_CNT_ENG_GOOD_FRAME: counts frames transmitted without error, including ++ * pause frames. ++ */ ++enum dpmac_counter { ++ DPMAC_CNT_ING_FRAME_64, ++ DPMAC_CNT_ING_FRAME_127, ++ DPMAC_CNT_ING_FRAME_255, ++ DPMAC_CNT_ING_FRAME_511, ++ DPMAC_CNT_ING_FRAME_1023, ++ DPMAC_CNT_ING_FRAME_1518, ++ DPMAC_CNT_ING_FRAME_1519_MAX, ++ DPMAC_CNT_ING_FRAG, ++ DPMAC_CNT_ING_JABBER, ++ DPMAC_CNT_ING_FRAME_DISCARD, ++ DPMAC_CNT_ING_ALIGN_ERR, ++ DPMAC_CNT_EGR_UNDERSIZED, ++ DPMAC_CNT_ING_OVERSIZED, ++ DPMAC_CNT_ING_VALID_PAUSE_FRAME, ++ DPMAC_CNT_EGR_VALID_PAUSE_FRAME, ++ DPMAC_CNT_ING_BYTE, ++ DPMAC_CNT_ING_MCAST_FRAME, ++ DPMAC_CNT_ING_BCAST_FRAME, ++ DPMAC_CNT_ING_ALL_FRAME, ++ DPMAC_CNT_ING_UCAST_FRAME, ++ DPMAC_CNT_ING_ERR_FRAME, ++ DPMAC_CNT_EGR_BYTE, ++ DPMAC_CNT_EGR_MCAST_FRAME, ++ DPMAC_CNT_EGR_BCAST_FRAME, ++ DPMAC_CNT_EGR_UCAST_FRAME, ++ DPMAC_CNT_EGR_ERR_FRAME, ++ DPMAC_CNT_ING_GOOD_FRAME, ++ DPMAC_CNT_ENG_GOOD_FRAME ++}; ++ ++int dpmac_get_counter(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ enum dpmac_counter type, ++ u64 *counter); ++ ++/** ++ * dpmac_set_port_mac_addr() - Set a MAC address associated with the physical ++ * port. This is not used for filtering; the MAC is always in ++ * promiscuous mode. The address is passed to DPNIs through the ++ * DPNI API for ++ * application use.
++ * @mc_io: Pointer to opaque I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @addr: MAC address to set ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_set_port_mac_addr(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ const u8 addr[6]); ++ ++int dpmac_get_api_version(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 *major_ver, ++ u16 *minor_ver); ++ ++#endif /* __FSL_DPMAC_H */ +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/mac/mac.c +@@ -0,0 +1,666 @@ ++/* Copyright 2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include <linux/module.h> ++ ++#include <linux/netdevice.h> ++#include <linux/etherdevice.h> ++#include <linux/msi.h> ++#include <linux/rtnetlink.h> ++#include <linux/if_vlan.h> ++ ++#include <uapi/linux/if_bridge.h> ++#include <net/netlink.h> ++ ++#include <linux/of.h> ++#include <linux/of_mdio.h> ++#include <linux/of_net.h> ++#include <linux/phy.h> ++#include <linux/phy_fixed.h> ++ ++#include "../../fsl-mc/include/mc.h" ++#include "../../fsl-mc/include/mc-sys.h" ++ ++#include "dpmac.h" ++#include "dpmac-cmd.h" ++ ++struct dpaa2_mac_priv { ++ struct net_device *netdev; ++ struct fsl_mc_device *mc_dev; ++ struct dpmac_attr attr; ++ struct dpmac_link_state old_state; ++}; ++ ++/* TODO: fix the 10G modes, mapping can't be right: ++ * XGMII is parallel ++ * XAUI is serial, using 8b/10b encoding ++ * XFI is also serial but using 64b/66b encoding ++ * they can't all map to XGMII... ++ * ++ * This must be kept in sync with enum dpmac_eth_if.
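++ * ++ * (Note: kernel 4.9 defines no dedicated XAUI/XFI phy_interface_t ++ * values, which is why both 10G modes currently fall back to ++ * PHY_INTERFACE_MODE_XGMII in the table below.)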
++ */ ++static phy_interface_t dpaa2_mac_iface_mode[] = { ++ PHY_INTERFACE_MODE_MII, /* DPMAC_ETH_IF_MII */ ++ PHY_INTERFACE_MODE_RMII, /* DPMAC_ETH_IF_RMII */ ++ PHY_INTERFACE_MODE_SMII, /* DPMAC_ETH_IF_SMII */ ++ PHY_INTERFACE_MODE_GMII, /* DPMAC_ETH_IF_GMII */ ++ PHY_INTERFACE_MODE_RGMII, /* DPMAC_ETH_IF_RGMII */ ++ PHY_INTERFACE_MODE_SGMII, /* DPMAC_ETH_IF_SGMII */ ++ PHY_INTERFACE_MODE_QSGMII, /* DPMAC_ETH_IF_QSGMII */ ++ PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_XAUI */ ++ PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_XFI */ ++}; ++ ++static void dpaa2_mac_link_changed(struct net_device *netdev) ++{ ++ struct phy_device *phydev; ++ struct dpmac_link_state state = { 0 }; ++ struct dpaa2_mac_priv *priv = netdev_priv(netdev); ++ int err; ++ ++ /* the PHY just notified us of link state change */ ++ phydev = netdev->phydev; ++ ++ state.up = !!phydev->link; ++ if (phydev->link) { ++ state.rate = phydev->speed; ++ ++ if (!phydev->duplex) ++ state.options |= DPMAC_LINK_OPT_HALF_DUPLEX; ++ if (phydev->autoneg) ++ state.options |= DPMAC_LINK_OPT_AUTONEG; ++ ++ netif_carrier_on(netdev); ++ } else { ++ netif_carrier_off(netdev); ++ } ++ ++ if (priv->old_state.up != state.up || ++ priv->old_state.rate != state.rate || ++ priv->old_state.options != state.options) { ++ priv->old_state = state; ++ phy_print_status(phydev); ++ } ++ ++ /* We must interrogate MC at all times, because we don't know ++ * when and whether a potential DPNI may have read the link state. ++ */ ++ err = dpmac_set_link_state(priv->mc_dev->mc_io, 0, ++ priv->mc_dev->mc_handle, &state); ++ if (unlikely(err)) ++ dev_err(&priv->mc_dev->dev, "dpmac_set_link_state: %d\n", err); ++} ++ ++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS ++static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb, ++ struct net_device *dev) ++{ ++ /* we don't support I/O for now, drop the frame */ ++ dev_kfree_skb_any(skb); ++ return NETDEV_TX_OK; ++} ++ ++static int dpaa2_mac_open(struct net_device *netdev) ++{ ++ /* start PHY state machine */ ++ phy_start(netdev->phydev); ++ ++ return 0; ++} ++ ++static int dpaa2_mac_stop(struct net_device *netdev) ++{ ++ if (!netdev->phydev) ++ goto done; ++ ++ /* stop PHY state machine */ ++ phy_stop(netdev->phydev); ++ ++ /* signal link down to firmware */ ++ netdev->phydev->link = 0; ++ dpaa2_mac_link_changed(netdev); ++ ++done: ++ return 0; ++} ++ ++static int dpaa2_mac_get_settings(struct net_device *netdev, ++ struct ethtool_cmd *cmd) ++{ ++ return phy_ethtool_gset(netdev->phydev, cmd); ++} ++ ++static int dpaa2_mac_set_settings(struct net_device *netdev, ++ struct ethtool_cmd *cmd) ++{ ++ return phy_ethtool_sset(netdev->phydev, cmd); ++} ++ ++static void dpaa2_mac_get_stats(struct net_device *netdev, ++ struct rtnl_link_stats64 *storage) ++{ ++ struct dpaa2_mac_priv *priv = netdev_priv(netdev); ++ u64 tmp; ++ int err; ++ ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_EGR_MCAST_FRAME, ++ &storage->tx_packets); ++ if (err) ++ goto error; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_EGR_BCAST_FRAME, &tmp); ++ if (err) ++ goto error; ++ storage->tx_packets += tmp; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_EGR_UCAST_FRAME, &tmp); ++ if (err) ++ goto error; ++ storage->tx_packets += tmp; ++ ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_EGR_UNDERSIZED, &storage->tx_dropped); ++ if (err) ++ goto error; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, 
priv->mc_dev->mc_handle, ++ DPMAC_CNT_EGR_BYTE, &storage->tx_bytes); ++ if (err) ++ goto error; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_EGR_ERR_FRAME, &storage->tx_errors); ++ if (err) ++ goto error; ++ ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_ING_ALL_FRAME, &storage->rx_packets); ++ if (err) ++ goto error; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_ING_MCAST_FRAME, &storage->multicast); ++ if (err) ++ goto error; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_ING_FRAME_DISCARD, ++ &storage->rx_dropped); ++ if (err) ++ goto error; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_ING_ALIGN_ERR, &storage->rx_errors); ++ if (err) ++ goto error; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_ING_OVERSIZED, &tmp); ++ if (err) ++ goto error; ++ storage->rx_errors += tmp; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_ING_BYTE, &storage->rx_bytes); ++ if (err) ++ goto error; ++ ++ return; ++error: ++ netdev_err(netdev, "dpmac_get_counter err %d\n", err); ++} ++ ++static struct { ++ enum dpmac_counter id; ++ char name[ETH_GSTRING_LEN]; ++} dpaa2_mac_counters[] = { ++ {DPMAC_CNT_ING_ALL_FRAME, "rx all frames"}, ++ {DPMAC_CNT_ING_GOOD_FRAME, "rx frames ok"}, ++ {DPMAC_CNT_ING_ERR_FRAME, "rx frame errors"}, ++ {DPMAC_CNT_ING_FRAME_DISCARD, "rx frame discards"}, ++ {DPMAC_CNT_ING_UCAST_FRAME, "rx u-cast"}, ++ {DPMAC_CNT_ING_BCAST_FRAME, "rx b-cast"}, ++ {DPMAC_CNT_ING_MCAST_FRAME, "rx m-cast"}, ++ {DPMAC_CNT_ING_FRAME_64, "rx 64 bytes"}, ++ {DPMAC_CNT_ING_FRAME_127, "rx 65-127 bytes"}, ++ {DPMAC_CNT_ING_FRAME_255, "rx 128-255 bytes"}, ++ {DPMAC_CNT_ING_FRAME_511, "rx 256-511 bytes"}, ++ {DPMAC_CNT_ING_FRAME_1023, "rx 512-1023 bytes"}, ++ {DPMAC_CNT_ING_FRAME_1518, "rx 1024-1518 bytes"}, ++ {DPMAC_CNT_ING_FRAME_1519_MAX, "rx 1519-max bytes"}, ++ {DPMAC_CNT_ING_FRAG, "rx frags"}, ++ {DPMAC_CNT_ING_JABBER, "rx jabber"}, ++ {DPMAC_CNT_ING_ALIGN_ERR, "rx align errors"}, ++ {DPMAC_CNT_ING_OVERSIZED, "rx oversized"}, ++ {DPMAC_CNT_ING_VALID_PAUSE_FRAME, "rx pause"}, ++ {DPMAC_CNT_ING_BYTE, "rx bytes"}, ++ {DPMAC_CNT_ENG_GOOD_FRAME, "tx frames ok"}, ++ {DPMAC_CNT_EGR_UCAST_FRAME, "tx u-cast"}, ++ {DPMAC_CNT_EGR_MCAST_FRAME, "tx m-cast"}, ++ {DPMAC_CNT_EGR_BCAST_FRAME, "tx b-cast"}, ++ {DPMAC_CNT_EGR_ERR_FRAME, "tx frame errors"}, ++ {DPMAC_CNT_EGR_UNDERSIZED, "tx undersized"}, ++ {DPMAC_CNT_EGR_VALID_PAUSE_FRAME, "tx b-pause"}, ++ {DPMAC_CNT_EGR_BYTE, "tx bytes"}, ++ ++}; ++ ++static void dpaa2_mac_get_strings(struct net_device *netdev, ++ u32 stringset, u8 *data) ++{ ++ int i; ++ ++ switch (stringset) { ++ case ETH_SS_STATS: ++ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) ++ memcpy(data + i * ETH_GSTRING_LEN, ++ dpaa2_mac_counters[i].name, ++ ETH_GSTRING_LEN); ++ break; ++ } ++} ++ ++static void dpaa2_mac_get_ethtool_stats(struct net_device *netdev, ++ struct ethtool_stats *stats, ++ u64 *data) ++{ ++ struct dpaa2_mac_priv *priv = netdev_priv(netdev); ++ int i; ++ int err; ++ ++ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) { ++ err = dpmac_get_counter(priv->mc_dev->mc_io, ++ 0, ++ priv->mc_dev->mc_handle, ++ dpaa2_mac_counters[i].id, &data[i]); ++ if (err) ++ netdev_err(netdev, "dpmac_get_counter[%s] err %d\n", ++ dpaa2_mac_counters[i].name, err); ++ } ++} ++ ++static int 
dpaa2_mac_get_sset_count(struct net_device *dev, int sset) ++{ ++ switch (sset) { ++ case ETH_SS_STATS: ++ return ARRAY_SIZE(dpaa2_mac_counters); ++ default: ++ return -EOPNOTSUPP; ++ } ++} ++ ++static const struct net_device_ops dpaa2_mac_ndo_ops = { ++ .ndo_start_xmit = &dpaa2_mac_drop_frame, ++ .ndo_open = &dpaa2_mac_open, ++ .ndo_stop = &dpaa2_mac_stop, ++ .ndo_get_stats64 = &dpaa2_mac_get_stats, ++}; ++ ++static const struct ethtool_ops dpaa2_mac_ethtool_ops = { ++ .get_settings = &dpaa2_mac_get_settings, ++ .set_settings = &dpaa2_mac_set_settings, ++ .get_strings = &dpaa2_mac_get_strings, ++ .get_ethtool_stats = &dpaa2_mac_get_ethtool_stats, ++ .get_sset_count = &dpaa2_mac_get_sset_count, ++}; ++#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ ++ ++static void configure_link(struct dpaa2_mac_priv *priv, ++ struct dpmac_link_cfg *cfg) ++{ ++ struct phy_device *phydev = priv->netdev->phydev; ++ ++ if (unlikely(!phydev)) ++ return; ++ ++ phydev->speed = cfg->rate; ++ /* DUPLEX_HALF is 0, so clear 'duplex' when half-duplex is requested */ ++ phydev->duplex = !(cfg->options & DPMAC_LINK_OPT_HALF_DUPLEX); ++ ++ if (cfg->options & DPMAC_LINK_OPT_AUTONEG) { ++ phydev->autoneg = 1; ++ phydev->advertising |= ADVERTISED_Autoneg; ++ } else { ++ phydev->autoneg = 0; ++ phydev->advertising &= ~ADVERTISED_Autoneg; ++ } ++ ++ phy_start_aneg(phydev); ++} ++ ++static irqreturn_t dpaa2_mac_irq_handler(int irq_num, void *arg) ++{ ++ struct device *dev = (struct device *)arg; ++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); ++ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev); ++ struct dpmac_link_cfg link_cfg; ++ u32 status; ++ int err; ++ ++ err = dpmac_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ DPMAC_IRQ_INDEX, &status); ++ if (unlikely(err || !status)) ++ return IRQ_NONE; ++ ++ /* DPNI-initiated link configuration; 'ifconfig up' also calls this */ ++ if (status & DPMAC_IRQ_EVENT_LINK_CFG_REQ) { ++ err = dpmac_get_link_cfg(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ &link_cfg); ++ if (unlikely(err)) ++ goto out; ++ ++ configure_link(priv, &link_cfg); ++ } ++ ++out: ++ dpmac_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ DPMAC_IRQ_INDEX, status); ++ ++ return IRQ_HANDLED; ++} ++ ++static int setup_irqs(struct fsl_mc_device *mc_dev) ++{ ++ int err = 0; ++ struct fsl_mc_device_irq *irq; ++ ++ err = fsl_mc_allocate_irqs(mc_dev); ++ if (err) { ++ dev_err(&mc_dev->dev, "fsl_mc_allocate_irqs err %d\n", err); ++ return err; ++ } ++ ++ irq = mc_dev->irqs[0]; ++ err = devm_request_threaded_irq(&mc_dev->dev, irq->msi_desc->irq, ++ NULL, &dpaa2_mac_irq_handler, ++ IRQF_NO_SUSPEND | IRQF_ONESHOT, ++ dev_name(&mc_dev->dev), &mc_dev->dev); ++ if (err) { ++ dev_err(&mc_dev->dev, "devm_request_threaded_irq err %d\n", ++ err); ++ goto free_irq; ++ } ++ ++ err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ DPMAC_IRQ_INDEX, DPMAC_IRQ_EVENT_LINK_CFG_REQ); ++ if (err) { ++ dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err); ++ goto free_irq; ++ } ++ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ DPMAC_IRQ_INDEX, 1); ++ if (err) { ++ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err); ++ goto free_irq; ++ } ++ ++ return 0; ++ ++free_irq: ++ fsl_mc_free_irqs(mc_dev); ++ ++ return err; ++} ++ ++static void teardown_irqs(struct fsl_mc_device *mc_dev) ++{ ++ int err; ++ ++ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ DPMAC_IRQ_INDEX, 0); ++ if (err) ++ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err); ++ ++ fsl_mc_free_irqs(mc_dev); ++} ++ ++static struct device_node *find_dpmac_node(struct device
*dev, u16 dpmac_id) ++{ ++ struct device_node *dpmacs, *dpmac = NULL; ++ struct device_node *mc_node = dev->of_node; ++ u32 id; ++ int err; ++ ++ dpmacs = of_find_node_by_name(mc_node, "dpmacs"); ++ if (!dpmacs) { ++ dev_err(dev, "No dpmacs subnode in device-tree\n"); ++ return NULL; ++ } ++ ++ while ((dpmac = of_get_next_child(dpmacs, dpmac))) { ++ err = of_property_read_u32(dpmac, "reg", &id); ++ if (err) ++ continue; ++ if (id == dpmac_id) ++ return dpmac; ++ } ++ ++ return NULL; ++} ++ ++static int dpaa2_mac_probe(struct fsl_mc_device *mc_dev) ++{ ++ struct device *dev; ++ struct dpaa2_mac_priv *priv = NULL; ++ struct device_node *phy_node, *dpmac_node; ++ struct net_device *netdev; ++ phy_interface_t if_mode; ++ int err = 0; ++ ++ dev = &mc_dev->dev; ++ ++ /* prepare a net_dev structure to make the phy lib API happy */ ++ netdev = alloc_etherdev(sizeof(*priv)); ++ if (!netdev) { ++ dev_err(dev, "alloc_etherdev error\n"); ++ err = -ENOMEM; ++ goto err_exit; ++ } ++ priv = netdev_priv(netdev); ++ priv->mc_dev = mc_dev; ++ priv->netdev = netdev; ++ ++ SET_NETDEV_DEV(netdev, dev); ++ ++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS ++ snprintf(netdev->name, IFNAMSIZ, "mac%d", mc_dev->obj_desc.id); ++#endif ++ ++ dev_set_drvdata(dev, priv); ++ ++ err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io); ++ if (err || !mc_dev->mc_io) { ++ dev_err(dev, "fsl_mc_portal_allocate error: %d\n", err); ++ err = -ENODEV; ++ goto err_free_netdev; ++ } ++ ++ err = dpmac_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id, ++ &mc_dev->mc_handle); ++ if (err || !mc_dev->mc_handle) { ++ dev_err(dev, "dpmac_open error: %d\n", err); ++ err = -ENODEV; ++ goto err_free_mcp; ++ } ++ ++ err = dpmac_get_attributes(mc_dev->mc_io, 0, ++ mc_dev->mc_handle, &priv->attr); ++ if (err) { ++ dev_err(dev, "dpmac_get_attributes err %d\n", err); ++ err = -EINVAL; ++ goto err_close; ++ } ++ ++ /* Look up the DPMAC node in the device-tree. */ ++ dpmac_node = find_dpmac_node(dev, priv->attr.id); ++ if (!dpmac_node) { ++ dev_err(dev, "No dpmac@%d subnode found.\n", priv->attr.id); ++ err = -ENODEV; ++ goto err_close; ++ } ++ ++ err = setup_irqs(mc_dev); ++ if (err) { ++ err = -EFAULT; ++ goto err_close; ++ } ++ ++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS ++ /* OPTIONAL, register netdev just to make it visible to the user */ ++ netdev->netdev_ops = &dpaa2_mac_ndo_ops; ++ netdev->ethtool_ops = &dpaa2_mac_ethtool_ops; ++ ++ /* phy starts up enabled so netdev should be up too */ ++ netdev->flags |= IFF_UP; ++ ++ err = register_netdev(priv->netdev); ++ if (err < 0) { ++ dev_err(dev, "register_netdev error %d\n", err); ++ err = -ENODEV; ++ goto err_free_irq; ++ } ++#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ ++ ++ /* probe the PHY as a fixed-link if the link type declared in DPC ++ * explicitly mandates this ++ */ ++ ++ phy_node = of_parse_phandle(dpmac_node, "phy-handle", 0); ++ if (!phy_node) { ++ goto probe_fixed_link; ++ } ++ ++ if (priv->attr.eth_if < ARRAY_SIZE(dpaa2_mac_iface_mode)) { ++ if_mode = dpaa2_mac_iface_mode[priv->attr.eth_if]; ++ dev_dbg(dev, "\tusing if mode %s for eth_if %d\n", ++ phy_modes(if_mode), priv->attr.eth_if); ++ } else { ++ dev_warn(dev, "Unexpected interface mode %d, will probe as fixed link\n", ++ priv->attr.eth_if); ++ goto probe_fixed_link; ++ } ++ ++ /* try to connect to the PHY */ ++ netdev->phydev = of_phy_connect(netdev, phy_node, ++ &dpaa2_mac_link_changed, 0, if_mode); ++ if (!netdev->phydev) { ++ /* No need for dev_err(); the kernel's loud enough as it is. 
*/ ++ dev_dbg(dev, "Can't of_phy_connect() now.\n"); ++ /* We might be waiting for the MDIO MUX to probe, so defer ++ * our own probing. ++ */ ++ err = -EPROBE_DEFER; ++ goto err_defer; ++ } ++ dev_info(dev, "Connected to %s PHY.\n", phy_modes(if_mode)); ++ ++probe_fixed_link: ++ if (!netdev->phydev) { ++ struct fixed_phy_status status = { ++ .link = 1, ++ /* fixed-phys don't support 10Gbps speed for now */ ++ .speed = 1000, ++ .duplex = 1, ++ }; ++ ++ /* try to register a fixed link phy */ ++ netdev->phydev = fixed_phy_register(PHY_POLL, &status, -1, ++ NULL); ++ if (!netdev->phydev || IS_ERR(netdev->phydev)) { ++ dev_err(dev, "error trying to register fixed PHY\n"); ++ /* So we don't crash unregister_netdev() later on */ ++ netdev->phydev = NULL; ++ err = -EFAULT; ++ goto err_no_phy; ++ } ++ dev_info(dev, "Registered fixed PHY.\n"); ++ } ++ ++ /* start PHY state machine */ ++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS ++ dpaa2_mac_open(netdev); ++#else /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ ++ phy_start(netdev->phydev); ++#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ ++ return 0; ++ ++err_defer: ++err_no_phy: ++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS ++ unregister_netdev(netdev); ++err_free_irq: ++#endif ++ teardown_irqs(mc_dev); ++err_close: ++ dpmac_close(mc_dev->mc_io, 0, mc_dev->mc_handle); ++err_free_mcp: ++ fsl_mc_portal_free(mc_dev->mc_io); ++err_free_netdev: ++ free_netdev(netdev); ++err_exit: ++ return err; ++} ++ ++static int dpaa2_mac_remove(struct fsl_mc_device *mc_dev) ++{ ++ struct device *dev = &mc_dev->dev; ++ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev); ++ ++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS ++ unregister_netdev(priv->netdev); ++#endif ++ teardown_irqs(priv->mc_dev); ++ dpmac_close(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle); ++ fsl_mc_portal_free(priv->mc_dev->mc_io); ++ /* this also frees priv, which lives in the netdev's private area */ ++ free_netdev(priv->netdev); ++ ++ dev_set_drvdata(dev, NULL); ++ ++ return 0; ++} ++ ++static const struct fsl_mc_device_id dpaa2_mac_match_id_table[] = { ++ { ++ .vendor = FSL_MC_VENDOR_FREESCALE, ++ .obj_type = "dpmac", ++ }, ++ { .vendor = 0x0 } ++}; ++MODULE_DEVICE_TABLE(fslmc, dpaa2_mac_match_id_table); ++ ++static struct fsl_mc_driver dpaa2_mac_drv = { ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .owner = THIS_MODULE, ++ }, ++ .probe = dpaa2_mac_probe, ++ .remove = dpaa2_mac_remove, ++ .match_id_table = dpaa2_mac_match_id_table, ++}; ++ ++module_fsl_mc_driver(dpaa2_mac_drv); ++ ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("DPAA2 PHY proxy interface driver"); +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/rtc/Makefile +@@ -0,0 +1,10 @@ ++ ++obj-$(CONFIG_PTP_1588_CLOCK_DPAA2) += dpaa2-rtc.o ++ ++dpaa2-rtc-objs := rtc.o dprtc.o ++ ++all: ++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules ++ ++clean: ++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h +@@ -0,0 +1,160 @@ ++/* Copyright 2013-2016 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef _FSL_DPRTC_CMD_H ++#define _FSL_DPRTC_CMD_H ++ ++/* DPRTC Version */ ++#define DPRTC_VER_MAJOR 2 ++#define DPRTC_VER_MINOR 0 ++ ++/* Command versioning */ ++#define DPRTC_CMD_BASE_VERSION 1 ++#define DPRTC_CMD_ID_OFFSET 4 ++ ++#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION) ++ ++/* Command IDs */ ++#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800) ++#define DPRTC_CMDID_OPEN DPRTC_CMD(0x810) ++#define DPRTC_CMDID_CREATE DPRTC_CMD(0x910) ++#define DPRTC_CMDID_DESTROY DPRTC_CMD(0x990) ++#define DPRTC_CMDID_GET_API_VERSION DPRTC_CMD(0xa10) ++ ++#define DPRTC_CMDID_ENABLE DPRTC_CMD(0x002) ++#define DPRTC_CMDID_DISABLE DPRTC_CMD(0x003) ++#define DPRTC_CMDID_GET_ATTR DPRTC_CMD(0x004) ++#define DPRTC_CMDID_RESET DPRTC_CMD(0x005) ++#define DPRTC_CMDID_IS_ENABLED DPRTC_CMD(0x006) ++ ++#define DPRTC_CMDID_SET_IRQ_ENABLE DPRTC_CMD(0x012) ++#define DPRTC_CMDID_GET_IRQ_ENABLE DPRTC_CMD(0x013) ++#define DPRTC_CMDID_SET_IRQ_MASK DPRTC_CMD(0x014) ++#define DPRTC_CMDID_GET_IRQ_MASK DPRTC_CMD(0x015) ++#define DPRTC_CMDID_GET_IRQ_STATUS DPRTC_CMD(0x016) ++#define DPRTC_CMDID_CLEAR_IRQ_STATUS DPRTC_CMD(0x017) ++ ++#define DPRTC_CMDID_SET_CLOCK_OFFSET DPRTC_CMD(0x1d0) ++#define DPRTC_CMDID_SET_FREQ_COMPENSATION DPRTC_CMD(0x1d1) ++#define DPRTC_CMDID_GET_FREQ_COMPENSATION DPRTC_CMD(0x1d2) ++#define DPRTC_CMDID_GET_TIME DPRTC_CMD(0x1d3) ++#define DPRTC_CMDID_SET_TIME DPRTC_CMD(0x1d4) ++#define DPRTC_CMDID_SET_ALARM DPRTC_CMD(0x1d5) ++#define DPRTC_CMDID_SET_PERIODIC_PULSE DPRTC_CMD(0x1d6) ++#define DPRTC_CMDID_CLEAR_PERIODIC_PULSE DPRTC_CMD(0x1d7) ++#define DPRTC_CMDID_SET_EXT_TRIGGER DPRTC_CMD(0x1d8) ++#define DPRTC_CMDID_CLEAR_EXT_TRIGGER DPRTC_CMD(0x1d9) ++#define DPRTC_CMDID_GET_EXT_TRIGGER_TIMESTAMP DPRTC_CMD(0x1dA) ++ ++/* Macros for accessing command fields smaller than 1 byte */ ++#define DPRTC_MASK(field) \ ++ GENMASK(DPRTC_##field##_SHIFT + DPRTC_##field##_SIZE - 1, \ ++ DPRTC_##field##_SHIFT) ++#define dprtc_get_field(var, field) \ ++ (((var) & DPRTC_MASK(field)) >> DPRTC_##field##_SHIFT) ++ ++#pragma pack(push, 1) ++struct dprtc_cmd_open { ++ uint32_t dprtc_id; ++}; ++ ++struct dprtc_cmd_destroy { ++ uint32_t object_id; ++}; ++ ++#define DPRTC_ENABLE_SHIFT 0 ++#define DPRTC_ENABLE_SIZE 1 ++
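++/* Example (illustrative): with the SHIFT/SIZE values above, ++ * dprtc_get_field(var, ENABLE) expands to ((var & GENMASK(0, 0)) >> 0), ++ * i.e. it extracts the single enable bit from the response byte. ++ */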
++struct dprtc_rsp_is_enabled { ++ uint8_t en; ++}; ++ ++struct dprtc_cmd_get_irq { ++ uint32_t pad; ++ uint8_t irq_index; ++}; ++ ++struct dprtc_cmd_set_irq_enable { ++ uint8_t en; ++ uint8_t pad[3]; ++ uint8_t irq_index; ++}; ++ ++struct dprtc_rsp_get_irq_enable { ++ uint8_t en; ++}; ++ ++struct dprtc_cmd_set_irq_mask { ++ uint32_t mask; ++ uint8_t irq_index; ++}; ++ ++struct dprtc_rsp_get_irq_mask { ++ uint32_t mask; ++}; ++ ++struct dprtc_cmd_get_irq_status { ++ uint32_t status; ++ uint8_t irq_index; ++}; ++ ++struct dprtc_rsp_get_irq_status { ++ uint32_t status; ++}; ++ ++struct dprtc_cmd_clear_irq_status { ++ uint32_t status; ++ uint8_t irq_index; ++}; ++ ++struct dprtc_rsp_get_attributes { ++ uint32_t pad; ++ uint32_t id; ++}; ++ ++struct dprtc_cmd_set_clock_offset { ++ uint64_t offset; ++}; ++ ++struct dprtc_get_freq_compensation { ++ uint32_t freq_compensation; ++}; ++ ++struct dprtc_time { ++ uint64_t time; ++}; ++ ++struct dprtc_rsp_get_api_version { ++ uint16_t major; ++ uint16_t minor; ++}; ++#pragma pack(pop) ++#endif /* _FSL_DPRTC_CMD_H */ +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/rtc/dprtc.c +@@ -0,0 +1,746 @@ ++/* Copyright 2013-2016 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#include "../../fsl-mc/include/mc-sys.h" ++#include "../../fsl-mc/include/mc-cmd.h" ++#include "dprtc.h" ++#include "dprtc-cmd.h" ++ ++/** ++ * dprtc_open() - Open a control session for the specified object. 
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @dprtc_id: DPRTC unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an ++ * already created object; an object may have been declared in ++ * the DPL or by calling the dprtc_create function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent commands for ++ * this specific object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprtc_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dprtc_id, ++ uint16_t *token) ++{ ++ struct dprtc_cmd_open *cmd_params; ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN, ++ cmd_flags, ++ 0); ++ cmd_params = (struct dprtc_cmd_open *)cmd.params; ++ cmd_params->dprtc_id = cpu_to_le32(dprtc_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = mc_cmd_hdr_read_token(&cmd); ++ ++ return err; ++} ++ ++/** ++ * dprtc_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRTC object ++ * ++ * After this function is called, no further operations are ++ * allowed on the object without opening a new control session. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprtc_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dprtc_create() - Create the DPRTC object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @dprc_token: Parent container token; '0' for default container ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @cfg: Configuration structure ++ * @obj_id: Returned object id ++ * ++ * Create the DPRTC object, allocate required resources and ++ * perform required initialization. ++ * ++ * The function accepts an authentication token of a parent ++ * container that this object should be assigned to. The token ++ * can be '0' so the object will be assigned to the default container. ++ * The newly created object can be opened with the returned ++ * object id and using the container's associated tokens and MC portals. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprtc_create(struct fsl_mc_io *mc_io, ++ uint16_t dprc_token, ++ uint32_t cmd_flags, ++ const struct dprtc_cfg *cfg, ++ uint32_t *obj_id) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ (void)(cfg); /* unused */ ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CREATE, ++ cmd_flags, ++ dprc_token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *obj_id = mc_cmd_read_object_id(&cmd); ++ ++ return 0; ++} ++ ++/** ++ * dprtc_destroy() - Destroy the DPRTC object and release all its resources. 
++ * @mc_io: Pointer to MC portal's I/O object ++ * @dprc_token: Parent container token; '0' for default container ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @object_id: The object id; it must be a valid id within the container that ++ * created this object. ++ * ++ * The function accepts the authentication token of the parent container that ++ * created the object (not the one that currently owns the object). The object ++ * is searched within the parent using the provided 'object_id'. ++ * All tokens to the object must be closed before calling destroy. ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dprtc_destroy(struct fsl_mc_io *mc_io, ++ uint16_t dprc_token, ++ uint32_t cmd_flags, ++ uint32_t object_id) ++{ ++ struct dprtc_cmd_destroy *cmd_params; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY, ++ cmd_flags, ++ dprc_token); ++ cmd_params = (struct dprtc_cmd_destroy *)cmd.params; ++ cmd_params->object_id = cpu_to_le32(object_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dprtc_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dprtc_disable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dprtc_is_enabled(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en) ++{ ++ struct dprtc_rsp_is_enabled *rsp_params; ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_IS_ENABLED, cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dprtc_rsp_is_enabled *)cmd.params; ++ *en = dprtc_get_field(rsp_params->en, ENABLE); ++ ++ return 0; ++} ++ ++int dprtc_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dprtc_set_irq_enable() - Set overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRTC object ++ * @irq_index: The interrupt index to configure ++ * @en: Interrupt state - enable = 1, disable = 0 ++ * ++ * Allows GPP software to control when interrupts are generated. ++ * Each interrupt can have up to 32 causes. The enable/disable controls the ++ * overall interrupt state: if the interrupt is disabled, no cause will trigger ++ * an interrupt. ++ * ++ * Return: '0' on Success; Error code otherwise.
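++ * ++ * Example (illustrative sketch only; 'token' is a handle obtained from ++ * dprtc_open()): ++ * err = dprtc_set_irq_enable(mc_io, 0, token, DPRTC_IRQ_INDEX, 1);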
++ */ ++int dprtc_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en) ++{ ++ struct dprtc_cmd_set_irq_enable *cmd_params; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dprtc_cmd_set_irq_enable *)cmd.params; ++ cmd_params->irq_index = irq_index; ++ cmd_params->en = en; ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dprtc_get_irq_enable() - Get overall interrupt state ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRTC object ++ * @irq_index: The interrupt index to configure ++ * @en: Returned interrupt state - enable = 1, disable = 0 ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprtc_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en) ++{ ++ struct dprtc_rsp_get_irq_enable *rsp_params; ++ struct dprtc_cmd_get_irq *cmd_params; ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dprtc_cmd_get_irq *)cmd.params; ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dprtc_rsp_get_irq_enable *)cmd.params; ++ *en = rsp_params->en; ++ ++ return 0; ++} ++ ++/** ++ * dprtc_set_irq_mask() - Set interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRTC object ++ * @irq_index: The interrupt index to configure ++ * @mask: Event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting IRQ ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprtc_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask) ++{ ++ struct dprtc_cmd_set_irq_mask *cmd_params; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK, ++ cmd_flags, ++ token); ++ cmd_params = (struct dprtc_cmd_set_irq_mask *)cmd.params; ++ cmd_params->mask = cpu_to_le32(mask); ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dprtc_get_irq_mask() - Get interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRTC object ++ * @irq_index: The interrupt index to configure ++ * @mask: Returned event mask to trigger interrupt ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. 
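++ * ++ * Example (illustrative sketch): reading back a mask previously set ++ * with dprtc_set_irq_mask(): ++ * u32 mask; ++ * err = dprtc_get_irq_mask(mc_io, 0, token, DPRTC_IRQ_INDEX, &mask);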
++ */ ++int dprtc_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask) ++{ ++ struct dprtc_rsp_get_irq_mask *rsp_params; ++ struct dprtc_cmd_get_irq *cmd_params; ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_MASK, ++ cmd_flags, ++ token); ++ cmd_params = (struct dprtc_cmd_get_irq *)cmd.params; ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dprtc_rsp_get_irq_mask *)cmd.params; ++ *mask = le32_to_cpu(rsp_params->mask); ++ ++ return 0; ++} ++ ++/** ++ * dprtc_get_irq_status() - Get the current status of any pending interrupts. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRTC object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprtc_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status) ++{ ++ struct dprtc_cmd_get_irq_status *cmd_params; ++ struct dprtc_rsp_get_irq_status *rsp_params; ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_STATUS, ++ cmd_flags, ++ token); ++ cmd_params = (struct dprtc_cmd_get_irq_status *)cmd.params; ++ cmd_params->status = cpu_to_le32(*status); ++ cmd_params->irq_index = irq_index; ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dprtc_rsp_get_irq_status *)cmd.params; ++ *status = le32_to_cpu(rsp_params->status); ++ ++ return 0; ++} ++ ++/** ++ * dprtc_clear_irq_status() - Clear a pending interrupt's status ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRTC object ++ * @irq_index: The interrupt index to configure ++ * @status: Bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprtc_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status) ++{ ++ struct dprtc_cmd_clear_irq_status *cmd_params; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS, ++ cmd_flags, ++ token); ++ cmd_params = (struct dprtc_cmd_clear_irq_status *)cmd.params; ++ cmd_params->irq_index = irq_index; ++ cmd_params->status = cpu_to_le32(status); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dprtc_get_attributes() - Retrieve DPRTC attributes. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRTC object ++ * @attr: Returned object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise.
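++ * ++ * Example (illustrative sketch only): ++ * struct dprtc_attr attr; ++ * err = dprtc_get_attributes(mc_io, 0, token, &attr);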
++ */ ++int dprtc_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dprtc_attr *attr) ++{ ++ struct dprtc_rsp_get_attributes *rsp_params; ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dprtc_rsp_get_attributes *)cmd.params; ++ attr->id = le32_to_cpu(rsp_params->id); ++ ++ return 0; ++} ++ ++/** ++ * dprtc_set_clock_offset() - Sets the clock's offset ++ * (usually relative to another clock). ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRTC object ++ * @offset: New clock offset (in nanoseconds). ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprtc_set_clock_offset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int64_t offset) ++{ ++ struct dprtc_cmd_set_clock_offset *cmd_params; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET, ++ cmd_flags, ++ token); ++ cmd_params = (struct dprtc_cmd_set_clock_offset *)cmd.params; ++ cmd_params->offset = cpu_to_le64(offset); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dprtc_set_freq_compensation() - Sets a new frequency compensation value. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRTC object ++ * @freq_compensation: The new frequency compensation value to set. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint32_t freq_compensation) ++{ ++ struct dprtc_get_freq_compensation *cmd_params; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION, ++ cmd_flags, ++ token); ++ cmd_params = (struct dprtc_get_freq_compensation *)cmd.params; ++ cmd_params->freq_compensation = cpu_to_le32(freq_compensation); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dprtc_get_freq_compensation() - Retrieves the frequency compensation value ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRTC object ++ * @freq_compensation: Frequency compensation value ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint32_t *freq_compensation) ++{ ++ struct dprtc_get_freq_compensation *rsp_params; ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dprtc_get_freq_compensation *)cmd.params; ++ *freq_compensation = le32_to_cpu(rsp_params->freq_compensation); ++ ++ return 0; ++} ++ ++/** ++ * dprtc_get_time() - Returns the current RTC time. 
++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRTC object ++ * @time: Current RTC time. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprtc_get_time(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint64_t *time) ++{ ++ struct dprtc_time *rsp_params; ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ rsp_params = (struct dprtc_time *)cmd.params; ++ *time = le64_to_cpu(rsp_params->time); ++ ++ return 0; ++} ++ ++/** ++ * dprtc_set_time() - Updates current RTC time. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRTC object ++ * @time: New RTC time. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprtc_set_time(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint64_t time) ++{ ++ struct dprtc_time *cmd_params; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME, ++ cmd_flags, ++ token); ++ cmd_params = (struct dprtc_time *)cmd.params; ++ cmd_params->time = cpu_to_le64(time); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dprtc_set_alarm() - Defines and sets alarm. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRTC object ++ * @time: In nanoseconds, the time when the alarm ++ * should go off - must be a multiple of ++ * 1 microsecond ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprtc_set_alarm(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, uint64_t time) ++{ ++ struct dprtc_time *cmd_params; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM, ++ cmd_flags, ++ token); ++ cmd_params = (struct dprtc_time *)cmd.params; ++ cmd_params->time = cpu_to_le64(time); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dprtc_get_api_version() - Get Data Path Real Time Counter API version ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @major_ver: Major version of data path real time counter API ++ * @minor_ver: Minor version of data path real time counter API ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprtc_get_api_version(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t *major_ver, ++ uint16_t *minor_ver) ++{ ++ struct dprtc_rsp_get_api_version *rsp_params; ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_API_VERSION, ++ cmd_flags, ++ 0); ++ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ rsp_params = (struct dprtc_rsp_get_api_version *)cmd.params; ++ *major_ver = le16_to_cpu(rsp_params->major); ++ *minor_ver = le16_to_cpu(rsp_params->minor); ++ ++ return 0; ++} +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/rtc/dprtc.h +@@ -0,0 +1,172 @@ ++/* Copyright 2013-2016 Freescale Semiconductor Inc. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++#ifndef __FSL_DPRTC_H ++#define __FSL_DPRTC_H ++ ++/* Data Path Real Time Counter API ++ * Contains initialization APIs and runtime control APIs for RTC ++ */ ++ ++struct fsl_mc_io; ++ ++/** ++ * Number of irq's ++ */ ++#define DPRTC_MAX_IRQ_NUM 1 ++#define DPRTC_IRQ_INDEX 0 ++ ++/** ++ * Interrupt event masks: ++ */ ++ ++/** ++ * Interrupt event mask indicating alarm event had occurred ++ */ ++#define DPRTC_EVENT_ALARM 0x40000000 ++/** ++ * Interrupt event mask indicating periodic pulse event had occurred ++ */ ++#define DPRTC_EVENT_PPS 0x08000000 ++ ++int dprtc_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dprtc_id, ++ uint16_t *token); ++ ++int dprtc_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * struct dprtc_cfg - Structure representing DPRTC configuration ++ * @options: place holder ++ */ ++struct dprtc_cfg { ++ uint32_t options; ++}; ++ ++int dprtc_create(struct fsl_mc_io *mc_io, ++ uint16_t dprc_token, ++ uint32_t cmd_flags, ++ const struct dprtc_cfg *cfg, ++ uint32_t *obj_id); ++ ++int dprtc_destroy(struct fsl_mc_io *mc_io, ++ uint16_t dprc_token, ++ uint32_t cmd_flags, ++ uint32_t object_id); ++ ++int dprtc_set_clock_offset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int64_t offset); ++ ++int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint32_t freq_compensation); ++ ++int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint32_t *freq_compensation); ++ ++int dprtc_get_time(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint64_t *time); ++ ++int dprtc_set_time(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint64_t time); ++ ++int dprtc_set_alarm(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint64_t time); ++ ++int dprtc_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en); ++ ++int dprtc_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en); ++ ++int dprtc_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask); ++ ++int dprtc_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask); ++ ++int dprtc_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status); ++ ++int dprtc_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status); ++ ++/** ++ * struct dprtc_attr - Structure representing DPRTC attributes ++ * @id: DPRTC object ID ++ */ ++struct dprtc_attr { ++ int id; ++}; ++ ++int dprtc_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dprtc_attr *attr); ++ ++int dprtc_get_api_version(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t *major_ver, ++ uint16_t *minor_ver); ++ ++#endif /* __FSL_DPRTC_H */ +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/rtc/rtc.c +@@ -0,0 +1,243 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. 
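That is the complete DPRTC contract; the rtc.c driver added next is exactly such a consumer. For orientation, a minimal calling sequence against this API looks as follows (error handling elided; mc_io and dprtc_id come from the fsl-mc bus, as in the probe routine further down):

	uint16_t token;
	uint64_t now;

	dprtc_open(mc_io, 0, dprtc_id, &token);
	dprtc_get_time(mc_io, 0, token, &now);
	dprtc_set_alarm(mc_io, 0, token, now + 1000000);	// 1 ms out; must be a multiple of 1 us
	dprtc_close(mc_io, 0, token);

Alarm expiry is then reported through the object's single interrupt (DPRTC_IRQ_INDEX) with DPRTC_EVENT_ALARM set in the status word read via dprtc_get_irq_status().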
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/module.h>
++#include <linux/ptp_clock_kernel.h>
++
++#include "../../fsl-mc/include/mc.h"
++#include "../../fsl-mc/include/mc-sys.h"
++
++#include "dprtc.h"
++#include "dprtc-cmd.h"
++
++#define N_EXT_TS 2
++
++struct ptp_clock *clock;
++struct fsl_mc_device *rtc_mc_dev;
++u32 freqCompensation;
++
++/* PTP clock operations */
++static int ptp_dpaa2_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
++{
++	u64 adj;
++	u32 diff, tmr_add;
++	int neg_adj = 0;
++	int err = 0;
++	struct fsl_mc_device *mc_dev = rtc_mc_dev;
++	struct device *dev = &mc_dev->dev;
++
++	if (ppb < 0) {
++		neg_adj = 1;
++		ppb = -ppb;
++	}
++
++	tmr_add = freqCompensation;
++	adj = tmr_add;
++	adj *= ppb;
++	diff = div_u64(adj, 1000000000ULL);
++
++	tmr_add = neg_adj ?
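/*
 * The adjustment above is linear in ppb: diff = tmr_add * |ppb| / 10^9.
 * Worked example (illustrative numbers only): with tmr_add = 0x20000000
 * and ppb = 5000 (clock asked to run 5 ppm fast), diff = 536870912 *
 * 5000 / 10^9 = 2684, i.e. the compensation value moves by about five
 * millionths of itself, matching the requested rate change.
 */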
tmr_add - diff : tmr_add + diff; ++ ++ err = dprtc_set_freq_compensation(mc_dev->mc_io, 0, ++ mc_dev->mc_handle, tmr_add); ++ if (err) ++ dev_err(dev, "dprtc_set_freq_compensation err %d\n", err); ++ return 0; ++} ++ ++static int ptp_dpaa2_adjtime(struct ptp_clock_info *ptp, s64 delta) ++{ ++ s64 now; ++ int err = 0; ++ struct fsl_mc_device *mc_dev = rtc_mc_dev; ++ struct device *dev = &mc_dev->dev; ++ ++ err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &now); ++ if (err) { ++ dev_err(dev, "dprtc_get_time err %d\n", err); ++ return 0; ++ } ++ ++ now += delta; ++ ++ err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, now); ++ if (err) { ++ dev_err(dev, "dprtc_set_time err %d\n", err); ++ return 0; ++ } ++ return 0; ++} ++ ++static int ptp_dpaa2_gettime(struct ptp_clock_info *ptp, struct timespec *ts) ++{ ++ u64 ns; ++ u32 remainder; ++ int err = 0; ++ struct fsl_mc_device *mc_dev = rtc_mc_dev; ++ struct device *dev = &mc_dev->dev; ++ ++ err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &ns); ++ if (err) { ++ dev_err(dev, "dprtc_get_time err %d\n", err); ++ return 0; ++ } ++ ++ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); ++ ts->tv_nsec = remainder; ++ return 0; ++} ++ ++static int ptp_dpaa2_settime(struct ptp_clock_info *ptp, ++ const struct timespec *ts) ++{ ++ u64 ns; ++ int err = 0; ++ struct fsl_mc_device *mc_dev = rtc_mc_dev; ++ struct device *dev = &mc_dev->dev; ++ ++ ns = ts->tv_sec * 1000000000ULL; ++ ns += ts->tv_nsec; ++ ++ err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, ns); ++ if (err) ++ dev_err(dev, "dprtc_set_time err %d\n", err); ++ return 0; ++} ++ ++static struct ptp_clock_info ptp_dpaa2_caps = { ++ .owner = THIS_MODULE, ++ .name = "dpaa2 clock", ++ .max_adj = 512000, ++ .n_alarm = 0, ++ .n_ext_ts = N_EXT_TS, ++ .n_per_out = 0, ++ .n_pins = 0, ++ .pps = 1, ++ .adjfreq = ptp_dpaa2_adjfreq, ++ .adjtime = ptp_dpaa2_adjtime, ++ .gettime64 = ptp_dpaa2_gettime, ++ .settime64 = ptp_dpaa2_settime, ++}; ++ ++static int rtc_probe(struct fsl_mc_device *mc_dev) ++{ ++ struct device *dev; ++ int err = 0; ++ int dpaa2_phc_index; ++ u32 tmr_add = 0; ++ ++ if (!mc_dev) ++ return -EFAULT; ++ ++ dev = &mc_dev->dev; ++ ++ err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io); ++ if (unlikely(err)) { ++ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); ++ goto err_exit; ++ } ++ if (!mc_dev->mc_io) { ++ dev_err(dev, ++ "fsl_mc_portal_allocate returned null handle but no error\n"); ++ err = -EFAULT; ++ goto err_exit; ++ } ++ ++ err = dprtc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id, ++ &mc_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dprtc_open err %d\n", err); ++ goto err_free_mcp; ++ } ++ if (!mc_dev->mc_handle) { ++ dev_err(dev, "dprtc_open returned null handle but no error\n"); ++ err = -EFAULT; ++ goto err_free_mcp; ++ } ++ ++ rtc_mc_dev = mc_dev; ++ ++ err = dprtc_get_freq_compensation(mc_dev->mc_io, 0, ++ mc_dev->mc_handle, &tmr_add); ++ if (err) { ++ dev_err(dev, "dprtc_get_freq_compensation err %d\n", err); ++ goto err_close; ++ } ++ freqCompensation = tmr_add; ++ ++ clock = ptp_clock_register(&ptp_dpaa2_caps, dev); ++ if (IS_ERR(clock)) { ++ err = PTR_ERR(clock); ++ goto err_close; ++ } ++ dpaa2_phc_index = ptp_clock_index(clock); ++ ++ return 0; ++err_close: ++ dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); ++err_free_mcp: ++ fsl_mc_portal_free(mc_dev->mc_io); ++err_exit: ++ return err; ++} ++ ++static int rtc_remove(struct fsl_mc_device *mc_dev) ++{ ++ ptp_clock_unregister(clock); ++ dprtc_close(mc_dev->mc_io, 0, 
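/*
 * Once ptp_clock_register() in rtc_probe() above succeeds, the clock is
 * visible to userspace as /dev/ptpN, with N from ptp_clock_index(). A
 * reader then uses the dynamic POSIX clock idiom - sketched below; the
 * FD_TO_CLOCKID() macro is the one from the kernel's testptp.c example,
 * not something this driver provides:
 *
 *	#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | 3)
 *
 *	int fd = open("/dev/ptp0", O_RDWR);
 *	struct timespec ts;
 *
 *	clock_gettime(FD_TO_CLOCKID(fd), &ts);
 */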
mc_dev->mc_handle); ++ fsl_mc_portal_free(mc_dev->mc_io); ++ ++ return 0; ++} ++ ++static const struct fsl_mc_device_id rtc_match_id_table[] = { ++ { ++ .vendor = FSL_MC_VENDOR_FREESCALE, ++ .obj_type = "dprtc", ++ }, ++ {} ++}; ++ ++static struct fsl_mc_driver rtc_drv = { ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .owner = THIS_MODULE, ++ }, ++ .probe = rtc_probe, ++ .remove = rtc_remove, ++ .match_id_table = rtc_match_id_table, ++}; ++ ++module_fsl_mc_driver(rtc_drv); ++ ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("DPAA2 RTC (PTP 1588 clock) driver (prototype)"); diff --git a/target/linux/layerscape/patches-4.9/801-ata-support-layerscape.patch b/target/linux/layerscape/patches-4.9/801-ata-support-layerscape.patch new file mode 100644 index 000000000..0c1cd1bf8 --- /dev/null +++ b/target/linux/layerscape/patches-4.9/801-ata-support-layerscape.patch @@ -0,0 +1,144 @@ +From 505eb62bdb7a4cc25b13491dd5c68d0741c5d6da Mon Sep 17 00:00:00 2001 +From: Yangbo Lu +Date: Mon, 25 Sep 2017 12:21:13 +0800 +Subject: [PATCH] ata: support layerscape + +This is a integrated patch for layerscape sata support. + +Signed-off-by: Tang Yuantian +Signed-off-by: Yangbo Lu +--- + drivers/ata/ahci_qoriq.c | 63 ++++++++++++++++++++++++++++++++++++++++++------ + 1 file changed, 56 insertions(+), 7 deletions(-) + +--- a/drivers/ata/ahci_qoriq.c ++++ b/drivers/ata/ahci_qoriq.c +@@ -1,7 +1,7 @@ + /* + * Freescale QorIQ AHCI SATA platform driver + * +- * Copyright 2015 Freescale, Inc. ++ * Copyright (C) 2015 Freescale Semiconductor, Inc. + * Tang Yuantian + * + * This program is free software; you can redistribute it and/or modify +@@ -46,23 +46,32 @@ + #define LS1021A_AXICC_ADDR 0xC0 + + #define SATA_ECC_DISABLE 0x00020000 ++#define ECC_DIS_ARMV8_CH2 0x80000000 ++#define ECC_DIS_LS1088A 0x40000000 + + enum ahci_qoriq_type { + AHCI_LS1021A, + AHCI_LS1043A, + AHCI_LS2080A, ++ AHCI_LS1046A, ++ AHCI_LS1088A, ++ AHCI_LS2088A, + }; + + struct ahci_qoriq_priv { + struct ccsr_ahci *reg_base; + enum ahci_qoriq_type type; + void __iomem *ecc_addr; ++ bool is_dmacoherent; + }; + + static const struct of_device_id ahci_qoriq_of_match[] = { + { .compatible = "fsl,ls1021a-ahci", .data = (void *)AHCI_LS1021A}, + { .compatible = "fsl,ls1043a-ahci", .data = (void *)AHCI_LS1043A}, + { .compatible = "fsl,ls2080a-ahci", .data = (void *)AHCI_LS2080A}, ++ { .compatible = "fsl,ls1046a-ahci", .data = (void *)AHCI_LS1046A}, ++ { .compatible = "fsl,ls1088a-ahci", .data = (void *)AHCI_LS1088A}, ++ { .compatible = "fsl,ls2088a-ahci", .data = (void *)AHCI_LS2088A}, + {}, + }; + MODULE_DEVICE_TABLE(of, ahci_qoriq_of_match); +@@ -154,6 +163,8 @@ static int ahci_qoriq_phy_init(struct ah + + switch (qpriv->type) { + case AHCI_LS1021A: ++ if (!qpriv->ecc_addr) ++ return -EINVAL; + writel(SATA_ECC_DISABLE, qpriv->ecc_addr); + writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); + writel(LS1021A_PORT_PHY2, reg_base + PORT_PHY2); +@@ -161,19 +172,56 @@ static int ahci_qoriq_phy_init(struct ah + writel(LS1021A_PORT_PHY4, reg_base + PORT_PHY4); + writel(LS1021A_PORT_PHY5, reg_base + PORT_PHY5); + writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); +- writel(AHCI_PORT_AXICC_CFG, reg_base + LS1021A_AXICC_ADDR); ++ if (qpriv->is_dmacoherent) ++ writel(AHCI_PORT_AXICC_CFG, ++ reg_base + LS1021A_AXICC_ADDR); + break; + + case AHCI_LS1043A: ++ if (!qpriv->ecc_addr) ++ return -EINVAL; ++ writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2, ++ qpriv->ecc_addr); + writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); + writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); 
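/*
 * Two ECC idioms coexist in this function after the patch: the LS1021A
 * case stores a full control word (SATA_ECC_DISABLE), while the ARMv8
 * cases read-modify-write so only one disable bit is touched, e.g.:
 *
 *	writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2, qpriv->ecc_addr);
 *
 * The new "if (!qpriv->ecc_addr) return -EINVAL;" guards also make the
 * "sata-ecc" register resource mandatory on every SoC that needs the
 * workaround, which is why the probe hunk below fetches it
 * unconditionally instead of only for LS1021A.
 */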
+- writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); ++ if (qpriv->is_dmacoherent) ++ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); + break; + + case AHCI_LS2080A: + writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); + writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); +- writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); ++ if (qpriv->is_dmacoherent) ++ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); ++ break; ++ ++ case AHCI_LS1046A: ++ if (!qpriv->ecc_addr) ++ return -EINVAL; ++ writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2, ++ qpriv->ecc_addr); ++ writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); ++ writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); ++ if (qpriv->is_dmacoherent) ++ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); ++ break; ++ ++ case AHCI_LS1088A: ++ if (!qpriv->ecc_addr) ++ return -EINVAL; ++ writel(readl(qpriv->ecc_addr) | ECC_DIS_LS1088A, ++ qpriv->ecc_addr); ++ writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); ++ writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); ++ if (qpriv->is_dmacoherent) ++ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); ++ break; ++ ++ case AHCI_LS2088A: ++ writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); ++ writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); ++ if (qpriv->is_dmacoherent) ++ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); + break; + } + +@@ -204,13 +252,14 @@ static int ahci_qoriq_probe(struct platf + + qoriq_priv->type = (enum ahci_qoriq_type)of_id->data; + +- if (qoriq_priv->type == AHCI_LS1021A) { +- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, +- "sata-ecc"); ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, ++ "sata-ecc"); ++ if (res) { + qoriq_priv->ecc_addr = devm_ioremap_resource(dev, res); + if (IS_ERR(qoriq_priv->ecc_addr)) + return PTR_ERR(qoriq_priv->ecc_addr); + } ++ qoriq_priv->is_dmacoherent = of_dma_is_coherent(np); + + rc = ahci_platform_enable_resources(hpriv); + if (rc) diff --git a/target/linux/layerscape/patches-4.9/802-clk-support-layerscape.patch b/target/linux/layerscape/patches-4.9/802-clk-support-layerscape.patch new file mode 100644 index 000000000..2f7d6f847 --- /dev/null +++ b/target/linux/layerscape/patches-4.9/802-clk-support-layerscape.patch @@ -0,0 +1,307 @@ +From bd3df6d053a28d5aa630524c9087c21def30e764 Mon Sep 17 00:00:00 2001 +From: Yangbo Lu +Date: Mon, 25 Sep 2017 12:09:35 +0800 +Subject: [PATCH] clk: support layerscape + +This is a integrated patch for layerscape clock support. 
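Besides the new ls1012a/ls1046a/ls1088a clockgen tables, the hunks below also register every PLL divider output in the clkdev lookup table and accept an optional "coreclk" input (exposed to consumers as the new two-cell type 5). One user-visible effect of the clkdev registration is that divider outputs become reachable by name, with no device-tree phandle needed; the cpufreq patch later in this series relies on exactly that for its bus-frequency fallback. A minimal consumer sketch (illustrative only):

	struct clk *pll0_div1 = clk_get(NULL, "cg-pll0-div1");	/* platform PLL /1 */

	if (!IS_ERR(pll0_div1))
		pr_info("bus clock: %lu Hz\n", clk_get_rate(pll0_div1));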
+ +Signed-off-by: Yuantian Tang +Signed-off-by: Mingkai Hu +Signed-off-by: Scott Wood +Signed-off-by: Yangbo Lu +--- + drivers/clk/clk-qoriq.c | 170 ++++++++++++++++++++++++++++++++++++++++++++---- + 1 file changed, 156 insertions(+), 14 deletions(-) + +--- a/drivers/clk/clk-qoriq.c ++++ b/drivers/clk/clk-qoriq.c +@@ -12,6 +12,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -87,7 +88,7 @@ struct clockgen { + struct device_node *node; + void __iomem *regs; + struct clockgen_chipinfo info; /* mutable copy */ +- struct clk *sysclk; ++ struct clk *sysclk, *coreclk; + struct clockgen_pll pll[6]; + struct clk *cmux[NUM_CMUX]; + struct clk *hwaccel[NUM_HWACCEL]; +@@ -266,6 +267,39 @@ static const struct clockgen_muxinfo ls1 + }, + }; + ++static const struct clockgen_muxinfo ls1046a_hwa1 = { ++ { ++ {}, ++ {}, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, ++ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, ++ }, ++}; ++ ++static const struct clockgen_muxinfo ls1046a_hwa2 = { ++ { ++ {}, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, ++ {}, ++ {}, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ }, ++}; ++ ++static const struct clockgen_muxinfo ls1012a_cmux = { ++ { ++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ {}, ++ [2] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ } ++}; ++ + static const struct clockgen_muxinfo t1023_hwa1 = { + { + {}, +@@ -489,6 +523,42 @@ static const struct clockgen_chipinfo ch + .flags = CG_PLL_8BIT, + }, + { ++ .compat = "fsl,ls1046a-clockgen", ++ .init_periph = t2080_init_periph, ++ .cmux_groups = { ++ &t1040_cmux ++ }, ++ .hwaccel = { ++ &ls1046a_hwa1, &ls1046a_hwa2 ++ }, ++ .cmux_to_group = { ++ 0, -1 ++ }, ++ .pll_mask = 0x07, ++ .flags = CG_PLL_8BIT, ++ }, ++ { ++ .compat = "fsl,ls1088a-clockgen", ++ .cmux_groups = { ++ &clockgen2_cmux_cga12 ++ }, ++ .cmux_to_group = { ++ 0, 0, -1 ++ }, ++ .pll_mask = 0x07, ++ .flags = CG_VER3 | CG_LITTLE_ENDIAN, ++ }, ++ { ++ .compat = "fsl,ls1012a-clockgen", ++ .cmux_groups = { ++ &ls1012a_cmux ++ }, ++ .cmux_to_group = { ++ 0, -1 ++ }, ++ .pll_mask = 0x03, ++ }, ++ { + .compat = "fsl,ls2080a-clockgen", + .cmux_groups = { + &clockgen2_cmux_cga12, &clockgen2_cmux_cgb +@@ -846,7 +916,12 @@ static void __init create_muxes(struct c + + static void __init clockgen_init(struct device_node *np); + +-/* Legacy nodes may get probed before the parent clockgen node */ ++/* ++ * Legacy nodes may get probed before the parent clockgen node. ++ * It is assumed that device trees with legacy nodes will not ++ * contain a "clocks" property -- otherwise the input clocks may ++ * not be initialized at this point. ++ */ + static void __init legacy_init_clockgen(struct device_node *np) + { + if (!clockgen.node) +@@ -887,18 +962,13 @@ static struct clk __init + return clk_register_fixed_rate(NULL, name, NULL, 0, rate); + } + +-static struct clk *sysclk_from_parent(const char *name) ++static struct clk __init *input_clock(const char *name, struct clk *clk) + { +- struct clk *clk; +- const char *parent_name; +- +- clk = of_clk_get(clockgen.node, 0); +- if (IS_ERR(clk)) +- return clk; ++ const char *input_name; + + /* Register the input clock under the desired name. 
*/ +- parent_name = __clk_get_name(clk); +- clk = clk_register_fixed_factor(NULL, name, parent_name, ++ input_name = __clk_get_name(clk); ++ clk = clk_register_fixed_factor(NULL, name, input_name, + 0, 1, 1); + if (IS_ERR(clk)) + pr_err("%s: Couldn't register %s: %ld\n", __func__, name, +@@ -907,6 +977,29 @@ static struct clk *sysclk_from_parent(co + return clk; + } + ++static struct clk __init *input_clock_by_name(const char *name, ++ const char *dtname) ++{ ++ struct clk *clk; ++ ++ clk = of_clk_get_by_name(clockgen.node, dtname); ++ if (IS_ERR(clk)) ++ return clk; ++ ++ return input_clock(name, clk); ++} ++ ++static struct clk __init *input_clock_by_index(const char *name, int idx) ++{ ++ struct clk *clk; ++ ++ clk = of_clk_get(clockgen.node, 0); ++ if (IS_ERR(clk)) ++ return clk; ++ ++ return input_clock(name, clk); ++} ++ + static struct clk * __init create_sysclk(const char *name) + { + struct device_node *sysclk; +@@ -916,7 +1009,11 @@ static struct clk * __init create_sysclk + if (!IS_ERR(clk)) + return clk; + +- clk = sysclk_from_parent(name); ++ clk = input_clock_by_name(name, "sysclk"); ++ if (!IS_ERR(clk)) ++ return clk; ++ ++ clk = input_clock_by_index(name, 0); + if (!IS_ERR(clk)) + return clk; + +@@ -927,7 +1024,27 @@ static struct clk * __init create_sysclk + return clk; + } + +- pr_err("%s: No input clock\n", __func__); ++ pr_err("%s: No input sysclk\n", __func__); ++ return NULL; ++} ++ ++static struct clk * __init create_coreclk(const char *name) ++{ ++ struct clk *clk; ++ ++ clk = input_clock_by_name(name, "coreclk"); ++ if (!IS_ERR(clk)) ++ return clk; ++ ++ /* ++ * This indicates a mix of legacy nodes with the new coreclk ++ * mechanism, which should never happen. If this error occurs, ++ * don't use the wrong input clock just because coreclk isn't ++ * ready yet. 
++ */ ++ if (WARN_ON(PTR_ERR(clk) == -EPROBE_DEFER)) ++ return clk; ++ + return NULL; + } + +@@ -950,11 +1067,19 @@ static void __init create_one_pll(struct + u32 __iomem *reg; + u32 mult; + struct clockgen_pll *pll = &cg->pll[idx]; ++ const char *input = "cg-sysclk"; + int i; + + if (!(cg->info.pll_mask & (1 << idx))) + return; + ++ if (cg->coreclk && idx != PLATFORM_PLL) { ++ if (IS_ERR(cg->coreclk)) ++ return; ++ ++ input = "cg-coreclk"; ++ } ++ + if (cg->info.flags & CG_VER3) { + switch (idx) { + case PLATFORM_PLL: +@@ -1000,12 +1125,13 @@ static void __init create_one_pll(struct + + for (i = 0; i < ARRAY_SIZE(pll->div); i++) { + struct clk *clk; ++ int ret; + + snprintf(pll->div[i].name, sizeof(pll->div[i].name), + "cg-pll%d-div%d", idx, i + 1); + + clk = clk_register_fixed_factor(NULL, +- pll->div[i].name, "cg-sysclk", 0, mult, i + 1); ++ pll->div[i].name, input, 0, mult, i + 1); + if (IS_ERR(clk)) { + pr_err("%s: %s: register failed %ld\n", + __func__, pll->div[i].name, PTR_ERR(clk)); +@@ -1013,6 +1139,11 @@ static void __init create_one_pll(struct + } + + pll->div[i].clk = clk; ++ ret = clk_register_clkdev(clk, pll->div[i].name, NULL); ++ if (ret != 0) ++ pr_err("%s: %s: register to lookup table failed %ld\n", ++ __func__, pll->div[i].name, PTR_ERR(clk)); ++ + } + } + +@@ -1142,6 +1273,13 @@ static struct clk *clockgen_clk_get(stru + goto bad_args; + clk = pll->div[idx].clk; + break; ++ case 5: ++ if (idx != 0) ++ goto bad_args; ++ clk = cg->coreclk; ++ if (IS_ERR(clk)) ++ clk = NULL; ++ break; + default: + goto bad_args; + } +@@ -1253,6 +1391,7 @@ static void __init clockgen_init(struct + clockgen.info.flags |= CG_CMUX_GE_PLAT; + + clockgen.sysclk = create_sysclk("cg-sysclk"); ++ clockgen.coreclk = create_coreclk("cg-coreclk"); + create_plls(&clockgen); + create_muxes(&clockgen); + +@@ -1273,8 +1412,11 @@ err: + + CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init); + CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init); ++CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init); + CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init); + CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init); ++CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init); ++CLK_OF_DECLARE(qoriq_clockgen_ls1088a, "fsl,ls1088a-clockgen", clockgen_init); + CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init); + + /* Legacy nodes */ diff --git a/target/linux/layerscape/patches-4.4/8026-cpufreq-qoriq-Don-t-look-at-clock-implementation-det.patch b/target/linux/layerscape/patches-4.9/803-cpufreq-support-layerscape.patch similarity index 57% rename from target/linux/layerscape/patches-4.4/8026-cpufreq-qoriq-Don-t-look-at-clock-implementation-det.patch rename to target/linux/layerscape/patches-4.9/803-cpufreq-support-layerscape.patch index e8cb061a5..2bc0f24f7 100644 --- a/target/linux/layerscape/patches-4.4/8026-cpufreq-qoriq-Don-t-look-at-clock-implementation-det.patch +++ b/target/linux/layerscape/patches-4.9/803-cpufreq-support-layerscape.patch @@ -1,34 +1,29 @@ -From 8f3768a7c649526f821a6a4cd32cc44a8e7fa317 Mon Sep 17 00:00:00 2001 -From: Scott Wood -Date: Fri, 15 Jan 2016 07:34:33 +0000 -Subject: [PATCH 26/70] cpufreq: qoriq: Don't look at clock implementation - details +From a9ebdf9fa18fd317a4e97f46e8c5263898094864 Mon Sep 17 00:00:00 2001 +From: Yangbo Lu +Date: Mon, 25 Sep 2017 12:20:10 +0800 +Subject: [PATCH] cpufreq: support layerscape -Get the 
CPU clock's potential parent clocks from the clock interface -itself, rather than manually parsing the clocks property to find a -phandle, looking at the clock-names property of that, and assuming that -those are valid parent clocks for the cpu clock. +This is a integrated patch for layerscape pm support. -This is necessary now that the clocks are generated based on the clock -driver's knowledge of the chip rather than a fragile device-tree -description of the mux options. - -We can now rely on the clock driver to ensure that the mux only exposes -options that are valid. The cpufreq driver was currently being overly -conservative in some cases -- for example, the "min_cpufreq = -get_bus_freq()" restriction only applies to chips with erratum -A-004510, and whether the freq_mask used on p5020 is needed depends on -the actual frequencies of the PLLs (FWIW, p5040 has a similar -limitation but its .freq_mask was zero) -- and the frequency mask -mechanism made assumptions about particular parent clock indices that -are no longer valid. - -Signed-off-by: Scott Wood -Acked-by: Viresh Kumar +Signed-off-by: Tang Yuantian +Signed-off-by: Yangbo Lu --- - drivers/cpufreq/qoriq-cpufreq.c | 138 ++++++++++++--------------------------- - 1 file changed, 41 insertions(+), 97 deletions(-) + drivers/cpufreq/Kconfig | 2 +- + drivers/cpufreq/qoriq-cpufreq.c | 176 +++++++++++++++------------------------- + drivers/firmware/psci.c | 12 ++- + 3 files changed, 77 insertions(+), 113 deletions(-) +--- a/drivers/cpufreq/Kconfig ++++ b/drivers/cpufreq/Kconfig +@@ -332,7 +332,7 @@ endif + + config QORIQ_CPUFREQ + tristate "CPU frequency scaling driver for Freescale QorIQ SoCs" +- depends on OF && COMMON_CLK && (PPC_E500MC || ARM) ++ depends on OF && COMMON_CLK && (PPC_E500MC || ARM || ARM64) + depends on !CPU_THERMAL || THERMAL + select CLK_QORIQ + help --- a/drivers/cpufreq/qoriq-cpufreq.c +++ b/drivers/cpufreq/qoriq-cpufreq.c @@ -11,6 +11,7 @@ @@ -37,10 +32,21 @@ Acked-by: Viresh Kumar #include +#include #include + #include #include - #include -@@ -35,53 +36,20 @@ struct cpu_data { - struct cpufreq_frequency_table *table; +@@ -22,10 +23,6 @@ + #include + #include + +-#if !defined(CONFIG_ARM) +-#include /* for get_hard_smp_processor_id() in UP configs */ +-#endif +- + /** + * struct cpu_data + * @pclk: the parent clock of cpu +@@ -37,73 +34,51 @@ struct cpu_data { + struct thermal_cooling_device *cdev; }; +/* @@ -101,8 +107,35 @@ Acked-by: Viresh Kumar static u32 get_bus_freq(void) { struct device_node *soc; -@@ -99,9 +67,10 @@ static u32 get_bus_freq(void) - return sysfreq; + u32 sysfreq; ++ struct clk *pltclk; ++ int ret; + ++ /* get platform freq by searching bus-frequency property */ + soc = of_find_node_by_type(NULL, "soc"); +- if (!soc) +- return 0; +- +- if (of_property_read_u32(soc, "bus-frequency", &sysfreq)) +- sysfreq = 0; ++ if (soc) { ++ ret = of_property_read_u32(soc, "bus-frequency", &sysfreq); ++ of_node_put(soc); ++ if (!ret) ++ return sysfreq; ++ } + +- of_node_put(soc); ++ /* get platform freq by its clock name */ ++ pltclk = clk_get(NULL, "cg-pll0-div1"); ++ if (IS_ERR(pltclk)) { ++ pr_err("%s: can't get bus frequency %ld\n", ++ __func__, PTR_ERR(pltclk)); ++ return PTR_ERR(pltclk); ++ } + +- return sysfreq; ++ return clk_get_rate(pltclk); } -static struct device_node *cpu_to_clk_node(int cpu) @@ -114,7 +147,7 @@ Acked-by: Viresh Kumar if (!cpu_present(cpu)) return NULL; -@@ -110,37 +79,28 @@ static struct device_node *cpu_to_clk_no +@@ -112,37 +87,28 @@ static struct device_node *cpu_to_clk_no if 
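/*
 * The rework in these hunks derives the cpufreq table from the clock
 * tree itself instead of re-parsing "clocks" phandles: the mux clock
 * found for the CPU is interrogated with clk_hw_get_num_parents() to
 * size the table and clk_hw_get_parent_by_index() to fill it. Condensed
 * sketch of the idiom used below (error paths omitted):
 *
 *	const struct clk_hw *hw = __clk_get_hw(policy->clk);
 *	int i, count = clk_hw_get_num_parents(hw);
 *
 *	for (i = 0; i < count; i++) {
 *		struct clk *p = clk_hw_get_parent_by_index(hw, i)->clk;
 *
 *		table[i].frequency = clk_get_rate(p) / 1000;	// kHz
 *		table[i].driver_data = i;
 *	}
 */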
(!np) return NULL; @@ -160,16 +193,21 @@ Acked-by: Viresh Kumar } /* reduce the duplicated frequencies in frequency table */ -@@ -198,7 +158,7 @@ static int qoriq_cpufreq_cpu_init(struct +@@ -198,10 +164,11 @@ static void freq_table_sort(struct cpufr + + static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy) { - struct device_node *np, *pnode; +- struct device_node *np, *pnode; ++ struct device_node *np; int i, count, ret; - u32 freq, mask; + u32 freq; struct clk *clk; ++ const struct clk_hw *hwclk; struct cpufreq_frequency_table *table; struct cpu_data *data; -@@ -219,17 +179,12 @@ static int qoriq_cpufreq_cpu_init(struct + unsigned int cpu = policy->cpu; +@@ -221,17 +188,13 @@ static int qoriq_cpufreq_cpu_init(struct goto err_nomem2; } @@ -178,7 +216,8 @@ Acked-by: Viresh Kumar - pr_err("%s: could not get clock information\n", __func__); - goto err_nomem2; - } -+ count = clk_get_num_parents(policy->clk); ++ hwclk = __clk_get_hw(policy->clk); ++ count = clk_hw_get_num_parents(hwclk); - count = of_property_count_strings(pnode, "clock-names"); data->pclk = kcalloc(count, sizeof(struct clk *), GFP_KERNEL); @@ -189,7 +228,7 @@ Acked-by: Viresh Kumar } table = kcalloc(count + 1, sizeof(*table), GFP_KERNEL); -@@ -238,23 +193,11 @@ static int qoriq_cpufreq_cpu_init(struct +@@ -240,23 +203,11 @@ static int qoriq_cpufreq_cpu_init(struct goto err_pclk; } @@ -200,7 +239,7 @@ Acked-by: Viresh Kumar - for (i = 0; i < count; i++) { - clk = of_clk_get(pnode, i); -+ clk = clk_get_parent_by_index(policy->clk, i); ++ clk = clk_hw_get_parent_by_index(hwclk, i)->clk; data->pclk[i] = clk; freq = clk_get_rate(clk); - /* @@ -215,7 +254,15 @@ Acked-by: Viresh Kumar table[i].driver_data = i; } freq_table_redup(table, count); -@@ -288,10 +231,7 @@ err_nomem1: +@@ -282,7 +233,6 @@ static int qoriq_cpufreq_cpu_init(struct + policy->cpuinfo.transition_latency = u64temp + 1; + + of_node_put(np); +- of_node_put(pnode); + + return 0; + +@@ -290,10 +240,7 @@ err_nomem1: kfree(table); err_pclk: kfree(data->pclk); @@ -226,7 +273,7 @@ Acked-by: Viresh Kumar kfree(data); err_np: of_node_put(np); -@@ -332,12 +272,20 @@ static struct cpufreq_driver qoriq_cpufr +@@ -357,12 +304,25 @@ static struct cpufreq_driver qoriq_cpufr .attr = cpufreq_generic_attr, }; @@ -246,13 +293,18 @@ Acked-by: Viresh Kumar + { .compatible = "fsl,t2080-clockgen", &blacklist }, + { .compatible = "fsl,t4240-clockgen", &blacklist }, + ++ { .compatible = "fsl,ls1012a-clockgen", }, + { .compatible = "fsl,ls1021a-clockgen", }, ++ { .compatible = "fsl,ls1043a-clockgen", }, ++ { .compatible = "fsl,ls1046a-clockgen", }, ++ { .compatible = "fsl,ls1088a-clockgen", }, ++ { .compatible = "fsl,ls2080a-clockgen", }, + { .compatible = "fsl,p4080-clockgen", }, + { .compatible = "fsl,qoriq-clockgen-1.0", }, { .compatible = "fsl,qoriq-clockgen-2.0", }, {} }; -@@ -355,16 +303,12 @@ static int __init qoriq_cpufreq_init(voi +@@ -380,16 +340,12 @@ static int __init qoriq_cpufreq_init(voi match = of_match_node(node_matches, np); data = match->data; @@ -272,3 +324,38 @@ Acked-by: Viresh Kumar ret = cpufreq_register_driver(&qoriq_cpufreq_driver); if (!ret) pr_info("Freescale QorIQ CPU frequency scaling driver\n"); +--- a/drivers/firmware/psci.c ++++ b/drivers/firmware/psci.c +@@ -418,8 +418,12 @@ CPUIDLE_METHOD_OF_DECLARE(psci, "psci", + + static int psci_system_suspend(unsigned long unused) + { +- return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND), +- virt_to_phys(cpu_resume), 0, 0); ++ u32 state; ++ ++ state = ( 2 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | ++ (1 
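/*
 * PSCI 0.2 power_state encoding, for reference: the affinity-level field
 * sits at bits [25:24] (PSCI_0_2_POWER_STATE_AFFL_SHIFT = 24; 2 requests
 * the whole-cluster/system path) and the state-type bit at bit 16
 * (PSCI_0_2_POWER_STATE_TYPE_SHIFT = 16; 1 = POWERDOWN rather than
 * STANDBY), so the value composed here is 0x02010000.
 */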
<< PSCI_0_2_POWER_STATE_TYPE_SHIFT); ++ ++ return psci_cpu_suspend(state, virt_to_phys(cpu_resume)); + } + + static int psci_system_suspend_enter(suspend_state_t state) +@@ -439,6 +443,8 @@ static void __init psci_init_system_susp + if (!IS_ENABLED(CONFIG_SUSPEND)) + return; + ++ suspend_set_ops(&psci_suspend_ops); ++ + ret = psci_features(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND)); + + if (ret != PSCI_RET_NOT_SUPPORTED) +@@ -516,6 +522,8 @@ static void __init psci_0_2_set_function + arm_pm_restart = psci_sys_reset; + + pm_power_off = psci_sys_poweroff; ++ psci_init_system_suspend(); ++ suspend_set_ops(&psci_suspend_ops); + } + + /* diff --git a/target/linux/layerscape/patches-4.9/804-crypto-support-layerscape.patch b/target/linux/layerscape/patches-4.9/804-crypto-support-layerscape.patch new file mode 100644 index 000000000..79103a273 --- /dev/null +++ b/target/linux/layerscape/patches-4.9/804-crypto-support-layerscape.patch @@ -0,0 +1,26717 @@ +From 0a5b97d1f524c1769b4059e3c7123b52755f7121 Mon Sep 17 00:00:00 2001 +From: Yangbo Lu +Date: Wed, 27 Sep 2017 15:02:01 +0800 +Subject: [PATCH] crypto: support layerscape +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This is a integrated patch for layerscape sec support. + +Signed-off-by: Radu Alexe +Signed-off-by: Fabio Estevam +Signed-off-by: Arnd Bergmann +Signed-off-by: Radu Alexe +Signed-off-by: Tudor Ambarus +Signed-off-by: Eric Biggers +Signed-off-by: Giovanni Cabiddu +Signed-off-by: Xulin Sun +Signed-off-by: Ard Biesheuvel +Signed-off-by: Marcus Folkesson +Signed-off-by: Tudor Ambarus +Signed-off-by: Andrew Lutomirski +Signed-off-by: Wei Yongjun +Signed-off-by: Masahiro Yamada +Signed-off-by: Marcelo Cerri +Signed-off-by: Arvind Yadav +Signed-off-by: Herbert Xu +Signed-off-by: Laura Abbott +Signed-off-by: Horia Geantă +Signed-off-by: Yangbo Lu +--- + crypto/Kconfig | 30 + + crypto/Makefile | 4 + + crypto/acompress.c | 169 + + crypto/algboss.c | 12 +- + crypto/crypto_user.c | 19 + + crypto/scompress.c | 356 ++ + crypto/tcrypt.c | 17 +- + crypto/testmgr.c | 1701 ++++---- + crypto/testmgr.h | 1125 +++--- + crypto/tls.c | 607 +++ + drivers/crypto/caam/Kconfig | 72 +- + drivers/crypto/caam/Makefile | 15 +- + drivers/crypto/caam/caamalg.c | 2125 +++------- + drivers/crypto/caam/caamalg_desc.c | 1913 +++++++++ + drivers/crypto/caam/caamalg_desc.h | 127 + + drivers/crypto/caam/caamalg_qi.c | 2877 +++++++++++++ + drivers/crypto/caam/caamalg_qi2.c | 4428 +++++++++++++++++++++ + drivers/crypto/caam/caamalg_qi2.h | 265 ++ + drivers/crypto/caam/caamhash.c | 521 +-- + drivers/crypto/caam/caampkc.c | 471 ++- + drivers/crypto/caam/caampkc.h | 58 + + drivers/crypto/caam/caamrng.c | 16 +- + drivers/crypto/caam/compat.h | 1 + + drivers/crypto/caam/ctrl.c | 356 +- + drivers/crypto/caam/ctrl.h | 2 + + drivers/crypto/caam/desc.h | 52 +- + drivers/crypto/caam/desc_constr.h | 139 +- + drivers/crypto/caam/dpseci.c | 859 ++++ + drivers/crypto/caam/dpseci.h | 395 ++ + drivers/crypto/caam/dpseci_cmd.h | 261 ++ + drivers/crypto/caam/error.c | 127 +- + drivers/crypto/caam/error.h | 10 +- + drivers/crypto/caam/intern.h | 31 +- + drivers/crypto/caam/jr.c | 55 +- + drivers/crypto/caam/key_gen.c | 32 +- + drivers/crypto/caam/key_gen.h | 36 +- + drivers/crypto/caam/pdb.h | 62 + + drivers/crypto/caam/pkc_desc.c | 36 + + drivers/crypto/caam/qi.c | 797 ++++ + drivers/crypto/caam/qi.h | 204 + + drivers/crypto/caam/regs.h | 63 +- + drivers/crypto/caam/sg_sw_qm.h | 126 + + drivers/crypto/caam/sg_sw_qm2.h | 81 + + 
drivers/crypto/caam/sg_sw_sec4.h | 60 +- + drivers/net/wireless/rsi/rsi_91x_usb.c | 2 +- + drivers/staging/wilc1000/linux_wlan.c | 2 +- + drivers/staging/wilc1000/wilc_wfi_cfgoperations.c | 2 +- + include/crypto/acompress.h | 269 ++ + include/crypto/internal/acompress.h | 81 + + include/crypto/internal/scompress.h | 136 + + include/linux/crypto.h | 3 + + include/uapi/linux/cryptouser.h | 5 + + scripts/spelling.txt | 3 + + sound/soc/amd/acp-pcm-dma.c | 2 +- + 54 files changed, 17263 insertions(+), 3955 deletions(-) + create mode 100644 crypto/acompress.c + create mode 100644 crypto/scompress.c + create mode 100644 crypto/tls.c + create mode 100644 drivers/crypto/caam/caamalg_desc.c + create mode 100644 drivers/crypto/caam/caamalg_desc.h + create mode 100644 drivers/crypto/caam/caamalg_qi.c + create mode 100644 drivers/crypto/caam/caamalg_qi2.c + create mode 100644 drivers/crypto/caam/caamalg_qi2.h + create mode 100644 drivers/crypto/caam/dpseci.c + create mode 100644 drivers/crypto/caam/dpseci.h + create mode 100644 drivers/crypto/caam/dpseci_cmd.h + create mode 100644 drivers/crypto/caam/qi.c + create mode 100644 drivers/crypto/caam/qi.h + create mode 100644 drivers/crypto/caam/sg_sw_qm.h + create mode 100644 drivers/crypto/caam/sg_sw_qm2.h + create mode 100644 include/crypto/acompress.h + create mode 100644 include/crypto/internal/acompress.h + create mode 100644 include/crypto/internal/scompress.h + +--- a/crypto/Kconfig ++++ b/crypto/Kconfig +@@ -102,6 +102,15 @@ config CRYPTO_KPP + select CRYPTO_ALGAPI + select CRYPTO_KPP2 + ++config CRYPTO_ACOMP2 ++ tristate ++ select CRYPTO_ALGAPI2 ++ ++config CRYPTO_ACOMP ++ tristate ++ select CRYPTO_ALGAPI ++ select CRYPTO_ACOMP2 ++ + config CRYPTO_RSA + tristate "RSA algorithm" + select CRYPTO_AKCIPHER +@@ -138,6 +147,7 @@ config CRYPTO_MANAGER2 + select CRYPTO_BLKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS + select CRYPTO_AKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS + select CRYPTO_KPP2 if !CRYPTO_MANAGER_DISABLE_TESTS ++ select CRYPTO_ACOMP2 if !CRYPTO_MANAGER_DISABLE_TESTS + + config CRYPTO_USER + tristate "Userspace cryptographic algorithm configuration" +@@ -295,6 +305,26 @@ config CRYPTO_ECHAINIV + a sequence number xored with a salt. This is the default + algorithm for CBC. + ++config CRYPTO_TLS ++ tristate "TLS support" ++ select CRYPTO_AEAD ++ select CRYPTO_BLKCIPHER ++ select CRYPTO_MANAGER ++ select CRYPTO_HASH ++ select CRYPTO_NULL ++ select CRYPTO_AUTHENC ++ help ++ Support for TLS 1.0 record encryption and decryption ++ ++ This module adds support for encryption/decryption of TLS 1.0 frames ++ using blockcipher algorithms. The name of the resulting algorithm is ++ "tls10(hmac(),cbc())". By default, the generic base ++ algorithms are used (e.g. aes-generic, sha1-generic), but hardware ++ accelerated versions will be used automatically if available. 
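/*
 * Typical use of the new acomp interface, sketched (assumes an algorithm
 * such as "deflate" is reachable through the scomp glue; the request
 * helpers live in include/crypto/acompress.h, added elsewhere in this
 * patch):
 *
 *	struct crypto_acomp *tfm = crypto_alloc_acomp("deflate", 0, 0);
 *	struct acomp_req *req = acomp_request_alloc(tfm);
 *	int err;
 *
 *	acomp_request_set_params(req, src_sg, dst_sg, slen, dlen);
 *	err = crypto_acomp_compress(req);	// -EINPROGRESS if truly async
 *
 *	acomp_request_free(req);
 *	crypto_free_acomp(tfm);
 *
 * Passing dst_sg == NULL lets the backend allocate the destination
 * scatterlist, which acomp_request_free() then releases via ->dst_free().
 */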
++ ++ User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0 ++ operations through AF_ALG or cryptodev interfaces ++ + comment "Block modes" + + config CRYPTO_CBC +--- a/crypto/Makefile ++++ b/crypto/Makefile +@@ -51,6 +51,9 @@ rsa_generic-y += rsa_helper.o + rsa_generic-y += rsa-pkcs1pad.o + obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o + ++obj-$(CONFIG_CRYPTO_ACOMP2) += acompress.o ++obj-$(CONFIG_CRYPTO_ACOMP2) += scompress.o ++ + cryptomgr-y := algboss.o testmgr.o + + obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o +@@ -115,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_ge + obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o + obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o + obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o ++obj-$(CONFIG_CRYPTO_TLS) += tls.o + obj-$(CONFIG_CRYPTO_LZO) += lzo.o + obj-$(CONFIG_CRYPTO_LZ4) += lz4.o + obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o +--- /dev/null ++++ b/crypto/acompress.c +@@ -0,0 +1,169 @@ ++/* ++ * Asynchronous Compression operations ++ * ++ * Copyright (c) 2016, Intel Corporation ++ * Authors: Weigang Li ++ * Giovanni Cabiddu ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the Free ++ * Software Foundation; either version 2 of the License, or (at your option) ++ * any later version. ++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "internal.h" ++ ++static const struct crypto_type crypto_acomp_type; ++ ++#ifdef CONFIG_NET ++static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg) ++{ ++ struct crypto_report_acomp racomp; ++ ++ strncpy(racomp.type, "acomp", sizeof(racomp.type)); ++ ++ if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, ++ sizeof(struct crypto_report_acomp), &racomp)) ++ goto nla_put_failure; ++ return 0; ++ ++nla_put_failure: ++ return -EMSGSIZE; ++} ++#else ++static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg) ++{ ++ return -ENOSYS; ++} ++#endif ++ ++static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg) ++ __attribute__ ((unused)); ++ ++static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg) ++{ ++ seq_puts(m, "type : acomp\n"); ++} ++ ++static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm) ++{ ++ struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm); ++ struct acomp_alg *alg = crypto_acomp_alg(acomp); ++ ++ alg->exit(acomp); ++} ++ ++static int crypto_acomp_init_tfm(struct crypto_tfm *tfm) ++{ ++ struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm); ++ struct acomp_alg *alg = crypto_acomp_alg(acomp); ++ ++ if (tfm->__crt_alg->cra_type != &crypto_acomp_type) ++ return crypto_init_scomp_ops_async(tfm); ++ ++ acomp->compress = alg->compress; ++ acomp->decompress = alg->decompress; ++ acomp->dst_free = alg->dst_free; ++ acomp->reqsize = alg->reqsize; ++ ++ if (alg->exit) ++ acomp->base.exit = crypto_acomp_exit_tfm; ++ ++ if (alg->init) ++ return alg->init(acomp); ++ ++ return 0; ++} ++ ++static unsigned int crypto_acomp_extsize(struct crypto_alg *alg) ++{ ++ int extsize = crypto_alg_extsize(alg); ++ ++ if (alg->cra_type != &crypto_acomp_type) ++ extsize += sizeof(struct crypto_scomp *); ++ ++ return extsize; ++} ++ ++static const struct crypto_type crypto_acomp_type = { ++ .extsize = crypto_acomp_extsize, ++ .init_tfm = crypto_acomp_init_tfm, ++#ifdef CONFIG_PROC_FS ++ .show = crypto_acomp_show, ++#endif ++ .report = 
crypto_acomp_report, ++ .maskclear = ~CRYPTO_ALG_TYPE_MASK, ++ .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK, ++ .type = CRYPTO_ALG_TYPE_ACOMPRESS, ++ .tfmsize = offsetof(struct crypto_acomp, base), ++}; ++ ++struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, ++ u32 mask) ++{ ++ return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask); ++} ++EXPORT_SYMBOL_GPL(crypto_alloc_acomp); ++ ++struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp) ++{ ++ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); ++ struct acomp_req *req; ++ ++ req = __acomp_request_alloc(acomp); ++ if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type)) ++ return crypto_acomp_scomp_alloc_ctx(req); ++ ++ return req; ++} ++EXPORT_SYMBOL_GPL(acomp_request_alloc); ++ ++void acomp_request_free(struct acomp_req *req) ++{ ++ struct crypto_acomp *acomp = crypto_acomp_reqtfm(req); ++ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); ++ ++ if (tfm->__crt_alg->cra_type != &crypto_acomp_type) ++ crypto_acomp_scomp_free_ctx(req); ++ ++ if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) { ++ acomp->dst_free(req->dst); ++ req->dst = NULL; ++ } ++ ++ __acomp_request_free(req); ++} ++EXPORT_SYMBOL_GPL(acomp_request_free); ++ ++int crypto_register_acomp(struct acomp_alg *alg) ++{ ++ struct crypto_alg *base = &alg->base; ++ ++ base->cra_type = &crypto_acomp_type; ++ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; ++ base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS; ++ ++ return crypto_register_alg(base); ++} ++EXPORT_SYMBOL_GPL(crypto_register_acomp); ++ ++int crypto_unregister_acomp(struct acomp_alg *alg) ++{ ++ return crypto_unregister_alg(&alg->base); ++} ++EXPORT_SYMBOL_GPL(crypto_unregister_acomp); ++ ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("Asynchronous compression type"); +--- a/crypto/algboss.c ++++ b/crypto/algboss.c +@@ -247,17 +247,9 @@ static int cryptomgr_schedule_test(struc + memcpy(param->alg, alg->cra_name, sizeof(param->alg)); + type = alg->cra_flags; + +- /* This piece of crap needs to disappear into per-type test hooks. */ +-#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS +- type |= CRYPTO_ALG_TESTED; +-#else +- if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) & +- CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) && +- ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == +- CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize : +- alg->cra_ablkcipher.ivsize)) ++ /* Do not test internal algorithms. 
*/ ++ if (type & CRYPTO_ALG_INTERNAL) + type |= CRYPTO_ALG_TESTED; +-#endif + + param->type = type; + +--- a/crypto/crypto_user.c ++++ b/crypto/crypto_user.c +@@ -112,6 +112,21 @@ nla_put_failure: + return -EMSGSIZE; + } + ++static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg) ++{ ++ struct crypto_report_acomp racomp; ++ ++ strncpy(racomp.type, "acomp", sizeof(racomp.type)); ++ ++ if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, ++ sizeof(struct crypto_report_acomp), &racomp)) ++ goto nla_put_failure; ++ return 0; ++ ++nla_put_failure: ++ return -EMSGSIZE; ++} ++ + static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg) + { + struct crypto_report_akcipher rakcipher; +@@ -186,7 +201,11 @@ static int crypto_report_one(struct cryp + goto nla_put_failure; + + break; ++ case CRYPTO_ALG_TYPE_ACOMPRESS: ++ if (crypto_report_acomp(skb, alg)) ++ goto nla_put_failure; + ++ break; + case CRYPTO_ALG_TYPE_AKCIPHER: + if (crypto_report_akcipher(skb, alg)) + goto nla_put_failure; +--- /dev/null ++++ b/crypto/scompress.c +@@ -0,0 +1,356 @@ ++/* ++ * Synchronous Compression operations ++ * ++ * Copyright 2015 LG Electronics Inc. ++ * Copyright (c) 2016, Intel Corporation ++ * Author: Giovanni Cabiddu ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the Free ++ * Software Foundation; either version 2 of the License, or (at your option) ++ * any later version. ++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "internal.h" ++ ++static const struct crypto_type crypto_scomp_type; ++static void * __percpu *scomp_src_scratches; ++static void * __percpu *scomp_dst_scratches; ++static int scomp_scratch_users; ++static DEFINE_MUTEX(scomp_lock); ++ ++#ifdef CONFIG_NET ++static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg) ++{ ++ struct crypto_report_comp rscomp; ++ ++ strncpy(rscomp.type, "scomp", sizeof(rscomp.type)); ++ ++ if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, ++ sizeof(struct crypto_report_comp), &rscomp)) ++ goto nla_put_failure; ++ return 0; ++ ++nla_put_failure: ++ return -EMSGSIZE; ++} ++#else ++static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg) ++{ ++ return -ENOSYS; ++} ++#endif ++ ++static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg) ++ __attribute__ ((unused)); ++ ++static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg) ++{ ++ seq_puts(m, "type : scomp\n"); ++} ++ ++static int crypto_scomp_init_tfm(struct crypto_tfm *tfm) ++{ ++ return 0; ++} ++ ++static void crypto_scomp_free_scratches(void * __percpu *scratches) ++{ ++ int i; ++ ++ if (!scratches) ++ return; ++ ++ for_each_possible_cpu(i) ++ vfree(*per_cpu_ptr(scratches, i)); ++ ++ free_percpu(scratches); ++} ++ ++static void * __percpu *crypto_scomp_alloc_scratches(void) ++{ ++ void * __percpu *scratches; ++ int i; ++ ++ scratches = alloc_percpu(void *); ++ if (!scratches) ++ return NULL; ++ ++ for_each_possible_cpu(i) { ++ void *scratch; ++ ++ scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i)); ++ if (!scratch) ++ goto error; ++ *per_cpu_ptr(scratches, i) = scratch; ++ } ++ ++ return scratches; ++ ++error: ++ crypto_scomp_free_scratches(scratches); ++ return NULL; ++} ++ ++static void crypto_scomp_free_all_scratches(void) ++{ ++ if (!--scomp_scratch_users) { ++ 
crypto_scomp_free_scratches(scomp_src_scratches); ++ crypto_scomp_free_scratches(scomp_dst_scratches); ++ scomp_src_scratches = NULL; ++ scomp_dst_scratches = NULL; ++ } ++} ++ ++static int crypto_scomp_alloc_all_scratches(void) ++{ ++ if (!scomp_scratch_users++) { ++ scomp_src_scratches = crypto_scomp_alloc_scratches(); ++ if (!scomp_src_scratches) ++ return -ENOMEM; ++ scomp_dst_scratches = crypto_scomp_alloc_scratches(); ++ if (!scomp_dst_scratches) ++ return -ENOMEM; ++ } ++ return 0; ++} ++ ++static void crypto_scomp_sg_free(struct scatterlist *sgl) ++{ ++ int i, n; ++ struct page *page; ++ ++ if (!sgl) ++ return; ++ ++ n = sg_nents(sgl); ++ for_each_sg(sgl, sgl, n, i) { ++ page = sg_page(sgl); ++ if (page) ++ __free_page(page); ++ } ++ ++ kfree(sgl); ++} ++ ++static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp) ++{ ++ struct scatterlist *sgl; ++ struct page *page; ++ int i, n; ++ ++ n = ((size - 1) >> PAGE_SHIFT) + 1; ++ ++ sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp); ++ if (!sgl) ++ return NULL; ++ ++ sg_init_table(sgl, n); ++ ++ for (i = 0; i < n; i++) { ++ page = alloc_page(gfp); ++ if (!page) ++ goto err; ++ sg_set_page(sgl + i, page, PAGE_SIZE, 0); ++ } ++ ++ return sgl; ++ ++err: ++ sg_mark_end(sgl + i); ++ crypto_scomp_sg_free(sgl); ++ return NULL; ++} ++ ++static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) ++{ ++ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); ++ void **tfm_ctx = acomp_tfm_ctx(tfm); ++ struct crypto_scomp *scomp = *tfm_ctx; ++ void **ctx = acomp_request_ctx(req); ++ const int cpu = get_cpu(); ++ u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu); ++ u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu); ++ int ret; ++ ++ if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ if (req->dst && !req->dlen) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE) ++ req->dlen = SCOMP_SCRATCH_SIZE; ++ ++ scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0); ++ if (dir) ++ ret = crypto_scomp_compress(scomp, scratch_src, req->slen, ++ scratch_dst, &req->dlen, *ctx); ++ else ++ ret = crypto_scomp_decompress(scomp, scratch_src, req->slen, ++ scratch_dst, &req->dlen, *ctx); ++ if (!ret) { ++ if (!req->dst) { ++ req->dst = crypto_scomp_sg_alloc(req->dlen, ++ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
++ GFP_KERNEL : GFP_ATOMIC); ++ if (!req->dst) ++ goto out; ++ } ++ scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen, ++ 1); ++ } ++out: ++ put_cpu(); ++ return ret; ++} ++ ++static int scomp_acomp_compress(struct acomp_req *req) ++{ ++ return scomp_acomp_comp_decomp(req, 1); ++} ++ ++static int scomp_acomp_decompress(struct acomp_req *req) ++{ ++ return scomp_acomp_comp_decomp(req, 0); ++} ++ ++static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm) ++{ ++ struct crypto_scomp **ctx = crypto_tfm_ctx(tfm); ++ ++ crypto_free_scomp(*ctx); ++} ++ ++int crypto_init_scomp_ops_async(struct crypto_tfm *tfm) ++{ ++ struct crypto_alg *calg = tfm->__crt_alg; ++ struct crypto_acomp *crt = __crypto_acomp_tfm(tfm); ++ struct crypto_scomp **ctx = crypto_tfm_ctx(tfm); ++ struct crypto_scomp *scomp; ++ ++ if (!crypto_mod_get(calg)) ++ return -EAGAIN; ++ ++ scomp = crypto_create_tfm(calg, &crypto_scomp_type); ++ if (IS_ERR(scomp)) { ++ crypto_mod_put(calg); ++ return PTR_ERR(scomp); ++ } ++ ++ *ctx = scomp; ++ tfm->exit = crypto_exit_scomp_ops_async; ++ ++ crt->compress = scomp_acomp_compress; ++ crt->decompress = scomp_acomp_decompress; ++ crt->dst_free = crypto_scomp_sg_free; ++ crt->reqsize = sizeof(void *); ++ ++ return 0; ++} ++ ++struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req) ++{ ++ struct crypto_acomp *acomp = crypto_acomp_reqtfm(req); ++ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); ++ struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm); ++ struct crypto_scomp *scomp = *tfm_ctx; ++ void *ctx; ++ ++ ctx = crypto_scomp_alloc_ctx(scomp); ++ if (IS_ERR(ctx)) { ++ kfree(req); ++ return NULL; ++ } ++ ++ *req->__ctx = ctx; ++ ++ return req; ++} ++ ++void crypto_acomp_scomp_free_ctx(struct acomp_req *req) ++{ ++ struct crypto_acomp *acomp = crypto_acomp_reqtfm(req); ++ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); ++ struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm); ++ struct crypto_scomp *scomp = *tfm_ctx; ++ void *ctx = *req->__ctx; ++ ++ if (ctx) ++ crypto_scomp_free_ctx(scomp, ctx); ++} ++ ++static const struct crypto_type crypto_scomp_type = { ++ .extsize = crypto_alg_extsize, ++ .init_tfm = crypto_scomp_init_tfm, ++#ifdef CONFIG_PROC_FS ++ .show = crypto_scomp_show, ++#endif ++ .report = crypto_scomp_report, ++ .maskclear = ~CRYPTO_ALG_TYPE_MASK, ++ .maskset = CRYPTO_ALG_TYPE_MASK, ++ .type = CRYPTO_ALG_TYPE_SCOMPRESS, ++ .tfmsize = offsetof(struct crypto_scomp, base), ++}; ++ ++int crypto_register_scomp(struct scomp_alg *alg) ++{ ++ struct crypto_alg *base = &alg->base; ++ int ret = -ENOMEM; ++ ++ mutex_lock(&scomp_lock); ++ if (crypto_scomp_alloc_all_scratches()) ++ goto error; ++ ++ base->cra_type = &crypto_scomp_type; ++ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; ++ base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS; ++ ++ ret = crypto_register_alg(base); ++ if (ret) ++ goto error; ++ ++ mutex_unlock(&scomp_lock); ++ return ret; ++ ++error: ++ crypto_scomp_free_all_scratches(); ++ mutex_unlock(&scomp_lock); ++ return ret; ++} ++EXPORT_SYMBOL_GPL(crypto_register_scomp); ++ ++int crypto_unregister_scomp(struct scomp_alg *alg) ++{ ++ int ret; ++ ++ mutex_lock(&scomp_lock); ++ ret = crypto_unregister_alg(&alg->base); ++ crypto_scomp_free_all_scratches(); ++ mutex_unlock(&scomp_lock); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(crypto_unregister_scomp); ++ ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("Synchronous compression type"); +--- a/crypto/tcrypt.c ++++ b/crypto/tcrypt.c +@@ -74,7 +74,7 @@ static char *check[] = { + "khazad", "wp512", "wp384", 
"wp256", "tnepres", "xeta", "fcrypt", + "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320", + "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512", +- NULL ++ "rsa", NULL + }; + + struct tcrypt_result { +@@ -1329,6 +1329,10 @@ static int do_test(const char *alg, u32 + ret += tcrypt_test("hmac(sha3-512)"); + break; + ++ case 115: ++ ret += tcrypt_test("rsa"); ++ break; ++ + case 150: + ret += tcrypt_test("ansi_cprng"); + break; +@@ -1390,6 +1394,9 @@ static int do_test(const char *alg, u32 + case 190: + ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))"); + break; ++ case 191: ++ ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))"); ++ break; + case 200: + test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32); +@@ -1404,9 +1411,9 @@ static int do_test(const char *alg, u32 + test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0, + speed_template_32_40_48); + test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0, +- speed_template_32_48_64); ++ speed_template_32_64); + test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0, +- speed_template_32_48_64); ++ speed_template_32_64); + test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0, +@@ -1837,9 +1844,9 @@ static int do_test(const char *alg, u32 + test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0, + speed_template_32_40_48); + test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0, +- speed_template_32_48_64); ++ speed_template_32_64); + test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0, +- speed_template_32_48_64); ++ speed_template_32_64); + test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0, +--- a/crypto/testmgr.c ++++ b/crypto/testmgr.c +@@ -33,6 +33,7 @@ + #include + #include + #include ++#include + + #include "internal.h" + +@@ -62,7 +63,7 @@ int alg_test(const char *driver, const c + */ + #define IDX1 32 + #define IDX2 32400 +-#define IDX3 1 ++#define IDX3 1511 + #define IDX4 8193 + #define IDX5 22222 + #define IDX6 17101 +@@ -82,47 +83,54 @@ struct tcrypt_result { + + struct aead_test_suite { + struct { +- struct aead_testvec *vecs; ++ const struct aead_testvec *vecs; + unsigned int count; + } enc, dec; + }; + + struct cipher_test_suite { + struct { +- struct cipher_testvec *vecs; ++ const struct cipher_testvec *vecs; + unsigned int count; + } enc, dec; + }; + + struct comp_test_suite { + struct { +- struct comp_testvec *vecs; ++ const struct comp_testvec *vecs; + unsigned int count; + } comp, decomp; + }; + + struct hash_test_suite { +- struct hash_testvec *vecs; ++ const struct hash_testvec *vecs; + unsigned int count; + }; + + struct cprng_test_suite { +- struct cprng_testvec *vecs; ++ const struct cprng_testvec *vecs; + unsigned int count; + }; + + struct drbg_test_suite { +- struct drbg_testvec *vecs; ++ const struct drbg_testvec *vecs; + unsigned int count; + }; + ++struct tls_test_suite { ++ struct { ++ struct tls_testvec *vecs; ++ unsigned int count; ++ } enc, dec; ++}; ++ + struct akcipher_test_suite { +- struct akcipher_testvec *vecs; ++ const struct akcipher_testvec *vecs; + unsigned int count; + }; + + struct kpp_test_suite { +- struct kpp_testvec *vecs; ++ const struct kpp_testvec *vecs; + unsigned int count; + }; + +@@ -139,12 +147,14 @@ struct alg_test_desc { + struct hash_test_suite hash; + struct cprng_test_suite cprng; + struct drbg_test_suite drbg; ++ struct tls_test_suite 
tls; + struct akcipher_test_suite akcipher; + struct kpp_test_suite kpp; + } suite; + }; + +-static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 }; ++static const unsigned int IDX[8] = { ++ IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 }; + + static void hexdump(unsigned char *buf, unsigned int len) + { +@@ -202,7 +212,7 @@ static int wait_async_op(struct tcrypt_r + } + + static int ahash_partial_update(struct ahash_request **preq, +- struct crypto_ahash *tfm, struct hash_testvec *template, ++ struct crypto_ahash *tfm, const struct hash_testvec *template, + void *hash_buff, int k, int temp, struct scatterlist *sg, + const char *algo, char *result, struct tcrypt_result *tresult) + { +@@ -259,11 +269,12 @@ out_nostate: + return ret; + } + +-static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, +- unsigned int tcount, bool use_digest, +- const int align_offset) ++static int __test_hash(struct crypto_ahash *tfm, ++ const struct hash_testvec *template, unsigned int tcount, ++ bool use_digest, const int align_offset) + { + const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); ++ size_t digest_size = crypto_ahash_digestsize(tfm); + unsigned int i, j, k, temp; + struct scatterlist sg[8]; + char *result; +@@ -274,7 +285,7 @@ static int __test_hash(struct crypto_aha + char *xbuf[XBUFSIZE]; + int ret = -ENOMEM; + +- result = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL); ++ result = kmalloc(digest_size, GFP_KERNEL); + if (!result) + return ret; + key = kmalloc(MAX_KEYLEN, GFP_KERNEL); +@@ -304,7 +315,7 @@ static int __test_hash(struct crypto_aha + goto out; + + j++; +- memset(result, 0, MAX_DIGEST_SIZE); ++ memset(result, 0, digest_size); + + hash_buff = xbuf[0]; + hash_buff += align_offset; +@@ -379,7 +390,7 @@ static int __test_hash(struct crypto_aha + continue; + + j++; +- memset(result, 0, MAX_DIGEST_SIZE); ++ memset(result, 0, digest_size); + + temp = 0; + sg_init_table(sg, template[i].np); +@@ -457,7 +468,7 @@ static int __test_hash(struct crypto_aha + continue; + + j++; +- memset(result, 0, MAX_DIGEST_SIZE); ++ memset(result, 0, digest_size); + + ret = -EINVAL; + hash_buff = xbuf[0]; +@@ -536,7 +547,8 @@ out_nobuf: + return ret; + } + +-static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, ++static int test_hash(struct crypto_ahash *tfm, ++ const struct hash_testvec *template, + unsigned int tcount, bool use_digest) + { + unsigned int alignmask; +@@ -564,7 +576,7 @@ static int test_hash(struct crypto_ahash + } + + static int __test_aead(struct crypto_aead *tfm, int enc, +- struct aead_testvec *template, unsigned int tcount, ++ const struct aead_testvec *template, unsigned int tcount, + const bool diff_dst, const int align_offset) + { + const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)); +@@ -955,7 +967,7 @@ out_noxbuf: + } + + static int test_aead(struct crypto_aead *tfm, int enc, +- struct aead_testvec *template, unsigned int tcount) ++ const struct aead_testvec *template, unsigned int tcount) + { + unsigned int alignmask; + int ret; +@@ -987,8 +999,236 @@ static int test_aead(struct crypto_aead + return 0; + } + ++static int __test_tls(struct crypto_aead *tfm, int enc, ++ struct tls_testvec *template, unsigned int tcount, ++ const bool diff_dst) ++{ ++ const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)); ++ unsigned int i, k, authsize; ++ char *q; ++ struct aead_request *req; ++ struct scatterlist *sg; ++ struct scatterlist *sgout; ++ const char *e, *d; ++ struct tcrypt_result 
result;
++	void *input;
++	void *output;
++	void *assoc;
++	char *iv;
++	char *key;
++	char *xbuf[XBUFSIZE];
++	char *xoutbuf[XBUFSIZE];
++	char *axbuf[XBUFSIZE];
++	int ret = -ENOMEM;
++
++	if (testmgr_alloc_buf(xbuf))
++		goto out_noxbuf;
++
++	if (diff_dst && testmgr_alloc_buf(xoutbuf))
++		goto out_nooutbuf;
++
++	if (testmgr_alloc_buf(axbuf))
++		goto out_noaxbuf;
++
++	iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
++	if (!iv)
++		goto out_noiv;
++
++	key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
++	if (!key)
++		goto out_nokey;
++
++	sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
++	if (!sg)
++		goto out_nosg;
++
++	sgout = sg + 8;
++
++	d = diff_dst ? "-ddst" : "";
++	e = enc ? "encryption" : "decryption";
++
++	init_completion(&result.completion);
++
++	req = aead_request_alloc(tfm, GFP_KERNEL);
++	if (!req) {
++		pr_err("alg: tls%s: Failed to allocate request for %s\n",
++		       d, algo);
++		goto out;
++	}
++
++	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
++				  tcrypt_complete, &result);
++
++	for (i = 0; i < tcount; i++) {
++		input = xbuf[0];
++		assoc = axbuf[0];
++
++		ret = -EINVAL;
++		if (WARN_ON(template[i].ilen > PAGE_SIZE ||
++			    template[i].alen > PAGE_SIZE))
++			goto out;
++
++		memcpy(assoc, template[i].assoc, template[i].alen);
++		memcpy(input, template[i].input, template[i].ilen);
++
++		if (template[i].iv)
++			memcpy(iv, template[i].iv, MAX_IVLEN);
++		else
++			memset(iv, 0, MAX_IVLEN);
++
++		crypto_aead_clear_flags(tfm, ~0);
++
++		if (template[i].klen > MAX_KEYLEN) {
++			pr_err("alg: tls%s: setkey failed on test %d for %s: key size %d > %d\n",
++			       d, i, algo, template[i].klen, MAX_KEYLEN);
++			ret = -EINVAL;
++			goto out;
++		}
++		memcpy(key, template[i].key, template[i].klen);
++
++		ret = crypto_aead_setkey(tfm, key, template[i].klen);
++		if (!ret == template[i].fail) {
++			pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
++			       d, i, algo, crypto_aead_get_flags(tfm));
++			goto out;
++		} else if (ret)
++			continue;
++
++		authsize = 20;	/* SHA-1 HMAC digest size */
++		ret = crypto_aead_setauthsize(tfm, authsize);
++		if (ret) {
++			pr_err("alg: tls%s: Failed to set authsize to %u on test %d for %s\n",
++			       d, authsize, i, algo);
++			goto out;
++		}
++
++		k = !!template[i].alen;
++		sg_init_table(sg, k + 1);
++		sg_set_buf(&sg[0], assoc, template[i].alen);
++		sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
++					   template[i].ilen));
++		output = input;
++
++		if (diff_dst) {
++			sg_init_table(sgout, k + 1);
++			sg_set_buf(&sgout[0], assoc, template[i].alen);
++
++			output = xoutbuf[0];
++			sg_set_buf(&sgout[k], output,
++				   (enc ? template[i].rlen : template[i].ilen));
++		}
++
++		aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
++				       template[i].ilen, iv);
++
++		aead_request_set_ad(req, template[i].alen);
++
++		ret = enc ?
crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
++
++		switch (ret) {
++		case 0:
++			if (template[i].novrfy) {
++				/* verification was supposed to fail */
++				pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
++				       d, e, i, algo);
++				/* so really, we got a bad message */
++				ret = -EBADMSG;
++				goto out;
++			}
++			break;
++		case -EINPROGRESS:
++		case -EBUSY:
++			wait_for_completion(&result.completion);
++			reinit_completion(&result.completion);
++			ret = result.err;
++			if (!ret)
++				break;
++		case -EBADMSG:
++			/* verification failure was expected */
++			if (template[i].novrfy)
++				continue;
++			/* fall through */
++		default:
++			pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
++			       d, e, i, algo, -ret);
++			goto out;
++		}
++
++		q = output;
++		if (memcmp(q, template[i].result, template[i].rlen)) {
++			pr_err("alg: tls%s: Test %d failed on %s for %s\n",
++			       d, i, e, algo);
++			hexdump(q, template[i].rlen);
++			pr_err("should be:\n");
++			hexdump(template[i].result, template[i].rlen);
++			ret = -EINVAL;
++			goto out;
++		}
++	}
++
++out:
++	aead_request_free(req);
++
++	kfree(sg);
++out_nosg:
++	kfree(key);
++out_nokey:
++	kfree(iv);
++out_noiv:
++	testmgr_free_buf(axbuf);
++out_noaxbuf:
++	if (diff_dst)
++		testmgr_free_buf(xoutbuf);
++out_nooutbuf:
++	testmgr_free_buf(xbuf);
++out_noxbuf:
++	return ret;
++}
++
++static int test_tls(struct crypto_aead *tfm, int enc,
++		    struct tls_testvec *template, unsigned int tcount)
++{
++	int ret;
++	/* test 'dst == src' case */
++	ret = __test_tls(tfm, enc, template, tcount, false);
++	if (ret)
++		return ret;
++	/* test 'dst != src' case */
++	return __test_tls(tfm, enc, template, tcount, true);
++}
++
++static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
++			u32 type, u32 mask)
++{
++	struct crypto_aead *tfm;
++	int err = 0;
++
++	tfm = crypto_alloc_aead(driver, type, mask);
++	if (IS_ERR(tfm)) {
++		pr_err("alg: tls: Failed to load transform for %s: %ld\n",
++		       driver, PTR_ERR(tfm));
++		return PTR_ERR(tfm);
++	}
++
++	if (desc->suite.tls.enc.vecs) {
++		err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
++			       desc->suite.tls.enc.count);
++		if (err)
++			goto out;
++	}
++
++	if (!err && desc->suite.tls.dec.vecs)
++		err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
++			       desc->suite.tls.dec.count);
++
++out:
++	crypto_free_aead(tfm);
++	return err;
++}
++
+ static int test_cipher(struct crypto_cipher *tfm, int enc,
+-		       struct cipher_testvec *template, unsigned int tcount)
++		       const struct cipher_testvec *template,
++		       unsigned int tcount)
+ {
+ 	const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm));
+ 	unsigned int i, j, k;
+@@ -1066,7 +1306,8 @@ out_nobuf:
+ }
+
+ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
+-			   struct cipher_testvec *template, unsigned int tcount,
++			   const struct cipher_testvec *template,
++			   unsigned int tcount,
+ 			   const bool diff_dst, const int align_offset)
+ {
+ 	const char *algo =
+@@ -1330,7 +1571,8 @@ out_nobuf:
+ }
+
+ static int test_skcipher(struct crypto_skcipher *tfm, int enc,
+-			 struct cipher_testvec *template, unsigned int tcount)
++			 const struct cipher_testvec *template,
++			 unsigned int tcount)
+ {
+ 	unsigned int alignmask;
+ 	int ret;
+@@ -1362,8 +1604,10 @@ static int test_skcipher(struct crypto_s
+ 	return 0;
+ }
+
+-static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
+-		     struct comp_testvec *dtemplate, int ctcount, int dtcount)
++static int test_comp(struct crypto_comp *tfm,
++		     const struct comp_testvec *ctemplate,
++		     const
struct comp_testvec *dtemplate, ++ int ctcount, int dtcount) + { + const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm)); + unsigned int i; +@@ -1442,7 +1686,154 @@ out: + return ret; + } + +-static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template, ++static int test_acomp(struct crypto_acomp *tfm, ++ const struct comp_testvec *ctemplate, ++ const struct comp_testvec *dtemplate, ++ int ctcount, int dtcount) ++{ ++ const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)); ++ unsigned int i; ++ char *output; ++ int ret; ++ struct scatterlist src, dst; ++ struct acomp_req *req; ++ struct tcrypt_result result; ++ ++ output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL); ++ if (!output) ++ return -ENOMEM; ++ ++ for (i = 0; i < ctcount; i++) { ++ unsigned int dlen = COMP_BUF_SIZE; ++ int ilen = ctemplate[i].inlen; ++ void *input_vec; ++ ++ input_vec = kmemdup(ctemplate[i].input, ilen, GFP_KERNEL); ++ if (!input_vec) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ memset(output, 0, dlen); ++ init_completion(&result.completion); ++ sg_init_one(&src, input_vec, ilen); ++ sg_init_one(&dst, output, dlen); ++ ++ req = acomp_request_alloc(tfm); ++ if (!req) { ++ pr_err("alg: acomp: request alloc failed for %s\n", ++ algo); ++ kfree(input_vec); ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ acomp_request_set_params(req, &src, &dst, ilen, dlen); ++ acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, ++ tcrypt_complete, &result); ++ ++ ret = wait_async_op(&result, crypto_acomp_compress(req)); ++ if (ret) { ++ pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n", ++ i + 1, algo, -ret); ++ kfree(input_vec); ++ acomp_request_free(req); ++ goto out; ++ } ++ ++ if (req->dlen != ctemplate[i].outlen) { ++ pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n", ++ i + 1, algo, req->dlen); ++ ret = -EINVAL; ++ kfree(input_vec); ++ acomp_request_free(req); ++ goto out; ++ } ++ ++ if (memcmp(output, ctemplate[i].output, req->dlen)) { ++ pr_err("alg: acomp: Compression test %d failed for %s\n", ++ i + 1, algo); ++ hexdump(output, req->dlen); ++ ret = -EINVAL; ++ kfree(input_vec); ++ acomp_request_free(req); ++ goto out; ++ } ++ ++ kfree(input_vec); ++ acomp_request_free(req); ++ } ++ ++ for (i = 0; i < dtcount; i++) { ++ unsigned int dlen = COMP_BUF_SIZE; ++ int ilen = dtemplate[i].inlen; ++ void *input_vec; ++ ++ input_vec = kmemdup(dtemplate[i].input, ilen, GFP_KERNEL); ++ if (!input_vec) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ memset(output, 0, dlen); ++ init_completion(&result.completion); ++ sg_init_one(&src, input_vec, ilen); ++ sg_init_one(&dst, output, dlen); ++ ++ req = acomp_request_alloc(tfm); ++ if (!req) { ++ pr_err("alg: acomp: request alloc failed for %s\n", ++ algo); ++ kfree(input_vec); ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ acomp_request_set_params(req, &src, &dst, ilen, dlen); ++ acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, ++ tcrypt_complete, &result); ++ ++ ret = wait_async_op(&result, crypto_acomp_decompress(req)); ++ if (ret) { ++ pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n", ++ i + 1, algo, -ret); ++ kfree(input_vec); ++ acomp_request_free(req); ++ goto out; ++ } ++ ++ if (req->dlen != dtemplate[i].outlen) { ++ pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n", ++ i + 1, algo, req->dlen); ++ ret = -EINVAL; ++ kfree(input_vec); ++ acomp_request_free(req); ++ goto out; ++ } ++ ++ if (memcmp(output, dtemplate[i].output, req->dlen)) { ++ pr_err("alg: acomp: 
Decompression test %d failed for %s\n", ++ i + 1, algo); ++ hexdump(output, req->dlen); ++ ret = -EINVAL; ++ kfree(input_vec); ++ acomp_request_free(req); ++ goto out; ++ } ++ ++ kfree(input_vec); ++ acomp_request_free(req); ++ } ++ ++ ret = 0; ++ ++out: ++ kfree(output); ++ return ret; ++} ++ ++static int test_cprng(struct crypto_rng *tfm, ++ const struct cprng_testvec *template, + unsigned int tcount) + { + const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm)); +@@ -1509,7 +1900,7 @@ static int alg_test_aead(const struct al + struct crypto_aead *tfm; + int err = 0; + +- tfm = crypto_alloc_aead(driver, type | CRYPTO_ALG_INTERNAL, mask); ++ tfm = crypto_alloc_aead(driver, type, mask); + if (IS_ERR(tfm)) { + printk(KERN_ERR "alg: aead: Failed to load transform for %s: " + "%ld\n", driver, PTR_ERR(tfm)); +@@ -1538,7 +1929,7 @@ static int alg_test_cipher(const struct + struct crypto_cipher *tfm; + int err = 0; + +- tfm = crypto_alloc_cipher(driver, type | CRYPTO_ALG_INTERNAL, mask); ++ tfm = crypto_alloc_cipher(driver, type, mask); + if (IS_ERR(tfm)) { + printk(KERN_ERR "alg: cipher: Failed to load transform for " + "%s: %ld\n", driver, PTR_ERR(tfm)); +@@ -1567,7 +1958,7 @@ static int alg_test_skcipher(const struc + struct crypto_skcipher *tfm; + int err = 0; + +- tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask); ++ tfm = crypto_alloc_skcipher(driver, type, mask); + if (IS_ERR(tfm)) { + printk(KERN_ERR "alg: skcipher: Failed to load transform for " + "%s: %ld\n", driver, PTR_ERR(tfm)); +@@ -1593,22 +1984,38 @@ out: + static int alg_test_comp(const struct alg_test_desc *desc, const char *driver, + u32 type, u32 mask) + { +- struct crypto_comp *tfm; ++ struct crypto_comp *comp; ++ struct crypto_acomp *acomp; + int err; ++ u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK; + +- tfm = crypto_alloc_comp(driver, type, mask); +- if (IS_ERR(tfm)) { +- printk(KERN_ERR "alg: comp: Failed to load transform for %s: " +- "%ld\n", driver, PTR_ERR(tfm)); +- return PTR_ERR(tfm); +- } ++ if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) { ++ acomp = crypto_alloc_acomp(driver, type, mask); ++ if (IS_ERR(acomp)) { ++ pr_err("alg: acomp: Failed to load transform for %s: %ld\n", ++ driver, PTR_ERR(acomp)); ++ return PTR_ERR(acomp); ++ } ++ err = test_acomp(acomp, desc->suite.comp.comp.vecs, ++ desc->suite.comp.decomp.vecs, ++ desc->suite.comp.comp.count, ++ desc->suite.comp.decomp.count); ++ crypto_free_acomp(acomp); ++ } else { ++ comp = crypto_alloc_comp(driver, type, mask); ++ if (IS_ERR(comp)) { ++ pr_err("alg: comp: Failed to load transform for %s: %ld\n", ++ driver, PTR_ERR(comp)); ++ return PTR_ERR(comp); ++ } + +- err = test_comp(tfm, desc->suite.comp.comp.vecs, +- desc->suite.comp.decomp.vecs, +- desc->suite.comp.comp.count, +- desc->suite.comp.decomp.count); ++ err = test_comp(comp, desc->suite.comp.comp.vecs, ++ desc->suite.comp.decomp.vecs, ++ desc->suite.comp.comp.count, ++ desc->suite.comp.decomp.count); + +- crypto_free_comp(tfm); ++ crypto_free_comp(comp); ++ } + return err; + } + +@@ -1618,7 +2025,7 @@ static int alg_test_hash(const struct al + struct crypto_ahash *tfm; + int err; + +- tfm = crypto_alloc_ahash(driver, type | CRYPTO_ALG_INTERNAL, mask); ++ tfm = crypto_alloc_ahash(driver, type, mask); + if (IS_ERR(tfm)) { + printk(KERN_ERR "alg: hash: Failed to load transform for %s: " + "%ld\n", driver, PTR_ERR(tfm)); +@@ -1646,7 +2053,7 @@ static int alg_test_crc32c(const struct + if (err) + goto out; + +- tfm = crypto_alloc_shash(driver, type | 
CRYPTO_ALG_INTERNAL, mask); ++ tfm = crypto_alloc_shash(driver, type, mask); + if (IS_ERR(tfm)) { + printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: " + "%ld\n", driver, PTR_ERR(tfm)); +@@ -1688,7 +2095,7 @@ static int alg_test_cprng(const struct a + struct crypto_rng *rng; + int err; + +- rng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask); ++ rng = crypto_alloc_rng(driver, type, mask); + if (IS_ERR(rng)) { + printk(KERN_ERR "alg: cprng: Failed to load transform for %s: " + "%ld\n", driver, PTR_ERR(rng)); +@@ -1703,7 +2110,7 @@ static int alg_test_cprng(const struct a + } + + +-static int drbg_cavs_test(struct drbg_testvec *test, int pr, ++static int drbg_cavs_test(const struct drbg_testvec *test, int pr, + const char *driver, u32 type, u32 mask) + { + int ret = -EAGAIN; +@@ -1715,7 +2122,7 @@ static int drbg_cavs_test(struct drbg_te + if (!buf) + return -ENOMEM; + +- drng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask); ++ drng = crypto_alloc_rng(driver, type, mask); + if (IS_ERR(drng)) { + printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for " + "%s\n", driver); +@@ -1777,7 +2184,7 @@ static int alg_test_drbg(const struct al + int err = 0; + int pr = 0; + int i = 0; +- struct drbg_testvec *template = desc->suite.drbg.vecs; ++ const struct drbg_testvec *template = desc->suite.drbg.vecs; + unsigned int tcount = desc->suite.drbg.count; + + if (0 == memcmp(driver, "drbg_pr_", 8)) +@@ -1796,7 +2203,7 @@ static int alg_test_drbg(const struct al + + } + +-static int do_test_kpp(struct crypto_kpp *tfm, struct kpp_testvec *vec, ++static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, + const char *alg) + { + struct kpp_request *req; +@@ -1888,7 +2295,7 @@ free_req: + } + + static int test_kpp(struct crypto_kpp *tfm, const char *alg, +- struct kpp_testvec *vecs, unsigned int tcount) ++ const struct kpp_testvec *vecs, unsigned int tcount) + { + int ret, i; + +@@ -1909,7 +2316,7 @@ static int alg_test_kpp(const struct alg + struct crypto_kpp *tfm; + int err = 0; + +- tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask); ++ tfm = crypto_alloc_kpp(driver, type, mask); + if (IS_ERR(tfm)) { + pr_err("alg: kpp: Failed to load tfm for %s: %ld\n", + driver, PTR_ERR(tfm)); +@@ -1924,7 +2331,7 @@ static int alg_test_kpp(const struct alg + } + + static int test_akcipher_one(struct crypto_akcipher *tfm, +- struct akcipher_testvec *vecs) ++ const struct akcipher_testvec *vecs) + { + char *xbuf[XBUFSIZE]; + struct akcipher_request *req; +@@ -2044,7 +2451,8 @@ free_xbuf: + } + + static int test_akcipher(struct crypto_akcipher *tfm, const char *alg, +- struct akcipher_testvec *vecs, unsigned int tcount) ++ const struct akcipher_testvec *vecs, ++ unsigned int tcount) + { + const char *algo = + crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm)); +@@ -2068,7 +2476,7 @@ static int alg_test_akcipher(const struc + struct crypto_akcipher *tfm; + int err = 0; + +- tfm = crypto_alloc_akcipher(driver, type | CRYPTO_ALG_INTERNAL, mask); ++ tfm = crypto_alloc_akcipher(driver, type, mask); + if (IS_ERR(tfm)) { + pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n", + driver, PTR_ERR(tfm)); +@@ -2088,112 +2496,23 @@ static int alg_test_null(const struct al + return 0; + } + ++#define __VECS(tv) { .vecs = tv, .count = ARRAY_SIZE(tv) } ++ + /* Please keep this list sorted by algorithm name. 
*/ + static const struct alg_test_desc alg_test_descs[] = { + { +- .alg = "__cbc-cast5-avx", +- .test = alg_test_null, +- }, { +- .alg = "__cbc-cast6-avx", +- .test = alg_test_null, +- }, { +- .alg = "__cbc-serpent-avx", +- .test = alg_test_null, +- }, { +- .alg = "__cbc-serpent-avx2", +- .test = alg_test_null, +- }, { +- .alg = "__cbc-serpent-sse2", +- .test = alg_test_null, +- }, { +- .alg = "__cbc-twofish-avx", +- .test = alg_test_null, +- }, { +- .alg = "__driver-cbc-aes-aesni", +- .test = alg_test_null, +- .fips_allowed = 1, +- }, { +- .alg = "__driver-cbc-camellia-aesni", +- .test = alg_test_null, +- }, { +- .alg = "__driver-cbc-camellia-aesni-avx2", +- .test = alg_test_null, +- }, { +- .alg = "__driver-cbc-cast5-avx", +- .test = alg_test_null, +- }, { +- .alg = "__driver-cbc-cast6-avx", +- .test = alg_test_null, +- }, { +- .alg = "__driver-cbc-serpent-avx", +- .test = alg_test_null, +- }, { +- .alg = "__driver-cbc-serpent-avx2", +- .test = alg_test_null, +- }, { +- .alg = "__driver-cbc-serpent-sse2", +- .test = alg_test_null, +- }, { +- .alg = "__driver-cbc-twofish-avx", +- .test = alg_test_null, +- }, { +- .alg = "__driver-ecb-aes-aesni", +- .test = alg_test_null, +- .fips_allowed = 1, +- }, { +- .alg = "__driver-ecb-camellia-aesni", +- .test = alg_test_null, +- }, { +- .alg = "__driver-ecb-camellia-aesni-avx2", +- .test = alg_test_null, +- }, { +- .alg = "__driver-ecb-cast5-avx", +- .test = alg_test_null, +- }, { +- .alg = "__driver-ecb-cast6-avx", +- .test = alg_test_null, +- }, { +- .alg = "__driver-ecb-serpent-avx", +- .test = alg_test_null, +- }, { +- .alg = "__driver-ecb-serpent-avx2", +- .test = alg_test_null, +- }, { +- .alg = "__driver-ecb-serpent-sse2", +- .test = alg_test_null, +- }, { +- .alg = "__driver-ecb-twofish-avx", +- .test = alg_test_null, +- }, { +- .alg = "__driver-gcm-aes-aesni", +- .test = alg_test_null, +- .fips_allowed = 1, +- }, { +- .alg = "__ghash-pclmulqdqni", +- .test = alg_test_null, +- .fips_allowed = 1, +- }, { + .alg = "ansi_cprng", + .test = alg_test_cprng, + .suite = { +- .cprng = { +- .vecs = ansi_cprng_aes_tv_template, +- .count = ANSI_CPRNG_AES_TEST_VECTORS +- } ++ .cprng = __VECS(ansi_cprng_aes_tv_template) + } + }, { + .alg = "authenc(hmac(md5),ecb(cipher_null))", + .test = alg_test_aead, + .suite = { + .aead = { +- .enc = { +- .vecs = hmac_md5_ecb_cipher_null_enc_tv_template, +- .count = HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = hmac_md5_ecb_cipher_null_dec_tv_template, +- .count = HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS +- } ++ .enc = __VECS(hmac_md5_ecb_cipher_null_enc_tv_template), ++ .dec = __VECS(hmac_md5_ecb_cipher_null_dec_tv_template) + } + } + }, { +@@ -2201,12 +2520,7 @@ static const struct alg_test_desc alg_te + .test = alg_test_aead, + .suite = { + .aead = { +- .enc = { +- .vecs = +- hmac_sha1_aes_cbc_enc_tv_temp, +- .count = +- HMAC_SHA1_AES_CBC_ENC_TEST_VEC +- } ++ .enc = __VECS(hmac_sha1_aes_cbc_enc_tv_temp) + } + } + }, { +@@ -2214,12 +2528,7 @@ static const struct alg_test_desc alg_te + .test = alg_test_aead, + .suite = { + .aead = { +- .enc = { +- .vecs = +- hmac_sha1_des_cbc_enc_tv_temp, +- .count = +- HMAC_SHA1_DES_CBC_ENC_TEST_VEC +- } ++ .enc = __VECS(hmac_sha1_des_cbc_enc_tv_temp) + } + } + }, { +@@ -2228,12 +2537,7 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .suite = { + .aead = { +- .enc = { +- .vecs = +- hmac_sha1_des3_ede_cbc_enc_tv_temp, +- .count = +- HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC +- } ++ .enc = __VECS(hmac_sha1_des3_ede_cbc_enc_tv_temp) + } + } + }, { 
+@@ -2245,18 +2549,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_aead, + .suite = { + .aead = { +- .enc = { +- .vecs = +- hmac_sha1_ecb_cipher_null_enc_tv_temp, +- .count = +- HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC +- }, +- .dec = { +- .vecs = +- hmac_sha1_ecb_cipher_null_dec_tv_temp, +- .count = +- HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC +- } ++ .enc = __VECS(hmac_sha1_ecb_cipher_null_enc_tv_temp), ++ .dec = __VECS(hmac_sha1_ecb_cipher_null_dec_tv_temp) + } + } + }, { +@@ -2268,12 +2562,7 @@ static const struct alg_test_desc alg_te + .test = alg_test_aead, + .suite = { + .aead = { +- .enc = { +- .vecs = +- hmac_sha224_des_cbc_enc_tv_temp, +- .count = +- HMAC_SHA224_DES_CBC_ENC_TEST_VEC +- } ++ .enc = __VECS(hmac_sha224_des_cbc_enc_tv_temp) + } + } + }, { +@@ -2282,12 +2571,7 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .suite = { + .aead = { +- .enc = { +- .vecs = +- hmac_sha224_des3_ede_cbc_enc_tv_temp, +- .count = +- HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC +- } ++ .enc = __VECS(hmac_sha224_des3_ede_cbc_enc_tv_temp) + } + } + }, { +@@ -2296,12 +2580,7 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .suite = { + .aead = { +- .enc = { +- .vecs = +- hmac_sha256_aes_cbc_enc_tv_temp, +- .count = +- HMAC_SHA256_AES_CBC_ENC_TEST_VEC +- } ++ .enc = __VECS(hmac_sha256_aes_cbc_enc_tv_temp) + } + } + }, { +@@ -2309,12 +2588,7 @@ static const struct alg_test_desc alg_te + .test = alg_test_aead, + .suite = { + .aead = { +- .enc = { +- .vecs = +- hmac_sha256_des_cbc_enc_tv_temp, +- .count = +- HMAC_SHA256_DES_CBC_ENC_TEST_VEC +- } ++ .enc = __VECS(hmac_sha256_des_cbc_enc_tv_temp) + } + } + }, { +@@ -2323,12 +2597,7 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .suite = { + .aead = { +- .enc = { +- .vecs = +- hmac_sha256_des3_ede_cbc_enc_tv_temp, +- .count = +- HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC +- } ++ .enc = __VECS(hmac_sha256_des3_ede_cbc_enc_tv_temp) + } + } + }, { +@@ -2344,12 +2613,7 @@ static const struct alg_test_desc alg_te + .test = alg_test_aead, + .suite = { + .aead = { +- .enc = { +- .vecs = +- hmac_sha384_des_cbc_enc_tv_temp, +- .count = +- HMAC_SHA384_DES_CBC_ENC_TEST_VEC +- } ++ .enc = __VECS(hmac_sha384_des_cbc_enc_tv_temp) + } + } + }, { +@@ -2358,12 +2622,7 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .suite = { + .aead = { +- .enc = { +- .vecs = +- hmac_sha384_des3_ede_cbc_enc_tv_temp, +- .count = +- HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC +- } ++ .enc = __VECS(hmac_sha384_des3_ede_cbc_enc_tv_temp) + } + } + }, { +@@ -2380,12 +2639,7 @@ static const struct alg_test_desc alg_te + .test = alg_test_aead, + .suite = { + .aead = { +- .enc = { +- .vecs = +- hmac_sha512_aes_cbc_enc_tv_temp, +- .count = +- HMAC_SHA512_AES_CBC_ENC_TEST_VEC +- } ++ .enc = __VECS(hmac_sha512_aes_cbc_enc_tv_temp) + } + } + }, { +@@ -2393,12 +2647,7 @@ static const struct alg_test_desc alg_te + .test = alg_test_aead, + .suite = { + .aead = { +- .enc = { +- .vecs = +- hmac_sha512_des_cbc_enc_tv_temp, +- .count = +- HMAC_SHA512_DES_CBC_ENC_TEST_VEC +- } ++ .enc = __VECS(hmac_sha512_des_cbc_enc_tv_temp) + } + } + }, { +@@ -2407,12 +2656,7 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .suite = { + .aead = { +- .enc = { +- .vecs = +- hmac_sha512_des3_ede_cbc_enc_tv_temp, +- .count = +- HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC +- } ++ .enc = __VECS(hmac_sha512_des3_ede_cbc_enc_tv_temp) + } + } + }, { +@@ -2429,14 +2673,8 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .suite = { + 
.cipher = { +- .enc = { +- .vecs = aes_cbc_enc_tv_template, +- .count = AES_CBC_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = aes_cbc_dec_tv_template, +- .count = AES_CBC_DEC_TEST_VECTORS +- } ++ .enc = __VECS(aes_cbc_enc_tv_template), ++ .dec = __VECS(aes_cbc_dec_tv_template) + } + } + }, { +@@ -2444,14 +2682,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = anubis_cbc_enc_tv_template, +- .count = ANUBIS_CBC_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = anubis_cbc_dec_tv_template, +- .count = ANUBIS_CBC_DEC_TEST_VECTORS +- } ++ .enc = __VECS(anubis_cbc_enc_tv_template), ++ .dec = __VECS(anubis_cbc_dec_tv_template) + } + } + }, { +@@ -2459,14 +2691,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = bf_cbc_enc_tv_template, +- .count = BF_CBC_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = bf_cbc_dec_tv_template, +- .count = BF_CBC_DEC_TEST_VECTORS +- } ++ .enc = __VECS(bf_cbc_enc_tv_template), ++ .dec = __VECS(bf_cbc_dec_tv_template) + } + } + }, { +@@ -2474,14 +2700,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = camellia_cbc_enc_tv_template, +- .count = CAMELLIA_CBC_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = camellia_cbc_dec_tv_template, +- .count = CAMELLIA_CBC_DEC_TEST_VECTORS +- } ++ .enc = __VECS(camellia_cbc_enc_tv_template), ++ .dec = __VECS(camellia_cbc_dec_tv_template) + } + } + }, { +@@ -2489,14 +2709,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = cast5_cbc_enc_tv_template, +- .count = CAST5_CBC_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = cast5_cbc_dec_tv_template, +- .count = CAST5_CBC_DEC_TEST_VECTORS +- } ++ .enc = __VECS(cast5_cbc_enc_tv_template), ++ .dec = __VECS(cast5_cbc_dec_tv_template) + } + } + }, { +@@ -2504,14 +2718,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = cast6_cbc_enc_tv_template, +- .count = CAST6_CBC_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = cast6_cbc_dec_tv_template, +- .count = CAST6_CBC_DEC_TEST_VECTORS +- } ++ .enc = __VECS(cast6_cbc_enc_tv_template), ++ .dec = __VECS(cast6_cbc_dec_tv_template) + } + } + }, { +@@ -2519,14 +2727,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = des_cbc_enc_tv_template, +- .count = DES_CBC_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = des_cbc_dec_tv_template, +- .count = DES_CBC_DEC_TEST_VECTORS +- } ++ .enc = __VECS(des_cbc_enc_tv_template), ++ .dec = __VECS(des_cbc_dec_tv_template) + } + } + }, { +@@ -2535,14 +2737,8 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .suite = { + .cipher = { +- .enc = { +- .vecs = des3_ede_cbc_enc_tv_template, +- .count = DES3_EDE_CBC_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = des3_ede_cbc_dec_tv_template, +- .count = DES3_EDE_CBC_DEC_TEST_VECTORS +- } ++ .enc = __VECS(des3_ede_cbc_enc_tv_template), ++ .dec = __VECS(des3_ede_cbc_dec_tv_template) + } + } + }, { +@@ -2550,14 +2746,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = serpent_cbc_enc_tv_template, +- .count = SERPENT_CBC_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = serpent_cbc_dec_tv_template, +- .count = SERPENT_CBC_DEC_TEST_VECTORS +- } ++ .enc = 
__VECS(serpent_cbc_enc_tv_template), ++ .dec = __VECS(serpent_cbc_dec_tv_template) + } + } + }, { +@@ -2565,30 +2755,25 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = tf_cbc_enc_tv_template, +- .count = TF_CBC_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = tf_cbc_dec_tv_template, +- .count = TF_CBC_DEC_TEST_VECTORS +- } ++ .enc = __VECS(tf_cbc_enc_tv_template), ++ .dec = __VECS(tf_cbc_dec_tv_template) + } + } + }, { ++ .alg = "cbcmac(aes)", ++ .fips_allowed = 1, ++ .test = alg_test_hash, ++ .suite = { ++ .hash = __VECS(aes_cbcmac_tv_template) ++ } ++ }, { + .alg = "ccm(aes)", + .test = alg_test_aead, + .fips_allowed = 1, + .suite = { + .aead = { +- .enc = { +- .vecs = aes_ccm_enc_tv_template, +- .count = AES_CCM_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = aes_ccm_dec_tv_template, +- .count = AES_CCM_DEC_TEST_VECTORS +- } ++ .enc = __VECS(aes_ccm_enc_tv_template), ++ .dec = __VECS(aes_ccm_dec_tv_template) + } + } + }, { +@@ -2596,14 +2781,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = chacha20_enc_tv_template, +- .count = CHACHA20_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = chacha20_enc_tv_template, +- .count = CHACHA20_ENC_TEST_VECTORS +- }, ++ .enc = __VECS(chacha20_enc_tv_template), ++ .dec = __VECS(chacha20_enc_tv_template), + } + } + }, { +@@ -2611,20 +2790,14 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = aes_cmac128_tv_template, +- .count = CMAC_AES_TEST_VECTORS +- } ++ .hash = __VECS(aes_cmac128_tv_template) + } + }, { + .alg = "cmac(des3_ede)", + .fips_allowed = 1, + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = des3_ede_cmac64_tv_template, +- .count = CMAC_DES3_EDE_TEST_VECTORS +- } ++ .hash = __VECS(des3_ede_cmac64_tv_template) + } + }, { + .alg = "compress_null", +@@ -2633,94 +2806,30 @@ static const struct alg_test_desc alg_te + .alg = "crc32", + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = crc32_tv_template, +- .count = CRC32_TEST_VECTORS +- } ++ .hash = __VECS(crc32_tv_template) + } + }, { + .alg = "crc32c", + .test = alg_test_crc32c, + .fips_allowed = 1, + .suite = { +- .hash = { +- .vecs = crc32c_tv_template, +- .count = CRC32C_TEST_VECTORS +- } ++ .hash = __VECS(crc32c_tv_template) + } + }, { + .alg = "crct10dif", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { +- .hash = { +- .vecs = crct10dif_tv_template, +- .count = CRCT10DIF_TEST_VECTORS +- } ++ .hash = __VECS(crct10dif_tv_template) + } + }, { +- .alg = "cryptd(__driver-cbc-aes-aesni)", +- .test = alg_test_null, +- .fips_allowed = 1, +- }, { +- .alg = "cryptd(__driver-cbc-camellia-aesni)", +- .test = alg_test_null, +- }, { +- .alg = "cryptd(__driver-cbc-camellia-aesni-avx2)", +- .test = alg_test_null, +- }, { +- .alg = "cryptd(__driver-cbc-serpent-avx2)", +- .test = alg_test_null, +- }, { +- .alg = "cryptd(__driver-ecb-aes-aesni)", +- .test = alg_test_null, +- .fips_allowed = 1, +- }, { +- .alg = "cryptd(__driver-ecb-camellia-aesni)", +- .test = alg_test_null, +- }, { +- .alg = "cryptd(__driver-ecb-camellia-aesni-avx2)", +- .test = alg_test_null, +- }, { +- .alg = "cryptd(__driver-ecb-cast5-avx)", +- .test = alg_test_null, +- }, { +- .alg = "cryptd(__driver-ecb-cast6-avx)", +- .test = alg_test_null, +- }, { +- .alg = "cryptd(__driver-ecb-serpent-avx)", +- .test = alg_test_null, +- }, { +- .alg = "cryptd(__driver-ecb-serpent-avx2)", +- .test = 
alg_test_null, +- }, { +- .alg = "cryptd(__driver-ecb-serpent-sse2)", +- .test = alg_test_null, +- }, { +- .alg = "cryptd(__driver-ecb-twofish-avx)", +- .test = alg_test_null, +- }, { +- .alg = "cryptd(__driver-gcm-aes-aesni)", +- .test = alg_test_null, +- .fips_allowed = 1, +- }, { +- .alg = "cryptd(__ghash-pclmulqdqni)", +- .test = alg_test_null, +- .fips_allowed = 1, +- }, { + .alg = "ctr(aes)", + .test = alg_test_skcipher, + .fips_allowed = 1, + .suite = { + .cipher = { +- .enc = { +- .vecs = aes_ctr_enc_tv_template, +- .count = AES_CTR_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = aes_ctr_dec_tv_template, +- .count = AES_CTR_DEC_TEST_VECTORS +- } ++ .enc = __VECS(aes_ctr_enc_tv_template), ++ .dec = __VECS(aes_ctr_dec_tv_template) + } + } + }, { +@@ -2728,14 +2837,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = bf_ctr_enc_tv_template, +- .count = BF_CTR_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = bf_ctr_dec_tv_template, +- .count = BF_CTR_DEC_TEST_VECTORS +- } ++ .enc = __VECS(bf_ctr_enc_tv_template), ++ .dec = __VECS(bf_ctr_dec_tv_template) + } + } + }, { +@@ -2743,14 +2846,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = camellia_ctr_enc_tv_template, +- .count = CAMELLIA_CTR_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = camellia_ctr_dec_tv_template, +- .count = CAMELLIA_CTR_DEC_TEST_VECTORS +- } ++ .enc = __VECS(camellia_ctr_enc_tv_template), ++ .dec = __VECS(camellia_ctr_dec_tv_template) + } + } + }, { +@@ -2758,14 +2855,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = cast5_ctr_enc_tv_template, +- .count = CAST5_CTR_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = cast5_ctr_dec_tv_template, +- .count = CAST5_CTR_DEC_TEST_VECTORS +- } ++ .enc = __VECS(cast5_ctr_enc_tv_template), ++ .dec = __VECS(cast5_ctr_dec_tv_template) + } + } + }, { +@@ -2773,14 +2864,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = cast6_ctr_enc_tv_template, +- .count = CAST6_CTR_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = cast6_ctr_dec_tv_template, +- .count = CAST6_CTR_DEC_TEST_VECTORS +- } ++ .enc = __VECS(cast6_ctr_enc_tv_template), ++ .dec = __VECS(cast6_ctr_dec_tv_template) + } + } + }, { +@@ -2788,29 +2873,18 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = des_ctr_enc_tv_template, +- .count = DES_CTR_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = des_ctr_dec_tv_template, +- .count = DES_CTR_DEC_TEST_VECTORS +- } ++ .enc = __VECS(des_ctr_enc_tv_template), ++ .dec = __VECS(des_ctr_dec_tv_template) + } + } + }, { + .alg = "ctr(des3_ede)", + .test = alg_test_skcipher, ++ .fips_allowed = 1, + .suite = { + .cipher = { +- .enc = { +- .vecs = des3_ede_ctr_enc_tv_template, +- .count = DES3_EDE_CTR_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = des3_ede_ctr_dec_tv_template, +- .count = DES3_EDE_CTR_DEC_TEST_VECTORS +- } ++ .enc = __VECS(des3_ede_ctr_enc_tv_template), ++ .dec = __VECS(des3_ede_ctr_dec_tv_template) + } + } + }, { +@@ -2818,14 +2892,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = serpent_ctr_enc_tv_template, +- .count = SERPENT_CTR_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = serpent_ctr_dec_tv_template, +- .count = SERPENT_CTR_DEC_TEST_VECTORS +- } ++ 
.enc = __VECS(serpent_ctr_enc_tv_template), ++ .dec = __VECS(serpent_ctr_dec_tv_template) + } + } + }, { +@@ -2833,14 +2901,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = tf_ctr_enc_tv_template, +- .count = TF_CTR_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = tf_ctr_dec_tv_template, +- .count = TF_CTR_DEC_TEST_VECTORS +- } ++ .enc = __VECS(tf_ctr_enc_tv_template), ++ .dec = __VECS(tf_ctr_dec_tv_template) + } + } + }, { +@@ -2848,14 +2910,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = cts_mode_enc_tv_template, +- .count = CTS_MODE_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = cts_mode_dec_tv_template, +- .count = CTS_MODE_DEC_TEST_VECTORS +- } ++ .enc = __VECS(cts_mode_enc_tv_template), ++ .dec = __VECS(cts_mode_dec_tv_template) + } + } + }, { +@@ -2864,14 +2920,8 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .suite = { + .comp = { +- .comp = { +- .vecs = deflate_comp_tv_template, +- .count = DEFLATE_COMP_TEST_VECTORS +- }, +- .decomp = { +- .vecs = deflate_decomp_tv_template, +- .count = DEFLATE_DECOMP_TEST_VECTORS +- } ++ .comp = __VECS(deflate_comp_tv_template), ++ .decomp = __VECS(deflate_decomp_tv_template) + } + } + }, { +@@ -2879,10 +2929,7 @@ static const struct alg_test_desc alg_te + .test = alg_test_kpp, + .fips_allowed = 1, + .suite = { +- .kpp = { +- .vecs = dh_tv_template, +- .count = DH_TEST_VECTORS +- } ++ .kpp = __VECS(dh_tv_template) + } + }, { + .alg = "digest_null", +@@ -2892,30 +2939,21 @@ static const struct alg_test_desc alg_te + .test = alg_test_drbg, + .fips_allowed = 1, + .suite = { +- .drbg = { +- .vecs = drbg_nopr_ctr_aes128_tv_template, +- .count = ARRAY_SIZE(drbg_nopr_ctr_aes128_tv_template) +- } ++ .drbg = __VECS(drbg_nopr_ctr_aes128_tv_template) + } + }, { + .alg = "drbg_nopr_ctr_aes192", + .test = alg_test_drbg, + .fips_allowed = 1, + .suite = { +- .drbg = { +- .vecs = drbg_nopr_ctr_aes192_tv_template, +- .count = ARRAY_SIZE(drbg_nopr_ctr_aes192_tv_template) +- } ++ .drbg = __VECS(drbg_nopr_ctr_aes192_tv_template) + } + }, { + .alg = "drbg_nopr_ctr_aes256", + .test = alg_test_drbg, + .fips_allowed = 1, + .suite = { +- .drbg = { +- .vecs = drbg_nopr_ctr_aes256_tv_template, +- .count = ARRAY_SIZE(drbg_nopr_ctr_aes256_tv_template) +- } ++ .drbg = __VECS(drbg_nopr_ctr_aes256_tv_template) + } + }, { + /* +@@ -2930,11 +2968,7 @@ static const struct alg_test_desc alg_te + .test = alg_test_drbg, + .fips_allowed = 1, + .suite = { +- .drbg = { +- .vecs = drbg_nopr_hmac_sha256_tv_template, +- .count = +- ARRAY_SIZE(drbg_nopr_hmac_sha256_tv_template) +- } ++ .drbg = __VECS(drbg_nopr_hmac_sha256_tv_template) + } + }, { + /* covered by drbg_nopr_hmac_sha256 test */ +@@ -2954,10 +2988,7 @@ static const struct alg_test_desc alg_te + .test = alg_test_drbg, + .fips_allowed = 1, + .suite = { +- .drbg = { +- .vecs = drbg_nopr_sha256_tv_template, +- .count = ARRAY_SIZE(drbg_nopr_sha256_tv_template) +- } ++ .drbg = __VECS(drbg_nopr_sha256_tv_template) + } + }, { + /* covered by drbg_nopr_sha256 test */ +@@ -2973,10 +3004,7 @@ static const struct alg_test_desc alg_te + .test = alg_test_drbg, + .fips_allowed = 1, + .suite = { +- .drbg = { +- .vecs = drbg_pr_ctr_aes128_tv_template, +- .count = ARRAY_SIZE(drbg_pr_ctr_aes128_tv_template) +- } ++ .drbg = __VECS(drbg_pr_ctr_aes128_tv_template) + } + }, { + /* covered by drbg_pr_ctr_aes128 test */ +@@ -2996,10 +3024,7 @@ static const struct alg_test_desc alg_te + 
.test = alg_test_drbg, + .fips_allowed = 1, + .suite = { +- .drbg = { +- .vecs = drbg_pr_hmac_sha256_tv_template, +- .count = ARRAY_SIZE(drbg_pr_hmac_sha256_tv_template) +- } ++ .drbg = __VECS(drbg_pr_hmac_sha256_tv_template) + } + }, { + /* covered by drbg_pr_hmac_sha256 test */ +@@ -3019,10 +3044,7 @@ static const struct alg_test_desc alg_te + .test = alg_test_drbg, + .fips_allowed = 1, + .suite = { +- .drbg = { +- .vecs = drbg_pr_sha256_tv_template, +- .count = ARRAY_SIZE(drbg_pr_sha256_tv_template) +- } ++ .drbg = __VECS(drbg_pr_sha256_tv_template) + } + }, { + /* covered by drbg_pr_sha256 test */ +@@ -3034,23 +3056,13 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .test = alg_test_null, + }, { +- .alg = "ecb(__aes-aesni)", +- .test = alg_test_null, +- .fips_allowed = 1, +- }, { + .alg = "ecb(aes)", + .test = alg_test_skcipher, + .fips_allowed = 1, + .suite = { + .cipher = { +- .enc = { +- .vecs = aes_enc_tv_template, +- .count = AES_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = aes_dec_tv_template, +- .count = AES_DEC_TEST_VECTORS +- } ++ .enc = __VECS(aes_enc_tv_template), ++ .dec = __VECS(aes_dec_tv_template) + } + } + }, { +@@ -3058,14 +3070,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = anubis_enc_tv_template, +- .count = ANUBIS_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = anubis_dec_tv_template, +- .count = ANUBIS_DEC_TEST_VECTORS +- } ++ .enc = __VECS(anubis_enc_tv_template), ++ .dec = __VECS(anubis_dec_tv_template) + } + } + }, { +@@ -3073,14 +3079,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = arc4_enc_tv_template, +- .count = ARC4_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = arc4_dec_tv_template, +- .count = ARC4_DEC_TEST_VECTORS +- } ++ .enc = __VECS(arc4_enc_tv_template), ++ .dec = __VECS(arc4_dec_tv_template) + } + } + }, { +@@ -3088,14 +3088,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = bf_enc_tv_template, +- .count = BF_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = bf_dec_tv_template, +- .count = BF_DEC_TEST_VECTORS +- } ++ .enc = __VECS(bf_enc_tv_template), ++ .dec = __VECS(bf_dec_tv_template) + } + } + }, { +@@ -3103,14 +3097,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = camellia_enc_tv_template, +- .count = CAMELLIA_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = camellia_dec_tv_template, +- .count = CAMELLIA_DEC_TEST_VECTORS +- } ++ .enc = __VECS(camellia_enc_tv_template), ++ .dec = __VECS(camellia_dec_tv_template) + } + } + }, { +@@ -3118,14 +3106,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = cast5_enc_tv_template, +- .count = CAST5_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = cast5_dec_tv_template, +- .count = CAST5_DEC_TEST_VECTORS +- } ++ .enc = __VECS(cast5_enc_tv_template), ++ .dec = __VECS(cast5_dec_tv_template) + } + } + }, { +@@ -3133,14 +3115,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = cast6_enc_tv_template, +- .count = CAST6_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = cast6_dec_tv_template, +- .count = CAST6_DEC_TEST_VECTORS +- } ++ .enc = __VECS(cast6_enc_tv_template), ++ .dec = __VECS(cast6_dec_tv_template) + } + } + }, { +@@ -3151,14 +3127,8 @@ static const struct 
alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = des_enc_tv_template, +- .count = DES_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = des_dec_tv_template, +- .count = DES_DEC_TEST_VECTORS +- } ++ .enc = __VECS(des_enc_tv_template), ++ .dec = __VECS(des_dec_tv_template) + } + } + }, { +@@ -3167,14 +3137,8 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .suite = { + .cipher = { +- .enc = { +- .vecs = des3_ede_enc_tv_template, +- .count = DES3_EDE_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = des3_ede_dec_tv_template, +- .count = DES3_EDE_DEC_TEST_VECTORS +- } ++ .enc = __VECS(des3_ede_enc_tv_template), ++ .dec = __VECS(des3_ede_dec_tv_template) + } + } + }, { +@@ -3197,14 +3161,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = khazad_enc_tv_template, +- .count = KHAZAD_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = khazad_dec_tv_template, +- .count = KHAZAD_DEC_TEST_VECTORS +- } ++ .enc = __VECS(khazad_enc_tv_template), ++ .dec = __VECS(khazad_dec_tv_template) + } + } + }, { +@@ -3212,14 +3170,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = seed_enc_tv_template, +- .count = SEED_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = seed_dec_tv_template, +- .count = SEED_DEC_TEST_VECTORS +- } ++ .enc = __VECS(seed_enc_tv_template), ++ .dec = __VECS(seed_dec_tv_template) + } + } + }, { +@@ -3227,14 +3179,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = serpent_enc_tv_template, +- .count = SERPENT_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = serpent_dec_tv_template, +- .count = SERPENT_DEC_TEST_VECTORS +- } ++ .enc = __VECS(serpent_enc_tv_template), ++ .dec = __VECS(serpent_dec_tv_template) + } + } + }, { +@@ -3242,14 +3188,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = tea_enc_tv_template, +- .count = TEA_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = tea_dec_tv_template, +- .count = TEA_DEC_TEST_VECTORS +- } ++ .enc = __VECS(tea_enc_tv_template), ++ .dec = __VECS(tea_dec_tv_template) + } + } + }, { +@@ -3257,14 +3197,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = tnepres_enc_tv_template, +- .count = TNEPRES_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = tnepres_dec_tv_template, +- .count = TNEPRES_DEC_TEST_VECTORS +- } ++ .enc = __VECS(tnepres_enc_tv_template), ++ .dec = __VECS(tnepres_dec_tv_template) + } + } + }, { +@@ -3272,14 +3206,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = tf_enc_tv_template, +- .count = TF_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = tf_dec_tv_template, +- .count = TF_DEC_TEST_VECTORS +- } ++ .enc = __VECS(tf_enc_tv_template), ++ .dec = __VECS(tf_dec_tv_template) + } + } + }, { +@@ -3287,14 +3215,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = xeta_enc_tv_template, +- .count = XETA_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = xeta_dec_tv_template, +- .count = XETA_DEC_TEST_VECTORS +- } ++ .enc = __VECS(xeta_enc_tv_template), ++ .dec = __VECS(xeta_dec_tv_template) + } + } + }, { +@@ -3302,14 +3224,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite 
= { + .cipher = { +- .enc = { +- .vecs = xtea_enc_tv_template, +- .count = XTEA_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = xtea_dec_tv_template, +- .count = XTEA_DEC_TEST_VECTORS +- } ++ .enc = __VECS(xtea_enc_tv_template), ++ .dec = __VECS(xtea_dec_tv_template) + } + } + }, { +@@ -3317,10 +3233,7 @@ static const struct alg_test_desc alg_te + .test = alg_test_kpp, + .fips_allowed = 1, + .suite = { +- .kpp = { +- .vecs = ecdh_tv_template, +- .count = ECDH_TEST_VECTORS +- } ++ .kpp = __VECS(ecdh_tv_template) + } + }, { + .alg = "gcm(aes)", +@@ -3328,14 +3241,8 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .suite = { + .aead = { +- .enc = { +- .vecs = aes_gcm_enc_tv_template, +- .count = AES_GCM_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = aes_gcm_dec_tv_template, +- .count = AES_GCM_DEC_TEST_VECTORS +- } ++ .enc = __VECS(aes_gcm_enc_tv_template), ++ .dec = __VECS(aes_gcm_dec_tv_template) + } + } + }, { +@@ -3343,136 +3250,94 @@ static const struct alg_test_desc alg_te + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { +- .hash = { +- .vecs = ghash_tv_template, +- .count = GHASH_TEST_VECTORS +- } ++ .hash = __VECS(ghash_tv_template) + } + }, { + .alg = "hmac(crc32)", + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = bfin_crc_tv_template, +- .count = BFIN_CRC_TEST_VECTORS +- } ++ .hash = __VECS(bfin_crc_tv_template) + } + }, { + .alg = "hmac(md5)", + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = hmac_md5_tv_template, +- .count = HMAC_MD5_TEST_VECTORS +- } ++ .hash = __VECS(hmac_md5_tv_template) + } + }, { + .alg = "hmac(rmd128)", + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = hmac_rmd128_tv_template, +- .count = HMAC_RMD128_TEST_VECTORS +- } ++ .hash = __VECS(hmac_rmd128_tv_template) + } + }, { + .alg = "hmac(rmd160)", + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = hmac_rmd160_tv_template, +- .count = HMAC_RMD160_TEST_VECTORS +- } ++ .hash = __VECS(hmac_rmd160_tv_template) + } + }, { + .alg = "hmac(sha1)", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { +- .hash = { +- .vecs = hmac_sha1_tv_template, +- .count = HMAC_SHA1_TEST_VECTORS +- } ++ .hash = __VECS(hmac_sha1_tv_template) + } + }, { + .alg = "hmac(sha224)", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { +- .hash = { +- .vecs = hmac_sha224_tv_template, +- .count = HMAC_SHA224_TEST_VECTORS +- } ++ .hash = __VECS(hmac_sha224_tv_template) + } + }, { + .alg = "hmac(sha256)", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { +- .hash = { +- .vecs = hmac_sha256_tv_template, +- .count = HMAC_SHA256_TEST_VECTORS +- } ++ .hash = __VECS(hmac_sha256_tv_template) + } + }, { + .alg = "hmac(sha3-224)", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { +- .hash = { +- .vecs = hmac_sha3_224_tv_template, +- .count = HMAC_SHA3_224_TEST_VECTORS +- } ++ .hash = __VECS(hmac_sha3_224_tv_template) + } + }, { + .alg = "hmac(sha3-256)", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { +- .hash = { +- .vecs = hmac_sha3_256_tv_template, +- .count = HMAC_SHA3_256_TEST_VECTORS +- } ++ .hash = __VECS(hmac_sha3_256_tv_template) + } + }, { + .alg = "hmac(sha3-384)", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { +- .hash = { +- .vecs = hmac_sha3_384_tv_template, +- .count = HMAC_SHA3_384_TEST_VECTORS +- } ++ .hash = __VECS(hmac_sha3_384_tv_template) + } + }, { + .alg = "hmac(sha3-512)", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { +- .hash = { +- .vecs = hmac_sha3_512_tv_template, +- .count = 
HMAC_SHA3_512_TEST_VECTORS +- } ++ .hash = __VECS(hmac_sha3_512_tv_template) + } + }, { + .alg = "hmac(sha384)", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { +- .hash = { +- .vecs = hmac_sha384_tv_template, +- .count = HMAC_SHA384_TEST_VECTORS +- } ++ .hash = __VECS(hmac_sha384_tv_template) + } + }, { + .alg = "hmac(sha512)", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { +- .hash = { +- .vecs = hmac_sha512_tv_template, +- .count = HMAC_SHA512_TEST_VECTORS +- } ++ .hash = __VECS(hmac_sha512_tv_template) + } + }, { + .alg = "jitterentropy_rng", +@@ -3484,14 +3349,8 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .suite = { + .cipher = { +- .enc = { +- .vecs = aes_kw_enc_tv_template, +- .count = ARRAY_SIZE(aes_kw_enc_tv_template) +- }, +- .dec = { +- .vecs = aes_kw_dec_tv_template, +- .count = ARRAY_SIZE(aes_kw_dec_tv_template) +- } ++ .enc = __VECS(aes_kw_enc_tv_template), ++ .dec = __VECS(aes_kw_dec_tv_template) + } + } + }, { +@@ -3499,14 +3358,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = aes_lrw_enc_tv_template, +- .count = AES_LRW_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = aes_lrw_dec_tv_template, +- .count = AES_LRW_DEC_TEST_VECTORS +- } ++ .enc = __VECS(aes_lrw_enc_tv_template), ++ .dec = __VECS(aes_lrw_dec_tv_template) + } + } + }, { +@@ -3514,14 +3367,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = camellia_lrw_enc_tv_template, +- .count = CAMELLIA_LRW_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = camellia_lrw_dec_tv_template, +- .count = CAMELLIA_LRW_DEC_TEST_VECTORS +- } ++ .enc = __VECS(camellia_lrw_enc_tv_template), ++ .dec = __VECS(camellia_lrw_dec_tv_template) + } + } + }, { +@@ -3529,14 +3376,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = cast6_lrw_enc_tv_template, +- .count = CAST6_LRW_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = cast6_lrw_dec_tv_template, +- .count = CAST6_LRW_DEC_TEST_VECTORS +- } ++ .enc = __VECS(cast6_lrw_enc_tv_template), ++ .dec = __VECS(cast6_lrw_dec_tv_template) + } + } + }, { +@@ -3544,14 +3385,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = serpent_lrw_enc_tv_template, +- .count = SERPENT_LRW_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = serpent_lrw_dec_tv_template, +- .count = SERPENT_LRW_DEC_TEST_VECTORS +- } ++ .enc = __VECS(serpent_lrw_enc_tv_template), ++ .dec = __VECS(serpent_lrw_dec_tv_template) + } + } + }, { +@@ -3559,14 +3394,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = tf_lrw_enc_tv_template, +- .count = TF_LRW_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = tf_lrw_dec_tv_template, +- .count = TF_LRW_DEC_TEST_VECTORS +- } ++ .enc = __VECS(tf_lrw_enc_tv_template), ++ .dec = __VECS(tf_lrw_dec_tv_template) + } + } + }, { +@@ -3575,14 +3404,8 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .suite = { + .comp = { +- .comp = { +- .vecs = lz4_comp_tv_template, +- .count = LZ4_COMP_TEST_VECTORS +- }, +- .decomp = { +- .vecs = lz4_decomp_tv_template, +- .count = LZ4_DECOMP_TEST_VECTORS +- } ++ .comp = __VECS(lz4_comp_tv_template), ++ .decomp = __VECS(lz4_decomp_tv_template) + } + } + }, { +@@ -3591,14 +3414,8 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .suite = { + .comp = { 
+- .comp = { +- .vecs = lz4hc_comp_tv_template, +- .count = LZ4HC_COMP_TEST_VECTORS +- }, +- .decomp = { +- .vecs = lz4hc_decomp_tv_template, +- .count = LZ4HC_DECOMP_TEST_VECTORS +- } ++ .comp = __VECS(lz4hc_comp_tv_template), ++ .decomp = __VECS(lz4hc_decomp_tv_template) + } + } + }, { +@@ -3607,42 +3424,27 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .suite = { + .comp = { +- .comp = { +- .vecs = lzo_comp_tv_template, +- .count = LZO_COMP_TEST_VECTORS +- }, +- .decomp = { +- .vecs = lzo_decomp_tv_template, +- .count = LZO_DECOMP_TEST_VECTORS +- } ++ .comp = __VECS(lzo_comp_tv_template), ++ .decomp = __VECS(lzo_decomp_tv_template) + } + } + }, { + .alg = "md4", + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = md4_tv_template, +- .count = MD4_TEST_VECTORS +- } ++ .hash = __VECS(md4_tv_template) + } + }, { + .alg = "md5", + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = md5_tv_template, +- .count = MD5_TEST_VECTORS +- } ++ .hash = __VECS(md5_tv_template) + } + }, { + .alg = "michael_mic", + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = michael_mic_tv_template, +- .count = MICHAEL_MIC_TEST_VECTORS +- } ++ .hash = __VECS(michael_mic_tv_template) + } + }, { + .alg = "ofb(aes)", +@@ -3650,14 +3452,8 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .suite = { + .cipher = { +- .enc = { +- .vecs = aes_ofb_enc_tv_template, +- .count = AES_OFB_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = aes_ofb_dec_tv_template, +- .count = AES_OFB_DEC_TEST_VECTORS +- } ++ .enc = __VECS(aes_ofb_enc_tv_template), ++ .dec = __VECS(aes_ofb_dec_tv_template) + } + } + }, { +@@ -3665,24 +3461,15 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = fcrypt_pcbc_enc_tv_template, +- .count = FCRYPT_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = fcrypt_pcbc_dec_tv_template, +- .count = FCRYPT_DEC_TEST_VECTORS +- } ++ .enc = __VECS(fcrypt_pcbc_enc_tv_template), ++ .dec = __VECS(fcrypt_pcbc_dec_tv_template) + } + } + }, { + .alg = "poly1305", + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = poly1305_tv_template, +- .count = POLY1305_TEST_VECTORS +- } ++ .hash = __VECS(poly1305_tv_template) + } + }, { + .alg = "rfc3686(ctr(aes))", +@@ -3690,14 +3477,8 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .suite = { + .cipher = { +- .enc = { +- .vecs = aes_ctr_rfc3686_enc_tv_template, +- .count = AES_CTR_3686_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = aes_ctr_rfc3686_dec_tv_template, +- .count = AES_CTR_3686_DEC_TEST_VECTORS +- } ++ .enc = __VECS(aes_ctr_rfc3686_enc_tv_template), ++ .dec = __VECS(aes_ctr_rfc3686_dec_tv_template) + } + } + }, { +@@ -3706,14 +3487,8 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .suite = { + .aead = { +- .enc = { +- .vecs = aes_gcm_rfc4106_enc_tv_template, +- .count = AES_GCM_4106_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = aes_gcm_rfc4106_dec_tv_template, +- .count = AES_GCM_4106_DEC_TEST_VECTORS +- } ++ .enc = __VECS(aes_gcm_rfc4106_enc_tv_template), ++ .dec = __VECS(aes_gcm_rfc4106_dec_tv_template) + } + } + }, { +@@ -3722,14 +3497,8 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .suite = { + .aead = { +- .enc = { +- .vecs = aes_ccm_rfc4309_enc_tv_template, +- .count = AES_CCM_4309_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = aes_ccm_rfc4309_dec_tv_template, +- .count = AES_CCM_4309_DEC_TEST_VECTORS +- } ++ .enc = __VECS(aes_ccm_rfc4309_enc_tv_template), ++ .dec = 
__VECS(aes_ccm_rfc4309_dec_tv_template) + } + } + }, { +@@ -3737,14 +3506,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_aead, + .suite = { + .aead = { +- .enc = { +- .vecs = aes_gcm_rfc4543_enc_tv_template, +- .count = AES_GCM_4543_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = aes_gcm_rfc4543_dec_tv_template, +- .count = AES_GCM_4543_DEC_TEST_VECTORS +- }, ++ .enc = __VECS(aes_gcm_rfc4543_enc_tv_template), ++ .dec = __VECS(aes_gcm_rfc4543_dec_tv_template), + } + } + }, { +@@ -3752,14 +3515,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_aead, + .suite = { + .aead = { +- .enc = { +- .vecs = rfc7539_enc_tv_template, +- .count = RFC7539_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = rfc7539_dec_tv_template, +- .count = RFC7539_DEC_TEST_VECTORS +- }, ++ .enc = __VECS(rfc7539_enc_tv_template), ++ .dec = __VECS(rfc7539_dec_tv_template), + } + } + }, { +@@ -3767,71 +3524,47 @@ static const struct alg_test_desc alg_te + .test = alg_test_aead, + .suite = { + .aead = { +- .enc = { +- .vecs = rfc7539esp_enc_tv_template, +- .count = RFC7539ESP_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = rfc7539esp_dec_tv_template, +- .count = RFC7539ESP_DEC_TEST_VECTORS +- }, ++ .enc = __VECS(rfc7539esp_enc_tv_template), ++ .dec = __VECS(rfc7539esp_dec_tv_template), + } + } + }, { + .alg = "rmd128", + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = rmd128_tv_template, +- .count = RMD128_TEST_VECTORS +- } ++ .hash = __VECS(rmd128_tv_template) + } + }, { + .alg = "rmd160", + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = rmd160_tv_template, +- .count = RMD160_TEST_VECTORS +- } ++ .hash = __VECS(rmd160_tv_template) + } + }, { + .alg = "rmd256", + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = rmd256_tv_template, +- .count = RMD256_TEST_VECTORS +- } ++ .hash = __VECS(rmd256_tv_template) + } + }, { + .alg = "rmd320", + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = rmd320_tv_template, +- .count = RMD320_TEST_VECTORS +- } ++ .hash = __VECS(rmd320_tv_template) + } + }, { + .alg = "rsa", + .test = alg_test_akcipher, + .fips_allowed = 1, + .suite = { +- .akcipher = { +- .vecs = rsa_tv_template, +- .count = RSA_TEST_VECTORS +- } ++ .akcipher = __VECS(rsa_tv_template) + } + }, { + .alg = "salsa20", + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = salsa20_stream_enc_tv_template, +- .count = SALSA20_STREAM_ENC_TEST_VECTORS +- } ++ .enc = __VECS(salsa20_stream_enc_tv_template) + } + } + }, { +@@ -3839,162 +3572,120 @@ static const struct alg_test_desc alg_te + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { +- .hash = { +- .vecs = sha1_tv_template, +- .count = SHA1_TEST_VECTORS +- } ++ .hash = __VECS(sha1_tv_template) + } + }, { + .alg = "sha224", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { +- .hash = { +- .vecs = sha224_tv_template, +- .count = SHA224_TEST_VECTORS +- } ++ .hash = __VECS(sha224_tv_template) + } + }, { + .alg = "sha256", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { +- .hash = { +- .vecs = sha256_tv_template, +- .count = SHA256_TEST_VECTORS +- } ++ .hash = __VECS(sha256_tv_template) + } + }, { + .alg = "sha3-224", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { +- .hash = { +- .vecs = sha3_224_tv_template, +- .count = SHA3_224_TEST_VECTORS +- } ++ .hash = __VECS(sha3_224_tv_template) + } + }, { + .alg = "sha3-256", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { +- .hash = { +- .vecs = sha3_256_tv_template, +- .count = SHA3_256_TEST_VECTORS 
+- } ++ .hash = __VECS(sha3_256_tv_template) + } + }, { + .alg = "sha3-384", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { +- .hash = { +- .vecs = sha3_384_tv_template, +- .count = SHA3_384_TEST_VECTORS +- } ++ .hash = __VECS(sha3_384_tv_template) + } + }, { + .alg = "sha3-512", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { +- .hash = { +- .vecs = sha3_512_tv_template, +- .count = SHA3_512_TEST_VECTORS +- } ++ .hash = __VECS(sha3_512_tv_template) + } + }, { + .alg = "sha384", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { +- .hash = { +- .vecs = sha384_tv_template, +- .count = SHA384_TEST_VECTORS +- } ++ .hash = __VECS(sha384_tv_template) + } + }, { + .alg = "sha512", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { +- .hash = { +- .vecs = sha512_tv_template, +- .count = SHA512_TEST_VECTORS +- } ++ .hash = __VECS(sha512_tv_template) + } + }, { + .alg = "tgr128", + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = tgr128_tv_template, +- .count = TGR128_TEST_VECTORS +- } ++ .hash = __VECS(tgr128_tv_template) + } + }, { + .alg = "tgr160", + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = tgr160_tv_template, +- .count = TGR160_TEST_VECTORS +- } ++ .hash = __VECS(tgr160_tv_template) + } + }, { + .alg = "tgr192", + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = tgr192_tv_template, +- .count = TGR192_TEST_VECTORS ++ .hash = __VECS(tgr192_tv_template) ++ } ++ }, { ++ .alg = "tls10(hmac(sha1),cbc(aes))", ++ .test = alg_test_tls, ++ .suite = { ++ .tls = { ++ .enc = __VECS(tls_enc_tv_template), ++ .dec = __VECS(tls_dec_tv_template) + } + } + }, { + .alg = "vmac(aes)", + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = aes_vmac128_tv_template, +- .count = VMAC_AES_TEST_VECTORS +- } ++ .hash = __VECS(aes_vmac128_tv_template) + } + }, { + .alg = "wp256", + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = wp256_tv_template, +- .count = WP256_TEST_VECTORS +- } ++ .hash = __VECS(wp256_tv_template) + } + }, { + .alg = "wp384", + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = wp384_tv_template, +- .count = WP384_TEST_VECTORS +- } ++ .hash = __VECS(wp384_tv_template) + } + }, { + .alg = "wp512", + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = wp512_tv_template, +- .count = WP512_TEST_VECTORS +- } ++ .hash = __VECS(wp512_tv_template) + } + }, { + .alg = "xcbc(aes)", + .test = alg_test_hash, + .suite = { +- .hash = { +- .vecs = aes_xcbc128_tv_template, +- .count = XCBC_AES_TEST_VECTORS +- } ++ .hash = __VECS(aes_xcbc128_tv_template) + } + }, { + .alg = "xts(aes)", +@@ -4002,14 +3693,8 @@ static const struct alg_test_desc alg_te + .fips_allowed = 1, + .suite = { + .cipher = { +- .enc = { +- .vecs = aes_xts_enc_tv_template, +- .count = AES_XTS_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = aes_xts_dec_tv_template, +- .count = AES_XTS_DEC_TEST_VECTORS +- } ++ .enc = __VECS(aes_xts_enc_tv_template), ++ .dec = __VECS(aes_xts_dec_tv_template) + } + } + }, { +@@ -4017,14 +3702,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = camellia_xts_enc_tv_template, +- .count = CAMELLIA_XTS_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = camellia_xts_dec_tv_template, +- .count = CAMELLIA_XTS_DEC_TEST_VECTORS +- } ++ .enc = __VECS(camellia_xts_enc_tv_template), ++ .dec = __VECS(camellia_xts_dec_tv_template) + } + } + }, { +@@ -4032,14 +3711,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + 
.cipher = { +- .enc = { +- .vecs = cast6_xts_enc_tv_template, +- .count = CAST6_XTS_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = cast6_xts_dec_tv_template, +- .count = CAST6_XTS_DEC_TEST_VECTORS +- } ++ .enc = __VECS(cast6_xts_enc_tv_template), ++ .dec = __VECS(cast6_xts_dec_tv_template) + } + } + }, { +@@ -4047,14 +3720,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = serpent_xts_enc_tv_template, +- .count = SERPENT_XTS_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = serpent_xts_dec_tv_template, +- .count = SERPENT_XTS_DEC_TEST_VECTORS +- } ++ .enc = __VECS(serpent_xts_enc_tv_template), ++ .dec = __VECS(serpent_xts_dec_tv_template) + } + } + }, { +@@ -4062,14 +3729,8 @@ static const struct alg_test_desc alg_te + .test = alg_test_skcipher, + .suite = { + .cipher = { +- .enc = { +- .vecs = tf_xts_enc_tv_template, +- .count = TF_XTS_ENC_TEST_VECTORS +- }, +- .dec = { +- .vecs = tf_xts_dec_tv_template, +- .count = TF_XTS_DEC_TEST_VECTORS +- } ++ .enc = __VECS(tf_xts_enc_tv_template), ++ .dec = __VECS(tf_xts_dec_tv_template) + } + } + } +--- a/crypto/testmgr.h ++++ b/crypto/testmgr.h +@@ -34,9 +34,9 @@ + + struct hash_testvec { + /* only used with keyed hash algorithms */ +- char *key; +- char *plaintext; +- char *digest; ++ const char *key; ++ const char *plaintext; ++ const char *digest; + unsigned char tap[MAX_TAP]; + unsigned short psize; + unsigned char np; +@@ -63,11 +63,11 @@ struct hash_testvec { + */ + + struct cipher_testvec { +- char *key; +- char *iv; +- char *iv_out; +- char *input; +- char *result; ++ const char *key; ++ const char *iv; ++ const char *iv_out; ++ const char *input; ++ const char *result; + unsigned short tap[MAX_TAP]; + int np; + unsigned char also_non_np; +@@ -80,11 +80,11 @@ struct cipher_testvec { + }; + + struct aead_testvec { +- char *key; +- char *iv; +- char *input; +- char *assoc; +- char *result; ++ const char *key; ++ const char *iv; ++ const char *input; ++ const char *assoc; ++ const char *result; + unsigned char tap[MAX_TAP]; + unsigned char atap[MAX_TAP]; + int np; +@@ -99,10 +99,10 @@ struct aead_testvec { + }; + + struct cprng_testvec { +- char *key; +- char *dt; +- char *v; +- char *result; ++ const char *key; ++ const char *dt; ++ const char *v; ++ const char *result; + unsigned char klen; + unsigned short dtlen; + unsigned short vlen; +@@ -111,24 +111,38 @@ struct cprng_testvec { + }; + + struct drbg_testvec { +- unsigned char *entropy; ++ const unsigned char *entropy; + size_t entropylen; +- unsigned char *entpra; +- unsigned char *entprb; ++ const unsigned char *entpra; ++ const unsigned char *entprb; + size_t entprlen; +- unsigned char *addtla; +- unsigned char *addtlb; ++ const unsigned char *addtla; ++ const unsigned char *addtlb; + size_t addtllen; +- unsigned char *pers; ++ const unsigned char *pers; + size_t perslen; +- unsigned char *expected; ++ const unsigned char *expected; + size_t expectedlen; + }; + ++struct tls_testvec { ++ char *key; /* wrapped keys for encryption and authentication */ ++ char *iv; /* initialization vector */ ++ char *input; /* input data */ ++ char *assoc; /* associated data: seq num, type, version, input len */ ++ char *result; /* result data */ ++ unsigned char fail; /* the test failure is expected */ ++ unsigned char novrfy; /* dec verification failure expected */ ++ unsigned char klen; /* key length */ ++ unsigned short ilen; /* input data length */ ++ unsigned short alen; /* associated data length */ ++ unsigned short rlen; /* 
result length */ ++}; ++ + struct akcipher_testvec { +- unsigned char *key; +- unsigned char *m; +- unsigned char *c; ++ const unsigned char *key; ++ const unsigned char *m; ++ const unsigned char *c; + unsigned int key_len; + unsigned int m_size; + unsigned int c_size; +@@ -136,27 +150,227 @@ struct akcipher_testvec { + }; + + struct kpp_testvec { +- unsigned char *secret; +- unsigned char *b_public; +- unsigned char *expected_a_public; +- unsigned char *expected_ss; ++ const unsigned char *secret; ++ const unsigned char *b_public; ++ const unsigned char *expected_a_public; ++ const unsigned char *expected_ss; + unsigned short secret_size; + unsigned short b_public_size; + unsigned short expected_a_public_size; + unsigned short expected_ss_size; + }; + +-static char zeroed_string[48]; ++static const char zeroed_string[48]; + + /* +- * RSA test vectors. Borrowed from openSSL. ++ * TLS1.0 synthetic test vectors + */ +-#ifdef CONFIG_CRYPTO_FIPS +-#define RSA_TEST_VECTORS 2 ++static struct tls_testvec tls_enc_tv_template[] = { ++ { ++#ifdef __LITTLE_ENDIAN ++ .key = "\x08\x00" /* rta length */ ++ "\x01\x00" /* rta type */ ++#else ++ .key = "\x00\x08" /* rta length */ ++ "\x00\x01" /* rta type */ ++#endif ++ "\x00\x00\x00\x10" /* enc key length */ ++ "authenticationkey20benckeyis16_bytes", ++ .klen = 8 + 20 + 16, ++ .iv = "iv0123456789abcd", ++ .input = "Single block msg", ++ .ilen = 16, ++ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" ++ "\x00\x03\x01\x00\x10", ++ .alen = 13, ++ .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1" ++ "\x59\x79\x1e\x91\x5f\x52\x14\x9c" ++ "\xc0\x75\xd8\x4c\x97\x0f\x07\x73" ++ "\xdc\x89\x47\x49\x49\xcb\x30\x6b" ++ "\x1b\x45\x23\xa1\xd0\x51\xcf\x02" ++ "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61", ++ .rlen = 16 + 20 + 12, ++ }, { ++#ifdef __LITTLE_ENDIAN ++ .key = "\x08\x00" /* rta length */ ++ "\x01\x00" /* rta type */ ++#else ++ .key = "\x00\x08" /* rta length */ ++ "\x00\x01" /* rta type */ ++#endif ++ "\x00\x00\x00\x10" /* enc key length */ ++ "authenticationkey20benckeyis16_bytes", ++ .klen = 8 + 20 + 16, ++ .iv = "iv0123456789abcd", ++ .input = "", ++ .ilen = 0, ++ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" ++ "\x00\x03\x01\x00\x00", ++ .alen = 13, ++ .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67" ++ "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a" ++ "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45" ++ "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a", ++ .rlen = 20 + 12, ++ }, { ++#ifdef __LITTLE_ENDIAN ++ .key = "\x08\x00" /* rta length */ ++ "\x01\x00" /* rta type */ ++#else ++ .key = "\x00\x08" /* rta length */ ++ "\x00\x01" /* rta type */ ++#endif ++ "\x00\x00\x00\x10" /* enc key length */ ++ "authenticationkey20benckeyis16_bytes", ++ .klen = 8 + 20 + 16, ++ .iv = "iv0123456789abcd", ++ .input = "285 bytes plaintext285 bytes plaintext285 bytes" ++ " plaintext285 bytes plaintext285 bytes plaintext285" ++ " bytes plaintext285 bytes plaintext285 bytes" ++ " plaintext285 bytes plaintext285 bytes plaintext285" ++ " bytes plaintext285 bytes plaintext285 bytes" ++ " plaintext285 bytes plaintext285 bytes plaintext285" ++ " bytes plaintext285 bytes plaintext", ++ .ilen = 285, ++ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" ++ "\x00\x03\x01\x01\x1d", ++ .alen = 13, ++ .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd" ++ "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90" ++ "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a" ++ "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94" ++ "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51" ++ "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31" ++ 
"\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8" ++ "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50" ++ "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac" ++ "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14" ++ "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d" ++ "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8" ++ "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c" ++ "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74" ++ "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e" ++ "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b" ++ "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d" ++ "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13" ++ "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d" ++ "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5" ++ "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2" ++ "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20" ++ "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1" ++ "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e" ++ "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7" ++ "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e" ++ "\x2a\x9e\x26\xf1\x3d\x21\xac\x65", ++ .rlen = 285 + 20 + 15, ++ } ++}; ++ ++static struct tls_testvec tls_dec_tv_template[] = { ++ { ++#ifdef __LITTLE_ENDIAN ++ .key = "\x08\x00" /* rta length */ ++ "\x01\x00" /* rta type */ ++#else ++ .key = "\x00\x08" /* rta length */ ++ "\x00\x01" /* rta type */ ++#endif ++ "\x00\x00\x00\x10" /* enc key length */ ++ "authenticationkey20benckeyis16_bytes", ++ .klen = 8 + 20 + 16, ++ .iv = "iv0123456789abcd", ++ .input = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1" ++ "\x59\x79\x1e\x91\x5f\x52\x14\x9c" ++ "\xc0\x75\xd8\x4c\x97\x0f\x07\x73" ++ "\xdc\x89\x47\x49\x49\xcb\x30\x6b" ++ "\x1b\x45\x23\xa1\xd0\x51\xcf\x02" ++ "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61", ++ .ilen = 16 + 20 + 12, ++ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" ++ "\x00\x03\x01\x00\x30", ++ .alen = 13, ++ .result = "Single block msg", ++ .rlen = 16, ++ }, { ++#ifdef __LITTLE_ENDIAN ++ .key = "\x08\x00" /* rta length */ ++ "\x01\x00" /* rta type */ + #else +-#define RSA_TEST_VECTORS 5 ++ .key = "\x00\x08" /* rta length */ ++ "\x00\x01" /* rta type */ + #endif +-static struct akcipher_testvec rsa_tv_template[] = { ++ "\x00\x00\x00\x10" /* enc key length */ ++ "authenticationkey20benckeyis16_bytes", ++ .klen = 8 + 20 + 16, ++ .iv = "iv0123456789abcd", ++ .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67" ++ "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a" ++ "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45" ++ "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a", ++ .ilen = 20 + 12, ++ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" ++ "\x00\x03\x01\x00\x20", ++ .alen = 13, ++ .result = "", ++ .rlen = 0, ++ }, { ++#ifdef __LITTLE_ENDIAN ++ .key = "\x08\x00" /* rta length */ ++ "\x01\x00" /* rta type */ ++#else ++ .key = "\x00\x08" /* rta length */ ++ "\x00\x01" /* rta type */ ++#endif ++ "\x00\x00\x00\x10" /* enc key length */ ++ "authenticationkey20benckeyis16_bytes", ++ .klen = 8 + 20 + 16, ++ .iv = "iv0123456789abcd", ++ .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd" ++ "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90" ++ "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a" ++ "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94" ++ "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51" ++ "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31" ++ "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8" ++ "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50" ++ "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac" ++ "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14" ++ "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d" ++ 
"\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8" ++ "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c" ++ "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74" ++ "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e" ++ "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b" ++ "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d" ++ "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13" ++ "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d" ++ "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5" ++ "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2" ++ "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20" ++ "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1" ++ "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e" ++ "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7" ++ "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e" ++ "\x2a\x9e\x26\xf1\x3d\x21\xac\x65", ++ ++ .ilen = 285 + 20 + 15, ++ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" ++ "\x00\x03\x01\x01\x40", ++ .alen = 13, ++ .result = "285 bytes plaintext285 bytes plaintext285 bytes" ++ " plaintext285 bytes plaintext285 bytes plaintext285" ++ " bytes plaintext285 bytes plaintext285 bytes" ++ " plaintext285 bytes plaintext285 bytes plaintext285" ++ " bytes plaintext285 bytes plaintext285 bytes" ++ " plaintext285 bytes plaintext285 bytes plaintext", ++ .rlen = 285, ++ } ++}; ++ ++/* ++ * RSA test vectors. Borrowed from openSSL. ++ */ ++static const struct akcipher_testvec rsa_tv_template[] = { + { + #ifndef CONFIG_CRYPTO_FIPS + .key = +@@ -340,6 +554,7 @@ static struct akcipher_testvec rsa_tv_te + .m_size = 8, + .c_size = 256, + .public_key_vec = true, ++#ifndef CONFIG_CRYPTO_FIPS + }, { + .key = + "\x30\x82\x09\x29" /* sequence of 2345 bytes */ +@@ -538,12 +753,11 @@ static struct akcipher_testvec rsa_tv_te + .key_len = 2349, + .m_size = 8, + .c_size = 512, ++#endif + } + }; + +-#define DH_TEST_VECTORS 2 +- +-struct kpp_testvec dh_tv_template[] = { ++static const struct kpp_testvec dh_tv_template[] = { + { + .secret = + #ifdef __LITTLE_ENDIAN +@@ -760,12 +974,7 @@ struct kpp_testvec dh_tv_template[] = { + } + }; + +-#ifdef CONFIG_CRYPTO_FIPS +-#define ECDH_TEST_VECTORS 1 +-#else +-#define ECDH_TEST_VECTORS 2 +-#endif +-struct kpp_testvec ecdh_tv_template[] = { ++static const struct kpp_testvec ecdh_tv_template[] = { + { + #ifndef CONFIG_CRYPTO_FIPS + .secret = +@@ -856,9 +1065,7 @@ struct kpp_testvec ecdh_tv_template[] = + /* + * MD4 test vectors from RFC1320 + */ +-#define MD4_TEST_VECTORS 7 +- +-static struct hash_testvec md4_tv_template [] = { ++static const struct hash_testvec md4_tv_template[] = { + { + .plaintext = "", + .digest = "\x31\xd6\xcf\xe0\xd1\x6a\xe9\x31" +@@ -899,8 +1106,7 @@ static struct hash_testvec md4_tv_templa + }, + }; + +-#define SHA3_224_TEST_VECTORS 3 +-static struct hash_testvec sha3_224_tv_template[] = { ++static const struct hash_testvec sha3_224_tv_template[] = { + { + .plaintext = "", + .digest = "\x6b\x4e\x03\x42\x36\x67\xdb\xb7" +@@ -925,8 +1131,7 @@ static struct hash_testvec sha3_224_tv_t + }, + }; + +-#define SHA3_256_TEST_VECTORS 3 +-static struct hash_testvec sha3_256_tv_template[] = { ++static const struct hash_testvec sha3_256_tv_template[] = { + { + .plaintext = "", + .digest = "\xa7\xff\xc6\xf8\xbf\x1e\xd7\x66" +@@ -952,8 +1157,7 @@ static struct hash_testvec sha3_256_tv_t + }; + + +-#define SHA3_384_TEST_VECTORS 3 +-static struct hash_testvec sha3_384_tv_template[] = { ++static const struct hash_testvec sha3_384_tv_template[] = { + { + .plaintext = "", + .digest = "\x0c\x63\xa7\x5b\x84\x5e\x4f\x7d" +@@ -985,8 +1189,7 @@ static 
struct hash_testvec sha3_384_tv_t + }; + + +-#define SHA3_512_TEST_VECTORS 3 +-static struct hash_testvec sha3_512_tv_template[] = { ++static const struct hash_testvec sha3_512_tv_template[] = { + { + .plaintext = "", + .digest = "\xa6\x9f\x73\xcc\xa2\x3a\x9a\xc5" +@@ -1027,9 +1230,7 @@ static struct hash_testvec sha3_512_tv_t + /* + * MD5 test vectors from RFC1321 + */ +-#define MD5_TEST_VECTORS 7 +- +-static struct hash_testvec md5_tv_template[] = { ++static const struct hash_testvec md5_tv_template[] = { + { + .digest = "\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04" + "\xe9\x80\x09\x98\xec\xf8\x42\x7e", +@@ -1073,9 +1274,7 @@ static struct hash_testvec md5_tv_templa + /* + * RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E) + */ +-#define RMD128_TEST_VECTORS 10 +- +-static struct hash_testvec rmd128_tv_template[] = { ++static const struct hash_testvec rmd128_tv_template[] = { + { + .digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e" + "\xcb\x61\x0f\x18\xf6\xb3\x8b\x46", +@@ -1137,9 +1336,7 @@ static struct hash_testvec rmd128_tv_tem + /* + * RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E) + */ +-#define RMD160_TEST_VECTORS 10 +- +-static struct hash_testvec rmd160_tv_template[] = { ++static const struct hash_testvec rmd160_tv_template[] = { + { + .digest = "\x9c\x11\x85\xa5\xc5\xe9\xfc\x54\x61\x28" + "\x08\x97\x7e\xe8\xf5\x48\xb2\x25\x8d\x31", +@@ -1201,9 +1398,7 @@ static struct hash_testvec rmd160_tv_tem + /* + * RIPEMD-256 test vectors + */ +-#define RMD256_TEST_VECTORS 8 +- +-static struct hash_testvec rmd256_tv_template[] = { ++static const struct hash_testvec rmd256_tv_template[] = { + { + .digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18" + "\x77\xfc\x52\xd6\x4d\x30\xe3\x7a" +@@ -1269,9 +1464,7 @@ static struct hash_testvec rmd256_tv_tem + /* + * RIPEMD-320 test vectors + */ +-#define RMD320_TEST_VECTORS 8 +- +-static struct hash_testvec rmd320_tv_template[] = { ++static const struct hash_testvec rmd320_tv_template[] = { + { + .digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1" + "\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25" +@@ -1334,36 +1527,49 @@ static struct hash_testvec rmd320_tv_tem + } + }; + +-#define CRCT10DIF_TEST_VECTORS 3 +-static struct hash_testvec crct10dif_tv_template[] = { ++static const struct hash_testvec crct10dif_tv_template[] = { + { +- .plaintext = "abc", +- .psize = 3, +-#ifdef __LITTLE_ENDIAN +- .digest = "\x3b\x44", +-#else +- .digest = "\x44\x3b", +-#endif +- }, { +- .plaintext = "1234567890123456789012345678901234567890" +- "123456789012345678901234567890123456789", +- .psize = 79, +-#ifdef __LITTLE_ENDIAN +- .digest = "\x70\x4b", +-#else +- .digest = "\x4b\x70", +-#endif +- }, { +- .plaintext = +- "abcddddddddddddddddddddddddddddddddddddddddddddddddddddd", +- .psize = 56, +-#ifdef __LITTLE_ENDIAN +- .digest = "\xe3\x9c", +-#else +- .digest = "\x9c\xe3", +-#endif +- .np = 2, +- .tap = { 28, 28 } ++ .plaintext = "abc", ++ .psize = 3, ++ .digest = (u8 *)(u16 []){ 0x443b }, ++ }, { ++ .plaintext = "1234567890123456789012345678901234567890" ++ "123456789012345678901234567890123456789", ++ .psize = 79, ++ .digest = (u8 *)(u16 []){ 0x4b70 }, ++ .np = 2, ++ .tap = { 63, 16 }, ++ }, { ++ .plaintext = "abcdddddddddddddddddddddddddddddddddddddddd" ++ "ddddddddddddd", ++ .psize = 56, ++ .digest = (u8 *)(u16 []){ 0x9ce3 }, ++ .np = 8, ++ .tap = { 1, 2, 28, 7, 6, 5, 4, 3 }, ++ }, { ++ .plaintext = "1234567890123456789012345678901234567890" ++ "1234567890123456789012345678901234567890" ++ "1234567890123456789012345678901234567890" ++ "1234567890123456789012345678901234567890" ++ 
"1234567890123456789012345678901234567890" ++ "1234567890123456789012345678901234567890" ++ "1234567890123456789012345678901234567890" ++ "123456789012345678901234567890123456789", ++ .psize = 319, ++ .digest = (u8 *)(u16 []){ 0x44c6 }, ++ }, { ++ .plaintext = "1234567890123456789012345678901234567890" ++ "1234567890123456789012345678901234567890" ++ "1234567890123456789012345678901234567890" ++ "1234567890123456789012345678901234567890" ++ "1234567890123456789012345678901234567890" ++ "1234567890123456789012345678901234567890" ++ "1234567890123456789012345678901234567890" ++ "123456789012345678901234567890123456789", ++ .psize = 319, ++ .digest = (u8 *)(u16 []){ 0x44c6 }, ++ .np = 4, ++ .tap = { 1, 255, 57, 6 }, + } + }; + +@@ -1371,9 +1577,7 @@ static struct hash_testvec crct10dif_tv_ + * SHA1 test vectors from from FIPS PUB 180-1 + * Long vector from CAVS 5.0 + */ +-#define SHA1_TEST_VECTORS 6 +- +-static struct hash_testvec sha1_tv_template[] = { ++static const struct hash_testvec sha1_tv_template[] = { + { + .plaintext = "", + .psize = 0, +@@ -1563,9 +1767,7 @@ static struct hash_testvec sha1_tv_templ + /* + * SHA224 test vectors from from FIPS PUB 180-2 + */ +-#define SHA224_TEST_VECTORS 5 +- +-static struct hash_testvec sha224_tv_template[] = { ++static const struct hash_testvec sha224_tv_template[] = { + { + .plaintext = "", + .psize = 0, +@@ -1737,9 +1939,7 @@ static struct hash_testvec sha224_tv_tem + /* + * SHA256 test vectors from from NIST + */ +-#define SHA256_TEST_VECTORS 5 +- +-static struct hash_testvec sha256_tv_template[] = { ++static const struct hash_testvec sha256_tv_template[] = { + { + .plaintext = "", + .psize = 0, +@@ -1910,9 +2110,7 @@ static struct hash_testvec sha256_tv_tem + /* + * SHA384 test vectors from from NIST and kerneli + */ +-#define SHA384_TEST_VECTORS 6 +- +-static struct hash_testvec sha384_tv_template[] = { ++static const struct hash_testvec sha384_tv_template[] = { + { + .plaintext = "", + .psize = 0, +@@ -2104,9 +2302,7 @@ static struct hash_testvec sha384_tv_tem + /* + * SHA512 test vectors from from NIST and kerneli + */ +-#define SHA512_TEST_VECTORS 6 +- +-static struct hash_testvec sha512_tv_template[] = { ++static const struct hash_testvec sha512_tv_template[] = { + { + .plaintext = "", + .psize = 0, +@@ -2313,9 +2509,7 @@ static struct hash_testvec sha512_tv_tem + * by Vincent Rijmen and Paulo S. L. M. 
Barreto as part of the NESSIE + * submission + */ +-#define WP512_TEST_VECTORS 8 +- +-static struct hash_testvec wp512_tv_template[] = { ++static const struct hash_testvec wp512_tv_template[] = { + { + .plaintext = "", + .psize = 0, +@@ -2411,9 +2605,7 @@ static struct hash_testvec wp512_tv_temp + }, + }; + +-#define WP384_TEST_VECTORS 8 +- +-static struct hash_testvec wp384_tv_template[] = { ++static const struct hash_testvec wp384_tv_template[] = { + { + .plaintext = "", + .psize = 0, +@@ -2493,9 +2685,7 @@ static struct hash_testvec wp384_tv_temp + }, + }; + +-#define WP256_TEST_VECTORS 8 +- +-static struct hash_testvec wp256_tv_template[] = { ++static const struct hash_testvec wp256_tv_template[] = { + { + .plaintext = "", + .psize = 0, +@@ -2562,9 +2752,7 @@ static struct hash_testvec wp256_tv_temp + /* + * TIGER test vectors from Tiger website + */ +-#define TGR192_TEST_VECTORS 6 +- +-static struct hash_testvec tgr192_tv_template[] = { ++static const struct hash_testvec tgr192_tv_template[] = { + { + .plaintext = "", + .psize = 0, +@@ -2607,9 +2795,7 @@ static struct hash_testvec tgr192_tv_tem + }, + }; + +-#define TGR160_TEST_VECTORS 6 +- +-static struct hash_testvec tgr160_tv_template[] = { ++static const struct hash_testvec tgr160_tv_template[] = { + { + .plaintext = "", + .psize = 0, +@@ -2652,9 +2838,7 @@ static struct hash_testvec tgr160_tv_tem + }, + }; + +-#define TGR128_TEST_VECTORS 6 +- +-static struct hash_testvec tgr128_tv_template[] = { ++static const struct hash_testvec tgr128_tv_template[] = { + { + .plaintext = "", + .psize = 0, +@@ -2691,9 +2875,7 @@ static struct hash_testvec tgr128_tv_tem + }, + }; + +-#define GHASH_TEST_VECTORS 6 +- +-static struct hash_testvec ghash_tv_template[] = ++static const struct hash_testvec ghash_tv_template[] = + { + { + .key = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03" +@@ -2808,9 +2990,7 @@ static struct hash_testvec ghash_tv_temp + * HMAC-MD5 test vectors from RFC2202 + * (These need to be fixed to not use strlen). 
+ */ +-#define HMAC_MD5_TEST_VECTORS 7 +- +-static struct hash_testvec hmac_md5_tv_template[] = ++static const struct hash_testvec hmac_md5_tv_template[] = + { + { + .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", +@@ -2890,9 +3070,7 @@ static struct hash_testvec hmac_md5_tv_t + /* + * HMAC-RIPEMD128 test vectors from RFC2286 + */ +-#define HMAC_RMD128_TEST_VECTORS 7 +- +-static struct hash_testvec hmac_rmd128_tv_template[] = { ++static const struct hash_testvec hmac_rmd128_tv_template[] = { + { + .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", + .ksize = 16, +@@ -2971,9 +3149,7 @@ static struct hash_testvec hmac_rmd128_t + /* + * HMAC-RIPEMD160 test vectors from RFC2286 + */ +-#define HMAC_RMD160_TEST_VECTORS 7 +- +-static struct hash_testvec hmac_rmd160_tv_template[] = { ++static const struct hash_testvec hmac_rmd160_tv_template[] = { + { + .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", + .ksize = 20, +@@ -3052,9 +3228,7 @@ static struct hash_testvec hmac_rmd160_t + /* + * HMAC-SHA1 test vectors from RFC2202 + */ +-#define HMAC_SHA1_TEST_VECTORS 7 +- +-static struct hash_testvec hmac_sha1_tv_template[] = { ++static const struct hash_testvec hmac_sha1_tv_template[] = { + { + .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", + .ksize = 20, +@@ -3135,9 +3309,7 @@ static struct hash_testvec hmac_sha1_tv_ + /* + * SHA224 HMAC test vectors from RFC4231 + */ +-#define HMAC_SHA224_TEST_VECTORS 4 +- +-static struct hash_testvec hmac_sha224_tv_template[] = { ++static const struct hash_testvec hmac_sha224_tv_template[] = { + { + .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" +@@ -3250,9 +3422,7 @@ static struct hash_testvec hmac_sha224_t + * HMAC-SHA256 test vectors from + * draft-ietf-ipsec-ciph-sha-256-01.txt + */ +-#define HMAC_SHA256_TEST_VECTORS 10 +- +-static struct hash_testvec hmac_sha256_tv_template[] = { ++static const struct hash_testvec hmac_sha256_tv_template[] = { + { + .key = "\x01\x02\x03\x04\x05\x06\x07\x08" + "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10" +@@ -3387,9 +3557,7 @@ static struct hash_testvec hmac_sha256_t + }, + }; + +-#define CMAC_AES_TEST_VECTORS 6 +- +-static struct hash_testvec aes_cmac128_tv_template[] = { ++static const struct hash_testvec aes_cmac128_tv_template[] = { + { /* From NIST Special Publication 800-38B, AES-128 */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", +@@ -3464,9 +3632,67 @@ static struct hash_testvec aes_cmac128_t + } + }; + +-#define CMAC_DES3_EDE_TEST_VECTORS 4 ++static const struct hash_testvec aes_cbcmac_tv_template[] = { ++ { ++ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" ++ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", ++ .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" ++ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a", ++ .digest = "\x3a\xd7\x7b\xb4\x0d\x7a\x36\x60" ++ "\xa8\x9e\xca\xf3\x24\x66\xef\x97", ++ .psize = 16, ++ .ksize = 16, ++ }, { ++ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" ++ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", ++ .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" ++ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" ++ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" ++ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" ++ "\x30", ++ .digest = "\x9d\x0d\xd0\x63\xfb\xcb\x24\x43" ++ "\xf8\xf2\x76\x03\xac\x39\xb0\x9d", ++ .psize = 33, ++ .ksize = 16, ++ .np = 2, ++ .tap = { 7, 26 }, ++ }, { ++ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" ++ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", ++ .plaintext = 
"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" ++ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" ++ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" ++ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" ++ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" ++ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" ++ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" ++ "\xad\x2b\x41\x7b\xe6\x6c\x37", ++ .digest = "\xc0\x71\x73\xb8\xa0\x2c\x11\x7c" ++ "\xaf\xdc\xb2\xf8\x89\x32\xa3\x3a", ++ .psize = 63, ++ .ksize = 16, ++ }, { ++ .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe" ++ "\x2b\x73\xae\xf0\x85\x7d\x77\x81" ++ "\x1f\x35\x2c\x07\x3b\x61\x08\xd7" ++ "\x2d\x98\x10\xa3\x09\x14\xdf\xf4", ++ .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" ++ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" ++ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" ++ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" ++ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" ++ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" ++ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" ++ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10" ++ "\x1c", ++ .digest = "\x6a\x4e\xdb\x21\x47\x51\xdf\x4f" ++ "\xa8\x4d\x4c\x10\x3b\x72\x7d\xd6", ++ .psize = 65, ++ .ksize = 32, ++ } ++}; + +-static struct hash_testvec des3_ede_cmac64_tv_template[] = { ++static const struct hash_testvec des3_ede_cmac64_tv_template[] = { + /* + * From NIST Special Publication 800-38B, Three Key TDEA + * Corrected test vectors from: +@@ -3512,9 +3738,7 @@ static struct hash_testvec des3_ede_cmac + } + }; + +-#define XCBC_AES_TEST_VECTORS 6 +- +-static struct hash_testvec aes_xcbc128_tv_template[] = { ++static const struct hash_testvec aes_xcbc128_tv_template[] = { + { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", +@@ -3580,36 +3804,35 @@ static struct hash_testvec aes_xcbc128_t + } + }; + +-#define VMAC_AES_TEST_VECTORS 11 +-static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01', +- '\x02', '\x03', '\x02', '\x02', +- '\x02', '\x04', '\x01', '\x07', +- '\x04', '\x01', '\x04', '\x03',}; +-static char vmac_string2[128] = {'a', 'b', 'c',}; +-static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c', +- 'a', 'b', 'c', 'a', 'b', 'c', +- 'a', 'b', 'c', 'a', 'b', 'c', +- 'a', 'b', 'c', 'a', 'b', 'c', +- 'a', 'b', 'c', 'a', 'b', 'c', +- 'a', 'b', 'c', 'a', 'b', 'c', +- 'a', 'b', 'c', 'a', 'b', 'c', +- 'a', 'b', 'c', 'a', 'b', 'c', +- }; +- +-static char vmac_string4[17] = {'b', 'c', 'e', 'f', +- 'i', 'j', 'l', 'm', +- 'o', 'p', 'r', 's', +- 't', 'u', 'w', 'x', 'z'}; +- +-static char vmac_string5[127] = {'r', 'm', 'b', 't', 'c', +- 'o', 'l', 'k', ']', '%', +- '9', '2', '7', '!', 'A'}; +- +-static char vmac_string6[129] = {'p', 't', '*', '7', 'l', +- 'i', '!', '#', 'w', '0', +- 'z', '/', '4', 'A', 'n'}; ++static const char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01', ++ '\x02', '\x03', '\x02', '\x02', ++ '\x02', '\x04', '\x01', '\x07', ++ '\x04', '\x01', '\x04', '\x03',}; ++static const char vmac_string2[128] = {'a', 'b', 'c',}; ++static const char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c', ++ 'a', 'b', 'c', 'a', 'b', 'c', ++ 'a', 'b', 'c', 'a', 'b', 'c', ++ 'a', 'b', 'c', 'a', 'b', 'c', ++ 'a', 'b', 'c', 'a', 'b', 'c', ++ 'a', 'b', 'c', 'a', 'b', 'c', ++ 'a', 'b', 'c', 'a', 'b', 'c', ++ 'a', 'b', 'c', 'a', 'b', 'c', ++ }; ++ ++static const char vmac_string4[17] = {'b', 'c', 'e', 'f', ++ 'i', 'j', 'l', 'm', ++ 'o', 'p', 'r', 's', ++ 't', 'u', 'w', 'x', 'z'}; ++ ++static const char vmac_string5[127] = {'r', 'm', 'b', 't', 'c', ++ 'o', 'l', 'k', ']', '%', ++ '9', '2', '7', '!', 'A'}; ++ ++static const char vmac_string6[129] = {'p', 't', '*', '7', 'l', ++ 'i', '!', '#', 'w', '0', ++ 'z', '/', '4', 'A', 'n'}; + +-static 
struct hash_testvec aes_vmac128_tv_template[] = { ++static const struct hash_testvec aes_vmac128_tv_template[] = { + { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", +@@ -3687,9 +3910,7 @@ static struct hash_testvec aes_vmac128_t + * SHA384 HMAC test vectors from RFC4231 + */ + +-#define HMAC_SHA384_TEST_VECTORS 4 +- +-static struct hash_testvec hmac_sha384_tv_template[] = { ++static const struct hash_testvec hmac_sha384_tv_template[] = { + { + .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" +@@ -3787,9 +4008,7 @@ static struct hash_testvec hmac_sha384_t + * SHA512 HMAC test vectors from RFC4231 + */ + +-#define HMAC_SHA512_TEST_VECTORS 4 +- +-static struct hash_testvec hmac_sha512_tv_template[] = { ++static const struct hash_testvec hmac_sha512_tv_template[] = { + { + .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" +@@ -3894,9 +4113,7 @@ static struct hash_testvec hmac_sha512_t + }, + }; + +-#define HMAC_SHA3_224_TEST_VECTORS 4 +- +-static struct hash_testvec hmac_sha3_224_tv_template[] = { ++static const struct hash_testvec hmac_sha3_224_tv_template[] = { + { + .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" +@@ -3985,9 +4202,7 @@ static struct hash_testvec hmac_sha3_224 + }, + }; + +-#define HMAC_SHA3_256_TEST_VECTORS 4 +- +-static struct hash_testvec hmac_sha3_256_tv_template[] = { ++static const struct hash_testvec hmac_sha3_256_tv_template[] = { + { + .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" +@@ -4076,9 +4291,7 @@ static struct hash_testvec hmac_sha3_256 + }, + }; + +-#define HMAC_SHA3_384_TEST_VECTORS 4 +- +-static struct hash_testvec hmac_sha3_384_tv_template[] = { ++static const struct hash_testvec hmac_sha3_384_tv_template[] = { + { + .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" +@@ -4175,9 +4388,7 @@ static struct hash_testvec hmac_sha3_384 + }, + }; + +-#define HMAC_SHA3_512_TEST_VECTORS 4 +- +-static struct hash_testvec hmac_sha3_512_tv_template[] = { ++static const struct hash_testvec hmac_sha3_512_tv_template[] = { + { + .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" +@@ -4286,9 +4497,7 @@ static struct hash_testvec hmac_sha3_512 + * Poly1305 test vectors from RFC7539 A.3. + */ + +-#define POLY1305_TEST_VECTORS 11 +- +-static struct hash_testvec poly1305_tv_template[] = { ++static const struct hash_testvec poly1305_tv_template[] = { + { /* Test Vector #1 */ + .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" +@@ -4533,20 +4742,7 @@ static struct hash_testvec poly1305_tv_t + /* + * DES test vectors. 
+ */ +-#define DES_ENC_TEST_VECTORS 11 +-#define DES_DEC_TEST_VECTORS 5 +-#define DES_CBC_ENC_TEST_VECTORS 6 +-#define DES_CBC_DEC_TEST_VECTORS 5 +-#define DES_CTR_ENC_TEST_VECTORS 2 +-#define DES_CTR_DEC_TEST_VECTORS 2 +-#define DES3_EDE_ENC_TEST_VECTORS 4 +-#define DES3_EDE_DEC_TEST_VECTORS 4 +-#define DES3_EDE_CBC_ENC_TEST_VECTORS 2 +-#define DES3_EDE_CBC_DEC_TEST_VECTORS 2 +-#define DES3_EDE_CTR_ENC_TEST_VECTORS 2 +-#define DES3_EDE_CTR_DEC_TEST_VECTORS 2 +- +-static struct cipher_testvec des_enc_tv_template[] = { ++static const struct cipher_testvec des_enc_tv_template[] = { + { /* From Applied Cryptography */ + .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", + .klen = 8, +@@ -4720,7 +4916,7 @@ static struct cipher_testvec des_enc_tv_ + }, + }; + +-static struct cipher_testvec des_dec_tv_template[] = { ++static const struct cipher_testvec des_dec_tv_template[] = { + { /* From Applied Cryptography */ + .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", + .klen = 8, +@@ -4830,7 +5026,7 @@ static struct cipher_testvec des_dec_tv_ + }, + }; + +-static struct cipher_testvec des_cbc_enc_tv_template[] = { ++static const struct cipher_testvec des_cbc_enc_tv_template[] = { + { /* From OpenSSL */ + .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", + .klen = 8, +@@ -4956,7 +5152,7 @@ static struct cipher_testvec des_cbc_enc + }, + }; + +-static struct cipher_testvec des_cbc_dec_tv_template[] = { ++static const struct cipher_testvec des_cbc_dec_tv_template[] = { + { /* FIPS Pub 81 */ + .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", + .klen = 8, +@@ -5065,7 +5261,7 @@ static struct cipher_testvec des_cbc_dec + }, + }; + +-static struct cipher_testvec des_ctr_enc_tv_template[] = { ++static const struct cipher_testvec des_ctr_enc_tv_template[] = { + { /* Generated with Crypto++ */ + .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55", + .klen = 8, +@@ -5211,7 +5407,7 @@ static struct cipher_testvec des_ctr_enc + }, + }; + +-static struct cipher_testvec des_ctr_dec_tv_template[] = { ++static const struct cipher_testvec des_ctr_dec_tv_template[] = { + { /* Generated with Crypto++ */ + .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55", + .klen = 8, +@@ -5357,7 +5553,7 @@ static struct cipher_testvec des_ctr_dec + }, + }; + +-static struct cipher_testvec des3_ede_enc_tv_template[] = { ++static const struct cipher_testvec des3_ede_enc_tv_template[] = { + { /* These are from openssl */ + .key = "\x01\x23\x45\x67\x89\xab\xcd\xef" + "\x55\x55\x55\x55\x55\x55\x55\x55" +@@ -5522,7 +5718,7 @@ static struct cipher_testvec des3_ede_en + }, + }; + +-static struct cipher_testvec des3_ede_dec_tv_template[] = { ++static const struct cipher_testvec des3_ede_dec_tv_template[] = { + { /* These are from openssl */ + .key = "\x01\x23\x45\x67\x89\xab\xcd\xef" + "\x55\x55\x55\x55\x55\x55\x55\x55" +@@ -5687,7 +5883,7 @@ static struct cipher_testvec des3_ede_de + }, + }; + +-static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = { ++static const struct cipher_testvec des3_ede_cbc_enc_tv_template[] = { + { /* Generated from openssl */ + .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24" + "\x44\x4D\x99\x5A\x12\xD6\x40\xC0" +@@ -5867,7 +6063,7 @@ static struct cipher_testvec des3_ede_cb + }, + }; + +-static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = { ++static const struct cipher_testvec des3_ede_cbc_dec_tv_template[] = { + { /* Generated from openssl */ + .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24" + "\x44\x4D\x99\x5A\x12\xD6\x40\xC0" +@@ -6047,7 +6243,7 @@ static struct cipher_testvec des3_ede_cb + }, + }; + +-static struct cipher_testvec 
des3_ede_ctr_enc_tv_template[] = { ++static const struct cipher_testvec des3_ede_ctr_enc_tv_template[] = { + { /* Generated with Crypto++ */ + .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00" + "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE" +@@ -6325,7 +6521,7 @@ static struct cipher_testvec des3_ede_ct + }, + }; + +-static struct cipher_testvec des3_ede_ctr_dec_tv_template[] = { ++static const struct cipher_testvec des3_ede_ctr_dec_tv_template[] = { + { /* Generated with Crypto++ */ + .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00" + "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE" +@@ -6606,14 +6802,7 @@ static struct cipher_testvec des3_ede_ct + /* + * Blowfish test vectors. + */ +-#define BF_ENC_TEST_VECTORS 7 +-#define BF_DEC_TEST_VECTORS 7 +-#define BF_CBC_ENC_TEST_VECTORS 2 +-#define BF_CBC_DEC_TEST_VECTORS 2 +-#define BF_CTR_ENC_TEST_VECTORS 2 +-#define BF_CTR_DEC_TEST_VECTORS 2 +- +-static struct cipher_testvec bf_enc_tv_template[] = { ++static const struct cipher_testvec bf_enc_tv_template[] = { + { /* DES test vectors from OpenSSL */ + .key = "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 8, +@@ -6805,7 +6994,7 @@ static struct cipher_testvec bf_enc_tv_t + }, + }; + +-static struct cipher_testvec bf_dec_tv_template[] = { ++static const struct cipher_testvec bf_dec_tv_template[] = { + { /* DES test vectors from OpenSSL */ + .key = "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 8, +@@ -6997,7 +7186,7 @@ static struct cipher_testvec bf_dec_tv_t + }, + }; + +-static struct cipher_testvec bf_cbc_enc_tv_template[] = { ++static const struct cipher_testvec bf_cbc_enc_tv_template[] = { + { /* From OpenSSL */ + .key = "\x01\x23\x45\x67\x89\xab\xcd\xef" + "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87", +@@ -7154,7 +7343,7 @@ static struct cipher_testvec bf_cbc_enc_ + }, + }; + +-static struct cipher_testvec bf_cbc_dec_tv_template[] = { ++static const struct cipher_testvec bf_cbc_dec_tv_template[] = { + { /* From OpenSSL */ + .key = "\x01\x23\x45\x67\x89\xab\xcd\xef" + "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87", +@@ -7311,7 +7500,7 @@ static struct cipher_testvec bf_cbc_dec_ + }, + }; + +-static struct cipher_testvec bf_ctr_enc_tv_template[] = { ++static const struct cipher_testvec bf_ctr_enc_tv_template[] = { + { /* Generated with Crypto++ */ + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" +@@ -7723,7 +7912,7 @@ static struct cipher_testvec bf_ctr_enc_ + }, + }; + +-static struct cipher_testvec bf_ctr_dec_tv_template[] = { ++static const struct cipher_testvec bf_ctr_dec_tv_template[] = { + { /* Generated with Crypto++ */ + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" +@@ -8138,18 +8327,7 @@ static struct cipher_testvec bf_ctr_dec_ + /* + * Twofish test vectors. 
+ */ +-#define TF_ENC_TEST_VECTORS 4 +-#define TF_DEC_TEST_VECTORS 4 +-#define TF_CBC_ENC_TEST_VECTORS 5 +-#define TF_CBC_DEC_TEST_VECTORS 5 +-#define TF_CTR_ENC_TEST_VECTORS 2 +-#define TF_CTR_DEC_TEST_VECTORS 2 +-#define TF_LRW_ENC_TEST_VECTORS 8 +-#define TF_LRW_DEC_TEST_VECTORS 8 +-#define TF_XTS_ENC_TEST_VECTORS 5 +-#define TF_XTS_DEC_TEST_VECTORS 5 +- +-static struct cipher_testvec tf_enc_tv_template[] = { ++static const struct cipher_testvec tf_enc_tv_template[] = { + { + .key = zeroed_string, + .klen = 16, +@@ -8317,7 +8495,7 @@ static struct cipher_testvec tf_enc_tv_t + }, + }; + +-static struct cipher_testvec tf_dec_tv_template[] = { ++static const struct cipher_testvec tf_dec_tv_template[] = { + { + .key = zeroed_string, + .klen = 16, +@@ -8485,7 +8663,7 @@ static struct cipher_testvec tf_dec_tv_t + }, + }; + +-static struct cipher_testvec tf_cbc_enc_tv_template[] = { ++static const struct cipher_testvec tf_cbc_enc_tv_template[] = { + { /* Generated with Nettle */ + .key = zeroed_string, + .klen = 16, +@@ -8668,7 +8846,7 @@ static struct cipher_testvec tf_cbc_enc_ + }, + }; + +-static struct cipher_testvec tf_cbc_dec_tv_template[] = { ++static const struct cipher_testvec tf_cbc_dec_tv_template[] = { + { /* Reverse of the first four above */ + .key = zeroed_string, + .klen = 16, +@@ -8851,7 +9029,7 @@ static struct cipher_testvec tf_cbc_dec_ + }, + }; + +-static struct cipher_testvec tf_ctr_enc_tv_template[] = { ++static const struct cipher_testvec tf_ctr_enc_tv_template[] = { + { /* Generated with Crypto++ */ + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" +@@ -9262,7 +9440,7 @@ static struct cipher_testvec tf_ctr_enc_ + }, + }; + +-static struct cipher_testvec tf_ctr_dec_tv_template[] = { ++static const struct cipher_testvec tf_ctr_dec_tv_template[] = { + { /* Generated with Crypto++ */ + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" +@@ -9673,7 +9851,7 @@ static struct cipher_testvec tf_ctr_dec_ + }, + }; + +-static struct cipher_testvec tf_lrw_enc_tv_template[] = { ++static const struct cipher_testvec tf_lrw_enc_tv_template[] = { + /* Generated from AES-LRW test vectors */ + { + .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d" +@@ -9925,7 +10103,7 @@ static struct cipher_testvec tf_lrw_enc_ + }, + }; + +-static struct cipher_testvec tf_lrw_dec_tv_template[] = { ++static const struct cipher_testvec tf_lrw_dec_tv_template[] = { + /* Generated from AES-LRW test vectors */ + /* same as enc vectors with input and result reversed */ + { +@@ -10178,7 +10356,7 @@ static struct cipher_testvec tf_lrw_dec_ + }, + }; + +-static struct cipher_testvec tf_xts_enc_tv_template[] = { ++static const struct cipher_testvec tf_xts_enc_tv_template[] = { + /* Generated from AES-XTS test vectors */ + { + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" +@@ -10520,7 +10698,7 @@ static struct cipher_testvec tf_xts_enc_ + }, + }; + +-static struct cipher_testvec tf_xts_dec_tv_template[] = { ++static const struct cipher_testvec tf_xts_dec_tv_template[] = { + /* Generated from AES-XTS test vectors */ + /* same as enc vectors with input and result reversed */ + { +@@ -10867,25 +11045,7 @@ static struct cipher_testvec tf_xts_dec_ + * Serpent test vectors. These are backwards because Serpent writes + * octet sequences in right-to-left mode. 
+ */ +-#define SERPENT_ENC_TEST_VECTORS 5 +-#define SERPENT_DEC_TEST_VECTORS 5 +- +-#define TNEPRES_ENC_TEST_VECTORS 4 +-#define TNEPRES_DEC_TEST_VECTORS 4 +- +-#define SERPENT_CBC_ENC_TEST_VECTORS 1 +-#define SERPENT_CBC_DEC_TEST_VECTORS 1 +- +-#define SERPENT_CTR_ENC_TEST_VECTORS 2 +-#define SERPENT_CTR_DEC_TEST_VECTORS 2 +- +-#define SERPENT_LRW_ENC_TEST_VECTORS 8 +-#define SERPENT_LRW_DEC_TEST_VECTORS 8 +- +-#define SERPENT_XTS_ENC_TEST_VECTORS 5 +-#define SERPENT_XTS_DEC_TEST_VECTORS 5 +- +-static struct cipher_testvec serpent_enc_tv_template[] = { ++static const struct cipher_testvec serpent_enc_tv_template[] = { + { + .input = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", +@@ -11061,7 +11221,7 @@ static struct cipher_testvec serpent_enc + }, + }; + +-static struct cipher_testvec tnepres_enc_tv_template[] = { ++static const struct cipher_testvec tnepres_enc_tv_template[] = { + { /* KeySize=128, PT=0, I=1 */ + .input = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", +@@ -11111,7 +11271,7 @@ static struct cipher_testvec tnepres_enc + }; + + +-static struct cipher_testvec serpent_dec_tv_template[] = { ++static const struct cipher_testvec serpent_dec_tv_template[] = { + { + .input = "\x12\x07\xfc\xce\x9b\xd0\xd6\x47" + "\x6a\xe9\x8f\xbe\xd1\x43\xa0\xe2", +@@ -11287,7 +11447,7 @@ static struct cipher_testvec serpent_dec + }, + }; + +-static struct cipher_testvec tnepres_dec_tv_template[] = { ++static const struct cipher_testvec tnepres_dec_tv_template[] = { + { + .input = "\x41\xcc\x6b\x31\x59\x31\x45\x97" + "\x6d\x6f\xbb\x38\x4b\x37\x21\x28", +@@ -11328,7 +11488,7 @@ static struct cipher_testvec tnepres_dec + }, + }; + +-static struct cipher_testvec serpent_cbc_enc_tv_template[] = { ++static const struct cipher_testvec serpent_cbc_enc_tv_template[] = { + { /* Generated with Crypto++ */ + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" +@@ -11469,7 +11629,7 @@ static struct cipher_testvec serpent_cbc + }, + }; + +-static struct cipher_testvec serpent_cbc_dec_tv_template[] = { ++static const struct cipher_testvec serpent_cbc_dec_tv_template[] = { + { /* Generated with Crypto++ */ + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" +@@ -11610,7 +11770,7 @@ static struct cipher_testvec serpent_cbc + }, + }; + +-static struct cipher_testvec serpent_ctr_enc_tv_template[] = { ++static const struct cipher_testvec serpent_ctr_enc_tv_template[] = { + { /* Generated with Crypto++ */ + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" +@@ -12021,7 +12181,7 @@ static struct cipher_testvec serpent_ctr + }, + }; + +-static struct cipher_testvec serpent_ctr_dec_tv_template[] = { ++static const struct cipher_testvec serpent_ctr_dec_tv_template[] = { + { /* Generated with Crypto++ */ + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" +@@ -12432,7 +12592,7 @@ static struct cipher_testvec serpent_ctr + }, + }; + +-static struct cipher_testvec serpent_lrw_enc_tv_template[] = { ++static const struct cipher_testvec serpent_lrw_enc_tv_template[] = { + /* Generated from AES-LRW test vectors */ + { + .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d" +@@ -12684,7 +12844,7 @@ static struct cipher_testvec serpent_lrw + }, + }; + +-static struct cipher_testvec serpent_lrw_dec_tv_template[] = { ++static const struct cipher_testvec serpent_lrw_dec_tv_template[] = { + /* Generated from AES-LRW test vectors */ + /* same as enc vectors with input and result 
reversed */ + { +@@ -12937,7 +13097,7 @@ static struct cipher_testvec serpent_lrw + }, + }; + +-static struct cipher_testvec serpent_xts_enc_tv_template[] = { ++static const struct cipher_testvec serpent_xts_enc_tv_template[] = { + /* Generated from AES-XTS test vectors */ + { + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" +@@ -13279,7 +13439,7 @@ static struct cipher_testvec serpent_xts + }, + }; + +-static struct cipher_testvec serpent_xts_dec_tv_template[] = { ++static const struct cipher_testvec serpent_xts_dec_tv_template[] = { + /* Generated from AES-XTS test vectors */ + /* same as enc vectors with input and result reversed */ + { +@@ -13623,18 +13783,7 @@ static struct cipher_testvec serpent_xts + }; + + /* Cast6 test vectors from RFC 2612 */ +-#define CAST6_ENC_TEST_VECTORS 4 +-#define CAST6_DEC_TEST_VECTORS 4 +-#define CAST6_CBC_ENC_TEST_VECTORS 1 +-#define CAST6_CBC_DEC_TEST_VECTORS 1 +-#define CAST6_CTR_ENC_TEST_VECTORS 2 +-#define CAST6_CTR_DEC_TEST_VECTORS 2 +-#define CAST6_LRW_ENC_TEST_VECTORS 1 +-#define CAST6_LRW_DEC_TEST_VECTORS 1 +-#define CAST6_XTS_ENC_TEST_VECTORS 1 +-#define CAST6_XTS_DEC_TEST_VECTORS 1 +- +-static struct cipher_testvec cast6_enc_tv_template[] = { ++static const struct cipher_testvec cast6_enc_tv_template[] = { + { + .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c" + "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d", +@@ -13805,7 +13954,7 @@ static struct cipher_testvec cast6_enc_t + }, + }; + +-static struct cipher_testvec cast6_dec_tv_template[] = { ++static const struct cipher_testvec cast6_dec_tv_template[] = { + { + .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c" + "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d", +@@ -13976,7 +14125,7 @@ static struct cipher_testvec cast6_dec_t + }, + }; + +-static struct cipher_testvec cast6_cbc_enc_tv_template[] = { ++static const struct cipher_testvec cast6_cbc_enc_tv_template[] = { + { /* Generated from TF test vectors */ + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" +@@ -14117,7 +14266,7 @@ static struct cipher_testvec cast6_cbc_e + }, + }; + +-static struct cipher_testvec cast6_cbc_dec_tv_template[] = { ++static const struct cipher_testvec cast6_cbc_dec_tv_template[] = { + { /* Generated from TF test vectors */ + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" +@@ -14258,7 +14407,7 @@ static struct cipher_testvec cast6_cbc_d + }, + }; + +-static struct cipher_testvec cast6_ctr_enc_tv_template[] = { ++static const struct cipher_testvec cast6_ctr_enc_tv_template[] = { + { /* Generated from TF test vectors */ + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" +@@ -14415,7 +14564,7 @@ static struct cipher_testvec cast6_ctr_e + }, + }; + +-static struct cipher_testvec cast6_ctr_dec_tv_template[] = { ++static const struct cipher_testvec cast6_ctr_dec_tv_template[] = { + { /* Generated from TF test vectors */ + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" +@@ -14572,7 +14721,7 @@ static struct cipher_testvec cast6_ctr_d + }, + }; + +-static struct cipher_testvec cast6_lrw_enc_tv_template[] = { ++static const struct cipher_testvec cast6_lrw_enc_tv_template[] = { + { /* Generated from TF test vectors */ + .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c" + "\x23\x84\xcb\x1c\x77\xd6\x19\x5d" +@@ -14719,7 +14868,7 @@ static struct cipher_testvec cast6_lrw_e + }, + }; + +-static struct cipher_testvec cast6_lrw_dec_tv_template[] = { ++static const struct cipher_testvec cast6_lrw_dec_tv_template[] = { + { /* Generated from TF test vectors */ + 
.key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c" + "\x23\x84\xcb\x1c\x77\xd6\x19\x5d" +@@ -14866,7 +15015,7 @@ static struct cipher_testvec cast6_lrw_d + }, + }; + +-static struct cipher_testvec cast6_xts_enc_tv_template[] = { ++static const struct cipher_testvec cast6_xts_enc_tv_template[] = { + { /* Generated from TF test vectors */ + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" +@@ -15015,7 +15164,7 @@ static struct cipher_testvec cast6_xts_e + }, + }; + +-static struct cipher_testvec cast6_xts_dec_tv_template[] = { ++static const struct cipher_testvec cast6_xts_dec_tv_template[] = { + { /* Generated from TF test vectors */ + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" +@@ -15168,39 +15317,7 @@ static struct cipher_testvec cast6_xts_d + /* + * AES test vectors. + */ +-#define AES_ENC_TEST_VECTORS 4 +-#define AES_DEC_TEST_VECTORS 4 +-#define AES_CBC_ENC_TEST_VECTORS 5 +-#define AES_CBC_DEC_TEST_VECTORS 5 +-#define HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2 +-#define HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2 +-#define HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC 2 +-#define HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC 2 +-#define HMAC_SHA1_AES_CBC_ENC_TEST_VEC 7 +-#define HMAC_SHA256_AES_CBC_ENC_TEST_VEC 7 +-#define HMAC_SHA512_AES_CBC_ENC_TEST_VEC 7 +-#define AES_LRW_ENC_TEST_VECTORS 8 +-#define AES_LRW_DEC_TEST_VECTORS 8 +-#define AES_XTS_ENC_TEST_VECTORS 5 +-#define AES_XTS_DEC_TEST_VECTORS 5 +-#define AES_CTR_ENC_TEST_VECTORS 5 +-#define AES_CTR_DEC_TEST_VECTORS 5 +-#define AES_OFB_ENC_TEST_VECTORS 1 +-#define AES_OFB_DEC_TEST_VECTORS 1 +-#define AES_CTR_3686_ENC_TEST_VECTORS 7 +-#define AES_CTR_3686_DEC_TEST_VECTORS 6 +-#define AES_GCM_ENC_TEST_VECTORS 9 +-#define AES_GCM_DEC_TEST_VECTORS 8 +-#define AES_GCM_4106_ENC_TEST_VECTORS 23 +-#define AES_GCM_4106_DEC_TEST_VECTORS 23 +-#define AES_GCM_4543_ENC_TEST_VECTORS 1 +-#define AES_GCM_4543_DEC_TEST_VECTORS 2 +-#define AES_CCM_ENC_TEST_VECTORS 8 +-#define AES_CCM_DEC_TEST_VECTORS 7 +-#define AES_CCM_4309_ENC_TEST_VECTORS 7 +-#define AES_CCM_4309_DEC_TEST_VECTORS 10 +- +-static struct cipher_testvec aes_enc_tv_template[] = { ++static const struct cipher_testvec aes_enc_tv_template[] = { + { /* From FIPS-197 */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", +@@ -15372,7 +15489,7 @@ static struct cipher_testvec aes_enc_tv_ + }, + }; + +-static struct cipher_testvec aes_dec_tv_template[] = { ++static const struct cipher_testvec aes_dec_tv_template[] = { + { /* From FIPS-197 */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", +@@ -15544,7 +15661,7 @@ static struct cipher_testvec aes_dec_tv_ + }, + }; + +-static struct cipher_testvec aes_cbc_enc_tv_template[] = { ++static const struct cipher_testvec aes_cbc_enc_tv_template[] = { + { /* From RFC 3602 */ + .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b" + "\x51\x2e\x03\xd5\x34\x12\x00\x06", +@@ -15766,7 +15883,7 @@ static struct cipher_testvec aes_cbc_enc + }, + }; + +-static struct cipher_testvec aes_cbc_dec_tv_template[] = { ++static const struct cipher_testvec aes_cbc_dec_tv_template[] = { + { /* From RFC 3602 */ + .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b" + "\x51\x2e\x03\xd5\x34\x12\x00\x06", +@@ -15988,7 +16105,7 @@ static struct cipher_testvec aes_cbc_dec + }, + }; + +-static struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = { ++static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = { + { /* Input data from RFC 2410 Case 1 */ + #ifdef 
__LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ +@@ -16030,7 +16147,7 @@ static struct aead_testvec hmac_md5_ecb_ + }, + }; + +-static struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = { ++static const struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = { + { + #ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ +@@ -16072,7 +16189,7 @@ static struct aead_testvec hmac_md5_ecb_ + }, + }; + +-static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { ++static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { + { /* RFC 3602 Case 1 */ + #ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ +@@ -16341,7 +16458,7 @@ static struct aead_testvec hmac_sha1_aes + }, + }; + +-static struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = { ++static const struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = { + { /* Input data from RFC 2410 Case 1 */ + #ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ +@@ -16387,7 +16504,7 @@ static struct aead_testvec hmac_sha1_ecb + }, + }; + +-static struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = { ++static const struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = { + { + #ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ +@@ -16433,7 +16550,7 @@ static struct aead_testvec hmac_sha1_ecb + }, + }; + +-static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { ++static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { + { /* RFC 3602 Case 1 */ + #ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ +@@ -16716,7 +16833,7 @@ static struct aead_testvec hmac_sha256_a + }, + }; + +-static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { ++static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { + { /* RFC 3602 Case 1 */ + #ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ +@@ -17055,9 +17172,7 @@ static struct aead_testvec hmac_sha512_a + }, + }; + +-#define HMAC_SHA1_DES_CBC_ENC_TEST_VEC 1 +- +-static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = { ++static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = { + { /*Generated with cryptopp*/ + #ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ +@@ -17116,9 +17231,7 @@ static struct aead_testvec hmac_sha1_des + }, + }; + +-#define HMAC_SHA224_DES_CBC_ENC_TEST_VEC 1 +- +-static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = { ++static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = { + { /*Generated with cryptopp*/ + #ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ +@@ -17177,9 +17290,7 @@ static struct aead_testvec hmac_sha224_d + }, + }; + +-#define HMAC_SHA256_DES_CBC_ENC_TEST_VEC 1 +- +-static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = { ++static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = { + { /*Generated with cryptopp*/ + #ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ +@@ -17240,9 +17351,7 @@ static struct aead_testvec hmac_sha256_d + }, + }; + +-#define HMAC_SHA384_DES_CBC_ENC_TEST_VEC 1 +- +-static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = { ++static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = { + { /*Generated with cryptopp*/ + #ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ +@@ -17307,9 +17416,7 @@ static struct aead_testvec hmac_sha384_d + }, + }; + +-#define HMAC_SHA512_DES_CBC_ENC_TEST_VEC 1 +- +-static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = { ++static const struct 
aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = { + { /*Generated with cryptopp*/ + #ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ +@@ -17378,9 +17485,7 @@ static struct aead_testvec hmac_sha512_d + }, + }; + +-#define HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC 1 +- +-static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = { ++static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = { + { /*Generated with cryptopp*/ + #ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ +@@ -17441,9 +17546,7 @@ static struct aead_testvec hmac_sha1_des + }, + }; + +-#define HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC 1 +- +-static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = { ++static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = { + { /*Generated with cryptopp*/ + #ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ +@@ -17504,9 +17607,7 @@ static struct aead_testvec hmac_sha224_d + }, + }; + +-#define HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC 1 +- +-static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = { ++static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = { + { /*Generated with cryptopp*/ + #ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ +@@ -17569,9 +17670,7 @@ static struct aead_testvec hmac_sha256_d + }, + }; + +-#define HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC 1 +- +-static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = { ++static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = { + { /*Generated with cryptopp*/ + #ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ +@@ -17638,9 +17737,7 @@ static struct aead_testvec hmac_sha384_d + }, + }; + +-#define HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC 1 +- +-static struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = { ++static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = { + { /*Generated with cryptopp*/ + #ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ +@@ -17711,7 +17808,7 @@ static struct aead_testvec hmac_sha512_d + }, + }; + +-static struct cipher_testvec aes_lrw_enc_tv_template[] = { ++static const struct cipher_testvec aes_lrw_enc_tv_template[] = { + /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */ + { /* LRW-32-AES 1 */ + .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d" +@@ -17964,7 +18061,7 @@ static struct cipher_testvec aes_lrw_enc + } + }; + +-static struct cipher_testvec aes_lrw_dec_tv_template[] = { ++static const struct cipher_testvec aes_lrw_dec_tv_template[] = { + /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */ + /* same as enc vectors with input and result reversed */ + { /* LRW-32-AES 1 */ +@@ -18218,7 +18315,7 @@ static struct cipher_testvec aes_lrw_dec + } + }; + +-static struct cipher_testvec aes_xts_enc_tv_template[] = { ++static const struct cipher_testvec aes_xts_enc_tv_template[] = { + /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */ + { /* XTS-AES 1 */ + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" +@@ -18561,7 +18658,7 @@ static struct cipher_testvec aes_xts_enc + } + }; + +-static struct cipher_testvec aes_xts_dec_tv_template[] = { ++static const struct cipher_testvec aes_xts_dec_tv_template[] = { + /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */ + { /* XTS-AES 1 */ + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" +@@ -18905,7 +19002,7 @@ static struct cipher_testvec aes_xts_dec + }; + + +-static struct cipher_testvec aes_ctr_enc_tv_template[] = { ++static const struct cipher_testvec 
aes_ctr_enc_tv_template[] = { + { /* From NIST Special Publication 800-38A, Appendix F.5 */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", +@@ -19260,7 +19357,7 @@ static struct cipher_testvec aes_ctr_enc + }, + }; + +-static struct cipher_testvec aes_ctr_dec_tv_template[] = { ++static const struct cipher_testvec aes_ctr_dec_tv_template[] = { + { /* From NIST Special Publication 800-38A, Appendix F.5 */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", +@@ -19615,7 +19712,7 @@ static struct cipher_testvec aes_ctr_dec + }, + }; + +-static struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = { ++static const struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = { + { /* From RFC 3686 */ + .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc" + "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e" +@@ -20747,7 +20844,7 @@ static struct cipher_testvec aes_ctr_rfc + }, + }; + +-static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = { ++static const struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = { + { /* From RFC 3686 */ + .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc" + "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e" +@@ -20838,7 +20935,7 @@ static struct cipher_testvec aes_ctr_rfc + }, + }; + +-static struct cipher_testvec aes_ofb_enc_tv_template[] = { ++static const struct cipher_testvec aes_ofb_enc_tv_template[] = { + /* From NIST Special Publication 800-38A, Appendix F.5 */ + { + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" +@@ -20867,7 +20964,7 @@ static struct cipher_testvec aes_ofb_enc + } + }; + +-static struct cipher_testvec aes_ofb_dec_tv_template[] = { ++static const struct cipher_testvec aes_ofb_dec_tv_template[] = { + /* From NIST Special Publication 800-38A, Appendix F.5 */ + { + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" +@@ -20896,7 +20993,7 @@ static struct cipher_testvec aes_ofb_dec + } + }; + +-static struct aead_testvec aes_gcm_enc_tv_template[] = { ++static const struct aead_testvec aes_gcm_enc_tv_template[] = { + { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */ + .key = zeroed_string, + .klen = 16, +@@ -21056,7 +21153,7 @@ static struct aead_testvec aes_gcm_enc_t + } + }; + +-static struct aead_testvec aes_gcm_dec_tv_template[] = { ++static const struct aead_testvec aes_gcm_dec_tv_template[] = { + { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */ + .key = zeroed_string, + .klen = 32, +@@ -21258,7 +21355,7 @@ static struct aead_testvec aes_gcm_dec_t + } + }; + +-static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { ++static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { + { /* Generated using Crypto++ */ + .key = zeroed_string, + .klen = 20, +@@ -21871,7 +21968,7 @@ static struct aead_testvec aes_gcm_rfc41 + } + }; + +-static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { ++static const struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { + { /* Generated using Crypto++ */ + .key = zeroed_string, + .klen = 20, +@@ -22485,7 +22582,7 @@ static struct aead_testvec aes_gcm_rfc41 + } + }; + +-static struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = { ++static const struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = { + { /* From draft-mcgrew-gcm-test-01 */ + .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda" + "\x90\x6a\xc7\x3c\x36\x13\xa6\x34" +@@ -22516,7 +22613,7 @@ static struct aead_testvec aes_gcm_rfc45 + } + }; + +-static struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = { ++static const struct aead_testvec 
aes_gcm_rfc4543_dec_tv_template[] = { + { /* From draft-mcgrew-gcm-test-01 */ + .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda" + "\x90\x6a\xc7\x3c\x36\x13\xa6\x34" +@@ -22575,7 +22672,7 @@ static struct aead_testvec aes_gcm_rfc45 + }, + }; + +-static struct aead_testvec aes_ccm_enc_tv_template[] = { ++static const struct aead_testvec aes_ccm_enc_tv_template[] = { + { /* From RFC 3610 */ + .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", +@@ -22859,7 +22956,7 @@ static struct aead_testvec aes_ccm_enc_t + } + }; + +-static struct aead_testvec aes_ccm_dec_tv_template[] = { ++static const struct aead_testvec aes_ccm_dec_tv_template[] = { + { /* From RFC 3610 */ + .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", +@@ -23191,7 +23288,7 @@ static struct aead_testvec aes_ccm_dec_t + * These vectors are copied/generated from the ones for rfc4106 with + * the key truncated by one byte.. + */ +-static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = { ++static const struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = { + { /* Generated using Crypto++ */ + .key = zeroed_string, + .klen = 19, +@@ -23804,7 +23901,7 @@ static struct aead_testvec aes_ccm_rfc43 + } + }; + +-static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = { ++static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = { + { /* Generated using Crypto++ */ + .key = zeroed_string, + .klen = 19, +@@ -24420,9 +24517,7 @@ static struct aead_testvec aes_ccm_rfc43 + /* + * ChaCha20-Poly1305 AEAD test vectors from RFC7539 2.8.2./A.5. + */ +-#define RFC7539_ENC_TEST_VECTORS 2 +-#define RFC7539_DEC_TEST_VECTORS 2 +-static struct aead_testvec rfc7539_enc_tv_template[] = { ++static const struct aead_testvec rfc7539_enc_tv_template[] = { + { + .key = "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" +@@ -24554,7 +24649,7 @@ static struct aead_testvec rfc7539_enc_t + }, + }; + +-static struct aead_testvec rfc7539_dec_tv_template[] = { ++static const struct aead_testvec rfc7539_dec_tv_template[] = { + { + .key = "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" +@@ -24689,9 +24784,7 @@ static struct aead_testvec rfc7539_dec_t + /* + * draft-irtf-cfrg-chacha20-poly1305 + */ +-#define RFC7539ESP_DEC_TEST_VECTORS 1 +-#define RFC7539ESP_ENC_TEST_VECTORS 1 +-static struct aead_testvec rfc7539esp_enc_tv_template[] = { ++static const struct aead_testvec rfc7539esp_enc_tv_template[] = { + { + .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" + "\xf3\x33\x88\x86\x04\xf6\xb5\xf0" +@@ -24779,7 +24872,7 @@ static struct aead_testvec rfc7539esp_en + }, + }; + +-static struct aead_testvec rfc7539esp_dec_tv_template[] = { ++static const struct aead_testvec rfc7539esp_dec_tv_template[] = { + { + .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" + "\xf3\x33\x88\x86\x04\xf6\xb5\xf0" +@@ -24875,7 +24968,7 @@ static struct aead_testvec rfc7539esp_de + * semiblock of the ciphertext from the test vector. For decryption, iv is + * the first semiblock of the ciphertext. 
+ */ +-static struct cipher_testvec aes_kw_enc_tv_template[] = { ++static const struct cipher_testvec aes_kw_enc_tv_template[] = { + { + .key = "\x75\x75\xda\x3a\x93\x60\x7c\xc2" + "\xbf\xd8\xce\xc7\xaa\xdf\xd9\xa6", +@@ -24890,7 +24983,7 @@ static struct cipher_testvec aes_kw_enc_ + }, + }; + +-static struct cipher_testvec aes_kw_dec_tv_template[] = { ++static const struct cipher_testvec aes_kw_dec_tv_template[] = { + { + .key = "\x80\xaa\x99\x73\x27\xa4\x80\x6b" + "\x6a\x7a\x41\xa5\x2b\x86\xc3\x71" +@@ -24913,9 +25006,7 @@ static struct cipher_testvec aes_kw_dec_ + * http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf + * Only AES-128 is supported at this time. + */ +-#define ANSI_CPRNG_AES_TEST_VECTORS 6 +- +-static struct cprng_testvec ansi_cprng_aes_tv_template[] = { ++static const struct cprng_testvec ansi_cprng_aes_tv_template[] = { + { + .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42" + "\xed\x06\x1c\xab\xb8\xd4\x62\x02", +@@ -25011,7 +25102,7 @@ static struct cprng_testvec ansi_cprng_a + * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and + * w/o personalization string, w/ and w/o additional input string). + */ +-static struct drbg_testvec drbg_pr_sha256_tv_template[] = { ++static const struct drbg_testvec drbg_pr_sha256_tv_template[] = { + { + .entropy = (unsigned char *) + "\x72\x88\x4c\xcd\x6c\x85\x57\x70\xf7\x0b\x8b\x86" +@@ -25169,7 +25260,7 @@ static struct drbg_testvec drbg_pr_sha25 + }, + }; + +-static struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = { ++static const struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = { + { + .entropy = (unsigned char *) + "\x99\x69\xe5\x4b\x47\x03\xff\x31\x78\x5b\x87\x9a" +@@ -25327,7 +25418,7 @@ static struct drbg_testvec drbg_pr_hmac_ + }, + }; + +-static struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = { ++static const struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = { + { + .entropy = (unsigned char *) + "\xd1\x44\xc6\x61\x81\x6d\xca\x9d\x15\x28\x8a\x42" +@@ -25451,7 +25542,7 @@ static struct drbg_testvec drbg_pr_ctr_a + * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and + * w/o personalization string, w/ and w/o additional input string). 
+ */ +-static struct drbg_testvec drbg_nopr_sha256_tv_template[] = { ++static const struct drbg_testvec drbg_nopr_sha256_tv_template[] = { + { + .entropy = (unsigned char *) + "\xa6\x5a\xd0\xf3\x45\xdb\x4e\x0e\xff\xe8\x75\xc3" +@@ -25573,7 +25664,7 @@ static struct drbg_testvec drbg_nopr_sha + }, + }; + +-static struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = { ++static const struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = { + { + .entropy = (unsigned char *) + "\xca\x85\x19\x11\x34\x93\x84\xbf\xfe\x89\xde\x1c" +@@ -25695,7 +25786,7 @@ static struct drbg_testvec drbg_nopr_hma + }, + }; + +-static struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = { ++static const struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = { + { + .entropy = (unsigned char *) + "\xc3\x5c\x2f\xa2\xa8\x9d\x52\xa1\x1f\xa3\x2a\xa9" +@@ -25719,7 +25810,7 @@ static struct drbg_testvec drbg_nopr_ctr + }, + }; + +-static struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = { ++static const struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = { + { + .entropy = (unsigned char *) + "\x36\x40\x19\x40\xfa\x8b\x1f\xba\x91\xa1\x66\x1f" +@@ -25743,7 +25834,7 @@ static struct drbg_testvec drbg_nopr_ctr + }, + }; + +-static struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = { ++static const struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = { + { + .entropy = (unsigned char *) + "\x87\xe1\xc5\x32\x99\x7f\x57\xa3\x5c\x28\x6d\xe8" +@@ -25832,14 +25923,7 @@ static struct drbg_testvec drbg_nopr_ctr + }; + + /* Cast5 test vectors from RFC 2144 */ +-#define CAST5_ENC_TEST_VECTORS 4 +-#define CAST5_DEC_TEST_VECTORS 4 +-#define CAST5_CBC_ENC_TEST_VECTORS 1 +-#define CAST5_CBC_DEC_TEST_VECTORS 1 +-#define CAST5_CTR_ENC_TEST_VECTORS 2 +-#define CAST5_CTR_DEC_TEST_VECTORS 2 +- +-static struct cipher_testvec cast5_enc_tv_template[] = { ++static const struct cipher_testvec cast5_enc_tv_template[] = { + { + .key = "\x01\x23\x45\x67\x12\x34\x56\x78" + "\x23\x45\x67\x89\x34\x56\x78\x9a", +@@ -26000,7 +26084,7 @@ static struct cipher_testvec cast5_enc_t + }, + }; + +-static struct cipher_testvec cast5_dec_tv_template[] = { ++static const struct cipher_testvec cast5_dec_tv_template[] = { + { + .key = "\x01\x23\x45\x67\x12\x34\x56\x78" + "\x23\x45\x67\x89\x34\x56\x78\x9a", +@@ -26161,7 +26245,7 @@ static struct cipher_testvec cast5_dec_t + }, + }; + +-static struct cipher_testvec cast5_cbc_enc_tv_template[] = { ++static const struct cipher_testvec cast5_cbc_enc_tv_template[] = { + { /* Generated from TF test vectors */ + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A", +@@ -26299,7 +26383,7 @@ static struct cipher_testvec cast5_cbc_e + }, + }; + +-static struct cipher_testvec cast5_cbc_dec_tv_template[] = { ++static const struct cipher_testvec cast5_cbc_dec_tv_template[] = { + { /* Generated from TF test vectors */ + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A", +@@ -26437,7 +26521,7 @@ static struct cipher_testvec cast5_cbc_d + }, + }; + +-static struct cipher_testvec cast5_ctr_enc_tv_template[] = { ++static const struct cipher_testvec cast5_ctr_enc_tv_template[] = { + { /* Generated from TF test vectors */ + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A", +@@ -26588,7 +26672,7 @@ static struct cipher_testvec cast5_ctr_e + }, + }; + +-static struct cipher_testvec cast5_ctr_dec_tv_template[] = { ++static const struct cipher_testvec cast5_ctr_dec_tv_template[] = { + { /* Generated from TF test vectors */ + 
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A", +@@ -26742,10 +26826,7 @@ static struct cipher_testvec cast5_ctr_d + /* + * ARC4 test vectors from OpenSSL + */ +-#define ARC4_ENC_TEST_VECTORS 7 +-#define ARC4_DEC_TEST_VECTORS 7 +- +-static struct cipher_testvec arc4_enc_tv_template[] = { ++static const struct cipher_testvec arc4_enc_tv_template[] = { + { + .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", + .klen = 8, +@@ -26811,7 +26892,7 @@ static struct cipher_testvec arc4_enc_tv + }, + }; + +-static struct cipher_testvec arc4_dec_tv_template[] = { ++static const struct cipher_testvec arc4_dec_tv_template[] = { + { + .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", + .klen = 8, +@@ -26880,10 +26961,7 @@ static struct cipher_testvec arc4_dec_tv + /* + * TEA test vectors + */ +-#define TEA_ENC_TEST_VECTORS 4 +-#define TEA_DEC_TEST_VECTORS 4 +- +-static struct cipher_testvec tea_enc_tv_template[] = { ++static const struct cipher_testvec tea_enc_tv_template[] = { + { + .key = zeroed_string, + .klen = 16, +@@ -26926,7 +27004,7 @@ static struct cipher_testvec tea_enc_tv_ + } + }; + +-static struct cipher_testvec tea_dec_tv_template[] = { ++static const struct cipher_testvec tea_dec_tv_template[] = { + { + .key = zeroed_string, + .klen = 16, +@@ -26972,10 +27050,7 @@ static struct cipher_testvec tea_dec_tv_ + /* + * XTEA test vectors + */ +-#define XTEA_ENC_TEST_VECTORS 4 +-#define XTEA_DEC_TEST_VECTORS 4 +- +-static struct cipher_testvec xtea_enc_tv_template[] = { ++static const struct cipher_testvec xtea_enc_tv_template[] = { + { + .key = zeroed_string, + .klen = 16, +@@ -27018,7 +27093,7 @@ static struct cipher_testvec xtea_enc_tv + } + }; + +-static struct cipher_testvec xtea_dec_tv_template[] = { ++static const struct cipher_testvec xtea_dec_tv_template[] = { + { + .key = zeroed_string, + .klen = 16, +@@ -27064,10 +27139,7 @@ static struct cipher_testvec xtea_dec_tv + /* + * KHAZAD test vectors. + */ +-#define KHAZAD_ENC_TEST_VECTORS 5 +-#define KHAZAD_DEC_TEST_VECTORS 5 +- +-static struct cipher_testvec khazad_enc_tv_template[] = { ++static const struct cipher_testvec khazad_enc_tv_template[] = { + { + .key = "\x80\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", +@@ -27113,7 +27185,7 @@ static struct cipher_testvec khazad_enc_ + }, + }; + +-static struct cipher_testvec khazad_dec_tv_template[] = { ++static const struct cipher_testvec khazad_dec_tv_template[] = { + { + .key = "\x80\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", +@@ -27163,12 +27235,7 @@ static struct cipher_testvec khazad_dec_ + * Anubis test vectors. 
+ */ + +-#define ANUBIS_ENC_TEST_VECTORS 5 +-#define ANUBIS_DEC_TEST_VECTORS 5 +-#define ANUBIS_CBC_ENC_TEST_VECTORS 2 +-#define ANUBIS_CBC_DEC_TEST_VECTORS 2 +- +-static struct cipher_testvec anubis_enc_tv_template[] = { ++static const struct cipher_testvec anubis_enc_tv_template[] = { + { + .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe" + "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe", +@@ -27231,7 +27298,7 @@ static struct cipher_testvec anubis_enc_ + }, + }; + +-static struct cipher_testvec anubis_dec_tv_template[] = { ++static const struct cipher_testvec anubis_dec_tv_template[] = { + { + .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe" + "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe", +@@ -27294,7 +27361,7 @@ static struct cipher_testvec anubis_dec_ + }, + }; + +-static struct cipher_testvec anubis_cbc_enc_tv_template[] = { ++static const struct cipher_testvec anubis_cbc_enc_tv_template[] = { + { + .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe" + "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe", +@@ -27329,7 +27396,7 @@ static struct cipher_testvec anubis_cbc_ + }, + }; + +-static struct cipher_testvec anubis_cbc_dec_tv_template[] = { ++static const struct cipher_testvec anubis_cbc_dec_tv_template[] = { + { + .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe" + "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe", +@@ -27367,10 +27434,7 @@ static struct cipher_testvec anubis_cbc_ + /* + * XETA test vectors + */ +-#define XETA_ENC_TEST_VECTORS 4 +-#define XETA_DEC_TEST_VECTORS 4 +- +-static struct cipher_testvec xeta_enc_tv_template[] = { ++static const struct cipher_testvec xeta_enc_tv_template[] = { + { + .key = zeroed_string, + .klen = 16, +@@ -27413,7 +27477,7 @@ static struct cipher_testvec xeta_enc_tv + } + }; + +-static struct cipher_testvec xeta_dec_tv_template[] = { ++static const struct cipher_testvec xeta_dec_tv_template[] = { + { + .key = zeroed_string, + .klen = 16, +@@ -27459,10 +27523,7 @@ static struct cipher_testvec xeta_dec_tv + /* + * FCrypt test vectors + */ +-#define FCRYPT_ENC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_enc_tv_template) +-#define FCRYPT_DEC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_dec_tv_template) +- +-static struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = { ++static const struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = { + { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */ + .key = "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 8, +@@ -27523,7 +27584,7 @@ static struct cipher_testvec fcrypt_pcbc + } + }; + +-static struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = { ++static const struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = { + { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */ + .key = "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 8, +@@ -27587,18 +27648,7 @@ static struct cipher_testvec fcrypt_pcbc + /* + * CAMELLIA test vectors. 
+ */ +-#define CAMELLIA_ENC_TEST_VECTORS 4 +-#define CAMELLIA_DEC_TEST_VECTORS 4 +-#define CAMELLIA_CBC_ENC_TEST_VECTORS 3 +-#define CAMELLIA_CBC_DEC_TEST_VECTORS 3 +-#define CAMELLIA_CTR_ENC_TEST_VECTORS 2 +-#define CAMELLIA_CTR_DEC_TEST_VECTORS 2 +-#define CAMELLIA_LRW_ENC_TEST_VECTORS 8 +-#define CAMELLIA_LRW_DEC_TEST_VECTORS 8 +-#define CAMELLIA_XTS_ENC_TEST_VECTORS 5 +-#define CAMELLIA_XTS_DEC_TEST_VECTORS 5 +- +-static struct cipher_testvec camellia_enc_tv_template[] = { ++static const struct cipher_testvec camellia_enc_tv_template[] = { + { + .key = "\x01\x23\x45\x67\x89\xab\xcd\xef" + "\xfe\xdc\xba\x98\x76\x54\x32\x10", +@@ -27898,7 +27948,7 @@ static struct cipher_testvec camellia_en + }, + }; + +-static struct cipher_testvec camellia_dec_tv_template[] = { ++static const struct cipher_testvec camellia_dec_tv_template[] = { + { + .key = "\x01\x23\x45\x67\x89\xab\xcd\xef" + "\xfe\xdc\xba\x98\x76\x54\x32\x10", +@@ -28198,7 +28248,7 @@ static struct cipher_testvec camellia_de + }, + }; + +-static struct cipher_testvec camellia_cbc_enc_tv_template[] = { ++static const struct cipher_testvec camellia_cbc_enc_tv_template[] = { + { + .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b" + "\x51\x2e\x03\xd5\x34\x12\x00\x06", +@@ -28494,7 +28544,7 @@ static struct cipher_testvec camellia_cb + }, + }; + +-static struct cipher_testvec camellia_cbc_dec_tv_template[] = { ++static const struct cipher_testvec camellia_cbc_dec_tv_template[] = { + { + .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b" + "\x51\x2e\x03\xd5\x34\x12\x00\x06", +@@ -28790,7 +28840,7 @@ static struct cipher_testvec camellia_cb + }, + }; + +-static struct cipher_testvec camellia_ctr_enc_tv_template[] = { ++static const struct cipher_testvec camellia_ctr_enc_tv_template[] = { + { /* Generated with Crypto++ */ + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" +@@ -29457,7 +29507,7 @@ static struct cipher_testvec camellia_ct + }, + }; + +-static struct cipher_testvec camellia_ctr_dec_tv_template[] = { ++static const struct cipher_testvec camellia_ctr_dec_tv_template[] = { + { /* Generated with Crypto++ */ + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" +@@ -30124,7 +30174,7 @@ static struct cipher_testvec camellia_ct + }, + }; + +-static struct cipher_testvec camellia_lrw_enc_tv_template[] = { ++static const struct cipher_testvec camellia_lrw_enc_tv_template[] = { + /* Generated from AES-LRW test vectors */ + { + .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d" +@@ -30376,7 +30426,7 @@ static struct cipher_testvec camellia_lr + }, + }; + +-static struct cipher_testvec camellia_lrw_dec_tv_template[] = { ++static const struct cipher_testvec camellia_lrw_dec_tv_template[] = { + /* Generated from AES-LRW test vectors */ + /* same as enc vectors with input and result reversed */ + { +@@ -30629,7 +30679,7 @@ static struct cipher_testvec camellia_lr + }, + }; + +-static struct cipher_testvec camellia_xts_enc_tv_template[] = { ++static const struct cipher_testvec camellia_xts_enc_tv_template[] = { + /* Generated from AES-XTS test vectors */ + { + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" +@@ -30971,7 +31021,7 @@ static struct cipher_testvec camellia_xt + }, + }; + +-static struct cipher_testvec camellia_xts_dec_tv_template[] = { ++static const struct cipher_testvec camellia_xts_dec_tv_template[] = { + /* Generated from AES-XTS test vectors */ + /* same as enc vectors with input and result reversed */ + { +@@ -31317,10 +31367,7 @@ static struct cipher_testvec camellia_xt + /* + * SEED test vectors + 
*/ +-#define SEED_ENC_TEST_VECTORS 4 +-#define SEED_DEC_TEST_VECTORS 4 +- +-static struct cipher_testvec seed_enc_tv_template[] = { ++static const struct cipher_testvec seed_enc_tv_template[] = { + { + .key = zeroed_string, + .klen = 16, +@@ -31362,7 +31409,7 @@ static struct cipher_testvec seed_enc_tv + } + }; + +-static struct cipher_testvec seed_dec_tv_template[] = { ++static const struct cipher_testvec seed_dec_tv_template[] = { + { + .key = zeroed_string, + .klen = 16, +@@ -31404,8 +31451,7 @@ static struct cipher_testvec seed_dec_tv + } + }; + +-#define SALSA20_STREAM_ENC_TEST_VECTORS 5 +-static struct cipher_testvec salsa20_stream_enc_tv_template[] = { ++static const struct cipher_testvec salsa20_stream_enc_tv_template[] = { + /* + * Testvectors from verified.test-vectors submitted to ECRYPT. + * They are truncated to size 39, 64, 111, 129 to test a variety +@@ -32574,8 +32620,7 @@ static struct cipher_testvec salsa20_str + }, + }; + +-#define CHACHA20_ENC_TEST_VECTORS 4 +-static struct cipher_testvec chacha20_enc_tv_template[] = { ++static const struct cipher_testvec chacha20_enc_tv_template[] = { + { /* RFC7539 A.2. Test Vector #1 */ + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" +@@ -33086,9 +33131,7 @@ static struct cipher_testvec chacha20_en + /* + * CTS (Cipher Text Stealing) mode tests + */ +-#define CTS_MODE_ENC_TEST_VECTORS 6 +-#define CTS_MODE_DEC_TEST_VECTORS 6 +-static struct cipher_testvec cts_mode_enc_tv_template[] = { ++static const struct cipher_testvec cts_mode_enc_tv_template[] = { + { /* from rfc3962 */ + .klen = 16, + .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20" +@@ -33190,7 +33233,7 @@ static struct cipher_testvec cts_mode_en + } + }; + +-static struct cipher_testvec cts_mode_dec_tv_template[] = { ++static const struct cipher_testvec cts_mode_dec_tv_template[] = { + { /* from rfc3962 */ + .klen = 16, + .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20" +@@ -33308,10 +33351,7 @@ struct comp_testvec { + * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL. + */ + +-#define DEFLATE_COMP_TEST_VECTORS 2 +-#define DEFLATE_DECOMP_TEST_VECTORS 2 +- +-static struct comp_testvec deflate_comp_tv_template[] = { ++static const struct comp_testvec deflate_comp_tv_template[] = { + { + .inlen = 70, + .outlen = 38, +@@ -33347,7 +33387,7 @@ static struct comp_testvec deflate_comp_ + }, + }; + +-static struct comp_testvec deflate_decomp_tv_template[] = { ++static const struct comp_testvec deflate_decomp_tv_template[] = { + { + .inlen = 122, + .outlen = 191, +@@ -33386,10 +33426,7 @@ static struct comp_testvec deflate_decom + /* + * LZO test vectors (null-terminated strings). 
+ */ +-#define LZO_COMP_TEST_VECTORS 2 +-#define LZO_DECOMP_TEST_VECTORS 2 +- +-static struct comp_testvec lzo_comp_tv_template[] = { ++static const struct comp_testvec lzo_comp_tv_template[] = { + { + .inlen = 70, + .outlen = 57, +@@ -33429,7 +33466,7 @@ static struct comp_testvec lzo_comp_tv_t + }, + }; + +-static struct comp_testvec lzo_decomp_tv_template[] = { ++static const struct comp_testvec lzo_decomp_tv_template[] = { + { + .inlen = 133, + .outlen = 159, +@@ -33472,7 +33509,7 @@ static struct comp_testvec lzo_decomp_tv + */ + #define MICHAEL_MIC_TEST_VECTORS 6 + +-static struct hash_testvec michael_mic_tv_template[] = { ++static const struct hash_testvec michael_mic_tv_template[] = { + { + .key = "\x00\x00\x00\x00\x00\x00\x00\x00", + .ksize = 8, +@@ -33520,9 +33557,7 @@ static struct hash_testvec michael_mic_t + /* + * CRC32 test vectors + */ +-#define CRC32_TEST_VECTORS 14 +- +-static struct hash_testvec crc32_tv_template[] = { ++static const struct hash_testvec crc32_tv_template[] = { + { + .key = "\x87\xa9\xcb\xed", + .ksize = 4, +@@ -33954,9 +33989,7 @@ static struct hash_testvec crc32_tv_temp + /* + * CRC32C test vectors + */ +-#define CRC32C_TEST_VECTORS 15 +- +-static struct hash_testvec crc32c_tv_template[] = { ++static const struct hash_testvec crc32c_tv_template[] = { + { + .psize = 0, + .digest = "\x00\x00\x00\x00", +@@ -34392,9 +34425,7 @@ static struct hash_testvec crc32c_tv_tem + /* + * Blakcifn CRC test vectors + */ +-#define BFIN_CRC_TEST_VECTORS 6 +- +-static struct hash_testvec bfin_crc_tv_template[] = { ++static const struct hash_testvec bfin_crc_tv_template[] = { + { + .psize = 0, + .digest = "\x00\x00\x00\x00", +@@ -34479,9 +34510,6 @@ static struct hash_testvec bfin_crc_tv_t + + }; + +-#define LZ4_COMP_TEST_VECTORS 1 +-#define LZ4_DECOMP_TEST_VECTORS 1 +- + static struct comp_testvec lz4_comp_tv_template[] = { + { + .inlen = 70, +@@ -34512,9 +34540,6 @@ static struct comp_testvec lz4_decomp_tv + }, + }; + +-#define LZ4HC_COMP_TEST_VECTORS 1 +-#define LZ4HC_DECOMP_TEST_VECTORS 1 +- + static struct comp_testvec lz4hc_comp_tv_template[] = { + { + .inlen = 70, +--- /dev/null ++++ b/crypto/tls.c +@@ -0,0 +1,607 @@ ++/* ++ * Copyright 2013 Freescale Semiconductor, Inc. ++ * Copyright 2017 NXP Semiconductor, Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the Free ++ * Software Foundation; either version 2 of the License, or (at your option) ++ * any later version. 
++ *
++ */
++
++#include <crypto/internal/aead.h>
++#include <crypto/internal/hash.h>
++#include <crypto/internal/skcipher.h>
++#include <crypto/authenc.h>
++#include <crypto/null.h>
++#include <crypto/scatterwalk.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/rtnetlink.h>
++
++struct tls_instance_ctx {
++ struct crypto_ahash_spawn auth;
++ struct crypto_skcipher_spawn enc;
++};
++
++struct crypto_tls_ctx {
++ unsigned int reqoff;
++ struct crypto_ahash *auth;
++ struct crypto_skcipher *enc;
++ struct crypto_skcipher *null;
++};
++
++struct tls_request_ctx {
++ /*
++ * cryptlen holds the payload length in the case of encryption or
++ * payload_len + icv_len + padding_len in case of decryption
++ */
++ unsigned int cryptlen;
++ /* working space for partial results */
++ struct scatterlist tmp[2];
++ struct scatterlist cipher[2];
++ struct scatterlist dst[2];
++ char tail[];
++};
++
++struct async_op {
++ struct completion completion;
++ int err;
++};
++
++static void tls_async_op_done(struct crypto_async_request *req, int err)
++{
++ struct async_op *areq = req->data;
++
++ if (err == -EINPROGRESS)
++ return;
++
++ areq->err = err;
++ complete(&areq->completion);
++}
++
++static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
++ unsigned int keylen)
++{
++ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
++ struct crypto_ahash *auth = ctx->auth;
++ struct crypto_skcipher *enc = ctx->enc;
++ struct crypto_authenc_keys keys;
++ int err = -EINVAL;
++
++ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
++ goto badkey;
++
++ crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
++ crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
++ CRYPTO_TFM_REQ_MASK);
++ err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
++ crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
++ CRYPTO_TFM_RES_MASK);
++
++ if (err)
++ goto out;
++
++ crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
++ crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) &
++ CRYPTO_TFM_REQ_MASK);
++ err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
++ crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) &
++ CRYPTO_TFM_RES_MASK);
++
++out:
++ return err;
++
++badkey:
++ crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ goto out;
++}
++
++/**
++ * crypto_tls_genicv - Calculate hmac digest for a TLS record
++ * @hash: (output) buffer to save the digest into
++ * @src: (input) scatterlist with the assoc and payload data
++ * @srclen: (input) size of the source buffer (assoclen + cryptlen)
++ * @req: (input) aead request
++ **/
++static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
++ unsigned int srclen, struct aead_request *req)
++{
++ struct crypto_aead *tls = crypto_aead_reqtfm(req);
++ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
++ struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
++ struct async_op ahash_op;
++ struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
++ unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
++ int err = -EBADMSG;
++
++ /* Bail out if the request assoc len is 0 */
++ if (!req->assoclen)
++ return err;
++
++ init_completion(&ahash_op.completion);
++
++ /* the hash transform to be executed comes from the original request */
++ ahash_request_set_tfm(ahreq, ctx->auth);
++ /* prepare the hash request with input data and result pointer */
++ ahash_request_set_crypt(ahreq, src, hash, srclen);
++ /* set the notifier for when the async hash function returns */
++ ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
++ tls_async_op_done, &ahash_op);
++
++ /* Calculate the digest on the given data. The result is put in hash */
++ err = crypto_ahash_digest(ahreq);
++ if (err == -EINPROGRESS) {
++ err = wait_for_completion_interruptible(&ahash_op.completion);
++ if (!err)
++ err = ahash_op.err;
++ }
++
++ return err;
++}
++
++/**
++ * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record
++ * @hash: (output) buffer to save the digest and padding into
++ * @phashlen: (output) the size of digest + padding
++ * @req: (input) aead request
++ **/
++static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
++ struct aead_request *req)
++{
++ struct crypto_aead *tls = crypto_aead_reqtfm(req);
++ unsigned int hash_size = crypto_aead_authsize(tls);
++ unsigned int block_size = crypto_aead_blocksize(tls);
++ unsigned int srclen = req->cryptlen + hash_size;
++ unsigned int icvlen = req->cryptlen + req->assoclen;
++ unsigned int padlen;
++ int err;
++
++ err = crypto_tls_genicv(hash, req->src, icvlen, req);
++ if (err)
++ goto out;
++
++ /* add padding after digest */
++ padlen = block_size - (srclen % block_size);
++ memset(hash + hash_size, padlen - 1, padlen);
++
++ *phashlen = hash_size + padlen;
++out:
++ return err;
++}
++
++static int crypto_tls_copy_data(struct aead_request *req,
++ struct scatterlist *src,
++ struct scatterlist *dst,
++ unsigned int len)
++{
++ struct crypto_aead *tls = crypto_aead_reqtfm(req);
++ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
++ SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
++
++ skcipher_request_set_tfm(skreq, ctx->null);
++ skcipher_request_set_callback(skreq, aead_request_flags(req),
++ NULL, NULL);
++ skcipher_request_set_crypt(skreq, src, dst, len, NULL);
++
++ return crypto_skcipher_encrypt(skreq);
++}
++
++static int crypto_tls_encrypt(struct aead_request *req)
++{
++ struct crypto_aead *tls = crypto_aead_reqtfm(req);
++ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
++ struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
++ struct skcipher_request *skreq;
++ struct scatterlist *cipher = treq_ctx->cipher;
++ struct scatterlist *tmp = treq_ctx->tmp;
++ struct scatterlist *sg, *src, *dst;
++ unsigned int cryptlen, phashlen;
++ u8 *hash = treq_ctx->tail;
++ int err;
++
++ /*
++ * The hash result is saved at the beginning of the tls request ctx
++ * and is aligned as required by the hash transform. Enough space was
++ * allocated in crypto_tls_init_tfm to accommodate the difference. The
++ * requests themselves start later at treq_ctx->tail + ctx->reqoff so
++ * the result is not overwritten by the second (cipher) request.
++ */
++ hash = (u8 *)ALIGN((unsigned long)hash +
++ crypto_ahash_alignmask(ctx->auth),
++ crypto_ahash_alignmask(ctx->auth) + 1);
++
++ /*
++ * STEP 1: create ICV together with necessary padding
++ */
++ err = crypto_tls_gen_padicv(hash, &phashlen, req);
++ if (err)
++ return err;
++
++ /*
++ * STEP 2: Hash and padding are combined with the payload
++ * depending on the form in which it arrives. Scatter tables must have
++ * at least one page of data before chaining with another table and
++ * can't have an empty data page. The following code addresses these
++ * requirements.
++ *
++ * If the payload is empty, only the hash is encrypted, otherwise the
++ * payload scatterlist is merged with the hash. A special merging case
++ * is when the payload has only one page of data. In that case the
++ * payload page is moved to another scatterlist and prepared there for
++ * encryption.
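++ *
++ * For example, with HMAC-SHA1 (hash_size = 20) and AES-CBC
++ * (block_size = 16), a 5-byte payload gives srclen = 25 in
++ * crypto_tls_gen_padicv, so padlen = 16 - (25 % 16) = 7 and the
++ * seven padding bytes each hold the value 6 (padlen - 1). The
++ * buffer handed to the cipher below is payload || ICV || padding,
++ * 5 + 20 + 7 = 32 bytes, i.e. two AES blocks.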
++ */
++ if (req->cryptlen) {
++ src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
++
++ sg_init_table(cipher, 2);
++ sg_set_buf(cipher + 1, hash, phashlen);
++
++ if (sg_is_last(src)) {
++ sg_set_page(cipher, sg_page(src), req->cryptlen,
++ src->offset);
++ src = cipher;
++ } else {
++ unsigned int rem_len = req->cryptlen;
++
++ for (sg = src; rem_len > sg->length; sg = sg_next(sg))
++ rem_len -= min(rem_len, sg->length);
++
++ sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
++ sg_chain(sg, 1, cipher);
++ }
++ } else {
++ sg_init_one(cipher, hash, phashlen);
++ src = cipher;
++ }
++
++ /**
++ * If src != dst copy the associated data from source to destination.
++ * In both cases fast-forward past the associated data in the dest.
++ */
++ if (req->src != req->dst) {
++ err = crypto_tls_copy_data(req, req->src, req->dst,
++ req->assoclen);
++ if (err)
++ return err;
++ }
++ dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
++
++ /*
++ * STEP 3: encrypt the frame and return the result
++ */
++ cryptlen = req->cryptlen + phashlen;
++
++ /*
++ * The hash and the cipher are applied at different times and their
++ * requests can use the same memory space without interference
++ */
++ skreq = (void *)(treq_ctx->tail + ctx->reqoff);
++ skcipher_request_set_tfm(skreq, ctx->enc);
++ skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
++ skcipher_request_set_callback(skreq, aead_request_flags(req),
++ req->base.complete, req->base.data);
++ /*
++ * Apply the cipher transform. The result will be in req->dst when the
++ * asynchronous call terminates
++ */
++ err = crypto_skcipher_encrypt(skreq);
++
++ return err;
++}
++
++static int crypto_tls_decrypt(struct aead_request *req)
++{
++ struct crypto_aead *tls = crypto_aead_reqtfm(req);
++ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
++ struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
++ unsigned int cryptlen = req->cryptlen;
++ unsigned int hash_size = crypto_aead_authsize(tls);
++ unsigned int block_size = crypto_aead_blocksize(tls);
++ struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
++ struct scatterlist *tmp = treq_ctx->tmp;
++ struct scatterlist *src, *dst;
++
++ u8 padding[255]; /* padding can be 0-255 bytes */
++ u8 pad_size;
++ u16 *len_field;
++ u8 *ihash, *hash = treq_ctx->tail;
++
++ int paderr = 0;
++ int err = -EINVAL;
++ int i;
++ struct async_op ciph_op;
++
++ /*
++ * Rule out bad packets. The input packet length must be at least one
++ * byte more than the hash_size
++ */
++ if (cryptlen <= hash_size || cryptlen % block_size)
++ goto out;
++
++ /*
++ * Step 1 - Decrypt the source. Fast-forward past the associated data
++ * to the encrypted data. The result will be overwritten in place so
++ * that the decrypted data will be adjacent to the associated data. The
++ * last step (computing the hash) will have its input data already
++ * prepared and ready to be accessed at req->src.
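++ *
++ * Steps 2 and 3 then undo the encrypt-side layout from the tail of
++ * the decrypted record (payload || ICV || padding): the last byte
++ * gives the padding size, the padding bytes are checked against it,
++ * and the ICV is compared with a freshly computed digest.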
++ */
++ src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
++ dst = src;
++
++ init_completion(&ciph_op.completion);
++ skcipher_request_set_tfm(skreq, ctx->enc);
++ skcipher_request_set_callback(skreq, aead_request_flags(req),
++ tls_async_op_done, &ciph_op);
++ skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
++ err = crypto_skcipher_decrypt(skreq);
++ if (err == -EINPROGRESS) {
++ err = wait_for_completion_interruptible(&ciph_op.completion);
++ if (!err)
++ err = ciph_op.err;
++ }
++ if (err)
++ goto out;
++
++ /*
++ * Step 2 - Verify padding
++ * Retrieve the last byte of the payload; this is the padding size.
++ */
++ cryptlen -= 1;
++ scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
++
++ /* RFC recommendation for invalid padding size. */
++ if (cryptlen < pad_size + hash_size) {
++ pad_size = 0;
++ paderr = -EBADMSG;
++ }
++ cryptlen -= pad_size;
++ scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
++
++ /* Padding content must be equal to pad_size. We verify it all */
++ for (i = 0; i < pad_size; i++)
++ if (padding[i] != pad_size)
++ paderr = -EBADMSG;
++
++ /*
++ * Step 3 - Verify hash
++ * Align the digest result as required by the hash transform. Enough
++ * space was allocated in crypto_tls_init_tfm
++ */
++ hash = (u8 *)ALIGN((unsigned long)hash +
++ crypto_ahash_alignmask(ctx->auth),
++ crypto_ahash_alignmask(ctx->auth) + 1);
++ /*
++ * Two bytes at the end of the associated data make the length field.
++ * It must be updated with the length of the cleartext message before
++ * the hash is calculated.
++ */
++ len_field = sg_virt(req->src) + req->assoclen - 2;
++ cryptlen -= hash_size;
++ *len_field = htons(cryptlen);
++
++ /* This is the hash from the decrypted packet. Save it for later */
++ ihash = hash + hash_size;
++ scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
++
++ /* Now compute and compare our ICV with the one from the packet */
++ err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
++ if (!err)
++ err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
++
++ if (req->src != req->dst) {
++ err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
++ req->assoclen);
++ if (err)
++ goto out;
++ }
++
++ /* return the first found error */
++ if (paderr)
++ err = paderr;
++
++out:
++ aead_request_complete(req, err);
++ return err;
++}
++
++static int crypto_tls_init_tfm(struct crypto_aead *tfm)
++{
++ struct aead_instance *inst = aead_alg_instance(tfm);
++ struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
++ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
++ struct crypto_ahash *auth;
++ struct crypto_skcipher *enc;
++ struct crypto_skcipher *null;
++ int err;
++
++ auth = crypto_spawn_ahash(&ictx->auth);
++ if (IS_ERR(auth))
++ return PTR_ERR(auth);
++
++ enc = crypto_spawn_skcipher(&ictx->enc);
++ err = PTR_ERR(enc);
++ if (IS_ERR(enc))
++ goto err_free_ahash;
++
++ null = crypto_get_default_null_skcipher2();
++ err = PTR_ERR(null);
++ if (IS_ERR(null))
++ goto err_free_skcipher;
++
++ ctx->auth = auth;
++ ctx->enc = enc;
++ ctx->null = null;
++
++ /*
++ * Allow enough space for two digests. The two digests will be compared
++ * during the decryption phase. One will come from the decrypted packet
++ * and the other will be calculated.
For encryption, one digest is ++ * padded (up to a cipher blocksize) and chained with the payload ++ */ ++ ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) + ++ crypto_ahash_alignmask(auth), ++ crypto_ahash_alignmask(auth) + 1) + ++ max(crypto_ahash_digestsize(auth), ++ crypto_skcipher_blocksize(enc)); ++ ++ crypto_aead_set_reqsize(tfm, ++ sizeof(struct tls_request_ctx) + ++ ctx->reqoff + ++ max_t(unsigned int, ++ crypto_ahash_reqsize(auth) + ++ sizeof(struct ahash_request), ++ crypto_skcipher_reqsize(enc) + ++ sizeof(struct skcipher_request))); ++ ++ return 0; ++ ++err_free_skcipher: ++ crypto_free_skcipher(enc); ++err_free_ahash: ++ crypto_free_ahash(auth); ++ return err; ++} ++ ++static void crypto_tls_exit_tfm(struct crypto_aead *tfm) ++{ ++ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm); ++ ++ crypto_free_ahash(ctx->auth); ++ crypto_free_skcipher(ctx->enc); ++ crypto_put_default_null_skcipher2(); ++} ++ ++static void crypto_tls_free(struct aead_instance *inst) ++{ ++ struct tls_instance_ctx *ctx = aead_instance_ctx(inst); ++ ++ crypto_drop_skcipher(&ctx->enc); ++ crypto_drop_ahash(&ctx->auth); ++ kfree(inst); ++} ++ ++static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb) ++{ ++ struct crypto_attr_type *algt; ++ struct aead_instance *inst; ++ struct hash_alg_common *auth; ++ struct crypto_alg *auth_base; ++ struct skcipher_alg *enc; ++ struct tls_instance_ctx *ctx; ++ const char *enc_name; ++ int err; ++ ++ algt = crypto_get_attr_type(tb); ++ if (IS_ERR(algt)) ++ return PTR_ERR(algt); ++ ++ if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) ++ return -EINVAL; ++ ++ auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, ++ CRYPTO_ALG_TYPE_AHASH_MASK | ++ crypto_requires_sync(algt->type, algt->mask)); ++ if (IS_ERR(auth)) ++ return PTR_ERR(auth); ++ ++ auth_base = &auth->base; ++ ++ enc_name = crypto_attr_alg_name(tb[2]); ++ err = PTR_ERR(enc_name); ++ if (IS_ERR(enc_name)) ++ goto out_put_auth; ++ ++ inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); ++ err = -ENOMEM; ++ if (!inst) ++ goto out_put_auth; ++ ++ ctx = aead_instance_ctx(inst); ++ ++ err = crypto_init_ahash_spawn(&ctx->auth, auth, ++ aead_crypto_instance(inst)); ++ if (err) ++ goto err_free_inst; ++ ++ crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst)); ++ err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, ++ crypto_requires_sync(algt->type, ++ algt->mask)); ++ if (err) ++ goto err_drop_auth; ++ ++ enc = crypto_spawn_skcipher_alg(&ctx->enc); ++ ++ err = -ENAMETOOLONG; ++ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, ++ "tls10(%s,%s)", auth_base->cra_name, ++ enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME) ++ goto err_drop_enc; ++ ++ if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, ++ "tls10(%s,%s)", auth_base->cra_driver_name, ++ enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) ++ goto err_drop_enc; ++ ++ inst->alg.base.cra_flags = (auth_base->cra_flags | ++ enc->base.cra_flags) & CRYPTO_ALG_ASYNC; ++ inst->alg.base.cra_priority = enc->base.cra_priority * 10 + ++ auth_base->cra_priority; ++ inst->alg.base.cra_blocksize = enc->base.cra_blocksize; ++ inst->alg.base.cra_alignmask = auth_base->cra_alignmask | ++ enc->base.cra_alignmask; ++ inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx); ++ ++ inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc); ++ inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc); ++ inst->alg.maxauthsize = auth->digestsize; ++ ++ inst->alg.init = crypto_tls_init_tfm; ++ inst->alg.exit = crypto_tls_exit_tfm; ++ ++ 
inst->alg.setkey = crypto_tls_setkey;
++ inst->alg.encrypt = crypto_tls_encrypt;
++ inst->alg.decrypt = crypto_tls_decrypt;
++
++ inst->free = crypto_tls_free;
++
++ err = aead_register_instance(tmpl, inst);
++ if (err)
++ goto err_drop_enc;
++
++out:
++ crypto_mod_put(auth_base);
++ return err;
++
++err_drop_enc:
++ crypto_drop_skcipher(&ctx->enc);
++err_drop_auth:
++ crypto_drop_ahash(&ctx->auth);
++err_free_inst:
++ kfree(inst);
++out_put_auth:
++ goto out;
++}
++
++static struct crypto_template crypto_tls_tmpl = {
++ .name = "tls10",
++ .create = crypto_tls_create,
++ .module = THIS_MODULE,
++};
++
++static int __init crypto_tls_module_init(void)
++{
++ return crypto_register_template(&crypto_tls_tmpl);
++}
++
++static void __exit crypto_tls_module_exit(void)
++{
++ crypto_unregister_template(&crypto_tls_tmpl);
++}
++
++module_init(crypto_tls_module_init);
++module_exit(crypto_tls_module_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("TLS 1.0 record encryption");
+--- a/drivers/crypto/caam/Kconfig
++++ b/drivers/crypto/caam/Kconfig
+@@ -1,6 +1,11 @@
++config CRYPTO_DEV_FSL_CAAM_COMMON
++ tristate
++
+ config CRYPTO_DEV_FSL_CAAM
+- tristate "Freescale CAAM-Multicore driver backend"
++ tristate "Freescale CAAM-Multicore platform driver backend"
+ depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
++ select CRYPTO_DEV_FSL_CAAM_COMMON
++ select SOC_BUS
+ help
+ Enables the driver module for Freescale's Cryptographic Accelerator
+ and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
+@@ -11,9 +16,16 @@ config CRYPTO_DEV_FSL_CAAM
+ To compile this driver as a module, choose M here: the module
+ will be called caam.
+
++if CRYPTO_DEV_FSL_CAAM
++
++config CRYPTO_DEV_FSL_CAAM_DEBUG
++ bool "Enable debug output in CAAM driver"
++ help
++ Selecting this will enable printing of various debug
++ information in the CAAM driver.
++
+ config CRYPTO_DEV_FSL_CAAM_JR
+ tristate "Freescale CAAM Job Ring driver backend"
+- depends on CRYPTO_DEV_FSL_CAAM
+ default y
+ help
+ Enables the driver module for Job Rings which are part of
+@@ -24,9 +36,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
+ To compile this driver as a module, choose M here: the module
+ will be called caam_jr.
+
++if CRYPTO_DEV_FSL_CAAM_JR
++
+ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
+ int "Job Ring size"
+- depends on CRYPTO_DEV_FSL_CAAM_JR
+ range 2 9
+ default "9"
+ help
+@@ -44,7 +57,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
+
+ config CRYPTO_DEV_FSL_CAAM_INTC
+ bool "Job Ring interrupt coalescing"
+- depends on CRYPTO_DEV_FSL_CAAM_JR
+ help
+ Enable the Job Ring's interrupt coalescing feature.
+
+@@ -74,7 +86,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
+
+ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+ tristate "Register algorithm implementations with the Crypto API"
+- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ default y
+ select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
+@@ -87,9 +98,25 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+ To compile this as a module, choose M here: the module
+ will be called caamalg.
+
++config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
++ tristate "Queue Interface as Crypto API backend"
++ depends on FSL_SDK_DPA && NET
++ default y
++ select CRYPTO_AUTHENC
++ select CRYPTO_BLKCIPHER
++ help
++ Selecting this will use the CAAM Queue Interface (QI) for sending
++ and receiving crypto jobs to/from CAAM. This gives better performance
++ than the job ring interface when the number of cores is greater than
++ the number of job rings assigned to the kernel.
The number of portals ++ assigned to the kernel should also be more than the number of ++ job rings. ++ ++ To compile this as a module, choose M here: the module ++ will be called caamalg_qi. ++ + config CRYPTO_DEV_FSL_CAAM_AHASH_API + tristate "Register hash algorithm implementations with Crypto API" +- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR + default y + select CRYPTO_HASH + help +@@ -101,7 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API + + config CRYPTO_DEV_FSL_CAAM_PKC_API + tristate "Register public key cryptography implementations with Crypto API" +- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR + default y + select CRYPTO_RSA + help +@@ -113,7 +139,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API + + config CRYPTO_DEV_FSL_CAAM_RNG_API + tristate "Register caam device for hwrng API" +- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR + default y + select CRYPTO_RNG + select HW_RANDOM +@@ -124,13 +149,26 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API + To compile this as a module, choose M here: the module + will be called caamrng. + +-config CRYPTO_DEV_FSL_CAAM_IMX +- def_bool SOC_IMX6 || SOC_IMX7D +- depends on CRYPTO_DEV_FSL_CAAM ++endif # CRYPTO_DEV_FSL_CAAM_JR + +-config CRYPTO_DEV_FSL_CAAM_DEBUG +- bool "Enable debug output in CAAM driver" +- depends on CRYPTO_DEV_FSL_CAAM +- help +- Selecting this will enable printing of various debug +- information in the CAAM driver. ++endif # CRYPTO_DEV_FSL_CAAM ++ ++config CRYPTO_DEV_FSL_DPAA2_CAAM ++ tristate "QorIQ DPAA2 CAAM (DPSECI) driver" ++ depends on FSL_MC_DPIO ++ select CRYPTO_DEV_FSL_CAAM_COMMON ++ select CRYPTO_BLKCIPHER ++ select CRYPTO_AUTHENC ++ select CRYPTO_AEAD ++ ---help--- ++ CAAM driver for QorIQ Data Path Acceleration Architecture 2. ++ It handles DPSECI DPAA2 objects that sit on the Management Complex ++ (MC) fsl-mc bus. ++ ++ To compile this as a module, choose M here: the module ++ will be called dpaa2_caam. ++ ++config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC ++ def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \ ++ CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \ ++ CRYPTO_DEV_FSL_DPAA2_CAAM) +--- a/drivers/crypto/caam/Makefile ++++ b/drivers/crypto/caam/Makefile +@@ -5,13 +5,26 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG + ccflags-y := -DDEBUG + endif + ++ccflags-y += -DVERSION=\"\" ++ ++obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o + obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o + obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o + obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o ++obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o ++obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o + obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o + obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o + obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o + + caam-objs := ctrl.o +-caam_jr-objs := jr.o key_gen.o error.o ++caam_jr-objs := jr.o key_gen.o + caam_pkc-y := caampkc.o pkc_desc.o ++ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),) ++ ccflags-y += -DCONFIG_CAAM_QI ++ caam-objs += qi.o ++endif ++ ++obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o ++ ++dpaa2_caam-y := caamalg_qi2.o dpseci.o +--- a/drivers/crypto/caam/caamalg.c ++++ b/drivers/crypto/caam/caamalg.c +@@ -2,6 +2,7 @@ + * caam - Freescale FSL CAAM support for crypto API + * + * Copyright 2008-2011 Freescale Semiconductor, Inc. ++ * Copyright 2016 NXP + * + * Based on talitos crypto API driver. 
+ * +@@ -53,6 +54,7 @@ + #include "error.h" + #include "sg_sw_sec4.h" + #include "key_gen.h" ++#include "caamalg_desc.h" + + /* + * crypto alg +@@ -62,8 +64,6 @@ + #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \ + CTR_RFC3686_NONCE_SIZE + \ + SHA512_DIGEST_SIZE * 2) +-/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ +-#define CAAM_MAX_IV_LENGTH 16 + + #define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2) + #define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ +@@ -71,37 +71,6 @@ + #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ + CAAM_CMD_SZ * 5) + +-/* length of descriptors text */ +-#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) +-#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ) +-#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) +-#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ) +- +-/* Note: Nonce is counted in enckeylen */ +-#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ) +- +-#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ) +-#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ) +-#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ) +- +-#define DESC_GCM_BASE (3 * CAAM_CMD_SZ) +-#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ) +-#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ) +- +-#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ) +-#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) +-#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) +- +-#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ) +-#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ) +-#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ) +- +-#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ) +-#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \ +- 20 * CAAM_CMD_SZ) +-#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \ +- 15 * CAAM_CMD_SZ) +- + #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) + #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) + +@@ -112,47 +81,11 @@ + #define debug(format, arg...) 
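The hunk above shows only the empty fallback definition of debug() as context; for orientation, the compile-time gate that the Makefile's "ccflags-y := -DDEBUG" line feeds is, in substance (a sketch of the surrounding kernel code, not new patch content):

	#ifdef DEBUG
	/* CRYPTO_DEV_FSL_CAAM_DEBUG=y adds -DDEBUG, so debug() prints */
	#define debug(format, arg...) printk(format, arg)
	#else
	/* otherwise debug() compiles away entirely */
	#define debug(format, arg...)
	#endif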
+ #endif + +-#ifdef DEBUG +-#include +- +-static void dbg_dump_sg(const char *level, const char *prefix_str, +- int prefix_type, int rowsize, int groupsize, +- struct scatterlist *sg, size_t tlen, bool ascii, +- bool may_sleep) +-{ +- struct scatterlist *it; +- void *it_page; +- size_t len; +- void *buf; +- +- for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) { +- /* +- * make sure the scatterlist's page +- * has a valid virtual memory mapping +- */ +- it_page = kmap_atomic(sg_page(it)); +- if (unlikely(!it_page)) { +- printk(KERN_ERR "dbg_dump_sg: kmap failed\n"); +- return; +- } +- +- buf = it_page + it->offset; +- len = min_t(size_t, tlen, it->length); +- print_hex_dump(level, prefix_str, prefix_type, rowsize, +- groupsize, buf, len, ascii); +- tlen -= len; +- +- kunmap_atomic(it_page); +- } +-} +-#endif +- + static struct list_head alg_list; + + struct caam_alg_entry { + int class1_alg_type; + int class2_alg_type; +- int alg_op; + bool rfc3686; + bool geniv; + }; +@@ -163,302 +96,67 @@ struct caam_aead_alg { + bool registered; + }; + +-/* Set DK bit in class 1 operation if shared */ +-static inline void append_dec_op1(u32 *desc, u32 type) +-{ +- u32 *jump_cmd, *uncond_jump_cmd; +- +- /* DK bit is valid only for AES */ +- if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) { +- append_operation(desc, type | OP_ALG_AS_INITFINAL | +- OP_ALG_DECRYPT); +- return; +- } +- +- jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); +- append_operation(desc, type | OP_ALG_AS_INITFINAL | +- OP_ALG_DECRYPT); +- uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL); +- set_jump_tgt_here(desc, jump_cmd); +- append_operation(desc, type | OP_ALG_AS_INITFINAL | +- OP_ALG_DECRYPT | OP_ALG_AAI_DK); +- set_jump_tgt_here(desc, uncond_jump_cmd); +-} +- +-/* +- * For aead functions, read payload and write payload, +- * both of which are specified in req->src and req->dst +- */ +-static inline void aead_append_src_dst(u32 *desc, u32 msg_type) +-{ +- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); +- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | +- KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH); +-} +- +-/* +- * For ablkcipher encrypt and decrypt, read from req->src and +- * write to req->dst +- */ +-static inline void ablkcipher_append_src_dst(u32 *desc) +-{ +- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); +- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); +- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | +- KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); +- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); +-} +- + /* + * per-session context + */ + struct caam_ctx { +- struct device *jrdev; + u32 sh_desc_enc[DESC_MAX_USED_LEN]; + u32 sh_desc_dec[DESC_MAX_USED_LEN]; + u32 sh_desc_givenc[DESC_MAX_USED_LEN]; ++ u8 key[CAAM_MAX_KEY_SIZE]; + dma_addr_t sh_desc_enc_dma; + dma_addr_t sh_desc_dec_dma; + dma_addr_t sh_desc_givenc_dma; +- u32 class1_alg_type; +- u32 class2_alg_type; +- u32 alg_op; +- u8 key[CAAM_MAX_KEY_SIZE]; + dma_addr_t key_dma; +- unsigned int enckeylen; +- unsigned int split_key_len; +- unsigned int split_key_pad_len; ++ struct device *jrdev; ++ struct alginfo adata; ++ struct alginfo cdata; + unsigned int authsize; + }; + +-static void append_key_aead(u32 *desc, struct caam_ctx *ctx, +- int keys_fit_inline, bool is_rfc3686) +-{ +- u32 *nonce; +- unsigned int enckeylen = ctx->enckeylen; +- +- /* +- * RFC3686 specific: +- * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE} +- * | enckeylen = encryption key size + 
nonce size +- */ +- if (is_rfc3686) +- enckeylen -= CTR_RFC3686_NONCE_SIZE; +- +- if (keys_fit_inline) { +- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, +- ctx->split_key_len, CLASS_2 | +- KEY_DEST_MDHA_SPLIT | KEY_ENC); +- append_key_as_imm(desc, (void *)ctx->key + +- ctx->split_key_pad_len, enckeylen, +- enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); +- } else { +- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | +- KEY_DEST_MDHA_SPLIT | KEY_ENC); +- append_key(desc, ctx->key_dma + ctx->split_key_pad_len, +- enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); +- } +- +- /* Load Counter into CONTEXT1 reg */ +- if (is_rfc3686) { +- nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len + +- enckeylen); +- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, +- LDST_CLASS_IND_CCB | +- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); +- append_move(desc, +- MOVE_SRC_OUTFIFO | +- MOVE_DEST_CLASS1CTX | +- (16 << MOVE_OFFSET_SHIFT) | +- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); +- } +-} +- +-static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx, +- int keys_fit_inline, bool is_rfc3686) +-{ +- u32 *key_jump_cmd; +- +- /* Note: Context registers are saved. */ +- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); +- +- /* Skip if already shared */ +- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | +- JUMP_COND_SHRD); +- +- append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); +- +- set_jump_tgt_here(desc, key_jump_cmd); +-} +- + static int aead_null_set_sh_desc(struct crypto_aead *aead) + { + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; +- bool keys_fit_inline = false; +- u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd; + u32 *desc; ++ int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN - ++ ctx->adata.keylen_pad; + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ +- if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN + +- ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX) +- keys_fit_inline = true; ++ if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) { ++ ctx->adata.key_inline = true; ++ ctx->adata.key_virt = ctx->key; ++ } else { ++ ctx->adata.key_inline = false; ++ ctx->adata.key_dma = ctx->key_dma; ++ } + + /* aead_encrypt shared descriptor */ + desc = ctx->sh_desc_enc; +- +- init_sh_desc(desc, HDR_SHARE_SERIAL); +- +- /* Skip if already shared */ +- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | +- JUMP_COND_SHRD); +- if (keys_fit_inline) +- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, +- ctx->split_key_len, CLASS_2 | +- KEY_DEST_MDHA_SPLIT | KEY_ENC); +- else +- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | +- KEY_DEST_MDHA_SPLIT | KEY_ENC); +- set_jump_tgt_here(desc, key_jump_cmd); +- +- /* assoclen + cryptlen = seqinlen */ +- append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ); +- +- /* Prepare to read and write cryptlen + assoclen bytes */ +- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); +- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); +- +- /* +- * MOVE_LEN opcode is not available in all SEC HW revisions, +- * thus need to do some magic, i.e. self-patch the descriptor +- * buffer. 
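The {AUTH_KEY, ENC_KEY, NONCE} layout documented in the comment being removed here survives the rewrite; only the bookkeeping moves into ctx->adata and ctx->cdata. A sketch of how the new code locates the rfc3686 nonce inside ctx->key (the helper is hypothetical; the offset arithmetic is the patch's own):

	/*
	 * ctx->key layout assumed by the new code:
	 *   [ MDHA split auth key, padded to adata.keylen_pad ]
	 *   [ cipher key of cdata.keylen bytes; for rfc3686 its last
	 *     CTR_RFC3686_NONCE_SIZE bytes are the nonce ]
	 */
	static const u32 *rfc3686_nonce(const struct caam_ctx *ctx)
	{
		return (const u32 *)((const void *)ctx->key +
				     ctx->adata.keylen_pad +
				     ctx->cdata.keylen -
				     CTR_RFC3686_NONCE_SIZE);
	}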
+- */ +- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | +- MOVE_DEST_MATH3 | +- (0x6 << MOVE_LEN_SHIFT)); +- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | +- MOVE_DEST_DESCBUF | +- MOVE_WAITCOMP | +- (0x8 << MOVE_LEN_SHIFT)); +- +- /* Class 2 operation */ +- append_operation(desc, ctx->class2_alg_type | +- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); +- +- /* Read and write cryptlen bytes */ +- aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); +- +- set_move_tgt_here(desc, read_move_cmd); +- set_move_tgt_here(desc, write_move_cmd); +- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); +- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO | +- MOVE_AUX_LS); +- +- /* Write ICV */ +- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | +- LDST_SRCDST_BYTE_CONTEXT); +- +- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, +- desc_bytes(desc), +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { +- dev_err(jrdev, "unable to map shared descriptor\n"); +- return -ENOMEM; +- } +-#ifdef DEBUG +- print_hex_dump(KERN_ERR, +- "aead null enc shdesc@"__stringify(__LINE__)": ", +- DUMP_PREFIX_ADDRESS, 16, 4, desc, +- desc_bytes(desc), 1); +-#endif ++ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize); ++ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, ++ desc_bytes(desc), DMA_TO_DEVICE); + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ +- keys_fit_inline = false; +- if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN + +- ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX) +- keys_fit_inline = true; +- +- desc = ctx->sh_desc_dec; ++ if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) { ++ ctx->adata.key_inline = true; ++ ctx->adata.key_virt = ctx->key; ++ } else { ++ ctx->adata.key_inline = false; ++ ctx->adata.key_dma = ctx->key_dma; ++ } + + /* aead_decrypt shared descriptor */ +- init_sh_desc(desc, HDR_SHARE_SERIAL); +- +- /* Skip if already shared */ +- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | +- JUMP_COND_SHRD); +- if (keys_fit_inline) +- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, +- ctx->split_key_len, CLASS_2 | +- KEY_DEST_MDHA_SPLIT | KEY_ENC); +- else +- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | +- KEY_DEST_MDHA_SPLIT | KEY_ENC); +- set_jump_tgt_here(desc, key_jump_cmd); +- +- /* Class 2 operation */ +- append_operation(desc, ctx->class2_alg_type | +- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); +- +- /* assoclen + cryptlen = seqoutlen */ +- append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); +- +- /* Prepare to read and write cryptlen + assoclen bytes */ +- append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); +- append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); +- +- /* +- * MOVE_LEN opcode is not available in all SEC HW revisions, +- * thus need to do some magic, i.e. self-patch the descriptor +- * buffer. +- */ +- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | +- MOVE_DEST_MATH2 | +- (0x6 << MOVE_LEN_SHIFT)); +- write_move_cmd = append_move(desc, MOVE_SRC_MATH2 | +- MOVE_DEST_DESCBUF | +- MOVE_WAITCOMP | +- (0x8 << MOVE_LEN_SHIFT)); +- +- /* Read and write cryptlen bytes */ +- aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); +- +- /* +- * Insert a NOP here, since we need at least 4 instructions between +- * code patching the descriptor buffer and the location being patched. 
+- */ +- jump_cmd = append_jump(desc, JUMP_TEST_ALL); +- set_jump_tgt_here(desc, jump_cmd); +- +- set_move_tgt_here(desc, read_move_cmd); +- set_move_tgt_here(desc, write_move_cmd); +- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); +- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO | +- MOVE_AUX_LS); +- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); +- +- /* Load ICV */ +- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 | +- FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); +- +- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, +- desc_bytes(desc), +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { +- dev_err(jrdev, "unable to map shared descriptor\n"); +- return -ENOMEM; +- } +-#ifdef DEBUG +- print_hex_dump(KERN_ERR, +- "aead null dec shdesc@"__stringify(__LINE__)": ", +- DUMP_PREFIX_ADDRESS, 16, 4, desc, +- desc_bytes(desc), 1); +-#endif ++ desc = ctx->sh_desc_dec; ++ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize); ++ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, ++ desc_bytes(desc), DMA_TO_DEVICE); + + return 0; + } +@@ -470,11 +168,11 @@ static int aead_set_sh_desc(struct crypt + unsigned int ivsize = crypto_aead_ivsize(aead); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; +- bool keys_fit_inline; +- u32 geniv, moveiv; + u32 ctx1_iv_off = 0; +- u32 *desc; +- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == ++ u32 *desc, *nonce = NULL; ++ u32 inl_mask; ++ unsigned int data_len[2]; ++ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == + OP_ALG_AAI_CTR_MOD128); + const bool is_rfc3686 = alg->caam.rfc3686; + +@@ -482,7 +180,7 @@ static int aead_set_sh_desc(struct crypt + return 0; + + /* NULL encryption / decryption */ +- if (!ctx->enckeylen) ++ if (!ctx->cdata.keylen) + return aead_null_set_sh_desc(aead); + + /* +@@ -497,8 +195,14 @@ static int aead_set_sh_desc(struct crypt + * RFC3686 specific: + * CONTEXT1[255:128] = {NONCE, IV, COUNTER} + */ +- if (is_rfc3686) ++ if (is_rfc3686) { + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; ++ nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad + ++ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); ++ } ++ ++ data_len[0] = ctx->adata.keylen_pad; ++ data_len[1] = ctx->cdata.keylen; + + if (alg->caam.geniv) + goto skip_enc; +@@ -507,146 +211,64 @@ static int aead_set_sh_desc(struct crypt + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ +- keys_fit_inline = false; +- if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN + +- ctx->split_key_pad_len + ctx->enckeylen + +- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <= +- CAAM_DESC_BYTES_MAX) +- keys_fit_inline = true; +- +- /* aead_encrypt shared descriptor */ +- desc = ctx->sh_desc_enc; +- +- /* Note: Context registers are saved. */ +- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); +- +- /* Class 2 operation */ +- append_operation(desc, ctx->class2_alg_type | +- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); ++ if (desc_inline_query(DESC_AEAD_ENC_LEN + ++ (is_rfc3686 ? 
DESC_AEAD_CTR_RFC3686_LEN : 0), ++ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask, ++ ARRAY_SIZE(data_len)) < 0) ++ return -EINVAL; + +- /* Read and write assoclen bytes */ +- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); +- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); ++ if (inl_mask & 1) ++ ctx->adata.key_virt = ctx->key; ++ else ++ ctx->adata.key_dma = ctx->key_dma; + +- /* Skip assoc data */ +- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); ++ if (inl_mask & 2) ++ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; ++ else ++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; + +- /* read assoc before reading payload */ +- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | +- FIFOLDST_VLF); ++ ctx->adata.key_inline = !!(inl_mask & 1); ++ ctx->cdata.key_inline = !!(inl_mask & 2); + +- /* Load Counter into CONTEXT1 reg */ +- if (is_rfc3686) +- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | +- LDST_SRCDST_BYTE_CONTEXT | +- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << +- LDST_OFFSET_SHIFT)); +- +- /* Class 1 operation */ +- append_operation(desc, ctx->class1_alg_type | +- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); +- +- /* Read and write cryptlen bytes */ +- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); +- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); +- aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); +- +- /* Write ICV */ +- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | +- LDST_SRCDST_BYTE_CONTEXT); +- +- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, +- desc_bytes(desc), +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { +- dev_err(jrdev, "unable to map shared descriptor\n"); +- return -ENOMEM; +- } +-#ifdef DEBUG +- print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ", +- DUMP_PREFIX_ADDRESS, 16, 4, desc, +- desc_bytes(desc), 1); +-#endif ++ /* aead_encrypt shared descriptor */ ++ desc = ctx->sh_desc_enc; ++ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize, ++ ctx->authsize, is_rfc3686, nonce, ctx1_iv_off, ++ false); ++ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, ++ desc_bytes(desc), DMA_TO_DEVICE); + + skip_enc: + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ +- keys_fit_inline = false; +- if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN + +- ctx->split_key_pad_len + ctx->enckeylen + +- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <= +- CAAM_DESC_BYTES_MAX) +- keys_fit_inline = true; +- +- /* aead_decrypt shared descriptor */ +- desc = ctx->sh_desc_dec; +- +- /* Note: Context registers are saved. */ +- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); +- +- /* Class 2 operation */ +- append_operation(desc, ctx->class2_alg_type | +- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); ++ if (desc_inline_query(DESC_AEAD_DEC_LEN + ++ (is_rfc3686 ? 
DESC_AEAD_CTR_RFC3686_LEN : 0), ++ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask, ++ ARRAY_SIZE(data_len)) < 0) ++ return -EINVAL; + +- /* Read and write assoclen bytes */ +- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); +- if (alg->caam.geniv) +- append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize); ++ if (inl_mask & 1) ++ ctx->adata.key_virt = ctx->key; + else +- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); +- +- /* Skip assoc data */ +- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); +- +- /* read assoc before reading payload */ +- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | +- KEY_VLF); +- +- if (alg->caam.geniv) { +- append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | +- LDST_SRCDST_BYTE_CONTEXT | +- (ctx1_iv_off << LDST_OFFSET_SHIFT)); +- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | +- (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize); +- } +- +- /* Load Counter into CONTEXT1 reg */ +- if (is_rfc3686) +- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | +- LDST_SRCDST_BYTE_CONTEXT | +- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << +- LDST_OFFSET_SHIFT)); ++ ctx->adata.key_dma = ctx->key_dma; + +- /* Choose operation */ +- if (ctr_mode) +- append_operation(desc, ctx->class1_alg_type | +- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT); ++ if (inl_mask & 2) ++ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; + else +- append_dec_op1(desc, ctx->class1_alg_type); ++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; + +- /* Read and write cryptlen bytes */ +- append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); +- append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); +- aead_append_src_dst(desc, FIFOLD_TYPE_MSG); +- +- /* Load ICV */ +- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 | +- FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); ++ ctx->adata.key_inline = !!(inl_mask & 1); ++ ctx->cdata.key_inline = !!(inl_mask & 2); + +- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, +- desc_bytes(desc), +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { +- dev_err(jrdev, "unable to map shared descriptor\n"); +- return -ENOMEM; +- } +-#ifdef DEBUG +- print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ", +- DUMP_PREFIX_ADDRESS, 16, 4, desc, +- desc_bytes(desc), 1); +-#endif ++ /* aead_decrypt shared descriptor */ ++ desc = ctx->sh_desc_dec; ++ cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize, ++ ctx->authsize, alg->caam.geniv, is_rfc3686, ++ nonce, ctx1_iv_off, false); ++ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, ++ desc_bytes(desc), DMA_TO_DEVICE); + + if (!alg->caam.geniv) + goto skip_givenc; +@@ -655,107 +277,32 @@ skip_enc: + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ +- keys_fit_inline = false; +- if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN + +- ctx->split_key_pad_len + ctx->enckeylen + +- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <= +- CAAM_DESC_BYTES_MAX) +- keys_fit_inline = true; +- +- /* aead_givencrypt shared descriptor */ +- desc = ctx->sh_desc_enc; +- +- /* Note: Context registers are saved. */ +- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); ++ if (desc_inline_query(DESC_AEAD_GIVENC_LEN + ++ (is_rfc3686 ? 
DESC_AEAD_CTR_RFC3686_LEN : 0), ++ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask, ++ ARRAY_SIZE(data_len)) < 0) ++ return -EINVAL; + +- if (is_rfc3686) +- goto copy_iv; ++ if (inl_mask & 1) ++ ctx->adata.key_virt = ctx->key; ++ else ++ ctx->adata.key_dma = ctx->key_dma; + +- /* Generate IV */ +- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | +- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | +- NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT); +- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | +- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); +- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); +- append_move(desc, MOVE_WAITCOMP | +- MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | +- (ctx1_iv_off << MOVE_OFFSET_SHIFT) | +- (ivsize << MOVE_LEN_SHIFT)); +- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); +- +-copy_iv: +- /* Copy IV to class 1 context */ +- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | +- (ctx1_iv_off << MOVE_OFFSET_SHIFT) | +- (ivsize << MOVE_LEN_SHIFT)); +- +- /* Return to encryption */ +- append_operation(desc, ctx->class2_alg_type | +- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); +- +- /* Read and write assoclen bytes */ +- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); +- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); +- +- /* ivsize + cryptlen = seqoutlen - authsize */ +- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); +- +- /* Skip assoc data */ +- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); +- +- /* read assoc before reading payload */ +- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | +- KEY_VLF); +- +- /* Copy iv from outfifo to class 2 fifo */ +- moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 | +- NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT); +- append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB | +- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); +- append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB | +- LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM); ++ if (inl_mask & 2) ++ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; ++ else ++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; + +- /* Load Counter into CONTEXT1 reg */ +- if (is_rfc3686) +- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | +- LDST_SRCDST_BYTE_CONTEXT | +- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << +- LDST_OFFSET_SHIFT)); +- +- /* Class 1 operation */ +- append_operation(desc, ctx->class1_alg_type | +- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); +- +- /* Will write ivsize + cryptlen */ +- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); +- +- /* Not need to reload iv */ +- append_seq_fifo_load(desc, ivsize, +- FIFOLD_CLASS_SKIP); +- +- /* Will read cryptlen */ +- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); +- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF | +- FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH); +- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); +- +- /* Write ICV */ +- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | +- LDST_SRCDST_BYTE_CONTEXT); ++ ctx->adata.key_inline = !!(inl_mask & 1); ++ ctx->cdata.key_inline = !!(inl_mask & 2); + +- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, +- desc_bytes(desc), +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { +- dev_err(jrdev, "unable to map shared descriptor\n"); +- return -ENOMEM; +- } +-#ifdef DEBUG +- print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ", +- DUMP_PREFIX_ADDRESS, 16, 
4, desc, +- desc_bytes(desc), 1); +-#endif ++ /* aead_givencrypt shared descriptor */ ++ desc = ctx->sh_desc_enc; ++ cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize, ++ ctx->authsize, is_rfc3686, nonce, ++ ctx1_iv_off, false); ++ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, ++ desc_bytes(desc), DMA_TO_DEVICE); + + skip_givenc: + return 0; +@@ -776,12 +323,12 @@ static int gcm_set_sh_desc(struct crypto + { + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; +- bool keys_fit_inline = false; +- u32 *key_jump_cmd, *zero_payload_jump_cmd, +- *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2; ++ unsigned int ivsize = crypto_aead_ivsize(aead); + u32 *desc; ++ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - ++ ctx->cdata.keylen; + +- if (!ctx->enckeylen || !ctx->authsize) ++ if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + + /* +@@ -789,175 +336,35 @@ static int gcm_set_sh_desc(struct crypto + * Job Descriptor and Shared Descriptor + * must fit into the 64-word Descriptor h/w Buffer + */ +- if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN + +- ctx->enckeylen <= CAAM_DESC_BYTES_MAX) +- keys_fit_inline = true; ++ if (rem_bytes >= DESC_GCM_ENC_LEN) { ++ ctx->cdata.key_inline = true; ++ ctx->cdata.key_virt = ctx->key; ++ } else { ++ ctx->cdata.key_inline = false; ++ ctx->cdata.key_dma = ctx->key_dma; ++ } + + desc = ctx->sh_desc_enc; +- +- init_sh_desc(desc, HDR_SHARE_SERIAL); +- +- /* skip key loading if they are loaded due to sharing */ +- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | +- JUMP_COND_SHRD | JUMP_COND_SELF); +- if (keys_fit_inline) +- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, +- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); +- else +- append_key(desc, ctx->key_dma, ctx->enckeylen, +- CLASS_1 | KEY_DEST_CLASS_REG); +- set_jump_tgt_here(desc, key_jump_cmd); +- +- /* class 1 operation */ +- append_operation(desc, ctx->class1_alg_type | +- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); +- +- /* if assoclen + cryptlen is ZERO, skip to ICV write */ +- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); +- zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | +- JUMP_COND_MATH_Z); +- +- /* if assoclen is ZERO, skip reading the assoc data */ +- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); +- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | +- JUMP_COND_MATH_Z); +- +- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); +- +- /* skip assoc data */ +- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); +- +- /* cryptlen = seqinlen - assoclen */ +- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ); +- +- /* if cryptlen is ZERO jump to zero-payload commands */ +- zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | +- JUMP_COND_MATH_Z); +- +- /* read assoc data */ +- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | +- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); +- set_jump_tgt_here(desc, zero_assoc_jump_cmd1); +- +- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); +- +- /* write encrypted data */ +- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); +- +- /* read payload data */ +- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | +- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); +- +- /* jump the zero-payload commands */ +- append_jump(desc, JUMP_TEST_ALL | 2); +- +- /* zero-payload commands */ +- set_jump_tgt_here(desc, zero_payload_jump_cmd); +- +- /* read assoc 
data */ +- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | +- FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1); +- +- /* There is no input data */ +- set_jump_tgt_here(desc, zero_assoc_jump_cmd2); +- +- /* write ICV */ +- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | +- LDST_SRCDST_BYTE_CONTEXT); +- +- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, +- desc_bytes(desc), +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { +- dev_err(jrdev, "unable to map shared descriptor\n"); +- return -ENOMEM; +- } +-#ifdef DEBUG +- print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ", +- DUMP_PREFIX_ADDRESS, 16, 4, desc, +- desc_bytes(desc), 1); +-#endif ++ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false); ++ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, ++ desc_bytes(desc), DMA_TO_DEVICE); + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ +- keys_fit_inline = false; +- if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN + +- ctx->enckeylen <= CAAM_DESC_BYTES_MAX) +- keys_fit_inline = true; ++ if (rem_bytes >= DESC_GCM_DEC_LEN) { ++ ctx->cdata.key_inline = true; ++ ctx->cdata.key_virt = ctx->key; ++ } else { ++ ctx->cdata.key_inline = false; ++ ctx->cdata.key_dma = ctx->key_dma; ++ } + + desc = ctx->sh_desc_dec; +- +- init_sh_desc(desc, HDR_SHARE_SERIAL); +- +- /* skip key loading if they are loaded due to sharing */ +- key_jump_cmd = append_jump(desc, JUMP_JSL | +- JUMP_TEST_ALL | JUMP_COND_SHRD | +- JUMP_COND_SELF); +- if (keys_fit_inline) +- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, +- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); +- else +- append_key(desc, ctx->key_dma, ctx->enckeylen, +- CLASS_1 | KEY_DEST_CLASS_REG); +- set_jump_tgt_here(desc, key_jump_cmd); +- +- /* class 1 operation */ +- append_operation(desc, ctx->class1_alg_type | +- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); +- +- /* if assoclen is ZERO, skip reading the assoc data */ +- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); +- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | +- JUMP_COND_MATH_Z); +- +- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); +- +- /* skip assoc data */ +- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); +- +- /* read assoc data */ +- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | +- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); +- +- set_jump_tgt_here(desc, zero_assoc_jump_cmd1); +- +- /* cryptlen = seqoutlen - assoclen */ +- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); +- +- /* jump to zero-payload command if cryptlen is zero */ +- zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | +- JUMP_COND_MATH_Z); +- +- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); +- +- /* store encrypted data */ +- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); +- +- /* read payload data */ +- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | +- FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); +- +- /* zero-payload command */ +- set_jump_tgt_here(desc, zero_payload_jump_cmd); +- +- /* read ICV */ +- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 | +- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); +- +- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, +- desc_bytes(desc), +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { +- dev_err(jrdev, "unable to map shared descriptor\n"); +- return 
-ENOMEM; +- } +-#ifdef DEBUG +- print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ", +- DUMP_PREFIX_ADDRESS, 16, 4, desc, +- desc_bytes(desc), 1); +-#endif ++ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false); ++ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, ++ desc_bytes(desc), DMA_TO_DEVICE); + + return 0; + } +@@ -976,11 +383,12 @@ static int rfc4106_set_sh_desc(struct cr + { + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; +- bool keys_fit_inline = false; +- u32 *key_jump_cmd; ++ unsigned int ivsize = crypto_aead_ivsize(aead); + u32 *desc; ++ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - ++ ctx->cdata.keylen; + +- if (!ctx->enckeylen || !ctx->authsize) ++ if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + + /* +@@ -988,148 +396,37 @@ static int rfc4106_set_sh_desc(struct cr + * Job Descriptor and Shared Descriptor + * must fit into the 64-word Descriptor h/w Buffer + */ +- if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN + +- ctx->enckeylen <= CAAM_DESC_BYTES_MAX) +- keys_fit_inline = true; ++ if (rem_bytes >= DESC_RFC4106_ENC_LEN) { ++ ctx->cdata.key_inline = true; ++ ctx->cdata.key_virt = ctx->key; ++ } else { ++ ctx->cdata.key_inline = false; ++ ctx->cdata.key_dma = ctx->key_dma; ++ } + + desc = ctx->sh_desc_enc; +- +- init_sh_desc(desc, HDR_SHARE_SERIAL); +- +- /* Skip key loading if it is loaded due to sharing */ +- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | +- JUMP_COND_SHRD); +- if (keys_fit_inline) +- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, +- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); +- else +- append_key(desc, ctx->key_dma, ctx->enckeylen, +- CLASS_1 | KEY_DEST_CLASS_REG); +- set_jump_tgt_here(desc, key_jump_cmd); +- +- /* Class 1 operation */ +- append_operation(desc, ctx->class1_alg_type | +- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); +- +- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8); +- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); +- +- /* Read assoc data */ +- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | +- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); +- +- /* Skip IV */ +- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); +- +- /* Will read cryptlen bytes */ +- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); +- +- /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */ +- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG); +- +- /* Skip assoc data */ +- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); +- +- /* cryptlen = seqoutlen - assoclen */ +- append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ); +- +- /* Write encrypted data */ +- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); +- +- /* Read payload data */ +- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | +- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); +- +- /* Write ICV */ +- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | +- LDST_SRCDST_BYTE_CONTEXT); +- +- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, +- desc_bytes(desc), +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { +- dev_err(jrdev, "unable to map shared descriptor\n"); +- return -ENOMEM; +- } +-#ifdef DEBUG +- print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ", +- DUMP_PREFIX_ADDRESS, 16, 4, desc, +- desc_bytes(desc), 1); +-#endif ++ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize, ++ 
false); ++ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, ++ desc_bytes(desc), DMA_TO_DEVICE); + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ +- keys_fit_inline = false; +- if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN + +- ctx->enckeylen <= CAAM_DESC_BYTES_MAX) +- keys_fit_inline = true; ++ if (rem_bytes >= DESC_RFC4106_DEC_LEN) { ++ ctx->cdata.key_inline = true; ++ ctx->cdata.key_virt = ctx->key; ++ } else { ++ ctx->cdata.key_inline = false; ++ ctx->cdata.key_dma = ctx->key_dma; ++ } + + desc = ctx->sh_desc_dec; +- +- init_sh_desc(desc, HDR_SHARE_SERIAL); +- +- /* Skip key loading if it is loaded due to sharing */ +- key_jump_cmd = append_jump(desc, JUMP_JSL | +- JUMP_TEST_ALL | JUMP_COND_SHRD); +- if (keys_fit_inline) +- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, +- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); +- else +- append_key(desc, ctx->key_dma, ctx->enckeylen, +- CLASS_1 | KEY_DEST_CLASS_REG); +- set_jump_tgt_here(desc, key_jump_cmd); +- +- /* Class 1 operation */ +- append_operation(desc, ctx->class1_alg_type | +- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); +- +- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8); +- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); +- +- /* Read assoc data */ +- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | +- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); +- +- /* Skip IV */ +- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); +- +- /* Will read cryptlen bytes */ +- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ); +- +- /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */ +- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG); +- +- /* Skip assoc data */ +- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); +- +- /* Will write cryptlen bytes */ +- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); +- +- /* Store payload data */ +- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); +- +- /* Read encrypted data */ +- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | +- FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); +- +- /* Read ICV */ +- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 | +- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); +- +- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, +- desc_bytes(desc), +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { +- dev_err(jrdev, "unable to map shared descriptor\n"); +- return -ENOMEM; +- } +-#ifdef DEBUG +- print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ", +- DUMP_PREFIX_ADDRESS, 16, 4, desc, +- desc_bytes(desc), 1); +-#endif ++ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize, ++ false); ++ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, ++ desc_bytes(desc), DMA_TO_DEVICE); + + return 0; + } +@@ -1149,12 +446,12 @@ static int rfc4543_set_sh_desc(struct cr + { + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; +- bool keys_fit_inline = false; +- u32 *key_jump_cmd; +- u32 *read_move_cmd, *write_move_cmd; ++ unsigned int ivsize = crypto_aead_ivsize(aead); + u32 *desc; ++ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - ++ ctx->cdata.keylen; + +- if (!ctx->enckeylen || !ctx->authsize) ++ if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + + /* +@@ -1162,151 +459,37 @@ static int rfc4543_set_sh_desc(struct cr + * Job Descriptor and Shared 
Descriptor + * must fit into the 64-word Descriptor h/w Buffer + */ +- if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN + +- ctx->enckeylen <= CAAM_DESC_BYTES_MAX) +- keys_fit_inline = true; ++ if (rem_bytes >= DESC_RFC4543_ENC_LEN) { ++ ctx->cdata.key_inline = true; ++ ctx->cdata.key_virt = ctx->key; ++ } else { ++ ctx->cdata.key_inline = false; ++ ctx->cdata.key_dma = ctx->key_dma; ++ } + + desc = ctx->sh_desc_enc; +- +- init_sh_desc(desc, HDR_SHARE_SERIAL); +- +- /* Skip key loading if it is loaded due to sharing */ +- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | +- JUMP_COND_SHRD); +- if (keys_fit_inline) +- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, +- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); +- else +- append_key(desc, ctx->key_dma, ctx->enckeylen, +- CLASS_1 | KEY_DEST_CLASS_REG); +- set_jump_tgt_here(desc, key_jump_cmd); +- +- /* Class 1 operation */ +- append_operation(desc, ctx->class1_alg_type | +- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); +- +- /* assoclen + cryptlen = seqinlen */ +- append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ); +- +- /* +- * MOVE_LEN opcode is not available in all SEC HW revisions, +- * thus need to do some magic, i.e. self-patch the descriptor +- * buffer. +- */ +- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | +- (0x6 << MOVE_LEN_SHIFT)); +- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | +- (0x8 << MOVE_LEN_SHIFT)); +- +- /* Will read assoclen + cryptlen bytes */ +- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); +- +- /* Will write assoclen + cryptlen bytes */ +- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); +- +- /* Read and write assoclen + cryptlen bytes */ +- aead_append_src_dst(desc, FIFOLD_TYPE_AAD); +- +- set_move_tgt_here(desc, read_move_cmd); +- set_move_tgt_here(desc, write_move_cmd); +- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); +- /* Move payload data to OFIFO */ +- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); +- +- /* Write ICV */ +- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | +- LDST_SRCDST_BYTE_CONTEXT); +- +- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, +- desc_bytes(desc), +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { +- dev_err(jrdev, "unable to map shared descriptor\n"); +- return -ENOMEM; +- } +-#ifdef DEBUG +- print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ", +- DUMP_PREFIX_ADDRESS, 16, 4, desc, +- desc_bytes(desc), 1); +-#endif ++ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize, ++ false); ++ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, ++ desc_bytes(desc), DMA_TO_DEVICE); + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ +- keys_fit_inline = false; +- if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN + +- ctx->enckeylen <= CAAM_DESC_BYTES_MAX) +- keys_fit_inline = true; ++ if (rem_bytes >= DESC_RFC4543_DEC_LEN) { ++ ctx->cdata.key_inline = true; ++ ctx->cdata.key_virt = ctx->key; ++ } else { ++ ctx->cdata.key_inline = false; ++ ctx->cdata.key_dma = ctx->key_dma; ++ } + + desc = ctx->sh_desc_dec; +- +- init_sh_desc(desc, HDR_SHARE_SERIAL); +- +- /* Skip key loading if it is loaded due to sharing */ +- key_jump_cmd = append_jump(desc, JUMP_JSL | +- JUMP_TEST_ALL | JUMP_COND_SHRD); +- if (keys_fit_inline) +- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, +- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); +- else +- 
append_key(desc, ctx->key_dma, ctx->enckeylen, +- CLASS_1 | KEY_DEST_CLASS_REG); +- set_jump_tgt_here(desc, key_jump_cmd); +- +- /* Class 1 operation */ +- append_operation(desc, ctx->class1_alg_type | +- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); +- +- /* assoclen + cryptlen = seqoutlen */ +- append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ); +- +- /* +- * MOVE_LEN opcode is not available in all SEC HW revisions, +- * thus need to do some magic, i.e. self-patch the descriptor +- * buffer. +- */ +- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | +- (0x6 << MOVE_LEN_SHIFT)); +- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | +- (0x8 << MOVE_LEN_SHIFT)); +- +- /* Will read assoclen + cryptlen bytes */ +- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); +- +- /* Will write assoclen + cryptlen bytes */ +- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); +- +- /* Store payload data */ +- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); +- +- /* In-snoop assoclen + cryptlen data */ +- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF | +- FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1); +- +- set_move_tgt_here(desc, read_move_cmd); +- set_move_tgt_here(desc, write_move_cmd); +- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); +- /* Move payload data to OFIFO */ +- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); +- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); +- +- /* Read ICV */ +- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 | +- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); +- +- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, +- desc_bytes(desc), +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { +- dev_err(jrdev, "unable to map shared descriptor\n"); +- return -ENOMEM; +- } +-#ifdef DEBUG +- print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ", +- DUMP_PREFIX_ADDRESS, 16, 4, desc, +- desc_bytes(desc), 1); +-#endif ++ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize, ++ false); ++ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, ++ desc_bytes(desc), DMA_TO_DEVICE); + + return 0; + } +@@ -1322,19 +505,9 @@ static int rfc4543_setauthsize(struct cr + return 0; + } + +-static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in, +- u32 authkeylen) +-{ +- return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len, +- ctx->split_key_pad_len, key_in, authkeylen, +- ctx->alg_op); +-} +- + static int aead_setkey(struct crypto_aead *aead, + const u8 *key, unsigned int keylen) + { +- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ +- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + struct crypto_authenc_keys keys; +@@ -1343,53 +516,32 @@ static int aead_setkey(struct crypto_aea + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) + goto badkey; + +- /* Pick class 2 key length from algorithm submask */ +- ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> +- OP_ALG_ALGSEL_SHIFT] * 2; +- ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16); +- +- if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE) +- goto badkey; +- + #ifdef DEBUG + printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n", + keys.authkeylen + keys.enckeylen, keys.enckeylen, + keys.authkeylen); +- printk(KERN_ERR "split_key_len %d 
split_key_pad_len %d\n", +- ctx->split_key_len, ctx->split_key_pad_len); + print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + #endif + +- ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen); ++ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey, ++ keys.authkeylen, CAAM_MAX_KEY_SIZE - ++ keys.enckeylen); + if (ret) { + goto badkey; + } + + /* postpend encryption key to auth split key */ +- memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen); +- +- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len + +- keys.enckeylen, DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->key_dma)) { +- dev_err(jrdev, "unable to map key i/o memory\n"); +- return -ENOMEM; +- } ++ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); ++ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + ++ keys.enckeylen, DMA_TO_DEVICE); + #ifdef DEBUG + print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, +- ctx->split_key_pad_len + keys.enckeylen, 1); ++ ctx->adata.keylen_pad + keys.enckeylen, 1); + #endif +- +- ctx->enckeylen = keys.enckeylen; +- +- ret = aead_set_sh_desc(aead); +- if (ret) { +- dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len + +- keys.enckeylen, DMA_TO_DEVICE); +- } +- +- return ret; ++ ctx->cdata.keylen = keys.enckeylen; ++ return aead_set_sh_desc(aead); + badkey: + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; +@@ -1400,7 +552,6 @@ static int gcm_setkey(struct crypto_aead + { + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; +- int ret = 0; + + #ifdef DEBUG + print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", +@@ -1408,21 +559,10 @@ static int gcm_setkey(struct crypto_aead + #endif + + memcpy(ctx->key, key, keylen); +- ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->key_dma)) { +- dev_err(jrdev, "unable to map key i/o memory\n"); +- return -ENOMEM; +- } +- ctx->enckeylen = keylen; +- +- ret = gcm_set_sh_desc(aead); +- if (ret) { +- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen, +- DMA_TO_DEVICE); +- } ++ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); ++ ctx->cdata.keylen = keylen; + +- return ret; ++ return gcm_set_sh_desc(aead); + } + + static int rfc4106_setkey(struct crypto_aead *aead, +@@ -1430,7 +570,6 @@ static int rfc4106_setkey(struct crypto_ + { + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; +- int ret = 0; + + if (keylen < 4) + return -EINVAL; +@@ -1446,22 +585,10 @@ static int rfc4106_setkey(struct crypto_ + * The last four bytes of the key material are used as the salt value + * in the nonce. Update the AES key length. 
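+ *
+ * (Worked example, not from the patch itself: a 20-byte rfc4106 key is
+ * a 16-byte AES-128 key followed by a 4-byte salt, so keylen - 4 = 16
+ * becomes ctx->cdata.keylen and only those 16 key bytes are synced back
+ * to the device below; the salt remains the tail of the key material.)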
+ */ +- ctx->enckeylen = keylen - 4; +- +- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen, +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->key_dma)) { +- dev_err(jrdev, "unable to map key i/o memory\n"); +- return -ENOMEM; +- } +- +- ret = rfc4106_set_sh_desc(aead); +- if (ret) { +- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen, +- DMA_TO_DEVICE); +- } +- +- return ret; ++ ctx->cdata.keylen = keylen - 4; ++ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, ++ DMA_TO_DEVICE); ++ return rfc4106_set_sh_desc(aead); + } + + static int rfc4543_setkey(struct crypto_aead *aead, +@@ -1469,7 +596,6 @@ static int rfc4543_setkey(struct crypto_ + { + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; +- int ret = 0; + + if (keylen < 4) + return -EINVAL; +@@ -1485,43 +611,28 @@ static int rfc4543_setkey(struct crypto_ + * The last four bytes of the key material are used as the salt value + * in the nonce. Update the AES key length. + */ +- ctx->enckeylen = keylen - 4; +- +- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen, +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->key_dma)) { +- dev_err(jrdev, "unable to map key i/o memory\n"); +- return -ENOMEM; +- } +- +- ret = rfc4543_set_sh_desc(aead); +- if (ret) { +- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen, +- DMA_TO_DEVICE); +- } +- +- return ret; ++ ctx->cdata.keylen = keylen - 4; ++ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, ++ DMA_TO_DEVICE); ++ return rfc4543_set_sh_desc(aead); + } + + static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, + const u8 *key, unsigned int keylen) + { + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); +- struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher; + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher); + const char *alg_name = crypto_tfm_alg_name(tfm); + struct device *jrdev = ctx->jrdev; +- int ret = 0; +- u32 *key_jump_cmd; ++ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + u32 *desc; +- u8 *nonce; +- u32 geniv; + u32 ctx1_iv_off = 0; +- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == ++ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == + OP_ALG_AAI_CTR_MOD128); + const bool is_rfc3686 = (ctr_mode && + (strstr(alg_name, "rfc3686") != NULL)); + ++ memcpy(ctx->key, key, keylen); + #ifdef DEBUG + print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); +@@ -1544,215 +655,33 @@ static int ablkcipher_setkey(struct cryp + keylen -= CTR_RFC3686_NONCE_SIZE; + } + +- memcpy(ctx->key, key, keylen); +- ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->key_dma)) { +- dev_err(jrdev, "unable to map key i/o memory\n"); +- return -ENOMEM; +- } +- ctx->enckeylen = keylen; ++ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); ++ ctx->cdata.keylen = keylen; ++ ctx->cdata.key_virt = ctx->key; ++ ctx->cdata.key_inline = true; + + /* ablkcipher_encrypt shared descriptor */ + desc = ctx->sh_desc_enc; +- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); +- /* Skip if already shared */ +- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | +- JUMP_COND_SHRD); +- +- /* Load class1 key only */ +- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, +- ctx->enckeylen, CLASS_1 | +- KEY_DEST_CLASS_REG); +- +- /* Load nonce into CONTEXT1 reg */ +- if (is_rfc3686) { +- nonce = (u8 
*)key + keylen; +- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, +- LDST_CLASS_IND_CCB | +- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); +- append_move(desc, MOVE_WAITCOMP | +- MOVE_SRC_OUTFIFO | +- MOVE_DEST_CLASS1CTX | +- (16 << MOVE_OFFSET_SHIFT) | +- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); +- } +- +- set_jump_tgt_here(desc, key_jump_cmd); +- +- /* Load iv */ +- append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT | +- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); ++ cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686, ++ ctx1_iv_off); ++ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, ++ desc_bytes(desc), DMA_TO_DEVICE); + +- /* Load counter into CONTEXT1 reg */ +- if (is_rfc3686) +- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | +- LDST_SRCDST_BYTE_CONTEXT | +- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << +- LDST_OFFSET_SHIFT)); +- +- /* Load operation */ +- append_operation(desc, ctx->class1_alg_type | +- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); +- +- /* Perform operation */ +- ablkcipher_append_src_dst(desc); +- +- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, +- desc_bytes(desc), +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { +- dev_err(jrdev, "unable to map shared descriptor\n"); +- return -ENOMEM; +- } +-#ifdef DEBUG +- print_hex_dump(KERN_ERR, +- "ablkcipher enc shdesc@"__stringify(__LINE__)": ", +- DUMP_PREFIX_ADDRESS, 16, 4, desc, +- desc_bytes(desc), 1); +-#endif + /* ablkcipher_decrypt shared descriptor */ + desc = ctx->sh_desc_dec; ++ cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686, ++ ctx1_iv_off); ++ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, ++ desc_bytes(desc), DMA_TO_DEVICE); + +- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); +- /* Skip if already shared */ +- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | +- JUMP_COND_SHRD); +- +- /* Load class1 key only */ +- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, +- ctx->enckeylen, CLASS_1 | +- KEY_DEST_CLASS_REG); +- +- /* Load nonce into CONTEXT1 reg */ +- if (is_rfc3686) { +- nonce = (u8 *)key + keylen; +- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, +- LDST_CLASS_IND_CCB | +- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); +- append_move(desc, MOVE_WAITCOMP | +- MOVE_SRC_OUTFIFO | +- MOVE_DEST_CLASS1CTX | +- (16 << MOVE_OFFSET_SHIFT) | +- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); +- } +- +- set_jump_tgt_here(desc, key_jump_cmd); +- +- /* load IV */ +- append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT | +- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); +- +- /* Load counter into CONTEXT1 reg */ +- if (is_rfc3686) +- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | +- LDST_SRCDST_BYTE_CONTEXT | +- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << +- LDST_OFFSET_SHIFT)); +- +- /* Choose operation */ +- if (ctr_mode) +- append_operation(desc, ctx->class1_alg_type | +- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT); +- else +- append_dec_op1(desc, ctx->class1_alg_type); +- +- /* Perform operation */ +- ablkcipher_append_src_dst(desc); +- +- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, +- desc_bytes(desc), +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { +- dev_err(jrdev, "unable to map shared descriptor\n"); +- return -ENOMEM; +- } +- +-#ifdef DEBUG +- print_hex_dump(KERN_ERR, +- "ablkcipher dec shdesc@"__stringify(__LINE__)": ", +- DUMP_PREFIX_ADDRESS, 16, 4, desc, +- desc_bytes(desc), 1); +-#endif + /* ablkcipher_givencrypt 
shared descriptor */ + desc = ctx->sh_desc_givenc; ++ cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686, ++ ctx1_iv_off); ++ dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma, ++ desc_bytes(desc), DMA_TO_DEVICE); + +- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); +- /* Skip if already shared */ +- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | +- JUMP_COND_SHRD); +- +- /* Load class1 key only */ +- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, +- ctx->enckeylen, CLASS_1 | +- KEY_DEST_CLASS_REG); +- +- /* Load Nonce into CONTEXT1 reg */ +- if (is_rfc3686) { +- nonce = (u8 *)key + keylen; +- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, +- LDST_CLASS_IND_CCB | +- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); +- append_move(desc, MOVE_WAITCOMP | +- MOVE_SRC_OUTFIFO | +- MOVE_DEST_CLASS1CTX | +- (16 << MOVE_OFFSET_SHIFT) | +- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); +- } +- set_jump_tgt_here(desc, key_jump_cmd); +- +- /* Generate IV */ +- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | +- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | +- NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT); +- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | +- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); +- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); +- append_move(desc, MOVE_WAITCOMP | +- MOVE_SRC_INFIFO | +- MOVE_DEST_CLASS1CTX | +- (crt->ivsize << MOVE_LEN_SHIFT) | +- (ctx1_iv_off << MOVE_OFFSET_SHIFT)); +- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); +- +- /* Copy generated IV to memory */ +- append_seq_store(desc, crt->ivsize, +- LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | +- (ctx1_iv_off << LDST_OFFSET_SHIFT)); +- +- /* Load Counter into CONTEXT1 reg */ +- if (is_rfc3686) +- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | +- LDST_SRCDST_BYTE_CONTEXT | +- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << +- LDST_OFFSET_SHIFT)); +- +- if (ctx1_iv_off) +- append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP | +- (1 << JUMP_OFFSET_SHIFT)); +- +- /* Load operation */ +- append_operation(desc, ctx->class1_alg_type | +- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); +- +- /* Perform operation */ +- ablkcipher_append_src_dst(desc); +- +- ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, +- desc_bytes(desc), +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { +- dev_err(jrdev, "unable to map shared descriptor\n"); +- return -ENOMEM; +- } +-#ifdef DEBUG +- print_hex_dump(KERN_ERR, +- "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ", +- DUMP_PREFIX_ADDRESS, 16, 4, desc, +- desc_bytes(desc), 1); +-#endif +- +- return ret; ++ return 0; + } + + static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, +@@ -1760,8 +689,7 @@ static int xts_ablkcipher_setkey(struct + { + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); + struct device *jrdev = ctx->jrdev; +- u32 *key_jump_cmd, *desc; +- __be64 sector_size = cpu_to_be64(512); ++ u32 *desc; + + if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { + crypto_ablkcipher_set_flags(ablkcipher, +@@ -1771,126 +699,38 @@ static int xts_ablkcipher_setkey(struct + } + + memcpy(ctx->key, key, keylen); +- ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->key_dma)) { +- dev_err(jrdev, "unable to map key i/o memory\n"); +- return -ENOMEM; +- } +- ctx->enckeylen = keylen; ++ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); ++ 
ctx->cdata.keylen = keylen; ++ ctx->cdata.key_virt = ctx->key; ++ ctx->cdata.key_inline = true; + + /* xts_ablkcipher_encrypt shared descriptor */ + desc = ctx->sh_desc_enc; +- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); +- /* Skip if already shared */ +- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | +- JUMP_COND_SHRD); +- +- /* Load class1 keys only */ +- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, +- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); +- +- /* Load sector size with index 40 bytes (0x28) */ +- append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT | +- LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8); +- append_data(desc, (void *)§or_size, 8); +- +- set_jump_tgt_here(desc, key_jump_cmd); +- +- /* +- * create sequence for loading the sector index +- * Upper 8B of IV - will be used as sector index +- * Lower 8B of IV - will be discarded +- */ +- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | +- LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8); +- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); +- +- /* Load operation */ +- append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL | +- OP_ALG_ENCRYPT); +- +- /* Perform operation */ +- ablkcipher_append_src_dst(desc); +- +- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { +- dev_err(jrdev, "unable to map shared descriptor\n"); +- return -ENOMEM; +- } +-#ifdef DEBUG +- print_hex_dump(KERN_ERR, +- "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ", +- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); +-#endif ++ cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata); ++ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, ++ desc_bytes(desc), DMA_TO_DEVICE); + + /* xts_ablkcipher_decrypt shared descriptor */ + desc = ctx->sh_desc_dec; +- +- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); +- /* Skip if already shared */ +- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | +- JUMP_COND_SHRD); +- +- /* Load class1 key only */ +- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, +- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); +- +- /* Load sector size with index 40 bytes (0x28) */ +- append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT | +- LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8); +- append_data(desc, (void *)§or_size, 8); +- +- set_jump_tgt_here(desc, key_jump_cmd); +- +- /* +- * create sequence for loading the sector index +- * Upper 8B of IV - will be used as sector index +- * Lower 8B of IV - will be discarded +- */ +- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | +- LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8); +- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); +- +- /* Load operation */ +- append_dec_op1(desc, ctx->class1_alg_type); +- +- /* Perform operation */ +- ablkcipher_append_src_dst(desc); +- +- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc), +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { +- dma_unmap_single(jrdev, ctx->sh_desc_enc_dma, +- desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE); +- dev_err(jrdev, "unable to map shared descriptor\n"); +- return -ENOMEM; +- } +-#ifdef DEBUG +- print_hex_dump(KERN_ERR, +- "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ", +- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); +-#endif ++ cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata); ++ dma_sync_single_for_device(jrdev, 
ctx->sh_desc_dec_dma, ++ desc_bytes(desc), DMA_TO_DEVICE); + + return 0; + } + + /* + * aead_edesc - s/w-extended aead descriptor +- * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist +- * @src_nents: number of segments in input scatterlist +- * @dst_nents: number of segments in output scatterlist +- * @iv_dma: dma address of iv for checking continuity and link table +- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) ++ * @src_nents: number of segments in input s/w scatterlist ++ * @dst_nents: number of segments in output s/w scatterlist + * @sec4_sg_bytes: length of dma mapped sec4_sg space + * @sec4_sg_dma: bus physical mapped address of h/w link table ++ * @sec4_sg: pointer to h/w link table + * @hw_desc: the h/w job descriptor followed by any referenced link tables + */ + struct aead_edesc { +- int assoc_nents; + int src_nents; + int dst_nents; +- dma_addr_t iv_dma; + int sec4_sg_bytes; + dma_addr_t sec4_sg_dma; + struct sec4_sg_entry *sec4_sg; +@@ -1899,12 +739,12 @@ struct aead_edesc { + + /* + * ablkcipher_edesc - s/w-extended ablkcipher descriptor +- * @src_nents: number of segments in input scatterlist +- * @dst_nents: number of segments in output scatterlist ++ * @src_nents: number of segments in input s/w scatterlist ++ * @dst_nents: number of segments in output s/w scatterlist + * @iv_dma: dma address of iv for checking continuity and link table +- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) + * @sec4_sg_bytes: length of dma mapped sec4_sg space + * @sec4_sg_dma: bus physical mapped address of h/w link table ++ * @sec4_sg: pointer to h/w link table + * @hw_desc: the h/w job descriptor followed by any referenced link tables + */ + struct ablkcipher_edesc { +@@ -1924,10 +764,11 @@ static void caam_unmap(struct device *de + int sec4_sg_bytes) + { + if (dst != src) { +- dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE); +- dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE); ++ if (src_nents) ++ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); ++ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); + } else { +- dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL); ++ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); + } + + if (iv_dma) +@@ -2021,8 +862,7 @@ static void ablkcipher_encrypt_done(stru + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); + #endif + +- edesc = (struct ablkcipher_edesc *)((char *)desc - +- offsetof(struct ablkcipher_edesc, hw_desc)); ++ edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]); + + if (err) + caam_jr_strstatus(jrdev, err); +@@ -2031,10 +871,10 @@ static void ablkcipher_encrypt_done(stru + print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->info, + edesc->src_nents > 1 ? 100 : ivsize, 1); +- dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ", +- DUMP_PREFIX_ADDRESS, 16, 4, req->dst, +- edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true); + #endif ++ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, req->dst, ++ edesc->dst_nents > 1 ? 
100 : req->nbytes, 1); + + ablkcipher_unmap(jrdev, edesc, req); + +@@ -2062,8 +902,7 @@ static void ablkcipher_decrypt_done(stru + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); + #endif + +- edesc = (struct ablkcipher_edesc *)((char *)desc - +- offsetof(struct ablkcipher_edesc, hw_desc)); ++ edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]); + if (err) + caam_jr_strstatus(jrdev, err); + +@@ -2071,10 +910,10 @@ static void ablkcipher_decrypt_done(stru + print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->info, + ivsize, 1); +- dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ", +- DUMP_PREFIX_ADDRESS, 16, 4, req->dst, +- edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true); + #endif ++ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, req->dst, ++ edesc->dst_nents > 1 ? 100 : req->nbytes, 1); + + ablkcipher_unmap(jrdev, edesc, req); + +@@ -2114,7 +953,7 @@ static void init_aead_job(struct aead_re + init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); + + if (all_contig) { +- src_dma = sg_dma_address(req->src); ++ src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0; + in_options = 0; + } else { + src_dma = edesc->sec4_sg_dma; +@@ -2129,7 +968,7 @@ static void init_aead_job(struct aead_re + out_options = in_options; + + if (unlikely(req->src != req->dst)) { +- if (!edesc->dst_nents) { ++ if (edesc->dst_nents == 1) { + dst_dma = sg_dma_address(req->dst); + } else { + dst_dma = edesc->sec4_sg_dma + +@@ -2175,7 +1014,7 @@ static void init_gcm_job(struct aead_req + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last); + /* Append Salt */ + if (!generic_gcm) +- append_data(desc, ctx->key + ctx->enckeylen, 4); ++ append_data(desc, ctx->key + ctx->cdata.keylen, 4); + /* Append IV */ + append_data(desc, req->iv, ivsize); + /* End of blank commands */ +@@ -2190,7 +1029,7 @@ static void init_authenc_job(struct aead + struct caam_aead_alg, aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + struct caam_ctx *ctx = crypto_aead_ctx(aead); +- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == ++ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == + OP_ALG_AAI_CTR_MOD128); + const bool is_rfc3686 = alg->caam.rfc3686; + u32 *desc = edesc->hw_desc; +@@ -2236,16 +1075,15 @@ static void init_ablkcipher_job(u32 *sh_ + int len, sec4_sg_index = 0; + + #ifdef DEBUG +- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | +- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0); + print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->info, + ivsize, 1); +- printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes); +- dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ", +- DUMP_PREFIX_ADDRESS, 16, 4, req->src, +- edesc->src_nents ? 100 : req->nbytes, 1, may_sleep); ++ pr_err("asked=%d, nbytes%d\n", ++ (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes); + #endif ++ caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, req->src, ++ edesc->src_nents > 1 ? 
100 : req->nbytes, 1); + + len = desc_len(sh_desc); + init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); +@@ -2261,7 +1099,7 @@ static void init_ablkcipher_job(u32 *sh_ + append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); + + if (likely(req->src == req->dst)) { +- if (!edesc->src_nents && iv_contig) { ++ if (edesc->src_nents == 1 && iv_contig) { + dst_dma = sg_dma_address(req->src); + } else { + dst_dma = edesc->sec4_sg_dma + +@@ -2269,7 +1107,7 @@ static void init_ablkcipher_job(u32 *sh_ + out_options = LDST_SGF; + } + } else { +- if (!edesc->dst_nents) { ++ if (edesc->dst_nents == 1) { + dst_dma = sg_dma_address(req->dst); + } else { + dst_dma = edesc->sec4_sg_dma + +@@ -2296,20 +1134,18 @@ static void init_ablkcipher_giv_job(u32 + int len, sec4_sg_index = 0; + + #ifdef DEBUG +- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | +- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0); + print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->info, + ivsize, 1); +- dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ", +- DUMP_PREFIX_ADDRESS, 16, 4, req->src, +- edesc->src_nents ? 100 : req->nbytes, 1, may_sleep); + #endif ++ caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, req->src, ++ edesc->src_nents > 1 ? 100 : req->nbytes, 1); + + len = desc_len(sh_desc); + init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); + +- if (!edesc->src_nents) { ++ if (edesc->src_nents == 1) { + src_dma = sg_dma_address(req->src); + in_options = 0; + } else { +@@ -2340,87 +1176,100 @@ static struct aead_edesc *aead_edesc_all + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; +- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | +- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; +- int src_nents, dst_nents = 0; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? ++ GFP_KERNEL : GFP_ATOMIC; ++ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; + struct aead_edesc *edesc; +- int sgc; +- bool all_contig = true; +- int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; ++ int sec4_sg_index, sec4_sg_len, sec4_sg_bytes; + unsigned int authsize = ctx->authsize; + + if (unlikely(req->dst != req->src)) { +- src_nents = sg_count(req->src, req->assoclen + req->cryptlen); +- dst_nents = sg_count(req->dst, +- req->assoclen + req->cryptlen + +- (encrypt ? authsize : (-authsize))); +- } else { +- src_nents = sg_count(req->src, +- req->assoclen + req->cryptlen + +- (encrypt ? authsize : 0)); +- } +- +- /* Check if data are contiguous. */ +- all_contig = !src_nents; +- if (!all_contig) { +- src_nents = src_nents ? : 1; +- sec4_sg_len = src_nents; +- } +- +- sec4_sg_len += dst_nents; +- +- sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); ++ src_nents = sg_nents_for_len(req->src, req->assoclen + ++ req->cryptlen); ++ if (unlikely(src_nents < 0)) { ++ dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", ++ req->assoclen + req->cryptlen); ++ return ERR_PTR(src_nents); ++ } + +- /* allocate space for base edesc and hw desc commands, link tables */ +- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, +- GFP_DMA | flags); +- if (!edesc) { +- dev_err(jrdev, "could not allocate extended descriptor\n"); +- return ERR_PTR(-ENOMEM); ++ dst_nents = sg_nents_for_len(req->dst, req->assoclen + ++ req->cryptlen + ++ (encrypt ? 
authsize : ++ (-authsize))); ++ if (unlikely(dst_nents < 0)) { ++ dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n", ++ req->assoclen + req->cryptlen + ++ (encrypt ? authsize : (-authsize))); ++ return ERR_PTR(dst_nents); ++ } ++ } else { ++ src_nents = sg_nents_for_len(req->src, req->assoclen + ++ req->cryptlen + ++ (encrypt ? authsize : 0)); ++ if (unlikely(src_nents < 0)) { ++ dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", ++ req->assoclen + req->cryptlen + ++ (encrypt ? authsize : 0)); ++ return ERR_PTR(src_nents); ++ } + } + + if (likely(req->src == req->dst)) { +- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, +- DMA_BIDIRECTIONAL); +- if (unlikely(!sgc)) { ++ mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents, ++ DMA_BIDIRECTIONAL); ++ if (unlikely(!mapped_src_nents)) { + dev_err(jrdev, "unable to map source\n"); +- kfree(edesc); + return ERR_PTR(-ENOMEM); + } + } else { +- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, +- DMA_TO_DEVICE); +- if (unlikely(!sgc)) { +- dev_err(jrdev, "unable to map source\n"); +- kfree(edesc); +- return ERR_PTR(-ENOMEM); ++ /* Cover also the case of null (zero length) input data */ ++ if (src_nents) { ++ mapped_src_nents = dma_map_sg(jrdev, req->src, ++ src_nents, DMA_TO_DEVICE); ++ if (unlikely(!mapped_src_nents)) { ++ dev_err(jrdev, "unable to map source\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ } else { ++ mapped_src_nents = 0; + } + +- sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, +- DMA_FROM_DEVICE); +- if (unlikely(!sgc)) { ++ mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents, ++ DMA_FROM_DEVICE); ++ if (unlikely(!mapped_dst_nents)) { + dev_err(jrdev, "unable to map destination\n"); +- dma_unmap_sg(jrdev, req->src, src_nents ? : 1, +- DMA_TO_DEVICE); +- kfree(edesc); ++ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); + return ERR_PTR(-ENOMEM); + } + } + ++ sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0; ++ sec4_sg_len += mapped_dst_nents > 1 ? 
mapped_dst_nents : 0; ++ sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); ++ ++ /* allocate space for base edesc and hw desc commands, link tables */ ++ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, ++ GFP_DMA | flags); ++ if (!edesc) { ++ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, ++ 0, 0, 0); ++ return ERR_PTR(-ENOMEM); ++ } ++ + edesc->src_nents = src_nents; + edesc->dst_nents = dst_nents; + edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + + desc_bytes; +- *all_contig_ptr = all_contig; ++ *all_contig_ptr = !(mapped_src_nents > 1); + + sec4_sg_index = 0; +- if (!all_contig) { +- sg_to_sec4_sg_last(req->src, src_nents, +- edesc->sec4_sg + sec4_sg_index, 0); +- sec4_sg_index += src_nents; ++ if (mapped_src_nents > 1) { ++ sg_to_sec4_sg_last(req->src, mapped_src_nents, ++ edesc->sec4_sg + sec4_sg_index, 0); ++ sec4_sg_index += mapped_src_nents; + } +- if (dst_nents) { +- sg_to_sec4_sg_last(req->dst, dst_nents, ++ if (mapped_dst_nents > 1) { ++ sg_to_sec4_sg_last(req->dst, mapped_dst_nents, + edesc->sec4_sg + sec4_sg_index, 0); + } + +@@ -2573,13 +1422,9 @@ static int aead_decrypt(struct aead_requ + u32 *desc; + int ret = 0; + +-#ifdef DEBUG +- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | +- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0); +- dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ", +- DUMP_PREFIX_ADDRESS, 16, 4, req->src, +- req->assoclen + req->cryptlen, 1, may_sleep); +-#endif ++ caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, req->src, ++ req->assoclen + req->cryptlen, 1); + + /* allocate extended descriptor */ + edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN, +@@ -2619,51 +1464,80 @@ static struct ablkcipher_edesc *ablkciph + struct device *jrdev = ctx->jrdev; + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; +- int src_nents, dst_nents = 0, sec4_sg_bytes; ++ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; + struct ablkcipher_edesc *edesc; + dma_addr_t iv_dma = 0; +- bool iv_contig = false; +- int sgc; ++ bool in_contig; + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); +- int sec4_sg_index; ++ int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes; + +- src_nents = sg_count(req->src, req->nbytes); ++ src_nents = sg_nents_for_len(req->src, req->nbytes); ++ if (unlikely(src_nents < 0)) { ++ dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", ++ req->nbytes); ++ return ERR_PTR(src_nents); ++ } + +- if (req->dst != req->src) +- dst_nents = sg_count(req->dst, req->nbytes); ++ if (req->dst != req->src) { ++ dst_nents = sg_nents_for_len(req->dst, req->nbytes); ++ if (unlikely(dst_nents < 0)) { ++ dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n", ++ req->nbytes); ++ return ERR_PTR(dst_nents); ++ } ++ } + + if (likely(req->src == req->dst)) { +- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, +- DMA_BIDIRECTIONAL); ++ mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents, ++ DMA_BIDIRECTIONAL); ++ if (unlikely(!mapped_src_nents)) { ++ dev_err(jrdev, "unable to map source\n"); ++ return ERR_PTR(-ENOMEM); ++ } + } else { +- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, +- DMA_TO_DEVICE); +- sgc = dma_map_sg(jrdev, req->dst, dst_nents ? 
: 1, +- DMA_FROM_DEVICE); ++ mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents, ++ DMA_TO_DEVICE); ++ if (unlikely(!mapped_src_nents)) { ++ dev_err(jrdev, "unable to map source\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents, ++ DMA_FROM_DEVICE); ++ if (unlikely(!mapped_dst_nents)) { ++ dev_err(jrdev, "unable to map destination\n"); ++ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); ++ return ERR_PTR(-ENOMEM); ++ } + } + + iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, iv_dma)) { + dev_err(jrdev, "unable to map IV\n"); ++ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, ++ 0, 0, 0); + return ERR_PTR(-ENOMEM); + } + +- /* +- * Check if iv can be contiguous with source and destination. +- * If so, include it. If not, create scatterlist. +- */ +- if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src)) +- iv_contig = true; +- else +- src_nents = src_nents ? : 1; +- sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) * +- sizeof(struct sec4_sg_entry); ++ if (mapped_src_nents == 1 && ++ iv_dma + ivsize == sg_dma_address(req->src)) { ++ in_contig = true; ++ sec4_sg_ents = 0; ++ } else { ++ in_contig = false; ++ sec4_sg_ents = 1 + mapped_src_nents; ++ } ++ dst_sg_idx = sec4_sg_ents; ++ sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; ++ sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry); + + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, + GFP_DMA | flags); + if (!edesc) { + dev_err(jrdev, "could not allocate extended descriptor\n"); ++ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, 0, 0); + return ERR_PTR(-ENOMEM); + } + +@@ -2673,23 +1547,24 @@ static struct ablkcipher_edesc *ablkciph + edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + + desc_bytes; + +- sec4_sg_index = 0; +- if (!iv_contig) { ++ if (!in_contig) { + dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); +- sg_to_sec4_sg_last(req->src, src_nents, ++ sg_to_sec4_sg_last(req->src, mapped_src_nents, + edesc->sec4_sg + 1, 0); +- sec4_sg_index += 1 + src_nents; + } + +- if (dst_nents) { +- sg_to_sec4_sg_last(req->dst, dst_nents, +- edesc->sec4_sg + sec4_sg_index, 0); ++ if (mapped_dst_nents > 1) { ++ sg_to_sec4_sg_last(req->dst, mapped_dst_nents, ++ edesc->sec4_sg + dst_sg_idx, 0); + } + + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); ++ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, 0, 0); ++ kfree(edesc); + return ERR_PTR(-ENOMEM); + } + +@@ -2701,7 +1576,7 @@ static struct ablkcipher_edesc *ablkciph + sec4_sg_bytes, 1); + #endif + +- *iv_contig_out = iv_contig; ++ *iv_contig_out = in_contig; + return edesc; + } + +@@ -2792,30 +1667,54 @@ static struct ablkcipher_edesc *ablkciph + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); + struct device *jrdev = ctx->jrdev; +- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | +- CRYPTO_TFM_REQ_MAY_SLEEP)) ? ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC; +- int src_nents, dst_nents = 0, sec4_sg_bytes; ++ int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents; + struct ablkcipher_edesc *edesc; + dma_addr_t iv_dma = 0; +- bool iv_contig = false; +- int sgc; ++ bool out_contig; + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); +- int sec4_sg_index; +- +- src_nents = sg_count(req->src, req->nbytes); ++ int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes; + +- if (unlikely(req->dst != req->src)) +- dst_nents = sg_count(req->dst, req->nbytes); ++ src_nents = sg_nents_for_len(req->src, req->nbytes); ++ if (unlikely(src_nents < 0)) { ++ dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", ++ req->nbytes); ++ return ERR_PTR(src_nents); ++ } + + if (likely(req->src == req->dst)) { +- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, +- DMA_BIDIRECTIONAL); ++ mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents, ++ DMA_BIDIRECTIONAL); ++ if (unlikely(!mapped_src_nents)) { ++ dev_err(jrdev, "unable to map source\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ dst_nents = src_nents; ++ mapped_dst_nents = src_nents; + } else { +- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, +- DMA_TO_DEVICE); +- sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, +- DMA_FROM_DEVICE); ++ mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents, ++ DMA_TO_DEVICE); ++ if (unlikely(!mapped_src_nents)) { ++ dev_err(jrdev, "unable to map source\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ dst_nents = sg_nents_for_len(req->dst, req->nbytes); ++ if (unlikely(dst_nents < 0)) { ++ dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n", ++ req->nbytes); ++ return ERR_PTR(dst_nents); ++ } ++ ++ mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents, ++ DMA_FROM_DEVICE); ++ if (unlikely(!mapped_dst_nents)) { ++ dev_err(jrdev, "unable to map destination\n"); ++ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); ++ return ERR_PTR(-ENOMEM); ++ } + } + + /* +@@ -2825,21 +1724,29 @@ static struct ablkcipher_edesc *ablkciph + iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, iv_dma)) { + dev_err(jrdev, "unable to map IV\n"); ++ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, ++ 0, 0, 0); + return ERR_PTR(-ENOMEM); + } + +- if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst)) +- iv_contig = true; +- else +- dst_nents = dst_nents ? : 1; +- sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) * +- sizeof(struct sec4_sg_entry); ++ sec4_sg_ents = mapped_src_nents > 1 ? 
mapped_src_nents : 0; ++ dst_sg_idx = sec4_sg_ents; ++ if (mapped_dst_nents == 1 && ++ iv_dma + ivsize == sg_dma_address(req->dst)) { ++ out_contig = true; ++ } else { ++ out_contig = false; ++ sec4_sg_ents += 1 + mapped_dst_nents; ++ } + + /* allocate space for base edesc and hw desc commands, link tables */ ++ sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry); + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, + GFP_DMA | flags); + if (!edesc) { + dev_err(jrdev, "could not allocate extended descriptor\n"); ++ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, 0, 0); + return ERR_PTR(-ENOMEM); + } + +@@ -2849,24 +1756,24 @@ static struct ablkcipher_edesc *ablkciph + edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + + desc_bytes; + +- sec4_sg_index = 0; +- if (src_nents) { +- sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); +- sec4_sg_index += src_nents; +- } ++ if (mapped_src_nents > 1) ++ sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg, ++ 0); + +- if (!iv_contig) { +- dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, ++ if (!out_contig) { ++ dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, + iv_dma, ivsize, 0); +- sec4_sg_index += 1; +- sg_to_sec4_sg_last(req->dst, dst_nents, +- edesc->sec4_sg + sec4_sg_index, 0); ++ sg_to_sec4_sg_last(req->dst, mapped_dst_nents, ++ edesc->sec4_sg + dst_sg_idx + 1, 0); + } + + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); ++ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, 0, 0); ++ kfree(edesc); + return ERR_PTR(-ENOMEM); + } + edesc->iv_dma = iv_dma; +@@ -2878,7 +1785,7 @@ static struct ablkcipher_edesc *ablkciph + sec4_sg_bytes, 1); + #endif + +- *iv_contig_out = iv_contig; ++ *iv_contig_out = out_contig; + return edesc; + } + +@@ -2889,7 +1796,7 @@ static int ablkcipher_givencrypt(struct + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); + struct device *jrdev = ctx->jrdev; +- bool iv_contig; ++ bool iv_contig = false; + u32 *desc; + int ret = 0; + +@@ -2933,7 +1840,6 @@ struct caam_alg_template { + } template_u; + u32 class1_alg_type; + u32 class2_alg_type; +- u32 alg_op; + }; + + static struct caam_alg_template driver_algs[] = { +@@ -3118,7 +2024,6 @@ static struct caam_aead_alg driver_aeads + .caam = { + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -3140,7 +2045,6 @@ static struct caam_aead_alg driver_aeads + .caam = { + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -3162,7 +2066,6 @@ static struct caam_aead_alg driver_aeads + .caam = { + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -3184,7 +2087,6 @@ static struct caam_aead_alg driver_aeads + .caam = { + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -3206,7 +2108,6 @@ static struct caam_aead_alg driver_aeads + .caam = { + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -3228,7 +2129,6 @@ static struct 
caam_aead_alg driver_aeads + .caam = { + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -3250,7 +2150,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -3273,7 +2172,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + .geniv = true, + }, + }, +@@ -3296,7 +2194,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -3319,7 +2216,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + .geniv = true, + }, + }, +@@ -3342,7 +2238,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -3365,7 +2260,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + .geniv = true, + }, + }, +@@ -3388,7 +2282,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -3411,7 +2304,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + .geniv = true, + }, + }, +@@ -3434,7 +2326,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -3457,7 +2348,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + .geniv = true, + }, + }, +@@ -3480,7 +2370,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -3503,7 +2392,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + .geniv = true, + }, + }, +@@ -3526,7 +2414,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = 
OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + } + }, + { +@@ -3549,7 +2436,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + .geniv = true, + } + }, +@@ -3573,7 +2459,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -3597,7 +2482,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + .geniv = true, + }, + }, +@@ -3621,7 +2505,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -3645,7 +2528,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + .geniv = true, + }, + }, +@@ -3669,7 +2551,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -3693,7 +2574,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + .geniv = true, + }, + }, +@@ -3717,7 +2597,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -3741,7 +2620,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + .geniv = true, + }, + }, +@@ -3765,7 +2643,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -3789,7 +2666,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + .geniv = true, + }, + }, +@@ -3812,7 +2688,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -3835,7 +2710,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + .geniv = true, + }, + }, +@@ -3858,7 +2732,6 @@ static struct caam_aead_alg 
driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -3881,7 +2754,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + .geniv = true, + }, + }, +@@ -3904,7 +2776,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -3927,7 +2798,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + .geniv = true, + }, + }, +@@ -3950,7 +2820,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -3973,7 +2842,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + .geniv = true, + }, + }, +@@ -3996,7 +2864,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -4019,7 +2886,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + .geniv = true, + }, + }, +@@ -4042,7 +2908,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + }, + }, + { +@@ -4065,7 +2930,6 @@ static struct caam_aead_alg driver_aeads + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + .geniv = true, + }, + }, +@@ -4090,7 +2954,6 @@ static struct caam_aead_alg driver_aeads + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + }, + }, +@@ -4115,7 +2978,6 @@ static struct caam_aead_alg driver_aeads + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + .geniv = true, + }, +@@ -4141,7 +3003,6 @@ static struct caam_aead_alg driver_aeads + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + }, + }, +@@ -4166,7 +3027,6 @@ static struct caam_aead_alg driver_aeads + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + .geniv 
= true, + }, +@@ -4192,7 +3052,6 @@ static struct caam_aead_alg driver_aeads + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + }, + }, +@@ -4217,7 +3076,6 @@ static struct caam_aead_alg driver_aeads + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + .geniv = true, + }, +@@ -4243,7 +3101,6 @@ static struct caam_aead_alg driver_aeads + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + }, + }, +@@ -4268,7 +3125,6 @@ static struct caam_aead_alg driver_aeads + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + .geniv = true, + }, +@@ -4294,7 +3150,6 @@ static struct caam_aead_alg driver_aeads + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + }, + }, +@@ -4319,7 +3174,6 @@ static struct caam_aead_alg driver_aeads + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + .geniv = true, + }, +@@ -4345,7 +3199,6 @@ static struct caam_aead_alg driver_aeads + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + }, + }, +@@ -4370,7 +3223,6 @@ static struct caam_aead_alg driver_aeads + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, +- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + .geniv = true, + }, +@@ -4385,16 +3237,34 @@ struct caam_crypto_alg { + + static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) + { ++ dma_addr_t dma_addr; ++ + ctx->jrdev = caam_jr_alloc(); + if (IS_ERR(ctx->jrdev)) { + pr_err("Job Ring Device allocation for transform failed\n"); + return PTR_ERR(ctx->jrdev); + } + ++ dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc, ++ offsetof(struct caam_ctx, ++ sh_desc_enc_dma), ++ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); ++ if (dma_mapping_error(ctx->jrdev, dma_addr)) { ++ dev_err(ctx->jrdev, "unable to map key, shared descriptors\n"); ++ caam_jr_free(ctx->jrdev); ++ return -ENOMEM; ++ } ++ ++ ctx->sh_desc_enc_dma = dma_addr; ++ ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx, ++ sh_desc_dec); ++ ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx, ++ sh_desc_givenc); ++ ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key); ++ + /* copy descriptor header template value */ +- ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; +- ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; +- ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op; ++ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; ++ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; + + return 0; + } +@@ -4421,25 +3291,9 @@ static int caam_aead_init(struct crypto_ + + static void caam_exit_common(struct caam_ctx *ctx) + { +- if (ctx->sh_desc_enc_dma && +- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma)) +- dma_unmap_single(ctx->jrdev, 
ctx->sh_desc_enc_dma, +- desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE); +- if (ctx->sh_desc_dec_dma && +- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma)) +- dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma, +- desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE); +- if (ctx->sh_desc_givenc_dma && +- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma)) +- dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma, +- desc_bytes(ctx->sh_desc_givenc), +- DMA_TO_DEVICE); +- if (ctx->key_dma && +- !dma_mapping_error(ctx->jrdev, ctx->key_dma)) +- dma_unmap_single(ctx->jrdev, ctx->key_dma, +- ctx->enckeylen + ctx->split_key_pad_len, +- DMA_TO_DEVICE); +- ++ dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma, ++ offsetof(struct caam_ctx, sh_desc_enc_dma), ++ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + caam_jr_free(ctx->jrdev); + } + +@@ -4515,7 +3369,6 @@ static struct caam_crypto_alg *caam_alg_ + + t_alg->caam.class1_alg_type = template->class1_alg_type; + t_alg->caam.class2_alg_type = template->class2_alg_type; +- t_alg->caam.alg_op = template->alg_op; + + return t_alg; + } +--- /dev/null ++++ b/drivers/crypto/caam/caamalg_desc.c +@@ -0,0 +1,1913 @@ ++/* ++ * Shared descriptors for aead, ablkcipher algorithms ++ * ++ * Copyright 2016 NXP ++ */ ++ ++#include "compat.h" ++#include "desc_constr.h" ++#include "caamalg_desc.h" ++ ++/* ++ * For aead functions, read payload and write payload, ++ * both of which are specified in req->src and req->dst ++ */ ++static inline void aead_append_src_dst(u32 *desc, u32 msg_type) ++{ ++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | ++ KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH); ++} ++ ++/* Set DK bit in class 1 operation if shared */ ++static inline void append_dec_op1(u32 *desc, u32 type) ++{ ++ u32 *jump_cmd, *uncond_jump_cmd; ++ ++ /* DK bit is valid only for AES */ ++ if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) { ++ append_operation(desc, type | OP_ALG_AS_INITFINAL | ++ OP_ALG_DECRYPT); ++ return; ++ } ++ ++ jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); ++ append_operation(desc, type | OP_ALG_AS_INITFINAL | ++ OP_ALG_DECRYPT); ++ uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL); ++ set_jump_tgt_here(desc, jump_cmd); ++ append_operation(desc, type | OP_ALG_AS_INITFINAL | ++ OP_ALG_DECRYPT | OP_ALG_AAI_DK); ++ set_jump_tgt_here(desc, uncond_jump_cmd); ++} ++ ++/** ++ * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor ++ * (non-protocol) with no (null) encryption. ++ * @desc: pointer to buffer used for descriptor construction ++ * @adata: pointer to authentication transform definitions. Note that since a ++ * split key is to be used, the size of the split key itself is ++ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, ++ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. ++ * @icvsize: integrity check value (ICV) size (truncated or full) ++ * ++ * Note: Requires an MDHA split key. 
++ */ ++void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, ++ unsigned int icvsize) ++{ ++ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd; ++ ++ init_sh_desc(desc, HDR_SHARE_SERIAL); ++ ++ /* Skip if already shared */ ++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_SHRD); ++ if (adata->key_inline) ++ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, ++ adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT | ++ KEY_ENC); ++ else ++ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | ++ KEY_DEST_MDHA_SPLIT | KEY_ENC); ++ set_jump_tgt_here(desc, key_jump_cmd); ++ ++ /* assoclen + cryptlen = seqinlen */ ++ append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ); ++ ++ /* Prepare to read and write cryptlen + assoclen bytes */ ++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); ++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); ++ ++ /* ++ * MOVE_LEN opcode is not available in all SEC HW revisions, ++ * thus need to do some magic, i.e. self-patch the descriptor ++ * buffer. ++ */ ++ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | ++ MOVE_DEST_MATH3 | ++ (0x6 << MOVE_LEN_SHIFT)); ++ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | ++ MOVE_DEST_DESCBUF | ++ MOVE_WAITCOMP | ++ (0x8 << MOVE_LEN_SHIFT)); ++ ++ /* Class 2 operation */ ++ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_ENCRYPT); ++ ++ /* Read and write cryptlen bytes */ ++ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); ++ ++ set_move_tgt_here(desc, read_move_cmd); ++ set_move_tgt_here(desc, write_move_cmd); ++ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); ++ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO | ++ MOVE_AUX_LS); ++ ++ /* Write ICV */ ++ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | ++ LDST_SRCDST_BYTE_CONTEXT); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, ++ "aead null enc shdesc@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ++#endif ++} ++EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap); ++ ++/** ++ * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor ++ * (non-protocol) with no (null) decryption. ++ * @desc: pointer to buffer used for descriptor construction ++ * @adata: pointer to authentication transform definitions. Note that since a ++ * split key is to be used, the size of the split key itself is ++ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, ++ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. ++ * @icvsize: integrity check value (ICV) size (truncated or full) ++ * ++ * Note: Requires an MDHA split key. 
++ */ ++void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, ++ unsigned int icvsize) ++{ ++ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd; ++ ++ init_sh_desc(desc, HDR_SHARE_SERIAL); ++ ++ /* Skip if already shared */ ++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_SHRD); ++ if (adata->key_inline) ++ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, ++ adata->keylen, CLASS_2 | ++ KEY_DEST_MDHA_SPLIT | KEY_ENC); ++ else ++ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | ++ KEY_DEST_MDHA_SPLIT | KEY_ENC); ++ set_jump_tgt_here(desc, key_jump_cmd); ++ ++ /* Class 2 operation */ ++ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_DECRYPT | OP_ALG_ICV_ON); ++ ++ /* assoclen + cryptlen = seqoutlen */ ++ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); ++ ++ /* Prepare to read and write cryptlen + assoclen bytes */ ++ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); ++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); ++ ++ /* ++ * MOVE_LEN opcode is not available in all SEC HW revisions, ++ * thus need to do some magic, i.e. self-patch the descriptor ++ * buffer. ++ */ ++ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | ++ MOVE_DEST_MATH2 | ++ (0x6 << MOVE_LEN_SHIFT)); ++ write_move_cmd = append_move(desc, MOVE_SRC_MATH2 | ++ MOVE_DEST_DESCBUF | ++ MOVE_WAITCOMP | ++ (0x8 << MOVE_LEN_SHIFT)); ++ ++ /* Read and write cryptlen bytes */ ++ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); ++ ++ /* ++ * Insert a NOP here, since we need at least 4 instructions between ++ * code patching the descriptor buffer and the location being patched. ++ */ ++ jump_cmd = append_jump(desc, JUMP_TEST_ALL); ++ set_jump_tgt_here(desc, jump_cmd); ++ ++ set_move_tgt_here(desc, read_move_cmd); ++ set_move_tgt_here(desc, write_move_cmd); ++ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); ++ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO | ++ MOVE_AUX_LS); ++ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); ++ ++ /* Load ICV */ ++ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | ++ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, ++ "aead null dec shdesc@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ++#endif ++} ++EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap); ++ ++static void init_sh_desc_key_aead(u32 * const desc, ++ struct alginfo * const cdata, ++ struct alginfo * const adata, ++ const bool is_rfc3686, u32 *nonce) ++{ ++ u32 *key_jump_cmd; ++ unsigned int enckeylen = cdata->keylen; ++ ++ /* Note: Context registers are saved. 
*/ ++ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); ++ ++ /* Skip if already shared */ ++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_SHRD); ++ ++ /* ++ * RFC3686 specific: ++ * | key = {AUTH_KEY, ENC_KEY, NONCE} ++ * | enckeylen = encryption key size + nonce size ++ */ ++ if (is_rfc3686) ++ enckeylen -= CTR_RFC3686_NONCE_SIZE; ++ ++ if (adata->key_inline) ++ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, ++ adata->keylen, CLASS_2 | ++ KEY_DEST_MDHA_SPLIT | KEY_ENC); ++ else ++ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | ++ KEY_DEST_MDHA_SPLIT | KEY_ENC); ++ ++ if (cdata->key_inline) ++ append_key_as_imm(desc, cdata->key_virt, enckeylen, ++ enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); ++ else ++ append_key(desc, cdata->key_dma, enckeylen, CLASS_1 | ++ KEY_DEST_CLASS_REG); ++ ++ /* Load Counter into CONTEXT1 reg */ ++ if (is_rfc3686) { ++ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, ++ LDST_CLASS_IND_CCB | ++ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); ++ append_move(desc, ++ MOVE_SRC_OUTFIFO | ++ MOVE_DEST_CLASS1CTX | ++ (16 << MOVE_OFFSET_SHIFT) | ++ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); ++ } ++ ++ set_jump_tgt_here(desc, key_jump_cmd); ++} ++ ++/** ++ * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor ++ * (non-protocol). ++ * @desc: pointer to buffer used for descriptor construction ++ * @cdata: pointer to block cipher transform definitions ++ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed ++ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. ++ * @adata: pointer to authentication transform definitions. Note that since a ++ * split key is to be used, the size of the split key itself is ++ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, ++ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. ++ * @ivsize: initialization vector size ++ * @icvsize: integrity check value (ICV) size (truncated or full) ++ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template ++ * @nonce: pointer to rfc3686 nonce ++ * @ctx1_iv_off: IV offset in CONTEXT1 register ++ * @is_qi: true when called from caam/qi ++ * ++ * Note: Requires an MDHA split key. ++ */ ++void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, ++ struct alginfo *adata, unsigned int ivsize, ++ unsigned int icvsize, const bool is_rfc3686, ++ u32 *nonce, const u32 ctx1_iv_off, const bool is_qi) ++{ ++ /* Note: Context registers are saved. 
*/ ++ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); ++ ++ /* Class 2 operation */ ++ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_ENCRYPT); ++ ++ if (is_qi) { ++ u32 *wait_load_cmd; ++ ++ /* REG3 = assoclen */ ++ append_seq_load(desc, 4, LDST_CLASS_DECO | ++ LDST_SRCDST_WORD_DECO_MATH3 | ++ (4 << LDST_OFFSET_SHIFT)); ++ ++ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_CALM | JUMP_COND_NCP | ++ JUMP_COND_NOP | JUMP_COND_NIP | ++ JUMP_COND_NIFP); ++ set_jump_tgt_here(desc, wait_load_cmd); ++ ++ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | ++ LDST_SRCDST_BYTE_CONTEXT | ++ (ctx1_iv_off << LDST_OFFSET_SHIFT)); ++ } ++ ++ /* Read and write assoclen bytes */ ++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); ++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); ++ ++ /* Skip assoc data */ ++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); ++ ++ /* read assoc before reading payload */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | ++ FIFOLDST_VLF); ++ ++ /* Load Counter into CONTEXT1 reg */ ++ if (is_rfc3686) ++ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | ++ LDST_SRCDST_BYTE_CONTEXT | ++ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << ++ LDST_OFFSET_SHIFT)); ++ ++ /* Class 1 operation */ ++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_ENCRYPT); ++ ++ /* Read and write cryptlen bytes */ ++ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); ++ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); ++ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); ++ ++ /* Write ICV */ ++ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | ++ LDST_SRCDST_BYTE_CONTEXT); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ++#endif ++} ++EXPORT_SYMBOL(cnstr_shdsc_aead_encap); ++ ++/** ++ * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor ++ * (non-protocol). ++ * @desc: pointer to buffer used for descriptor construction ++ * @cdata: pointer to block cipher transform definitions ++ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed ++ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. ++ * @adata: pointer to authentication transform definitions. Note that since a ++ * split key is to be used, the size of the split key itself is ++ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, ++ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. ++ * @ivsize: initialization vector size ++ * @icvsize: integrity check value (ICV) size (truncated or full) ++ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template ++ * @nonce: pointer to rfc3686 nonce ++ * @ctx1_iv_off: IV offset in CONTEXT1 register ++ * @is_qi: true when called from caam/qi ++ * ++ * Note: Requires an MDHA split key. ++ */ ++void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, ++ struct alginfo *adata, unsigned int ivsize, ++ unsigned int icvsize, const bool geniv, ++ const bool is_rfc3686, u32 *nonce, ++ const u32 ctx1_iv_off, const bool is_qi) ++{ ++ /* Note: Context registers are saved. 
*/ ++ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); ++ ++ /* Class 2 operation */ ++ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_DECRYPT | OP_ALG_ICV_ON); ++ ++ if (is_qi) { ++ u32 *wait_load_cmd; ++ ++ /* REG3 = assoclen */ ++ append_seq_load(desc, 4, LDST_CLASS_DECO | ++ LDST_SRCDST_WORD_DECO_MATH3 | ++ (4 << LDST_OFFSET_SHIFT)); ++ ++ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_CALM | JUMP_COND_NCP | ++ JUMP_COND_NOP | JUMP_COND_NIP | ++ JUMP_COND_NIFP); ++ set_jump_tgt_here(desc, wait_load_cmd); ++ ++ if (!geniv) ++ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | ++ LDST_SRCDST_BYTE_CONTEXT | ++ (ctx1_iv_off << LDST_OFFSET_SHIFT)); ++ } ++ ++ /* Read and write assoclen bytes */ ++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); ++ if (geniv) ++ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize); ++ else ++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); ++ ++ /* Skip assoc data */ ++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); ++ ++ /* read assoc before reading payload */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | ++ KEY_VLF); ++ ++ if (geniv) { ++ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | ++ LDST_SRCDST_BYTE_CONTEXT | ++ (ctx1_iv_off << LDST_OFFSET_SHIFT)); ++ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ++ (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize); ++ } ++ ++ /* Load Counter into CONTEXT1 reg */ ++ if (is_rfc3686) ++ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | ++ LDST_SRCDST_BYTE_CONTEXT | ++ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << ++ LDST_OFFSET_SHIFT)); ++ ++ /* Choose operation */ ++ if (ctx1_iv_off) ++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_DECRYPT); ++ else ++ append_dec_op1(desc, cdata->algtype); ++ ++ /* Read and write cryptlen bytes */ ++ append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); ++ append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); ++ aead_append_src_dst(desc, FIFOLD_TYPE_MSG); ++ ++ /* Load ICV */ ++ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | ++ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ++#endif ++} ++EXPORT_SYMBOL(cnstr_shdsc_aead_decap); ++ ++/** ++ * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor ++ * (non-protocol) with HW-generated initialization ++ * vector. ++ * @desc: pointer to buffer used for descriptor construction ++ * @cdata: pointer to block cipher transform definitions ++ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed ++ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. ++ * @adata: pointer to authentication transform definitions. Note that since a ++ * split key is to be used, the size of the split key itself is ++ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, ++ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. ++ * @ivsize: initialization vector size ++ * @icvsize: integrity check value (ICV) size (truncated or full) ++ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template ++ * @nonce: pointer to rfc3686 nonce ++ * @ctx1_iv_off: IV offset in CONTEXT1 register ++ * @is_qi: true when called from caam/qi ++ * ++ * Note: Requires an MDHA split key. 
++ */ ++void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, ++ struct alginfo *adata, unsigned int ivsize, ++ unsigned int icvsize, const bool is_rfc3686, ++ u32 *nonce, const u32 ctx1_iv_off, ++ const bool is_qi) ++{ ++ u32 geniv, moveiv; ++ ++ /* Note: Context registers are saved. */ ++ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); ++ ++ if (is_qi) { ++ u32 *wait_load_cmd; ++ ++ /* REG3 = assoclen */ ++ append_seq_load(desc, 4, LDST_CLASS_DECO | ++ LDST_SRCDST_WORD_DECO_MATH3 | ++ (4 << LDST_OFFSET_SHIFT)); ++ ++ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_CALM | JUMP_COND_NCP | ++ JUMP_COND_NOP | JUMP_COND_NIP | ++ JUMP_COND_NIFP); ++ set_jump_tgt_here(desc, wait_load_cmd); ++ } ++ ++ if (is_rfc3686) { ++ if (is_qi) ++ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | ++ LDST_SRCDST_BYTE_CONTEXT | ++ (ctx1_iv_off << LDST_OFFSET_SHIFT)); ++ ++ goto copy_iv; ++ } ++ ++ /* Generate IV */ ++ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | ++ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | ++ NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT); ++ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | ++ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); ++ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); ++ append_move(desc, MOVE_WAITCOMP | ++ MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | ++ (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ++ (ivsize << MOVE_LEN_SHIFT)); ++ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); ++ ++copy_iv: ++ /* Copy IV from class 1 context to OFIFO */ ++ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | ++ (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ++ (ivsize << MOVE_LEN_SHIFT)); ++ ++ /* Return to encryption */ ++ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_ENCRYPT); ++ ++ /* Read and write assoclen bytes */ ++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); ++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); ++ ++ /* Skip assoc data */ ++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); ++ ++ /* read assoc before reading payload */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | ++ KEY_VLF); ++ ++ /* Copy IV from outfifo to class 2 fifo */ ++ moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 | ++ NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT); ++ append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB | ++ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); ++ append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB | ++ LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM); ++ ++ /* Load Counter into CONTEXT1 reg */ ++ if (is_rfc3686) ++ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | ++ LDST_SRCDST_BYTE_CONTEXT | ++ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << ++ LDST_OFFSET_SHIFT)); ++ ++ /* Class 1 operation */ ++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_ENCRYPT); ++ ++ /* Will write ivsize + cryptlen */ ++ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); ++ ++ /* No need to reload the IV */ ++ append_seq_fifo_load(desc, ivsize, ++ FIFOLD_CLASS_SKIP); ++ ++ /* Will read cryptlen */ ++ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF | ++ FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH); ++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); ++ ++ /* Write ICV */ ++ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | ++ LDST_SRCDST_BYTE_CONTEXT); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, ++ "aead
givenc shdesc@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ++#endif ++} ++EXPORT_SYMBOL(cnstr_shdsc_aead_givencap); ++ ++/** ++ * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor ++ * @desc: pointer to buffer used for descriptor construction ++ * @cdata: pointer to block cipher transform definitions ++ * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed ++ * with OP_ALG_AAI_CBC ++ * @adata: pointer to authentication transform definitions. Note that since a ++ * split key is to be used, the size of the split key itself is ++ * specified. Valid algorithm values OP_ALG_ALGSEL_SHA1 ANDed with ++ * OP_ALG_AAI_HMAC_PRECOMP. ++ * @assoclen: associated data length ++ * @ivsize: initialization vector size ++ * @authsize: authentication data size ++ * @blocksize: block cipher size ++ */ ++void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata, ++ struct alginfo *adata, unsigned int assoclen, ++ unsigned int ivsize, unsigned int authsize, ++ unsigned int blocksize) ++{ ++ u32 *key_jump_cmd, *zero_payload_jump_cmd; ++ u32 genpad, idx_ld_datasz, idx_ld_pad, stidx; ++ ++ /* ++ * Compute the index (in bytes) for the LOAD with destination of ++ * Class 1 Data Size Register and for the LOAD that generates padding ++ */ ++ if (adata->key_inline) { ++ idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad + ++ cdata->keylen - 4 * CAAM_CMD_SZ; ++ idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad + ++ cdata->keylen - 2 * CAAM_CMD_SZ; ++ } else { ++ idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ - ++ 4 * CAAM_CMD_SZ; ++ idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ - ++ 2 * CAAM_CMD_SZ; ++ } ++ ++ stidx = 1 << HDR_START_IDX_SHIFT; ++ init_sh_desc(desc, HDR_SHARE_SERIAL | stidx); ++ ++ /* skip key loading if they are loaded due to sharing */ ++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_SHRD); ++ ++ if (adata->key_inline) ++ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, ++ adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT | ++ KEY_ENC); ++ else ++ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | ++ KEY_DEST_MDHA_SPLIT | KEY_ENC); ++ ++ if (cdata->key_inline) ++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, ++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); ++ else ++ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | ++ KEY_DEST_CLASS_REG); ++ ++ set_jump_tgt_here(desc, key_jump_cmd); ++ ++ /* class 2 operation */ ++ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_ENCRYPT); ++ /* class 1 operation */ ++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_ENCRYPT); ++ ++ /* payloadlen = input data length - (assoclen + ivlen) */ ++ append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize); ++ ++ /* math1 = payloadlen + icvlen */ ++ append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize); ++ ++ /* padlen = block_size - math1 % block_size */ ++ append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1); ++ append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize); ++ ++ /* cryptlen = payloadlen + icvlen + padlen */ ++ append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4); ++ ++ /* ++ * update immediate data with the padding length value ++ * for the LOAD in the class 1 data size register. 
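The MATH-register arithmetic above implements standard TLS 1.0 CBC padding. A plain-C restatement of the same length computation (illustrative only; it assumes blocksize is a power of two, which holds for AES-CBC):

#include <stddef.h>

static size_t tls10_enc_cryptlen(size_t inlen, size_t assoclen, size_t ivsize,
                                 size_t icvsize, size_t blocksize)
{
    size_t payloadlen = inlen - (assoclen + ivsize);
    size_t tbc = payloadlen + icvsize; /* data to be ciphered, pre-pad */
    /* TLS always pads: a full extra block when tbc is already aligned */
    size_t padlen = blocksize - (tbc & (blocksize - 1));

    return tbc + padlen; /* encrypted payload + ICV + padding */
}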
++ */ ++ append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 | ++ (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7); ++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF | ++ (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8); ++ ++ /* overwrite PL field for the padding info FIFO entry */ ++ append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 | ++ (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7); ++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF | ++ (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8); ++ ++ /* store encrypted payload, ICV and padding */ ++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF); ++ ++ /* if payload length is zero, jump to zero-payload commands */ ++ append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4); ++ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | ++ JUMP_COND_MATH_Z); ++ ++ /* load IV in context1 */ ++ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX | ++ LDST_CLASS_1_CCB | ivsize); ++ ++ /* read assoc for authentication */ ++ append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 | ++ FIFOLD_TYPE_MSG); ++ /* in-snoop payload */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG | ++ FIFOLD_TYPE_LAST2 | FIFOLDST_VLF); ++ ++ /* jump over the zero-payload commands */ ++ append_jump(desc, JUMP_TEST_ALL | 3); ++ ++ /* zero-payload commands */ ++ set_jump_tgt_here(desc, zero_payload_jump_cmd); ++ ++ /* load IV in context1 */ ++ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX | ++ LDST_CLASS_1_CCB | ivsize); ++ ++ /* assoc data is the only data for authentication */ ++ append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 | ++ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2); ++ ++ /* send ICV to encryption */ ++ append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO | ++ authsize); ++ ++ /* update class 1 data size register with padding length */ ++ append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB | ++ LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM); ++ ++ /* generate padding and send it to encryption */ ++ genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 | ++ NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N; ++ append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB | ++ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, ++ desc_bytes(desc), 1); ++#endif ++} ++EXPORT_SYMBOL(cnstr_shdsc_tls_encap); ++ ++/** ++ * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor ++ * @desc: pointer to buffer used for descriptor construction ++ * @cdata: pointer to block cipher transform definitions ++ * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed ++ * with OP_ALG_AAI_CBC ++ * @adata: pointer to authentication transform definitions. Note that since a ++ * split key is to be used, the size of the split key itself is ++ * specified. Valid algorithm values OP_ALG_ALGSEL_SHA1 ANDed with ++ * OP_ALG_AAI_HMAC_PRECOMP. ++ * @assoclen: associated data length ++ * @ivsize: initialization vector size ++ * @authsize: authentication data size ++ * @blocksize: block cipher size ++ */ ++void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata, ++ struct alginfo *adata, unsigned int assoclen, ++ unsigned int ivsize, unsigned int authsize, ++ unsigned int blocksize) ++{ ++ u32 stidx, jumpback; ++ u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd; ++ /* ++ * Pointer Size bool determines the size of address pointers.
++ * false - Pointers fit in one 32-bit word. ++ * true - Pointers fit in two 32-bit words. ++ */ ++ static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ); ++ ++ stidx = 1 << HDR_START_IDX_SHIFT; ++ init_sh_desc(desc, HDR_SHARE_SERIAL | stidx); ++ ++ /* skip key loading if they are loaded due to sharing */ ++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_SHRD); ++ ++ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | ++ KEY_DEST_MDHA_SPLIT | KEY_ENC); ++ ++ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | ++ KEY_DEST_CLASS_REG); ++ ++ set_jump_tgt_here(desc, key_jump_cmd); ++ ++ /* class 2 operation */ ++ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_DECRYPT | OP_ALG_ICV_ON); ++ /* class 1 operation */ ++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_DECRYPT); ++ ++ /* VSIL = input data length - 2 * block_size */ ++ append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 * ++ blocksize); ++ ++ /* ++ * payloadlen + icvlen + padlen = input data length - (assoclen + ++ * ivsize) ++ */ ++ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize); ++ ++ /* skip data to the last but one cipher block */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF); ++ ++ /* load iv for the last cipher block */ ++ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX | ++ LDST_CLASS_1_CCB | ivsize); ++ ++ /* read last cipher block */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG | ++ FIFOLD_TYPE_LAST1 | blocksize); ++ ++ /* move decrypted block into math0 and math1 */ ++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 | ++ blocksize); ++ ++ /* reset AES CHA */ ++ append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB | ++ LDST_SRCDST_WORD_CHACTRL | LDST_IMM); ++ ++ /* rewind input sequence */ ++ append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO); ++ ++ /* key1 is in decryption form */ ++ append_operation(desc, cdata->algtype | OP_ALG_AAI_DK | ++ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT); ++ ++ /* load iv in context1 */ ++ append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB | ++ LDST_SRCDST_WORD_CLASS_CTX | ivsize); ++ ++ /* read sequence number */ ++ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG); ++ /* load Type, Version and Len fields in math0 */ ++ append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO | ++ LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5); ++ ++ /* compute (padlen - 1) */ ++ append_math_and_imm_u64(desc, REG1, REG1, IMM, 255); ++ ++ /* math2 = icvlen + (padlen - 1) + 1 */ ++ append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1); ++ ++ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1); ++ ++ /* VSOL = payloadlen + icvlen + padlen */ ++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4); ++ ++#ifdef __LITTLE_ENDIAN ++ append_moveb(desc, MOVE_WAITCOMP | ++ MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8); ++#endif ++ /* update Len field */ ++ append_math_sub(desc, REG0, REG0, REG2, 8); ++ ++ /* store decrypted payload, icv and padding */ ++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF); ++ ++ /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen)*/ ++ append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4); ++ ++ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | ++ JUMP_COND_MATH_Z); ++ ++ /* send Type, Version and Len(pre ICV) fields to authentication */ ++ append_move(desc, MOVE_WAITCOMP | ++ MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO | ++ (3 << MOVE_OFFSET_SHIFT) 
| 5); ++ ++ /* out-snoop payload */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | ++ FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 | ++ FIFOLDST_VLF); ++ skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2); ++ ++ set_jump_tgt_here(desc, zero_payload_jump_cmd); ++ /* send Type, Version and Len(pre ICV) fields to authentication */ ++ append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS | ++ MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO | ++ (3 << MOVE_OFFSET_SHIFT) | 5); ++ ++ set_jump_tgt_here(desc, skip_zero_jump_cmd); ++ append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4); ++ ++ /* load icvlen and padlen */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG | ++ FIFOLD_TYPE_LAST1 | FIFOLDST_VLF); ++ ++ /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */ ++ append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4); ++ ++ /* ++ * Start a new input sequence using the SEQ OUT PTR command options, ++ * pointer and length used when the current output sequence was defined. ++ */ ++ if (ps) { ++ /* ++ * Move the lower 32 bits of Shared Descriptor address, the ++ * SEQ OUT PTR command, Output Pointer (2 words) and ++ * Output Length into math registers. ++ */ ++#ifdef __LITTLE_ENDIAN ++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF | ++ MOVE_DEST_MATH0 | (55 * 4 << MOVE_OFFSET_SHIFT) | ++ 20); ++#else ++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF | ++ MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) | ++ 20); ++#endif ++ /* Transform SEQ OUT PTR command in SEQ IN PTR command */ ++ append_math_and_imm_u32(desc, REG0, REG0, IMM, ++ ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR)); ++ /* Append a JUMP command after the copied fields */ ++ jumpback = CMD_JUMP | (char)-9; ++ append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM | ++ LDST_SRCDST_WORD_DECO_MATH2 | ++ (4 << LDST_OFFSET_SHIFT)); ++ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1); ++ /* Move the updated fields back to the Job Descriptor */ ++#ifdef __LITTLE_ENDIAN ++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 | ++ MOVE_DEST_DESCBUF | (55 * 4 << MOVE_OFFSET_SHIFT) | ++ 24); ++#else ++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 | ++ MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) | ++ 24); ++#endif ++ /* ++ * Read the new SEQ IN PTR command, Input Pointer, Input Length ++ * and then jump back to the next command from the ++ * Shared Descriptor. ++ */ ++ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6); ++ } else { ++ /* ++ * Move the SEQ OUT PTR command, Output Pointer (1 word) and ++ * Output Length into math registers.
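The ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR) mask above works because the two opcodes differ in exactly one bit and SEQ IN PTR is the variant with that bit cleared, so a single AND rewrites the copied command word in place. A self-contained demonstration, using the opcode values from the CAAM desc.h:

#include <assert.h>
#include <stdint.h>

#define CMD_SEQ_IN_PTR  0xf0000000u /* 0x1e << 27 */
#define CMD_SEQ_OUT_PTR 0xf8000000u /* 0x1f << 27 */

static uint32_t seq_out_ptr_to_in_ptr(uint32_t cmd)
{
    /* clears bit 27, turning SEQ OUT PTR into SEQ IN PTR */
    return cmd & ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR);
}

int main(void)
{
    uint32_t cmd = CMD_SEQ_OUT_PTR | 0x1000; /* opcode plus length bits */

    assert(seq_out_ptr_to_in_ptr(cmd) == (CMD_SEQ_IN_PTR | 0x1000));
    return 0;
}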
++ */ ++#ifdef __LITTLE_ENDIAN ++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF | ++ MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) | ++ 12); ++#else ++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF | ++ MOVE_DEST_MATH0 | (53 * 4 << MOVE_OFFSET_SHIFT) | ++ 12); ++#endif ++ /* Transform SEQ OUT PTR command in SEQ IN PTR command */ ++ append_math_and_imm_u64(desc, REG0, REG0, IMM, ++ ~(((u64)(CMD_SEQ_IN_PTR ^ ++ CMD_SEQ_OUT_PTR)) << 32)); ++ /* Append a JUMP command after the copied fields */ ++ jumpback = CMD_JUMP | (char)-7; ++ append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM | ++ LDST_SRCDST_WORD_DECO_MATH1 | ++ (4 << LDST_OFFSET_SHIFT)); ++ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1); ++ /* Move the updated fields back to the Job Descriptor */ ++#ifdef __LITTLE_ENDIAN ++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 | ++ MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) | ++ 16); ++#else ++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 | ++ MOVE_DEST_DESCBUF | (53 * 4 << MOVE_OFFSET_SHIFT) | ++ 16); ++#endif ++ /* ++ * Read the new SEQ IN PTR command, Input Pointer, Input Length ++ * and then jump back to the next command from the ++ * Shared Descriptor. ++ */ ++ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5); ++ } ++ ++ /* skip payload */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF); ++ /* check icv */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV | ++ FIFOLD_TYPE_LAST2 | authsize); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, ++ desc_bytes(desc), 1); ++#endif ++} ++EXPORT_SYMBOL(cnstr_shdsc_tls_decap); ++ ++/** ++ * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor ++ * @desc: pointer to buffer used for descriptor construction ++ * @cdata: pointer to block cipher transform definitions ++ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. 
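cnstr_shdsc_gcm_encap below builds three conditional jumps so that one descriptor serves every combination of empty and non-empty AAD and payload. A host-side restatement of the branch selection (illustrative only; the enum names are invented for the sketch):

enum gcm_enc_path {
    GCM_ENC_AAD_AND_PAYLOAD, /* no jump taken */
    GCM_ENC_AAD_ONLY,        /* zero_payload_jump_cmd taken */
    GCM_ENC_PAYLOAD_ONLY,    /* zero_assoc_jump_cmd1 taken */
    GCM_ENC_EMPTY,           /* zero_assoc_jump_cmd2: straight to ICV write */
};

static enum gcm_enc_path gcm_enc_path(unsigned int assoclen,
                                      unsigned int cryptlen)
{
    if (assoclen == 0 && cryptlen == 0)
        return GCM_ENC_EMPTY;
    if (assoclen == 0)
        return GCM_ENC_PAYLOAD_ONLY;
    if (cryptlen == 0)
        return GCM_ENC_AAD_ONLY;
    return GCM_ENC_AAD_AND_PAYLOAD;
}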
++ * @ivsize: initialization vector size ++ * @icvsize: integrity check value (ICV) size (truncated or full) ++ * @is_qi: true when called from caam/qi ++ */ ++void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata, ++ unsigned int ivsize, unsigned int icvsize, ++ const bool is_qi) ++{ ++ u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1, ++ *zero_assoc_jump_cmd2; ++ ++ init_sh_desc(desc, HDR_SHARE_SERIAL); ++ ++ /* skip key loading if they are loaded due to sharing */ ++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_SHRD); ++ if (cdata->key_inline) ++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, ++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); ++ else ++ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | ++ KEY_DEST_CLASS_REG); ++ set_jump_tgt_here(desc, key_jump_cmd); ++ ++ /* class 1 operation */ ++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_ENCRYPT); ++ ++ if (is_qi) { ++ u32 *wait_load_cmd; ++ ++ /* REG3 = assoclen */ ++ append_seq_load(desc, 4, LDST_CLASS_DECO | ++ LDST_SRCDST_WORD_DECO_MATH3 | ++ (4 << LDST_OFFSET_SHIFT)); ++ ++ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_CALM | JUMP_COND_NCP | ++ JUMP_COND_NOP | JUMP_COND_NIP | ++ JUMP_COND_NIFP); ++ set_jump_tgt_here(desc, wait_load_cmd); ++ ++ append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM, ++ ivsize); ++ } else { ++ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, ++ CAAM_CMD_SZ); ++ } ++ ++ /* if assoclen + cryptlen is ZERO, skip to ICV write */ ++ zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | ++ JUMP_COND_MATH_Z); ++ ++ if (is_qi) ++ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | ++ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); ++ ++ /* if assoclen is ZERO, skip reading the assoc data */ ++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); ++ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | ++ JUMP_COND_MATH_Z); ++ ++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); ++ ++ /* skip assoc data */ ++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); ++ ++ /* cryptlen = seqinlen - assoclen */ ++ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ); ++ ++ /* if cryptlen is ZERO jump to zero-payload commands */ ++ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | ++ JUMP_COND_MATH_Z); ++ ++ /* read assoc data */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | ++ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); ++ set_jump_tgt_here(desc, zero_assoc_jump_cmd1); ++ ++ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); ++ ++ /* write encrypted data */ ++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); ++ ++ /* read payload data */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | ++ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); ++ ++ /* jump to ICV writing */ ++ if (is_qi) ++ append_jump(desc, JUMP_TEST_ALL | 4); ++ else ++ append_jump(desc, JUMP_TEST_ALL | 2); ++ ++ /* zero-payload commands */ ++ set_jump_tgt_here(desc, zero_payload_jump_cmd); ++ ++ /* read assoc data */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | ++ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1); ++ if (is_qi) ++ /* jump to ICV writing */ ++ append_jump(desc, JUMP_TEST_ALL | 2); ++ ++ /* There is no input data */ ++ set_jump_tgt_here(desc, zero_assoc_jump_cmd2); ++ ++ if (is_qi) ++ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | ++ FIFOLD_TYPE_IV | 
FIFOLD_TYPE_FLUSH1 | ++ FIFOLD_TYPE_LAST1); ++ ++ /* write ICV */ ++ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | ++ LDST_SRCDST_BYTE_CONTEXT); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ++#endif ++} ++EXPORT_SYMBOL(cnstr_shdsc_gcm_encap); ++ ++/** ++ * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor ++ * @desc: pointer to buffer used for descriptor construction ++ * @cdata: pointer to block cipher transform definitions ++ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. ++ * @ivsize: initialization vector size ++ * @icvsize: integrity check value (ICV) size (truncated or full) ++ * @is_qi: true when called from caam/qi ++ */ ++void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata, ++ unsigned int ivsize, unsigned int icvsize, ++ const bool is_qi) ++{ ++ u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1; ++ ++ init_sh_desc(desc, HDR_SHARE_SERIAL); ++ ++ /* skip key loading if they are loaded due to sharing */ ++ key_jump_cmd = append_jump(desc, JUMP_JSL | ++ JUMP_TEST_ALL | JUMP_COND_SHRD); ++ if (cdata->key_inline) ++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, ++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); ++ else ++ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | ++ KEY_DEST_CLASS_REG); ++ set_jump_tgt_here(desc, key_jump_cmd); ++ ++ /* class 1 operation */ ++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_DECRYPT | OP_ALG_ICV_ON); ++ ++ if (is_qi) { ++ u32 *wait_load_cmd; ++ ++ /* REG3 = assoclen */ ++ append_seq_load(desc, 4, LDST_CLASS_DECO | ++ LDST_SRCDST_WORD_DECO_MATH3 | ++ (4 << LDST_OFFSET_SHIFT)); ++ ++ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_CALM | JUMP_COND_NCP | ++ JUMP_COND_NOP | JUMP_COND_NIP | ++ JUMP_COND_NIFP); ++ set_jump_tgt_here(desc, wait_load_cmd); ++ ++ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | ++ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); ++ } ++ ++ /* if assoclen is ZERO, skip reading the assoc data */ ++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); ++ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | ++ JUMP_COND_MATH_Z); ++ ++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); ++ ++ /* skip assoc data */ ++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); ++ ++ /* read assoc data */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | ++ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); ++ ++ set_jump_tgt_here(desc, zero_assoc_jump_cmd1); ++ ++ /* cryptlen = seqoutlen - assoclen */ ++ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); ++ ++ /* jump to zero-payload command if cryptlen is zero */ ++ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | ++ JUMP_COND_MATH_Z); ++ ++ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); ++ ++ /* store encrypted data */ ++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); ++ ++ /* read payload data */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | ++ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); ++ ++ /* zero-payload command */ ++ set_jump_tgt_here(desc, zero_payload_jump_cmd); ++ ++ /* read ICV */ ++ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | ++ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, 
desc_bytes(desc), 1); ++#endif ++} ++EXPORT_SYMBOL(cnstr_shdsc_gcm_decap); ++ ++/** ++ * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor ++ * (non-protocol). ++ * @desc: pointer to buffer used for descriptor construction ++ * @cdata: pointer to block cipher transform definitions ++ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. ++ * @ivsize: initialization vector size ++ * @icvsize: integrity check value (ICV) size (truncated or full) ++ * @is_qi: true when called from caam/qi ++ */ ++void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata, ++ unsigned int ivsize, unsigned int icvsize, ++ const bool is_qi) ++{ ++ u32 *key_jump_cmd; ++ ++ init_sh_desc(desc, HDR_SHARE_SERIAL); ++ ++ /* Skip key loading if it is loaded due to sharing */ ++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_SHRD); ++ if (cdata->key_inline) ++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, ++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); ++ else ++ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | ++ KEY_DEST_CLASS_REG); ++ set_jump_tgt_here(desc, key_jump_cmd); ++ ++ /* Class 1 operation */ ++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_ENCRYPT); ++ ++ if (is_qi) { ++ u32 *wait_load_cmd; ++ ++ /* REG3 = assoclen */ ++ append_seq_load(desc, 4, LDST_CLASS_DECO | ++ LDST_SRCDST_WORD_DECO_MATH3 | ++ (4 << LDST_OFFSET_SHIFT)); ++ ++ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_CALM | JUMP_COND_NCP | ++ JUMP_COND_NOP | JUMP_COND_NIP | ++ JUMP_COND_NIFP); ++ set_jump_tgt_here(desc, wait_load_cmd); ++ ++ /* Read salt and IV */ ++ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt + ++ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 | ++ FIFOLD_TYPE_IV); ++ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | ++ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); ++ } ++ ++ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize); ++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); ++ ++ /* Read assoc data */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | ++ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); ++ ++ /* Skip IV */ ++ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP); ++ ++ /* Will read cryptlen bytes */ ++ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); ++ ++ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG); ++ ++ /* Skip assoc data */ ++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); ++ ++ /* cryptlen = seqoutlen - assoclen */ ++ append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ); ++ ++ /* Write encrypted data */ ++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); ++ ++ /* Read payload data */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | ++ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); ++ ++ /* Write ICV */ ++ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | ++ LDST_SRCDST_BYTE_CONTEXT); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, ++ "rfc4106 enc shdesc@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ++#endif ++} ++EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap); ++ ++/** ++ * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor ++ * (non-protocol). 
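The "Read salt and IV" pair above reflects the RFC 4106 convention: setkey stores a 4-byte salt immediately after the AES key, and each request supplies an 8-byte explicit IV; together they form the 96-bit GCM nonce. An illustrative composition in plain C (helper name invented):

#include <stdint.h>
#include <string.h>

/* nonce = salt (from key material) || explicit IV (from the request) */
static void rfc4106_build_nonce(uint8_t nonce[12], const uint8_t *key,
                                unsigned int keylen, const uint8_t iv[8])
{
    memcpy(nonce, key + keylen, 4); /* salt lives right after the key */
    memcpy(nonce + 4, iv, 8);       /* 8-byte IV, the rfc4106 default */
}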
++ * @desc: pointer to buffer used for descriptor construction ++ * @cdata: pointer to block cipher transform definitions ++ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. ++ * @ivsize: initialization vector size ++ * @icvsize: integrity check value (ICV) size (truncated or full) ++ * @is_qi: true when called from caam/qi ++ */ ++void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata, ++ unsigned int ivsize, unsigned int icvsize, ++ const bool is_qi) ++{ ++ u32 *key_jump_cmd; ++ ++ init_sh_desc(desc, HDR_SHARE_SERIAL); ++ ++ /* Skip key loading if it is loaded due to sharing */ ++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_SHRD); ++ if (cdata->key_inline) ++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, ++ cdata->keylen, CLASS_1 | ++ KEY_DEST_CLASS_REG); ++ else ++ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | ++ KEY_DEST_CLASS_REG); ++ set_jump_tgt_here(desc, key_jump_cmd); ++ ++ /* Class 1 operation */ ++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_DECRYPT | OP_ALG_ICV_ON); ++ ++ if (is_qi) { ++ u32 *wait_load_cmd; ++ ++ /* REG3 = assoclen */ ++ append_seq_load(desc, 4, LDST_CLASS_DECO | ++ LDST_SRCDST_WORD_DECO_MATH3 | ++ (4 << LDST_OFFSET_SHIFT)); ++ ++ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_CALM | JUMP_COND_NCP | ++ JUMP_COND_NOP | JUMP_COND_NIP | ++ JUMP_COND_NIFP); ++ set_jump_tgt_here(desc, wait_load_cmd); ++ ++ /* Read salt and IV */ ++ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt + ++ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 | ++ FIFOLD_TYPE_IV); ++ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | ++ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); ++ } ++ ++ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize); ++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); ++ ++ /* Read assoc data */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | ++ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); ++ ++ /* Skip IV */ ++ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP); ++ ++ /* Will read cryptlen bytes */ ++ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ); ++ ++ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG); ++ ++ /* Skip assoc data */ ++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); ++ ++ /* Will write cryptlen bytes */ ++ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); ++ ++ /* Store payload data */ ++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); ++ ++ /* Read encrypted data */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | ++ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); ++ ++ /* Read ICV */ ++ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | ++ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, ++ "rfc4106 dec shdesc@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ++#endif ++} ++EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap); ++ ++/** ++ * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor ++ * (non-protocol). ++ * @desc: pointer to buffer used for descriptor construction ++ * @cdata: pointer to block cipher transform definitions ++ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. 
++ * @ivsize: initialization vector size ++ * @icvsize: integrity check value (ICV) size (truncated or full) ++ * @is_qi: true when called from caam/qi ++ */ ++void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata, ++ unsigned int ivsize, unsigned int icvsize, ++ const bool is_qi) ++{ ++ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd; ++ ++ init_sh_desc(desc, HDR_SHARE_SERIAL); ++ ++ /* Skip key loading if it is loaded due to sharing */ ++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_SHRD); ++ if (cdata->key_inline) ++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, ++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); ++ else ++ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | ++ KEY_DEST_CLASS_REG); ++ set_jump_tgt_here(desc, key_jump_cmd); ++ ++ /* Class 1 operation */ ++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_ENCRYPT); ++ ++ if (is_qi) { ++ /* assoclen is not needed, skip it */ ++ append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP); ++ ++ /* Read salt and IV */ ++ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt + ++ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 | ++ FIFOLD_TYPE_IV); ++ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | ++ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); ++ } ++ ++ /* assoclen + cryptlen = seqinlen */ ++ append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ); ++ ++ /* ++ * MOVE_LEN opcode is not available in all SEC HW revisions, ++ * thus need to do some magic, i.e. self-patch the descriptor ++ * buffer. ++ */ ++ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | ++ (0x6 << MOVE_LEN_SHIFT)); ++ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | ++ (0x8 << MOVE_LEN_SHIFT)); ++ ++ /* Will read assoclen + cryptlen bytes */ ++ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); ++ ++ /* Will write assoclen + cryptlen bytes */ ++ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); ++ ++ /* Read and write assoclen + cryptlen bytes */ ++ aead_append_src_dst(desc, FIFOLD_TYPE_AAD); ++ ++ set_move_tgt_here(desc, read_move_cmd); ++ set_move_tgt_here(desc, write_move_cmd); ++ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); ++ /* Move payload data to OFIFO */ ++ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); ++ ++ /* Write ICV */ ++ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | ++ LDST_SRCDST_BYTE_CONTEXT); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, ++ "rfc4543 enc shdesc@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ++#endif ++} ++EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap); ++ ++/** ++ * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor ++ * (non-protocol). ++ * @desc: pointer to buffer used for descriptor construction ++ * @cdata: pointer to block cipher transform definitions ++ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. 
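The read_move_cmd/write_move_cmd pair above works around SEC revisions that lack the MOVE_LEN opcode: the descriptor copies its own command words into a MATH register, lets the DECO fill in the run-time length, and writes them back before they execute. A minimal host-side analogue of patching a MOVE command's length field in place (mask value per the MOVE_LEN field in the CAAM desc.h; illustrative only):

#include <stdint.h>

#define MOVE_LEN_MASK 0xffu /* low byte of a MOVE command holds the length */

static void patch_move_len(uint32_t *desc, unsigned int idx, uint32_t len)
{
    desc[idx] = (desc[idx] & ~MOVE_LEN_MASK) | (len & MOVE_LEN_MASK);
}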
++ * @ivsize: initialization vector size ++ * @icvsize: integrity check value (ICV) size (truncated or full) ++ * @is_qi: true when called from caam/qi ++ */ ++void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata, ++ unsigned int ivsize, unsigned int icvsize, ++ const bool is_qi) ++{ ++ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd; ++ ++ init_sh_desc(desc, HDR_SHARE_SERIAL); ++ ++ /* Skip key loading if it is loaded due to sharing */ ++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_SHRD); ++ if (cdata->key_inline) ++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, ++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); ++ else ++ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | ++ KEY_DEST_CLASS_REG); ++ set_jump_tgt_here(desc, key_jump_cmd); ++ ++ /* Class 1 operation */ ++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_DECRYPT | OP_ALG_ICV_ON); ++ ++ if (is_qi) { ++ /* assoclen is not needed, skip it */ ++ append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP); ++ ++ /* Read salt and IV */ ++ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt + ++ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 | ++ FIFOLD_TYPE_IV); ++ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | ++ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); ++ } ++ ++ /* assoclen + cryptlen = seqoutlen */ ++ append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ); ++ ++ /* ++ * MOVE_LEN opcode is not available in all SEC HW revisions, ++ * thus need to do some magic, i.e. self-patch the descriptor ++ * buffer. ++ */ ++ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | ++ (0x6 << MOVE_LEN_SHIFT)); ++ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | ++ (0x8 << MOVE_LEN_SHIFT)); ++ ++ /* Will read assoclen + cryptlen bytes */ ++ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); ++ ++ /* Will write assoclen + cryptlen bytes */ ++ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); ++ ++ /* Store payload data */ ++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); ++ ++ /* In-snoop assoclen + cryptlen data */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF | ++ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1); ++ ++ set_move_tgt_here(desc, read_move_cmd); ++ set_move_tgt_here(desc, write_move_cmd); ++ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); ++ /* Move payload data to OFIFO */ ++ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); ++ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); ++ ++ /* Read ICV */ ++ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | ++ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, ++ "rfc4543 dec shdesc@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ++#endif ++} ++EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap); ++ ++/* ++ * For ablkcipher encrypt and decrypt, read from req->src and ++ * write to req->dst ++ */ ++static inline void ablkcipher_append_src_dst(u32 *desc) ++{ ++ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); ++ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | ++ KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); ++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); ++} ++ ++/** ++ * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor ++ * @desc: pointer to buffer used for descriptor 
construction ++ * @cdata: pointer to block cipher transform definitions ++ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed ++ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. ++ * @ivsize: initialization vector size ++ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template ++ * @ctx1_iv_off: IV offset in CONTEXT1 register ++ */ ++void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata, ++ unsigned int ivsize, const bool is_rfc3686, ++ const u32 ctx1_iv_off) ++{ ++ u32 *key_jump_cmd; ++ ++ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); ++ /* Skip if already shared */ ++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_SHRD); ++ ++ /* Load class1 key only */ ++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, ++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); ++ ++ /* Load nonce into CONTEXT1 reg */ ++ if (is_rfc3686) { ++ u8 *nonce = cdata->key_virt + cdata->keylen; ++ ++ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, ++ LDST_CLASS_IND_CCB | ++ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); ++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | ++ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) | ++ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); ++ } ++ ++ set_jump_tgt_here(desc, key_jump_cmd); ++ ++ /* Load iv */ ++ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | ++ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); ++ ++ /* Load counter into CONTEXT1 reg */ ++ if (is_rfc3686) ++ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | ++ LDST_SRCDST_BYTE_CONTEXT | ++ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << ++ LDST_OFFSET_SHIFT)); ++ ++ /* Load operation */ ++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_ENCRYPT); ++ ++ /* Perform operation */ ++ ablkcipher_append_src_dst(desc); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, ++ "ablkcipher enc shdesc@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ++#endif ++} ++EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap); ++ ++/** ++ * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor ++ * @desc: pointer to buffer used for descriptor construction ++ * @cdata: pointer to block cipher transform definitions ++ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed ++ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. 
++ * @ivsize: initialization vector size ++ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template ++ * @ctx1_iv_off: IV offset in CONTEXT1 register ++ */ ++void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata, ++ unsigned int ivsize, const bool is_rfc3686, ++ const u32 ctx1_iv_off) ++{ ++ u32 *key_jump_cmd; ++ ++ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); ++ /* Skip if already shared */ ++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_SHRD); ++ ++ /* Load class1 key only */ ++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, ++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); ++ ++ /* Load nonce into CONTEXT1 reg */ ++ if (is_rfc3686) { ++ u8 *nonce = cdata->key_virt + cdata->keylen; ++ ++ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, ++ LDST_CLASS_IND_CCB | ++ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); ++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | ++ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) | ++ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); ++ } ++ ++ set_jump_tgt_here(desc, key_jump_cmd); ++ ++ /* load IV */ ++ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | ++ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); ++ ++ /* Load counter into CONTEXT1 reg */ ++ if (is_rfc3686) ++ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | ++ LDST_SRCDST_BYTE_CONTEXT | ++ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << ++ LDST_OFFSET_SHIFT)); ++ ++ /* Choose operation */ ++ if (ctx1_iv_off) ++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_DECRYPT); ++ else ++ append_dec_op1(desc, cdata->algtype); ++ ++ /* Perform operation */ ++ ablkcipher_append_src_dst(desc); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, ++ "ablkcipher dec shdesc@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ++#endif ++} ++EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap); ++ ++/** ++ * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor ++ * with HW-generated initialization vector. ++ * @desc: pointer to buffer used for descriptor construction ++ * @cdata: pointer to block cipher transform definitions ++ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed ++ * with OP_ALG_AAI_CBC. 
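A typical caller (modeled on the setkey path in caamalg.c; the wrapper below is a sketch, not verbatim driver code) computes the CONTEXT1 IV offset once and then builds both directions:

/* Sketch: build encap + decap shared descriptors for rfc3686(ctr(aes)).
 * Assumes a caam_ctx with cdata/key/sh_desc_* fields as in caamalg_qi.c;
 * the wrapper function itself is invented for illustration. */
static void example_ablkcipher_shdescs(struct caam_ctx *ctx,
                                       unsigned int ivsize)
{
    const bool is_rfc3686 = true;
    /* CONTEXT1 = {nonce, IV, counter}: IV sits after the 4-byte nonce */
    u32 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

    ctx->cdata.key_virt = ctx->key; /* cipher key with nonce appended */
    cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
                                 is_rfc3686, ctx1_iv_off);
    cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
                                 is_rfc3686, ctx1_iv_off);
}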
++ * @ivsize: initialization vector size ++ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template ++ * @ctx1_iv_off: IV offset in CONTEXT1 register ++ */ ++void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata, ++ unsigned int ivsize, const bool is_rfc3686, ++ const u32 ctx1_iv_off) ++{ ++ u32 *key_jump_cmd, geniv; ++ ++ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); ++ /* Skip if already shared */ ++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_SHRD); ++ ++ /* Load class1 key only */ ++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, ++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); ++ ++ /* Load Nonce into CONTEXT1 reg */ ++ if (is_rfc3686) { ++ u8 *nonce = cdata->key_virt + cdata->keylen; ++ ++ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, ++ LDST_CLASS_IND_CCB | ++ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); ++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | ++ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) | ++ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); ++ } ++ set_jump_tgt_here(desc, key_jump_cmd); ++ ++ /* Generate IV */ ++ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | ++ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND | ++ (ivsize << NFIFOENTRY_DLEN_SHIFT); ++ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | ++ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); ++ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); ++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO | ++ MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) | ++ (ctx1_iv_off << MOVE_OFFSET_SHIFT)); ++ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); ++ ++ /* Copy generated IV to memory */ ++ append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | ++ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); ++ ++ /* Load Counter into CONTEXT1 reg */ ++ if (is_rfc3686) ++ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | ++ LDST_SRCDST_BYTE_CONTEXT | ++ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << ++ LDST_OFFSET_SHIFT)); ++ ++ if (ctx1_iv_off) ++ append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP | ++ (1 << JUMP_OFFSET_SHIFT)); ++ ++ /* Load operation */ ++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_ENCRYPT); ++ ++ /* Perform operation */ ++ ablkcipher_append_src_dst(desc); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, ++ "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ++#endif ++} ++EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap); ++ ++/** ++ * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared ++ * descriptor ++ * @desc: pointer to buffer used for descriptor construction ++ * @cdata: pointer to block cipher transform definitions ++ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS. 
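The XTS constructors below consume a 16-byte IV in which only the upper 8 bytes matter: they are loaded into CONTEXT1 at offset 0x20 as the sector index, while the lower 8 bytes are skipped. A caller would lay the IV out like this (illustrative; the byte order of the sector index follows whatever convention the upper layer uses):

#include <stdint.h>
#include <string.h>

static void xts_fill_iv(uint8_t iv[16], const uint8_t sector_idx[8])
{
    memcpy(iv, sector_idx, 8); /* upper 8B -> CONTEXT1 @ 0x20 */
    memset(iv + 8, 0, 8);      /* lower 8B are discarded by the descriptor */
}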
++ */ ++void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata) ++{ ++ __be64 sector_size = cpu_to_be64(512); ++ u32 *key_jump_cmd; ++ ++ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); ++ /* Skip if already shared */ ++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_SHRD); ++ ++ /* Load class1 keys only */ ++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, ++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); ++ ++ /* Load sector size with index 40 bytes (0x28) */ ++ append_load_as_imm(desc, (void *)§or_size, 8, LDST_CLASS_1_CCB | ++ LDST_SRCDST_BYTE_CONTEXT | ++ (0x28 << LDST_OFFSET_SHIFT)); ++ ++ set_jump_tgt_here(desc, key_jump_cmd); ++ ++ /* ++ * create sequence for loading the sector index ++ * Upper 8B of IV - will be used as sector index ++ * Lower 8B of IV - will be discarded ++ */ ++ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | ++ (0x20 << LDST_OFFSET_SHIFT)); ++ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); ++ ++ /* Load operation */ ++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | ++ OP_ALG_ENCRYPT); ++ ++ /* Perform operation */ ++ ablkcipher_append_src_dst(desc); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, ++ "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ++#endif ++} ++EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap); ++ ++/** ++ * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared ++ * descriptor ++ * @desc: pointer to buffer used for descriptor construction ++ * @cdata: pointer to block cipher transform definitions ++ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS. ++ */ ++void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata) ++{ ++ __be64 sector_size = cpu_to_be64(512); ++ u32 *key_jump_cmd; ++ ++ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); ++ /* Skip if already shared */ ++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_SHRD); ++ ++ /* Load class1 key only */ ++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, ++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); ++ ++ /* Load sector size with index 40 bytes (0x28) */ ++ append_load_as_imm(desc, (void *)§or_size, 8, LDST_CLASS_1_CCB | ++ LDST_SRCDST_BYTE_CONTEXT | ++ (0x28 << LDST_OFFSET_SHIFT)); ++ ++ set_jump_tgt_here(desc, key_jump_cmd); ++ ++ /* ++ * create sequence for loading the sector index ++ * Upper 8B of IV - will be used as sector index ++ * Lower 8B of IV - will be discarded ++ */ ++ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | ++ (0x20 << LDST_OFFSET_SHIFT)); ++ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); ++ ++ /* Load operation */ ++ append_dec_op1(desc, cdata->algtype); ++ ++ /* Perform operation */ ++ ablkcipher_append_src_dst(desc); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, ++ "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ++#endif ++} ++EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap); ++ ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("FSL CAAM descriptor support"); ++MODULE_AUTHOR("Freescale Semiconductor - NMG/STC"); +--- /dev/null ++++ b/drivers/crypto/caam/caamalg_desc.h +@@ -0,0 +1,127 @@ ++/* ++ * Shared descriptors for aead, ablkcipher algorithms ++ * ++ * Copyright 2016 NXP ++ */ ++ ++#ifndef _CAAMALG_DESC_H_ ++#define _CAAMALG_DESC_H_ ++ ++/* length of descriptors text */ ++#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) 
++#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ) ++#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) ++#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ) ++#define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ) ++#define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ) ++#define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ) ++ ++#define DESC_TLS_BASE (4 * CAAM_CMD_SZ) ++#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ) ++ ++/* Note: Nonce is counted in cdata.keylen */ ++#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ) ++ ++#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ) ++#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ) ++#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ) ++ ++#define DESC_GCM_BASE (3 * CAAM_CMD_SZ) ++#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ) ++#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ) ++#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ) ++#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ) ++ ++#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ) ++#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) ++#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) ++#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ) ++#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ) ++ ++#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ) ++#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ) ++#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ) ++#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ) ++#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ) ++ ++#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ) ++#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \ ++ 20 * CAAM_CMD_SZ) ++#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \ ++ 15 * CAAM_CMD_SZ) ++ ++void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, ++ unsigned int icvsize); ++ ++void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, ++ unsigned int icvsize); ++ ++void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, ++ struct alginfo *adata, unsigned int ivsize, ++ unsigned int icvsize, const bool is_rfc3686, ++ u32 *nonce, const u32 ctx1_iv_off, ++ const bool is_qi); ++ ++void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, ++ struct alginfo *adata, unsigned int ivsize, ++ unsigned int icvsize, const bool geniv, ++ const bool is_rfc3686, u32 *nonce, ++ const u32 ctx1_iv_off, const bool is_qi); ++ ++void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, ++ struct alginfo *adata, unsigned int ivsize, ++ unsigned int icvsize, const bool is_rfc3686, ++ u32 *nonce, const u32 ctx1_iv_off, ++ const bool is_qi); ++ ++void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata, ++ struct alginfo *adata, unsigned int assoclen, ++ unsigned int ivsize, unsigned int authsize, ++ unsigned int blocksize); ++ ++void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata, ++ struct alginfo *adata, unsigned int assoclen, ++ unsigned int ivsize, unsigned int authsize, ++ unsigned int blocksize); ++ ++void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata, ++ unsigned int ivsize, unsigned int icvsize, ++ const bool is_qi); ++ ++void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata, ++ unsigned 
int ivsize, unsigned int icvsize, ++ const bool is_qi); ++ ++void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata, ++ unsigned int ivsize, unsigned int icvsize, ++ const bool is_qi); ++ ++void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata, ++ unsigned int ivsize, unsigned int icvsize, ++ const bool is_qi); ++ ++void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata, ++ unsigned int ivsize, unsigned int icvsize, ++ const bool is_qi); ++ ++void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata, ++ unsigned int ivsize, unsigned int icvsize, ++ const bool is_qi); ++ ++void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata, ++ unsigned int ivsize, const bool is_rfc3686, ++ const u32 ctx1_iv_off); ++ ++void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata, ++ unsigned int ivsize, const bool is_rfc3686, ++ const u32 ctx1_iv_off); ++ ++void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata, ++ unsigned int ivsize, const bool is_rfc3686, ++ const u32 ctx1_iv_off); ++ ++void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata); ++ ++void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata); ++ ++#endif /* _CAAMALG_DESC_H_ */ +--- /dev/null ++++ b/drivers/crypto/caam/caamalg_qi.c +@@ -0,0 +1,2877 @@ ++/* ++ * Freescale FSL CAAM support for crypto API over QI backend. ++ * Based on caamalg.c ++ * ++ * Copyright 2013-2016 Freescale Semiconductor, Inc. ++ * Copyright 2016-2017 NXP ++ */ ++ ++#include "compat.h" ++#include "ctrl.h" ++#include "regs.h" ++#include "intern.h" ++#include "desc_constr.h" ++#include "error.h" ++#include "sg_sw_qm.h" ++#include "key_gen.h" ++#include "qi.h" ++#include "jr.h" ++#include "caamalg_desc.h" ++ ++/* ++ * crypto alg ++ */ ++#define CAAM_CRA_PRIORITY 2000 ++/* max key is sum of AES_MAX_KEY_SIZE, max split key size */ ++#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \ ++ SHA512_DIGEST_SIZE * 2) ++ ++#define DESC_MAX_USED_BYTES (DESC_QI_AEAD_GIVENC_LEN + \ ++ CAAM_MAX_KEY_SIZE) ++#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) ++ ++struct caam_alg_entry { ++ int class1_alg_type; ++ int class2_alg_type; ++ bool rfc3686; ++ bool geniv; ++}; ++ ++struct caam_aead_alg { ++ struct aead_alg aead; ++ struct caam_alg_entry caam; ++ bool registered; ++}; ++ ++/* ++ * per-session context ++ */ ++struct caam_ctx { ++ struct device *jrdev; ++ u32 sh_desc_enc[DESC_MAX_USED_LEN]; ++ u32 sh_desc_dec[DESC_MAX_USED_LEN]; ++ u32 sh_desc_givenc[DESC_MAX_USED_LEN]; ++ u8 key[CAAM_MAX_KEY_SIZE]; ++ dma_addr_t key_dma; ++ struct alginfo adata; ++ struct alginfo cdata; ++ unsigned int authsize; ++ struct device *qidev; ++ spinlock_t lock; /* Protects multiple init of driver context */ ++ struct caam_drv_ctx *drv_ctx[NUM_OP]; ++}; ++ ++static int aead_set_sh_desc(struct crypto_aead *aead) ++{ ++ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), ++ typeof(*alg), aead); ++ struct caam_ctx *ctx = crypto_aead_ctx(aead); ++ unsigned int ivsize = crypto_aead_ivsize(aead); ++ u32 ctx1_iv_off = 0; ++ u32 *nonce = NULL; ++ unsigned int data_len[2]; ++ u32 inl_mask; ++ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == ++ OP_ALG_AAI_CTR_MOD128); ++ const bool is_rfc3686 = alg->caam.rfc3686; ++ ++ if (!ctx->cdata.keylen || !ctx->authsize) ++ return 0; ++ ++ /* ++ * AES-CTR needs to load IV in CONTEXT1 reg ++ * at an offset of 128bits (16bytes) ++ * CONTEXT1[255:128] = IV ++ */ ++ if (ctr_mode) 
++ ctx1_iv_off = 16; ++ ++ /* ++ * RFC3686 specific: ++ * CONTEXT1[255:128] = {NONCE, IV, COUNTER} ++ */ ++ if (is_rfc3686) { ++ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; ++ nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad + ++ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); ++ } ++ ++ data_len[0] = ctx->adata.keylen_pad; ++ data_len[1] = ctx->cdata.keylen; ++ ++ if (alg->caam.geniv) ++ goto skip_enc; ++ ++ /* aead_encrypt shared descriptor */ ++ if (desc_inline_query(DESC_QI_AEAD_ENC_LEN + ++ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), ++ DESC_JOB_IO_LEN, data_len, &inl_mask, ++ ARRAY_SIZE(data_len)) < 0) ++ return -EINVAL; ++ ++ if (inl_mask & 1) ++ ctx->adata.key_virt = ctx->key; ++ else ++ ctx->adata.key_dma = ctx->key_dma; ++ ++ if (inl_mask & 2) ++ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; ++ else ++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; ++ ++ ctx->adata.key_inline = !!(inl_mask & 1); ++ ctx->cdata.key_inline = !!(inl_mask & 2); ++ ++ cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, ++ ivsize, ctx->authsize, is_rfc3686, nonce, ++ ctx1_iv_off, true); ++ ++skip_enc: ++ /* aead_decrypt shared descriptor */ ++ if (desc_inline_query(DESC_QI_AEAD_DEC_LEN + ++ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), ++ DESC_JOB_IO_LEN, data_len, &inl_mask, ++ ARRAY_SIZE(data_len)) < 0) ++ return -EINVAL; ++ ++ if (inl_mask & 1) ++ ctx->adata.key_virt = ctx->key; ++ else ++ ctx->adata.key_dma = ctx->key_dma; ++ ++ if (inl_mask & 2) ++ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; ++ else ++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; ++ ++ ctx->adata.key_inline = !!(inl_mask & 1); ++ ctx->cdata.key_inline = !!(inl_mask & 2); ++ ++ cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata, ++ ivsize, ctx->authsize, alg->caam.geniv, ++ is_rfc3686, nonce, ctx1_iv_off, true); ++ ++ if (!alg->caam.geniv) ++ goto skip_givenc; ++ ++ /* aead_givencrypt shared descriptor */ ++ if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN + ++ (is_rfc3686 ? 
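++	/*
++	 * desc_inline_query() sets bit i of inl_mask when data_len[i]
++	 * still fits inside the shared descriptor: bit 0 covers the
++	 * split authentication key, bit 1 the cipher key. A key that
++	 * does not fit is referenced by DMA address instead of being
++	 * copied into the descriptor.
++	 */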
DESC_AEAD_CTR_RFC3686_LEN : 0), ++ DESC_JOB_IO_LEN, data_len, &inl_mask, ++ ARRAY_SIZE(data_len)) < 0) ++ return -EINVAL; ++ ++ if (inl_mask & 1) ++ ctx->adata.key_virt = ctx->key; ++ else ++ ctx->adata.key_dma = ctx->key_dma; ++ ++ if (inl_mask & 2) ++ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; ++ else ++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; ++ ++ ctx->adata.key_inline = !!(inl_mask & 1); ++ ctx->cdata.key_inline = !!(inl_mask & 2); ++ ++ cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, ++ ivsize, ctx->authsize, is_rfc3686, nonce, ++ ctx1_iv_off, true); ++ ++skip_givenc: ++ return 0; ++} ++ ++static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize) ++{ ++ struct caam_ctx *ctx = crypto_aead_ctx(authenc); ++ ++ ctx->authsize = authsize; ++ aead_set_sh_desc(authenc); ++ ++ return 0; ++} ++ ++static int aead_setkey(struct crypto_aead *aead, const u8 *key, ++ unsigned int keylen) ++{ ++ struct caam_ctx *ctx = crypto_aead_ctx(aead); ++ struct device *jrdev = ctx->jrdev; ++ struct crypto_authenc_keys keys; ++ int ret = 0; ++ ++ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) ++ goto badkey; ++ ++#ifdef DEBUG ++ dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n", ++ keys.authkeylen + keys.enckeylen, keys.enckeylen, ++ keys.authkeylen); ++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); ++#endif ++ ++ ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey, ++ keys.authkeylen, CAAM_MAX_KEY_SIZE - ++ keys.enckeylen); ++ if (ret) ++ goto badkey; ++ ++ /* postpend encryption key to auth split key */ ++ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); ++ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + ++ keys.enckeylen, DMA_TO_DEVICE); ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, ++ ctx->adata.keylen_pad + keys.enckeylen, 1); ++#endif ++ ++ ctx->cdata.keylen = keys.enckeylen; ++ ++ ret = aead_set_sh_desc(aead); ++ if (ret) ++ goto badkey; ++ ++ /* Now update the driver contexts with the new shared descriptor */ ++ if (ctx->drv_ctx[ENCRYPT]) { ++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], ++ ctx->sh_desc_enc); ++ if (ret) { ++ dev_err(jrdev, "driver enc context update failed\n"); ++ goto badkey; ++ } ++ } ++ ++ if (ctx->drv_ctx[DECRYPT]) { ++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], ++ ctx->sh_desc_dec); ++ if (ret) { ++ dev_err(jrdev, "driver dec context update failed\n"); ++ goto badkey; ++ } ++ } ++ ++ return ret; ++badkey: ++ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); ++ return -EINVAL; ++} ++ ++static int tls_set_sh_desc(struct crypto_aead *tls) ++{ ++ struct caam_ctx *ctx = crypto_aead_ctx(tls); ++ unsigned int ivsize = crypto_aead_ivsize(tls); ++ unsigned int blocksize = crypto_aead_blocksize(tls); ++ unsigned int assoclen = 13; /* always 13 bytes for TLS */ ++ unsigned int data_len[2]; ++ u32 inl_mask; ++ ++ if (!ctx->cdata.keylen || !ctx->authsize) ++ return 0; ++ ++ /* ++ * TLS 1.0 encrypt shared descriptor ++ * Job Descriptor and Shared Descriptor ++ * must fit into the 64-word Descriptor h/w Buffer ++ */ ++ data_len[0] = ctx->adata.keylen_pad; ++ data_len[1] = ctx->cdata.keylen; ++ ++ if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len, ++ &inl_mask, ARRAY_SIZE(data_len)) < 0) ++ return -EINVAL; ++ ++ if (inl_mask & 1) ++ ctx->adata.key_virt = ctx->key; ++ else ++ 
ctx->adata.key_dma = ctx->key_dma; ++ ++ if (inl_mask & 2) ++ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; ++ else ++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; ++ ++ ctx->adata.key_inline = !!(inl_mask & 1); ++ ctx->cdata.key_inline = !!(inl_mask & 2); ++ ++ cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, ++ assoclen, ivsize, ctx->authsize, blocksize); ++ ++ /* ++ * TLS 1.0 decrypt shared descriptor ++ * Keys do not fit inline, regardless of algorithms used ++ */ ++ ctx->adata.key_dma = ctx->key_dma; ++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; ++ ++ cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata, ++ assoclen, ivsize, ctx->authsize, blocksize); ++ ++ return 0; ++} ++ ++static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize) ++{ ++ struct caam_ctx *ctx = crypto_aead_ctx(tls); ++ ++ ctx->authsize = authsize; ++ tls_set_sh_desc(tls); ++ ++ return 0; ++} ++ ++static int tls_setkey(struct crypto_aead *tls, const u8 *key, ++ unsigned int keylen) ++{ ++ struct caam_ctx *ctx = crypto_aead_ctx(tls); ++ struct device *jrdev = ctx->jrdev; ++ struct crypto_authenc_keys keys; ++ int ret = 0; ++ ++ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) ++ goto badkey; ++ ++#ifdef DEBUG ++ dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n", ++ keys.authkeylen + keys.enckeylen, keys.enckeylen, ++ keys.authkeylen); ++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); ++#endif ++ ++ ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey, ++ keys.authkeylen, CAAM_MAX_KEY_SIZE - ++ keys.enckeylen); ++ if (ret) ++ goto badkey; ++ ++ /* postpend encryption key to auth split key */ ++ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); ++ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + ++ keys.enckeylen, DMA_TO_DEVICE); ++ ++#ifdef DEBUG ++ dev_err(jrdev, "split keylen %d split keylen padded %d\n", ++ ctx->adata.keylen, ctx->adata.keylen_pad); ++ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, ++ ctx->adata.keylen_pad + keys.enckeylen, 1); ++#endif ++ ++ ctx->cdata.keylen = keys.enckeylen; ++ ++ ret = tls_set_sh_desc(tls); ++ if (ret) ++ goto badkey; ++ ++ /* Now update the driver contexts with the new shared descriptor */ ++ if (ctx->drv_ctx[ENCRYPT]) { ++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], ++ ctx->sh_desc_enc); ++ if (ret) { ++ dev_err(jrdev, "driver enc context update failed\n"); ++ goto badkey; ++ } ++ } ++ ++ if (ctx->drv_ctx[DECRYPT]) { ++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], ++ ctx->sh_desc_dec); ++ if (ret) { ++ dev_err(jrdev, "driver dec context update failed\n"); ++ goto badkey; ++ } ++ } ++ ++ return ret; ++badkey: ++ crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN); ++ return -EINVAL; ++} ++ ++static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, ++ const u8 *key, unsigned int keylen) ++{ ++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); ++ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher); ++ const char *alg_name = crypto_tfm_alg_name(tfm); ++ struct device *jrdev = ctx->jrdev; ++ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher); ++ u32 ctx1_iv_off = 0; ++ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == ++ OP_ALG_AAI_CTR_MOD128); ++ const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686")); ++ int ret = 0; ++ ++ memcpy(ctx->key, 
key, keylen);
++#ifdef DEBUG
++	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
++	/*
++	 * AES-CTR needs to load IV in CONTEXT1 reg
++	 * at an offset of 128bits (16bytes)
++	 * CONTEXT1[255:128] = IV
++	 */
++	if (ctr_mode)
++		ctx1_iv_off = 16;
++
++	/*
++	 * RFC3686 specific:
++	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
++	 *	| *key = {KEY, NONCE}
++	 */
++	if (is_rfc3686) {
++		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
++		keylen -= CTR_RFC3686_NONCE_SIZE;
++	}
++
++	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
++	ctx->cdata.keylen = keylen;
++	ctx->cdata.key_virt = ctx->key;
++	ctx->cdata.key_inline = true;
++
++	/* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
++	cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
++				     is_rfc3686, ctx1_iv_off);
++	cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
++				     is_rfc3686, ctx1_iv_off);
++	cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
++					ivsize, is_rfc3686, ctx1_iv_off);
++
++	/* Now update the driver contexts with the new shared descriptor */
++	if (ctx->drv_ctx[ENCRYPT]) {
++		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++					  ctx->sh_desc_enc);
++		if (ret) {
++			dev_err(jrdev, "driver enc context update failed\n");
++			goto badkey;
++		}
++	}
++
++	if (ctx->drv_ctx[DECRYPT]) {
++		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++					  ctx->sh_desc_dec);
++		if (ret) {
++			dev_err(jrdev, "driver dec context update failed\n");
++			goto badkey;
++		}
++	}
++
++	if (ctx->drv_ctx[GIVENCRYPT]) {
++		ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
++					  ctx->sh_desc_givenc);
++		if (ret) {
++			dev_err(jrdev, "driver givenc context update failed\n");
++			goto badkey;
++		}
++	}
++
++	return ret;
++badkey:
++	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++	return -EINVAL;
++}
++
++static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
++				 const u8 *key, unsigned int keylen)
++{
++	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++	struct device *jrdev = ctx->jrdev;
++	int ret = 0;
++
++	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
++		crypto_ablkcipher_set_flags(ablkcipher,
++					    CRYPTO_TFM_RES_BAD_KEY_LEN);
++		dev_err(jrdev, "key size mismatch\n");
++		return -EINVAL;
++	}
++
++	memcpy(ctx->key, key, keylen);
++	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
++	ctx->cdata.keylen = keylen;
++	ctx->cdata.key_virt = ctx->key;
++	ctx->cdata.key_inline = true;
++
++	/* xts ablkcipher encrypt, decrypt shared descriptors */
++	cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
++	cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
++
++	/* Now update the driver contexts with the new shared descriptor */
++	if (ctx->drv_ctx[ENCRYPT]) {
++		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++					  ctx->sh_desc_enc);
++		if (ret) {
++			dev_err(jrdev, "driver enc context update failed\n");
++			goto badkey;
++		}
++	}
++
++	if (ctx->drv_ctx[DECRYPT]) {
++		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++					  ctx->sh_desc_dec);
++		if (ret) {
++			dev_err(jrdev, "driver dec context update failed\n");
++			goto badkey;
++		}
++	}
++
++	return ret;
++badkey:
++	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++	return -EINVAL;
++}
++
++/*
++ * aead_edesc - s/w-extended aead descriptor
++ * @src_nents: number of segments in input scatterlist
++ * @dst_nents: number of segments in output scatterlist
++ *
@iv_dma: dma address of iv for checking continuity and link table ++ * @qm_sg_bytes: length of dma mapped h/w link table ++ * @qm_sg_dma: bus physical mapped address of h/w link table ++ * @assoclen: associated data length, in CAAM endianness ++ * @assoclen_dma: bus physical mapped address of req->assoclen ++ * @drv_req: driver-specific request structure ++ * @sgt: the h/w link table ++ */ ++struct aead_edesc { ++ int src_nents; ++ int dst_nents; ++ dma_addr_t iv_dma; ++ int qm_sg_bytes; ++ dma_addr_t qm_sg_dma; ++ unsigned int assoclen; ++ dma_addr_t assoclen_dma; ++ struct caam_drv_req drv_req; ++#define CAAM_QI_MAX_AEAD_SG \ ++ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \ ++ sizeof(struct qm_sg_entry)) ++ struct qm_sg_entry sgt[0]; ++}; ++ ++/* ++ * tls_edesc - s/w-extended tls descriptor ++ * @src_nents: number of segments in input scatterlist ++ * @dst_nents: number of segments in output scatterlist ++ * @iv_dma: dma address of iv for checking continuity and link table ++ * @qm_sg_bytes: length of dma mapped h/w link table ++ * @tmp: array of scatterlists used by 'scatterwalk_ffwd' ++ * @qm_sg_dma: bus physical mapped address of h/w link table ++ * @drv_req: driver-specific request structure ++ * @sgt: the h/w link table ++ */ ++struct tls_edesc { ++ int src_nents; ++ int dst_nents; ++ dma_addr_t iv_dma; ++ int qm_sg_bytes; ++ dma_addr_t qm_sg_dma; ++ struct scatterlist tmp[2]; ++ struct scatterlist *dst; ++ struct caam_drv_req drv_req; ++ struct qm_sg_entry sgt[0]; ++}; ++ ++/* ++ * ablkcipher_edesc - s/w-extended ablkcipher descriptor ++ * @src_nents: number of segments in input scatterlist ++ * @dst_nents: number of segments in output scatterlist ++ * @iv_dma: dma address of iv for checking continuity and link table ++ * @qm_sg_bytes: length of dma mapped h/w link table ++ * @qm_sg_dma: bus physical mapped address of h/w link table ++ * @drv_req: driver-specific request structure ++ * @sgt: the h/w link table ++ */ ++struct ablkcipher_edesc { ++ int src_nents; ++ int dst_nents; ++ dma_addr_t iv_dma; ++ int qm_sg_bytes; ++ dma_addr_t qm_sg_dma; ++ struct caam_drv_req drv_req; ++#define CAAM_QI_MAX_ABLKCIPHER_SG \ ++ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \ ++ sizeof(struct qm_sg_entry)) ++ struct qm_sg_entry sgt[0]; ++}; ++ ++static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx, ++ enum optype type) ++{ ++ /* ++ * This function is called on the fast path with values of 'type' ++ * known at compile time. Invalid arguments are not expected and ++ * thus no checks are made. 
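++	 *
++	 * The per-type driver context is created lazily: the lockless
++	 * read of ctx->drv_ctx[type] below is repeated under ctx->lock,
++	 * so only one core ends up calling caam_drv_ctx_init() for a
++	 * given operation type.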
++ */ ++ struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type]; ++ u32 *desc; ++ ++ if (unlikely(!drv_ctx)) { ++ spin_lock(&ctx->lock); ++ ++ /* Read again to check if some other core init drv_ctx */ ++ drv_ctx = ctx->drv_ctx[type]; ++ if (!drv_ctx) { ++ int cpu; ++ ++ if (type == ENCRYPT) ++ desc = ctx->sh_desc_enc; ++ else if (type == DECRYPT) ++ desc = ctx->sh_desc_dec; ++ else /* (type == GIVENCRYPT) */ ++ desc = ctx->sh_desc_givenc; ++ ++ cpu = smp_processor_id(); ++ drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc); ++ if (likely(!IS_ERR_OR_NULL(drv_ctx))) ++ drv_ctx->op_type = type; ++ ++ ctx->drv_ctx[type] = drv_ctx; ++ } ++ ++ spin_unlock(&ctx->lock); ++ } ++ ++ return drv_ctx; ++} ++ ++static void caam_unmap(struct device *dev, struct scatterlist *src, ++ struct scatterlist *dst, int src_nents, ++ int dst_nents, dma_addr_t iv_dma, int ivsize, ++ enum optype op_type, dma_addr_t qm_sg_dma, ++ int qm_sg_bytes) ++{ ++ if (dst != src) { ++ if (src_nents) ++ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); ++ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); ++ } else { ++ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); ++ } ++ ++ if (iv_dma) ++ dma_unmap_single(dev, iv_dma, ivsize, ++ op_type == GIVENCRYPT ? DMA_FROM_DEVICE : ++ DMA_TO_DEVICE); ++ if (qm_sg_bytes) ++ dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); ++} ++ ++static void aead_unmap(struct device *dev, ++ struct aead_edesc *edesc, ++ struct aead_request *req) ++{ ++ struct crypto_aead *aead = crypto_aead_reqtfm(req); ++ int ivsize = crypto_aead_ivsize(aead); ++ ++ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, ++ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type, ++ edesc->qm_sg_dma, edesc->qm_sg_bytes); ++ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); ++} ++ ++static void tls_unmap(struct device *dev, ++ struct tls_edesc *edesc, ++ struct aead_request *req) ++{ ++ struct crypto_aead *aead = crypto_aead_reqtfm(req); ++ int ivsize = crypto_aead_ivsize(aead); ++ ++ caam_unmap(dev, req->src, edesc->dst, edesc->src_nents, ++ edesc->dst_nents, edesc->iv_dma, ivsize, ++ edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma, ++ edesc->qm_sg_bytes); ++} ++ ++static void ablkcipher_unmap(struct device *dev, ++ struct ablkcipher_edesc *edesc, ++ struct ablkcipher_request *req) ++{ ++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); ++ int ivsize = crypto_ablkcipher_ivsize(ablkcipher); ++ ++ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, ++ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type, ++ edesc->qm_sg_dma, edesc->qm_sg_bytes); ++} ++ ++static void aead_done(struct caam_drv_req *drv_req, u32 status) ++{ ++ struct device *qidev; ++ struct aead_edesc *edesc; ++ struct aead_request *aead_req = drv_req->app_ctx; ++ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req); ++ struct caam_ctx *caam_ctx = crypto_aead_ctx(aead); ++ int ecode = 0; ++ ++ qidev = caam_ctx->qidev; ++ ++ if (unlikely(status)) { ++ caam_jr_strstatus(qidev, status); ++ ecode = -EIO; ++ } ++ ++ edesc = container_of(drv_req, typeof(*edesc), drv_req); ++ aead_unmap(qidev, edesc, aead_req); ++ ++ aead_request_complete(aead_req, ecode); ++ qi_cache_free(edesc); ++} ++ ++/* ++ * allocate and map the aead extended descriptor ++ */ ++static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, ++ bool encrypt) ++{ ++ struct crypto_aead *aead = crypto_aead_reqtfm(req); ++ struct caam_ctx *ctx = crypto_aead_ctx(aead); ++ struct caam_aead_alg *alg = 
container_of(crypto_aead_alg(aead), ++ typeof(*alg), aead); ++ struct device *qidev = ctx->qidev; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? ++ GFP_KERNEL : GFP_ATOMIC; ++ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; ++ struct aead_edesc *edesc; ++ dma_addr_t qm_sg_dma, iv_dma = 0; ++ int ivsize = 0; ++ unsigned int authsize = ctx->authsize; ++ int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes; ++ int in_len, out_len; ++ struct qm_sg_entry *sg_table, *fd_sgt; ++ struct caam_drv_ctx *drv_ctx; ++ enum optype op_type = encrypt ? ENCRYPT : DECRYPT; ++ ++ drv_ctx = get_drv_ctx(ctx, op_type); ++ if (unlikely(IS_ERR_OR_NULL(drv_ctx))) ++ return (struct aead_edesc *)drv_ctx; ++ ++ /* allocate space for base edesc and hw desc commands, link tables */ ++ edesc = qi_cache_alloc(GFP_DMA | flags); ++ if (unlikely(!edesc)) { ++ dev_err(qidev, "could not allocate extended descriptor\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ if (likely(req->src == req->dst)) { ++ src_nents = sg_nents_for_len(req->src, req->assoclen + ++ req->cryptlen + ++ (encrypt ? authsize : 0)); ++ if (unlikely(src_nents < 0)) { ++ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", ++ req->assoclen + req->cryptlen + ++ (encrypt ? authsize : 0)); ++ qi_cache_free(edesc); ++ return ERR_PTR(src_nents); ++ } ++ ++ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, ++ DMA_BIDIRECTIONAL); ++ if (unlikely(!mapped_src_nents)) { ++ dev_err(qidev, "unable to map source\n"); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ } else { ++ src_nents = sg_nents_for_len(req->src, req->assoclen + ++ req->cryptlen); ++ if (unlikely(src_nents < 0)) { ++ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", ++ req->assoclen + req->cryptlen); ++ qi_cache_free(edesc); ++ return ERR_PTR(src_nents); ++ } ++ ++ dst_nents = sg_nents_for_len(req->dst, req->assoclen + ++ req->cryptlen + ++ (encrypt ? authsize : ++ (-authsize))); ++ if (unlikely(dst_nents < 0)) { ++ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", ++ req->assoclen + req->cryptlen + ++ (encrypt ? authsize : (-authsize))); ++ qi_cache_free(edesc); ++ return ERR_PTR(dst_nents); ++ } ++ ++ if (src_nents) { ++ mapped_src_nents = dma_map_sg(qidev, req->src, ++ src_nents, DMA_TO_DEVICE); ++ if (unlikely(!mapped_src_nents)) { ++ dev_err(qidev, "unable to map source\n"); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ } else { ++ mapped_src_nents = 0; ++ } ++ ++ mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents, ++ DMA_FROM_DEVICE); ++ if (unlikely(!mapped_dst_nents)) { ++ dev_err(qidev, "unable to map destination\n"); ++ dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ } ++ ++ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) { ++ ivsize = crypto_aead_ivsize(aead); ++ iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE); ++ if (dma_mapping_error(qidev, iv_dma)) { ++ dev_err(qidev, "unable to map IV\n"); ++ caam_unmap(qidev, req->src, req->dst, src_nents, ++ dst_nents, 0, 0, op_type, 0, 0); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ } ++ ++ /* ++ * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. ++ * Input is not contiguous. ++ */ ++ qm_sg_ents = 1 + !!ivsize + mapped_src_nents + ++ (mapped_dst_nents > 1 ? 
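++	/*
++	 * One entry for the DMA-mapped assoclen, an optional entry for
++	 * the IV, one per source segment, and dst entries only when the
++	 * destination is scattered; a single-segment dst is addressed
++	 * directly from the frame descriptor below.
++	 */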
mapped_dst_nents : 0); ++ if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) { ++ dev_err(qidev, "Insufficient S/G entries: %d > %lu\n", ++ qm_sg_ents, CAAM_QI_MAX_AEAD_SG); ++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, op_type, 0, 0); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ sg_table = &edesc->sgt[0]; ++ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); ++ ++ edesc->src_nents = src_nents; ++ edesc->dst_nents = dst_nents; ++ edesc->iv_dma = iv_dma; ++ edesc->drv_req.app_ctx = req; ++ edesc->drv_req.cbk = aead_done; ++ edesc->drv_req.drv_ctx = drv_ctx; ++ ++ edesc->assoclen = cpu_to_caam32(req->assoclen); ++ edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(qidev, edesc->assoclen_dma)) { ++ dev_err(qidev, "unable to map assoclen\n"); ++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, op_type, 0, 0); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0); ++ qm_sg_index++; ++ if (ivsize) { ++ dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); ++ qm_sg_index++; ++ } ++ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0); ++ qm_sg_index += mapped_src_nents; ++ ++ if (mapped_dst_nents > 1) ++ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + ++ qm_sg_index, 0); ++ ++ qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); ++ if (dma_mapping_error(qidev, qm_sg_dma)) { ++ dev_err(qidev, "unable to map S/G table\n"); ++ dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); ++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, op_type, 0, 0); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ edesc->qm_sg_dma = qm_sg_dma; ++ edesc->qm_sg_bytes = qm_sg_bytes; ++ ++ out_len = req->assoclen + req->cryptlen + ++ (encrypt ? 
ctx->authsize : (-ctx->authsize)); ++ in_len = 4 + ivsize + req->assoclen + req->cryptlen; ++ ++ fd_sgt = &edesc->drv_req.fd_sgt[0]; ++ dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0); ++ ++ if (req->dst == req->src) { ++ if (mapped_src_nents == 1) ++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src), ++ out_len, 0); ++ else ++ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + ++ (1 + !!ivsize) * sizeof(*sg_table), ++ out_len, 0); ++ } else if (mapped_dst_nents == 1) { ++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len, ++ 0); ++ } else { ++ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) * ++ qm_sg_index, out_len, 0); ++ } ++ ++ return edesc; ++} ++ ++static inline int aead_crypt(struct aead_request *req, bool encrypt) ++{ ++ struct aead_edesc *edesc; ++ struct crypto_aead *aead = crypto_aead_reqtfm(req); ++ struct caam_ctx *ctx = crypto_aead_ctx(aead); ++ int ret; ++ ++ if (unlikely(caam_congested)) ++ return -EAGAIN; ++ ++ /* allocate extended descriptor */ ++ edesc = aead_edesc_alloc(req, encrypt); ++ if (IS_ERR_OR_NULL(edesc)) ++ return PTR_ERR(edesc); ++ ++ /* Create and submit job descriptor */ ++ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); ++ if (!ret) { ++ ret = -EINPROGRESS; ++ } else { ++ aead_unmap(ctx->qidev, edesc, req); ++ qi_cache_free(edesc); ++ } ++ ++ return ret; ++} ++ ++static int aead_encrypt(struct aead_request *req) ++{ ++ return aead_crypt(req, true); ++} ++ ++static int aead_decrypt(struct aead_request *req) ++{ ++ return aead_crypt(req, false); ++} ++ ++static void tls_done(struct caam_drv_req *drv_req, u32 status) ++{ ++ struct device *qidev; ++ struct tls_edesc *edesc; ++ struct aead_request *aead_req = drv_req->app_ctx; ++ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req); ++ struct caam_ctx *caam_ctx = crypto_aead_ctx(aead); ++ int ecode = 0; ++ ++ qidev = caam_ctx->qidev; ++ ++ if (unlikely(status)) { ++ caam_jr_strstatus(qidev, status); ++ ecode = -EIO; ++ } ++ ++ edesc = container_of(drv_req, typeof(*edesc), drv_req); ++ tls_unmap(qidev, edesc, aead_req); ++ ++ aead_request_complete(aead_req, ecode); ++ qi_cache_free(edesc); ++} ++ ++/* ++ * allocate and map the tls extended descriptor ++ */ ++static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt) ++{ ++ struct crypto_aead *aead = crypto_aead_reqtfm(req); ++ struct caam_ctx *ctx = crypto_aead_ctx(aead); ++ unsigned int blocksize = crypto_aead_blocksize(aead); ++ unsigned int padsize, authsize; ++ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), ++ typeof(*alg), aead); ++ struct device *qidev = ctx->qidev; ++ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | ++ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; ++ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; ++ struct tls_edesc *edesc; ++ dma_addr_t qm_sg_dma, iv_dma = 0; ++ int ivsize = 0; ++ int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes; ++ int in_len, out_len; ++ struct qm_sg_entry *sg_table, *fd_sgt; ++ struct caam_drv_ctx *drv_ctx; ++ enum optype op_type = encrypt ? 
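++	/*
++	 * TLS 1.0 appends the MAC and then pads the record to a multiple
++	 * of the cipher block size, so on encryption the output grows by
++	 * padsize bytes on top of ctx->authsize.
++	 */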
ENCRYPT : DECRYPT; ++ struct scatterlist *dst; ++ ++ if (encrypt) { ++ padsize = blocksize - ((req->cryptlen + ctx->authsize) % ++ blocksize); ++ authsize = ctx->authsize + padsize; ++ } else { ++ authsize = ctx->authsize; ++ } ++ ++ drv_ctx = get_drv_ctx(ctx, op_type); ++ if (unlikely(IS_ERR_OR_NULL(drv_ctx))) ++ return (struct tls_edesc *)drv_ctx; ++ ++ /* allocate space for base edesc and hw desc commands, link tables */ ++ edesc = qi_cache_alloc(GFP_DMA | flags); ++ if (unlikely(!edesc)) { ++ dev_err(qidev, "could not allocate extended descriptor\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ if (likely(req->src == req->dst)) { ++ src_nents = sg_nents_for_len(req->src, req->assoclen + ++ req->cryptlen + ++ (encrypt ? authsize : 0)); ++ if (unlikely(src_nents < 0)) { ++ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", ++ req->assoclen + req->cryptlen + ++ (encrypt ? authsize : 0)); ++ qi_cache_free(edesc); ++ return ERR_PTR(src_nents); ++ } ++ ++ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, ++ DMA_BIDIRECTIONAL); ++ if (unlikely(!mapped_src_nents)) { ++ dev_err(qidev, "unable to map source\n"); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ dst = req->dst; ++ } else { ++ src_nents = sg_nents_for_len(req->src, req->assoclen + ++ req->cryptlen); ++ if (unlikely(src_nents < 0)) { ++ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", ++ req->assoclen + req->cryptlen); ++ qi_cache_free(edesc); ++ return ERR_PTR(src_nents); ++ } ++ ++ dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen); ++ dst_nents = sg_nents_for_len(dst, req->cryptlen + ++ (encrypt ? authsize : 0)); ++ if (unlikely(dst_nents < 0)) { ++ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", ++ req->cryptlen + ++ (encrypt ? authsize : 0)); ++ qi_cache_free(edesc); ++ return ERR_PTR(dst_nents); ++ } ++ ++ if (src_nents) { ++ mapped_src_nents = dma_map_sg(qidev, req->src, ++ src_nents, DMA_TO_DEVICE); ++ if (unlikely(!mapped_src_nents)) { ++ dev_err(qidev, "unable to map source\n"); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ } else { ++ mapped_src_nents = 0; ++ } ++ ++ mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents, ++ DMA_FROM_DEVICE); ++ if (unlikely(!mapped_dst_nents)) { ++ dev_err(qidev, "unable to map destination\n"); ++ dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ } ++ ++ ivsize = crypto_aead_ivsize(aead); ++ iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE); ++ if (dma_mapping_error(qidev, iv_dma)) { ++ dev_err(qidev, "unable to map IV\n"); ++ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0, ++ op_type, 0, 0); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ /* ++ * Create S/G table: IV, src, dst. ++ * Input is not contiguous. ++ */ ++ qm_sg_ents = 1 + mapped_src_nents + ++ (mapped_dst_nents > 1 ? 
mapped_dst_nents : 0); ++ sg_table = &edesc->sgt[0]; ++ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); ++ ++ edesc->src_nents = src_nents; ++ edesc->dst_nents = dst_nents; ++ edesc->dst = dst; ++ edesc->iv_dma = iv_dma; ++ edesc->drv_req.app_ctx = req; ++ edesc->drv_req.cbk = tls_done; ++ edesc->drv_req.drv_ctx = drv_ctx; ++ ++ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); ++ qm_sg_index = 1; ++ ++ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0); ++ qm_sg_index += mapped_src_nents; ++ ++ if (mapped_dst_nents > 1) ++ sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table + ++ qm_sg_index, 0); ++ ++ qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); ++ if (dma_mapping_error(qidev, qm_sg_dma)) { ++ dev_err(qidev, "unable to map S/G table\n"); ++ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma, ++ ivsize, op_type, 0, 0); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ edesc->qm_sg_dma = qm_sg_dma; ++ edesc->qm_sg_bytes = qm_sg_bytes; ++ ++ out_len = req->cryptlen + (encrypt ? authsize : 0); ++ in_len = ivsize + req->assoclen + req->cryptlen; ++ ++ fd_sgt = &edesc->drv_req.fd_sgt[0]; ++ ++ dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0); ++ ++ if (req->dst == req->src) ++ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + ++ (sg_nents_for_len(req->src, req->assoclen) + ++ 1) * sizeof(*sg_table), out_len, 0); ++ else if (mapped_dst_nents == 1) ++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0); ++ else ++ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) * ++ qm_sg_index, out_len, 0); ++ ++ return edesc; ++} ++ ++static int tls_crypt(struct aead_request *req, bool encrypt) ++{ ++ struct tls_edesc *edesc; ++ struct crypto_aead *aead = crypto_aead_reqtfm(req); ++ struct caam_ctx *ctx = crypto_aead_ctx(aead); ++ int ret; ++ ++ if (unlikely(caam_congested)) ++ return -EAGAIN; ++ ++ edesc = tls_edesc_alloc(req, encrypt); ++ if (IS_ERR_OR_NULL(edesc)) ++ return PTR_ERR(edesc); ++ ++ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); ++ if (!ret) { ++ ret = -EINPROGRESS; ++ } else { ++ tls_unmap(ctx->qidev, edesc, req); ++ qi_cache_free(edesc); ++ } ++ ++ return ret; ++} ++ ++static int tls_encrypt(struct aead_request *req) ++{ ++ return tls_crypt(req, true); ++} ++ ++static int tls_decrypt(struct aead_request *req) ++{ ++ return tls_crypt(req, false); ++} ++ ++static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status) ++{ ++ struct ablkcipher_edesc *edesc; ++ struct ablkcipher_request *req = drv_req->app_ctx; ++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); ++ struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher); ++ struct device *qidev = caam_ctx->qidev; ++ int ivsize = crypto_ablkcipher_ivsize(ablkcipher); ++ ++#ifdef DEBUG ++ dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); ++#endif ++ ++ edesc = container_of(drv_req, typeof(*edesc), drv_req); ++ ++ if (status) ++ caam_jr_strstatus(qidev, status); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, req->info, ++ edesc->src_nents > 1 ? 100 : ivsize, 1); ++ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, req->dst, ++ edesc->dst_nents > 1 ? 100 : req->nbytes, 1); ++#endif ++ ++ ablkcipher_unmap(qidev, edesc, req); ++ qi_cache_free(edesc); ++ ++ /* ++ * The crypto API expects us to set the IV (req->info) to the last ++ * ciphertext block. This is used e.g. by the CTS mode. 
++	 */
++ */ ++ scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize, ++ ivsize, 0); ++ ++ ablkcipher_request_complete(req, status); ++} ++ ++static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request ++ *req, bool encrypt) ++{ ++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); ++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); ++ struct device *qidev = ctx->qidev; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? ++ GFP_KERNEL : GFP_ATOMIC; ++ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; ++ struct ablkcipher_edesc *edesc; ++ dma_addr_t iv_dma; ++ bool in_contig; ++ int ivsize = crypto_ablkcipher_ivsize(ablkcipher); ++ int dst_sg_idx, qm_sg_ents; ++ struct qm_sg_entry *sg_table, *fd_sgt; ++ struct caam_drv_ctx *drv_ctx; ++ enum optype op_type = encrypt ? ENCRYPT : DECRYPT; ++ ++ drv_ctx = get_drv_ctx(ctx, op_type); ++ if (unlikely(IS_ERR_OR_NULL(drv_ctx))) ++ return (struct ablkcipher_edesc *)drv_ctx; ++ ++ src_nents = sg_nents_for_len(req->src, req->nbytes); ++ if (unlikely(src_nents < 0)) { ++ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", ++ req->nbytes); ++ return ERR_PTR(src_nents); ++ } ++ ++ if (unlikely(req->src != req->dst)) { ++ dst_nents = sg_nents_for_len(req->dst, req->nbytes); ++ if (unlikely(dst_nents < 0)) { ++ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", ++ req->nbytes); ++ return ERR_PTR(dst_nents); ++ } ++ ++ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, ++ DMA_TO_DEVICE); ++ if (unlikely(!mapped_src_nents)) { ++ dev_err(qidev, "unable to map source\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents, ++ DMA_FROM_DEVICE); ++ if (unlikely(!mapped_dst_nents)) { ++ dev_err(qidev, "unable to map destination\n"); ++ dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE); ++ return ERR_PTR(-ENOMEM); ++ } ++ } else { ++ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, ++ DMA_BIDIRECTIONAL); ++ if (unlikely(!mapped_src_nents)) { ++ dev_err(qidev, "unable to map source\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ } ++ ++ iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE); ++ if (dma_mapping_error(qidev, iv_dma)) { ++ dev_err(qidev, "unable to map IV\n"); ++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, ++ 0, 0, 0, 0); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ if (mapped_src_nents == 1 && ++ iv_dma + ivsize == sg_dma_address(req->src)) { ++ in_contig = true; ++ qm_sg_ents = 0; ++ } else { ++ in_contig = false; ++ qm_sg_ents = 1 + mapped_src_nents; ++ } ++ dst_sg_idx = qm_sg_ents; ++ ++ qm_sg_ents += mapped_dst_nents > 1 ? 
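++	/*
++	 * dst needs its own table entries only when it is scattered;
++	 * a single mapped segment is pointed to directly from the
++	 * frame descriptor.
++	 */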
mapped_dst_nents : 0; ++ if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { ++ dev_err(qidev, "Insufficient S/G entries: %d > %lu\n", ++ qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); ++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, op_type, 0, 0); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ /* allocate space for base edesc and link tables */ ++ edesc = qi_cache_alloc(GFP_DMA | flags); ++ if (unlikely(!edesc)) { ++ dev_err(qidev, "could not allocate extended descriptor\n"); ++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, op_type, 0, 0); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ edesc->src_nents = src_nents; ++ edesc->dst_nents = dst_nents; ++ edesc->iv_dma = iv_dma; ++ sg_table = &edesc->sgt[0]; ++ edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); ++ edesc->drv_req.app_ctx = req; ++ edesc->drv_req.cbk = ablkcipher_done; ++ edesc->drv_req.drv_ctx = drv_ctx; ++ ++ if (!in_contig) { ++ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); ++ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0); ++ } ++ ++ if (mapped_dst_nents > 1) ++ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + ++ dst_sg_idx, 0); ++ ++ edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(qidev, edesc->qm_sg_dma)) { ++ dev_err(qidev, "unable to map S/G table\n"); ++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, op_type, 0, 0); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ fd_sgt = &edesc->drv_req.fd_sgt[0]; ++ ++ if (!in_contig) ++ dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma, ++ ivsize + req->nbytes, 0); ++ else ++ dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes, ++ 0); ++ ++ if (req->src == req->dst) { ++ if (!in_contig) ++ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + ++ sizeof(*sg_table), req->nbytes, 0); ++ else ++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src), ++ req->nbytes, 0); ++ } else if (mapped_dst_nents > 1) { ++ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * ++ sizeof(*sg_table), req->nbytes, 0); ++ } else { ++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), ++ req->nbytes, 0); ++ } ++ ++ return edesc; ++} ++ ++static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( ++ struct skcipher_givcrypt_request *creq) ++{ ++ struct ablkcipher_request *req = &creq->creq; ++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); ++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); ++ struct device *qidev = ctx->qidev; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
++ GFP_KERNEL : GFP_ATOMIC; ++ int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents; ++ struct ablkcipher_edesc *edesc; ++ dma_addr_t iv_dma; ++ bool out_contig; ++ int ivsize = crypto_ablkcipher_ivsize(ablkcipher); ++ struct qm_sg_entry *sg_table, *fd_sgt; ++ int dst_sg_idx, qm_sg_ents; ++ struct caam_drv_ctx *drv_ctx; ++ ++ drv_ctx = get_drv_ctx(ctx, GIVENCRYPT); ++ if (unlikely(IS_ERR_OR_NULL(drv_ctx))) ++ return (struct ablkcipher_edesc *)drv_ctx; ++ ++ src_nents = sg_nents_for_len(req->src, req->nbytes); ++ if (unlikely(src_nents < 0)) { ++ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", ++ req->nbytes); ++ return ERR_PTR(src_nents); ++ } ++ ++ if (unlikely(req->src != req->dst)) { ++ dst_nents = sg_nents_for_len(req->dst, req->nbytes); ++ if (unlikely(dst_nents < 0)) { ++ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", ++ req->nbytes); ++ return ERR_PTR(dst_nents); ++ } ++ ++ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, ++ DMA_TO_DEVICE); ++ if (unlikely(!mapped_src_nents)) { ++ dev_err(qidev, "unable to map source\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents, ++ DMA_FROM_DEVICE); ++ if (unlikely(!mapped_dst_nents)) { ++ dev_err(qidev, "unable to map destination\n"); ++ dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE); ++ return ERR_PTR(-ENOMEM); ++ } ++ } else { ++ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, ++ DMA_BIDIRECTIONAL); ++ if (unlikely(!mapped_src_nents)) { ++ dev_err(qidev, "unable to map source\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ dst_nents = src_nents; ++ mapped_dst_nents = src_nents; ++ } ++ ++ iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE); ++ if (dma_mapping_error(qidev, iv_dma)) { ++ dev_err(qidev, "unable to map IV\n"); ++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, ++ 0, 0, 0, 0); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ qm_sg_ents = mapped_src_nents > 1 ? 
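++	/*
++	 * A contiguous source needs no input table entries; fd_sgt[1]
++	 * then points at the single mapped segment directly.
++	 */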
mapped_src_nents : 0; ++ dst_sg_idx = qm_sg_ents; ++ if (mapped_dst_nents == 1 && ++ iv_dma + ivsize == sg_dma_address(req->dst)) { ++ out_contig = true; ++ } else { ++ out_contig = false; ++ qm_sg_ents += 1 + mapped_dst_nents; ++ } ++ ++ if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { ++ dev_err(qidev, "Insufficient S/G entries: %d > %lu\n", ++ qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); ++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, GIVENCRYPT, 0, 0); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ /* allocate space for base edesc and link tables */ ++ edesc = qi_cache_alloc(GFP_DMA | flags); ++ if (!edesc) { ++ dev_err(qidev, "could not allocate extended descriptor\n"); ++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, GIVENCRYPT, 0, 0); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ edesc->src_nents = src_nents; ++ edesc->dst_nents = dst_nents; ++ edesc->iv_dma = iv_dma; ++ sg_table = &edesc->sgt[0]; ++ edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); ++ edesc->drv_req.app_ctx = req; ++ edesc->drv_req.cbk = ablkcipher_done; ++ edesc->drv_req.drv_ctx = drv_ctx; ++ ++ if (mapped_src_nents > 1) ++ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0); ++ ++ if (!out_contig) { ++ dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0); ++ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + ++ dst_sg_idx + 1, 0); ++ } ++ ++ edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(qidev, edesc->qm_sg_dma)) { ++ dev_err(qidev, "unable to map S/G table\n"); ++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, GIVENCRYPT, 0, 0); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ fd_sgt = &edesc->drv_req.fd_sgt[0]; ++ ++ if (mapped_src_nents > 1) ++ dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes, ++ 0); ++ else ++ dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src), ++ req->nbytes, 0); ++ ++ if (!out_contig) ++ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * ++ sizeof(*sg_table), ivsize + req->nbytes, ++ 0); ++ else ++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), ++ ivsize + req->nbytes, 0); ++ ++ return edesc; ++} ++ ++static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt) ++{ ++ struct ablkcipher_edesc *edesc; ++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); ++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); ++ int ret; ++ ++ if (unlikely(caam_congested)) ++ return -EAGAIN; ++ ++ /* allocate extended descriptor */ ++ edesc = ablkcipher_edesc_alloc(req, encrypt); ++ if (IS_ERR(edesc)) ++ return PTR_ERR(edesc); ++ ++ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); ++ if (!ret) { ++ ret = -EINPROGRESS; ++ } else { ++ ablkcipher_unmap(ctx->qidev, edesc, req); ++ qi_cache_free(edesc); ++ } ++ ++ return ret; ++} ++ ++static int ablkcipher_encrypt(struct ablkcipher_request *req) ++{ ++ return ablkcipher_crypt(req, true); ++} ++ ++static int ablkcipher_decrypt(struct ablkcipher_request *req) ++{ ++ return ablkcipher_crypt(req, false); ++} ++ ++static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq) ++{ ++ struct ablkcipher_request *req = &creq->creq; ++ struct ablkcipher_edesc *edesc; ++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); ++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); ++ int ret; ++ ++ if (unlikely(caam_congested)) ++ return -EAGAIN; ++ ++ /* allocate extended 
descriptor */ ++ edesc = ablkcipher_giv_edesc_alloc(creq); ++ if (IS_ERR(edesc)) ++ return PTR_ERR(edesc); ++ ++ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); ++ if (!ret) { ++ ret = -EINPROGRESS; ++ } else { ++ ablkcipher_unmap(ctx->qidev, edesc, req); ++ qi_cache_free(edesc); ++ } ++ ++ return ret; ++} ++ ++#define template_ablkcipher template_u.ablkcipher ++struct caam_alg_template { ++ char name[CRYPTO_MAX_ALG_NAME]; ++ char driver_name[CRYPTO_MAX_ALG_NAME]; ++ unsigned int blocksize; ++ u32 type; ++ union { ++ struct ablkcipher_alg ablkcipher; ++ } template_u; ++ u32 class1_alg_type; ++ u32 class2_alg_type; ++}; ++ ++static struct caam_alg_template driver_algs[] = { ++ /* ablkcipher descriptor */ ++ { ++ .name = "cbc(aes)", ++ .driver_name = "cbc-aes-caam-qi", ++ .blocksize = AES_BLOCK_SIZE, ++ .type = CRYPTO_ALG_TYPE_GIVCIPHER, ++ .template_ablkcipher = { ++ .setkey = ablkcipher_setkey, ++ .encrypt = ablkcipher_encrypt, ++ .decrypt = ablkcipher_decrypt, ++ .givencrypt = ablkcipher_givencrypt, ++ .geniv = "", ++ .min_keysize = AES_MIN_KEY_SIZE, ++ .max_keysize = AES_MAX_KEY_SIZE, ++ .ivsize = AES_BLOCK_SIZE, ++ }, ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ }, ++ { ++ .name = "cbc(des3_ede)", ++ .driver_name = "cbc-3des-caam-qi", ++ .blocksize = DES3_EDE_BLOCK_SIZE, ++ .type = CRYPTO_ALG_TYPE_GIVCIPHER, ++ .template_ablkcipher = { ++ .setkey = ablkcipher_setkey, ++ .encrypt = ablkcipher_encrypt, ++ .decrypt = ablkcipher_decrypt, ++ .givencrypt = ablkcipher_givencrypt, ++ .geniv = "", ++ .min_keysize = DES3_EDE_KEY_SIZE, ++ .max_keysize = DES3_EDE_KEY_SIZE, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ }, ++ { ++ .name = "cbc(des)", ++ .driver_name = "cbc-des-caam-qi", ++ .blocksize = DES_BLOCK_SIZE, ++ .type = CRYPTO_ALG_TYPE_GIVCIPHER, ++ .template_ablkcipher = { ++ .setkey = ablkcipher_setkey, ++ .encrypt = ablkcipher_encrypt, ++ .decrypt = ablkcipher_decrypt, ++ .givencrypt = ablkcipher_givencrypt, ++ .geniv = "", ++ .min_keysize = DES_KEY_SIZE, ++ .max_keysize = DES_KEY_SIZE, ++ .ivsize = DES_BLOCK_SIZE, ++ }, ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ }, ++ { ++ .name = "ctr(aes)", ++ .driver_name = "ctr-aes-caam-qi", ++ .blocksize = 1, ++ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, ++ .template_ablkcipher = { ++ .setkey = ablkcipher_setkey, ++ .encrypt = ablkcipher_encrypt, ++ .decrypt = ablkcipher_decrypt, ++ .geniv = "chainiv", ++ .min_keysize = AES_MIN_KEY_SIZE, ++ .max_keysize = AES_MAX_KEY_SIZE, ++ .ivsize = AES_BLOCK_SIZE, ++ }, ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, ++ }, ++ { ++ .name = "rfc3686(ctr(aes))", ++ .driver_name = "rfc3686-ctr-aes-caam-qi", ++ .blocksize = 1, ++ .type = CRYPTO_ALG_TYPE_GIVCIPHER, ++ .template_ablkcipher = { ++ .setkey = ablkcipher_setkey, ++ .encrypt = ablkcipher_encrypt, ++ .decrypt = ablkcipher_decrypt, ++ .givencrypt = ablkcipher_givencrypt, ++ .geniv = "", ++ .min_keysize = AES_MIN_KEY_SIZE + ++ CTR_RFC3686_NONCE_SIZE, ++ .max_keysize = AES_MAX_KEY_SIZE + ++ CTR_RFC3686_NONCE_SIZE, ++ .ivsize = CTR_RFC3686_IV_SIZE, ++ }, ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, ++ }, ++ { ++ .name = "xts(aes)", ++ .driver_name = "xts-aes-caam-qi", ++ .blocksize = AES_BLOCK_SIZE, ++ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, ++ .template_ablkcipher = { ++ .setkey = xts_ablkcipher_setkey, ++ .encrypt = ablkcipher_encrypt, ++ .decrypt = ablkcipher_decrypt, ++ .geniv = "eseqiv", ++ .min_keysize = 2 * AES_MIN_KEY_SIZE, ++ 
.max_keysize = 2 * AES_MAX_KEY_SIZE, ++ .ivsize = AES_BLOCK_SIZE, ++ }, ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, ++ }, ++}; ++ ++static struct caam_aead_alg driver_aeads[] = { ++ /* single-pass ipsec_esp descriptor */ ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(md5),cbc(aes))", ++ .cra_driver_name = "authenc-hmac-md5-" ++ "cbc-aes-caam-qi", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = MD5_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_MD5 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(md5)," ++ "cbc(aes)))", ++ .cra_driver_name = "echainiv-authenc-hmac-md5-" ++ "cbc-aes-caam-qi", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = MD5_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_MD5 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha1),cbc(aes))", ++ .cra_driver_name = "authenc-hmac-sha1-" ++ "cbc-aes-caam-qi", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA1_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha1)," ++ "cbc(aes)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha1-cbc-aes-caam-qi", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA1_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha224),cbc(aes))", ++ .cra_driver_name = "authenc-hmac-sha224-" ++ "cbc-aes-caam-qi", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA224_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha224)," ++ "cbc(aes)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha224-cbc-aes-caam-qi", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA224_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { 
++ .base = { ++ .cra_name = "authenc(hmac(sha256),cbc(aes))", ++ .cra_driver_name = "authenc-hmac-sha256-" ++ "cbc-aes-caam-qi", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA256_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha256)," ++ "cbc(aes)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha256-cbc-aes-" ++ "caam-qi", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA256_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha384),cbc(aes))", ++ .cra_driver_name = "authenc-hmac-sha384-" ++ "cbc-aes-caam-qi", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA384_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha384)," ++ "cbc(aes)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha384-cbc-aes-" ++ "caam-qi", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA384_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha512),cbc(aes))", ++ .cra_driver_name = "authenc-hmac-sha512-" ++ "cbc-aes-caam-qi", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA512_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha512)," ++ "cbc(aes)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha512-cbc-aes-" ++ "caam-qi", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA512_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(md5),cbc(des3_ede))", ++ .cra_driver_name = "authenc-hmac-md5-" ++ "cbc-des3_ede-caam-qi", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = 
aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = MD5_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_MD5 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(md5)," ++ "cbc(des3_ede)))", ++ .cra_driver_name = "echainiv-authenc-hmac-md5-" ++ "cbc-des3_ede-caam-qi", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = MD5_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_MD5 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha1)," ++ "cbc(des3_ede))", ++ .cra_driver_name = "authenc-hmac-sha1-" ++ "cbc-des3_ede-caam-qi", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = SHA1_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha1)," ++ "cbc(des3_ede)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha1-" ++ "cbc-des3_ede-caam-qi", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = SHA1_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha224)," ++ "cbc(des3_ede))", ++ .cra_driver_name = "authenc-hmac-sha224-" ++ "cbc-des3_ede-caam-qi", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = SHA224_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha224)," ++ "cbc(des3_ede)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha224-" ++ "cbc-des3_ede-caam-qi", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = SHA224_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha256)," ++ "cbc(des3_ede))", ++ .cra_driver_name = "authenc-hmac-sha256-" ++ "cbc-des3_ede-caam-qi", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = 
aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = SHA256_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha256)," ++ "cbc(des3_ede)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha256-" ++ "cbc-des3_ede-caam-qi", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = SHA256_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha384)," ++ "cbc(des3_ede))", ++ .cra_driver_name = "authenc-hmac-sha384-" ++ "cbc-des3_ede-caam-qi", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = SHA384_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha384)," ++ "cbc(des3_ede)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha384-" ++ "cbc-des3_ede-caam-qi", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = SHA384_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha512)," ++ "cbc(des3_ede))", ++ .cra_driver_name = "authenc-hmac-sha512-" ++ "cbc-des3_ede-caam-qi", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = SHA512_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha512)," ++ "cbc(des3_ede)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha512-" ++ "cbc-des3_ede-caam-qi", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = SHA512_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(md5),cbc(des))", ++ .cra_driver_name = "authenc-hmac-md5-" ++ "cbc-des-caam-qi", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = MD5_DIGEST_SIZE, 
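
Every entry in this template table follows one pattern: an authenc(hmac(<hash>),cbc(<cipher>)) algorithm pairs a class-1 (cipher) operation with a class-2 (HMAC, precomputed split key) operation, and each echainiv(...) twin is identical except for setting .geniv. A minimal standalone sketch of that pairing; the numeric constants below are placeholders, not the real OP_ALG_ALGSEL_*/OP_ALG_AAI_* encodings from the driver:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define ALGSEL_3DES		0x0200	/* placeholder */
#define ALGSEL_SHA256		0x0400	/* placeholder */
#define AAI_CBC			0x0010	/* placeholder */
#define AAI_HMAC_PRECOMP	0x0004	/* placeholder */

struct tmpl {
	const char *cra_name;
	unsigned int class1_alg_type;	/* cipher, CBC mode */
	unsigned int class2_alg_type;	/* HMAC with precomputed split key */
	bool geniv;			/* echainiv wrapper generates the IV */
};

static const struct tmpl tmpls[] = {
	{ "authenc(hmac(sha256),cbc(des3_ede))",
	  ALGSEL_3DES | AAI_CBC, ALGSEL_SHA256 | AAI_HMAC_PRECOMP, false },
	{ "echainiv(authenc(hmac(sha256),cbc(des3_ede)))",
	  ALGSEL_3DES | AAI_CBC, ALGSEL_SHA256 | AAI_HMAC_PRECOMP, true },
};

int main(void)
{
	for (size_t i = 0; i < sizeof(tmpls) / sizeof(tmpls[0]); i++)
		printf("%s: class1=%#x class2=%#x geniv=%d\n",
		       tmpls[i].cra_name, tmpls[i].class1_alg_type,
		       tmpls[i].class2_alg_type, tmpls[i].geniv);
	return 0;
}
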
++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_MD5 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(md5)," ++ "cbc(des)))", ++ .cra_driver_name = "echainiv-authenc-hmac-md5-" ++ "cbc-des-caam-qi", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = MD5_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_MD5 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha1),cbc(des))", ++ .cra_driver_name = "authenc-hmac-sha1-" ++ "cbc-des-caam-qi", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = SHA1_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha1)," ++ "cbc(des)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha1-cbc-des-caam-qi", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = SHA1_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha224),cbc(des))", ++ .cra_driver_name = "authenc-hmac-sha224-" ++ "cbc-des-caam-qi", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = SHA224_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha224)," ++ "cbc(des)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha224-cbc-des-" ++ "caam-qi", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = SHA224_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha256),cbc(des))", ++ .cra_driver_name = "authenc-hmac-sha256-" ++ "cbc-des-caam-qi", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = SHA256_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = 
"echainiv(authenc(hmac(sha256)," ++ "cbc(des)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha256-cbc-des-" ++ "caam-qi", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = SHA256_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha384),cbc(des))", ++ .cra_driver_name = "authenc-hmac-sha384-" ++ "cbc-des-caam-qi", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = SHA384_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha384)," ++ "cbc(des)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha384-cbc-des-" ++ "caam-qi", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = SHA384_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha512),cbc(des))", ++ .cra_driver_name = "authenc-hmac-sha512-" ++ "cbc-des-caam-qi", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = SHA512_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha512)," ++ "cbc(des)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha512-cbc-des-" ++ "caam-qi", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = SHA512_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "tls10(hmac(sha1),cbc(aes))", ++ .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = tls_setkey, ++ .setauthsize = tls_setauthsize, ++ .encrypt = tls_encrypt, ++ .decrypt = tls_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA1_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ } ++ } ++}; ++ ++struct caam_crypto_alg { ++ struct list_head entry; ++ struct crypto_alg crypto_alg; ++ struct caam_alg_entry caam; ++}; ++ ++static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) ++{ ++ struct caam_drv_private *priv; ++ /* 
Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */ ++ static const u8 digest_size[] = { ++ MD5_DIGEST_SIZE, ++ SHA1_DIGEST_SIZE, ++ SHA224_DIGEST_SIZE, ++ SHA256_DIGEST_SIZE, ++ SHA384_DIGEST_SIZE, ++ SHA512_DIGEST_SIZE ++ }; ++ u8 op_id; ++ ++ /* ++ * distribute tfms across job rings to ensure in-order ++ * crypto request processing per tfm ++ */ ++ ctx->jrdev = caam_jr_alloc(); ++ if (IS_ERR(ctx->jrdev)) { ++ pr_err("Job Ring Device allocation for transform failed\n"); ++ return PTR_ERR(ctx->jrdev); ++ } ++ ++ ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key), ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) { ++ dev_err(ctx->jrdev, "unable to map key\n"); ++ caam_jr_free(ctx->jrdev); ++ return -ENOMEM; ++ } ++ ++ /* copy descriptor header template value */ ++ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; ++ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; ++ ++ if (ctx->adata.algtype) { ++ op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK) ++ >> OP_ALG_ALGSEL_SHIFT; ++ if (op_id < ARRAY_SIZE(digest_size)) { ++ ctx->authsize = digest_size[op_id]; ++ } else { ++ dev_err(ctx->jrdev, ++ "incorrect op_id %d; must be less than %zu\n", ++ op_id, ARRAY_SIZE(digest_size)); ++ caam_jr_free(ctx->jrdev); ++ return -EINVAL; ++ } ++ } else { ++ ctx->authsize = 0; ++ } ++ ++ priv = dev_get_drvdata(ctx->jrdev->parent); ++ ctx->qidev = priv->qidev; ++ ++ spin_lock_init(&ctx->lock); ++ ctx->drv_ctx[ENCRYPT] = NULL; ++ ctx->drv_ctx[DECRYPT] = NULL; ++ ctx->drv_ctx[GIVENCRYPT] = NULL; ++ ++ return 0; ++} ++ ++static int caam_cra_init(struct crypto_tfm *tfm) ++{ ++ struct crypto_alg *alg = tfm->__crt_alg; ++ struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg), ++ crypto_alg); ++ struct caam_ctx *ctx = crypto_tfm_ctx(tfm); ++ ++ return caam_init_common(ctx, &caam_alg->caam); ++} ++ ++static int caam_aead_init(struct crypto_aead *tfm) ++{ ++ struct aead_alg *alg = crypto_aead_alg(tfm); ++ struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg), ++ aead); ++ struct caam_ctx *ctx = crypto_aead_ctx(tfm); ++ ++ return caam_init_common(ctx, &caam_alg->caam); ++} ++ ++static void caam_exit_common(struct caam_ctx *ctx) ++{ ++ caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]); ++ caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); ++ caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]); ++ ++ dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ++ DMA_TO_DEVICE); ++ ++ caam_jr_free(ctx->jrdev); ++} ++ ++static void caam_cra_exit(struct crypto_tfm *tfm) ++{ ++ caam_exit_common(crypto_tfm_ctx(tfm)); ++} ++ ++static void caam_aead_exit(struct crypto_aead *tfm) ++{ ++ caam_exit_common(crypto_aead_ctx(tfm)); ++} ++ ++static struct list_head alg_list; ++static void __exit caam_qi_algapi_exit(void) ++{ ++ struct caam_crypto_alg *t_alg, *n; ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { ++ struct caam_aead_alg *t_alg = driver_aeads + i; ++ ++ if (t_alg->registered) ++ crypto_unregister_aead(&t_alg->aead); ++ } ++ ++ if (!alg_list.next) ++ return; ++ ++ list_for_each_entry_safe(t_alg, n, &alg_list, entry) { ++ crypto_unregister_alg(&t_alg->crypto_alg); ++ list_del(&t_alg->entry); ++ kfree(t_alg); ++ } ++} ++ ++static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template ++ *template) ++{ ++ struct caam_crypto_alg *t_alg; ++ struct crypto_alg *alg; ++ ++ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); ++ if (!t_alg) ++ return ERR_PTR(-ENOMEM); ++ ++ alg = &t_alg->crypto_alg; ++ ++ 
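
caam_init_common() above derives the default authentication tag size by turning the class-2 algorithm selector into an index into digest_size[]. A standalone sketch of that bounds-checked lookup, assuming (as the table ordering suggests) that the MD5..SHA-512 selector sub-values are consecutive; the mask and shift are illustrative stand-ins for OP_ALG_ALGSEL_SUBMASK/OP_ALG_ALGSEL_SHIFT:

#include <stdio.h>

#define ALGSEL_SHIFT	16			/* placeholder */
#define ALGSEL_SUBMASK	(0x0f << ALGSEL_SHIFT)	/* placeholder */

/* MD5, SHA-1, SHA-224, SHA-256, SHA-384, SHA-512 */
static const unsigned char digest_size[] = { 16, 20, 28, 32, 48, 64 };

static int authsize_for(unsigned int algtype)
{
	unsigned int op_id = (algtype & ALGSEL_SUBMASK) >> ALGSEL_SHIFT;

	if (op_id >= sizeof(digest_size))
		return -1;	/* mirrors the driver's -EINVAL path */
	return digest_size[op_id];
}

int main(void)
{
	/* 0x3 selects SHA-256 under these placeholder encodings */
	printf("authsize = %d\n", authsize_for(0x3u << ALGSEL_SHIFT));
	return 0;
}
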
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); ++ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", ++ template->driver_name); ++ alg->cra_module = THIS_MODULE; ++ alg->cra_init = caam_cra_init; ++ alg->cra_exit = caam_cra_exit; ++ alg->cra_priority = CAAM_CRA_PRIORITY; ++ alg->cra_blocksize = template->blocksize; ++ alg->cra_alignmask = 0; ++ alg->cra_ctxsize = sizeof(struct caam_ctx); ++ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY | ++ template->type; ++ switch (template->type) { ++ case CRYPTO_ALG_TYPE_GIVCIPHER: ++ alg->cra_type = &crypto_givcipher_type; ++ alg->cra_ablkcipher = template->template_ablkcipher; ++ break; ++ case CRYPTO_ALG_TYPE_ABLKCIPHER: ++ alg->cra_type = &crypto_ablkcipher_type; ++ alg->cra_ablkcipher = template->template_ablkcipher; ++ break; ++ } ++ ++ t_alg->caam.class1_alg_type = template->class1_alg_type; ++ t_alg->caam.class2_alg_type = template->class2_alg_type; ++ ++ return t_alg; ++} ++ ++static void caam_aead_alg_init(struct caam_aead_alg *t_alg) ++{ ++ struct aead_alg *alg = &t_alg->aead; ++ ++ alg->base.cra_module = THIS_MODULE; ++ alg->base.cra_priority = CAAM_CRA_PRIORITY; ++ alg->base.cra_ctxsize = sizeof(struct caam_ctx); ++ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; ++ ++ alg->init = caam_aead_init; ++ alg->exit = caam_aead_exit; ++} ++ ++static int __init caam_qi_algapi_init(void) ++{ ++ struct device_node *dev_node; ++ struct platform_device *pdev; ++ struct device *ctrldev; ++ struct caam_drv_private *priv; ++ int i = 0, err = 0; ++ u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst; ++ unsigned int md_limit = SHA512_DIGEST_SIZE; ++ bool registered = false; ++ ++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); ++ if (!dev_node) { ++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); ++ if (!dev_node) ++ return -ENODEV; ++ } ++ ++ pdev = of_find_device_by_node(dev_node); ++ of_node_put(dev_node); ++ if (!pdev) ++ return -ENODEV; ++ ++ ctrldev = &pdev->dev; ++ priv = dev_get_drvdata(ctrldev); ++ ++ /* ++ * If priv is NULL, it's probably because the caam driver wasn't ++ * properly initialized (e.g. RNG4 init failed). Thus, bail out here. ++ */ ++ if (!priv || !priv->qi_present) ++ return -ENODEV; ++ ++ if (caam_dpaa2) { ++ dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n"); ++ return -ENODEV; ++ } ++ ++ INIT_LIST_HEAD(&alg_list); ++ ++ /* ++ * Register crypto algorithms the device supports. ++ * First, detect presence and attributes of DES, AES, and MD blocks. 
++ */ ++ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); ++ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); ++ des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT; ++ aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT; ++ md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; ++ ++ /* If MD is present, limit digest size based on LP256 */ ++ if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)) ++ md_limit = SHA256_DIGEST_SIZE; ++ ++ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { ++ struct caam_crypto_alg *t_alg; ++ struct caam_alg_template *alg = driver_algs + i; ++ u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK; ++ ++ /* Skip DES algorithms if not supported by device */ ++ if (!des_inst && ++ ((alg_sel == OP_ALG_ALGSEL_3DES) || ++ (alg_sel == OP_ALG_ALGSEL_DES))) ++ continue; ++ ++ /* Skip AES algorithms if not supported by device */ ++ if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) ++ continue; ++ ++ t_alg = caam_alg_alloc(alg); ++ if (IS_ERR(t_alg)) { ++ err = PTR_ERR(t_alg); ++ dev_warn(priv->qidev, "%s alg allocation failed\n", ++ alg->driver_name); ++ continue; ++ } ++ ++ err = crypto_register_alg(&t_alg->crypto_alg); ++ if (err) { ++ dev_warn(priv->qidev, "%s alg registration failed\n", ++ t_alg->crypto_alg.cra_driver_name); ++ kfree(t_alg); ++ continue; ++ } ++ ++ list_add_tail(&t_alg->entry, &alg_list); ++ registered = true; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { ++ struct caam_aead_alg *t_alg = driver_aeads + i; ++ u32 c1_alg_sel = t_alg->caam.class1_alg_type & ++ OP_ALG_ALGSEL_MASK; ++ u32 c2_alg_sel = t_alg->caam.class2_alg_type & ++ OP_ALG_ALGSEL_MASK; ++ u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; ++ ++ /* Skip DES algorithms if not supported by device */ ++ if (!des_inst && ++ ((c1_alg_sel == OP_ALG_ALGSEL_3DES) || ++ (c1_alg_sel == OP_ALG_ALGSEL_DES))) ++ continue; ++ ++ /* Skip AES algorithms if not supported by device */ ++ if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES)) ++ continue; ++ ++ /* ++ * Check support for AES algorithms not available ++ * on LP devices. ++ */ ++ if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) && ++ (alg_aai == OP_ALG_AAI_GCM)) ++ continue; ++ ++ /* ++ * Skip algorithms requiring message digests ++ * if MD or MD size is not supported by device. ++ */ ++ if (c2_alg_sel && ++ (!md_inst || (t_alg->aead.maxauthsize > md_limit))) ++ continue; ++ ++ caam_aead_alg_init(t_alg); ++ ++ err = crypto_register_aead(&t_alg->aead); ++ if (err) { ++ pr_warn("%s alg registration failed\n", ++ t_alg->aead.base.cra_driver_name); ++ continue; ++ } ++ ++ t_alg->registered = true; ++ registered = true; ++ } ++ ++ if (registered) ++ dev_info(priv->qidev, "algorithms registered in /proc/crypto\n"); ++ ++ return err; ++} ++ ++module_init(caam_qi_algapi_init); ++module_exit(caam_qi_algapi_exit); ++ ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend"); ++MODULE_AUTHOR("Freescale Semiconductor"); +--- /dev/null ++++ b/drivers/crypto/caam/caamalg_qi2.c +@@ -0,0 +1,4428 @@ ++/* ++ * Copyright 2015-2016 Freescale Semiconductor Inc. ++ * Copyright 2017 NXP ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
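
caam_qi_algapi_init() above reads the CHA instantiation register once, extracts per-block counts, and skips any template whose cipher or hash engine is absent (or whose digest exceeds md_limit on LP256 parts). A simplified sketch of that mask-and-shift gating; the masks, shifts, and sample register value are illustrative, not the real CHA_ID_LS_* definitions:

#include <stdio.h>

#define DES_MASK	0x000000f0u	/* placeholder */
#define DES_SHIFT	4
#define AES_MASK	0x00000f00u	/* placeholder */
#define AES_SHIFT	8
#define MD_MASK		0x0000f000u	/* placeholder */
#define MD_SHIFT	12

int main(void)
{
	unsigned int cha_inst = 0x00002110;	/* pretend register readback */
	unsigned int des_inst = (cha_inst & DES_MASK) >> DES_SHIFT;
	unsigned int aes_inst = (cha_inst & AES_MASK) >> AES_SHIFT;
	unsigned int md_inst = (cha_inst & MD_MASK) >> MD_SHIFT;

	/* a DES-based template would be skipped whenever des_inst == 0 */
	printf("des=%u aes=%u md=%u\n", des_inst, aes_inst, md_inst);
	return 0;
}
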
++ * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * Neither the names of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "compat.h" ++#include "regs.h" ++#include "caamalg_qi2.h" ++#include "dpseci_cmd.h" ++#include "desc_constr.h" ++#include "error.h" ++#include "sg_sw_sec4.h" ++#include "sg_sw_qm2.h" ++#include "key_gen.h" ++#include "caamalg_desc.h" ++#include "../../../drivers/staging/fsl-mc/include/mc.h" ++#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h" ++#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h" ++ ++#define CAAM_CRA_PRIORITY 2000 ++ ++/* max key is sum of AES_MAX_KEY_SIZE, max split key size */ ++#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \ ++ SHA512_DIGEST_SIZE * 2) ++ ++#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM ++bool caam_little_end; ++EXPORT_SYMBOL(caam_little_end); ++bool caam_imx; ++EXPORT_SYMBOL(caam_imx); ++#endif ++ ++/* ++ * This is a cache of buffers, from which the users of the CAAM QI driver ++ * can allocate short buffers. It's speedier than doing kmalloc on the hotpath. ++ * NOTE: A more elegant solution would be to have some headroom in the frames ++ * being processed. This could be added by the dpaa2-eth driver, but it would ++ * pose a problem for userspace application processing, which cannot ++ * know of this limitation. So for now, this will work. ++ * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here. ++ */ ++static struct kmem_cache *qi_cache; ++ ++struct caam_alg_entry { ++ struct device *dev; ++ int class1_alg_type; ++ int class2_alg_type; ++ bool rfc3686; ++ bool geniv; ++}; ++ ++struct caam_aead_alg { ++ struct aead_alg aead; ++ struct caam_alg_entry caam; ++ bool registered; ++}; ++ ++/** ++ * caam_ctx - per-session context ++ * @flc: Flow Contexts array ++ * @key: virtual address of the key(s): [authentication key], encryption key ++ * @key_dma: I/O virtual address of the key ++ * @dev: dpseci device ++ * @adata: authentication algorithm details ++ * @cdata: encryption algorithm details ++ * @authsize: authentication tag (a.k.a. 
ICV / MAC) size ++ */ ++struct caam_ctx { ++ struct caam_flc flc[NUM_OP]; ++ u8 key[CAAM_MAX_KEY_SIZE]; ++ dma_addr_t key_dma; ++ struct device *dev; ++ struct alginfo adata; ++ struct alginfo cdata; ++ unsigned int authsize; ++}; ++ ++void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv, ++ dma_addr_t iova_addr) ++{ ++ phys_addr_t phys_addr; ++ ++ phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) : ++ iova_addr; ++ ++ return phys_to_virt(phys_addr); ++} ++ ++/* ++ * qi_cache_alloc - Allocate buffers from CAAM-QI cache ++ * ++ * Allocate data on the hotpath. Instead of using kmalloc, one can use the ++ * services of the CAAM QI memory cache (backed by kmem_cache). The buffers ++ * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for ++ * hosting 16 SG entries. ++ * ++ * @flags - flags that would be used for the equivalent kmalloc(..) call ++ * ++ * Returns a pointer to a retrieved buffer on success or NULL on failure. ++ */ ++static inline void *qi_cache_alloc(gfp_t flags) ++{ ++ return kmem_cache_alloc(qi_cache, flags); ++} ++ ++/* ++ * qi_cache_free - Frees buffers allocated from CAAM-QI cache ++ * ++ * @obj - buffer previously allocated by qi_cache_alloc ++ * ++ * No checking is being done, the call is a passthrough call to ++ * kmem_cache_free(...) ++ */ ++static inline void qi_cache_free(void *obj) ++{ ++ kmem_cache_free(qi_cache, obj); ++} ++ ++static struct caam_request *to_caam_req(struct crypto_async_request *areq) ++{ ++ switch (crypto_tfm_alg_type(areq->tfm)) { ++ case CRYPTO_ALG_TYPE_ABLKCIPHER: ++ case CRYPTO_ALG_TYPE_GIVCIPHER: ++ return ablkcipher_request_ctx(ablkcipher_request_cast(areq)); ++ case CRYPTO_ALG_TYPE_AEAD: ++ return aead_request_ctx(container_of(areq, struct aead_request, ++ base)); ++ default: ++ return ERR_PTR(-EINVAL); ++ } ++} ++ ++static void caam_unmap(struct device *dev, struct scatterlist *src, ++ struct scatterlist *dst, int src_nents, ++ int dst_nents, dma_addr_t iv_dma, int ivsize, ++ enum optype op_type, dma_addr_t qm_sg_dma, ++ int qm_sg_bytes) ++{ ++ if (dst != src) { ++ if (src_nents) ++ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); ++ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); ++ } else { ++ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); ++ } ++ ++ if (iv_dma) ++ dma_unmap_single(dev, iv_dma, ivsize, ++ op_type == GIVENCRYPT ? 
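
qi_cache_alloc()/qi_cache_free() above are deliberately thin: every hotpath buffer has the same CAAM_QI_MEMCACHE_SIZE, so a kmem_cache beats per-request kmalloc. A userspace analogue of the wrapper, with malloc() standing in for kmem_cache_alloc() and the 768-byte size assumed rather than taken from the driver:

#include <stdlib.h>

#define QI_MEMCACHE_SIZE 768	/* assumed, not the real constant */

static void *qi_cache_alloc_sketch(void)
{
	/* every buffer has the same size, like a kmem_cache object */
	return malloc(QI_MEMCACHE_SIZE);
}

static void qi_cache_free_sketch(void *obj)
{
	free(obj);	/* pure passthrough, mirroring kmem_cache_free() */
}

int main(void)
{
	void *edesc = qi_cache_alloc_sketch();

	if (edesc)
		qi_cache_free_sketch(edesc);
	return 0;
}
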
DMA_FROM_DEVICE : ++ DMA_TO_DEVICE); ++ ++ if (qm_sg_bytes) ++ dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); ++} ++ ++static int aead_set_sh_desc(struct crypto_aead *aead) ++{ ++ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), ++ typeof(*alg), aead); ++ struct caam_ctx *ctx = crypto_aead_ctx(aead); ++ unsigned int ivsize = crypto_aead_ivsize(aead); ++ struct device *dev = ctx->dev; ++ struct caam_flc *flc; ++ u32 *desc; ++ u32 ctx1_iv_off = 0; ++ u32 *nonce = NULL; ++ unsigned int data_len[2]; ++ u32 inl_mask; ++ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == ++ OP_ALG_AAI_CTR_MOD128); ++ const bool is_rfc3686 = alg->caam.rfc3686; ++ ++ if (!ctx->cdata.keylen || !ctx->authsize) ++ return 0; ++ ++ /* ++ * AES-CTR needs to load IV in CONTEXT1 reg ++ * at an offset of 128bits (16bytes) ++ * CONTEXT1[255:128] = IV ++ */ ++ if (ctr_mode) ++ ctx1_iv_off = 16; ++ ++ /* ++ * RFC3686 specific: ++ * CONTEXT1[255:128] = {NONCE, IV, COUNTER} ++ */ ++ if (is_rfc3686) { ++ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; ++ nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad + ++ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); ++ } ++ ++ data_len[0] = ctx->adata.keylen_pad; ++ data_len[1] = ctx->cdata.keylen; ++ ++ /* aead_encrypt shared descriptor */ ++ if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN : ++ DESC_QI_AEAD_ENC_LEN) + ++ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), ++ DESC_JOB_IO_LEN, data_len, &inl_mask, ++ ARRAY_SIZE(data_len)) < 0) ++ return -EINVAL; ++ ++ if (inl_mask & 1) ++ ctx->adata.key_virt = ctx->key; ++ else ++ ctx->adata.key_dma = ctx->key_dma; ++ ++ if (inl_mask & 2) ++ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; ++ else ++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; ++ ++ ctx->adata.key_inline = !!(inl_mask & 1); ++ ctx->cdata.key_inline = !!(inl_mask & 2); ++ ++ flc = &ctx->flc[ENCRYPT]; ++ desc = flc->sh_desc; ++ ++ if (alg->caam.geniv) ++ cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ++ ivsize, ctx->authsize, is_rfc3686, ++ nonce, ctx1_iv_off, true); ++ else ++ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ++ ivsize, ctx->authsize, is_rfc3686, nonce, ++ ctx1_iv_off, true); ++ ++ flc->flc[1] = desc_len(desc); /* SDL */ ++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + ++ desc_bytes(desc), DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, flc->flc_dma)) { ++ dev_err(dev, "unable to map shared descriptor\n"); ++ return -ENOMEM; ++ } ++ ++ /* aead_decrypt shared descriptor */ ++ if (desc_inline_query(DESC_QI_AEAD_DEC_LEN + ++ (is_rfc3686 ? 
DESC_AEAD_CTR_RFC3686_LEN : 0), ++ DESC_JOB_IO_LEN, data_len, &inl_mask, ++ ARRAY_SIZE(data_len)) < 0) ++ return -EINVAL; ++ ++ if (inl_mask & 1) ++ ctx->adata.key_virt = ctx->key; ++ else ++ ctx->adata.key_dma = ctx->key_dma; ++ ++ if (inl_mask & 2) ++ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; ++ else ++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; ++ ++ ctx->adata.key_inline = !!(inl_mask & 1); ++ ctx->cdata.key_inline = !!(inl_mask & 2); ++ ++ flc = &ctx->flc[DECRYPT]; ++ desc = flc->sh_desc; ++ ++ cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ++ ivsize, ctx->authsize, alg->caam.geniv, ++ is_rfc3686, nonce, ctx1_iv_off, true); ++ ++ flc->flc[1] = desc_len(desc); /* SDL */ ++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + ++ desc_bytes(desc), DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, flc->flc_dma)) { ++ dev_err(dev, "unable to map shared descriptor\n"); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize) ++{ ++ struct caam_ctx *ctx = crypto_aead_ctx(authenc); ++ ++ ctx->authsize = authsize; ++ aead_set_sh_desc(authenc); ++ ++ return 0; ++} ++ ++struct split_key_sh_result { ++ struct completion completion; ++ int err; ++ struct device *dev; ++}; ++ ++static void split_key_sh_done(void *cbk_ctx, u32 err) ++{ ++ struct split_key_sh_result *res = cbk_ctx; ++ ++#ifdef DEBUG ++ dev_err(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); ++#endif ++ ++ if (err) ++ caam_qi2_strstatus(res->dev, err); ++ ++ res->err = err; ++ complete(&res->completion); ++} ++ ++static int gen_split_key_sh(struct device *dev, u8 *key_out, ++ struct alginfo * const adata, const u8 *key_in, ++ u32 keylen) ++{ ++ struct caam_request *req_ctx; ++ u32 *desc; ++ struct split_key_sh_result result; ++ dma_addr_t dma_addr_in, dma_addr_out; ++ struct caam_flc *flc; ++ struct dpaa2_fl_entry *in_fle, *out_fle; ++ int ret = -ENOMEM; ++ ++ req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA); ++ if (!req_ctx) ++ return -ENOMEM; ++ ++ in_fle = &req_ctx->fd_flt[1]; ++ out_fle = &req_ctx->fd_flt[0]; ++ ++ flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA); ++ if (!flc) ++ goto err_flc; ++ ++ dma_addr_in = dma_map_single(dev, (void *)key_in, keylen, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, dma_addr_in)) { ++ dev_err(dev, "unable to map key input memory\n"); ++ goto err_dma_addr_in; ++ } ++ ++ dma_addr_out = dma_map_single(dev, key_out, adata->keylen_pad, ++ DMA_FROM_DEVICE); ++ if (dma_mapping_error(dev, dma_addr_out)) { ++ dev_err(dev, "unable to map key output memory\n"); ++ goto err_dma_addr_out; ++ } ++ ++ desc = flc->sh_desc; ++ ++ init_sh_desc(desc, 0); ++ append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG); ++ ++ /* Sets MDHA up into an HMAC-INIT */ ++ append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) | ++ OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT | ++ OP_ALG_AS_INIT); ++ ++ /* ++ * do a FIFO_LOAD of zero, this will trigger the internal key expansion ++ * into both pads inside MDHA ++ */ ++ append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB | ++ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2); ++ ++ /* ++ * FIFO_STORE with the explicit split-key content store ++ * (0x26 output type) ++ */ ++ append_fifo_store(desc, dma_addr_out, adata->keylen, ++ LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); ++ ++ flc->flc[1] = desc_len(desc); /* SDL */ ++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + ++ desc_bytes(desc), DMA_TO_DEVICE); ++ if 
(dma_mapping_error(dev, flc->flc_dma)) { ++ dev_err(dev, "unable to map shared descriptor\n"); ++ goto err_flc_dma; ++ } ++ ++ dpaa2_fl_set_final(in_fle, true); ++ dpaa2_fl_set_format(in_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(in_fle, dma_addr_in); ++ dpaa2_fl_set_len(in_fle, keylen); ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(out_fle, dma_addr_out); ++ dpaa2_fl_set_len(out_fle, adata->keylen_pad); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); ++ print_hex_dump(KERN_ERR, "desc@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ++#endif ++ ++ result.err = 0; ++ init_completion(&result.completion); ++ result.dev = dev; ++ ++ req_ctx->flc = flc; ++ req_ctx->cbk = split_key_sh_done; ++ req_ctx->ctx = &result; ++ ++ ret = dpaa2_caam_enqueue(dev, req_ctx); ++ if (ret == -EINPROGRESS) { ++ /* in progress */ ++ wait_for_completion(&result.completion); ++ ret = result.err; ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, key_out, ++ adata->keylen_pad, 1); ++#endif ++ } ++ ++ dma_unmap_single(dev, flc->flc_dma, sizeof(flc->flc) + desc_bytes(desc), ++ DMA_TO_DEVICE); ++err_flc_dma: ++ dma_unmap_single(dev, dma_addr_out, adata->keylen_pad, DMA_FROM_DEVICE); ++err_dma_addr_out: ++ dma_unmap_single(dev, dma_addr_in, keylen, DMA_TO_DEVICE); ++err_dma_addr_in: ++ kfree(flc); ++err_flc: ++ kfree(req_ctx); ++ return ret; ++} ++ ++static int gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in, ++ u32 authkeylen) ++{ ++ return gen_split_key_sh(ctx->dev, ctx->key, &ctx->adata, key_in, ++ authkeylen); ++} ++ ++static int aead_setkey(struct crypto_aead *aead, const u8 *key, ++ unsigned int keylen) ++{ ++ struct caam_ctx *ctx = crypto_aead_ctx(aead); ++ struct device *dev = ctx->dev; ++ struct crypto_authenc_keys keys; ++ int ret; ++ ++ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) ++ goto badkey; ++ ++#ifdef DEBUG ++ dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n", ++ keys.authkeylen + keys.enckeylen, keys.enckeylen, ++ keys.authkeylen); ++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); ++#endif ++ ++ ctx->adata.keylen = split_key_len(ctx->adata.algtype & ++ OP_ALG_ALGSEL_MASK); ++ ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype & ++ OP_ALG_ALGSEL_MASK); ++ ++#ifdef DEBUG ++ dev_err(dev, "split keylen %d split keylen padded %d\n", ++ ctx->adata.keylen, ctx->adata.keylen_pad); ++ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey, keylen, 1); ++#endif ++ ++ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) ++ goto badkey; ++ ++ ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen); ++ if (ret) ++ goto badkey; ++ ++ /* postpend encryption key to auth split key */ ++ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); ++ ++ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad + ++ keys.enckeylen, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, ctx->key_dma)) { ++ dev_err(dev, "unable to map key i/o memory\n"); ++ return -ENOMEM; ++ } ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, ++ ctx->adata.keylen_pad + keys.enckeylen, 1); ++#endif ++ ++ ctx->cdata.keylen = keys.enckeylen; ++ ++ ret = aead_set_sh_desc(aead); ++ if 
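
aead_setkey() above lays the key material out as one contiguous buffer: the padded HMAC split key produced by gen_split_aead_key() comes first, and the raw encryption key is appended at offset keylen_pad. A sketch of that layout; the 64-byte pad is an assumed value, not the result of split_key_pad_len():

#include <stdio.h>
#include <string.h>

#define KEYLEN_PAD 64	/* placeholder for split_key_pad_len(...) */

int main(void)
{
	unsigned char ctx_key[KEYLEN_PAD + 32];
	unsigned char split_key[KEYLEN_PAD] = { 0 };	/* from key gen */
	unsigned char enc_key[32] = { 0 };		/* e.g. AES-256 key */

	memcpy(ctx_key, split_key, KEYLEN_PAD);
	/* the "postpend" step: the enc key lands right after the pad */
	memcpy(ctx_key + KEYLEN_PAD, enc_key, sizeof(enc_key));

	printf("enc key at offset %d of %zu\n", KEYLEN_PAD, sizeof(ctx_key));
	return 0;
}
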
(ret) ++ dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad + ++ keys.enckeylen, DMA_TO_DEVICE); ++ ++ return ret; ++badkey: ++ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); ++ return -EINVAL; ++} ++ ++static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, ++ bool encrypt) ++{ ++ struct crypto_aead *aead = crypto_aead_reqtfm(req); ++ struct caam_request *req_ctx = aead_request_ctx(req); ++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; ++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; ++ struct caam_ctx *ctx = crypto_aead_ctx(aead); ++ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), ++ typeof(*alg), aead); ++ struct device *dev = ctx->dev; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? ++ GFP_KERNEL : GFP_ATOMIC; ++ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; ++ struct aead_edesc *edesc; ++ dma_addr_t qm_sg_dma, iv_dma = 0; ++ int ivsize = 0; ++ unsigned int authsize = ctx->authsize; ++ int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes; ++ int in_len, out_len; ++ struct dpaa2_sg_entry *sg_table; ++ enum optype op_type = encrypt ? ENCRYPT : DECRYPT; ++ ++ /* allocate space for base edesc and link tables */ ++ edesc = qi_cache_alloc(GFP_DMA | flags); ++ if (unlikely(!edesc)) { ++ dev_err(dev, "could not allocate extended descriptor\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ if (unlikely(req->dst != req->src)) { ++ src_nents = sg_nents_for_len(req->src, req->assoclen + ++ req->cryptlen); ++ if (unlikely(src_nents < 0)) { ++ dev_err(dev, "Insufficient bytes (%d) in src S/G\n", ++ req->assoclen + req->cryptlen); ++ qi_cache_free(edesc); ++ return ERR_PTR(src_nents); ++ } ++ ++ dst_nents = sg_nents_for_len(req->dst, req->assoclen + ++ req->cryptlen + ++ (encrypt ? authsize : ++ (-authsize))); ++ if (unlikely(dst_nents < 0)) { ++ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n", ++ req->assoclen + req->cryptlen + ++ (encrypt ? authsize : (-authsize))); ++ qi_cache_free(edesc); ++ return ERR_PTR(dst_nents); ++ } ++ ++ if (src_nents) { ++ mapped_src_nents = dma_map_sg(dev, req->src, src_nents, ++ DMA_TO_DEVICE); ++ if (unlikely(!mapped_src_nents)) { ++ dev_err(dev, "unable to map source\n"); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ } else { ++ mapped_src_nents = 0; ++ } ++ ++ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents, ++ DMA_FROM_DEVICE); ++ if (unlikely(!mapped_dst_nents)) { ++ dev_err(dev, "unable to map destination\n"); ++ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ } else { ++ src_nents = sg_nents_for_len(req->src, req->assoclen + ++ req->cryptlen + ++ (encrypt ? authsize : 0)); ++ if (unlikely(src_nents < 0)) { ++ dev_err(dev, "Insufficient bytes (%d) in src S/G\n", ++ req->assoclen + req->cryptlen + ++ (encrypt ? 
authsize : 0)); ++ qi_cache_free(edesc); ++ return ERR_PTR(src_nents); ++ } ++ ++ mapped_src_nents = dma_map_sg(dev, req->src, src_nents, ++ DMA_BIDIRECTIONAL); ++ if (unlikely(!mapped_src_nents)) { ++ dev_err(dev, "unable to map source\n"); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ } ++ ++ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) { ++ ivsize = crypto_aead_ivsize(aead); ++ iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, iv_dma)) { ++ dev_err(dev, "unable to map IV\n"); ++ caam_unmap(dev, req->src, req->dst, src_nents, ++ dst_nents, 0, 0, op_type, 0, 0); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ } ++ ++ /* ++ * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. ++ * Input is not contiguous. ++ */ ++ qm_sg_nents = 1 + !!ivsize + mapped_src_nents + ++ (mapped_dst_nents > 1 ? mapped_dst_nents : 0); ++ if (unlikely(qm_sg_nents > CAAM_QI_MAX_AEAD_SG)) { ++ dev_err(dev, "Insufficient S/G entries: %d > %lu\n", ++ qm_sg_nents, CAAM_QI_MAX_AEAD_SG); ++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, op_type, 0, 0); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ sg_table = &edesc->sgt[0]; ++ qm_sg_bytes = qm_sg_nents * sizeof(*sg_table); ++ ++ edesc->src_nents = src_nents; ++ edesc->dst_nents = dst_nents; ++ edesc->iv_dma = iv_dma; ++ ++ edesc->assoclen_dma = dma_map_single(dev, &req->assoclen, 4, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, edesc->assoclen_dma)) { ++ dev_err(dev, "unable to map assoclen\n"); ++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, op_type, 0, 0); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0); ++ qm_sg_index++; ++ if (ivsize) { ++ dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); ++ qm_sg_index++; ++ } ++ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0); ++ qm_sg_index += mapped_src_nents; ++ ++ if (mapped_dst_nents > 1) ++ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + ++ qm_sg_index, 0); ++ ++ qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, qm_sg_dma)) { ++ dev_err(dev, "unable to map S/G table\n"); ++ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); ++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, op_type, 0, 0); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ edesc->qm_sg_dma = qm_sg_dma; ++ edesc->qm_sg_bytes = qm_sg_bytes; ++ ++ out_len = req->assoclen + req->cryptlen + ++ (encrypt ? 
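
The S/G table sizing in aead_edesc_alloc() counts one entry for the DMA-mapped assoclen word, one for the IV when present, all mapped source segments, and destination segments only when the destination is itself scattered (more than one mapped segment). A sketch of that arithmetic:

#include <stdio.h>

static int qm_sg_nents(int ivsize, int mapped_src, int mapped_dst)
{
	return 1 + !!ivsize + mapped_src + (mapped_dst > 1 ? mapped_dst : 0);
}

int main(void)
{
	/* e.g. 16-byte IV, 3 source segments, contiguous destination */
	printf("%d entries\n", qm_sg_nents(16, 3, 1));	/* -> 5 */
	return 0;
}
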
ctx->authsize : (-ctx->authsize)); ++ in_len = 4 + ivsize + req->assoclen + req->cryptlen; ++ ++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); ++ dpaa2_fl_set_final(in_fle, true); ++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); ++ dpaa2_fl_set_addr(in_fle, qm_sg_dma); ++ dpaa2_fl_set_len(in_fle, in_len); ++ ++ if (req->dst == req->src) { ++ if (mapped_src_nents == 1) { ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src)); ++ } else { ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); ++ dpaa2_fl_set_addr(out_fle, qm_sg_dma + ++ (1 + !!ivsize) * sizeof(*sg_table)); ++ } ++ } else if (mapped_dst_nents == 1) { ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst)); ++ } else { ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); ++ dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index * ++ sizeof(*sg_table)); ++ } ++ ++ dpaa2_fl_set_len(out_fle, out_len); ++ ++ return edesc; ++} ++ ++static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, ++ bool encrypt) ++{ ++ struct crypto_aead *tls = crypto_aead_reqtfm(req); ++ unsigned int blocksize = crypto_aead_blocksize(tls); ++ unsigned int padsize, authsize; ++ struct caam_request *req_ctx = aead_request_ctx(req); ++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; ++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; ++ struct caam_ctx *ctx = crypto_aead_ctx(tls); ++ struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls), ++ typeof(*alg), aead); ++ struct device *dev = ctx->dev; ++ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | ++ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; ++ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; ++ struct tls_edesc *edesc; ++ dma_addr_t qm_sg_dma, iv_dma = 0; ++ int ivsize = 0; ++ int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes; ++ int in_len, out_len; ++ struct dpaa2_sg_entry *sg_table; ++ enum optype op_type = encrypt ? ENCRYPT : DECRYPT; ++ struct scatterlist *dst; ++ ++ if (encrypt) { ++ padsize = blocksize - ((req->cryptlen + ctx->authsize) % ++ blocksize); ++ authsize = ctx->authsize + padsize; ++ } else { ++ authsize = ctx->authsize; ++ } ++ ++ /* allocate space for base edesc and link tables */ ++ edesc = qi_cache_alloc(GFP_DMA | flags); ++ if (unlikely(!edesc)) { ++ dev_err(dev, "could not allocate extended descriptor\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ if (likely(req->src == req->dst)) { ++ src_nents = sg_nents_for_len(req->src, req->assoclen + ++ req->cryptlen + ++ (encrypt ? authsize : 0)); ++ if (unlikely(src_nents < 0)) { ++ dev_err(dev, "Insufficient bytes (%d) in src S/G\n", ++ req->assoclen + req->cryptlen + ++ (encrypt ? authsize : 0)); ++ qi_cache_free(edesc); ++ return ERR_PTR(src_nents); ++ } ++ ++ mapped_src_nents = dma_map_sg(dev, req->src, src_nents, ++ DMA_BIDIRECTIONAL); ++ if (unlikely(!mapped_src_nents)) { ++ dev_err(dev, "unable to map source\n"); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ dst = req->dst; ++ } else { ++ src_nents = sg_nents_for_len(req->src, req->assoclen + ++ req->cryptlen); ++ if (unlikely(src_nents < 0)) { ++ dev_err(dev, "Insufficient bytes (%d) in src S/G\n", ++ req->assoclen + req->cryptlen); ++ qi_cache_free(edesc); ++ return ERR_PTR(src_nents); ++ } ++ ++ dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen); ++ dst_nents = sg_nents_for_len(dst, req->cryptlen + ++ (encrypt ? 
authsize : 0)); ++ if (unlikely(dst_nents < 0)) { ++ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n", ++ req->cryptlen + ++ (encrypt ? authsize : 0)); ++ qi_cache_free(edesc); ++ return ERR_PTR(dst_nents); ++ } ++ ++ if (src_nents) { ++ mapped_src_nents = dma_map_sg(dev, req->src, ++ src_nents, DMA_TO_DEVICE); ++ if (unlikely(!mapped_src_nents)) { ++ dev_err(dev, "unable to map source\n"); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ } else { ++ mapped_src_nents = 0; ++ } ++ ++ mapped_dst_nents = dma_map_sg(dev, dst, dst_nents, ++ DMA_FROM_DEVICE); ++ if (unlikely(!mapped_dst_nents)) { ++ dev_err(dev, "unable to map destination\n"); ++ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ } ++ ++ ivsize = crypto_aead_ivsize(tls); ++ iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, iv_dma)) { ++ dev_err(dev, "unable to map IV\n"); ++ caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0, ++ op_type, 0, 0); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ /* ++ * Create S/G table: IV, src, dst. ++ * Input is not contiguous. ++ */ ++ qm_sg_ents = 1 + mapped_src_nents + ++ (mapped_dst_nents > 1 ? mapped_dst_nents : 0); ++ sg_table = &edesc->sgt[0]; ++ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); ++ ++ edesc->src_nents = src_nents; ++ edesc->dst_nents = dst_nents; ++ edesc->dst = dst; ++ edesc->iv_dma = iv_dma; ++ ++ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); ++ qm_sg_index = 1; ++ ++ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0); ++ qm_sg_index += mapped_src_nents; ++ ++ if (mapped_dst_nents > 1) ++ sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table + ++ qm_sg_index, 0); ++ ++ qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, qm_sg_dma)) { ++ dev_err(dev, "unable to map S/G table\n"); ++ caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma, ++ ivsize, op_type, 0, 0); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ edesc->qm_sg_dma = qm_sg_dma; ++ edesc->qm_sg_bytes = qm_sg_bytes; ++ ++ out_len = req->cryptlen + (encrypt ? 
authsize : 0); ++ in_len = ivsize + req->assoclen + req->cryptlen; ++ ++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); ++ dpaa2_fl_set_final(in_fle, true); ++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); ++ dpaa2_fl_set_addr(in_fle, qm_sg_dma); ++ dpaa2_fl_set_len(in_fle, in_len); ++ ++ if (req->dst == req->src) { ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); ++ dpaa2_fl_set_addr(out_fle, qm_sg_dma + ++ (sg_nents_for_len(req->src, req->assoclen) + ++ 1) * sizeof(*sg_table)); ++ } else if (mapped_dst_nents == 1) { ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(out_fle, sg_dma_address(dst)); ++ } else { ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); ++ dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index * ++ sizeof(*sg_table)); ++ } ++ ++ dpaa2_fl_set_len(out_fle, out_len); ++ ++ return edesc; ++} ++ ++static int tls_set_sh_desc(struct crypto_aead *tls) ++{ ++ struct caam_ctx *ctx = crypto_aead_ctx(tls); ++ unsigned int ivsize = crypto_aead_ivsize(tls); ++ unsigned int blocksize = crypto_aead_blocksize(tls); ++ struct device *dev = ctx->dev; ++ struct caam_flc *flc; ++ u32 *desc; ++ unsigned int assoclen = 13; /* always 13 bytes for TLS */ ++ unsigned int data_len[2]; ++ u32 inl_mask; ++ ++ if (!ctx->cdata.keylen || !ctx->authsize) ++ return 0; ++ ++ /* ++ * TLS 1.0 encrypt shared descriptor ++ * Job Descriptor and Shared Descriptor ++ * must fit into the 64-word Descriptor h/w Buffer ++ */ ++ data_len[0] = ctx->adata.keylen_pad; ++ data_len[1] = ctx->cdata.keylen; ++ ++ if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len, ++ &inl_mask, ARRAY_SIZE(data_len)) < 0) ++ return -EINVAL; ++ ++ if (inl_mask & 1) ++ ctx->adata.key_virt = ctx->key; ++ else ++ ctx->adata.key_dma = ctx->key_dma; ++ ++ if (inl_mask & 2) ++ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; ++ else ++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; ++ ++ ctx->adata.key_inline = !!(inl_mask & 1); ++ ctx->cdata.key_inline = !!(inl_mask & 2); ++ ++ flc = &ctx->flc[ENCRYPT]; ++ desc = flc->sh_desc; ++ ++ cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata, ++ assoclen, ivsize, ctx->authsize, blocksize); ++ ++ flc->flc[1] = desc_len(desc); ++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + ++ desc_bytes(desc), DMA_TO_DEVICE); ++ ++ if (dma_mapping_error(dev, flc->flc_dma)) { ++ dev_err(dev, "unable to map shared descriptor\n"); ++ return -ENOMEM; ++ } ++ ++ /* ++ * TLS 1.0 decrypt shared descriptor ++ * Keys do not fit inline, regardless of algorithms used ++ */ ++ ctx->adata.key_dma = ctx->key_dma; ++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; ++ ++ flc = &ctx->flc[DECRYPT]; ++ desc = flc->sh_desc; ++ ++ cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize, ++ ctx->authsize, blocksize); ++ ++ flc->flc[1] = desc_len(desc); /* SDL */ ++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + ++ desc_bytes(desc), DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, flc->flc_dma)) { ++ dev_err(dev, "unable to map shared descriptor\n"); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static int tls_setkey(struct crypto_aead *tls, const u8 *key, ++ unsigned int keylen) ++{ ++ struct caam_ctx *ctx = crypto_aead_ctx(tls); ++ struct device *dev = ctx->dev; ++ struct crypto_authenc_keys keys; ++ int ret; ++ ++ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) ++ goto badkey; ++ ++#ifdef DEBUG ++ dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n", ++ keys.authkeylen + keys.enckeylen, keys.enckeylen, ++ keys.authkeylen); ++ 
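
tls_edesc_alloc() and tls_set_sh_desc() above implement TLS 1.0 MAC-then-pad-then-encrypt: on encryption the record grows by the HMAC tag plus CBC padding out to a full cipher block, which is why the effective authsize becomes ctx->authsize + padsize. A worked sketch of the padding formula from the patch:

#include <stdio.h>

int main(void)
{
	unsigned int blocksize = 16;	/* AES-CBC */
	unsigned int cryptlen = 100;	/* plaintext bytes */
	unsigned int authsize = 20;	/* HMAC-SHA1 tag */
	unsigned int padsize = blocksize - (cryptlen + authsize) % blocksize;

	/* 100 + 20 = 120 -> pad 8 bytes to reach 128, a block multiple;
	 * an already-aligned record gets a full block of padding */
	printf("padsize=%u, output=%u\n",
	       padsize, cryptlen + authsize + padsize);
	return 0;
}
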
print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); ++#endif ++ ++ ctx->adata.keylen = split_key_len(ctx->adata.algtype & ++ OP_ALG_ALGSEL_MASK); ++ ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype & ++ OP_ALG_ALGSEL_MASK); ++ ++#ifdef DEBUG ++ dev_err(dev, "split keylen %d split keylen padded %d\n", ++ ctx->adata.keylen, ctx->adata.keylen_pad); ++ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey, ++ keys.authkeylen + keys.enckeylen, 1); ++#endif ++ ++ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) ++ goto badkey; ++ ++ ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen); ++ if (ret) ++ goto badkey; ++ ++ /* postpend encryption key to auth split key */ ++ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); ++ ++ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad + ++ keys.enckeylen, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, ctx->key_dma)) { ++ dev_err(dev, "unable to map key i/o memory\n"); ++ return -ENOMEM; ++ } ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, ++ ctx->adata.keylen_pad + keys.enckeylen, 1); ++#endif ++ ++ ctx->cdata.keylen = keys.enckeylen; ++ ++ ret = tls_set_sh_desc(tls); ++ if (ret) ++ dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad + ++ keys.enckeylen, DMA_TO_DEVICE); ++ ++ return ret; ++badkey: ++ crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN); ++ return -EINVAL; ++} ++ ++static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize) ++{ ++ struct caam_ctx *ctx = crypto_aead_ctx(tls); ++ ++ ctx->authsize = authsize; ++ tls_set_sh_desc(tls); ++ ++ return 0; ++} ++ ++static int gcm_set_sh_desc(struct crypto_aead *aead) ++{ ++ struct caam_ctx *ctx = crypto_aead_ctx(aead); ++ struct device *dev = ctx->dev; ++ unsigned int ivsize = crypto_aead_ivsize(aead); ++ struct caam_flc *flc; ++ u32 *desc; ++ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - ++ ctx->cdata.keylen; ++ ++ if (!ctx->cdata.keylen || !ctx->authsize) ++ return 0; ++ ++ /* ++ * AES GCM encrypt shared descriptor ++ * Job Descriptor and Shared Descriptor ++ * must fit into the 64-word Descriptor h/w Buffer ++ */ ++ if (rem_bytes >= DESC_QI_GCM_ENC_LEN) { ++ ctx->cdata.key_inline = true; ++ ctx->cdata.key_virt = ctx->key; ++ } else { ++ ctx->cdata.key_inline = false; ++ ctx->cdata.key_dma = ctx->key_dma; ++ } ++ ++ flc = &ctx->flc[ENCRYPT]; ++ desc = flc->sh_desc; ++ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true); ++ ++ flc->flc[1] = desc_len(desc); /* SDL */ ++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + ++ desc_bytes(desc), DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, flc->flc_dma)) { ++ dev_err(dev, "unable to map shared descriptor\n"); ++ return -ENOMEM; ++ } ++ ++ /* ++ * Job Descriptor and Shared Descriptors ++ * must all fit into the 64-word Descriptor h/w Buffer ++ */ ++ if (rem_bytes >= DESC_QI_GCM_DEC_LEN) { ++ ctx->cdata.key_inline = true; ++ ctx->cdata.key_virt = ctx->key; ++ } else { ++ ctx->cdata.key_inline = false; ++ ctx->cdata.key_dma = ctx->key_dma; ++ } ++ ++ flc = &ctx->flc[DECRYPT]; ++ desc = flc->sh_desc; ++ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true); ++ ++ flc->flc[1] = desc_len(desc); /* SDL */ ++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + ++ desc_bytes(desc), DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, 
flc->flc_dma)) { ++ dev_err(dev, "unable to map shared descriptor\n"); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) ++{ ++ struct caam_ctx *ctx = crypto_aead_ctx(authenc); ++ ++ ctx->authsize = authsize; ++ gcm_set_sh_desc(authenc); ++ ++ return 0; ++} ++ ++static int gcm_setkey(struct crypto_aead *aead, ++ const u8 *key, unsigned int keylen) ++{ ++ struct caam_ctx *ctx = crypto_aead_ctx(aead); ++ struct device *dev = ctx->dev; ++ int ret; ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); ++#endif ++ ++ memcpy(ctx->key, key, keylen); ++ ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, ctx->key_dma)) { ++ dev_err(dev, "unable to map key i/o memory\n"); ++ return -ENOMEM; ++ } ++ ctx->cdata.keylen = keylen; ++ ++ ret = gcm_set_sh_desc(aead); ++ if (ret) ++ dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen, ++ DMA_TO_DEVICE); ++ ++ return ret; ++} ++ ++static int rfc4106_set_sh_desc(struct crypto_aead *aead) ++{ ++ struct caam_ctx *ctx = crypto_aead_ctx(aead); ++ struct device *dev = ctx->dev; ++ unsigned int ivsize = crypto_aead_ivsize(aead); ++ struct caam_flc *flc; ++ u32 *desc; ++ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - ++ ctx->cdata.keylen; ++ ++ if (!ctx->cdata.keylen || !ctx->authsize) ++ return 0; ++ ++ ctx->cdata.key_virt = ctx->key; ++ ++ /* ++ * RFC4106 encrypt shared descriptor ++ * Job Descriptor and Shared Descriptor ++ * must fit into the 64-word Descriptor h/w Buffer ++ */ ++ if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) { ++ ctx->cdata.key_inline = true; ++ } else { ++ ctx->cdata.key_inline = false; ++ ctx->cdata.key_dma = ctx->key_dma; ++ } ++ ++ flc = &ctx->flc[ENCRYPT]; ++ desc = flc->sh_desc; ++ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize, ++ true); ++ ++ flc->flc[1] = desc_len(desc); /* SDL */ ++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + ++ desc_bytes(desc), DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, flc->flc_dma)) { ++ dev_err(dev, "unable to map shared descriptor\n"); ++ return -ENOMEM; ++ } ++ ++ /* ++ * Job Descriptor and Shared Descriptors ++ * must all fit into the 64-word Descriptor h/w Buffer ++ */ ++ if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) { ++ ctx->cdata.key_inline = true; ++ } else { ++ ctx->cdata.key_inline = false; ++ ctx->cdata.key_dma = ctx->key_dma; ++ } ++ ++ flc = &ctx->flc[DECRYPT]; ++ desc = flc->sh_desc; ++ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize, ++ true); ++ ++ flc->flc[1] = desc_len(desc); /* SDL */ ++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + ++ desc_bytes(desc), DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, flc->flc_dma)) { ++ dev_err(dev, "unable to map shared descriptor\n"); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static int rfc4106_setauthsize(struct crypto_aead *authenc, ++ unsigned int authsize) ++{ ++ struct caam_ctx *ctx = crypto_aead_ctx(authenc); ++ ++ ctx->authsize = authsize; ++ rfc4106_set_sh_desc(authenc); ++ ++ return 0; ++} ++ ++static int rfc4106_setkey(struct crypto_aead *aead, ++ const u8 *key, unsigned int keylen) ++{ ++ struct caam_ctx *ctx = crypto_aead_ctx(aead); ++ struct device *dev = ctx->dev; ++ int ret; ++ ++ if (keylen < 4) ++ return -EINVAL; ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); ++#endif ++ ++ 
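
The gcm/rfc4106/rfc4543 *_set_sh_desc() routines above inline the key into the shared descriptor only when it fits in the 64-word descriptor buffer alongside the job-descriptor overhead; otherwise the descriptor references it by DMA address. A sketch of that budget check, with an assumed overhead value rather than the real DESC_JOB_IO_LEN:

#include <stdbool.h>
#include <stdio.h>

#define DESC_BYTES_MAX	(64 * 4)	/* 64-word descriptor buffer */
#define JOB_IO_LEN	40		/* assumed overhead */

static bool key_inline(unsigned int keylen, unsigned int shdesc_len)
{
	int rem_bytes = DESC_BYTES_MAX - JOB_IO_LEN - (int)keylen;

	return rem_bytes >= (int)shdesc_len;	/* inline iff key fits */
}

int main(void)
{
	printf("32B key, 150B shdesc -> inline=%d\n", key_inline(32, 150));
	printf("32B key, 200B shdesc -> inline=%d\n", key_inline(32, 200));
	return 0;
}
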
memcpy(ctx->key, key, keylen); ++ /* ++ * The last four bytes of the key material are used as the salt value ++ * in the nonce. Update the AES key length. ++ */ ++ ctx->cdata.keylen = keylen - 4; ++ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, ctx->key_dma)) { ++ dev_err(dev, "unable to map key i/o memory\n"); ++ return -ENOMEM; ++ } ++ ++ ret = rfc4106_set_sh_desc(aead); ++ if (ret) ++ dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen, ++ DMA_TO_DEVICE); ++ ++ return ret; ++} ++ ++static int rfc4543_set_sh_desc(struct crypto_aead *aead) ++{ ++ struct caam_ctx *ctx = crypto_aead_ctx(aead); ++ struct device *dev = ctx->dev; ++ unsigned int ivsize = crypto_aead_ivsize(aead); ++ struct caam_flc *flc; ++ u32 *desc; ++ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - ++ ctx->cdata.keylen; ++ ++ if (!ctx->cdata.keylen || !ctx->authsize) ++ return 0; ++ ++ ctx->cdata.key_virt = ctx->key; ++ ++ /* ++ * RFC4543 encrypt shared descriptor ++ * Job Descriptor and Shared Descriptor ++ * must fit into the 64-word Descriptor h/w Buffer ++ */ ++ if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) { ++ ctx->cdata.key_inline = true; ++ } else { ++ ctx->cdata.key_inline = false; ++ ctx->cdata.key_dma = ctx->key_dma; ++ } ++ ++ flc = &ctx->flc[ENCRYPT]; ++ desc = flc->sh_desc; ++ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize, ++ true); ++ ++ flc->flc[1] = desc_len(desc); /* SDL */ ++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + ++ desc_bytes(desc), DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, flc->flc_dma)) { ++ dev_err(dev, "unable to map shared descriptor\n"); ++ return -ENOMEM; ++ } ++ ++ /* ++ * Job Descriptor and Shared Descriptors ++ * must all fit into the 64-word Descriptor h/w Buffer ++ */ ++ if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) { ++ ctx->cdata.key_inline = true; ++ } else { ++ ctx->cdata.key_inline = false; ++ ctx->cdata.key_dma = ctx->key_dma; ++ } ++ ++ flc = &ctx->flc[DECRYPT]; ++ desc = flc->sh_desc; ++ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize, ++ true); ++ ++ flc->flc[1] = desc_len(desc); /* SDL */ ++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + ++ desc_bytes(desc), DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, flc->flc_dma)) { ++ dev_err(dev, "unable to map shared descriptor\n"); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static int rfc4543_setauthsize(struct crypto_aead *authenc, ++ unsigned int authsize) ++{ ++ struct caam_ctx *ctx = crypto_aead_ctx(authenc); ++ ++ ctx->authsize = authsize; ++ rfc4543_set_sh_desc(authenc); ++ ++ return 0; ++} ++ ++static int rfc4543_setkey(struct crypto_aead *aead, ++ const u8 *key, unsigned int keylen) ++{ ++ struct caam_ctx *ctx = crypto_aead_ctx(aead); ++ struct device *dev = ctx->dev; ++ int ret; ++ ++ if (keylen < 4) ++ return -EINVAL; ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); ++#endif ++ ++ memcpy(ctx->key, key, keylen); ++ /* ++ * The last four bytes of the key material are used as the salt value ++ * in the nonce. Update the AES key length. 
++ */ ++ ctx->cdata.keylen = keylen - 4; ++ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, ctx->key_dma)) { ++ dev_err(dev, "unable to map key i/o memory\n"); ++ return -ENOMEM; ++ } ++ ++ ret = rfc4543_set_sh_desc(aead); ++ if (ret) ++ dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen, ++ DMA_TO_DEVICE); ++ ++ return ret; ++} ++ ++static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, ++ const u8 *key, unsigned int keylen) ++{ ++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); ++ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher); ++ const char *alg_name = crypto_tfm_alg_name(tfm); ++ struct device *dev = ctx->dev; ++ struct caam_flc *flc; ++ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher); ++ u32 *desc; ++ u32 ctx1_iv_off = 0; ++ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == ++ OP_ALG_AAI_CTR_MOD128); ++ const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686")); ++ ++ memcpy(ctx->key, key, keylen); ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); ++#endif ++ /* ++ * AES-CTR needs to load IV in CONTEXT1 reg ++ * at an offset of 128bits (16bytes) ++ * CONTEXT1[255:128] = IV ++ */ ++ if (ctr_mode) ++ ctx1_iv_off = 16; ++ ++ /* ++ * RFC3686 specific: ++ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} ++ * | *key = {KEY, NONCE} ++ */ ++ if (is_rfc3686) { ++ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; ++ keylen -= CTR_RFC3686_NONCE_SIZE; ++ } ++ ++ ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, ctx->key_dma)) { ++ dev_err(dev, "unable to map key i/o memory\n"); ++ return -ENOMEM; ++ } ++ ctx->cdata.keylen = keylen; ++ ctx->cdata.key_virt = ctx->key; ++ ctx->cdata.key_inline = true; ++ ++ /* ablkcipher_encrypt shared descriptor */ ++ flc = &ctx->flc[ENCRYPT]; ++ desc = flc->sh_desc; ++ ++ cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, ++ is_rfc3686, ctx1_iv_off); ++ ++ flc->flc[1] = desc_len(desc); /* SDL */ ++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + ++ desc_bytes(desc), DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, flc->flc_dma)) { ++ dev_err(dev, "unable to map shared descriptor\n"); ++ return -ENOMEM; ++ } ++ ++ /* ablkcipher_decrypt shared descriptor */ ++ flc = &ctx->flc[DECRYPT]; ++ desc = flc->sh_desc; ++ ++ cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, ++ is_rfc3686, ctx1_iv_off); ++ ++ flc->flc[1] = desc_len(desc); /* SDL */ ++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + ++ desc_bytes(desc), DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, flc->flc_dma)) { ++ dev_err(dev, "unable to map shared descriptor\n"); ++ return -ENOMEM; ++ } ++ ++ /* ablkcipher_givencrypt shared descriptor */ ++ flc = &ctx->flc[GIVENCRYPT]; ++ desc = flc->sh_desc; ++ ++ cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ++ ivsize, is_rfc3686, ctx1_iv_off); ++ ++ flc->flc[1] = desc_len(desc); /* SDL */ ++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + ++ desc_bytes(desc), DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, flc->flc_dma)) { ++ dev_err(dev, "unable to map shared descriptor\n"); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, ++ const u8 *key, unsigned int keylen) ++{ ++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); ++ struct device *dev = ctx->dev; ++ struct caam_flc *flc; ++ u32 *desc; ++ ++ if 
(keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { ++ dev_err(dev, "key size mismatch\n"); ++ crypto_ablkcipher_set_flags(ablkcipher, ++ CRYPTO_TFM_RES_BAD_KEY_LEN); ++ return -EINVAL; ++ } ++ ++ memcpy(ctx->key, key, keylen); ++ ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, ctx->key_dma)) { ++ dev_err(dev, "unable to map key i/o memory\n"); ++ return -ENOMEM; ++ } ++ ctx->cdata.keylen = keylen; ++ ctx->cdata.key_virt = ctx->key; ++ ctx->cdata.key_inline = true; ++ ++ /* xts_ablkcipher_encrypt shared descriptor */ ++ flc = &ctx->flc[ENCRYPT]; ++ desc = flc->sh_desc; ++ cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata); ++ ++ flc->flc[1] = desc_len(desc); /* SDL */ ++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + ++ desc_bytes(desc), DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, flc->flc_dma)) { ++ dev_err(dev, "unable to map shared descriptor\n"); ++ return -ENOMEM; ++ } ++ ++ /* xts_ablkcipher_decrypt shared descriptor */ ++ flc = &ctx->flc[DECRYPT]; ++ desc = flc->sh_desc; ++ ++ cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata); ++ ++ flc->flc[1] = desc_len(desc); /* SDL */ ++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + ++ desc_bytes(desc), DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, flc->flc_dma)) { ++ dev_err(dev, "unable to map shared descriptor\n"); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request ++ *req, bool encrypt) ++{ ++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); ++ struct caam_request *req_ctx = ablkcipher_request_ctx(req); ++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; ++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; ++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); ++ struct device *dev = ctx->dev; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? ++ GFP_KERNEL : GFP_ATOMIC; ++ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; ++ struct ablkcipher_edesc *edesc; ++ dma_addr_t iv_dma; ++ bool in_contig; ++ int ivsize = crypto_ablkcipher_ivsize(ablkcipher); ++ int dst_sg_idx, qm_sg_ents; ++ struct dpaa2_sg_entry *sg_table; ++ enum optype op_type = encrypt ? 
ENCRYPT : DECRYPT; ++ ++ src_nents = sg_nents_for_len(req->src, req->nbytes); ++ if (unlikely(src_nents < 0)) { ++ dev_err(dev, "Insufficient bytes (%d) in src S/G\n", ++ req->nbytes); ++ return ERR_PTR(src_nents); ++ } ++ ++ if (unlikely(req->dst != req->src)) { ++ dst_nents = sg_nents_for_len(req->dst, req->nbytes); ++ if (unlikely(dst_nents < 0)) { ++ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n", ++ req->nbytes); ++ return ERR_PTR(dst_nents); ++ } ++ ++ mapped_src_nents = dma_map_sg(dev, req->src, src_nents, ++ DMA_TO_DEVICE); ++ if (unlikely(!mapped_src_nents)) { ++ dev_err(dev, "unable to map source\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents, ++ DMA_FROM_DEVICE); ++ if (unlikely(!mapped_dst_nents)) { ++ dev_err(dev, "unable to map destination\n"); ++ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE); ++ return ERR_PTR(-ENOMEM); ++ } ++ } else { ++ mapped_src_nents = dma_map_sg(dev, req->src, src_nents, ++ DMA_BIDIRECTIONAL); ++ if (unlikely(!mapped_src_nents)) { ++ dev_err(dev, "unable to map source\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ } ++ ++ iv_dma = dma_map_single(dev, req->info, ivsize, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, iv_dma)) { ++ dev_err(dev, "unable to map IV\n"); ++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, ++ 0, 0, 0, 0); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ if (mapped_src_nents == 1 && ++ iv_dma + ivsize == sg_dma_address(req->src)) { ++ in_contig = true; ++ qm_sg_ents = 0; ++ } else { ++ in_contig = false; ++ qm_sg_ents = 1 + mapped_src_nents; ++ } ++ dst_sg_idx = qm_sg_ents; ++ ++ qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; ++ if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { ++ dev_err(dev, "Insufficient S/G entries: %d > %lu\n", ++ qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); ++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, op_type, 0, 0); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ /* allocate space for base edesc and link tables */ ++ edesc = qi_cache_alloc(GFP_DMA | flags); ++ if (unlikely(!edesc)) { ++ dev_err(dev, "could not allocate extended descriptor\n"); ++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, op_type, 0, 0); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ edesc->src_nents = src_nents; ++ edesc->dst_nents = dst_nents; ++ edesc->iv_dma = iv_dma; ++ sg_table = &edesc->sgt[0]; ++ edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); ++ ++ if (!in_contig) { ++ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); ++ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0); ++ } ++ ++ if (mapped_dst_nents > 1) ++ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + ++ dst_sg_idx, 0); ++ ++ edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, edesc->qm_sg_dma)) { ++ dev_err(dev, "unable to map S/G table\n"); ++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, op_type, 0, 0); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); ++ dpaa2_fl_set_final(in_fle, true); ++ dpaa2_fl_set_len(in_fle, req->nbytes + ivsize); ++ dpaa2_fl_set_len(out_fle, req->nbytes); ++ ++ if (!in_contig) { ++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); ++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); ++ } else { ++ dpaa2_fl_set_format(in_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(in_fle, iv_dma); ++ } ++ ++ if (req->src == req->dst) { ++ if (!in_contig) { ++ 
dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); ++ dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + ++ sizeof(*sg_table)); ++ } else { ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src)); ++ } ++ } else if (mapped_dst_nents > 1) { ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); ++ dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx * ++ sizeof(*sg_table)); ++ } else { ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst)); ++ } ++ ++ return edesc; ++} ++ ++static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( ++ struct skcipher_givcrypt_request *greq) ++{ ++ struct ablkcipher_request *req = &greq->creq; ++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); ++ struct caam_request *req_ctx = ablkcipher_request_ctx(req); ++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; ++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; ++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); ++ struct device *dev = ctx->dev; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? ++ GFP_KERNEL : GFP_ATOMIC; ++ int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents; ++ struct ablkcipher_edesc *edesc; ++ dma_addr_t iv_dma; ++ bool out_contig; ++ int ivsize = crypto_ablkcipher_ivsize(ablkcipher); ++ struct dpaa2_sg_entry *sg_table; ++ int dst_sg_idx, qm_sg_ents; ++ ++ src_nents = sg_nents_for_len(req->src, req->nbytes); ++ if (unlikely(src_nents < 0)) { ++ dev_err(dev, "Insufficient bytes (%d) in src S/G\n", ++ req->nbytes); ++ return ERR_PTR(src_nents); ++ } ++ ++ if (unlikely(req->dst != req->src)) { ++ dst_nents = sg_nents_for_len(req->dst, req->nbytes); ++ if (unlikely(dst_nents < 0)) { ++ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n", ++ req->nbytes); ++ return ERR_PTR(dst_nents); ++ } ++ ++ mapped_src_nents = dma_map_sg(dev, req->src, src_nents, ++ DMA_TO_DEVICE); ++ if (unlikely(!mapped_src_nents)) { ++ dev_err(dev, "unable to map source\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents, ++ DMA_FROM_DEVICE); ++ if (unlikely(!mapped_dst_nents)) { ++ dev_err(dev, "unable to map destination\n"); ++ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE); ++ return ERR_PTR(-ENOMEM); ++ } ++ } else { ++ mapped_src_nents = dma_map_sg(dev, req->src, src_nents, ++ DMA_BIDIRECTIONAL); ++ if (unlikely(!mapped_src_nents)) { ++ dev_err(dev, "unable to map source\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ dst_nents = src_nents; ++ mapped_dst_nents = src_nents; ++ } ++ ++ iv_dma = dma_map_single(dev, greq->giv, ivsize, DMA_FROM_DEVICE); ++ if (dma_mapping_error(dev, iv_dma)) { ++ dev_err(dev, "unable to map IV\n"); ++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, ++ 0, 0, 0, 0); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ qm_sg_ents = mapped_src_nents > 1 ? 
mapped_src_nents : 0; ++ dst_sg_idx = qm_sg_ents; ++ if (mapped_dst_nents == 1 && ++ iv_dma + ivsize == sg_dma_address(req->dst)) { ++ out_contig = true; ++ } else { ++ out_contig = false; ++ qm_sg_ents += 1 + mapped_dst_nents; ++ } ++ ++ if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { ++ dev_err(dev, "Insufficient S/G entries: %d > %lu\n", ++ qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); ++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, GIVENCRYPT, 0, 0); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ /* allocate space for base edesc and link tables */ ++ edesc = qi_cache_alloc(GFP_DMA | flags); ++ if (!edesc) { ++ dev_err(dev, "could not allocate extended descriptor\n"); ++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, GIVENCRYPT, 0, 0); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ edesc->src_nents = src_nents; ++ edesc->dst_nents = dst_nents; ++ edesc->iv_dma = iv_dma; ++ sg_table = &edesc->sgt[0]; ++ edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); ++ ++ if (mapped_src_nents > 1) ++ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0); ++ ++ if (!out_contig) { ++ dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0); ++ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + ++ dst_sg_idx + 1, 0); ++ } ++ ++ edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, edesc->qm_sg_dma)) { ++ dev_err(dev, "unable to map S/G table\n"); ++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, ++ iv_dma, ivsize, GIVENCRYPT, 0, 0); ++ qi_cache_free(edesc); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); ++ dpaa2_fl_set_final(in_fle, true); ++ dpaa2_fl_set_len(in_fle, req->nbytes); ++ dpaa2_fl_set_len(out_fle, ivsize + req->nbytes); ++ ++ if (mapped_src_nents > 1) { ++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); ++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); ++ } else { ++ dpaa2_fl_set_format(in_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src)); ++ } ++ ++ if (!out_contig) { ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); ++ dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx * ++ sizeof(*sg_table)); ++ } else { ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst)); ++ } ++ ++ return edesc; ++} ++ ++static void aead_unmap(struct device *dev, struct aead_edesc *edesc, ++ struct aead_request *req) ++{ ++ struct crypto_aead *aead = crypto_aead_reqtfm(req); ++ int ivsize = crypto_aead_ivsize(aead); ++ struct caam_request *caam_req = aead_request_ctx(req); ++ ++ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, ++ edesc->iv_dma, ivsize, caam_req->op_type, ++ edesc->qm_sg_dma, edesc->qm_sg_bytes); ++ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); ++} ++ ++static void tls_unmap(struct device *dev, struct tls_edesc *edesc, ++ struct aead_request *req) ++{ ++ struct crypto_aead *tls = crypto_aead_reqtfm(req); ++ int ivsize = crypto_aead_ivsize(tls); ++ struct caam_request *caam_req = aead_request_ctx(req); ++ ++ caam_unmap(dev, req->src, edesc->dst, edesc->src_nents, ++ edesc->dst_nents, edesc->iv_dma, ivsize, caam_req->op_type, ++ edesc->qm_sg_dma, edesc->qm_sg_bytes); ++} ++ ++static void ablkcipher_unmap(struct device *dev, ++ struct ablkcipher_edesc *edesc, ++ struct ablkcipher_request *req) ++{ ++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); ++ int ivsize = 
crypto_ablkcipher_ivsize(ablkcipher); ++ struct caam_request *caam_req = ablkcipher_request_ctx(req); ++ ++ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, ++ edesc->iv_dma, ivsize, caam_req->op_type, ++ edesc->qm_sg_dma, edesc->qm_sg_bytes); ++} ++ ++static void aead_encrypt_done(void *cbk_ctx, u32 status) ++{ ++ struct crypto_async_request *areq = cbk_ctx; ++ struct aead_request *req = container_of(areq, struct aead_request, ++ base); ++ struct caam_request *req_ctx = to_caam_req(areq); ++ struct aead_edesc *edesc = req_ctx->edesc; ++ struct crypto_aead *aead = crypto_aead_reqtfm(req); ++ struct caam_ctx *ctx = crypto_aead_ctx(aead); ++ int ecode = 0; ++ ++#ifdef DEBUG ++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); ++#endif ++ ++ if (unlikely(status)) { ++ caam_qi2_strstatus(ctx->dev, status); ++ ecode = -EIO; ++ } ++ ++ aead_unmap(ctx->dev, edesc, req); ++ qi_cache_free(edesc); ++ aead_request_complete(req, ecode); ++} ++ ++static void aead_decrypt_done(void *cbk_ctx, u32 status) ++{ ++ struct crypto_async_request *areq = cbk_ctx; ++ struct aead_request *req = container_of(areq, struct aead_request, ++ base); ++ struct caam_request *req_ctx = to_caam_req(areq); ++ struct aead_edesc *edesc = req_ctx->edesc; ++ struct crypto_aead *aead = crypto_aead_reqtfm(req); ++ struct caam_ctx *ctx = crypto_aead_ctx(aead); ++ int ecode = 0; ++ ++#ifdef DEBUG ++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); ++#endif ++ ++ if (unlikely(status)) { ++ caam_qi2_strstatus(ctx->dev, status); ++ /* ++ * verify hw auth check passed else return -EBADMSG ++ */ ++ if ((status & JRSTA_CCBERR_ERRID_MASK) == ++ JRSTA_CCBERR_ERRID_ICVCHK) ++ ecode = -EBADMSG; ++ else ++ ecode = -EIO; ++ } ++ ++ aead_unmap(ctx->dev, edesc, req); ++ qi_cache_free(edesc); ++ aead_request_complete(req, ecode); ++} ++ ++static int aead_encrypt(struct aead_request *req) ++{ ++ struct aead_edesc *edesc; ++ struct crypto_aead *aead = crypto_aead_reqtfm(req); ++ struct caam_ctx *ctx = crypto_aead_ctx(aead); ++ struct caam_request *caam_req = aead_request_ctx(req); ++ int ret; ++ ++ /* allocate extended descriptor */ ++ edesc = aead_edesc_alloc(req, true); ++ if (IS_ERR(edesc)) ++ return PTR_ERR(edesc); ++ ++ caam_req->flc = &ctx->flc[ENCRYPT]; ++ caam_req->op_type = ENCRYPT; ++ caam_req->cbk = aead_encrypt_done; ++ caam_req->ctx = &req->base; ++ caam_req->edesc = edesc; ++ ret = dpaa2_caam_enqueue(ctx->dev, caam_req); ++ if (ret != -EINPROGRESS && ++ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { ++ aead_unmap(ctx->dev, edesc, req); ++ qi_cache_free(edesc); ++ } ++ ++ return ret; ++} ++ ++static int aead_decrypt(struct aead_request *req) ++{ ++ struct aead_edesc *edesc; ++ struct crypto_aead *aead = crypto_aead_reqtfm(req); ++ struct caam_ctx *ctx = crypto_aead_ctx(aead); ++ struct caam_request *caam_req = aead_request_ctx(req); ++ int ret; ++ ++ /* allocate extended descriptor */ ++ edesc = aead_edesc_alloc(req, false); ++ if (IS_ERR(edesc)) ++ return PTR_ERR(edesc); ++ ++ caam_req->flc = &ctx->flc[DECRYPT]; ++ caam_req->op_type = DECRYPT; ++ caam_req->cbk = aead_decrypt_done; ++ caam_req->ctx = &req->base; ++ caam_req->edesc = edesc; ++ ret = dpaa2_caam_enqueue(ctx->dev, caam_req); ++ if (ret != -EINPROGRESS && ++ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { ++ aead_unmap(ctx->dev, edesc, req); ++ qi_cache_free(edesc); ++ } ++ ++ return ret; ++} ++ ++static void tls_encrypt_done(void *cbk_ctx, u32 status) ++{ ++ struct 
crypto_async_request *areq = cbk_ctx; ++ struct aead_request *req = container_of(areq, struct aead_request, ++ base); ++ struct caam_request *req_ctx = to_caam_req(areq); ++ struct tls_edesc *edesc = req_ctx->edesc; ++ struct crypto_aead *tls = crypto_aead_reqtfm(req); ++ struct caam_ctx *ctx = crypto_aead_ctx(tls); ++ int ecode = 0; ++ ++#ifdef DEBUG ++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); ++#endif ++ ++ if (unlikely(status)) { ++ caam_qi2_strstatus(ctx->dev, status); ++ ecode = -EIO; ++ } ++ ++ tls_unmap(ctx->dev, edesc, req); ++ qi_cache_free(edesc); ++ aead_request_complete(req, ecode); ++} ++ ++static void tls_decrypt_done(void *cbk_ctx, u32 status) ++{ ++ struct crypto_async_request *areq = cbk_ctx; ++ struct aead_request *req = container_of(areq, struct aead_request, ++ base); ++ struct caam_request *req_ctx = to_caam_req(areq); ++ struct tls_edesc *edesc = req_ctx->edesc; ++ struct crypto_aead *tls = crypto_aead_reqtfm(req); ++ struct caam_ctx *ctx = crypto_aead_ctx(tls); ++ int ecode = 0; ++ ++#ifdef DEBUG ++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); ++#endif ++ ++ if (unlikely(status)) { ++ caam_qi2_strstatus(ctx->dev, status); ++ /* ++ * verify hw auth check passed else return -EBADMSG ++ */ ++ if ((status & JRSTA_CCBERR_ERRID_MASK) == ++ JRSTA_CCBERR_ERRID_ICVCHK) ++ ecode = -EBADMSG; ++ else ++ ecode = -EIO; ++ } ++ ++ tls_unmap(ctx->dev, edesc, req); ++ qi_cache_free(edesc); ++ aead_request_complete(req, ecode); ++} ++ ++static int tls_encrypt(struct aead_request *req) ++{ ++ struct tls_edesc *edesc; ++ struct crypto_aead *tls = crypto_aead_reqtfm(req); ++ struct caam_ctx *ctx = crypto_aead_ctx(tls); ++ struct caam_request *caam_req = aead_request_ctx(req); ++ int ret; ++ ++ /* allocate extended descriptor */ ++ edesc = tls_edesc_alloc(req, true); ++ if (IS_ERR(edesc)) ++ return PTR_ERR(edesc); ++ ++ caam_req->flc = &ctx->flc[ENCRYPT]; ++ caam_req->op_type = ENCRYPT; ++ caam_req->cbk = tls_encrypt_done; ++ caam_req->ctx = &req->base; ++ caam_req->edesc = edesc; ++ ret = dpaa2_caam_enqueue(ctx->dev, caam_req); ++ if (ret != -EINPROGRESS && ++ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { ++ tls_unmap(ctx->dev, edesc, req); ++ qi_cache_free(edesc); ++ } ++ ++ return ret; ++} ++ ++static int tls_decrypt(struct aead_request *req) ++{ ++ struct tls_edesc *edesc; ++ struct crypto_aead *tls = crypto_aead_reqtfm(req); ++ struct caam_ctx *ctx = crypto_aead_ctx(tls); ++ struct caam_request *caam_req = aead_request_ctx(req); ++ int ret; ++ ++ /* allocate extended descriptor */ ++ edesc = tls_edesc_alloc(req, false); ++ if (IS_ERR(edesc)) ++ return PTR_ERR(edesc); ++ ++ caam_req->flc = &ctx->flc[DECRYPT]; ++ caam_req->op_type = DECRYPT; ++ caam_req->cbk = tls_decrypt_done; ++ caam_req->ctx = &req->base; ++ caam_req->edesc = edesc; ++ ret = dpaa2_caam_enqueue(ctx->dev, caam_req); ++ if (ret != -EINPROGRESS && ++ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { ++ tls_unmap(ctx->dev, edesc, req); ++ qi_cache_free(edesc); ++ } ++ ++ return ret; ++} ++ ++static int ipsec_gcm_encrypt(struct aead_request *req) ++{ ++ if (req->assoclen < 8) ++ return -EINVAL; ++ ++ return aead_encrypt(req); ++} ++ ++static int ipsec_gcm_decrypt(struct aead_request *req) ++{ ++ if (req->assoclen < 8) ++ return -EINVAL; ++ ++ return aead_decrypt(req); ++} ++ ++static void ablkcipher_done(void *cbk_ctx, u32 status) ++{ ++ struct crypto_async_request *areq = cbk_ctx; ++ struct ablkcipher_request *req = 
ablkcipher_request_cast(areq); ++ struct caam_request *req_ctx = to_caam_req(areq); ++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); ++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); ++ struct ablkcipher_edesc *edesc = req_ctx->edesc; ++ int ecode = 0; ++ int ivsize = crypto_ablkcipher_ivsize(ablkcipher); ++ ++#ifdef DEBUG ++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); ++#endif ++ ++ if (unlikely(status)) { ++ caam_qi2_strstatus(ctx->dev, status); ++ ecode = -EIO; ++ } ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, req->info, ++ edesc->src_nents > 1 ? 100 : ivsize, 1); ++ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, req->dst, ++ edesc->dst_nents > 1 ? 100 : req->nbytes, 1); ++#endif ++ ++ ablkcipher_unmap(ctx->dev, edesc, req); ++ qi_cache_free(edesc); ++ ++ /* ++ * The crypto API expects us to set the IV (req->info) to the last ++ * ciphertext block. This is used e.g. by the CTS mode. ++ */ ++ scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize, ++ ivsize, 0); ++ ++ ablkcipher_request_complete(req, ecode); ++} ++ ++static int ablkcipher_encrypt(struct ablkcipher_request *req) ++{ ++ struct ablkcipher_edesc *edesc; ++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); ++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); ++ struct caam_request *caam_req = ablkcipher_request_ctx(req); ++ int ret; ++ ++ /* allocate extended descriptor */ ++ edesc = ablkcipher_edesc_alloc(req, true); ++ if (IS_ERR(edesc)) ++ return PTR_ERR(edesc); ++ ++ caam_req->flc = &ctx->flc[ENCRYPT]; ++ caam_req->op_type = ENCRYPT; ++ caam_req->cbk = ablkcipher_done; ++ caam_req->ctx = &req->base; ++ caam_req->edesc = edesc; ++ ret = dpaa2_caam_enqueue(ctx->dev, caam_req); ++ if (ret != -EINPROGRESS && ++ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { ++ ablkcipher_unmap(ctx->dev, edesc, req); ++ qi_cache_free(edesc); ++ } ++ ++ return ret; ++} ++ ++static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *greq) ++{ ++ struct ablkcipher_request *req = &greq->creq; ++ struct ablkcipher_edesc *edesc; ++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); ++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); ++ struct caam_request *caam_req = ablkcipher_request_ctx(req); ++ int ret; ++ ++ /* allocate extended descriptor */ ++ edesc = ablkcipher_giv_edesc_alloc(greq); ++ if (IS_ERR(edesc)) ++ return PTR_ERR(edesc); ++ ++ caam_req->flc = &ctx->flc[GIVENCRYPT]; ++ caam_req->op_type = GIVENCRYPT; ++ caam_req->cbk = ablkcipher_done; ++ caam_req->ctx = &req->base; ++ caam_req->edesc = edesc; ++ ret = dpaa2_caam_enqueue(ctx->dev, caam_req); ++ if (ret != -EINPROGRESS && ++ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { ++ ablkcipher_unmap(ctx->dev, edesc, req); ++ qi_cache_free(edesc); ++ } ++ ++ return ret; ++} ++ ++static int ablkcipher_decrypt(struct ablkcipher_request *req) ++{ ++ struct ablkcipher_edesc *edesc; ++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); ++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); ++ struct caam_request *caam_req = ablkcipher_request_ctx(req); ++ int ret; ++ ++ /* allocate extended descriptor */ ++ edesc = ablkcipher_edesc_alloc(req, false); ++ if (IS_ERR(edesc)) ++ return PTR_ERR(edesc); ++ ++ caam_req->flc = &ctx->flc[DECRYPT]; ++ caam_req->op_type = DECRYPT; ++ caam_req->cbk = 
ablkcipher_done; ++ caam_req->ctx = &req->base; ++ caam_req->edesc = edesc; ++ ret = dpaa2_caam_enqueue(ctx->dev, caam_req); ++ if (ret != -EINPROGRESS && ++ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { ++ ablkcipher_unmap(ctx->dev, edesc, req); ++ qi_cache_free(edesc); ++ } ++ ++ return ret; ++} ++ ++struct caam_crypto_alg { ++ struct list_head entry; ++ struct crypto_alg crypto_alg; ++ struct caam_alg_entry caam; ++}; ++ ++static int caam_cra_init(struct crypto_tfm *tfm) ++{ ++ struct crypto_alg *alg = tfm->__crt_alg; ++ struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg), ++ crypto_alg); ++ struct caam_ctx *ctx = crypto_tfm_ctx(tfm); ++ ++ /* copy descriptor header template value */ ++ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | ++ caam_alg->caam.class1_alg_type; ++ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | ++ caam_alg->caam.class2_alg_type; ++ ++ ctx->dev = caam_alg->caam.dev; ++ ++ return 0; ++} ++ ++static int caam_cra_init_ablkcipher(struct crypto_tfm *tfm) ++{ ++ struct ablkcipher_tfm *ablkcipher_tfm = ++ crypto_ablkcipher_crt(__crypto_ablkcipher_cast(tfm)); ++ ++ ablkcipher_tfm->reqsize = sizeof(struct caam_request); ++ return caam_cra_init(tfm); ++} ++ ++static int caam_cra_init_aead(struct crypto_aead *tfm) ++{ ++ crypto_aead_set_reqsize(tfm, sizeof(struct caam_request)); ++ return caam_cra_init(crypto_aead_tfm(tfm)); ++} ++ ++static void caam_exit_common(struct caam_ctx *ctx) ++{ ++ int i; ++ ++ for (i = 0; i < NUM_OP; i++) { ++ if (!ctx->flc[i].flc_dma) ++ continue; ++ dma_unmap_single(ctx->dev, ctx->flc[i].flc_dma, ++ sizeof(ctx->flc[i].flc) + ++ desc_bytes(ctx->flc[i].sh_desc), ++ DMA_TO_DEVICE); ++ } ++ ++ if (ctx->key_dma) ++ dma_unmap_single(ctx->dev, ctx->key_dma, ++ ctx->cdata.keylen + ctx->adata.keylen_pad, ++ DMA_TO_DEVICE); ++} ++ ++static void caam_cra_exit(struct crypto_tfm *tfm) ++{ ++ caam_exit_common(crypto_tfm_ctx(tfm)); ++} ++ ++static void caam_cra_exit_aead(struct crypto_aead *tfm) ++{ ++ caam_exit_common(crypto_aead_ctx(tfm)); ++} ++ ++#define template_ablkcipher template_u.ablkcipher ++struct caam_alg_template { ++ char name[CRYPTO_MAX_ALG_NAME]; ++ char driver_name[CRYPTO_MAX_ALG_NAME]; ++ unsigned int blocksize; ++ u32 type; ++ union { ++ struct ablkcipher_alg ablkcipher; ++ } template_u; ++ u32 class1_alg_type; ++ u32 class2_alg_type; ++}; ++ ++static struct caam_alg_template driver_algs[] = { ++ /* ablkcipher descriptor */ ++ { ++ .name = "cbc(aes)", ++ .driver_name = "cbc-aes-caam-qi2", ++ .blocksize = AES_BLOCK_SIZE, ++ .type = CRYPTO_ALG_TYPE_GIVCIPHER, ++ .template_ablkcipher = { ++ .setkey = ablkcipher_setkey, ++ .encrypt = ablkcipher_encrypt, ++ .decrypt = ablkcipher_decrypt, ++ .givencrypt = ablkcipher_givencrypt, ++ .geniv = "", ++ .min_keysize = AES_MIN_KEY_SIZE, ++ .max_keysize = AES_MAX_KEY_SIZE, ++ .ivsize = AES_BLOCK_SIZE, ++ }, ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ }, ++ { ++ .name = "cbc(des3_ede)", ++ .driver_name = "cbc-3des-caam-qi2", ++ .blocksize = DES3_EDE_BLOCK_SIZE, ++ .type = CRYPTO_ALG_TYPE_GIVCIPHER, ++ .template_ablkcipher = { ++ .setkey = ablkcipher_setkey, ++ .encrypt = ablkcipher_encrypt, ++ .decrypt = ablkcipher_decrypt, ++ .givencrypt = ablkcipher_givencrypt, ++ .geniv = "", ++ .min_keysize = DES3_EDE_KEY_SIZE, ++ .max_keysize = DES3_EDE_KEY_SIZE, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ }, ++ { ++ .name = "cbc(des)", ++ .driver_name = "cbc-des-caam-qi2", ++ .blocksize = DES_BLOCK_SIZE, ++ .type = 
CRYPTO_ALG_TYPE_GIVCIPHER, ++ .template_ablkcipher = { ++ .setkey = ablkcipher_setkey, ++ .encrypt = ablkcipher_encrypt, ++ .decrypt = ablkcipher_decrypt, ++ .givencrypt = ablkcipher_givencrypt, ++ .geniv = "", ++ .min_keysize = DES_KEY_SIZE, ++ .max_keysize = DES_KEY_SIZE, ++ .ivsize = DES_BLOCK_SIZE, ++ }, ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ }, ++ { ++ .name = "ctr(aes)", ++ .driver_name = "ctr-aes-caam-qi2", ++ .blocksize = 1, ++ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, ++ .template_ablkcipher = { ++ .setkey = ablkcipher_setkey, ++ .encrypt = ablkcipher_encrypt, ++ .decrypt = ablkcipher_decrypt, ++ .geniv = "chainiv", ++ .min_keysize = AES_MIN_KEY_SIZE, ++ .max_keysize = AES_MAX_KEY_SIZE, ++ .ivsize = AES_BLOCK_SIZE, ++ }, ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, ++ }, ++ { ++ .name = "rfc3686(ctr(aes))", ++ .driver_name = "rfc3686-ctr-aes-caam-qi2", ++ .blocksize = 1, ++ .type = CRYPTO_ALG_TYPE_GIVCIPHER, ++ .template_ablkcipher = { ++ .setkey = ablkcipher_setkey, ++ .encrypt = ablkcipher_encrypt, ++ .decrypt = ablkcipher_decrypt, ++ .givencrypt = ablkcipher_givencrypt, ++ .geniv = "", ++ .min_keysize = AES_MIN_KEY_SIZE + ++ CTR_RFC3686_NONCE_SIZE, ++ .max_keysize = AES_MAX_KEY_SIZE + ++ CTR_RFC3686_NONCE_SIZE, ++ .ivsize = CTR_RFC3686_IV_SIZE, ++ }, ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, ++ }, ++ { ++ .name = "xts(aes)", ++ .driver_name = "xts-aes-caam-qi2", ++ .blocksize = AES_BLOCK_SIZE, ++ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, ++ .template_ablkcipher = { ++ .setkey = xts_ablkcipher_setkey, ++ .encrypt = ablkcipher_encrypt, ++ .decrypt = ablkcipher_decrypt, ++ .geniv = "eseqiv", ++ .min_keysize = 2 * AES_MIN_KEY_SIZE, ++ .max_keysize = 2 * AES_MAX_KEY_SIZE, ++ .ivsize = AES_BLOCK_SIZE, ++ }, ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, ++ } ++}; ++ ++static struct caam_aead_alg driver_aeads[] = { ++ { ++ .aead = { ++ .base = { ++ .cra_name = "rfc4106(gcm(aes))", ++ .cra_driver_name = "rfc4106-gcm-aes-caam-qi2", ++ .cra_blocksize = 1, ++ }, ++ .setkey = rfc4106_setkey, ++ .setauthsize = rfc4106_setauthsize, ++ .encrypt = ipsec_gcm_encrypt, ++ .decrypt = ipsec_gcm_decrypt, ++ .ivsize = 8, ++ .maxauthsize = AES_BLOCK_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "rfc4543(gcm(aes))", ++ .cra_driver_name = "rfc4543-gcm-aes-caam-qi2", ++ .cra_blocksize = 1, ++ }, ++ .setkey = rfc4543_setkey, ++ .setauthsize = rfc4543_setauthsize, ++ .encrypt = ipsec_gcm_encrypt, ++ .decrypt = ipsec_gcm_decrypt, ++ .ivsize = 8, ++ .maxauthsize = AES_BLOCK_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, ++ }, ++ }, ++ /* Galois Counter Mode */ ++ { ++ .aead = { ++ .base = { ++ .cra_name = "gcm(aes)", ++ .cra_driver_name = "gcm-aes-caam-qi2", ++ .cra_blocksize = 1, ++ }, ++ .setkey = gcm_setkey, ++ .setauthsize = gcm_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = 12, ++ .maxauthsize = AES_BLOCK_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, ++ } ++ }, ++ /* single-pass ipsec_esp descriptor */ ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(md5),cbc(aes))", ++ .cra_driver_name = "authenc-hmac-md5-" ++ "cbc-aes-caam-qi2", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ 
.maxauthsize = MD5_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_MD5 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(md5)," ++ "cbc(aes)))", ++ .cra_driver_name = "echainiv-authenc-hmac-md5-" ++ "cbc-aes-caam-qi2", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = MD5_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_MD5 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha1),cbc(aes))", ++ .cra_driver_name = "authenc-hmac-sha1-" ++ "cbc-aes-caam-qi2", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA1_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha1)," ++ "cbc(aes)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha1-cbc-aes-caam-qi2", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA1_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha224),cbc(aes))", ++ .cra_driver_name = "authenc-hmac-sha224-" ++ "cbc-aes-caam-qi2", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA224_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha224)," ++ "cbc(aes)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha224-cbc-aes-caam-qi2", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA224_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha256),cbc(aes))", ++ .cra_driver_name = "authenc-hmac-sha256-" ++ "cbc-aes-caam-qi2", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA256_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = 
"echainiv(authenc(hmac(sha256)," ++ "cbc(aes)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha256-cbc-aes-" ++ "caam-qi2", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA256_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha384),cbc(aes))", ++ .cra_driver_name = "authenc-hmac-sha384-" ++ "cbc-aes-caam-qi2", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA384_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha384)," ++ "cbc(aes)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha384-cbc-aes-" ++ "caam-qi2", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA384_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha512),cbc(aes))", ++ .cra_driver_name = "authenc-hmac-sha512-" ++ "cbc-aes-caam-qi2", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA512_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha512)," ++ "cbc(aes)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha512-cbc-aes-" ++ "caam-qi2", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA512_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(md5),cbc(des3_ede))", ++ .cra_driver_name = "authenc-hmac-md5-" ++ "cbc-des3_ede-caam-qi2", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = MD5_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_MD5 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(md5)," ++ "cbc(des3_ede)))", ++ .cra_driver_name = "echainiv-authenc-hmac-md5-" ++ "cbc-des3_ede-caam-qi2", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ 
.setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = MD5_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_MD5 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha1)," ++ "cbc(des3_ede))", ++ .cra_driver_name = "authenc-hmac-sha1-" ++ "cbc-des3_ede-caam-qi2", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = SHA1_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha1)," ++ "cbc(des3_ede)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha1-" ++ "cbc-des3_ede-caam-qi2", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = SHA1_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha224)," ++ "cbc(des3_ede))", ++ .cra_driver_name = "authenc-hmac-sha224-" ++ "cbc-des3_ede-caam-qi2", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = SHA224_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha224)," ++ "cbc(des3_ede)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha224-" ++ "cbc-des3_ede-caam-qi2", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = SHA224_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha256)," ++ "cbc(des3_ede))", ++ .cra_driver_name = "authenc-hmac-sha256-" ++ "cbc-des3_ede-caam-qi2", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = SHA256_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha256)," ++ "cbc(des3_ede)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha256-" ++ "cbc-des3_ede-caam-qi2", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ 
.encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = SHA256_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha384)," ++ "cbc(des3_ede))", ++ .cra_driver_name = "authenc-hmac-sha384-" ++ "cbc-des3_ede-caam-qi2", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = SHA384_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha384)," ++ "cbc(des3_ede)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha384-" ++ "cbc-des3_ede-caam-qi2", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = SHA384_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha512)," ++ "cbc(des3_ede))", ++ .cra_driver_name = "authenc-hmac-sha512-" ++ "cbc-des3_ede-caam-qi2", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = SHA512_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha512)," ++ "cbc(des3_ede)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha512-" ++ "cbc-des3_ede-caam-qi2", ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES3_EDE_BLOCK_SIZE, ++ .maxauthsize = SHA512_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(md5),cbc(des))", ++ .cra_driver_name = "authenc-hmac-md5-" ++ "cbc-des-caam-qi2", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = MD5_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_MD5 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(md5)," ++ "cbc(des)))", ++ .cra_driver_name = "echainiv-authenc-hmac-md5-" ++ "cbc-des-caam-qi2", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = 
MD5_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_MD5 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha1),cbc(des))", ++ .cra_driver_name = "authenc-hmac-sha1-" ++ "cbc-des-caam-qi2", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = SHA1_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha1)," ++ "cbc(des)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha1-cbc-des-caam-qi2", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = SHA1_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha224),cbc(des))", ++ .cra_driver_name = "authenc-hmac-sha224-" ++ "cbc-des-caam-qi2", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = SHA224_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha224)," ++ "cbc(des)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha224-cbc-des-" ++ "caam-qi2", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = SHA224_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha256),cbc(des))", ++ .cra_driver_name = "authenc-hmac-sha256-" ++ "cbc-des-caam-qi2", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = SHA256_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha256)," ++ "cbc(des)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha256-cbc-des-" ++ "caam-qi2", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = SHA256_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ }, ++ }, ++ { ++ .aead =
{ ++ .base = { ++ .cra_name = "authenc(hmac(sha384),cbc(des))", ++ .cra_driver_name = "authenc-hmac-sha384-" ++ "cbc-des-caam-qi2", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = SHA384_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha384)," ++ "cbc(des)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha384-cbc-des-" ++ "caam-qi2", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = SHA384_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha512),cbc(des))", ++ .cra_driver_name = "authenc-hmac-sha512-" ++ "cbc-des-caam-qi2", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = SHA512_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "echainiv(authenc(hmac(sha512)," ++ "cbc(des)))", ++ .cra_driver_name = "echainiv-authenc-" ++ "hmac-sha512-cbc-des-" ++ "caam-qi2", ++ .cra_blocksize = DES_BLOCK_SIZE, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = DES_BLOCK_SIZE, ++ .maxauthsize = SHA512_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .geniv = true, ++ } ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(md5)," ++ "rfc3686(ctr(aes)))", ++ .cra_driver_name = "authenc-hmac-md5-" ++ "rfc3686-ctr-aes-caam-qi2", ++ .cra_blocksize = 1, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = CTR_RFC3686_IV_SIZE, ++ .maxauthsize = MD5_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | ++ OP_ALG_AAI_CTR_MOD128, ++ .class2_alg_type = OP_ALG_ALGSEL_MD5 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .rfc3686 = true, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "seqiv(authenc(" ++ "hmac(md5),rfc3686(ctr(aes))))", ++ .cra_driver_name = "seqiv-authenc-hmac-md5-" ++ "rfc3686-ctr-aes-caam-qi2", ++ .cra_blocksize = 1, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = CTR_RFC3686_IV_SIZE, ++ .maxauthsize = MD5_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | ++ OP_ALG_AAI_CTR_MOD128, ++ .class2_alg_type = OP_ALG_ALGSEL_MD5 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .rfc3686 = true, ++ .geniv = true, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha1)," ++ "rfc3686(ctr(aes)))", ++ .cra_driver_name = "authenc-hmac-sha1-" ++ 
"rfc3686-ctr-aes-caam-qi2", ++ .cra_blocksize = 1, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = CTR_RFC3686_IV_SIZE, ++ .maxauthsize = SHA1_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | ++ OP_ALG_AAI_CTR_MOD128, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .rfc3686 = true, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "seqiv(authenc(" ++ "hmac(sha1),rfc3686(ctr(aes))))", ++ .cra_driver_name = "seqiv-authenc-hmac-sha1-" ++ "rfc3686-ctr-aes-caam-qi2", ++ .cra_blocksize = 1, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = CTR_RFC3686_IV_SIZE, ++ .maxauthsize = SHA1_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | ++ OP_ALG_AAI_CTR_MOD128, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .rfc3686 = true, ++ .geniv = true, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha224)," ++ "rfc3686(ctr(aes)))", ++ .cra_driver_name = "authenc-hmac-sha224-" ++ "rfc3686-ctr-aes-caam-qi2", ++ .cra_blocksize = 1, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = CTR_RFC3686_IV_SIZE, ++ .maxauthsize = SHA224_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | ++ OP_ALG_AAI_CTR_MOD128, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .rfc3686 = true, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "seqiv(authenc(" ++ "hmac(sha224),rfc3686(ctr(aes))))", ++ .cra_driver_name = "seqiv-authenc-hmac-sha224-" ++ "rfc3686-ctr-aes-caam-qi2", ++ .cra_blocksize = 1, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = CTR_RFC3686_IV_SIZE, ++ .maxauthsize = SHA224_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | ++ OP_ALG_AAI_CTR_MOD128, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .rfc3686 = true, ++ .geniv = true, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha256)," ++ "rfc3686(ctr(aes)))", ++ .cra_driver_name = "authenc-hmac-sha256-" ++ "rfc3686-ctr-aes-caam-qi2", ++ .cra_blocksize = 1, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = CTR_RFC3686_IV_SIZE, ++ .maxauthsize = SHA256_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | ++ OP_ALG_AAI_CTR_MOD128, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .rfc3686 = true, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "seqiv(authenc(hmac(sha256)," ++ "rfc3686(ctr(aes))))", ++ .cra_driver_name = "seqiv-authenc-hmac-sha256-" ++ "rfc3686-ctr-aes-caam-qi2", ++ .cra_blocksize = 1, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = CTR_RFC3686_IV_SIZE, ++ .maxauthsize = SHA256_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | ++ OP_ALG_AAI_CTR_MOD128, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .rfc3686 = true, ++ .geniv = true, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha384)," ++ "rfc3686(ctr(aes)))", ++ .cra_driver_name = 
"authenc-hmac-sha384-" ++ "rfc3686-ctr-aes-caam-qi2", ++ .cra_blocksize = 1, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = CTR_RFC3686_IV_SIZE, ++ .maxauthsize = SHA384_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | ++ OP_ALG_AAI_CTR_MOD128, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .rfc3686 = true, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "seqiv(authenc(hmac(sha384)," ++ "rfc3686(ctr(aes))))", ++ .cra_driver_name = "seqiv-authenc-hmac-sha384-" ++ "rfc3686-ctr-aes-caam-qi2", ++ .cra_blocksize = 1, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = CTR_RFC3686_IV_SIZE, ++ .maxauthsize = SHA384_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | ++ OP_ALG_AAI_CTR_MOD128, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .rfc3686 = true, ++ .geniv = true, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "authenc(hmac(sha512)," ++ "rfc3686(ctr(aes)))", ++ .cra_driver_name = "authenc-hmac-sha512-" ++ "rfc3686-ctr-aes-caam-qi2", ++ .cra_blocksize = 1, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = CTR_RFC3686_IV_SIZE, ++ .maxauthsize = SHA512_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | ++ OP_ALG_AAI_CTR_MOD128, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .rfc3686 = true, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "seqiv(authenc(hmac(sha512)," ++ "rfc3686(ctr(aes))))", ++ .cra_driver_name = "seqiv-authenc-hmac-sha512-" ++ "rfc3686-ctr-aes-caam-qi2", ++ .cra_blocksize = 1, ++ }, ++ .setkey = aead_setkey, ++ .setauthsize = aead_setauthsize, ++ .encrypt = aead_encrypt, ++ .decrypt = aead_decrypt, ++ .ivsize = CTR_RFC3686_IV_SIZE, ++ .maxauthsize = SHA512_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | ++ OP_ALG_AAI_CTR_MOD128, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ .rfc3686 = true, ++ .geniv = true, ++ }, ++ }, ++ { ++ .aead = { ++ .base = { ++ .cra_name = "tls10(hmac(sha1),cbc(aes))", ++ .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2", ++ .cra_blocksize = AES_BLOCK_SIZE, ++ }, ++ .setkey = tls_setkey, ++ .setauthsize = tls_setauthsize, ++ .encrypt = tls_encrypt, ++ .decrypt = tls_decrypt, ++ .ivsize = AES_BLOCK_SIZE, ++ .maxauthsize = SHA1_DIGEST_SIZE, ++ }, ++ .caam = { ++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, ++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | ++ OP_ALG_AAI_HMAC_PRECOMP, ++ }, ++ }, ++}; ++ ++static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template ++ *template) ++{ ++ struct caam_crypto_alg *t_alg; ++ struct crypto_alg *alg; ++ ++ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); ++ if (!t_alg) ++ return ERR_PTR(-ENOMEM); ++ ++ alg = &t_alg->crypto_alg; ++ ++ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); ++ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", ++ template->driver_name); ++ alg->cra_module = THIS_MODULE; ++ alg->cra_exit = caam_cra_exit; ++ alg->cra_priority = CAAM_CRA_PRIORITY; ++ alg->cra_blocksize = template->blocksize; ++ alg->cra_alignmask = 0; ++ alg->cra_ctxsize = sizeof(struct caam_ctx); ++ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY | ++ template->type; 
++ switch (template->type) { ++ case CRYPTO_ALG_TYPE_GIVCIPHER: ++ alg->cra_init = caam_cra_init_ablkcipher; ++ alg->cra_type = &crypto_givcipher_type; ++ alg->cra_ablkcipher = template->template_ablkcipher; ++ break; ++ case CRYPTO_ALG_TYPE_ABLKCIPHER: ++ alg->cra_init = caam_cra_init_ablkcipher; ++ alg->cra_type = &crypto_ablkcipher_type; ++ alg->cra_ablkcipher = template->template_ablkcipher; ++ break; ++ } ++ ++ t_alg->caam.class1_alg_type = template->class1_alg_type; ++ t_alg->caam.class2_alg_type = template->class2_alg_type; ++ ++ return t_alg; ++} ++ ++static void caam_aead_alg_init(struct caam_aead_alg *t_alg) ++{ ++ struct aead_alg *alg = &t_alg->aead; ++ ++ alg->base.cra_module = THIS_MODULE; ++ alg->base.cra_priority = CAAM_CRA_PRIORITY; ++ alg->base.cra_ctxsize = sizeof(struct caam_ctx); ++ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; ++ ++ alg->init = caam_cra_init_aead; ++ alg->exit = caam_cra_exit_aead; ++} ++ ++static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx) ++{ ++ struct dpaa2_caam_priv_per_cpu *ppriv; ++ ++ ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx); ++ napi_schedule_irqoff(&ppriv->napi); ++} ++ ++static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv) ++{ ++ struct device *dev = priv->dev; ++ struct dpaa2_io_notification_ctx *nctx; ++ struct dpaa2_caam_priv_per_cpu *ppriv; ++ int err, i = 0, cpu; ++ ++ for_each_online_cpu(cpu) { ++ ppriv = per_cpu_ptr(priv->ppriv, cpu); ++ ppriv->priv = priv; ++ nctx = &ppriv->nctx; ++ nctx->is_cdan = 0; ++ nctx->id = ppriv->rsp_fqid; ++ nctx->desired_cpu = cpu; ++ nctx->cb = dpaa2_caam_fqdan_cb; ++ ++ /* Register notification callbacks */ ++ err = dpaa2_io_service_register(NULL, nctx); ++ if (unlikely(err)) { ++ dev_err(dev, "notification register failed\n"); ++ nctx->cb = NULL; ++ goto err; ++ } ++ ++ ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE, ++ dev); ++ if (unlikely(!ppriv->store)) { ++ dev_err(dev, "dpaa2_io_store_create() failed\n"); ++ goto err; ++ } ++ ++ if (++i == priv->num_pairs) ++ break; ++ } ++ ++ return 0; ++ ++err: ++ for_each_online_cpu(cpu) { ++ ppriv = per_cpu_ptr(priv->ppriv, cpu); ++ if (!ppriv->nctx.cb) ++ break; ++ dpaa2_io_service_deregister(NULL, &ppriv->nctx); ++ } ++ ++ for_each_online_cpu(cpu) { ++ ppriv = per_cpu_ptr(priv->ppriv, cpu); ++ if (!ppriv->store) ++ break; ++ dpaa2_io_store_destroy(ppriv->store); ++ } ++ ++ return err; ++} ++ ++static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv) ++{ ++ struct dpaa2_caam_priv_per_cpu *ppriv; ++ int i = 0, cpu; ++ ++ for_each_online_cpu(cpu) { ++ ppriv = per_cpu_ptr(priv->ppriv, cpu); ++ dpaa2_io_service_deregister(NULL, &ppriv->nctx); ++ dpaa2_io_store_destroy(ppriv->store); ++ ++ if (++i == priv->num_pairs) ++ return; ++ } ++} ++ ++static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv) ++{ ++ struct dpseci_rx_queue_cfg rx_queue_cfg; ++ struct device *dev = priv->dev; ++ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); ++ struct dpaa2_caam_priv_per_cpu *ppriv; ++ int err = 0, i = 0, cpu; ++ ++ /* Configure Rx queues */ ++ for_each_online_cpu(cpu) { ++ ppriv = per_cpu_ptr(priv->ppriv, cpu); ++ ++ rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST | ++ DPSECI_QUEUE_OPT_USER_CTX; ++ rx_queue_cfg.order_preservation_en = 0; ++ rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO; ++ rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id; ++ /* ++ * Rx priority (WQ) doesn't really matter, since we use ++ * pull mode, i.e. 
volatile dequeues from specific FQs ++ */ ++ rx_queue_cfg.dest_cfg.priority = 0; ++ rx_queue_cfg.user_ctx = ppriv->nctx.qman64; ++ ++ err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i, ++ &rx_queue_cfg); ++ if (err) { ++ dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n", ++ err); ++ return err; ++ } ++ ++ if (++i == priv->num_pairs) ++ break; ++ } ++ ++ return err; ++} ++ ++static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv) ++{ ++ struct device *dev = priv->dev; ++ ++ if (!priv->cscn_mem) ++ return; ++ ++ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); ++ kfree(priv->cscn_mem); ++} ++ ++static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv) ++{ ++ struct device *dev = priv->dev; ++ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); ++ ++ dpaa2_dpseci_congestion_free(priv); ++ dpseci_close(priv->mc_io, 0, ls_dev->mc_handle); ++} ++ ++static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv, ++ const struct dpaa2_fd *fd) ++{ ++ struct caam_request *req; ++ u32 fd_err; ++ ++ if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) { ++ dev_err(priv->dev, "Only Frame List FD format is supported!\n"); ++ return; ++ } ++ ++ fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK; ++ if (unlikely(fd_err)) ++ dev_err(priv->dev, "FD error: %08x\n", fd_err); ++ ++ /* ++ * FD[ADDR] is guaranteed to be valid, irrespective of errors reported ++ * in FD[ERR] or FD[FRC]. ++ */ ++ req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd)); ++ dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt), ++ DMA_BIDIRECTIONAL); ++ req->cbk(req->ctx, dpaa2_fd_get_frc(fd)); ++} ++ ++static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv) ++{ ++ int err; ++ ++ /* Retry while portal is busy */ ++ do { ++ err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid, ++ ppriv->store); ++ } while (err == -EBUSY); ++ ++ if (unlikely(err)) ++ dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err); ++ ++ return err; ++} ++ ++static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv) ++{ ++ struct dpaa2_dq *dq; ++ int cleaned = 0, is_last; ++ ++ do { ++ dq = dpaa2_io_store_next(ppriv->store, &is_last); ++ if (unlikely(!dq)) { ++ if (unlikely(!is_last)) { ++ dev_dbg(ppriv->priv->dev, ++ "FQ %d returned no valid frames\n", ++ ppriv->rsp_fqid); ++ /* ++ * MUST retry until we get some sort of ++ * valid response token (be it "empty dequeue" ++ * or a valid frame). 
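++ * QMan fills the store asynchronously, so a NULL entry with
++ * is_last not set only means the next response has not landed
++ * yet; it does not mean the FQ is drained.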
++ */ ++ continue; ++ } ++ break; ++ } ++ ++ /* Process FD */ ++ dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq)); ++ cleaned++; ++ } while (!is_last); ++ ++ return cleaned; ++} ++ ++static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget) ++{ ++ struct dpaa2_caam_priv_per_cpu *ppriv; ++ struct dpaa2_caam_priv *priv; ++ int err, cleaned = 0, store_cleaned; ++ ++ ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi); ++ priv = ppriv->priv; ++ ++ if (unlikely(dpaa2_caam_pull_fq(ppriv))) ++ return 0; ++ ++ do { ++ store_cleaned = dpaa2_caam_store_consume(ppriv); ++ cleaned += store_cleaned; ++ ++ if (store_cleaned == 0 || ++ cleaned > budget - DPAA2_CAAM_STORE_SIZE) ++ break; ++ ++ /* Try to dequeue some more */ ++ err = dpaa2_caam_pull_fq(ppriv); ++ if (unlikely(err)) ++ break; ++ } while (1); ++ ++ if (cleaned < budget) { ++ napi_complete_done(napi, cleaned); ++ err = dpaa2_io_service_rearm(NULL, &ppriv->nctx); ++ if (unlikely(err)) ++ dev_err(priv->dev, "Notification rearm failed: %d\n", ++ err); ++ } ++ ++ return cleaned; ++} ++ ++static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv, ++ u16 token) ++{ ++ struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 }; ++ struct device *dev = priv->dev; ++ int err; ++ ++ /* ++ * Congestion group feature supported starting with DPSECI API v5.1 ++ * and only when object has been created with this capability. ++ */ ++ if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) || ++ !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG)) ++ return 0; ++ ++ priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN, ++ GFP_KERNEL | GFP_DMA); ++ if (!priv->cscn_mem) ++ return -ENOMEM; ++ ++ priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN); ++ priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned, ++ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); ++ if (dma_mapping_error(dev, priv->cscn_dma)) { ++ dev_err(dev, "Error mapping CSCN memory area\n"); ++ err = -ENOMEM; ++ goto err_dma_map; ++ } ++ ++ cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES; ++ cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH; ++ cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH; ++ cong_notif_cfg.message_ctx = (u64)priv; ++ cong_notif_cfg.message_iova = priv->cscn_dma; ++ cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER | ++ DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT | ++ DPSECI_CGN_MODE_COHERENT_WRITE; ++ ++ err = dpseci_set_congestion_notification(priv->mc_io, 0, token, ++ &cong_notif_cfg); ++ if (err) { ++ dev_err(dev, "dpseci_set_congestion_notification failed\n"); ++ goto err_set_cong; ++ } ++ ++ return 0; ++ ++err_set_cong: ++ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); ++err_dma_map: ++ kfree(priv->cscn_mem); ++ ++ return err; ++} ++ ++static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) ++{ ++ struct device *dev = &ls_dev->dev; ++ struct dpaa2_caam_priv *priv; ++ struct dpaa2_caam_priv_per_cpu *ppriv; ++ int err, cpu; ++ u8 i; ++ ++ priv = dev_get_drvdata(dev); ++ ++ priv->dev = dev; ++ priv->dpsec_id = ls_dev->obj_desc.id; ++ ++ /* Get a handle for the DPSECI this interface is associate with */ ++ err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dpsec_open() failed: %d\n", err); ++ goto err_open; ++ } ++ ++ dev_info(dev, "Opened dpseci object successfully\n"); ++ ++ err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver, ++ &priv->minor_ver); ++ if (err) { ++ 
dev_err(dev, "dpseci_get_api_version() failed\n"); ++ goto err_get_vers; ++ } ++ ++ err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle, ++ &priv->dpseci_attr); ++ if (err) { ++ dev_err(dev, "dpseci_get_attributes() failed\n"); ++ goto err_get_vers; ++ } ++ ++ err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle, ++ &priv->sec_attr); ++ if (err) { ++ dev_err(dev, "dpseci_get_sec_attr() failed\n"); ++ goto err_get_vers; ++ } ++ ++ err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "setup_congestion() failed\n"); ++ goto err_get_vers; ++ } ++ ++ priv->num_pairs = min(priv->dpseci_attr.num_rx_queues, ++ priv->dpseci_attr.num_tx_queues); ++ if (priv->num_pairs > num_online_cpus()) { ++ dev_warn(dev, "%d queues won't be used\n", ++ priv->num_pairs - num_online_cpus()); ++ priv->num_pairs = num_online_cpus(); ++ } ++ ++ for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) { ++ err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i, ++ &priv->rx_queue_attr[i]); ++ if (err) { ++ dev_err(dev, "dpseci_get_rx_queue() failed\n"); ++ goto err_get_rx_queue; ++ } ++ } ++ ++ for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) { ++ err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i, ++ &priv->tx_queue_attr[i]); ++ if (err) { ++ dev_err(dev, "dpseci_get_tx_queue() failed\n"); ++ goto err_get_rx_queue; ++ } ++ } ++ ++ i = 0; ++ for_each_online_cpu(cpu) { ++ dev_info(dev, "prio %d: rx queue %d, tx queue %d\n", i, ++ priv->rx_queue_attr[i].fqid, ++ priv->tx_queue_attr[i].fqid); ++ ++ ppriv = per_cpu_ptr(priv->ppriv, cpu); ++ ppriv->req_fqid = priv->tx_queue_attr[i].fqid; ++ ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid; ++ ppriv->prio = i; ++ ++ ppriv->net_dev.dev = *dev; ++ INIT_LIST_HEAD(&ppriv->net_dev.napi_list); ++ netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll, ++ DPAA2_CAAM_NAPI_WEIGHT); ++ if (++i == priv->num_pairs) ++ break; ++ } ++ ++ return 0; ++ ++err_get_rx_queue: ++ dpaa2_dpseci_congestion_free(priv); ++err_get_vers: ++ dpseci_close(priv->mc_io, 0, ls_dev->mc_handle); ++err_open: ++ return err; ++} ++ ++static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv) ++{ ++ struct device *dev = priv->dev; ++ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); ++ struct dpaa2_caam_priv_per_cpu *ppriv; ++ int err, i; ++ ++ for (i = 0; i < priv->num_pairs; i++) { ++ ppriv = per_cpu_ptr(priv->ppriv, i); ++ napi_enable(&ppriv->napi); ++ } ++ ++ err = dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dpseci_enable() failed\n"); ++ return err; ++ } ++ ++ dev_info(dev, "DPSECI version %d.%d\n", ++ priv->major_ver, ++ priv->minor_ver); ++ ++ return 0; ++} ++ ++static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv) ++{ ++ struct device *dev = priv->dev; ++ struct dpaa2_caam_priv_per_cpu *ppriv; ++ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); ++ int i, err = 0, enabled; ++ ++ err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dpseci_disable() failed\n"); ++ return err; ++ } ++ ++ err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled); ++ if (err) { ++ dev_err(dev, "dpseci_is_enabled() failed\n"); ++ return err; ++ } ++ ++ dev_dbg(dev, "disable: %s\n", enabled ? 
"false" : "true"); ++ ++ for (i = 0; i < priv->num_pairs; i++) { ++ ppriv = per_cpu_ptr(priv->ppriv, i); ++ napi_disable(&ppriv->napi); ++ netif_napi_del(&ppriv->napi); ++ } ++ ++ return 0; ++} ++ ++static struct list_head alg_list; ++ ++static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev) ++{ ++ struct device *dev; ++ struct dpaa2_caam_priv *priv; ++ int i, err = 0; ++ bool registered = false; ++ ++ /* ++ * There is no way to get CAAM endianness - there is no direct register ++ * space access and MC f/w does not provide this attribute. ++ * All DPAA2-based SoCs have little endian CAAM, thus hard-code this ++ * property. ++ */ ++ caam_little_end = true; ++ ++ caam_imx = false; ++ ++ dev = &dpseci_dev->dev; ++ ++ priv = kzalloc(sizeof(*priv), GFP_KERNEL); ++ if (!priv) ++ return -ENOMEM; ++ ++ dev_set_drvdata(dev, priv); ++ ++ priv->domain = iommu_get_domain_for_dev(dev); ++ ++ qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE, ++ 0, SLAB_CACHE_DMA, NULL); ++ if (!qi_cache) { ++ dev_err(dev, "Can't allocate SEC cache\n"); ++ err = -ENOMEM; ++ goto err_qicache; ++ } ++ ++ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49)); ++ if (err) { ++ dev_err(dev, "dma_set_mask_and_coherent() failed\n"); ++ goto err_dma_mask; ++ } ++ ++ /* Obtain a MC portal */ ++ err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io); ++ if (err) { ++ dev_err(dev, "MC portal allocation failed\n"); ++ goto err_dma_mask; ++ } ++ ++ priv->ppriv = alloc_percpu(*priv->ppriv); ++ if (!priv->ppriv) { ++ dev_err(dev, "alloc_percpu() failed\n"); ++ goto err_alloc_ppriv; ++ } ++ ++ /* DPSECI initialization */ ++ err = dpaa2_dpseci_setup(dpseci_dev); ++ if (err < 0) { ++ dev_err(dev, "dpaa2_dpseci_setup() failed\n"); ++ goto err_dpseci_setup; ++ } ++ ++ /* DPIO */ ++ err = dpaa2_dpseci_dpio_setup(priv); ++ if (err) { ++ dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n"); ++ goto err_dpio_setup; ++ } ++ ++ /* DPSECI binding to DPIO */ ++ err = dpaa2_dpseci_bind(priv); ++ if (err) { ++ dev_err(dev, "dpaa2_dpseci_bind() failed\n"); ++ goto err_bind; ++ } ++ ++ /* DPSECI enable */ ++ err = dpaa2_dpseci_enable(priv); ++ if (err) { ++ dev_err(dev, "dpaa2_dpseci_enable() failed"); ++ goto err_bind; ++ } ++ ++ /* register crypto algorithms the device supports */ ++ INIT_LIST_HEAD(&alg_list); ++ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { ++ struct caam_crypto_alg *t_alg; ++ struct caam_alg_template *alg = driver_algs + i; ++ u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK; ++ ++ /* Skip DES algorithms if not supported by device */ ++ if (!priv->sec_attr.des_acc_num && ++ ((alg_sel == OP_ALG_ALGSEL_3DES) || ++ (alg_sel == OP_ALG_ALGSEL_DES))) ++ continue; ++ ++ /* Skip AES algorithms if not supported by device */ ++ if (!priv->sec_attr.aes_acc_num && ++ (alg_sel == OP_ALG_ALGSEL_AES)) ++ continue; ++ ++ t_alg = caam_alg_alloc(alg); ++ if (IS_ERR(t_alg)) { ++ err = PTR_ERR(t_alg); ++ dev_warn(dev, "%s alg allocation failed: %d\n", ++ alg->driver_name, err); ++ continue; ++ } ++ t_alg->caam.dev = dev; ++ ++ err = crypto_register_alg(&t_alg->crypto_alg); ++ if (err) { ++ dev_warn(dev, "%s alg registration failed: %d\n", ++ t_alg->crypto_alg.cra_driver_name, err); ++ kfree(t_alg); ++ continue; ++ } ++ ++ list_add_tail(&t_alg->entry, &alg_list); ++ registered = true; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { ++ struct caam_aead_alg *t_alg = driver_aeads + i; ++ u32 c1_alg_sel = t_alg->caam.class1_alg_type & ++ OP_ALG_ALGSEL_MASK; ++ u32 c2_alg_sel = t_alg->caam.class2_alg_type 
& ++ OP_ALG_ALGSEL_MASK; ++ ++ /* Skip DES algorithms if not supported by device */ ++ if (!priv->sec_attr.des_acc_num && ++ ((c1_alg_sel == OP_ALG_ALGSEL_3DES) || ++ (c1_alg_sel == OP_ALG_ALGSEL_DES))) ++ continue; ++ ++ /* Skip AES algorithms if not supported by device */ ++ if (!priv->sec_attr.aes_acc_num && ++ (c1_alg_sel == OP_ALG_ALGSEL_AES)) ++ continue; ++ ++ /* ++ * Skip algorithms requiring message digests ++ * if MD not supported by device. ++ */ ++ if (!priv->sec_attr.md_acc_num && c2_alg_sel) ++ continue; ++ ++ t_alg->caam.dev = dev; ++ caam_aead_alg_init(t_alg); ++ ++ err = crypto_register_aead(&t_alg->aead); ++ if (err) { ++ dev_warn(dev, "%s alg registration failed: %d\n", ++ t_alg->aead.base.cra_driver_name, err); ++ continue; ++ } ++ ++ t_alg->registered = true; ++ registered = true; ++ } ++ if (registered) ++ dev_info(dev, "algorithms registered in /proc/crypto\n"); ++ ++ return err; ++ ++err_bind: ++ dpaa2_dpseci_dpio_free(priv); ++err_dpio_setup: ++ dpaa2_dpseci_free(priv); ++err_dpseci_setup: ++ free_percpu(priv->ppriv); ++err_alloc_ppriv: ++ fsl_mc_portal_free(priv->mc_io); ++err_dma_mask: ++ kmem_cache_destroy(qi_cache); ++err_qicache: ++ dev_set_drvdata(dev, NULL); ++ ++ return err; ++} ++ ++static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev) ++{ ++ struct device *dev; ++ struct dpaa2_caam_priv *priv; ++ int i; ++ ++ dev = &ls_dev->dev; ++ priv = dev_get_drvdata(dev); ++ ++ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { ++ struct caam_aead_alg *t_alg = driver_aeads + i; ++ ++ if (t_alg->registered) ++ crypto_unregister_aead(&t_alg->aead); ++ } ++ ++ if (alg_list.next) { ++ struct caam_crypto_alg *t_alg, *n; ++ ++ list_for_each_entry_safe(t_alg, n, &alg_list, entry) { ++ crypto_unregister_alg(&t_alg->crypto_alg); ++ list_del(&t_alg->entry); ++ kfree(t_alg); ++ } ++ } ++ ++ dpaa2_dpseci_disable(priv); ++ dpaa2_dpseci_dpio_free(priv); ++ dpaa2_dpseci_free(priv); ++ free_percpu(priv->ppriv); ++ fsl_mc_portal_free(priv->mc_io); ++ dev_set_drvdata(dev, NULL); ++ kmem_cache_destroy(qi_cache); ++ ++ return 0; ++} ++ ++int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req) ++{ ++ struct dpaa2_fd fd; ++ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev); ++ int err = 0, i, id; ++ ++ if (IS_ERR(req)) ++ return PTR_ERR(req); ++ ++ if (priv->cscn_mem) { ++ dma_sync_single_for_cpu(priv->dev, priv->cscn_dma, ++ DPAA2_CSCN_SIZE, ++ DMA_FROM_DEVICE); ++ if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) { ++ dev_dbg_ratelimited(dev, "Dropping request\n"); ++ return -EBUSY; ++ } ++ } ++ ++ dpaa2_fl_set_flc(&req->fd_flt[1], req->flc->flc_dma); ++ ++ req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt), ++ DMA_BIDIRECTIONAL); ++ if (dma_mapping_error(dev, req->fd_flt_dma)) { ++ dev_err(dev, "DMA mapping error for QI enqueue request\n"); ++ goto err_out; ++ } ++ ++ memset(&fd, 0, sizeof(fd)); ++ dpaa2_fd_set_format(&fd, dpaa2_fd_list); ++ dpaa2_fd_set_addr(&fd, req->fd_flt_dma); ++ dpaa2_fd_set_len(&fd, req->fd_flt[1].len); ++ dpaa2_fd_set_flc(&fd, req->flc->flc_dma); ++ ++ /* ++ * There is no guarantee that preemption is disabled here, ++ * thus take action. 
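++ * smp_processor_id() is only meaningful while preemption is off,
++ * and the Tx FQ derived from it must stay stable across the
++ * enqueue retry loop below.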
++ */ ++ preempt_disable(); ++ id = smp_processor_id() % priv->dpseci_attr.num_tx_queues; ++ for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) { ++ err = dpaa2_io_service_enqueue_fq(NULL, ++ priv->tx_queue_attr[id].fqid, ++ &fd); ++ if (err != -EBUSY) ++ break; ++ } ++ preempt_enable(); ++ ++ if (unlikely(err < 0)) { ++ dev_err(dev, "Error enqueuing frame: %d\n", err); ++ goto err_out; ++ } ++ ++ return -EINPROGRESS; ++ ++err_out: ++ dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt), ++ DMA_BIDIRECTIONAL); ++ return -EIO; ++} ++EXPORT_SYMBOL(dpaa2_caam_enqueue); ++ ++const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = { ++ { ++ .vendor = FSL_MC_VENDOR_FREESCALE, ++ .obj_type = "dpseci", ++ }, ++ { .vendor = 0x0 } ++}; ++ ++static struct fsl_mc_driver dpaa2_caam_driver = { ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .owner = THIS_MODULE, ++ }, ++ .probe = dpaa2_caam_probe, ++ .remove = dpaa2_caam_remove, ++ .match_id_table = dpaa2_caam_match_id_table ++}; ++ ++MODULE_LICENSE("Dual BSD/GPL"); ++MODULE_AUTHOR("Freescale Semiconductor, Inc"); ++MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver"); ++ ++module_fsl_mc_driver(dpaa2_caam_driver); +--- /dev/null ++++ b/drivers/crypto/caam/caamalg_qi2.h +@@ -0,0 +1,265 @@ ++/* ++ * Copyright 2015-2016 Freescale Semiconductor Inc. ++ * Copyright 2017 NXP ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the names of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifndef _CAAMALG_QI2_H_ ++#define _CAAMALG_QI2_H_ ++ ++#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h" ++#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h" ++#include ++#include "dpseci.h" ++#include "desc_constr.h" ++ ++#define DPAA2_CAAM_STORE_SIZE 16 ++/* NAPI weight *must* be a multiple of the store size. 
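++ * Each volatile dequeue brings back at most DPAA2_CAAM_STORE_SIZE
++ * frames, so a multiple lets dpaa2_dpseci_poll() consume its budget
++ * in whole store-sized batches.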
*/ ++#define DPAA2_CAAM_NAPI_WEIGHT 64 ++ ++/* The congestion entrance threshold was chosen so that on LS2088 ++ * we support the maximum throughput for the available memory ++ */ ++#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024) ++#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10) ++ ++/** ++ * dpaa2_caam_priv - driver private data ++ * @dpseci_id: DPSECI object unique ID ++ * @major_ver: DPSECI major version ++ * @minor_ver: DPSECI minor version ++ * @dpseci_attr: DPSECI attributes ++ * @sec_attr: SEC engine attributes ++ * @rx_queue_attr: array of Rx queue attributes ++ * @tx_queue_attr: array of Tx queue attributes ++ * @cscn_mem: pointer to memory region containing the ++ * dpaa2_cscn struct; it's size is larger than ++ * sizeof(struct dpaa2_cscn) to accommodate alignment ++ * @cscn_mem_aligned: pointer to struct dpaa2_cscn; it is computed ++ * as PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN) ++ * @cscn_dma: dma address used by the QMAN to write CSCN messages ++ * @dev: device associated with the DPSECI object ++ * @mc_io: pointer to MC portal's I/O object ++ * @domain: IOMMU domain ++ * @ppriv: per CPU pointers to privata data ++ */ ++struct dpaa2_caam_priv { ++ int dpsec_id; ++ ++ u16 major_ver; ++ u16 minor_ver; ++ ++ struct dpseci_attr dpseci_attr; ++ struct dpseci_sec_attr sec_attr; ++ struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_PRIO_NUM]; ++ struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_PRIO_NUM]; ++ int num_pairs; ++ ++ /* congestion */ ++ void *cscn_mem; ++ void *cscn_mem_aligned; ++ dma_addr_t cscn_dma; ++ ++ struct device *dev; ++ struct fsl_mc_io *mc_io; ++ struct iommu_domain *domain; ++ ++ struct dpaa2_caam_priv_per_cpu __percpu *ppriv; ++}; ++ ++/** ++ * dpaa2_caam_priv_per_cpu - per CPU private data ++ * @napi: napi structure ++ * @net_dev: netdev used by napi ++ * @req_fqid: (virtual) request (Tx / enqueue) FQID ++ * @rsp_fqid: (virtual) response (Rx / dequeue) FQID ++ * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr ++ * @nctx: notification context of response FQ ++ * @store: where dequeued frames are stored ++ * @priv: backpointer to dpaa2_caam_priv ++ */ ++struct dpaa2_caam_priv_per_cpu { ++ struct napi_struct napi; ++ struct net_device net_dev; ++ int req_fqid; ++ int rsp_fqid; ++ int prio; ++ struct dpaa2_io_notification_ctx nctx; ++ struct dpaa2_io_store *store; ++ struct dpaa2_caam_priv *priv; ++}; ++ ++/* ++ * The CAAM QI hardware constructs a job descriptor which points ++ * to shared descriptor (as pointed by context_a of FQ to CAAM). ++ * When the job descriptor is executed by deco, the whole job ++ * descriptor together with shared descriptor gets loaded in ++ * deco buffer which is 64 words long (each 32-bit). ++ * ++ * The job descriptor constructed by QI hardware has layout: ++ * ++ * HEADER (1 word) ++ * Shdesc ptr (1 or 2 words) ++ * SEQ_OUT_PTR (1 word) ++ * Out ptr (1 or 2 words) ++ * Out length (1 word) ++ * SEQ_IN_PTR (1 word) ++ * In ptr (1 or 2 words) ++ * In length (1 word) ++ * ++ * The shdesc ptr is used to fetch shared descriptor contents ++ * into deco buffer. ++ * ++ * Apart from shdesc contents, the total number of words that ++ * get loaded in deco buffer are '8' or '11'. The remaining words ++ * in deco buffer can be used for storing shared descriptor. 
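++ * MAX_SDLEN below encodes exactly this: the shared descriptor may
++ * occupy whatever remains of the 64-word deco buffer once the job
++ * descriptor I/O commands (DESC_JOB_IO_LEN) are accounted for.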
++ */ ++#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ) ++ ++/* Length of a single buffer in the QI driver memory cache */ ++#define CAAM_QI_MEMCACHE_SIZE 512 ++ ++/* ++ * aead_edesc - s/w-extended aead descriptor ++ * @src_nents: number of segments in input scatterlist ++ * @dst_nents: number of segments in output scatterlist ++ * @iv_dma: dma address of iv for checking continuity and link table ++ * @qm_sg_bytes: length of dma mapped h/w link table ++ * @qm_sg_dma: bus physical mapped address of h/w link table ++ * @assoclen_dma: bus physical mapped address of req->assoclen ++ * @sgt: the h/w link table ++ */ ++struct aead_edesc { ++ int src_nents; ++ int dst_nents; ++ dma_addr_t iv_dma; ++ int qm_sg_bytes; ++ dma_addr_t qm_sg_dma; ++ dma_addr_t assoclen_dma; ++#define CAAM_QI_MAX_AEAD_SG \ ++ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \ ++ sizeof(struct dpaa2_sg_entry)) ++ struct dpaa2_sg_entry sgt[0]; ++}; ++ ++/* ++ * tls_edesc - s/w-extended tls descriptor ++ * @src_nents: number of segments in input scatterlist ++ * @dst_nents: number of segments in output scatterlist ++ * @iv_dma: dma address of iv for checking continuity and link table ++ * @qm_sg_bytes: length of dma mapped h/w link table ++ * @qm_sg_dma: bus physical mapped address of h/w link table ++ * @tmp: array of scatterlists used by 'scatterwalk_ffwd' ++ * @dst: pointer to output scatterlist, usefull for unmapping ++ * @sgt: the h/w link table ++ */ ++struct tls_edesc { ++ int src_nents; ++ int dst_nents; ++ dma_addr_t iv_dma; ++ int qm_sg_bytes; ++ dma_addr_t qm_sg_dma; ++ struct scatterlist tmp[2]; ++ struct scatterlist *dst; ++ struct dpaa2_sg_entry sgt[0]; ++}; ++ ++/* ++ * ablkcipher_edesc - s/w-extended ablkcipher descriptor ++ * @src_nents: number of segments in input scatterlist ++ * @dst_nents: number of segments in output scatterlist ++ * @iv_dma: dma address of iv for checking continuity and link table ++ * @qm_sg_bytes: length of dma mapped qm_sg space ++ * @qm_sg_dma: I/O virtual address of h/w link table ++ * @sgt: the h/w link table ++ */ ++struct ablkcipher_edesc { ++ int src_nents; ++ int dst_nents; ++ dma_addr_t iv_dma; ++ int qm_sg_bytes; ++ dma_addr_t qm_sg_dma; ++#define CAAM_QI_MAX_ABLKCIPHER_SG \ ++ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \ ++ sizeof(struct dpaa2_sg_entry)) ++ struct dpaa2_sg_entry sgt[0]; ++}; ++ ++/** ++ * caam_flc - Flow Context (FLC) ++ * @flc: Flow Context options ++ * @sh_desc: Shared Descriptor ++ * @flc_dma: DMA address of the Flow Context ++ */ ++struct caam_flc { ++ u32 flc[16]; ++ u32 sh_desc[MAX_SDLEN]; ++ dma_addr_t flc_dma; ++} ____cacheline_aligned; ++ ++enum optype { ++ ENCRYPT = 0, ++ DECRYPT, ++ GIVENCRYPT, ++ NUM_OP ++}; ++ ++/** ++ * caam_request - the request structure the driver application should fill while ++ * submitting a job to driver. 
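++ * The request is hardware-owned from dpaa2_caam_enqueue() until the
++ * response frame comes back, at which point @cbk runs with @ctx and the
++ * frame status; on success the enqueue itself returns -EINPROGRESS.
++ * Rough caller sketch (my_done(), my_ctx, flc and the DMA addresses are
++ * illustrative only; the FLE setters are the dpaa2-fd.h frame-list
++ * accessors this file already uses):
++ *
++ *	dpaa2_fl_set_addr(&req->fd_flt[0], dst_dma);
++ *	dpaa2_fl_set_len(&req->fd_flt[0], dst_len);
++ *	dpaa2_fl_set_addr(&req->fd_flt[1], src_dma);
++ *	dpaa2_fl_set_len(&req->fd_flt[1], src_len);
++ *	req->flc = flc;
++ *	req->cbk = my_done;
++ *	req->ctx = my_ctx;
++ *	ret = dpaa2_caam_enqueue(dev, req);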
++ * @fd_flt: Frame list table defining input and output ++ * fd_flt[0] - FLE pointing to output buffer ++ * fd_flt[1] - FLE pointing to input buffer ++ * @fd_flt_dma: DMA address for the frame list table ++ * @flc: Flow Context ++ * @op_type: operation type ++ * @cbk: Callback function to invoke when job is completed ++ * @ctx: arbit context attached with request by the application ++ * @edesc: extended descriptor; points to one of {ablkcipher,aead}_edesc ++ */ ++struct caam_request { ++ struct dpaa2_fl_entry fd_flt[2]; ++ dma_addr_t fd_flt_dma; ++ struct caam_flc *flc; ++ enum optype op_type; ++ void (*cbk)(void *ctx, u32 err); ++ void *ctx; ++ void *edesc; ++}; ++ ++/** ++ * dpaa2_caam_enqueue() - enqueue a crypto request ++ * @dev: device associated with the DPSECI object ++ * @req: pointer to caam_request ++ */ ++int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req); ++ ++#endif /* _CAAMALG_QI2_H_ */ +--- a/drivers/crypto/caam/caamhash.c ++++ b/drivers/crypto/caam/caamhash.c +@@ -72,7 +72,7 @@ + #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE + + /* length of descriptors text */ +-#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ) ++#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ) + #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ) + #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) + #define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) +@@ -103,20 +103,14 @@ struct caam_hash_ctx { + u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; + u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; + u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; +- u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; + dma_addr_t sh_desc_update_dma ____cacheline_aligned; + dma_addr_t sh_desc_update_first_dma; + dma_addr_t sh_desc_fin_dma; + dma_addr_t sh_desc_digest_dma; +- dma_addr_t sh_desc_finup_dma; + struct device *jrdev; +- u32 alg_type; +- u32 alg_op; + u8 key[CAAM_MAX_HASH_KEY_SIZE]; +- dma_addr_t key_dma; + int ctx_len; +- unsigned int split_key_len; +- unsigned int split_key_pad_len; ++ struct alginfo adata; + }; + + /* ahash state */ +@@ -143,6 +137,31 @@ struct caam_export_state { + int (*finup)(struct ahash_request *req); + }; + ++static inline void switch_buf(struct caam_hash_state *state) ++{ ++ state->current_buf ^= 1; ++} ++ ++static inline u8 *current_buf(struct caam_hash_state *state) ++{ ++ return state->current_buf ? state->buf_1 : state->buf_0; ++} ++ ++static inline u8 *alt_buf(struct caam_hash_state *state) ++{ ++ return state->current_buf ? state->buf_0 : state->buf_1; ++} ++ ++static inline int *current_buflen(struct caam_hash_state *state) ++{ ++ return state->current_buf ? &state->buflen_1 : &state->buflen_0; ++} ++ ++static inline int *alt_buflen(struct caam_hash_state *state) ++{ ++ return state->current_buf ? 
&state->buflen_0 : &state->buflen_1; ++} ++ + /* Common job descriptor seq in/out ptr routines */ + + /* Map state->caam_ctx, and append seq_out_ptr command that points to it */ +@@ -175,36 +194,27 @@ static inline dma_addr_t map_seq_out_ptr + return dst_dma; + } + +-/* Map current buffer in state and put it in link table */ +-static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev, +- struct sec4_sg_entry *sec4_sg, +- u8 *buf, int buflen) ++/* Map current buffer in state (if length > 0) and put it in link table */ ++static inline int buf_map_to_sec4_sg(struct device *jrdev, ++ struct sec4_sg_entry *sec4_sg, ++ struct caam_hash_state *state) + { +- dma_addr_t buf_dma; ++ int buflen = *current_buflen(state); + +- buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); +- dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0); ++ if (!buflen) ++ return 0; + +- return buf_dma; +-} ++ state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(jrdev, state->buf_dma)) { ++ dev_err(jrdev, "unable to map buf\n"); ++ state->buf_dma = 0; ++ return -ENOMEM; ++ } + +-/* +- * Only put buffer in link table if it contains data, which is possible, +- * since a buffer has previously been used, and needs to be unmapped, +- */ +-static inline dma_addr_t +-try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg, +- u8 *buf, dma_addr_t buf_dma, int buflen, +- int last_buflen) +-{ +- if (buf_dma && !dma_mapping_error(jrdev, buf_dma)) +- dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE); +- if (buflen) +- buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen); +- else +- buf_dma = 0; ++ dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0); + +- return buf_dma; ++ return 0; + } + + /* Map state->caam_ctx, and add it to link table */ +@@ -224,89 +234,54 @@ static inline int ctx_map_to_sec4_sg(u32 + return 0; + } + +-/* Common shared descriptor commands */ +-static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) +-{ +- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, +- ctx->split_key_len, CLASS_2 | +- KEY_DEST_MDHA_SPLIT | KEY_ENC); +-} +- +-/* Append key if it has been set */ +-static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) +-{ +- u32 *key_jump_cmd; +- +- init_sh_desc(desc, HDR_SHARE_SERIAL); +- +- if (ctx->split_key_len) { +- /* Skip if already shared */ +- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | +- JUMP_COND_SHRD); +- +- append_key_ahash(desc, ctx); +- +- set_jump_tgt_here(desc, key_jump_cmd); +- } +- +- /* Propagate errors from shared to job descriptor */ +- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); +-} +- + /* +- * For ahash read data from seqin following state->caam_ctx, +- * and write resulting class2 context to seqout, which may be state->caam_ctx +- * or req->result ++ * For ahash update, final and finup (import_ctx = true) ++ * import context, read and write to seqout ++ * For ahash firsts and digest (import_ctx = false) ++ * read and write to seqout + */ +-static inline void ahash_append_load_str(u32 *desc, int digestsize) ++static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize, ++ struct caam_hash_ctx *ctx, bool import_ctx) + { +- /* Calculate remaining bytes to read */ +- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); ++ u32 op = ctx->adata.algtype; ++ u32 *skip_key_load; + +- /* Read remaining bytes */ +- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | +- 
FIFOLD_TYPE_MSG | KEY_VLF); ++ init_sh_desc(desc, HDR_SHARE_SERIAL); + +- /* Store class2 context bytes */ +- append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | +- LDST_SRCDST_BYTE_CONTEXT); +-} ++ /* Append key if it has been set; ahash update excluded */ ++ if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) { ++ /* Skip key loading if already shared */ ++ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_SHRD); + +-/* +- * For ahash update, final and finup, import context, read and write to seqout +- */ +-static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state, +- int digestsize, +- struct caam_hash_ctx *ctx) +-{ +- init_sh_desc_key_ahash(desc, ctx); +- +- /* Import context from software */ +- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | +- LDST_CLASS_2_CCB | ctx->ctx_len); ++ append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad, ++ ctx->adata.keylen, CLASS_2 | ++ KEY_DEST_MDHA_SPLIT | KEY_ENC); + +- /* Class 2 operation */ +- append_operation(desc, op | state | OP_ALG_ENCRYPT); ++ set_jump_tgt_here(desc, skip_key_load); + +- /* +- * Load from buf and/or src and write to req->result or state->context +- */ +- ahash_append_load_str(desc, digestsize); +-} ++ op |= OP_ALG_AAI_HMAC_PRECOMP; ++ } + +-/* For ahash firsts and digest, read and write to seqout */ +-static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state, +- int digestsize, struct caam_hash_ctx *ctx) +-{ +- init_sh_desc_key_ahash(desc, ctx); ++ /* If needed, import context from software */ ++ if (import_ctx) ++ append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB | ++ LDST_SRCDST_BYTE_CONTEXT); + + /* Class 2 operation */ + append_operation(desc, op | state | OP_ALG_ENCRYPT); + + /* + * Load from buf and/or src and write to req->result or state->context ++ * Calculate remaining bytes to read + */ +- ahash_append_load_str(desc, digestsize); ++ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); ++ /* Read remaining bytes */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | ++ FIFOLD_TYPE_MSG | KEY_VLF); ++ /* Store class2 context bytes */ ++ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | ++ LDST_SRCDST_BYTE_CONTEXT); + } + + static int ahash_set_sh_desc(struct crypto_ahash *ahash) +@@ -314,34 +289,13 @@ static int ahash_set_sh_desc(struct cryp + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + int digestsize = crypto_ahash_digestsize(ahash); + struct device *jrdev = ctx->jrdev; +- u32 have_key = 0; + u32 *desc; + +- if (ctx->split_key_len) +- have_key = OP_ALG_AAI_HMAC_PRECOMP; +- + /* ahash_update shared descriptor */ + desc = ctx->sh_desc_update; +- +- init_sh_desc(desc, HDR_SHARE_SERIAL); +- +- /* Import context from software */ +- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | +- LDST_CLASS_2_CCB | ctx->ctx_len); +- +- /* Class 2 operation */ +- append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE | +- OP_ALG_ENCRYPT); +- +- /* Load data and write to result or context */ +- ahash_append_load_str(desc, ctx->ctx_len); +- +- ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc), +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) { +- dev_err(jrdev, "unable to map shared descriptor\n"); +- return -ENOMEM; +- } ++ ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true); ++ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, ++ desc_bytes(desc), DMA_TO_DEVICE); + #ifdef DEBUG + print_hex_dump(KERN_ERR, + "ahash update 
shdesc@"__stringify(__LINE__)": ", +@@ -350,17 +304,9 @@ static int ahash_set_sh_desc(struct cryp + + /* ahash_update_first shared descriptor */ + desc = ctx->sh_desc_update_first; +- +- ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT, +- ctx->ctx_len, ctx); +- +- ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc, +- desc_bytes(desc), +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) { +- dev_err(jrdev, "unable to map shared descriptor\n"); +- return -ENOMEM; +- } ++ ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false); ++ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, ++ desc_bytes(desc), DMA_TO_DEVICE); + #ifdef DEBUG + print_hex_dump(KERN_ERR, + "ahash update first shdesc@"__stringify(__LINE__)": ", +@@ -369,53 +315,20 @@ static int ahash_set_sh_desc(struct cryp + + /* ahash_final shared descriptor */ + desc = ctx->sh_desc_fin; +- +- ahash_ctx_data_to_out(desc, have_key | ctx->alg_type, +- OP_ALG_AS_FINALIZE, digestsize, ctx); +- +- ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc), +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) { +- dev_err(jrdev, "unable to map shared descriptor\n"); +- return -ENOMEM; +- } ++ ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true); ++ dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, ++ desc_bytes(desc), DMA_TO_DEVICE); + #ifdef DEBUG + print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); + #endif + +- /* ahash_finup shared descriptor */ +- desc = ctx->sh_desc_finup; +- +- ahash_ctx_data_to_out(desc, have_key | ctx->alg_type, +- OP_ALG_AS_FINALIZE, digestsize, ctx); +- +- ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc), +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) { +- dev_err(jrdev, "unable to map shared descriptor\n"); +- return -ENOMEM; +- } +-#ifdef DEBUG +- print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ", +- DUMP_PREFIX_ADDRESS, 16, 4, desc, +- desc_bytes(desc), 1); +-#endif +- + /* ahash_digest shared descriptor */ + desc = ctx->sh_desc_digest; +- +- ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL, +- digestsize, ctx); +- +- ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc, +- desc_bytes(desc), +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) { +- dev_err(jrdev, "unable to map shared descriptor\n"); +- return -ENOMEM; +- } ++ ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false); ++ dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, ++ desc_bytes(desc), DMA_TO_DEVICE); + #ifdef DEBUG + print_hex_dump(KERN_ERR, + "ahash digest shdesc@"__stringify(__LINE__)": ", +@@ -426,14 +339,6 @@ static int ahash_set_sh_desc(struct cryp + return 0; + } + +-static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in, +- u32 keylen) +-{ +- return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len, +- ctx->split_key_pad_len, key_in, keylen, +- ctx->alg_op); +-} +- + /* Digest hash size if it is too large */ + static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, + u32 *keylen, u8 *key_out, u32 digestsize) +@@ -469,7 +374,7 @@ static int hash_digest_key(struct caam_h + } + + /* Job descriptor to perform unkeyed hash on key_in */ +- append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT | ++ append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT | 
+ OP_ALG_AS_INITFINAL); + append_seq_in_ptr(desc, src_dma, *keylen, 0); + append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | +@@ -513,10 +418,7 @@ static int hash_digest_key(struct caam_h + static int ahash_setkey(struct crypto_ahash *ahash, + const u8 *key, unsigned int keylen) + { +- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ +- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); +- struct device *jrdev = ctx->jrdev; + int blocksize = crypto_tfm_alg_blocksize(&ahash->base); + int digestsize = crypto_ahash_digestsize(ahash); + int ret; +@@ -539,43 +441,19 @@ static int ahash_setkey(struct crypto_ah + key = hashed_key; + } + +- /* Pick class 2 key length from algorithm submask */ +- ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> +- OP_ALG_ALGSEL_SHIFT] * 2; +- ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16); +- +-#ifdef DEBUG +- printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", +- ctx->split_key_len, ctx->split_key_pad_len); +- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", +- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); +-#endif +- +- ret = gen_split_hash_key(ctx, key, keylen); ++ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen, ++ CAAM_MAX_HASH_KEY_SIZE); + if (ret) + goto bad_free_key; + +- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len, +- DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->key_dma)) { +- dev_err(jrdev, "unable to map key i/o memory\n"); +- ret = -ENOMEM; +- goto error_free_key; +- } + #ifdef DEBUG + print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, +- ctx->split_key_pad_len, 1); ++ ctx->adata.keylen_pad, 1); + #endif + +- ret = ahash_set_sh_desc(ahash); +- if (ret) { +- dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len, +- DMA_TO_DEVICE); +- } +- error_free_key: + kfree(hashed_key); +- return ret; ++ return ahash_set_sh_desc(ahash); + bad_free_key: + kfree(hashed_key); + crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); +@@ -604,6 +482,8 @@ static inline void ahash_unmap(struct de + struct ahash_edesc *edesc, + struct ahash_request *req, int dst_len) + { ++ struct caam_hash_state *state = ahash_request_ctx(req); ++ + if (edesc->src_nents) + dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); + if (edesc->dst_dma) +@@ -612,6 +492,12 @@ static inline void ahash_unmap(struct de + if (edesc->sec4_sg_bytes) + dma_unmap_single(dev, edesc->sec4_sg_dma, + edesc->sec4_sg_bytes, DMA_TO_DEVICE); ++ ++ if (state->buf_dma) { ++ dma_unmap_single(dev, state->buf_dma, *current_buflen(state), ++ DMA_TO_DEVICE); ++ state->buf_dma = 0; ++ } + } + + static inline void ahash_unmap_ctx(struct device *dev, +@@ -643,8 +529,7 @@ static void ahash_done(struct device *jr + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); + #endif + +- edesc = (struct ahash_edesc *)((char *)desc - +- offsetof(struct ahash_edesc, hw_desc)); ++ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); + if (err) + caam_jr_strstatus(jrdev, err); + +@@ -671,19 +556,19 @@ static void ahash_done_bi(struct device + struct ahash_edesc *edesc; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); +-#ifdef DEBUG + struct caam_hash_state *state = ahash_request_ctx(req); ++#ifdef DEBUG + int digestsize = crypto_ahash_digestsize(ahash); + + dev_err(jrdev, "%s %d: err 0x%x\n", 
__func__, __LINE__, err); + #endif + +- edesc = (struct ahash_edesc *)((char *)desc - +- offsetof(struct ahash_edesc, hw_desc)); ++ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); + if (err) + caam_jr_strstatus(jrdev, err); + + ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); ++ switch_buf(state); + kfree(edesc); + + #ifdef DEBUG +@@ -713,8 +598,7 @@ static void ahash_done_ctx_src(struct de + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); + #endif + +- edesc = (struct ahash_edesc *)((char *)desc - +- offsetof(struct ahash_edesc, hw_desc)); ++ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); + if (err) + caam_jr_strstatus(jrdev, err); + +@@ -741,19 +625,19 @@ static void ahash_done_ctx_dst(struct de + struct ahash_edesc *edesc; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); +-#ifdef DEBUG + struct caam_hash_state *state = ahash_request_ctx(req); ++#ifdef DEBUG + int digestsize = crypto_ahash_digestsize(ahash); + + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); + #endif + +- edesc = (struct ahash_edesc *)((char *)desc - +- offsetof(struct ahash_edesc, hw_desc)); ++ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); + if (err) + caam_jr_strstatus(jrdev, err); + + ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); ++ switch_buf(state); + kfree(edesc); + + #ifdef DEBUG +@@ -835,13 +719,12 @@ static int ahash_update_ctx(struct ahash + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; +- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | +- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; +- u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; +- int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0; +- u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1; +- int *next_buflen = state->current_buf ? &state->buflen_0 : +- &state->buflen_1, last_buflen; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
++ GFP_KERNEL : GFP_ATOMIC; ++ u8 *buf = current_buf(state); ++ int *buflen = current_buflen(state); ++ u8 *next_buf = alt_buf(state); ++ int *next_buflen = alt_buflen(state), last_buflen; + int in_len = *buflen + req->nbytes, to_hash; + u32 *desc; + int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index; +@@ -895,10 +778,9 @@ static int ahash_update_ctx(struct ahash + if (ret) + goto unmap_ctx; + +- state->buf_dma = try_buf_map_to_sec4_sg(jrdev, +- edesc->sec4_sg + 1, +- buf, state->buf_dma, +- *buflen, last_buflen); ++ ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); ++ if (ret) ++ goto unmap_ctx; + + if (mapped_nents) { + sg_to_sec4_sg_last(req->src, mapped_nents, +@@ -909,12 +791,10 @@ static int ahash_update_ctx(struct ahash + to_hash - *buflen, + *next_buflen, 0); + } else { +- (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= +- cpu_to_caam32(SEC4_SG_LEN_FIN); ++ sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - ++ 1); + } + +- state->current_buf = !state->current_buf; +- + desc = edesc->hw_desc; + + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, +@@ -969,12 +849,9 @@ static int ahash_final_ctx(struct ahash_ + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; +- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | +- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; +- u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; +- int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; +- int last_buflen = state->current_buf ? state->buflen_0 : +- state->buflen_1; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? ++ GFP_KERNEL : GFP_ATOMIC; ++ int buflen = *current_buflen(state); + u32 *desc; + int sec4_sg_bytes, sec4_sg_src_index; + int digestsize = crypto_ahash_digestsize(ahash); +@@ -1001,11 +878,11 @@ static int ahash_final_ctx(struct ahash_ + if (ret) + goto unmap_ctx; + +- state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, +- buf, state->buf_dma, buflen, +- last_buflen); +- (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= +- cpu_to_caam32(SEC4_SG_LEN_FIN); ++ ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); ++ if (ret) ++ goto unmap_ctx; ++ ++ sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1); + + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); +@@ -1048,12 +925,9 @@ static int ahash_finup_ctx(struct ahash_ + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; +- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | +- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; +- u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; +- int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; +- int last_buflen = state->current_buf ? state->buflen_0 : +- state->buflen_1; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
++ GFP_KERNEL : GFP_ATOMIC; ++ int buflen = *current_buflen(state); + u32 *desc; + int sec4_sg_src_index; + int src_nents, mapped_nents; +@@ -1082,7 +956,7 @@ static int ahash_finup_ctx(struct ahash_ + + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, +- ctx->sh_desc_finup, ctx->sh_desc_finup_dma, ++ ctx->sh_desc_fin, ctx->sh_desc_fin_dma, + flags); + if (!edesc) { + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); +@@ -1098,9 +972,9 @@ static int ahash_finup_ctx(struct ahash_ + if (ret) + goto unmap_ctx; + +- state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, +- buf, state->buf_dma, buflen, +- last_buflen); ++ ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); ++ if (ret) ++ goto unmap_ctx; + + ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, + sec4_sg_src_index, ctx->ctx_len + buflen, +@@ -1136,15 +1010,18 @@ static int ahash_digest(struct ahash_req + { + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ++ struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; +- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | +- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? ++ GFP_KERNEL : GFP_ATOMIC; + u32 *desc; + int digestsize = crypto_ahash_digestsize(ahash); + int src_nents, mapped_nents; + struct ahash_edesc *edesc; + int ret; + ++ state->buf_dma = 0; ++ + src_nents = sg_nents_for_len(req->src, req->nbytes); + if (src_nents < 0) { + dev_err(jrdev, "Invalid number of src SG.\n"); +@@ -1215,10 +1092,10 @@ static int ahash_final_no_ctx(struct aha + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; +- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | +- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; +- u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; +- int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? ++ GFP_KERNEL : GFP_ATOMIC; ++ u8 *buf = current_buf(state); ++ int buflen = *current_buflen(state); + u32 *desc; + int digestsize = crypto_ahash_digestsize(ahash); + struct ahash_edesc *edesc; +@@ -1276,13 +1153,12 @@ static int ahash_update_no_ctx(struct ah + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; +- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | +- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; +- u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; +- int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0; +- u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1; +- int *next_buflen = state->current_buf ? &state->buflen_0 : +- &state->buflen_1; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
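Throughout these hunks (and again just below) the allocation mode now follows CRYPTO_TFM_REQ_MAY_SLEEP alone; CRYPTO_TFM_REQ_MAY_BACKLOG no longer implies a sleeping allocation. The recurring idiom, written once as a helper (hypothetical name, kernel context assumed; the patch open-codes it at each call site):

    static inline gfp_t edesc_gfp_flags(u32 req_flags)
    {
            return (req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                   GFP_KERNEL : GFP_ATOMIC;
    }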
++ GFP_KERNEL : GFP_ATOMIC; ++ u8 *buf = current_buf(state); ++ int *buflen = current_buflen(state); ++ u8 *next_buf = alt_buf(state); ++ int *next_buflen = alt_buflen(state); + int in_len = *buflen + req->nbytes, to_hash; + int sec4_sg_bytes, src_nents, mapped_nents; + struct ahash_edesc *edesc; +@@ -1331,8 +1207,10 @@ static int ahash_update_no_ctx(struct ah + edesc->sec4_sg_bytes = sec4_sg_bytes; + edesc->dst_dma = 0; + +- state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, +- buf, *buflen); ++ ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); ++ if (ret) ++ goto unmap_ctx; ++ + sg_to_sec4_sg_last(req->src, mapped_nents, + edesc->sec4_sg + 1, 0); + +@@ -1342,8 +1220,6 @@ static int ahash_update_no_ctx(struct ah + *next_buflen, 0); + } + +- state->current_buf = !state->current_buf; +- + desc = edesc->hw_desc; + + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, +@@ -1403,12 +1279,9 @@ static int ahash_finup_no_ctx(struct aha + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; +- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | +- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; +- u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; +- int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; +- int last_buflen = state->current_buf ? state->buflen_0 : +- state->buflen_1; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? ++ GFP_KERNEL : GFP_ATOMIC; ++ int buflen = *current_buflen(state); + u32 *desc; + int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents; + int digestsize = crypto_ahash_digestsize(ahash); +@@ -1450,9 +1323,9 @@ static int ahash_finup_no_ctx(struct aha + edesc->src_nents = src_nents; + edesc->sec4_sg_bytes = sec4_sg_bytes; + +- state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf, +- state->buf_dma, buflen, +- last_buflen); ++ ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); ++ if (ret) ++ goto unmap; + + ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen, + req->nbytes); +@@ -1496,11 +1369,10 @@ static int ahash_update_first(struct aha + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; +- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | +- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; +- u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0; +- int *next_buflen = state->current_buf ? +- &state->buflen_1 : &state->buflen_0; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
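As in the ctx-based paths earlier, ahash_update_no_ctx() above drops try_buf_map_to_sec4_sg(), whose dma_addr_t return value the caller had to store, in favor of buf_map_to_sec4_sg(), which returns an error code and records the mapping in the state itself. The helper's body sits earlier in this patch and is not visible here; the following is only a plausible model consistent with the call sites (everything inside is an assumption beyond that contract; dma_to_sec4_sg_one() is the driver's existing S/G-entry writer):

    static int buf_map_model(struct device *jrdev,
                             struct sec4_sg_entry *sec4_sg,
                             struct caam_hash_state *state)
    {
            int buflen = *current_buflen(state);

            if (!buflen)
                    return 0;               /* nothing buffered to map */

            state->buf_dma = dma_map_single(jrdev, current_buf(state),
                                            buflen, DMA_TO_DEVICE);
            if (dma_mapping_error(jrdev, state->buf_dma)) {
                    state->buf_dma = 0;
                    return -ENOMEM;         /* callers goto unmap_ctx/unmap */
            }

            dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
            return 0;
    }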
++ GFP_KERNEL : GFP_ATOMIC; ++ u8 *next_buf = alt_buf(state); ++ int *next_buflen = alt_buflen(state); + int to_hash; + u32 *desc; + int src_nents, mapped_nents; +@@ -1582,6 +1454,7 @@ static int ahash_update_first(struct aha + state->final = ahash_final_no_ctx; + scatterwalk_map_and_copy(next_buf, req->src, 0, + req->nbytes, 0); ++ switch_buf(state); + } + #ifdef DEBUG + print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", +@@ -1688,7 +1561,6 @@ struct caam_hash_template { + unsigned int blocksize; + struct ahash_alg template_ahash; + u32 alg_type; +- u32 alg_op; + }; + + /* ahash descriptors */ +@@ -1714,7 +1586,6 @@ static struct caam_hash_template driver_ + }, + }, + .alg_type = OP_ALG_ALGSEL_SHA1, +- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + }, { + .name = "sha224", + .driver_name = "sha224-caam", +@@ -1736,7 +1607,6 @@ static struct caam_hash_template driver_ + }, + }, + .alg_type = OP_ALG_ALGSEL_SHA224, +- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + }, { + .name = "sha256", + .driver_name = "sha256-caam", +@@ -1758,7 +1628,6 @@ static struct caam_hash_template driver_ + }, + }, + .alg_type = OP_ALG_ALGSEL_SHA256, +- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + }, { + .name = "sha384", + .driver_name = "sha384-caam", +@@ -1780,7 +1649,6 @@ static struct caam_hash_template driver_ + }, + }, + .alg_type = OP_ALG_ALGSEL_SHA384, +- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + }, { + .name = "sha512", + .driver_name = "sha512-caam", +@@ -1802,7 +1670,6 @@ static struct caam_hash_template driver_ + }, + }, + .alg_type = OP_ALG_ALGSEL_SHA512, +- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + }, { + .name = "md5", + .driver_name = "md5-caam", +@@ -1824,14 +1691,12 @@ static struct caam_hash_template driver_ + }, + }, + .alg_type = OP_ALG_ALGSEL_MD5, +- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + }, + }; + + struct caam_hash_alg { + struct list_head entry; + int alg_type; +- int alg_op; + struct ahash_alg ahash_alg; + }; + +@@ -1853,6 +1718,7 @@ static int caam_hash_cra_init(struct cry + HASH_MSG_LEN + SHA256_DIGEST_SIZE, + HASH_MSG_LEN + 64, + HASH_MSG_LEN + SHA512_DIGEST_SIZE }; ++ dma_addr_t dma_addr; + + /* + * Get a Job ring from Job Ring driver to ensure in-order +@@ -1863,11 +1729,31 @@ static int caam_hash_cra_init(struct cry + pr_err("Job Ring Device allocation for transform failed\n"); + return PTR_ERR(ctx->jrdev); + } ++ ++ dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, ++ offsetof(struct caam_hash_ctx, ++ sh_desc_update_dma), ++ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); ++ if (dma_mapping_error(ctx->jrdev, dma_addr)) { ++ dev_err(ctx->jrdev, "unable to map shared descriptors\n"); ++ caam_jr_free(ctx->jrdev); ++ return -ENOMEM; ++ } ++ ++ ctx->sh_desc_update_dma = dma_addr; ++ ctx->sh_desc_update_first_dma = dma_addr + ++ offsetof(struct caam_hash_ctx, ++ sh_desc_update_first); ++ ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx, ++ sh_desc_fin); ++ ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx, ++ sh_desc_digest); ++ + /* copy descriptor header template value */ +- ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; +- ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op; ++ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; + +- ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> ++ ctx->ctx_len = runninglen[(ctx->adata.algtype & ++ OP_ALG_ALGSEL_SUBMASK) >> + OP_ALG_ALGSEL_SHIFT]; + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), +@@ 
-1879,30 +1765,10 @@ static void caam_hash_cra_exit(struct cr + { + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); + +- if (ctx->sh_desc_update_dma && +- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma)) +- dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma, +- desc_bytes(ctx->sh_desc_update), +- DMA_TO_DEVICE); +- if (ctx->sh_desc_update_first_dma && +- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma)) +- dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma, +- desc_bytes(ctx->sh_desc_update_first), +- DMA_TO_DEVICE); +- if (ctx->sh_desc_fin_dma && +- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma)) +- dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma, +- desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE); +- if (ctx->sh_desc_digest_dma && +- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma)) +- dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma, +- desc_bytes(ctx->sh_desc_digest), +- DMA_TO_DEVICE); +- if (ctx->sh_desc_finup_dma && +- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma)) +- dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma, +- desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE); +- ++ dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, ++ offsetof(struct caam_hash_ctx, ++ sh_desc_update_dma), ++ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + caam_jr_free(ctx->jrdev); + } + +@@ -1961,7 +1827,6 @@ caam_hash_alloc(struct caam_hash_templat + alg->cra_type = &crypto_ahash_type; + + t_alg->alg_type = template->alg_type; +- t_alg->alg_op = template->alg_op; + + return t_alg; + } +--- a/drivers/crypto/caam/caampkc.c ++++ b/drivers/crypto/caam/caampkc.c +@@ -18,6 +18,10 @@ + #define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb)) + #define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \ + sizeof(struct rsa_priv_f1_pdb)) ++#define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \ ++ sizeof(struct rsa_priv_f2_pdb)) ++#define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \ ++ sizeof(struct rsa_priv_f3_pdb)) + + static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc, + struct akcipher_request *req) +@@ -54,6 +58,42 @@ static void rsa_priv_f1_unmap(struct dev + dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); + } + ++static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc, ++ struct akcipher_request *req) ++{ ++ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); ++ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); ++ struct caam_rsa_key *key = &ctx->key; ++ struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2; ++ size_t p_sz = key->p_sz; ++ size_t q_sz = key->p_sz; ++ ++ dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); ++ dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); ++ dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); ++ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); ++ dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); ++} ++ ++static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, ++ struct akcipher_request *req) ++{ ++ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); ++ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); ++ struct caam_rsa_key *key = &ctx->key; ++ struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3; ++ size_t p_sz = key->p_sz; ++ size_t q_sz = key->p_sz; ++ ++ dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); ++ dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); ++ dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); ++ dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); ++ 
dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); ++ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); ++ dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); ++} ++ + /* RSA Job Completion handler */ + static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context) + { +@@ -90,6 +130,42 @@ static void rsa_priv_f1_done(struct devi + akcipher_request_complete(req, err); + } + ++static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err, ++ void *context) ++{ ++ struct akcipher_request *req = context; ++ struct rsa_edesc *edesc; ++ ++ if (err) ++ caam_jr_strstatus(dev, err); ++ ++ edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); ++ ++ rsa_priv_f2_unmap(dev, edesc, req); ++ rsa_io_unmap(dev, edesc, req); ++ kfree(edesc); ++ ++ akcipher_request_complete(req, err); ++} ++ ++static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err, ++ void *context) ++{ ++ struct akcipher_request *req = context; ++ struct rsa_edesc *edesc; ++ ++ if (err) ++ caam_jr_strstatus(dev, err); ++ ++ edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); ++ ++ rsa_priv_f3_unmap(dev, edesc, req); ++ rsa_io_unmap(dev, edesc, req); ++ kfree(edesc); ++ ++ akcipher_request_complete(req, err); ++} ++ + static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, + size_t desclen) + { +@@ -97,8 +173,8 @@ static struct rsa_edesc *rsa_edesc_alloc + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct device *dev = ctx->dev; + struct rsa_edesc *edesc; +- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | +- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? ++ GFP_KERNEL : GFP_ATOMIC; + int sgc; + int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; + int src_nents, dst_nents; +@@ -258,6 +334,172 @@ static int set_rsa_priv_f1_pdb(struct ak + return 0; + } + ++static int set_rsa_priv_f2_pdb(struct akcipher_request *req, ++ struct rsa_edesc *edesc) ++{ ++ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); ++ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); ++ struct caam_rsa_key *key = &ctx->key; ++ struct device *dev = ctx->dev; ++ struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2; ++ int sec4_sg_index = 0; ++ size_t p_sz = key->p_sz; ++ size_t q_sz = key->p_sz; ++ ++ pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, pdb->d_dma)) { ++ dev_err(dev, "Unable to map RSA private exponent memory\n"); ++ return -ENOMEM; ++ } ++ ++ pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, pdb->p_dma)) { ++ dev_err(dev, "Unable to map RSA prime factor p memory\n"); ++ goto unmap_d; ++ } ++ ++ pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, pdb->q_dma)) { ++ dev_err(dev, "Unable to map RSA prime factor q memory\n"); ++ goto unmap_p; ++ } ++ ++ pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, pdb->tmp1_dma)) { ++ dev_err(dev, "Unable to map RSA tmp1 memory\n"); ++ goto unmap_q; ++ } ++ ++ pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, pdb->tmp2_dma)) { ++ dev_err(dev, "Unable to map RSA tmp2 memory\n"); ++ goto unmap_tmp1; ++ } ++ ++ if (edesc->src_nents > 1) { ++ pdb->sgf |= RSA_PRIV_PDB_SGF_G; ++ pdb->g_dma = edesc->sec4_sg_dma; ++ sec4_sg_index += edesc->src_nents; ++ } else { ++ pdb->g_dma = sg_dma_address(req->src); ++ } ++ ++ if 
(edesc->dst_nents > 1) { ++ pdb->sgf |= RSA_PRIV_PDB_SGF_F; ++ pdb->f_dma = edesc->sec4_sg_dma + ++ sec4_sg_index * sizeof(struct sec4_sg_entry); ++ } else { ++ pdb->f_dma = sg_dma_address(req->dst); ++ } ++ ++ pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz; ++ pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz; ++ ++ return 0; ++ ++unmap_tmp1: ++ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); ++unmap_q: ++ dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); ++unmap_p: ++ dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); ++unmap_d: ++ dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); ++ ++ return -ENOMEM; ++} ++ ++static int set_rsa_priv_f3_pdb(struct akcipher_request *req, ++ struct rsa_edesc *edesc) ++{ ++ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); ++ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); ++ struct caam_rsa_key *key = &ctx->key; ++ struct device *dev = ctx->dev; ++ struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3; ++ int sec4_sg_index = 0; ++ size_t p_sz = key->p_sz; ++ size_t q_sz = key->p_sz; ++ ++ pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, pdb->p_dma)) { ++ dev_err(dev, "Unable to map RSA prime factor p memory\n"); ++ return -ENOMEM; ++ } ++ ++ pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, pdb->q_dma)) { ++ dev_err(dev, "Unable to map RSA prime factor q memory\n"); ++ goto unmap_p; ++ } ++ ++ pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, pdb->dp_dma)) { ++ dev_err(dev, "Unable to map RSA exponent dp memory\n"); ++ goto unmap_q; ++ } ++ ++ pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, pdb->dq_dma)) { ++ dev_err(dev, "Unable to map RSA exponent dq memory\n"); ++ goto unmap_dp; ++ } ++ ++ pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, pdb->c_dma)) { ++ dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n"); ++ goto unmap_dq; ++ } ++ ++ pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, pdb->tmp1_dma)) { ++ dev_err(dev, "Unable to map RSA tmp1 memory\n"); ++ goto unmap_qinv; ++ } ++ ++ pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, pdb->tmp2_dma)) { ++ dev_err(dev, "Unable to map RSA tmp2 memory\n"); ++ goto unmap_tmp1; ++ } ++ ++ if (edesc->src_nents > 1) { ++ pdb->sgf |= RSA_PRIV_PDB_SGF_G; ++ pdb->g_dma = edesc->sec4_sg_dma; ++ sec4_sg_index += edesc->src_nents; ++ } else { ++ pdb->g_dma = sg_dma_address(req->src); ++ } ++ ++ if (edesc->dst_nents > 1) { ++ pdb->sgf |= RSA_PRIV_PDB_SGF_F; ++ pdb->f_dma = edesc->sec4_sg_dma + ++ sec4_sg_index * sizeof(struct sec4_sg_entry); ++ } else { ++ pdb->f_dma = sg_dma_address(req->dst); ++ } ++ ++ pdb->sgf |= key->n_sz; ++ pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz; ++ ++ return 0; ++ ++unmap_tmp1: ++ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); ++unmap_qinv: ++ dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); ++unmap_dq: ++ dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); ++unmap_dp: ++ dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); ++unmap_q: ++ dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); ++unmap_p: ++ dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); ++ ++ return -ENOMEM; ++} ++ + static int caam_rsa_enc(struct akcipher_request *req) + { + struct crypto_akcipher *tfm = 
crypto_akcipher_reqtfm(req); +@@ -301,24 +543,14 @@ init_fail: + return ret; + } + +-static int caam_rsa_dec(struct akcipher_request *req) ++static int caam_rsa_dec_priv_f1(struct akcipher_request *req) + { + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); +- struct caam_rsa_key *key = &ctx->key; + struct device *jrdev = ctx->dev; + struct rsa_edesc *edesc; + int ret; + +- if (unlikely(!key->n || !key->d)) +- return -EINVAL; +- +- if (req->dst_len < key->n_sz) { +- req->dst_len = key->n_sz; +- dev_err(jrdev, "Output buffer length less than parameter n\n"); +- return -EOVERFLOW; +- } +- + /* Allocate extended descriptor */ + edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN); + if (IS_ERR(edesc)) +@@ -344,17 +576,147 @@ init_fail: + return ret; + } + ++static int caam_rsa_dec_priv_f2(struct akcipher_request *req) ++{ ++ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); ++ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); ++ struct device *jrdev = ctx->dev; ++ struct rsa_edesc *edesc; ++ int ret; ++ ++ /* Allocate extended descriptor */ ++ edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN); ++ if (IS_ERR(edesc)) ++ return PTR_ERR(edesc); ++ ++ /* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */ ++ ret = set_rsa_priv_f2_pdb(req, edesc); ++ if (ret) ++ goto init_fail; ++ ++ /* Initialize Job Descriptor */ ++ init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2); ++ ++ ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req); ++ if (!ret) ++ return -EINPROGRESS; ++ ++ rsa_priv_f2_unmap(jrdev, edesc, req); ++ ++init_fail: ++ rsa_io_unmap(jrdev, edesc, req); ++ kfree(edesc); ++ return ret; ++} ++ ++static int caam_rsa_dec_priv_f3(struct akcipher_request *req) ++{ ++ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); ++ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); ++ struct device *jrdev = ctx->dev; ++ struct rsa_edesc *edesc; ++ int ret; ++ ++ /* Allocate extended descriptor */ ++ edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN); ++ if (IS_ERR(edesc)) ++ return PTR_ERR(edesc); ++ ++ /* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */ ++ ret = set_rsa_priv_f3_pdb(req, edesc); ++ if (ret) ++ goto init_fail; ++ ++ /* Initialize Job Descriptor */ ++ init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3); ++ ++ ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req); ++ if (!ret) ++ return -EINPROGRESS; ++ ++ rsa_priv_f3_unmap(jrdev, edesc, req); ++ ++init_fail: ++ rsa_io_unmap(jrdev, edesc, req); ++ kfree(edesc); ++ return ret; ++} ++ ++static int caam_rsa_dec(struct akcipher_request *req) ++{ ++ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); ++ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); ++ struct caam_rsa_key *key = &ctx->key; ++ int ret; ++ ++ if (unlikely(!key->n || !key->d)) ++ return -EINVAL; ++ ++ if (req->dst_len < key->n_sz) { ++ req->dst_len = key->n_sz; ++ dev_err(ctx->dev, "Output buffer length less than parameter n\n"); ++ return -EOVERFLOW; ++ } ++ ++ if (key->priv_form == FORM3) ++ ret = caam_rsa_dec_priv_f3(req); ++ else if (key->priv_form == FORM2) ++ ret = caam_rsa_dec_priv_f2(req); ++ else ++ ret = caam_rsa_dec_priv_f1(req); ++ ++ return ret; ++} ++ + static void caam_rsa_free_key(struct caam_rsa_key *key) + { + kzfree(key->d); ++ kzfree(key->p); ++ kzfree(key->q); ++ kzfree(key->dp); ++ kzfree(key->dq); ++ kzfree(key->qinv); ++ kzfree(key->tmp1); ++ kzfree(key->tmp2); + kfree(key->e); + kfree(key->n); +- key->d = NULL; +- 
key->e = NULL; +- key->n = NULL; +- key->d_sz = 0; +- key->e_sz = 0; +- key->n_sz = 0; ++ memset(key, 0, sizeof(*key)); ++} ++ ++static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes) ++{ ++ while (!**ptr && *nbytes) { ++ (*ptr)++; ++ (*nbytes)--; ++ } ++} ++ ++/** ++ * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members. ++ * dP, dQ and qInv could decode to less than corresponding p, q length, as the ++ * BER-encoding requires that the minimum number of bytes be used to encode the ++ * integer. dP, dQ, qInv decoded values have to be zero-padded to appropriate ++ * length. ++ * ++ * @ptr : pointer to {dP, dQ, qInv} CRT member ++ * @nbytes: length in bytes of {dP, dQ, qInv} CRT member ++ * @dstlen: length in bytes of corresponding p or q prime factor ++ */ ++static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen) ++{ ++ u8 *dst; ++ ++ caam_rsa_drop_leading_zeros(&ptr, &nbytes); ++ if (!nbytes) ++ return NULL; ++ ++ dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL); ++ if (!dst) ++ return NULL; ++ ++ memcpy(dst + (dstlen - nbytes), ptr, nbytes); ++ ++ return dst; + } + + /** +@@ -370,10 +732,9 @@ static inline u8 *caam_read_raw_data(con + { + u8 *val; + +- while (!*buf && *nbytes) { +- buf++; +- (*nbytes)--; +- } ++ caam_rsa_drop_leading_zeros(&buf, nbytes); ++ if (!*nbytes) ++ return NULL; + + val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL); + if (!val) +@@ -395,7 +756,7 @@ static int caam_rsa_set_pub_key(struct c + unsigned int keylen) + { + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); +- struct rsa_key raw_key = {0}; ++ struct rsa_key raw_key = {NULL}; + struct caam_rsa_key *rsa_key = &ctx->key; + int ret; + +@@ -437,11 +798,69 @@ err: + return -ENOMEM; + } + ++static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx, ++ struct rsa_key *raw_key) ++{ ++ struct caam_rsa_key *rsa_key = &ctx->key; ++ size_t p_sz = raw_key->p_sz; ++ size_t q_sz = raw_key->q_sz; ++ ++ rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz); ++ if (!rsa_key->p) ++ return; ++ rsa_key->p_sz = p_sz; ++ ++ rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz); ++ if (!rsa_key->q) ++ goto free_p; ++ rsa_key->q_sz = q_sz; ++ ++ rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL); ++ if (!rsa_key->tmp1) ++ goto free_q; ++ ++ rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL); ++ if (!rsa_key->tmp2) ++ goto free_tmp1; ++ ++ rsa_key->priv_form = FORM2; ++ ++ rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz); ++ if (!rsa_key->dp) ++ goto free_tmp2; ++ ++ rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz); ++ if (!rsa_key->dq) ++ goto free_dp; ++ ++ rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz, ++ q_sz); ++ if (!rsa_key->qinv) ++ goto free_dq; ++ ++ rsa_key->priv_form = FORM3; ++ ++ return; ++ ++free_dq: ++ kzfree(rsa_key->dq); ++free_dp: ++ kzfree(rsa_key->dp); ++free_tmp2: ++ kzfree(rsa_key->tmp2); ++free_tmp1: ++ kzfree(rsa_key->tmp1); ++free_q: ++ kzfree(rsa_key->q); ++free_p: ++ kzfree(rsa_key->p); ++} ++ + static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) + { + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); +- struct rsa_key raw_key = {0}; ++ struct rsa_key raw_key = {NULL}; + struct caam_rsa_key *rsa_key = &ctx->key; + int ret; + +@@ -483,6 +902,8 @@ static int caam_rsa_set_priv_key(struct + memcpy(rsa_key->d, raw_key.d, raw_key.d_sz); + memcpy(rsa_key->e, raw_key.e, raw_key.e_sz); + ++ caam_rsa_set_priv_key_form(ctx, &raw_key); ++ + return 0; + + 
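caam_read_rsa_crt() above exists because BER encoding strips leading zero bytes from integers, while the CAAM protocol data blocks expect dP, dQ and qInv buffers exactly as long as the corresponding prime. The same logic in self-contained userspace C (calloc() standing in for kzalloc(GFP_DMA | GFP_KERNEL)):

    #include <stdlib.h>
    #include <string.h>

    static unsigned char *read_crt_member(const unsigned char *ptr,
                                          size_t nbytes, size_t dstlen)
    {
            unsigned char *dst;

            while (nbytes && !*ptr) {       /* drop leading zeros */
                    ptr++;
                    nbytes--;
            }
            if (!nbytes)
                    return NULL;            /* degenerate all-zero value */

            dst = calloc(1, dstlen);        /* zero-filled destination */
            if (!dst)
                    return NULL;

            /* right-align the value so it is zero-padded out to dstlen */
            memcpy(dst + (dstlen - nbytes), ptr, nbytes);
            return dst;
    }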
+ err:
+--- a/drivers/crypto/caam/caampkc.h
++++ b/drivers/crypto/caam/caampkc.h
+@@ -13,21 +13,75 @@
+ #include "pdb.h"
+ 
+ /**
++ * caam_priv_key_form - CAAM RSA private key representation
++ * CAAM RSA private key may have either of three forms.
++ *
++ * 1. The first representation consists of the pair (n, d), where the
++ *    components have the following meanings:
++ *       n      the RSA modulus
++ *       d      the RSA private exponent
++ *
++ * 2. The second representation consists of the triplet (p, q, d), where the
++ *    components have the following meanings:
++ *       p      the first prime factor of the RSA modulus n
++ *       q      the second prime factor of the RSA modulus n
++ *       d      the RSA private exponent
++ *
++ * 3. The third representation consists of the quintuple (p, q, dP, dQ, qInv),
++ *    where the components have the following meanings:
++ *       p      the first prime factor of the RSA modulus n
++ *       q      the second prime factor of the RSA modulus n
++ *       dP     the first factor's CRT exponent
++ *       dQ     the second factor's CRT exponent
++ *       qInv   the (first) CRT coefficient
++ *
++ * The benefit of using the third or the second key form is lower computational
++ * cost for the decryption and signature operations.
++ */
++enum caam_priv_key_form {
++	FORM1,
++	FORM2,
++	FORM3
++};
++
++/**
+  * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone.
+  * @n           : RSA modulus raw byte stream
+  * @e           : RSA public exponent raw byte stream
+  * @d           : RSA private exponent raw byte stream
++ * @p           : RSA prime factor p of RSA modulus n
++ * @q           : RSA prime factor q of RSA modulus n
++ * @dp          : RSA CRT exponent of p
++ * @dq          : RSA CRT exponent of q
++ * @qinv        : RSA CRT coefficient
++ * @tmp1        : CAAM uses this temporary buffer as internal state buffer.
++ *                It is assumed to be as long as p.
++ * @tmp2        : CAAM uses this temporary buffer as internal state buffer.
++ *                It is assumed to be as long as q.
+  * @n_sz        : length in bytes of RSA modulus n
+  * @e_sz        : length in bytes of RSA public exponent
+  * @d_sz        : length in bytes of RSA private exponent
++ * @p_sz        : length in bytes of RSA prime factor p of RSA modulus n
++ * @q_sz        : length in bytes of RSA prime factor q of RSA modulus n
++ * @priv_form   : CAAM RSA private key representation
+  */
+ struct caam_rsa_key {
+ 	u8 *n;
+ 	u8 *e;
+ 	u8 *d;
++	u8 *p;
++	u8 *q;
++	u8 *dp;
++	u8 *dq;
++	u8 *qinv;
++	u8 *tmp1;
++	u8 *tmp2;
+ 	size_t n_sz;
+ 	size_t e_sz;
+ 	size_t d_sz;
++	size_t p_sz;
++	size_t q_sz;
++	enum caam_priv_key_form priv_form;
+ };
+ 
+ /**
+@@ -59,6 +113,8 @@ struct rsa_edesc {
+ 	union {
+ 		struct rsa_pub_pdb pub;
+ 		struct rsa_priv_f1_pdb priv_f1;
++		struct rsa_priv_f2_pdb priv_f2;
++		struct rsa_priv_f3_pdb priv_f3;
+ 	} pdb;
+ 	u32 hw_desc[];
+ };
+@@ -66,5 +122,7 @@ struct rsa_edesc {
+ /* Descriptor construction primitives.
*/ + void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb); + void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb); ++void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb); ++void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb); + + #endif +--- a/drivers/crypto/caam/caamrng.c ++++ b/drivers/crypto/caam/caamrng.c +@@ -52,7 +52,7 @@ + + /* length of descriptors */ + #define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2) +-#define DESC_RNG_LEN (4 * CAAM_CMD_SZ) ++#define DESC_RNG_LEN (3 * CAAM_CMD_SZ) + + /* Buffer, its dma address and lock */ + struct buf_data { +@@ -100,8 +100,7 @@ static void rng_done(struct device *jrde + { + struct buf_data *bd; + +- bd = (struct buf_data *)((char *)desc - +- offsetof(struct buf_data, hw_desc)); ++ bd = container_of(desc, struct buf_data, hw_desc[0]); + + if (err) + caam_jr_strstatus(jrdev, err); +@@ -196,9 +195,6 @@ static inline int rng_create_sh_desc(str + + init_sh_desc(desc, HDR_SHARE_SERIAL); + +- /* Propagate errors from shared to job descriptor */ +- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); +- + /* Generate random bytes */ + append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG); + +@@ -289,11 +285,7 @@ static int caam_init_rng(struct caam_rng + if (err) + return err; + +- err = caam_init_buf(ctx, 1); +- if (err) +- return err; +- +- return 0; ++ return caam_init_buf(ctx, 1); + } + + static struct hwrng caam_rng = { +@@ -351,7 +343,7 @@ static int __init caam_rng_init(void) + pr_err("Job Ring Device allocation for transform failed\n"); + return PTR_ERR(dev); + } +- rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA); ++ rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL); + if (!rng_ctx) { + err = -ENOMEM; + goto free_caam_alloc; +--- a/drivers/crypto/caam/compat.h ++++ b/drivers/crypto/caam/compat.h +@@ -16,6 +16,7 @@ + #include + #include + #include ++#include + #include + #include + #include +--- a/drivers/crypto/caam/ctrl.c ++++ b/drivers/crypto/caam/ctrl.c +@@ -2,40 +2,41 @@ + * Controller-level driver, kernel property detection, initialization + * + * Copyright 2008-2012 Freescale Semiconductor, Inc. ++ * Copyright 2017 NXP + */ + + #include + #include + #include ++#include + + #include "compat.h" + #include "regs.h" + #include "intern.h" + #include "jr.h" + #include "desc_constr.h" +-#include "error.h" + #include "ctrl.h" + + bool caam_little_end; + EXPORT_SYMBOL(caam_little_end); ++bool caam_imx; ++EXPORT_SYMBOL(caam_imx); ++bool caam_dpaa2; ++EXPORT_SYMBOL(caam_dpaa2); ++ ++#ifdef CONFIG_CAAM_QI ++#include "qi.h" ++#endif + + /* + * i.MX targets tend to have clock control subsystems that can + * enable/disable clocking to our device. + */ +-#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX +-static inline struct clk *caam_drv_identify_clk(struct device *dev, +- char *clk_name) +-{ +- return devm_clk_get(dev, clk_name); +-} +-#else + static inline struct clk *caam_drv_identify_clk(struct device *dev, + char *clk_name) + { +- return NULL; ++ return caam_imx ? 
devm_clk_get(dev, clk_name) : NULL; + } +-#endif + + /* + * Descriptor to instantiate RNG State Handle 0 in normal mode and +@@ -270,7 +271,7 @@ static int deinstantiate_rng(struct devi + /* + * If the corresponding bit is set, then it means the state + * handle was initialized by us, and thus it needs to be +- * deintialized as well ++ * deinitialized as well + */ + if ((1 << sh_idx) & state_handle_mask) { + /* +@@ -303,20 +304,24 @@ static int caam_remove(struct platform_d + struct device *ctrldev; + struct caam_drv_private *ctrlpriv; + struct caam_ctrl __iomem *ctrl; +- int ring; + + ctrldev = &pdev->dev; + ctrlpriv = dev_get_drvdata(ctrldev); + ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; + +- /* Remove platform devices for JobRs */ +- for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) { +- if (ctrlpriv->jrpdev[ring]) +- of_device_unregister(ctrlpriv->jrpdev[ring]); +- } ++ /* Remove platform devices under the crypto node */ ++ of_platform_depopulate(ctrldev); ++ ++#ifdef CONFIG_CAAM_QI ++ if (ctrlpriv->qidev) ++ caam_qi_shutdown(ctrlpriv->qidev); ++#endif + +- /* De-initialize RNG state handles initialized by this driver. */ +- if (ctrlpriv->rng4_sh_init) ++ /* ++ * De-initialize RNG state handles initialized by this driver. ++ * In case of DPAA 2.x, RNG is managed by MC firmware. ++ */ ++ if (!caam_dpaa2 && ctrlpriv->rng4_sh_init) + deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init); + + /* Shut down debug views */ +@@ -331,8 +336,8 @@ static int caam_remove(struct platform_d + clk_disable_unprepare(ctrlpriv->caam_ipg); + clk_disable_unprepare(ctrlpriv->caam_mem); + clk_disable_unprepare(ctrlpriv->caam_aclk); +- clk_disable_unprepare(ctrlpriv->caam_emi_slow); +- ++ if (ctrlpriv->caam_emi_slow) ++ clk_disable_unprepare(ctrlpriv->caam_emi_slow); + return 0; + } + +@@ -366,11 +371,8 @@ static void kick_trng(struct platform_de + */ + val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK) + >> RTSDCTL_ENT_DLY_SHIFT; +- if (ent_delay <= val) { +- /* put RNG4 into run mode */ +- clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0); +- return; +- } ++ if (ent_delay <= val) ++ goto start_rng; + + val = rd_reg32(&r4tst->rtsdctl); + val = (val & ~RTSDCTL_ENT_DLY_MASK) | +@@ -382,15 +384,12 @@ static void kick_trng(struct platform_de + wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE); + /* read the control register */ + val = rd_reg32(&r4tst->rtmctl); ++start_rng: + /* + * select raw sampling in both entropy shifter +- * and statistical checker ++ * and statistical checker; ; put RNG4 into run mode + */ +- clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC); +- /* put RNG4 into run mode */ +- clrsetbits_32(&val, RTMCTL_PRGM, 0); +- /* write back the control register */ +- wr_reg32(&r4tst->rtmctl, val); ++ clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC); + } + + /** +@@ -411,28 +410,26 @@ int caam_get_era(void) + } + EXPORT_SYMBOL(caam_get_era); + +-#ifdef CONFIG_DEBUG_FS +-static int caam_debugfs_u64_get(void *data, u64 *val) +-{ +- *val = caam64_to_cpu(*(u64 *)data); +- return 0; +-} +- +-static int caam_debugfs_u32_get(void *data, u64 *val) +-{ +- *val = caam32_to_cpu(*(u32 *)data); +- return 0; +-} +- +-DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n"); +-DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n"); +-#endif ++static const struct of_device_id caam_match[] = { ++ { ++ .compatible = "fsl,sec-v4.0", ++ }, ++ { ++ .compatible = "fsl,sec4.0", ++ }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, caam_match); + + /* Probe 
routine for CAAM top (controller) level */ + static int caam_probe(struct platform_device *pdev) + { +- int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; ++ int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; + u64 caam_id; ++ static const struct soc_device_attribute imx_soc[] = { ++ {.family = "Freescale i.MX"}, ++ {}, ++ }; + struct device *dev; + struct device_node *nprop, *np; + struct caam_ctrl __iomem *ctrl; +@@ -452,9 +449,10 @@ static int caam_probe(struct platform_de + + dev = &pdev->dev; + dev_set_drvdata(dev, ctrlpriv); +- ctrlpriv->pdev = pdev; + nprop = pdev->dev.of_node; + ++ caam_imx = (bool)soc_device_match(imx_soc); ++ + /* Enable clocking */ + clk = caam_drv_identify_clk(&pdev->dev, "ipg"); + if (IS_ERR(clk)) { +@@ -483,14 +481,16 @@ static int caam_probe(struct platform_de + } + ctrlpriv->caam_aclk = clk; + +- clk = caam_drv_identify_clk(&pdev->dev, "emi_slow"); +- if (IS_ERR(clk)) { +- ret = PTR_ERR(clk); +- dev_err(&pdev->dev, +- "can't identify CAAM emi_slow clk: %d\n", ret); +- return ret; ++ if (!of_machine_is_compatible("fsl,imx6ul")) { ++ clk = caam_drv_identify_clk(&pdev->dev, "emi_slow"); ++ if (IS_ERR(clk)) { ++ ret = PTR_ERR(clk); ++ dev_err(&pdev->dev, ++ "can't identify CAAM emi_slow clk: %d\n", ret); ++ return ret; ++ } ++ ctrlpriv->caam_emi_slow = clk; + } +- ctrlpriv->caam_emi_slow = clk; + + ret = clk_prepare_enable(ctrlpriv->caam_ipg); + if (ret < 0) { +@@ -511,11 +511,13 @@ static int caam_probe(struct platform_de + goto disable_caam_mem; + } + +- ret = clk_prepare_enable(ctrlpriv->caam_emi_slow); +- if (ret < 0) { +- dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n", +- ret); +- goto disable_caam_aclk; ++ if (ctrlpriv->caam_emi_slow) { ++ ret = clk_prepare_enable(ctrlpriv->caam_emi_slow); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n", ++ ret); ++ goto disable_caam_aclk; ++ } + } + + /* Get configuration properties from device tree */ +@@ -542,13 +544,13 @@ static int caam_probe(struct platform_de + else + BLOCK_OFFSET = PG_SIZE_64K; + +- ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl; +- ctrlpriv->assure = (struct caam_assurance __force *) +- ((uint8_t *)ctrl + ++ ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl; ++ ctrlpriv->assure = (struct caam_assurance __iomem __force *) ++ ((__force uint8_t *)ctrl + + BLOCK_OFFSET * ASSURE_BLOCK_NUMBER + ); +- ctrlpriv->deco = (struct caam_deco __force *) +- ((uint8_t *)ctrl + ++ ctrlpriv->deco = (struct caam_deco __iomem __force *) ++ ((__force uint8_t *)ctrl + + BLOCK_OFFSET * DECO_BLOCK_NUMBER + ); + +@@ -557,12 +559,17 @@ static int caam_probe(struct platform_de + + /* + * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel, +- * long pointers in master configuration register ++ * long pointers in master configuration register. ++ * In case of DPAA 2.x, Management Complex firmware performs ++ * the configuration. + */ +- clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR, +- MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF | +- MCFGR_WDENABLE | MCFGR_LARGE_BURST | +- (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0)); ++ caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2); ++ if (!caam_dpaa2) ++ clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR, ++ MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF | ++ MCFGR_WDENABLE | MCFGR_LARGE_BURST | ++ (sizeof(dma_addr_t) == sizeof(u64) ? 
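A few hunks below, probe starts checking the result of dma_set_mask_and_coherent() and picks the mask per platform: 49-bit addressing for DPAA 2.x parts, 40-bit for SEC v5.0, 36-bit for the remaining 64-bit configurations, and 32-bit otherwise. Factored out for clarity (hypothetical helper; the patch keeps this logic inline in caam_probe()):

    static int caam_set_dma_mask(struct device *dev, bool dpaa2, bool sec_v5_0)
    {
            if (sizeof(dma_addr_t) == sizeof(u64)) {
                    if (dpaa2)
                            return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
                    if (sec_v5_0)
                            return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
                    return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
            }
            return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
    }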
++ MCFGR_LONG_PTR : 0)); + + /* + * Read the Compile Time paramters and SCFGR to determine +@@ -590,64 +597,67 @@ static int caam_probe(struct platform_de + JRSTART_JR1_START | JRSTART_JR2_START | + JRSTART_JR3_START); + +- if (sizeof(dma_addr_t) == sizeof(u64)) +- if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) +- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); ++ if (sizeof(dma_addr_t) == sizeof(u64)) { ++ if (caam_dpaa2) ++ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49)); ++ else if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) ++ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); + else +- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36)); +- else +- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); +- +- /* +- * Detect and enable JobRs +- * First, find out how many ring spec'ed, allocate references +- * for all, then go probe each one. +- */ +- rspec = 0; +- for_each_available_child_of_node(nprop, np) +- if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || +- of_device_is_compatible(np, "fsl,sec4.0-job-ring")) +- rspec++; ++ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36)); ++ } else { ++ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); ++ } ++ if (ret) { ++ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); ++ goto iounmap_ctrl; ++ } + +- ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec, +- sizeof(*ctrlpriv->jrpdev), GFP_KERNEL); +- if (ctrlpriv->jrpdev == NULL) { +- ret = -ENOMEM; ++ ret = of_platform_populate(nprop, caam_match, NULL, dev); ++ if (ret) { ++ dev_err(dev, "JR platform devices creation error\n"); + goto iounmap_ctrl; + } + ++#ifdef CONFIG_DEBUG_FS ++ /* ++ * FIXME: needs better naming distinction, as some amalgamation of ++ * "caam" and nprop->full_name. The OF name isn't distinctive, ++ * but does separate instances ++ */ ++ perfmon = (struct caam_perfmon __force *)&ctrl->perfmon; ++ ++ ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL); ++ ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root); ++#endif + ring = 0; +- ctrlpriv->total_jobrs = 0; + for_each_available_child_of_node(nprop, np) + if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || + of_device_is_compatible(np, "fsl,sec4.0-job-ring")) { +- ctrlpriv->jrpdev[ring] = +- of_platform_device_create(np, NULL, dev); +- if (!ctrlpriv->jrpdev[ring]) { +- pr_warn("JR%d Platform device creation error\n", +- ring); +- continue; +- } +- ctrlpriv->jr[ring] = (struct caam_job_ring __force *) +- ((uint8_t *)ctrl + ++ ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *) ++ ((__force uint8_t *)ctrl + + (ring + JR_BLOCK_NUMBER) * + BLOCK_OFFSET + ); + ctrlpriv->total_jobrs++; + ring++; +- } ++ } + +- /* Check to see if QI present. If so, enable */ +- ctrlpriv->qi_present = +- !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) & +- CTPR_MS_QI_MASK); +- if (ctrlpriv->qi_present) { +- ctrlpriv->qi = (struct caam_queue_if __force *) +- ((uint8_t *)ctrl + ++ /* Check to see if (DPAA 1.x) QI present. 
If so, enable */ ++ ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK); ++ if (ctrlpriv->qi_present && !caam_dpaa2) { ++ ctrlpriv->qi = (struct caam_queue_if __iomem __force *) ++ ((__force uint8_t *)ctrl + + BLOCK_OFFSET * QI_BLOCK_NUMBER + ); + /* This is all that's required to physically enable QI */ + wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN); ++ ++ /* If QMAN driver is present, init CAAM-QI backend */ ++#ifdef CONFIG_CAAM_QI ++ ret = caam_qi_init(pdev); ++ if (ret) ++ dev_err(dev, "caam qi i/f init failed: %d\n", ret); ++#endif + } + + /* If no QI and no rings specified, quit and go home */ +@@ -662,8 +672,10 @@ static int caam_probe(struct platform_de + /* + * If SEC has RNG version >= 4 and RNG state handle has not been + * already instantiated, do RNG instantiation ++ * In case of DPAA 2.x, RNG is managed by MC firmware. + */ +- if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) { ++ if (!caam_dpaa2 && ++ (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) { + ctrlpriv->rng4_sh_init = + rd_reg32(&ctrl->r4tst[0].rdsta); + /* +@@ -731,77 +743,46 @@ static int caam_probe(struct platform_de + /* Report "alive" for developer to see */ + dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id, + caam_get_era()); +- dev_info(dev, "job rings = %d, qi = %d\n", +- ctrlpriv->total_jobrs, ctrlpriv->qi_present); ++ dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n", ++ ctrlpriv->total_jobrs, ctrlpriv->qi_present, ++ caam_dpaa2 ? "yes" : "no"); + + #ifdef CONFIG_DEBUG_FS +- /* +- * FIXME: needs better naming distinction, as some amalgamation of +- * "caam" and nprop->full_name. The OF name isn't distinctive, +- * but does separate instances +- */ +- perfmon = (struct caam_perfmon __force *)&ctrl->perfmon; +- +- ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL); +- ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root); +- +- /* Controller-level - performance monitor counters */ +- +- ctrlpriv->ctl_rq_dequeued = +- debugfs_create_file("rq_dequeued", +- S_IRUSR | S_IRGRP | S_IROTH, +- ctrlpriv->ctl, &perfmon->req_dequeued, +- &caam_fops_u64_ro); +- ctrlpriv->ctl_ob_enc_req = +- debugfs_create_file("ob_rq_encrypted", +- S_IRUSR | S_IRGRP | S_IROTH, +- ctrlpriv->ctl, &perfmon->ob_enc_req, +- &caam_fops_u64_ro); +- ctrlpriv->ctl_ib_dec_req = +- debugfs_create_file("ib_rq_decrypted", +- S_IRUSR | S_IRGRP | S_IROTH, +- ctrlpriv->ctl, &perfmon->ib_dec_req, +- &caam_fops_u64_ro); +- ctrlpriv->ctl_ob_enc_bytes = +- debugfs_create_file("ob_bytes_encrypted", +- S_IRUSR | S_IRGRP | S_IROTH, +- ctrlpriv->ctl, &perfmon->ob_enc_bytes, +- &caam_fops_u64_ro); +- ctrlpriv->ctl_ob_prot_bytes = +- debugfs_create_file("ob_bytes_protected", +- S_IRUSR | S_IRGRP | S_IROTH, +- ctrlpriv->ctl, &perfmon->ob_prot_bytes, +- &caam_fops_u64_ro); +- ctrlpriv->ctl_ib_dec_bytes = +- debugfs_create_file("ib_bytes_decrypted", +- S_IRUSR | S_IRGRP | S_IROTH, +- ctrlpriv->ctl, &perfmon->ib_dec_bytes, +- &caam_fops_u64_ro); +- ctrlpriv->ctl_ib_valid_bytes = +- debugfs_create_file("ib_bytes_validated", +- S_IRUSR | S_IRGRP | S_IROTH, +- ctrlpriv->ctl, &perfmon->ib_valid_bytes, +- &caam_fops_u64_ro); ++ debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH, ++ ctrlpriv->ctl, &perfmon->req_dequeued, ++ &caam_fops_u64_ro); ++ debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH, ++ ctrlpriv->ctl, &perfmon->ob_enc_req, ++ &caam_fops_u64_ro); ++ debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH, ++ ctrlpriv->ctl, &perfmon->ib_dec_req, ++ 
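The debugfs rework above also stops keeping a dentry per counter file: debugfs_create_file() is now called purely for its side effect. The caam_fops_u64_ro/u32_ro argument is the standard DEFINE_SIMPLE_ATTRIBUTE pattern that the removed block near the top of ctrl.c defined (the call sites still reference it, so after this patch the definition presumably lives elsewhere in the driver). The pattern in isolation, kernel context assumed:

    static int model_u64_get(void *data, u64 *val)
    {
            *val = *(u64 *)data;    /* the driver converts via caam64_to_cpu() */
            return 0;
    }
    DEFINE_SIMPLE_ATTRIBUTE(model_fops_u64_ro, model_u64_get, NULL, "%llu\n");

    /* expose a read-only counter without storing the returned dentry: */
    /* debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
     *                     parent, &perfmon->req_dequeued, &model_fops_u64_ro); */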
&caam_fops_u64_ro); ++ debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH, ++ ctrlpriv->ctl, &perfmon->ob_enc_bytes, ++ &caam_fops_u64_ro); ++ debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH, ++ ctrlpriv->ctl, &perfmon->ob_prot_bytes, ++ &caam_fops_u64_ro); ++ debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH, ++ ctrlpriv->ctl, &perfmon->ib_dec_bytes, ++ &caam_fops_u64_ro); ++ debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH, ++ ctrlpriv->ctl, &perfmon->ib_valid_bytes, ++ &caam_fops_u64_ro); + + /* Controller level - global status values */ +- ctrlpriv->ctl_faultaddr = +- debugfs_create_file("fault_addr", +- S_IRUSR | S_IRGRP | S_IROTH, +- ctrlpriv->ctl, &perfmon->faultaddr, +- &caam_fops_u32_ro); +- ctrlpriv->ctl_faultdetail = +- debugfs_create_file("fault_detail", +- S_IRUSR | S_IRGRP | S_IROTH, +- ctrlpriv->ctl, &perfmon->faultdetail, +- &caam_fops_u32_ro); +- ctrlpriv->ctl_faultstatus = +- debugfs_create_file("fault_status", +- S_IRUSR | S_IRGRP | S_IROTH, +- ctrlpriv->ctl, &perfmon->status, +- &caam_fops_u32_ro); ++ debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH, ++ ctrlpriv->ctl, &perfmon->faultaddr, ++ &caam_fops_u32_ro); ++ debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH, ++ ctrlpriv->ctl, &perfmon->faultdetail, ++ &caam_fops_u32_ro); ++ debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH, ++ ctrlpriv->ctl, &perfmon->status, ++ &caam_fops_u32_ro); + + /* Internal covering keys (useful in non-secure mode only) */ +- ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0]; ++ ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0]; + ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32); + ctrlpriv->ctl_kek = debugfs_create_blob("kek", + S_IRUSR | +@@ -809,7 +790,7 @@ static int caam_probe(struct platform_de + ctrlpriv->ctl, + &ctrlpriv->ctl_kek_wrap); + +- ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0]; ++ ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0]; + ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32); + ctrlpriv->ctl_tkek = debugfs_create_blob("tkek", + S_IRUSR | +@@ -817,7 +798,7 @@ static int caam_probe(struct platform_de + ctrlpriv->ctl, + &ctrlpriv->ctl_tkek_wrap); + +- ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0]; ++ ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0]; + ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32); + ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk", + S_IRUSR | +@@ -828,13 +809,17 @@ static int caam_probe(struct platform_de + return 0; + + caam_remove: ++#ifdef CONFIG_DEBUG_FS ++ debugfs_remove_recursive(ctrlpriv->dfs_root); ++#endif + caam_remove(pdev); + return ret; + + iounmap_ctrl: + iounmap(ctrl); + disable_caam_emi_slow: +- clk_disable_unprepare(ctrlpriv->caam_emi_slow); ++ if (ctrlpriv->caam_emi_slow) ++ clk_disable_unprepare(ctrlpriv->caam_emi_slow); + disable_caam_aclk: + clk_disable_unprepare(ctrlpriv->caam_aclk); + disable_caam_mem: +@@ -844,17 +829,6 @@ disable_caam_ipg: + return ret; + } + +-static struct of_device_id caam_match[] = { +- { +- .compatible = "fsl,sec-v4.0", +- }, +- { +- .compatible = "fsl,sec4.0", +- }, +- {}, +-}; +-MODULE_DEVICE_TABLE(of, caam_match); +- + static struct platform_driver caam_driver = { + .driver = { + .name = "caam", +--- a/drivers/crypto/caam/ctrl.h ++++ b/drivers/crypto/caam/ctrl.h +@@ -10,4 +10,6 @@ + /* Prototypes for backend-level services exposed to APIs */ + int caam_get_era(void); 
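The desc.h hunks further down fix several field masks to be stored pre-shifted, so each lines up with its _SHIFT partner and a test or extraction needs no extra shifting of the mask itself. With HDR_START_IDX as the example (values taken from the hunk):

    #define HDR_START_IDX_SHIFT     16
    #define HDR_START_IDX_MASK      (0x3f << HDR_START_IDX_SHIFT)   /* was a bare 0x3f */

    /* extracting the start index with the corrected definition: */
    /* start_idx = (hdr & HDR_START_IDX_MASK) >> HDR_START_IDX_SHIFT; */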
+ ++extern bool caam_dpaa2; ++ + #endif /* CTRL_H */ +--- a/drivers/crypto/caam/desc.h ++++ b/drivers/crypto/caam/desc.h +@@ -22,12 +22,6 @@ + #define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */ + #define SEC4_SG_OFFSET_MASK 0x00001fff + +-struct sec4_sg_entry { +- u64 ptr; +- u32 len; +- u32 bpid_offset; +-}; +- + /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */ + #define MAX_CAAM_DESCSIZE 64 + +@@ -47,6 +41,7 @@ struct sec4_sg_entry { + #define CMD_SEQ_LOAD (0x03 << CMD_SHIFT) + #define CMD_FIFO_LOAD (0x04 << CMD_SHIFT) + #define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT) ++#define CMD_MOVEB (0x07 << CMD_SHIFT) + #define CMD_STORE (0x0a << CMD_SHIFT) + #define CMD_SEQ_STORE (0x0b << CMD_SHIFT) + #define CMD_FIFO_STORE (0x0c << CMD_SHIFT) +@@ -90,8 +85,8 @@ struct sec4_sg_entry { + #define HDR_ZRO 0x00008000 + + /* Start Index or SharedDesc Length */ +-#define HDR_START_IDX_MASK 0x3f + #define HDR_START_IDX_SHIFT 16 ++#define HDR_START_IDX_MASK (0x3f << HDR_START_IDX_SHIFT) + + /* If shared descriptor header, 6-bit length */ + #define HDR_DESCLEN_SHR_MASK 0x3f +@@ -121,10 +116,10 @@ struct sec4_sg_entry { + #define HDR_PROP_DNR 0x00000800 + + /* JobDesc/SharedDesc share property */ +-#define HDR_SD_SHARE_MASK 0x03 + #define HDR_SD_SHARE_SHIFT 8 +-#define HDR_JD_SHARE_MASK 0x07 ++#define HDR_SD_SHARE_MASK (0x03 << HDR_SD_SHARE_SHIFT) + #define HDR_JD_SHARE_SHIFT 8 ++#define HDR_JD_SHARE_MASK (0x07 << HDR_JD_SHARE_SHIFT) + + #define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT) + #define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT) +@@ -235,7 +230,7 @@ struct sec4_sg_entry { + #define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT) + #define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT) + #define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT) +-#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT) ++#define LDST_SRCDST_WORD_CLASS1_IV_SZ (0x0c << LDST_SRCDST_SHIFT) + #define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT) + #define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT) + #define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT) +@@ -400,7 +395,7 @@ struct sec4_sg_entry { + #define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT) + #define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT) + #define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT) +-#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT) ++#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT) + #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT) + #define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT) + #define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT) +@@ -1107,8 +1102,8 @@ struct sec4_sg_entry { + /* For non-protocol/alg-only op commands */ + #define OP_ALG_TYPE_SHIFT 24 + #define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT) +-#define OP_ALG_TYPE_CLASS1 2 +-#define OP_ALG_TYPE_CLASS2 4 ++#define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT) ++#define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT) + + #define OP_ALG_ALGSEL_SHIFT 16 + #define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT) +@@ -1249,7 +1244,7 @@ struct sec4_sg_entry { + #define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f + + /* PKHA mode copy-memory functions */ +-#define OP_ALG_PKMODE_SRC_REG_SHIFT 13 ++#define OP_ALG_PKMODE_SRC_REG_SHIFT 17 + #define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT) + #define OP_ALG_PKMODE_DST_REG_SHIFT 10 + #define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT) +@@ -1445,7 +1440,7 
@@ struct sec4_sg_entry { + #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT) + #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT) + #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT) +-#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT) ++#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT) + #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT) + #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT) + #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT) +@@ -1629,4 +1624,31 @@ struct sec4_sg_entry { + /* Frame Descriptor Command for Replacement Job Descriptor */ + #define FD_CMD_REPLACE_JOB_DESC 0x20000000 + ++/* CHA Control Register bits */ ++#define CCTRL_RESET_CHA_ALL 0x1 ++#define CCTRL_RESET_CHA_AESA 0x2 ++#define CCTRL_RESET_CHA_DESA 0x4 ++#define CCTRL_RESET_CHA_AFHA 0x8 ++#define CCTRL_RESET_CHA_KFHA 0x10 ++#define CCTRL_RESET_CHA_SF8A 0x20 ++#define CCTRL_RESET_CHA_PKHA 0x40 ++#define CCTRL_RESET_CHA_MDHA 0x80 ++#define CCTRL_RESET_CHA_CRCA 0x100 ++#define CCTRL_RESET_CHA_RNG 0x200 ++#define CCTRL_RESET_CHA_SF9A 0x400 ++#define CCTRL_RESET_CHA_ZUCE 0x800 ++#define CCTRL_RESET_CHA_ZUCA 0x1000 ++#define CCTRL_UNLOAD_PK_A0 0x10000 ++#define CCTRL_UNLOAD_PK_A1 0x20000 ++#define CCTRL_UNLOAD_PK_A2 0x40000 ++#define CCTRL_UNLOAD_PK_A3 0x80000 ++#define CCTRL_UNLOAD_PK_B0 0x100000 ++#define CCTRL_UNLOAD_PK_B1 0x200000 ++#define CCTRL_UNLOAD_PK_B2 0x400000 ++#define CCTRL_UNLOAD_PK_B3 0x800000 ++#define CCTRL_UNLOAD_PK_N 0x1000000 ++#define CCTRL_UNLOAD_PK_A 0x4000000 ++#define CCTRL_UNLOAD_PK_B 0x8000000 ++#define CCTRL_UNLOAD_SBOX 0x10000000 ++ + #endif /* DESC_H */ +--- a/drivers/crypto/caam/desc_constr.h ++++ b/drivers/crypto/caam/desc_constr.h +@@ -4,6 +4,9 @@ + * Copyright 2008-2012 Freescale Semiconductor, Inc. + */ + ++#ifndef DESC_CONSTR_H ++#define DESC_CONSTR_H ++ + #include "desc.h" + #include "regs.h" + +@@ -33,38 +36,39 @@ + + extern bool caam_little_end; + +-static inline int desc_len(u32 *desc) ++static inline int desc_len(u32 * const desc) + { + return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK; + } + +-static inline int desc_bytes(void *desc) ++static inline int desc_bytes(void * const desc) + { + return desc_len(desc) * CAAM_CMD_SZ; + } + +-static inline u32 *desc_end(u32 *desc) ++static inline u32 *desc_end(u32 * const desc) + { + return desc + desc_len(desc); + } + +-static inline void *sh_desc_pdb(u32 *desc) ++static inline void *sh_desc_pdb(u32 * const desc) + { + return desc + 1; + } + +-static inline void init_desc(u32 *desc, u32 options) ++static inline void init_desc(u32 * const desc, u32 options) + { + *desc = cpu_to_caam32((options | HDR_ONE) + 1); + } + +-static inline void init_sh_desc(u32 *desc, u32 options) ++static inline void init_sh_desc(u32 * const desc, u32 options) + { + PRINT_POS; + init_desc(desc, CMD_SHARED_DESC_HDR | options); + } + +-static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes) ++static inline void init_sh_desc_pdb(u32 * const desc, u32 options, ++ size_t pdb_bytes) + { + u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ; + +@@ -72,19 +76,20 @@ static inline void init_sh_desc_pdb(u32 + options); + } + +-static inline void init_job_desc(u32 *desc, u32 options) ++static inline void init_job_desc(u32 * const desc, u32 options) + { + init_desc(desc, CMD_DESC_HDR | options); + } + +-static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes) ++static inline void init_job_desc_pdb(u32 * const desc, u32 options, ++ size_t pdb_bytes) + { + u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ; + + 
init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options); + } + +-static inline void append_ptr(u32 *desc, dma_addr_t ptr) ++static inline void append_ptr(u32 * const desc, dma_addr_t ptr) + { + dma_addr_t *offset = (dma_addr_t *)desc_end(desc); + +@@ -94,8 +99,8 @@ static inline void append_ptr(u32 *desc, + CAAM_PTR_SZ / CAAM_CMD_SZ); + } + +-static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len, +- u32 options) ++static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr, ++ int len, u32 options) + { + PRINT_POS; + init_job_desc(desc, HDR_SHARED | options | +@@ -103,7 +108,7 @@ static inline void init_job_desc_shared( + append_ptr(desc, ptr); + } + +-static inline void append_data(u32 *desc, void *data, int len) ++static inline void append_data(u32 * const desc, void *data, int len) + { + u32 *offset = desc_end(desc); + +@@ -114,7 +119,7 @@ static inline void append_data(u32 *desc + (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ); + } + +-static inline void append_cmd(u32 *desc, u32 command) ++static inline void append_cmd(u32 * const desc, u32 command) + { + u32 *cmd = desc_end(desc); + +@@ -125,7 +130,7 @@ static inline void append_cmd(u32 *desc, + + #define append_u32 append_cmd + +-static inline void append_u64(u32 *desc, u64 data) ++static inline void append_u64(u32 * const desc, u64 data) + { + u32 *offset = desc_end(desc); + +@@ -142,14 +147,14 @@ static inline void append_u64(u32 *desc, + } + + /* Write command without affecting header, and return pointer to next word */ +-static inline u32 *write_cmd(u32 *desc, u32 command) ++static inline u32 *write_cmd(u32 * const desc, u32 command) + { + *desc = cpu_to_caam32(command); + + return desc + 1; + } + +-static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len, ++static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len, + u32 command) + { + append_cmd(desc, command | len); +@@ -157,7 +162,7 @@ static inline void append_cmd_ptr(u32 *d + } + + /* Write length after pointer, rather than inside command */ +-static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr, ++static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr, + unsigned int len, u32 command) + { + append_cmd(desc, command); +@@ -166,7 +171,7 @@ static inline void append_cmd_ptr_extlen + append_cmd(desc, len); + } + +-static inline void append_cmd_data(u32 *desc, void *data, int len, ++static inline void append_cmd_data(u32 * const desc, void *data, int len, + u32 command) + { + append_cmd(desc, command | IMMEDIATE | len); +@@ -174,7 +179,7 @@ static inline void append_cmd_data(u32 * + } + + #define APPEND_CMD_RET(cmd, op) \ +-static inline u32 *append_##cmd(u32 *desc, u32 options) \ ++static inline u32 *append_##cmd(u32 * const desc, u32 options) \ + { \ + u32 *cmd = desc_end(desc); \ + PRINT_POS; \ +@@ -183,14 +188,15 @@ static inline u32 *append_##cmd(u32 *des + } + APPEND_CMD_RET(jump, JUMP) + APPEND_CMD_RET(move, MOVE) ++APPEND_CMD_RET(moveb, MOVEB) + +-static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd) ++static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd) + { + *jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) | + (desc_len(desc) - (jump_cmd - desc))); + } + +-static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd) ++static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd) + { + u32 val = caam32_to_cpu(*move_cmd); + +@@ -200,7 +206,7 @@ static inline void set_move_tgt_here(u32 + } + + #define APPEND_CMD(cmd, op) \ 
+-static inline void append_##cmd(u32 *desc, u32 options) \ ++static inline void append_##cmd(u32 * const desc, u32 options) \ + { \ + PRINT_POS; \ + append_cmd(desc, CMD_##op | options); \ +@@ -208,7 +214,8 @@ static inline void append_##cmd(u32 *des + APPEND_CMD(operation, OPERATION) + + #define APPEND_CMD_LEN(cmd, op) \ +-static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \ ++static inline void append_##cmd(u32 * const desc, unsigned int len, \ ++ u32 options) \ + { \ + PRINT_POS; \ + append_cmd(desc, CMD_##op | len | options); \ +@@ -220,8 +227,8 @@ APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_L + APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE) + + #define APPEND_CMD_PTR(cmd, op) \ +-static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \ +- u32 options) \ ++static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \ ++ unsigned int len, u32 options) \ + { \ + PRINT_POS; \ + append_cmd_ptr(desc, ptr, len, CMD_##op | options); \ +@@ -231,8 +238,8 @@ APPEND_CMD_PTR(load, LOAD) + APPEND_CMD_PTR(fifo_load, FIFO_LOAD) + APPEND_CMD_PTR(fifo_store, FIFO_STORE) + +-static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len, +- u32 options) ++static inline void append_store(u32 * const desc, dma_addr_t ptr, ++ unsigned int len, u32 options) + { + u32 cmd_src; + +@@ -249,7 +256,8 @@ static inline void append_store(u32 *des + } + + #define APPEND_SEQ_PTR_INTLEN(cmd, op) \ +-static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \ ++static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \ ++ dma_addr_t ptr, \ + unsigned int len, \ + u32 options) \ + { \ +@@ -263,7 +271,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN) + APPEND_SEQ_PTR_INTLEN(out, OUT) + + #define APPEND_CMD_PTR_TO_IMM(cmd, op) \ +-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \ ++static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \ + unsigned int len, u32 options) \ + { \ + PRINT_POS; \ +@@ -273,7 +281,7 @@ APPEND_CMD_PTR_TO_IMM(load, LOAD); + APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD); + + #define APPEND_CMD_PTR_EXTLEN(cmd, op) \ +-static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \ ++static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \ + unsigned int len, u32 options) \ + { \ + PRINT_POS; \ +@@ -287,7 +295,7 @@ APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_O + * the size of its type + */ + #define APPEND_CMD_PTR_LEN(cmd, op, type) \ +-static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \ ++static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \ + type len, u32 options) \ + { \ + PRINT_POS; \ +@@ -304,7 +312,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_ + * from length of immediate data provided, e.g., split keys + */ + #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \ +-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \ ++static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \ + unsigned int data_len, \ + unsigned int len, u32 options) \ + { \ +@@ -315,7 +323,7 @@ static inline void append_##cmd##_as_imm + APPEND_CMD_PTR_TO_IMM2(key, KEY); + + #define APPEND_CMD_RAW_IMM(cmd, op, type) \ +-static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \ ++static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \ + u32 options) \ + { \ + PRINT_POS; \ +@@ -426,3 +434,66 @@ do { \ + APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data) + #define append_math_rshift_imm_u64(desc, dest, src0, src1, data) 
\ + APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data) ++ ++/** ++ * struct alginfo - Container for algorithm details ++ * @algtype: algorithm selector; for valid values, see documentation of the ++ * functions where it is used. ++ * @keylen: length of the provided algorithm key, in bytes ++ * @keylen_pad: padded length of the provided algorithm key, in bytes ++ * @key: address where algorithm key resides; virtual address if key_inline ++ * is true, dma (bus) address if key_inline is false. ++ * @key_inline: true - key can be inlined in the descriptor; false - key is ++ * referenced by the descriptor ++ */ ++struct alginfo { ++ u32 algtype; ++ unsigned int keylen; ++ unsigned int keylen_pad; ++ union { ++ dma_addr_t key_dma; ++ void *key_virt; ++ }; ++ bool key_inline; ++}; ++ ++/** ++ * desc_inline_query() - Provide indications on which data items can be inlined ++ * and which shall be referenced in a shared descriptor. ++ * @sd_base_len: Shared descriptor base length - bytes consumed by the commands, ++ * excluding the data items to be inlined (or corresponding ++ * pointer if an item is not inlined). Each cnstr_* function that ++ * generates descriptors should have a define mentioning ++ * corresponding length. ++ * @jd_len: Maximum length of the job descriptor(s) that will be used ++ * together with the shared descriptor. ++ * @data_len: Array of lengths of the data items trying to be inlined ++ * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0 ++ * otherwise. ++ * @count: Number of data items (size of @data_len array); must be <= 32 ++ * ++ * Return: 0 if data can be inlined / referenced, negative value if not. If 0, ++ * check @inl_mask for details. ++ */ ++static inline int desc_inline_query(unsigned int sd_base_len, ++ unsigned int jd_len, unsigned int *data_len, ++ u32 *inl_mask, unsigned int count) ++{ ++ int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len); ++ unsigned int i; ++ ++ *inl_mask = 0; ++ for (i = 0; (i < count) && (rem_bytes > 0); i++) { ++ if (rem_bytes - (int)(data_len[i] + ++ (count - i - 1) * CAAM_PTR_SZ) >= 0) { ++ rem_bytes -= data_len[i]; ++ *inl_mask |= (1 << i); ++ } else { ++ rem_bytes -= CAAM_PTR_SZ; ++ } ++ } ++ ++ return (rem_bytes >= 0) ? 0 : -1; ++} ++ ++#endif /* DESC_CONSTR_H */ +--- /dev/null ++++ b/drivers/crypto/caam/dpseci.c +@@ -0,0 +1,859 @@ ++/* ++ * Copyright 2013-2016 Freescale Semiconductor Inc. ++ * Copyright 2017 NXP ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the names of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "../../../drivers/staging/fsl-mc/include/mc-sys.h" ++#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h" ++#include "../../../drivers/staging/fsl-mc/include/dpopr.h" ++#include "dpseci.h" ++#include "dpseci_cmd.h" ++ ++/** ++ * dpseci_open() - Open a control session for the specified object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @dpseci_id: DPSECI unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an already created ++ * object; an object may have been declared in the DPL or by calling the ++ * dpseci_create() function. ++ * This function returns a unique authentication token, associated with the ++ * specific object ID and the specific MC portal; this token must be used in all ++ * subsequent commands for this specific object. ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id, ++ u16 *token) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpseci_cmd_open *cmd_params; ++ int err; ++ ++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN, ++ cmd_flags, ++ 0); ++ cmd_params = (struct dpseci_cmd_open *)cmd.params; ++ cmd_params->dpseci_id = cpu_to_le32(dpseci_id); ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ *token = mc_cmd_hdr_read_token(&cmd); ++ ++ return 0; ++} ++ ++/** ++ * dpseci_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSECI object ++ * ++ * After this function is called, no further operations are allowed on the ++ * object without opening a new control session. ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE, ++ cmd_flags, ++ token); ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpseci_create() - Create the DPSECI object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @dprc_token: Parent container token; '0' for default container ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @cfg: Configuration structure ++ * @obj_id: returned object id ++ * ++ * Create the DPSECI object, allocate required resources and perform required ++ * initialization. ++ * ++ * The object can be created either by declaring it in the DPL file, or by ++ * calling this function. 
++ * ++ * The function accepts an authentication token of a parent container that this ++ * object should be assigned to. The token can be '0' so the object will be ++ * assigned to the default container. ++ * The newly created object can be opened with the returned object id and using ++ * the container's associated tokens and MC portals. ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags, ++ const struct dpseci_cfg *cfg, u32 *obj_id) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpseci_cmd_create *cmd_params; ++ int i, err; ++ ++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE, ++ cmd_flags, ++ dprc_token); ++ cmd_params = (struct dpseci_cmd_create *)cmd.params; ++ for (i = 0; i < 8; i++) ++ cmd_params->priorities[i] = cfg->priorities[i]; ++ cmd_params->num_tx_queues = cfg->num_tx_queues; ++ cmd_params->num_rx_queues = cfg->num_rx_queues; ++ cmd_params->options = cpu_to_le32(cfg->options); ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ *obj_id = mc_cmd_read_object_id(&cmd); ++ ++ return 0; ++} ++ ++/** ++ * dpseci_destroy() - Destroy the DPSECI object and release all its resources ++ * @mc_io: Pointer to MC portal's I/O object ++ * @dprc_token: Parent container token; '0' for default container ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @object_id: The object id; it must be a valid id within the container that ++ * created this object ++ * ++ * The function accepts the authentication token of the parent container that ++ * created the object (not the one that currently owns the object). The object ++ * is searched within parent using the provided 'object_id'. ++ * All tokens to the object must be closed before calling destroy. ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags, ++ u32 object_id) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpseci_cmd_destroy *cmd_params; ++ ++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY, ++ cmd_flags, ++ dprc_token); ++ cmd_params = (struct dpseci_cmd_destroy *)cmd.params; ++ cmd_params->object_id = cpu_to_le32(object_id); ++ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSECI object ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE, ++ cmd_flags, ++ token); ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSECI object ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE, ++ cmd_flags, ++ token); ++ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpseci_is_enabled() - Check if the DPSECI is enabled. 
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSECI object ++ * @en: Returns '1' if object is enabled; '0' otherwise ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpseci_rsp_is_enabled *rsp_params; ++ int err; ++ ++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED, ++ cmd_flags, ++ token); ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params; ++ *en = le32_to_cpu(rsp_params->is_enabled); ++ ++ return 0; ++} ++ ++/** ++ * dpseci_reset() - Reset the DPSECI, returns the object to initial state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSECI object ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET, ++ cmd_flags, ++ token); ++ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpseci_get_irq_enable() - Get overall interrupt state ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSECI object ++ * @irq_index: The interrupt index to configure ++ * @en: Returned Interrupt state - enable = 1, disable = 0 ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ u8 irq_index, u8 *en) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpseci_cmd_irq_enable *cmd_params; ++ struct dpseci_rsp_get_irq_enable *rsp_params; ++ int err; ++ ++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params; ++ cmd_params->irq_index = irq_index; ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params; ++ *en = rsp_params->enable_state; ++ ++ return 0; ++} ++ ++/** ++ * dpseci_set_irq_enable() - Set overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSECI object ++ * @irq_index: The interrupt index to configure ++ * @en: Interrupt state - enable = 1, disable = 0 ++ * ++ * Allows GPP software to control when interrupts are generated. ++ * Each interrupt can have up to 32 causes. The enable/disable controls the ++ * overall interrupt state. If the interrupt is disabled, no causes will cause ++ * an interrupt. ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ u8 irq_index, u8 en) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpseci_cmd_irq_enable *cmd_params; ++ ++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params; ++ cmd_params->irq_index = irq_index; ++ cmd_params->enable_state = en; ++ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpseci_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSECI object ++ * @irq_index: The interrupt index to configure ++ * @mask: Returned event mask to trigger interrupt ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently. ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ u8 irq_index, u32 *mask) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpseci_cmd_irq_mask *cmd_params; ++ int err; ++ ++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params; ++ cmd_params->irq_index = irq_index; ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ *mask = le32_to_cpu(cmd_params->mask); ++ ++ return 0; ++} ++ ++/** ++ * dpseci_set_irq_mask() - Set interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSECI object ++ * @irq_index: The interrupt index to configure ++ * @mask: event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting IRQ ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ u8 irq_index, u32 mask) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpseci_cmd_irq_mask *cmd_params; ++ ++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params; ++ cmd_params->mask = cpu_to_le32(mask); ++ cmd_params->irq_index = irq_index; ++ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpseci_get_irq_status() - Get the current status of any pending interrupts ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSECI object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ u8 irq_index, u32 *status) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpseci_cmd_irq_status *cmd_params; ++ int err; ++ ++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpseci_cmd_irq_status *)cmd.params; ++ cmd_params->status = cpu_to_le32(*status); ++ cmd_params->irq_index = irq_index; ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ *status = le32_to_cpu(cmd_params->status); ++ ++ return 0; ++} ++ ++/** ++ * dpseci_clear_irq_status() - Clear a pending interrupt's status ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSECI object ++ * @irq_index: The interrupt index to configure ++ * @status: bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ u8 
irq_index, u32 status) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpseci_cmd_irq_status *cmd_params; ++ ++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpseci_cmd_irq_status *)cmd.params; ++ cmd_params->status = cpu_to_le32(status); ++ cmd_params->irq_index = irq_index; ++ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpseci_get_attributes() - Retrieve DPSECI attributes ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSECI object ++ * @attr: Returned object's attributes ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ struct dpseci_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpseci_rsp_get_attributes *rsp_params; ++ int err; ++ ++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params; ++ attr->id = le32_to_cpu(rsp_params->id); ++ attr->num_tx_queues = rsp_params->num_tx_queues; ++ attr->num_rx_queues = rsp_params->num_rx_queues; ++ attr->options = le32_to_cpu(rsp_params->options); ++ ++ return 0; ++} ++ ++/** ++ * dpseci_set_rx_queue() - Set Rx queue configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSECI object ++ * @queue: Select the queue relative to number of priorities configured at ++ * DPSECI creation; use DPSECI_ALL_QUEUES to configure all ++ * Rx queues identically. ++ * @cfg: Rx queue configuration ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ u8 queue, const struct dpseci_rx_queue_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpseci_cmd_queue *cmd_params; ++ ++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpseci_cmd_queue *)cmd.params; ++ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); ++ cmd_params->priority = cfg->dest_cfg.priority; ++ cmd_params->queue = queue; ++ cmd_params->dest_type = cfg->dest_cfg.dest_type; ++ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx); ++ cmd_params->options = cpu_to_le32(cfg->options); ++ cmd_params->order_preservation_en = ++ cpu_to_le32(cfg->order_preservation_en); ++ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpseci_get_rx_queue() - Retrieve Rx queue attributes ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSECI object ++ * @queue: Select the queue relative to number of priorities configured at ++ * DPSECI creation ++ * @attr: Returned Rx queue attributes ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ u8 queue, struct dpseci_rx_queue_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpseci_cmd_queue *cmd_params; ++ int err; ++ ++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpseci_cmd_queue *)cmd.params; ++ cmd_params->queue = queue; ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id); ++ 
attr->dest_cfg.priority = cmd_params->priority; ++ attr->dest_cfg.dest_type = cmd_params->dest_type; ++ attr->user_ctx = le64_to_cpu(cmd_params->user_ctx); ++ attr->fqid = le32_to_cpu(cmd_params->fqid); ++ attr->order_preservation_en = ++ le32_to_cpu(cmd_params->order_preservation_en); ++ ++ return 0; ++} ++ ++/** ++ * dpseci_get_tx_queue() - Retrieve Tx queue attributes ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSECI object ++ * @queue: Select the queue relative to number of priorities configured at ++ * DPSECI creation ++ * @attr: Returned Tx queue attributes ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ u8 queue, struct dpseci_tx_queue_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpseci_cmd_queue *cmd_params; ++ struct dpseci_rsp_get_tx_queue *rsp_params; ++ int err; ++ ++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpseci_cmd_queue *)cmd.params; ++ cmd_params->queue = queue; ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params; ++ attr->fqid = le32_to_cpu(rsp_params->fqid); ++ attr->priority = rsp_params->priority; ++ ++ return 0; ++} ++ ++/** ++ * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSECI object ++ * @attr: Returned SEC attributes ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ struct dpseci_sec_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpseci_rsp_get_sec_attr *rsp_params; ++ int err; ++ ++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR, ++ cmd_flags, ++ token); ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params; ++ attr->ip_id = le16_to_cpu(rsp_params->ip_id); ++ attr->major_rev = rsp_params->major_rev; ++ attr->minor_rev = rsp_params->minor_rev; ++ attr->era = rsp_params->era; ++ attr->deco_num = rsp_params->deco_num; ++ attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num; ++ attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num; ++ attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num; ++ attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num; ++ attr->crc_acc_num = rsp_params->crc_acc_num; ++ attr->pk_acc_num = rsp_params->pk_acc_num; ++ attr->kasumi_acc_num = rsp_params->kasumi_acc_num; ++ attr->rng_acc_num = rsp_params->rng_acc_num; ++ attr->md_acc_num = rsp_params->md_acc_num; ++ attr->arc4_acc_num = rsp_params->arc4_acc_num; ++ attr->des_acc_num = rsp_params->des_acc_num; ++ attr->aes_acc_num = rsp_params->aes_acc_num; ++ ++ return 0; ++} ++ ++/** ++ * dpseci_get_sec_counters() - Retrieve SEC accelerator counters ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSECI object ++ * @counters: Returned SEC counters ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ struct dpseci_sec_counters *counters) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpseci_rsp_get_sec_counters *rsp_params; ++ int err; ++ ++ cmd.header = 
mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS, ++ cmd_flags, ++ token); ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params; ++ counters->dequeued_requests = ++ le64_to_cpu(rsp_params->dequeued_requests); ++ counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests); ++ counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests); ++ counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes); ++ counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes); ++ counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes); ++ counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes); ++ ++ return 0; ++} ++ ++/** ++ * dpseci_get_api_version() - Get Data Path SEC Interface API version ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @major_ver: Major version of data path sec API ++ * @minor_ver: Minor version of data path sec API ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags, ++ u16 *major_ver, u16 *minor_ver) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpseci_rsp_get_api_version *rsp_params; ++ int err; ++ ++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION, ++ cmd_flags, 0); ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params; ++ *major_ver = le16_to_cpu(rsp_params->major); ++ *minor_ver = le16_to_cpu(rsp_params->minor); ++ ++ return 0; ++} ++ ++/** ++ * dpseci_set_opr() - Set Order Restoration configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSECI object ++ * @index: The queue index ++ * @options: Configuration mode options; can be OPR_OPT_CREATE or ++ * OPR_OPT_RETIRE ++ * @cfg: Configuration options for the OPR ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index, ++ u8 options, struct opr_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpseci_cmd_opr *cmd_params; ++ ++ cmd.header = mc_encode_cmd_header( ++ DPSECI_CMDID_SET_OPR, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpseci_cmd_opr *)cmd.params; ++ cmd_params->index = index; ++ cmd_params->options = options; ++ cmd_params->oloe = cfg->oloe; ++ cmd_params->oeane = cfg->oeane; ++ cmd_params->olws = cfg->olws; ++ cmd_params->oa = cfg->oa; ++ cmd_params->oprrws = cfg->oprrws; ++ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpseci_get_opr() - Retrieve Order Restoration config and query ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSECI object ++ * @index: The queue index ++ * @cfg: Returned OPR configuration ++ * @qry: Returned OPR query ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index, ++ struct opr_cfg *cfg, struct opr_qry *qry) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpseci_cmd_opr *cmd_params; ++ struct dpseci_rsp_get_opr *rsp_params; ++ int err; ++ ++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpseci_cmd_opr *)cmd.params; ++ cmd_params->index = index; ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; 
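++ /*
++  * mc_send_command() is synchronous: the management complex writes the
++  * response payload back into cmd.params, which is why the same buffer
++  * is re-cast below as the dpseci_rsp_* response layout (the pattern
++  * all dpseci_*() getters in this file follow).
++  */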
++ ++ rsp_params = (struct dpseci_rsp_get_opr *)cmd.params; ++ qry->rip = dpseci_get_field(rsp_params->rip_enable, OPR_RIP); ++ qry->enable = dpseci_get_field(rsp_params->rip_enable, OPR_ENABLE); ++ cfg->oloe = rsp_params->oloe; ++ cfg->oeane = rsp_params->oeane; ++ cfg->olws = rsp_params->olws; ++ cfg->oa = rsp_params->oa; ++ cfg->oprrws = rsp_params->oprrws; ++ qry->nesn = le16_to_cpu(rsp_params->nesn); ++ qry->ndsn = le16_to_cpu(rsp_params->ndsn); ++ qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq); ++ qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS); ++ qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq); ++ qry->hseq_nlis = dpseci_get_field(rsp_params->hseq_nlis, OPR_HSEQ_NLIS); ++ qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr); ++ qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr); ++ qry->opr_vid = le16_to_cpu(rsp_params->opr_vid); ++ qry->opr_id = le16_to_cpu(rsp_params->opr_id); ++ ++ return 0; ++} ++ ++/** ++ * dpseci_set_congestion_notification() - Set congestion group ++ * notification configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSECI object ++ * @cfg: congestion notification configuration ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags, ++ u16 token, const struct dpseci_congestion_notification_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpseci_cmd_congestion_notification *cmd_params; ++ ++ cmd.header = mc_encode_cmd_header( ++ DPSECI_CMDID_SET_CONGESTION_NOTIFICATION, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params; ++ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); ++ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode); ++ cmd_params->priority = cfg->dest_cfg.priority; ++ dpseci_set_field(cmd_params->options, CGN_DEST_TYPE, ++ cfg->dest_cfg.dest_type); ++ dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units); ++ cmd_params->message_iova = cpu_to_le64(cfg->message_iova); ++ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx); ++ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry); ++ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit); ++ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpseci_get_congestion_notification() - Get congestion group notification ++ * configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPSECI object ++ * @cfg: congestion notification configuration ++ * ++ * Return: '0' on success, error code otherwise ++ */ ++int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags, ++ u16 token, struct dpseci_congestion_notification_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ struct dpseci_cmd_congestion_notification *rsp_params; ++ int err; ++ ++ cmd.header = mc_encode_cmd_header( ++ DPSECI_CMDID_GET_CONGESTION_NOTIFICATION, ++ cmd_flags, ++ token); ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params; ++ cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id); ++ cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode); ++ cfg->dest_cfg.priority = rsp_params->priority; ++ cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options, ++ CGN_DEST_TYPE); ++ cfg->units = dpseci_get_field(rsp_params->options,
CGN_UNITS); ++ cfg->message_iova = le64_to_cpu(rsp_params->message_iova); ++ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx); ++ cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry); ++ cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit); ++ ++ return 0; ++} +--- /dev/null ++++ b/drivers/crypto/caam/dpseci.h +@@ -0,0 +1,395 @@ ++/* ++ * Copyright 2013-2016 Freescale Semiconductor Inc. ++ * Copyright 2017 NXP ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the names of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++#ifndef _DPSECI_H_ ++#define _DPSECI_H_ ++ ++/* ++ * Data Path SEC Interface API ++ * Contains initialization APIs and runtime control APIs for DPSECI ++ */ ++ ++struct fsl_mc_io; ++struct opr_cfg; ++struct opr_qry; ++ ++/** ++ * General DPSECI macros ++ */ ++ ++/** ++ * Maximum number of Tx/Rx priorities per DPSECI object ++ */ ++#define DPSECI_PRIO_NUM 8 ++ ++/** ++ * All queues considered; see dpseci_set_rx_queue() ++ */ ++#define DPSECI_ALL_QUEUES (u8)(-1) ++ ++int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id, ++ u16 *token); ++ ++int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); ++ ++/** ++ * Enable the Congestion Group support ++ */ ++#define DPSECI_OPT_HAS_CG 0x000020 ++ ++/** ++ * Enable the Order Restoration support ++ */ ++#define DPSECI_OPT_HAS_OPR 0x000040 ++ ++/** ++ * Order Point Records are shared for the entire DPSECI ++ */ ++#define DPSECI_OPT_OPR_SHARED 0x000080 ++ ++/** ++ * struct dpseci_cfg - Structure representing DPSECI configuration ++ * @options: Any combination of the following options: ++ * DPSECI_OPT_HAS_CG ++ * DPSECI_OPT_HAS_OPR ++ * DPSECI_OPT_OPR_SHARED ++ * @num_tx_queues: num of queues towards the SEC ++ * @num_rx_queues: num of queues back from the SEC ++ * @priorities: Priorities for the SEC hardware processing; ++ * each place in the array is the priority of the tx queue ++ * towards the SEC; ++ * valid priorities are configured with values 1-8; ++ */ ++struct dpseci_cfg { ++ u32 options; ++ u8 num_tx_queues; ++ u8 num_rx_queues; ++ u8 priorities[DPSECI_PRIO_NUM]; ++}; ++ ++int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags, ++ const struct dpseci_cfg *cfg, u32 *obj_id); ++ ++int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags, ++ u32 object_id); ++ ++int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); ++ ++int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); ++ ++int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ int *en); ++ ++int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); ++ ++int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ u8 irq_index, u8 *en); ++ ++int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ u8 irq_index, u8 en); ++ ++int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ u8 irq_index, u32 *mask); ++ ++int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ u8 irq_index, u32 mask); ++ ++int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ u8 irq_index, u32 *status); ++ ++int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ u8 irq_index, u32 status); ++ ++/** ++ * struct dpseci_attr - Structure representing DPSECI attributes ++ * @id: DPSECI object ID ++ * @num_tx_queues: number of queues towards the SEC ++ * @num_rx_queues: number of queues back from the SEC ++ * @options: any combination of the following options: ++ * DPSECI_OPT_HAS_CG ++ * DPSECI_OPT_HAS_OPR ++ * DPSECI_OPT_OPR_SHARED ++ */ ++struct dpseci_attr { ++ int id; ++ u8 num_tx_queues; ++ u8 num_rx_queues; ++ u32 options; ++}; ++ ++int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ struct dpseci_attr *attr); ++ ++/** ++ * enum dpseci_dest - DPSECI destination types ++ * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode ++ * and does not generate FQDAN notifications; user is expected to dequeue ++ 
* from the queue based on polling or other user-defined method ++ * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN ++ * notifications to the specified DPIO; user is expected to dequeue from ++ * the queue only after notification is received ++ * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate ++ * FQDAN notifications, but is connected to the specified DPCON object; ++ * user is expected to dequeue from the DPCON channel ++ */ ++enum dpseci_dest { ++ DPSECI_DEST_NONE = 0, ++ DPSECI_DEST_DPIO, ++ DPSECI_DEST_DPCON ++}; ++ ++/** ++ * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters ++ * @dest_type: Destination type ++ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type ++ * @priority: Priority selection within the DPIO or DPCON channel; valid values ++ * are 0-1 or 0-7, depending on the number of priorities in that channel; ++ * not relevant for 'DPSECI_DEST_NONE' option ++ */ ++struct dpseci_dest_cfg { ++ enum dpseci_dest dest_type; ++ int dest_id; ++ u8 priority; ++}; ++ ++/** ++ * DPSECI queue modification options ++ */ ++ ++/** ++ * Select to modify the user's context associated with the queue ++ */ ++#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001 ++ ++/** ++ * Select to modify the queue's destination ++ */ ++#define DPSECI_QUEUE_OPT_DEST 0x00000002 ++ ++/** ++ * Select to modify the queue's order preservation ++ */ ++#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004 ++ ++/** ++ * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration ++ * @options: Flags representing the suggested modifications to the queue; ++ * Use any combination of 'DPSECI_QUEUE_OPT_' flags ++ * @order_preservation_en: order preservation configuration for the rx queue ++ * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options' ++ * @user_ctx: User context value provided in the frame descriptor of each ++ * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained ++ * in 'options' ++ * @dest_cfg: Queue destination parameters; valid only if ++ * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options' ++ */ ++struct dpseci_rx_queue_cfg { ++ u32 options; ++ int order_preservation_en; ++ u64 user_ctx; ++ struct dpseci_dest_cfg dest_cfg; ++}; ++ ++int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ u8 queue, const struct dpseci_rx_queue_cfg *cfg); ++ ++/** ++ * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues ++ * @user_ctx: User context value provided in the frame descriptor of each ++ * dequeued frame ++ * @order_preservation_en: Status of the order preservation configuration on the ++ * queue ++ * @dest_cfg: Queue destination configuration ++ * @fqid: Virtual FQID value to be used for dequeue operations ++ */ ++struct dpseci_rx_queue_attr { ++ u64 user_ctx; ++ int order_preservation_en; ++ struct dpseci_dest_cfg dest_cfg; ++ u32 fqid; ++}; ++ ++int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ u8 queue, struct dpseci_rx_queue_attr *attr); ++ ++/** ++ * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues ++ * @fqid: Virtual FQID to be used for sending frames to SEC hardware ++ * @priority: SEC hardware processing priority for the queue ++ */ ++struct dpseci_tx_queue_attr { ++ u32 fqid; ++ u8 priority; ++}; ++ ++int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ u8 queue, struct dpseci_tx_queue_attr *attr); ++ ++/** ++ * struct dpseci_sec_attr - 
Structure representing attributes of the SEC ++ * hardware accelerator ++ * @ip_id: ID for SEC ++ * @major_rev: Major revision number for SEC ++ * @minor_rev: Minor revision number for SEC ++ * @era: SEC Era ++ * @deco_num: The number of copies of the DECO that are implemented in this ++ * version of SEC ++ * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this ++ * version of SEC ++ * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this ++ * version of SEC ++ * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are ++ * implemented in this version of SEC ++ * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are ++ * implemented in this version of SEC ++ * @crc_acc_num: The number of copies of the CRC module that are implemented in ++ * this version of SEC ++ * @pk_acc_num: The number of copies of the Public Key module that are ++ * implemented in this version of SEC ++ * @kasumi_acc_num: The number of copies of the Kasumi module that are ++ * implemented in this version of SEC ++ * @rng_acc_num: The number of copies of the Random Number Generator that are ++ * implemented in this version of SEC ++ * @md_acc_num: The number of copies of the MDHA (Hashing module) that are ++ * implemented in this version of SEC ++ * @arc4_acc_num: The number of copies of the ARC4 module that are implemented ++ * in this version of SEC ++ * @des_acc_num: The number of copies of the DES module that are implemented in ++ * this version of SEC ++ * @aes_acc_num: The number of copies of the AES module that are implemented in ++ * this version of SEC ++ **/ ++struct dpseci_sec_attr { ++ u16 ip_id; ++ u8 major_rev; ++ u8 minor_rev; ++ u8 era; ++ u8 deco_num; ++ u8 zuc_auth_acc_num; ++ u8 zuc_enc_acc_num; ++ u8 snow_f8_acc_num; ++ u8 snow_f9_acc_num; ++ u8 crc_acc_num; ++ u8 pk_acc_num; ++ u8 kasumi_acc_num; ++ u8 rng_acc_num; ++ u8 md_acc_num; ++ u8 arc4_acc_num; ++ u8 des_acc_num; ++ u8 aes_acc_num; ++}; ++ ++int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ struct dpseci_sec_attr *attr); ++ ++/** ++ * struct dpseci_sec_counters - Structure representing global SEC counters and ++ * not per dpseci counters ++ * @dequeued_requests: Number of Requests Dequeued ++ * @ob_enc_requests: Number of Outbound Encrypt Requests ++ * @ib_dec_requests: Number of Inbound Decrypt Requests ++ * @ob_enc_bytes: Number of Outbound Bytes Encrypted ++ * @ob_prot_bytes: Number of Outbound Bytes Protected ++ * @ib_dec_bytes: Number of Inbound Bytes Decrypted ++ * @ib_valid_bytes: Number of Inbound Bytes Validated ++ */ ++struct dpseci_sec_counters { ++ u64 dequeued_requests; ++ u64 ob_enc_requests; ++ u64 ib_dec_requests; ++ u64 ob_enc_bytes; ++ u64 ob_prot_bytes; ++ u64 ib_dec_bytes; ++ u64 ib_valid_bytes; ++}; ++ ++int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ++ struct dpseci_sec_counters *counters); ++ ++int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags, ++ u16 *major_ver, u16 *minor_ver); ++ ++int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index, ++ u8 options, struct opr_cfg *cfg); ++ ++int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index, ++ struct opr_cfg *cfg, struct opr_qry *qry); ++ ++/** ++ * enum dpseci_congestion_unit - DPSECI congestion units ++ * @DPSECI_CONGESTION_UNIT_BYTES: bytes units ++ * @DPSECI_CONGESTION_UNIT_FRAMES: frames units ++ */ ++enum dpseci_congestion_unit { ++ DPSECI_CONGESTION_UNIT_BYTES = 0, ++ 
DPSECI_CONGESTION_UNIT_FRAMES ++}; ++ ++#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001 ++#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002 ++#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004 ++#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008 ++#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010 ++#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020 ++ ++/** ++ * struct dpseci_congestion_notification_cfg - congestion notification ++ * configuration ++ * @units: units type ++ * @threshold_entry: above this threshold we enter a congestion state. ++ * set it to '0' to disable it ++ * @threshold_exit: below this threshold we exit the congestion state. ++ * @message_ctx: The context that will be part of the CSCN message ++ * @message_iova: I/O virtual address (must be in DMA-able memory), ++ * must be 16B aligned; ++ * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel ++ * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_' ++ * values ++ */ ++struct dpseci_congestion_notification_cfg { ++ enum dpseci_congestion_unit units; ++ u32 threshold_entry; ++ u32 threshold_exit; ++ u64 message_ctx; ++ u64 message_iova; ++ struct dpseci_dest_cfg dest_cfg; ++ u16 notification_mode; ++}; ++ ++int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags, ++ u16 token, const struct dpseci_congestion_notification_cfg *cfg); ++ ++int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags, ++ u16 token, struct dpseci_congestion_notification_cfg *cfg); ++ ++#endif /* _DPSECI_H_ */ +--- /dev/null ++++ b/drivers/crypto/caam/dpseci_cmd.h +@@ -0,0 +1,261 @@ ++/* ++ * Copyright 2013-2016 Freescale Semiconductor Inc. ++ * Copyright 2017 NXP ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the names of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE.
++ */ ++ ++#ifndef _DPSECI_CMD_H_ ++#define _DPSECI_CMD_H_ ++ ++/* DPSECI Version */ ++#define DPSECI_VER_MAJOR 5 ++#define DPSECI_VER_MINOR 1 ++ ++#define DPSECI_VER(maj, min) (((maj) << 16) | (min)) ++#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR) ++ ++/* Command IDs */ ++ ++#define DPSECI_CMDID_CLOSE 0x8001 ++#define DPSECI_CMDID_OPEN 0x8091 ++#define DPSECI_CMDID_CREATE 0x9092 ++#define DPSECI_CMDID_DESTROY 0x9891 ++#define DPSECI_CMDID_GET_API_VERSION 0xa091 ++ ++#define DPSECI_CMDID_ENABLE 0x0021 ++#define DPSECI_CMDID_DISABLE 0x0031 ++#define DPSECI_CMDID_GET_ATTR 0x0041 ++#define DPSECI_CMDID_RESET 0x0051 ++#define DPSECI_CMDID_IS_ENABLED 0x0061 ++ ++#define DPSECI_CMDID_SET_IRQ_ENABLE 0x0121 ++#define DPSECI_CMDID_GET_IRQ_ENABLE 0x0131 ++#define DPSECI_CMDID_SET_IRQ_MASK 0x0141 ++#define DPSECI_CMDID_GET_IRQ_MASK 0x0151 ++#define DPSECI_CMDID_GET_IRQ_STATUS 0x0161 ++#define DPSECI_CMDID_CLEAR_IRQ_STATUS 0x0171 ++ ++#define DPSECI_CMDID_SET_RX_QUEUE 0x1941 ++#define DPSECI_CMDID_GET_RX_QUEUE 0x1961 ++#define DPSECI_CMDID_GET_TX_QUEUE 0x1971 ++#define DPSECI_CMDID_GET_SEC_ATTR 0x1981 ++#define DPSECI_CMDID_GET_SEC_COUNTERS 0x1991 ++#define DPSECI_CMDID_SET_OPR 0x19A1 ++#define DPSECI_CMDID_GET_OPR 0x19B1 ++ ++#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION 0x1701 ++#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION 0x1711 ++ ++/* Macros for accessing command fields smaller than 1 byte */ ++#define DPSECI_MASK(field) \ ++ GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \ ++ DPSECI_##field##_SHIFT) ++ ++#define dpseci_set_field(var, field, val) \ ++ ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field))) ++ ++#define dpseci_get_field(var, field) \ ++ (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT) ++ ++struct dpseci_cmd_open { ++ __le32 dpseci_id; ++}; ++ ++struct dpseci_cmd_create { ++ u8 priorities[8]; ++ u8 num_tx_queues; ++ u8 num_rx_queues; ++ __le16 pad; ++ __le32 options; ++}; ++ ++struct dpseci_cmd_destroy { ++ __le32 object_id; ++}; ++ ++struct dpseci_rsp_is_enabled { ++ __le32 is_enabled; ++}; ++ ++struct dpseci_cmd_irq_enable { ++ u8 enable_state; ++ u8 pad[3]; ++ u8 irq_index; ++}; ++ ++struct dpseci_rsp_get_irq_enable { ++ u8 enable_state; ++}; ++ ++struct dpseci_cmd_irq_mask { ++ __le32 mask; ++ u8 irq_index; ++}; ++ ++struct dpseci_cmd_irq_status { ++ __le32 status; ++ u8 irq_index; ++}; ++ ++struct dpseci_rsp_get_attributes { ++ __le32 id; ++ __le32 pad0; ++ u8 num_tx_queues; ++ u8 num_rx_queues; ++ u8 pad1[6]; ++ __le32 options; ++}; ++ ++struct dpseci_cmd_queue { ++ __le32 dest_id; ++ u8 priority; ++ u8 queue; ++ u8 dest_type; ++ u8 pad; ++ __le64 user_ctx; ++ union { ++ __le32 options; ++ __le32 fqid; ++ }; ++ __le32 order_preservation_en; ++}; ++ ++struct dpseci_rsp_get_tx_queue { ++ __le32 pad; ++ __le32 fqid; ++ u8 priority; ++}; ++ ++struct dpseci_rsp_get_sec_attr { ++ __le16 ip_id; ++ u8 major_rev; ++ u8 minor_rev; ++ u8 era; ++ u8 pad0[3]; ++ u8 deco_num; ++ u8 zuc_auth_acc_num; ++ u8 zuc_enc_acc_num; ++ u8 pad1; ++ u8 snow_f8_acc_num; ++ u8 snow_f9_acc_num; ++ u8 crc_acc_num; ++ u8 pad2; ++ u8 pk_acc_num; ++ u8 kasumi_acc_num; ++ u8 rng_acc_num; ++ u8 pad3; ++ u8 md_acc_num; ++ u8 arc4_acc_num; ++ u8 des_acc_num; ++ u8 aes_acc_num; ++}; ++ ++struct dpseci_rsp_get_sec_counters { ++ __le64 dequeued_requests; ++ __le64 ob_enc_requests; ++ __le64 ib_dec_requests; ++ __le64 ob_enc_bytes; ++ __le64 ob_prot_bytes; ++ __le64 ib_dec_bytes; ++ __le64 ib_valid_bytes; ++}; ++ ++struct dpseci_rsp_get_api_version { ++ 
__le16 major;
++	__le16 minor;
++};
++
++struct dpseci_cmd_opr {
++	__le16 pad;
++	u8 index;
++	u8 options;
++	u8 pad1[7];
++	u8 oloe;
++	u8 oeane;
++	u8 olws;
++	u8 oa;
++	u8 oprrws;
++};
++
++#define DPSECI_OPR_RIP_SHIFT		0
++#define DPSECI_OPR_RIP_SIZE		1
++#define DPSECI_OPR_ENABLE_SHIFT		1
++#define DPSECI_OPR_ENABLE_SIZE		1
++#define DPSECI_OPR_TSEQ_NLIS_SHIFT	1
++#define DPSECI_OPR_TSEQ_NLIS_SIZE	1
++#define DPSECI_OPR_HSEQ_NLIS_SHIFT	1
++#define DPSECI_OPR_HSEQ_NLIS_SIZE	1
++
++struct dpseci_rsp_get_opr {
++	__le64 pad;
++	u8 rip_enable;
++	u8 pad0[2];
++	u8 oloe;
++	u8 oeane;
++	u8 olws;
++	u8 oa;
++	u8 oprrws;
++	__le16 nesn;
++	__le16 pad1;
++	__le16 ndsn;
++	__le16 pad2;
++	__le16 ea_tseq;
++	u8 tseq_nlis;
++	u8 pad3;
++	__le16 ea_hseq;
++	u8 hseq_nlis;
++	u8 pad4;
++	__le16 ea_hptr;
++	__le16 pad5;
++	__le16 ea_tptr;
++	__le16 pad6;
++	__le16 opr_vid;
++	__le16 pad7;
++	__le16 opr_id;
++};
++
++#define DPSECI_CGN_DEST_TYPE_SHIFT	0
++#define DPSECI_CGN_DEST_TYPE_SIZE	4
++#define DPSECI_CGN_UNITS_SHIFT		4
++#define DPSECI_CGN_UNITS_SIZE		2
++
++struct dpseci_cmd_congestion_notification {
++	__le32 dest_id;
++	__le16 notification_mode;
++	u8 priority;
++	u8 options;
++	__le64 message_iova;
++	__le64 message_ctx;
++	__le32 threshold_entry;
++	__le32 threshold_exit;
++};
++
++#endif /* _DPSECI_CMD_H_ */
+--- a/drivers/crypto/caam/error.c
++++ b/drivers/crypto/caam/error.c
+@@ -6,11 +6,54 @@
+ 
+ #include "compat.h"
+ #include "regs.h"
+-#include "intern.h"
+ #include "desc.h"
+-#include "jr.h"
+ #include "error.h"
+ 
++#ifdef DEBUG
++
++#include <linux/highmem.h>
++
++void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
++		  int rowsize, int groupsize, struct scatterlist *sg,
++		  size_t tlen, bool ascii)
++{
++	struct scatterlist *it;
++	void *it_page;
++	size_t len;
++	void *buf;
++
++	for (it = sg; it && tlen > 0 ; it = sg_next(it)) {
++		/*
++		 * make sure the scatterlist's page
++		 * has a valid virtual memory mapping
++		 */
++		it_page = kmap_atomic(sg_page(it));
++		if (unlikely(!it_page)) {
++			pr_err("caam_dump_sg: kmap failed\n");
++			return;
++		}
++
++		buf = it_page + it->offset;
++		len = min_t(size_t, tlen, it->length);
++		print_hex_dump(level, prefix_str, prefix_type, rowsize,
++			       groupsize, buf, len, ascii);
++		tlen -= len;
++
++		kunmap_atomic(it_page);
++	}
++}
++
++#else
++
++void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
++		  int rowsize, int groupsize, struct scatterlist *sg,
++		  size_t tlen, bool ascii)
++{}
++
++#endif
++
++EXPORT_SYMBOL(caam_dump_sg);
++
+ static const struct {
+ 	u8 value;
+ 	const char *error_text;
+@@ -69,6 +112,54 @@ static const struct {
+ 	{ 0xF1, "3GPP HFN matches or exceeds the Threshold" },
+ };
+ 
++static const struct {
++	u8 value;
++	const char *error_text;
++} qi_error_list[] = {
++	{ 0x1F, "Job terminated by FQ or ICID flush" },
++	{ 0x20, "FD format error"},
++	{ 0x21, "FD command format error"},
++	{ 0x23, "FL format error"},
++	{ 0x25, "CRJD specified in FD, but not enabled in FLC"},
++	{ 0x30, "Max. buffer size too small"},
++	{ 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
++	{ 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format)"},
++	{ 0x33, "Size over/underflow (allocate mode)"},
++	{ 0x34, "Size over/underflow (reuse mode)"},
++	{ 0x35, "Length exceeds max. short length (allocate mode, S/G format)"},
++	{ 0x36, "Memory footprint exceeds max. value (allocate mode, S/G format)"},
++	{ 0x41, "SBC frame format not supported (allocate mode)"},
++	{ 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
++	{ 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
++	{ 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
++	{ 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
++	{ 0x46, "Annotation length exceeds offset (reuse mode)"},
++	{ 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
++	{ 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
++	{ 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
++	{ 0x51, "Unsupported IF reuse mode"},
++	{ 0x52, "Unsupported FL use mode"},
++	{ 0x53, "Unsupported RJD use mode"},
++	{ 0x54, "Unsupported inline descriptor use mode"},
++	{ 0xC0, "Table buffer pool 0 depletion"},
++	{ 0xC1, "Table buffer pool 1 depletion"},
++	{ 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
++	{ 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
++	{ 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
++	{ 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
++	{ 0xD0, "FLC read error"},
++	{ 0xD1, "FL read error"},
++	{ 0xD2, "FL write error"},
++	{ 0xD3, "OF SGT write error"},
++	{ 0xD4, "PTA read error"},
++	{ 0xD5, "PTA write error"},
++	{ 0xD6, "OF SGT F-bit write error"},
++	{ 0xD7, "ASA write error"},
++	{ 0xE1, "FLC[ICR]=0 ICID error"},
++	{ 0xE2, "FLC[ICR]=1 ICID error"},
++	{ 0xE4, "source of ICID flush not trusted (BDI = 0)"},
++};
++
+ static const char * const cha_id_list[] = {
+ 	"",
+ 	"AES",
+@@ -146,10 +237,9 @@ static void report_ccb_status(struct dev
+ 		   strlen(rng_err_id_list[err_id])) {
+ 		/* RNG-only error */
+ 		err_str = rng_err_id_list[err_id];
+-	} else if (err_id < ARRAY_SIZE(err_id_list))
++	} else {
+ 		err_str = err_id_list[err_id];
+-	else
+-		snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
++	}
+ 
+ 	/*
+ 	 * CCB ICV check failures are part of normal operation life;
+@@ -198,6 +288,27 @@ static void report_deco_status(struct de
+ 		status, error, idx_str, idx, err_str, err_err_code);
+ }
+ 
++static void report_qi_status(struct device *qidev, const u32 status,
++			     const char *error)
++{
++	u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
++	const char *err_str = "unidentified error value 0x";
++	char err_err_code[3] = { 0 };
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
++		if (qi_error_list[i].value == err_id)
++			break;
++
++	if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
++		err_str = qi_error_list[i].error_text;
++	else
++		snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
++
++	dev_err(qidev, "%08x: %s: %s%s\n",
++		status, error, err_str, err_err_code);
++}
++
+ static void report_jr_status(struct device *jrdev, const u32 status,
+ 			     const char *error)
+ {
+@@ -212,7 +323,7 @@ static void report_cond_code_status(stru
+ 		status, error, __func__);
+ }
+ 
+-void caam_jr_strstatus(struct device *jrdev, u32 status)
++void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
+ {
+ 	static const struct stat_src {
+ 		void (*report_ssed)(struct device *jrdev, const u32 status,
+@@ -224,7 +335,7 @@ void caam_jr_strstatus(struct jr
+ 		{ report_ccb_status, "CCB" },
+ 		{ report_jump_status, "Jump" },
+ 		{ report_deco_status, "DECO" },
+-		{ NULL, "Queue Manager Interface" },
++		{ report_qi_status, "Queue Manager Interface" },
+ 		{ report_jr_status, "Job Ring" },
+ 		{ report_cond_code_status, "Condition Code" },
+ 		{ NULL, NULL },
+@@ -250,4 +361,4 @@ void caam_jr_strstatus(struct jr
+ 	else
+ 		dev_err(jrdev, "%d: unknown error source\n", ssrc);
+ }
+-EXPORT_SYMBOL(caam_jr_strstatus);
++EXPORT_SYMBOL(caam_strstatus);
+--- a/drivers/crypto/caam/error.h
++++ b/drivers/crypto/caam/error.h
+@@ -7,5 +7,13 @@
+ #ifndef CAAM_ERROR_H
+ #define CAAM_ERROR_H
+ #define CAAM_ERROR_STR_MAX 302
+-void caam_jr_strstatus(struct device *jrdev, u32 status);
++
++void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
++
++#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
++#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
++
++void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
++		  int rowsize, int groupsize, struct scatterlist *sg,
++		  size_t tlen, bool ascii);
+ #endif /* CAAM_ERROR_H */
+--- a/drivers/crypto/caam/intern.h
++++ b/drivers/crypto/caam/intern.h
+@@ -41,6 +41,7 @@ struct caam_drv_private_jr {
+ 	struct device *dev;
+ 	int ridx;
+ 	struct caam_job_ring __iomem *rregs;	/* JobR's register space */
++	struct tasklet_struct irqtask;
+ 	int irq;			/* One per queue */
+ 
+ 	/* Number of scatterlist crypt transforms active on the JobR */
+@@ -63,10 +64,9 @@ struct caam_drv_private_jr {
+  * Driver-private storage for a single CAAM block instance
+  */
+ struct caam_drv_private {
+-
+-	struct device *dev;
+-	struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
+-	struct platform_device *pdev;
++#ifdef CONFIG_CAAM_QI
++	struct device *qidev;
++#endif
+ 
+ 	/* Physical-presence section */
+ 	struct caam_ctrl __iomem *ctrl; /* controller region */
+@@ -102,11 +102,6 @@ struct caam_drv_private {
+ #ifdef CONFIG_DEBUG_FS
+ 	struct dentry *dfs_root;
+ 	struct dentry *ctl; /* controller dir */
+-	struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
+-	struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
+-	struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
+-	struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
+-
+ 	struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
+ 	struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
+ #endif
+@@ -114,4 +109,22 @@ struct caam_drv_private {
+ 
+ void caam_jr_algapi_init(struct device *dev);
+ void caam_jr_algapi_remove(struct device *dev);
++
++#ifdef CONFIG_DEBUG_FS
++static int caam_debugfs_u64_get(void *data, u64 *val)
++{
++	*val = caam64_to_cpu(*(u64 *)data);
++	return 0;
++}
++
++static int caam_debugfs_u32_get(void *data, u64 *val)
++{
++	*val = caam32_to_cpu(*(u32 *)data);
++	return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
++DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
++#endif
++
+ #endif /* INTERN_H */
+--- a/drivers/crypto/caam/jr.c
++++ b/drivers/crypto/caam/jr.c
+@@ -9,6 +9,7 @@
+ #include
+ 
+ #include "compat.h"
++#include "ctrl.h"
+ #include "regs.h"
+ #include "jr.h"
+ #include "desc.h"
+@@ -73,6 +74,8 @@ static int caam_jr_shutdown(struct devic
+ 
+ 	ret = caam_reset_hw_jr(dev);
+ 
++	tasklet_kill(&jrp->irqtask);
++
+ 	/* Release interrupt */
+ 	free_irq(jrp->irq, dev);
+ 
+@@ -128,7 +131,7 @@ static irqreturn_t caam_jr_interrupt(int
+ 
+ 	/*
+ 	 * Check the output ring for ready responses, kick
+-	 * the threaded irq if jobs done.
++	 * the tasklet if jobs done.
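++	 * (running the dequeue as a tasklet keeps the bottom half in softirq
++	 * context instead of a scheduled IRQ thread)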
+ */ + irqstate = rd_reg32(&jrp->rregs->jrintstatus); + if (!irqstate) +@@ -150,13 +153,18 @@ static irqreturn_t caam_jr_interrupt(int + /* Have valid interrupt at this point, just ACK and trigger */ + wr_reg32(&jrp->rregs->jrintstatus, irqstate); + +- return IRQ_WAKE_THREAD; ++ preempt_disable(); ++ tasklet_schedule(&jrp->irqtask); ++ preempt_enable(); ++ ++ return IRQ_HANDLED; + } + +-static irqreturn_t caam_jr_threadirq(int irq, void *st_dev) ++/* Deferred service handler, run as interrupt-fired tasklet */ ++static void caam_jr_dequeue(unsigned long devarg) + { + int hw_idx, sw_idx, i, head, tail; +- struct device *dev = st_dev; ++ struct device *dev = (struct device *)devarg; + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); + void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg); + u32 *userdesc, userstatus; +@@ -230,8 +238,6 @@ static irqreturn_t caam_jr_threadirq(int + + /* reenable / unmask IRQs */ + clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0); +- +- return IRQ_HANDLED; + } + + /** +@@ -389,10 +395,11 @@ static int caam_jr_init(struct device *d + + jrp = dev_get_drvdata(dev); + ++ tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev); ++ + /* Connect job ring interrupt handler. */ +- error = request_threaded_irq(jrp->irq, caam_jr_interrupt, +- caam_jr_threadirq, IRQF_SHARED, +- dev_name(dev), dev); ++ error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, ++ dev_name(dev), dev); + if (error) { + dev_err(dev, "can't connect JobR %d interrupt (%d)\n", + jrp->ridx, jrp->irq); +@@ -454,6 +461,7 @@ out_free_inpring: + out_free_irq: + free_irq(jrp->irq, dev); + out_kill_deq: ++ tasklet_kill(&jrp->irqtask); + return error; + } + +@@ -489,15 +497,28 @@ static int caam_jr_probe(struct platform + return -ENOMEM; + } + +- jrpriv->rregs = (struct caam_job_ring __force *)ctrl; ++ jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl; + +- if (sizeof(dma_addr_t) == sizeof(u64)) +- if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring")) +- dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40)); ++ if (sizeof(dma_addr_t) == sizeof(u64)) { ++ if (caam_dpaa2) ++ error = dma_set_mask_and_coherent(jrdev, ++ DMA_BIT_MASK(49)); ++ else if (of_device_is_compatible(nprop, ++ "fsl,sec-v5.0-job-ring")) ++ error = dma_set_mask_and_coherent(jrdev, ++ DMA_BIT_MASK(40)); + else +- dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36)); +- else +- dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32)); ++ error = dma_set_mask_and_coherent(jrdev, ++ DMA_BIT_MASK(36)); ++ } else { ++ error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32)); ++ } ++ if (error) { ++ dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n", ++ error); ++ iounmap(ctrl); ++ return error; ++ } + + /* Identify the interrupt */ + jrpriv->irq = irq_of_parse_and_map(nprop, 0); +@@ -520,7 +541,7 @@ static int caam_jr_probe(struct platform + return 0; + } + +-static struct of_device_id caam_jr_match[] = { ++static const struct of_device_id caam_jr_match[] = { + { + .compatible = "fsl,sec-v4.0-job-ring", + }, +--- a/drivers/crypto/caam/key_gen.c ++++ b/drivers/crypto/caam/key_gen.c +@@ -41,15 +41,29 @@ Split key generation-------------------- + [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40 + @0xffe04000 + */ +-int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, +- int split_key_pad_len, const u8 *key_in, u32 keylen, +- u32 alg_op) ++int gen_split_key(struct device *jrdev, u8 *key_out, ++ struct alginfo * const adata, const u8 *key_in, u32 keylen, ++ int max_keylen) 
+ {
+ 	u32 *desc;
+ 	struct split_key_result result;
+ 	dma_addr_t dma_addr_in, dma_addr_out;
+ 	int ret = -ENOMEM;
+ 
++	adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
++	adata->keylen_pad = split_key_pad_len(adata->algtype &
++					      OP_ALG_ALGSEL_MASK);
++
++#ifdef DEBUG
++	dev_err(jrdev, "split keylen %d split keylen padded %d\n",
++		adata->keylen, adata->keylen_pad);
++	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
++		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
++#endif
++
++	if (adata->keylen_pad > max_keylen)
++		return -EINVAL;
++
+ 	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+ 	if (!desc) {
+ 		dev_err(jrdev, "unable to allocate key input memory\n");
+@@ -63,7 +77,7 @@ int gen_split_key(struct device *jrdev,
+ 		goto out_free;
+ 	}
+ 
+-	dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
++	dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
+ 				      DMA_FROM_DEVICE);
+ 	if (dma_mapping_error(jrdev, dma_addr_out)) {
+ 		dev_err(jrdev, "unable to map key output memory\n");
+@@ -74,7 +88,9 @@ int gen_split_key(struct device *jrdev,
+ 	append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
+ 
+ 	/* Sets MDHA up into an HMAC-INIT */
+-	append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
++	append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
++			 OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
++			 OP_ALG_AS_INIT);
+ 
+ 	/*
+ 	 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
+@@ -87,7 +103,7 @@ int gen_split_key(struct device *jrdev,
+ 	 * FIFO_STORE with the explicit split-key content store
+ 	 * (0x26 output type)
+ 	 */
+-	append_fifo_store(desc, dma_addr_out, split_key_len,
++	append_fifo_store(desc, dma_addr_out, adata->keylen,
+ 			  LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
+ 
+ #ifdef DEBUG
+@@ -108,11 +124,11 @@ int gen_split_key(struct device *jrdev,
+ #ifdef DEBUG
+ 		print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+ 			       DUMP_PREFIX_ADDRESS, 16, 4, key_out,
+-			       split_key_pad_len, 1);
++			       adata->keylen_pad, 1);
+ #endif
+ 	}
+ 
+-	dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
++	dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
+ 			 DMA_FROM_DEVICE);
+ out_unmap_in:
+ 	dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
+--- a/drivers/crypto/caam/key_gen.h
++++ b/drivers/crypto/caam/key_gen.h
+@@ -5,6 +5,36 @@
+  *
+  */
+ 
++/**
++ * split_key_len - Compute MDHA split key length for a given algorithm
++ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
++ *	  SHA224, SHA256, SHA384, SHA512.
++ *
++ * Return: MDHA split key length
++ */
++static inline u32 split_key_len(u32 hash)
++{
++	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
++	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
++	u32 idx;
++
++	idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
++
++	return (u32)(mdpadlen[idx] * 2);
++}
++
++/**
++ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
++ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
++ *	  SHA224, SHA256, SHA384, SHA512.
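++ *
++ * For example (illustrative): OP_ALG_ALGSEL_SHA256 selects a 32-byte MDHA
++ * pad, so split_key_len() returns 64 and split_key_pad_len() keeps it at 64;
++ * OP_ALG_ALGSEL_SHA1 selects a 20-byte pad, giving 40 and 48 respectively.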
++ *
++ * Return: MDHA split key pad length
++ */
++static inline u32 split_key_pad_len(u32 hash)
++{
++	return ALIGN(split_key_len(hash), 16);
++}
++
+ struct split_key_result {
+ 	struct completion completion;
+ 	int err;
+@@ -12,6 +42,6 @@ struct split_key_result {
+ 
+ void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
+ 
+-int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
+-		  int split_key_pad_len, const u8 *key_in, u32 keylen,
+-		  u32 alg_op);
++int gen_split_key(struct device *jrdev, u8 *key_out,
++		  struct alginfo * const adata, const u8 *key_in, u32 keylen,
++		  int max_keylen);
+--- a/drivers/crypto/caam/pdb.h
++++ b/drivers/crypto/caam/pdb.h
+@@ -483,6 +483,8 @@ struct dsa_verify_pdb {
+ #define RSA_PDB_E_MASK          (0xFFF << RSA_PDB_E_SHIFT)
+ #define RSA_PDB_D_SHIFT         12
+ #define RSA_PDB_D_MASK          (0xFFF << RSA_PDB_D_SHIFT)
++#define RSA_PDB_Q_SHIFT         12
++#define RSA_PDB_Q_MASK          (0xFFF << RSA_PDB_Q_SHIFT)
+ 
+ #define RSA_PDB_SGF_F           (0x8 << RSA_PDB_SGF_SHIFT)
+ #define RSA_PDB_SGF_G           (0x4 << RSA_PDB_SGF_SHIFT)
+@@ -490,6 +492,8 @@ struct dsa_verify_pdb {
+ #define RSA_PRIV_PDB_SGF_G      (0x8 << RSA_PDB_SGF_SHIFT)
+ 
+ #define RSA_PRIV_KEY_FRM_1      0
++#define RSA_PRIV_KEY_FRM_2      1
++#define RSA_PRIV_KEY_FRM_3      2
+ 
+ /**
+  * RSA Encrypt Protocol Data Block
+@@ -525,4 +529,62 @@ struct rsa_priv_f1_pdb {
+ 	dma_addr_t d_dma;
+ } __packed;
+ 
++/**
++ * RSA Decrypt PDB - Private Key Form #2
++ * @sgf     : scatter-gather field
++ * @g_dma   : dma address of encrypted input data
++ * @f_dma   : dma address of output data
++ * @d_dma   : dma address of RSA private exponent
++ * @p_dma   : dma address of RSA prime factor p of RSA modulus n
++ * @q_dma   : dma address of RSA prime factor q of RSA modulus n
++ * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
++ *            as internal state buffer. It is assumed to be as long as p.
++ * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
++ *            as internal state buffer. It is assumed to be as long as q.
++ * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
++ */
++struct rsa_priv_f2_pdb {
++	u32 sgf;
++	dma_addr_t g_dma;
++	dma_addr_t f_dma;
++	dma_addr_t d_dma;
++	dma_addr_t p_dma;
++	dma_addr_t q_dma;
++	dma_addr_t tmp1_dma;
++	dma_addr_t tmp2_dma;
++	u32 p_q_len;
++} __packed;
++
++/**
++ * RSA Decrypt PDB - Private Key Form #3
++ * This is the RSA Chinese Remainder Theorem (CRT) form for two prime factors of
++ * the RSA modulus.
++ * @sgf     : scatter-gather field
++ * @g_dma   : dma address of encrypted input data
++ * @f_dma   : dma address of output data
++ * @c_dma   : dma address of RSA CRT coefficient
++ * @p_dma   : dma address of RSA prime factor p of RSA modulus n
++ * @q_dma   : dma address of RSA prime factor q of RSA modulus n
++ * @dp_dma  : dma address of RSA CRT exponent of RSA prime factor p
++ * @dq_dma  : dma address of RSA CRT exponent of RSA prime factor q
++ * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
++ *            as internal state buffer. It is assumed to be as long as p.
++ * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
++ *            as internal state buffer. It is assumed to be as long as q.
++ * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
++ */
++struct rsa_priv_f3_pdb {
++	u32 sgf;
++	dma_addr_t g_dma;
++	dma_addr_t f_dma;
++	dma_addr_t c_dma;
++	dma_addr_t p_dma;
++	dma_addr_t q_dma;
++	dma_addr_t dp_dma;
++	dma_addr_t dq_dma;
++	dma_addr_t tmp1_dma;
++	dma_addr_t tmp2_dma;
++	u32 p_q_len;
++} __packed;
++
+ #endif
+--- a/drivers/crypto/caam/pkc_desc.c
++++ b/drivers/crypto/caam/pkc_desc.c
+@@ -34,3 +34,39 @@ void init_rsa_priv_f1_desc(u32 *desc, st
+ 	append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
+ 			 RSA_PRIV_KEY_FRM_1);
+ }
++
++/* Descriptor for RSA Private operation - Private Key Form #2 */
++void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
++{
++	init_job_desc_pdb(desc, 0, sizeof(*pdb));
++	append_cmd(desc, pdb->sgf);
++	append_ptr(desc, pdb->g_dma);
++	append_ptr(desc, pdb->f_dma);
++	append_ptr(desc, pdb->d_dma);
++	append_ptr(desc, pdb->p_dma);
++	append_ptr(desc, pdb->q_dma);
++	append_ptr(desc, pdb->tmp1_dma);
++	append_ptr(desc, pdb->tmp2_dma);
++	append_cmd(desc, pdb->p_q_len);
++	append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
++			 RSA_PRIV_KEY_FRM_2);
++}
++
++/* Descriptor for RSA Private operation - Private Key Form #3 */
++void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb)
++{
++	init_job_desc_pdb(desc, 0, sizeof(*pdb));
++	append_cmd(desc, pdb->sgf);
++	append_ptr(desc, pdb->g_dma);
++	append_ptr(desc, pdb->f_dma);
++	append_ptr(desc, pdb->c_dma);
++	append_ptr(desc, pdb->p_dma);
++	append_ptr(desc, pdb->q_dma);
++	append_ptr(desc, pdb->dp_dma);
++	append_ptr(desc, pdb->dq_dma);
++	append_ptr(desc, pdb->tmp1_dma);
++	append_ptr(desc, pdb->tmp2_dma);
++	append_cmd(desc, pdb->p_q_len);
++	append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
++			 RSA_PRIV_KEY_FRM_3);
++}
+--- /dev/null
++++ b/drivers/crypto/caam/qi.c
+@@ -0,0 +1,797 @@
++/*
++ * CAAM/SEC 4.x QI transport/backend driver
++ * Queue Interface backend functionality
++ *
++ * Copyright 2013-2016 Freescale Semiconductor, Inc.
++ * Copyright 2016-2017 NXP
++ */
++
++#include
++#include
++#include
++
++#include "regs.h"
++#include "qi.h"
++#include "desc.h"
++#include "intern.h"
++#include "desc_constr.h"
++
++#define PREHDR_RSLS_SHIFT	31
++
++/*
++ * Use a reasonable backlog of frames (per CPU) as congestion threshold,
++ * so that resources used by the in-flight buffers do not become a memory hog.
++ */
++#define MAX_RSP_FQ_BACKLOG_PER_CPU	256
++
++#define CAAM_QI_ENQUEUE_RETRIES	10000
++
++#define CAAM_NAPI_WEIGHT	63
++
++/*
++ * caam_napi - struct holding CAAM NAPI-related params
++ * @irqtask: IRQ task for QI backend
++ * @p: QMan portal
++ */
++struct caam_napi {
++	struct napi_struct irqtask;
++	struct qman_portal *p;
++};
++
++/*
++ * caam_qi_pcpu_priv - percpu private data structure to maintain the list of
++ *		       pending responses expected on each cpu.
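++ * (one instance per portal-owning CPU; net_dev is a dummy device, present
++ * only because the NAPI API requires one)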
++ * @caam_napi: CAAM NAPI params
++ * @net_dev: netdev used by NAPI
++ * @rsp_fq: response FQ from CAAM
++ */
++struct caam_qi_pcpu_priv {
++	struct caam_napi caam_napi;
++	struct net_device net_dev;
++	struct qman_fq *rsp_fq;
++} ____cacheline_aligned;
++
++static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
++static DEFINE_PER_CPU(int, last_cpu);
++
++/*
++ * caam_qi_priv - CAAM QI backend private params
++ * @cgr: QMan congestion group
++ * @qi_pdev: platform device for QI backend
++ */
++struct caam_qi_priv {
++	struct qman_cgr cgr;
++	struct platform_device *qi_pdev;
++};
++
++static struct caam_qi_priv qipriv ____cacheline_aligned;
++
++/*
++ * This is written by only one core - the one that initialized the CGR - and
++ * read by multiple cores (all the others).
++ */
++bool caam_congested __read_mostly;
++EXPORT_SYMBOL(caam_congested);
++
++#ifdef CONFIG_DEBUG_FS
++/*
++ * This is a counter for the number of times the congestion group (where all
++ * the request and response queues are) reached congestion. Incremented
++ * each time the congestion callback is called with congested == true.
++ */
++static u64 times_congested;
++#endif
++
++/*
++ * CPU on which the module initialised. This is required because the QMan
++ * driver requires CGRs to be removed from the same CPU on which they were
++ * originally allocated.
++ */
++static int mod_init_cpu;
++
++/*
++ * This is a cache of buffers, from which the users of CAAM QI driver
++ * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
++ * doing malloc on the hotpath.
++ * NOTE: A more elegant solution would be to have some headroom in the frames
++ *       being processed. This could be added by the dpaa-ethernet driver.
++ *       This would pose a problem for userspace application processing which
++ *       cannot know of this limitation. So for now, this will work.
++ * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
++ */
++static struct kmem_cache *qi_cache;
++
++int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
++{
++	struct qm_fd fd;
++	int ret;
++	int num_retries = 0;
++
++	fd.cmd = 0;
++	fd.format = qm_fd_compound;
++	fd.cong_weight = req->fd_sgt[1].length;
++	fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
++				 DMA_BIDIRECTIONAL);
++	if (dma_mapping_error(qidev, fd.addr)) {
++		dev_err(qidev, "DMA mapping error for QI enqueue request\n");
++		return -EIO;
++	}
++
++	do {
++		ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
++		if (likely(!ret))
++			return 0;
++
++		if (ret != -EBUSY)
++			break;
++		num_retries++;
++	} while (num_retries < CAAM_QI_ENQUEUE_RETRIES);
++
++	dev_err(qidev, "qman_enqueue failed: %d\n", ret);
++
++	return ret;
++}
++EXPORT_SYMBOL(caam_qi_enqueue);
++
++static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
++			   const struct qm_mr_entry *msg)
++{
++	const struct qm_fd *fd;
++	struct caam_drv_req *drv_req;
++	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
++
++	fd = &msg->ern.fd;
++
++	if (fd->format != qm_fd_compound) {
++		dev_err(qidev, "Non-compound FD from CAAM\n");
++		return;
++	}
++
++	drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
++	if (!drv_req) {
++		dev_err(qidev,
++			"Can't find original request for CAAM response\n");
++		return;
++	}
++
++	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
++			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
++
++	drv_req->cbk(drv_req, -EIO);
++}
++
++static struct qman_fq *create_caam_req_fq(struct device *qidev,
++					  struct qman_fq *rsp_fq,
++					  dma_addr_t hwdesc,
++					  int fq_sched_flag)
++{
++	int ret;
++	struct qman_fq *req_fq;
++	struct qm_mcc_initfq opts;
++
++	req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
++	if (!req_fq)
++		return ERR_PTR(-ENOMEM);
++
++	req_fq->cb.ern = caam_fq_ern_cb;
++	req_fq->cb.fqs = NULL;
++
++	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
++			     QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
++			     req_fq);
++	if (ret) {
++		dev_err(qidev, "Failed to create session req FQ\n");
++		goto create_req_fq_fail;
++	}
++
++	opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
++		       QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
++		       QM_INITFQ_WE_CGID;
++	opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
++	opts.fqd.dest.channel = qm_channel_caam;
++	opts.fqd.dest.wq = 2;
++	opts.fqd.context_b = qman_fq_fqid(rsp_fq);
++	opts.fqd.context_a.hi = upper_32_bits(hwdesc);
++	opts.fqd.context_a.lo = lower_32_bits(hwdesc);
++	opts.fqd.cgid = qipriv.cgr.cgrid;
++
++	ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
++	if (ret) {
++		dev_err(qidev, "Failed to init session req FQ\n");
++		goto init_req_fq_fail;
++	}
++
++	dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
++		smp_processor_id());
++	return req_fq;
++
++init_req_fq_fail:
++	qman_destroy_fq(req_fq, 0);
++create_req_fq_fail:
++	kfree(req_fq);
++	return ERR_PTR(ret);
++}
++
++static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
++{
++	int ret;
++
++	ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
++				    QMAN_VOLATILE_FLAG_FINISH,
++				    QM_VDQCR_PRECEDENCE_VDQCR |
++				    QM_VDQCR_NUMFRAMES_TILLEMPTY);
++	if (ret) {
++		dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
++		return ret;
++	}
++
++	do {
++		struct qman_portal *p;
++
++		p = qman_get_affine_portal(smp_processor_id());
++		qman_p_poll_dqrr(p, 16);
++	} while (fq->flags & QMAN_FQ_STATE_NE);
++
++	return 0;
++}
++
++static int kill_fq(struct device *qidev, struct qman_fq *fq)
++{
++	u32 flags;
++	int ret;
++
++	ret = qman_retire_fq(fq, &flags);
++	if (ret < 0) {
++		dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
++		return ret;
++	}
++
++	if (!ret)
++		goto empty_fq;
++
++	/* Async FQ retirement condition */
++	if (ret == 1) {
++		/* Retry till the FQ reaches the retired state */
++		do {
++			msleep(20);
++		} while (fq->state != qman_fq_state_retired);
++
++		WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
++		WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
++	}
++
++empty_fq:
++	if (fq->flags & QMAN_FQ_STATE_NE) {
++		ret = empty_retired_fq(qidev, fq);
++		if (ret) {
++			dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
++				fq->fqid);
++			return ret;
++		}
++	}
++
++	ret = qman_oos_fq(fq);
++	if (ret)
++		dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
++
++	qman_destroy_fq(fq, 0);
++	kfree(fq);
++
++	return ret;
++}
++
++static int empty_caam_fq(struct qman_fq *fq)
++{
++	int ret;
++	struct qm_mcr_queryfq_np np;
++
++	/* Wait till the older CAAM FQ gets empty */
++	do {
++		ret = qman_query_fq_np(fq, &np);
++		if (ret)
++			return ret;
++
++		if (!np.frm_cnt)
++			break;
++
++		msleep(20);
++	} while (1);
++
++	/*
++	 * Give extra time for pending jobs from this FQ in holding tanks
++	 * to get processed
++	 */
++	msleep(20);
++	return 0;
++}
++
++int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
++{
++	int ret;
++	u32 num_words;
++	struct qman_fq *new_fq, *old_fq;
++	struct device *qidev = drv_ctx->qidev;
++
++	num_words = desc_len(sh_desc);
++	if (num_words > MAX_SDLEN) {
++		dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
++		return -EINVAL;
++	}
++
++	/* Note down older req FQ */
++	old_fq = drv_ctx->req_fq;
++
++	/* Create a new req FQ in parked state */
++	new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
++				    drv_ctx->context_a, 0);
++	if (unlikely(IS_ERR_OR_NULL(new_fq))) {
++		dev_err(qidev, "FQ allocation for shdesc update failed\n");
++		return PTR_ERR(new_fq);
++	}
++
++	/* Hook up new FQ to context so that new requests keep queuing */
++	drv_ctx->req_fq = new_fq;
++
++	/* Empty and remove the older FQ */
++	ret = empty_caam_fq(old_fq);
++	if (ret) {
++		dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
++
++		/* We can revert to older FQ */
++		drv_ctx->req_fq = old_fq;
++
++		if (kill_fq(qidev, new_fq))
++			dev_warn(qidev, "New CAAM FQ kill failed\n");
++
++		return ret;
++	}
++
++	/*
++	 * Re-initialise pre-header. Set RSLS and SDLEN.
++	 * Update the shared descriptor for driver context.
++	 */
++	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
++					   num_words);
++	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
++	dma_sync_single_for_device(qidev, drv_ctx->context_a,
++				   sizeof(drv_ctx->sh_desc) +
++				   sizeof(drv_ctx->prehdr),
++				   DMA_BIDIRECTIONAL);
++
++	/* Put the new FQ in scheduled state */
++	ret = qman_schedule_fq(new_fq);
++	if (ret) {
++		dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);
++
++		/*
++		 * We can kill new FQ and revert to old FQ.
++		 * Since the desc is already modified, it is still a success
++		 * case.
++		 */
++
++		drv_ctx->req_fq = old_fq;
++
++		if (kill_fq(qidev, new_fq))
++			dev_warn(qidev, "New CAAM FQ kill failed\n");
++	} else if (kill_fq(qidev, old_fq)) {
++		dev_warn(qidev, "Old CAAM FQ kill failed\n");
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL(caam_drv_ctx_update);
++
++struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
++				       int *cpu,
++				       u32 *sh_desc)
++{
++	size_t size;
++	u32 num_words;
++	dma_addr_t hwdesc;
++	struct caam_drv_ctx *drv_ctx;
++	const cpumask_t *cpus = qman_affine_cpus();
++
++	num_words = desc_len(sh_desc);
++	if (num_words > MAX_SDLEN) {
++		dev_err(qidev, "Invalid descriptor len: %d words\n",
++			num_words);
++		return ERR_PTR(-EINVAL);
++	}
++
++	drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
++	if (!drv_ctx)
++		return ERR_PTR(-ENOMEM);
++
++	/*
++	 * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
++	 * and dma-map them.
++	 */
++	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
++					   num_words);
++	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
++	size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
++	hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
++				DMA_BIDIRECTIONAL);
++	if (dma_mapping_error(qidev, hwdesc)) {
++		dev_err(qidev, "DMA map error for preheader + shdesc\n");
++		kfree(drv_ctx);
++		return ERR_PTR(-ENOMEM);
++	}
++	drv_ctx->context_a = hwdesc;
++
++	/* If the given CPU does not own the portal, choose another one that does */
++	if (!cpumask_test_cpu(*cpu, cpus)) {
++		int *pcpu = &get_cpu_var(last_cpu);
++
++		*pcpu = cpumask_next(*pcpu, cpus);
++		if (*pcpu >= nr_cpu_ids)
++			*pcpu = cpumask_first(cpus);
++		*cpu = *pcpu;
++
++		put_cpu_var(last_cpu);
++	}
++	drv_ctx->cpu = *cpu;
++
++	/* Find response FQ hooked with this CPU */
++	drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
++
++	/* Attach request FQ */
++	drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
++					     QMAN_INITFQ_FLAG_SCHED);
++	if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
++		dev_err(qidev, "create_caam_req_fq failed\n");
++		dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
++		kfree(drv_ctx);
++		return ERR_PTR(-ENOMEM);
++	}
++
++	drv_ctx->qidev = qidev;
++	return drv_ctx;
++}
++EXPORT_SYMBOL(caam_drv_ctx_init);
++
++void *qi_cache_alloc(gfp_t flags)
++{
++	return kmem_cache_alloc(qi_cache, flags);
++}
++EXPORT_SYMBOL(qi_cache_alloc);
++
++void qi_cache_free(void *obj)
++{
++	kmem_cache_free(qi_cache, obj);
++}
++EXPORT_SYMBOL(qi_cache_free);
++
++static int caam_qi_poll(struct napi_struct *napi, int budget)
++{
++	struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);
++
++	int cleaned = qman_p_poll_dqrr(np->p, budget);
++
++	if (cleaned < budget) {
++		napi_complete(napi);
++		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
++	}
++
++	return cleaned;
++}
++
++void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
++{
++	if (IS_ERR_OR_NULL(drv_ctx))
++		return;
++
++	/* Remove request FQ */
++	if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
++		dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");
++
++	dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
++			 sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
++			 DMA_BIDIRECTIONAL);
++	kfree(drv_ctx);
++}
++EXPORT_SYMBOL(caam_drv_ctx_rel);
++
++int caam_qi_shutdown(struct device *qidev)
++{
++	int i, ret;
++	struct caam_qi_priv *priv = dev_get_drvdata(qidev);
++	const cpumask_t *cpus = qman_affine_cpus();
++	struct cpumask old_cpumask = current->cpus_allowed;
++
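++	/* Tear down the per-CPU NAPI contexts and response FQs */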
++	for_each_cpu(i, cpus) {
++		struct napi_struct *irqtask;
++
++		irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
++		napi_disable(irqtask);
++		netif_napi_del(irqtask);
++
++		if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
++			dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
++	}
++
++	/*
++	 * QMan driver requires CGRs to be deleted from the same CPU on which
++	 * they were instantiated. Hence we make module removal execute on the
++	 * same CPU on which the module was originally inserted.
++	 */
++	set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
++
++	ret = qman_delete_cgr(&priv->cgr);
++	if (ret)
++		dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
++	else
++		qman_release_cgrid(priv->cgr.cgrid);
++
++	kmem_cache_destroy(qi_cache);
++
++	/* Now that we're done with the CGRs, restore the cpus allowed mask */
++	set_cpus_allowed_ptr(current, &old_cpumask);
++
++	platform_device_unregister(priv->qi_pdev);
++	return ret;
++}
++
++static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
++{
++	caam_congested = congested;
++
++	if (congested) {
++#ifdef CONFIG_DEBUG_FS
++		times_congested++;
++#endif
++		pr_debug_ratelimited("CAAM entered congestion\n");
++
++	} else {
++		pr_debug_ratelimited("CAAM exited congestion\n");
++	}
++}
++
++static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
++{
++	/*
++	 * In case of threaded ISR, for RT kernels in_irq() does not return
++	 * appropriate value, so use in_serving_softirq to distinguish between
++	 * softirq and irq contexts.
++	 */
++	if (unlikely(in_irq() || !in_serving_softirq())) {
++		/* Disable QMan IRQ source and invoke NAPI */
++		qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
++		np->p = p;
++		napi_schedule(&np->irqtask);
++		return 1;
++	}
++	return 0;
++}
++
++static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
++						    struct qman_fq *rsp_fq,
++						    const struct qm_dqrr_entry *dqrr)
++{
++	struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
++	struct caam_drv_req *drv_req;
++	const struct qm_fd *fd;
++	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
++
++	if (caam_qi_napi_schedule(p, caam_napi))
++		return qman_cb_dqrr_stop;
++
++	fd = &dqrr->fd;
++	if (unlikely(fd->status))
++		dev_err(qidev, "Error: %#x in CAAM response FD\n", fd->status);
++
++	if (unlikely(fd->format != qm_fd_compound)) {
++		dev_err(qidev, "Non-compound FD from CAAM\n");
++		return qman_cb_dqrr_consume;
++	}
++
++	drv_req = (struct caam_drv_req *)phys_to_virt(fd->addr);
++	if (unlikely(!drv_req)) {
++		dev_err(qidev,
++			"Can't find original request for caam response\n");
++		return qman_cb_dqrr_consume;
++	}
++
++	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
++			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
++
++	drv_req->cbk(drv_req, fd->status);
++	return qman_cb_dqrr_consume;
++}
++
++static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
++{
++	struct qm_mcc_initfq opts;
++	struct qman_fq *fq;
++	int ret;
++
++	fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
++	if (!fq)
++		return -ENOMEM;
++
++	fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
++
++	ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
++			     QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
++	if (ret) {
++		dev_err(qidev, "Rsp FQ create failed\n");
++		kfree(fq);
++		return -ENODEV;
++	}
++
++	opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
++		       QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
++		       QM_INITFQ_WE_CGID;
++	opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
++			   QM_FQCTRL_CGE;
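++	/* Deliver responses to this CPU's affine portal and stash the FQ
++	 * context and data lines there, so the dequeue callback runs with
++	 * warm caches on the CPU that owns the response FQ.
++	 */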
++	opts.fqd.dest.channel = qman_affine_channel(cpu);
++	opts.fqd.dest.wq = 3;
++	opts.fqd.cgid = qipriv.cgr.cgrid;
++	opts.fqd.context_a.stashing.exclusive =	QM_STASHING_EXCL_CTX |
++						QM_STASHING_EXCL_DATA;
++	opts.fqd.context_a.stashing.data_cl = 1;
++	opts.fqd.context_a.stashing.context_cl = 1;
++
++	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
++	if (ret) {
++		dev_err(qidev, "Rsp FQ init failed\n");
++		kfree(fq);
++		return -ENODEV;
++	}
++
++	per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
++
++	dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
++	return 0;
++}
++
++static int init_cgr(struct device *qidev)
++{
++	int ret;
++	struct qm_mcc_initcgr opts;
++	const u64 cpus = *(u64 *)qman_affine_cpus();
++	const int num_cpus = hweight64(cpus);
++	const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
++
++	ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
++	if (ret) {
++		dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
++		return ret;
++	}
++
++	qipriv.cgr.cb = cgr_cb;
++	memset(&opts, 0, sizeof(opts));
++	opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
++	opts.cgr.cscn_en = QM_CGR_EN;
++	opts.cgr.mode = QMAN_CGR_MODE_FRAME;
++	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
++
++	ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
++	if (ret) {
++		dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
++			qipriv.cgr.cgrid);
++		return ret;
++	}
++
++	dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
++	return 0;
++}
++
++static int alloc_rsp_fqs(struct device *qidev)
++{
++	int ret, i;
++	const cpumask_t *cpus = qman_affine_cpus();
++
++	/* Now create response FQs */
++	for_each_cpu(i, cpus) {
++		ret = alloc_rsp_fq_cpu(qidev, i);
++		if (ret) {
++			dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u", i);
++			return ret;
++		}
++	}
++
++	return 0;
++}
++
++static void free_rsp_fqs(void)
++{
++	int i;
++	const cpumask_t *cpus = qman_affine_cpus();
++
++	for_each_cpu(i, cpus)
++		kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
++}
++
++int caam_qi_init(struct platform_device *caam_pdev)
++{
++	int err, i;
++	struct platform_device *qi_pdev;
++	struct device *ctrldev = &caam_pdev->dev, *qidev;
++	struct caam_drv_private *ctrlpriv;
++	const cpumask_t *cpus = qman_affine_cpus();
++	struct cpumask old_cpumask = current->cpus_allowed;
++	static struct platform_device_info qi_pdev_info = {
++		.name = "caam_qi",
++		.id = PLATFORM_DEVID_NONE
++	};
++
++	/*
++	 * QMAN requires CGRs to be removed from same CPU+portal from where it
++	 * was originally allocated. Hence we need to note down the
++	 * initialisation CPU and use the same CPU for module exit.
++	 * We select the first CPU from the list of portal owning CPUs.
++	 * Then we pin module init to this CPU.
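++	 * (set_cpus_allowed_ptr() below temporarily pins the current thread;
++	 * the saved old_cpumask is restored once initialisation is complete)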
++	 */
++	mod_init_cpu = cpumask_first(cpus);
++	set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
++
++	qi_pdev_info.parent = ctrldev;
++	qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
++	qi_pdev = platform_device_register_full(&qi_pdev_info);
++	if (IS_ERR(qi_pdev))
++		return PTR_ERR(qi_pdev);
++	arch_setup_dma_ops(&qi_pdev->dev, 0, 0, NULL, true);
++
++	ctrlpriv = dev_get_drvdata(ctrldev);
++	qidev = &qi_pdev->dev;
++
++	qipriv.qi_pdev = qi_pdev;
++	dev_set_drvdata(qidev, &qipriv);
++
++	/* Initialize the congestion detection */
++	err = init_cgr(qidev);
++	if (err) {
++		dev_err(qidev, "CGR initialization failed: %d\n", err);
++		platform_device_unregister(qi_pdev);
++		return err;
++	}
++
++	/* Initialise response FQs */
++	err = alloc_rsp_fqs(qidev);
++	if (err) {
++		dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
++		free_rsp_fqs();
++		platform_device_unregister(qi_pdev);
++		return err;
++	}
++
++	/*
++	 * Enable the NAPI contexts on each core that has an affine
++	 * portal.
++	 */
++	for_each_cpu(i, cpus) {
++		struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
++		struct caam_napi *caam_napi = &priv->caam_napi;
++		struct napi_struct *irqtask = &caam_napi->irqtask;
++		struct net_device *net_dev = &priv->net_dev;
++
++		net_dev->dev = *qidev;
++		INIT_LIST_HEAD(&net_dev->napi_list);
++
++		netif_napi_add(net_dev, irqtask, caam_qi_poll,
++			       CAAM_NAPI_WEIGHT);
++
++		napi_enable(irqtask);
++	}
++
++	/* Hook up QI device to parent controlling caam device */
++	ctrlpriv->qidev = qidev;
++
++	qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
++				     SLAB_CACHE_DMA, NULL);
++	if (!qi_cache) {
++		dev_err(qidev, "Can't allocate CAAM cache\n");
++		free_rsp_fqs();
++		platform_device_unregister(qi_pdev);
++		return -ENOMEM;
++	}
++
++	/* Done with the CGRs; restore the cpus allowed mask */
++	set_cpus_allowed_ptr(current, &old_cpumask);
++#ifdef CONFIG_DEBUG_FS
++	debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
++			    &times_congested, &caam_fops_u64_ro);
++#endif
++	dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
++	return 0;
++}
+--- /dev/null
++++ b/drivers/crypto/caam/qi.h
+@@ -0,0 +1,204 @@
++/*
++ * Public definitions for the CAAM/QI (Queue Interface) backend.
++ *
++ * Copyright 2013-2016 Freescale Semiconductor, Inc.
++ * Copyright 2016-2017 NXP
++ */
++
++#ifndef __QI_H__
++#define __QI_H__
++
++#include
++#include "compat.h"
++#include "desc.h"
++#include "desc_constr.h"
++
++/*
++ * CAAM hardware constructs a job descriptor which points to a shared descriptor
++ * (as pointed by context_a of to-CAAM FQ).
++ * When the job descriptor is executed by DECO, the whole job descriptor
++ * together with shared descriptor gets loaded in DECO buffer, which is
++ * 64 words (each 32-bit) long.
++ *
++ * The job descriptor constructed by CAAM hardware has the following layout:
++ *
++ *	HEADER		(1 word)
++ *	Shdesc ptr	(1 or 2 words)
++ *	SEQ_OUT_PTR	(1 word)
++ *	Out ptr		(1 or 2 words)
++ *	Out length	(1 word)
++ *	SEQ_IN_PTR	(1 word)
++ *	In ptr		(1 or 2 words)
++ *	In length	(1 word)
++ *
++ * The shdesc ptr is used to fetch shared descriptor contents into DECO buffer.
++ *
++ * Apart from shdesc contents, the total number of words that get loaded in DECO
++ * buffer is '8' or '11'. The remaining words in DECO buffer can be used for
++ * storing shared descriptor.
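++ *
++ * Worked example (assuming 64-bit CAAM pointers): DESC_JOB_IO_LEN is
++ * 5 * CAAM_CMD_SZ + 3 * CAAM_PTR_SZ = 44 bytes, i.e. the 11 words above,
++ * so MAX_SDLEN = (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ
++ *              = (256 - 44) / 4 = 53 words left for the shared descriptor.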
++ */
++#define MAX_SDLEN	((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
++
++/* Length of a single buffer in the QI driver memory cache */
++#define CAAM_QI_MEMCACHE_SIZE	768
++
++extern bool caam_congested __read_mostly;
++
++/*
++ * This is the request structure the driver application should fill while
++ * submitting a job to driver.
++ */
++struct caam_drv_req;
++
++/*
++ * caam_qi_cbk - application's callback function invoked by the driver when the
++ *               request has been successfully processed.
++ * @drv_req: original request that was submitted
++ * @status: completion status of request (0 - success, non-zero - error code)
++ */
++typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
++
++enum optype {
++	ENCRYPT,
++	DECRYPT,
++	GIVENCRYPT,
++	NUM_OP
++};
++
++/**
++ * caam_drv_ctx - CAAM/QI backend driver context
++ *
++ * The jobs are processed by the driver against a driver context.
++ * With every cryptographic context, a driver context is attached.
++ * The driver context contains data for private use by driver.
++ * For the applications, this is an opaque structure.
++ *
++ * @prehdr: preheader placed before shrd desc
++ * @sh_desc: shared descriptor
++ * @context_a: shared descriptor dma address
++ * @req_fq: to-CAAM request frame queue
++ * @rsp_fq: from-CAAM response frame queue
++ * @cpu: cpu on which to receive CAAM response
++ * @op_type: operation type
++ * @qidev: device pointer for CAAM/QI backend
++ */
++struct caam_drv_ctx {
++	u32 prehdr[2];
++	u32 sh_desc[MAX_SDLEN];
++	dma_addr_t context_a;
++	struct qman_fq *req_fq;
++	struct qman_fq *rsp_fq;
++	int cpu;
++	enum optype op_type;
++	struct device *qidev;
++} ____cacheline_aligned;
++
++/**
++ * caam_drv_req - The request structure the driver application should fill while
++ *                submitting a job to driver.
++ * @fd_sgt: QMan S/G pointing to output (fd_sgt[0]) and input (fd_sgt[1])
++ *          buffers.
++ * @cbk: callback function to invoke when job is completed
++ * @app_ctx: arbitrary context attached with request by the application
++ *
++ * The fields mentioned below should not be used by application.
++ * These are for private use by driver.
++ *
++ * @drv_ctx: driver context under which this request is to be executed
++ */
++struct caam_drv_req {
++	struct qm_sg_entry fd_sgt[2];
++	struct caam_drv_ctx *drv_ctx;
++	caam_qi_cbk cbk;
++	void *app_ctx;
++} ____cacheline_aligned;
++
++/**
++ * caam_drv_ctx_init - Initialise a CAAM/QI driver context
++ *
++ * A CAAM/QI driver context must be attached with each cryptographic context.
++ * This function allocates memory for CAAM/QI context and returns a handle to
++ * the application. This handle must be submitted along with each enqueue
++ * request to the driver by the application.
++ *
++ * @cpu: CPU where the application prefers the driver to receive CAAM
++ *       responses. The request completion callback would be issued from this
++ *       CPU.
++ * @sh_desc: shared descriptor pointer to be attached with CAAM/QI driver
++ *           context.
++ *
++ * Returns a driver context on success or negative error code on failure.
++ */
++struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu,
++				       u32 *sh_desc);
++
++/**
++ * caam_qi_enqueue - Submit a request to QI backend driver.
++ *
++ * The request structure must be properly filled as described above.
++ *
++ * @qidev: device pointer for QI backend
++ * @req: CAAM QI request structure
++ *
++ * Returns 0 on success or negative error code on failure.
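++ *
++ * Illustrative call sequence (error handling omitted; my_done_cbk is a
++ * hypothetical caller-supplied caam_qi_cbk, and src/dst buffers are assumed
++ * to be DMA-mapped already):
++ *
++ *	drv_ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);
++ *	dma_to_qm_sg_one(&req->fd_sgt[0], dst_dma, dst_len, 0);
++ *	dma_to_qm_sg_one_last(&req->fd_sgt[1], src_dma, src_len, 0);
++ *	req->drv_ctx = drv_ctx;
++ *	req->cbk = my_done_cbk;
++ *	ret = caam_qi_enqueue(qidev, req);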
++ */
++int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req);
++
++/**
++ * caam_drv_ctx_busy - Check if there are too many jobs pending with CAAM
++ *		       or too many CAAM responses are pending to be processed.
++ * @drv_ctx: driver context for which job is to be submitted
++ *
++ * Returns caam congestion status 'true/false'
++ */
++bool caam_drv_ctx_busy(struct caam_drv_ctx *drv_ctx);
++
++/**
++ * caam_drv_ctx_update - Update QI driver context
++ *
++ * Invoked when shared descriptor is required to be changed in driver context.
++ *
++ * @drv_ctx: driver context to be updated
++ * @sh_desc: new shared descriptor pointer to be updated in QI driver context
++ *
++ * Returns 0 on success or negative error code on failure.
++ */
++int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
++
++/**
++ * caam_drv_ctx_rel - Release a QI driver context
++ * @drv_ctx: context to be released
++ */
++void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
++
++int caam_qi_init(struct platform_device *pdev);
++int caam_qi_shutdown(struct device *dev);
++
++/**
++ * qi_cache_alloc - Allocate buffers from CAAM-QI cache
++ *
++ * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) needs data which has
++ * to be allocated on the hotpath. Instead of using malloc, one can use the
++ * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
++ * will have a size of CAAM_QI_MEMCACHE_SIZE, which is sufficient for hosting
++ * the HW S/G entries and per-request context.
++ *
++ * @flags: flags that would be used for the equivalent malloc(..) call
++ *
++ * Returns a pointer to a retrieved buffer on success or NULL on failure.
++ */
++void *qi_cache_alloc(gfp_t flags);
++
++/**
++ * qi_cache_free - Frees buffers allocated from CAAM-QI cache
++ *
++ * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) no longer needs
++ * the buffer previously allocated by a qi_cache_alloc call.
++ * No checking is being done, the call is a passthrough call to
++ * kmem_cache_free(...)
++ *
++ * @obj: object previously allocated using qi_cache_alloc()
++ */
++void qi_cache_free(void *obj);
++
++#endif /* __QI_H__ */
+--- a/drivers/crypto/caam/regs.h
++++ b/drivers/crypto/caam/regs.h
+@@ -2,6 +2,7 @@
+  * CAAM hardware register-level view
+  *
+  * Copyright 2008-2011 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP + */ + + #ifndef REGS_H +@@ -67,6 +68,7 @@ + */ + + extern bool caam_little_end; ++extern bool caam_imx; + + #define caam_to_cpu(len) \ + static inline u##len caam##len ## _to_cpu(u##len val) \ +@@ -154,13 +156,10 @@ static inline u64 rd_reg64(void __iomem + #else /* CONFIG_64BIT */ + static inline void wr_reg64(void __iomem *reg, u64 data) + { +-#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX +- if (caam_little_end) { ++ if (!caam_imx && caam_little_end) { + wr_reg32((u32 __iomem *)(reg) + 1, data >> 32); + wr_reg32((u32 __iomem *)(reg), data); +- } else +-#endif +- { ++ } else { + wr_reg32((u32 __iomem *)(reg), data >> 32); + wr_reg32((u32 __iomem *)(reg) + 1, data); + } +@@ -168,41 +167,40 @@ static inline void wr_reg64(void __iomem + + static inline u64 rd_reg64(void __iomem *reg) + { +-#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX +- if (caam_little_end) ++ if (!caam_imx && caam_little_end) + return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 | + (u64)rd_reg32((u32 __iomem *)(reg))); +- else +-#endif +- return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 | +- (u64)rd_reg32((u32 __iomem *)(reg) + 1)); ++ ++ return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 | ++ (u64)rd_reg32((u32 __iomem *)(reg) + 1)); + } + #endif /* CONFIG_64BIT */ + ++static inline u64 cpu_to_caam_dma64(dma_addr_t value) ++{ ++ if (caam_imx) ++ return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | ++ (u64)cpu_to_caam32(upper_32_bits(value))); ++ ++ return cpu_to_caam64(value); ++} ++ ++static inline u64 caam_dma64_to_cpu(u64 value) ++{ ++ if (caam_imx) ++ return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | ++ (u64)caam32_to_cpu(upper_32_bits(value))); ++ ++ return caam64_to_cpu(value); ++} ++ + #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT +-#ifdef CONFIG_SOC_IMX7D +-#define cpu_to_caam_dma(value) \ +- (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \ +- (u64)cpu_to_caam32(upper_32_bits(value))) +-#define caam_dma_to_cpu(value) \ +- (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \ +- (u64)caam32_to_cpu(upper_32_bits(value))) +-#else +-#define cpu_to_caam_dma(value) cpu_to_caam64(value) +-#define caam_dma_to_cpu(value) caam64_to_cpu(value) +-#endif /* CONFIG_SOC_IMX7D */ ++#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value) ++#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value) + #else + #define cpu_to_caam_dma(value) cpu_to_caam32(value) + #define caam_dma_to_cpu(value) caam32_to_cpu(value) +-#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */ +- +-#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX +-#define cpu_to_caam_dma64(value) \ +- (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \ +- (u64)cpu_to_caam32(upper_32_bits(value))) +-#else +-#define cpu_to_caam_dma64(value) cpu_to_caam64(value) +-#endif ++#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */ + + /* + * jr_outentry +@@ -293,6 +291,7 @@ struct caam_perfmon { + u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/ + #define CTPR_MS_QI_SHIFT 25 + #define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT) ++#define CTPR_MS_DPAA2 BIT(13) + #define CTPR_MS_VIRT_EN_INCL 0x00000001 + #define CTPR_MS_VIRT_EN_POR 0x00000002 + #define CTPR_MS_PG_SZ_MASK 0x10 +@@ -628,6 +627,8 @@ struct caam_job_ring { + #define JRSTA_DECOERR_INVSIGN 0x86 + #define JRSTA_DECOERR_DSASIGN 0x87 + ++#define JRSTA_QIERR_ERROR_MASK 0x00ff ++ + #define JRSTA_CCBERR_JUMP 0x08000000 + #define JRSTA_CCBERR_INDEX_MASK 0xff00 + #define JRSTA_CCBERR_INDEX_SHIFT 8 +--- /dev/null ++++ b/drivers/crypto/caam/sg_sw_qm.h +@@ -0,0 +1,126 @@ ++/* ++ * Copyright 2013-2016 Freescale Semiconductor, Inc. 
++ * Copyright 2016-2017 NXP ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#ifndef __SG_SW_QM_H ++#define __SG_SW_QM_H ++ ++#include ++#include "regs.h" ++ ++static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr) ++{ ++ dma_addr_t addr = qm_sg_ptr->opaque; ++ ++ qm_sg_ptr->opaque = cpu_to_caam64(addr); ++ qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl); ++} ++ ++static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma, ++ u32 len, u16 offset) ++{ ++ qm_sg_ptr->addr = dma; ++ qm_sg_ptr->length = len; ++ qm_sg_ptr->__reserved2 = 0; ++ qm_sg_ptr->bpid = 0; ++ qm_sg_ptr->__reserved3 = 0; ++ qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK; ++ ++ cpu_to_hw_sg(qm_sg_ptr); ++} ++ ++static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr, ++ dma_addr_t dma, u32 len, u16 offset) ++{ ++ qm_sg_ptr->extension = 0; ++ qm_sg_ptr->final = 0; ++ __dma_to_qm_sg(qm_sg_ptr, dma, len, offset); ++} ++ ++static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr, ++ dma_addr_t dma, u32 len, u16 offset) ++{ ++ qm_sg_ptr->extension = 0; ++ qm_sg_ptr->final = 1; ++ __dma_to_qm_sg(qm_sg_ptr, dma, len, offset); ++} ++ ++static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr, ++ dma_addr_t dma, u32 len, u16 offset) ++{ ++ qm_sg_ptr->extension = 1; ++ qm_sg_ptr->final = 0; ++ __dma_to_qm_sg(qm_sg_ptr, dma, len, offset); ++} ++ ++static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr, ++ dma_addr_t dma, u32 len, ++ u16 offset) ++{ ++ qm_sg_ptr->extension = 1; ++ qm_sg_ptr->final = 1; ++ __dma_to_qm_sg(qm_sg_ptr, dma, len, offset); ++} ++ ++/* ++ * convert scatterlist to h/w link table format ++ * but does not have final bit; instead, returns last entry ++ */ ++static inline struct qm_sg_entry * ++sg_to_qm_sg(struct scatterlist *sg, int sg_count, ++ struct qm_sg_entry *qm_sg_ptr, u16 offset) ++{ ++ while (sg_count && sg) { ++ dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ++ sg_dma_len(sg), offset); ++ qm_sg_ptr++; ++ sg = sg_next(sg); ++ sg_count--; ++ } ++ return qm_sg_ptr - 1; ++} ++ ++/* ++ * convert scatterlist to h/w link table format ++ * scatterlist must have been previously dma mapped ++ */ ++static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count, ++ struct qm_sg_entry *qm_sg_ptr, u16 offset) ++{ ++ qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset); ++ ++ qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl); ++ qm_sg_ptr->final = 1; ++ qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl); ++} ++ ++#endif /* __SG_SW_QM_H */ +--- /dev/null ++++ b/drivers/crypto/caam/sg_sw_qm2.h +@@ -0,0 +1,81 @@ ++/* ++ * Copyright 2015-2016 Freescale Semiconductor, Inc. ++ * Copyright 2017 NXP ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the names of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. 
++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifndef _SG_SW_QM2_H_ ++#define _SG_SW_QM2_H_ ++ ++#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h" ++ ++static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr, ++ dma_addr_t dma, u32 len, u16 offset) ++{ ++ dpaa2_sg_set_addr(qm_sg_ptr, dma); ++ dpaa2_sg_set_format(qm_sg_ptr, dpaa2_sg_single); ++ dpaa2_sg_set_final(qm_sg_ptr, false); ++ dpaa2_sg_set_len(qm_sg_ptr, len); ++ dpaa2_sg_set_bpid(qm_sg_ptr, 0); ++ dpaa2_sg_set_offset(qm_sg_ptr, offset); ++} ++ ++/* ++ * convert scatterlist to h/w link table format ++ * but does not have final bit; instead, returns last entry ++ */ ++static inline struct dpaa2_sg_entry * ++sg_to_qm_sg(struct scatterlist *sg, int sg_count, ++ struct dpaa2_sg_entry *qm_sg_ptr, u16 offset) ++{ ++ while (sg_count && sg) { ++ dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ++ sg_dma_len(sg), offset); ++ qm_sg_ptr++; ++ sg = sg_next(sg); ++ sg_count--; ++ } ++ return qm_sg_ptr - 1; ++} ++ ++/* ++ * convert scatterlist to h/w link table format ++ * scatterlist must have been previously dma mapped ++ */ ++static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count, ++ struct dpaa2_sg_entry *qm_sg_ptr, ++ u16 offset) ++{ ++ qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset); ++ dpaa2_sg_set_final(qm_sg_ptr, true); ++} ++ ++#endif /* _SG_SW_QM2_H_ */ +--- a/drivers/crypto/caam/sg_sw_sec4.h ++++ b/drivers/crypto/caam/sg_sw_sec4.h +@@ -5,9 +5,19 @@ + * + */ + ++#ifndef _SG_SW_SEC4_H_ ++#define _SG_SW_SEC4_H_ ++ ++#include "ctrl.h" + #include "regs.h" ++#include "sg_sw_qm2.h" ++#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h" + +-struct sec4_sg_entry; ++struct sec4_sg_entry { ++ u64 ptr; ++ u32 len; ++ u32 bpid_offset; ++}; + + /* + * convert single dma address to h/w link table format +@@ -15,9 +25,15 @@ struct sec4_sg_entry; + static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr, + dma_addr_t dma, u32 len, u16 offset) + { +- sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma); +- sec4_sg_ptr->len = cpu_to_caam32(len); +- sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK); ++ if (caam_dpaa2) { ++ dma_to_qm_sg_one((struct dpaa2_sg_entry *)sec4_sg_ptr, dma, len, ++ offset); ++ } else { ++ sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma); ++ sec4_sg_ptr->len = cpu_to_caam32(len); ++ sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & ++ SEC4_SG_OFFSET_MASK); ++ } + #ifdef DEBUG + print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ", + 
DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr, +@@ -43,6 +59,14 @@ sg_to_sec4_sg(struct scatterlist *sg, in + return sec4_sg_ptr - 1; + } + ++static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr) ++{ ++ if (caam_dpaa2) ++ dpaa2_sg_set_final((struct dpaa2_sg_entry *)sec4_sg_ptr, true); ++ else ++ sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN); ++} ++ + /* + * convert scatterlist to h/w link table format + * scatterlist must have been previously dma mapped +@@ -52,31 +76,7 @@ static inline void sg_to_sec4_sg_last(st + u16 offset) + { + sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset); +- sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN); +-} +- +-static inline struct sec4_sg_entry *sg_to_sec4_sg_len( +- struct scatterlist *sg, unsigned int total, +- struct sec4_sg_entry *sec4_sg_ptr) +-{ +- do { +- unsigned int len = min(sg_dma_len(sg), total); +- +- dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), len, 0); +- sec4_sg_ptr++; +- sg = sg_next(sg); +- total -= len; +- } while (total); +- return sec4_sg_ptr - 1; ++ sg_to_sec4_set_last(sec4_sg_ptr); + } + +-/* derive number of elements in scatterlist, but return 0 for 1 */ +-static inline int sg_count(struct scatterlist *sg_list, int nbytes) +-{ +- int sg_nents = sg_nents_for_len(sg_list, nbytes); +- +- if (likely(sg_nents == 1)) +- return 0; +- +- return sg_nents; +-} ++#endif /* _SG_SW_SEC4_H_ */ +--- a/drivers/net/wireless/rsi/rsi_91x_usb.c ++++ b/drivers/net/wireless/rsi/rsi_91x_usb.c +@@ -516,7 +516,7 @@ err: + + /** + * rsi_disconnect() - This function performs the reverse of the probe function, +- * it deintialize the driver structure. ++ * it deinitialize the driver structure. + * @pfunction: Pointer to the USB interface structure. + * + * Return: None. +--- a/drivers/staging/wilc1000/linux_wlan.c ++++ b/drivers/staging/wilc1000/linux_wlan.c +@@ -211,7 +211,7 @@ static void deinit_irq(struct net_device + vif = netdev_priv(dev); + wilc = vif->wilc; + +- /* Deintialize IRQ */ ++ /* Deinitialize IRQ */ + if (wilc->dev_irq_num) { + free_irq(wilc->dev_irq_num, wilc); + gpio_free(wilc->gpio); +--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c ++++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c +@@ -2359,7 +2359,7 @@ int wilc_deinit_host_int(struct net_devi + del_timer_sync(&wilc_during_ip_timer); + + if (s32Error) +- netdev_err(net, "Error while deintializing host interface\n"); ++ netdev_err(net, "Error while deinitializing host interface\n"); + + return s32Error; + } +--- /dev/null ++++ b/include/crypto/acompress.h +@@ -0,0 +1,269 @@ ++/* ++ * Asynchronous Compression operations ++ * ++ * Copyright (c) 2016, Intel Corporation ++ * Authors: Weigang Li ++ * Giovanni Cabiddu ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the Free ++ * Software Foundation; either version 2 of the License, or (at your option) ++ * any later version. 
++ *
++ */
++#ifndef _CRYPTO_ACOMP_H
++#define _CRYPTO_ACOMP_H
++#include <linux/crypto.h>
++
++#define CRYPTO_ACOMP_ALLOC_OUTPUT	0x00000001
++
++/**
++ * struct acomp_req - asynchronous (de)compression request
++ *
++ * @base:	Common attributes for asynchronous crypto requests
++ * @src:	Source Data
++ * @dst:	Destination data
++ * @slen:	Size of the input buffer
++ * @dlen:	Size of the output buffer and number of bytes produced
++ * @flags:	Internal flags
++ * @__ctx:	Start of private context data
++ */
++struct acomp_req {
++	struct crypto_async_request base;
++	struct scatterlist *src;
++	struct scatterlist *dst;
++	unsigned int slen;
++	unsigned int dlen;
++	u32 flags;
++	void *__ctx[] CRYPTO_MINALIGN_ATTR;
++};
++
++/**
++ * struct crypto_acomp - user-instantiated objects which encapsulate
++ * algorithms and core processing logic
++ *
++ * @compress:	Function performs a compress operation
++ * @decompress:	Function performs a de-compress operation
++ * @dst_free:	Frees destination buffer if allocated inside the
++ *		algorithm
++ * @reqsize:	Context size for (de)compression requests
++ * @base:	Common crypto API algorithm data structure
++ */
++struct crypto_acomp {
++	int (*compress)(struct acomp_req *req);
++	int (*decompress)(struct acomp_req *req);
++	void (*dst_free)(struct scatterlist *dst);
++	unsigned int reqsize;
++	struct crypto_tfm base;
++};
++
++/**
++ * struct acomp_alg - asynchronous compression algorithm
++ *
++ * @compress:	Function performs a compress operation
++ * @decompress:	Function performs a de-compress operation
++ * @dst_free:	Frees destination buffer if allocated inside the algorithm
++ * @init:	Initialize the cryptographic transformation object.
++ *		This function is used to initialize the cryptographic
++ *		transformation object. This function is called only once at
++ *		the instantiation time, right after the transformation context
++ *		was allocated. In case the cryptographic hardware has some
++ *		special requirements which need to be handled by software, this
++ *		function shall check for the precise requirement of the
++ *		transformation and put any software fallbacks in place.
++ * @exit:	Deinitialize the cryptographic transformation object. This is a
++ *		counterpart to @init, used to remove various changes set in
++ *		@init.
++ *
++ * @reqsize:	Context size for (de)compression requests
++ * @base:	Common crypto API algorithm data structure
++ */
++struct acomp_alg {
++	int (*compress)(struct acomp_req *req);
++	int (*decompress)(struct acomp_req *req);
++	void (*dst_free)(struct scatterlist *dst);
++	int (*init)(struct crypto_acomp *tfm);
++	void (*exit)(struct crypto_acomp *tfm);
++	unsigned int reqsize;
++	struct crypto_alg base;
++};
++
++/**
++ * DOC: Asynchronous Compression API
++ *
++ * The Asynchronous Compression API is used with the algorithms of type
++ * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
++ */
++
++/**
++ * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
++ * @alg_name:	is the cra_name / name or cra_driver_name / driver name of the
++ *		compression algorithm e.g. "deflate"
++ * @type:	specifies the type of the algorithm
++ * @mask:	specifies the mask for the algorithm
++ *
++ * Allocate a handle for a compression algorithm. The returned struct
++ * crypto_acomp is the handle that is required for any subsequent
++ * API invocation for the compression operations.
++ *
++ * Return:	allocated handle in case of success; IS_ERR() is true in case
++ *		of an error, PTR_ERR() returns the error code.
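++ *
++ * A minimal usage sketch (hypothetical caller; assumes a "deflate"
++ * implementation is available and omits the request setup shown below):
++ *
++ *	struct crypto_acomp *tfm = crypto_alloc_acomp("deflate", 0, 0);
++ *
++ *	if (IS_ERR(tfm))
++ *		return PTR_ERR(tfm);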
++ */
++struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
++					u32 mask);
++
++static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
++{
++	return &tfm->base;
++}
++
++static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
++{
++	return container_of(alg, struct acomp_alg, base);
++}
++
++static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
++{
++	return container_of(tfm, struct crypto_acomp, base);
++}
++
++static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
++{
++	return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
++}
++
++static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
++{
++	return tfm->reqsize;
++}
++
++static inline void acomp_request_set_tfm(struct acomp_req *req,
++					 struct crypto_acomp *tfm)
++{
++	req->base.tfm = crypto_acomp_tfm(tfm);
++}
++
++static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
++{
++	return __crypto_acomp_tfm(req->base.tfm);
++}
++
++/**
++ * crypto_free_acomp() -- free ACOMPRESS tfm handle
++ *
++ * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
++ */
++static inline void crypto_free_acomp(struct crypto_acomp *tfm)
++{
++	crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
++}
++
++static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
++{
++	type &= ~CRYPTO_ALG_TYPE_MASK;
++	type |= CRYPTO_ALG_TYPE_ACOMPRESS;
++	mask |= CRYPTO_ALG_TYPE_MASK;
++
++	return crypto_has_alg(alg_name, type, mask);
++}
++
++/**
++ * acomp_request_alloc() -- allocates asynchronous (de)compression request
++ *
++ * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
++ *
++ * Return:	allocated handle in case of success or NULL in case of an error
++ */
++struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
++
++/**
++ * acomp_request_free() -- zeroize and free asynchronous (de)compression
++ *			   request as well as the output buffer if allocated
++ *			   inside the algorithm
++ *
++ * @req:	request to free
++ */
++void acomp_request_free(struct acomp_req *req);
++
++/**
++ * acomp_request_set_callback() -- Sets an asynchronous callback
++ *
++ * Callback will be called when an asynchronous operation on a given
++ * request is finished.
++ *
++ * @req:	request that the callback will be set for
++ * @flgs:	specify for instance if the operation may backlog
++ * @cmpl:	callback which will be called
++ * @data:	private data used by the caller
++ */
++static inline void acomp_request_set_callback(struct acomp_req *req,
++					      u32 flgs,
++					      crypto_completion_t cmpl,
++					      void *data)
++{
++	req->base.complete = cmpl;
++	req->base.data = data;
++	req->base.flags = flgs;
++}
++
++/**
++ * acomp_request_set_params() -- Sets request parameters
++ *
++ * Sets parameters required by an acomp operation
++ *
++ * @req:	asynchronous compress request
++ * @src:	pointer to input buffer scatterlist
++ * @dst:	pointer to output buffer scatterlist. If this is NULL, the
++ *		acomp layer will allocate the output memory
++ * @slen:	size of the input buffer
++ * @dlen:	size of the output buffer. If dst is NULL, this can be used by
++ *		the user to specify the maximum amount of memory to allocate
++ */
++static inline void acomp_request_set_params(struct acomp_req *req,
++					    struct scatterlist *src,
++					    struct scatterlist *dst,
++					    unsigned int slen,
++					    unsigned int dlen)
++{
++	req->src = src;
++	req->dst = dst;
++	req->slen = slen;
++	req->dlen = dlen;
++
++	if (!req->dst)
++		req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
++}
++
++/**
++ * crypto_acomp_compress() -- Invoke asynchronous compress operation
++ *
++ * Function invokes the asynchronous compress operation
++ *
++ * @req:	asynchronous compress request
++ *
++ * Return:	zero on success; error code in case of error
++ */
++static inline int crypto_acomp_compress(struct acomp_req *req)
++{
++	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
++
++	return tfm->compress(req);
++}
++
++/**
++ * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
++ *
++ * Function invokes the asynchronous decompress operation
++ *
++ * @req:	asynchronous compress request
++ *
++ * Return:	zero on success; error code in case of error
++ */
++static inline int crypto_acomp_decompress(struct acomp_req *req)
++{
++	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
++
++	return tfm->decompress(req);
++}
++
++#endif
+--- /dev/null
++++ b/include/crypto/internal/acompress.h
+@@ -0,0 +1,81 @@
++/*
++ * Asynchronous Compression operations
++ *
++ * Copyright (c) 2016, Intel Corporation
++ * Authors: Weigang Li <weigang.li@intel.com>
++ *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ *
++ */
++#ifndef _CRYPTO_ACOMP_INT_H
++#define _CRYPTO_ACOMP_INT_H
++#include <crypto/acompress.h>
++
++/*
++ * Transform internal helpers.
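++ * These are for acomp algorithm implementations and the crypto core, not
++ * for API users; acomp_request_ctx() returns the per-request private area
++ * sized by the transform's reqsize.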
++ */
++static inline void *acomp_request_ctx(struct acomp_req *req)
++{
++	return req->__ctx;
++}
++
++static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
++{
++	return tfm->base.__crt_ctx;
++}
++
++static inline void acomp_request_complete(struct acomp_req *req,
++					  int err)
++{
++	req->base.complete(&req->base, err);
++}
++
++static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
++{
++	return crypto_acomp_tfm(tfm)->__crt_alg->cra_name;
++}
++
++static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
++{
++	struct acomp_req *req;
++
++	req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
++	if (likely(req))
++		acomp_request_set_tfm(req, tfm);
++	return req;
++}
++
++static inline void __acomp_request_free(struct acomp_req *req)
++{
++	kzfree(req);
++}
++
++/**
++ * crypto_register_acomp() -- Register asynchronous compression algorithm
++ *
++ * Function registers an implementation of an asynchronous
++ * compression algorithm
++ *
++ * @alg:	algorithm definition
++ *
++ * Return:	zero on success; error code in case of error
++ */
++int crypto_register_acomp(struct acomp_alg *alg);
++
++/**
++ * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
++ *
++ * Function unregisters an implementation of an asynchronous
++ * compression algorithm
++ *
++ * @alg:	algorithm definition
++ *
++ * Return:	zero on success; error code in case of error
++ */
++int crypto_unregister_acomp(struct acomp_alg *alg);
++
++#endif
+--- /dev/null
++++ b/include/crypto/internal/scompress.h
+@@ -0,0 +1,136 @@
++/*
++ * Synchronous Compression operations
++ *
++ * Copyright 2015 LG Electronics Inc.
++ * Copyright (c) 2016, Intel Corporation
++ * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ *
++ */
++#ifndef _CRYPTO_SCOMP_INT_H
++#define _CRYPTO_SCOMP_INT_H
++#include <linux/crypto.h>
++
++#define SCOMP_SCRATCH_SIZE	131072
++
++struct crypto_scomp {
++	struct crypto_tfm base;
++};
++
++/**
++ * struct scomp_alg - synchronous compression algorithm
++ *
++ * @alloc_ctx:	Function allocates algorithm specific context
++ * @free_ctx:	Function frees context allocated with alloc_ctx
++ * @compress:	Function performs a compress operation
++ * @decompress:	Function performs a de-compress operation
++ * @init:	Initialize the cryptographic transformation object.
++ *		This function is used to initialize the cryptographic
++ *		transformation object. This function is called only once at
++ *		the instantiation time, right after the transformation context
++ *		was allocated. In case the cryptographic hardware has some
++ *		special requirements which need to be handled by software, this
++ *		function shall check for the precise requirement of the
++ *		transformation and put any software fallbacks in place.
++ * @exit:	Deinitialize the cryptographic transformation object. This is a
++ *		counterpart to @init, used to remove various changes set in
++ *		@init.
++ * @base: Common crypto API algorithm data structure ++ */ ++struct scomp_alg { ++ void *(*alloc_ctx)(struct crypto_scomp *tfm); ++ void (*free_ctx)(struct crypto_scomp *tfm, void *ctx); ++ int (*compress)(struct crypto_scomp *tfm, const u8 *src, ++ unsigned int slen, u8 *dst, unsigned int *dlen, ++ void *ctx); ++ int (*decompress)(struct crypto_scomp *tfm, const u8 *src, ++ unsigned int slen, u8 *dst, unsigned int *dlen, ++ void *ctx); ++ struct crypto_alg base; ++}; ++ ++static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg) ++{ ++ return container_of(alg, struct scomp_alg, base); ++} ++ ++static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm) ++{ ++ return container_of(tfm, struct crypto_scomp, base); ++} ++ ++static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm) ++{ ++ return &tfm->base; ++} ++ ++static inline void crypto_free_scomp(struct crypto_scomp *tfm) ++{ ++ crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm)); ++} ++ ++static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm) ++{ ++ return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg); ++} ++ ++static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm) ++{ ++ return crypto_scomp_alg(tfm)->alloc_ctx(tfm); ++} ++ ++static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm, ++ void *ctx) ++{ ++ return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx); ++} ++ ++static inline int crypto_scomp_compress(struct crypto_scomp *tfm, ++ const u8 *src, unsigned int slen, ++ u8 *dst, unsigned int *dlen, void *ctx) ++{ ++ return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx); ++} ++ ++static inline int crypto_scomp_decompress(struct crypto_scomp *tfm, ++ const u8 *src, unsigned int slen, ++ u8 *dst, unsigned int *dlen, ++ void *ctx) ++{ ++ return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen, ++ ctx); ++} ++ ++int crypto_init_scomp_ops_async(struct crypto_tfm *tfm); ++struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req); ++void crypto_acomp_scomp_free_ctx(struct acomp_req *req); ++ ++/** ++ * crypto_register_scomp() -- Register synchronous compression algorithm ++ * ++ * Function registers an implementation of a synchronous ++ * compression algorithm ++ * ++ * @alg: algorithm definition ++ * ++ * Return: zero on success; error code in case of error ++ */ ++int crypto_register_scomp(struct scomp_alg *alg); ++ ++/** ++ * crypto_unregister_scomp() -- Unregister synchronous compression algorithm ++ * ++ * Function unregisters an implementation of a synchronous ++ * compression algorithm ++ * ++ * @alg: algorithm definition ++ * ++ * Return: zero on success; error code in case of error ++ */ ++int crypto_unregister_scomp(struct scomp_alg *alg); ++ ++#endif +--- a/include/linux/crypto.h ++++ b/include/linux/crypto.h +@@ -50,6 +50,8 @@ + #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 + #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 + #define CRYPTO_ALG_TYPE_KPP 0x00000008 ++#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a ++#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b + #define CRYPTO_ALG_TYPE_RNG 0x0000000c + #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d + #define CRYPTO_ALG_TYPE_DIGEST 0x0000000e +@@ -60,6 +62,7 @@ + #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e + #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e + #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c ++#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e + + #define CRYPTO_ALG_LARVAL 0x00000010 + #define CRYPTO_ALG_DEAD 0x00000020 +--- 
a/include/uapi/linux/cryptouser.h ++++ b/include/uapi/linux/cryptouser.h +@@ -46,6 +46,7 @@ enum crypto_attr_type_t { + CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */ + CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */ + CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */ ++ CRYPTOCFGA_REPORT_ACOMP, /* struct crypto_report_acomp */ + __CRYPTOCFGA_MAX + + #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1) +@@ -112,5 +113,9 @@ struct crypto_report_kpp { + char type[CRYPTO_MAX_NAME]; + }; + ++struct crypto_report_acomp { ++ char type[CRYPTO_MAX_NAME]; ++}; ++ + #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \ + sizeof(struct crypto_report_blkcipher)) +--- a/scripts/spelling.txt ++++ b/scripts/spelling.txt +@@ -305,6 +305,9 @@ defintion||definition + defintions||definitions + defualt||default + defult||default ++deintializing||deinitializing ++deintialize||deinitialize ++deintialized||deinitialized + deivce||device + delared||declared + delare||declare +--- a/sound/soc/amd/acp-pcm-dma.c ++++ b/sound/soc/amd/acp-pcm-dma.c +@@ -506,7 +506,7 @@ static int acp_init(void __iomem *acp_mm + return 0; + } + +-/* Deintialize ACP */ ++/* Deinitialize ACP */ + static int acp_deinit(void __iomem *acp_mmio) + { + u32 val; diff --git a/target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch b/target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch new file mode 100644 index 000000000..703aeed3e --- /dev/null +++ b/target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch @@ -0,0 +1,3750 @@ +From 659603c5f6cbc3d39922d4374df25ae4627d0e88 Mon Sep 17 00:00:00 2001 +From: Yangbo Lu +Date: Mon, 25 Sep 2017 12:12:20 +0800 +Subject: [PATCH] dma: support layerscape + +This is a integrated patch for layerscape dma support. + +Signed-off-by: jiaheng.fan +Signed-off-by: Yangbo Lu +--- + drivers/dma/Kconfig | 14 + + drivers/dma/Makefile | 2 + + drivers/dma/dpaa2-qdma/Kconfig | 8 + + drivers/dma/dpaa2-qdma/Makefile | 8 + + drivers/dma/dpaa2-qdma/dpaa2-qdma.c | 986 +++++++++++++++++++++++++ + drivers/dma/dpaa2-qdma/dpaa2-qdma.h | 262 +++++++ + drivers/dma/dpaa2-qdma/dpdmai.c | 454 ++++++++++++ + drivers/dma/dpaa2-qdma/fsl_dpdmai.h | 521 ++++++++++++++ + drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h | 222 ++++++ + drivers/dma/fsl-qdma.c | 1201 +++++++++++++++++++++++++++++++ + 10 files changed, 3678 insertions(+) + create mode 100644 drivers/dma/dpaa2-qdma/Kconfig + create mode 100644 drivers/dma/dpaa2-qdma/Makefile + create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.c + create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.h + create mode 100644 drivers/dma/dpaa2-qdma/dpdmai.c + create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai.h + create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h + create mode 100644 drivers/dma/fsl-qdma.c + +--- a/drivers/dma/Kconfig ++++ b/drivers/dma/Kconfig +@@ -192,6 +192,20 @@ config FSL_EDMA + multiplexing capability for DMA request sources(slot). + This module can be found on Freescale Vybrid and LS-1 SoCs. + ++config FSL_QDMA ++ tristate "Freescale qDMA engine support" ++ select DMA_ENGINE ++ select DMA_VIRTUAL_CHANNELS ++ select DMA_ENGINE_RAID ++ select ASYNC_TX_ENABLE_CHANNEL_SWITCH ++ help ++ Support the Freescale qDMA engine with command queue and legacy mode. ++ Channel virtualization is supported through enqueuing of DMA jobs to, ++ or dequeuing DMA jobs from, different work queues. ++ This module can be found on Freescale LS SoCs. 
++
++source drivers/dma/dpaa2-qdma/Kconfig
++
+ config FSL_RAID
+ 	tristate "Freescale RAID engine Support"
+ 	depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
+--- a/drivers/dma/Makefile
++++ b/drivers/dma/Makefile
+@@ -29,6 +29,8 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/
+ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
+ obj-$(CONFIG_FSL_DMA) += fsldma.o
+ obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
++obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
++obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma/
+ obj-$(CONFIG_FSL_RAID) += fsl_raid.o
+ obj-$(CONFIG_HSU_DMA) += hsu/
+ obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
+--- /dev/null
++++ b/drivers/dma/dpaa2-qdma/Kconfig
+@@ -0,0 +1,8 @@
++menuconfig FSL_DPAA2_QDMA
++	tristate "NXP DPAA2 QDMA"
++	depends on FSL_MC_BUS && FSL_MC_DPIO
++	select DMA_ENGINE
++	select DMA_VIRTUAL_CHANNELS
++	---help---
++	  NXP Data Path Acceleration Architecture 2 QDMA driver,
++	  using the NXP MC bus driver.
+--- /dev/null
++++ b/drivers/dma/dpaa2-qdma/Makefile
+@@ -0,0 +1,8 @@
++#
++# Makefile for the NXP DPAA2 qDMA controllers
++#
++ccflags-y += -DVERSION=\"\"
++
++obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma.o
++
++fsl-dpaa2-qdma-objs := dpaa2-qdma.o dpdmai.o
+--- /dev/null
++++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
+@@ -0,0 +1,986 @@
++/*
++ * drivers/dma/dpaa2-qdma/dpaa2-qdma.c
++ *
++ * Copyright 2015-2017 NXP Semiconductor, Inc.
++ * Author: Changming Huang <jerry.huang@nxp.com>
++ *
++ * Driver for the NXP QDMA engine with QMan mode.
++ * Channel virtualization is supported through enqueuing of DMA jobs to,
++ * or dequeuing DMA jobs from different work queues with QMan portal.
++ * This module can be found on NXP LS2 SoCs.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/clk.h>
++#include <linux/dma-mapping.h>
++#include <linux/dmapool.h>
++#include <linux/dmaengine.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <linux/types.h>
++#include <linux/iommu.h>
++
++#include "../virt-dma.h"
++
++#include "../../../drivers/staging/fsl-mc/include/mc.h"
++#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
++#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
++#include "fsl_dpdmai_cmd.h"
++#include "fsl_dpdmai.h"
++#include "dpaa2-qdma.h"
++
++static bool smmu_disable = true;
++
++static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
++{
++	return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
++}
++
++static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
++{
++	return container_of(vd, struct dpaa2_qdma_comp, vdesc);
++}
++
++static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
++{
++	return 0;
++}
++
++static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
++{
++	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
++	unsigned long flags;
++	LIST_HEAD(head);
++
++	spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
++	vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
++	spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);
++
++	vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);
++}
++
++/*
++ * Request a command descriptor for enqueue.
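++ * Completion descriptors are recycled through the per-channel comp_free
++ * list below; a new descriptor is carved out of the channel's FD DMA pool
++ * only when that list is empty.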
++ */
++static struct dpaa2_qdma_comp *
++dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
++{
++	struct dpaa2_qdma_comp *comp_temp = NULL;
++	unsigned long flags;
++
++	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
++	if (list_empty(&dpaa2_chan->comp_free)) {
++		spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
++		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
++		if (!comp_temp)
++			goto err;
++		comp_temp->fd_virt_addr = dma_pool_alloc(dpaa2_chan->fd_pool,
++				GFP_NOWAIT, &comp_temp->fd_bus_addr);
++		if (!comp_temp->fd_virt_addr) {
++			kfree(comp_temp);
++			comp_temp = NULL;
++			goto err;
++		}
++
++		comp_temp->fl_virt_addr =
++			(void *)((struct dpaa2_fd *)
++				comp_temp->fd_virt_addr + 1);
++		comp_temp->fl_bus_addr = comp_temp->fd_bus_addr +
++					sizeof(struct dpaa2_fd);
++		comp_temp->desc_virt_addr =
++			(void *)((struct dpaa2_frame_list *)
++				comp_temp->fl_virt_addr + 3);
++		comp_temp->desc_bus_addr = comp_temp->fl_bus_addr +
++				sizeof(struct dpaa2_frame_list) * 3;
++
++		comp_temp->qchan = dpaa2_chan;
++		comp_temp->sg_blk_num = 0;
++		INIT_LIST_HEAD(&comp_temp->sg_src_head);
++		INIT_LIST_HEAD(&comp_temp->sg_dst_head);
++		return comp_temp;
++	}
++	comp_temp = list_first_entry(&dpaa2_chan->comp_free,
++			struct dpaa2_qdma_comp, list);
++	list_del(&comp_temp->list);
++	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
++
++	comp_temp->qchan = dpaa2_chan;
++err:
++	return comp_temp;
++}
++
++static void dpaa2_qdma_populate_fd(uint32_t format,
++		struct dpaa2_qdma_comp *dpaa2_comp)
++{
++	struct dpaa2_fd *fd;
++
++	fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
++	memset(fd, 0, sizeof(struct dpaa2_fd));
++
++	/* fd populated */
++	fd->simple.addr = dpaa2_comp->fl_bus_addr;
++	/* Bypass memory translation, Frame list format, short length disable */
++	/* we need to disable BMT if fsl-mc use iova addr */
++	if (smmu_disable)
++		fd->simple.bpid = QMAN_FD_BMT_ENABLE;
++	fd->simple.format_offset = QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE;
++
++	fd->simple.frc = format | QDMA_SER_CTX;
++}
++
++/* first frame list for descriptor buffer */
++static void dpaa2_qdma_populate_first_framel(
++		struct dpaa2_frame_list *f_list,
++		struct dpaa2_qdma_comp *dpaa2_comp)
++{
++	struct dpaa2_qdma_sd_d *sdd;
++
++	sdd = (struct dpaa2_qdma_sd_d *)dpaa2_comp->desc_virt_addr;
++	memset(sdd, 0, 2 * (sizeof(*sdd)));
++	/* source and destination descriptor */
++	sdd->cmd = QDMA_SD_CMD_RDTTYPE_COHERENT; /* source descriptor CMD */
++	sdd++;
++	sdd->cmd = QDMA_DD_CMD_WRTTYPE_COHERENT; /* dest descriptor CMD */
++
++	memset(f_list, 0, sizeof(struct dpaa2_frame_list));
++	/* first frame list to source descriptor */
++	f_list->addr_lo = dpaa2_comp->desc_bus_addr;
++	f_list->addr_hi = (dpaa2_comp->desc_bus_addr >> 32);
++	f_list->data_len.data_len_sl0 = 0x20; /* source/destination desc len */
++	f_list->fmt = QDMA_FL_FMT_SBF; /* single buffer frame */
++	if (smmu_disable)
++		f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
++	f_list->sl = QDMA_FL_SL_LONG; /* long length */
++	f_list->f = 0; /* not the last frame list */
++}
++
++/* source and destination frame list */
++static void dpaa2_qdma_populate_frames(struct dpaa2_frame_list *f_list,
++		dma_addr_t dst, dma_addr_t src, size_t len, uint8_t fmt)
++{
++	/* source frame list to source buffer */
++	memset(f_list, 0, sizeof(struct dpaa2_frame_list));
++	f_list->addr_lo = src;
++	f_list->addr_hi = (src >> 32);
++	f_list->data_len.data_len_sl0 = len;
++	f_list->fmt = fmt; /* single buffer frame or scatter gather frame */
++	if (smmu_disable)
++		f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
++	f_list->sl = QDMA_FL_SL_LONG; /* long length */
++	f_list->f = 0; /* not the last frame list */
++
++	f_list++;
++	/* destination frame list to destination buffer */
++	memset(f_list, 0, sizeof(struct dpaa2_frame_list));
++	f_list->addr_lo = dst;
++	f_list->addr_hi = (dst >> 32);
++	f_list->data_len.data_len_sl0 = len;
++	f_list->fmt = fmt; /* single buffer frame or scatter gather frame */
++	if (smmu_disable)
++		f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
++	f_list->sl = QDMA_FL_SL_LONG; /* long length */
++	f_list->f = QDMA_FL_F; /* Final bit: 1, for last frame list */
++}
++
++static struct dma_async_tx_descriptor *dpaa2_qdma_prep_memcpy(
++		struct dma_chan *chan, dma_addr_t dst,
++		dma_addr_t src, size_t len, unsigned long flags)
++{
++	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
++	struct dpaa2_qdma_comp *dpaa2_comp;
++	struct dpaa2_frame_list *f_list;
++	uint32_t format;
++
++	dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
++	if (!dpaa2_comp)
++		return NULL;
++
++#ifdef LONG_FORMAT
++	format = QDMA_FD_LONG_FORMAT;
++#else
++	format = QDMA_FD_SHORT_FORMAT;
++#endif
++	/* populate Frame descriptor */
++	dpaa2_qdma_populate_fd(format, dpaa2_comp);
++
++	f_list = (struct dpaa2_frame_list *)dpaa2_comp->fl_virt_addr;
++
++#ifdef LONG_FORMAT
++	/* first frame list for descriptor buffer (long format) */
++	dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp);
++
++	f_list++;
++#endif
++
++	dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
++
++	return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
++}
++
++static struct qdma_sg_blk *dpaa2_qdma_get_sg_blk(
++		struct dpaa2_qdma_comp *dpaa2_comp,
++		struct dpaa2_qdma_chan *dpaa2_chan)
++{
++	struct qdma_sg_blk *sg_blk = NULL;
++	dma_addr_t phy_sgb;
++	unsigned long flags;
++
++	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
++	if (list_empty(&dpaa2_chan->sgb_free)) {
++		sg_blk = (struct qdma_sg_blk *)dma_pool_alloc(
++				dpaa2_chan->sg_blk_pool,
++				GFP_NOWAIT, &phy_sgb);
++		if (!sg_blk) {
++			spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
++			return sg_blk;
++		}
++		sg_blk->blk_virt_addr = (void *)(sg_blk + 1);
++		sg_blk->blk_bus_addr = phy_sgb + sizeof(*sg_blk);
++	} else {
++		sg_blk = list_first_entry(&dpaa2_chan->sgb_free,
++				struct qdma_sg_blk, list);
++		list_del(&sg_blk->list);
++	}
++	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
++
++	return sg_blk;
++}
++
++static uint32_t dpaa2_qdma_populate_sg(struct device *dev,
++		struct dpaa2_qdma_chan *dpaa2_chan,
++		struct dpaa2_qdma_comp *dpaa2_comp,
++		struct scatterlist *dst_sg, u32 dst_nents,
++		struct scatterlist *src_sg, u32 src_nents)
++{
++	struct dpaa2_qdma_sg *src_sge;
++	struct dpaa2_qdma_sg *dst_sge;
++	struct qdma_sg_blk *sg_blk;
++	struct qdma_sg_blk *sg_blk_dst;
++	dma_addr_t src;
++	dma_addr_t dst;
++	uint32_t num;
++	uint32_t blocks;
++	uint32_t len = 0;
++	uint32_t total_len = 0;
++	int i, j = 0;
++
++	num = min(dst_nents, src_nents);
++	blocks = num / (NUM_SG_PER_BLK - 1);
++	if (num % (NUM_SG_PER_BLK - 1))
++		blocks += 1;
++	if (dpaa2_comp->sg_blk_num < blocks) {
++		len = blocks - dpaa2_comp->sg_blk_num;
++		for (i = 0; i < len; i++) {
++			/* source sg blocks */
++			sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
++			if (!sg_blk)
++				return 0;
++			list_add_tail(&sg_blk->list, &dpaa2_comp->sg_src_head);
++			/* destination sg blocks */
++			sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
++			if (!sg_blk)
++				return 0;
++			list_add_tail(&sg_blk->list, &dpaa2_comp->sg_dst_head);
++		}
++	} else {
++		len = dpaa2_comp->sg_blk_num - blocks;
++		for (i = 0; i < len; i++) {
++			spin_lock(&dpaa2_chan->queue_lock);
++			/* handle source sg blocks */
++			sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
++					struct qdma_sg_blk, list);
++			list_del(&sg_blk->list);
++			list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
++			/* handle destination sg blocks */
++			sg_blk = list_first_entry(&dpaa2_comp->sg_dst_head,
++					struct qdma_sg_blk, list);
++			list_del(&sg_blk->list);
++			list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
++			spin_unlock(&dpaa2_chan->queue_lock);
++		}
++	}
++	dpaa2_comp->sg_blk_num = blocks;
++
++	/* get the first source sg phy address */
++	sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
++			struct qdma_sg_blk, list);
++	dpaa2_comp->sge_src_bus_addr = sg_blk->blk_bus_addr;
++	/* get the first destination sg phy address */
++	sg_blk_dst = list_first_entry(&dpaa2_comp->sg_dst_head,
++			struct qdma_sg_blk, list);
++	dpaa2_comp->sge_dst_bus_addr = sg_blk_dst->blk_bus_addr;
++
++	for (i = 0; i < blocks; i++) {
++		src_sge = (struct dpaa2_qdma_sg *)sg_blk->blk_virt_addr;
++		dst_sge = (struct dpaa2_qdma_sg *)sg_blk_dst->blk_virt_addr;
++
++		for (j = 0; j < (NUM_SG_PER_BLK - 1); j++) {
++			len = min(sg_dma_len(dst_sg), sg_dma_len(src_sg));
++			if (0 == len)
++				goto fetch;
++			total_len += len;
++			src = sg_dma_address(src_sg);
++			dst = sg_dma_address(dst_sg);
++
++			/* source SG */
++			src_sge->addr_lo = src;
++			src_sge->addr_hi = (src >> 32);
++			src_sge->data_len.data_len_sl0 = len;
++			src_sge->ctrl.sl = QDMA_SG_SL_LONG;
++			src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
++			/* destination SG */
++			dst_sge->addr_lo = dst;
++			dst_sge->addr_hi = (dst >> 32);
++			dst_sge->data_len.data_len_sl0 = len;
++			dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
++			dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
++fetch:
++			num--;
++			if (0 == num) {
++				src_sge->ctrl.f = QDMA_SG_F;
++				dst_sge->ctrl.f = QDMA_SG_F;
++				goto end;
++			}
++			dst_sg = sg_next(dst_sg);
++			src_sg = sg_next(src_sg);
++			src_sge++;
++			dst_sge++;
++			if (j == (NUM_SG_PER_BLK - 2)) {
++				/* for next blocks, extension */
++				sg_blk = list_next_entry(sg_blk, list);
++				sg_blk_dst = list_next_entry(sg_blk_dst, list);
++				src_sge->addr_lo = sg_blk->blk_bus_addr;
++				src_sge->addr_hi = sg_blk->blk_bus_addr >> 32;
++				src_sge->ctrl.sl = QDMA_SG_SL_LONG;
++				src_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
++				dst_sge->addr_lo = sg_blk_dst->blk_bus_addr;
++				dst_sge->addr_hi =
++					sg_blk_dst->blk_bus_addr >> 32;
++				dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
++				dst_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
++			}
++		}
++	}
++
++end:
++	return total_len;
++}
++
++static struct dma_async_tx_descriptor *dpaa2_qdma_prep_sg(
++		struct dma_chan *chan,
++		struct scatterlist *dst_sg, u32 dst_nents,
++		struct scatterlist *src_sg, u32 src_nents,
++		unsigned long flags)
++{
++	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
++	struct dpaa2_qdma_comp *dpaa2_comp;
++	struct dpaa2_frame_list *f_list;
++	struct device *dev = dpaa2_chan->qdma->priv->dev;
++	uint32_t total_len = 0;
++
++	/* basic sanity checks */
++	if (dst_nents == 0 || src_nents == 0)
++		return NULL;
++
++	if (dst_sg == NULL || src_sg == NULL)
++		return NULL;
++
++	/* get the descriptors required */
++	dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
++	if (!dpaa2_comp)
++		return NULL;
++
++	/* populate Frame descriptor */
++	dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);
++
++	/* prepare Scatter gather entry for source and destination */
++	total_len = dpaa2_qdma_populate_sg(dev, dpaa2_chan,
++			dpaa2_comp, dst_sg, dst_nents, src_sg, src_nents);
++
++	f_list = (struct dpaa2_frame_list *)dpaa2_comp->fl_virt_addr;
++	/* first frame list for descriptor buffer */
++	dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp);
++	f_list++;
++	/* prepare Scatter gather entry for source and destination */
++	/* populate source and destination frame list table */
++	dpaa2_qdma_populate_frames(f_list, dpaa2_comp->sge_dst_bus_addr,
++			dpaa2_comp->sge_src_bus_addr,
++			total_len, QDMA_FL_FMT_SGE);
++
++	return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
++}
++
++static enum dma_status dpaa2_qdma_tx_status(struct dma_chan *chan,
++		dma_cookie_t cookie, struct dma_tx_state *txstate)
++{
++	return dma_cookie_status(chan, cookie, txstate);
++}
++
++static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
++{
++}
++
++static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
++{
++	struct dpaa2_qdma_comp *dpaa2_comp;
++	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
++	struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
++	struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
++	struct virt_dma_desc *vdesc;
++	struct dpaa2_fd *fd;
++	int err;
++	unsigned long flags;
++
++	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
++	spin_lock(&dpaa2_chan->vchan.lock);
++	if (vchan_issue_pending(&dpaa2_chan->vchan)) {
++		vdesc = vchan_next_desc(&dpaa2_chan->vchan);
++		if (!vdesc)
++			goto err_enqueue;
++		dpaa2_comp = to_fsl_qdma_comp(vdesc);
++
++		fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
++
++		list_del(&vdesc->node);
++		list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
++
++		/* TODO: priority hard-coded to zero */
++		err = dpaa2_io_service_enqueue_fq(NULL,
++				priv->tx_queue_attr[0].fqid, fd);
++		if (err) {
++			list_del(&dpaa2_comp->list);
++			list_add_tail(&dpaa2_comp->list,
++					&dpaa2_chan->comp_free);
++		}
++
++	}
++err_enqueue:
++	spin_unlock(&dpaa2_chan->vchan.lock);
++	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
++}
++
++static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
++{
++	struct device *dev = &ls_dev->dev;
++	struct dpaa2_qdma_priv *priv;
++	struct dpaa2_qdma_priv_per_prio *ppriv;
++	uint8_t prio_def = DPDMAI_PRIO_NUM;
++	int err;
++	int i;
++
++	priv = dev_get_drvdata(dev);
++
++	priv->dev = dev;
++	priv->dpqdma_id = ls_dev->obj_desc.id;
++
++	/* Get the handle for the DPDMAI this interface is associated with */
++	err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
++	if (err) {
++		dev_err(dev, "dpdmai_open() failed\n");
++		return err;
++	}
++	dev_info(dev, "Opened dpdmai object successfully\n");
++
++	err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
++				    &priv->dpdmai_attr);
++	if (err) {
++		dev_err(dev, "dpdmai_get_attributes() failed\n");
++		return err;
++	}
++
++	if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
++		dev_err(dev, "DPDMAI major version mismatch\n"
++			"Found %u.%u, supported version is %u.%u\n",
++			priv->dpdmai_attr.version.major,
++			priv->dpdmai_attr.version.minor,
++			DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
++	}
++
++	if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
++		dev_err(dev, "DPDMAI minor version mismatch\n"
++			"Found %u.%u, supported version is %u.%u\n",
++			priv->dpdmai_attr.version.major,
++			priv->dpdmai_attr.version.minor,
++			DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
++	}
++
++	priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
++	ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
++	if (!ppriv) {
++		dev_err(dev, "kcalloc for ppriv failed\n");
++		return -1;
++	}
++	priv->ppriv = ppriv;
++
++	for (i 
= 0; i < priv->num_pairs; i++) { ++ err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, ++ i, &priv->rx_queue_attr[i]); ++ if (err) { ++ dev_err(dev, "dpdmai_get_rx_queue() failed\n"); ++ return err; ++ } ++ ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid; ++ ++ err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, ++ i, &priv->tx_queue_attr[i]); ++ if (err) { ++ dev_err(dev, "dpdmai_get_tx_queue() failed\n"); ++ return err; ++ } ++ ppriv->req_fqid = priv->tx_queue_attr[i].fqid; ++ ppriv->prio = i; ++ ppriv->priv = priv; ++ ppriv++; ++ } ++ ++ return 0; ++} ++ ++static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx) ++{ ++ struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx, ++ struct dpaa2_qdma_priv_per_prio, nctx); ++ struct dpaa2_qdma_priv *priv = ppriv->priv; ++ struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp; ++ struct dpaa2_qdma_chan *qchan; ++ const struct dpaa2_fd *fd; ++ const struct dpaa2_fd *fd_eq; ++ struct dpaa2_dq *dq; ++ int err; ++ int is_last = 0; ++ uint8_t status; ++ int i; ++ int found; ++ uint32_t n_chans = priv->dpaa2_qdma->n_chans; ++ ++ do { ++ err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid, ++ ppriv->store); ++ } while (err); ++ ++ while (!is_last) { ++ do { ++ dq = dpaa2_io_store_next(ppriv->store, &is_last); ++ } while (!is_last && !dq); ++ if (!dq) { ++ dev_err(priv->dev, "FQID returned no valid frames!\n"); ++ continue; ++ } ++ ++ /* obtain FD and process the error */ ++ fd = dpaa2_dq_fd(dq); ++ status = fd->simple.ctrl & 0xff; ++ if (status) ++ dev_err(priv->dev, "FD error occurred\n"); ++ found = 0; ++ for (i = 0; i < n_chans; i++) { ++ qchan = &priv->dpaa2_qdma->chans[i]; ++ spin_lock(&qchan->queue_lock); ++ if (list_empty(&qchan->comp_used)) { ++ spin_unlock(&qchan->queue_lock); ++ continue; ++ } ++ list_for_each_entry_safe(dpaa2_comp, _comp_tmp, ++ &qchan->comp_used, list) { ++ fd_eq = (struct dpaa2_fd *) ++ dpaa2_comp->fd_virt_addr; ++ ++ if (fd_eq->simple.addr == ++ fd->simple.addr) { ++ ++ list_del(&dpaa2_comp->list); ++ list_add_tail(&dpaa2_comp->list, ++ &qchan->comp_free); ++ ++ spin_lock(&qchan->vchan.lock); ++ vchan_cookie_complete( ++ &dpaa2_comp->vdesc); ++ spin_unlock(&qchan->vchan.lock); ++ found = 1; ++ break; ++ } ++ } ++ spin_unlock(&qchan->queue_lock); ++ if (found) ++ break; ++ } ++ } ++ ++ dpaa2_io_service_rearm(NULL, ctx); ++} ++ ++static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv) ++{ ++ int err, i, num; ++ struct device *dev = priv->dev; ++ struct dpaa2_qdma_priv_per_prio *ppriv; ++ ++ num = priv->num_pairs; ++ ppriv = priv->ppriv; ++ for (i = 0; i < num; i++) { ++ ppriv->nctx.is_cdan = 0; ++ ppriv->nctx.desired_cpu = 1; ++ ppriv->nctx.id = ppriv->rsp_fqid; ++ ppriv->nctx.cb = dpaa2_qdma_fqdan_cb; ++ err = dpaa2_io_service_register(NULL, &ppriv->nctx); ++ if (err) { ++ dev_err(dev, "Notification register failed\n"); ++ goto err_service; ++ } ++ ++ ppriv->store = dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, ++ dev); ++ if (!ppriv->store) { ++ dev_err(dev, "dpaa2_io_store_create() failed\n"); ++ goto err_store; ++ } ++ ++ ppriv++; ++ } ++ return 0; ++ ++err_store: ++ dpaa2_io_service_deregister(NULL, &ppriv->nctx); ++err_service: ++ ppriv--; ++ while (ppriv >= priv->ppriv) { ++ dpaa2_io_service_deregister(NULL, &ppriv->nctx); ++ dpaa2_io_store_destroy(ppriv->store); ++ ppriv--; ++ } ++ return -1; ++} ++ ++static void __cold dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv) ++{ ++ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv; ++ int i; ++ ++ for (i = 0; i < 
priv->num_pairs; i++) { ++ dpaa2_io_store_destroy(ppriv->store); ++ ppriv++; ++ } ++} ++ ++static void __cold dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv) ++{ ++ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv; ++ int i; ++ ++ for (i = 0; i < priv->num_pairs; i++) { ++ dpaa2_io_service_deregister(NULL, &ppriv->nctx); ++ ppriv++; ++ } ++} ++ ++static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv) ++{ ++ int err; ++ struct dpdmai_rx_queue_cfg rx_queue_cfg; ++ struct device *dev = priv->dev; ++ struct dpaa2_qdma_priv_per_prio *ppriv; ++ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); ++ int i, num; ++ ++ num = priv->num_pairs; ++ ppriv = priv->ppriv; ++ for (i = 0; i < num; i++) { ++ rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX | ++ DPDMAI_QUEUE_OPT_DEST; ++ rx_queue_cfg.user_ctx = ppriv->nctx.qman64; ++ rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO; ++ rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id; ++ rx_queue_cfg.dest_cfg.priority = ppriv->prio; ++ err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, ++ rx_queue_cfg.dest_cfg.priority, &rx_queue_cfg); ++ if (err) { ++ dev_err(dev, "dpdmai_set_rx_queue() failed\n"); ++ return err; ++ } ++ ++ ppriv++; ++ } ++ ++ return 0; ++} ++ ++static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv) ++{ ++ int err = 0; ++ struct device *dev = priv->dev; ++ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); ++ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv; ++ int i; ++ ++ for (i = 0; i < priv->num_pairs; i++) { ++ ppriv->nctx.qman64 = 0; ++ ppriv->nctx.dpio_id = 0; ++ ppriv++; ++ } ++ ++ err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle); ++ if (err) ++ dev_err(dev, "dpdmai_reset() failed\n"); ++ ++ return err; ++} ++ ++static void __cold dpaa2_dpdmai_free_pool(struct dpaa2_qdma_chan *qchan, ++ struct list_head *head) ++{ ++ struct qdma_sg_blk *sgb_tmp, *_sgb_tmp; ++ /* free the QDMA SG pool block */ ++ list_for_each_entry_safe(sgb_tmp, _sgb_tmp, head, list) { ++ sgb_tmp->blk_virt_addr = (void *)((struct qdma_sg_blk *) ++ sgb_tmp->blk_virt_addr - 1); ++ sgb_tmp->blk_bus_addr = sgb_tmp->blk_bus_addr ++ - sizeof(*sgb_tmp); ++ dma_pool_free(qchan->sg_blk_pool, sgb_tmp->blk_virt_addr, ++ sgb_tmp->blk_bus_addr); ++ } ++ ++} ++ ++static void __cold dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan, ++ struct list_head *head) ++{ ++ struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp; ++ /* free the QDMA comp resource */ ++ list_for_each_entry_safe(comp_tmp, _comp_tmp, ++ head, list) { ++ dma_pool_free(qchan->fd_pool, ++ comp_tmp->fd_virt_addr, ++ comp_tmp->fd_bus_addr); ++ /* free the SG source block on comp */ ++ dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_src_head); ++ /* free the SG destination block on comp */ ++ dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_dst_head); ++ list_del(&comp_tmp->list); ++ kfree(comp_tmp); ++ } ++ ++} ++ ++static void __cold dpaa2_dpdmai_free_channels( ++ struct dpaa2_qdma_engine *dpaa2_qdma) ++{ ++ struct dpaa2_qdma_chan *qchan; ++ int num, i; ++ ++ num = dpaa2_qdma->n_chans; ++ for (i = 0; i < num; i++) { ++ qchan = &dpaa2_qdma->chans[i]; ++ dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used); ++ dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free); ++ dpaa2_dpdmai_free_pool(qchan, &qchan->sgb_free); ++ dma_pool_destroy(qchan->fd_pool); ++ dma_pool_destroy(qchan->sg_blk_pool); ++ } ++} ++ ++static int dpaa2_dpdmai_alloc_channels(struct dpaa2_qdma_engine *dpaa2_qdma) ++{ ++ struct dpaa2_qdma_chan *dpaa2_chan; ++ struct device *dev = 
&dpaa2_qdma->priv->dpdmai_dev->dev;
++	int i;
++
++	INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
++	for (i = 0; i < dpaa2_qdma->n_chans; i++) {
++		dpaa2_chan = &dpaa2_qdma->chans[i];
++		dpaa2_chan->qdma = dpaa2_qdma;
++		dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
++		vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
++
++		dpaa2_chan->fd_pool = dma_pool_create("fd_pool",
++					dev, FD_POOL_SIZE, 32, 0);
++		if (!dpaa2_chan->fd_pool)
++			return -1;
++		dpaa2_chan->sg_blk_pool = dma_pool_create("sg_blk_pool",
++					dev, SG_POOL_SIZE, 32, 0);
++		if (!dpaa2_chan->sg_blk_pool)
++			return -1;
++
++		spin_lock_init(&dpaa2_chan->queue_lock);
++		INIT_LIST_HEAD(&dpaa2_chan->comp_used);
++		INIT_LIST_HEAD(&dpaa2_chan->comp_free);
++		INIT_LIST_HEAD(&dpaa2_chan->sgb_free);
++	}
++	return 0;
++}
++
++static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
++{
++	struct dpaa2_qdma_priv *priv;
++	struct device *dev = &dpdmai_dev->dev;
++	struct dpaa2_qdma_engine *dpaa2_qdma;
++	int err;
++
++	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++	if (!priv)
++		return -ENOMEM;
++	dev_set_drvdata(dev, priv);
++	priv->dpdmai_dev = dpdmai_dev;
++
++	priv->iommu_domain = iommu_get_domain_for_dev(dev);
++	if (priv->iommu_domain)
++		smmu_disable = false;
++
++	/* obtain a MC portal */
++	err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
++	if (err) {
++		dev_err(dev, "MC portal allocation failed\n");
++		goto err_mcportal;
++	}
++
++	/* DPDMAI initialization */
++	err = dpaa2_qdma_setup(dpdmai_dev);
++	if (err) {
++		dev_err(dev, "dpaa2_dpdmai_setup() failed\n");
++		goto err_dpdmai_setup;
++	}
++
++	/* DPIO */
++	err = dpaa2_qdma_dpio_setup(priv);
++	if (err) {
++		dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n");
++		goto err_dpio_setup;
++	}
++
++	/* DPDMAI binding to DPIO */
++	err = dpaa2_dpdmai_bind(priv);
++	if (err) {
++		dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
++		goto err_bind;
++	}
++
++	/* DPDMAI enable */
++	err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
++	if (err) {
++		dev_err(dev, "dpdmai_enable() failed\n");
++		goto err_enable;
++	}
++
++	dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
++	if (!dpaa2_qdma) {
++		err = -ENOMEM;
++		goto err_eng;
++	}
++
++	priv->dpaa2_qdma = dpaa2_qdma;
++	dpaa2_qdma->priv = priv;
++
++	dpaa2_qdma->n_chans = NUM_CH;
++
++	err = dpaa2_dpdmai_alloc_channels(dpaa2_qdma);
++	if (err) {
++		dev_err(dev, "QDMA alloc channels failed\n");
++		goto err_reg;
++	}
++
++	dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
++	dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
++	dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
++	dma_cap_set(DMA_SG, dpaa2_qdma->dma_dev.cap_mask);
++
++	dpaa2_qdma->dma_dev.dev = dev;
++	dpaa2_qdma->dma_dev.device_alloc_chan_resources
++		= dpaa2_qdma_alloc_chan_resources;
++	dpaa2_qdma->dma_dev.device_free_chan_resources
++		= dpaa2_qdma_free_chan_resources;
++	dpaa2_qdma->dma_dev.device_tx_status = dpaa2_qdma_tx_status;
++	dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
++	dpaa2_qdma->dma_dev.device_prep_dma_sg = dpaa2_qdma_prep_sg;
++	dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
++
++	err = dma_async_device_register(&dpaa2_qdma->dma_dev);
++	if (err) {
++		dev_err(dev, "Can't register NXP QDMA engine.\n");
++		goto err_reg;
++	}
++
++	return 0;
++
++err_reg:
++	dpaa2_dpdmai_free_channels(dpaa2_qdma);
++	kfree(dpaa2_qdma);
++err_eng:
++	dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
++err_enable:
++	dpaa2_dpdmai_dpio_unbind(priv);
++err_bind:
++
dpaa2_dpmai_store_free(priv); ++ dpaa2_dpdmai_dpio_free(priv); ++err_dpio_setup: ++ dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle); ++err_dpdmai_setup: ++ fsl_mc_portal_free(priv->mc_io); ++err_mcportal: ++ kfree(priv->ppriv); ++ kfree(priv); ++ dev_set_drvdata(dev, NULL); ++ return err; ++} ++ ++static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev) ++{ ++ struct device *dev; ++ struct dpaa2_qdma_priv *priv; ++ struct dpaa2_qdma_engine *dpaa2_qdma; ++ ++ dev = &ls_dev->dev; ++ priv = dev_get_drvdata(dev); ++ dpaa2_qdma = priv->dpaa2_qdma; ++ ++ dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle); ++ dpaa2_dpdmai_dpio_unbind(priv); ++ dpaa2_dpmai_store_free(priv); ++ dpaa2_dpdmai_dpio_free(priv); ++ dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle); ++ fsl_mc_portal_free(priv->mc_io); ++ dev_set_drvdata(dev, NULL); ++ dpaa2_dpdmai_free_channels(dpaa2_qdma); ++ ++ dma_async_device_unregister(&dpaa2_qdma->dma_dev); ++ kfree(priv); ++ kfree(dpaa2_qdma); ++ ++ return 0; ++} ++ ++static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = { ++ { ++ .vendor = FSL_MC_VENDOR_FREESCALE, ++ .obj_type = "dpdmai", ++ }, ++ { .vendor = 0x0 } ++}; ++ ++static struct fsl_mc_driver dpaa2_qdma_driver = { ++ .driver = { ++ .name = "dpaa2-qdma", ++ .owner = THIS_MODULE, ++ }, ++ .probe = dpaa2_qdma_probe, ++ .remove = dpaa2_qdma_remove, ++ .match_id_table = dpaa2_qdma_id_table ++}; ++ ++static int __init dpaa2_qdma_driver_init(void) ++{ ++ return fsl_mc_driver_register(&(dpaa2_qdma_driver)); ++} ++late_initcall(dpaa2_qdma_driver_init); ++ ++static void __exit fsl_qdma_exit(void) ++{ ++ fsl_mc_driver_unregister(&(dpaa2_qdma_driver)); ++} ++module_exit(fsl_qdma_exit); ++ ++MODULE_DESCRIPTION("NXP DPAA2 qDMA driver"); ++MODULE_LICENSE("Dual BSD/GPL"); +--- /dev/null ++++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h +@@ -0,0 +1,262 @@ ++/* Copyright 2015 NXP Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of NXP Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. 
IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __DPAA2_QDMA_H
++#define __DPAA2_QDMA_H
++
++#define LONG_FORMAT 1
++
++#define DPAA2_QDMA_STORE_SIZE 16
++#define NUM_CH 8
++#define NUM_SG_PER_BLK 16
++
++#define QDMA_DMR_OFFSET	0x0
++#define QDMA_DQ_EN (0 << 30)
++#define QDMA_DQ_DIS (1 << 30)
++
++#define QDMA_DSR_M_OFFSET 0x10004
++
++struct dpaa2_qdma_sd_d {
++	uint32_t rsv:32;
++	union {
++		struct {
++			uint32_t ssd:12; /* source stride distance */
++			uint32_t sss:12; /* source stride size */
++			uint32_t rsv1:8;
++		} sdf;
++		struct {
++			uint32_t dsd:12; /* Destination stride distance */
++			uint32_t dss:12; /* Destination stride size */
++			uint32_t rsv2:8;
++		} ddf;
++	} df;
++	uint32_t rbpcmd;	/* Route-by-port command */
++	uint32_t cmd;
++} __attribute__((__packed__));
++/* Source descriptor command read transaction type for RBP=0:
++   coherent copy of cacheable memory */
++#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28)
++/* Destination descriptor command write transaction type for RBP=0:
++   coherent copy of cacheable memory */
++#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
++
++#define QDMA_SG_FMT_SDB	0x0 /* single data buffer */
++#define QDMA_SG_FMT_FDS	0x1 /* frame data section */
++#define QDMA_SG_FMT_SGTE	0x2 /* SGT extension */
++#define QDMA_SG_SL_SHORT	0x1 /* short length */
++#define QDMA_SG_SL_LONG	0x0 /* long length */
++#define QDMA_SG_F	0x1 /* last sg entry */
++struct dpaa2_qdma_sg {
++	uint32_t addr_lo;	/* address 0:31 */
++	uint32_t addr_hi:17;	/* address 32:48 */
++	uint32_t rsv:15;
++	union {
++		uint32_t data_len_sl0;	/* SL=0, the long format */
++		struct {
++			uint32_t len:17;	/* SL=1, the short format */
++			uint32_t reserve:3;
++			uint32_t sf:1;
++			uint32_t sr:1;
++			uint32_t size:10;	/* buff size */
++		} data_len_sl1;
++	} data_len;	/* AVAIL_LENGTH */
++	struct {
++		uint32_t bpid:14;
++		uint32_t ivp:1;
++		uint32_t mbt:1;
++		uint32_t offset:12;
++		uint32_t fmt:2;
++		uint32_t sl:1;
++		uint32_t f:1;
++	} ctrl;
++} __attribute__((__packed__));
++
++#define QMAN_FD_FMT_ENABLE (1 << 12) /* frame list table enable */
++#define QMAN_FD_BMT_ENABLE (1 << 15) /* bypass memory translation */
++#define QMAN_FD_BMT_DISABLE (0 << 15) /* bypass memory translation */
++#define QMAN_FD_SL_DISABLE (0 << 14) /* short length disabled */
++#define QMAN_FD_SL_ENABLE (1 << 14) /* short length enabled */
++
++#define QDMA_SB_FRAME (0 << 28) /* single frame */
++#define QDMA_SG_FRAME (2 << 28) /* scatter gather frames */
++#define QDMA_FINAL_BIT_DISABLE (0 << 31) /* final bit disable */
++#define QDMA_FINAL_BIT_ENABLE (1 << 31) /* final bit enable */
++
++#define QDMA_FD_SHORT_FORMAT (1 << 11) /* short format */
++#define QDMA_FD_LONG_FORMAT (0 << 11) /* long format */
++#define QDMA_SER_DISABLE (0 << 8) /* no notification */
++#define QDMA_SER_CTX (1 << 8) /* notification by FQD_CTX[fqid] */
++#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
++#define QDMA_SER_BOTH (3 << 8) /* source and dest notification */
++#define QDMA_FD_SPF_ENALBE (1 << 30) /* source prefetch enable */
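++
++/*
++ * Example of how the driver composes the frame descriptor FRC word (see
++ * dpaa2_qdma_populate_fd()): a long-format FD whose completion is
++ * signalled via FQD_CTX[fqid] is built as
++ *
++ *	fd->simple.frc = QDMA_FD_LONG_FORMAT | QDMA_SER_CTX;
++ */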
++ ++#define QMAN_FD_VA_ENABLE (1 << 14) /* Address used is virtual address */ ++#define QMAN_FD_VA_DISABLE (0 << 14) /* Address used is a real address */ ++#define QMAN_FD_CBMT_ENABLE (1 << 15) /* Flow Context: 49bit physical address */ ++#define QMAN_FD_CBMT_DISABLE (0 << 15) /* Flow Context: 64bit virtual address */ ++#define QMAN_FD_SC_DISABLE (0 << 27) /* stashing control */ ++ ++#define QDMA_FL_FMT_SBF 0x0 /* Single buffer frame */ ++#define QDMA_FL_FMT_SGE 0x2 /* Scatter gather frame */ ++#define QDMA_FL_BMT_ENABLE 0x1 /* enable bypass memory translation */ ++#define QDMA_FL_BMT_DISABLE 0x0 /* disable bypass memory translation */ ++#define QDMA_FL_SL_LONG 0x0 /* long length */ ++#define QDMA_FL_SL_SHORT 0x1 /* short length */ ++#define QDMA_FL_F 0x1 /* last frame list bit */ ++/* Description of the frame list table structure */ ++struct dpaa2_frame_list { ++ uint32_t addr_lo; /* lower 32 bits of address */ ++ uint32_t addr_hi:17; /* upper 17 bits of address */ ++ uint32_t resrvd:15; ++ union { ++ uint32_t data_len_sl0; /* If SL=0, then data length is 32 */ ++ struct { ++ uint32_t data_len:18; /* If SL=1, length is 18 bits */ ++ uint32_t resrvd:2; ++ uint32_t mem:12; /* Valid only when SL=1 */ ++ } data_len_sl1; ++ } data_len; ++ /* word 4 */ ++ uint32_t bpid:14; /* Frame buffer pool ID */ ++ uint32_t ivp:1; /* Invalid Pool ID. */ ++ uint32_t bmt:1; /* Bypass Memory Translation */ ++ uint32_t offset:12; /* Frame offset */ ++ uint32_t fmt:2; /* Frame Format */ ++ uint32_t sl:1; /* Short Length */ ++ uint32_t f:1; /* Final bit */ ++ ++ uint32_t frc; /* Frame Context */ ++ /* word 6 */ ++ uint32_t err:8; /* Frame errors */ ++ uint32_t resrvd0:8; ++ uint32_t asal:4; /* accelerator-specific annotation length */ ++ uint32_t resrvd1:1; ++ uint32_t ptv2:1; ++ uint32_t ptv1:1; ++ uint32_t pta:1; /* pass-through annotation */ ++ uint32_t resrvd2:8; ++ ++ uint32_t flc_lo; /* lower 32 bits of flow context */ ++ uint32_t flc_hi; /* higher 32 bits of flow context */ ++} __attribute__((__packed__)); ++ ++struct dpaa2_qdma_chan { ++ struct virt_dma_chan vchan; ++ struct virt_dma_desc vdesc; ++ enum dma_status status; ++ struct dpaa2_qdma_engine *qdma; ++ ++ struct mutex dpaa2_queue_mutex; ++ spinlock_t queue_lock; ++ struct dma_pool *fd_pool; ++ struct dma_pool *sg_blk_pool; ++ ++ struct list_head comp_used; ++ struct list_head comp_free; ++ ++ struct list_head sgb_free; ++}; ++ ++struct qdma_sg_blk { ++ dma_addr_t blk_bus_addr; ++ void *blk_virt_addr; ++ struct list_head list; ++}; ++ ++struct dpaa2_qdma_comp { ++ dma_addr_t fd_bus_addr; ++ dma_addr_t fl_bus_addr; ++ dma_addr_t desc_bus_addr; ++ dma_addr_t sge_src_bus_addr; ++ dma_addr_t sge_dst_bus_addr; ++ void *fd_virt_addr; ++ void *fl_virt_addr; ++ void *desc_virt_addr; ++ void *sg_src_virt_addr; ++ void *sg_dst_virt_addr; ++ struct qdma_sg_blk *sg_blk; ++ uint32_t sg_blk_num; ++ struct list_head sg_src_head; ++ struct list_head sg_dst_head; ++ struct dpaa2_qdma_chan *qchan; ++ struct virt_dma_desc vdesc; ++ struct list_head list; ++}; ++ ++struct dpaa2_qdma_engine { ++ struct dma_device dma_dev; ++ u32 n_chans; ++ struct dpaa2_qdma_chan chans[NUM_CH]; ++ ++ struct dpaa2_qdma_priv *priv; ++}; ++ ++/* ++ * dpaa2_qdma_priv - driver private data ++ */ ++struct dpaa2_qdma_priv { ++ int dpqdma_id; ++ ++ struct iommu_domain *iommu_domain; ++ struct dpdmai_attr dpdmai_attr; ++ struct device *dev; ++ struct fsl_mc_io *mc_io; ++ struct fsl_mc_device *dpdmai_dev; ++ ++ struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM]; ++ struct dpdmai_tx_queue_attr
tx_queue_attr[DPDMAI_PRIO_NUM]; ++ ++ uint8_t num_pairs; ++ ++ struct dpaa2_qdma_engine *dpaa2_qdma; ++ struct dpaa2_qdma_priv_per_prio *ppriv; ++}; ++ ++struct dpaa2_qdma_priv_per_prio { ++ int req_fqid; ++ int rsp_fqid; ++ int prio; ++ ++ struct dpaa2_io_store *store; ++ struct dpaa2_io_notification_ctx nctx; ++ ++ struct dpaa2_qdma_priv *priv; ++}; ++ ++/* FD pool size: one FD + 3 Frame list + 2 source/destination descriptor */ ++#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \ ++ sizeof(struct dpaa2_frame_list) * 3 + \ ++ sizeof(struct dpaa2_qdma_sd_d) * 2) ++ ++/* qdma_sg_blk + 16 SGs */ ++#define SG_POOL_SIZE (sizeof(struct qdma_sg_blk) +\ ++ sizeof(struct dpaa2_qdma_sg) * NUM_SG_PER_BLK) ++#endif /* __DPAA2_QDMA_H */ +--- /dev/null ++++ b/drivers/dma/dpaa2-qdma/dpdmai.c +@@ -0,0 +1,454 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++#include ++#include ++#include "fsl_dpdmai.h" ++#include "fsl_dpdmai_cmd.h" ++#include "../../../drivers/staging/fsl-mc/include/mc-sys.h" ++#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h" ++ ++int dpdmai_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpdmai_id, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN, ++ cmd_flags, ++ 0); ++ DPDMAI_CMD_OPEN(cmd, dpdmai_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++ ++int dpdmai_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE, ++ cmd_flags, token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpdmai_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpdmai_cfg *cfg, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE, ++ cmd_flags, ++ 0); ++ DPDMAI_CMD_CREATE(cmd, cfg); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++ ++int dpdmai_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpdmai_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpdmai_disable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpdmai_is_enabled(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPDMAI_RSP_IS_ENABLED(cmd, *en); ++ ++ return 0; ++} ++ ++int dpdmai_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpdmai_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpdmai_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ, ++ 
cmd_flags, ++ token); ++ DPDMAI_CMD_GET_IRQ(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPDMAI_RSP_GET_IRQ(cmd, *type, irq_cfg); ++ ++ return 0; ++} ++ ++int dpdmai_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpdmai_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ, ++ cmd_flags, ++ token); ++ DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPDMAI_RSP_GET_IRQ_ENABLE(cmd, *en); ++ ++ return 0; ++} ++ ++int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_MASK, ++ cmd_flags, ++ token); ++ DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPDMAI_RSP_GET_IRQ_MASK(cmd, *mask); ++ ++ return 0; ++} ++ ++int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_MASK, ++ cmd_flags, ++ token); ++ DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpdmai_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_STATUS, ++ cmd_flags, ++ token); ++ DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPDMAI_RSP_GET_IRQ_STATUS(cmd, *status); ++ ++ return 0; ++} ++ ++int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLEAR_IRQ_STATUS, ++ cmd_flags, ++ token); ++ DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); ++ ++ /* send command to 
mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpdmai_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpdmai_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPDMAI_RSP_GET_ATTR(cmd, attr); ++ ++ return 0; ++} ++ ++int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t priority, ++ const struct dpdmai_rx_queue_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE, ++ cmd_flags, ++ token); ++ DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t priority, struct dpdmai_rx_queue_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE, ++ cmd_flags, ++ token); ++ DPDMAI_CMD_GET_RX_QUEUE(cmd, priority); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPDMAI_RSP_GET_RX_QUEUE(cmd, attr); ++ ++ return 0; ++} ++ ++int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t priority, ++ struct dpdmai_tx_queue_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE, ++ cmd_flags, ++ token); ++ DPDMAI_CMD_GET_TX_QUEUE(cmd, priority); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPDMAI_RSP_GET_TX_QUEUE(cmd, attr); ++ ++ return 0; ++} +--- /dev/null ++++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h +@@ -0,0 +1,521 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPDMAI_H ++#define __FSL_DPDMAI_H ++ ++struct fsl_mc_io; ++ ++/* Data Path DMA Interface API ++ * Contains initialization APIs and runtime control APIs for DPDMAI ++ */ ++ ++/* General DPDMAI macros */ ++ ++/** ++ * Maximum number of Tx/Rx priorities per DPDMAI object ++ */ ++#define DPDMAI_PRIO_NUM 2 ++ ++/** ++ * All queues considered; see dpdmai_set_rx_queue() ++ */ ++#define DPDMAI_ALL_QUEUES (uint8_t)(-1) ++ ++/** ++ * dpdmai_open() - Open a control session for the specified object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @dpdmai_id: DPDMAI unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an ++ * already created object; an object may have been declared in ++ * the DPL or by calling the dpdmai_create() function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent commands for ++ * this specific object. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpdmai_id, ++ uint16_t *token); ++ ++/** ++ * dpdmai_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * ++ * After this function is called, no further operations are ++ * allowed on the object without opening a new control session. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * struct dpdmai_cfg - Structure representing DPDMAI configuration ++ * @priorities: Priorities for the DMA hardware processing; valid priorities are ++ * configured with values 1-8; the entry following last valid entry ++ * should be configured with 0 ++ */ ++struct dpdmai_cfg { ++ uint8_t priorities[DPDMAI_PRIO_NUM]; ++}; ++ ++/** ++ * dpdmai_create() - Create the DPDMAI object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @cfg: Configuration structure ++ * @token: Returned token; use in subsequent API calls ++ * ++ * Create the DPDMAI object, allocate required resources and ++ * perform required initialization. ++ * ++ * The object can be created either by declaring it in the ++ * DPL file, or by calling this function. ++ * ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent calls to ++ * this specific object. For objects that are created using the ++ * DPL file, call dpdmai_open() function to get an authentication ++ * token first. ++ * ++ * Return: '0' on Success; Error code otherwise. 
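++ * ++ * Illustrative usage (hypothetical values): ++ * ++ *	struct dpdmai_cfg cfg = { .priorities = { 1, 2 } }; ++ *	uint16_t token; ++ *	int err = dpdmai_create(mc_io, 0, &cfg, &token); ++ * ++ *	if (!err) ++ *		err = dpdmai_enable(mc_io, 0, token);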
++ */ ++int dpdmai_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpdmai_cfg *cfg, ++ uint16_t *token); ++ ++/** ++ * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpdmai_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_disable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpdmai_is_enabled() - Check if the DPDMAI is enabled. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * @en: Returns '1' if object is enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_is_enabled(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en); ++ ++/** ++ * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * struct dpdmai_irq_cfg - IRQ configuration ++ * @addr: Address that must be written to signal a message-based interrupt ++ * @val: Value to write into irq_addr address ++ * @irq_num: A user defined number associated with this IRQ ++ */ ++struct dpdmai_irq_cfg { ++ uint64_t addr; ++ uint32_t val; ++ int irq_num; ++}; ++ ++/** ++ * dpdmai_set_irq() - Set IRQ information for the DPDMAI to trigger an interrupt. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * @irq_index: Identifies the interrupt index to configure ++ * @irq_cfg: IRQ configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpdmai_irq_cfg *irq_cfg); ++ ++/** ++ * dpdmai_get_irq() - Get IRQ information from the DPDMAI ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * @irq_index: The interrupt index to configure ++ * @type: Interrupt type: 0 represents message interrupt ++ * type (both irq_addr and irq_val are valid) ++ * @irq_cfg: IRQ attributes ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpdmai_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpdmai_irq_cfg *irq_cfg); ++ ++/** ++ * dpdmai_set_irq_enable() - Set overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * @irq_index: The interrupt index to configure ++ * @en: Interrupt state - enable = 1, disable = 0 ++ * ++ * Allows GPP software to control when interrupts are generated. ++ * Each interrupt can have up to 32 causes. The enable/disable controls the ++ * overall interrupt state. If the interrupt is disabled, no cause will trigger ++ * an interrupt. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en); ++ ++/** ++ * dpdmai_get_irq_enable() - Get overall interrupt state ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * @irq_index: The interrupt index to configure ++ * @en: Returned Interrupt state - enable = 1, disable = 0 ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en); ++ ++/** ++ * dpdmai_set_irq_mask() - Set interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * @irq_index: The interrupt index to configure ++ * @mask: event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting IRQ ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask); ++ ++/** ++ * dpdmai_get_irq_mask() - Get interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * @irq_index: The interrupt index to configure ++ * @mask: Returned event mask to trigger interrupt ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask); ++ ++/** ++ * dpdmai_get_irq_status() - Get the current status of any pending interrupts ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupt status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise.
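++ * ++ * A caller typically passes the returned status back to ++ * dpdmai_clear_irq_status() to acknowledge the causes it has handled.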
++ */ ++int dpdmai_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status); ++ ++/** ++ * dpdmai_clear_irq_status() - Clear a pending interrupt's status ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * @irq_index: The interrupt index to configure ++ * @status: bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status); ++ ++/** ++ * struct dpdmai_attr - Structure representing DPDMAI attributes ++ * @id: DPDMAI object ID ++ * @version: DPDMAI version ++ * @num_of_priorities: number of priorities ++ */ ++struct dpdmai_attr { ++ int id; ++ /** ++ * struct version - DPDMAI version ++ * @major: DPDMAI major version ++ * @minor: DPDMAI minor version ++ */ ++ struct { ++ uint16_t major; ++ uint16_t minor; ++ } version; ++ uint8_t num_of_priorities; ++}; ++ ++/** ++ * dpdmai_get_attributes() - Retrieve DPDMAI attributes. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * @attr: Returned object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpdmai_attr *attr); ++ ++/** ++ * enum dpdmai_dest - DPDMAI destination types ++ * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode ++ * and does not generate FQDAN notifications; user is expected to dequeue ++ * from the queue based on polling or other user-defined method ++ * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN ++ * notifications to the specified DPIO; user is expected to dequeue ++ * from the queue only after notification is received ++ * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate ++ * FQDAN notifications, but is connected to the specified DPCON object; ++ * user is expected to dequeue from the DPCON channel ++ */ ++enum dpdmai_dest { ++ DPDMAI_DEST_NONE = 0, ++ DPDMAI_DEST_DPIO = 1, ++ DPDMAI_DEST_DPCON = 2 ++}; ++ ++/** ++ * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters ++ * @dest_type: Destination type ++ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type ++ * @priority: Priority selection within the DPIO or DPCON channel; valid values ++ * are 0-1 or 0-7, depending on the number of priorities in that ++ * channel; not relevant for 'DPDMAI_DEST_NONE' option ++ */ ++struct dpdmai_dest_cfg { ++ enum dpdmai_dest dest_type; ++ int dest_id; ++ uint8_t priority; ++}; ++ ++/* DPDMAI queue modification options */ ++ ++/** ++ * Select to modify the user's context associated with the queue ++ */ ++#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001 ++ ++/** ++ * Select to modify the queue's destination ++ */ ++#define DPDMAI_QUEUE_OPT_DEST 0x00000002 ++ ++/** ++ * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration ++ * @options: Flags representing the suggested modifications to the queue; ++ * Use any combination of 'DPDMAI_QUEUE_OPT_' flags ++ * @user_ctx: User context value provided in the frame descriptor of each ++ * dequeued frame; ++ * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained 
in 'options' ++ * @dest_cfg: Queue destination parameters; ++ * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options' ++ */ ++struct dpdmai_rx_queue_cfg { ++ uint32_t options; ++ uint64_t user_ctx; ++ struct dpdmai_dest_cfg dest_cfg; ++ ++}; ++ ++/** ++ * dpdmai_set_rx_queue() - Set Rx queue configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * @priority: Select the queue relative to number of ++ * priorities configured at DPDMAI creation; use ++ * DPDMAI_ALL_QUEUES to configure all Rx queues ++ * identically. ++ * @cfg: Rx queue configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t priority, ++ const struct dpdmai_rx_queue_cfg *cfg); ++ ++/** ++ * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues ++ * @user_ctx: User context value provided in the frame descriptor of each ++ * dequeued frame ++ * @dest_cfg: Queue destination configuration ++ * @fqid: Virtual FQID value to be used for dequeue operations ++ */ ++struct dpdmai_rx_queue_attr { ++ uint64_t user_ctx; ++ struct dpdmai_dest_cfg dest_cfg; ++ uint32_t fqid; ++}; ++ ++/** ++ * dpdmai_get_rx_queue() - Retrieve Rx queue attributes. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * @priority: Select the queue relative to number of ++ * priorities configured at DPDMAI creation ++ * @attr: Returned Rx queue attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t priority, ++ struct dpdmai_rx_queue_attr *attr); ++ ++/** ++ * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues ++ * @fqid: Virtual FQID to be used for sending frames to DMA hardware ++ */ ++ ++struct dpdmai_tx_queue_attr { ++ uint32_t fqid; ++}; ++ ++/** ++ * dpdmai_get_tx_queue() - Retrieve Tx queue attributes. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * @priority: Select the queue relative to number of ++ * priorities configured at DPDMAI creation ++ * @attr: Returned Tx queue attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t priority, ++ struct dpdmai_tx_queue_attr *attr); ++ ++#endif /* __FSL_DPDMAI_H */ +--- /dev/null ++++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h +@@ -0,0 +1,222 @@ ++/* Copyright 2013-2016 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. 
++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef _FSL_DPDMAI_CMD_H ++#define _FSL_DPDMAI_CMD_H ++ ++/* DPDMAI Version */ ++#define DPDMAI_VER_MAJOR 2 ++#define DPDMAI_VER_MINOR 2 ++ ++#define DPDMAI_CMD_BASE_VERSION 0 ++#define DPDMAI_CMD_ID_OFFSET 4 ++ ++/* Command IDs */ ++#define DPDMAI_CMDID_CLOSE ((0x800 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_OPEN ((0x80E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_CREATE ((0x90E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_DESTROY ((0x900 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++ ++#define DPDMAI_CMDID_ENABLE ((0x002 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_DISABLE ((0x003 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_GET_ATTR ((0x004 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_RESET ((0x005 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_IS_ENABLED ((0x006 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++ ++#define DPDMAI_CMDID_SET_IRQ ((0x010 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_GET_IRQ ((0x011 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_SET_IRQ_ENABLE ((0x012 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_GET_IRQ_ENABLE ((0x013 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_SET_IRQ_MASK ((0x014 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_GET_IRQ_MASK ((0x015 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_GET_IRQ_STATUS ((0x016 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_CLEAR_IRQ_STATUS ((0x017 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++ ++#define DPDMAI_CMDID_SET_RX_QUEUE ((0x1A0 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_GET_RX_QUEUE ((0x1A1 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_GET_TX_QUEUE ((0x1A2 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++ ++ ++#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */ ++#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */ ++ ++ ++#define 
MAKE_UMASK64(_width) \ ++ ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 : \ ++ (uint64_t)-1)) ++ ++static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val) ++{ ++ return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset); ++} ++ ++static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width) ++{ ++ return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width)); ++} ++ ++#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \ ++ ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg)) ++ ++#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \ ++ (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width))) ++ ++#define MC_CMD_HDR_READ_TOKEN(_hdr) \ ++ ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S)) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_OPEN(cmd, dpdmai_id) \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, dpdmai_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_CREATE(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[0]);\ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[1]);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_RSP_IS_ENABLED(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ ++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ ++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_GET_IRQ(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_RSP_GET_IRQ(cmd, type, irq_cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ ++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ ++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \ ++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_RSP_GET_IRQ_MASK(cmd, mask) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_RSP_GET_IRQ_STATUS(cmd, status) \ 
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_RSP_GET_ATTR(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \ ++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->num_of_priorities); \ ++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ ++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \ ++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority); \ ++ MC_CMD_OP(cmd, 0, 48, 4, enum dpdmai_dest, cfg->dest_cfg.dest_type); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ ++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_GET_RX_QUEUE(cmd, priority) \ ++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_RSP_GET_RX_QUEUE(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\ ++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ ++ MC_RSP_OP(cmd, 0, 48, 4, enum dpdmai_dest, attr->dest_cfg.dest_type);\ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\ ++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_GET_TX_QUEUE(cmd, priority) \ ++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_RSP_GET_TX_QUEUE(cmd, attr) \ ++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->fqid) ++ ++#endif /* _FSL_DPDMAI_CMD_H */ +--- /dev/null ++++ b/drivers/dma/fsl-qdma.c +@@ -0,0 +1,1201 @@ ++/* ++ * drivers/dma/fsl-qdma.c ++ * ++ * Copyright 2014-2015 Freescale Semiconductor, Inc. ++ * ++ * Driver for the Freescale qDMA engine with software command queue mode. ++ * Channel virtualization is supported through enqueuing of DMA jobs to, ++ * or dequeuing DMA jobs from, different work queues. ++ * This module can be found on Freescale LS SoCs. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "virt-dma.h" ++ ++#define FSL_QDMA_DMR 0x0 ++#define FSL_QDMA_DSR 0x4 ++#define FSL_QDMA_DEIER 0xe00 ++#define FSL_QDMA_DEDR 0xe04 ++#define FSL_QDMA_DECFDW0R 0xe10 ++#define FSL_QDMA_DECFDW1R 0xe14 ++#define FSL_QDMA_DECFDW2R 0xe18 ++#define FSL_QDMA_DECFDW3R 0xe1c ++#define FSL_QDMA_DECFQIDR 0xe30 ++#define FSL_QDMA_DECBR 0xe34 ++ ++#define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x)) ++#define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x)) ++#define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x)) ++#define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x)) ++#define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x)) ++#define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x)) ++#define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x)) ++#define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x)) ++ ++#define FSL_QDMA_SQDPAR 0x80c ++#define FSL_QDMA_SQEPAR 0x814 ++#define FSL_QDMA_BSQMR 0x800 ++#define FSL_QDMA_BSQSR 0x804 ++#define FSL_QDMA_BSQICR 0x828 ++#define FSL_QDMA_CQMR 0xa00 ++#define FSL_QDMA_CQDSCR1 0xa08 ++#define FSL_QDMA_CQDSCR2 0xa0c ++#define FSL_QDMA_CQIER 0xa10 ++#define FSL_QDMA_CQEDR 0xa14 ++#define FSL_QDMA_SQCCMR 0xa20 ++ ++#define FSL_QDMA_SQICR_ICEN ++ ++#define FSL_QDMA_CQIDR_CQT 0xff000000 ++#define FSL_QDMA_CQIDR_SQPE 0x800000 ++#define FSL_QDMA_CQIDR_SQT 0x8000 ++ ++#define FSL_QDMA_BCQIER_CQTIE 0x8000 ++#define FSL_QDMA_BCQIER_CQPEIE 0x800000 ++#define FSL_QDMA_BSQICR_ICEN 0x80000000 ++#define FSL_QDMA_BSQICR_ICST(x) ((x) << 16) ++#define FSL_QDMA_CQIER_MEIE 0x80000000 ++#define FSL_QDMA_CQIER_TEIE 0x1 ++#define FSL_QDMA_SQCCMR_ENTER_WM 0x200000 ++ ++#define FSL_QDMA_QUEUE_MAX 8 ++ ++#define FSL_QDMA_BCQMR_EN 0x80000000 ++#define FSL_QDMA_BCQMR_EI 0x40000000 ++#define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20) ++#define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16) ++ ++#define FSL_QDMA_BCQSR_QF 0x10000 ++#define FSL_QDMA_BCQSR_XOFF 0x1 ++ ++#define FSL_QDMA_BSQMR_EN 0x80000000 ++#define FSL_QDMA_BSQMR_DI 0x40000000 ++#define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16) ++ ++#define FSL_QDMA_BSQSR_QE 0x20000 ++ ++#define FSL_QDMA_DMR_DQD 0x40000000 ++#define FSL_QDMA_DSR_DB 0x80000000 ++ ++#define FSL_QDMA_BASE_BUFFER_SIZE 96 ++#define FSL_QDMA_EXPECT_SG_ENTRY_NUM 16 ++#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64 ++#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384 ++#define FSL_QDMA_QUEUE_NUM_MAX 8 ++ ++#define FSL_QDMA_CMD_RWTTYPE 0x4 ++#define FSL_QDMA_CMD_LWC 0x2 ++ ++#define FSL_QDMA_CMD_RWTTYPE_OFFSET 28 ++#define FSL_QDMA_CMD_NS_OFFSET 27 ++#define FSL_QDMA_CMD_DQOS_OFFSET 24 ++#define FSL_QDMA_CMD_WTHROTL_OFFSET 20 ++#define FSL_QDMA_CMD_DSEN_OFFSET 19 ++#define FSL_QDMA_CMD_LWC_OFFSET 16 ++ ++#define FSL_QDMA_E_SG_TABLE 1 ++#define FSL_QDMA_E_DATA_BUFFER 0 ++#define FSL_QDMA_F_LAST_ENTRY 1 ++ ++u64 pre_addr, pre_queue; ++ ++struct fsl_qdma_ccdf { ++ u8 status; ++ u32 rev1:22; ++ u32 ser:1; ++ u32 rev2:1; ++ u32 rev3:20; ++ u32 offset:9; ++ u32 format:3; ++ union { ++ struct { ++ u32 addr_lo; /* low 32-bits of 40-bit address */ ++ u32 addr_hi:8; /* high 8-bits of 40-bit address */ ++ u32 rev4:16; ++ u32 queue:3; ++ u32 rev5:3; ++ u32 dd:2; /* dynamic debug */ ++ }; ++ struct { ++ u64 addr:40; ++ /* More efficient address accessor */ ++ u64 __notaddress:24; ++ }; ++ }; ++} __packed; ++ ++struct fsl_qdma_csgf { ++ u32 offset:13; ++ u32 rev1:19; ++ u32 length:30; ++ u32 f:1; ++ u32 e:1; ++ union { ++ struct { ++ u32 addr_lo; /* low 32-bits of 
40-bit address */ ++ u32 addr_hi:8; /* high 8-bits of 40-bit address */ ++ u32 rev2:24; ++ }; ++ struct { ++ u64 addr:40; ++ /* More efficient address accessor */ ++ u64 __notaddress:24; ++ }; ++ }; ++} __packed; ++ ++struct fsl_qdma_sdf { ++ u32 rev3:32; ++ u32 ssd:12; /* source stride distance */ ++ u32 sss:12; /* source stride size */ ++ u32 rev4:8; ++ u32 rev5:32; ++ u32 cmd; ++} __packed; ++ ++struct fsl_qdma_ddf { ++ u32 rev1:32; ++ u32 dsd:12; /* Destination stride distance */ ++ u32 dss:12; /* Destination stride size */ ++ u32 rev2:8; ++ u32 rev3:32; ++ u32 cmd; ++} __packed; ++ ++struct fsl_qdma_chan { ++ struct virt_dma_chan vchan; ++ struct virt_dma_desc vdesc; ++ enum dma_status status; ++ u32 slave_id; ++ struct fsl_qdma_engine *qdma; ++ struct fsl_qdma_queue *queue; ++ struct list_head qcomp; ++}; ++ ++struct fsl_qdma_queue { ++ struct fsl_qdma_ccdf *virt_head; ++ struct fsl_qdma_ccdf *virt_tail; ++ struct list_head comp_used; ++ struct list_head comp_free; ++ struct dma_pool *comp_pool; ++ struct dma_pool *sg_pool; ++ spinlock_t queue_lock; ++ dma_addr_t bus_addr; ++ u32 n_cq; ++ u32 id; ++ struct fsl_qdma_ccdf *cq; ++}; ++ ++struct fsl_qdma_sg { ++ dma_addr_t bus_addr; ++ void *virt_addr; ++}; ++ ++struct fsl_qdma_comp { ++ dma_addr_t bus_addr; ++ void *virt_addr; ++ struct fsl_qdma_chan *qchan; ++ struct fsl_qdma_sg *sg_block; ++ struct virt_dma_desc vdesc; ++ struct list_head list; ++ u32 sg_block_src; ++ u32 sg_block_dst; ++}; ++ ++struct fsl_qdma_engine { ++ struct dma_device dma_dev; ++ void __iomem *ctrl_base; ++ void __iomem *status_base; ++ void __iomem *block_base; ++ u32 n_chans; ++ u32 n_queues; ++ struct mutex fsl_qdma_mutex; ++ int error_irq; ++ int queue_irq; ++ bool big_endian; ++ struct fsl_qdma_queue *queue; ++ struct fsl_qdma_queue *status; ++ struct fsl_qdma_chan chans[]; ++ ++}; ++ ++static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr) ++{ ++ if (qdma->big_endian) ++ return ioread32be(addr); ++ else ++ return ioread32(addr); ++} ++ ++static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val, ++ void __iomem *addr) ++{ ++ if (qdma->big_endian) ++ iowrite32be(val, addr); ++ else ++ iowrite32(val, addr); ++} ++ ++static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan) ++{ ++ return container_of(chan, struct fsl_qdma_chan, vchan.chan); ++} ++ ++static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd) ++{ ++ return container_of(vd, struct fsl_qdma_comp, vdesc); ++} ++ ++static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan) ++{ ++ /* ++ * In QDMA mode, we don't need to do anything.
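++ * Jobs are enqueued to the shared command queues rather than to ++ * per-channel hardware contexts, so there are no per-channel ++ * resources to set up here.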
++ */ ++ return 0; ++} ++ ++static void fsl_qdma_free_chan_resources(struct dma_chan *chan) ++{ ++ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan); ++ unsigned long flags; ++ LIST_HEAD(head); ++ ++ spin_lock_irqsave(&fsl_chan->vchan.lock, flags); ++ vchan_get_all_descriptors(&fsl_chan->vchan, &head); ++ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); ++ ++ vchan_dma_desc_free_list(&fsl_chan->vchan, &head); ++} ++ ++static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp, ++ dma_addr_t dst, dma_addr_t src, u32 len) ++{ ++ struct fsl_qdma_ccdf *ccdf; ++ struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest; ++ struct fsl_qdma_sdf *sdf; ++ struct fsl_qdma_ddf *ddf; ++ ++ ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr; ++ csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1; ++ csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2; ++ csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3; ++ sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4; ++ ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5; ++ ++ memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE); ++ /* Head Command Descriptor (Frame Descriptor) */ ++ ccdf->addr = fsl_comp->bus_addr + 16; ++ ccdf->format = 1; /* Compound S/G format */ ++ /* Status notification is enqueued to status queue. */ ++ ccdf->ser = 1; ++ /* Compound Command Descriptor (Frame List Table) */ ++ csgf_desc->addr = fsl_comp->bus_addr + 64; ++ /* It must be 32 as Compound S/G Descriptor */ ++ csgf_desc->length = 32; ++ csgf_src->addr = src; ++ csgf_src->length = len; ++ csgf_dest->addr = dst; ++ csgf_dest->length = len; ++ /* This entry is the last entry. */ ++ csgf_dest->f = FSL_QDMA_F_LAST_ENTRY; ++ /* Descriptor Buffer */ ++ sdf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET; ++ ddf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET; ++ ddf->cmd |= FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET; ++} ++ ++static void fsl_qdma_comp_fill_sg( ++ struct fsl_qdma_comp *fsl_comp, ++ struct scatterlist *dst_sg, unsigned int dst_nents, ++ struct scatterlist *src_sg, unsigned int src_nents) ++{ ++ struct fsl_qdma_ccdf *ccdf; ++ struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest, *csgf_sg; ++ struct fsl_qdma_sdf *sdf; ++ struct fsl_qdma_ddf *ddf; ++ struct fsl_qdma_sg *sg_block, *temp; ++ struct scatterlist *sg; ++ u64 total_src_len = 0; ++ u64 total_dst_len = 0; ++ u32 i; ++ ++ ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr; ++ csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1; ++ csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2; ++ csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3; ++ sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4; ++ ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5; ++ ++ memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE); ++ /* Head Command Descriptor (Frame Descriptor) */ ++ ccdf->addr = fsl_comp->bus_addr + 16; ++ ccdf->format = 1; /* Compound S/G format */ ++ /* Status notification is enqueued to status queue. */ ++ ccdf->ser = 1; ++ ++ /* Compound Command Descriptor (Frame List Table) */ ++ csgf_desc->addr = fsl_comp->bus_addr + 64; ++ /* It must be 32 as Compound S/G Descriptor */ ++ csgf_desc->length = 32; ++ ++ sg_block = fsl_comp->sg_block; ++ csgf_src->addr = sg_block->bus_addr; ++ /* This entry links to the s/g entry. */ ++ csgf_src->e = FSL_QDMA_E_SG_TABLE; ++ ++ temp = sg_block + fsl_comp->sg_block_src; ++ csgf_dest->addr = temp->bus_addr; ++ /* This entry is the last entry.
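++ * (The F bit marks the end of the compound frame list for the engine.)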
*/ ++ csgf_dest->f = FSL_QDMA_F_LAST_ENTRY; ++ /* This entry links to the s/g entry. */ ++ csgf_dest->e = FSL_QDMA_E_SG_TABLE; ++ ++ for_each_sg(src_sg, sg, src_nents, i) { ++ temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1); ++ csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr + ++ i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1); ++ csgf_sg->addr = sg_dma_address(sg); ++ csgf_sg->length = sg_dma_len(sg); ++ total_src_len += sg_dma_len(sg); ++ ++ if (i == src_nents - 1) ++ csgf_sg->f = FSL_QDMA_F_LAST_ENTRY; ++ if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) == ++ FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) { ++ csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr + ++ FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1; ++ temp = sg_block + ++ i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1; ++ csgf_sg->addr = temp->bus_addr; ++ csgf_sg->e = FSL_QDMA_E_SG_TABLE; ++ } ++ } ++ ++ sg_block += fsl_comp->sg_block_src; ++ for_each_sg(dst_sg, sg, dst_nents, i) { ++ temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1); ++ csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr + ++ i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1); ++ csgf_sg->addr = sg_dma_address(sg); ++ csgf_sg->length = sg_dma_len(sg); ++ total_dst_len += sg_dma_len(sg); ++ ++ if (i == dst_nents - 1) ++ csgf_sg->f = FSL_QDMA_F_LAST_ENTRY; ++ if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) == ++ FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) { ++ csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr + ++ FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1; ++ temp = sg_block + ++ i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1; ++ csgf_sg->addr = temp->bus_addr; ++ csgf_sg->e = FSL_QDMA_E_SG_TABLE; ++ } ++ } ++ ++ if (total_src_len != total_dst_len) ++ dev_err(&fsl_comp->qchan->vchan.chan.dev->device, ++ "The data lengths for src and dst don't match.\n"); ++ ++ csgf_src->length = total_src_len; ++ csgf_dest->length = total_dst_len; ++ ++ /* Descriptor Buffer */ ++ sdf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET; ++ ddf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET; ++} ++ ++/* ++ * Pre-request full command descriptors for enqueue. ++ */ ++static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue) ++{ ++ struct fsl_qdma_comp *comp_temp; ++ int i; ++ ++ for (i = 0; i < queue->n_cq; i++) { ++ comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL); ++ if (!comp_temp) ++ return -1; ++ comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool, ++ GFP_NOWAIT, ++ &comp_temp->bus_addr); ++ if (!comp_temp->virt_addr) ++ return -1; ++ list_add_tail(&comp_temp->list, &queue->comp_free); ++ } ++ return 0; ++} ++ ++/* ++ * Request a command descriptor for enqueue.
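++ * A descriptor is recycled from the per-queue free list when one is ++ * available; otherwise a fresh one is allocated from the DMA pool.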
++ */ ++static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc( ++ struct fsl_qdma_chan *fsl_chan, ++ unsigned int dst_nents, ++ unsigned int src_nents) ++{ ++ struct fsl_qdma_comp *comp_temp; ++ struct fsl_qdma_sg *sg_block; ++ struct fsl_qdma_queue *queue = fsl_chan->queue; ++ unsigned long flags; ++ unsigned int dst_sg_entry_block, src_sg_entry_block, sg_entry_total, i; ++ ++ spin_lock_irqsave(&queue->queue_lock, flags); ++ if (list_empty(&queue->comp_free)) { ++ spin_unlock_irqrestore(&queue->queue_lock, flags); ++ comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL); ++ if (!comp_temp) ++ return NULL; ++ comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool, ++ GFP_NOWAIT, ++ &comp_temp->bus_addr); ++ if (!comp_temp->virt_addr) ++ return NULL; ++ } else { ++ comp_temp = list_first_entry(&queue->comp_free, ++ struct fsl_qdma_comp, ++ list); ++ list_del(&comp_temp->list); ++ spin_unlock_irqrestore(&queue->queue_lock, flags); ++ } ++ ++ if (dst_nents != 0) ++ dst_sg_entry_block = dst_nents / ++ (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1; ++ else ++ dst_sg_entry_block = 0; ++ ++ if (src_nents != 0) ++ src_sg_entry_block = src_nents / ++ (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1; ++ else ++ src_sg_entry_block = 0; ++ ++ sg_entry_total = dst_sg_entry_block + src_sg_entry_block; ++ if (sg_entry_total) { ++ sg_block = kzalloc(sizeof(*sg_block) * ++ sg_entry_total, ++ GFP_KERNEL); ++ if (!sg_block) ++ return NULL; ++ comp_temp->sg_block = sg_block; ++ for (i = 0; i < sg_entry_total; i++) { ++ sg_block->virt_addr = dma_pool_alloc(queue->sg_pool, ++ GFP_NOWAIT, ++ &sg_block->bus_addr); ++ /* dma_pool_alloc() may fail; don't memset a NULL buffer. */ ++ if (!sg_block->virt_addr) ++ return NULL; ++ memset(sg_block->virt_addr, 0, ++ FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16); ++ sg_block++; ++ } ++ } ++ ++ comp_temp->sg_block_src = src_sg_entry_block; ++ comp_temp->sg_block_dst = dst_sg_entry_block; ++ comp_temp->qchan = fsl_chan; ++ ++ return comp_temp; ++} ++ ++static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources( ++ struct platform_device *pdev, ++ unsigned int queue_num) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ struct fsl_qdma_queue *queue_head, *queue_temp; ++ int ret, len, i; ++ unsigned int queue_size[FSL_QDMA_QUEUE_MAX]; ++ ++ if (queue_num > FSL_QDMA_QUEUE_MAX) ++ queue_num = FSL_QDMA_QUEUE_MAX; ++ len = sizeof(*queue_head) * queue_num; ++ queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); ++ if (!queue_head) ++ return NULL; ++ ++ ret = of_property_read_u32_array(np, "queue-sizes", queue_size, ++ queue_num); ++ if (ret) { ++ dev_err(&pdev->dev, "Can't get queue-sizes.\n"); ++ return NULL; ++ } ++ ++ for (i = 0; i < queue_num; i++) { ++ if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ++ || queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) { ++ dev_err(&pdev->dev, "Get wrong queue-sizes.\n"); ++ return NULL; ++ } ++ queue_temp = queue_head + i; ++ queue_temp->cq = dma_alloc_coherent(&pdev->dev, ++ sizeof(struct fsl_qdma_ccdf) * ++ queue_size[i], ++ &queue_temp->bus_addr, ++ GFP_KERNEL); ++ if (!queue_temp->cq) ++ return NULL; ++ queue_temp->n_cq = queue_size[i]; ++ queue_temp->id = i; ++ queue_temp->virt_head = queue_temp->cq; ++ queue_temp->virt_tail = queue_temp->cq; ++ /* ++ * The dma pool for queue command buffer ++ */ ++ queue_temp->comp_pool = dma_pool_create("comp_pool", ++ &pdev->dev, ++ FSL_QDMA_BASE_BUFFER_SIZE, ++ 16, 0); ++ if (!queue_temp->comp_pool) { ++ dma_free_coherent(&pdev->dev, ++ sizeof(struct fsl_qdma_ccdf) * ++ queue_size[i], ++ queue_temp->cq, ++ queue_temp->bus_addr); ++ return NULL; ++ } ++ /* ++ * The dma pool for the scatter/gather entry blocks ++ */ ++ 
queue_temp->sg_pool = dma_pool_create("sg_pool", ++ &pdev->dev, ++ FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16, ++ 64, 0); ++ if (!queue_temp->sg_pool) { ++ dma_free_coherent(&pdev->dev, ++ sizeof(struct fsl_qdma_ccdf) * ++ queue_size[i], ++ queue_temp->cq, ++ queue_temp->bus_addr); ++ dma_pool_destroy(queue_temp->comp_pool); ++ return NULL; ++ } ++ /* ++ * List for queue command buffer ++ */ ++ INIT_LIST_HEAD(&queue_temp->comp_used); ++ INIT_LIST_HEAD(&queue_temp->comp_free); ++ spin_lock_init(&queue_temp->queue_lock); ++ } ++ ++ return queue_head; ++} ++ ++static struct fsl_qdma_queue *fsl_qdma_prep_status_queue( ++ struct platform_device *pdev) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ struct fsl_qdma_queue *status_head; ++ unsigned int status_size; ++ int ret; ++ ++ ret = of_property_read_u32(np, "status-sizes", &status_size); ++ if (ret) { ++ dev_err(&pdev->dev, "Can't get status-sizes.\n"); ++ return NULL; ++ } ++ if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ++ || status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) { ++ dev_err(&pdev->dev, "Get wrong status_size.\n"); ++ return NULL; ++ } ++ status_head = devm_kzalloc(&pdev->dev, sizeof(*status_head), ++ GFP_KERNEL); ++ if (!status_head) ++ return NULL; ++ ++ /* ++ * Buffer for queue command ++ */ ++ status_head->cq = dma_alloc_coherent(&pdev->dev, ++ sizeof(struct fsl_qdma_ccdf) * ++ status_size, ++ &status_head->bus_addr, ++ GFP_KERNEL); ++ if (!status_head->cq) ++ return NULL; ++ status_head->n_cq = status_size; ++ status_head->virt_head = status_head->cq; ++ status_head->virt_tail = status_head->cq; ++ status_head->comp_pool = NULL; ++ ++ return status_head; ++} ++ ++static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma) ++{ ++ void __iomem *ctrl = fsl_qdma->ctrl_base; ++ void __iomem *block = fsl_qdma->block_base; ++ int i, count = 5; ++ u32 reg; ++ ++ /* Disable the command queue and wait for idle state. */ ++ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR); ++ reg |= FSL_QDMA_DMR_DQD; ++ qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR); ++ for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++) ++ qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i)); ++ ++ while (1) { ++ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR); ++ if (!(reg & FSL_QDMA_DSR_DB)) ++ break; ++ if (count-- < 0) ++ return -EBUSY; ++ udelay(100); ++ } ++ ++ /* Disable status queue. */ ++ qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR); ++ ++ /* ++ * Clear the command queue interrupt detect register for all queues. 
++ */ ++ qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0)); ++ ++ return 0; ++} ++ ++static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma) ++{ ++ struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue; ++ struct fsl_qdma_queue *fsl_status = fsl_qdma->status; ++ struct fsl_qdma_queue *temp_queue; ++ struct fsl_qdma_comp *fsl_comp; ++ struct fsl_qdma_ccdf *status_addr; ++ struct fsl_qdma_csgf *csgf_src; ++ void __iomem *block = fsl_qdma->block_base; ++ u32 reg, i; ++ bool duplicate, duplicate_handle; ++ ++ while (1) { ++ duplicate = 0; ++ duplicate_handle = 0; ++ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR); ++ if (reg & FSL_QDMA_BSQSR_QE) ++ return 0; ++ status_addr = fsl_status->virt_head; ++ if (status_addr->queue == pre_queue && ++ status_addr->addr == pre_addr) ++ duplicate = 1; ++ ++ i = status_addr->queue; ++ pre_queue = status_addr->queue; ++ pre_addr = status_addr->addr; ++ temp_queue = fsl_queue + i; ++ spin_lock(&temp_queue->queue_lock); ++ if (list_empty(&temp_queue->comp_used)) { ++ if (duplicate) ++ duplicate_handle = 1; ++ else { ++ spin_unlock(&temp_queue->queue_lock); ++ return -1; ++ } ++ } else { ++ fsl_comp = list_first_entry(&temp_queue->comp_used, ++ struct fsl_qdma_comp, ++ list); ++ csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr ++ + 2; ++ if (fsl_comp->bus_addr + 16 != ++ (dma_addr_t)status_addr->addr) { ++ if (duplicate) ++ duplicate_handle = 1; ++ else { ++ spin_unlock(&temp_queue->queue_lock); ++ return -1; ++ } ++ } ++ } ++ ++ if (duplicate_handle) { ++ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR); ++ reg |= FSL_QDMA_BSQMR_DI; ++ status_addr->addr = 0x0; ++ fsl_status->virt_head++; ++ if (fsl_status->virt_head == fsl_status->cq ++ + fsl_status->n_cq) ++ fsl_status->virt_head = fsl_status->cq; ++ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR); ++ spin_unlock(&temp_queue->queue_lock); ++ continue; ++ } ++ list_del(&fsl_comp->list); ++ ++ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR); ++ reg |= FSL_QDMA_BSQMR_DI; ++ status_addr->addr = 0x0; ++ fsl_status->virt_head++; ++ if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq) ++ fsl_status->virt_head = fsl_status->cq; ++ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR); ++ spin_unlock(&temp_queue->queue_lock); ++ ++ spin_lock(&fsl_comp->qchan->vchan.lock); ++ vchan_cookie_complete(&fsl_comp->vdesc); ++ fsl_comp->qchan->status = DMA_COMPLETE; ++ spin_unlock(&fsl_comp->qchan->vchan.lock); ++ } ++ return 0; ++} ++ ++static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id) ++{ ++ struct fsl_qdma_engine *fsl_qdma = dev_id; ++ unsigned int intr; ++ void __iomem *status = fsl_qdma->status_base; ++ ++ intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR); ++ ++ if (intr) ++ dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n"); ++ ++ qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR); ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id) ++{ ++ struct fsl_qdma_engine *fsl_qdma = dev_id; ++ unsigned int intr, reg; ++ void __iomem *block = fsl_qdma->block_base; ++ void __iomem *ctrl = fsl_qdma->ctrl_base; ++ ++ intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0)); ++ ++ if ((intr & FSL_QDMA_CQIDR_SQT) != 0) ++ intr = fsl_qdma_queue_transfer_complete(fsl_qdma); ++ ++ if (intr != 0) { ++ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR); ++ reg |= FSL_QDMA_DMR_DQD; ++ qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR); ++ qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0)); ++ 
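/*
++		 * A status-queue error is fatal here: the command queues
++		 * have just been stopped via DMR[DQD] and the completion
++		 * interrupt masked, so all that is left is to report it.
++		 */
++		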
dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
++	}
++
++	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
++
++	return IRQ_HANDLED;
++}
++
++static int
++fsl_qdma_irq_init(struct platform_device *pdev,
++		  struct fsl_qdma_engine *fsl_qdma)
++{
++	int ret;
++
++	fsl_qdma->error_irq = platform_get_irq_byname(pdev,
++						      "qdma-error");
++	if (fsl_qdma->error_irq < 0) {
++		dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
++		return fsl_qdma->error_irq;
++	}
++
++	fsl_qdma->queue_irq = platform_get_irq_byname(pdev, "qdma-queue");
++	if (fsl_qdma->queue_irq < 0) {
++		dev_err(&pdev->dev, "Can't get qdma queue irq.\n");
++		return fsl_qdma->queue_irq;
++	}
++
++	ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
++			fsl_qdma_error_handler, 0, "qDMA error", fsl_qdma);
++	if (ret) {
++		dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
++		return ret;
++	}
++	ret = devm_request_irq(&pdev->dev, fsl_qdma->queue_irq,
++			fsl_qdma_queue_handler, 0, "qDMA queue", fsl_qdma);
++	if (ret) {
++		dev_err(&pdev->dev, "Can't register qDMA queue IRQ.\n");
++		return ret;
++	}
++
++	return 0;
++}
++
++static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
++{
++	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
++	struct fsl_qdma_queue *temp;
++	void __iomem *ctrl = fsl_qdma->ctrl_base;
++	void __iomem *status = fsl_qdma->status_base;
++	void __iomem *block = fsl_qdma->block_base;
++	int i, ret;
++	u32 reg;
++
++	/* Try to halt the qDMA engine first. */
++	ret = fsl_qdma_halt(fsl_qdma);
++	if (ret) {
++		dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
++		return ret;
++	}
++
++	/*
++	 * Clear the command queue interrupt detect register for all queues.
++	 */
++	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
++
++	for (i = 0; i < fsl_qdma->n_queues; i++) {
++		temp = fsl_queue + i;
++		/*
++		 * Initialize Command Queue registers to point to the first
++		 * command descriptor in memory.
++		 * Dequeue Pointer Address Registers
++		 * Enqueue Pointer Address Registers
++		 */
++		qdma_writel(fsl_qdma, temp->bus_addr,
++			    block + FSL_QDMA_BCQDPA_SADDR(i));
++		qdma_writel(fsl_qdma, temp->bus_addr,
++			    block + FSL_QDMA_BCQEPA_SADDR(i));
++
++		/* Initialize the queue mode. */
++		reg = FSL_QDMA_BCQMR_EN;
++		reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq)-4);
++		reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq)-6);
++		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
++	}
++
++	/*
++	 * Workaround for erratum ERR010812:
++	 * we must enable XOFF to avoid enqueue rejections,
++	 * so set SQCCMR ENTER_WM to 0x20.
++	 */
++	qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
++		    block + FSL_QDMA_SQCCMR);
++	/*
++	 * Initialize status queue registers to point to the first
++	 * command descriptor in memory.
++	 * Dequeue Pointer Address Registers
++	 * Enqueue Pointer Address Registers
++	 */
++	qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
++		    block + FSL_QDMA_SQEPAR);
++	qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
++		    block + FSL_QDMA_SQDPAR);
++	/* Initialize status queue interrupt. */
++	qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
++		    block + FSL_QDMA_BCQIER(0));
++	qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN | FSL_QDMA_BSQICR_ICST(5)
++		    | 0x8000,
++		    block + FSL_QDMA_BSQICR);
++	qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE | FSL_QDMA_CQIER_TEIE,
++		    block + FSL_QDMA_CQIER);
++	/* Initialize controller interrupt register.
*/ ++ qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR); ++ qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEIER); ++ ++ /* Initialize the status queue mode. */ ++ reg = FSL_QDMA_BSQMR_EN; ++ reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(fsl_qdma->status->n_cq)-6); ++ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR); ++ ++ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR); ++ reg &= ~FSL_QDMA_DMR_DQD; ++ qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR); ++ ++ return 0; ++} ++ ++static struct dma_async_tx_descriptor *fsl_qdma_prep_dma_sg( ++ struct dma_chan *chan, ++ struct scatterlist *dst_sg, unsigned int dst_nents, ++ struct scatterlist *src_sg, unsigned int src_nents, ++ unsigned long flags) ++{ ++ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan); ++ struct fsl_qdma_comp *fsl_comp; ++ ++ fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan, ++ dst_nents, ++ src_nents); ++ fsl_qdma_comp_fill_sg(fsl_comp, dst_sg, dst_nents, src_sg, src_nents); ++ ++ return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags); ++} ++ ++static struct dma_async_tx_descriptor * ++fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, ++ dma_addr_t src, size_t len, unsigned long flags) ++{ ++ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan); ++ struct fsl_qdma_comp *fsl_comp; ++ ++ fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan, 0, 0); ++ fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len); ++ ++ return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags); ++} ++ ++static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan) ++{ ++ void __iomem *block = fsl_chan->qdma->block_base; ++ struct fsl_qdma_queue *fsl_queue = fsl_chan->queue; ++ struct fsl_qdma_comp *fsl_comp; ++ struct virt_dma_desc *vdesc; ++ u32 reg; ++ ++ reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id)); ++ if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF)) ++ return; ++ vdesc = vchan_next_desc(&fsl_chan->vchan); ++ if (!vdesc) ++ return; ++ list_del(&vdesc->node); ++ fsl_comp = to_fsl_qdma_comp(vdesc); ++ ++ memcpy(fsl_queue->virt_head++, fsl_comp->virt_addr, 16); ++ if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq) ++ fsl_queue->virt_head = fsl_queue->cq; ++ ++ list_add_tail(&fsl_comp->list, &fsl_queue->comp_used); ++ barrier(); ++ reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id)); ++ reg |= FSL_QDMA_BCQMR_EI; ++ qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id)); ++ fsl_chan->status = DMA_IN_PROGRESS; ++} ++ ++static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan, ++ dma_cookie_t cookie, struct dma_tx_state *txstate) ++{ ++ return dma_cookie_status(chan, cookie, txstate); ++} ++ ++static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc) ++{ ++ struct fsl_qdma_comp *fsl_comp; ++ struct fsl_qdma_queue *fsl_queue; ++ struct fsl_qdma_sg *sg_block; ++ unsigned long flags; ++ unsigned int i; ++ ++ fsl_comp = to_fsl_qdma_comp(vdesc); ++ fsl_queue = fsl_comp->qchan->queue; ++ ++ if (fsl_comp->sg_block) { ++ for (i = 0; i < fsl_comp->sg_block_src + ++ fsl_comp->sg_block_dst; i++) { ++ sg_block = fsl_comp->sg_block + i; ++ dma_pool_free(fsl_queue->sg_pool, ++ sg_block->virt_addr, ++ sg_block->bus_addr); ++ } ++ kfree(fsl_comp->sg_block); ++ } ++ ++ spin_lock_irqsave(&fsl_queue->queue_lock, flags); ++ list_add_tail(&fsl_comp->list, &fsl_queue->comp_free); ++ spin_unlock_irqrestore(&fsl_queue->queue_lock, flags); ++} ++ ++static void fsl_qdma_issue_pending(struct dma_chan *chan) ++{ ++ struct fsl_qdma_chan *fsl_chan = 
to_fsl_qdma_chan(chan); ++ struct fsl_qdma_queue *fsl_queue = fsl_chan->queue; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&fsl_queue->queue_lock, flags); ++ spin_lock(&fsl_chan->vchan.lock); ++ if (vchan_issue_pending(&fsl_chan->vchan)) ++ fsl_qdma_enqueue_desc(fsl_chan); ++ spin_unlock(&fsl_chan->vchan.lock); ++ spin_unlock_irqrestore(&fsl_queue->queue_lock, flags); ++} ++ ++static int fsl_qdma_probe(struct platform_device *pdev) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ struct fsl_qdma_engine *fsl_qdma; ++ struct fsl_qdma_chan *fsl_chan; ++ struct resource *res; ++ unsigned int len, chans, queues; ++ int ret, i; ++ ++ ret = of_property_read_u32(np, "channels", &chans); ++ if (ret) { ++ dev_err(&pdev->dev, "Can't get channels.\n"); ++ return ret; ++ } ++ ++ len = sizeof(*fsl_qdma) + sizeof(*fsl_chan) * chans; ++ fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); ++ if (!fsl_qdma) ++ return -ENOMEM; ++ ++ ret = of_property_read_u32(np, "queues", &queues); ++ if (ret) { ++ dev_err(&pdev->dev, "Can't get queues.\n"); ++ return ret; ++ } ++ ++ fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, queues); ++ if (!fsl_qdma->queue) ++ return -ENOMEM; ++ ++ fsl_qdma->status = fsl_qdma_prep_status_queue(pdev); ++ if (!fsl_qdma->status) ++ return -ENOMEM; ++ ++ fsl_qdma->n_chans = chans; ++ fsl_qdma->n_queues = queues; ++ mutex_init(&fsl_qdma->fsl_qdma_mutex); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(fsl_qdma->ctrl_base)) ++ return PTR_ERR(fsl_qdma->ctrl_base); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); ++ fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(fsl_qdma->status_base)) ++ return PTR_ERR(fsl_qdma->status_base); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 2); ++ fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(fsl_qdma->block_base)) ++ return PTR_ERR(fsl_qdma->block_base); ++ ++ ret = fsl_qdma_irq_init(pdev, fsl_qdma); ++ if (ret) ++ return ret; ++ ++ fsl_qdma->big_endian = of_property_read_bool(np, "big-endian"); ++ INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels); ++ for (i = 0; i < fsl_qdma->n_chans; i++) { ++ struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i]; ++ ++ fsl_chan->qdma = fsl_qdma; ++ fsl_chan->queue = fsl_qdma->queue + i % fsl_qdma->n_queues; ++ fsl_chan->vchan.desc_free = fsl_qdma_free_desc; ++ INIT_LIST_HEAD(&fsl_chan->qcomp); ++ vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev); ++ } ++ for (i = 0; i < fsl_qdma->n_queues; i++) ++ fsl_qdma_pre_request_enqueue_desc(fsl_qdma->queue + i); ++ ++ dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask); ++ dma_cap_set(DMA_SG, fsl_qdma->dma_dev.cap_mask); ++ ++ fsl_qdma->dma_dev.dev = &pdev->dev; ++ fsl_qdma->dma_dev.device_alloc_chan_resources ++ = fsl_qdma_alloc_chan_resources; ++ fsl_qdma->dma_dev.device_free_chan_resources ++ = fsl_qdma_free_chan_resources; ++ fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status; ++ fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy; ++ fsl_qdma->dma_dev.device_prep_dma_sg = fsl_qdma_prep_dma_sg; ++ fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending; ++ ++ dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); ++ ++ platform_set_drvdata(pdev, fsl_qdma); ++ ++ ret = dma_async_device_register(&fsl_qdma->dma_dev); ++ if (ret) { ++ dev_err(&pdev->dev, "Can't register Freescale qDMA engine.\n"); ++ return ret; ++ } ++ ++ ret = fsl_qdma_reg_init(fsl_qdma); ++ if (ret) { ++ 
dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n"); ++ return ret; ++ } ++ ++ ++ return 0; ++} ++ ++static int fsl_qdma_remove(struct platform_device *pdev) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev); ++ struct fsl_qdma_queue *queue_temp; ++ struct fsl_qdma_queue *status = fsl_qdma->status; ++ struct fsl_qdma_comp *comp_temp, *_comp_temp; ++ int i; ++ ++ of_dma_controller_free(np); ++ dma_async_device_unregister(&fsl_qdma->dma_dev); ++ ++ /* Free descriptor areas */ ++ for (i = 0; i < fsl_qdma->n_queues; i++) { ++ queue_temp = fsl_qdma->queue + i; ++ list_for_each_entry_safe(comp_temp, _comp_temp, ++ &queue_temp->comp_used, list) { ++ dma_pool_free(queue_temp->comp_pool, ++ comp_temp->virt_addr, ++ comp_temp->bus_addr); ++ list_del(&comp_temp->list); ++ kfree(comp_temp); ++ } ++ list_for_each_entry_safe(comp_temp, _comp_temp, ++ &queue_temp->comp_free, list) { ++ dma_pool_free(queue_temp->comp_pool, ++ comp_temp->virt_addr, ++ comp_temp->bus_addr); ++ list_del(&comp_temp->list); ++ kfree(comp_temp); ++ } ++ dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) * ++ queue_temp->n_cq, queue_temp->cq, ++ queue_temp->bus_addr); ++ dma_pool_destroy(queue_temp->comp_pool); ++ } ++ ++ dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) * ++ status->n_cq, status->cq, status->bus_addr); ++ return 0; ++} ++ ++static const struct of_device_id fsl_qdma_dt_ids[] = { ++ { .compatible = "fsl,ls1021a-qdma", }, ++ { /* sentinel */ } ++}; ++MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids); ++ ++static struct platform_driver fsl_qdma_driver = { ++ .driver = { ++ .name = "fsl-qdma", ++ .owner = THIS_MODULE, ++ .of_match_table = fsl_qdma_dt_ids, ++ }, ++ .probe = fsl_qdma_probe, ++ .remove = fsl_qdma_remove, ++}; ++ ++static int __init fsl_qdma_init(void) ++{ ++ return platform_driver_register(&fsl_qdma_driver); ++} ++subsys_initcall(fsl_qdma_init); ++ ++static void __exit fsl_qdma_exit(void) ++{ ++ platform_driver_unregister(&fsl_qdma_driver); ++} ++module_exit(fsl_qdma_exit); ++ ++MODULE_ALIAS("platform:fsl-qdma"); ++MODULE_DESCRIPTION("Freescale qDMA engine driver"); ++MODULE_LICENSE("GPL v2"); diff --git a/target/linux/layerscape/patches-4.9/806-flextimer-support-layerscape.patch b/target/linux/layerscape/patches-4.9/806-flextimer-support-layerscape.patch new file mode 100644 index 000000000..711fde77d --- /dev/null +++ b/target/linux/layerscape/patches-4.9/806-flextimer-support-layerscape.patch @@ -0,0 +1,323 @@ +From a5b3155b532289af793c26251cb087b4a24d5c15 Mon Sep 17 00:00:00 2001 +From: Yangbo Lu +Date: Mon, 25 Sep 2017 12:13:12 +0800 +Subject: [PATCH] flextimer: support layerscape + +This is a integrated patch for layerscape flextimer support. 
+ +Signed-off-by: Wang Dongsheng +Signed-off-by: Meng Yi +Signed-off-by: Yangbo Lu +--- + drivers/clocksource/fsl_ftm_timer.c | 8 +- + drivers/soc/fsl/layerscape/ftm_alarm.c | 286 +++++++++++++++++++++++++++++++++ + 2 files changed, 290 insertions(+), 4 deletions(-) + create mode 100644 drivers/soc/fsl/layerscape/ftm_alarm.c + +--- a/drivers/clocksource/fsl_ftm_timer.c ++++ b/drivers/clocksource/fsl_ftm_timer.c +@@ -83,11 +83,11 @@ static inline void ftm_counter_disable(v + + static inline void ftm_irq_acknowledge(void __iomem *base) + { +- u32 val; ++ unsigned int timeout = 100; + +- val = ftm_readl(base + FTM_SC); +- val &= ~FTM_SC_TOF; +- ftm_writel(val, base + FTM_SC); ++ while ((FTM_SC_TOF & ftm_readl(base + FTM_SC)) && timeout--) ++ ftm_writel(ftm_readl(base + FTM_SC) & (~FTM_SC_TOF), ++ base + FTM_SC); + } + + static inline void ftm_irq_enable(void __iomem *base) +--- /dev/null ++++ b/drivers/soc/fsl/layerscape/ftm_alarm.c +@@ -0,0 +1,286 @@ ++/* ++ * Freescale FlexTimer Module (FTM) Alarm driver. ++ * ++ * Copyright 2014 Freescale Semiconductor, Inc. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * as published by the Free Software Foundation; either version 2 ++ * of the License, or (at your option) any later version. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define FTM_SC 0x00 ++#define FTM_SC_CLK_SHIFT 3 ++#define FTM_SC_CLK_MASK (0x3 << FTM_SC_CLK_SHIFT) ++#define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_SHIFT) ++#define FTM_SC_PS_MASK 0x7 ++#define FTM_SC_TOIE BIT(6) ++#define FTM_SC_TOF BIT(7) ++ ++#define FTM_SC_CLKS_FIXED_FREQ 0x02 ++ ++#define FTM_CNT 0x04 ++#define FTM_MOD 0x08 ++#define FTM_CNTIN 0x4C ++ ++#define FIXED_FREQ_CLK 32000 ++#define MAX_FREQ_DIV (1 << FTM_SC_PS_MASK) ++#define MAX_COUNT_VAL 0xffff ++ ++static void __iomem *ftm1_base; ++static void __iomem *rcpm_ftm_addr; ++static u32 alarm_freq; ++static bool big_endian; ++ ++static inline u32 ftm_readl(void __iomem *addr) ++{ ++ if (big_endian) ++ return ioread32be(addr); ++ ++ return ioread32(addr); ++} ++ ++static inline void ftm_writel(u32 val, void __iomem *addr) ++{ ++ if (big_endian) ++ iowrite32be(val, addr); ++ else ++ iowrite32(val, addr); ++} ++ ++static inline void ftm_counter_enable(void __iomem *base) ++{ ++ u32 val; ++ ++ /* select and enable counter clock source */ ++ val = ftm_readl(base + FTM_SC); ++ val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK); ++ val |= (FTM_SC_PS_MASK | FTM_SC_CLK(FTM_SC_CLKS_FIXED_FREQ)); ++ ftm_writel(val, base + FTM_SC); ++} ++ ++static inline void ftm_counter_disable(void __iomem *base) ++{ ++ u32 val; ++ ++ /* disable counter clock source */ ++ val = ftm_readl(base + FTM_SC); ++ val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK); ++ ftm_writel(val, base + FTM_SC); ++} ++ ++static inline void ftm_irq_acknowledge(void __iomem *base) ++{ ++ unsigned int timeout = 100; ++ ++ while ((FTM_SC_TOF & ftm_readl(base + FTM_SC)) && timeout--) ++ ftm_writel(ftm_readl(base + FTM_SC) & (~FTM_SC_TOF), ++ base + FTM_SC); ++} ++ ++static inline void ftm_irq_enable(void __iomem *base) ++{ ++ u32 val; ++ ++ val = ftm_readl(base + FTM_SC); ++ val |= FTM_SC_TOIE; ++ ftm_writel(val, base + FTM_SC); ++} ++ ++static inline void ftm_irq_disable(void __iomem *base) ++{ ++ u32 val; ++ ++ val = ftm_readl(base + FTM_SC); ++ val &= ~FTM_SC_TOIE; ++ ftm_writel(val, base + FTM_SC); ++} ++ ++static inline void ftm_reset_counter(void __iomem *base) ++{ ++ /* ++ * The CNT 
register contains the FTM counter value.
++	 * Reset clears the CNT register. Writing any value to COUNT
++	 * updates the counter with its initial value, CNTIN.
++	 */
++	ftm_writel(0x00, base + FTM_CNT);
++}
++
++static u32 time_to_cycle(unsigned long time)
++{
++	u32 cycle;
++
++	cycle = time * alarm_freq;
++	if (cycle > MAX_COUNT_VAL) {
++		pr_err("Out of alarm range.\n");
++		cycle = 0;
++	}
++
++	return cycle;
++}
++
++static u32 cycle_to_time(u32 cycle)
++{
++	return cycle / alarm_freq + 1;
++}
++
++static void ftm_clean_alarm(void)
++{
++	ftm_counter_disable(ftm1_base);
++
++	ftm_writel(0x00, ftm1_base + FTM_CNTIN);
++	ftm_writel(~0U, ftm1_base + FTM_MOD);
++
++	ftm_reset_counter(ftm1_base);
++}
++
++static int ftm_set_alarm(u64 cycle)
++{
++	ftm_irq_disable(ftm1_base);
++
++	/*
++	 * The counter increments until the value of MOD is reached,
++	 * at which point the counter is reloaded with the value of CNTIN.
++	 * The TOF (the overflow flag) bit is set when the FTM counter
++	 * changes from MOD to CNTIN. So we should use cycle - 1.
++	 */
++	ftm_writel(cycle - 1, ftm1_base + FTM_MOD);
++
++	ftm_counter_enable(ftm1_base);
++
++	ftm_irq_enable(ftm1_base);
++
++	return 0;
++}
++
++static irqreturn_t ftm_alarm_interrupt(int irq, void *dev_id)
++{
++	ftm_irq_acknowledge(ftm1_base);
++	ftm_irq_disable(ftm1_base);
++	ftm_clean_alarm();
++
++	return IRQ_HANDLED;
++}
++
++static ssize_t ftm_alarm_show(struct device *dev,
++			      struct device_attribute *attr,
++			      char *buf)
++{
++	u32 count, val;
++
++	count = ftm_readl(ftm1_base + FTM_MOD);
++	val = ftm_readl(ftm1_base + FTM_CNT);
++	val = (count & MAX_COUNT_VAL) - val;
++	val = cycle_to_time(val);
++
++	return sprintf(buf, "%u\n", val);
++}
++
++static ssize_t ftm_alarm_store(struct device *dev,
++			       struct device_attribute *attr,
++			       const char *buf, size_t count)
++{
++	u32 cycle;
++	unsigned long time;
++
++	if (kstrtoul(buf, 0, &time))
++		return -EINVAL;
++
++	ftm_clean_alarm();
++
++	cycle = time_to_cycle(time);
++	if (!cycle)
++		return -EINVAL;
++
++	ftm_set_alarm(cycle);
++
++	return count;
++}
++
++static struct device_attribute ftm_alarm_attributes = __ATTR(ftm_alarm, 0644,
++					ftm_alarm_show, ftm_alarm_store);
++
++static int ftm_alarm_probe(struct platform_device *pdev)
++{
++	struct device_node *np = pdev->dev.of_node;
++	struct resource *r;
++	int irq;
++	int ret;
++	u32 ippdexpcr;
++
++	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!r)
++		return -ENODEV;
++
++	ftm1_base = devm_ioremap_resource(&pdev->dev, r);
++	if (IS_ERR(ftm1_base))
++		return PTR_ERR(ftm1_base);
++
++	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "FlexTimer1");
++	if (r) {
++		rcpm_ftm_addr = devm_ioremap_resource(&pdev->dev, r);
++		if (IS_ERR(rcpm_ftm_addr))
++			return PTR_ERR(rcpm_ftm_addr);
++		ippdexpcr = ioread32be(rcpm_ftm_addr);
++		ippdexpcr |= 0x20000;
++		iowrite32be(ippdexpcr, rcpm_ftm_addr);
++	}
++
++	irq = irq_of_parse_and_map(np, 0);
++	if (irq <= 0) {
++		pr_err("ftm: unable to get IRQ from DT, %d\n", irq);
++		return -EINVAL;
++	}
++
++	big_endian = of_property_read_bool(np, "big-endian");
++
++	ret = devm_request_irq(&pdev->dev, irq, ftm_alarm_interrupt,
++			       IRQF_NO_SUSPEND, dev_name(&pdev->dev), NULL);
++	if (ret < 0) {
++		dev_err(&pdev->dev, "failed to request irq\n");
++		return ret;
++	}
++
++	ret = device_create_file(&pdev->dev, &ftm_alarm_attributes);
++	if (ret) {
++		dev_err(&pdev->dev, "failed to create sysfs file.\n");
++		return ret;
++	}
++
++	alarm_freq = (u32)FIXED_FREQ_CLK / (u32)MAX_FREQ_DIV;
++
++	ftm_clean_alarm();
++
++	
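/*
++	 * With the 32 kHz fixed clock and the maximum prescaler of 128,
++	 * alarm_freq is 32000 / 128 = 250 ticks per second, so the
++	 * 16-bit counter caps the alarm at about 65535 / 250 = 262 s.
++	 */
++	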
device_init_wakeup(&pdev->dev, true);
++
++	return ret;
++}
++
++static const struct of_device_id ftm_alarm_match[] = {
++	{ .compatible = "fsl,ftm-alarm", },
++	{ .compatible = "fsl,ftm-timer", },
++	{ },
++};
++
++static struct platform_driver ftm_alarm_driver = {
++	.probe		= ftm_alarm_probe,
++	.driver		= {
++		.name	= "ftm-alarm",
++		.owner	= THIS_MODULE,
++		.of_match_table = ftm_alarm_match,
++	},
++};
++
++static int __init ftm_alarm_init(void)
++{
++	return platform_driver_register(&ftm_alarm_driver);
++}
++device_initcall(ftm_alarm_init);
diff --git a/target/linux/layerscape/patches-4.9/807-gpu-support-layerscape.patch b/target/linux/layerscape/patches-4.9/807-gpu-support-layerscape.patch
new file mode 100644
index 000000000..6c81b0044
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/807-gpu-support-layerscape.patch
@@ -0,0 +1,68 @@
+From 4278a546526094dd57bfa3cf7ae2bf34092246db Mon Sep 17 00:00:00 2001
+From: Yangbo Lu
+Date: Mon, 25 Sep 2017 12:10:01 +0800
+Subject: [PATCH] gpu: support layerscape
+
+This is an integrated patch for layerscape dcu support.
+
+Signed-off-by: Alison Wang
+Signed-off-by: Yangbo Lu
+---
+ drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
++++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+@@ -225,7 +225,6 @@ static int fsl_dcu_drm_pm_suspend(struct
+ 	if (!fsl_dev)
+ 		return 0;
+ 
+-	disable_irq(fsl_dev->irq);
+ 	drm_kms_helper_poll_disable(fsl_dev->drm);
+ 
+ 	console_lock();
+@@ -243,6 +242,8 @@ static int fsl_dcu_drm_pm_suspend(struct
+ 		return PTR_ERR(fsl_dev->state);
+ 	}
+ 
++	disable_irq(fsl_dev->irq);
++
+ 	clk_disable_unprepare(fsl_dev->pix_clk);
+ 	clk_disable_unprepare(fsl_dev->clk);
+ 
+@@ -263,6 +264,14 @@ static int fsl_dcu_drm_pm_resume(struct
+ 		return ret;
+ 	}
+ 
++	ret = clk_prepare_enable(fsl_dev->pix_clk);
++	if (ret < 0) {
++		dev_err(dev, "failed to enable dcu pix clk\n");
++		return ret;
++	}
++
++	enable_irq(fsl_dev->irq);
++
+ 	if (fsl_dev->tcon)
+ 		fsl_tcon_bypass_enable(fsl_dev->tcon);
+ 	fsl_dcu_drm_init_planes(fsl_dev->drm);
+@@ -273,7 +282,6 @@ static int fsl_dcu_drm_pm_resume(struct
+ 	console_unlock();
+ 
+ 	drm_kms_helper_poll_enable(fsl_dev->drm);
+-	enable_irq(fsl_dev->irq);
+ 
+ 	return 0;
+ }
+@@ -389,6 +397,12 @@ static int fsl_dcu_drm_probe(struct plat
+ 		goto disable_clk;
+ 	}
+ 
++	ret = clk_prepare_enable(fsl_dev->pix_clk);
++	if (ret < 0) {
++		dev_err(dev, "failed to enable dcu pix clk\n");
++		return ret;
++	}
++
+ 	fsl_dev->tcon = fsl_tcon_init(dev);
+ 
+ 	drm = drm_dev_alloc(driver, dev);
diff --git a/target/linux/layerscape/patches-4.9/808-guts-support-layerscape.patch b/target/linux/layerscape/patches-4.9/808-guts-support-layerscape.patch
new file mode 100644
index 000000000..ffda8a6cf
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/808-guts-support-layerscape.patch
@@ -0,0 +1,452 @@
+From d51e307e4ecf51832c9e3bc30acb5dbd559d5f4d Mon Sep 17 00:00:00 2001
+From: Yangbo Lu
+Date: Mon, 25 Sep 2017 12:19:34 +0800
+Subject: [PATCH] guts: support layerscape
+
+This is an integrated patch for layerscape guts support.
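+
+Die identification is a masked compare against the SVR register; a
+minimal sketch of the idea behind fsl_soc_die_match(), using the
+LS1043A table entry from this patch as the example:
+
+	u32 svr = fsl_guts_get_svr();
+	/* LS1043A entry: .svr = 0x87920000, .mask = 0xffff0000 */
+	bool is_ls1043a = (svr & 0xffff0000) == 0x87920000;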
+ +Signed-off-by: Roy Pledge +Signed-off-by: Geert Uytterhoeven +Signed-off-by: Amrita Kumari +Signed-off-by: Yangbo Lu +--- + drivers/base/soc.c | 12 ++- + drivers/soc/fsl/guts.c | 238 +++++++++++++++++++++++++++++++++++++++++++++++ + include/linux/fsl/guts.h | 125 +++++++++++++++---------- + 3 files changed, 323 insertions(+), 52 deletions(-) + create mode 100644 drivers/soc/fsl/guts.c + +--- a/drivers/base/soc.c ++++ b/drivers/base/soc.c +@@ -167,19 +167,23 @@ static int soc_device_match_one(struct d + const struct soc_device_attribute *match = arg; + + if (match->machine && +- !glob_match(match->machine, soc_dev->attr->machine)) ++ (!soc_dev->attr->machine || ++ !glob_match(match->machine, soc_dev->attr->machine))) + return 0; + + if (match->family && +- !glob_match(match->family, soc_dev->attr->family)) ++ (!soc_dev->attr->family || ++ !glob_match(match->family, soc_dev->attr->family))) + return 0; + + if (match->revision && +- !glob_match(match->revision, soc_dev->attr->revision)) ++ (!soc_dev->attr->revision || ++ !glob_match(match->revision, soc_dev->attr->revision))) + return 0; + + if (match->soc_id && +- !glob_match(match->soc_id, soc_dev->attr->soc_id)) ++ (!soc_dev->attr->soc_id || ++ !glob_match(match->soc_id, soc_dev->attr->soc_id))) + return 0; + + return 1; +--- /dev/null ++++ b/drivers/soc/fsl/guts.c +@@ -0,0 +1,238 @@ ++/* ++ * Freescale QorIQ Platforms GUTS Driver ++ * ++ * Copyright (C) 2016 Freescale Semiconductor, Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct guts { ++ struct ccsr_guts __iomem *regs; ++ bool little_endian; ++}; ++ ++struct fsl_soc_die_attr { ++ char *die; ++ u32 svr; ++ u32 mask; ++}; ++ ++static struct guts *guts; ++static struct soc_device_attribute soc_dev_attr; ++static struct soc_device *soc_dev; ++ ++ ++/* SoC die attribute definition for QorIQ platform */ ++static const struct fsl_soc_die_attr fsl_soc_die[] = { ++ /* ++ * Power Architecture-based SoCs T Series ++ */ ++ ++ /* Die: T4240, SoC: T4240/T4160/T4080 */ ++ { .die = "T4240", ++ .svr = 0x82400000, ++ .mask = 0xfff00000, ++ }, ++ /* Die: T1040, SoC: T1040/T1020/T1042/T1022 */ ++ { .die = "T1040", ++ .svr = 0x85200000, ++ .mask = 0xfff00000, ++ }, ++ /* Die: T2080, SoC: T2080/T2081 */ ++ { .die = "T2080", ++ .svr = 0x85300000, ++ .mask = 0xfff00000, ++ }, ++ /* Die: T1024, SoC: T1024/T1014/T1023/T1013 */ ++ { .die = "T1024", ++ .svr = 0x85400000, ++ .mask = 0xfff00000, ++ }, ++ ++ /* ++ * ARM-based SoCs LS Series ++ */ ++ ++ /* Die: LS1043A, SoC: LS1043A/LS1023A */ ++ { .die = "LS1043A", ++ .svr = 0x87920000, ++ .mask = 0xffff0000, ++ }, ++ /* Die: LS2080A, SoC: LS2080A/LS2040A/LS2085A */ ++ { .die = "LS2080A", ++ .svr = 0x87010000, ++ .mask = 0xff3f0000, ++ }, ++ /* Die: LS1088A, SoC: LS1088A/LS1048A/LS1084A/LS1044A */ ++ { .die = "LS1088A", ++ .svr = 0x87030000, ++ .mask = 0xff3f0000, ++ }, ++ /* Die: LS1012A, SoC: LS1012A */ ++ { .die = "LS1012A", ++ .svr = 0x87040000, ++ .mask = 0xffff0000, ++ }, ++ /* Die: LS1046A, SoC: LS1046A/LS1026A */ ++ { .die = "LS1046A", ++ .svr = 0x87070000, ++ .mask = 0xffff0000, ++ }, ++ /* Die: LS2088A, SoC: LS2088A/LS2048A/LS2084A/LS2044A */ ++ { .die = "LS2088A", ++ .svr = 0x87090000, ++ .mask = 0xff3f0000, ++ }, ++ /* Die: 
LS1021A, SoC: LS1021A/LS1020A/LS1022A */ ++ { .die = "LS1021A", ++ .svr = 0x87000000, ++ .mask = 0xfff70000, ++ }, ++ { }, ++}; ++ ++static const struct fsl_soc_die_attr *fsl_soc_die_match( ++ u32 svr, const struct fsl_soc_die_attr *matches) ++{ ++ while (matches->svr) { ++ if (matches->svr == (svr & matches->mask)) ++ return matches; ++ matches++; ++ }; ++ return NULL; ++} ++ ++u32 fsl_guts_get_svr(void) ++{ ++ u32 svr = 0; ++ ++ if (!guts || !guts->regs) ++ return svr; ++ ++ if (guts->little_endian) ++ svr = ioread32(&guts->regs->svr); ++ else ++ svr = ioread32be(&guts->regs->svr); ++ ++ return svr; ++} ++EXPORT_SYMBOL(fsl_guts_get_svr); ++ ++static int fsl_guts_probe(struct platform_device *pdev) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ struct device *dev = &pdev->dev; ++ struct resource *res; ++ const struct fsl_soc_die_attr *soc_die; ++ const char *machine; ++ u32 svr; ++ ++ /* Initialize guts */ ++ guts = devm_kzalloc(dev, sizeof(*guts), GFP_KERNEL); ++ if (!guts) ++ return -ENOMEM; ++ ++ guts->little_endian = of_property_read_bool(np, "little-endian"); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ guts->regs = devm_ioremap_resource(dev, res); ++ if (IS_ERR(guts->regs)) ++ return PTR_ERR(guts->regs); ++ ++ /* Register soc device */ ++ machine = of_flat_dt_get_machine_name(); ++ if (machine) ++ soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL); ++ ++ svr = fsl_guts_get_svr(); ++ soc_die = fsl_soc_die_match(svr, fsl_soc_die); ++ if (soc_die) { ++ soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL, ++ "QorIQ %s", soc_die->die); ++ } else { ++ soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL, "QorIQ"); ++ } ++ soc_dev_attr.soc_id = devm_kasprintf(dev, GFP_KERNEL, ++ "svr:0x%08x", svr); ++ soc_dev_attr.revision = devm_kasprintf(dev, GFP_KERNEL, "%d.%d", ++ (svr >> 4) & 0xf, svr & 0xf); ++ ++ soc_dev = soc_device_register(&soc_dev_attr); ++ if (IS_ERR(soc_dev)) ++ return PTR_ERR(soc_dev); ++ ++ pr_info("Machine: %s\n", soc_dev_attr.machine); ++ pr_info("SoC family: %s\n", soc_dev_attr.family); ++ pr_info("SoC ID: %s, Revision: %s\n", ++ soc_dev_attr.soc_id, soc_dev_attr.revision); ++ return 0; ++} ++ ++static int fsl_guts_remove(struct platform_device *dev) ++{ ++ soc_device_unregister(soc_dev); ++ return 0; ++} ++ ++/* ++ * Table for matching compatible strings, for device tree ++ * guts node, for Freescale QorIQ SOCs. 
++ */ ++static const struct of_device_id fsl_guts_of_match[] = { ++ { .compatible = "fsl,qoriq-device-config-1.0", }, ++ { .compatible = "fsl,qoriq-device-config-2.0", }, ++ { .compatible = "fsl,p1010-guts", }, ++ { .compatible = "fsl,p1020-guts", }, ++ { .compatible = "fsl,p1021-guts", }, ++ { .compatible = "fsl,p1022-guts", }, ++ { .compatible = "fsl,p1023-guts", }, ++ { .compatible = "fsl,p2020-guts", }, ++ { .compatible = "fsl,bsc9131-guts", }, ++ { .compatible = "fsl,bsc9132-guts", }, ++ { .compatible = "fsl,mpc8536-guts", }, ++ { .compatible = "fsl,mpc8544-guts", }, ++ { .compatible = "fsl,mpc8548-guts", }, ++ { .compatible = "fsl,mpc8568-guts", }, ++ { .compatible = "fsl,mpc8569-guts", }, ++ { .compatible = "fsl,mpc8572-guts", }, ++ { .compatible = "fsl,ls1021a-dcfg", }, ++ { .compatible = "fsl,ls1043a-dcfg", }, ++ { .compatible = "fsl,ls1046a-dcfg", }, ++ { .compatible = "fsl,ls2080a-dcfg", }, ++ { .compatible = "fsl,ls1088a-dcfg", }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, fsl_guts_of_match); ++ ++static struct platform_driver fsl_guts_driver = { ++ .driver = { ++ .name = "fsl-guts", ++ .of_match_table = fsl_guts_of_match, ++ }, ++ .probe = fsl_guts_probe, ++ .remove = fsl_guts_remove, ++}; ++ ++static int __init fsl_guts_init(void) ++{ ++ return platform_driver_register(&fsl_guts_driver); ++} ++core_initcall(fsl_guts_init); ++ ++static void __exit fsl_guts_exit(void) ++{ ++ platform_driver_unregister(&fsl_guts_driver); ++} ++module_exit(fsl_guts_exit); +--- a/include/linux/fsl/guts.h ++++ b/include/linux/fsl/guts.h +@@ -29,83 +29,112 @@ + * #ifdefs. + */ + struct ccsr_guts { +- __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ +- __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ +- __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */ +- __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ +- __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ +- __be32 pordevsr2; /* 0x.0014 - POR device status register 2 */ ++ u32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ ++ u32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ ++ u32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and ++ * Control Register ++ */ ++ u32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ ++ u32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ ++ u32 pordevsr2; /* 0x.0014 - POR device status register 2 */ + u8 res018[0x20 - 0x18]; +- __be32 porcir; /* 0x.0020 - POR Configuration Information Register */ ++ u32 porcir; /* 0x.0020 - POR Configuration Information ++ * Register ++ */ + u8 res024[0x30 - 0x24]; +- __be32 gpiocr; /* 0x.0030 - GPIO Control Register */ ++ u32 gpiocr; /* 0x.0030 - GPIO Control Register */ + u8 res034[0x40 - 0x34]; +- __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */ ++ u32 gpoutdr; /* 0x.0040 - General-Purpose Output Data ++ * Register ++ */ + u8 res044[0x50 - 0x44]; +- __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */ ++ u32 gpindr; /* 0x.0050 - General-Purpose Input Data ++ * Register ++ */ + u8 res054[0x60 - 0x54]; +- __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */ +- __be32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */ +- __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ ++ u32 pmuxcr; /* 0x.0060 - Alternate Function Signal ++ * Multiplex Control ++ */ ++ u32 pmuxcr2; /* 0x.0064 - Alternate function signal ++ * multiplex control 2 ++ */ ++ u32 dmuxcr; /* 0x.0068 - DMA Mux Control Register 
*/ + u8 res06c[0x70 - 0x6c]; +- __be32 devdisr; /* 0x.0070 - Device Disable Control */ ++ u32 devdisr; /* 0x.0070 - Device Disable Control */ + #define CCSR_GUTS_DEVDISR_TB1 0x00001000 + #define CCSR_GUTS_DEVDISR_TB0 0x00004000 +- __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ ++ u32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ + u8 res078[0x7c - 0x78]; +- __be32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */ +- __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */ +- __be32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */ +- __be32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */ +- __be32 pmcdr; /* 0x.008c - 4Power management clock disable register */ +- __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ +- __be32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */ +- __be32 ectrstcr; /* 0x.0098 - Exception reset control register */ +- __be32 autorstsr; /* 0x.009c - Automatic reset status register */ +- __be32 pvr; /* 0x.00a0 - Processor Version Register */ +- __be32 svr; /* 0x.00a4 - System Version Register */ ++ u32 pmjcr; /* 0x.007c - 4 Power Management Jog Control ++ * Register ++ */ ++ u32 powmgtcsr; /* 0x.0080 - Power Management Status and ++ * Control Register ++ */ ++ u32 pmrccr; /* 0x.0084 - Power Management Reset Counter ++ * Configuration Register ++ */ ++ u32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter ++ * Configuration Register ++ */ ++ u32 pmcdr; /* 0x.008c - 4Power management clock disable ++ * register ++ */ ++ u32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ ++ u32 rstrscr; /* 0x.0094 - Reset Request Status and ++ * Control Register ++ */ ++ u32 ectrstcr; /* 0x.0098 - Exception reset control register */ ++ u32 autorstsr; /* 0x.009c - Automatic reset status register */ ++ u32 pvr; /* 0x.00a0 - Processor Version Register */ ++ u32 svr; /* 0x.00a4 - System Version Register */ + u8 res0a8[0xb0 - 0xa8]; +- __be32 rstcr; /* 0x.00b0 - Reset Control Register */ ++ u32 rstcr; /* 0x.00b0 - Reset Control Register */ + u8 res0b4[0xc0 - 0xb4]; +- __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register ++ u32 iovselsr; /* 0x.00c0 - I/O voltage select status register + Called 'elbcvselcr' on 86xx SOCs */ + u8 res0c4[0x100 - 0xc4]; +- __be32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers ++ u32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers + There are 16 registers */ + u8 res140[0x224 - 0x140]; +- __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */ +- __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */ ++ u32 iodelay1; /* 0x.0224 - IO delay control register 1 */ ++ u32 iodelay2; /* 0x.0228 - IO delay control register 2 */ + u8 res22c[0x604 - 0x22c]; +- __be32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ ++ u32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ + u8 res608[0x800 - 0x608]; +- __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */ ++ u32 clkdvdr; /* 0x.0800 - Clock Divide Register */ + u8 res804[0x900 - 0x804]; +- __be32 ircr; /* 0x.0900 - Infrared Control Register */ ++ u32 ircr; /* 0x.0900 - Infrared Control Register */ + u8 res904[0x908 - 0x904]; +- __be32 dmacr; /* 0x.0908 - DMA Control Register */ ++ u32 dmacr; /* 0x.0908 - DMA Control Register */ + u8 res90c[0x914 - 0x90c]; +- __be32 elbccr; /* 0x.0914 - eLBC Control Register */ ++ u32 elbccr; /* 0x.0914 - eLBC Control Register */ + u8 res918[0xb20 - 0x918]; +- __be32 ddr1clkdr; 
/* 0x.0b20 - DDR1 Clock Disable Register */
+-	__be32	ddr2clkdr;	/* 0x.0b24 - DDR2 Clock Disable Register */
+-	__be32	ddrclkdr;	/* 0x.0b28 - DDR Clock Disable Register */
++	u32	ddr1clkdr;	/* 0x.0b20 - DDR1 Clock Disable Register */
++	u32	ddr2clkdr;	/* 0x.0b24 - DDR2 Clock Disable Register */
++	u32	ddrclkdr;	/* 0x.0b28 - DDR Clock Disable Register */
+ 	u8	resb2c[0xe00 - 0xb2c];
+-	__be32	clkocr;		/* 0x.0e00 - Clock Out Select Register */
++	u32	clkocr;		/* 0x.0e00 - Clock Out Select Register */
+ 	u8	rese04[0xe10 - 0xe04];
+-	__be32	ddrdllcr;	/* 0x.0e10 - DDR DLL Control Register */
++	u32	ddrdllcr;	/* 0x.0e10 - DDR DLL Control Register */
+ 	u8	rese14[0xe20 - 0xe14];
+-	__be32	lbcdllcr;	/* 0x.0e20 - LBC DLL Control Register */
+-	__be32	cpfor;		/* 0x.0e24 - L2 charge pump fuse override register */
++	u32	lbcdllcr;	/* 0x.0e20 - LBC DLL Control Register */
++	u32	cpfor;		/* 0x.0e24 - L2 charge pump fuse override
++				 * register
++				 */
+ 	u8	rese28[0xf04 - 0xe28];
+-	__be32	srds1cr0;	/* 0x.0f04 - SerDes1 Control Register 0 */
+-	__be32	srds1cr1;	/* 0x.0f08 - SerDes1 Control Register 0 */
++	u32	srds1cr0;	/* 0x.0f04 - SerDes1 Control Register 0 */
++	u32	srds1cr1;	/* 0x.0f08 - SerDes1 Control Register 0 */
+ 	u8	resf0c[0xf2c - 0xf0c];
+-	__be32	itcr;		/* 0x.0f2c - Internal transaction control register */
++	u32	itcr;		/* 0x.0f2c - Internal transaction control
++				 * register
++				 */
+ 	u8	resf30[0xf40 - 0xf30];
+-	__be32	srds2cr0;	/* 0x.0f40 - SerDes2 Control Register 0 */
+-	__be32	srds2cr1;	/* 0x.0f44 - SerDes2 Control Register 0 */
++	u32	srds2cr0;	/* 0x.0f40 - SerDes2 Control Register 0 */
++	u32	srds2cr1;	/* 0x.0f44 - SerDes2 Control Register 0 */
+ } __attribute__ ((packed));
+ 
++u32 fsl_guts_get_svr(void);
+ 
+ /* Alternate function signal multiplex control */
+ #define MPC85xx_PMUXCR_QE(x)	(0x8000 >> (x))
diff --git a/target/linux/layerscape/patches-4.9/809-i2c-support-layerscape.patch b/target/linux/layerscape/patches-4.9/809-i2c-support-layerscape.patch
new file mode 100644
index 000000000..edb61b5c7
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/809-i2c-support-layerscape.patch
@@ -0,0 +1,133 @@
+From 3c5032fe34f1af50e9e5fe58d40bf93c1717302f Mon Sep 17 00:00:00 2001
+From: Yangbo Lu
+Date: Mon, 25 Sep 2017 12:19:53 +0800
+Subject: [PATCH] i2c: support layerscape
+
+This is an integrated patch for layerscape i2c support.
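+
+For the pca954x change, boards whose mux inputs lack pull-ups can ask
+for the mux to stay enabled when idle. A minimal sketch of the decision
+this patch adds (names are from the patch; a "park" value of 0 really
+disables the mux):
+
+	u8 park = of_property_read_bool(np, "i2c-mux-never-disable") &&
+			chip->muxtype == pca954x_ismux ? chip->enable : 0;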
+
+Signed-off-by: Zhang Ying-22455
+Signed-off-by: Priyanka Jain
+Signed-off-by: Yangbo Lu
+---
+ drivers/i2c/busses/i2c-imx.c        | 10 ++++++++-
+ drivers/i2c/muxes/i2c-mux-pca954x.c | 43 +++++++++++++++++++++++++++++++++
+ 2 files changed, 52 insertions(+), 1 deletion(-)
+
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -889,6 +889,14 @@ static int i2c_imx_xfer(struct i2c_adapt
+ 
+ 	dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
+ 
++	/*
++	 * workaround for ERR010027: ensure that the I2C BUS is idle
++	 * before switching to master mode and attempting a Start cycle
++	 */
++	result = i2c_imx_bus_busy(i2c_imx, 0);
++	if (result)
++		goto out;
++
+ 	result = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
+ 	if (result < 0)
+ 		goto out;
+@@ -1100,7 +1108,7 @@ static int i2c_imx_probe(struct platform
+ 	}
+ 
+ 	/* Request IRQ */
+-	ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0,
++	ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED,
+ 			       pdev->name, i2c_imx);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "can't claim irq %d\n", irq);
+--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
++++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
+@@ -74,6 +74,7 @@ struct pca954x {
+ 	u8 last_chan;		/* last register value */
+ 	u8 deselect;
+ 	struct i2c_client *client;
++	u8 disable_mux;		/* do not disable mux if val is not 0 */
+ };
+ 
+ /* Provide specs for the PCA954x types we know about */
+@@ -196,6 +197,13 @@ static int pca954x_deselect_mux(struct i
+ 	if (!(data->deselect & (1 << chan)))
+ 		return 0;
+ 
++#ifdef CONFIG_ARCH_LAYERSCAPE
++	if (data->disable_mux != 0)
++		data->last_chan = data->chip->nchans;
++	else
++		data->last_chan = 0;
++	return pca954x_reg_write(muxc->parent, client, data->disable_mux);
++#endif
+ 	/* Deselect active channel */
+ 	data->last_chan = 0;
+ 	return pca954x_reg_write(muxc->parent, client, data->last_chan);
+@@ -228,6 +236,28 @@ static int pca954x_probe(struct i2c_clie
+ 		return -ENOMEM;
+ 	data = i2c_mux_priv(muxc);
+ 
++#ifdef CONFIG_ARCH_LAYERSCAPE
++	/* The point here is that you must not disable a mux if there
++	 * are no pullups on the input or you mess up the I2C. This
++	 * needs to be put into the DTS really as the kernel cannot
++	 * know this otherwise.
++	 */
++	match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev);
++	if (match)
++		data->chip = of_device_get_match_data(&client->dev);
++	else
++		data->chip = &chips[id->driver_data];
++
++	data->disable_mux = of_node &&
++		of_property_read_bool(of_node, "i2c-mux-never-disable") &&
++		data->chip->muxtype == pca954x_ismux ?
++		data->chip->enable : 0;
++	/* force the first selection */
++	if (data->disable_mux != 0)
++		data->last_chan = data->chip->nchans;
++	else
++		data->last_chan = 0;
++#endif
+ 	i2c_set_clientdata(client, muxc);
+ 	data->client = client;
+ 
+@@ -240,11 +270,16 @@ static int pca954x_probe(struct i2c_clie
+ 	 * that the mux is in fact present. This also
+ 	 * initializes the mux to disconnected state.
+ */ ++#ifdef CONFIG_ARCH_LAYERSCAPE ++ if (i2c_smbus_write_byte(client, data->disable_mux) < 0) { ++#else + if (i2c_smbus_write_byte(client, 0) < 0) { ++#endif + dev_warn(&client->dev, "probe failed\n"); + return -ENODEV; + } + ++#ifndef CONFIG_ARCH_LAYERSCAPE + match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev); + if (match) + data->chip = of_device_get_match_data(&client->dev); +@@ -252,6 +287,7 @@ static int pca954x_probe(struct i2c_clie + data->chip = &chips[id->driver_data]; + + data->last_chan = 0; /* force the first selection */ ++#endif + + idle_disconnect_dt = of_node && + of_property_read_bool(of_node, "i2c-mux-idle-disconnect"); +@@ -312,6 +348,13 @@ static int pca954x_resume(struct device + struct i2c_mux_core *muxc = i2c_get_clientdata(client); + struct pca954x *data = i2c_mux_priv(muxc); + ++#ifdef CONFIG_ARCH_LAYERSCAPE ++ if (data->disable_mux != 0) ++ data->last_chan = data->chip->nchans; ++ else ++ data->last_chan = 0; ++ return i2c_smbus_write_byte(client, data->disable_mux); ++#endif + data->last_chan = 0; + return i2c_smbus_write_byte(client, 0); + } diff --git a/target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch b/target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch new file mode 100644 index 000000000..ec8917361 --- /dev/null +++ b/target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch @@ -0,0 +1,1314 @@ +From f1874c71c855bd8ca8478a622053276f2c61eeca Mon Sep 17 00:00:00 2001 +From: Yangbo Lu +Date: Wed, 27 Sep 2017 10:33:26 +0800 +Subject: [PATCH] iommu: support layerscape + +This is a integrated patch for layerscape smmu support. + +Signed-off-by: Eric Auger +Signed-off-by: Robin Murphy +Signed-off-by: Nipun Gupta +Signed-off-by: Sunil Goutham +Signed-off-by: Yangbo Lu +--- + drivers/iommu/amd_iommu.c | 56 ++++++---- + drivers/iommu/arm-smmu-v3.c | 35 ++++++- + drivers/iommu/arm-smmu.c | 74 ++++++++++--- + drivers/iommu/dma-iommu.c | 242 ++++++++++++++++++++++++++++++++++++------- + drivers/iommu/intel-iommu.c | 92 ++++++++++++---- + drivers/iommu/iommu.c | 191 ++++++++++++++++++++++++++++++++-- + drivers/iommu/mtk_iommu.c | 2 + + drivers/iommu/mtk_iommu_v1.c | 2 + + include/linux/dma-iommu.h | 11 ++ + include/linux/iommu.h | 55 +++++++--- + 10 files changed, 645 insertions(+), 115 deletions(-) + +--- a/drivers/iommu/amd_iommu.c ++++ b/drivers/iommu/amd_iommu.c +@@ -373,6 +373,8 @@ static struct iommu_group *acpihid_devic + + if (!entry->group) + entry->group = generic_device_group(dev); ++ else ++ iommu_group_ref_get(entry->group); + + return entry->group; + } +@@ -3160,9 +3162,10 @@ static bool amd_iommu_capable(enum iommu + return false; + } + +-static void amd_iommu_get_dm_regions(struct device *dev, +- struct list_head *head) ++static void amd_iommu_get_resv_regions(struct device *dev, ++ struct list_head *head) + { ++ struct iommu_resv_region *region; + struct unity_map_entry *entry; + int devid; + +@@ -3171,41 +3174,56 @@ static void amd_iommu_get_dm_regions(str + return; + + list_for_each_entry(entry, &amd_iommu_unity_map, list) { +- struct iommu_dm_region *region; ++ size_t length; ++ int prot = 0; + + if (devid < entry->devid_start || devid > entry->devid_end) + continue; + +- region = kzalloc(sizeof(*region), GFP_KERNEL); ++ length = entry->address_end - entry->address_start; ++ if (entry->prot & IOMMU_PROT_IR) ++ prot |= IOMMU_READ; ++ if (entry->prot & IOMMU_PROT_IW) ++ prot |= IOMMU_WRITE; ++ ++ region = iommu_alloc_resv_region(entry->address_start, ++ length, prot, ++ 
IOMMU_RESV_DIRECT); + if (!region) { + pr_err("Out of memory allocating dm-regions for %s\n", + dev_name(dev)); + return; + } +- +- region->start = entry->address_start; +- region->length = entry->address_end - entry->address_start; +- if (entry->prot & IOMMU_PROT_IR) +- region->prot |= IOMMU_READ; +- if (entry->prot & IOMMU_PROT_IW) +- region->prot |= IOMMU_WRITE; +- + list_add_tail(®ion->list, head); + } ++ ++ region = iommu_alloc_resv_region(MSI_RANGE_START, ++ MSI_RANGE_END - MSI_RANGE_START + 1, ++ 0, IOMMU_RESV_MSI); ++ if (!region) ++ return; ++ list_add_tail(®ion->list, head); ++ ++ region = iommu_alloc_resv_region(HT_RANGE_START, ++ HT_RANGE_END - HT_RANGE_START + 1, ++ 0, IOMMU_RESV_RESERVED); ++ if (!region) ++ return; ++ list_add_tail(®ion->list, head); + } + +-static void amd_iommu_put_dm_regions(struct device *dev, ++static void amd_iommu_put_resv_regions(struct device *dev, + struct list_head *head) + { +- struct iommu_dm_region *entry, *next; ++ struct iommu_resv_region *entry, *next; + + list_for_each_entry_safe(entry, next, head, list) + kfree(entry); + } + +-static void amd_iommu_apply_dm_region(struct device *dev, ++static void amd_iommu_apply_resv_region(struct device *dev, + struct iommu_domain *domain, +- struct iommu_dm_region *region) ++ struct iommu_resv_region *region) + { + struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain)); + unsigned long start, end; +@@ -3229,9 +3247,9 @@ static const struct iommu_ops amd_iommu_ + .add_device = amd_iommu_add_device, + .remove_device = amd_iommu_remove_device, + .device_group = amd_iommu_device_group, +- .get_dm_regions = amd_iommu_get_dm_regions, +- .put_dm_regions = amd_iommu_put_dm_regions, +- .apply_dm_region = amd_iommu_apply_dm_region, ++ .get_resv_regions = amd_iommu_get_resv_regions, ++ .put_resv_regions = amd_iommu_put_resv_regions, ++ .apply_resv_region = amd_iommu_apply_resv_region, + .pgsize_bitmap = AMD_IOMMU_PGSIZES, + }; + +--- a/drivers/iommu/arm-smmu-v3.c ++++ b/drivers/iommu/arm-smmu-v3.c +@@ -410,6 +410,9 @@ + /* High-level queue structures */ + #define ARM_SMMU_POLL_TIMEOUT_US 100 + ++#define MSI_IOVA_BASE 0x8000000 ++#define MSI_IOVA_LENGTH 0x100000 ++ + static bool disable_bypass; + module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO); + MODULE_PARM_DESC(disable_bypass, +@@ -1370,8 +1373,6 @@ static bool arm_smmu_capable(enum iommu_ + switch (cap) { + case IOMMU_CAP_CACHE_COHERENCY: + return true; +- case IOMMU_CAP_INTR_REMAP: +- return true; /* MSIs are just memory writes */ + case IOMMU_CAP_NOEXEC: + return true; + default: +@@ -1709,6 +1710,9 @@ arm_smmu_iova_to_phys(struct iommu_domai + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; + ++ if (domain->type == IOMMU_DOMAIN_IDENTITY) ++ return iova; ++ + if (!ops) + return 0; + +@@ -1880,6 +1884,31 @@ static int arm_smmu_of_xlate(struct devi + return iommu_fwspec_add_ids(dev, args->args, 1); + } + ++static void arm_smmu_get_resv_regions(struct device *dev, ++ struct list_head *head) ++{ ++ struct iommu_resv_region *region; ++ int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; ++ ++ region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, ++ prot, IOMMU_RESV_SW_MSI); ++ if (!region) ++ return; ++ ++ list_add_tail(®ion->list, head); ++ ++ iommu_dma_get_resv_regions(dev, head); ++} ++ ++static void arm_smmu_put_resv_regions(struct device *dev, ++ struct list_head *head) ++{ ++ struct iommu_resv_region *entry, *next; ++ ++ 
list_for_each_entry_safe(entry, next, head, list) ++ kfree(entry); ++} ++ + static struct iommu_ops arm_smmu_ops = { + .capable = arm_smmu_capable, + .domain_alloc = arm_smmu_domain_alloc, +@@ -1895,6 +1924,8 @@ static struct iommu_ops arm_smmu_ops = { + .domain_get_attr = arm_smmu_domain_get_attr, + .domain_set_attr = arm_smmu_domain_set_attr, + .of_xlate = arm_smmu_of_xlate, ++ .get_resv_regions = arm_smmu_get_resv_regions, ++ .put_resv_regions = arm_smmu_put_resv_regions, + .pgsize_bitmap = -1UL, /* Restricted during device attach */ + }; + +--- a/drivers/iommu/arm-smmu.c ++++ b/drivers/iommu/arm-smmu.c +@@ -49,6 +49,7 @@ + #include + + #include ++#include "../staging/fsl-mc/include/mc-bus.h" + + #include "io-pgtable.h" + +@@ -247,6 +248,7 @@ enum arm_smmu_s2cr_privcfg { + #define ARM_MMU500_ACTLR_CPRE (1 << 1) + + #define ARM_MMU500_ACR_CACHE_LOCK (1 << 26) ++#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8) + + #define CB_PAR_F (1 << 0) + +@@ -278,6 +280,9 @@ enum arm_smmu_s2cr_privcfg { + + #define FSYNR0_WNR (1 << 4) + ++#define MSI_IOVA_BASE 0x8000000 ++#define MSI_IOVA_LENGTH 0x100000 ++ + static int force_stage; + module_param(force_stage, int, S_IRUGO); + MODULE_PARM_DESC(force_stage, +@@ -1343,6 +1348,9 @@ static phys_addr_t arm_smmu_iova_to_phys + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; + ++ if (domain->type == IOMMU_DOMAIN_IDENTITY) ++ return iova; ++ + if (!ops) + return 0; + +@@ -1368,8 +1376,6 @@ static bool arm_smmu_capable(enum iommu_ + * requests. + */ + return true; +- case IOMMU_CAP_INTR_REMAP: +- return true; /* MSIs are just memory writes */ + case IOMMU_CAP_NOEXEC: + return true; + default: +@@ -1478,10 +1484,12 @@ static struct iommu_group *arm_smmu_devi + } + + if (group) +- return group; ++ return iommu_group_ref_get(group); + + if (dev_is_pci(dev)) + group = pci_device_group(dev); ++ else if (dev_is_fsl_mc(dev)) ++ group = fsl_mc_device_group(dev); + else + group = generic_device_group(dev); + +@@ -1534,17 +1542,44 @@ out_unlock: + + static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) + { +- u32 fwid = 0; ++ u32 mask, fwid = 0; + + if (args->args_count > 0) + fwid |= (u16)args->args[0]; + + if (args->args_count > 1) + fwid |= (u16)args->args[1] << SMR_MASK_SHIFT; ++ else if (!of_property_read_u32(args->np, "stream-match-mask", &mask)) ++ fwid |= (u16)mask << SMR_MASK_SHIFT; + + return iommu_fwspec_add_ids(dev, &fwid, 1); + } + ++static void arm_smmu_get_resv_regions(struct device *dev, ++ struct list_head *head) ++{ ++ struct iommu_resv_region *region; ++ int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; ++ ++ region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, ++ prot, IOMMU_RESV_SW_MSI); ++ if (!region) ++ return; ++ ++ list_add_tail(®ion->list, head); ++ ++ iommu_dma_get_resv_regions(dev, head); ++} ++ ++static void arm_smmu_put_resv_regions(struct device *dev, ++ struct list_head *head) ++{ ++ struct iommu_resv_region *entry, *next; ++ ++ list_for_each_entry_safe(entry, next, head, list) ++ kfree(entry); ++} ++ + static struct iommu_ops arm_smmu_ops = { + .capable = arm_smmu_capable, + .domain_alloc = arm_smmu_domain_alloc, +@@ -1560,6 +1595,8 @@ static struct iommu_ops arm_smmu_ops = { + .domain_get_attr = arm_smmu_domain_get_attr, + .domain_set_attr = arm_smmu_domain_set_attr, + .of_xlate = arm_smmu_of_xlate, ++ .get_resv_regions = arm_smmu_get_resv_regions, ++ .put_resv_regions = arm_smmu_put_resv_regions, + .pgsize_bitmap = -1UL, /* 
Restricted during device attach */ + }; + +@@ -1581,16 +1618,22 @@ static void arm_smmu_device_reset(struct + for (i = 0; i < smmu->num_mapping_groups; ++i) + arm_smmu_write_sme(smmu, i); + +- /* +- * Before clearing ARM_MMU500_ACTLR_CPRE, need to +- * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK +- * bit is only present in MMU-500r2 onwards. +- */ +- reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7); +- major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK; +- if ((smmu->model == ARM_MMU500) && (major >= 2)) { ++ if (smmu->model == ARM_MMU500) { ++ /* ++ * Before clearing ARM_MMU500_ACTLR_CPRE, need to ++ * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK ++ * bit is only present in MMU-500r2 onwards. ++ */ ++ reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7); ++ major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK; + reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR); +- reg &= ~ARM_MMU500_ACR_CACHE_LOCK; ++ if (major >= 2) ++ reg &= ~ARM_MMU500_ACR_CACHE_LOCK; ++ /* ++ * Allow unmatched Stream IDs to allocate bypass ++ * TLB entries for reduced latency. ++ */ ++ reg |= ARM_MMU500_ACR_SMTNMB_TLBEN; + writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR); + } + +@@ -2024,6 +2067,11 @@ static int arm_smmu_device_dt_probe(stru + bus_set_iommu(&pci_bus_type, &arm_smmu_ops); + } + #endif ++#ifdef CONFIG_FSL_MC_BUS ++ if (!iommu_present(&fsl_mc_bus_type)) ++ bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops); ++#endif ++ + return 0; + } + +--- a/drivers/iommu/dma-iommu.c ++++ b/drivers/iommu/dma-iommu.c +@@ -37,15 +37,50 @@ struct iommu_dma_msi_page { + phys_addr_t phys; + }; + ++enum iommu_dma_cookie_type { ++ IOMMU_DMA_IOVA_COOKIE, ++ IOMMU_DMA_MSI_COOKIE, ++}; ++ + struct iommu_dma_cookie { +- struct iova_domain iovad; +- struct list_head msi_page_list; +- spinlock_t msi_lock; ++ enum iommu_dma_cookie_type type; ++ union { ++ /* Full allocator for IOMMU_DMA_IOVA_COOKIE */ ++ struct iova_domain iovad; ++ /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */ ++ dma_addr_t msi_iova; ++ }; ++ struct list_head msi_page_list; ++ spinlock_t msi_lock; + }; + ++static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie) ++{ ++ if (cookie->type == IOMMU_DMA_IOVA_COOKIE) ++ return cookie->iovad.granule; ++ return PAGE_SIZE; ++} ++ + static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain) + { +- return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad; ++ struct iommu_dma_cookie *cookie = domain->iova_cookie; ++ ++ if (cookie->type == IOMMU_DMA_IOVA_COOKIE) ++ return &cookie->iovad; ++ return NULL; ++} ++ ++static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type) ++{ ++ struct iommu_dma_cookie *cookie; ++ ++ cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); ++ if (cookie) { ++ spin_lock_init(&cookie->msi_lock); ++ INIT_LIST_HEAD(&cookie->msi_page_list); ++ cookie->type = type; ++ } ++ return cookie; + } + + int iommu_dma_init(void) +@@ -62,25 +97,53 @@ int iommu_dma_init(void) + */ + int iommu_get_dma_cookie(struct iommu_domain *domain) + { ++ if (domain->iova_cookie) ++ return -EEXIST; ++ ++ domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE); ++ if (!domain->iova_cookie) ++ return -ENOMEM; ++ ++ return 0; ++} ++EXPORT_SYMBOL(iommu_get_dma_cookie); ++ ++/** ++ * iommu_get_msi_cookie - Acquire just MSI remapping resources ++ * @domain: IOMMU domain to prepare ++ * @base: Start address of IOVA region for MSI mappings ++ * ++ * Users who manage their own IOVA allocation and do not want DMA API support, ++ * but would still like to take 
advantage of automatic MSI remapping, can use ++ * this to initialise their own domain appropriately. Users should reserve a ++ * contiguous IOVA region, starting at @base, large enough to accommodate the ++ * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address ++ * used by the devices attached to @domain. ++ */ ++int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base) ++{ + struct iommu_dma_cookie *cookie; + ++ if (domain->type != IOMMU_DOMAIN_UNMANAGED) ++ return -EINVAL; ++ + if (domain->iova_cookie) + return -EEXIST; + +- cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); ++ cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE); + if (!cookie) + return -ENOMEM; + +- spin_lock_init(&cookie->msi_lock); +- INIT_LIST_HEAD(&cookie->msi_page_list); ++ cookie->msi_iova = base; + domain->iova_cookie = cookie; + return 0; + } +-EXPORT_SYMBOL(iommu_get_dma_cookie); ++EXPORT_SYMBOL(iommu_get_msi_cookie); + + /** + * iommu_put_dma_cookie - Release a domain's DMA mapping resources +- * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() ++ * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or ++ * iommu_get_msi_cookie() + * + * IOMMU drivers should normally call this from their domain_free callback. + */ +@@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_d + if (!cookie) + return; + +- if (cookie->iovad.granule) ++ if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) + put_iova_domain(&cookie->iovad); + + list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) { +@@ -104,21 +167,99 @@ void iommu_put_dma_cookie(struct iommu_d + } + EXPORT_SYMBOL(iommu_put_dma_cookie); + +-static void iova_reserve_pci_windows(struct pci_dev *dev, +- struct iova_domain *iovad) ++/** ++ * iommu_dma_get_resv_regions - Reserved region driver helper ++ * @dev: Device from iommu_get_resv_regions() ++ * @list: Reserved region list from iommu_get_resv_regions() ++ * ++ * IOMMU drivers can use this to implement their .get_resv_regions callback ++ * for general non-IOMMU-specific reservations. Currently, this covers host ++ * bridge windows for PCI devices. 
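++ *
++ * A minimal sketch of a driver's .get_resv_regions callback built on this
++ * helper, mirroring the SMMU drivers elsewhere in this patch (the window
++ * base and length are illustrative, not mandated by the API):
++ *
++ *	static void foo_get_resv_regions(struct device *dev,
++ *					 struct list_head *head)
++ *	{
++ *		struct iommu_resv_region *region;
++ *
++ *		region = iommu_alloc_resv_region(0x8000000, 0x100000,
++ *				IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO,
++ *				IOMMU_RESV_SW_MSI);
++ *		if (region)
++ *			list_add_tail(&region->list, head);
++ *		iommu_dma_get_resv_regions(dev, head);
++ *	}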
++ */ ++void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list) + { +- struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); ++ struct pci_host_bridge *bridge; + struct resource_entry *window; +- unsigned long lo, hi; + ++ if (!dev_is_pci(dev)) ++ return; ++ ++ bridge = pci_find_host_bridge(to_pci_dev(dev)->bus); + resource_list_for_each_entry(window, &bridge->windows) { ++ struct iommu_resv_region *region; ++ phys_addr_t start; ++ size_t length; ++ + if (resource_type(window->res) != IORESOURCE_MEM) + continue; + +- lo = iova_pfn(iovad, window->res->start - window->offset); +- hi = iova_pfn(iovad, window->res->end - window->offset); ++ start = window->res->start - window->offset; ++ length = window->res->end - window->res->start + 1; ++ region = iommu_alloc_resv_region(start, length, 0, ++ IOMMU_RESV_RESERVED); ++ if (!region) ++ return; ++ ++ list_add_tail(®ion->list, list); ++ } ++} ++EXPORT_SYMBOL(iommu_dma_get_resv_regions); ++ ++static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie, ++ phys_addr_t start, phys_addr_t end) ++{ ++ struct iova_domain *iovad = &cookie->iovad; ++ struct iommu_dma_msi_page *msi_page; ++ int i, num_pages; ++ ++ start -= iova_offset(iovad, start); ++ num_pages = iova_align(iovad, end - start) >> iova_shift(iovad); ++ ++ msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL); ++ if (!msi_page) ++ return -ENOMEM; ++ ++ for (i = 0; i < num_pages; i++) { ++ msi_page[i].phys = start; ++ msi_page[i].iova = start; ++ INIT_LIST_HEAD(&msi_page[i].list); ++ list_add(&msi_page[i].list, &cookie->msi_page_list); ++ start += iovad->granule; ++ } ++ ++ return 0; ++} ++ ++static int iova_reserve_iommu_regions(struct device *dev, ++ struct iommu_domain *domain) ++{ ++ struct iommu_dma_cookie *cookie = domain->iova_cookie; ++ struct iova_domain *iovad = &cookie->iovad; ++ struct iommu_resv_region *region; ++ LIST_HEAD(resv_regions); ++ int ret = 0; ++ ++ iommu_get_resv_regions(dev, &resv_regions); ++ list_for_each_entry(region, &resv_regions, list) { ++ unsigned long lo, hi; ++ ++ /* We ARE the software that manages these! */ ++ if (region->type == IOMMU_RESV_SW_MSI) ++ continue; ++ ++ lo = iova_pfn(iovad, region->start); ++ hi = iova_pfn(iovad, region->start + region->length - 1); + reserve_iova(iovad, lo, hi); ++ ++ if (region->type == IOMMU_RESV_MSI) ++ ret = cookie_init_hw_msi_region(cookie, region->start, ++ region->start + region->length); ++ if (ret) ++ break; + } ++ iommu_put_resv_regions(dev, &resv_regions); ++ ++ return ret; + } + + /** +@@ -136,11 +277,12 @@ static void iova_reserve_pci_windows(str + int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, + u64 size, struct device *dev) + { +- struct iova_domain *iovad = cookie_iovad(domain); ++ struct iommu_dma_cookie *cookie = domain->iova_cookie; ++ struct iova_domain *iovad = &cookie->iovad; + unsigned long order, base_pfn, end_pfn; + +- if (!iovad) +- return -ENODEV; ++ if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) ++ return -EINVAL; + + /* Use the smallest supported page size for IOVA granularity */ + order = __ffs(domain->pgsize_bitmap); +@@ -160,22 +302,37 @@ int iommu_dma_init_domain(struct iommu_d + end_pfn = min_t(unsigned long, end_pfn, + domain->geometry.aperture_end >> order); + } ++ /* ++ * PCI devices may have larger DMA masks, but still prefer allocating ++ * within a 32-bit mask to avoid DAC addressing. 
Such limitations don't ++ * apply to the typical platform device, so for those we may as well ++ * leave the cache limit at the top of their range to save an rb_last() ++ * traversal on every allocation. ++ */ ++ if (dev && dev_is_pci(dev)) ++ end_pfn &= DMA_BIT_MASK(32) >> order; + +- /* All we can safely do with an existing domain is enlarge it */ ++ /* start_pfn is always nonzero for an already-initialised domain */ + if (iovad->start_pfn) { + if (1UL << order != iovad->granule || +- base_pfn != iovad->start_pfn || +- end_pfn < iovad->dma_32bit_pfn) { ++ base_pfn != iovad->start_pfn) { + pr_warn("Incompatible range for DMA domain\n"); + return -EFAULT; + } +- iovad->dma_32bit_pfn = end_pfn; +- } else { +- init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn); +- if (dev && dev_is_pci(dev)) +- iova_reserve_pci_windows(to_pci_dev(dev), iovad); ++ /* ++ * If we have devices with different DMA masks, move the free ++ * area cache limit down for the benefit of the smaller one. ++ */ ++ iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn); ++ ++ return 0; + } +- return 0; ++ ++ init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn); ++ if (!dev) ++ return 0; ++ ++ return iova_reserve_iommu_regions(dev, domain); + } + EXPORT_SYMBOL(iommu_dma_init_domain); + +@@ -643,11 +800,12 @@ static struct iommu_dma_msi_page *iommu_ + { + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iommu_dma_msi_page *msi_page; +- struct iova_domain *iovad = &cookie->iovad; ++ struct iova_domain *iovad = cookie_iovad(domain); + struct iova *iova; + int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; ++ size_t size = cookie_msi_granule(cookie); + +- msi_addr &= ~(phys_addr_t)iova_mask(iovad); ++ msi_addr &= ~(phys_addr_t)(size - 1); + list_for_each_entry(msi_page, &cookie->msi_page_list, list) + if (msi_page->phys == msi_addr) + return msi_page; +@@ -656,13 +814,18 @@ static struct iommu_dma_msi_page *iommu_ + if (!msi_page) + return NULL; + +- iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev)); +- if (!iova) +- goto out_free_page; +- + msi_page->phys = msi_addr; +- msi_page->iova = iova_dma_addr(iovad, iova); +- if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot)) ++ if (iovad) { ++ iova = __alloc_iova(domain, size, dma_get_mask(dev)); ++ if (!iova) ++ goto out_free_page; ++ msi_page->iova = iova_dma_addr(iovad, iova); ++ } else { ++ msi_page->iova = cookie->msi_iova; ++ cookie->msi_iova += size; ++ } ++ ++ if (iommu_map(domain, msi_page->iova, msi_addr, size, prot)) + goto out_free_iova; + + INIT_LIST_HEAD(&msi_page->list); +@@ -670,7 +833,10 @@ static struct iommu_dma_msi_page *iommu_ + return msi_page; + + out_free_iova: +- __free_iova(iovad, iova); ++ if (iovad) ++ __free_iova(iovad, iova); ++ else ++ cookie->msi_iova -= size; + out_free_page: + kfree(msi_page); + return NULL; +@@ -711,7 +877,7 @@ void iommu_dma_map_msi_msg(int irq, stru + msg->data = ~0U; + } else { + msg->address_hi = upper_32_bits(msi_page->iova); +- msg->address_lo &= iova_mask(&cookie->iovad); ++ msg->address_lo &= cookie_msi_granule(cookie) - 1; + msg->address_lo += lower_32_bits(msi_page->iova); + } + } +--- a/drivers/iommu/intel-iommu.c ++++ b/drivers/iommu/intel-iommu.c +@@ -440,6 +440,7 @@ struct dmar_rmrr_unit { + u64 end_address; /* reserved end address */ + struct dmar_dev_scope *devices; /* target devices */ + int devices_cnt; /* target device count */ ++ struct iommu_resv_region *resv; /* reserved region handle */ + }; + + struct dmar_atsr_unit { +@@ -4250,27 +4251,40 @@ static 
inline void init_iommu_pm_ops(voi + int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg) + { + struct acpi_dmar_reserved_memory *rmrr; ++ int prot = DMA_PTE_READ|DMA_PTE_WRITE; + struct dmar_rmrr_unit *rmrru; ++ size_t length; + + rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL); + if (!rmrru) +- return -ENOMEM; ++ goto out; + + rmrru->hdr = header; + rmrr = (struct acpi_dmar_reserved_memory *)header; + rmrru->base_address = rmrr->base_address; + rmrru->end_address = rmrr->end_address; ++ ++ length = rmrr->end_address - rmrr->base_address + 1; ++ rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot, ++ IOMMU_RESV_DIRECT); ++ if (!rmrru->resv) ++ goto free_rmrru; ++ + rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1), + ((void *)rmrr) + rmrr->header.length, + &rmrru->devices_cnt); +- if (rmrru->devices_cnt && rmrru->devices == NULL) { +- kfree(rmrru); +- return -ENOMEM; +- } ++ if (rmrru->devices_cnt && rmrru->devices == NULL) ++ goto free_all; + + list_add(&rmrru->list, &dmar_rmrr_units); + + return 0; ++free_all: ++ kfree(rmrru->resv); ++free_rmrru: ++ kfree(rmrru); ++out: ++ return -ENOMEM; + } + + static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr) +@@ -4484,6 +4498,7 @@ static void intel_iommu_free_dmars(void) + list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) { + list_del(&rmrru->list); + dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt); ++ kfree(rmrru->resv); + kfree(rmrru); + } + +@@ -5219,6 +5234,45 @@ static void intel_iommu_remove_device(st + iommu_device_unlink(iommu->iommu_dev, dev); + } + ++static void intel_iommu_get_resv_regions(struct device *device, ++ struct list_head *head) ++{ ++ struct iommu_resv_region *reg; ++ struct dmar_rmrr_unit *rmrr; ++ struct device *i_dev; ++ int i; ++ ++ rcu_read_lock(); ++ for_each_rmrr_units(rmrr) { ++ for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, ++ i, i_dev) { ++ if (i_dev != device) ++ continue; ++ ++ list_add_tail(&rmrr->resv->list, head); ++ } ++ } ++ rcu_read_unlock(); ++ ++ reg = iommu_alloc_resv_region(IOAPIC_RANGE_START, ++ IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1, ++ 0, IOMMU_RESV_MSI); ++ if (!reg) ++ return; ++ list_add_tail(®->list, head); ++} ++ ++static void intel_iommu_put_resv_regions(struct device *dev, ++ struct list_head *head) ++{ ++ struct iommu_resv_region *entry, *next; ++ ++ list_for_each_entry_safe(entry, next, head, list) { ++ if (entry->type == IOMMU_RESV_RESERVED) ++ kfree(entry); ++ } ++} ++ + #ifdef CONFIG_INTEL_IOMMU_SVM + #define MAX_NR_PASID_BITS (20) + static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu) +@@ -5349,19 +5403,21 @@ struct intel_iommu *intel_svm_device_to_ + #endif /* CONFIG_INTEL_IOMMU_SVM */ + + static const struct iommu_ops intel_iommu_ops = { +- .capable = intel_iommu_capable, +- .domain_alloc = intel_iommu_domain_alloc, +- .domain_free = intel_iommu_domain_free, +- .attach_dev = intel_iommu_attach_device, +- .detach_dev = intel_iommu_detach_device, +- .map = intel_iommu_map, +- .unmap = intel_iommu_unmap, +- .map_sg = default_iommu_map_sg, +- .iova_to_phys = intel_iommu_iova_to_phys, +- .add_device = intel_iommu_add_device, +- .remove_device = intel_iommu_remove_device, +- .device_group = pci_device_group, +- .pgsize_bitmap = INTEL_IOMMU_PGSIZES, ++ .capable = intel_iommu_capable, ++ .domain_alloc = intel_iommu_domain_alloc, ++ .domain_free = intel_iommu_domain_free, ++ .attach_dev = intel_iommu_attach_device, ++ .detach_dev = intel_iommu_detach_device, ++ .map = 
intel_iommu_map, ++ .unmap = intel_iommu_unmap, ++ .map_sg = default_iommu_map_sg, ++ .iova_to_phys = intel_iommu_iova_to_phys, ++ .add_device = intel_iommu_add_device, ++ .remove_device = intel_iommu_remove_device, ++ .get_resv_regions = intel_iommu_get_resv_regions, ++ .put_resv_regions = intel_iommu_put_resv_regions, ++ .device_group = pci_device_group, ++ .pgsize_bitmap = INTEL_IOMMU_PGSIZES, + }; + + static void quirk_iommu_g4x_gfx(struct pci_dev *dev) +--- a/drivers/iommu/iommu.c ++++ b/drivers/iommu/iommu.c +@@ -68,6 +68,13 @@ struct iommu_group_attribute { + const char *buf, size_t count); + }; + ++static const char * const iommu_group_resv_type_string[] = { ++ [IOMMU_RESV_DIRECT] = "direct", ++ [IOMMU_RESV_RESERVED] = "reserved", ++ [IOMMU_RESV_MSI] = "msi", ++ [IOMMU_RESV_SW_MSI] = "msi", ++}; ++ + #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ + struct iommu_group_attribute iommu_group_attr_##_name = \ + __ATTR(_name, _mode, _show, _store) +@@ -133,8 +140,131 @@ static ssize_t iommu_group_show_name(str + return sprintf(buf, "%s\n", group->name); + } + ++/** ++ * iommu_insert_resv_region - Insert a new region in the ++ * list of reserved regions. ++ * @new: new region to insert ++ * @regions: list of regions ++ * ++ * The new element is sorted by address with respect to the other ++ * regions of the same type. In case it overlaps with another ++ * region of the same type, regions are merged. In case it ++ * overlaps with another region of different type, regions are ++ * not merged. ++ */ ++static int iommu_insert_resv_region(struct iommu_resv_region *new, ++ struct list_head *regions) ++{ ++ struct iommu_resv_region *region; ++ phys_addr_t start = new->start; ++ phys_addr_t end = new->start + new->length - 1; ++ struct list_head *pos = regions->next; ++ ++ while (pos != regions) { ++ struct iommu_resv_region *entry = ++ list_entry(pos, struct iommu_resv_region, list); ++ phys_addr_t a = entry->start; ++ phys_addr_t b = entry->start + entry->length - 1; ++ int type = entry->type; ++ ++ if (end < a) { ++ goto insert; ++ } else if (start > b) { ++ pos = pos->next; ++ } else if ((start >= a) && (end <= b)) { ++ if (new->type == type) ++ goto done; ++ else ++ pos = pos->next; ++ } else { ++ if (new->type == type) { ++ phys_addr_t new_start = min(a, start); ++ phys_addr_t new_end = max(b, end); ++ ++ list_del(&entry->list); ++ entry->start = new_start; ++ entry->length = new_end - new_start + 1; ++ iommu_insert_resv_region(entry, regions); ++ } else { ++ pos = pos->next; ++ } ++ } ++ } ++insert: ++ region = iommu_alloc_resv_region(new->start, new->length, ++ new->prot, new->type); ++ if (!region) ++ return -ENOMEM; ++ ++ list_add_tail(®ion->list, pos); ++done: ++ return 0; ++} ++ ++static int ++iommu_insert_device_resv_regions(struct list_head *dev_resv_regions, ++ struct list_head *group_resv_regions) ++{ ++ struct iommu_resv_region *entry; ++ int ret; ++ ++ list_for_each_entry(entry, dev_resv_regions, list) { ++ ret = iommu_insert_resv_region(entry, group_resv_regions); ++ if (ret) ++ break; ++ } ++ return ret; ++} ++ ++int iommu_get_group_resv_regions(struct iommu_group *group, ++ struct list_head *head) ++{ ++ struct iommu_device *device; ++ int ret = 0; ++ ++ mutex_lock(&group->mutex); ++ list_for_each_entry(device, &group->devices, list) { ++ struct list_head dev_resv_regions; ++ ++ INIT_LIST_HEAD(&dev_resv_regions); ++ iommu_get_resv_regions(device->dev, &dev_resv_regions); ++ ret = iommu_insert_device_resv_regions(&dev_resv_regions, head); ++ 
iommu_put_resv_regions(device->dev, &dev_resv_regions); ++ if (ret) ++ break; ++ } ++ mutex_unlock(&group->mutex); ++ return ret; ++} ++EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions); ++ ++static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, ++ char *buf) ++{ ++ struct iommu_resv_region *region, *next; ++ struct list_head group_resv_regions; ++ char *str = buf; ++ ++ INIT_LIST_HEAD(&group_resv_regions); ++ iommu_get_group_resv_regions(group, &group_resv_regions); ++ ++ list_for_each_entry_safe(region, next, &group_resv_regions, list) { ++ str += sprintf(str, "0x%016llx 0x%016llx %s\n", ++ (long long int)region->start, ++ (long long int)(region->start + ++ region->length - 1), ++ iommu_group_resv_type_string[region->type]); ++ kfree(region); ++ } ++ ++ return (str - buf); ++} ++ + static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL); + ++static IOMMU_GROUP_ATTR(reserved_regions, 0444, ++ iommu_group_show_resv_regions, NULL); ++ + static void iommu_group_release(struct kobject *kobj) + { + struct iommu_group *group = to_iommu_group(kobj); +@@ -212,6 +342,11 @@ struct iommu_group *iommu_group_alloc(vo + */ + kobject_put(&group->kobj); + ++ ret = iommu_group_create_file(group, ++ &iommu_group_attr_reserved_regions); ++ if (ret) ++ return ERR_PTR(ret); ++ + pr_debug("Allocated group %d\n", group->id); + + return group; +@@ -318,7 +453,7 @@ static int iommu_group_create_direct_map + struct device *dev) + { + struct iommu_domain *domain = group->default_domain; +- struct iommu_dm_region *entry; ++ struct iommu_resv_region *entry; + struct list_head mappings; + unsigned long pg_size; + int ret = 0; +@@ -331,18 +466,21 @@ static int iommu_group_create_direct_map + pg_size = 1UL << __ffs(domain->pgsize_bitmap); + INIT_LIST_HEAD(&mappings); + +- iommu_get_dm_regions(dev, &mappings); ++ iommu_get_resv_regions(dev, &mappings); + + /* We need to consider overlapping regions for different devices */ + list_for_each_entry(entry, &mappings, list) { + dma_addr_t start, end, addr; + +- if (domain->ops->apply_dm_region) +- domain->ops->apply_dm_region(dev, domain, entry); ++ if (domain->ops->apply_resv_region) ++ domain->ops->apply_resv_region(dev, domain, entry); + + start = ALIGN(entry->start, pg_size); + end = ALIGN(entry->start + entry->length, pg_size); + ++ if (entry->type != IOMMU_RESV_DIRECT) ++ continue; ++ + for (addr = start; addr < end; addr += pg_size) { + phys_addr_t phys_addr; + +@@ -358,7 +496,7 @@ static int iommu_group_create_direct_map + } + + out: +- iommu_put_dm_regions(dev, &mappings); ++ iommu_put_resv_regions(dev, &mappings); + + return ret; + } +@@ -563,6 +701,19 @@ struct iommu_group *iommu_group_get(stru + EXPORT_SYMBOL_GPL(iommu_group_get); + + /** ++ * iommu_group_ref_get - Increment reference on a group ++ * @group: the group to use, must not be NULL ++ * ++ * This function is called by iommu drivers to take additional references on an ++ * existing group. Returns the given group for convenience. 
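++ *
++ * A typical use, as in the mtk_iommu hunks later in this patch, is
++ * returning an already-allocated group from a .device_group callback
++ * (a sketch; "data" stands for driver-private state):
++ *
++ *	if (data->m4u_group)
++ *		return iommu_group_ref_get(data->m4u_group);
++ *
++ * Callers drop the reference again with iommu_group_put().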
++ */ ++struct iommu_group *iommu_group_ref_get(struct iommu_group *group) ++{ ++ kobject_get(group->devices_kobj); ++ return group; ++} ++ ++/** + * iommu_group_put - Decrement group reference + * @group: the group to use + * +@@ -1557,20 +1708,38 @@ int iommu_domain_set_attr(struct iommu_d + } + EXPORT_SYMBOL_GPL(iommu_domain_set_attr); + +-void iommu_get_dm_regions(struct device *dev, struct list_head *list) ++void iommu_get_resv_regions(struct device *dev, struct list_head *list) + { + const struct iommu_ops *ops = dev->bus->iommu_ops; + +- if (ops && ops->get_dm_regions) +- ops->get_dm_regions(dev, list); ++ if (ops && ops->get_resv_regions) ++ ops->get_resv_regions(dev, list); + } + +-void iommu_put_dm_regions(struct device *dev, struct list_head *list) ++void iommu_put_resv_regions(struct device *dev, struct list_head *list) + { + const struct iommu_ops *ops = dev->bus->iommu_ops; + +- if (ops && ops->put_dm_regions) +- ops->put_dm_regions(dev, list); ++ if (ops && ops->put_resv_regions) ++ ops->put_resv_regions(dev, list); ++} ++ ++struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, ++ size_t length, int prot, ++ enum iommu_resv_type type) ++{ ++ struct iommu_resv_region *region; ++ ++ region = kzalloc(sizeof(*region), GFP_KERNEL); ++ if (!region) ++ return NULL; ++ ++ INIT_LIST_HEAD(®ion->list); ++ region->start = start; ++ region->length = length; ++ region->prot = prot; ++ region->type = type; ++ return region; + } + + /* Request that a device is direct mapped by the IOMMU */ +--- a/drivers/iommu/mtk_iommu.c ++++ b/drivers/iommu/mtk_iommu.c +@@ -410,6 +410,8 @@ static struct iommu_group *mtk_iommu_dev + data->m4u_group = iommu_group_alloc(); + if (IS_ERR(data->m4u_group)) + dev_err(dev, "Failed to allocate M4U IOMMU group\n"); ++ } else { ++ iommu_group_ref_get(data->m4u_group); + } + return data->m4u_group; + } +--- a/drivers/iommu/mtk_iommu_v1.c ++++ b/drivers/iommu/mtk_iommu_v1.c +@@ -502,6 +502,8 @@ static struct iommu_group *mtk_iommu_dev + data->m4u_group = iommu_group_alloc(); + if (IS_ERR(data->m4u_group)) + dev_err(dev, "Failed to allocate M4U IOMMU group\n"); ++ } else { ++ iommu_group_ref_get(data->m4u_group); + } + return data->m4u_group; + } +--- a/include/linux/dma-iommu.h ++++ b/include/linux/dma-iommu.h +@@ -27,6 +27,7 @@ int iommu_dma_init(void); + + /* Domain management interface for IOMMU drivers */ + int iommu_get_dma_cookie(struct iommu_domain *domain); ++int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base); + void iommu_put_dma_cookie(struct iommu_domain *domain); + + /* Setup call for arch DMA mapping code */ +@@ -66,6 +67,7 @@ int iommu_dma_mapping_error(struct devic + + /* The DMA API isn't _quite_ the whole story, though... 
*/ + void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg); ++void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list); + + #else + +@@ -82,6 +84,11 @@ static inline int iommu_get_dma_cookie(s + return -ENODEV; + } + ++static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base) ++{ ++ return -ENODEV; ++} ++ + static inline void iommu_put_dma_cookie(struct iommu_domain *domain) + { + } +@@ -90,6 +97,10 @@ static inline void iommu_dma_map_msi_msg + { + } + ++static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list) ++{ ++} ++ + #endif /* CONFIG_IOMMU_DMA */ + #endif /* __KERNEL__ */ + #endif /* __DMA_IOMMU_H */ +--- a/include/linux/iommu.h ++++ b/include/linux/iommu.h +@@ -117,18 +117,32 @@ enum iommu_attr { + DOMAIN_ATTR_MAX, + }; + ++/* These are the possible reserved region types */ ++enum iommu_resv_type { ++ /* Memory regions which must be mapped 1:1 at all times */ ++ IOMMU_RESV_DIRECT, ++ /* Arbitrary "never map this or give it to a device" address ranges */ ++ IOMMU_RESV_RESERVED, ++ /* Hardware MSI region (untranslated) */ ++ IOMMU_RESV_MSI, ++ /* Software-managed MSI translation window */ ++ IOMMU_RESV_SW_MSI, ++}; ++ + /** +- * struct iommu_dm_region - descriptor for a direct mapped memory region ++ * struct iommu_resv_region - descriptor for a reserved memory region + * @list: Linked list pointers + * @start: System physical start address of the region + * @length: Length of the region in bytes + * @prot: IOMMU Protection flags (READ/WRITE/...) ++ * @type: Type of the reserved region + */ +-struct iommu_dm_region { ++struct iommu_resv_region { + struct list_head list; + phys_addr_t start; + size_t length; + int prot; ++ enum iommu_resv_type type; + }; + + #ifdef CONFIG_IOMMU_API +@@ -150,9 +164,9 @@ struct iommu_dm_region { + * @device_group: find iommu group for a particular device + * @domain_get_attr: Query domain attributes + * @domain_set_attr: Change domain attributes +- * @get_dm_regions: Request list of direct mapping requirements for a device +- * @put_dm_regions: Free list of direct mapping requirements for a device +- * @apply_dm_region: Temporary helper call-back for iova reserved ranges ++ * @get_resv_regions: Request list of reserved regions for a device ++ * @put_resv_regions: Free list of reserved regions for a device ++ * @apply_resv_region: Temporary helper call-back for iova reserved ranges + * @domain_window_enable: Configure and enable a particular window for a domain + * @domain_window_disable: Disable a particular window for a domain + * @domain_set_windows: Set the number of windows for a domain +@@ -184,11 +198,12 @@ struct iommu_ops { + int (*domain_set_attr)(struct iommu_domain *domain, + enum iommu_attr attr, void *data); + +- /* Request/Free a list of direct mapping requirements for a device */ +- void (*get_dm_regions)(struct device *dev, struct list_head *list); +- void (*put_dm_regions)(struct device *dev, struct list_head *list); +- void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain, +- struct iommu_dm_region *region); ++ /* Request/Free a list of reserved regions for a device */ ++ void (*get_resv_regions)(struct device *dev, struct list_head *list); ++ void (*put_resv_regions)(struct device *dev, struct list_head *list); ++ void (*apply_resv_region)(struct device *dev, ++ struct iommu_domain *domain, ++ struct iommu_resv_region *region); + + /* Window handling functions */ + int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, +@@ 
-233,9 +248,14 @@ extern phys_addr_t iommu_iova_to_phys(st
+ extern void iommu_set_fault_handler(struct iommu_domain *domain,
+			iommu_fault_handler_t handler, void *token);
+
+-extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
+-extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
++extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
++extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
+ extern int iommu_request_dm_for_dev(struct device *dev);
++extern struct iommu_resv_region *
++iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
++			enum iommu_resv_type type);
++extern int iommu_get_group_resv_regions(struct iommu_group *group,
++					struct list_head *head);
+
+ extern int iommu_attach_group(struct iommu_domain *domain,
+			      struct iommu_group *group);
+@@ -253,6 +273,7 @@ extern void iommu_group_remove_device(st
+ extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
+				    int (*fn)(struct device *, void *));
+ extern struct iommu_group *iommu_group_get(struct device *dev);
++extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
+ extern void iommu_group_put(struct iommu_group *group);
+ extern int iommu_group_register_notifier(struct iommu_group *group,
+					 struct notifier_block *nb);
+@@ -439,16 +460,22 @@ static inline void iommu_set_fault_handl
+ {
+ }
+
+-static inline void iommu_get_dm_regions(struct device *dev,
++static inline void iommu_get_resv_regions(struct device *dev,
+					struct list_head *list)
+ {
+ }
+
+-static inline void iommu_put_dm_regions(struct device *dev,
++static inline void iommu_put_resv_regions(struct device *dev,
+					struct list_head *list)
+ {
+ }
+
++static inline int iommu_get_group_resv_regions(struct iommu_group *group,
++					       struct list_head *head)
++{
++	return -ENODEV;
++}
++
+ static inline int iommu_request_dm_for_dev(struct device *dev)
+ {
+	return -ENODEV;
diff --git a/target/linux/layerscape/patches-4.9/811-irqchip-support-layerscape.patch b/target/linux/layerscape/patches-4.9/811-irqchip-support-layerscape.patch
new file mode 100644
index 000000000..ab1630698
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/811-irqchip-support-layerscape.patch
@@ -0,0 +1,169 @@
+From 1d596855b596db88f10b12a1be6fd19e249be170 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu
+Date: Mon, 25 Sep 2017 12:13:29 +0800
+Subject: [PATCH] irqchip: support layerscape
+
+This is an integrated patch for layerscape gic support.
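+
+The new irq_domain_check_msi_remap() helper lets a consumer discover
+whether every MSI irq domain in the system implements IRQ remapping.
+A minimal sketch of the intended use (the VFIO-style policy check below
+is illustrative and not part of this patch):
+
+	bool msi_remap;
+
+	msi_remap = irq_domain_check_msi_remap() ||
+		    iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
+	if (!allow_unsafe_interrupts && !msi_remap)
+		return -EPERM;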
+ +Signed-off-by: Eric Auger +Signed-off-by: Zhao Qiang +Signed-off-by: Yangbo Lu +--- + drivers/irqchip/Makefile | 1 + + drivers/irqchip/irq-gic-v3-its.c | 1 + + include/linux/irqdomain.h | 36 ++++++++++++++++++++++++++++++++++++ + kernel/irq/irqdomain.c | 39 +++++++++++++++++++++++++++++++++++++++ + kernel/irq/msi.c | 4 ++-- + 5 files changed, 79 insertions(+), 2 deletions(-) + +--- a/drivers/irqchip/Makefile ++++ b/drivers/irqchip/Makefile +@@ -74,3 +74,4 @@ obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scf + obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o + obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o + obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o ++obj-$(CONFIG_QUICC_ENGINE) += irq-qeic.o +--- a/drivers/irqchip/irq-gic-v3-its.c ++++ b/drivers/irqchip/irq-gic-v3-its.c +@@ -1659,6 +1659,7 @@ static int its_init_domain(struct fwnode + + inner_domain->parent = its_parent; + inner_domain->bus_token = DOMAIN_BUS_NEXUS; ++ inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP; + info->ops = &its_msi_domain_ops; + info->data = its; + inner_domain->host_data = info; +--- a/include/linux/irqdomain.h ++++ b/include/linux/irqdomain.h +@@ -183,6 +183,12 @@ enum { + /* Irq domain is an IPI domain with single virq */ + IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3), + ++ /* Irq domain implements MSIs */ ++ IRQ_DOMAIN_FLAG_MSI = (1 << 4), ++ ++ /* Irq domain implements MSI remapping */ ++ IRQ_DOMAIN_FLAG_MSI_REMAP = (1 << 5), ++ + /* + * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved + * for implementation specific purposes and ignored by the +@@ -216,6 +222,7 @@ struct irq_domain *irq_domain_add_legacy + void *host_data); + extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token); ++extern bool irq_domain_check_msi_remap(void); + extern void irq_set_default_host(struct irq_domain *host); + extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, + irq_hw_number_t hwirq, int node, +@@ -446,6 +453,19 @@ static inline bool irq_domain_is_ipi_sin + { + return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE; + } ++ ++static inline bool irq_domain_is_msi(struct irq_domain *domain) ++{ ++ return domain->flags & IRQ_DOMAIN_FLAG_MSI; ++} ++ ++static inline bool irq_domain_is_msi_remap(struct irq_domain *domain) ++{ ++ return domain->flags & IRQ_DOMAIN_FLAG_MSI_REMAP; ++} ++ ++extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain); ++ + #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ + static inline void irq_domain_activate_irq(struct irq_data *data) { } + static inline void irq_domain_deactivate_irq(struct irq_data *data) { } +@@ -477,6 +497,22 @@ static inline bool irq_domain_is_ipi_sin + { + return false; + } ++ ++static inline bool irq_domain_is_msi(struct irq_domain *domain) ++{ ++ return false; ++} ++ ++static inline bool irq_domain_is_msi_remap(struct irq_domain *domain) ++{ ++ return false; ++} ++ ++static inline bool ++irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain) ++{ ++ return false; ++} + #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ + + #else /* CONFIG_IRQ_DOMAIN */ +--- a/kernel/irq/irqdomain.c ++++ b/kernel/irq/irqdomain.c +@@ -278,6 +278,31 @@ struct irq_domain *irq_find_matching_fws + EXPORT_SYMBOL_GPL(irq_find_matching_fwspec); + + /** ++ * irq_domain_check_msi_remap - Check whether all MSI irq domains implement ++ * IRQ remapping ++ * ++ * Return: false if any MSI irq domain does not support IRQ remapping, ++ * true otherwise (including if there is no MSI irq domain) ++ */ ++bool irq_domain_check_msi_remap(void) ++{ ++ struct 
irq_domain *h;
++	bool ret = true;
++
++	mutex_lock(&irq_domain_mutex);
++	list_for_each_entry(h, &irq_domain_list, link) {
++		if (irq_domain_is_msi(h) &&
++		    !irq_domain_hierarchical_is_msi_remap(h)) {
++			ret = false;
++			break;
++		}
++	}
++	mutex_unlock(&irq_domain_mutex);
++	return ret;
++}
++EXPORT_SYMBOL_GPL(irq_domain_check_msi_remap);
++
++/**
+ * irq_set_default_host() - Set a "default" irq domain
+ * @domain: default domain pointer
+ *
+@@ -1408,6 +1433,20 @@ static void irq_domain_check_hierarchy(s
+	if (domain->ops->alloc)
+		domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
+ }
++
++/**
++ * irq_domain_hierarchical_is_msi_remap - Check if the domain or any
++ * parent has MSI remapping support
++ * @domain: domain pointer
++ */
++bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
++{
++	for (; domain; domain = domain->parent) {
++		if (irq_domain_is_msi_remap(domain))
++			return true;
++	}
++	return false;
++}
+ #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+ /**
+ * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
+--- a/kernel/irq/msi.c
++++ b/kernel/irq/msi.c
+@@ -272,8 +272,8 @@ struct irq_domain *msi_create_irq_domain
+	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+		msi_domain_update_chip_ops(info);
+
+-	return irq_domain_create_hierarchy(parent, 0, 0, fwnode,
+-					   &msi_domain_ops, info);
++	return irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
++					   fwnode, &msi_domain_ops, info);
+ }
+
+ int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
diff --git a/target/linux/layerscape/patches-4.9/812-mmc-layerscape-support.patch b/target/linux/layerscape/patches-4.9/812-mmc-layerscape-support.patch
new file mode 100644
index 000000000..6cad565ad
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/812-mmc-layerscape-support.patch
@@ -0,0 +1,599 @@
+From b31046c51c72232363711f0c623df08bf28c37e4 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu
+Date: Mon, 25 Sep 2017 12:21:30 +0800
+Subject: [PATCH] mmc: layerscape support
+
+This is an integrated patch for layerscape mmc support.
+
+Adrian Hunter
+Jaehoon Chung
+Masahiro Yamada
+Signed-off-by: Yangbo Lu
+---
+ drivers/mmc/host/Kconfig          |   1 +
+ drivers/mmc/host/sdhci-esdhc.h    |  52 +++++---
+ drivers/mmc/host/sdhci-of-esdhc.c | 251 ++++++++++++++++++++++++++++++++++++--
+ drivers/mmc/host/sdhci.c          |  45 ++++---
+ drivers/mmc/host/sdhci.h          |   3 +
+ 5 files changed, 306 insertions(+), 46 deletions(-)
+
+--- a/drivers/mmc/host/Kconfig
++++ b/drivers/mmc/host/Kconfig
+@@ -144,6 +144,7 @@ config MMC_SDHCI_OF_ESDHC
+	depends on MMC_SDHCI_PLTFM
+	depends on PPC || ARCH_MXC || ARCH_LAYERSCAPE
+	select MMC_SDHCI_IO_ACCESSORS
++	select FSL_GUTS
+	help
+	  This selects the Freescale eSDHC controller support.
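+
+FSL_GUTS is selected because sdhci-of-esdhc now identifies the running
+SoC revision through the soc attribute framework, along the lines of
+the esdhc_init() hunk later in this patch (the table name below is
+illustrative):
+
+	static const struct soc_device_attribute quirk_socs[] = {
+		{ .family = "QorIQ T4240", .revision = "1.0", },
+		{ /* sentinel */ },
+	};
+
+	if (soc_device_match(quirk_socs))
+		esdhc->quirk_incorrect_hostver = true;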
+ +--- a/drivers/mmc/host/sdhci-esdhc.h ++++ b/drivers/mmc/host/sdhci-esdhc.h +@@ -24,30 +24,46 @@ + SDHCI_QUIRK_PIO_NEEDS_DELAY | \ + SDHCI_QUIRK_NO_HISPD_BIT) + +-#define ESDHC_PROCTL 0x28 +- +-#define ESDHC_SYSTEM_CONTROL 0x2c +-#define ESDHC_CLOCK_MASK 0x0000fff0 +-#define ESDHC_PREDIV_SHIFT 8 +-#define ESDHC_DIVIDER_SHIFT 4 +-#define ESDHC_CLOCK_PEREN 0x00000004 +-#define ESDHC_CLOCK_HCKEN 0x00000002 +-#define ESDHC_CLOCK_IPGEN 0x00000001 +- + /* pltfm-specific */ + #define ESDHC_HOST_CONTROL_LE 0x20 + + /* +- * P2020 interpretation of the SDHCI_HOST_CONTROL register ++ * eSDHC register definition + */ +-#define ESDHC_CTRL_4BITBUS (0x1 << 1) +-#define ESDHC_CTRL_8BITBUS (0x2 << 1) +-#define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1) +- +-/* OF-specific */ +-#define ESDHC_DMA_SYSCTL 0x40c +-#define ESDHC_DMA_SNOOP 0x00000040 + +-#define ESDHC_HOST_CONTROL_RES 0x01 ++/* Present State Register */ ++#define ESDHC_PRSSTAT 0x24 ++#define ESDHC_CLOCK_STABLE 0x00000008 ++ ++/* Protocol Control Register */ ++#define ESDHC_PROCTL 0x28 ++#define ESDHC_VOLT_SEL 0x00000400 ++#define ESDHC_CTRL_4BITBUS (0x1 << 1) ++#define ESDHC_CTRL_8BITBUS (0x2 << 1) ++#define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1) ++#define ESDHC_HOST_CONTROL_RES 0x01 ++ ++/* System Control Register */ ++#define ESDHC_SYSTEM_CONTROL 0x2c ++#define ESDHC_CLOCK_MASK 0x0000fff0 ++#define ESDHC_PREDIV_SHIFT 8 ++#define ESDHC_DIVIDER_SHIFT 4 ++#define ESDHC_CLOCK_SDCLKEN 0x00000008 ++#define ESDHC_CLOCK_PEREN 0x00000004 ++#define ESDHC_CLOCK_HCKEN 0x00000002 ++#define ESDHC_CLOCK_IPGEN 0x00000001 ++ ++/* Host Controller Capabilities Register 2 */ ++#define ESDHC_CAPABILITIES_1 0x114 ++ ++/* Tuning Block Control Register */ ++#define ESDHC_TBCTL 0x120 ++#define ESDHC_TB_EN 0x00000004 ++ ++/* Control Register for DMA transfer */ ++#define ESDHC_DMA_SYSCTL 0x40c ++#define ESDHC_PERIPHERAL_CLK_SEL 0x00080000 ++#define ESDHC_FLUSH_ASYNC_FIFO 0x00040000 ++#define ESDHC_DMA_SNOOP 0x00000040 + + #endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */ +--- a/drivers/mmc/host/sdhci-of-esdhc.c ++++ b/drivers/mmc/host/sdhci-of-esdhc.c +@@ -16,8 +16,12 @@ + #include + #include + #include ++#include + #include + #include ++#include ++#include ++#include + #include + #include "sdhci-pltfm.h" + #include "sdhci-esdhc.h" +@@ -28,8 +32,12 @@ + struct sdhci_esdhc { + u8 vendor_ver; + u8 spec_ver; ++ bool quirk_incorrect_hostver; ++ unsigned int peripheral_clock; + }; + ++static void esdhc_clock_enable(struct sdhci_host *host, bool enable); ++ + /** + * esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register + * to make it compatible with SD spec. +@@ -80,6 +88,17 @@ static u32 esdhc_readl_fixup(struct sdhc + return ret; + } + ++ /* ++ * DTS properties of mmc host are used to enable each speed mode ++ * according to soc and board capability. So clean up ++ * SDR50/SDR104/DDR50 support bits here. 
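++ *
++ * (Boards then opt back in through the standard mmc bindings, e.g. an
++ * "sd-uhs-sdr104" property in the esdhc node, which mmc_of_parse()
++ * turns into the corresponding MMC_CAP_UHS_* host capabilities.)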
++ */ ++ if (spec_reg == SDHCI_CAPABILITIES_1) { ++ ret = value & (~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 | ++ SDHCI_SUPPORT_DDR50)); ++ return ret; ++ } ++ + ret = value; + return ret; + } +@@ -87,6 +106,8 @@ static u32 esdhc_readl_fixup(struct sdhc + static u16 esdhc_readw_fixup(struct sdhci_host *host, + int spec_reg, u32 value) + { ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + u16 ret; + int shift = (spec_reg & 0x2) * 8; + +@@ -94,6 +115,12 @@ static u16 esdhc_readw_fixup(struct sdhc + ret = value & 0xffff; + else + ret = (value >> shift) & 0xffff; ++ /* Workaround for T4240-R1.0-R2.0 eSDHC which has incorrect ++ * vendor version and spec version information. ++ */ ++ if ((spec_reg == SDHCI_HOST_VERSION) && ++ (esdhc->quirk_incorrect_hostver)) ++ ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200; + return ret; + } + +@@ -235,7 +262,11 @@ static u32 esdhc_be_readl(struct sdhci_h + u32 ret; + u32 value; + +- value = ioread32be(host->ioaddr + reg); ++ if (reg == SDHCI_CAPABILITIES_1) ++ value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1); ++ else ++ value = ioread32be(host->ioaddr + reg); ++ + ret = esdhc_readl_fixup(host, reg, value); + + return ret; +@@ -246,7 +277,11 @@ static u32 esdhc_le_readl(struct sdhci_h + u32 ret; + u32 value; + +- value = ioread32(host->ioaddr + reg); ++ if (reg == SDHCI_CAPABILITIES_1) ++ value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1); ++ else ++ value = ioread32(host->ioaddr + reg); ++ + ret = esdhc_readl_fixup(host, reg, value); + + return ret; +@@ -404,15 +439,25 @@ static int esdhc_of_enable_dma(struct sd + static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host) + { + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + +- return pltfm_host->clock; ++ if (esdhc->peripheral_clock) ++ return esdhc->peripheral_clock; ++ else ++ return pltfm_host->clock; + } + + static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) + { + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); ++ unsigned int clock; + +- return pltfm_host->clock / 256 / 16; ++ if (esdhc->peripheral_clock) ++ clock = esdhc->peripheral_clock; ++ else ++ clock = pltfm_host->clock; ++ return clock / 256 / 16; + } + + static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) +@@ -421,17 +466,34 @@ static void esdhc_of_set_clock(struct sd + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + int pre_div = 1; + int div = 1; ++ ktime_t timeout; + u32 temp; + + host->mmc->actual_clock = 0; + +- if (clock == 0) ++ if (clock == 0) { ++ esdhc_clock_enable(host, false); + return; ++ } + + /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */ + if (esdhc->vendor_ver < VENDOR_V_23) + pre_div = 2; + ++ /* ++ * Limit SD clock to 167MHz for ls1046a according to its datasheet ++ */ ++ if (clock > 167000000 && ++ of_find_compatible_node(NULL, NULL, "fsl,ls1046a-esdhc")) ++ clock = 167000000; ++ ++ /* ++ * Limit SD clock to 125MHz for ls1012a according to its datasheet ++ */ ++ if (clock > 125000000 && ++ of_find_compatible_node(NULL, NULL, "fsl,ls1012a-esdhc")) ++ clock = 125000000; ++ + /* Workaround to reduce the clock frequency for p1010 esdhc */ + if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) { + if (clock > 20000000) +@@ -441,8 +503,8 @@ static void esdhc_of_set_clock(struct sd + } + + temp = 
sdhci_readl(host, ESDHC_SYSTEM_CONTROL); +- temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN +- | ESDHC_CLOCK_MASK); ++ temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ++ ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK); + sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + + while (host->max_clk / pre_div / 16 > clock && pre_div < 256) +@@ -462,7 +524,20 @@ static void esdhc_of_set_clock(struct sd + | (div << ESDHC_DIVIDER_SHIFT) + | (pre_div << ESDHC_PREDIV_SHIFT)); + sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); +- mdelay(1); ++ ++ /* Wait max 20 ms */ ++ timeout = ktime_add_ms(ktime_get(), 20); ++ while (!(sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)) { ++ if (ktime_after(ktime_get(), timeout)) { ++ pr_err("%s: Internal clock never stabilised.\n", ++ mmc_hostname(host->mmc)); ++ return; ++ } ++ udelay(10); ++ } ++ ++ temp |= ESDHC_CLOCK_SDCLKEN; ++ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + } + + static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) +@@ -487,6 +562,33 @@ static void esdhc_pltfm_set_bus_width(st + sdhci_writel(host, ctrl, ESDHC_PROCTL); + } + ++static void esdhc_clock_enable(struct sdhci_host *host, bool enable) ++{ ++ u32 val; ++ ktime_t timeout; ++ ++ val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); ++ ++ if (enable) ++ val |= ESDHC_CLOCK_SDCLKEN; ++ else ++ val &= ~ESDHC_CLOCK_SDCLKEN; ++ ++ sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL); ++ ++ /* Wait max 20 ms */ ++ timeout = ktime_add_ms(ktime_get(), 20); ++ val = ESDHC_CLOCK_STABLE; ++ while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) { ++ if (ktime_after(ktime_get(), timeout)) { ++ pr_err("%s: Internal clock never stabilised.\n", ++ mmc_hostname(host->mmc)); ++ break; ++ } ++ udelay(10); ++ } ++} ++ + static void esdhc_reset(struct sdhci_host *host, u8 mask) + { + sdhci_reset(host, mask); +@@ -495,6 +597,95 @@ static void esdhc_reset(struct sdhci_hos + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + } + ++/* The SCFG, Supplemental Configuration Unit, provides SoC specific ++ * configuration and status registers for the device. There is a ++ * SDHC IO VSEL control register on SCFG for some platforms. It's ++ * used to support SDHC IO voltage switching. ++ */ ++static const struct of_device_id scfg_device_ids[] = { ++ { .compatible = "fsl,t1040-scfg", }, ++ { .compatible = "fsl,ls1012a-scfg", }, ++ { .compatible = "fsl,ls1046a-scfg", }, ++ {} ++}; ++ ++/* SDHC IO VSEL control register definition */ ++#define SCFG_SDHCIOVSELCR 0x408 ++#define SDHCIOVSELCR_TGLEN 0x80000000 ++#define SDHCIOVSELCR_VSELVAL 0x60000000 ++#define SDHCIOVSELCR_SDHC_VS 0x00000001 ++ ++static int esdhc_signal_voltage_switch(struct mmc_host *mmc, ++ struct mmc_ios *ios) ++{ ++ struct sdhci_host *host = mmc_priv(mmc); ++ struct device_node *scfg_node; ++ void __iomem *scfg_base = NULL; ++ u32 sdhciovselcr; ++ u32 val; ++ ++ /* ++ * Signal Voltage Switching is only applicable for Host Controllers ++ * v3.00 and above. 
++ */ ++ if (host->version < SDHCI_SPEC_300) ++ return 0; ++ ++ val = sdhci_readl(host, ESDHC_PROCTL); ++ ++ switch (ios->signal_voltage) { ++ case MMC_SIGNAL_VOLTAGE_330: ++ val &= ~ESDHC_VOLT_SEL; ++ sdhci_writel(host, val, ESDHC_PROCTL); ++ return 0; ++ case MMC_SIGNAL_VOLTAGE_180: ++ scfg_node = of_find_matching_node(NULL, scfg_device_ids); ++ if (scfg_node) ++ scfg_base = of_iomap(scfg_node, 0); ++ if (scfg_base) { ++ sdhciovselcr = SDHCIOVSELCR_TGLEN | ++ SDHCIOVSELCR_VSELVAL; ++ iowrite32be(sdhciovselcr, ++ scfg_base + SCFG_SDHCIOVSELCR); ++ ++ val |= ESDHC_VOLT_SEL; ++ sdhci_writel(host, val, ESDHC_PROCTL); ++ mdelay(5); ++ ++ sdhciovselcr = SDHCIOVSELCR_TGLEN | ++ SDHCIOVSELCR_SDHC_VS; ++ iowrite32be(sdhciovselcr, ++ scfg_base + SCFG_SDHCIOVSELCR); ++ iounmap(scfg_base); ++ } else { ++ val |= ESDHC_VOLT_SEL; ++ sdhci_writel(host, val, ESDHC_PROCTL); ++ } ++ return 0; ++ default: ++ return 0; ++ } ++} ++ ++static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode) ++{ ++ struct sdhci_host *host = mmc_priv(mmc); ++ u32 val; ++ ++ /* Use tuning block for tuning procedure */ ++ esdhc_clock_enable(host, false); ++ val = sdhci_readl(host, ESDHC_DMA_SYSCTL); ++ val |= ESDHC_FLUSH_ASYNC_FIFO; ++ sdhci_writel(host, val, ESDHC_DMA_SYSCTL); ++ ++ val = sdhci_readl(host, ESDHC_TBCTL); ++ val |= ESDHC_TB_EN; ++ sdhci_writel(host, val, ESDHC_TBCTL); ++ esdhc_clock_enable(host, true); ++ ++ return sdhci_execute_tuning(mmc, opcode); ++} ++ + #ifdef CONFIG_PM_SLEEP + static u32 esdhc_proctl; + static int esdhc_of_suspend(struct device *dev) +@@ -575,10 +766,19 @@ static const struct sdhci_pltfm_data sdh + .ops = &sdhci_esdhc_le_ops, + }; + ++static struct soc_device_attribute soc_incorrect_hostver[] = { ++ { .family = "QorIQ T4240", .revision = "1.0", }, ++ { .family = "QorIQ T4240", .revision = "2.0", }, ++ { }, ++}; ++ + static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host) + { + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_esdhc *esdhc; ++ struct device_node *np; ++ struct clk *clk; ++ u32 val; + u16 host_ver; + + pltfm_host = sdhci_priv(host); +@@ -588,6 +788,36 @@ static void esdhc_init(struct platform_d + esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >> + SDHCI_VENDOR_VER_SHIFT; + esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK; ++ if (soc_device_match(soc_incorrect_hostver)) ++ esdhc->quirk_incorrect_hostver = true; ++ else ++ esdhc->quirk_incorrect_hostver = false; ++ ++ np = pdev->dev.of_node; ++ clk = of_clk_get(np, 0); ++ if (!IS_ERR(clk)) { ++ /* ++ * esdhc->peripheral_clock would be assigned with a value ++ * which is eSDHC base clock when use periperal clock. ++ * For ls1046a, the clock value got by common clk API is ++ * peripheral clock while the eSDHC base clock is 1/2 ++ * peripheral clock. 
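++ *
++ * (For example, a 1200 MHz peripheral clock reported by the clk API
++ * would mean a 600 MHz eSDHC base clock on ls1046a; the figure is
++ * illustrative.)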
++ */ ++ if (of_device_is_compatible(np, "fsl,ls1046a-esdhc")) ++ esdhc->peripheral_clock = clk_get_rate(clk) / 2; ++ else ++ esdhc->peripheral_clock = clk_get_rate(clk); ++ ++ clk_put(clk); ++ } ++ ++ if (esdhc->peripheral_clock) { ++ esdhc_clock_enable(host, false); ++ val = sdhci_readl(host, ESDHC_DMA_SYSCTL); ++ val |= ESDHC_PERIPHERAL_CLK_SEL; ++ sdhci_writel(host, val, ESDHC_DMA_SYSCTL); ++ esdhc_clock_enable(host, true); ++ } + } + + static int sdhci_esdhc_probe(struct platform_device *pdev) +@@ -610,6 +840,11 @@ static int sdhci_esdhc_probe(struct plat + if (IS_ERR(host)) + return PTR_ERR(host); + ++ host->mmc_host_ops.start_signal_voltage_switch = ++ esdhc_signal_voltage_switch; ++ host->mmc_host_ops.execute_tuning = esdhc_execute_tuning; ++ host->tuning_delay = 1; ++ + esdhc_init(pdev, host); + + sdhci_get_of_property(pdev); +--- a/drivers/mmc/host/sdhci.c ++++ b/drivers/mmc/host/sdhci.c +@@ -1624,26 +1624,24 @@ static void sdhci_set_ios(struct mmc_hos + + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); + +- if ((ios->timing == MMC_TIMING_SD_HS || +- ios->timing == MMC_TIMING_MMC_HS) +- && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) +- ctrl |= SDHCI_CTRL_HISPD; +- else +- ctrl &= ~SDHCI_CTRL_HISPD; ++ if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) { ++ if ((ios->timing == MMC_TIMING_SD_HS || ++ ios->timing == MMC_TIMING_MMC_HS || ++ ios->timing == MMC_TIMING_MMC_HS400 || ++ ios->timing == MMC_TIMING_MMC_HS200 || ++ ios->timing == MMC_TIMING_MMC_DDR52 || ++ ios->timing == MMC_TIMING_UHS_SDR50 || ++ ios->timing == MMC_TIMING_UHS_SDR104 || ++ ios->timing == MMC_TIMING_UHS_DDR50 || ++ ios->timing == MMC_TIMING_UHS_SDR25)) ++ ctrl |= SDHCI_CTRL_HISPD; ++ else ++ ctrl &= ~SDHCI_CTRL_HISPD; ++ } + + if (host->version >= SDHCI_SPEC_300) { + u16 clk, ctrl_2; + +- /* In case of UHS-I modes, set High Speed Enable */ +- if ((ios->timing == MMC_TIMING_MMC_HS400) || +- (ios->timing == MMC_TIMING_MMC_HS200) || +- (ios->timing == MMC_TIMING_MMC_DDR52) || +- (ios->timing == MMC_TIMING_UHS_SDR50) || +- (ios->timing == MMC_TIMING_UHS_SDR104) || +- (ios->timing == MMC_TIMING_UHS_DDR50) || +- (ios->timing == MMC_TIMING_UHS_SDR25)) +- ctrl |= SDHCI_CTRL_HISPD; +- + if (!host->preset_enabled) { + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); + /* +@@ -1956,7 +1954,7 @@ static int sdhci_prepare_hs400_tuning(st + return 0; + } + +-static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) ++int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) + { + struct sdhci_host *host = mmc_priv(mmc); + u16 ctrl; +@@ -2015,6 +2013,9 @@ static int sdhci_execute_tuning(struct m + return err; + } + ++ if (host->tuning_delay < 0) ++ host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK; ++ + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + ctrl |= SDHCI_CTRL_EXEC_TUNING; + if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND) +@@ -2127,9 +2128,10 @@ static int sdhci_execute_tuning(struct m + + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + +- /* eMMC spec does not require a delay between tuning cycles */ +- if (opcode == MMC_SEND_TUNING_BLOCK) +- mdelay(1); ++ /* Spec does not require a delay between tuning cycles */ ++ if (host->tuning_delay > 0) ++ mdelay(host->tuning_delay); ++ + } while (ctrl & SDHCI_CTRL_EXEC_TUNING); + + /* +@@ -2165,6 +2167,7 @@ out_unlock: + spin_unlock_irqrestore(&host->lock, flags); + return err; + } ++EXPORT_SYMBOL_GPL(sdhci_execute_tuning); + + static int sdhci_select_drive_strength(struct mmc_card *card, + unsigned int max_dtr, int host_drv, +@@ -2997,6 +3000,8 @@ struct 
sdhci_host *sdhci_alloc_host(stru
+
+	host->flags = SDHCI_SIGNALING_330;
+
++	host->tuning_delay = -1;
++
+	return host;
+ }
+
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -524,6 +524,8 @@ struct sdhci_host {
+ #define SDHCI_TUNING_MODE_1	0
+ #define SDHCI_TUNING_MODE_2	1
+ #define SDHCI_TUNING_MODE_3	2
++	/* Delay (ms) between tuning commands */
++	int			tuning_delay;
+
+	unsigned long private[0] ____cacheline_aligned;
+ };
+@@ -689,6 +691,7 @@ void sdhci_set_power_noreg(struct sdhci_
+ void sdhci_set_bus_width(struct sdhci_host *host, int width);
+ void sdhci_reset(struct sdhci_host *host, u8 mask);
+ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
++int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
+
+ #ifdef CONFIG_PM
+ extern int sdhci_suspend_host(struct sdhci_host *host);
diff --git a/target/linux/layerscape/patches-4.9/813-qe-support-layerscape.patch b/target/linux/layerscape/patches-4.9/813-qe-support-layerscape.patch
new file mode 100644
index 000000000..3675f3350
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/813-qe-support-layerscape.patch
@@ -0,0 +1,1976 @@
+From adb377019768396f339010ebb9e80fa8384992f7 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu
+Date: Mon, 25 Sep 2017 12:20:30 +0800
+Subject: [PATCH] qe: support layerscape
+
+This is an integrated patch for layerscape qe support.
+
+Signed-off-by: Zhao Qiang
+Signed-off-by: Yangbo Lu
+---
+ drivers/{soc/fsl/qe/qe_ic.c => irqchip/irq-qeic.c} | 389 +++++++++++++--------
+ drivers/net/wan/fsl_ucc_hdlc.c                     |   4 +-
+ drivers/soc/fsl/qe/Kconfig                         |   2 +-
+ drivers/soc/fsl/qe/Makefile                        |   2 +-
+ drivers/soc/fsl/qe/qe.c                            |  80 +++--
+ drivers/soc/fsl/qe/qe_ic.h                         | 103 ------
+ drivers/soc/fsl/qe/qe_io.c                         |  42 +--
+ drivers/soc/fsl/qe/qe_tdm.c                        |   8 +-
+ drivers/soc/fsl/qe/ucc.c                           |  10 +-
+ drivers/soc/fsl/qe/ucc_fast.c                      |  74 ++--
+ drivers/tty/serial/ucc_uart.c                      |   1 +
+ include/soc/fsl/qe/qe.h                            |   1 -
+ include/soc/fsl/qe/qe_ic.h                         | 139 --------
+ 13 files changed, 359 insertions(+), 496 deletions(-)
+ rename drivers/{soc/fsl/qe/qe_ic.c => irqchip/irq-qeic.c} (54%)
+ delete mode 100644 drivers/soc/fsl/qe/qe_ic.h
+ delete mode 100644 include/soc/fsl/qe/qe_ic.h
+
+--- a/drivers/soc/fsl/qe/qe_ic.c
++++ /dev/null
+@@ -1,512 +0,0 @@
+-/*
+- * arch/powerpc/sysdev/qe_lib/qe_ic.c
+- *
+- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+- *
+- * Author: Li Yang
+- * Based on code from Shlomi Gridish
+- *
+- * QUICC ENGINE Interrupt Controller
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms of the GNU General Public License as published by the
+- * Free Software Foundation; either version 2 of the License, or (at your
+- * option) any later version.
+- */ +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-#include "qe_ic.h" +- +-static DEFINE_RAW_SPINLOCK(qe_ic_lock); +- +-static struct qe_ic_info qe_ic_info[] = { +- [1] = { +- .mask = 0x00008000, +- .mask_reg = QEIC_CIMR, +- .pri_code = 0, +- .pri_reg = QEIC_CIPWCC, +- }, +- [2] = { +- .mask = 0x00004000, +- .mask_reg = QEIC_CIMR, +- .pri_code = 1, +- .pri_reg = QEIC_CIPWCC, +- }, +- [3] = { +- .mask = 0x00002000, +- .mask_reg = QEIC_CIMR, +- .pri_code = 2, +- .pri_reg = QEIC_CIPWCC, +- }, +- [10] = { +- .mask = 0x00000040, +- .mask_reg = QEIC_CIMR, +- .pri_code = 1, +- .pri_reg = QEIC_CIPZCC, +- }, +- [11] = { +- .mask = 0x00000020, +- .mask_reg = QEIC_CIMR, +- .pri_code = 2, +- .pri_reg = QEIC_CIPZCC, +- }, +- [12] = { +- .mask = 0x00000010, +- .mask_reg = QEIC_CIMR, +- .pri_code = 3, +- .pri_reg = QEIC_CIPZCC, +- }, +- [13] = { +- .mask = 0x00000008, +- .mask_reg = QEIC_CIMR, +- .pri_code = 4, +- .pri_reg = QEIC_CIPZCC, +- }, +- [14] = { +- .mask = 0x00000004, +- .mask_reg = QEIC_CIMR, +- .pri_code = 5, +- .pri_reg = QEIC_CIPZCC, +- }, +- [15] = { +- .mask = 0x00000002, +- .mask_reg = QEIC_CIMR, +- .pri_code = 6, +- .pri_reg = QEIC_CIPZCC, +- }, +- [20] = { +- .mask = 0x10000000, +- .mask_reg = QEIC_CRIMR, +- .pri_code = 3, +- .pri_reg = QEIC_CIPRTA, +- }, +- [25] = { +- .mask = 0x00800000, +- .mask_reg = QEIC_CRIMR, +- .pri_code = 0, +- .pri_reg = QEIC_CIPRTB, +- }, +- [26] = { +- .mask = 0x00400000, +- .mask_reg = QEIC_CRIMR, +- .pri_code = 1, +- .pri_reg = QEIC_CIPRTB, +- }, +- [27] = { +- .mask = 0x00200000, +- .mask_reg = QEIC_CRIMR, +- .pri_code = 2, +- .pri_reg = QEIC_CIPRTB, +- }, +- [28] = { +- .mask = 0x00100000, +- .mask_reg = QEIC_CRIMR, +- .pri_code = 3, +- .pri_reg = QEIC_CIPRTB, +- }, +- [32] = { +- .mask = 0x80000000, +- .mask_reg = QEIC_CIMR, +- .pri_code = 0, +- .pri_reg = QEIC_CIPXCC, +- }, +- [33] = { +- .mask = 0x40000000, +- .mask_reg = QEIC_CIMR, +- .pri_code = 1, +- .pri_reg = QEIC_CIPXCC, +- }, +- [34] = { +- .mask = 0x20000000, +- .mask_reg = QEIC_CIMR, +- .pri_code = 2, +- .pri_reg = QEIC_CIPXCC, +- }, +- [35] = { +- .mask = 0x10000000, +- .mask_reg = QEIC_CIMR, +- .pri_code = 3, +- .pri_reg = QEIC_CIPXCC, +- }, +- [36] = { +- .mask = 0x08000000, +- .mask_reg = QEIC_CIMR, +- .pri_code = 4, +- .pri_reg = QEIC_CIPXCC, +- }, +- [40] = { +- .mask = 0x00800000, +- .mask_reg = QEIC_CIMR, +- .pri_code = 0, +- .pri_reg = QEIC_CIPYCC, +- }, +- [41] = { +- .mask = 0x00400000, +- .mask_reg = QEIC_CIMR, +- .pri_code = 1, +- .pri_reg = QEIC_CIPYCC, +- }, +- [42] = { +- .mask = 0x00200000, +- .mask_reg = QEIC_CIMR, +- .pri_code = 2, +- .pri_reg = QEIC_CIPYCC, +- }, +- [43] = { +- .mask = 0x00100000, +- .mask_reg = QEIC_CIMR, +- .pri_code = 3, +- .pri_reg = QEIC_CIPYCC, +- }, +-}; +- +-static inline u32 qe_ic_read(volatile __be32 __iomem * base, unsigned int reg) +-{ +- return in_be32(base + (reg >> 2)); +-} +- +-static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg, +- u32 value) +-{ +- out_be32(base + (reg >> 2), value); +-} +- +-static inline struct qe_ic *qe_ic_from_irq(unsigned int virq) +-{ +- return irq_get_chip_data(virq); +-} +- +-static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d) +-{ +- return irq_data_get_irq_chip_data(d); +-} +- +-static void qe_ic_unmask_irq(struct irq_data *d) +-{ +- struct qe_ic *qe_ic = qe_ic_from_irq_data(d); +- unsigned int src = irqd_to_hwirq(d); +- unsigned long flags; 
+- u32 temp; +- +- raw_spin_lock_irqsave(&qe_ic_lock, flags); +- +- temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg); +- qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg, +- temp | qe_ic_info[src].mask); +- +- raw_spin_unlock_irqrestore(&qe_ic_lock, flags); +-} +- +-static void qe_ic_mask_irq(struct irq_data *d) +-{ +- struct qe_ic *qe_ic = qe_ic_from_irq_data(d); +- unsigned int src = irqd_to_hwirq(d); +- unsigned long flags; +- u32 temp; +- +- raw_spin_lock_irqsave(&qe_ic_lock, flags); +- +- temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg); +- qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg, +- temp & ~qe_ic_info[src].mask); +- +- /* Flush the above write before enabling interrupts; otherwise, +- * spurious interrupts will sometimes happen. To be 100% sure +- * that the write has reached the device before interrupts are +- * enabled, the mask register would have to be read back; however, +- * this is not required for correctness, only to avoid wasting +- * time on a large number of spurious interrupts. In testing, +- * a sync reduced the observed spurious interrupts to zero. +- */ +- mb(); +- +- raw_spin_unlock_irqrestore(&qe_ic_lock, flags); +-} +- +-static struct irq_chip qe_ic_irq_chip = { +- .name = "QEIC", +- .irq_unmask = qe_ic_unmask_irq, +- .irq_mask = qe_ic_mask_irq, +- .irq_mask_ack = qe_ic_mask_irq, +-}; +- +-static int qe_ic_host_match(struct irq_domain *h, struct device_node *node, +- enum irq_domain_bus_token bus_token) +-{ +- /* Exact match, unless qe_ic node is NULL */ +- struct device_node *of_node = irq_domain_get_of_node(h); +- return of_node == NULL || of_node == node; +-} +- +-static int qe_ic_host_map(struct irq_domain *h, unsigned int virq, +- irq_hw_number_t hw) +-{ +- struct qe_ic *qe_ic = h->host_data; +- struct irq_chip *chip; +- +- if (hw >= ARRAY_SIZE(qe_ic_info)) { +- pr_err("%s: Invalid hw irq number for QEIC\n", __func__); +- return -EINVAL; +- } +- +- if (qe_ic_info[hw].mask == 0) { +- printk(KERN_ERR "Can't map reserved IRQ\n"); +- return -EINVAL; +- } +- /* Default chip */ +- chip = &qe_ic->hc_irq; +- +- irq_set_chip_data(virq, qe_ic); +- irq_set_status_flags(virq, IRQ_LEVEL); +- +- irq_set_chip_and_handler(virq, chip, handle_level_irq); +- +- return 0; +-} +- +-static const struct irq_domain_ops qe_ic_host_ops = { +- .match = qe_ic_host_match, +- .map = qe_ic_host_map, +- .xlate = irq_domain_xlate_onetwocell, +-}; +- +-/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */ +-unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) +-{ +- int irq; +- +- BUG_ON(qe_ic == NULL); +- +- /* get the interrupt source vector. */ +- irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26; +- +- if (irq == 0) +- return NO_IRQ; +- +- return irq_linear_revmap(qe_ic->irqhost, irq); +-} +- +-/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */ +-unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic) +-{ +- int irq; +- +- BUG_ON(qe_ic == NULL); +- +- /* get the interrupt source vector. 
*/ +- irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26; +- +- if (irq == 0) +- return NO_IRQ; +- +- return irq_linear_revmap(qe_ic->irqhost, irq); +-} +- +-void __init qe_ic_init(struct device_node *node, unsigned int flags, +- void (*low_handler)(struct irq_desc *desc), +- void (*high_handler)(struct irq_desc *desc)) +-{ +- struct qe_ic *qe_ic; +- struct resource res; +- u32 temp = 0, ret, high_active = 0; +- +- ret = of_address_to_resource(node, 0, &res); +- if (ret) +- return; +- +- qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL); +- if (qe_ic == NULL) +- return; +- +- qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS, +- &qe_ic_host_ops, qe_ic); +- if (qe_ic->irqhost == NULL) { +- kfree(qe_ic); +- return; +- } +- +- qe_ic->regs = ioremap(res.start, resource_size(&res)); +- +- qe_ic->hc_irq = qe_ic_irq_chip; +- +- qe_ic->virq_high = irq_of_parse_and_map(node, 0); +- qe_ic->virq_low = irq_of_parse_and_map(node, 1); +- +- if (qe_ic->virq_low == NO_IRQ) { +- printk(KERN_ERR "Failed to map QE_IC low IRQ\n"); +- kfree(qe_ic); +- return; +- } +- +- /* default priority scheme is grouped. If spread mode is */ +- /* required, configure cicr accordingly. */ +- if (flags & QE_IC_SPREADMODE_GRP_W) +- temp |= CICR_GWCC; +- if (flags & QE_IC_SPREADMODE_GRP_X) +- temp |= CICR_GXCC; +- if (flags & QE_IC_SPREADMODE_GRP_Y) +- temp |= CICR_GYCC; +- if (flags & QE_IC_SPREADMODE_GRP_Z) +- temp |= CICR_GZCC; +- if (flags & QE_IC_SPREADMODE_GRP_RISCA) +- temp |= CICR_GRTA; +- if (flags & QE_IC_SPREADMODE_GRP_RISCB) +- temp |= CICR_GRTB; +- +- /* choose destination signal for highest priority interrupt */ +- if (flags & QE_IC_HIGH_SIGNAL) { +- temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT); +- high_active = 1; +- } +- +- qe_ic_write(qe_ic->regs, QEIC_CICR, temp); +- +- irq_set_handler_data(qe_ic->virq_low, qe_ic); +- irq_set_chained_handler(qe_ic->virq_low, low_handler); +- +- if (qe_ic->virq_high != NO_IRQ && +- qe_ic->virq_high != qe_ic->virq_low) { +- irq_set_handler_data(qe_ic->virq_high, qe_ic); +- irq_set_chained_handler(qe_ic->virq_high, high_handler); +- } +-} +- +-void qe_ic_set_highest_priority(unsigned int virq, int high) +-{ +- struct qe_ic *qe_ic = qe_ic_from_irq(virq); +- unsigned int src = virq_to_hw(virq); +- u32 temp = 0; +- +- temp = qe_ic_read(qe_ic->regs, QEIC_CICR); +- +- temp &= ~CICR_HP_MASK; +- temp |= src << CICR_HP_SHIFT; +- +- temp &= ~CICR_HPIT_MASK; +- temp |= (high ? 
SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT; +- +- qe_ic_write(qe_ic->regs, QEIC_CICR, temp); +-} +- +-/* Set Priority level within its group, from 1 to 8 */ +-int qe_ic_set_priority(unsigned int virq, unsigned int priority) +-{ +- struct qe_ic *qe_ic = qe_ic_from_irq(virq); +- unsigned int src = virq_to_hw(virq); +- u32 temp; +- +- if (priority > 8 || priority == 0) +- return -EINVAL; +- if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info), +- "%s: Invalid hw irq number for QEIC\n", __func__)) +- return -EINVAL; +- if (qe_ic_info[src].pri_reg == 0) +- return -EINVAL; +- +- temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg); +- +- if (priority < 4) { +- temp &= ~(0x7 << (32 - priority * 3)); +- temp |= qe_ic_info[src].pri_code << (32 - priority * 3); +- } else { +- temp &= ~(0x7 << (24 - priority * 3)); +- temp |= qe_ic_info[src].pri_code << (24 - priority * 3); +- } +- +- qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp); +- +- return 0; +-} +- +-/* Set a QE priority to use high irq, only priority 1~2 can use high irq */ +-int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high) +-{ +- struct qe_ic *qe_ic = qe_ic_from_irq(virq); +- unsigned int src = virq_to_hw(virq); +- u32 temp, control_reg = QEIC_CICNR, shift = 0; +- +- if (priority > 2 || priority == 0) +- return -EINVAL; +- if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info), +- "%s: Invalid hw irq number for QEIC\n", __func__)) +- return -EINVAL; +- +- switch (qe_ic_info[src].pri_reg) { +- case QEIC_CIPZCC: +- shift = CICNR_ZCC1T_SHIFT; +- break; +- case QEIC_CIPWCC: +- shift = CICNR_WCC1T_SHIFT; +- break; +- case QEIC_CIPYCC: +- shift = CICNR_YCC1T_SHIFT; +- break; +- case QEIC_CIPXCC: +- shift = CICNR_XCC1T_SHIFT; +- break; +- case QEIC_CIPRTA: +- shift = CRICR_RTA1T_SHIFT; +- control_reg = QEIC_CRICR; +- break; +- case QEIC_CIPRTB: +- shift = CRICR_RTB1T_SHIFT; +- control_reg = QEIC_CRICR; +- break; +- default: +- return -EINVAL; +- } +- +- shift += (2 - priority) * 2; +- temp = qe_ic_read(qe_ic->regs, control_reg); +- temp &= ~(SIGNAL_MASK << shift); +- temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift; +- qe_ic_write(qe_ic->regs, control_reg, temp); +- +- return 0; +-} +- +-static struct bus_type qe_ic_subsys = { +- .name = "qe_ic", +- .dev_name = "qe_ic", +-}; +- +-static struct device device_qe_ic = { +- .id = 0, +- .bus = &qe_ic_subsys, +-}; +- +-static int __init init_qe_ic_sysfs(void) +-{ +- int rc; +- +- printk(KERN_DEBUG "Registering qe_ic with sysfs...\n"); +- +- rc = subsys_system_register(&qe_ic_subsys, NULL); +- if (rc) { +- printk(KERN_ERR "Failed registering qe_ic sys class\n"); +- return -ENODEV; +- } +- rc = device_register(&device_qe_ic); +- if (rc) { +- printk(KERN_ERR "Failed registering qe_ic sys device\n"); +- return -ENODEV; +- } +- return 0; +-} +- +-subsys_initcall(init_qe_ic_sysfs); +--- /dev/null ++++ b/drivers/irqchip/irq-qeic.c +@@ -0,0 +1,605 @@ ++/* ++ * drivers/irqchip/irq-qeic.c ++ * ++ * Copyright (C) 2016 Freescale Semiconductor, Inc. All rights reserved. ++ * ++ * Author: Li Yang ++ * Based on code from Shlomi Gridish ++ * ++ * QUICC ENGINE Interrupt Controller ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define NR_QE_IC_INTS 64 ++ ++/* QE IC registers offset */ ++#define QEIC_CICR 0x00 ++#define QEIC_CIVEC 0x04 ++#define QEIC_CRIPNR 0x08 ++#define QEIC_CIPNR 0x0c ++#define QEIC_CIPXCC 0x10 ++#define QEIC_CIPYCC 0x14 ++#define QEIC_CIPWCC 0x18 ++#define QEIC_CIPZCC 0x1c ++#define QEIC_CIMR 0x20 ++#define QEIC_CRIMR 0x24 ++#define QEIC_CICNR 0x28 ++#define QEIC_CIPRTA 0x30 ++#define QEIC_CIPRTB 0x34 ++#define QEIC_CRICR 0x3c ++#define QEIC_CHIVEC 0x60 ++ ++/* Interrupt priority registers */ ++#define CIPCC_SHIFT_PRI0 29 ++#define CIPCC_SHIFT_PRI1 26 ++#define CIPCC_SHIFT_PRI2 23 ++#define CIPCC_SHIFT_PRI3 20 ++#define CIPCC_SHIFT_PRI4 13 ++#define CIPCC_SHIFT_PRI5 10 ++#define CIPCC_SHIFT_PRI6 7 ++#define CIPCC_SHIFT_PRI7 4 ++ ++/* CICR priority modes */ ++#define CICR_GWCC 0x00040000 ++#define CICR_GXCC 0x00020000 ++#define CICR_GYCC 0x00010000 ++#define CICR_GZCC 0x00080000 ++#define CICR_GRTA 0x00200000 ++#define CICR_GRTB 0x00400000 ++#define CICR_HPIT_SHIFT 8 ++#define CICR_HPIT_MASK 0x00000300 ++#define CICR_HP_SHIFT 24 ++#define CICR_HP_MASK 0x3f000000 ++ ++/* CICNR */ ++#define CICNR_WCC1T_SHIFT 20 ++#define CICNR_ZCC1T_SHIFT 28 ++#define CICNR_YCC1T_SHIFT 12 ++#define CICNR_XCC1T_SHIFT 4 ++ ++/* CRICR */ ++#define CRICR_RTA1T_SHIFT 20 ++#define CRICR_RTB1T_SHIFT 28 ++ ++/* Signal indicator */ ++#define SIGNAL_MASK 3 ++#define SIGNAL_HIGH 2 ++#define SIGNAL_LOW 0 ++ ++#define NUM_OF_QE_IC_GROUPS 6 ++ ++/* Flags when we init the QE IC */ ++#define QE_IC_SPREADMODE_GRP_W 0x00000001 ++#define QE_IC_SPREADMODE_GRP_X 0x00000002 ++#define QE_IC_SPREADMODE_GRP_Y 0x00000004 ++#define QE_IC_SPREADMODE_GRP_Z 0x00000008 ++#define QE_IC_SPREADMODE_GRP_RISCA 0x00000010 ++#define QE_IC_SPREADMODE_GRP_RISCB 0x00000020 ++ ++#define QE_IC_LOW_SIGNAL 0x00000100 ++#define QE_IC_HIGH_SIGNAL 0x00000200 ++ ++#define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH 0x00001000 ++#define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH 0x00002000 ++#define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH 0x00004000 ++#define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH 0x00008000 ++#define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH 0x00010000 ++#define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH 0x00020000 ++#define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH 0x00040000 ++#define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH 0x00080000 ++#define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH 0x00100000 ++#define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH 0x00200000 ++#define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH 0x00400000 ++#define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH 0x00800000 ++#define QE_IC_GRP_W_DEST_SIGNAL_SHIFT (12) ++ ++/* QE interrupt sources groups */ ++enum qe_ic_grp_id { ++ QE_IC_GRP_W = 0, /* QE interrupt controller group W */ ++ QE_IC_GRP_X, /* QE interrupt controller group X */ ++ QE_IC_GRP_Y, /* QE interrupt controller group Y */ ++ QE_IC_GRP_Z, /* QE interrupt controller group Z */ ++ QE_IC_GRP_RISCA, /* QE interrupt controller RISC group A */ ++ QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */ ++}; ++ ++struct qe_ic { ++ /* Control registers offset */ ++ u32 __iomem *regs; ++ ++ /* The remapper for this QEIC */ ++ struct irq_domain *irqhost; ++ ++ /* The "linux" controller struct */ ++ struct irq_chip hc_irq; ++ ++ /* VIRQ numbers of QE high/low irqs */ ++ unsigned int virq_high; ++ unsigned int virq_low; ++}; ++ ++/* ++ * QE interrupt controller internal structure ++ */ 
++struct qe_ic_info { ++ /* location of this source at the QIMR register. */ ++ u32 mask; ++ ++ /* Mask register offset */ ++ u32 mask_reg; ++ ++ /* ++ * for grouped interrupts sources - the interrupt ++ * code as appears at the group priority register ++ */ ++ u8 pri_code; ++ ++ /* Group priority register offset */ ++ u32 pri_reg; ++}; ++ ++static DEFINE_RAW_SPINLOCK(qe_ic_lock); ++ ++static struct qe_ic_info qe_ic_info[] = { ++ [1] = { ++ .mask = 0x00008000, ++ .mask_reg = QEIC_CIMR, ++ .pri_code = 0, ++ .pri_reg = QEIC_CIPWCC, ++ }, ++ [2] = { ++ .mask = 0x00004000, ++ .mask_reg = QEIC_CIMR, ++ .pri_code = 1, ++ .pri_reg = QEIC_CIPWCC, ++ }, ++ [3] = { ++ .mask = 0x00002000, ++ .mask_reg = QEIC_CIMR, ++ .pri_code = 2, ++ .pri_reg = QEIC_CIPWCC, ++ }, ++ [10] = { ++ .mask = 0x00000040, ++ .mask_reg = QEIC_CIMR, ++ .pri_code = 1, ++ .pri_reg = QEIC_CIPZCC, ++ }, ++ [11] = { ++ .mask = 0x00000020, ++ .mask_reg = QEIC_CIMR, ++ .pri_code = 2, ++ .pri_reg = QEIC_CIPZCC, ++ }, ++ [12] = { ++ .mask = 0x00000010, ++ .mask_reg = QEIC_CIMR, ++ .pri_code = 3, ++ .pri_reg = QEIC_CIPZCC, ++ }, ++ [13] = { ++ .mask = 0x00000008, ++ .mask_reg = QEIC_CIMR, ++ .pri_code = 4, ++ .pri_reg = QEIC_CIPZCC, ++ }, ++ [14] = { ++ .mask = 0x00000004, ++ .mask_reg = QEIC_CIMR, ++ .pri_code = 5, ++ .pri_reg = QEIC_CIPZCC, ++ }, ++ [15] = { ++ .mask = 0x00000002, ++ .mask_reg = QEIC_CIMR, ++ .pri_code = 6, ++ .pri_reg = QEIC_CIPZCC, ++ }, ++ [20] = { ++ .mask = 0x10000000, ++ .mask_reg = QEIC_CRIMR, ++ .pri_code = 3, ++ .pri_reg = QEIC_CIPRTA, ++ }, ++ [25] = { ++ .mask = 0x00800000, ++ .mask_reg = QEIC_CRIMR, ++ .pri_code = 0, ++ .pri_reg = QEIC_CIPRTB, ++ }, ++ [26] = { ++ .mask = 0x00400000, ++ .mask_reg = QEIC_CRIMR, ++ .pri_code = 1, ++ .pri_reg = QEIC_CIPRTB, ++ }, ++ [27] = { ++ .mask = 0x00200000, ++ .mask_reg = QEIC_CRIMR, ++ .pri_code = 2, ++ .pri_reg = QEIC_CIPRTB, ++ }, ++ [28] = { ++ .mask = 0x00100000, ++ .mask_reg = QEIC_CRIMR, ++ .pri_code = 3, ++ .pri_reg = QEIC_CIPRTB, ++ }, ++ [32] = { ++ .mask = 0x80000000, ++ .mask_reg = QEIC_CIMR, ++ .pri_code = 0, ++ .pri_reg = QEIC_CIPXCC, ++ }, ++ [33] = { ++ .mask = 0x40000000, ++ .mask_reg = QEIC_CIMR, ++ .pri_code = 1, ++ .pri_reg = QEIC_CIPXCC, ++ }, ++ [34] = { ++ .mask = 0x20000000, ++ .mask_reg = QEIC_CIMR, ++ .pri_code = 2, ++ .pri_reg = QEIC_CIPXCC, ++ }, ++ [35] = { ++ .mask = 0x10000000, ++ .mask_reg = QEIC_CIMR, ++ .pri_code = 3, ++ .pri_reg = QEIC_CIPXCC, ++ }, ++ [36] = { ++ .mask = 0x08000000, ++ .mask_reg = QEIC_CIMR, ++ .pri_code = 4, ++ .pri_reg = QEIC_CIPXCC, ++ }, ++ [40] = { ++ .mask = 0x00800000, ++ .mask_reg = QEIC_CIMR, ++ .pri_code = 0, ++ .pri_reg = QEIC_CIPYCC, ++ }, ++ [41] = { ++ .mask = 0x00400000, ++ .mask_reg = QEIC_CIMR, ++ .pri_code = 1, ++ .pri_reg = QEIC_CIPYCC, ++ }, ++ [42] = { ++ .mask = 0x00200000, ++ .mask_reg = QEIC_CIMR, ++ .pri_code = 2, ++ .pri_reg = QEIC_CIPYCC, ++ }, ++ [43] = { ++ .mask = 0x00100000, ++ .mask_reg = QEIC_CIMR, ++ .pri_code = 3, ++ .pri_reg = QEIC_CIPYCC, ++ }, ++}; ++ ++static inline u32 qe_ic_read(__be32 __iomem *base, unsigned int reg) ++{ ++ return ioread32be(base + (reg >> 2)); ++} ++ ++static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg, ++ u32 value) ++{ ++ iowrite32be(value, base + (reg >> 2)); ++} ++ ++static inline struct qe_ic *qe_ic_from_irq(unsigned int virq) ++{ ++ return irq_get_chip_data(virq); ++} ++ ++static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d) ++{ ++ return irq_data_get_irq_chip_data(d); ++} ++ ++static void qe_ic_unmask_irq(struct 
irq_data *d) ++{ ++ struct qe_ic *qe_ic = qe_ic_from_irq_data(d); ++ unsigned int src = irqd_to_hwirq(d); ++ unsigned long flags; ++ u32 temp; ++ ++ raw_spin_lock_irqsave(&qe_ic_lock, flags); ++ ++ temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg); ++ qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg, ++ temp | qe_ic_info[src].mask); ++ ++ raw_spin_unlock_irqrestore(&qe_ic_lock, flags); ++} ++ ++static void qe_ic_mask_irq(struct irq_data *d) ++{ ++ struct qe_ic *qe_ic = qe_ic_from_irq_data(d); ++ unsigned int src = irqd_to_hwirq(d); ++ unsigned long flags; ++ u32 temp; ++ ++ raw_spin_lock_irqsave(&qe_ic_lock, flags); ++ ++ temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg); ++ qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg, ++ temp & ~qe_ic_info[src].mask); ++ ++ /* Flush the above write before enabling interrupts; otherwise, ++ * spurious interrupts will sometimes happen. To be 100% sure ++ * that the write has reached the device before interrupts are ++ * enabled, the mask register would have to be read back; however, ++ * this is not required for correctness, only to avoid wasting ++ * time on a large number of spurious interrupts. In testing, ++ * a sync reduced the observed spurious interrupts to zero. ++ */ ++ mb(); ++ ++ raw_spin_unlock_irqrestore(&qe_ic_lock, flags); ++} ++ ++static struct irq_chip qe_ic_irq_chip = { ++ .name = "QEIC", ++ .irq_unmask = qe_ic_unmask_irq, ++ .irq_mask = qe_ic_mask_irq, ++ .irq_mask_ack = qe_ic_mask_irq, ++}; ++ ++static int qe_ic_host_match(struct irq_domain *h, struct device_node *node, ++ enum irq_domain_bus_token bus_token) ++{ ++ /* Exact match, unless qe_ic node is NULL */ ++ struct device_node *of_node = irq_domain_get_of_node(h); ++ return of_node == NULL || of_node == node; ++} ++ ++static int qe_ic_host_map(struct irq_domain *h, unsigned int virq, ++ irq_hw_number_t hw) ++{ ++ struct qe_ic *qe_ic = h->host_data; ++ struct irq_chip *chip; ++ ++ if (hw >= ARRAY_SIZE(qe_ic_info)) { ++ pr_err("%s: Invalid hw irq number for QEIC\n", __func__); ++ return -EINVAL; ++ } ++ ++ if (qe_ic_info[hw].mask == 0) { ++ printk(KERN_ERR "Can't map reserved IRQ\n"); ++ return -EINVAL; ++ } ++ /* Default chip */ ++ chip = &qe_ic->hc_irq; ++ ++ irq_set_chip_data(virq, qe_ic); ++ irq_set_status_flags(virq, IRQ_LEVEL); ++ ++ irq_set_chip_and_handler(virq, chip, handle_level_irq); ++ ++ return 0; ++} ++ ++static const struct irq_domain_ops qe_ic_host_ops = { ++ .match = qe_ic_host_match, ++ .map = qe_ic_host_map, ++ .xlate = irq_domain_xlate_onetwocell, ++}; ++ ++/* Return an interrupt vector or 0 if no interrupt is pending. */ ++static unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) ++{ ++ int irq; ++ ++ BUG_ON(qe_ic == NULL); ++ ++ /* get the interrupt source vector. */ ++ irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26; ++ ++ if (irq == 0) ++ return 0; ++ ++ return irq_linear_revmap(qe_ic->irqhost, irq); ++} ++ ++/* Return an interrupt vector or 0 if no interrupt is pending. */ ++static unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic) ++{ ++ int irq; ++ ++ BUG_ON(qe_ic == NULL); ++ ++ /* get the interrupt source vector. 
*/ ++ irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26; ++ ++ if (irq == 0) ++ return 0; ++ ++ return irq_linear_revmap(qe_ic->irqhost, irq); ++} ++ ++static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc) ++{ ++ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); ++ unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); ++ ++ if (cascade_irq != 0) ++ generic_handle_irq(cascade_irq); ++} ++ ++static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc) ++{ ++ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); ++ unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); ++ ++ if (cascade_irq != 0) ++ generic_handle_irq(cascade_irq); ++} ++ ++static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc) ++{ ++ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); ++ unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); ++ struct irq_chip *chip = irq_desc_get_chip(desc); ++ ++ if (cascade_irq != 0) ++ generic_handle_irq(cascade_irq); ++ ++ chip->irq_eoi(&desc->irq_data); ++} ++ ++static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc) ++{ ++ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); ++ unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); ++ struct irq_chip *chip = irq_desc_get_chip(desc); ++ ++ if (cascade_irq != 0) ++ generic_handle_irq(cascade_irq); ++ ++ chip->irq_eoi(&desc->irq_data); ++} ++ ++static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc) ++{ ++ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); ++ unsigned int cascade_irq; ++ struct irq_chip *chip = irq_desc_get_chip(desc); ++ ++ cascade_irq = qe_ic_get_high_irq(qe_ic); ++ if (cascade_irq == 0) ++ cascade_irq = qe_ic_get_low_irq(qe_ic); ++ ++ if (cascade_irq != 0) ++ generic_handle_irq(cascade_irq); ++ ++ chip->irq_eoi(&desc->irq_data); ++} ++ ++static int __init qe_ic_init(struct device_node *node, unsigned int flags) ++{ ++ struct qe_ic *qe_ic; ++ struct resource res; ++ u32 temp = 0, high_active = 0; ++ int ret = 0; ++ ++ if (!node) ++ return -ENODEV; ++ ++ ret = of_address_to_resource(node, 0, &res); ++ if (ret) { ++ ret = -ENODEV; ++ goto err_put_node; ++ } ++ ++ qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL); ++ if (qe_ic == NULL) { ++ ret = -ENOMEM; ++ goto err_put_node; ++ } ++ ++ qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS, ++ &qe_ic_host_ops, qe_ic); ++ if (qe_ic->irqhost == NULL) { ++ ret = -ENOMEM; ++ goto err_free_qe_ic; ++ } ++ ++ qe_ic->regs = ioremap(res.start, resource_size(&res)); ++ ++ qe_ic->hc_irq = qe_ic_irq_chip; ++ ++ qe_ic->virq_high = irq_of_parse_and_map(node, 0); ++ qe_ic->virq_low = irq_of_parse_and_map(node, 1); ++ ++ if (qe_ic->virq_low == 0) { ++ pr_err("Failed to map QE_IC low IRQ\n"); ++ ret = -ENOMEM; ++ goto err_domain_remove; ++ } ++ ++ /* default priority scheme is grouped. If spread mode is */ ++ /* required, configure cicr accordingly. 
*/
++	if (flags & QE_IC_SPREADMODE_GRP_W)
++		temp |= CICR_GWCC;
++	if (flags & QE_IC_SPREADMODE_GRP_X)
++		temp |= CICR_GXCC;
++	if (flags & QE_IC_SPREADMODE_GRP_Y)
++		temp |= CICR_GYCC;
++	if (flags & QE_IC_SPREADMODE_GRP_Z)
++		temp |= CICR_GZCC;
++	if (flags & QE_IC_SPREADMODE_GRP_RISCA)
++		temp |= CICR_GRTA;
++	if (flags & QE_IC_SPREADMODE_GRP_RISCB)
++		temp |= CICR_GRTB;
++
++	/* choose destination signal for highest priority interrupt */
++	if (flags & QE_IC_HIGH_SIGNAL) {
++		temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
++		high_active = 1;
++	}
++
++	qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
++
++	irq_set_handler_data(qe_ic->virq_low, qe_ic);
++	irq_set_chained_handler(qe_ic->virq_low, qe_ic_cascade_low_mpic);
++
++	if (qe_ic->virq_high != 0 &&
++	    qe_ic->virq_high != qe_ic->virq_low) {
++		irq_set_handler_data(qe_ic->virq_high, qe_ic);
++		irq_set_chained_handler(qe_ic->virq_high,
++					qe_ic_cascade_high_mpic);
++	}
++	of_node_put(node);
++	return 0;
++
++err_domain_remove:
++	irq_domain_remove(qe_ic->irqhost);
++err_free_qe_ic:
++	kfree(qe_ic);
++err_put_node:
++	of_node_put(node);
++	return ret;
++}
++
++static int __init init_qe_ic(struct device_node *node,
++			     struct device_node *parent)
++{
++	int ret;
++
++	ret = qe_ic_init(node, 0);
++	if (ret)
++		return ret;
++
++	return 0;
++}
++
++IRQCHIP_DECLARE(qeic, "fsl,qe-ic", init_qe_ic);
+--- a/drivers/net/wan/fsl_ucc_hdlc.c
++++ b/drivers/net/wan/fsl_ucc_hdlc.c
+@@ -381,8 +381,8 @@ static netdev_tx_t ucc_hdlc_tx(struct sk
+ 	/* set bd status and length */
+ 	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
+
+-	iowrite16be(bd_status, &bd->status);
+ 	iowrite16be(skb->len, &bd->length);
++	iowrite16be(bd_status, &bd->status);
+
+ 	/* Move to next BD in the ring */
+ 	if (!(bd_status & T_W_S))
+@@ -457,7 +457,7 @@ static int hdlc_rx_done(struct ucc_hdlc_
+ 	struct sk_buff *skb;
+ 	hdlc_device *hdlc = dev_to_hdlc(dev);
+ 	struct qe_bd *bd;
+-	u32 bd_status;
++	u16 bd_status;
+ 	u16 length, howmany = 0;
+ 	u8 *bdbuffer;
+ 	int i;
+--- a/drivers/soc/fsl/qe/Kconfig
++++ b/drivers/soc/fsl/qe/Kconfig
+@@ -4,7 +4,7 @@
+
+ config QUICC_ENGINE
+ 	bool "Freescale QUICC Engine (QE) Support"
+-	depends on FSL_SOC && PPC32
++	depends on OF && HAS_IOMEM
+ 	select GENERIC_ALLOCATOR
+ 	select CRC32
+ 	help
+--- a/drivers/soc/fsl/qe/Makefile
++++ b/drivers/soc/fsl/qe/Makefile
+@@ -1,7 +1,7 @@
+ #
+ # Makefile for the linux ppc-specific parts of QE
+ #
+-obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_common.o qe_ic.o qe_io.o
++obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_common.o qe_io.o
+ obj-$(CONFIG_CPM)	+= qe_common.o
+ obj-$(CONFIG_UCC)	+= ucc.o
+ obj-$(CONFIG_UCC_SLOW)	+= ucc_slow.o
+--- a/drivers/soc/fsl/qe/qe.c
++++ b/drivers/soc/fsl/qe/qe.c
+@@ -33,8 +33,6 @@
+ #include
+ #include
+ #include
+-#include
+-#include
+
+ static void qe_snums_init(void);
+ static int qe_sdma_init(void);
+@@ -109,15 +107,27 @@ void qe_reset(void)
+ 		panic("sdma init failed!");
+ }
+
++/* issue commands to QE, return 0 on success and -EIO on error
++ *
++ * @cmd: the command code, should be QE_INIT_TX_RX, QE_STOP_TX and so on
++ * @device: which sub-block will run the command, QE_CR_SUBBLOCK_UCCFAST1 - 8,
++ * QE_CR_SUBBLOCK_UCCSLOW1 - 8, QE_CR_SUBBLOCK_MCC1 - 3,
++ * QE_CR_SUBBLOCK_IDMA1 - 4 and so on.
++ * @mcn_protocol: specifies mode for the command for non-MCC, should be
++ * QE_CR_PROTOCOL_HDLC_TRANSPARENT, QE_CR_PROTOCOL_QMC, QE_CR_PROTOCOL_UART
++ * and so on.
++ * @cmd_input: command related data.
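++ *
++ * Illustrative call (an editor's sketch built only from the constants
++ * named above, not from the original patch): starting RX/TX on the
++ * first fast UCC in transparent HDLC mode would look like:
++ *
++ *	qe_issue_cmd(QE_INIT_TX_RX, QE_CR_SUBBLOCK_UCCFAST1,
++ *		     QE_CR_PROTOCOL_HDLC_TRANSPARENT, 0);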
++ */ + int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input) + { + unsigned long flags; + u8 mcn_shift = 0, dev_shift = 0; +- u32 ret; ++ int ret; ++ int i; + + spin_lock_irqsave(&qe_lock, flags); + if (cmd == QE_RESET) { +- out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG)); ++ iowrite32be((cmd | QE_CR_FLG), &qe_immr->cp.cecr); + } else { + if (cmd == QE_ASSIGN_PAGE) { + /* Here device is the SNUM, not sub-block */ +@@ -134,20 +144,26 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 + mcn_shift = QE_CR_MCN_NORMAL_SHIFT; + } + +- out_be32(&qe_immr->cp.cecdr, cmd_input); +- out_be32(&qe_immr->cp.cecr, +- (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32) +- mcn_protocol << mcn_shift)); ++ iowrite32be(cmd_input, &qe_immr->cp.cecdr); ++ iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) | ++ (u32)mcn_protocol << mcn_shift), &qe_immr->cp.cecr); + } + + /* wait for the QE_CR_FLG to clear */ +- ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0, +- 100, 0); ++ ret = -EIO; ++ for (i = 0; i < 100; i++) { ++ if ((ioread32be(&qe_immr->cp.cecr) & QE_CR_FLG) == 0) { ++ ret = 0; ++ break; ++ } ++ udelay(1); ++ } ++ + /* On timeout (e.g. failure), the expression will be false (ret == 0), + otherwise it will be true (ret == 1). */ + spin_unlock_irqrestore(&qe_lock, flags); + +- return ret == 1; ++ return ret; + } + EXPORT_SYMBOL(qe_issue_cmd); + +@@ -166,8 +182,8 @@ static unsigned int brg_clk = 0; + unsigned int qe_get_brg_clk(void) + { + struct device_node *qe; +- int size; +- const u32 *prop; ++ u32 val; ++ int ret; + + if (brg_clk) + return brg_clk; +@@ -179,9 +195,9 @@ unsigned int qe_get_brg_clk(void) + return brg_clk; + } + +- prop = of_get_property(qe, "brg-frequency", &size); +- if (prop && size == sizeof(*prop)) +- brg_clk = *prop; ++ ret = of_property_read_u32(qe, "brg-frequency", &val); ++ if (!ret) ++ brg_clk = val; + + of_node_put(qe); + +@@ -221,7 +237,7 @@ int qe_setbrg(enum qe_clock brg, unsigne + tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) | + QE_BRGC_ENABLE | div16; + +- out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval); ++ iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]); + + return 0; + } +@@ -355,9 +371,9 @@ static int qe_sdma_init(void) + return -ENOMEM; + } + +- out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK); +- out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK | +- (0x1 << QE_SDMR_CEN_SHIFT))); ++ iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK, &sdma->sdebcr); ++ iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)), ++ &sdma->sdmr); + + return 0; + } +@@ -395,14 +411,14 @@ static void qe_upload_microcode(const vo + "uploading microcode '%s'\n", ucode->id); + + /* Use auto-increment */ +- out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) | +- QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR); ++ iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE | ++ QE_IRAM_IADD_BADDR, &qe_immr->iram.iadd); + + for (i = 0; i < be32_to_cpu(ucode->count); i++) +- out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i])); ++ iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata); + + /* Set I-RAM Ready Register */ +- out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY)); ++ iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready); + } + + /* +@@ -487,7 +503,7 @@ int qe_upload_firmware(const struct qe_f + * If the microcode calls for it, split the I-RAM. 
+ */ + if (!firmware->split) +- setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR); ++ qe_setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR); + + if (firmware->soc.model) + printk(KERN_INFO +@@ -521,11 +537,11 @@ int qe_upload_firmware(const struct qe_f + u32 trap = be32_to_cpu(ucode->traps[j]); + + if (trap) +- out_be32(&qe_immr->rsp[i].tibcr[j], trap); ++ iowrite32be(trap, &qe_immr->rsp[i].tibcr[j]); + } + + /* Enable traps */ +- out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr)); ++ iowrite32be(be32_to_cpu(ucode->eccr), &qe_immr->rsp[i].eccr); + } + + qe_firmware_uploaded = 1; +@@ -644,9 +660,9 @@ EXPORT_SYMBOL(qe_get_num_of_risc); + unsigned int qe_get_num_of_snums(void) + { + struct device_node *qe; +- int size; + unsigned int num_of_snums; +- const u32 *prop; ++ u32 val; ++ int ret; + + num_of_snums = 28; /* The default number of snum for threads is 28 */ + qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); +@@ -660,9 +676,9 @@ unsigned int qe_get_num_of_snums(void) + return num_of_snums; + } + +- prop = of_get_property(qe, "fsl,qe-num-snums", &size); +- if (prop && size == sizeof(*prop)) { +- num_of_snums = *prop; ++ ret = of_property_read_u32(qe, "fsl,qe-num-snums", &val); ++ if (!ret) { ++ num_of_snums = val; + if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) { + /* No QE ever has fewer than 28 SNUMs */ + pr_err("QE: number of snum is invalid\n"); +--- a/drivers/soc/fsl/qe/qe_ic.h ++++ /dev/null +@@ -1,103 +0,0 @@ +-/* +- * drivers/soc/fsl/qe/qe_ic.h +- * +- * QUICC ENGINE Interrupt Controller Header +- * +- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. +- * +- * Author: Li Yang +- * Based on code from Shlomi Gridish +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License as published by the +- * Free Software Foundation; either version 2 of the License, or (at your +- * option) any later version. 
+- */ +-#ifndef _POWERPC_SYSDEV_QE_IC_H +-#define _POWERPC_SYSDEV_QE_IC_H +- +-#include +- +-#define NR_QE_IC_INTS 64 +- +-/* QE IC registers offset */ +-#define QEIC_CICR 0x00 +-#define QEIC_CIVEC 0x04 +-#define QEIC_CRIPNR 0x08 +-#define QEIC_CIPNR 0x0c +-#define QEIC_CIPXCC 0x10 +-#define QEIC_CIPYCC 0x14 +-#define QEIC_CIPWCC 0x18 +-#define QEIC_CIPZCC 0x1c +-#define QEIC_CIMR 0x20 +-#define QEIC_CRIMR 0x24 +-#define QEIC_CICNR 0x28 +-#define QEIC_CIPRTA 0x30 +-#define QEIC_CIPRTB 0x34 +-#define QEIC_CRICR 0x3c +-#define QEIC_CHIVEC 0x60 +- +-/* Interrupt priority registers */ +-#define CIPCC_SHIFT_PRI0 29 +-#define CIPCC_SHIFT_PRI1 26 +-#define CIPCC_SHIFT_PRI2 23 +-#define CIPCC_SHIFT_PRI3 20 +-#define CIPCC_SHIFT_PRI4 13 +-#define CIPCC_SHIFT_PRI5 10 +-#define CIPCC_SHIFT_PRI6 7 +-#define CIPCC_SHIFT_PRI7 4 +- +-/* CICR priority modes */ +-#define CICR_GWCC 0x00040000 +-#define CICR_GXCC 0x00020000 +-#define CICR_GYCC 0x00010000 +-#define CICR_GZCC 0x00080000 +-#define CICR_GRTA 0x00200000 +-#define CICR_GRTB 0x00400000 +-#define CICR_HPIT_SHIFT 8 +-#define CICR_HPIT_MASK 0x00000300 +-#define CICR_HP_SHIFT 24 +-#define CICR_HP_MASK 0x3f000000 +- +-/* CICNR */ +-#define CICNR_WCC1T_SHIFT 20 +-#define CICNR_ZCC1T_SHIFT 28 +-#define CICNR_YCC1T_SHIFT 12 +-#define CICNR_XCC1T_SHIFT 4 +- +-/* CRICR */ +-#define CRICR_RTA1T_SHIFT 20 +-#define CRICR_RTB1T_SHIFT 28 +- +-/* Signal indicator */ +-#define SIGNAL_MASK 3 +-#define SIGNAL_HIGH 2 +-#define SIGNAL_LOW 0 +- +-struct qe_ic { +- /* Control registers offset */ +- volatile u32 __iomem *regs; +- +- /* The remapper for this QEIC */ +- struct irq_domain *irqhost; +- +- /* The "linux" controller struct */ +- struct irq_chip hc_irq; +- +- /* VIRQ numbers of QE high/low irqs */ +- unsigned int virq_high; +- unsigned int virq_low; +-}; +- +-/* +- * QE interrupt controller internal structure +- */ +-struct qe_ic_info { +- u32 mask; /* location of this source at the QIMR register. */ +- u32 mask_reg; /* Mask register offset */ +- u8 pri_code; /* for grouped interrupts sources - the interrupt +- code as appears at the group priority register */ +- u32 pri_reg; /* Group priority register offset */ +-}; +- +-#endif /* _POWERPC_SYSDEV_QE_IC_H */ +--- a/drivers/soc/fsl/qe/qe_io.c ++++ b/drivers/soc/fsl/qe/qe_io.c +@@ -22,8 +22,6 @@ + + #include + #include +-#include +-#include + + #undef DEBUG + +@@ -61,16 +59,16 @@ void __par_io_config_pin(struct qe_pio_r + pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1))); + + /* Set open drain, if required */ +- tmp_val = in_be32(&par_io->cpodr); ++ tmp_val = ioread32be(&par_io->cpodr); + if (open_drain) +- out_be32(&par_io->cpodr, pin_mask1bit | tmp_val); ++ iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr); + else +- out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val); ++ iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr); + + /* define direction */ + tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ? 
+- in_be32(&par_io->cpdir2) : +- in_be32(&par_io->cpdir1); ++ ioread32be(&par_io->cpdir2) : ++ ioread32be(&par_io->cpdir1); + + /* get all bits mask for 2 bit per port */ + pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS - +@@ -82,34 +80,30 @@ void __par_io_config_pin(struct qe_pio_r + + /* clear and set 2 bits mask */ + if (pin > (QE_PIO_PINS / 2) - 1) { +- out_be32(&par_io->cpdir2, +- ~pin_mask2bits & tmp_val); ++ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2); + tmp_val &= ~pin_mask2bits; +- out_be32(&par_io->cpdir2, new_mask2bits | tmp_val); ++ iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2); + } else { +- out_be32(&par_io->cpdir1, +- ~pin_mask2bits & tmp_val); ++ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1); + tmp_val &= ~pin_mask2bits; +- out_be32(&par_io->cpdir1, new_mask2bits | tmp_val); ++ iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1); + } + /* define pin assignment */ + tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ? +- in_be32(&par_io->cppar2) : +- in_be32(&par_io->cppar1); ++ ioread32be(&par_io->cppar2) : ++ ioread32be(&par_io->cppar1); + + new_mask2bits = (u32) (assignment << (QE_PIO_PINS - + (pin % (QE_PIO_PINS / 2) + 1) * 2)); + /* clear and set 2 bits mask */ + if (pin > (QE_PIO_PINS / 2) - 1) { +- out_be32(&par_io->cppar2, +- ~pin_mask2bits & tmp_val); ++ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2); + tmp_val &= ~pin_mask2bits; +- out_be32(&par_io->cppar2, new_mask2bits | tmp_val); ++ iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2); + } else { +- out_be32(&par_io->cppar1, +- ~pin_mask2bits & tmp_val); ++ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1); + tmp_val &= ~pin_mask2bits; +- out_be32(&par_io->cppar1, new_mask2bits | tmp_val); ++ iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1); + } + } + EXPORT_SYMBOL(__par_io_config_pin); +@@ -137,12 +131,12 @@ int par_io_data_set(u8 port, u8 pin, u8 + /* calculate pin location */ + pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin)); + +- tmp_val = in_be32(&par_io[port].cpdata); ++ tmp_val = ioread32be(&par_io[port].cpdata); + + if (val == 0) /* clear */ +- out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val); ++ iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata); + else /* set */ +- out_be32(&par_io[port].cpdata, pin_mask | tmp_val); ++ iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata); + + return 0; + } +--- a/drivers/soc/fsl/qe/qe_tdm.c ++++ b/drivers/soc/fsl/qe/qe_tdm.c +@@ -227,10 +227,10 @@ void ucc_tdm_init(struct ucc_tdm *utdm, + &siram[siram_entry_id * 32 + 0x200 + i]); + } + +- setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)], +- SIR_LAST); +- setbits16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)], +- SIR_LAST); ++ qe_setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)], ++ SIR_LAST); ++ qe_setbits16(&siram[(siram_entry_id * 32) + 0x200 + ++ (utdm->num_of_ts - 1)], SIR_LAST); + + /* Set SIxMR register */ + sixmr = SIMR_SAD(siram_entry_id); +--- a/drivers/soc/fsl/qe/ucc.c ++++ b/drivers/soc/fsl/qe/ucc.c +@@ -39,7 +39,7 @@ int ucc_set_qe_mux_mii_mng(unsigned int + return -EINVAL; + + spin_lock_irqsave(&cmxgcr_lock, flags); +- clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG, ++ qe_clrsetbits32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG, + ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT); + spin_unlock_irqrestore(&cmxgcr_lock, flags); + +@@ -84,7 +84,7 @@ int ucc_set_type(unsigned int ucc_num, e + return -EINVAL; + } + +- clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK, ++ qe_clrsetbits8(guemr, UCC_GUEMR_MODE_MASK, + 
UCC_GUEMR_SET_RESERVED3 | speed); + + return 0; +@@ -113,9 +113,9 @@ int ucc_mux_set_grant_tsa_bkpt(unsigned + get_cmxucr_reg(ucc_num, &cmxucr, ®_num, &shift); + + if (set) +- setbits32(cmxucr, mask << shift); ++ qe_setbits32(cmxucr, mask << shift); + else +- clrbits32(cmxucr, mask << shift); ++ qe_clrbits32(cmxucr, mask << shift); + + return 0; + } +@@ -211,7 +211,7 @@ int ucc_set_qe_mux_rxtx(unsigned int ucc + if (mode == COMM_DIR_RX) + shift += 4; + +- clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift, ++ qe_clrsetbits32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift, + clock_bits << shift); + + return 0; +--- a/drivers/soc/fsl/qe/ucc_fast.c ++++ b/drivers/soc/fsl/qe/ucc_fast.c +@@ -33,41 +33,41 @@ void ucc_fast_dump_regs(struct ucc_fast_ + printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs); + + printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n", +- &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr)); ++ &uccf->uf_regs->gumr, ioread32be(&uccf->uf_regs->gumr)); + printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n", +- &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr)); ++ &uccf->uf_regs->upsmr, ioread32be(&uccf->uf_regs->upsmr)); + printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n", +- &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr)); ++ &uccf->uf_regs->utodr, ioread16be(&uccf->uf_regs->utodr)); + printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n", +- &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr)); ++ &uccf->uf_regs->udsr, ioread16be(&uccf->uf_regs->udsr)); + printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n", +- &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce)); ++ &uccf->uf_regs->ucce, ioread32be(&uccf->uf_regs->ucce)); + printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n", +- &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm)); ++ &uccf->uf_regs->uccm, ioread32be(&uccf->uf_regs->uccm)); + printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n", +- &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs)); ++ &uccf->uf_regs->uccs, ioread8(&uccf->uf_regs->uccs)); + printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n", +- &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb)); ++ &uccf->uf_regs->urfb, ioread32be(&uccf->uf_regs->urfb)); + printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n", +- &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs)); ++ &uccf->uf_regs->urfs, ioread16be(&uccf->uf_regs->urfs)); + printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n", +- &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet)); ++ &uccf->uf_regs->urfet, ioread16be(&uccf->uf_regs->urfet)); + printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n", +- &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset)); ++ &uccf->uf_regs->urfset, ioread16be(&uccf->uf_regs->urfset)); + printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n", +- &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb)); ++ &uccf->uf_regs->utfb, ioread32be(&uccf->uf_regs->utfb)); + printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n", +- &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs)); ++ &uccf->uf_regs->utfs, ioread16be(&uccf->uf_regs->utfs)); + printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n", +- &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet)); ++ &uccf->uf_regs->utfet, ioread16be(&uccf->uf_regs->utfet)); + printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n", +- &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt)); ++ &uccf->uf_regs->utftt, ioread16be(&uccf->uf_regs->utftt)); + printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n", +- &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt)); ++ &uccf->uf_regs->utpt, 
ioread16be(&uccf->uf_regs->utpt)); + printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n", +- &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry)); ++ &uccf->uf_regs->urtry, ioread32be(&uccf->uf_regs->urtry)); + printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n", +- &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr)); ++ &uccf->uf_regs->guemr, ioread8(&uccf->uf_regs->guemr)); + } + EXPORT_SYMBOL(ucc_fast_dump_regs); + +@@ -89,7 +89,7 @@ EXPORT_SYMBOL(ucc_fast_get_qe_cr_subbloc + + void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf) + { +- out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD); ++ iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr); + } + EXPORT_SYMBOL(ucc_fast_transmit_on_demand); + +@@ -101,7 +101,7 @@ void ucc_fast_enable(struct ucc_fast_pri + uf_regs = uccf->uf_regs; + + /* Enable reception and/or transmission on this UCC. */ +- gumr = in_be32(&uf_regs->gumr); ++ gumr = ioread32be(&uf_regs->gumr); + if (mode & COMM_DIR_TX) { + gumr |= UCC_FAST_GUMR_ENT; + uccf->enabled_tx = 1; +@@ -110,7 +110,7 @@ void ucc_fast_enable(struct ucc_fast_pri + gumr |= UCC_FAST_GUMR_ENR; + uccf->enabled_rx = 1; + } +- out_be32(&uf_regs->gumr, gumr); ++ iowrite32be(gumr, &uf_regs->gumr); + } + EXPORT_SYMBOL(ucc_fast_enable); + +@@ -122,7 +122,7 @@ void ucc_fast_disable(struct ucc_fast_pr + uf_regs = uccf->uf_regs; + + /* Disable reception and/or transmission on this UCC. */ +- gumr = in_be32(&uf_regs->gumr); ++ gumr = ioread32be(&uf_regs->gumr); + if (mode & COMM_DIR_TX) { + gumr &= ~UCC_FAST_GUMR_ENT; + uccf->enabled_tx = 0; +@@ -131,7 +131,7 @@ void ucc_fast_disable(struct ucc_fast_pr + gumr &= ~UCC_FAST_GUMR_ENR; + uccf->enabled_rx = 0; + } +- out_be32(&uf_regs->gumr, gumr); ++ iowrite32be(gumr, &uf_regs->gumr); + } + EXPORT_SYMBOL(ucc_fast_disable); + +@@ -263,12 +263,13 @@ int ucc_fast_init(struct ucc_fast_info * + gumr |= uf_info->tenc; + gumr |= uf_info->tcrc; + gumr |= uf_info->mode; +- out_be32(&uf_regs->gumr, gumr); ++ iowrite32be(gumr, &uf_regs->gumr); + + /* Allocate memory for Tx Virtual Fifo */ + uccf->ucc_fast_tx_virtual_fifo_base_offset = + qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT); +- if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) { ++ if (IS_ERR_VALUE((unsigned long)uccf-> ++ ucc_fast_tx_virtual_fifo_base_offset)) { + printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n", + __func__); + uccf->ucc_fast_tx_virtual_fifo_base_offset = 0; +@@ -281,7 +282,8 @@ int ucc_fast_init(struct ucc_fast_info * + qe_muram_alloc(uf_info->urfs + + UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR, + UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT); +- if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) { ++ if (IS_ERR_VALUE((unsigned long)uccf-> ++ ucc_fast_rx_virtual_fifo_base_offset)) { + printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n", + __func__); + uccf->ucc_fast_rx_virtual_fifo_base_offset = 0; +@@ -290,15 +292,15 @@ int ucc_fast_init(struct ucc_fast_info * + } + + /* Set Virtual Fifo registers */ +- out_be16(&uf_regs->urfs, uf_info->urfs); +- out_be16(&uf_regs->urfet, uf_info->urfet); +- out_be16(&uf_regs->urfset, uf_info->urfset); +- out_be16(&uf_regs->utfs, uf_info->utfs); +- out_be16(&uf_regs->utfet, uf_info->utfet); +- out_be16(&uf_regs->utftt, uf_info->utftt); ++ iowrite16be(uf_info->urfs, &uf_regs->urfs); ++ iowrite16be(uf_info->urfet, &uf_regs->urfet); ++ iowrite16be(uf_info->urfset, &uf_regs->urfset); ++ iowrite16be(uf_info->utfs, &uf_regs->utfs); ++ iowrite16be(uf_info->utfet, &uf_regs->utfet); ++ iowrite16be(uf_info->utftt, 
&uf_regs->utftt); + /* utfb, urfb are offsets from MURAM base */ +- out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset); +- out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset); ++ iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb); ++ iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb); + + /* Mux clocking */ + /* Grant Support */ +@@ -366,14 +368,14 @@ int ucc_fast_init(struct ucc_fast_info * + } + + /* Set interrupt mask register at UCC level. */ +- out_be32(&uf_regs->uccm, uf_info->uccm_mask); ++ iowrite32be(uf_info->uccm_mask, &uf_regs->uccm); + + /* First, clear anything pending at UCC level, + * otherwise, old garbage may come through + * as soon as the dam is opened. */ + + /* Writing '1' clears */ +- out_be32(&uf_regs->ucce, 0xffffffff); ++ iowrite32be(0xffffffff, &uf_regs->ucce); + + *uccf_ret = uccf; + return 0; +--- a/drivers/tty/serial/ucc_uart.c ++++ b/drivers/tty/serial/ucc_uart.c +@@ -34,6 +34,7 @@ + #include + + #include ++#include + #include + + /* +--- a/include/soc/fsl/qe/qe.h ++++ b/include/soc/fsl/qe/qe.h +@@ -21,7 +21,6 @@ + #include + #include + #include +-#include + #include + #include + #include +--- a/include/soc/fsl/qe/qe_ic.h ++++ /dev/null +@@ -1,139 +0,0 @@ +-/* +- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. +- * +- * Authors: Shlomi Gridish +- * Li Yang +- * +- * Description: +- * QE IC external definitions and structure. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License as published by the +- * Free Software Foundation; either version 2 of the License, or (at your +- * option) any later version. +- */ +-#ifndef _ASM_POWERPC_QE_IC_H +-#define _ASM_POWERPC_QE_IC_H +- +-#include +- +-struct device_node; +-struct qe_ic; +- +-#define NUM_OF_QE_IC_GROUPS 6 +- +-/* Flags when we init the QE IC */ +-#define QE_IC_SPREADMODE_GRP_W 0x00000001 +-#define QE_IC_SPREADMODE_GRP_X 0x00000002 +-#define QE_IC_SPREADMODE_GRP_Y 0x00000004 +-#define QE_IC_SPREADMODE_GRP_Z 0x00000008 +-#define QE_IC_SPREADMODE_GRP_RISCA 0x00000010 +-#define QE_IC_SPREADMODE_GRP_RISCB 0x00000020 +- +-#define QE_IC_LOW_SIGNAL 0x00000100 +-#define QE_IC_HIGH_SIGNAL 0x00000200 +- +-#define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH 0x00001000 +-#define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH 0x00002000 +-#define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH 0x00004000 +-#define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH 0x00008000 +-#define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH 0x00010000 +-#define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH 0x00020000 +-#define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH 0x00040000 +-#define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH 0x00080000 +-#define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH 0x00100000 +-#define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH 0x00200000 +-#define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH 0x00400000 +-#define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH 0x00800000 +-#define QE_IC_GRP_W_DEST_SIGNAL_SHIFT (12) +- +-/* QE interrupt sources groups */ +-enum qe_ic_grp_id { +- QE_IC_GRP_W = 0, /* QE interrupt controller group W */ +- QE_IC_GRP_X, /* QE interrupt controller group X */ +- QE_IC_GRP_Y, /* QE interrupt controller group Y */ +- QE_IC_GRP_Z, /* QE interrupt controller group Z */ +- QE_IC_GRP_RISCA, /* QE interrupt controller RISC group A */ +- QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */ +-}; +- +-#ifdef CONFIG_QUICC_ENGINE +-void qe_ic_init(struct device_node *node, unsigned int flags, +- void (*low_handler)(struct 
irq_desc *desc),
+-		void (*high_handler)(struct irq_desc *desc));
+-unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
+-unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
+-#else
+-static inline void qe_ic_init(struct device_node *node, unsigned int flags,
+-		void (*low_handler)(struct irq_desc *desc),
+-		void (*high_handler)(struct irq_desc *desc))
+-{}
+-static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
+-{ return 0; }
+-static inline unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
+-{ return 0; }
+-#endif /* CONFIG_QUICC_ENGINE */
+-
+-void qe_ic_set_highest_priority(unsigned int virq, int high);
+-int qe_ic_set_priority(unsigned int virq, unsigned int priority);
+-int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
+-
+-static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc)
+-{
+-	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+-	unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
+-
+-	if (cascade_irq != NO_IRQ)
+-		generic_handle_irq(cascade_irq);
+-}
+-
+-static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc)
+-{
+-	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+-	unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
+-
+-	if (cascade_irq != NO_IRQ)
+-		generic_handle_irq(cascade_irq);
+-}
+-
+-static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc)
+-{
+-	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+-	unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
+-	struct irq_chip *chip = irq_desc_get_chip(desc);
+-
+-	if (cascade_irq != NO_IRQ)
+-		generic_handle_irq(cascade_irq);
+-
+-	chip->irq_eoi(&desc->irq_data);
+-}
+-
+-static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc)
+-{
+-	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+-	unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
+-	struct irq_chip *chip = irq_desc_get_chip(desc);
+-
+-	if (cascade_irq != NO_IRQ)
+-		generic_handle_irq(cascade_irq);
+-
+-	chip->irq_eoi(&desc->irq_data);
+-}
+-
+-static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
+-{
+-	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+-	unsigned int cascade_irq;
+-	struct irq_chip *chip = irq_desc_get_chip(desc);
+-
+-	cascade_irq = qe_ic_get_high_irq(qe_ic);
+-	if (cascade_irq == NO_IRQ)
+-		cascade_irq = qe_ic_get_low_irq(qe_ic);
+-
+-	if (cascade_irq != NO_IRQ)
+-		generic_handle_irq(cascade_irq);
+-
+-	chip->irq_eoi(&desc->irq_data);
+-}
+-
+-#endif /* _ASM_POWERPC_QE_IC_H */
diff --git a/target/linux/layerscape/patches-4.9/814-rtc-support-layerscape.patch b/target/linux/layerscape/patches-4.9/814-rtc-support-layerscape.patch
new file mode 100644
index 000000000..0c726fbb7
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/814-rtc-support-layerscape.patch
@@ -0,0 +1,682 @@
+From 7e7944c484954ff7b5d53047194e59bfffd1540a Mon Sep 17 00:00:00 2001
+From: Yangbo Lu
+Date: Mon, 25 Sep 2017 12:20:55 +0800
+Subject: [PATCH] rtc: support layerscape
+
+This is an integrated patch for layerscape rtc support.
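+
+For reference, a minimal user-space sketch (an editor's illustration,
+not part of the original submission) of the 12-hour BCD hour encoding
+the driver below documents (BCD hour in the low bits, PM flag in bit 5,
+e.g. 23:00 <=> 0x31); bcd2bin() here is a local stand-in for the kernel
+helper of the same name:
+
+	#include <assert.h>
+	#include <stdint.h>
+
+	/* decode one BCD byte, e.g. 0x11 -> 11 */
+	static unsigned int bcd2bin(uint8_t v) { return (v >> 4) * 10 + (v & 0xf); }
+
+	static int bcd12h_to_bin24h(int reg)
+	{
+		int hr = bcd2bin(reg & 0x1f);	/* low 5 bits: BCD hour 1-12 */
+		int pm = reg & 0x20;		/* bit 5: PM flag */
+
+		if (hr == 12)			/* 12AM is midnight, 12PM is noon */
+			return pm ? 12 : 0;
+		return pm ? hr + 12 : hr;
+	}
+
+	int main(void)
+	{
+		assert(bcd12h_to_bin24h(0x31) == 23);	/* 11PM */
+		assert(bcd12h_to_bin24h(0x12) == 0);	/* 12AM */
+		assert(bcd12h_to_bin24h(0x32) == 12);	/* 12PM */
+		assert(bcd12h_to_bin24h(0x21) == 13);	/* 1PM */
+		return 0;
+	}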
+
+Signed-off-by: Zhang Ying-22455
+Signed-off-by: Yangbo Lu
+---
+ drivers/rtc/rtc-pcf85263.c | 665 +++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 665 insertions(+)
+ create mode 100644 drivers/rtc/rtc-pcf85263.c
+
+--- /dev/null
++++ b/drivers/rtc/rtc-pcf85263.c
+@@ -0,0 +1,665 @@
++/*
++ * rtc-pcf85263 Driver for the NXP PCF85263 RTC
++ * Copyright 2016 Parkeon
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++
++#define DRV_NAME "rtc-pcf85263"
++
++/* Quartz capacitance */
++#define PCF85263_QUARTZCAP_7pF		0
++#define PCF85263_QUARTZCAP_6pF		1
++#define PCF85263_QUARTZCAP_12p5pF	2
++
++/* Quartz drive strength */
++#define PCF85263_QUARTZDRIVE_NORMAL	0
++#define PCF85263_QUARTZDRIVE_LOW	1
++#define PCF85263_QUARTZDRIVE_HIGH	2
++
++
++#define PCF85263_REG_RTC_SC		0x01	/* Seconds */
++#define PCF85263_REG_RTC_SC_OS		BIT(7)	/* Oscillator stopped flag */
++
++#define PCF85263_REG_RTC_MN		0x02	/* Minutes */
++#define PCF85263_REG_RTC_HR		0x03	/* Hours */
++#define PCF85263_REG_RTC_DT		0x04	/* Day of month 1-31 */
++#define PCF85263_REG_RTC_DW		0x05	/* Day of week 0-6 */
++#define PCF85263_REG_RTC_MO		0x06	/* Month 1-12 */
++#define PCF85263_REG_RTC_YR		0x07	/* Year 0-99 */
++
++#define PCF85263_REG_ALM1_SC		0x08	/* Seconds */
++#define PCF85263_REG_ALM1_MN		0x09	/* Minutes */
++#define PCF85263_REG_ALM1_HR		0x0a	/* Hours */
++#define PCF85263_REG_ALM1_DT		0x0b	/* Day of month 1-31 */
++#define PCF85263_REG_ALM1_MO		0x0c	/* Month 1-12 */
++
++#define PCF85263_REG_ALM_CTL		0x10
++#define PCF85263_REG_ALM_CTL_ALL_A1E	0x1f	/* sec,min,hr,day,mon alarm 1 */
++
++#define PCF85263_REG_OSC		0x25
++#define PCF85263_REG_OSC_CL_MASK	(BIT(0) | BIT(1))
++#define PCF85263_REG_OSC_CL_SHIFT	0
++#define PCF85263_REG_OSC_OSCD_MASK	(BIT(2) | BIT(3))
++#define PCF85263_REG_OSC_OSCD_SHIFT	2
++#define PCF85263_REG_OSC_LOWJ		BIT(4)
++#define PCF85263_REG_OSC_12H		BIT(5)
++
++#define PCF85263_REG_PINIO		0x27
++#define PCF85263_REG_PINIO_INTAPM_MASK	(BIT(0) | BIT(1))
++#define PCF85263_REG_PINIO_INTAPM_SHIFT	0
++#define PCF85263_INTAPM_INTA		(0x2 << PCF85263_REG_PINIO_INTAPM_SHIFT)
++#define PCF85263_INTAPM_HIGHZ		(0x3 << PCF85263_REG_PINIO_INTAPM_SHIFT)
++#define PCF85263_REG_PINIO_TSPM_MASK	(BIT(2) | BIT(3))
++#define PCF85263_REG_PINIO_TSPM_SHIFT	2
++#define PCF85263_TSPM_DISABLED		(0x0 << PCF85263_REG_PINIO_TSPM_SHIFT)
++#define PCF85263_TSPM_INTB		(0x1 << PCF85263_REG_PINIO_TSPM_SHIFT)
++#define PCF85263_REG_PINIO_CLKDISABLE	BIT(7)
++
++#define PCF85263_REG_FUNCTION		0x28
++#define PCF85263_REG_FUNCTION_COF_MASK	0x7
++#define PCF85263_REG_FUNCTION_COF_OFF	0x7	/* No clock output */
++
++#define PCF85263_REG_INTA_CTL		0x29
++#define PCF85263_REG_INTB_CTL		0x2A
++#define PCF85263_REG_INTx_CTL_A1E	BIT(4)	/* Alarm 1 */
++#define PCF85263_REG_INTx_CTL_ILP	BIT(7)	/* 0=pulse, 1=level */
++
++#define PCF85263_REG_FLAGS		0x2B
++#define PCF85263_REG_FLAGS_A1F		BIT(5)
++
++#define PCF85263_REG_RAM_BYTE		0x2c
++
++#define PCF85263_REG_STOPENABLE	0x2e
++#define PCF85263_REG_STOPENABLE_STOP	BIT(0)
++
++#define PCF85263_REG_RESET		0x2f	/* Reset command */
++#define PCF85263_REG_RESET_CMD_CPR	0xa4	/* Clear prescaler */
++
++#define PCF85263_MAX_REG		0x2f
++
++#define PCF85263_HR_PM			BIT(5)
++
++enum pcf85263_irqpin {
++	PCF85263_IRQPIN_NONE,
++	PCF85263_IRQPIN_INTA,
++	PCF85263_IRQPIN_INTB
++};
++
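++/*
++ * Editor's note (illustrative, not from the original driver): with the
++ * PINIO field definitions above, routing the alarm interrupt to the
++ * INTA pin while keeping the timestamp pin disabled and the clock
++ * output off composes as:
++ *
++ *	u8 pinio = PCF85263_INTAPM_INTA | PCF85263_TSPM_DISABLED |
++ *		   PCF85263_REG_PINIO_CLKDISABLE;
++ *
++ * which evaluates to 0x82: INTAPM = 0b10 (INTA), TSPM = 0b00
++ * (disabled), and bit 7 set (clock pin output disabled).
++ */
++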
++static const char *const pcf85263_irqpin_names[] = {
++	[PCF85263_IRQPIN_NONE] = "None",
++	[PCF85263_IRQPIN_INTA] = "INTA",
++	[PCF85263_IRQPIN_INTB] = "INTB"
++};
++
++struct pcf85263 {
++	struct device *dev;
++	struct rtc_device *rtc;
++	struct regmap *regmap;
++	enum pcf85263_irqpin irq_pin;
++	int irq;
++	bool mode_12h;
++};
++
++/*
++ * Helpers to convert 12h to 24h and vice versa.
++ * Values in register are stored in BCD with a PM flag in bit 5
++ *
++ * 23:00 <=> 11PM <=> 0x31
++ * 00:00 <=> 12AM <=> 0x12
++ * 01:00 <=> 1AM <=> 0x01
++ * 12:00 <=> 12PM <=> 0x32
++ * 13:00 <=> 1PM <=> 0x21
++ */
++static int pcf85263_bcd12h_to_bin24h(int regval)
++{
++	int hr = bcd2bin(regval & 0x1f);
++	bool pm = regval & PCF85263_HR_PM;
++
++	if (hr == 12)
++		return pm ? 12 : 0;
++
++	return pm ? hr + 12 : hr;
++}
++
++static int pcf85263_bin24h_to_bcd12h(int hr24)
++{
++	bool pm = hr24 >= 12;
++	int hr12 = hr24 % 12;
++
++	/* 0 and 12 o'clock are both stored as BCD 12, per the table above */
++	if (!hr12)
++		hr12 = 12;
++
++	return bin2bcd(hr12) | (pm ? PCF85263_HR_PM : 0);
++}
++
++static int pcf85263_read_time(struct device *dev, struct rtc_time *tm)
++{
++	struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
++	const int first = PCF85263_REG_RTC_SC;
++	const int last = PCF85263_REG_RTC_YR;
++	const int len = last - first + 1;
++	u8 regs[len];
++	u8 hr_reg;
++	int ret;
++
++	ret = regmap_bulk_read(pcf85263->regmap, first, regs, len);
++	if (ret)
++		return ret;
++
++	if (regs[PCF85263_REG_RTC_SC - first] & PCF85263_REG_RTC_SC_OS) {
++		dev_warn(dev, "Oscillator stop detected, date/time is not reliable.\n");
++		return -EINVAL;
++	}
++
++	tm->tm_sec = bcd2bin(regs[PCF85263_REG_RTC_SC - first] & 0x7f);
++	tm->tm_min = bcd2bin(regs[PCF85263_REG_RTC_MN - first] & 0x7f);
++
++	hr_reg = regs[PCF85263_REG_RTC_HR - first];
++	if (pcf85263->mode_12h)
++		tm->tm_hour = pcf85263_bcd12h_to_bin24h(hr_reg);
++	else
++		tm->tm_hour = bcd2bin(hr_reg & 0x3f);
++
++	tm->tm_mday = bcd2bin(regs[PCF85263_REG_RTC_DT - first]);
++	tm->tm_wday = bcd2bin(regs[PCF85263_REG_RTC_DW - first]);
++	tm->tm_mon = bcd2bin(regs[PCF85263_REG_RTC_MO - first]) - 1;
++	tm->tm_year = bcd2bin(regs[PCF85263_REG_RTC_YR - first]);
++
++	tm->tm_year += 100;	/* Assume 21st century */
++
++	return 0;
++}
++
++static int pcf85263_set_time(struct device *dev, struct rtc_time *tm)
++{
++	struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
++
++	/*
++	 * Before setting time need to stop RTC and disable prescaler
++	 * Do this all in a single I2C transaction exploiting wraparound
++	 * as described in data sheet.
++	 * This means that the array below must be in register order
++	 */
++	u8 regs[] = {
++		PCF85263_REG_STOPENABLE_STOP,	/* STOP */
++		PCF85263_REG_RESET_CMD_CPR,	/* Disable prescaler */
++		/* Wrap around to register 0 (1/100s) */
++		0,	/* 1/100s always zero.
*/
++ bin2bcd(tm->tm_sec),
++ bin2bcd(tm->tm_min),
++ bin2bcd(tm->tm_hour), /* 24-hour */
++ bin2bcd(tm->tm_mday),
++ bin2bcd(tm->tm_wday + 1),
++ bin2bcd(tm->tm_mon + 1),
++ bin2bcd(tm->tm_year % 100)
++ };
++ int ret;
++
++ ret = regmap_bulk_write(pcf85263->regmap, PCF85263_REG_STOPENABLE,
++ regs, sizeof(regs));
++ if (ret)
++ return ret;
++
++ /* As we have just set the time in 24H mode, update the hardware for that */
++ if (pcf85263->mode_12h) {
++ pcf85263->mode_12h = false;
++ ret = regmap_update_bits(pcf85263->regmap, PCF85263_REG_OSC,
++ PCF85263_REG_OSC_12H, 0);
++ if (ret)
++ return ret;
++ }
++
++ /* Start it again */
++ return regmap_write(pcf85263->regmap, PCF85263_REG_STOPENABLE, 0);
++}
++
++static int pcf85263_enable_alarm(struct pcf85263 *pcf85263, bool enable)
++{
++ int reg;
++ int ret;
++
++ ret = regmap_update_bits(pcf85263->regmap, PCF85263_REG_ALM_CTL,
++ PCF85263_REG_ALM_CTL_ALL_A1E,
++ enable ? PCF85263_REG_ALM_CTL_ALL_A1E : 0);
++ if (ret)
++ return ret;
++
++ switch (pcf85263->irq_pin) {
++ case PCF85263_IRQPIN_NONE:
++ return 0;
++
++ case PCF85263_IRQPIN_INTA:
++ reg = PCF85263_REG_INTA_CTL;
++ break;
++
++ case PCF85263_IRQPIN_INTB:
++ reg = PCF85263_REG_INTB_CTL;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ return regmap_update_bits(pcf85263->regmap, reg,
++ PCF85263_REG_INTx_CTL_A1E,
++ enable ? PCF85263_REG_INTx_CTL_A1E : 0);
++}
++
++static int pcf85263_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
++{
++ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
++ struct rtc_time *tm = &alarm->time;
++ const int first = PCF85263_REG_ALM1_SC;
++ const int last = PCF85263_REG_ALM1_MO;
++ const int len = last - first + 1;
++ u8 regs[len];
++ u8 hr_reg;
++ unsigned int regval;
++ int ret;
++
++ ret = regmap_bulk_read(pcf85263->regmap, first, regs, len);
++ if (ret)
++ return ret;
++
++ tm->tm_sec = bcd2bin(regs[PCF85263_REG_ALM1_SC - first] & 0x7f);
++ tm->tm_min = bcd2bin(regs[PCF85263_REG_ALM1_MN - first] & 0x7f);
++
++ hr_reg = regs[PCF85263_REG_ALM1_HR - first];
++ if (pcf85263->mode_12h)
++ tm->tm_hour = pcf85263_bcd12h_to_bin24h(hr_reg);
++ else
++ tm->tm_hour = bcd2bin(hr_reg & 0x3f);
++
++ tm->tm_mday = bcd2bin(regs[PCF85263_REG_ALM1_DT - first]);
++ tm->tm_mon = bcd2bin(regs[PCF85263_REG_ALM1_MO - first]) - 1;
++ tm->tm_year = -1;
++ tm->tm_wday = -1;
++
++ ret = regmap_read(pcf85263->regmap, PCF85263_REG_ALM_CTL, &regval);
++ if (ret)
++ return ret;
++ alarm->enabled = !!(regval & PCF85263_REG_ALM_CTL_ALL_A1E);
++
++ ret = regmap_read(pcf85263->regmap, PCF85263_REG_FLAGS, &regval);
++ if (ret)
++ return ret;
++ alarm->pending = !!(regval & PCF85263_REG_FLAGS_A1F);
++
++ return 0;
++}
++
++static int pcf85263_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
++{
++ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
++ struct rtc_time *tm = &alarm->time;
++ const int first = PCF85263_REG_ALM1_SC;
++ const int last = PCF85263_REG_ALM1_MO;
++ const int len = last - first + 1;
++ u8 regs[len];
++ int ret;
++
++ /* Disable alarm comparison during update */
++ ret = pcf85263_enable_alarm(pcf85263, false);
++ if (ret)
++ return ret;
++
++ /* Clear any pending alarm (write 0=>clr, 1=>no change) */
++ ret = regmap_write(pcf85263->regmap, PCF85263_REG_FLAGS,
++ (unsigned int)(~PCF85263_REG_FLAGS_A1F));
++ if (ret)
++ return ret;
++
++ /* Set the alarm time registers */
++ regs[PCF85263_REG_ALM1_SC - first] = bin2bcd(tm->tm_sec);
++ regs[PCF85263_REG_ALM1_MN - first] = bin2bcd(tm->tm_min);
++ regs[PCF85263_REG_ALM1_HR - first] = pcf85263->mode_12h ? 
++ pcf85263_bin24h_to_bcd12h(tm->tm_hour) :
++ bin2bcd(tm->tm_hour);
++ regs[PCF85263_REG_ALM1_DT - first] = bin2bcd(tm->tm_mday);
++ regs[PCF85263_REG_ALM1_MO - first] = bin2bcd(tm->tm_mon + 1);
++
++ ret = regmap_bulk_write(pcf85263->regmap, first, regs, sizeof(regs));
++ if (ret)
++ return ret;
++
++ if (alarm->enabled)
++ ret = pcf85263_enable_alarm(pcf85263, true);
++
++ return ret;
++}
++
++static int pcf85263_alarm_irq_enable(struct device *dev, unsigned int enable)
++{
++ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
++
++ return pcf85263_enable_alarm(pcf85263, !!enable);
++}
++
++static irqreturn_t pcf85263_irq(int irq, void *data)
++{
++ struct pcf85263 *pcf85263 = data;
++ unsigned int regval;
++ int ret;
++
++ ret = regmap_read(pcf85263->regmap, PCF85263_REG_FLAGS, &regval);
++ if (ret)
++ return IRQ_NONE;
++
++ if (regval & PCF85263_REG_FLAGS_A1F) {
++ regmap_write(pcf85263->regmap, PCF85263_REG_FLAGS,
++ (unsigned int)(~PCF85263_REG_FLAGS_A1F));
++
++ rtc_update_irq(pcf85263->rtc, 1, RTC_IRQF | RTC_AF);
++
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int pcf85263_check_osc_stopped(struct pcf85263 *pcf85263)
++{
++ unsigned int regval;
++ int ret;
++
++ ret = regmap_read(pcf85263->regmap, PCF85263_REG_RTC_SC, &regval);
++ if (ret)
++ return ret;
++
++ ret = regval & PCF85263_REG_RTC_SC_OS ? 1 : 0;
++ if (ret)
++ dev_warn(pcf85263->dev, "Oscillator stop detected, date/time is not reliable.\n");
++
++ return ret;
++}
++
++#ifdef CONFIG_RTC_INTF_DEV
++static int pcf85263_ioctl(struct device *dev,
++ unsigned int cmd, unsigned long arg)
++{
++ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
++ int ret;
++
++ switch (cmd) {
++ case RTC_VL_READ:
++ ret = pcf85263_check_osc_stopped(pcf85263);
++ if (ret < 0)
++ return ret;
++
++ if (copy_to_user((void __user *)arg, &ret, sizeof(int)))
++ return -EFAULT;
++ return 0;
++
++ case RTC_VL_CLR:
++ return regmap_update_bits(pcf85263->regmap,
++ PCF85263_REG_RTC_SC,
++ PCF85263_REG_RTC_SC_OS, 0);
++ default:
++ return -ENOIOCTLCMD;
++ }
++}
++#else
++#define pcf85263_ioctl NULL
++#endif
++
++static int pcf85263_init_hw(struct pcf85263 *pcf85263)
++{
++ struct device_node *np = pcf85263->dev->of_node;
++ unsigned int regval;
++ u32 propval;
++ int ret;
++
++ /* Determine if the oscillator has been stopped (probably low power) */
++ ret = pcf85263_check_osc_stopped(pcf85263);
++ if (ret < 0) {
++ /* Log here since this is the first hw access on probe */
++ dev_err(pcf85263->dev, "Unable to read register\n");
++
++ return ret;
++ }
++
++ /* Determine 12/24H mode */
++ ret = regmap_read(pcf85263->regmap, PCF85263_REG_OSC, &regval);
++ if (ret)
++ return ret;
++ pcf85263->mode_12h = !!(regval & PCF85263_REG_OSC_12H);
++
++ /* Set the oscillator register */
++ regval &= ~PCF85263_REG_OSC_12H; /* keep current 12/24 h setting */
++
++ propval = PCF85263_QUARTZCAP_12p5pF;
++ of_property_read_u32(np, "quartz-load-capacitance", &propval);
++ regval |= ((propval << PCF85263_REG_OSC_CL_SHIFT)
++ & PCF85263_REG_OSC_CL_MASK);
++
++ propval = PCF85263_QUARTZDRIVE_NORMAL;
++ of_property_read_u32(np, "quartz-drive-strength", &propval);
++ regval |= ((propval << PCF85263_REG_OSC_OSCD_SHIFT)
++ & PCF85263_REG_OSC_OSCD_MASK);
++
++ if (of_property_read_bool(np, "quartz-low-jitter"))
++ regval |= PCF85263_REG_OSC_LOWJ;
++
++ ret = regmap_write(pcf85263->regmap, PCF85263_REG_OSC, regval);
++ if (ret)
++ return ret;
++
++ /* Set function register (RTC mode, 1s tick, clock output static) */
++ ret = regmap_write(pcf85263->regmap, PCF85263_REG_FUNCTION,
++ 
PCF85263_REG_FUNCTION_COF_OFF); ++ if (ret) ++ return ret; ++ ++ /* Set all interrupts to disabled, level mode */ ++ ret = regmap_write(pcf85263->regmap, PCF85263_REG_INTA_CTL, ++ PCF85263_REG_INTx_CTL_ILP); ++ if (ret) ++ return ret; ++ ret = regmap_write(pcf85263->regmap, PCF85263_REG_INTB_CTL, ++ PCF85263_REG_INTx_CTL_ILP); ++ if (ret) ++ return ret; ++ ++ /* Setup IO pin config register */ ++ regval = PCF85263_REG_PINIO_CLKDISABLE; ++ switch (pcf85263->irq_pin) { ++ case PCF85263_IRQPIN_INTA: ++ regval |= (PCF85263_INTAPM_INTA | PCF85263_TSPM_DISABLED); ++ break; ++ case PCF85263_IRQPIN_INTB: ++ regval |= (PCF85263_INTAPM_HIGHZ | PCF85263_TSPM_INTB); ++ break; ++ case PCF85263_IRQPIN_NONE: ++ regval |= (PCF85263_INTAPM_HIGHZ | PCF85263_TSPM_DISABLED); ++ break; ++ } ++ ret = regmap_write(pcf85263->regmap, PCF85263_REG_PINIO, regval); ++ ++ return ret; ++} ++ ++static const struct rtc_class_ops rtc_ops = { ++ .ioctl = pcf85263_ioctl, ++ .read_time = pcf85263_read_time, ++ .set_time = pcf85263_set_time, ++ .read_alarm = pcf85263_read_alarm, ++ .set_alarm = pcf85263_set_alarm, ++ .alarm_irq_enable = pcf85263_alarm_irq_enable, ++}; ++ ++static const struct regmap_config pcf85263_regmap_cfg = { ++ .reg_bits = 8, ++ .val_bits = 8, ++ .max_register = PCF85263_MAX_REG, ++}; ++ ++/* ++ * On some boards the interrupt line may not be wired to the CPU but only to ++ * a power supply circuit. ++ * In that case no interrupt will be specified in the device tree but the ++ * wakeup-source DT property may be used to enable wakeup programming in ++ * sysfs ++ */ ++static bool pcf85263_can_wakeup_machine(struct pcf85263 *pcf85263) ++{ ++ return pcf85263->irq || ++ of_property_read_bool(pcf85263->dev->of_node, "wakeup-source"); ++} ++ ++static int pcf85263_probe(struct i2c_client *client, ++ const struct i2c_device_id *id) ++{ ++ struct device *dev = &client->dev; ++ struct pcf85263 *pcf85263; ++ int ret; ++ ++ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C | ++ I2C_FUNC_SMBUS_BYTE_DATA | ++ I2C_FUNC_SMBUS_I2C_BLOCK)) ++ return -ENODEV; ++ ++ pcf85263 = devm_kzalloc(dev, sizeof(*pcf85263), GFP_KERNEL); ++ if (!pcf85263) ++ return -ENOMEM; ++ ++ pcf85263->dev = dev; ++ pcf85263->irq = client->irq; ++ dev_set_drvdata(dev, pcf85263); ++ ++ pcf85263->regmap = devm_regmap_init_i2c(client, &pcf85263_regmap_cfg); ++ if (IS_ERR(pcf85263->regmap)) { ++ ret = PTR_ERR(pcf85263->regmap); ++ dev_err(dev, "regmap allocation failed (%d)\n", ret); ++ ++ return ret; ++ } ++ ++ /* Determine which interrupt pin the board uses */ ++ if (pcf85263_can_wakeup_machine(pcf85263)) { ++ if (of_property_match_string(dev->of_node, ++ "interrupt-names", "INTB") >= 0) ++ pcf85263->irq_pin = PCF85263_IRQPIN_INTB; ++ else ++ pcf85263->irq_pin = PCF85263_IRQPIN_INTA; ++ } else { ++ pcf85263->irq_pin = PCF85263_IRQPIN_NONE; ++ } ++ ++ ret = pcf85263_init_hw(pcf85263); ++ if (ret) ++ return ret; ++ ++ if (pcf85263->irq) { ++ ret = devm_request_threaded_irq(dev, pcf85263->irq, NULL, ++ pcf85263_irq, ++ IRQF_ONESHOT, ++ dev->driver->name, pcf85263); ++ if (ret) { ++ dev_err(dev, "irq %d unavailable (%d)\n", ++ pcf85263->irq, ret); ++ pcf85263->irq = 0; ++ } ++ } ++ ++ if (pcf85263_can_wakeup_machine(pcf85263)) ++ device_init_wakeup(dev, true); ++ ++ pcf85263->rtc = devm_rtc_device_register(dev, dev->driver->name, ++ &rtc_ops, THIS_MODULE); ++ ret = PTR_ERR_OR_ZERO(pcf85263->rtc); ++ if (ret) ++ return ret; ++ ++ /* We cannot support UIE mode if we do not have an IRQ line */ ++ if (!pcf85263->irq) ++ pcf85263->rtc->uie_unsupported 
= 1;
++
++ dev_info(pcf85263->dev,
++ "PCF85263 RTC (irqpin=%s irq=%d)\n",
++ pcf85263_irqpin_names[pcf85263->irq_pin],
++ pcf85263->irq);
++
++ return 0;
++}
++
++static int pcf85263_remove(struct i2c_client *client)
++{
++ struct pcf85263 *pcf85263 = i2c_get_clientdata(client);
++
++ if (pcf85263_can_wakeup_machine(pcf85263))
++ device_init_wakeup(pcf85263->dev, false);
++
++ return 0;
++}
++
++#ifdef CONFIG_PM_SLEEP
++static int pcf85263_suspend(struct device *dev)
++{
++ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
++ int ret = 0;
++
++ if (device_may_wakeup(dev))
++ ret = enable_irq_wake(pcf85263->irq);
++
++ return ret;
++}
++
++static int pcf85263_resume(struct device *dev)
++{
++ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
++ int ret = 0;
++
++ if (device_may_wakeup(dev))
++ ret = disable_irq_wake(pcf85263->irq);
++
++ return ret;
++}
++
++#endif
++
++static const struct i2c_device_id pcf85263_id[] = {
++ { "pcf85263", 0 },
++ { }
++};
++MODULE_DEVICE_TABLE(i2c, pcf85263_id);
++
++#ifdef CONFIG_OF
++static const struct of_device_id pcf85263_of_match[] = {
++ { .compatible = "nxp,pcf85263" },
++ {}
++};
++MODULE_DEVICE_TABLE(of, pcf85263_of_match);
++#endif
++
++static SIMPLE_DEV_PM_OPS(pcf85263_pm_ops, pcf85263_suspend, pcf85263_resume);
++
++static struct i2c_driver pcf85263_driver = {
++ .driver = {
++ .name = "rtc-pcf85263",
++ .of_match_table = of_match_ptr(pcf85263_of_match),
++ .pm = &pcf85263_pm_ops,
++ },
++ .probe = pcf85263_probe,
++ .remove = pcf85263_remove,
++ .id_table = pcf85263_id,
++};
++
++module_i2c_driver(pcf85263_driver);
++
++MODULE_AUTHOR("Martin Fuzzey ");
++MODULE_DESCRIPTION("PCF85263 RTC Driver");
++MODULE_LICENSE("GPL");
++
diff --git a/target/linux/layerscape/patches-4.9/815-spi-support-layerscape.patch b/target/linux/layerscape/patches-4.9/815-spi-support-layerscape.patch
new file mode 100644
index 000000000..b9c531651
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/815-spi-support-layerscape.patch
@@ -0,0 +1,438 @@
+From a12f522b48a8cb637c1c026b46a76b2ef7983f8d Mon Sep 17 00:00:00 2001
+From: Yangbo Lu
+Date: Mon, 25 Sep 2017 12:12:41 +0800
+Subject: [PATCH] spi: support layerscape
+
+This is an integrated patch for layerscape dspi support.
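As background for the DMA mode this patch adds: the driver moves data in rounds bounded by the DMA buffer, computing each round's length in words from the remaining byte count. The standalone sketch below (illustrative only; the constants are copied from the hunk further down, and note that in the driver the variable bytes_per_buffer actually holds a word count despite its name) reproduces the chunking arithmetic of dspi_dma_xfer():

    #include <stdio.h>

    #define DSPI_FIFO_SIZE 4
    #define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)

    int main(void)
    {
        int word = 2;          /* bytes per word in 16-bit frame mode */
        int remaining = 10000; /* transfer length in bytes */
        int max_words = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE; /* 1024 words */

        while (remaining) {
            int len = remaining / word; /* words still to transfer */

            if (len > max_words)
                len = max_words;
            if (!len)
                break; /* guard: the driver assumes word-aligned lengths */
            printf("DMA round: %d words (%d bytes)\n", len, len * word);
            remaining -= len * word;
        }
        return 0;
    }

Each loop iteration corresponds to one dspi_next_xfer_dma_submit() call in the driver, which queues one tx and one rx descriptor and waits for both completions before starting the next round.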
+
+Signed-off-by: Christophe JAILLET
+Signed-off-by: Sanchayan Maity
+Signed-off-by: Geert Uytterhoeven
+Signed-off-by: Sanchayan Maity
+Signed-off-by: Yangbo Lu
+---
+ drivers/spi/Kconfig | 1 +
+ drivers/spi/spi-fsl-dspi.c | 309 ++++++++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 305 insertions(+), 5 deletions(-)
+
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -365,6 +365,7 @@ config SPI_FSL_SPI
+ config SPI_FSL_DSPI
+ tristate "Freescale DSPI controller"
+ select REGMAP_MMIO
++ depends on HAS_DMA
+ depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
+ help
+ This enables support for the Freescale DSPI controller in master
+--- a/drivers/spi/spi-fsl-dspi.c
++++ b/drivers/spi/spi-fsl-dspi.c
+@@ -15,6 +15,8 @@
+ 
+ #include <linux/clk.h>
+ #include <linux/delay.h>
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
+ #include <linux/err.h>
+ #include <linux/errno.h>
+ #include <linux/interrupt.h>
+@@ -40,6 +42,7 @@
+ #define TRAN_STATE_WORD_ODD_NUM 0x04
+ 
+ #define DSPI_FIFO_SIZE 4
++#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
+ 
+ #define SPI_MCR 0x00
+ #define SPI_MCR_MASTER (1 << 31)
+@@ -72,6 +75,11 @@
+ #define SPI_SR_TCFQF 0x80000000
+ #define SPI_SR_CLEAR 0xdaad0000
+ 
++#define SPI_RSER_TFFFE BIT(25)
++#define SPI_RSER_TFFFD BIT(24)
++#define SPI_RSER_RFDFE BIT(17)
++#define SPI_RSER_RFDFD BIT(16)
++
+ #define SPI_RSER 0x30
+ #define SPI_RSER_EOQFE 0x10000000
+ #define SPI_RSER_TCFQE 0x80000000
+@@ -109,6 +117,8 @@
+ 
+ #define SPI_TCR_TCNT_MAX 0x10000
+ 
++#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
++
+ struct chip_data {
+ u32 mcr_val;
+ u32 ctar_val;
+@@ -118,6 +128,7 @@ struct chip_data {
+ enum dspi_trans_mode {
+ DSPI_EOQ_MODE = 0,
+ DSPI_TCFQ_MODE,
++ DSPI_DMA_MODE,
+ };
+ 
+ struct fsl_dspi_devtype_data {
+@@ -126,7 +137,7 @@ struct fsl_dspi_devtype_data {
+ };
+ 
+ static const struct fsl_dspi_devtype_data vf610_data = {
+- .trans_mode = DSPI_EOQ_MODE,
++ .trans_mode = DSPI_DMA_MODE,
+ .max_clock_factor = 2,
+ };
+ 
+@@ -140,6 +151,23 @@ static const struct fsl_dspi_devtype_dat
+ .max_clock_factor = 8,
+ };
+ 
++struct fsl_dspi_dma {
++ /* Length of transfer in words of DSPI_FIFO_SIZE */
++ u32 curr_xfer_len;
++
++ u32 *tx_dma_buf;
++ struct dma_chan *chan_tx;
++ dma_addr_t tx_dma_phys;
++ struct completion cmd_tx_complete;
++ struct dma_async_tx_descriptor *tx_desc;
++
++ u32 *rx_dma_buf;
++ struct dma_chan *chan_rx;
++ dma_addr_t rx_dma_phys;
++ struct completion cmd_rx_complete;
++ struct dma_async_tx_descriptor *rx_desc;
++};
++
+ struct fsl_dspi {
+ struct spi_master *master;
+ struct platform_device *pdev;
+@@ -166,8 +194,11 @@ struct fsl_dspi {
+ u32 waitflags;
+ 
+ u32 spi_tcnt;
++ struct fsl_dspi_dma *dma;
+ };
+ 
++static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word);
++
+ static inline int is_double_byte_mode(struct fsl_dspi *dspi)
+ {
+ unsigned int val;
+@@ -177,6 +208,255 @@ static inline int is_double_byte_mode(st
+ return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
+ }
+ 
++static void dspi_tx_dma_callback(void *arg)
++{
++ struct fsl_dspi *dspi = arg;
++ struct fsl_dspi_dma *dma = dspi->dma;
++
++ complete(&dma->cmd_tx_complete);
++}
++
++static void dspi_rx_dma_callback(void *arg)
++{
++ struct fsl_dspi *dspi = arg;
++ struct fsl_dspi_dma *dma = dspi->dma;
++ int rx_word;
++ int i;
++ u16 d;
++
++ rx_word = is_double_byte_mode(dspi);
++
++ if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) {
++ for (i = 0; i < dma->curr_xfer_len; i++) {
++ d = dspi->dma->rx_dma_buf[i];
++ rx_word ? 
(*(u16 *)dspi->rx = d) : ++ (*(u8 *)dspi->rx = d); ++ dspi->rx += rx_word + 1; ++ } ++ } ++ ++ complete(&dma->cmd_rx_complete); ++} ++ ++static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi) ++{ ++ struct fsl_dspi_dma *dma = dspi->dma; ++ struct device *dev = &dspi->pdev->dev; ++ int time_left; ++ int tx_word; ++ int i; ++ ++ tx_word = is_double_byte_mode(dspi); ++ ++ for (i = 0; i < dma->curr_xfer_len; i++) { ++ dspi->dma->tx_dma_buf[i] = dspi_data_to_pushr(dspi, tx_word); ++ if ((dspi->cs_change) && (!dspi->len)) ++ dspi->dma->tx_dma_buf[i] &= ~SPI_PUSHR_CONT; ++ } ++ ++ dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx, ++ dma->tx_dma_phys, ++ dma->curr_xfer_len * ++ DMA_SLAVE_BUSWIDTH_4_BYTES, ++ DMA_MEM_TO_DEV, ++ DMA_PREP_INTERRUPT | DMA_CTRL_ACK); ++ if (!dma->tx_desc) { ++ dev_err(dev, "Not able to get desc for DMA xfer\n"); ++ return -EIO; ++ } ++ ++ dma->tx_desc->callback = dspi_tx_dma_callback; ++ dma->tx_desc->callback_param = dspi; ++ if (dma_submit_error(dmaengine_submit(dma->tx_desc))) { ++ dev_err(dev, "DMA submit failed\n"); ++ return -EINVAL; ++ } ++ ++ dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx, ++ dma->rx_dma_phys, ++ dma->curr_xfer_len * ++ DMA_SLAVE_BUSWIDTH_4_BYTES, ++ DMA_DEV_TO_MEM, ++ DMA_PREP_INTERRUPT | DMA_CTRL_ACK); ++ if (!dma->rx_desc) { ++ dev_err(dev, "Not able to get desc for DMA xfer\n"); ++ return -EIO; ++ } ++ ++ dma->rx_desc->callback = dspi_rx_dma_callback; ++ dma->rx_desc->callback_param = dspi; ++ if (dma_submit_error(dmaengine_submit(dma->rx_desc))) { ++ dev_err(dev, "DMA submit failed\n"); ++ return -EINVAL; ++ } ++ ++ reinit_completion(&dspi->dma->cmd_rx_complete); ++ reinit_completion(&dspi->dma->cmd_tx_complete); ++ ++ dma_async_issue_pending(dma->chan_rx); ++ dma_async_issue_pending(dma->chan_tx); ++ ++ time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete, ++ DMA_COMPLETION_TIMEOUT); ++ if (time_left == 0) { ++ dev_err(dev, "DMA tx timeout\n"); ++ dmaengine_terminate_all(dma->chan_tx); ++ dmaengine_terminate_all(dma->chan_rx); ++ return -ETIMEDOUT; ++ } ++ ++ time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete, ++ DMA_COMPLETION_TIMEOUT); ++ if (time_left == 0) { ++ dev_err(dev, "DMA rx timeout\n"); ++ dmaengine_terminate_all(dma->chan_tx); ++ dmaengine_terminate_all(dma->chan_rx); ++ return -ETIMEDOUT; ++ } ++ ++ return 0; ++} ++ ++static int dspi_dma_xfer(struct fsl_dspi *dspi) ++{ ++ struct fsl_dspi_dma *dma = dspi->dma; ++ struct device *dev = &dspi->pdev->dev; ++ int curr_remaining_bytes; ++ int bytes_per_buffer; ++ int word = 1; ++ int ret = 0; ++ ++ if (is_double_byte_mode(dspi)) ++ word = 2; ++ curr_remaining_bytes = dspi->len; ++ bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE; ++ while (curr_remaining_bytes) { ++ /* Check if current transfer fits the DMA buffer */ ++ dma->curr_xfer_len = curr_remaining_bytes / word; ++ if (dma->curr_xfer_len > bytes_per_buffer) ++ dma->curr_xfer_len = bytes_per_buffer; ++ ++ ret = dspi_next_xfer_dma_submit(dspi); ++ if (ret) { ++ dev_err(dev, "DMA transfer failed\n"); ++ goto exit; ++ ++ } else { ++ curr_remaining_bytes -= dma->curr_xfer_len * word; ++ if (curr_remaining_bytes < 0) ++ curr_remaining_bytes = 0; ++ } ++ } ++ ++exit: ++ return ret; ++} ++ ++static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr) ++{ ++ struct fsl_dspi_dma *dma; ++ struct dma_slave_config cfg; ++ struct device *dev = &dspi->pdev->dev; ++ int ret; ++ ++ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL); ++ if (!dma) ++ return -ENOMEM; ++ ++ 
dma->chan_rx = dma_request_slave_channel(dev, "rx"); ++ if (!dma->chan_rx) { ++ dev_err(dev, "rx dma channel not available\n"); ++ ret = -ENODEV; ++ return ret; ++ } ++ ++ dma->chan_tx = dma_request_slave_channel(dev, "tx"); ++ if (!dma->chan_tx) { ++ dev_err(dev, "tx dma channel not available\n"); ++ ret = -ENODEV; ++ goto err_tx_channel; ++ } ++ ++ dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE, ++ &dma->tx_dma_phys, GFP_KERNEL); ++ if (!dma->tx_dma_buf) { ++ ret = -ENOMEM; ++ goto err_tx_dma_buf; ++ } ++ ++ dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE, ++ &dma->rx_dma_phys, GFP_KERNEL); ++ if (!dma->rx_dma_buf) { ++ ret = -ENOMEM; ++ goto err_rx_dma_buf; ++ } ++ ++ cfg.src_addr = phy_addr + SPI_POPR; ++ cfg.dst_addr = phy_addr + SPI_PUSHR; ++ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; ++ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; ++ cfg.src_maxburst = 1; ++ cfg.dst_maxburst = 1; ++ ++ cfg.direction = DMA_DEV_TO_MEM; ++ ret = dmaengine_slave_config(dma->chan_rx, &cfg); ++ if (ret) { ++ dev_err(dev, "can't configure rx dma channel\n"); ++ ret = -EINVAL; ++ goto err_slave_config; ++ } ++ ++ cfg.direction = DMA_MEM_TO_DEV; ++ ret = dmaengine_slave_config(dma->chan_tx, &cfg); ++ if (ret) { ++ dev_err(dev, "can't configure tx dma channel\n"); ++ ret = -EINVAL; ++ goto err_slave_config; ++ } ++ ++ dspi->dma = dma; ++ init_completion(&dma->cmd_tx_complete); ++ init_completion(&dma->cmd_rx_complete); ++ ++ return 0; ++ ++err_slave_config: ++ dma_free_coherent(dev, DSPI_DMA_BUFSIZE, ++ dma->rx_dma_buf, dma->rx_dma_phys); ++err_rx_dma_buf: ++ dma_free_coherent(dev, DSPI_DMA_BUFSIZE, ++ dma->tx_dma_buf, dma->tx_dma_phys); ++err_tx_dma_buf: ++ dma_release_channel(dma->chan_tx); ++err_tx_channel: ++ dma_release_channel(dma->chan_rx); ++ ++ devm_kfree(dev, dma); ++ dspi->dma = NULL; ++ ++ return ret; ++} ++ ++static void dspi_release_dma(struct fsl_dspi *dspi) ++{ ++ struct fsl_dspi_dma *dma = dspi->dma; ++ struct device *dev = &dspi->pdev->dev; ++ ++ if (dma) { ++ if (dma->chan_tx) { ++ dma_unmap_single(dev, dma->tx_dma_phys, ++ DSPI_DMA_BUFSIZE, DMA_TO_DEVICE); ++ dma_release_channel(dma->chan_tx); ++ } ++ ++ if (dma->chan_rx) { ++ dma_unmap_single(dev, dma->rx_dma_phys, ++ DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE); ++ dma_release_channel(dma->chan_rx); ++ } ++ } ++} ++ + static void hz_to_spi_baud(char *pbr, char *br, int speed_hz, + unsigned long clkrate) + { +@@ -425,6 +705,12 @@ static int dspi_transfer_one_message(str + regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE); + dspi_tcfq_write(dspi); + break; ++ case DSPI_DMA_MODE: ++ regmap_write(dspi->regmap, SPI_RSER, ++ SPI_RSER_TFFFE | SPI_RSER_TFFFD | ++ SPI_RSER_RFDFE | SPI_RSER_RFDFD); ++ status = dspi_dma_xfer(dspi); ++ break; + default: + dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n", + trans_mode); +@@ -432,9 +718,13 @@ static int dspi_transfer_one_message(str + goto out; + } + +- if (wait_event_interruptible(dspi->waitq, dspi->waitflags)) +- dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n"); +- dspi->waitflags = 0; ++ if (trans_mode != DSPI_DMA_MODE) { ++ if (wait_event_interruptible(dspi->waitq, ++ dspi->waitflags)) ++ dev_err(&dspi->pdev->dev, ++ "wait transfer complete fail!\n"); ++ dspi->waitflags = 0; ++ } + + if (transfer->delay_usecs) + udelay(transfer->delay_usecs); +@@ -712,7 +1002,8 @@ static int dspi_probe(struct platform_de + if (IS_ERR(dspi->regmap)) { + dev_err(&pdev->dev, "failed to init regmap: %ld\n", + PTR_ERR(dspi->regmap)); +- return PTR_ERR(dspi->regmap); ++ ret = 
PTR_ERR(dspi->regmap);
++ goto out_master_put;
+ }
+ 
+ dspi_init(dspi);
+@@ -740,6 +1031,13 @@ static int dspi_probe(struct platform_de
+ if (ret)
+ goto out_master_put;
+ 
++ if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
++ if (dspi_request_dma(dspi, res->start)) {
++ dev_err(&pdev->dev, "can't get dma channels\n");
++ goto out_clk_put;
++ }
++ }
++
+ master->max_speed_hz =
+ clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
+ 
+@@ -768,6 +1066,7 @@ static int dspi_remove(struct platform_d
+ struct fsl_dspi *dspi = spi_master_get_devdata(master);
+ 
+ /* Disconnect from the SPI framework */
++ dspi_release_dma(dspi);
+ clk_disable_unprepare(dspi->clk);
+ spi_unregister_master(dspi->master);
+ 
diff --git a/target/linux/layerscape/patches-4.9/816-tty-serial-support-layerscape.patch b/target/linux/layerscape/patches-4.9/816-tty-serial-support-layerscape.patch
new file mode 100644
index 000000000..a14df9d70
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/816-tty-serial-support-layerscape.patch
@@ -0,0 +1,158 @@
+From 469daac0faff06209bc1d1390571b860d153a82b Mon Sep 17 00:00:00 2001
+From: Yangbo Lu
+Date: Wed, 27 Sep 2017 10:33:47 +0800
+Subject: [PATCH] tty: serial: support layerscape
+
+This is an integrated patch for layerscape uart support.
+
+Signed-off-by: Nikita Yushchenko
+Signed-off-by: Yuan Yao
+Signed-off-by: Stefan Agner
+Signed-off-by: Yangbo Lu
+---
+ drivers/tty/serial/fsl_lpuart.c | 66 ++++++++++++++++++++++++++++------------
+ 1 file changed, 46 insertions(+), 20 deletions(-)
+
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -231,6 +231,8 @@
+ #define DEV_NAME "ttyLP"
+ #define UART_NR 6
+ 
++static DECLARE_BITMAP(linemap, UART_NR);
++
+ struct lpuart_port {
+ struct uart_port port;
+ struct clk *clk;
+@@ -1348,6 +1350,18 @@ lpuart_set_termios(struct uart_port *por
+ /* ask the core to calculate the divisor */
+ baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
+ 
++ /*
++ * Need to update the Ring buffer length according to the selected
++ * baud rate and restart Rx DMA path.
++ *
++ * Since timer function acquires sport->port.lock, need to stop before
++ * acquiring same lock because otherwise del_timer_sync() can deadlock.
++ */
++ if (old && sport->lpuart_dma_rx_use) {
++ del_timer_sync(&sport->lpuart_timer);
++ lpuart_dma_rx_free(&sport->port);
++ }
++
+ spin_lock_irqsave(&sport->port.lock, flags);
+ 
+ sport->port.read_status_mask = 0;
+@@ -1397,22 +1411,11 @@ lpuart_set_termios(struct uart_port *por
+ /* restore control register */
+ writeb(old_cr2, sport->port.membase + UARTCR2);
+ 
+- /*
+- * If new baud rate is set, we will also need to update the Ring buffer
+- * length according to the selected baud rate and restart Rx DMA path. 
+- */
+- if (old) {
+- if (sport->lpuart_dma_rx_use) {
+- del_timer_sync(&sport->lpuart_timer);
+- lpuart_dma_rx_free(&sport->port);
+- }
+-
+- if (sport->dma_rx_chan && !lpuart_start_rx_dma(sport)) {
+- sport->lpuart_dma_rx_use = true;
++ if (old && sport->lpuart_dma_rx_use) {
++ if (!lpuart_start_rx_dma(sport))
+ rx_dma_timer_init(sport);
+- } else {
++ else
+ sport->lpuart_dma_rx_use = false;
+- }
+ }
+ 
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+@@ -1640,6 +1643,13 @@ lpuart_console_write(struct console *co,
+ {
+ struct lpuart_port *sport = lpuart_ports[co->index];
+ unsigned char old_cr2, cr2;
++ unsigned long flags;
++ int locked = 1;
++
++ if (sport->port.sysrq || oops_in_progress)
++ locked = spin_trylock_irqsave(&sport->port.lock, flags);
++ else
++ spin_lock_irqsave(&sport->port.lock, flags);
+ 
+ /* first save CR2 and then disable interrupts */
+ cr2 = old_cr2 = readb(sport->port.membase + UARTCR2);
+@@ -1654,6 +1664,9 @@ lpuart_console_write(struct console *co,
+ barrier();
+ 
+ writeb(old_cr2, sport->port.membase + UARTCR2);
++
++ if (locked)
++ spin_unlock_irqrestore(&sport->port.lock, flags);
+ }
+ 
+ static void
+@@ -1661,6 +1674,13 @@ lpuart32_console_write(struct console *c
+ {
+ struct lpuart_port *sport = lpuart_ports[co->index];
+ unsigned long old_cr, cr;
++ unsigned long flags;
++ int locked = 1;
++
++ if (sport->port.sysrq || oops_in_progress)
++ locked = spin_trylock_irqsave(&sport->port.lock, flags);
++ else
++ spin_lock_irqsave(&sport->port.lock, flags);
+ 
+ /* first save CR2 and then disable interrupts */
+ cr = old_cr = lpuart32_read(sport->port.membase + UARTCTRL);
+@@ -1675,6 +1695,9 @@ lpuart32_console_write(struct console *c
+ barrier();
+ 
+ lpuart32_write(old_cr, sport->port.membase + UARTCTRL);
++
++ if (locked)
++ spin_unlock_irqrestore(&sport->port.lock, flags);
+ }
+ 
+ /*
+@@ -1899,9 +1922,13 @@ static int lpuart_probe(struct platform_
+ 
+ ret = of_alias_get_id(np, "serial");
+ if (ret < 0) {
+- dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
+- return ret;
++ ret = find_first_zero_bit(linemap, UART_NR);
++ if (ret >= UART_NR) {
++ dev_err(&pdev->dev, "port line is full, add device failed\n");
++ return ret;
++ }
+ }
++ set_bit(ret, linemap);
+ sport->port.line = ret;
+ sport->lpuart32 = of_device_is_compatible(np, "fsl,ls1021a-lpuart");
+ 
+@@ -1983,6 +2010,7 @@ static int lpuart_remove(struct platform
+ struct lpuart_port *sport = platform_get_drvdata(pdev);
+ 
+ uart_remove_one_port(&lpuart_reg, &sport->port);
++ clear_bit(sport->port.line, linemap);
+ 
+ clk_disable_unprepare(sport->clk);
+ 
+@@ -2067,12 +2095,10 @@ static int lpuart_resume(struct device *
+ 
+ if (sport->lpuart_dma_rx_use) {
+ if (sport->port.irq_wake) {
+- if (!lpuart_start_rx_dma(sport)) {
+- sport->lpuart_dma_rx_use = true;
++ if (!lpuart_start_rx_dma(sport))
+ rx_dma_timer_init(sport);
+- } else {
++ else
+ sport->lpuart_dma_rx_use = false;
+- }
+ }
+ }
+ 
diff --git a/target/linux/layerscape/patches-4.9/817-usb-support-layerscape.patch b/target/linux/layerscape/patches-4.9/817-usb-support-layerscape.patch
new file mode 100644
index 000000000..147b03ae2
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/817-usb-support-layerscape.patch
@@ -0,0 +1,1434 @@
+From f8daa8e984213554008e73cd155530dceec5a109 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu
+Date: Wed, 27 Sep 2017 10:34:07 +0800
+Subject: [PATCH] usb: support layerscape
+
+This is an integrated patch for layerscape usb support.
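One piece of this patch, visible in the common.c hunk below, is a device-tree lookup that maps the 'dr_mode' property string onto enum usb_dr_mode. The standalone sketch below (illustrative only; the real code obtains the string with of_property_read_string() from the device node, and the table entries shown are the conventional kernel ones, of which only the "otg" entry is visible in the hunk) demonstrates the matching logic:

    #include <stdio.h>
    #include <string.h>

    enum usb_dr_mode {
        USB_DR_MODE_UNKNOWN,
        USB_DR_MODE_HOST,
        USB_DR_MODE_PERIPHERAL,
        USB_DR_MODE_OTG,
    };

    static const char *const usb_dr_modes[] = {
        [USB_DR_MODE_UNKNOWN]    = "",
        [USB_DR_MODE_HOST]       = "host",
        [USB_DR_MODE_PERIPHERAL] = "peripheral",
        [USB_DR_MODE_OTG]        = "otg",
    };

    static enum usb_dr_mode dr_mode_from_string(const char *s)
    {
        size_t i;

        /* Same linear scan as of_usb_get_dr_mode() in the hunk below */
        for (i = 0; i < sizeof(usb_dr_modes) / sizeof(usb_dr_modes[0]); i++)
            if (!strcmp(s, usb_dr_modes[i]))
                return (enum usb_dr_mode)i;
        return USB_DR_MODE_UNKNOWN;
    }

    int main(void)
    {
        printf("%d\n", dr_mode_from_string("otg"));  /* prints 3 */
        printf("%d\n", dr_mode_from_string("host")); /* prints 1 */
        return 0;
    }

An unrecognized string falls through to USB_DR_MODE_UNKNOWN, which the dwc3 code below then resolves from its hardware capabilities.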
+
+Signed-off-by: yinbo.zhu
+Signed-off-by: Ramneek Mehresh
+Signed-off-by: Nikhil Badola
+Signed-off-by: Changming Huang
+Signed-off-by: Catalin Marinas
+Signed-off-by: Rajesh Bhagat
+Signed-off-by: Suresh Gupta
+Signed-off-by: Zhao Chenhui
+Signed-off-by: Yangbo Lu
+---
+ drivers/usb/common/common.c | 50 ++++++
+ drivers/usb/core/hub.c | 8 +
+ drivers/usb/dwc3/core.c | 235 ++++++++++++++++++++++++++-
+ drivers/usb/dwc3/core.h | 46 +++++-
+ drivers/usb/dwc3/host.c | 15 +-
+ drivers/usb/gadget/udc/fsl_udc_core.c | 46 +++---
+ drivers/usb/gadget/udc/fsl_usb2_udc.h | 16 +-
+ drivers/usb/host/Kconfig | 2 +-
+ drivers/usb/host/ehci-fsl.c | 289 +++++++++++++++++++++++++++++++---
+ drivers/usb/host/ehci-fsl.h | 3 +
+ drivers/usb/host/ehci-hub.c | 2 +
+ drivers/usb/host/ehci.h | 5 +
+ drivers/usb/host/fsl-mph-dr-of.c | 12 ++
+ drivers/usb/phy/phy-fsl-usb.c | 59 +++++--
+ drivers/usb/phy/phy-fsl-usb.h | 8 +
+ include/linux/usb.h | 1 +
+ include/linux/usb/of.h | 2 +
+ 17 files changed, 726 insertions(+), 73 deletions(-)
+
+--- a/drivers/usb/common/common.c
++++ b/drivers/usb/common/common.c
+@@ -105,6 +105,56 @@ static const char *const usb_dr_modes[]
+ [USB_DR_MODE_OTG] = "otg",
+ };
+ 
++/**
++ * of_usb_get_dr_mode - Get dual role mode for given device_node
++ * @np: Pointer to the given device_node
++ *
++ * The function gets the dual role mode string from the property 'dr_mode',
++ * and returns the corresponding enum usb_dr_mode
++ */
++enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np)
++{
++ const char *dr_mode;
++ int err, i;
++
++ err = of_property_read_string(np, "dr_mode", &dr_mode);
++ if (err < 0)
++ return USB_DR_MODE_UNKNOWN;
++
++ for (i = 0; i < ARRAY_SIZE(usb_dr_modes); i++)
++ if (!strcmp(dr_mode, usb_dr_modes[i]))
++ return i;
++
++ return USB_DR_MODE_UNKNOWN;
++}
++EXPORT_SYMBOL_GPL(of_usb_get_dr_mode);
++
++/**
++ * of_usb_get_maximum_speed - Get maximum requested speed for a given USB
++ * controller.
++ * @np: Pointer to the given device_node
++ *
++ * The function gets the maximum speed string from property "maximum-speed",
++ * and returns the corresponding enum usb_device_speed.
++ */
++enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np)
++{
++ const char *maximum_speed;
++ int err;
++ int i;
++
++ err = of_property_read_string(np, "maximum-speed", &maximum_speed);
++ if (err < 0)
++ return USB_SPEED_UNKNOWN;
++
++ for (i = 0; i < ARRAY_SIZE(speed_names); i++)
++ if (strcmp(maximum_speed, speed_names[i]) == 0)
++ return i;
++
++ return USB_SPEED_UNKNOWN;
++}
++EXPORT_SYMBOL_GPL(of_usb_get_maximum_speed);
++
+ static enum usb_dr_mode usb_get_dr_mode_from_string(const char *str)
+ {
+ int ret;
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -4412,6 +4412,14 @@ hub_port_init(struct usb_hub *hub, struc
+ else
+ speed = usb_speed_string(udev->speed);
+ 
++#if !defined(CONFIG_FSL_USB2_OTG) && !defined(CONFIG_FSL_USB2_OTG_MODULE)
++ if (udev->speed != USB_SPEED_SUPER)
++ dev_info(&udev->dev,
++ "%s %s USB device number %d using %s\n",
++ (udev->config) ? "reset" : "new", speed,
"reset" : "new", speed, ++ devnum, udev->bus->controller->driver->name); ++#endif ++ + if (udev->speed < USB_SPEED_SUPER) + dev_info(&udev->dev, + "%s %s USB device number %d using %s\n", +--- a/drivers/usb/dwc3/core.c ++++ b/drivers/usb/dwc3/core.c +@@ -58,6 +58,7 @@ static int dwc3_get_dr_mode(struct dwc3 + enum usb_dr_mode mode; + struct device *dev = dwc->dev; + unsigned int hw_mode; ++ struct device_node *node = dev->of_node; + + if (dwc->dr_mode == USB_DR_MODE_UNKNOWN) + dwc->dr_mode = USB_DR_MODE_OTG; +@@ -83,6 +84,24 @@ static int dwc3_get_dr_mode(struct dwc3 + mode = USB_DR_MODE_HOST; + break; + default: ++ /* Adjust Frame Length */ ++ if (dwc->configure_gfladj) ++ dwc3_writel(dwc->regs, DWC3_GFLADJ, GFLADJ_30MHZ_REG_SEL | ++ GFLADJ_30MHZ(GFLADJ_30MHZ_DEFAULT)); ++ ++ /* Change burst beat and outstanding pipelined transfers requests */ ++ dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, ++ (dwc3_readl(dwc->regs, DWC3_GSBUSCFG0) & ~0xff) | 0xf); ++ dwc3_writel(dwc->regs, DWC3_GSBUSCFG1, ++ dwc3_readl(dwc->regs, DWC3_GSBUSCFG1) | 0xf00); ++ ++ /* Enable Snooping */ ++ if (node && of_dma_is_coherent(node)) { ++ dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, ++ dwc3_readl(dwc->regs, DWC3_GSBUSCFG0) | 0x22220000); ++ dev_dbg(dev, "enabled snooping for usb\n"); ++ } ++ + if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) + mode = USB_DR_MODE_HOST; + else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) +@@ -213,8 +232,9 @@ static void dwc3_frame_length_adjustment + + reg = dwc3_readl(dwc->regs, DWC3_GFLADJ); + dft = reg & DWC3_GFLADJ_30MHZ_MASK; +- if (!dev_WARN_ONCE(dwc->dev, dft == dwc->fladj, +- "request value same as default, ignoring\n")) { ++ if (dft == dwc->fladj) { ++ dev_warn(dwc->dev, "request value same as default, ignoring\n"); ++ } else { + reg &= ~DWC3_GFLADJ_30MHZ_MASK; + reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj; + dwc3_writel(dwc->regs, DWC3_GFLADJ, reg); +@@ -579,6 +599,99 @@ static int dwc3_phy_setup(struct dwc3 *d + return 0; + } + ++/* set global soc bus configuration registers */ ++static void dwc3_set_soc_bus_cfg(struct dwc3 *dwc) ++{ ++ struct device *dev = dwc->dev; ++ u32 *vals; ++ u32 cfg; ++ int ntype; ++ int ret; ++ int i; ++ ++ cfg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0); ++ ++ /* ++ * Handle property "snps,incr-burst-type-adjustment". ++ * Get the number of value from this property: ++ * result <= 0, means this property is not supported. ++ * result = 1, means INCRx burst mode supported. ++ * result > 1, means undefined length burst mode supported. 
++ */ ++ ntype = device_property_read_u32_array(dev, ++ "snps,incr-burst-type-adjustment", NULL, 0); ++ if (ntype > 0) { ++ vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL); ++ if (!vals) { ++ dev_err(dev, "Error to get memory\n"); ++ return; ++ } ++ /* Get INCR burst type, and parse it */ ++ ret = device_property_read_u32_array(dev, ++ "snps,incr-burst-type-adjustment", vals, ntype); ++ if (ret) { ++ dev_err(dev, "Error to get property\n"); ++ return; ++ } ++ *(dwc->incrx_type + 1) = vals[0]; ++ if (ntype > 1) { ++ *dwc->incrx_type = 1; ++ for (i = 1; i < ntype; i++) { ++ if (vals[i] > *(dwc->incrx_type + 1)) ++ *(dwc->incrx_type + 1) = vals[i]; ++ } ++ } else ++ *dwc->incrx_type = 0; ++ ++ /* Enable Undefined Length INCR Burst and Enable INCRx Burst */ ++ cfg &= ~DWC3_GSBUSCFG0_INCRBRST_MASK; ++ if (*dwc->incrx_type) ++ cfg |= DWC3_GSBUSCFG0_INCRBRSTENA; ++ switch (*(dwc->incrx_type + 1)) { ++ case 256: ++ cfg |= DWC3_GSBUSCFG0_INCR256BRSTENA; ++ break; ++ case 128: ++ cfg |= DWC3_GSBUSCFG0_INCR128BRSTENA; ++ break; ++ case 64: ++ cfg |= DWC3_GSBUSCFG0_INCR64BRSTENA; ++ break; ++ case 32: ++ cfg |= DWC3_GSBUSCFG0_INCR32BRSTENA; ++ break; ++ case 16: ++ cfg |= DWC3_GSBUSCFG0_INCR16BRSTENA; ++ break; ++ case 8: ++ cfg |= DWC3_GSBUSCFG0_INCR8BRSTENA; ++ break; ++ case 4: ++ cfg |= DWC3_GSBUSCFG0_INCR4BRSTENA; ++ break; ++ case 1: ++ break; ++ default: ++ dev_err(dev, "Invalid property\n"); ++ break; ++ } ++ } ++ ++ /* Handle usb snooping */ ++ if (dwc->dma_snooping_quirk) { ++ cfg &= ~DWC3_GSBUSCFG0_SNP_MASK; ++ cfg |= (AXI3_CACHE_TYPE_SNP << DWC3_GSBUSCFG0_DATARD_SHIFT) | ++ (AXI3_CACHE_TYPE_SNP << DWC3_GSBUSCFG0_DESCRD_SHIFT) | ++ (AXI3_CACHE_TYPE_SNP << DWC3_GSBUSCFG0_DATAWR_SHIFT) | ++ (AXI3_CACHE_TYPE_SNP << DWC3_GSBUSCFG0_DESCWR_SHIFT); ++ } ++ ++ dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg); ++ ++} ++ ++ ++ + static void dwc3_core_exit(struct dwc3 *dwc) + { + dwc3_event_buffers_cleanup(dwc); +@@ -721,6 +834,8 @@ static int dwc3_core_init(struct dwc3 *d + if (ret) + goto err1; + ++ dwc3_set_soc_bus_cfg(dwc); ++ + /* Adjust Frame Length */ + dwc3_frame_length_adjustment(dwc); + +@@ -919,11 +1034,109 @@ static void dwc3_core_exit_mode(struct d + } + } + ++static void dwc3_get_properties(struct dwc3 *dwc) ++{ ++ struct device *dev = dwc->dev; ++ struct device_node *node = dev->of_node; ++ u8 lpm_nyet_threshold; ++ u8 tx_de_emphasis; ++ u8 hird_threshold; ++ ++ /* default to highest possible threshold */ ++ lpm_nyet_threshold = 0xff; ++ ++ /* default to -3.5dB de-emphasis */ ++ tx_de_emphasis = 1; ++ ++ /* ++ * default to assert utmi_sleep_n and use maximum allowed HIRD ++ * threshold value of 0b1100 ++ */ ++ hird_threshold = 12; ++ ++ dwc->maximum_speed = usb_get_maximum_speed(dev); ++ dwc->dr_mode = usb_get_dr_mode(dev); ++ dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node); ++ ++ dwc->sysdev_is_parent = device_property_read_bool(dev, ++ "linux,sysdev_is_parent"); ++ if (dwc->sysdev_is_parent) ++ dwc->sysdev = dwc->dev->parent; ++ else ++ dwc->sysdev = dwc->dev; ++ ++ dwc->has_lpm_erratum = device_property_read_bool(dev, ++ "snps,has-lpm-erratum"); ++ device_property_read_u8(dev, "snps,lpm-nyet-threshold", ++ &lpm_nyet_threshold); ++ dwc->is_utmi_l1_suspend = device_property_read_bool(dev, ++ "snps,is-utmi-l1-suspend"); ++ device_property_read_u8(dev, "snps,hird-threshold", ++ &hird_threshold); ++ dwc->usb3_lpm_capable = device_property_read_bool(dev, ++ "snps,usb3_lpm_capable"); ++ ++ dwc->needs_fifo_resize = of_property_read_bool(node, "tx-fifo-resize"); ++ ++ dwc->configure_gfladj = ++ 
of_property_read_bool(node, "configure-gfladj"); ++ dwc->dr_mode = usb_get_dr_mode(dev); ++ ++ dwc->disable_scramble_quirk = device_property_read_bool(dev, ++ "snps,disable_scramble_quirk"); ++ dwc->u2exit_lfps_quirk = device_property_read_bool(dev, ++ "snps,u2exit_lfps_quirk"); ++ dwc->u2ss_inp3_quirk = device_property_read_bool(dev, ++ "snps,u2ss_inp3_quirk"); ++ dwc->req_p1p2p3_quirk = device_property_read_bool(dev, ++ "snps,req_p1p2p3_quirk"); ++ dwc->del_p1p2p3_quirk = device_property_read_bool(dev, ++ "snps,del_p1p2p3_quirk"); ++ dwc->del_phy_power_chg_quirk = device_property_read_bool(dev, ++ "snps,del_phy_power_chg_quirk"); ++ dwc->lfps_filter_quirk = device_property_read_bool(dev, ++ "snps,lfps_filter_quirk"); ++ dwc->rx_detect_poll_quirk = device_property_read_bool(dev, ++ "snps,rx_detect_poll_quirk"); ++ dwc->dis_u3_susphy_quirk = device_property_read_bool(dev, ++ "snps,dis_u3_susphy_quirk"); ++ dwc->dis_u2_susphy_quirk = device_property_read_bool(dev, ++ "snps,dis_u2_susphy_quirk"); ++ dwc->dis_enblslpm_quirk = device_property_read_bool(dev, ++ "snps,dis_enblslpm_quirk"); ++ dwc->dis_rxdet_inp3_quirk = device_property_read_bool(dev, ++ "snps,dis_rxdet_inp3_quirk"); ++ dwc->dis_u2_freeclk_exists_quirk = device_property_read_bool(dev, ++ "snps,dis-u2-freeclk-exists-quirk"); ++ dwc->dis_del_phy_power_chg_quirk = device_property_read_bool(dev, ++ "snps,dis-del-phy-power-chg-quirk"); ++ dwc->dma_snooping_quirk = device_property_read_bool(dev, ++ "snps,dma-snooping"); ++ ++ dwc->tx_de_emphasis_quirk = device_property_read_bool(dev, ++ "snps,tx_de_emphasis_quirk"); ++ device_property_read_u8(dev, "snps,tx_de_emphasis", ++ &tx_de_emphasis); ++ device_property_read_string(dev, "snps,hsphy_interface", ++ &dwc->hsphy_interface); ++ device_property_read_u32(dev, "snps,quirk-frame-length-adjustment", ++ &dwc->fladj); ++ ++ dwc->lpm_nyet_threshold = lpm_nyet_threshold; ++ dwc->tx_de_emphasis = tx_de_emphasis; ++ ++ dwc->hird_threshold = hird_threshold ++ | (dwc->is_utmi_l1_suspend << 4); ++ ++ dwc->imod_interval = 0; ++} ++ + #define DWC3_ALIGN_MASK (16 - 1) + + static int dwc3_probe(struct platform_device *pdev) + { + struct device *dev = &pdev->dev; ++ struct device_node *node = dev->of_node; + struct resource *res; + struct dwc3 *dwc; + u8 lpm_nyet_threshold; +@@ -955,6 +1168,11 @@ static int dwc3_probe(struct platform_de + dwc->xhci_resources[0].flags = res->flags; + dwc->xhci_resources[0].name = res->name; + ++ if (node) { ++ dwc->configure_gfladj = ++ of_property_read_bool(node, "configure-gfladj"); ++ } ++ + res->start += DWC3_GLOBALS_REGS_START; + + /* +@@ -997,6 +1215,12 @@ static int dwc3_probe(struct platform_de + dwc->usb3_lpm_capable = device_property_read_bool(dev, + "snps,usb3_lpm_capable"); + ++ dwc->needs_fifo_resize = of_property_read_bool(node, "tx-fifo-resize"); ++ ++ dwc->configure_gfladj = ++ of_property_read_bool(node, "configure-gfladj"); ++ dwc->dr_mode = of_usb_get_dr_mode(node); ++ + dwc->disable_scramble_quirk = device_property_read_bool(dev, + "snps,disable_scramble_quirk"); + dwc->u2exit_lfps_quirk = device_property_read_bool(dev, +@@ -1041,6 +1265,8 @@ static int dwc3_probe(struct platform_de + dwc->hird_threshold = hird_threshold + | (dwc->is_utmi_l1_suspend << 4); + ++ dwc3_get_properties(dwc); ++ + platform_set_drvdata(pdev, dwc); + dwc3_cache_hwparams(dwc); + +@@ -1064,6 +1290,11 @@ static int dwc3_probe(struct platform_de + if (ret < 0) + goto err1; + ++ /* Adjust Frame Length */ ++ if (dwc->configure_gfladj) ++ dwc3_writel(dwc->regs, DWC3_GFLADJ, 
GFLADJ_30MHZ_REG_SEL | ++ GFLADJ_30MHZ(GFLADJ_30MHZ_DEFAULT)); ++ + pm_runtime_forbid(dev); + + ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE); +--- a/drivers/usb/dwc3/core.h ++++ b/drivers/usb/dwc3/core.h +@@ -26,6 +26,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -154,6 +155,32 @@ + + /* Bit fields */ + ++/* Global SoC Bus Configuration Register 0 */ ++#define AXI3_CACHE_TYPE_AW 0x8 /* write allocate */ ++#define AXI3_CACHE_TYPE_AR 0x4 /* read allocate */ ++#define AXI3_CACHE_TYPE_SNP 0x2 /* cacheable */ ++#define AXI3_CACHE_TYPE_BUF 0x1 /* bufferable */ ++#define DWC3_GSBUSCFG0_DATARD_SHIFT 28 ++#define DWC3_GSBUSCFG0_DESCRD_SHIFT 24 ++#define DWC3_GSBUSCFG0_DATAWR_SHIFT 20 ++#define DWC3_GSBUSCFG0_DESCWR_SHIFT 16 ++#define DWC3_GSBUSCFG0_SNP_MASK 0xffff0000 ++#define DWC3_GSBUSCFG0_DATABIGEND (1 << 11) ++#define DWC3_GSBUSCFG0_DESCBIGEND (1 << 10) ++#define DWC3_GSBUSCFG0_INCR256BRSTENA (1 << 7) /* INCR256 burst */ ++#define DWC3_GSBUSCFG0_INCR128BRSTENA (1 << 6) /* INCR128 burst */ ++#define DWC3_GSBUSCFG0_INCR64BRSTENA (1 << 5) /* INCR64 burst */ ++#define DWC3_GSBUSCFG0_INCR32BRSTENA (1 << 4) /* INCR32 burst */ ++#define DWC3_GSBUSCFG0_INCR16BRSTENA (1 << 3) /* INCR16 burst */ ++#define DWC3_GSBUSCFG0_INCR8BRSTENA (1 << 2) /* INCR8 burst */ ++#define DWC3_GSBUSCFG0_INCR4BRSTENA (1 << 1) /* INCR4 burst */ ++#define DWC3_GSBUSCFG0_INCRBRSTENA (1 << 0) /* undefined length enable */ ++#define DWC3_GSBUSCFG0_INCRBRST_MASK 0xff ++ ++/* Global SoC Bus Configuration Register 1 */ ++#define DWC3_GSBUSCFG1_1KPAGEENA (1 << 12) /* 1K page boundary enable */ ++#define DWC3_GSBUSCFG1_PTRANSLIMIT_MASK 0xf00 ++ + /* Global Debug Queue/FIFO Space Available Register */ + #define DWC3_GDBGFIFOSPACE_NUM(n) ((n) & 0x1f) + #define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0) +@@ -180,7 +207,6 @@ + #define DWC3_GCTL_CLK_PIPE (1) + #define DWC3_GCTL_CLK_PIPEHALF (2) + #define DWC3_GCTL_CLK_MASK (3) +- + #define DWC3_GCTL_PRTCAP(n) (((n) & (3 << 12)) >> 12) + #define DWC3_GCTL_PRTCAPDIR(n) ((n) << 12) + #define DWC3_GCTL_PRTCAP_HOST 1 +@@ -289,6 +315,10 @@ + /* Global Frame Length Adjustment Register */ + #define DWC3_GFLADJ_30MHZ_SDBND_SEL (1 << 7) + #define DWC3_GFLADJ_30MHZ_MASK 0x3f ++#define GFLADJ_30MHZ_REG_SEL (1 << 7) ++#define GFLADJ_30MHZ(n) ((n) & 0x3f) ++#define GFLADJ_30MHZ_DEFAULT 0x20 ++ + + /* Global User Control Register 2 */ + #define DWC3_GUCTL2_RST_ACTBITLATER (1 << 14) +@@ -753,6 +783,7 @@ struct dwc3_scratchpad_array { + * @regs: base address for our registers + * @regs_size: address space size + * @fladj: frame length adjustment ++ * @incrx_type: INCR burst type adjustment + * @irq_gadget: peripheral controller's IRQ number + * @nr_scratch: number of scratch buffers + * @u1u2: only used on revisions <1.83a for workaround +@@ -847,6 +878,7 @@ struct dwc3 { + spinlock_t lock; + + struct device *dev; ++ struct device *sysdev; + + struct platform_device *xhci; + struct resource xhci_resources[DWC3_XHCI_RESOURCES_NUM]; +@@ -872,6 +904,12 @@ struct dwc3 { + enum usb_phy_interface hsphy_mode; + + u32 fladj; ++ /* ++ * For INCR burst type. ++ * First field: for undefined length INCR burst type enable. 
++ * Second field: for INCRx burst type enable
++ */
++ u32 incrx_type[2];
+ u32 irq_gadget;
+ u32 nr_scratch;
+ u32 u1u2;
+@@ -948,9 +986,12 @@ struct dwc3 {
+ unsigned ep0_bounced:1;
+ unsigned ep0_expect_in:1;
+ unsigned has_hibernation:1;
++ unsigned sysdev_is_parent:1;
+ unsigned has_lpm_erratum:1;
+ unsigned is_utmi_l1_suspend:1;
+ unsigned is_fpga:1;
++ unsigned needs_fifo_resize:1;
++ unsigned configure_gfladj:1;
+ unsigned pending_events:1;
+ unsigned pullups_connected:1;
+ unsigned setup_packet_pending:1;
+@@ -971,9 +1012,12 @@ struct dwc3 {
+ unsigned dis_rxdet_inp3_quirk:1;
+ unsigned dis_u2_freeclk_exists_quirk:1;
+ unsigned dis_del_phy_power_chg_quirk:1;
++ unsigned dma_snooping_quirk:1;
+ 
+ unsigned tx_de_emphasis_quirk:1;
+ unsigned tx_de_emphasis:2;
++
++ u16 imod_interval;
+ };
+ 
+ /* -------------------------------------------------------------------------- */
+--- a/drivers/usb/dwc3/host.c
++++ b/drivers/usb/dwc3/host.c
+@@ -17,6 +17,8 @@
+ 
+ #include <linux/platform_device.h>
+ 
++#include <linux/of_device.h>
++
+ #include "core.h"
+ 
+ int dwc3_host_init(struct dwc3 *dwc)
+@@ -73,12 +75,21 @@ int dwc3_host_init(struct dwc3 *dwc)
+ return -ENOMEM;
+ }
+ 
+- dma_set_coherent_mask(&xhci->dev, dwc->dev->coherent_dma_mask);
++ if (IS_ENABLED(CONFIG_OF) && dwc->dev->of_node)
++ of_dma_configure(&xhci->dev, dwc->dev->of_node);
++ else
++ dma_set_coherent_mask(&xhci->dev, dwc->dev->coherent_dma_mask);
+ 
+- xhci->dev.parent = dwc->dev;
++ xhci->dev.parent = dwc->dev;
+ xhci->dev.dma_mask = dwc->dev->dma_mask;
+ xhci->dev.dma_parms = dwc->dev->dma_parms;
+ 
++ /* set DMA operations */
++ if (dwc->dev->of_node && of_dma_is_coherent(dwc->dev->of_node)) {
++ xhci->dev.archdata.dma_ops = dwc->dev->archdata.dma_ops;
++ dev_dbg(dwc->dev, "set dma_ops for usb\n");
++ }
++
+ dwc->xhci = xhci;
+ 
+ ret = platform_device_add_resources(xhci, dwc->xhci_resources,
+--- a/drivers/usb/gadget/udc/fsl_udc_core.c
++++ b/drivers/usb/gadget/udc/fsl_udc_core.c
+@@ -198,7 +198,11 @@ __acquires(ep->udc->lock)
+ 
+ spin_unlock(&ep->udc->lock);
+ 
+- usb_gadget_giveback_request(&ep->ep, &req->req);
++ /* this complete() should be a function implemented by the gadget
++ * layer, e.g. fsg->bulk_in_complete()
++ */
++ if (req->req.complete)
++ usb_gadget_giveback_request(&ep->ep, &req->req);
+ 
+ spin_lock(&ep->udc->lock);
+ ep->stopped = stopped;
+@@ -245,10 +249,10 @@ static int dr_controller_setup(struct fs
+ if (udc->pdata->have_sysif_regs) {
+ if (udc->pdata->controller_ver) {
+ /* controller version 1.6 or above */
+- ctrl = __raw_readl(&usb_sys_regs->control);
++ ctrl = ioread32be(&usb_sys_regs->control);
+ ctrl &= ~USB_CTRL_UTMI_PHY_EN;
+ ctrl |= USB_CTRL_USB_EN;
+- __raw_writel(ctrl, &usb_sys_regs->control);
++ iowrite32be(ctrl, &usb_sys_regs->control);
+ }
+ }
+ portctrl |= PORTSCX_PTS_ULPI;
+@@ -257,13 +261,14 @@ static int dr_controller_setup(struct fs
+ portctrl |= PORTSCX_PTW_16BIT;
+ /* fall through */
+ case FSL_USB2_PHY_UTMI:
++ case FSL_USB2_PHY_UTMI_DUAL:
+ if (udc->pdata->have_sysif_regs) {
+ if (udc->pdata->controller_ver) {
+ /* controller version 1.6 or above */
+- ctrl = __raw_readl(&usb_sys_regs->control);
++ ctrl = ioread32be(&usb_sys_regs->control);
+ ctrl |= (USB_CTRL_UTMI_PHY_EN |
+ USB_CTRL_USB_EN);
+- __raw_writel(ctrl, &usb_sys_regs->control);
++ iowrite32be(ctrl, &usb_sys_regs->control);
+ mdelay(FSL_UTMI_PHY_DLY); /* Delay for UTMI
+ PHY CLK to become stable - 10ms*/
+ }
+@@ -329,22 +334,22 @@ static int dr_controller_setup(struct fs
+ /* Config control enable i/o output, cpu endian register */
+ #ifndef CONFIG_ARCH_MXC
+ if 
(udc->pdata->have_sysif_regs) { +- ctrl = __raw_readl(&usb_sys_regs->control); ++ ctrl = ioread32be(&usb_sys_regs->control); + ctrl |= USB_CTRL_IOENB; +- __raw_writel(ctrl, &usb_sys_regs->control); ++ iowrite32be(ctrl, &usb_sys_regs->control); + } + #endif + +-#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) ++#if !defined(CONFIG_NOT_COHERENT_CACHE) + /* Turn on cache snooping hardware, since some PowerPC platforms + * wholly rely on hardware to deal with cache coherent. */ + + if (udc->pdata->have_sysif_regs) { + /* Setup Snooping for all the 4GB space */ + tmp = SNOOP_SIZE_2GB; /* starts from 0x0, size 2G */ +- __raw_writel(tmp, &usb_sys_regs->snoop1); ++ iowrite32be(tmp, &usb_sys_regs->snoop1); + tmp |= 0x80000000; /* starts from 0x8000000, size 2G */ +- __raw_writel(tmp, &usb_sys_regs->snoop2); ++ iowrite32be(tmp, &usb_sys_regs->snoop2); + } + #endif + +@@ -1057,7 +1062,7 @@ static int fsl_ep_fifo_status(struct usb + struct ep_queue_head *qh; + + ep = container_of(_ep, struct fsl_ep, ep); +- if (!_ep || (!ep->ep.desc && ep_index(ep) != 0)) ++ if (!_ep || !ep->ep.desc || (ep_index(ep) == 0)) + return -ENODEV; + + udc = (struct fsl_udc *)ep->udc; +@@ -1599,14 +1604,13 @@ static int process_ep_req(struct fsl_udc + struct fsl_req *curr_req) + { + struct ep_td_struct *curr_td; +- int td_complete, actual, remaining_length, j, tmp; ++ int actual, remaining_length, j, tmp; + int status = 0; + int errors = 0; + struct ep_queue_head *curr_qh = &udc->ep_qh[pipe]; + int direction = pipe % 2; + + curr_td = curr_req->head; +- td_complete = 0; + actual = curr_req->req.length; + + for (j = 0; j < curr_req->dtd_count; j++) { +@@ -1651,11 +1655,9 @@ static int process_ep_req(struct fsl_udc + status = -EPROTO; + break; + } else { +- td_complete++; + break; + } + } else { +- td_complete++; + VDBG("dTD transmitted successful"); + } + +@@ -1698,7 +1700,7 @@ static void dtd_complete_irq(struct fsl_ + curr_ep = get_ep_by_pipe(udc, i); + + /* If the ep is configured */ +- if (curr_ep->name == NULL) { ++ if (strncmp(curr_ep->name, "ep", 2)) { + WARNING("Invalid EP?"); + continue; + } +@@ -2420,10 +2422,12 @@ static int fsl_udc_probe(struct platform + usb_sys_regs = (void *)dr_regs + USB_DR_SYS_OFFSET; + #endif + ++#ifdef CONFIG_ARCH_MXC + /* Initialize USB clocks */ + ret = fsl_udc_clk_init(pdev); + if (ret < 0) + goto err_iounmap_noclk; ++#endif + + /* Read Device Controller Capability Parameters register */ + dccparams = fsl_readl(&dr_regs->dccparams); +@@ -2463,9 +2467,11 @@ static int fsl_udc_probe(struct platform + dr_controller_setup(udc_controller); + } + ++#ifdef CONFIG_ARCH_MXC + ret = fsl_udc_clk_finalize(pdev); + if (ret) + goto err_free_irq; ++#endif + + /* Setup gadget structure */ + udc_controller->gadget.ops = &fsl_gadget_ops; +@@ -2478,6 +2484,7 @@ static int fsl_udc_probe(struct platform + /* Setup gadget.dev and register with kernel */ + dev_set_name(&udc_controller->gadget.dev, "gadget"); + udc_controller->gadget.dev.of_node = pdev->dev.of_node; ++ set_dma_ops(&udc_controller->gadget.dev, pdev->dev.archdata.dma_ops); + + if (!IS_ERR_OR_NULL(udc_controller->transceiver)) + udc_controller->gadget.is_otg = 1; +@@ -2529,7 +2536,9 @@ err_free_irq: + err_iounmap: + if (pdata->exit) + pdata->exit(pdev); ++#ifdef CONFIG_ARCH_MXC + fsl_udc_clk_release(); ++#endif + err_iounmap_noclk: + iounmap(dr_regs); + err_release_mem_region: +@@ -2557,8 +2566,9 @@ static int fsl_udc_remove(struct platfor + udc_controller->done = &done; + usb_del_gadget_udc(&udc_controller->gadget); + ++#ifdef 
CONFIG_ARCH_MXC
+ fsl_udc_clk_release();
+-
++#endif
+ /* DR has been stopped in usb_gadget_unregister_driver() */
+ remove_proc_file();
+ 
+@@ -2570,7 +2580,7 @@ static int fsl_udc_remove(struct platfor
+ dma_pool_destroy(udc_controller->td_pool);
+ free_irq(udc_controller->irq, udc_controller);
+ iounmap(dr_regs);
+- if (pdata->operating_mode == FSL_USB2_DR_DEVICE)
++ if (res && (pdata->operating_mode == FSL_USB2_DR_DEVICE))
+ release_mem_region(res->start, resource_size(res));
+ 
+ /* free udc --wait for the release() finished */
+--- a/drivers/usb/gadget/udc/fsl_usb2_udc.h
++++ b/drivers/usb/gadget/udc/fsl_usb2_udc.h
+@@ -20,6 +20,10 @@
+ #define USB_MAX_CTRL_PAYLOAD 64
+ #define USB_DR_SYS_OFFSET 0x400
+ 
++#ifdef CONFIG_SOC_LS1021A
++#undef CONFIG_ARCH_MXC
++#endif
++
+ /* USB DR device mode registers (Little Endian) */
+ struct usb_dr_device {
+ /* Capability register */
+@@ -597,18 +601,6 @@ struct platform_device;
+ int fsl_udc_clk_init(struct platform_device *pdev);
+ int fsl_udc_clk_finalize(struct platform_device *pdev);
+ void fsl_udc_clk_release(void);
+-#else
+-static inline int fsl_udc_clk_init(struct platform_device *pdev)
+-{
+- return 0;
+-}
+-static inline int fsl_udc_clk_finalize(struct platform_device *pdev)
+-{
+- return 0;
+-}
+-static inline void fsl_udc_clk_release(void)
+-{
+-}
+ #endif
+ 
+ #endif
+--- a/drivers/usb/host/Kconfig
++++ b/drivers/usb/host/Kconfig
+@@ -165,7 +165,7 @@ config XPS_USB_HCD_XILINX
+ 
+ config USB_EHCI_FSL
+ tristate "Support for Freescale PPC on-chip EHCI USB controller"
+- depends on FSL_SOC
++ depends on USB_EHCI_HCD
+ select USB_EHCI_ROOT_HUB_TT
+ ---help---
+ Variation of ARC USB block used in some Freescale chips.
+--- a/drivers/usb/host/ehci-fsl.c
++++ b/drivers/usb/host/ehci-fsl.c
+@@ -37,13 +37,100 @@
+ #include <linux/platform_device.h>
+ #include <linux/fsl_devices.h>
+ 
++#ifdef CONFIG_PPC
++#include <linux/suspend.h>
++#include <asm/fsl_pm.h>
++#endif
++
+ #include "ehci.h"
+ #include "ehci-fsl.h"
+ 
++#define FSL_USB_PHY_ADDR 0xffe214000
++
++struct ccsr_usb_port_ctrl {
++ u32 ctrl;
++ u32 drvvbuscfg;
++ u32 pwrfltcfg;
++ u32 sts;
++ u8 res_14[0xc];
++ u32 bistcfg;
++ u32 biststs;
++ u32 abistcfg;
++ u32 abiststs;
++ u8 res_30[0x10];
++ u32 xcvrprg;
++ u32 anaprg;
++ u32 anadrv;
++ u32 anasts;
++};
++
++struct ccsr_usb_phy {
++ u32 id;
++ struct ccsr_usb_port_ctrl port1;
++ u8 res_50[0xc];
++ u32 tvr;
++ u32 pllprg[4];
++ u8 res_70[0x4];
++ u32 anaccfg;
++ u32 dbg;
++ u8 res_7c[0x4];
++ struct ccsr_usb_port_ctrl port2;
++ u8 res_dc[0x334];
++};
++
+ #define DRIVER_DESC "Freescale EHCI Host controller driver"
+ #define DRV_NAME "ehci-fsl"
+ 
+ static struct hc_driver __read_mostly fsl_ehci_hc_driver;
+ 
++struct ehci_fsl {
++ struct ehci_hcd ehci;
++
++#ifdef CONFIG_PM
++ struct ehci_regs saved_regs;
++ struct ccsr_usb_phy saved_phy_regs;
++ /* Saved USB PHY settings, need to restore after deep sleep. */
++ u32 usb_ctrl;
++#endif
++ /*
++ * store current hcd state for otg;
++ * have_hcd is true when the host driver is already part of the otg
++ * framework, otherwise false;
++ * hcd_add is true when the otg framework wants to add the host
++ * driver as part of otg; false when it wants to remove it
++ */
++ unsigned have_hcd:1;
++ unsigned hcd_add:1;
++};
++
++static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++
++ return container_of(ehci, struct ehci_fsl, ehci);
++}
++
++#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
++static void do_change_hcd(struct work_struct *work)
++{
++ struct ehci_hcd *ehci = container_of(work, struct ehci_hcd,
++ change_hcd_work);
++ struct usb_hcd *hcd = ehci_to_hcd(ehci);
++ struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
++ void __iomem *non_ehci = hcd->regs;
++ int retval;
++
++ if (ehci_fsl->hcd_add && !ehci_fsl->have_hcd) {
++ writel(USBMODE_CM_HOST, non_ehci + FSL_SOC_USB_USBMODE);
++ /* host, gadget and otg share same int line */
++ retval = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
++ if (retval == 0)
++ ehci_fsl->have_hcd = 1;
++ } else if (!ehci_fsl->hcd_add && ehci_fsl->have_hcd) {
++ usb_remove_hcd(hcd);
++ ehci_fsl->have_hcd = 0;
++ }
++}
++#endif
+ 
+ /* configure so an HC device and id are always provided */
+ /* always called with process context; sleeping is OK */
+@@ -131,6 +259,12 @@ static int fsl_ehci_drv_probe(struct pla
+ clrsetbits_be32(hcd->regs + FSL_SOC_USB_CTRL,
+ CONTROL_REGISTER_W1C_MASK, 0x4);
+ 
++ /* Set USB_EN bit to select ULPI phy for USB controller version 2.5 */
++ if (pdata->controller_ver == FSL_USB_VER_2_5 &&
++ pdata->phy_mode == FSL_USB2_PHY_ULPI)
++ iowrite32be(USB_CTRL_USB_EN, hcd->regs + FSL_SOC_USB_CTRL);
++
++
+ /*
+ * Enable UTMI phy and program PTS field in UTMI mode before asserting
+ * controller reset for USB Controller version 2.5
+@@ -143,16 +277,20 @@ static int fsl_ehci_drv_probe(struct pla
+ 
+ /* Don't need to set host mode here. 
+
+-	retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
++	retval = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_NO_SUSPEND);
+	if (retval != 0)
+		goto err2;
+	device_wakeup_enable(hcd->self.controller);
+
+-#ifdef CONFIG_USB_OTG
++#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
+	if (pdata->operating_mode == FSL_USB2_DR_OTG) {
+		struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++		struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+
+		hcd->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
++
++		INIT_WORK(&ehci->change_hcd_work, do_change_hcd);
++
+		dev_dbg(&pdev->dev, "hcd=0x%p ehci=0x%p, phy=0x%p\n",
+			hcd, ehci, hcd->usb_phy);
+
+@@ -168,6 +306,11 @@ static int fsl_ehci_drv_probe(struct pla
+			retval = -ENODEV;
+			goto err2;
+		}
++
++		ehci_fsl->have_hcd = 1;
++	} else {
++		dev_err(&pdev->dev, "wrong operating mode\n");
++		return -ENODEV;
+	}
+ #endif
+	return retval;
+@@ -181,6 +324,18 @@ static int fsl_ehci_drv_probe(struct pla
+	return retval;
+ }
+
++static bool usb_phy_clk_valid(struct usb_hcd *hcd)
++{
++	void __iomem *non_ehci = hcd->regs;
++	bool ret = true;
++
++	if (!(in_be32(non_ehci + FSL_SOC_USB_CTRL) & PHY_CLK_VALID))
++		ret = false;
++
++	return ret;
++}
++
+ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
+			       enum fsl_usb2_phy_modes phy_mode,
+			       unsigned int port_offset)
+@@ -219,6 +374,21 @@ static int ehci_fsl_setup_phy(struct usb
+		/* fall through */
+	case FSL_USB2_PHY_UTMI:
+	case FSL_USB2_PHY_UTMI_DUAL:
++		if (pdata->has_fsl_erratum_a006918) {
++			pr_warn("fsl-ehci: USB PHY clock invalid\n");
++			return -EINVAL;
++		}
++
++		/* PHY_CLK_VALID bit is de-featured from all controller
++		 * versions below 2.4 and is to be checked only for
++		 * internal UTMI phy
++		 */
++		if (pdata->controller_ver > FSL_USB_VER_2_4 &&
++		    pdata->have_sysif_regs && !usb_phy_clk_valid(hcd)) {
++			pr_err("fsl-ehci: USB PHY clock invalid\n");
++			return -EINVAL;
++		}
++
+		if (pdata->have_sysif_regs && pdata->controller_ver) {
+			/* controller version 1.6 or above */
+			clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
+@@ -292,14 +462,9 @@ static int ehci_fsl_usb_setup(struct ehc
+		return -EINVAL;
+
+	if (pdata->operating_mode == FSL_USB2_MPH_HOST) {
+-		unsigned int chip, rev, svr;
+-
+-		svr = mfspr(SPRN_SVR);
+-		chip = svr >> 16;
+-		rev = (svr >> 4) & 0xf;
+
+		/* Deal with USB Erratum #14 on MPC834x Rev 1.0 & 1.1 chips */
+-		if ((rev == 1) && (chip >= 0x8050) && (chip <= 0x8055))
++		if (pdata->has_fsl_erratum_14 == 1)
+			ehci->has_fsl_port_bug = 1;
+
+		if (pdata->port_enables & FSL_USB2_PORT0_ENABLED)
+@@ -379,16 +544,57 @@ static int ehci_fsl_setup(struct usb_hcd
+	return retval;
+ }
+
+-struct ehci_fsl {
+-	struct ehci_hcd ehci;
+
+ #ifdef CONFIG_PM
+-	/* Saved USB PHY settings, need to restore after deep sleep. */
+-	u32 usb_ctrl;
+-#endif
+-};
++static void __iomem *phy_reg;
+
+-#ifdef CONFIG_PM
++#ifdef CONFIG_PPC
++/* save usb registers */
++static int ehci_fsl_save_context(struct usb_hcd *hcd)
++{
++	struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
++	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++	void __iomem *non_ehci = hcd->regs;
++	struct device *dev = hcd->self.controller;
++	struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
++
++	if (pdata->phy_mode == FSL_USB2_PHY_UTMI_DUAL) {
++		phy_reg = ioremap(FSL_USB_PHY_ADDR,
++				sizeof(struct ccsr_usb_phy));
++		_memcpy_fromio((void *)&ehci_fsl->saved_phy_regs, phy_reg,
++				sizeof(struct ccsr_usb_phy));
++	}
++
++	_memcpy_fromio((void *)&ehci_fsl->saved_regs, ehci->regs,
++			sizeof(struct ehci_regs));
++	ehci_fsl->usb_ctrl = ioread32be(non_ehci + FSL_SOC_USB_CTRL);
++
++	return 0;
++}
++
++/* Restore usb registers */
++static int ehci_fsl_restore_context(struct usb_hcd *hcd)
++{
++	struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
++	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++	void __iomem *non_ehci = hcd->regs;
++	struct device *dev = hcd->self.controller;
++	struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
++
++	if (pdata->phy_mode == FSL_USB2_PHY_UTMI_DUAL) {
++		if (phy_reg)
++			_memcpy_toio(phy_reg,
++				(void *)&ehci_fsl->saved_phy_regs,
++				sizeof(struct ccsr_usb_phy));
++	}
++
++	_memcpy_toio(ehci->regs, (void *)&ehci_fsl->saved_regs,
++			sizeof(struct ehci_regs));
++	iowrite32be(ehci_fsl->usb_ctrl, non_ehci + FSL_SOC_USB_CTRL);
++
++	return 0;
++}
++#endif
+
+ #ifdef CONFIG_PPC_MPC512x
+ static int ehci_fsl_mpc512x_drv_suspend(struct device *dev)
+@@ -535,26 +741,43 @@ static inline int ehci_fsl_mpc512x_drv_r
+ }
+ #endif /* CONFIG_PPC_MPC512x */
+
+-static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd)
+-{
+-	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+-
+-	return container_of(ehci, struct ehci_fsl, ehci);
+-}
+-
+ static int ehci_fsl_drv_suspend(struct device *dev)
+ {
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+	void __iomem *non_ehci = hcd->regs;
++#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
++	struct usb_bus *host = &hcd->self;
++#endif
++
++#ifdef CONFIG_PPC
++	suspend_state_t pm_state;
++
++	pm_state = pm_suspend_state();
++	if (pm_state == PM_SUSPEND_MEM)
++		ehci_fsl_save_context(hcd);
++#endif
+
+	if (of_device_is_compatible(dev->parent->of_node,
+				    "fsl,mpc5121-usb2-dr")) {
+		return ehci_fsl_mpc512x_drv_suspend(dev);
+	}
+
++#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
++	if (host->is_otg) {
++		struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++
++		/* remove hcd */
++		ehci_fsl->hcd_add = 0;
++		schedule_work(&ehci->change_hcd_work);
++		host->is_otg = 0;
++		return 0;
++	}
++#endif
++
+	ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd),
+			device_may_wakeup(dev));
++
+	if (!fsl_deep_sleep())
+		return 0;
+
+@@ -568,12 +791,34 @@ static int ehci_fsl_drv_resume(struct de
+	struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	void __iomem *non_ehci = hcd->regs;
++#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
++	struct usb_bus *host = &hcd->self;
++#endif
++
++#ifdef CONFIG_PPC
++	suspend_state_t pm_state;
++
++	pm_state = pm_suspend_state();
++	if (pm_state == PM_SUSPEND_MEM)
++		ehci_fsl_restore_context(hcd);
++#endif
+
+	if (of_device_is_compatible(dev->parent->of_node,
+				    "fsl,mpc5121-usb2-dr")) {
+		return ehci_fsl_mpc512x_drv_resume(dev);
+	}
+
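++	/*
++	 * An is_otg resume request comes from the OTG state machine rather
++	 * than from system PM: queue change_hcd_work to re-register the
++	 * host side and kick the root hub instead of running the full
++	 * controller resume below.
++	 */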
++#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
++	if (host->is_otg) {
++		/* add hcd */
++		ehci_fsl->hcd_add = 1;
++		schedule_work(&ehci->change_hcd_work);
++		usb_hcd_resume_root_hub(hcd);
++		host->is_otg = 0;
++		return 0;
++	}
++#endif
++
+	ehci_prepare_ports_for_controller_resume(ehci);
+	if (!fsl_deep_sleep())
+		return 0;
+--- a/drivers/usb/host/ehci-fsl.h
++++ b/drivers/usb/host/ehci-fsl.h
+@@ -63,4 +63,7 @@
+ #define UTMI_PHY_EN	(1<<9)
+ #define ULPI_PHY_CLK_SEL	(1<<10)
+ #define PHY_CLK_VALID	(1<<17)
++
++/* Retry count for checking UTMI PHY CLK validity */
++#define UTMI_PHY_CLK_VALID_CHK_RETRY 5
+ #endif /* _EHCI_FSL_H */
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -305,6 +305,8 @@ static int ehci_bus_suspend (struct usb_
+					USB_PORT_STAT_HIGH_SPEED)
+				fs_idle_delay = true;
+			ehci_writel(ehci, t2, reg);
++			if (ehci_has_fsl_susp_errata(ehci))
++				usleep_range(10000, 20000);
+			changed = 1;
+		}
+	}
+--- a/drivers/usb/host/ehci.h
++++ b/drivers/usb/host/ehci.h
+@@ -180,6 +180,9 @@ struct ehci_hcd {	/* one per controlle
+	unsigned	periodic_count;	/* periodic activity count */
+	unsigned	uframe_periodic_max; /* max periodic time per uframe */
+
++#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
++	struct work_struct change_hcd_work;
++#endif
+
+	/* list of itds & sitds completed while now_frame was still active */
+	struct list_head	cached_itd_list;
+@@ -706,8 +709,10 @@ ehci_port_speed(struct ehci_hcd *ehci, u
+ * incoming packets get corrupted in HS mode
+ */
+ #define ehci_has_fsl_hs_errata(e)	((e)->has_fsl_hs_errata)
++#define ehci_has_fsl_susp_errata(e)	((e)->has_fsl_susp_errata)
+ #else
+ #define ehci_has_fsl_hs_errata(e)	(0)
++#define ehci_has_fsl_susp_errata(e)	(0)
+ #endif
+
+ /*
+--- a/drivers/usb/host/fsl-mph-dr-of.c
++++ b/drivers/usb/host/fsl-mph-dr-of.c
+@@ -226,6 +226,18 @@ static int fsl_usb2_mph_dr_of_probe(stru
+		of_property_read_bool(np, "fsl,usb-erratum-a007792");
+	pdata->has_fsl_erratum_a005275 =
+		of_property_read_bool(np, "fsl,usb-erratum-a005275");
++	pdata->has_fsl_erratum_a005697 =
++		of_property_read_bool(np, "fsl,usb_erratum-a005697");
++	if (of_get_property(np, "fsl,erratum_a006918", NULL))
++		pdata->has_fsl_erratum_a006918 = 1;
++	else
++		pdata->has_fsl_erratum_a006918 = 0;
++
++	if (of_get_property(np, "fsl,usb_erratum_14", NULL))
++		pdata->has_fsl_erratum_14 = 1;
++	else
++		pdata->has_fsl_erratum_14 = 0;
++
+
+	/*
+	 * Determine whether phy_clk_valid needs to be checked
+--- a/drivers/usb/phy/phy-fsl-usb.c
++++ b/drivers/usb/phy/phy-fsl-usb.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (C) 2007,2008 Freescale semiconductor, Inc.
++ * Copyright 2007,2008 Freescale Semiconductor, Inc.
+ * + * Author: Li Yang + * Jerry Huang +@@ -463,6 +463,7 @@ void otg_reset_controller(void) + int fsl_otg_start_host(struct otg_fsm *fsm, int on) + { + struct usb_otg *otg = fsm->otg; ++ struct usb_bus *host = otg->host; + struct device *dev; + struct fsl_otg *otg_dev = + container_of(otg->usb_phy, struct fsl_otg, phy); +@@ -486,6 +487,7 @@ int fsl_otg_start_host(struct otg_fsm *f + otg_reset_controller(); + VDBG("host on......\n"); + if (dev->driver->pm && dev->driver->pm->resume) { ++ host->is_otg = 1; + retval = dev->driver->pm->resume(dev); + if (fsm->id) { + /* default-b */ +@@ -510,8 +512,11 @@ int fsl_otg_start_host(struct otg_fsm *f + else { + VDBG("host off......\n"); + if (dev && dev->driver) { +- if (dev->driver->pm && dev->driver->pm->suspend) ++ if (dev->driver->pm && ++ dev->driver->pm->suspend) { ++ host->is_otg = 1; + retval = dev->driver->pm->suspend(dev); ++ } + if (fsm->id) + /* default-b */ + fsl_otg_drv_vbus(fsm, 0); +@@ -539,8 +544,17 @@ int fsl_otg_start_gadget(struct otg_fsm + dev = otg->gadget->dev.parent; + + if (on) { +- if (dev->driver->resume) ++ /* Delay gadget resume to synchronize between host and gadget ++ * drivers. Upon role-reversal host drv is shutdown by kernel ++ * worker thread. By the time host drv shuts down, controller ++ * gets programmed for gadget role. Shutting host drv after ++ * this results in controller getting reset, and it stops ++ * responding to otg events ++ */ ++ if (dev->driver->resume) { ++ msleep(1000); + dev->driver->resume(dev); ++ } + } else { + if (dev->driver->suspend) + dev->driver->suspend(dev, otg_suspend_state); +@@ -672,6 +686,10 @@ static void fsl_otg_event(struct work_st + fsl_otg_start_host(fsm, 0); + otg_drv_vbus(fsm, 0); + fsl_otg_start_gadget(fsm, 1); ++ } else { ++ fsl_otg_start_gadget(fsm, 0); ++ otg_drv_vbus(fsm, 1); ++ fsl_otg_start_host(fsm, 1); + } + } + +@@ -724,6 +742,7 @@ irqreturn_t fsl_otg_isr(int irq, void *d + { + struct otg_fsm *fsm = &((struct fsl_otg *)dev_id)->fsm; + struct usb_otg *otg = ((struct fsl_otg *)dev_id)->phy.otg; ++ struct fsl_otg *otg_dev = dev_id; + u32 otg_int_src, otg_sc; + + otg_sc = fsl_readl(&usb_dr_regs->otgsc); +@@ -753,18 +772,8 @@ irqreturn_t fsl_otg_isr(int irq, void *d + otg->gadget->is_a_peripheral = !fsm->id; + VDBG("ID int (ID is %d)\n", fsm->id); + +- if (fsm->id) { /* switch to gadget */ +- schedule_delayed_work( +- &((struct fsl_otg *)dev_id)->otg_event, +- 100); +- } else { /* switch to host */ +- cancel_delayed_work(& +- ((struct fsl_otg *)dev_id)-> +- otg_event); +- fsl_otg_start_gadget(fsm, 0); +- otg_drv_vbus(fsm, 1); +- fsl_otg_start_host(fsm, 1); +- } ++ schedule_delayed_work(&otg_dev->otg_event, 100); ++ + return IRQ_HANDLED; + } + } +@@ -923,12 +932,32 @@ int usb_otg_start(struct platform_device + temp &= ~(PORTSC_PHY_TYPE_SEL | PORTSC_PTW); + switch (pdata->phy_mode) { + case FSL_USB2_PHY_ULPI: ++ if (pdata->controller_ver) { ++ /* controller version 1.6 or above */ ++ setbits32(&p_otg->dr_mem_map->control, ++ USB_CTRL_ULPI_PHY_CLK_SEL); ++ /* ++ * Due to controller issue of PHY_CLK_VALID in ULPI ++ * mode, we set USB_CTRL_USB_EN before checking ++ * PHY_CLK_VALID, otherwise PHY_CLK_VALID doesn't work. 
++		 */
++		clrsetbits_be32(&p_otg->dr_mem_map->control,
++			USB_CTRL_UTMI_PHY_EN, USB_CTRL_IOENB);
++	}
+		temp |= PORTSC_PTS_ULPI;
+		break;
+	case FSL_USB2_PHY_UTMI_WIDE:
+		temp |= PORTSC_PTW_16BIT;
+		/* fall through */
+	case FSL_USB2_PHY_UTMI:
++		if (pdata->controller_ver) {
++			/* controller version 1.6 or above */
++			setbits32(&p_otg->dr_mem_map->control,
++				USB_CTRL_UTMI_PHY_EN);
++			/* Delay for UTMI PHY CLK to become stable - 10ms */
++			mdelay(FSL_UTMI_PHY_DLY);
++		}
+		temp |= PORTSC_PTS_UTMI;
+		/* fall through */
+	default:
+--- a/drivers/usb/phy/phy-fsl-usb.h
++++ b/drivers/usb/phy/phy-fsl-usb.h
+@@ -199,6 +199,14 @@
+ /* control Register Bit Masks */
+ #define USB_CTRL_IOENB	(0x1<<2)
+ #define USB_CTRL_ULPI_INT0EN	(0x1<<0)
++#define USB_CTRL_WU_INT_EN	(0x1<<1)
++#define USB_CTRL_LINE_STATE_FILTER__EN	(0x1<<3)
++#define USB_CTRL_KEEP_OTG_ON	(0x1<<4)
++#define USB_CTRL_OTG_PORT	(0x1<<5)
++#define USB_CTRL_PLL_RESET	(0x1<<8)
++#define USB_CTRL_UTMI_PHY_EN	(0x1<<9)
++#define USB_CTRL_ULPI_PHY_CLK_SEL	(0x1<<10)
++#define USB_CTRL_PHY_CLK_VALID	(0x1<<17)
+
+ /* BCSR5 */
+ #define BCSR5_INT_USB	(0x02)
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -362,6 +362,7 @@ struct usb_bus {
+					 * for control transfers?
+					 */
+	u8 otg_port;			/* 0, or number of OTG/HNP port */
++	unsigned is_otg:1;		/* true when host is also otg */
+	unsigned is_b_host:1;		/* true during some HNP roleswitches */
+	unsigned b_hnp_enable:1;	/* OTG: did A-Host enable HNP? */
+	unsigned no_stop_on_short:1;	/*
+--- a/include/linux/usb/of.h
++++ b/include/linux/usb/of.h
+@@ -11,6 +11,8 @@
+ #include
+ #include
+
++enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np);
++
+ #if IS_ENABLED(CONFIG_OF)
+ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0);
+ bool of_usb_host_tpl_support(struct device_node *np);
diff --git a/target/linux/layerscape/patches-4.9/818-vfio-support-layerscape.patch b/target/linux/layerscape/patches-4.9/818-vfio-support-layerscape.patch
new file mode 100644
index 000000000..4854738d3
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/818-vfio-support-layerscape.patch
@@ -0,0 +1,1166 @@
+From 8d82d92ea697145c32bb36d9f39afd5bb0927bc2 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu
+Date: Wed, 27 Sep 2017 10:34:46 +0800
+Subject: [PATCH] vfio: support layerscape
+
+This is an integrated patch for layerscape vfio support.
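+It adds a vfio-fsl-mc meta-driver that exposes DPAA2 Management Complex
+(DPRC) containers and their child objects to userspace through the
+standard VFIO region, interrupt and mmap interfaces, and teaches the
+type1 IOMMU backend about reserved software MSI regions.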
+
+Signed-off-by: Bharat Bhushan
+Signed-off-by: Eric Auger
+Signed-off-by: Robin Murphy
+Signed-off-by: Wei Yongjun
+Signed-off-by: Yangbo Lu
+---
+ drivers/vfio/Kconfig                      |    1 +
+ drivers/vfio/Makefile                     |    1 +
+ drivers/vfio/fsl-mc/Kconfig               |    9 +
+ drivers/vfio/fsl-mc/Makefile              |    2 +
+ drivers/vfio/fsl-mc/vfio_fsl_mc.c         |  753 ++++++++++++++++++++++++++++++
+ drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c    |  199 ++++++++
+ drivers/vfio/fsl-mc/vfio_fsl_mc_private.h |   55 +++
+ drivers/vfio/vfio_iommu_type1.c           |   39 +-
+ include/uapi/linux/vfio.h                 |    1 +
+ 9 files changed, 1058 insertions(+), 2 deletions(-)
+ create mode 100644 drivers/vfio/fsl-mc/Kconfig
+ create mode 100644 drivers/vfio/fsl-mc/Makefile
+ create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc.c
+ create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+ create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
+
+--- a/drivers/vfio/Kconfig
++++ b/drivers/vfio/Kconfig
+@@ -48,4 +48,5 @@ menuconfig VFIO_NOIOMMU
+
+ source "drivers/vfio/pci/Kconfig"
+ source "drivers/vfio/platform/Kconfig"
++source "drivers/vfio/fsl-mc/Kconfig"
+ source "virt/lib/Kconfig"
+--- a/drivers/vfio/Makefile
++++ b/drivers/vfio/Makefile
+@@ -7,3 +7,4 @@ obj-$(CONFIG_VFIO_IOMMU_SPAPR_TCE) += vf
+ obj-$(CONFIG_VFIO_SPAPR_EEH) += vfio_spapr_eeh.o
+ obj-$(CONFIG_VFIO_PCI) += pci/
+ obj-$(CONFIG_VFIO_PLATFORM) += platform/
++obj-$(CONFIG_VFIO_FSL_MC) += fsl-mc/
+--- /dev/null
++++ b/drivers/vfio/fsl-mc/Kconfig
+@@ -0,0 +1,9 @@
++config VFIO_FSL_MC
++	tristate "VFIO support for QorIQ DPAA2 fsl-mc bus devices"
++	depends on VFIO && FSL_MC_BUS && EVENTFD
++	help
++	  Driver to enable VFIO passthrough support for QorIQ DPAA2 fsl-mc
++	  (Management Complex) devices. This is required to pass through
++	  fsl-mc bus devices using the VFIO framework.
++
++	  If you don't know what to do here, say N.
+--- /dev/null
++++ b/drivers/vfio/fsl-mc/Makefile
+@@ -0,0 +1,2 @@
++vfio-fsl-mc-y := vfio_fsl_mc.o vfio_fsl_mc_intr.o
++obj-$(CONFIG_VFIO_FSL_MC) += vfio-fsl-mc.o
+--- /dev/null
++++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
+@@ -0,0 +1,753 @@
++/*
++ * Freescale Management Complex (MC) device passthrough using VFIO
++ *
++ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
++ * Copyright 2016-2017 NXP
++ * Author: Bharat Bhushan
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++#include "../../staging/fsl-mc/include/mc.h"
++#include "../../staging/fsl-mc/include/mc-bus.h"
++#include "../../staging/fsl-mc/include/mc-sys.h"
++#include "../../staging/fsl-mc/bus/dprc-cmd.h"
++
++#include "vfio_fsl_mc_private.h"
++
++#define DRIVER_VERSION "0.10"
++#define DRIVER_AUTHOR "Bharat Bhushan "
++#define DRIVER_DESC "VFIO for FSL-MC devices - User Level meta-driver"
++
++static DEFINE_MUTEX(driver_lock);
++
++/* FSL-MC device regions (address and size) are aligned to 64K.
++ * While MC firmware reports size less than 64K for some objects (it actually
++ * reports size which does not include reserved space beyond valid bytes).
++ * Align the size to PAGE_SIZE for userspace to mmap.
++ */ ++static size_t aligned_region_size(struct fsl_mc_device *mc_dev, int index) ++{ ++ size_t size; ++ ++ size = resource_size(&mc_dev->regions[index]); ++ return PAGE_ALIGN(size); ++} ++ ++static int vfio_fsl_mc_regions_init(struct vfio_fsl_mc_device *vdev) ++{ ++ struct fsl_mc_device *mc_dev = vdev->mc_dev; ++ int count = mc_dev->obj_desc.region_count; ++ int i; ++ ++ vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region), ++ GFP_KERNEL); ++ if (!vdev->regions) ++ return -ENOMEM; ++ ++ for (i = 0; i < mc_dev->obj_desc.region_count; i++) { ++ vdev->regions[i].addr = mc_dev->regions[i].start; ++ vdev->regions[i].size = aligned_region_size(mc_dev, i); ++ vdev->regions[i].type = VFIO_FSL_MC_REGION_TYPE_MMIO; ++ if (mc_dev->regions[i].flags & IORESOURCE_CACHEABLE) ++ vdev->regions[i].type |= ++ VFIO_FSL_MC_REGION_TYPE_CACHEABLE; ++ vdev->regions[i].flags = VFIO_REGION_INFO_FLAG_MMAP; ++ vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ; ++ if (!(mc_dev->regions[i].flags & IORESOURCE_READONLY)) ++ vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE; ++ } ++ ++ vdev->num_regions = mc_dev->obj_desc.region_count; ++ return 0; ++} ++ ++static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev) ++{ ++ int i; ++ ++ for (i = 0; i < vdev->num_regions; i++) ++ iounmap(vdev->regions[i].ioaddr); ++ ++ vdev->num_regions = 0; ++ kfree(vdev->regions); ++} ++ ++static int vfio_fsl_mc_open(void *device_data) ++{ ++ struct vfio_fsl_mc_device *vdev = device_data; ++ int ret; ++ ++ if (!try_module_get(THIS_MODULE)) ++ return -ENODEV; ++ ++ mutex_lock(&driver_lock); ++ if (!vdev->refcnt) { ++ ret = vfio_fsl_mc_regions_init(vdev); ++ if (ret) ++ goto error_region_init; ++ ++ ret = vfio_fsl_mc_irqs_init(vdev); ++ if (ret) ++ goto error_irq_init; ++ } ++ ++ vdev->refcnt++; ++ mutex_unlock(&driver_lock); ++ return 0; ++ ++error_irq_init: ++ vfio_fsl_mc_regions_cleanup(vdev); ++error_region_init: ++ mutex_unlock(&driver_lock); ++ if (ret) ++ module_put(THIS_MODULE); ++ ++ return ret; ++} ++ ++static void vfio_fsl_mc_release(void *device_data) ++{ ++ struct vfio_fsl_mc_device *vdev = device_data; ++ struct fsl_mc_device *mc_dev = vdev->mc_dev; ++ ++ mutex_lock(&driver_lock); ++ ++ if (!(--vdev->refcnt)) { ++ vfio_fsl_mc_regions_cleanup(vdev); ++ vfio_fsl_mc_irqs_cleanup(vdev); ++ } ++ ++ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) ++ dprc_reset_container(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ mc_dev->obj_desc.id); ++ ++ mutex_unlock(&driver_lock); ++ ++ module_put(THIS_MODULE); ++} ++ ++static long vfio_fsl_mc_ioctl(void *device_data, unsigned int cmd, ++ unsigned long arg) ++{ ++ struct vfio_fsl_mc_device *vdev = device_data; ++ struct fsl_mc_device *mc_dev = vdev->mc_dev; ++ unsigned long minsz; ++ ++ if (WARN_ON(!mc_dev)) ++ return -ENODEV; ++ ++ switch (cmd) { ++ case VFIO_DEVICE_GET_INFO: ++ { ++ struct vfio_device_info info; ++ ++ minsz = offsetofend(struct vfio_device_info, num_irqs); ++ ++ if (copy_from_user(&info, (void __user *)arg, minsz)) ++ return -EFAULT; ++ ++ if (info.argsz < minsz) ++ return -EINVAL; ++ ++ info.flags = VFIO_DEVICE_FLAGS_FSL_MC; ++ info.num_regions = mc_dev->obj_desc.region_count; ++ info.num_irqs = mc_dev->obj_desc.irq_count; ++ ++ return copy_to_user((void __user *)arg, &info, minsz); ++ } ++ case VFIO_DEVICE_GET_REGION_INFO: ++ { ++ struct vfio_region_info info; ++ ++ minsz = offsetofend(struct vfio_region_info, offset); ++ ++ if (copy_from_user(&info, (void __user *)arg, minsz)) ++ return -EFAULT; ++ ++ if (info.argsz < minsz) ++ return -EINVAL; 
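++
++		/*
++		 * The offset reported to userspace encodes the region index
++		 * in the bits above VFIO_FSL_MC_OFFSET_SHIFT (see
++		 * VFIO_FSL_MC_INDEX_TO_OFFSET); mmap() later recovers the
++		 * index from vm_pgoff the same way.
++		 */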
++		if (info.index >= vdev->num_regions)
++			return -EINVAL;
++
++		/* map offset to the physical address */
++		info.offset = VFIO_FSL_MC_INDEX_TO_OFFSET(info.index);
++		info.size = vdev->regions[info.index].size;
++		info.flags = vdev->regions[info.index].flags;
++
++		return copy_to_user((void __user *)arg, &info, minsz);
++	}
++	case VFIO_DEVICE_GET_IRQ_INFO:
++	{
++		struct vfio_irq_info info;
++
++		minsz = offsetofend(struct vfio_irq_info, count);
++		if (copy_from_user(&info, (void __user *)arg, minsz))
++			return -EFAULT;
++
++		if (info.argsz < minsz)
++			return -EINVAL;
++
++		if (info.index >= mc_dev->obj_desc.irq_count)
++			return -EINVAL;
++
++		if (vdev->mc_irqs != NULL) {
++			info.flags = vdev->mc_irqs[info.index].flags;
++			info.count = vdev->mc_irqs[info.index].count;
++		} else {
++			/*
++			 * If IRQs are not initialized then these can not
++			 * be configured and used by user-space.
++			 */
++			info.flags = 0;
++			info.count = 0;
++		}
++
++		return copy_to_user((void __user *)arg, &info, minsz);
++	}
++	case VFIO_DEVICE_SET_IRQS:
++	{
++		struct vfio_irq_set hdr;
++		u8 *data = NULL;
++		int ret = 0;
++
++		minsz = offsetofend(struct vfio_irq_set, count);
++
++		if (copy_from_user(&hdr, (void __user *)arg, minsz))
++			return -EFAULT;
++
++		if (hdr.argsz < minsz)
++			return -EINVAL;
++
++		if (hdr.index >= mc_dev->obj_desc.irq_count)
++			return -EINVAL;
++
++		if (hdr.start != 0 || hdr.count > 1)
++			return -EINVAL;
++
++		if (hdr.count == 0 &&
++		    (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE) ||
++		    !(hdr.flags & VFIO_IRQ_SET_ACTION_TRIGGER)))
++			return -EINVAL;
++
++		if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
++				  VFIO_IRQ_SET_ACTION_TYPE_MASK))
++			return -EINVAL;
++
++		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
++			size_t size;
++
++			if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
++				size = sizeof(uint8_t);
++			else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
++				size = sizeof(int32_t);
++			else
++				return -EINVAL;
++
++			if (hdr.argsz - minsz < hdr.count * size)
++				return -EINVAL;
++
++			data = memdup_user((void __user *)(arg + minsz),
++					   hdr.count * size);
++			if (IS_ERR(data))
++				return PTR_ERR(data);
++		}
++
++		ret = vfio_fsl_mc_set_irqs_ioctl(vdev, hdr.flags,
++						 hdr.index, hdr.start,
++						 hdr.count, data);
++		return ret;
++	}
++	case VFIO_DEVICE_RESET:
++	{
++		return -EINVAL;
++	}
++	default:
++		return -EINVAL;
++	}
++}
++
++static ssize_t vfio_fsl_mc_read(void *device_data, char __user *buf,
++				size_t count, loff_t *ppos)
++{
++	struct vfio_fsl_mc_device *vdev = device_data;
++	unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
++	loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
++	struct vfio_fsl_mc_region *region;
++	uint64_t data[8];
++	int i;
++
++	/* Read is supported only for the DPRC device */
++	if (strcmp(vdev->mc_dev->obj_desc.type, "dprc"))
++		return -EINVAL;
++
++	if (index >= vdev->num_regions)
++		return -EINVAL;
++
++	region = &vdev->regions[index];
++
++	if (!(region->flags & VFIO_REGION_INFO_FLAG_READ))
++		return -EINVAL;
++
++	if (!(region->type & VFIO_FSL_MC_REGION_TYPE_MMIO))
++		return -EINVAL;
++
++	if (!region->ioaddr) {
++		region->ioaddr = ioremap_nocache(region->addr, region->size);
++		if (!region->ioaddr)
++			return -ENOMEM;
++	}
++
++	if (count != 64 || off != 0)
++		return -EINVAL;
++
++	for (i = 7; i >= 0; i--)
++		data[i] = readq(region->ioaddr + i * sizeof(uint64_t));
++
++	if (copy_to_user(buf, data, 64))
++		return -EFAULT;
++
++	return count;
++}
++
++#define MC_CMD_COMPLETION_TIMEOUT_MS	5000
++#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS	500
++
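++/*
++ * DPRC commands written through the write() path complete by polling the
++ * status field of the command header until the MC firmware no longer
++ * reports MC_CMD_STATUS_READY, bounded by a 5 second timeout.
++ */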
++static int vfio_fsl_mc_dprc_wait_for_response(void __iomem *ioaddr)
++{
++	enum mc_cmd_status status;
++	unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;
++
++	for (;;) {
++		u64 header;
++		struct mc_cmd_header *resp_hdr;
++
++		__iormb();
++		header = readq(ioaddr);
++		__iormb();
++
++		resp_hdr = (struct mc_cmd_header *)&header;
++		status = (enum mc_cmd_status)resp_hdr->status;
++		if (status != MC_CMD_STATUS_READY)
++			break;
++
++		udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
++		timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
++		if (timeout_usecs == 0)
++			return -ETIMEDOUT;
++	}
++
++	return 0;
++}
++
++static int vfio_fsl_mc_send_command(void __iomem *ioaddr, uint64_t *cmd_data)
++{
++	int i;
++
++	/* Write the command header last to trigger execution */
++	for (i = 7; i >= 0; i--)
++		writeq(cmd_data[i], ioaddr + i * sizeof(uint64_t));
++
++	/* Wait for response before returning to user-space.
++	 * This can be optimized in future to even prepare response
++	 * before returning to user-space and avoid read ioctl.
++	 */
++	return vfio_fsl_mc_dprc_wait_for_response(ioaddr);
++}
++
++static int vfio_handle_dprc_commands(void __iomem *ioaddr, uint64_t *cmd_data)
++{
++	uint64_t cmd_hdr = cmd_data[0];
++	int cmd = (cmd_hdr >> 52) & 0xfff;
++
++	switch (cmd) {
++	case DPRC_CMDID_OPEN:
++	default:
++		return vfio_fsl_mc_send_command(ioaddr, cmd_data);
++	}
++
++	return 0;
++}
++
++static ssize_t vfio_fsl_mc_write(void *device_data, const char __user *buf,
++				 size_t count, loff_t *ppos)
++{
++	struct vfio_fsl_mc_device *vdev = device_data;
++	unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
++	loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
++	struct vfio_fsl_mc_region *region;
++	uint64_t data[8];
++	int ret;
++
++	/* Write is supported only for the DPRC device */
++	if (strcmp(vdev->mc_dev->obj_desc.type, "dprc"))
++		return -EINVAL;
++
++	if (index >= vdev->num_regions)
++		return -EINVAL;
++
++	region = &vdev->regions[index];
++
++	if (!(region->flags & VFIO_REGION_INFO_FLAG_WRITE))
++		return -EINVAL;
++
++	if (!(region->type & VFIO_FSL_MC_REGION_TYPE_MMIO))
++		return -EINVAL;
++
++	if (!region->ioaddr) {
++		region->ioaddr = ioremap_nocache(region->addr, region->size);
++		if (!region->ioaddr)
++			return -ENOMEM;
++	}
++
++	if (count != 64 || off != 0)
++		return -EINVAL;
++
++	if (copy_from_user(&data, buf, 64))
++		return -EFAULT;
++
++	ret = vfio_handle_dprc_commands(region->ioaddr, data);
++	if (ret)
++		return ret;
++
++	return count;
++}
++
++static int vfio_fsl_mc_mmap_mmio(struct vfio_fsl_mc_region region,
++				 struct vm_area_struct *vma)
++{
++	u64 size = vma->vm_end - vma->vm_start;
++	u64 pgoff, base;
++
++	pgoff = vma->vm_pgoff &
++		((1U << (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
++	base = pgoff << PAGE_SHIFT;
++
++	if (region.size < PAGE_SIZE || base + size > region.size)
++		return -EINVAL;
++	/*
++	 * Set the REGION_TYPE_CACHEABLE (QBman CENA regs) to be the
++	 * cache inhibited area of the portal to avoid coherency issues
++	 * if a user migrates to another core.
++ */ ++ if (region.type & VFIO_FSL_MC_REGION_TYPE_CACHEABLE) ++ vma->vm_page_prot = pgprot_cached_ns(vma->vm_page_prot); ++ else ++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); ++ ++ vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff; ++ ++ return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, ++ size, vma->vm_page_prot); ++} ++ ++/* Allows mmaping fsl_mc device regions in assigned DPRC */ ++static int vfio_fsl_mc_mmap(void *device_data, struct vm_area_struct *vma) ++{ ++ struct vfio_fsl_mc_device *vdev = device_data; ++ struct fsl_mc_device *mc_dev = vdev->mc_dev; ++ unsigned long size, addr; ++ int index; ++ ++ index = vma->vm_pgoff >> (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT); ++ ++ if (vma->vm_end < vma->vm_start) ++ return -EINVAL; ++ if (vma->vm_start & ~PAGE_MASK) ++ return -EINVAL; ++ if (vma->vm_end & ~PAGE_MASK) ++ return -EINVAL; ++ if (!(vma->vm_flags & VM_SHARED)) ++ return -EINVAL; ++ if (index >= vdev->num_regions) ++ return -EINVAL; ++ ++ if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP)) ++ return -EINVAL; ++ ++ if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ) ++ && (vma->vm_flags & VM_READ)) ++ return -EINVAL; ++ ++ if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE) ++ && (vma->vm_flags & VM_WRITE)) ++ return -EINVAL; ++ ++ addr = vdev->regions[index].addr; ++ size = vdev->regions[index].size; ++ ++ vma->vm_private_data = mc_dev; ++ ++ if (vdev->regions[index].type & VFIO_FSL_MC_REGION_TYPE_MMIO) ++ return vfio_fsl_mc_mmap_mmio(vdev->regions[index], vma); ++ ++ return -EFAULT; ++} ++ ++static const struct vfio_device_ops vfio_fsl_mc_ops = { ++ .name = "vfio-fsl-mc", ++ .open = vfio_fsl_mc_open, ++ .release = vfio_fsl_mc_release, ++ .ioctl = vfio_fsl_mc_ioctl, ++ .read = vfio_fsl_mc_read, ++ .write = vfio_fsl_mc_write, ++ .mmap = vfio_fsl_mc_mmap, ++}; ++ ++static int vfio_fsl_mc_initialize_dprc(struct vfio_fsl_mc_device *vdev) ++{ ++ struct device *root_dprc_dev; ++ struct fsl_mc_device *mc_dev = vdev->mc_dev; ++ struct device *dev = &mc_dev->dev; ++ struct fsl_mc_bus *mc_bus; ++ struct irq_domain *mc_msi_domain; ++ unsigned int irq_count; ++ int ret; ++ ++ /* device must be DPRC */ ++ if (strcmp(mc_dev->obj_desc.type, "dprc")) ++ return -EINVAL; ++ ++ /* mc_io must be un-initialized */ ++ WARN_ON(mc_dev->mc_io); ++ ++ /* allocate a portal from the root DPRC for vfio use */ ++ fsl_mc_get_root_dprc(dev, &root_dprc_dev); ++ if (WARN_ON(!root_dprc_dev)) ++ return -EINVAL; ++ ++ ret = fsl_mc_portal_allocate(to_fsl_mc_device(root_dprc_dev), ++ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, ++ &mc_dev->mc_io); ++ if (ret < 0) ++ goto clean_msi_domain; ++ ++ /* Reset MCP before move on */ ++ ret = fsl_mc_portal_reset(mc_dev->mc_io); ++ if (ret < 0) { ++ dev_err(dev, "dprc portal reset failed: error = %d\n", ret); ++ goto free_mc_portal; ++ } ++ ++ /* MSI domain set up */ ++ ret = fsl_mc_find_msi_domain(root_dprc_dev->parent, &mc_msi_domain); ++ if (ret < 0) ++ goto free_mc_portal; ++ ++ dev_set_msi_domain(&mc_dev->dev, mc_msi_domain); ++ ++ ret = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id, ++ &mc_dev->mc_handle); ++ if (ret) { ++ dev_err(dev, "dprc_open() failed: error = %d\n", ret); ++ goto free_mc_portal; ++ } ++ ++ /* Initialize resource pool */ ++ fsl_mc_init_all_resource_pools(mc_dev); ++ ++ mc_bus = to_fsl_mc_bus(mc_dev); ++ ++ if (!mc_bus->irq_resources) { ++ irq_count = FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS; ++ ret = fsl_mc_populate_irq_pool(mc_bus, irq_count); ++ if (ret < 0) { ++ dev_err(dev, "%s: Failed to init irq-pool\n", 
__func__);
++			goto clean_resource_pool;
++		}
++	}
++
++	mutex_init(&mc_bus->scan_mutex);
++
++	mutex_lock(&mc_bus->scan_mutex);
++	ret = dprc_scan_objects(mc_dev, mc_dev->driver_override,
++				&irq_count);
++	mutex_unlock(&mc_bus->scan_mutex);
++	if (ret) {
++		dev_err(dev, "dprc_scan_objects() fails (%d)\n", ret);
++		goto clean_irq_pool;
++	}
++
++	if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
++		dev_warn(&mc_dev->dev,
++			 "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
++			 irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
++	}
++
++	return 0;
++
++clean_irq_pool:
++	fsl_mc_cleanup_irq_pool(mc_bus);
++
++clean_resource_pool:
++	fsl_mc_cleanup_all_resource_pools(mc_dev);
++	dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
++
++free_mc_portal:
++	fsl_mc_portal_free(mc_dev->mc_io);
++
++clean_msi_domain:
++	dev_set_msi_domain(&mc_dev->dev, NULL);
++
++	return ret;
++}
++
++static int vfio_fsl_mc_device_remove(struct device *dev, void *data)
++{
++	struct fsl_mc_device *mc_dev;
++
++	WARN_ON(dev == NULL);
++
++	mc_dev = to_fsl_mc_device(dev);
++	if (WARN_ON(mc_dev == NULL))
++		return -ENODEV;
++
++	fsl_mc_device_remove(mc_dev);
++	return 0;
++}
++
++static void vfio_fsl_mc_cleanup_dprc(struct vfio_fsl_mc_device *vdev)
++{
++	struct fsl_mc_device *mc_dev = vdev->mc_dev;
++	struct fsl_mc_bus *mc_bus;
++
++	/* device must be DPRC */
++	if (strcmp(mc_dev->obj_desc.type, "dprc"))
++		return;
++
++	device_for_each_child(&mc_dev->dev, NULL, vfio_fsl_mc_device_remove);
++
++	mc_bus = to_fsl_mc_bus(mc_dev);
++	if (dev_get_msi_domain(&mc_dev->dev))
++		fsl_mc_cleanup_irq_pool(mc_bus);
++
++	dev_set_msi_domain(&mc_dev->dev, NULL);
++
++	fsl_mc_cleanup_all_resource_pools(mc_dev);
++	dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
++	fsl_mc_portal_free(mc_dev->mc_io);
++}
++
++static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
++{
++	struct iommu_group *group;
++	struct vfio_fsl_mc_device *vdev;
++	struct device *dev = &mc_dev->dev;
++	int ret;
++
++	group = vfio_iommu_group_get(dev);
++	if (!group) {
++		dev_err(dev, "%s: VFIO: No IOMMU group\n", __func__);
++		return -EINVAL;
++	}
++
++	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
++	if (!vdev) {
++		vfio_iommu_group_put(group, dev);
++		return -ENOMEM;
++	}
++
++	vdev->mc_dev = mc_dev;
++
++	ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
++	if (ret) {
++		dev_err(dev, "%s: Failed to add to vfio group\n", __func__);
++		goto free_vfio_device;
++	}
++
++	/* DPRC container scanned and its children bound with vfio driver */
++	if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) {
++		ret = vfio_fsl_mc_initialize_dprc(vdev);
++		if (ret) {
++			vfio_del_group_dev(dev);
++			goto free_vfio_device;
++		}
++	} else {
++		struct fsl_mc_device *mc_bus_dev;
++
++		/* Non-dprc devices share mc_io from the parent dprc */
++		mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
++		if (mc_bus_dev == NULL) {
++			ret = -ENODEV;
++			vfio_del_group_dev(dev);
++			goto free_vfio_device;
++		}
++
++		mc_dev->mc_io = mc_bus_dev->mc_io;
++
++		/* Inherit parent MSI domain */
++		dev_set_msi_domain(&mc_dev->dev,
++				   dev_get_msi_domain(mc_dev->dev.parent));
++	}
++	return 0;
++
++free_vfio_device:
++	kfree(vdev);
++	vfio_iommu_group_put(group, dev);
++	return ret;
++}
++
++static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
++{
++	struct vfio_fsl_mc_device *vdev;
++	struct device *dev = &mc_dev->dev;
++
++	vdev = vfio_del_group_dev(dev);
++	if (!vdev)
++		return -EINVAL;
++
++	if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
++		vfio_fsl_mc_cleanup_dprc(vdev);
++	else
++		dev_set_msi_domain(&mc_dev->dev, NULL);
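++
++	/*
++	 * Non-DPRC objects only borrowed mc_io from their parent DPRC in
++	 * probe(), so just drop the reference here; DPRC teardown above
++	 * already released its own portal.
++	 */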
++ ++ mc_dev->mc_io = NULL; ++ ++ vfio_iommu_group_put(mc_dev->dev.iommu_group, dev); ++ kfree(vdev); ++ ++ return 0; ++} ++ ++/* ++ * vfio-fsl_mc is a meta-driver, so use driver_override interface to ++ * bind a fsl_mc container with this driver and match_id_table is NULL. ++ */ ++static struct fsl_mc_driver vfio_fsl_mc_driver = { ++ .probe = vfio_fsl_mc_probe, ++ .remove = vfio_fsl_mc_remove, ++ .match_id_table = NULL, ++ .driver = { ++ .name = "vfio-fsl-mc", ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++static int __init vfio_fsl_mc_driver_init(void) ++{ ++ return fsl_mc_driver_register(&vfio_fsl_mc_driver); ++} ++ ++static void __exit vfio_fsl_mc_driver_exit(void) ++{ ++ fsl_mc_driver_unregister(&vfio_fsl_mc_driver); ++} ++ ++module_init(vfio_fsl_mc_driver_init); ++module_exit(vfio_fsl_mc_driver_exit); ++ ++MODULE_VERSION(DRIVER_VERSION); ++MODULE_LICENSE("GPL v2"); ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); +--- /dev/null ++++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c +@@ -0,0 +1,199 @@ ++/* ++ * Freescale Management Complex (MC) device passthrough using VFIO ++ * ++ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc. ++ * Author: Bharat Bhushan ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "../../staging/fsl-mc/include/mc.h" ++#include "vfio_fsl_mc_private.h" ++ ++static irqreturn_t vfio_fsl_mc_irq_handler(int irq_num, void *arg) ++{ ++ struct vfio_fsl_mc_irq *mc_irq = (struct vfio_fsl_mc_irq *)arg; ++ ++ eventfd_signal(mc_irq->trigger, 1); ++ return IRQ_HANDLED; ++} ++ ++static int vfio_fsl_mc_irq_mask(struct vfio_fsl_mc_device *vdev, ++ unsigned int index, unsigned int start, ++ unsigned int count, uint32_t flags, ++ void *data) ++{ ++ return -EINVAL; ++} ++ ++static int vfio_fsl_mc_irq_unmask(struct vfio_fsl_mc_device *vdev, ++ unsigned int index, unsigned int start, ++ unsigned int count, uint32_t flags, ++ void *data) ++{ ++ return -EINVAL; ++} ++ ++static int vfio_set_trigger(struct vfio_fsl_mc_device *vdev, ++ int index, int fd) ++{ ++ struct vfio_fsl_mc_irq *irq = &vdev->mc_irqs[index]; ++ struct eventfd_ctx *trigger; ++ int hwirq; ++ int ret; ++ ++ hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq; ++ if (irq->trigger) { ++ free_irq(hwirq, irq); ++ kfree(irq->name); ++ eventfd_ctx_put(irq->trigger); ++ irq->trigger = NULL; ++ } ++ ++ if (fd < 0) /* Disable only */ ++ return 0; ++ ++ irq->name = kasprintf(GFP_KERNEL, "vfio-irq[%d](%s)", ++ hwirq, dev_name(&vdev->mc_dev->dev)); ++ if (!irq->name) ++ return -ENOMEM; ++ ++ trigger = eventfd_ctx_fdget(fd); ++ if (IS_ERR(trigger)) { ++ kfree(irq->name); ++ return PTR_ERR(trigger); ++ } ++ ++ irq->trigger = trigger; ++ ++ ret = request_irq(hwirq, vfio_fsl_mc_irq_handler, 0, ++ irq->name, irq); ++ if (ret) { ++ kfree(irq->name); ++ eventfd_ctx_put(trigger); ++ irq->trigger = NULL; ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int vfio_fsl_mc_irqs_init(struct vfio_fsl_mc_device *vdev) ++{ ++ struct fsl_mc_device *mc_dev = vdev->mc_dev; ++ struct vfio_fsl_mc_irq *mc_irq; ++ int irq_count; ++ int ret, i; ++ ++ /* Device does not support any interrupt */ ++ if (mc_dev->obj_desc.irq_count == 0) ++ return 0; ++ ++ irq_count = mc_dev->obj_desc.irq_count; ++ ++ mc_irq = kcalloc(irq_count, sizeof(*mc_irq), GFP_KERNEL); ++ if (mc_irq == NULL) ++ return -ENOMEM; ++ ++ /* Allocate IRQs */ ++ ret = 
fsl_mc_allocate_irqs(mc_dev); ++ if (ret) { ++ kfree(mc_irq); ++ return ret; ++ } ++ ++ for (i = 0; i < irq_count; i++) { ++ mc_irq[i].count = 1; ++ mc_irq[i].flags = VFIO_IRQ_INFO_EVENTFD; ++ } ++ ++ vdev->mc_irqs = mc_irq; ++ ++ return 0; ++} ++ ++/* Free All IRQs for the given MC object */ ++void vfio_fsl_mc_irqs_cleanup(struct vfio_fsl_mc_device *vdev) ++{ ++ struct fsl_mc_device *mc_dev = vdev->mc_dev; ++ int irq_count = mc_dev->obj_desc.irq_count; ++ int i; ++ ++ /* Device does not support any interrupt */ ++ if (mc_dev->obj_desc.irq_count == 0) ++ return; ++ ++ for (i = 0; i < irq_count; i++) ++ vfio_set_trigger(vdev, i, -1); ++ ++ fsl_mc_free_irqs(mc_dev); ++ kfree(vdev->mc_irqs); ++} ++ ++static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev, ++ unsigned int index, unsigned int start, ++ unsigned int count, uint32_t flags, ++ void *data) ++{ ++ struct vfio_fsl_mc_irq *irq = &vdev->mc_irqs[index]; ++ int hwirq; ++ ++ if (!count && (flags & VFIO_IRQ_SET_DATA_NONE)) ++ return vfio_set_trigger(vdev, index, -1); ++ ++ if (start != 0 || count != 1) ++ return -EINVAL; ++ ++ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { ++ int32_t fd = *(int32_t *)data; ++ ++ return vfio_set_trigger(vdev, index, fd); ++ } ++ ++ hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq; ++ ++ if (flags & VFIO_IRQ_SET_DATA_NONE) { ++ vfio_fsl_mc_irq_handler(hwirq, irq); ++ ++ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { ++ uint8_t trigger = *(uint8_t *)data; ++ ++ if (trigger) ++ vfio_fsl_mc_irq_handler(hwirq, irq); ++ } ++ ++ return 0; ++} ++ ++int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev, ++ uint32_t flags, unsigned int index, ++ unsigned int start, unsigned int count, ++ void *data) ++{ ++ int ret = -ENOTTY; ++ ++ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { ++ case VFIO_IRQ_SET_ACTION_MASK: ++ ret = vfio_fsl_mc_irq_mask(vdev, index, start, count, ++ flags, data); ++ break; ++ case VFIO_IRQ_SET_ACTION_UNMASK: ++ ret = vfio_fsl_mc_irq_unmask(vdev, index, start, count, ++ flags, data); ++ break; ++ case VFIO_IRQ_SET_ACTION_TRIGGER: ++ ret = vfio_fsl_mc_set_irq_trigger(vdev, index, start, ++ count, flags, data); ++ break; ++ } ++ ++ return ret; ++} +--- /dev/null ++++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h +@@ -0,0 +1,55 @@ ++/* ++ * Freescale Management Complex VFIO private declarations ++ * ++ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc. ++ * Copyright 2016 NXP ++ * Author: Bharat Bhushan ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. 
++ */
++
++#ifndef VFIO_FSL_MC_PRIVATE_H
++#define VFIO_FSL_MC_PRIVATE_H
++
++#define VFIO_FSL_MC_OFFSET_SHIFT	40
++#define VFIO_FSL_MC_OFFSET_MASK (((u64)(1) << VFIO_FSL_MC_OFFSET_SHIFT) - 1)
++
++#define VFIO_FSL_MC_OFFSET_TO_INDEX(off) (off >> VFIO_FSL_MC_OFFSET_SHIFT)
++
++#define VFIO_FSL_MC_INDEX_TO_OFFSET(index)	\
++	((u64)(index) << VFIO_FSL_MC_OFFSET_SHIFT)
++
++struct vfio_fsl_mc_irq {
++	u32 flags;
++	u32 count;
++	struct eventfd_ctx *trigger;
++	char *name;
++};
++
++struct vfio_fsl_mc_region {
++	u32 flags;
++#define VFIO_FSL_MC_REGION_TYPE_MMIO 1
++#define VFIO_FSL_MC_REGION_TYPE_CACHEABLE 2
++	u32 type;
++	u64 addr;
++	resource_size_t size;
++	void __iomem *ioaddr;
++};
++
++struct vfio_fsl_mc_device {
++	struct fsl_mc_device *mc_dev;
++	int refcnt;
++	u32 num_regions;
++	struct vfio_fsl_mc_region *regions;
++	struct vfio_fsl_mc_irq *mc_irqs;
++};
++
++int vfio_fsl_mc_irqs_init(struct vfio_fsl_mc_device *vdev);
++void vfio_fsl_mc_irqs_cleanup(struct vfio_fsl_mc_device *vdev);
++int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
++			       uint32_t flags, unsigned int index,
++			       unsigned int start, unsigned int count,
++			       void *data);
++#endif /* VFIO_FSL_MC_PRIVATE_H */
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -36,6 +36,8 @@
+ #include
+ #include
+ #include
++#include
++#include
+
+ #define DRIVER_VERSION  "0.2"
+ #define DRIVER_AUTHOR   "Alex Williamson "
+@@ -720,6 +722,27 @@ static void vfio_test_domain_fgsp(struct
+	__free_pages(pages, order);
+ }
+
++static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
++{
++	struct list_head group_resv_regions;
++	struct iommu_resv_region *region, *next;
++	bool ret = false;
++
++	INIT_LIST_HEAD(&group_resv_regions);
++	iommu_get_group_resv_regions(group, &group_resv_regions);
++	list_for_each_entry(region, &group_resv_regions, list) {
++		if (region->type == IOMMU_RESV_SW_MSI) {
++			*base = region->start;
++			ret = true;
++			goto out;
++		}
++	}
++out:
++	list_for_each_entry_safe(region, next, &group_resv_regions, list)
++		kfree(region);
++	return ret;
++}
++
+ static int vfio_iommu_type1_attach_group(void *iommu_data,
+					 struct iommu_group *iommu_group)
+ {
+@@ -728,6 +751,8 @@ static int vfio_iommu_type1_attach_group
+	struct vfio_domain *domain, *d;
+	struct bus_type *bus = NULL;
+	int ret;
++	bool resv_msi, msi_remap;
++	phys_addr_t resv_msi_base;
+
+	mutex_lock(&iommu->lock);
+
+@@ -774,11 +799,15 @@ static int vfio_iommu_type1_attach_group
+	if (ret)
+		goto out_domain;
+
++	resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base);
++
+	INIT_LIST_HEAD(&domain->group_list);
+	list_add(&group->next, &domain->group_list);
+
+-	if (!allow_unsafe_interrupts &&
+-	    !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) {
++	msi_remap = resv_msi ? irq_domain_check_msi_remap() :
++				iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
++
++	if (!allow_unsafe_interrupts && !msi_remap) {
+		pr_warn("%s: No interrupt remapping support.
Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n", + __func__); + ret = -EPERM; +@@ -820,6 +849,12 @@ static int vfio_iommu_type1_attach_group + if (ret) + goto out_detach; + ++ if (resv_msi) { ++ ret = iommu_get_msi_cookie(domain->domain, resv_msi_base); ++ if (ret) ++ goto out_detach; ++ } ++ + list_add(&domain->next, &iommu->domain_list); + + mutex_unlock(&iommu->lock); +--- a/include/uapi/linux/vfio.h ++++ b/include/uapi/linux/vfio.h +@@ -198,6 +198,7 @@ struct vfio_device_info { + #define VFIO_DEVICE_FLAGS_PCI (1 << 1) /* vfio-pci device */ + #define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */ + #define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */ ++#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 5) /* vfio-fsl-mc device */ + __u32 num_regions; /* Max region index + 1 */ + __u32 num_irqs; /* Max IRQ index + 1 */ + }; diff --git a/target/linux/layerscape/patches-4.9/819-Revert-usb-kconfig-remove-dependency-FSL_SOC-for-ehc.patch b/target/linux/layerscape/patches-4.9/819-Revert-usb-kconfig-remove-dependency-FSL_SOC-for-ehc.patch new file mode 100644 index 000000000..c2081b2d0 --- /dev/null +++ b/target/linux/layerscape/patches-4.9/819-Revert-usb-kconfig-remove-dependency-FSL_SOC-for-ehc.patch @@ -0,0 +1,28 @@ +From ba4f9dd74ccb9da91195b3570310754716064ef2 Mon Sep 17 00:00:00 2001 +From: Yangbo Lu +Date: Tue, 10 Oct 2017 15:55:31 +0800 +Subject: [PATCH] Revert "usb: kconfig: remove dependency FSL_SOC for ehci fsl + driver" + +This reverts commit 92042e8b3622a9bbfce0ebfc90edf6cd14d45708 on +LSDK linux (https://github.com/qoriq-open-source/linux). + +The patch reverted allowed to build ehci-fsl driver for non-PPC +platforms, but actually the driver was not ready. + +Signed-off-by: Yangbo Lu +--- + drivers/usb/host/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/usb/host/Kconfig ++++ b/drivers/usb/host/Kconfig +@@ -165,7 +165,7 @@ config XPS_USB_HCD_XILINX + + config USB_EHCI_FSL + tristate "Support for Freescale PPC on-chip EHCI USB controller" +- depends on USB_EHCI_HCD ++ depends on FSL_SOC + select USB_EHCI_ROOT_HUB_TT + ---help--- + Variation of ARC USB block used in some Freescale chips. 
diff --git a/target/linux/mediatek/patches-4.9/0063-atomic-sleep.patch b/target/linux/mediatek/patches-4.9/0063-atomic-sleep.patch new file mode 100644 index 000000000..095ce74a4 --- /dev/null +++ b/target/linux/mediatek/patches-4.9/0063-atomic-sleep.patch @@ -0,0 +1,38 @@ +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -1533,7 +1533,10 @@ static void mtk_hwlro_rx_uninit(struct m + for (i = 0; i < 10; i++) { + val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0); + if (val & MTK_LRO_RING_RELINQUISH_DONE) { +- msleep(20); ++ if (in_atomic()) ++ mdelay(20); ++ else ++ msleep(20); + continue; + } + break; +@@ -1951,7 +1954,10 @@ static void mtk_stop_dma(struct mtk_eth + for (i = 0; i < 10; i++) { + val = mtk_r32(eth, glo_cfg); + if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) { +- msleep(20); ++ if (in_atomic()) ++ mdelay(20); ++ else ++ msleep(20); + continue; + } + break; +@@ -1996,7 +2002,10 @@ static void ethsys_reset(struct mtk_eth + reset_bits, + reset_bits); + +- usleep_range(1000, 1100); ++ if (in_atomic()) ++ udelay(1000); ++ else ++ usleep_range(1000, 1100); + regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, + reset_bits, + ~reset_bits); diff --git a/target/linux/ramips/patches-4.9/0025-pinctrl-ralink-add-pinctrl-driver.patch b/target/linux/ramips/patches-4.9/0025-pinctrl-ralink-add-pinctrl-driver.patch index dc4585231..2225454d1 100644 --- a/target/linux/ramips/patches-4.9/0025-pinctrl-ralink-add-pinctrl-driver.patch +++ b/target/linux/ramips/patches-4.9/0025-pinctrl-ralink-add-pinctrl-driver.patch @@ -25,7 +25,7 @@ Signed-off-by: John Crispin bool "SGI IP22 (Indy/Indigo2)" --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig -@@ -114,6 +114,11 @@ config PINCTRL_LPC18XX +@@ -115,6 +115,11 @@ config PINCTRL_LPC18XX help Pinctrl driver for NXP LPC18xx/43xx System Control Unit (SCU). 
diff --git a/target/linux/sunxi/Makefile b/target/linux/sunxi/Makefile index f8b8aa750..982eecbcb 100644 --- a/target/linux/sunxi/Makefile +++ b/target/linux/sunxi/Makefile @@ -11,11 +11,10 @@ ARCH:=arm BOARD:=sunxi BOARDNAME:=Allwinner A1x/A20/A3x FEATURES:=fpu usb ext4 display rtc squashfs -CPU_TYPE:=cortex-a8 -CPU_SUBTYPE:=vfpv3 +SUBTARGETS:=cortexa8 cortexa7 cortexa53 MAINTAINER:=Zoltan HERPAI -KERNEL_PATCHVER:=4.4 +KERNEL_PATCHVER:=4.9 KERNELNAME:=zImage dtbs # A10: Cortex-A8 diff --git a/target/linux/sunxi/config-4.9 b/target/linux/sunxi/config-4.9 new file mode 100644 index 000000000..e961c8ad8 --- /dev/null +++ b/target/linux/sunxi/config-4.9 @@ -0,0 +1,573 @@ +CONFIG_ADVISE_SYSCALLS=y +# CONFIG_AHCI_SUNXI is not set +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_ARCH_AXXIA is not set +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +CONFIG_ARCH_HAS_RESET_CONTROLLER=y +CONFIG_ARCH_HAS_SG_CHAIN=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +CONFIG_ARCH_MULTIPLATFORM=y +# CONFIG_ARCH_MULTI_CPU_AUTO is not set +CONFIG_ARCH_MULTI_V6_V7=y +CONFIG_ARCH_MULTI_V7=y +CONFIG_ARCH_NR_GPIO=416 +CONFIG_ARCH_PHYS_ADDR_T_64BIT=y +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +CONFIG_ARCH_SUNXI=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_ARCH_SUPPORTS_BIG_ENDIAN=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y +CONFIG_ARM=y +CONFIG_ARM_APPENDED_DTB=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_ARM_ATAG_DTB_COMPAT=y +# CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_EXTEND is not set +CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER=y +CONFIG_ARM_CPU_SUSPEND=y +CONFIG_ARM_ERRATA_643719=y +CONFIG_ARM_GIC=y +CONFIG_ARM_HAS_SG_CHAIN=y +CONFIG_ARM_HEAVY_MB=y +CONFIG_ARM_L1_CACHE_SHIFT=6 +CONFIG_ARM_L1_CACHE_SHIFT_6=y +CONFIG_ARM_LPAE=y +CONFIG_ARM_PATCH_IDIV=y +CONFIG_ARM_PATCH_PHYS_VIRT=y +CONFIG_ARM_PMU=y +CONFIG_ARM_PSCI=y +CONFIG_ARM_PSCI_FW=y +CONFIG_ARM_THUMB=y +# CONFIG_ARM_THUMBEE is not set +CONFIG_ARM_UNWIND=y +CONFIG_ARM_VIRT_EXT=y +CONFIG_ATA=y +CONFIG_ATAGS=y +# CONFIG_ATA_SFF is not set +CONFIG_AUTO_ZRELADDR=y +CONFIG_AXP20X_POWER=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_BACKLIGHT_PWM=y +CONFIG_BINFMT_MISC=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BOUNCE=y +# CONFIG_BPF_SYSCALL is not set +CONFIG_CACHE_L2X0=y +CONFIG_CAN=y +CONFIG_CLKDEV_LOOKUP=y +CONFIG_CLKSRC_MMIO=y +CONFIG_CLKSRC_OF=y +CONFIG_CLKSRC_PROBE=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_COMMON_CLK=y +CONFIG_COMPACTION=y +CONFIG_CONFIGFS_FS=y +CONFIG_CONNECTOR=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_COREDUMP=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_CPUFREQ_DT=y +CONFIG_CPUFREQ_DT_PLATDEV=y +CONFIG_CPU_32v6K=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +# CONFIG_CPU_BIG_ENDIAN is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_STAT=y 
+CONFIG_CPU_HAS_ASID=y +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_CPU_ICACHE_DISABLE is not set +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_PM=y +CONFIG_CPU_RMAP=y +CONFIG_CPU_THERMAL=y +CONFIG_CPU_TLB_V7=y +CONFIG_CPU_V7=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_CRC32=y +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_DEV_SUN4I_SS=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_LL_INCLUDE="mach/debug-macro.S" +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_UART_8250 is not set +# CONFIG_DEBUG_USER is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_IOSCHED="cfq" +CONFIG_DMADEVICES=y +CONFIG_DMA_ENGINE=y +CONFIG_DMA_OF=y +CONFIG_DMA_SUN4I=y +CONFIG_DMA_SUN6I=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DNOTIFY=y +CONFIG_DTC=y +CONFIG_DUMMY_CONSOLE=y +# CONFIG_DWMAC_DWC_QOS_ETH is not set +CONFIG_DWMAC_GENERIC=y +# CONFIG_DWMAC_SUN8I is not set +CONFIG_DWMAC_SUNXI=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y +CONFIG_ELF_CORE=y +# CONFIG_EMBEDDED is not set +CONFIG_ENABLE_MUST_CHECK=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +CONFIG_EXT4_FS=y +CONFIG_EXTCON=y +# CONFIG_F2FS_CHECK_FS is not set +CONFIG_F2FS_FS=y +# CONFIG_F2FS_FS_SECURITY is not set +CONFIG_F2FS_FS_XATTR=y +CONFIG_F2FS_STAT_FS=y +CONFIG_FAT_FS=y +CONFIG_FB=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_CMDLINE=y +CONFIG_FB_FOREIGN_ENDIAN=y +CONFIG_FB_LITTLE_ENDIAN=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_SIMPLE=y +CONFIG_FB_TILEBLITTING=y +CONFIG_FIXED_PHY=y +CONFIG_FIX_EARLYCON_MEM=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x16=y +CONFIG_FONT_8x8=y +CONFIG_FONT_SUPPORT=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_FRAME_WARN=2048 +CONFIG_FREEZER=y +CONFIG_FS_MBCACHE=y +CONFIG_FS_POSIX_ACL=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_EARLY_IOREMAP=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_GENERIC_IO=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_PHY=y +CONFIG_GENERIC_PINCONF=y +CONFIG_GENERIC_SCHED_CLOCK=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GLOB=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_AXP209=y +CONFIG_GPIO_SYSFS=y +CONFIG_HANDLE_DOMAIN_IRQ=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_HAS_DMA=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_HAVE_ARCH_PFN_VALID=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARM_ARCH_TIMER=y +CONFIG_HAVE_ARM_SMCCC=y +# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set +CONFIG_HAVE_CBPF_JIT=y +CONFIG_HAVE_CC_STACKPROTECTOR=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_HAVE_DEBUG_KMEMLEAK=y 
+CONFIG_HAVE_DMA_API_DEBUG=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_HAVE_GENERIC_RCU_GUP=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_HAVE_NET_DSA=y +CONFIG_HAVE_OPROFILE=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_SMP=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_UID16=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HIGHMEM=y +CONFIG_HIGHPTE=y +CONFIG_HOTPLUG_CPU=y +# CONFIG_HUGETLBFS is not set +CONFIG_HWMON=y +CONFIG_HW_CONSOLE=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=y +CONFIG_HZ_FIXED=0 +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_MV64XXX=y +CONFIG_I2C_SUN6I_P2WI=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_INPUT=y +CONFIG_INPUT_AXP20X_PEK=y +CONFIG_INPUT_KEYBOARD=y +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_IOMMU_HELPER=y +CONFIG_IOSCHED_CFQ=y +CONFIG_IRQCHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_IRQ_WORK=y +CONFIG_JBD2=y +CONFIG_KALLSYMS=y +# CONFIG_KERNEL_MODE_NEON is not set +CONFIG_KEYBOARD_SUN4I_LRADC=y +CONFIG_KSM=y +CONFIG_KVM=y +CONFIG_KVM_ARM_HOST=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_VFIO=y +CONFIG_LCD_CLASS_DEVICE=y +CONFIG_LCD_PLATFORM=y +CONFIG_LEDS_GPIO=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +CONFIG_LIBFDT=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_LOGO=y +CONFIG_LOGO_LINUX_CLUT224=y +CONFIG_LOGO_LINUX_MONO=y +CONFIG_LOGO_LINUX_VGA16=y +CONFIG_MACH_SUN4I=y +CONFIG_MACH_SUN5I=y +CONFIG_MACH_SUN6I=y +CONFIG_MACH_SUN7I=y +CONFIG_MACH_SUN8I=y +CONFIG_MACH_SUN9I=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_MDIO_BOARDINFO=y +CONFIG_MDIO_SUN4I=y +CONFIG_MEDIA_SUPPORT=y +# CONFIG_MFD_AC100 is not set +CONFIG_MFD_AXP20X=y +CONFIG_MFD_AXP20X_RSB=y +CONFIG_MFD_CORE=y +CONFIG_MFD_SUN6I_PRCM=y +CONFIG_MFD_SYSCON=y +CONFIG_MIGHT_HAVE_CACHE_L2X0=y +CONFIG_MIGHT_HAVE_PCI=y +CONFIG_MIGRATION=y +CONFIG_MMC=y +CONFIG_MMC_BLOCK=y +# CONFIG_MMC_BLOCK_BOUNCE is not set +CONFIG_MMC_SUNXI=y +CONFIG_MMU_NOTIFIER=y +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_MODULES_USE_ELF_REL=y +# CONFIG_MTD is not set +CONFIG_MULTI_IRQ_HANDLER=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEON=y +CONFIG_NET_FLOW_LIMIT=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NET_VENDOR_ALLWINNER=y +CONFIG_NLS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NO_BOOTMEM=y +CONFIG_NO_HZ=y +CONFIG_NO_HZ_COMMON=y +CONFIG_NO_HZ_IDLE=y +CONFIG_NR_CPUS=8 +CONFIG_NVMEM=y +CONFIG_NVMEM_SUNXI_SID=y +CONFIG_OF=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_FLATTREE=y +CONFIG_OF_GPIO=y +CONFIG_OF_IRQ=y +CONFIG_OF_MDIO=y +CONFIG_OF_NET=y +CONFIG_OF_RESERVED_MEM=y +CONFIG_OLD_SIGACTION=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_OUTER_CACHE=y +CONFIG_OUTER_CACHE_SYNC=y 
+CONFIG_PADATA=y
+CONFIG_PAGE_OFFSET=0xC0000000
+# CONFIG_PARTITION_ADVANCED is not set
+# CONFIG_PCI_DOMAINS_GENERIC is not set
+# CONFIG_PCI_SYSCALL is not set
+CONFIG_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+CONFIG_PGTABLE_LEVELS=3
+CONFIG_PHYLIB=y
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_SUN9I_USB=y
+CONFIG_PINCTRL=y
+CONFIG_PINCTRL_GR8=y
+# CONFIG_PINCTRL_SINGLE is not set
+CONFIG_PINCTRL_SUN4I_A10=y
+CONFIG_PINCTRL_SUN5I_A10S=y
+CONFIG_PINCTRL_SUN5I_A13=y
+CONFIG_PINCTRL_SUN6I_A31=y
+CONFIG_PINCTRL_SUN6I_A31S=y
+CONFIG_PINCTRL_SUN6I_A31_R=y
+CONFIG_PINCTRL_SUN7I_A20=y
+CONFIG_PINCTRL_SUN8I_A23=y
+CONFIG_PINCTRL_SUN8I_A23_R=y
+CONFIG_PINCTRL_SUN8I_A33=y
+CONFIG_PINCTRL_SUN8I_A83T=y
+CONFIG_PINCTRL_SUN8I_H3=y
+CONFIG_PINCTRL_SUN8I_H3_R=y
+CONFIG_PINCTRL_SUN9I_A80=y
+CONFIG_PINCTRL_SUN9I_A80_R=y
+CONFIG_PINCTRL_SUNXI=y
+# CONFIG_PL310_ERRATA_588369 is not set
+# CONFIG_PL310_ERRATA_727915 is not set
+# CONFIG_PL310_ERRATA_753970 is not set
+# CONFIG_PL310_ERRATA_769419 is not set
+CONFIG_PM=y
+CONFIG_PM_CLK=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_OPP=y
+CONFIG_PM_SLEEP=y
+CONFIG_PM_SLEEP_SMP=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_PPS=y
+CONFIG_PREEMPT=y
+CONFIG_PREEMPT_COUNT=y
+# CONFIG_PREEMPT_NONE is not set
+CONFIG_PREEMPT_NOTIFIERS=y
+CONFIG_PREEMPT_RCU=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PROC_EVENTS=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_PWM=y
+CONFIG_PWM_SUN4I=y
+CONFIG_PWM_SYSFS=y
+CONFIG_RATIONAL=y
+# CONFIG_RCU_BOOST is not set
+CONFIG_RCU_STALL_COMMON=y
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=y
+CONFIG_REGMAP_IRQ=y
+CONFIG_REGMAP_MMIO=y
+CONFIG_REGMAP_SPI=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_AXP20X=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_RELAY=y
+CONFIG_RESET_CONTROLLER=y
+CONFIG_RESET_SUNXI=y
+CONFIG_RFS_ACCEL=y
+CONFIG_RPS=y
+CONFIG_RWSEM_SPIN_ON_OWNER=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_SATA_PMP=y
+# CONFIG_SCHED_INFO is not set
+CONFIG_SCSI=y
+CONFIG_SDIO_UART=y
+CONFIG_SECURITYFS=y
+CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_8250_FSL=y
+CONFIG_SERIAL_8250_NR_UARTS=8
+CONFIG_SERIAL_8250_RUNTIME_UARTS=8
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+CONFIG_SG_POOL=y
+CONFIG_SMP=y
+CONFIG_SMP_ON_UP=y
+CONFIG_SND=y
+CONFIG_SND_COMPRESS_OFFLOAD=y
+CONFIG_SND_JACK=y
+CONFIG_SND_JACK_INPUT_DEV=y
+CONFIG_SND_PCM=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_I2C_AND_SPI=y
+# CONFIG_SND_SUN4I_I2S is not set
+# CONFIG_SND_SUN4I_SPDIF is not set
+CONFIG_SOUND=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+CONFIG_SPI_SUN4I=y
+CONFIG_SPI_SUN6I=y
+CONFIG_SRCU=y
+CONFIG_STMMAC_ETH=y
+CONFIG_STMMAC_PLATFORM=y
+CONFIG_STRICT_DEVMEM=y
+# CONFIG_SUN4I_EMAC is not set
+CONFIG_SUN4I_TIMER=y
+# CONFIG_SUN50I_A64_CCU is not set
+CONFIG_SUN5I_HSTIMER=y
+CONFIG_SUN6I_A31_CCU=y
+CONFIG_SUN8I_A23_CCU=y
+CONFIG_SUN8I_A33_CCU=y
+CONFIG_SUN8I_H3_CCU=y
+CONFIG_SUNXI_CCU=y
+CONFIG_SUNXI_CCU_DIV=y
+CONFIG_SUNXI_CCU_FRAC=y
+CONFIG_SUNXI_CCU_GATE=y
+CONFIG_SUNXI_CCU_MP=y
+CONFIG_SUNXI_CCU_MULT=y
+CONFIG_SUNXI_CCU_MUX=y
+CONFIG_SUNXI_CCU_NK=y
+CONFIG_SUNXI_CCU_NKM=y
+CONFIG_SUNXI_CCU_NKMP=y
+CONFIG_SUNXI_CCU_NM=y
+CONFIG_SUNXI_CCU_PHASE=y
+CONFIG_SUNXI_RSB=y
+CONFIG_SUNXI_SRAM=y
+CONFIG_SUNXI_WATCHDOG=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_SWCONFIG=y
+CONFIG_SWCONFIG_B53=y
+# CONFIG_SWCONFIG_B53_MMAP_DRIVER is not set
+CONFIG_SWCONFIG_B53_PHY_DRIVER=y
+CONFIG_SWCONFIG_B53_PHY_FIXUP=y
+# CONFIG_SWCONFIG_B53_SRAB_DRIVER is not set
+CONFIG_SWIOTLB=y
+CONFIG_SWPHY=y
+CONFIG_SWP_EMULATE=y
+CONFIG_SYSFS_SYSCALL=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_SYS_SUPPORTS_HUGETLBFS=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_HWMON=y
+CONFIG_THERMAL_OF=y
+# CONFIG_THUMB2_KERNEL is not set
+CONFIG_TICK_CPU_ACCOUNTING=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TOUCHSCREEN_PROPERTIES=y
+CONFIG_TOUCHSCREEN_SUN4I=y
+CONFIG_UNCOMPRESS_INCLUDE="debug/uncompress.h"
+CONFIG_UNINLINE_SPIN_UNLOCK=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_DWC2_HOST=y
+# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_NET_DRIVERS=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USERIO=y
+CONFIG_USE_OF=y
+CONFIG_VDSO=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_VFAT_FS=y
+CONFIG_VFP=y
+CONFIG_VFPv3=y
+CONFIG_VHOST=y
+CONFIG_VHOST_NET=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_CONSOLE_SLEEP=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_WATCHDOG_CORE=y
+# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set
+CONFIG_XPS=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_BCJ=y
+CONFIG_ZBOOT_ROM_BSS=0
+CONFIG_ZBOOT_ROM_TEXT=0
diff --git a/target/linux/sunxi/cortexa53/config-default b/target/linux/sunxi/cortexa53/config-default
new file mode 100644
index 000000000..527a6f697
--- /dev/null
+++ b/target/linux/sunxi/cortexa53/config-default
@@ -0,0 +1,101 @@
+CONFIG_64BIT=y
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
+CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y
+CONFIG_ARCH_HAS_KCOV=y
+CONFIG_ARCH_MMAP_RND_BITS=18
+CONFIG_ARCH_MMAP_RND_BITS_MAX=24
+CONFIG_ARCH_MMAP_RND_BITS_MIN=18
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
+CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
+CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
+CONFIG_ARM64=y
+# CONFIG_ARM64_16K_PAGES is not set
+CONFIG_ARM64_4K_PAGES=y
+# CONFIG_ARM64_64K_PAGES is not set
+CONFIG_ARM64_CONT_SHIFT=4
+# CONFIG_ARM64_CRYPTO is not set
+# CONFIG_ARM64_HW_AFDBM is not set
+# CONFIG_ARM64_LSE_ATOMICS is not set
+CONFIG_ARM64_PAGE_SHIFT=12
+# CONFIG_ARM64_PAN is not set
+# CONFIG_ARM64_PTDUMP is not set
+# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set
+# CONFIG_ARM64_UAO is not set
+CONFIG_ARM64_VA_BITS=39
+CONFIG_ARM64_VA_BITS_39=y
+# CONFIG_ARM64_VA_BITS_48 is not set
+# CONFIG_ARM64_VHE is not set
+CONFIG_ARM_AMBA=y
+CONFIG_ARM_GIC_V3=y
+# CONFIG_ARM_SBSA_WATCHDOG is not set
+# CONFIG_ARM_SP805_WATCHDOG is not set
+CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y
+# CONFIG_COMMON_CLK_VERSATILE is not set
+CONFIG_COMMON_CLK_XGENE=y
+# CONFIG_COMPAT is not set
+# CONFIG_DEBUG_ALIGN_RODATA is not set
+CONFIG_DEBUG_RODATA=y
+CONFIG_DWMAC_SUN8I=y
+CONFIG_FRAME_POINTER=y
+# CONFIG_FSL_ERRATUM_A008585 is not set
+CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
+CONFIG_GENERIC_CPU_AUTOPROBE=y
+CONFIG_GENERIC_CSUM=y
+CONFIG_GENERIC_IRQ_MIGRATION=y
+CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
+CONFIG_HAVE_ARCH_HUGE_VMAP=y
+CONFIG_HAVE_ARCH_KASAN=y
+CONFIG_HAVE_CMPXCHG_DOUBLE=y
+CONFIG_HAVE_CMPXCHG_LOCAL=y
+CONFIG_HAVE_DEBUG_BUGVERBOSE=y
+CONFIG_HAVE_EBPF_JIT=y
+CONFIG_HAVE_KVM_MSI=y
+CONFIG_HAVE_MEMORY_PRESENT=y
+CONFIG_HAVE_PATA_PLATFORM=y
+CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
+# CONFIG_KASAN is not set
+CONFIG_KERNEL_MODE_NEON=y
+CONFIG_KVM_ARM_PMU=y
+CONFIG_KVM_ARM_VGIC_V3_ITS=y
+CONFIG_MODULES_USE_ELF_RELA=y
+CONFIG_NEED_SG_DMA_LENGTH=y
+CONFIG_NO_IOPORT_MAP=y
+# CONFIG_NUMA is not set
+CONFIG_PARTITION_PERCPU=y
+# CONFIG_PCI_DOMAINS is not set
+# CONFIG_PHY_XGENE is not set
+# CONFIG_PINCTRL_GR8 is not set
+# CONFIG_PINCTRL_SUN4I_A10 is not set
+CONFIG_PINCTRL_SUN50I_A64=y
+# CONFIG_PINCTRL_SUN5I_A10S is not set
+# CONFIG_PINCTRL_SUN5I_A13 is not set
+# CONFIG_PINCTRL_SUN6I_A31 is not set
+# CONFIG_PINCTRL_SUN6I_A31S is not set
+# CONFIG_PINCTRL_SUN6I_A31_R is not set
+# CONFIG_PINCTRL_SUN7I_A20 is not set
+# CONFIG_PINCTRL_SUN8I_A23 is not set
+# CONFIG_PINCTRL_SUN8I_A23_R is not set
+# CONFIG_PINCTRL_SUN8I_A33 is not set
+# CONFIG_PINCTRL_SUN8I_A83T is not set
+# CONFIG_PINCTRL_SUN8I_H3 is not set
+# CONFIG_PINCTRL_SUN8I_H3_R is not set
+# CONFIG_PINCTRL_SUN9I_A80 is not set
+# CONFIG_PINCTRL_SUN9I_A80_R is not set
+# CONFIG_RANDOMIZE_BASE is not set
+# CONFIG_SERIAL_AMBA_PL010 is not set
+# CONFIG_SERIAL_AMBA_PL011 is not set
+CONFIG_SPARSEMEM=y
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM_VMEMMAP=y
+CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
+CONFIG_SUN50I_A64_CCU=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
diff --git a/target/linux/sunxi/cortexa53/target.mk b/target/linux/sunxi/cortexa53/target.mk
new file mode 100644
index 000000000..6715d00bd
--- /dev/null
+++ b/target/linux/sunxi/cortexa53/target.mk
@@ -0,0 +1,13 @@
+#
+# Copyright (C) 2017 Hauke Mehrtens
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+
+ARCH:=aarch64
+BOARDNAME:=Allwinner A64
+CPU_TYPE:=cortex-a53
+KERNELNAME:=Image dtbs
diff --git a/target/linux/sunxi/cortexa7/config-default b/target/linux/sunxi/cortexa7/config-default
new file mode 100644
index 000000000..14912981d
--- /dev/null
+++ b/target/linux/sunxi/cortexa7/config-default
@@ -0,0 +1,8 @@
+CONFIG_DWMAC_SUN8I=y
+CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG=y
+# CONFIG_MACH_SUN4I is not set
+# CONFIG_MACH_SUN5I is not set
+# CONFIG_PINCTRL_GR8 is not set
+# CONFIG_PINCTRL_SUN4I_A10 is not set
+# CONFIG_PINCTRL_SUN5I_A10S is not set
+# CONFIG_PINCTRL_SUN5I_A13 is not set
diff --git a/target/linux/sunxi/cortexa7/target.mk b/target/linux/sunxi/cortexa7/target.mk
new file mode 100644
index 000000000..16aa9f7d0
--- /dev/null
+++ b/target/linux/sunxi/cortexa7/target.mk
@@ -0,0 +1,12 @@
+#
+# Copyright (C) 2017 Hauke Mehrtens
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+
+BOARDNAME:=Allwinner A20/A3x
+CPU_TYPE:=cortex-a7
+CPU_SUBTYPE:=neon-vfpv4
diff --git a/target/linux/sunxi/cortexa8/config-default b/target/linux/sunxi/cortexa8/config-default
new file mode 100644
index 000000000..93e48956e
--- /dev/null
+++ b/target/linux/sunxi/cortexa8/config-default
@@ -0,0 +1,22 @@
+# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
+# CONFIG_ARM_ERRATA_643719 is not set
+# CONFIG_ARM_LPAE is not set
+CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG=y
+# CONFIG_MACH_SUN6I is not set
+# CONFIG_MACH_SUN7I is not set
+# CONFIG_MACH_SUN8I is not set
+# CONFIG_MACH_SUN9I is not set
+CONFIG_PGTABLE_LEVELS=2
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+# CONFIG_PINCTRL_SUN6I_A31 is not set
+# CONFIG_PINCTRL_SUN6I_A31S is not set
+# CONFIG_PINCTRL_SUN6I_A31_R is not set
+# CONFIG_PINCTRL_SUN7I_A20 is not set
+# CONFIG_PINCTRL_SUN8I_A23 is not set
+# CONFIG_PINCTRL_SUN8I_A23_R is not set
+# CONFIG_PINCTRL_SUN8I_A33 is not set
+# CONFIG_PINCTRL_SUN8I_A83T is not set
+# CONFIG_PINCTRL_SUN8I_H3 is not set
+# CONFIG_PINCTRL_SUN8I_H3_R is not set
+# CONFIG_PINCTRL_SUN9I_A80 is not set
+# CONFIG_PINCTRL_SUN9I_A80_R is not set
diff --git a/target/linux/sunxi/cortexa8/target.mk b/target/linux/sunxi/cortexa8/target.mk
new file mode 100644
index 000000000..d7d18f6ca
--- /dev/null
+++ b/target/linux/sunxi/cortexa8/target.mk
@@ -0,0 +1,12 @@
+#
+# Copyright (C) 2017 Hauke Mehrtens
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+
+BOARDNAME:=Allwinner A1x
+CPU_TYPE:=cortex-a8
+CPU_SUBTYPE:=vfpv3
diff --git a/target/linux/sunxi/image/Makefile b/target/linux/sunxi/image/Makefile
index d0d86ee10..8f95c6190 100644
--- a/target/linux/sunxi/image/Makefile
+++ b/target/linux/sunxi/image/Makefile
@@ -39,154 +39,8 @@ define Device/Default
   IMAGE/sdcard.img.gz := sunxi-sdcard | append-metadata | gzip
 endef
 
-define Device/sun4i-a10-olinuxino-lime
-  DEVICE_TITLE:=Olimex A10-OLinuXino-LIME
-  DEVICE_PACKAGES:=kmod-ata-core kmod-ata-sunxi kmod-sun4i-emac kmod-rtc-sunxi
-  SUPPORTED_DEVICES:=olimex,a10-olinuxino-lime
-  SUNXI_DTS:=sun4i-a10-olinuxino-lime
-endef
-
-TARGET_DEVICES += sun4i-a10-olinuxino-lime
-
-
-define Device/sun5i-a13-olimex-som
-  DEVICE_TITLE:=Olimex A13 SOM
-  DEVICE_PACKAGES:=kmod-rtl8192cu
-  SUPPORTED_DEVICES:=olimex,a13-olinuxino
-  SUNXI_DTS:=sun5i-a13-olinuxino
-endef
-
-TARGET_DEVICES += sun5i-a13-olimex-som
-
-
-define Device/sun5i-a13-olinuxino
-  DEVICE_TITLE:=Olimex A13-Olinuxino
-  DEVICE_PACKAGES:=kmod-rtl8192cu
-  SUPPORTED_DEVICES:=olimex,a13-olinuxino
-  SUNXI_DTS:=sun5i-a13-olinuxino
-endef
-
-TARGET_DEVICES += sun5i-a13-olinuxino
-
-
-define Device/sun7i-a20-olinuxino-lime
-  DEVICE_TITLE:=Olimex A20-OLinuXino-LIME
-  DEVICE_PACKAGES:=kmod-ata-core kmod-ata-sunxi kmod-rtc-sunxi
-  SUPPORTED_DEVICES:=olimex,a20-olinuxino-lime
-  SUNXI_DTS:=sun7i-a20-olinuxino-lime
-endef
-
-TARGET_DEVICES += sun7i-a20-olinuxino-lime
-
-
-define Device/sun7i-a20-olinuxino-micro
-  DEVICE_TITLE:=Olimex A20-Olinuxino Micro
-  DEVICE_PACKAGES:=kmod-ata-core kmod-ata-sunxi kmod-sun4i-emac kmod-rtc-sunxi
-  SUPPORTED_DEVICES:=olimex,a20-olinuxino-micro
-  SUNXI_DTS:=sun7i-a20-olinuxino-micro
-endef
-
-TARGET_DEVICES += sun7i-a20-olinuxino-micro
-
-
-define Device/sun7i-a20-bananapi
-  DEVICE_TITLE:=LeMaker Banana Pi
-  DEVICE_PACKAGES:=kmod-rtc-sunxi kmod-ata-core kmod-ata-sunxi
-  SUPPORTED_DEVICES:=lemaker,bananapi
-  SUNXI_DTS:=sun7i-a20-bananapi
-endef
-
-TARGET_DEVICES += sun7i-a20-bananapi
-
-
-define Device/sun7i-a20-bananapro
-  DEVICE_TITLE:=LeMaker Banana Pro
-  DEVICE_PACKAGES:=kmod-rtc-sunxi kmod-ata-core kmod-ata-sunxi kmod-brcmfmac
-  SUPPORTED_DEVICES:=lemaker,bananapro
-  SUNXI_DTS:=sun7i-a20-bananapro
-endef
-
-TARGET_DEVICES += sun7i-a20-bananapro
-
-
-define Device/sun7i-a20-cubieboard2
-  DEVICE_TITLE:=Cubietech Cubieboard2
-  DEVICE_PACKAGES:=kmod-ata-core kmod-ata-sunxi kmod-sun4i-emac kmod-rtc-sunxi
-  SUPPORTED_DEVICES:=cubietech,cubieboard2
-  SUNXI_DTS:=sun7i-a20-cubieboard2
-endef
-
-TARGET_DEVICES += sun7i-a20-cubieboard2
-
-
-define Device/sun4i-a10-cubieboard
-  DEVICE_TITLE:=Cubietech Cubieboard
-  DEVICE_PACKAGES:=kmod-ata-core kmod-ata-sunxi kmod-sun4i-emac kmod-rtc-sunxi
-  SUPPORTED_DEVICES:=cubietech,a10-cubieboard
-  SUNXI_DTS:=sun4i-a10-cubieboard
-endef
-
-TARGET_DEVICES += sun4i-a10-cubieboard
-
-
-define Device/sun7i-a20-cubietruck
-  DEVICE_TITLE:=Cubietech Cubietruck
-  DEVICE_PACKAGES:=kmod-ata-core kmod-ata-sunxi kmod-rtc-sunxi kmod-brcmfmac
-  SUPPORTED_DEVICES:=cubietech,cubietruck
-  SUNXI_DTS:=sun7i-a20-cubietruck
-endef
-
-TARGET_DEVICES += sun7i-a20-cubietruck
-
-
-define Device/sun7i-a20-lamobo-r1
-  DEVICE_TITLE:=Lamobo R1
-  DEVICE_PACKAGES:=kmod-ata-sunxi kmod-rtl8192cu swconfig wpad-mini
-  SUPPORTED_DEVICES:=lamobo,lamobo-r1
-  SUNXI_DTS:=sun7i-a20-lamobo-r1
-endef
-
-TARGET_DEVICES += sun7i-a20-lamobo-r1
-
-
-define Device/sun6i-a31-m9
-  DEVICE_TITLE:=Mele M9 top set box
-  DEVICE_PACKAGES:=kmod-sun4i-emac kmod-rtc-sunxi kmod-rtl8192cu
-  SUPPORTED_DEVICES:=mele,m9
-  SUNXI_DTS:=sun6i-a31-m9
-endef
-
-TARGET_DEVICES += sun6i-a31-m9
-
-
-define Device/sun8i-h3-orangepi-plus
-  DEVICE_TITLE:=Xunlong Orange Pi Plus
-  DEVICE_PACKAGES:=kmod-rtc-sunxi
-  SUPPORTED_DEVICES:=xunlong,orangepi-plus
-  SUNXI_DTS:=sun8i-h3-orangepi-plus
-endef
-
-TARGET_DEVICES += sun8i-h3-orangepi-plus
-
-
-define Device/sun7i-a20-pcduino3
-  DEVICE_TITLE:=LinkSprite pcDuino3
-  DEVICE_PACKAGES:=kmod-sun4i-emac kmod-rtc-sunxi kmod-ata-core kmod-ata-sunxi kmod-rtl8xxxu rtl8188eu-firmware
-  SUPPORTED_DEVICES:=linksprite,pcduino3
-  SUNXI_DTS:=sun7i-a20-pcduino3
-endef
-
-TARGET_DEVICES += sun7i-a20-pcduino3
-
-
-define Device/sun4i-a10-pcduino
-  DEVICE_TITLE:=LinkSprite pcDuino
-  DEVICE_PACKAGES:=kmod-sun4i-emac kmod-rtc-sunxi kmod-rtl8192cu
-  SUPPORTED_DEVICES:=linksprite,a10-pcduino
-  SUNXI_DTS:=sun4i-a10-pcduino
-endef
-
-TARGET_DEVICES += sun4i-a10-pcduino
-
+include cortex-a7.mk
+include cortex-a8.mk
+include cortex-a53.mk
 
 $(eval $(call BuildImage))
diff --git a/target/linux/sunxi/image/cortex-a53.mk b/target/linux/sunxi/image/cortex-a53.mk
new file mode 100644
index 000000000..bbb253f0a
--- /dev/null
+++ b/target/linux/sunxi/image/cortex-a53.mk
@@ -0,0 +1,20 @@
+#
+# Copyright (C) 2013-2016 OpenWrt.org
+# Copyright (C) 2016 Yousong Zhou
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+ifeq ($(SUBTARGET),cortexa53)
+
+define Device/sun50i-a64-pine64-plus
+  DEVICE_TITLE:=Pine64 Plus A64
+  SUPPORTED_DEVICES:=pine64,pine64-plus
+  SUNXI_DTS:=allwinner/sun50i-a64-pine64-plus
+  KERNEL_NAME := Image
+  KERNEL := kernel-bin
+endef
+
+TARGET_DEVICES += sun50i-a64-pine64-plus
+
+endif
diff --git a/target/linux/sunxi/image/cortex-a7.mk b/target/linux/sunxi/image/cortex-a7.mk
new file mode 100644
index 000000000..d0b7aa04b
--- /dev/null
+++ b/target/linux/sunxi/image/cortex-a7.mk
@@ -0,0 +1,149 @@
+#
+# Copyright (C) 2013-2016 OpenWrt.org
+# Copyright (C) 2016 Yousong Zhou
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+ifeq ($(SUBTARGET),cortexa7)
+
+define Device/sun7i-a20-olinuxino-lime
+  DEVICE_TITLE:=Olimex A20-OLinuXino-LIME
+  DEVICE_PACKAGES:=kmod-ata-core kmod-ata-sunxi kmod-rtc-sunxi
+  SUPPORTED_DEVICES:=olimex,a20-olinuxino-lime
+  SUNXI_DTS:=sun7i-a20-olinuxino-lime
+endef
+
+TARGET_DEVICES += sun7i-a20-olinuxino-lime
+
+
+define Device/sun7i-a20-olinuxino-lime2
+  DEVICE_TITLE:=Olimex A20-OLinuXino-LIME2
+  DEVICE_PACKAGES:=kmod-ata-core kmod-ata-sunxi kmod-rtc-sunxi kmod-usb-hid
+  SUPPORTED_DEVICES:=olimex,a20-olinuxino-lime2
+  SUNXI_DTS:=sun7i-a20-olinuxino-lime2
+endef
+
+TARGET_DEVICES += sun7i-a20-olinuxino-lime2
+
+
+define Device/sun7i-a20-olinuxino-lime2-emmc
+  DEVICE_TITLE:=Olimex A20-OLinuXino-LIME2-eMMC
+  DEVICE_PACKAGES:=kmod-ata-core kmod-ata-sunxi kmod-rtc-sunxi kmod-usb-hid
+  SUPPORTED_DEVICES:=olimex,a20-olinuxino-lime2-emmc
+  SUNXI_DTS:=sun7i-a20-olinuxino-lime2-emmc
+endef
+
+TARGET_DEVICES += sun7i-a20-olinuxino-lime2-emmc
+
+
+define Device/sun7i-a20-olinuxino-micro
+  DEVICE_TITLE:=Olimex A20-Olinuxino Micro
+  DEVICE_PACKAGES:=kmod-ata-core kmod-ata-sunxi kmod-sun4i-emac kmod-rtc-sunxi
+  SUPPORTED_DEVICES:=olimex,a20-olinuxino-micro
+  SUNXI_DTS:=sun7i-a20-olinuxino-micro
+endef
+
+TARGET_DEVICES += sun7i-a20-olinuxino-micro
+
+
+define Device/sun7i-a20-bananapi
+  DEVICE_TITLE:=LeMaker Banana Pi
+  DEVICE_PACKAGES:=kmod-rtc-sunxi kmod-ata-core kmod-ata-sunxi
+  SUPPORTED_DEVICES:=lemaker,bananapi
+  SUNXI_DTS:=sun7i-a20-bananapi
+endef
+
+TARGET_DEVICES += sun7i-a20-bananapi
+
+
+define Device/sun7i-a20-bananapro
+  DEVICE_TITLE:=LeMaker Banana Pro
+  DEVICE_PACKAGES:=kmod-rtc-sunxi kmod-ata-core kmod-ata-sunxi kmod-brcmfmac
+  SUPPORTED_DEVICES:=lemaker,bananapro
+  SUNXI_DTS:=sun7i-a20-bananapro
+endef
+
+TARGET_DEVICES += sun7i-a20-bananapro
+
+
+define Device/sun7i-a20-cubieboard2
+  DEVICE_TITLE:=Cubietech Cubieboard2
+  DEVICE_PACKAGES:=kmod-ata-core kmod-ata-sunxi kmod-sun4i-emac kmod-rtc-sunxi
+  SUPPORTED_DEVICES:=cubietech,cubieboard2
+  SUNXI_DTS:=sun7i-a20-cubieboard2
+endef
+
+TARGET_DEVICES += sun7i-a20-cubieboard2
+
+
+define Device/sun7i-a20-cubietruck
+  DEVICE_TITLE:=Cubietech Cubietruck
+  DEVICE_PACKAGES:=kmod-ata-core kmod-ata-sunxi kmod-rtc-sunxi kmod-brcmfmac
+  SUPPORTED_DEVICES:=cubietech,cubietruck
+  SUNXI_DTS:=sun7i-a20-cubietruck
+endef
+
+TARGET_DEVICES += sun7i-a20-cubietruck
+
+
+define Device/sun7i-a20-lamobo-r1
+  DEVICE_TITLE:=Lamobo R1
+  DEVICE_PACKAGES:=kmod-ata-sunxi kmod-rtl8192cu swconfig wpad-mini
+  SUPPORTED_DEVICES:=lamobo,lamobo-r1
+  SUNXI_DTS:=sun7i-a20-lamobo-r1
+endef
+
+TARGET_DEVICES += sun7i-a20-lamobo-r1
+
+
+define Device/sun6i-a31-m9
+  DEVICE_TITLE:=Mele M9 top set box
+  DEVICE_PACKAGES:=kmod-sun4i-emac kmod-rtc-sunxi kmod-rtl8192cu
+  SUPPORTED_DEVICES:=mele,m9
+  SUNXI_DTS:=sun6i-a31-m9
+endef
+
+TARGET_DEVICES += sun6i-a31-m9
+
+
+define Device/sun8i-h2-plus-orangepi-r1
+  DEVICE_TITLE:=Xunlong Orange Pi R1
+  DEVICE_PACKAGES:=kmod-rtc-sunxi kmod-usb-net kmod-usb-net-rtl8152
+  SUPPORTED_DEVICES:=xunlong,orangepi-r1
+  SUNXI_DTS:=sun8i-h2-plus-orangepi-r1
+endef
+
+TARGET_DEVICES += sun8i-h2-plus-orangepi-r1
+
+
+define Device/sun8i-h3-nanopi-neo
+  DEVICE_TITLE:=FriendlyARM NanoPi NEO
+  SUPPORTED_DEVICES:=friendlyarm,nanopi-neo
+  SUNXI_DTS:=sun8i-h3-nanopi-neo
+endef
+
+TARGET_DEVICES += sun8i-h3-nanopi-neo
+
+
+define Device/sun8i-h3-orangepi-plus
+  DEVICE_TITLE:=Xunlong Orange Pi Plus
+  DEVICE_PACKAGES:=kmod-rtc-sunxi
+  SUPPORTED_DEVICES:=xunlong,orangepi-plus
+  SUNXI_DTS:=sun8i-h3-orangepi-plus
+endef
+
+TARGET_DEVICES += sun8i-h3-orangepi-plus
+
+
+define Device/sun7i-a20-pcduino3
+  DEVICE_TITLE:=LinkSprite pcDuino3
+  DEVICE_PACKAGES:=kmod-sun4i-emac kmod-rtc-sunxi kmod-ata-core kmod-ata-sunxi kmod-rtl8xxxu rtl8188eu-firmware
+  SUPPORTED_DEVICES:=linksprite,pcduino3
+  SUNXI_DTS:=sun7i-a20-pcduino3
+endef
+
+TARGET_DEVICES += sun7i-a20-pcduino3
+
+endif
diff --git a/target/linux/sunxi/image/cortex-a8.mk b/target/linux/sunxi/image/cortex-a8.mk
new file mode 100644
index 000000000..97e033dbb
--- /dev/null
+++ b/target/linux/sunxi/image/cortex-a8.mk
@@ -0,0 +1,59 @@
+#
+# Copyright (C) 2013-2016 OpenWrt.org
+# Copyright (C) 2016 Yousong Zhou
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+ifeq ($(SUBTARGET),cortexa8)
+
+define Device/sun4i-a10-olinuxino-lime
+  DEVICE_TITLE:=Olimex A10-OLinuXino-LIME
+  DEVICE_PACKAGES:=kmod-ata-core kmod-ata-sunxi kmod-sun4i-emac kmod-rtc-sunxi
+  SUPPORTED_DEVICES:=olimex,a10-olinuxino-lime
+  SUNXI_DTS:=sun4i-a10-olinuxino-lime
+endef
+
+TARGET_DEVICES += sun4i-a10-olinuxino-lime
+
+
+define Device/sun5i-a13-olimex-som
+  DEVICE_TITLE:=Olimex A13 SOM
+  DEVICE_PACKAGES:=kmod-rtl8192cu
+  SUPPORTED_DEVICES:=olimex,a13-olinuxino
+  SUNXI_DTS:=sun5i-a13-olinuxino
+endef
+
+TARGET_DEVICES += sun5i-a13-olimex-som
+
+
+define Device/sun5i-a13-olinuxino
+  DEVICE_TITLE:=Olimex A13-Olinuxino
+  DEVICE_PACKAGES:=kmod-rtl8192cu
+  SUPPORTED_DEVICES:=olimex,a13-olinuxino
+  SUNXI_DTS:=sun5i-a13-olinuxino
+endef
+
+TARGET_DEVICES += sun5i-a13-olinuxino
+
+
+define Device/sun4i-a10-cubieboard
+  DEVICE_TITLE:=Cubietech Cubieboard
+  DEVICE_PACKAGES:=kmod-ata-core kmod-ata-sunxi kmod-sun4i-emac kmod-rtc-sunxi
+  SUPPORTED_DEVICES:=cubietech,a10-cubieboard
+  SUNXI_DTS:=sun4i-a10-cubieboard
+endef
+
+TARGET_DEVICES += sun4i-a10-cubieboard
+
+
+define Device/sun4i-a10-pcduino
+  DEVICE_TITLE:=LinkSprite pcDuino
+  DEVICE_PACKAGES:=kmod-sun4i-emac kmod-rtc-sunxi kmod-rtl8192cu
+  SUPPORTED_DEVICES:=linksprite,a10-pcduino
+  SUNXI_DTS:=sun4i-a10-pcduino
+endef
+
+TARGET_DEVICES += sun4i-a10-pcduino
+
+endif
diff --git a/target/linux/sunxi/modules.mk b/target/linux/sunxi/modules.mk
index 99b82a3b5..b8ea7d5cb 100644
--- a/target/linux/sunxi/modules.mk
+++ b/target/linux/sunxi/modules.mk
@@ -10,8 +10,8 @@ define KernelPackage/rtc-sunxi
   DEPENDS:=@TARGET_sunxi $(call AddDepends/rtc)
   KCONFIG:= \
-	CONFIG_RTC_CLASS=y \
-	CONFIG_RTC_DRV_SUNXI=m
+	CONFIG_RTC_DRV_SUNXI \
+	CONFIG_RTC_CLASS=y
   FILES:=$(LINUX_DIR)/drivers/rtc/rtc-sunxi.ko
   AUTOLOAD:=$(call AutoLoad,50,rtc-sunxi)
 endef
diff --git a/target/linux/sunxi/patches-4.9/0001-arm64-sunxi-always-enable-reset-controller.patch b/target/linux/sunxi/patches-4.9/0001-arm64-sunxi-always-enable-reset-controller.patch
new file mode 100644
index 000000000..e23475218
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0001-arm64-sunxi-always-enable-reset-controller.patch
@@ -0,0 +1,39 @@
+From 900a9020af7a023f9b64c919fddf8a7486108962 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann
+Date: Tue, 18 Apr 2017 15:55:51 +0200
+Subject: arm64: sunxi: always enable reset controller
+
+The sunxi clk driver causes a link error when the reset controller
+subsystem is disabled:
+
+drivers/clk/built-in.o: In function `sun4i_ve_clk_setup':
+:(.init.text+0xd040): undefined reference to `reset_controller_register'
+drivers/clk/built-in.o: In function `sun4i_a10_display_init':
+:(.init.text+0xe5e0): undefined reference to `reset_controller_register'
+drivers/clk/built-in.o: In function `sunxi_usb_clk_setup':
+:(.init.text+0x10074): undefined reference to `reset_controller_register'
+
+We already force it to be enabled on arm32 and some other arm64 platforms,
+but not on arm64/sunxi. This adds the respective Kconfig statements to
+also select it here.
+
+Signed-off-by: Arnd Bergmann
+Acked-by: Maxime Ripard
+---
+ arch/arm64/Kconfig.platforms | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm64/Kconfig.platforms
++++ b/arch/arm64/Kconfig.platforms
+@@ -2,9 +2,11 @@ menu "Platform selection"
+ 
+ config ARCH_SUNXI
+ 	bool "Allwinner sunxi 64-bit SoC Family"
++	select ARCH_HAS_RESET_CONTROLLER
+ 	select GENERIC_IRQ_CHIP
+ 	select PINCTRL
+ 	select PINCTRL_SUN50I_A64
++	select RESET_CONTROLLER
+ 	help
+ 	  This enables support for Allwinner sunxi based SoCs like the A64.
+ 
diff --git a/target/linux/sunxi/patches-4.9/0002-clk-sunxi-ng-Rename-the-internal-structures.patch b/target/linux/sunxi/patches-4.9/0002-clk-sunxi-ng-Rename-the-internal-structures.patch
new file mode 100644
index 000000000..f3e485d88
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0002-clk-sunxi-ng-Rename-the-internal-structures.patch
@@ -0,0 +1,239 @@
+From a501a14e38cc4d8e9c91bb508cdca7032d53f717 Mon Sep 17 00:00:00 2001
+From: Maxime Ripard
+Date: Fri, 30 Sep 2016 10:05:32 +0200
+Subject: clk: sunxi-ng: Rename the internal structures
+
+Rename the structures meant to be embedded in other structures to make it
+consistent with the mux structure name
+
+Signed-off-by: Maxime Ripard
+Acked-by: Chen-Yu Tsai
+---
+ drivers/clk/sunxi-ng/ccu_div.h  |  6 +++---
+ drivers/clk/sunxi-ng/ccu_frac.c | 12 ++++++------
+ drivers/clk/sunxi-ng/ccu_frac.h | 14 +++++++-------
+ drivers/clk/sunxi-ng/ccu_mp.h   |  4 ++--
+ drivers/clk/sunxi-ng/ccu_mult.h |  4 ++--
+ drivers/clk/sunxi-ng/ccu_nk.h   |  4 ++--
+ drivers/clk/sunxi-ng/ccu_nkm.h  |  6 +++---
+ drivers/clk/sunxi-ng/ccu_nkmp.h |  8 ++++----
+ drivers/clk/sunxi-ng/ccu_nm.h   |  6 +++---
+ 9 files changed, 32 insertions(+), 32 deletions(-)
+
+--- a/drivers/clk/sunxi-ng/ccu_div.h
++++ b/drivers/clk/sunxi-ng/ccu_div.h
+@@ -20,7 +20,7 @@
+ #include "ccu_mux.h"
+ 
+ /**
+- * struct _ccu_div - Internal divider description
++ * struct ccu_div_internal - Internal divider description
+  * @shift: Bit offset of the divider in its register
+  * @width: Width of the divider field in its register
+  * @max: Maximum value allowed for that divider. This is the
+@@ -36,7 +36,7 @@
+  * It is basically a wrapper around the clk_divider functions
+  * arguments.
+  */
+-struct _ccu_div {
++struct ccu_div_internal {
+ 	u8	shift;
+ 	u8	width;
+ 
+@@ -78,7 +78,7 @@ struct _ccu_div {
+ struct ccu_div {
+ 	u32	enable;
+ 
+-	struct _ccu_div	div;
++	struct ccu_div_internal	div;
+ 	struct ccu_mux_internal	mux;
+ 	struct ccu_common	common;
+ };
+--- a/drivers/clk/sunxi-ng/ccu_frac.c
++++ b/drivers/clk/sunxi-ng/ccu_frac.c
+@@ -14,7 +14,7 @@
+ #include "ccu_frac.h"
+ 
+ bool ccu_frac_helper_is_enabled(struct ccu_common *common,
+-				struct _ccu_frac *cf)
++				struct ccu_frac_internal *cf)
+ {
+ 	if (!(common->features & CCU_FEATURE_FRACTIONAL))
+ 		return false;
+@@ -23,7 +23,7 @@ bool ccu_frac_helper_is_enabled(struct c
+ }
+ 
+ void ccu_frac_helper_enable(struct ccu_common *common,
+-			    struct _ccu_frac *cf)
++			    struct ccu_frac_internal *cf)
+ {
+ 	unsigned long flags;
+ 	u32 reg;
+@@ -38,7 +38,7 @@ void ccu_frac_helper_enable(struct ccu_c
+ }
+ 
+ void ccu_frac_helper_disable(struct ccu_common *common,
+-			     struct _ccu_frac *cf)
++			     struct ccu_frac_internal *cf)
+ {
+ 	unsigned long flags;
+ 	u32 reg;
+@@ -53,7 +53,7 @@ void ccu_frac_helper_disable(struct ccu_
+ }
+ 
+ bool ccu_frac_helper_has_rate(struct ccu_common *common,
+-			      struct _ccu_frac *cf,
++			      struct ccu_frac_internal *cf,
+ 			      unsigned long rate)
+ {
+ 	if (!(common->features & CCU_FEATURE_FRACTIONAL))
+@@ -63,7 +63,7 @@ bool ccu_frac_helper_has_rate(struct ccu
+ }
+ 
+ unsigned long ccu_frac_helper_read_rate(struct ccu_common *common,
+-					struct _ccu_frac *cf)
++					struct ccu_frac_internal *cf)
+ {
+ 	u32 reg;
+ 
+@@ -84,7 +84,7 @@ unsigned long ccu_frac_helper_read_rate(
+ }
+ 
+ int ccu_frac_helper_set_rate(struct ccu_common *common,
+-			     struct _ccu_frac *cf,
++			     struct ccu_frac_internal *cf,
+ 			     unsigned long rate)
+ {
+ 	unsigned long flags;
+--- a/drivers/clk/sunxi-ng/ccu_frac.h
++++ b/drivers/clk/sunxi-ng/ccu_frac.h
+@@ -18,7 +18,7 @@
+ 
+ #include "ccu_common.h"
+ 
+-struct _ccu_frac {
++struct ccu_frac_internal {
+ 	u32	enable;
+ 	u32	select;
+ 
+@@ -33,21 +33,21 @@ struct _ccu_frac {
+ }
+ 
+ bool ccu_frac_helper_is_enabled(struct ccu_common *common,
+-				struct _ccu_frac *cf);
++				struct ccu_frac_internal *cf);
+ void ccu_frac_helper_enable(struct ccu_common *common,
+-			    struct _ccu_frac *cf);
++			    struct ccu_frac_internal *cf);
+ void ccu_frac_helper_disable(struct ccu_common *common,
+-			     struct _ccu_frac *cf);
++			     struct ccu_frac_internal *cf);
+ 
+ bool ccu_frac_helper_has_rate(struct ccu_common *common,
+-			      struct _ccu_frac *cf,
++			      struct ccu_frac_internal *cf,
+ 			      unsigned long rate);
+ 
+ unsigned long ccu_frac_helper_read_rate(struct ccu_common *common,
+-					struct _ccu_frac *cf);
++					struct ccu_frac_internal *cf);
+ 
+ int ccu_frac_helper_set_rate(struct ccu_common *common,
+-			     struct _ccu_frac *cf,
++			     struct ccu_frac_internal *cf,
+ 			     unsigned long rate);
+ 
+ #endif /* _CCU_FRAC_H_ */
+--- a/drivers/clk/sunxi-ng/ccu_mp.h
++++ b/drivers/clk/sunxi-ng/ccu_mp.h
+@@ -29,8 +29,8 @@
+ struct ccu_mp {
+ 	u32	enable;
+ 
+-	struct _ccu_div	m;
+-	struct _ccu_div	p;
++	struct ccu_div_internal	m;
++	struct ccu_div_internal	p;
+ 	struct ccu_mux_internal	mux;
+ 	struct ccu_common	common;
+ };
+--- a/drivers/clk/sunxi-ng/ccu_mult.h
++++ b/drivers/clk/sunxi-ng/ccu_mult.h
+@@ -4,7 +4,7 @@
+ #include "ccu_common.h"
+ #include "ccu_mux.h"
+ 
+-struct _ccu_mult {
++struct ccu_mult_internal {
+ 	u8	shift;
+ 	u8	width;
+ };
+@@ -18,7 +18,7 @@ struct _ccu_mult {
+ struct ccu_mult {
+ 	u32	enable;
+ 
+-	struct _ccu_mult	mult;
++	struct ccu_mult_internal	mult;
+ 	struct ccu_mux_internal	mux;
+ 	struct ccu_common	common;
+ };
+--- a/drivers/clk/sunxi-ng/ccu_nk.h
++++ b/drivers/clk/sunxi-ng/ccu_nk.h
+@@ -30,8 +30,8 @@ struct ccu_nk {
+ 	u32	enable;
+ 	u32	lock;
+ 
+-	struct _ccu_mult	n;
+-	struct _ccu_mult	k;
++	struct ccu_mult_internal	n;
++	struct ccu_mult_internal	k;
+ 
+ 	unsigned int	fixed_post_div;
+ 
+--- a/drivers/clk/sunxi-ng/ccu_nkm.h
++++ b/drivers/clk/sunxi-ng/ccu_nkm.h
+@@ -29,9 +29,9 @@ struct ccu_nkm {
+ 	u32	enable;
+ 	u32	lock;
+ 
+-	struct _ccu_mult	n;
+-	struct _ccu_mult	k;
+-	struct _ccu_div	m;
++	struct ccu_mult_internal	n;
++	struct ccu_mult_internal	k;
++	struct ccu_div_internal	m;
+ 	struct ccu_mux_internal	mux;
+ 
+ 	struct ccu_common	common;
+--- a/drivers/clk/sunxi-ng/ccu_nkmp.h
++++ b/drivers/clk/sunxi-ng/ccu_nkmp.h
+@@ -29,10 +29,10 @@ struct ccu_nkmp {
+ 	u32	enable;
+ 	u32	lock;
+ 
+-	struct _ccu_mult	n;
+-	struct _ccu_mult	k;
+-	struct _ccu_div	m;
+-	struct _ccu_div	p;
++	struct ccu_mult_internal	n;
++	struct ccu_mult_internal	k;
++	struct ccu_div_internal	m;
++	struct ccu_div_internal	p;
+ 
+ 	struct ccu_common	common;
+ };
+--- a/drivers/clk/sunxi-ng/ccu_nm.h
++++ b/drivers/clk/sunxi-ng/ccu_nm.h
+@@ -30,9 +30,9 @@ struct ccu_nm {
+ 	u32	enable;
+ 	u32	lock;
+ 
+-	struct _ccu_mult	n;
+-	struct _ccu_div	m;
+-	struct _ccu_frac	frac;
++	struct ccu_mult_internal	n;
++	struct ccu_div_internal	m;
++	struct ccu_frac_internal	frac;
+ 
+ 	struct ccu_common	common;
+ };
diff --git a/target/linux/sunxi/patches-4.9/0003-clk-sunxi-ng-Remove-the-use-of-rational-computations.patch b/target/linux/sunxi/patches-4.9/0003-clk-sunxi-ng-Remove-the-use-of-rational-computations.patch
new file mode 100644
index 000000000..6b8f46eae
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0003-clk-sunxi-ng-Remove-the-use-of-rational-computations.patch
@@ -0,0 +1,239 @@
+From ee28648cb2b4d4ab5c2eb8199ea86675fe19016b Mon Sep 17 00:00:00 2001
+From: Maxime Ripard
+Date: Thu, 29 Sep 2016 22:53:12 +0200
+Subject: clk: sunxi-ng: Remove the use of rational computations
+
+While the rational library works great, it doesn't really allow us to add
+more constraints, like the minimum.
+
+Remove that in order to be able to deal with the constraints we'll need.
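
[Illustration] The replacement strategy in the hunks below is an exhaustive factor search: try every (N, M) pair up to the hardware limits, skip candidates that overshoot the requested rate, and keep the closest remaining one. A minimal standalone sketch of that idea follows; the names are illustrative only and not the kernel API. Note that the inner-loop bounds added by this patch compare _n against max_m (a slip carried over from the upstream commit, silently corrected by the minimum/maximum rework in patch 0005 further down), so the sketch uses the intended _m bound.

    #include <stdio.h>

    /* Brute-force (N, M) search: never overshoot the target rate,
     * keep the closest rate at or below it. Illustrative names. */
    static unsigned long find_best_nm(unsigned long parent, unsigned long rate,
                                      unsigned long max_n, unsigned long max_m,
                                      unsigned long *best_n, unsigned long *best_m)
    {
            unsigned long best_rate = 0, _n, _m;

            for (_n = 1; _n <= max_n; _n++) {
                    for (_m = 1; _m <= max_m; _m++) {
                            unsigned long tmp_rate = parent * _n / _m;

                            if (tmp_rate > rate)
                                    continue;       /* overshoots, reject */
                            if ((rate - tmp_rate) < (rate - best_rate)) {
                                    best_rate = tmp_rate;
                                    *best_n = _n;
                                    *best_m = _m;
                            }
                    }
            }
            return best_rate;
    }

    int main(void)
    {
            unsigned long n = 0, m = 0;
            /* e.g. a 24 MHz parent, asking for 297 MHz */
            unsigned long r = find_best_nm(24000000UL, 297000000UL, 128, 16, &n, &m);

            printf("best: %lu Hz with N=%lu M=%lu\n", r, n, m);
            return 0;
    }

Unlike rational_best_approximation(), this open-coded loop makes it trivial to add per-factor constraints later, which is exactly what the following patches do.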
+
+Signed-off-by: Maxime Ripard
+Acked-by: Chen-Yu Tsai
+---
+ drivers/clk/sunxi-ng/Kconfig    |  3 ---
+ drivers/clk/sunxi-ng/ccu_nkm.c  | 31 ++++++++++++-----------
+ drivers/clk/sunxi-ng/ccu_nkmp.c | 37 ++++++++++++--------------
+ drivers/clk/sunxi-ng/ccu_nm.c   | 54 +++++++++++++++++++++++++++++++----------
+ 4 files changed, 74 insertions(+), 51 deletions(-)
+
+--- a/drivers/clk/sunxi-ng/Kconfig
++++ b/drivers/clk/sunxi-ng/Kconfig
+@@ -35,17 +35,14 @@ config SUNXI_CCU_NK
+ 
+ config SUNXI_CCU_NKM
+ 	bool
+-	select RATIONAL
+ 	select SUNXI_CCU_GATE
+ 
+ config SUNXI_CCU_NKMP
+ 	bool
+-	select RATIONAL
+ 	select SUNXI_CCU_GATE
+ 
+ config SUNXI_CCU_NM
+ 	bool
+-	select RATIONAL
+ 	select SUNXI_CCU_FRAC
+ 	select SUNXI_CCU_GATE
+ 
+--- a/drivers/clk/sunxi-ng/ccu_nkm.c
++++ b/drivers/clk/sunxi-ng/ccu_nkm.c
+@@ -9,7 +9,6 @@
+  */
+ 
+ #include <linux/clk-provider.h>
+-#include <linux/rational.h>
+ 
+ #include "ccu_gate.h"
+ #include "ccu_nkm.h"
+@@ -28,21 +27,21 @@ static void ccu_nkm_find_best(unsigned l
+ 	unsigned long _n, _k, _m;
+ 
+ 	for (_k = 1; _k <= nkm->max_k; _k++) {
+-		unsigned long tmp_rate;
+-
+-		rational_best_approximation(rate / _k, parent,
+-					    nkm->max_n, nkm->max_m, &_n, &_m);
+-
+-		tmp_rate = parent * _n * _k / _m;
+-
+-		if (tmp_rate > rate)
+-			continue;
+-
+-		if ((rate - tmp_rate) < (rate - best_rate)) {
+-			best_rate = tmp_rate;
+-			best_n = _n;
+-			best_k = _k;
+-			best_m = _m;
++		for (_n = 1; _n <= nkm->max_n; _n++) {
++			for (_m = 1; _n <= nkm->max_m; _m++) {
++				unsigned long tmp_rate;
++
++				tmp_rate = parent * _n * _k / _m;
++
++				if (tmp_rate > rate)
++					continue;
++				if ((rate - tmp_rate) < (rate - best_rate)) {
++					best_rate = tmp_rate;
++					best_n = _n;
++					best_k = _k;
++					best_m = _m;
++				}
++			}
+ 		}
+ 	}
+ 
+--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
++++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
+@@ -9,7 +9,6 @@
+  */
+ 
+ #include <linux/clk-provider.h>
+-#include <linux/rational.h>
+ 
+ #include "ccu_gate.h"
+ #include "ccu_nkmp.h"
+@@ -29,24 +28,24 @@ static void ccu_nkmp_find_best(unsigned
+ 	unsigned long _n, _k, _m, _p;
+ 
+ 	for (_k = 1; _k <= nkmp->max_k; _k++) {
+-		for (_p = 1; _p <= nkmp->max_p; _p <<= 1) {
+-			unsigned long tmp_rate;
+-
+-			rational_best_approximation(rate / _k, parent / _p,
+-						    nkmp->max_n, nkmp->max_m,
+-						    &_n, &_m);
+-
+-			tmp_rate = parent * _n * _k / (_m * _p);
+-
+-			if (tmp_rate > rate)
+-				continue;
+-
+-			if ((rate - tmp_rate) < (rate - best_rate)) {
+-				best_rate = tmp_rate;
+-				best_n = _n;
+-				best_k = _k;
+-				best_m = _m;
+-				best_p = _p;
++		for (_n = 1; _n <= nkmp->max_n; _n++) {
++			for (_m = 1; _n <= nkmp->max_m; _m++) {
++				for (_p = 1; _p <= nkmp->max_p; _p <<= 1) {
++					unsigned long tmp_rate;
++
++					tmp_rate = parent * _n * _k / (_m * _p);
++
++					if (tmp_rate > rate)
++						continue;
++
++					if ((rate - tmp_rate) < (rate - best_rate)) {
++						best_rate = tmp_rate;
++						best_n = _n;
++						best_k = _k;
++						best_m = _m;
++						best_p = _p;
++					}
++				}
+ 			}
+ 		}
+ 	}
+--- a/drivers/clk/sunxi-ng/ccu_nm.c
++++ b/drivers/clk/sunxi-ng/ccu_nm.c
+@@ -9,12 +9,42 @@
+  */
+ 
+ #include <linux/clk-provider.h>
+-#include <linux/rational.h>
+ 
+ #include "ccu_frac.h"
+ #include "ccu_gate.h"
+ #include "ccu_nm.h"
+ 
++struct _ccu_nm {
++	unsigned long	n, max_n;
++	unsigned long	m, max_m;
++};
++
++static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
++			     struct _ccu_nm *nm)
++{
++	unsigned long best_rate = 0;
++	unsigned long best_n = 0, best_m = 0;
++	unsigned long _n, _m;
++
++	for (_n = 1; _n <= nm->max_n; _n++) {
++		for (_m = 1; _n <= nm->max_m; _m++) {
++			unsigned long tmp_rate = parent * _n / _m;
++
++			if (tmp_rate > rate)
++				continue;
++
++			if ((rate - tmp_rate) < (rate - best_rate)) {
++				best_rate = tmp_rate;
++				best_n = _n;
++				best_m = _m;
++			}
++		}
++	}
++
++	nm->n = best_n;
++	nm->m = best_m;
++}
++
+ static void ccu_nm_disable(struct clk_hw *hw)
+ {
+ 	struct ccu_nm *nm = hw_to_ccu_nm(hw);
+@@ -61,24 +91,22 @@ static long ccu_nm_round_rate(struct clk
+ 				unsigned long *parent_rate)
+ {
+ 	struct ccu_nm *nm = hw_to_ccu_nm(hw);
+-	unsigned long max_n, max_m;
+-	unsigned long n, m;
++	struct _ccu_nm _nm;
+ 
+-	max_n = 1 << nm->n.width;
+-	max_m = nm->m.max ?: 1 << nm->m.width;
++	_nm.max_n = 1 << nm->n.width;
++	_nm.max_m = nm->m.max ?: 1 << nm->m.width;
+ 
+-	rational_best_approximation(rate, *parent_rate, max_n, max_m, &n, &m);
++	ccu_nm_find_best(*parent_rate, rate, &_nm);
+ 
+-	return *parent_rate * n / m;
++	return *parent_rate * _nm.n / _nm.m;
+ }
+ 
+ static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
+ 			   unsigned long parent_rate)
+ {
+ 	struct ccu_nm *nm = hw_to_ccu_nm(hw);
++	struct _ccu_nm _nm;
+ 	unsigned long flags;
+-	unsigned long max_n, max_m;
+-	unsigned long n, m;
+ 	u32 reg;
+ 
+ 	if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate))
+@@ -86,10 +114,10 @@ static int ccu_nm_set_rate(struct clk_hw
+ 	else
+ 		ccu_frac_helper_disable(&nm->common, &nm->frac);
+ 
+-	max_n = 1 << nm->n.width;
+-	max_m = nm->m.max ?: 1 << nm->m.width;
++	_nm.max_n = 1 << nm->n.width;
++	_nm.max_m = nm->m.max ?: 1 << nm->m.width;
+ 
+-	rational_best_approximation(rate, parent_rate, max_n, max_m, &n, &m);
++	ccu_nm_find_best(parent_rate, rate, &_nm);
+ 
+ 	spin_lock_irqsave(nm->common.lock, flags);
+ 
+@@ -97,7 +125,7 @@ static int ccu_nm_set_rate(struct clk_hw
+ 	reg &= ~GENMASK(nm->n.width + nm->n.shift - 1, nm->n.shift);
+ 	reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);
+ 
+-	writel(reg | ((m - 1) << nm->m.shift) | ((n - 1) << nm->n.shift),
++	writel(reg | ((_nm.m - 1) << nm->m.shift) | ((_nm.n - 1) << nm->n.shift),
+ 	       nm->common.base + nm->common.reg);
+ 
+ 	spin_unlock_irqrestore(nm->common.lock, flags);
diff --git a/target/linux/sunxi/patches-4.9/0004-clk-sunxi-ng-Finish-to-convert-to-structures-for-arg.patch b/target/linux/sunxi/patches-4.9/0004-clk-sunxi-ng-Finish-to-convert-to-structures-for-arg.patch
new file mode 100644
index 000000000..4b91892b8
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0004-clk-sunxi-ng-Finish-to-convert-to-structures-for-arg.patch
@@ -0,0 +1,182 @@
+From b8302c7267dedaeeb1bf38143f099defbf16dce8 Mon Sep 17 00:00:00 2001
+From: Maxime Ripard
+Date: Thu, 29 Sep 2016 23:50:21 +0200
+Subject: clk: sunxi-ng: Finish to convert to structures for arguments
+
+Some clocks still use an explicit list of arguments, which makes it a bit
+more tedious to add new parameters.
+
+Convert those over to a structure pointer argument to add as many
+arguments as possible without having too much noise in our patches, or a
+very long list of arguments.
+
+Signed-off-by: Maxime Ripard
+Acked-by: Chen-Yu Tsai
+---
+ drivers/clk/sunxi-ng/ccu_mult.c | 28 ++++++++++++++++++++--------
+ drivers/clk/sunxi-ng/ccu_nk.c   | 39 ++++++++++++++++++++++-----------------
+ 2 files changed, 42 insertions(+), 25 deletions(-)
+
+--- a/drivers/clk/sunxi-ng/ccu_mult.c
++++ b/drivers/clk/sunxi-ng/ccu_mult.c
+@@ -13,10 +13,20 @@
+ #include "ccu_gate.h"
+ #include "ccu_mult.h"
+ 
++struct _ccu_mult {
++	unsigned long	mult, max;
++};
++
+ static void ccu_mult_find_best(unsigned long parent, unsigned long rate,
+-			       unsigned int max_n, unsigned int *n)
++			       struct _ccu_mult *mult)
+ {
+-	*n = rate / parent;
++	int _mult;
++
++	_mult = rate / parent;
++	if (_mult > mult->max)
++		_mult = mult->max;
++
++	mult->mult = _mult;
+ }
+ 
+ static unsigned long ccu_mult_round_rate(struct ccu_mux_internal *mux,
+@@ -25,11 +35,12 @@ static unsigned long ccu_mult_round_rate
+ 					 void *data)
+ {
+ 	struct ccu_mult *cm = data;
+-	unsigned int n;
++	struct _ccu_mult _cm;
+ 
+-	ccu_mult_find_best(parent_rate, rate, 1 << cm->mult.width, &n);
++	_cm.max = 1 << cm->mult.width;
++	ccu_mult_find_best(parent_rate, rate, &_cm);
+ 
+-	return parent_rate * n;
++	return parent_rate * _cm.mult;
+ }
+ 
+ static void ccu_mult_disable(struct clk_hw *hw)
+@@ -83,21 +94,22 @@ static int ccu_mult_set_rate(struct clk_
+ 			     unsigned long parent_rate)
+ {
+ 	struct ccu_mult *cm = hw_to_ccu_mult(hw);
++	struct _ccu_mult _cm;
+ 	unsigned long flags;
+-	unsigned int n;
+ 	u32 reg;
+ 
+ 	ccu_mux_helper_adjust_parent_for_prediv(&cm->common, &cm->mux, -1,
+ 						&parent_rate);
+ 
+-	ccu_mult_find_best(parent_rate, rate, 1 << cm->mult.width, &n);
++	_cm.max = 1 << cm->mult.width;
++	ccu_mult_find_best(parent_rate, rate, &_cm);
+ 
+ 	spin_lock_irqsave(cm->common.lock, flags);
+ 
+ 	reg = readl(cm->common.base + cm->common.reg);
+ 	reg &= ~GENMASK(cm->mult.width + cm->mult.shift - 1, cm->mult.shift);
+ 
+-	writel(reg | ((n - 1) << cm->mult.shift),
++	writel(reg | ((_cm.mult - 1) << cm->mult.shift),
+ 	       cm->common.base + cm->common.reg);
+ 
+ 	spin_unlock_irqrestore(cm->common.lock, flags);
+--- a/drivers/clk/sunxi-ng/ccu_nk.c
++++ b/drivers/clk/sunxi-ng/ccu_nk.c
+@@ -9,21 +9,24 @@
+  */
+ 
+ #include <linux/clk-provider.h>
+-#include <linux/rational.h>
+ 
+ #include "ccu_gate.h"
+ #include "ccu_nk.h"
+ 
++struct _ccu_nk {
++	unsigned long	n, max_n;
++	unsigned long	k, max_k;
++};
++
+ static void ccu_nk_find_best(unsigned long parent, unsigned long rate,
+-			     unsigned int max_n, unsigned int max_k,
+-			     unsigned int *n, unsigned int *k)
++			     struct _ccu_nk *nk)
+ {
+ 	unsigned long best_rate = 0;
+ 	unsigned int best_k = 0, best_n = 0;
+ 	unsigned int _k, _n;
+ 
+-	for (_k = 1; _k <= max_k; _k++) {
+-		for (_n = 1; _n <= max_n; _n++) {
++	for (_k = 1; _k <= nk->max_k; _k++) {
++		for (_n = 1; _n <= nk->max_n; _n++) {
+ 			unsigned long tmp_rate = parent * _n * _k;
+ 
+ 			if (tmp_rate > rate)
+@@ -37,8 +40,8 @@ static void ccu_nk_find_best(unsigned lo
+ 		}
+ 	}
+ 
+-	*k = best_k;
+-	*n = best_n;
++	nk->k = best_k;
++	nk->n = best_n;
+ }
+ 
+ static void ccu_nk_disable(struct clk_hw *hw)
+@@ -89,16 +92,17 @@ static long ccu_nk_round_rate(struct clk
+ 			      unsigned long *parent_rate)
+ {
+ 	struct ccu_nk *nk = hw_to_ccu_nk(hw);
+-	unsigned int n, k;
++	struct _ccu_nk _nk;
+ 
+ 	if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ 		rate *= nk->fixed_post_div;
+ 
+-	ccu_nk_find_best(*parent_rate, rate,
+-			 1 << nk->n.width, 1 << nk->k.width,
+-			 &n, &k);
++	_nk.max_n = 1 << nk->n.width;
++	_nk.max_k = 1 << nk->k.width;
++
++	ccu_nk_find_best(*parent_rate, rate, &_nk);
++	rate = *parent_rate * _nk.n * _nk.k;
+ 
+-	rate = *parent_rate * n * k;
+ 	if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ 		rate = rate / nk->fixed_post_div;
+ 
+@@ -110,15 +114,16 @@ static int ccu_nk_set_rate(struct clk_hw
+ {
+ 	struct ccu_nk *nk = hw_to_ccu_nk(hw);
+ 	unsigned long flags;
+-	unsigned int n, k;
++	struct _ccu_nk _nk;
+ 	u32 reg;
+ 
+ 	if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ 		rate = rate * nk->fixed_post_div;
+ 
+-	ccu_nk_find_best(parent_rate, rate,
+-			 1 << nk->n.width, 1 << nk->k.width,
+-			 &n, &k);
++	_nk.max_n = 1 << nk->n.width;
++	_nk.max_k = 1 << nk->k.width;
++
++	ccu_nk_find_best(parent_rate, rate, &_nk);
+ 
+ 	spin_lock_irqsave(nk->common.lock, flags);
+ 
+@@ -126,7 +131,7 @@ static int ccu_nk_set_rate(struct clk_hw
+ 	reg &= ~GENMASK(nk->n.width + nk->n.shift - 1, nk->n.shift);
+ 	reg &= ~GENMASK(nk->k.width + nk->k.shift - 1, nk->k.shift);
+ 
+-	writel(reg | ((k - 1) << nk->k.shift) | ((n - 1) << nk->n.shift),
++	writel(reg | ((_nk.k - 1) << nk->k.shift) | ((_nk.n - 1) << nk->n.shift),
+ 	       nk->common.base + nk->common.reg);
+ 
+ 	spin_unlock_irqrestore(nk->common.lock, flags);
diff --git a/target/linux/sunxi/patches-4.9/0005-clk-sunxi-ng-Add-minimums-for-all-the-relevant-struc.patch b/target/linux/sunxi/patches-4.9/0005-clk-sunxi-ng-Add-minimums-for-all-the-relevant-struc.patch
new file mode 100644
index 000000000..0165ade13
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0005-clk-sunxi-ng-Add-minimums-for-all-the-relevant-struc.patch
@@ -0,0 +1,256 @@
+From 6e0d50daa97f4bf9706e343b4f71171e88921209 Mon Sep 17 00:00:00 2001
+From: Maxime Ripard
+Date: Thu, 29 Sep 2016 22:57:26 +0200
+Subject: clk: sunxi-ng: Add minimums for all the relevant structures and
+ clocks
+
+Modify the current clocks we have to be able to specify the minimum for
+each clock we support, just like we support the max.
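
[Illustration] The shape of this change is uniform: every hard-coded lower bound of 1 in the search loops gains a matching min_* field beside the existing max_*, and single-value factors such as the multiplier are simply clamped into [min, max]. A hedged sketch of that clamp, mirroring the ccu_mult_find_best() hunk below (names are illustrative, not the kernel API):

    /* Illustrative only: pick the multiplier closest to rate/parent,
     * clamped into the [min, max] range a given clock supports. */
    struct mult_range {
            unsigned long min;
            unsigned long max;
    };

    static unsigned long pick_mult(unsigned long parent, unsigned long rate,
                                   const struct mult_range *r)
    {
            unsigned long mult = rate / parent;

            if (mult < r->min)
                    mult = r->min;
            if (mult > r->max)
                    mult = r->max;
            return mult;
    }

With pick_mult(24000000, 1200000000, &(struct mult_range){ .min = 2, .max = 32 }) the integer division gives 50, which the clamp caps at 32; the minimum matters for PLLs whose factors must not drop below a hardware floor, which is why patch 0007 below introduces _SUNXI_CCU_MULT_MIN().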
+
+Signed-off-by: Maxime Ripard
+Acked-by: Chen-Yu Tsai
+---
+ drivers/clk/sunxi-ng/ccu_mult.c |  7 ++++++-
+ drivers/clk/sunxi-ng/ccu_nk.c   | 12 ++++++++----
+ drivers/clk/sunxi-ng/ccu_nkm.c  | 18 ++++++++++++------
+ drivers/clk/sunxi-ng/ccu_nkmp.c | 24 ++++++++++++++++--------
+ drivers/clk/sunxi-ng/ccu_nm.c   | 12 ++++++++----
+ 5 files changed, 50 insertions(+), 23 deletions(-)
+
+--- a/drivers/clk/sunxi-ng/ccu_mult.c
++++ b/drivers/clk/sunxi-ng/ccu_mult.c
+@@ -14,7 +14,7 @@
+ #include "ccu_mult.h"
+ 
+ struct _ccu_mult {
+-	unsigned long	mult, max;
++	unsigned long	mult, min, max;
+ };
+ 
+ static void ccu_mult_find_best(unsigned long parent, unsigned long rate,
+@@ -23,6 +23,9 @@ static void ccu_mult_find_best(unsigned
+ 	int _mult;
+ 
+ 	_mult = rate / parent;
++	if (_mult < mult->min)
++		_mult = mult->min;
++
+ 	if (_mult > mult->max)
+ 		_mult = mult->max;
+ 
+@@ -37,6 +40,7 @@ static unsigned long ccu_mult_round_rate
+ 	struct ccu_mult *cm = data;
+ 	struct _ccu_mult _cm;
+ 
++	_cm.min = 1;
+ 	_cm.max = 1 << cm->mult.width;
+ 	ccu_mult_find_best(parent_rate, rate, &_cm);
+ 
+@@ -101,6 +105,7 @@ static int ccu_mult_set_rate(struct clk_
+ 	ccu_mux_helper_adjust_parent_for_prediv(&cm->common, &cm->mux, -1,
+ 						&parent_rate);
+ 
++	_cm.min = 1;
+ 	_cm.max = 1 << cm->mult.width;
+ 	ccu_mult_find_best(parent_rate, rate, &_cm);
+ 
+--- a/drivers/clk/sunxi-ng/ccu_nk.c
++++ b/drivers/clk/sunxi-ng/ccu_nk.c
+@@ -14,8 +14,8 @@
+ #include "ccu_nk.h"
+ 
+ struct _ccu_nk {
+-	unsigned long	n, max_n;
+-	unsigned long	k, max_k;
++	unsigned long	n, min_n, max_n;
++	unsigned long	k, min_k, max_k;
+ };
+ 
+ static void ccu_nk_find_best(unsigned long parent, unsigned long rate,
+@@ -25,8 +25,8 @@ static void ccu_nk_find_best(unsigned lo
+ 	unsigned int best_k = 0, best_n = 0;
+ 	unsigned int _k, _n;
+ 
+-	for (_k = 1; _k <= nk->max_k; _k++) {
+-		for (_n = 1; _n <= nk->max_n; _n++) {
++	for (_k = nk->min_k; _k <= nk->max_k; _k++) {
++		for (_n = nk->min_n; _n <= nk->max_n; _n++) {
+ 			unsigned long tmp_rate = parent * _n * _k;
+ 
+ 			if (tmp_rate > rate)
+@@ -97,7 +97,9 @@ static long ccu_nk_round_rate(struct clk
+ 	if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ 		rate *= nk->fixed_post_div;
+ 
++	_nk.min_n = 1;
+ 	_nk.max_n = 1 << nk->n.width;
++	_nk.min_k = 1;
+ 	_nk.max_k = 1 << nk->k.width;
+ 
+ 	ccu_nk_find_best(*parent_rate, rate, &_nk);
+@@ -120,7 +122,9 @@ static int ccu_nk_set_rate(struct clk_hw
+ 	if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ 		rate = rate * nk->fixed_post_div;
+ 
++	_nk.min_n = 1;
+ 	_nk.max_n = 1 << nk->n.width;
++	_nk.min_k = 1;
+ 	_nk.max_k = 1 << nk->k.width;
+ 
+ 	ccu_nk_find_best(parent_rate, rate, &_nk);
+--- a/drivers/clk/sunxi-ng/ccu_nkm.c
++++ b/drivers/clk/sunxi-ng/ccu_nkm.c
+@@ -14,9 +14,9 @@
+ #include "ccu_nkm.h"
+ 
+ struct _ccu_nkm {
+-	unsigned long	n, max_n;
+-	unsigned long	k, max_k;
+-	unsigned long	m, max_m;
++	unsigned long	n, min_n, max_n;
++	unsigned long	k, min_k, max_k;
++	unsigned long	m, min_m, max_m;
+ };
+ 
+ static void ccu_nkm_find_best(unsigned long parent, unsigned long rate,
+@@ -26,9 +26,9 @@ static void ccu_nkm_find_best(unsigned l
+ 	unsigned long best_n = 0, best_k = 0, best_m = 0;
+ 	unsigned long _n, _k, _m;
+ 
+-	for (_k = 1; _k <= nkm->max_k; _k++) {
+-		for (_n = 1; _n <= nkm->max_n; _n++) {
+-			for (_m = 1; _n <= nkm->max_m; _m++) {
++	for (_k = nkm->min_k; _k <= nkm->max_k; _k++) {
++		for (_n = nkm->min_n; _n <= nkm->max_n; _n++) {
++			for (_m = nkm->min_m; _m <= nkm->max_m; _m++) {
+ 				unsigned long tmp_rate;
+ 
+ 				tmp_rate = parent * _n * _k / _m;
+@@ -100,8 +100,11 @@ static unsigned long ccu_nkm_round_rate(
+ 	struct ccu_nkm *nkm = data;
+ 	struct _ccu_nkm _nkm;
+ 
++	_nkm.min_n = 1;
+ 	_nkm.max_n = 1 << nkm->n.width;
++	_nkm.min_k = 1;
+ 	_nkm.max_k = 1 << nkm->k.width;
++	_nkm.min_m = 1;
+ 	_nkm.max_m = nkm->m.max ?: 1 << nkm->m.width;
+ 
+ 	ccu_nkm_find_best(parent_rate, rate, &_nkm);
+@@ -126,8 +129,11 @@ static int ccu_nkm_set_rate(struct clk_h
+ 	unsigned long flags;
+ 	u32 reg;
+ 
++	_nkm.min_n = 1;
+ 	_nkm.max_n = 1 << nkm->n.width;
++	_nkm.min_k = 1;
+ 	_nkm.max_k = 1 << nkm->k.width;
++	_nkm.min_m = 1;
+ 	_nkm.max_m = nkm->m.max ?: 1 << nkm->m.width;
+ 
+ 	ccu_nkm_find_best(parent_rate, rate, &_nkm);
+--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
++++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
+@@ -14,10 +14,10 @@
+ #include "ccu_nkmp.h"
+ 
+ struct _ccu_nkmp {
+-	unsigned long	n, max_n;
+-	unsigned long	k, max_k;
+-	unsigned long	m, max_m;
+-	unsigned long	p, max_p;
++	unsigned long	n, min_n, max_n;
++	unsigned long	k, min_k, max_k;
++	unsigned long	m, min_m, max_m;
++	unsigned long	p, min_p, max_p;
+ };
+ 
+ static void ccu_nkmp_find_best(unsigned long parent, unsigned long rate,
+@@ -27,10 +27,10 @@ static void ccu_nkmp_find_best(unsigned
+ 	unsigned long best_n = 0, best_k = 0, best_m = 0, best_p = 0;
+ 	unsigned long _n, _k, _m, _p;
+ 
+-	for (_k = 1; _k <= nkmp->max_k; _k++) {
+-		for (_n = 1; _n <= nkmp->max_n; _n++) {
+-			for (_m = 1; _n <= nkmp->max_m; _m++) {
+-				for (_p = 1; _p <= nkmp->max_p; _p <<= 1) {
++	for (_k = nkmp->min_k; _k <= nkmp->max_k; _k++) {
++		for (_n = nkmp->min_n; _n <= nkmp->max_n; _n++) {
++			for (_m = nkmp->min_m; _m <= nkmp->max_m; _m++) {
++				for (_p = nkmp->min_p; _p <= nkmp->max_p; _p <<= 1) {
+ 					unsigned long tmp_rate;
+ 
+ 					tmp_rate = parent * _n * _k / (_m * _p);
+@@ -107,9 +107,13 @@ static long ccu_nkmp_round_rate(struct c
+ 	struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
+ 	struct _ccu_nkmp _nkmp;
+ 
++	_nkmp.min_n = 1;
+ 	_nkmp.max_n = 1 << nkmp->n.width;
++	_nkmp.min_k = 1;
+ 	_nkmp.max_k = 1 << nkmp->k.width;
++	_nkmp.min_m = 1;
+ 	_nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width;
++	_nkmp.min_p = 1;
+ 	_nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);
+ 
+ 	ccu_nkmp_find_best(*parent_rate, rate, &_nkmp);
+@@ -125,9 +129,13 @@ static int ccu_nkmp_set_rate(struct clk_
+ 	unsigned long flags;
+ 	u32 reg;
+ 
++	_nkmp.min_n = 1;
+ 	_nkmp.max_n = 1 << nkmp->n.width;
++	_nkmp.min_k = 1;
+ 	_nkmp.max_k = 1 << nkmp->k.width;
++	_nkmp.min_m = 1;
+ 	_nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width;
++	_nkmp.min_p = 1;
+ 	_nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);
+ 
+ 	ccu_nkmp_find_best(parent_rate, rate, &_nkmp);
+--- a/drivers/clk/sunxi-ng/ccu_nm.c
++++ b/drivers/clk/sunxi-ng/ccu_nm.c
+@@ -15,8 +15,8 @@
+ #include "ccu_nm.h"
+ 
+ struct _ccu_nm {
+-	unsigned long	n, max_n;
+-	unsigned long	m, max_m;
++	unsigned long	n, min_n, max_n;
++	unsigned long	m, min_m, max_m;
+ };
+ 
+ static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
+@@ -26,8 +26,8 @@ static void ccu_nm_find_best(unsigned lo
+ 	unsigned long best_n = 0, best_m = 0;
+ 	unsigned long _n, _m;
+ 
+-	for (_n = 1; _n <= nm->max_n; _n++) {
+-		for (_m = 1; _n <= nm->max_m; _m++) {
++	for (_n = nm->min_n; _n <= nm->max_n; _n++) {
++		for (_m = nm->min_m; _m <= nm->max_m; _m++) {
+ 			unsigned long tmp_rate = parent * _n / _m;
+ 
+ 			if (tmp_rate > rate)
+@@ -93,7 +93,9 @@ static long ccu_nm_round_rate(struct clk
+ 	struct ccu_nm *nm = hw_to_ccu_nm(hw);
+ 	struct _ccu_nm _nm;
+ 
++	_nm.min_n = 1;
+ 	_nm.max_n = 1 << nm->n.width;
++	_nm.min_m = 1;
+ 	_nm.max_m = nm->m.max ?: 1 << nm->m.width;
+ 
+ 	ccu_nm_find_best(*parent_rate, rate, &_nm);
+@@ -114,7 +116,9 @@ static int ccu_nm_set_rate(struct clk_hw
+ 	else
+ 		ccu_frac_helper_disable(&nm->common, &nm->frac);
+ 
++	_nm.min_n = 1;
+ 	_nm.max_n = 1 << nm->n.width;
++	_nm.min_m = 1;
+ 	_nm.max_m = nm->m.max ?: 1 << nm->m.width;
+ 
+ 	ccu_nm_find_best(parent_rate, rate, &_nm);
diff --git a/target/linux/sunxi/patches-4.9/0006-clk-sunxi-ng-Implement-minimum-for-multipliers.patch b/target/linux/sunxi/patches-4.9/0006-clk-sunxi-ng-Implement-minimum-for-multipliers.patch
new file mode 100644
index 000000000..668d59649
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0006-clk-sunxi-ng-Implement-minimum-for-multipliers.patch
@@ -0,0 +1,132 @@
+From 2beaa601c849e72683a2dd0fe6fd77763f19f051 Mon Sep 17 00:00:00 2001
+From: Maxime Ripard
+Date: Fri, 30 Sep 2016 22:16:51 +0200
+Subject: clk: sunxi-ng: Implement minimum for multipliers
+
+Allow the CCU drivers to specify a minimum for their multipliers.
+
+Signed-off-by: Maxime Ripard
+Acked-by: Chen-Yu Tsai
+---
+ drivers/clk/sunxi-ng/ccu_mult.c |  2 +-
+ drivers/clk/sunxi-ng/ccu_mult.h | 13 +++++++++----
+ drivers/clk/sunxi-ng/ccu_nk.c   |  8 ++++----
+ drivers/clk/sunxi-ng/ccu_nkm.c  |  8 ++++----
+ drivers/clk/sunxi-ng/ccu_nkmp.c |  4 ++--
+ drivers/clk/sunxi-ng/ccu_nm.c   |  2 +-
+ 6 files changed, 21 insertions(+), 16 deletions(-)
+
+--- a/drivers/clk/sunxi-ng/ccu_mult.c
++++ b/drivers/clk/sunxi-ng/ccu_mult.c
+@@ -105,7 +105,7 @@ static int ccu_mult_set_rate(struct clk_
+ 	ccu_mux_helper_adjust_parent_for_prediv(&cm->common, &cm->mux, -1,
+ 						&parent_rate);
+ 
+-	_cm.min = 1;
++	_cm.min = cm->mult.min;
+ 	_cm.max = 1 << cm->mult.width;
+ 	ccu_mult_find_best(parent_rate, rate, &_cm);
+ 
+--- a/drivers/clk/sunxi-ng/ccu_mult.h
++++ b/drivers/clk/sunxi-ng/ccu_mult.h
+@@ -7,14 +7,19 @@
+ struct ccu_mult_internal {
+ 	u8	shift;
+ 	u8	width;
++	u8	min;
+ };
+ 
+-#define _SUNXI_CCU_MULT(_shift, _width)	\
+-	{				\
+-		.shift	= _shift,	\
+-		.width	= _width,	\
++#define _SUNXI_CCU_MULT_MIN(_shift, _width, _min)	\
++	{						\
++		.shift	= _shift,			\
++		.width	= _width,			\
++		.min	= _min,				\
+ 	}
+ 
++#define _SUNXI_CCU_MULT(_shift, _width)	\
++	_SUNXI_CCU_MULT_MIN(_shift, _width, 1)
++
+ struct ccu_mult {
+ 	u32	enable;
+ 
+--- a/drivers/clk/sunxi-ng/ccu_nk.c
++++ b/drivers/clk/sunxi-ng/ccu_nk.c
+@@ -97,9 +97,9 @@ static long ccu_nk_round_rate(struct clk
+ 	if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ 		rate *= nk->fixed_post_div;
+ 
+-	_nk.min_n = 1;
++	_nk.min_n = nk->n.min;
+ 	_nk.max_n = 1 << nk->n.width;
+-	_nk.min_k = 1;
++	_nk.min_k = nk->k.min;
+ 	_nk.max_k = 1 << nk->k.width;
+ 
+ 	ccu_nk_find_best(*parent_rate, rate, &_nk);
+@@ -122,9 +122,9 @@ static int ccu_nk_set_rate(struct clk_hw
+ 	if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ 		rate = rate * nk->fixed_post_div;
+ 
+-	_nk.min_n = 1;
++	_nk.min_n = nk->n.min;
+ 	_nk.max_n = 1 << nk->n.width;
+-	_nk.min_k = 1;
++	_nk.min_k = nk->k.min;
+ 	_nk.max_k = 1 << nk->k.width;
+ 
+ 	ccu_nk_find_best(parent_rate, rate, &_nk);
+--- a/drivers/clk/sunxi-ng/ccu_nkm.c
++++ b/drivers/clk/sunxi-ng/ccu_nkm.c
+@@ -100,9 +100,9 @@ static unsigned long ccu_nkm_round_rate(
+ 	struct ccu_nkm *nkm = data;
+ 	struct _ccu_nkm _nkm;
+ 
+-	_nkm.min_n = 1;
++	_nkm.min_n = nkm->n.min;
+ 	_nkm.max_n = 1 << nkm->n.width;
+-	_nkm.min_k = 1;
++	_nkm.min_k = nkm->k.min;
+ 	_nkm.max_k = 1 << nkm->k.width;
+ 	_nkm.min_m = 1;
+ 	_nkm.max_m = nkm->m.max ?: 1 << nkm->m.width;
+@@ -129,9 +129,9 @@ static int ccu_nkm_set_rate(struct clk_h
+ 	unsigned long flags;
+ 	u32 reg;
+ 
+-	_nkm.min_n = 1;
++	_nkm.min_n = nkm->n.min;
+ 	_nkm.max_n = 1 << nkm->n.width;
+-	_nkm.min_k = 1;
++	_nkm.min_k = nkm->k.min;
+ 	_nkm.max_k = 1 << nkm->k.width;
+ 	_nkm.min_m = 1;
+ 	_nkm.max_m = nkm->m.max ?: 1 << nkm->m.width;
+--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
++++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
+@@ -107,9 +107,9 @@ static long ccu_nkmp_round_rate(struct c
+ 	struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
+ 	struct _ccu_nkmp _nkmp;
+ 
+-	_nkmp.min_n = 1;
++	_nkmp.min_n = nkmp->n.min;
+ 	_nkmp.max_n = 1 << nkmp->n.width;
+-	_nkmp.min_k = 1;
++	_nkmp.min_k = nkmp->k.min;
+ 	_nkmp.max_k = 1 << nkmp->k.width;
+ 	_nkmp.min_m = 1;
+ 	_nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width;
+--- a/drivers/clk/sunxi-ng/ccu_nm.c
++++ b/drivers/clk/sunxi-ng/ccu_nm.c
+@@ -93,7 +93,7 @@ static long ccu_nm_round_rate(struct clk
+ 	struct ccu_nm *nm = hw_to_ccu_nm(hw);
+ 	struct _ccu_nm _nm;
+ 
+-	_nm.min_n = 1;
++	_nm.min_n = nm->n.min;
+ 	_nm.max_n = 1 << nm->n.width;
+ 	_nm.min_m = 1;
+ 	_nm.max_m = nm->m.max ?: 1 << nm->m.width;
diff --git a/target/linux/sunxi/patches-4.9/0007-clk-sunxi-ng-Add-A64-clocks.patch b/target/linux/sunxi/patches-4.9/0007-clk-sunxi-ng-Add-A64-clocks.patch
new file mode 100644
index 000000000..fa0bae92b
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0007-clk-sunxi-ng-Add-A64-clocks.patch
@@ -0,0 +1,1295 @@
+From c6a0637460c29799f1e63a6a4a65bda22caf4a54 Mon Sep 17 00:00:00 2001
+From: Maxime Ripard
+Date: Wed, 6 Jul 2016 08:31:34 +0200
+Subject: clk: sunxi-ng: Add A64 clocks
+
+Add the A64 CCU clocks set.
+
+Acked-by: Rob Herring
+Acked-by: Chen-Yu Tsai
+Signed-off-by: Maxime Ripard
+---
+ .../devicetree/bindings/clock/sunxi-ccu.txt |   1 +
+ drivers/clk/sunxi-ng/Kconfig                |  11 +
+ drivers/clk/sunxi-ng/Makefile               |   1 +
+ drivers/clk/sunxi-ng/ccu-sun50i-a64.c       | 915 +++++++++++++++++++++
+ drivers/clk/sunxi-ng/ccu-sun50i-a64.h       |  72 ++
+ include/dt-bindings/clock/sun50i-a64-ccu.h  | 134 +++
+ include/dt-bindings/reset/sun50i-a64-ccu.h  |  98 +++
+ 7 files changed, 1232 insertions(+)
+ create mode 100644 drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+ create mode 100644 drivers/clk/sunxi-ng/ccu-sun50i-a64.h
+ create mode 100644 include/dt-bindings/clock/sun50i-a64-ccu.h
+ create mode 100644 include/dt-bindings/reset/sun50i-a64-ccu.h
+
+--- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
++++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
+@@ -7,6 +7,7 @@ Required properties :
+ 		- "allwinner,sun8i-a23-ccu"
+ 		- "allwinner,sun8i-a33-ccu"
+ 		- "allwinner,sun8i-h3-ccu"
++		- "allwinner,sun50i-a64-ccu"
+ 
+ - reg: Must contain the registers base address and length
Two are needed: +--- a/drivers/clk/sunxi-ng/Kconfig ++++ b/drivers/clk/sunxi-ng/Kconfig +@@ -53,6 +53,17 @@ config SUNXI_CCU_MP + + # SoC Drivers + ++config SUN50I_A64_CCU ++ bool "Support for the Allwinner A64 CCU" ++ select SUNXI_CCU_DIV ++ select SUNXI_CCU_NK ++ select SUNXI_CCU_NKM ++ select SUNXI_CCU_NKMP ++ select SUNXI_CCU_NM ++ select SUNXI_CCU_MP ++ select SUNXI_CCU_PHASE ++ default ARM64 && ARCH_SUNXI ++ + config SUN6I_A31_CCU + bool "Support for the Allwinner A31/A31s CCU" + select SUNXI_CCU_DIV +--- a/drivers/clk/sunxi-ng/Makefile ++++ b/drivers/clk/sunxi-ng/Makefile +@@ -18,6 +18,7 @@ obj-$(CONFIG_SUNXI_CCU_NM) += ccu_nm.o + obj-$(CONFIG_SUNXI_CCU_MP) += ccu_mp.o + + # SoC support ++obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o + obj-$(CONFIG_SUN6I_A31_CCU) += ccu-sun6i-a31.o + obj-$(CONFIG_SUN8I_A23_CCU) += ccu-sun8i-a23.o + obj-$(CONFIG_SUN8I_A33_CCU) += ccu-sun8i-a33.o +--- /dev/null ++++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c +@@ -0,0 +1,915 @@ ++/* ++ * Copyright (c) 2016 Maxime Ripard. All rights reserved. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++#include ++#include ++#include ++ ++#include "ccu_common.h" ++#include "ccu_reset.h" ++ ++#include "ccu_div.h" ++#include "ccu_gate.h" ++#include "ccu_mp.h" ++#include "ccu_mult.h" ++#include "ccu_nk.h" ++#include "ccu_nkm.h" ++#include "ccu_nkmp.h" ++#include "ccu_nm.h" ++#include "ccu_phase.h" ++ ++#include "ccu-sun50i-a64.h" ++ ++static struct ccu_nkmp pll_cpux_clk = { ++ .enable = BIT(31), ++ .lock = BIT(28), ++ .n = _SUNXI_CCU_MULT(8, 5), ++ .k = _SUNXI_CCU_MULT(4, 2), ++ .m = _SUNXI_CCU_DIV(0, 2), ++ .p = _SUNXI_CCU_DIV_MAX(16, 2, 4), ++ .common = { ++ .reg = 0x000, ++ .hw.init = CLK_HW_INIT("pll-cpux", ++ "osc24M", ++ &ccu_nkmp_ops, ++ CLK_SET_RATE_UNGATE), ++ }, ++}; ++ ++/* ++ * The Audio PLL is supposed to have 4 outputs: 3 fixed factors from ++ * the base (2x, 4x and 8x), and one variable divider (the one true ++ * pll audio). 
++ * ++ * We don't have any need for the variable divider for now, so we just ++ * hardcode it to match with the clock names ++ */ ++#define SUN50I_A64_PLL_AUDIO_REG 0x008 ++ ++static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base", ++ "osc24M", 0x008, ++ 8, 7, /* N */ ++ 0, 5, /* M */ ++ BIT(31), /* gate */ ++ BIT(28), /* lock */ ++ CLK_SET_RATE_UNGATE); ++ ++static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video0_clk, "pll-video0", ++ "osc24M", 0x010, ++ 8, 7, /* N */ ++ 0, 4, /* M */ ++ BIT(24), /* frac enable */ ++ BIT(25), /* frac select */ ++ 270000000, /* frac rate 0 */ ++ 297000000, /* frac rate 1 */ ++ BIT(31), /* gate */ ++ BIT(28), /* lock */ ++ CLK_SET_RATE_UNGATE); ++ ++static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve", ++ "osc24M", 0x018, ++ 8, 7, /* N */ ++ 0, 4, /* M */ ++ BIT(24), /* frac enable */ ++ BIT(25), /* frac select */ ++ 270000000, /* frac rate 0 */ ++ 297000000, /* frac rate 1 */ ++ BIT(31), /* gate */ ++ BIT(28), /* lock */ ++ CLK_SET_RATE_UNGATE); ++ ++static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr0_clk, "pll-ddr0", ++ "osc24M", 0x020, ++ 8, 5, /* N */ ++ 4, 2, /* K */ ++ 0, 2, /* M */ ++ BIT(31), /* gate */ ++ BIT(28), /* lock */ ++ CLK_SET_RATE_UNGATE); ++ ++static struct ccu_nk pll_periph0_clk = { ++ .enable = BIT(31), ++ .lock = BIT(28), ++ .n = _SUNXI_CCU_MULT(8, 5), ++ .k = _SUNXI_CCU_MULT_MIN(4, 2, 2), ++ .fixed_post_div = 2, ++ .common = { ++ .reg = 0x028, ++ .features = CCU_FEATURE_FIXED_POSTDIV, ++ .hw.init = CLK_HW_INIT("pll-periph0", "osc24M", ++ &ccu_nk_ops, CLK_SET_RATE_UNGATE), ++ }, ++}; ++ ++static struct ccu_nk pll_periph1_clk = { ++ .enable = BIT(31), ++ .lock = BIT(28), ++ .n = _SUNXI_CCU_MULT(8, 5), ++ .k = _SUNXI_CCU_MULT_MIN(4, 2, 2), ++ .fixed_post_div = 2, ++ .common = { ++ .reg = 0x02c, ++ .features = CCU_FEATURE_FIXED_POSTDIV, ++ .hw.init = CLK_HW_INIT("pll-periph1", "osc24M", ++ &ccu_nk_ops, CLK_SET_RATE_UNGATE), ++ }, ++}; ++ ++static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video1_clk, "pll-video1", ++ "osc24M", 0x030, ++ 8, 7, /* N */ ++ 0, 4, /* M */ ++ BIT(24), /* frac enable */ ++ BIT(25), /* frac select */ ++ 270000000, /* frac rate 0 */ ++ 297000000, /* frac rate 1 */ ++ BIT(31), /* gate */ ++ BIT(28), /* lock */ ++ CLK_SET_RATE_UNGATE); ++ ++static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu", ++ "osc24M", 0x038, ++ 8, 7, /* N */ ++ 0, 4, /* M */ ++ BIT(24), /* frac enable */ ++ BIT(25), /* frac select */ ++ 270000000, /* frac rate 0 */ ++ 297000000, /* frac rate 1 */ ++ BIT(31), /* gate */ ++ BIT(28), /* lock */ ++ CLK_SET_RATE_UNGATE); ++ ++/* ++ * The output function can be changed to something more complex that ++ * we do not handle yet. ++ * ++ * Hardcode the mode so that we don't fall in that case. 
++ */ ++#define SUN50I_A64_PLL_MIPI_REG 0x040 ++ ++struct ccu_nkm pll_mipi_clk = { ++ .enable = BIT(31), ++ .lock = BIT(28), ++ .n = _SUNXI_CCU_MULT(8, 4), ++ .k = _SUNXI_CCU_MULT_MIN(4, 2, 2), ++ .m = _SUNXI_CCU_DIV(0, 4), ++ .common = { ++ .reg = 0x040, ++ .hw.init = CLK_HW_INIT("pll-mipi", "pll-video0", ++ &ccu_nkm_ops, CLK_SET_RATE_UNGATE), ++ }, ++}; ++ ++static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_hsic_clk, "pll-hsic", ++ "osc24M", 0x044, ++ 8, 7, /* N */ ++ 0, 4, /* M */ ++ BIT(24), /* frac enable */ ++ BIT(25), /* frac select */ ++ 270000000, /* frac rate 0 */ ++ 297000000, /* frac rate 1 */ ++ BIT(31), /* gate */ ++ BIT(28), /* lock */ ++ CLK_SET_RATE_UNGATE); ++ ++static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_de_clk, "pll-de", ++ "osc24M", 0x048, ++ 8, 7, /* N */ ++ 0, 4, /* M */ ++ BIT(24), /* frac enable */ ++ BIT(25), /* frac select */ ++ 270000000, /* frac rate 0 */ ++ 297000000, /* frac rate 1 */ ++ BIT(31), /* gate */ ++ BIT(28), /* lock */ ++ CLK_SET_RATE_UNGATE); ++ ++static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_ddr1_clk, "pll-ddr1", ++ "osc24M", 0x04c, ++ 8, 7, /* N */ ++ 0, 2, /* M */ ++ BIT(31), /* gate */ ++ BIT(28), /* lock */ ++ CLK_SET_RATE_UNGATE); ++ ++static const char * const cpux_parents[] = { "osc32k", "osc24M", ++ "pll-cpux" , "pll-cpux" }; ++static SUNXI_CCU_MUX(cpux_clk, "cpux", cpux_parents, ++ 0x050, 16, 2, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL); ++ ++static SUNXI_CCU_M(axi_clk, "axi", "cpux", 0x050, 0, 2, 0); ++ ++static const char * const ahb1_parents[] = { "osc32k", "osc24M", ++ "axi" , "pll-periph0" }; ++static struct ccu_div ahb1_clk = { ++ .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO), ++ ++ .mux = { ++ .shift = 12, ++ .width = 2, ++ ++ .variable_prediv = { ++ .index = 3, ++ .shift = 6, ++ .width = 2, ++ }, ++ }, ++ ++ .common = { ++ .reg = 0x054, ++ .features = CCU_FEATURE_VARIABLE_PREDIV, ++ .hw.init = CLK_HW_INIT_PARENTS("ahb1", ++ ahb1_parents, ++ &ccu_div_ops, ++ 0), ++ }, ++}; ++ ++static struct clk_div_table apb1_div_table[] = { ++ { .val = 0, .div = 2 }, ++ { .val = 1, .div = 2 }, ++ { .val = 2, .div = 4 }, ++ { .val = 3, .div = 8 }, ++ { /* Sentinel */ }, ++}; ++static SUNXI_CCU_DIV_TABLE(apb1_clk, "apb1", "ahb1", ++ 0x054, 8, 2, apb1_div_table, 0); ++ ++static const char * const apb2_parents[] = { "osc32k", "osc24M", ++ "pll-periph0-2x" , ++ "pll-periph0-2x" }; ++static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", apb2_parents, 0x058, ++ 0, 5, /* M */ ++ 16, 2, /* P */ ++ 24, 2, /* mux */ ++ 0); ++ ++static const char * const ahb2_parents[] = { "ahb1" , "pll-periph0" }; ++static const struct ccu_mux_fixed_prediv ahb2_fixed_predivs[] = { ++ { .index = 1, .div = 2 }, ++}; ++static struct ccu_mux ahb2_clk = { ++ .mux = { ++ .shift = 0, ++ .width = 1, ++ .fixed_predivs = ahb2_fixed_predivs, ++ .n_predivs = ARRAY_SIZE(ahb2_fixed_predivs), ++ }, ++ ++ .common = { ++ .reg = 0x05c, ++ .features = CCU_FEATURE_FIXED_PREDIV, ++ .hw.init = CLK_HW_INIT_PARENTS("ahb2", ++ ahb2_parents, ++ &ccu_mux_ops, ++ 0), ++ }, ++}; ++ ++static SUNXI_CCU_GATE(bus_mipi_dsi_clk, "bus-mipi-dsi", "ahb1", ++ 0x060, BIT(1), 0); ++static SUNXI_CCU_GATE(bus_ce_clk, "bus-ce", "ahb1", ++ 0x060, BIT(5), 0); ++static SUNXI_CCU_GATE(bus_dma_clk, "bus-dma", "ahb1", ++ 0x060, BIT(6), 0); ++static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb1", ++ 0x060, BIT(8), 0); ++static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb1", ++ 0x060, BIT(9), 0); ++static SUNXI_CCU_GATE(bus_mmc2_clk, "bus-mmc2", "ahb1", ++ 0x060, BIT(10), 0); ++static SUNXI_CCU_GATE(bus_nand_clk, "bus-nand", 
"ahb1", ++ 0x060, BIT(13), 0); ++static SUNXI_CCU_GATE(bus_dram_clk, "bus-dram", "ahb1", ++ 0x060, BIT(14), 0); ++static SUNXI_CCU_GATE(bus_emac_clk, "bus-emac", "ahb2", ++ 0x060, BIT(17), 0); ++static SUNXI_CCU_GATE(bus_ts_clk, "bus-ts", "ahb1", ++ 0x060, BIT(18), 0); ++static SUNXI_CCU_GATE(bus_hstimer_clk, "bus-hstimer", "ahb1", ++ 0x060, BIT(19), 0); ++static SUNXI_CCU_GATE(bus_spi0_clk, "bus-spi0", "ahb1", ++ 0x060, BIT(20), 0); ++static SUNXI_CCU_GATE(bus_spi1_clk, "bus-spi1", "ahb1", ++ 0x060, BIT(21), 0); ++static SUNXI_CCU_GATE(bus_otg_clk, "bus-otg", "ahb1", ++ 0x060, BIT(23), 0); ++static SUNXI_CCU_GATE(bus_ehci0_clk, "bus-ehci0", "ahb1", ++ 0x060, BIT(24), 0); ++static SUNXI_CCU_GATE(bus_ehci1_clk, "bus-ehci1", "ahb2", ++ 0x060, BIT(25), 0); ++static SUNXI_CCU_GATE(bus_ohci0_clk, "bus-ohci0", "ahb1", ++ 0x060, BIT(28), 0); ++static SUNXI_CCU_GATE(bus_ohci1_clk, "bus-ohci1", "ahb2", ++ 0x060, BIT(29), 0); ++ ++static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "ahb1", ++ 0x064, BIT(0), 0); ++static SUNXI_CCU_GATE(bus_tcon0_clk, "bus-tcon0", "ahb1", ++ 0x064, BIT(3), 0); ++static SUNXI_CCU_GATE(bus_tcon1_clk, "bus-tcon1", "ahb1", ++ 0x064, BIT(4), 0); ++static SUNXI_CCU_GATE(bus_deinterlace_clk, "bus-deinterlace", "ahb1", ++ 0x064, BIT(5), 0); ++static SUNXI_CCU_GATE(bus_csi_clk, "bus-csi", "ahb1", ++ 0x064, BIT(8), 0); ++static SUNXI_CCU_GATE(bus_hdmi_clk, "bus-hdmi", "ahb1", ++ 0x064, BIT(11), 0); ++static SUNXI_CCU_GATE(bus_de_clk, "bus-de", "ahb1", ++ 0x064, BIT(12), 0); ++static SUNXI_CCU_GATE(bus_gpu_clk, "bus-gpu", "ahb1", ++ 0x064, BIT(20), 0); ++static SUNXI_CCU_GATE(bus_msgbox_clk, "bus-msgbox", "ahb1", ++ 0x064, BIT(21), 0); ++static SUNXI_CCU_GATE(bus_spinlock_clk, "bus-spinlock", "ahb1", ++ 0x064, BIT(22), 0); ++ ++static SUNXI_CCU_GATE(bus_codec_clk, "bus-codec", "apb1", ++ 0x068, BIT(0), 0); ++static SUNXI_CCU_GATE(bus_spdif_clk, "bus-spdif", "apb1", ++ 0x068, BIT(1), 0); ++static SUNXI_CCU_GATE(bus_pio_clk, "bus-pio", "apb1", ++ 0x068, BIT(5), 0); ++static SUNXI_CCU_GATE(bus_ths_clk, "bus-ths", "apb1", ++ 0x068, BIT(8), 0); ++static SUNXI_CCU_GATE(bus_i2s0_clk, "bus-i2s0", "apb1", ++ 0x068, BIT(12), 0); ++static SUNXI_CCU_GATE(bus_i2s1_clk, "bus-i2s1", "apb1", ++ 0x068, BIT(13), 0); ++static SUNXI_CCU_GATE(bus_i2s2_clk, "bus-i2s2", "apb1", ++ 0x068, BIT(14), 0); ++ ++static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2", ++ 0x06c, BIT(0), 0); ++static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2", ++ 0x06c, BIT(1), 0); ++static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2", ++ 0x06c, BIT(2), 0); ++static SUNXI_CCU_GATE(bus_scr_clk, "bus-scr", "apb2", ++ 0x06c, BIT(5), 0); ++static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2", ++ 0x06c, BIT(16), 0); ++static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2", ++ 0x06c, BIT(17), 0); ++static SUNXI_CCU_GATE(bus_uart2_clk, "bus-uart2", "apb2", ++ 0x06c, BIT(18), 0); ++static SUNXI_CCU_GATE(bus_uart3_clk, "bus-uart3", "apb2", ++ 0x06c, BIT(19), 0); ++static SUNXI_CCU_GATE(bus_uart4_clk, "bus-uart4", "apb2", ++ 0x06c, BIT(20), 0); ++ ++static SUNXI_CCU_GATE(bus_dbg_clk, "bus-dbg", "ahb1", ++ 0x070, BIT(7), 0); ++ ++static struct clk_div_table ths_div_table[] = { ++ { .val = 0, .div = 1 }, ++ { .val = 1, .div = 2 }, ++ { .val = 2, .div = 4 }, ++ { .val = 3, .div = 6 }, ++}; ++static const char * const ths_parents[] = { "osc24M" }; ++static struct ccu_div ths_clk = { ++ .enable = BIT(31), ++ .div = _SUNXI_CCU_DIV_TABLE(0, 2, ths_div_table), ++ .mux = _SUNXI_CCU_MUX(24, 2), ++ .common = { ++ .reg = 0x074, ++ .hw.init = 
CLK_HW_INIT_PARENTS("ths", ++ ths_parents, ++ &ccu_div_ops, ++ 0), ++ }, ++}; ++ ++static const char * const mod0_default_parents[] = { "osc24M", "pll-periph0", ++ "pll-periph1" }; ++static SUNXI_CCU_MP_WITH_MUX_GATE(nand_clk, "nand", mod0_default_parents, 0x080, ++ 0, 4, /* M */ ++ 16, 2, /* P */ ++ 24, 2, /* mux */ ++ BIT(31), /* gate */ ++ 0); ++ ++static const char * const mmc_default_parents[] = { "osc24M", "pll-periph0-2x", ++ "pll-periph1-2x" }; ++static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc_default_parents, 0x088, ++ 0, 4, /* M */ ++ 16, 2, /* P */ ++ 24, 2, /* mux */ ++ BIT(31), /* gate */ ++ 0); ++ ++static SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc_default_parents, 0x08c, ++ 0, 4, /* M */ ++ 16, 2, /* P */ ++ 24, 2, /* mux */ ++ BIT(31), /* gate */ ++ 0); ++ ++static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc_default_parents, 0x090, ++ 0, 4, /* M */ ++ 16, 2, /* P */ ++ 24, 2, /* mux */ ++ BIT(31), /* gate */ ++ 0); ++ ++static const char * const ts_parents[] = { "osc24M", "pll-periph0", }; ++static SUNXI_CCU_MP_WITH_MUX_GATE(ts_clk, "ts", ts_parents, 0x098, ++ 0, 4, /* M */ ++ 16, 2, /* P */ ++ 24, 4, /* mux */ ++ BIT(31), /* gate */ ++ 0); ++ ++static SUNXI_CCU_MP_WITH_MUX_GATE(ce_clk, "ce", mmc_default_parents, 0x09c, ++ 0, 4, /* M */ ++ 16, 2, /* P */ ++ 24, 2, /* mux */ ++ BIT(31), /* gate */ ++ 0); ++ ++static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", mod0_default_parents, 0x0a0, ++ 0, 4, /* M */ ++ 16, 2, /* P */ ++ 24, 2, /* mux */ ++ BIT(31), /* gate */ ++ 0); ++ ++static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4, ++ 0, 4, /* M */ ++ 16, 2, /* P */ ++ 24, 2, /* mux */ ++ BIT(31), /* gate */ ++ 0); ++ ++static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x", ++ "pll-audio-2x", "pll-audio" }; ++static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents, ++ 0x0b0, 16, 2, BIT(31), CLK_SET_RATE_PARENT); ++ ++static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents, ++ 0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT); ++ ++static SUNXI_CCU_MUX_WITH_GATE(i2s2_clk, "i2s2", i2s_parents, ++ 0x0b8, 16, 2, BIT(31), CLK_SET_RATE_PARENT); ++ ++static SUNXI_CCU_M_WITH_GATE(spdif_clk, "spdif", "pll-audio", ++ 0x0c0, 0, 4, BIT(31), CLK_SET_RATE_PARENT); ++ ++static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M", ++ 0x0cc, BIT(8), 0); ++static SUNXI_CCU_GATE(usb_phy1_clk, "usb-phy1", "osc24M", ++ 0x0cc, BIT(9), 0); ++static SUNXI_CCU_GATE(usb_hsic_clk, "usb-hsic", "pll-hsic", ++ 0x0cc, BIT(10), 0); ++static SUNXI_CCU_GATE(usb_hsic_12m_clk, "usb-hsic-12M", "osc12M", ++ 0x0cc, BIT(11), 0); ++static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "osc12M", ++ 0x0cc, BIT(16), 0); ++static SUNXI_CCU_GATE(usb_ohci1_clk, "usb-ohci1", "usb-ohci0", ++ 0x0cc, BIT(17), 0); ++ ++static const char * const dram_parents[] = { "pll-ddr0", "pll-ddr1" }; ++static SUNXI_CCU_M_WITH_MUX(dram_clk, "dram", dram_parents, ++ 0x0f4, 0, 4, 20, 2, CLK_IS_CRITICAL); ++ ++static SUNXI_CCU_GATE(dram_ve_clk, "dram-ve", "dram", ++ 0x100, BIT(0), 0); ++static SUNXI_CCU_GATE(dram_csi_clk, "dram-csi", "dram", ++ 0x100, BIT(1), 0); ++static SUNXI_CCU_GATE(dram_deinterlace_clk, "dram-deinterlace", "dram", ++ 0x100, BIT(2), 0); ++static SUNXI_CCU_GATE(dram_ts_clk, "dram-ts", "dram", ++ 0x100, BIT(3), 0); ++ ++static const char * const de_parents[] = { "pll-periph0-2x", "pll-de" }; ++static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents, ++ 0x104, 0, 4, 24, 3, BIT(31), 0); ++ ++static const char * const tcon0_parents[] = { "pll-mipi", 
"pll-video0-2x" }; ++static const u8 tcon0_table[] = { 0, 2, }; ++static SUNXI_CCU_MUX_TABLE_WITH_GATE(tcon0_clk, "tcon0", tcon0_parents, ++ tcon0_table, 0x118, 24, 3, BIT(31), ++ CLK_SET_RATE_PARENT); ++ ++static const char * const tcon1_parents[] = { "pll-video0", "pll-video1" }; ++static const u8 tcon1_table[] = { 0, 2, }; ++struct ccu_div tcon1_clk = { ++ .enable = BIT(31), ++ .div = _SUNXI_CCU_DIV(0, 4), ++ .mux = _SUNXI_CCU_MUX_TABLE(24, 2, tcon1_table), ++ .common = { ++ .reg = 0x11c, ++ .hw.init = CLK_HW_INIT_PARENTS("tcon1", ++ tcon1_parents, ++ &ccu_div_ops, ++ CLK_SET_RATE_PARENT), ++ }, ++}; ++ ++static const char * const deinterlace_parents[] = { "pll-periph0", "pll-periph1" }; ++static SUNXI_CCU_M_WITH_MUX_GATE(deinterlace_clk, "deinterlace", deinterlace_parents, ++ 0x124, 0, 4, 24, 3, BIT(31), 0); ++ ++static SUNXI_CCU_GATE(csi_misc_clk, "csi-misc", "osc24M", ++ 0x130, BIT(31), 0); ++ ++static const char * const csi_sclk_parents[] = { "pll-periph0", "pll-periph1" }; ++static SUNXI_CCU_M_WITH_MUX_GATE(csi_sclk_clk, "csi-sclk", csi_sclk_parents, ++ 0x134, 16, 4, 24, 3, BIT(31), 0); ++ ++static const char * const csi_mclk_parents[] = { "osc24M", "pll-video1", "pll-periph1" }; ++static SUNXI_CCU_M_WITH_MUX_GATE(csi_mclk_clk, "csi-mclk", csi_mclk_parents, ++ 0x134, 0, 5, 8, 3, BIT(15), 0); ++ ++static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve", ++ 0x13c, 16, 3, BIT(31), 0); ++ ++static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio", ++ 0x140, BIT(31), CLK_SET_RATE_PARENT); ++ ++static SUNXI_CCU_GATE(ac_dig_4x_clk, "ac-dig-4x", "pll-audio-4x", ++ 0x140, BIT(30), CLK_SET_RATE_PARENT); ++ ++static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M", ++ 0x144, BIT(31), 0); ++ ++static const char * const hdmi_parents[] = { "pll-video0", "pll-video1" }; ++static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", hdmi_parents, ++ 0x150, 0, 4, 24, 2, BIT(31), CLK_SET_RATE_PARENT); ++ ++static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", ++ 0x154, BIT(31), 0); ++ ++static const char * const mbus_parents[] = { "osc24M", "pll-periph0-2x", ++ "pll-ddr0", "pll-ddr1" }; ++static SUNXI_CCU_M_WITH_MUX_GATE(mbus_clk, "mbus", mbus_parents, ++ 0x15c, 0, 3, 24, 2, BIT(31), CLK_IS_CRITICAL); ++ ++static const char * const dsi_dphy_parents[] = { "pll-video0", "pll-periph0" }; ++static const u8 dsi_dphy_table[] = { 0, 2, }; ++static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(dsi_dphy_clk, "dsi-dphy", ++ dsi_dphy_parents, dsi_dphy_table, ++ 0x168, 0, 4, 8, 2, BIT(31), CLK_SET_RATE_PARENT); ++ ++static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu", ++ 0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT); ++ ++/* Fixed Factor clocks */ ++static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 1, 2, 0); ++ ++/* We hardcode the divider to 4 for now */ ++static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", ++ "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT); ++static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x", ++ "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT); ++static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x", ++ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); ++static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x", ++ "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT); ++static CLK_FIXED_FACTOR(pll_periph0_2x_clk, "pll-periph0-2x", ++ "pll-periph0", 1, 2, 0); ++static CLK_FIXED_FACTOR(pll_periph1_2x_clk, "pll-periph1-2x", ++ "pll-periph1", 1, 2, 0); ++static CLK_FIXED_FACTOR(pll_video0_2x_clk, "pll-video0-2x", ++ "pll-video0", 1, 2, CLK_SET_RATE_PARENT); ++ ++static struct ccu_common *sun50i_a64_ccu_clks[] = { ++ 
&pll_cpux_clk.common, ++ &pll_audio_base_clk.common, ++ &pll_video0_clk.common, ++ &pll_ve_clk.common, ++ &pll_ddr0_clk.common, ++ &pll_periph0_clk.common, ++ &pll_periph1_clk.common, ++ &pll_video1_clk.common, ++ &pll_gpu_clk.common, ++ &pll_mipi_clk.common, ++ &pll_hsic_clk.common, ++ &pll_de_clk.common, ++ &pll_ddr1_clk.common, ++ &cpux_clk.common, ++ &axi_clk.common, ++ &ahb1_clk.common, ++ &apb1_clk.common, ++ &apb2_clk.common, ++ &ahb2_clk.common, ++ &bus_mipi_dsi_clk.common, ++ &bus_ce_clk.common, ++ &bus_dma_clk.common, ++ &bus_mmc0_clk.common, ++ &bus_mmc1_clk.common, ++ &bus_mmc2_clk.common, ++ &bus_nand_clk.common, ++ &bus_dram_clk.common, ++ &bus_emac_clk.common, ++ &bus_ts_clk.common, ++ &bus_hstimer_clk.common, ++ &bus_spi0_clk.common, ++ &bus_spi1_clk.common, ++ &bus_otg_clk.common, ++ &bus_ehci0_clk.common, ++ &bus_ehci1_clk.common, ++ &bus_ohci0_clk.common, ++ &bus_ohci1_clk.common, ++ &bus_ve_clk.common, ++ &bus_tcon0_clk.common, ++ &bus_tcon1_clk.common, ++ &bus_deinterlace_clk.common, ++ &bus_csi_clk.common, ++ &bus_hdmi_clk.common, ++ &bus_de_clk.common, ++ &bus_gpu_clk.common, ++ &bus_msgbox_clk.common, ++ &bus_spinlock_clk.common, ++ &bus_codec_clk.common, ++ &bus_spdif_clk.common, ++ &bus_pio_clk.common, ++ &bus_ths_clk.common, ++ &bus_i2s0_clk.common, ++ &bus_i2s1_clk.common, ++ &bus_i2s2_clk.common, ++ &bus_i2c0_clk.common, ++ &bus_i2c1_clk.common, ++ &bus_i2c2_clk.common, ++ &bus_scr_clk.common, ++ &bus_uart0_clk.common, ++ &bus_uart1_clk.common, ++ &bus_uart2_clk.common, ++ &bus_uart3_clk.common, ++ &bus_uart4_clk.common, ++ &bus_dbg_clk.common, ++ &ths_clk.common, ++ &nand_clk.common, ++ &mmc0_clk.common, ++ &mmc1_clk.common, ++ &mmc2_clk.common, ++ &ts_clk.common, ++ &ce_clk.common, ++ &spi0_clk.common, ++ &spi1_clk.common, ++ &i2s0_clk.common, ++ &i2s1_clk.common, ++ &i2s2_clk.common, ++ &spdif_clk.common, ++ &usb_phy0_clk.common, ++ &usb_phy1_clk.common, ++ &usb_hsic_clk.common, ++ &usb_hsic_12m_clk.common, ++ &usb_ohci0_clk.common, ++ &usb_ohci1_clk.common, ++ &dram_clk.common, ++ &dram_ve_clk.common, ++ &dram_csi_clk.common, ++ &dram_deinterlace_clk.common, ++ &dram_ts_clk.common, ++ &de_clk.common, ++ &tcon0_clk.common, ++ &tcon1_clk.common, ++ &deinterlace_clk.common, ++ &csi_misc_clk.common, ++ &csi_sclk_clk.common, ++ &csi_mclk_clk.common, ++ &ve_clk.common, ++ &ac_dig_clk.common, ++ &ac_dig_4x_clk.common, ++ &avs_clk.common, ++ &hdmi_clk.common, ++ &hdmi_ddc_clk.common, ++ &mbus_clk.common, ++ &dsi_dphy_clk.common, ++ &gpu_clk.common, ++}; ++ ++static struct clk_hw_onecell_data sun50i_a64_hw_clks = { ++ .hws = { ++ [CLK_OSC_12M] = &osc12M_clk.hw, ++ [CLK_PLL_CPUX] = &pll_cpux_clk.common.hw, ++ [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw, ++ [CLK_PLL_AUDIO] = &pll_audio_clk.hw, ++ [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw, ++ [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw, ++ [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw, ++ [CLK_PLL_VIDEO0] = &pll_video0_clk.common.hw, ++ [CLK_PLL_VIDEO0_2X] = &pll_video0_2x_clk.hw, ++ [CLK_PLL_VE] = &pll_ve_clk.common.hw, ++ [CLK_PLL_DDR0] = &pll_ddr0_clk.common.hw, ++ [CLK_PLL_PERIPH0] = &pll_periph0_clk.common.hw, ++ [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.hw, ++ [CLK_PLL_PERIPH1] = &pll_periph1_clk.common.hw, ++ [CLK_PLL_PERIPH1_2X] = &pll_periph1_2x_clk.hw, ++ [CLK_PLL_VIDEO1] = &pll_video1_clk.common.hw, ++ [CLK_PLL_GPU] = &pll_gpu_clk.common.hw, ++ [CLK_PLL_MIPI] = &pll_mipi_clk.common.hw, ++ [CLK_PLL_HSIC] = &pll_hsic_clk.common.hw, ++ [CLK_PLL_DE] = &pll_de_clk.common.hw, ++ [CLK_PLL_DDR1] = 
&pll_ddr1_clk.common.hw, ++ [CLK_CPUX] = &cpux_clk.common.hw, ++ [CLK_AXI] = &axi_clk.common.hw, ++ [CLK_AHB1] = &ahb1_clk.common.hw, ++ [CLK_APB1] = &apb1_clk.common.hw, ++ [CLK_APB2] = &apb2_clk.common.hw, ++ [CLK_AHB2] = &ahb2_clk.common.hw, ++ [CLK_BUS_MIPI_DSI] = &bus_mipi_dsi_clk.common.hw, ++ [CLK_BUS_CE] = &bus_ce_clk.common.hw, ++ [CLK_BUS_DMA] = &bus_dma_clk.common.hw, ++ [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw, ++ [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw, ++ [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw, ++ [CLK_BUS_NAND] = &bus_nand_clk.common.hw, ++ [CLK_BUS_DRAM] = &bus_dram_clk.common.hw, ++ [CLK_BUS_EMAC] = &bus_emac_clk.common.hw, ++ [CLK_BUS_TS] = &bus_ts_clk.common.hw, ++ [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw, ++ [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw, ++ [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw, ++ [CLK_BUS_OTG] = &bus_otg_clk.common.hw, ++ [CLK_BUS_EHCI0] = &bus_ehci0_clk.common.hw, ++ [CLK_BUS_EHCI1] = &bus_ehci1_clk.common.hw, ++ [CLK_BUS_OHCI0] = &bus_ohci0_clk.common.hw, ++ [CLK_BUS_OHCI1] = &bus_ohci1_clk.common.hw, ++ [CLK_BUS_VE] = &bus_ve_clk.common.hw, ++ [CLK_BUS_TCON0] = &bus_tcon0_clk.common.hw, ++ [CLK_BUS_TCON1] = &bus_tcon1_clk.common.hw, ++ [CLK_BUS_DEINTERLACE] = &bus_deinterlace_clk.common.hw, ++ [CLK_BUS_CSI] = &bus_csi_clk.common.hw, ++ [CLK_BUS_HDMI] = &bus_hdmi_clk.common.hw, ++ [CLK_BUS_DE] = &bus_de_clk.common.hw, ++ [CLK_BUS_GPU] = &bus_gpu_clk.common.hw, ++ [CLK_BUS_MSGBOX] = &bus_msgbox_clk.common.hw, ++ [CLK_BUS_SPINLOCK] = &bus_spinlock_clk.common.hw, ++ [CLK_BUS_CODEC] = &bus_codec_clk.common.hw, ++ [CLK_BUS_SPDIF] = &bus_spdif_clk.common.hw, ++ [CLK_BUS_PIO] = &bus_pio_clk.common.hw, ++ [CLK_BUS_THS] = &bus_ths_clk.common.hw, ++ [CLK_BUS_I2S0] = &bus_i2s0_clk.common.hw, ++ [CLK_BUS_I2S1] = &bus_i2s1_clk.common.hw, ++ [CLK_BUS_I2S2] = &bus_i2s2_clk.common.hw, ++ [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw, ++ [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw, ++ [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw, ++ [CLK_BUS_UART0] = &bus_uart0_clk.common.hw, ++ [CLK_BUS_UART1] = &bus_uart1_clk.common.hw, ++ [CLK_BUS_UART2] = &bus_uart2_clk.common.hw, ++ [CLK_BUS_UART3] = &bus_uart3_clk.common.hw, ++ [CLK_BUS_UART4] = &bus_uart4_clk.common.hw, ++ [CLK_BUS_SCR] = &bus_scr_clk.common.hw, ++ [CLK_BUS_DBG] = &bus_dbg_clk.common.hw, ++ [CLK_THS] = &ths_clk.common.hw, ++ [CLK_NAND] = &nand_clk.common.hw, ++ [CLK_MMC0] = &mmc0_clk.common.hw, ++ [CLK_MMC1] = &mmc1_clk.common.hw, ++ [CLK_MMC2] = &mmc2_clk.common.hw, ++ [CLK_TS] = &ts_clk.common.hw, ++ [CLK_CE] = &ce_clk.common.hw, ++ [CLK_SPI0] = &spi0_clk.common.hw, ++ [CLK_SPI1] = &spi1_clk.common.hw, ++ [CLK_I2S0] = &i2s0_clk.common.hw, ++ [CLK_I2S1] = &i2s1_clk.common.hw, ++ [CLK_I2S2] = &i2s2_clk.common.hw, ++ [CLK_SPDIF] = &spdif_clk.common.hw, ++ [CLK_USB_PHY0] = &usb_phy0_clk.common.hw, ++ [CLK_USB_PHY1] = &usb_phy1_clk.common.hw, ++ [CLK_USB_HSIC] = &usb_hsic_clk.common.hw, ++ [CLK_USB_HSIC_12M] = &usb_hsic_12m_clk.common.hw, ++ [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw, ++ [CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw, ++ [CLK_DRAM] = &dram_clk.common.hw, ++ [CLK_DRAM_VE] = &dram_ve_clk.common.hw, ++ [CLK_DRAM_CSI] = &dram_csi_clk.common.hw, ++ [CLK_DRAM_DEINTERLACE] = &dram_deinterlace_clk.common.hw, ++ [CLK_DRAM_TS] = &dram_ts_clk.common.hw, ++ [CLK_DE] = &de_clk.common.hw, ++ [CLK_TCON0] = &tcon0_clk.common.hw, ++ [CLK_TCON1] = &tcon1_clk.common.hw, ++ [CLK_DEINTERLACE] = &deinterlace_clk.common.hw, ++ [CLK_CSI_MISC] = &csi_misc_clk.common.hw, ++ [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw, ++ 
[CLK_CSI_MCLK] = &csi_mclk_clk.common.hw, ++ [CLK_VE] = &ve_clk.common.hw, ++ [CLK_AC_DIG] = &ac_dig_clk.common.hw, ++ [CLK_AC_DIG_4X] = &ac_dig_4x_clk.common.hw, ++ [CLK_AVS] = &avs_clk.common.hw, ++ [CLK_HDMI] = &hdmi_clk.common.hw, ++ [CLK_HDMI_DDC] = &hdmi_ddc_clk.common.hw, ++ [CLK_MBUS] = &mbus_clk.common.hw, ++ [CLK_DSI_DPHY] = &dsi_dphy_clk.common.hw, ++ [CLK_GPU] = &gpu_clk.common.hw, ++ }, ++ .num = CLK_NUMBER, ++}; ++ ++static struct ccu_reset_map sun50i_a64_ccu_resets[] = { ++ [RST_USB_PHY0] = { 0x0cc, BIT(0) }, ++ [RST_USB_PHY1] = { 0x0cc, BIT(1) }, ++ [RST_USB_HSIC] = { 0x0cc, BIT(2) }, ++ ++ [RST_DRAM] = { 0x0f4, BIT(31) }, ++ [RST_MBUS] = { 0x0fc, BIT(31) }, ++ ++ [RST_BUS_MIPI_DSI] = { 0x2c0, BIT(1) }, ++ [RST_BUS_CE] = { 0x2c0, BIT(5) }, ++ [RST_BUS_DMA] = { 0x2c0, BIT(6) }, ++ [RST_BUS_MMC0] = { 0x2c0, BIT(8) }, ++ [RST_BUS_MMC1] = { 0x2c0, BIT(9) }, ++ [RST_BUS_MMC2] = { 0x2c0, BIT(10) }, ++ [RST_BUS_NAND] = { 0x2c0, BIT(13) }, ++ [RST_BUS_DRAM] = { 0x2c0, BIT(14) }, ++ [RST_BUS_EMAC] = { 0x2c0, BIT(17) }, ++ [RST_BUS_TS] = { 0x2c0, BIT(18) }, ++ [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) }, ++ [RST_BUS_SPI0] = { 0x2c0, BIT(20) }, ++ [RST_BUS_SPI1] = { 0x2c0, BIT(21) }, ++ [RST_BUS_OTG] = { 0x2c0, BIT(23) }, ++ [RST_BUS_EHCI0] = { 0x2c0, BIT(24) }, ++ [RST_BUS_EHCI1] = { 0x2c0, BIT(25) }, ++ [RST_BUS_OHCI0] = { 0x2c0, BIT(28) }, ++ [RST_BUS_OHCI1] = { 0x2c0, BIT(29) }, ++ ++ [RST_BUS_VE] = { 0x2c4, BIT(0) }, ++ [RST_BUS_TCON0] = { 0x2c4, BIT(3) }, ++ [RST_BUS_TCON1] = { 0x2c4, BIT(4) }, ++ [RST_BUS_DEINTERLACE] = { 0x2c4, BIT(5) }, ++ [RST_BUS_CSI] = { 0x2c4, BIT(8) }, ++ [RST_BUS_HDMI0] = { 0x2c4, BIT(10) }, ++ [RST_BUS_HDMI1] = { 0x2c4, BIT(11) }, ++ [RST_BUS_DE] = { 0x2c4, BIT(12) }, ++ [RST_BUS_GPU] = { 0x2c4, BIT(20) }, ++ [RST_BUS_MSGBOX] = { 0x2c4, BIT(21) }, ++ [RST_BUS_SPINLOCK] = { 0x2c4, BIT(22) }, ++ [RST_BUS_DBG] = { 0x2c4, BIT(31) }, ++ ++ [RST_BUS_LVDS] = { 0x2c8, BIT(0) }, ++ ++ [RST_BUS_CODEC] = { 0x2d0, BIT(0) }, ++ [RST_BUS_SPDIF] = { 0x2d0, BIT(1) }, ++ [RST_BUS_THS] = { 0x2d0, BIT(8) }, ++ [RST_BUS_I2S0] = { 0x2d0, BIT(12) }, ++ [RST_BUS_I2S1] = { 0x2d0, BIT(13) }, ++ [RST_BUS_I2S2] = { 0x2d0, BIT(14) }, ++ ++ [RST_BUS_I2C0] = { 0x2d8, BIT(0) }, ++ [RST_BUS_I2C1] = { 0x2d8, BIT(1) }, ++ [RST_BUS_I2C2] = { 0x2d8, BIT(2) }, ++ [RST_BUS_SCR] = { 0x2d8, BIT(5) }, ++ [RST_BUS_UART0] = { 0x2d8, BIT(16) }, ++ [RST_BUS_UART1] = { 0x2d8, BIT(17) }, ++ [RST_BUS_UART2] = { 0x2d8, BIT(18) }, ++ [RST_BUS_UART3] = { 0x2d8, BIT(19) }, ++ [RST_BUS_UART4] = { 0x2d8, BIT(20) }, ++}; ++ ++static const struct sunxi_ccu_desc sun50i_a64_ccu_desc = { ++ .ccu_clks = sun50i_a64_ccu_clks, ++ .num_ccu_clks = ARRAY_SIZE(sun50i_a64_ccu_clks), ++ ++ .hw_clks = &sun50i_a64_hw_clks, ++ ++ .resets = sun50i_a64_ccu_resets, ++ .num_resets = ARRAY_SIZE(sun50i_a64_ccu_resets), ++}; ++ ++static int sun50i_a64_ccu_probe(struct platform_device *pdev) ++{ ++ struct resource *res; ++ void __iomem *reg; ++ u32 val; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ reg = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(reg)) ++ return PTR_ERR(reg); ++ ++ /* Force the PLL-Audio-1x divider to 4 */ ++ val = readl(reg + SUN50I_A64_PLL_AUDIO_REG); ++ val &= ~GENMASK(19, 16); ++ writel(val | (3 << 16), reg + SUN50I_A64_PLL_AUDIO_REG); ++ ++ writel(0x515, reg + SUN50I_A64_PLL_MIPI_REG); ++ ++ return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a64_ccu_desc); ++} ++ ++static const struct of_device_id sun50i_a64_ccu_ids[] = { ++ { .compatible = "allwinner,sun50i-a64-ccu" }, ++ { } ++}; 
++ ++static struct platform_driver sun50i_a64_ccu_driver = { ++ .probe = sun50i_a64_ccu_probe, ++ .driver = { ++ .name = "sun50i-a64-ccu", ++ .of_match_table = sun50i_a64_ccu_ids, ++ }, ++}; ++builtin_platform_driver(sun50i_a64_ccu_driver); +--- /dev/null ++++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h +@@ -0,0 +1,72 @@ ++/* ++ * Copyright 2016 Maxime Ripard ++ * ++ * Maxime Ripard ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++#ifndef _CCU_SUN50I_A64_H_ ++#define _CCU_SUN50I_A64_H_ ++ ++#include ++#include ++ ++#define CLK_OSC_12M 0 ++#define CLK_PLL_CPUX 1 ++#define CLK_PLL_AUDIO_BASE 2 ++#define CLK_PLL_AUDIO 3 ++#define CLK_PLL_AUDIO_2X 4 ++#define CLK_PLL_AUDIO_4X 5 ++#define CLK_PLL_AUDIO_8X 6 ++#define CLK_PLL_VIDEO0 7 ++#define CLK_PLL_VIDEO0_2X 8 ++#define CLK_PLL_VE 9 ++#define CLK_PLL_DDR0 10 ++#define CLK_PLL_PERIPH0 11 ++#define CLK_PLL_PERIPH0_2X 12 ++#define CLK_PLL_PERIPH1 13 ++#define CLK_PLL_PERIPH1_2X 14 ++#define CLK_PLL_VIDEO1 15 ++#define CLK_PLL_GPU 16 ++#define CLK_PLL_MIPI 17 ++#define CLK_PLL_HSIC 18 ++#define CLK_PLL_DE 19 ++#define CLK_PLL_DDR1 20 ++#define CLK_CPUX 21 ++#define CLK_AXI 22 ++#define CLK_APB 23 ++#define CLK_AHB1 24 ++#define CLK_APB1 25 ++#define CLK_APB2 26 ++#define CLK_AHB2 27 ++ ++/* All the bus gates are exported */ ++ ++/* The first bunch of module clocks are exported */ ++ ++#define CLK_USB_OHCI0_12M 90 ++ ++#define CLK_USB_OHCI1_12M 92 ++ ++#define CLK_DRAM 94 ++ ++/* All the DRAM gates are exported */ ++ ++/* Some more module clocks are exported */ ++ ++#define CLK_MBUS 112 ++ ++/* And the DSI and GPU module clock is exported */ ++ ++#define CLK_NUMBER (CLK_GPU + 1) ++ ++#endif /* _CCU_SUN50I_A64_H_ */ +--- /dev/null ++++ b/include/dt-bindings/clock/sun50i-a64-ccu.h +@@ -0,0 +1,134 @@ ++/* ++ * Copyright (C) 2016 Maxime Ripard ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPL or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This file is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#ifndef _DT_BINDINGS_CLK_SUN50I_A64_H_ ++#define _DT_BINDINGS_CLK_SUN50I_A64_H_ ++ ++#define CLK_BUS_MIPI_DSI 28 ++#define CLK_BUS_CE 29 ++#define CLK_BUS_DMA 30 ++#define CLK_BUS_MMC0 31 ++#define CLK_BUS_MMC1 32 ++#define CLK_BUS_MMC2 33 ++#define CLK_BUS_NAND 34 ++#define CLK_BUS_DRAM 35 ++#define CLK_BUS_EMAC 36 ++#define CLK_BUS_TS 37 ++#define CLK_BUS_HSTIMER 38 ++#define CLK_BUS_SPI0 39 ++#define CLK_BUS_SPI1 40 ++#define CLK_BUS_OTG 41 ++#define CLK_BUS_EHCI0 42 ++#define CLK_BUS_EHCI1 43 ++#define CLK_BUS_OHCI0 44 ++#define CLK_BUS_OHCI1 45 ++#define CLK_BUS_VE 46 ++#define CLK_BUS_TCON0 47 ++#define CLK_BUS_TCON1 48 ++#define CLK_BUS_DEINTERLACE 49 ++#define CLK_BUS_CSI 50 ++#define CLK_BUS_HDMI 51 ++#define CLK_BUS_DE 52 ++#define CLK_BUS_GPU 53 ++#define CLK_BUS_MSGBOX 54 ++#define CLK_BUS_SPINLOCK 55 ++#define CLK_BUS_CODEC 56 ++#define CLK_BUS_SPDIF 57 ++#define CLK_BUS_PIO 58 ++#define CLK_BUS_THS 59 ++#define CLK_BUS_I2S0 60 ++#define CLK_BUS_I2S1 61 ++#define CLK_BUS_I2S2 62 ++#define CLK_BUS_I2C0 63 ++#define CLK_BUS_I2C1 64 ++#define CLK_BUS_I2C2 65 ++#define CLK_BUS_SCR 66 ++#define CLK_BUS_UART0 67 ++#define CLK_BUS_UART1 68 ++#define CLK_BUS_UART2 69 ++#define CLK_BUS_UART3 70 ++#define CLK_BUS_UART4 71 ++#define CLK_BUS_DBG 72 ++#define CLK_THS 73 ++#define CLK_NAND 74 ++#define CLK_MMC0 75 ++#define CLK_MMC1 76 ++#define CLK_MMC2 77 ++#define CLK_TS 78 ++#define CLK_CE 79 ++#define CLK_SPI0 80 ++#define CLK_SPI1 81 ++#define CLK_I2S0 82 ++#define CLK_I2S1 83 ++#define CLK_I2S2 84 ++#define CLK_SPDIF 85 ++#define CLK_USB_PHY0 86 ++#define CLK_USB_PHY1 87 ++#define CLK_USB_HSIC 88 ++#define CLK_USB_HSIC_12M 89 ++ ++#define CLK_USB_OHCI0 91 ++ ++#define CLK_USB_OHCI1 93 ++ ++#define CLK_DRAM_VE 95 ++#define CLK_DRAM_CSI 96 ++#define CLK_DRAM_DEINTERLACE 97 ++#define CLK_DRAM_TS 98 ++#define CLK_DE 99 ++#define CLK_TCON0 100 ++#define CLK_TCON1 101 ++#define CLK_DEINTERLACE 102 ++#define CLK_CSI_MISC 103 ++#define CLK_CSI_SCLK 104 ++#define CLK_CSI_MCLK 105 ++#define CLK_VE 106 ++#define CLK_AC_DIG 107 ++#define CLK_AC_DIG_4X 108 ++#define CLK_AVS 109 ++#define CLK_HDMI 110 ++#define CLK_HDMI_DDC 111 ++ ++#define CLK_DSI_DPHY 113 ++#define CLK_GPU 114 ++ ++#endif /* _DT_BINDINGS_CLK_SUN50I_H_ */ +--- /dev/null ++++ b/include/dt-bindings/reset/sun50i-a64-ccu.h +@@ -0,0 +1,98 @@ ++/* ++ * Copyright (C) 2016 Maxime Ripard ++ * 
++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPL or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This file is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#ifndef _DT_BINDINGS_RST_SUN50I_A64_H_ ++#define _DT_BINDINGS_RST_SUN50I_A64_H_ ++ ++#define RST_USB_PHY0 0 ++#define RST_USB_PHY1 1 ++#define RST_USB_HSIC 2 ++#define RST_DRAM 3 ++#define RST_MBUS 4 ++#define RST_BUS_MIPI_DSI 5 ++#define RST_BUS_CE 6 ++#define RST_BUS_DMA 7 ++#define RST_BUS_MMC0 8 ++#define RST_BUS_MMC1 9 ++#define RST_BUS_MMC2 10 ++#define RST_BUS_NAND 11 ++#define RST_BUS_DRAM 12 ++#define RST_BUS_EMAC 13 ++#define RST_BUS_TS 14 ++#define RST_BUS_HSTIMER 15 ++#define RST_BUS_SPI0 16 ++#define RST_BUS_SPI1 17 ++#define RST_BUS_OTG 18 ++#define RST_BUS_EHCI0 19 ++#define RST_BUS_EHCI1 20 ++#define RST_BUS_OHCI0 21 ++#define RST_BUS_OHCI1 22 ++#define RST_BUS_VE 23 ++#define RST_BUS_TCON0 24 ++#define RST_BUS_TCON1 25 ++#define RST_BUS_DEINTERLACE 26 ++#define RST_BUS_CSI 27 ++#define RST_BUS_HDMI0 28 ++#define RST_BUS_HDMI1 29 ++#define RST_BUS_DE 30 ++#define RST_BUS_GPU 31 ++#define RST_BUS_MSGBOX 32 ++#define RST_BUS_SPINLOCK 33 ++#define RST_BUS_DBG 34 ++#define RST_BUS_LVDS 35 ++#define RST_BUS_CODEC 36 ++#define RST_BUS_SPDIF 37 ++#define RST_BUS_THS 38 ++#define RST_BUS_I2S0 39 ++#define RST_BUS_I2S1 40 ++#define RST_BUS_I2S2 41 ++#define RST_BUS_I2C0 42 ++#define RST_BUS_I2C1 43 ++#define RST_BUS_I2C2 44 ++#define RST_BUS_SCR 45 ++#define RST_BUS_UART0 46 ++#define RST_BUS_UART1 47 ++#define RST_BUS_UART2 48 ++#define RST_BUS_UART3 49 ++#define RST_BUS_UART4 50 ++ ++#endif /* _DT_BINDINGS_RST_SUN50I_A64_H_ */ diff --git a/target/linux/sunxi/patches-4.9/0010-arm64-dts-add-Allwinner-A64-SoC-.dtsi.patch b/target/linux/sunxi/patches-4.9/0010-arm64-dts-add-Allwinner-A64-SoC-.dtsi.patch new file mode 100644 index 000000000..eaaba96fc --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0010-arm64-dts-add-Allwinner-A64-SoC-.dtsi.patch @@ -0,0 +1,311 @@ +From 6bc37fac30cf01c39feb17834090089304bd1d31 Mon Sep 17 00:00:00 2001 +From: Andre Przywara +Date: Mon, 18 Jan 2016 10:24:31 +0000 +Subject: arm64: dts: add Allwinner A64 SoC .dtsi + +The Allwinner A64 SoC is a low-cost chip with 4 ARM Cortex-A53 cores +and the typical tablet / TV box peripherals. +The SoC is based on the (32-bit) Allwinner H3 chip, sharing most of +the peripherals and the memory map. +Although the cores are proper 64-bit ones, the whole SoC is actually +limited to 4GB (including all the supported DRAM), so we use 32-bit +address and size cells. This has the nice feature of us being able to +reuse the DT for 32-bit kernels as well. +This .dtsi lists the hardware that we support so far. 
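For orientation, a minimal sketch of how a peripheral driver would consume one of the clocks this .dtsi wires up through the CCU's one-cell specifier (for instance clocks = <&ccu CLK_BUS_UART0>). The probe function and names below are illustrative, not part of the patch:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* First (unnamed) entry of the node's "clocks" property */
	struct clk *bus_clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(bus_clk))
		return PTR_ERR(bus_clk);

	/* Ungate the bus clock before touching the device's registers */
	return clk_prepare_enable(bus_clk);
}

The single clock cell is the index defined in dt-bindings/clock/sun50i-a64-ccu.h, so the consumer side needs no SoC-specific code beyond the common clk API.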
+ +Signed-off-by: Andre Przywara +Acked-by: Rob Herring +Acked-by: Chen-Yu Tsai +[Maxime: Convert to CCU binding, drop the MMC support for now] +Signed-off-by: Maxime Ripard +--- + Documentation/devicetree/bindings/arm/sunxi.txt | 1 + + MAINTAINERS | 1 + + arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 263 ++++++++++++++++++++++++ + 3 files changed, 265 insertions(+) + create mode 100644 arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi + +--- a/Documentation/devicetree/bindings/arm/sunxi.txt ++++ b/Documentation/devicetree/bindings/arm/sunxi.txt +@@ -14,4 +14,5 @@ using one of the following compatible st + allwinner,sun8i-a83t + allwinner,sun8i-h3 + allwinner,sun9i-a80 ++ allwinner,sun50i-a64 + nextthing,gr8 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -1026,6 +1026,7 @@ L: linux-arm-kernel@lists.infradead.org + S: Maintained + N: sun[x456789]i + F: arch/arm/boot/dts/ntc-gr8* ++F: arch/arm64/boot/dts/allwinner/ + + ARM/Allwinner SoC Clock Support + M: Emilio López +--- /dev/null ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +@@ -0,0 +1,263 @@ ++/* ++ * Copyright (C) 2016 ARM Ltd. ++ * based on the Allwinner H3 dtsi: ++ * Copyright (C) 2015 Jens Kuske ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPL or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This file is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#include ++#include ++#include ++#include ++ ++/ { ++ interrupt-parent = <&gic>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ cpus { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ cpu0: cpu@0 { ++ compatible = "arm,cortex-a53", "arm,armv8"; ++ device_type = "cpu"; ++ reg = <0>; ++ enable-method = "psci"; ++ }; ++ ++ cpu1: cpu@1 { ++ compatible = "arm,cortex-a53", "arm,armv8"; ++ device_type = "cpu"; ++ reg = <1>; ++ enable-method = "psci"; ++ }; ++ ++ cpu2: cpu@2 { ++ compatible = "arm,cortex-a53", "arm,armv8"; ++ device_type = "cpu"; ++ reg = <2>; ++ enable-method = "psci"; ++ }; ++ ++ cpu3: cpu@3 { ++ compatible = "arm,cortex-a53", "arm,armv8"; ++ device_type = "cpu"; ++ reg = <3>; ++ enable-method = "psci"; ++ }; ++ }; ++ ++ osc24M: osc24M_clk { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <24000000>; ++ clock-output-names = "osc24M"; ++ }; ++ ++ osc32k: osc32k_clk { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <32768>; ++ clock-output-names = "osc32k"; ++ }; ++ ++ psci { ++ compatible = "arm,psci-0.2"; ++ method = "smc"; ++ }; ++ ++ timer { ++ compatible = "arm,armv8-timer"; ++ interrupts = , ++ , ++ , ++ ; ++ }; ++ ++ soc { ++ compatible = "simple-bus"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges; ++ ++ ccu: clock@01c20000 { ++ compatible = "allwinner,sun50i-a64-ccu"; ++ reg = <0x01c20000 0x400>; ++ clocks = <&osc24M>, <&osc32k>; ++ clock-names = "hosc", "losc"; ++ #clock-cells = <1>; ++ #reset-cells = <1>; ++ }; ++ ++ pio: pinctrl@1c20800 { ++ compatible = "allwinner,sun50i-a64-pinctrl"; ++ reg = <0x01c20800 0x400>; ++ interrupts = , ++ , ++ ; ++ clocks = <&ccu CLK_BUS_PIO>; ++ gpio-controller; ++ #gpio-cells = <3>; ++ interrupt-controller; ++ #interrupt-cells = <3>; ++ ++ i2c1_pins: i2c1_pins { ++ pins = "PH2", "PH3"; ++ function = "i2c1"; ++ }; ++ ++ uart0_pins_a: uart0@0 { ++ pins = "PB8", "PB9"; ++ function = "uart0"; ++ }; ++ }; ++ ++ uart0: serial@1c28000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x01c28000 0x400>; ++ interrupts = ; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ clocks = <&ccu CLK_BUS_UART0>; ++ resets = <&ccu RST_BUS_UART0>; ++ status = "disabled"; ++ }; ++ ++ uart1: serial@1c28400 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x01c28400 0x400>; ++ interrupts = ; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ clocks = <&ccu CLK_BUS_UART1>; ++ resets = <&ccu RST_BUS_UART1>; ++ status = "disabled"; ++ }; ++ ++ uart2: serial@1c28800 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x01c28800 0x400>; ++ interrupts = ; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ clocks = <&ccu CLK_BUS_UART2>; ++ resets = <&ccu RST_BUS_UART2>; ++ status = "disabled"; ++ }; ++ ++ uart3: serial@1c28c00 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x01c28c00 0x400>; ++ interrupts = ; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ clocks = <&ccu CLK_BUS_UART3>; ++ resets = <&ccu RST_BUS_UART3>; ++ status = "disabled"; ++ }; ++ ++ uart4: serial@1c29000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x01c29000 0x400>; ++ interrupts = ; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ clocks = <&ccu CLK_BUS_UART4>; ++ resets = <&ccu RST_BUS_UART4>; ++ status = "disabled"; ++ }; ++ ++ i2c0: i2c@1c2ac00 { ++ compatible = "allwinner,sun6i-a31-i2c"; ++ reg = <0x01c2ac00 0x400>; ++ interrupts = ; ++ clocks = <&ccu CLK_BUS_I2C0>; ++ resets = <&ccu RST_BUS_I2C0>; ++ status = "disabled"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ i2c1: i2c@1c2b000 { ++ compatible = 
"allwinner,sun6i-a31-i2c"; ++ reg = <0x01c2b000 0x400>; ++ interrupts = ; ++ clocks = <&ccu CLK_BUS_I2C1>; ++ resets = <&ccu RST_BUS_I2C1>; ++ status = "disabled"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ i2c2: i2c@1c2b400 { ++ compatible = "allwinner,sun6i-a31-i2c"; ++ reg = <0x01c2b400 0x400>; ++ interrupts = ; ++ clocks = <&ccu CLK_BUS_I2C2>; ++ resets = <&ccu RST_BUS_I2C2>; ++ status = "disabled"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ gic: interrupt-controller@1c81000 { ++ compatible = "arm,gic-400"; ++ reg = <0x01c81000 0x1000>, ++ <0x01c82000 0x2000>, ++ <0x01c84000 0x2000>, ++ <0x01c86000 0x2000>; ++ interrupts = ; ++ interrupt-controller; ++ #interrupt-cells = <3>; ++ }; ++ ++ rtc: rtc@1f00000 { ++ compatible = "allwinner,sun6i-a31-rtc"; ++ reg = <0x01f00000 0x54>; ++ interrupts = , ++ ; ++ }; ++ }; ++}; diff --git a/target/linux/sunxi/patches-4.9/0011-arm64-dts-add-Pine64-support.patch b/target/linux/sunxi/patches-4.9/0011-arm64-dts-add-Pine64-support.patch new file mode 100644 index 000000000..9960588ab --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0011-arm64-dts-add-Pine64-support.patch @@ -0,0 +1,176 @@ +From 4e3886081848b7ea16452a92c4324acaab644d49 Mon Sep 17 00:00:00 2001 +From: Andre Przywara +Date: Tue, 19 Jan 2016 10:36:39 +0000 +Subject: arm64: dts: add Pine64 support + +The Pine64 is a cost-efficient development board based on the +Allwinner A64 SoC. +There are three models: the basic version with Fast Ethernet and +512 MB of DRAM (Pine64) and two Pine64+ versions, which both +feature Gigabit Ethernet and additional connectors for touchscreens +and a camera. Or as my son put it: "Those are smaller and these are +missing." ;-) +The two Pine64+ models just differ in the amount of DRAM +(1GB vs. 2GB). Since U-Boot will figure out the right size for us and +patches the DT accordingly we just need to provide one DT for the +Pine64+. + +Signed-off-by: Andre Przywara +[Maxime: Removed the common DTSI and include directly the pine64 DTS] +Signed-off-by: Maxime Ripard +--- + arch/arm64/boot/dts/Makefile | 1 + + arch/arm64/boot/dts/allwinner/Makefile | 5 ++ + .../boot/dts/allwinner/sun50i-a64-pine64-plus.dts | 50 +++++++++++++++ + .../arm64/boot/dts/allwinner/sun50i-a64-pine64.dts | 74 ++++++++++++++++++++++ + 4 files changed, 130 insertions(+) + create mode 100644 arch/arm64/boot/dts/allwinner/Makefile + create mode 100644 arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts + create mode 100644 arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts + +--- a/arch/arm64/boot/dts/Makefile ++++ b/arch/arm64/boot/dts/Makefile +@@ -1,4 +1,5 @@ + dts-dirs += al ++dts-dirs += allwinner + dts-dirs += altera + dts-dirs += amd + dts-dirs += amlogic +--- /dev/null ++++ b/arch/arm64/boot/dts/allwinner/Makefile +@@ -0,0 +1,5 @@ ++dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pine64-plus.dtb sun50i-a64-pine64.dtb ++ ++always := $(dtb-y) ++subdir-y := $(dts-dirs) ++clean-files := *.dtb +--- /dev/null ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts +@@ -0,0 +1,50 @@ ++/* ++ * Copyright (c) 2016 ARM Ltd. ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPL or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. 
++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "sun50i-a64-pine64.dts" ++ ++/ { ++ model = "Pine64+"; ++ compatible = "pine64,pine64-plus", "allwinner,sun50i-a64"; ++ ++ /* TODO: Camera, Ethernet PHY, touchscreen, etc. */ ++}; +--- /dev/null ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts +@@ -0,0 +1,74 @@ ++/* ++ * Copyright (c) 2016 ARM Ltd. ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPL or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++/dts-v1/; ++ ++#include "sun50i-a64.dtsi" ++ ++/ { ++ model = "Pine64"; ++ compatible = "pine64,pine64", "allwinner,sun50i-a64"; ++ ++ aliases { ++ serial0 = &uart0; ++ }; ++ ++ chosen { ++ stdout-path = "serial0:115200n8"; ++ }; ++}; ++ ++&uart0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&uart0_pins_a>; ++ status = "okay"; ++}; ++ ++&i2c1 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&i2c1_pins>; ++ status = "okay"; ++}; ++ ++&i2c1_pins { ++ bias-pull-up; ++}; diff --git a/target/linux/sunxi/patches-4.9/0012-arm64-dts-fix-build-errors-from-missing-dependencies.patch b/target/linux/sunxi/patches-4.9/0012-arm64-dts-fix-build-errors-from-missing-dependencies.patch new file mode 100644 index 000000000..1719b682b --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0012-arm64-dts-fix-build-errors-from-missing-dependencies.patch @@ -0,0 +1,134 @@ +From f98121f3ef3d36f4d040b11ab38f15387f6eefa2 Mon Sep 17 00:00:00 2001 +From: Arnd Bergmann +Date: Wed, 30 Nov 2016 15:08:55 +0100 +Subject: arm64: dts: fix build errors from missing dependencies + +Two branches were incorrectly sent without having the necessary +header file changes. Rather than back those out now, I'm replacing +the symbolic names for the clks and resets with the numeric +values to get 'make allmodconfig dtbs' back to work. + +After the header file changes are merged, we can revert this +patch. + +Fixes: 6bc37fa ("arm64: dts: add Allwinner A64 SoC .dtsi") +Fixes: 50784e6 ("dts: arm64: db820c: add pmic pins specific dts file") +Acked-by: Andre Przywara +Acked-by: Maxime Ripard +Acked-by: Srinivas Kandagatla +Signed-off-by: Arnd Bergmann +--- + arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 36 ++++++++++------------ + .../boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi | 2 +- + 2 files changed, 18 insertions(+), 20 deletions(-) + +--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +@@ -42,10 +42,8 @@ + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +-#include + #include + #include +-#include + + / { + interrupt-parent = <&gic>; +@@ -137,7 +135,7 @@ + interrupts = , + , + ; +- clocks = <&ccu CLK_BUS_PIO>; ++ clocks = <&ccu 58>; + gpio-controller; + #gpio-cells = <3>; + interrupt-controller; +@@ -160,8 +158,8 @@ + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; +- clocks = <&ccu CLK_BUS_UART0>; +- resets = <&ccu RST_BUS_UART0>; ++ clocks = <&ccu 67>; ++ resets = <&ccu 46>; + status = "disabled"; + }; + +@@ -171,8 +169,8 @@ + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; +- clocks = <&ccu CLK_BUS_UART1>; +- resets = <&ccu RST_BUS_UART1>; ++ clocks = <&ccu 68>; ++ resets = <&ccu 47>; + status = "disabled"; + }; + +@@ -182,8 +180,8 @@ + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; +- clocks = <&ccu CLK_BUS_UART2>; +- resets = <&ccu RST_BUS_UART2>; ++ clocks = <&ccu 69>; ++ resets = <&ccu 48>; + status = "disabled"; + }; + +@@ -193,8 +191,8 @@ + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; +- clocks = <&ccu CLK_BUS_UART3>; +- resets = <&ccu RST_BUS_UART3>; ++ clocks = <&ccu 70>; ++ resets = <&ccu 49>; + status = "disabled"; + }; + +@@ -204,8 +202,8 @@ + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; +- clocks = <&ccu CLK_BUS_UART4>; +- resets = <&ccu RST_BUS_UART4>; ++ clocks = <&ccu 71>; ++ resets = <&ccu 50>; + status = "disabled"; + }; + +@@ -213,8 +211,8 @@ + compatible = "allwinner,sun6i-a31-i2c"; + reg = <0x01c2ac00 0x400>; + interrupts = ; +- clocks = <&ccu CLK_BUS_I2C0>; +- resets = <&ccu RST_BUS_I2C0>; ++ clocks = <&ccu 63>; ++ resets = <&ccu 42>; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; +@@ -224,8 +222,8 @@ + compatible = "allwinner,sun6i-a31-i2c"; + reg = <0x01c2b000 0x400>; + interrupts = ; +- clocks = <&ccu CLK_BUS_I2C1>; +- resets = <&ccu RST_BUS_I2C1>; ++ clocks = <&ccu 64>; ++ resets = <&ccu 43>; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; +@@ -235,8 +233,8 @@ + compatible = "allwinner,sun6i-a31-i2c"; + reg = <0x01c2b400 0x400>; + interrupts = ; +- clocks = <&ccu CLK_BUS_I2C2>; +- resets = <&ccu RST_BUS_I2C2>; ++ clocks = <&ccu 65>; ++ resets = <&ccu 44>; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; diff --git a/target/linux/sunxi/patches-4.9/0013-arm64-dts-allwinner-add-USB1-related-nodes-of-Allwin.patch b/target/linux/sunxi/patches-4.9/0013-arm64-dts-allwinner-add-USB1-related-nodes-of-Allwin.patch new file mode 100644 index 000000000..f96570c85 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0013-arm64-dts-allwinner-add-USB1-related-nodes-of-Allwin.patch @@ -0,0 +1,84 @@ +From a004ee350177ece3c059831ea49293d62aea7ca6 Mon Sep 17 00:00:00 2001 +From: Icenowy Zheng +Date: Tue, 22 Nov 2016 23:58:29 +0800 +Subject: arm64: dts: allwinner: add USB1-related nodes of Allwinner A64 + +Allwinner A64 have two HCI USB controllers, a OTG controller and a USB +PHY device which have two ports. One of the port is wired to both a HCI +USB controller and the OTG controller, which is currently not supported. +The another one is only wired to a HCI controller, and the device node of +OHCI/EHCI controller of the port can be added now. + +Also the A64 USB PHY device node is also added for the HCI controllers to +work. 
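For reference, a minimal sketch of how a host controller driver would claim the port PHY that these nodes reference via phys = <&usbphy 1> and phy-names = "usb", using the generic PHY framework. The function is illustrative, not part of the patch:

#include <linux/phy/phy.h>
#include <linux/platform_device.h>

static int example_claim_phy(struct platform_device *pdev)
{
	/* Look up the "usb" entry of the node's phy-names list */
	struct phy *phy = devm_phy_get(&pdev->dev, "usb");
	int ret;

	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* Initialise and power the port before handing it to the HCD */
	ret = phy_init(phy);
	if (ret)
		return ret;

	return phy_power_on(phy);
}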
+
+Signed-off-by: Icenowy Zheng
+Signed-off-by: Maxime Ripard
+---
+ arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 46 +++++++++++++++++++++++++++
+ 1 file changed, 46 insertions(+)
+
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+@@ -42,8 +42,10 @@
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
++#include
+ #include
+ #include
++#include
+
+ / {
+ interrupt-parent = <&gic>;
+@@ -120,6 +122,50 @@
+ #size-cells = <1>;
+ ranges;
+
++ usbphy: phy@01c19400 {
++ compatible = "allwinner,sun50i-a64-usb-phy";
++ reg = <0x01c19400 0x14>,
++ <0x01c1b800 0x4>;
++ reg-names = "phy_ctrl",
++ "pmu1";
++ clocks = <&ccu CLK_USB_PHY0>,
++ <&ccu CLK_USB_PHY1>;
++ clock-names = "usb0_phy",
++ "usb1_phy";
++ resets = <&ccu RST_USB_PHY0>,
++ <&ccu RST_USB_PHY1>;
++ reset-names = "usb0_reset",
++ "usb1_reset";
++ status = "disabled";
++ #phy-cells = <1>;
++ };
++
++ ehci1: usb@01c1b000 {
++ compatible = "allwinner,sun50i-a64-ehci", "generic-ehci";
++ reg = <0x01c1b000 0x100>;
++ interrupts = ;
++ clocks = <&ccu CLK_BUS_OHCI1>,
++ <&ccu CLK_BUS_EHCI1>,
++ <&ccu CLK_USB_OHCI1>;
++ resets = <&ccu RST_BUS_OHCI1>,
++ <&ccu RST_BUS_EHCI1>;
++ phys = <&usbphy 1>;
++ phy-names = "usb";
++ status = "disabled";
++ };
++
++ ohci1: usb@01c1b400 {
++ compatible = "allwinner,sun50i-a64-ohci", "generic-ohci";
++ reg = <0x01c1b400 0x100>;
++ interrupts = ;
++ clocks = <&ccu CLK_BUS_OHCI1>,
++ <&ccu CLK_USB_OHCI1>;
++ resets = <&ccu RST_BUS_OHCI1>;
++ phys = <&usbphy 1>;
++ phy-names = "usb";
++ status = "disabled";
++ };
++
+ ccu: clock@01c20000 {
+ compatible = "allwinner,sun50i-a64-ccu";
+ reg = <0x01c20000 0x400>;
diff --git a/target/linux/sunxi/patches-4.9/0014-arm64-dts-allwinner-sort-the-nodes-in-sun50i-a64-pin.patch b/target/linux/sunxi/patches-4.9/0014-arm64-dts-allwinner-sort-the-nodes-in-sun50i-a64-pin.patch
new file mode 100644
index 000000000..4c7d6dafe
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0014-arm64-dts-allwinner-sort-the-nodes-in-sun50i-a64-pin.patch
@@ -0,0 +1,40 @@
+From ac93c09cdbaf1229c21f67a5db1c3c6df7d503e5 Mon Sep 17 00:00:00 2001
+From: Icenowy Zheng
+Date: Tue, 22 Nov 2016 23:58:30 +0800
+Subject: arm64: dts: allwinner: sort the nodes in sun50i-a64-pine64.dts
+
+In this dts file, the uart0 node is put before i2c1.
+
+Move the uart0 node to the end to satisfy alphabetical order.
+ +Signed-off-by: Icenowy Zheng +Signed-off-by: Maxime Ripard +--- + arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts +@@ -57,12 +57,6 @@ + }; + }; + +-&uart0 { +- pinctrl-names = "default"; +- pinctrl-0 = <&uart0_pins_a>; +- status = "okay"; +-}; +- + &i2c1 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_pins>; +@@ -72,3 +66,9 @@ + &i2c1_pins { + bias-pull-up; + }; ++ ++&uart0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&uart0_pins_a>; ++ status = "okay"; ++}; diff --git a/target/linux/sunxi/patches-4.9/0015-arm64-dts-allwinner-enable-EHCI1-OHCI1-and-USB-PHY-n.patch b/target/linux/sunxi/patches-4.9/0015-arm64-dts-allwinner-enable-EHCI1-OHCI1-and-USB-PHY-n.patch new file mode 100644 index 000000000..451ec595c --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0015-arm64-dts-allwinner-enable-EHCI1-OHCI1-and-USB-PHY-n.patch @@ -0,0 +1,47 @@ +From d49f9dbc8f0c4521fa56477d051a3bd1158f2595 Mon Sep 17 00:00:00 2001 +From: Icenowy Zheng +Date: Tue, 22 Nov 2016 23:58:31 +0800 +Subject: arm64: dts: allwinner: enable EHCI1, OHCI1 and USB PHY nodes in + Pine64 + +Pine64 have two USB Type-A ports, which are wired to the two ports of +A64 USB PHY, and the lower port is the EHCI/OHCI1 port. + +Enable the necessary nodes to enable the lower USB port to work. + +Signed-off-by: Icenowy Zheng +Signed-off-by: Maxime Ripard +--- + arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts +@@ -57,6 +57,10 @@ + }; + }; + ++&ehci1 { ++ status = "okay"; ++}; ++ + &i2c1 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_pins>; +@@ -67,8 +71,16 @@ + bias-pull-up; + }; + ++&ohci1 { ++ status = "okay"; ++}; ++ + &uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_pins_a>; + status = "okay"; + }; ++ ++&usbphy { ++ status = "okay"; ++}; diff --git a/target/linux/sunxi/patches-4.9/0016-arm64-dts-add-MUSB-node-to-Allwinner-A64-dtsi.patch b/target/linux/sunxi/patches-4.9/0016-arm64-dts-add-MUSB-node-to-Allwinner-A64-dtsi.patch new file mode 100644 index 000000000..804d00521 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0016-arm64-dts-add-MUSB-node-to-Allwinner-A64-dtsi.patch @@ -0,0 +1,42 @@ +From 972a3ecdf27f3ebdd1ce0dccd1b548ef3c04b8ed Mon Sep 17 00:00:00 2001 +From: Icenowy Zheng +Date: Wed, 23 Nov 2016 00:59:01 +0800 +Subject: arm64: dts: add MUSB node to Allwinner A64 dtsi + +Allwinner A64 SoC has a MUSB controller like the one in A33, so add +a node for it, just use the compatible of A33 MUSB. + +Host mode is tested to work properly on Pine64 and will be added into +the device tree of Pine64 in next patch. + +Peripheral mode is also tested on Pine64, by changing dr_mode property +of usb_otg node and use a non-standard USB Type-A to Type-A cable. 
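+
+The peripheral-mode test described above amounts to a one-property
+override in the board file; a hypothetical sketch, not part of this
+series (the next patch enables host mode instead):
+
+    &usb_otg {
+        dr_mode = "peripheral"; /* "host" and "otg" are the other standard values */
+        status = "okay";
+    };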
+ +Signed-off-by: Icenowy Zheng +Signed-off-by: Maxime Ripard +--- + arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +@@ -122,6 +122,19 @@ + #size-cells = <1>; + ranges; + ++ usb_otg: usb@01c19000 { ++ compatible = "allwinner,sun8i-a33-musb"; ++ reg = <0x01c19000 0x0400>; ++ clocks = <&ccu CLK_BUS_OTG>; ++ resets = <&ccu RST_BUS_OTG>; ++ interrupts = ; ++ interrupt-names = "mc"; ++ phys = <&usbphy 0>; ++ phy-names = "usb"; ++ extcon = <&usbphy 0>; ++ status = "disabled"; ++ }; ++ + usbphy: phy@01c19400 { + compatible = "allwinner,sun50i-a64-usb-phy"; + reg = <0x01c19400 0x14>, diff --git a/target/linux/sunxi/patches-4.9/0017-arm64-dts-enable-the-MUSB-controller-of-Pine64-in-ho.patch b/target/linux/sunxi/patches-4.9/0017-arm64-dts-enable-the-MUSB-controller-of-Pine64-in-ho.patch new file mode 100644 index 000000000..3992ab651 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0017-arm64-dts-enable-the-MUSB-controller-of-Pine64-in-ho.patch @@ -0,0 +1,32 @@ +From f57e8384c5d2417fd8707c577d8e622fc1570b6c Mon Sep 17 00:00:00 2001 +From: Icenowy Zheng +Date: Wed, 23 Nov 2016 00:59:02 +0800 +Subject: arm64: dts: enable the MUSB controller of Pine64 in host-only mode + +A64 has a MUSB controller wired to the USB PHY 0, which is connected +to the upper USB Type-A port of Pine64. + +As the port is a Type-A female port, enable it in host-only mode in the +device tree, which makes devices with USB Type-A male port can work on +this port (which is originally designed by Pine64 team). + +Signed-off-by: Icenowy Zheng +Signed-off-by: Maxime Ripard +--- + arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts +@@ -81,6 +81,11 @@ + status = "okay"; + }; + ++&usb_otg { ++ dr_mode = "host"; ++ status = "okay"; ++}; ++ + &usbphy { + status = "okay"; + }; diff --git a/target/linux/sunxi/patches-4.9/0018-arm64-dts-allwinner-Remove-no-longer-used-pinctrl-su.patch b/target/linux/sunxi/patches-4.9/0018-arm64-dts-allwinner-Remove-no-longer-used-pinctrl-su.patch new file mode 100644 index 000000000..d2d54e937 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0018-arm64-dts-allwinner-Remove-no-longer-used-pinctrl-su.patch @@ -0,0 +1,31 @@ +From 4f9758302ccaf753cd4ba6a5eb740392a4d24773 Mon Sep 17 00:00:00 2001 +From: Chen-Yu Tsai +Date: Tue, 24 Jan 2017 10:32:29 +0800 +Subject: arm64: dts: allwinner: Remove no longer used pinctrl/sun4i-a10.h + header + +All dts files for the sunxi platform have been switched to the generic +pinconf bindings. As a result, the sunxi specific pinctrl macros are +no longer used. + +Remove the #include entry with the following command: + + sed -i -e '/pinctrl\/sun4i-a10.h/D' \ + arch/arm64/boot/dts/allwinner/*.dts? 
+ +Signed-off-by: Chen-Yu Tsai +Signed-off-by: Maxime Ripard +--- + arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 1 - + 1 file changed, 1 deletion(-) + +--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +@@ -44,7 +44,6 @@ + + #include + #include +-#include + #include + + / { diff --git a/target/linux/sunxi/patches-4.9/0019-arm64-allwinner-a64-Add-MMC-nodes.patch b/target/linux/sunxi/patches-4.9/0019-arm64-allwinner-a64-Add-MMC-nodes.patch new file mode 100644 index 000000000..1f91e9bea --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0019-arm64-allwinner-a64-Add-MMC-nodes.patch @@ -0,0 +1,69 @@ +From f3dff3478a8a7b09f9a92023955a151584658893 Mon Sep 17 00:00:00 2001 +From: Andre Przywara +Date: Thu, 6 Oct 2016 02:25:22 +0100 +Subject: arm64: allwinner: a64: Add MMC nodes + +The A64 has 3 MMC controllers, one of them being especially targeted to +eMMC. Among other things, it has a data strobe signal and a 8 bits data +width. + +The two other are more usual controllers that will have a 4 bits width at +most and no data strobe signal, which limits it to more usual SD or MMC +peripherals. + +Signed-off-by: Andre Przywara +Signed-off-by: Maxime Ripard +Tested-by: Florian Vaussard +Acked-by: Chen-Yu Tsai +--- + arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 39 +++++++++++++++++++++++++++ + 1 file changed, 39 insertions(+) + +--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +@@ -121,6 +121,45 @@ + #size-cells = <1>; + ranges; + ++ mmc0: mmc@1c0f000 { ++ compatible = "allwinner,sun50i-a64-mmc"; ++ reg = <0x01c0f000 0x1000>; ++ clocks = <&ccu CLK_BUS_MMC0>, <&ccu CLK_MMC0>; ++ clock-names = "ahb", "mmc"; ++ resets = <&ccu RST_BUS_MMC0>; ++ reset-names = "ahb"; ++ interrupts = ; ++ status = "disabled"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ mmc1: mmc@1c10000 { ++ compatible = "allwinner,sun50i-a64-mmc"; ++ reg = <0x01c10000 0x1000>; ++ clocks = <&ccu CLK_BUS_MMC1>, <&ccu CLK_MMC1>; ++ clock-names = "ahb", "mmc"; ++ resets = <&ccu RST_BUS_MMC1>; ++ reset-names = "ahb"; ++ interrupts = ; ++ status = "disabled"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ mmc2: mmc@1c11000 { ++ compatible = "allwinner,sun50i-a64-emmc"; ++ reg = <0x01c11000 0x1000>; ++ clocks = <&ccu CLK_BUS_MMC2>, <&ccu CLK_MMC2>; ++ clock-names = "ahb", "mmc"; ++ resets = <&ccu RST_BUS_MMC2>; ++ reset-names = "ahb"; ++ interrupts = ; ++ status = "disabled"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ + usb_otg: usb@01c19000 { + compatible = "allwinner,sun8i-a33-musb"; + reg = <0x01c19000 0x0400>; diff --git a/target/linux/sunxi/patches-4.9/0020-arm64-allwinner-a64-Add-MMC-pinctrl-nodes.patch b/target/linux/sunxi/patches-4.9/0020-arm64-allwinner-a64-Add-MMC-pinctrl-nodes.patch new file mode 100644 index 000000000..f5af0f819 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0020-arm64-allwinner-a64-Add-MMC-pinctrl-nodes.patch @@ -0,0 +1,50 @@ +From a3e8f4926248b3c12933aacec4432e9b6de004bb Mon Sep 17 00:00:00 2001 +From: Maxime Ripard +Date: Mon, 9 Jan 2017 16:39:15 +0100 +Subject: arm64: allwinner: a64: Add MMC pinctrl nodes + +The A64 only has a single set of pins for each MMC controller. Since we +already have boards that require all of them, let's add them to the DTSI. 
+ +Reviewed-by: Andre Przywara +Signed-off-by: Maxime Ripard +Tested-by: Florian Vaussard +Acked-by: Chen-Yu Tsai +--- + arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 25 +++++++++++++++++++++++++ + 1 file changed, 25 insertions(+) + +--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +@@ -243,6 +243,31 @@ + function = "i2c1"; + }; + ++ mmc0_pins: mmc0-pins { ++ pins = "PF0", "PF1", "PF2", "PF3", ++ "PF4", "PF5"; ++ function = "mmc0"; ++ drive-strength = <30>; ++ bias-pull-up; ++ }; ++ ++ mmc1_pins: mmc1-pins { ++ pins = "PG0", "PG1", "PG2", "PG3", ++ "PG4", "PG5"; ++ function = "mmc1"; ++ drive-strength = <30>; ++ bias-pull-up; ++ }; ++ ++ mmc2_pins: mmc2-pins { ++ pins = "PC1", "PC5", "PC6", "PC8", "PC9", ++ "PC10","PC11", "PC12", "PC13", ++ "PC14", "PC15", "PC16"; ++ function = "mmc2"; ++ drive-strength = <30>; ++ bias-pull-up; ++ }; ++ + uart0_pins_a: uart0@0 { + pins = "PB8", "PB9"; + function = "uart0"; diff --git a/target/linux/sunxi/patches-4.9/0022-arm64-allwinner-pine64-add-MMC-support.patch b/target/linux/sunxi/patches-4.9/0022-arm64-allwinner-pine64-add-MMC-support.patch new file mode 100644 index 000000000..c60e5104c --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0022-arm64-allwinner-pine64-add-MMC-support.patch @@ -0,0 +1,62 @@ +From ebe3ae29c6314217edf40d9ee23c36d610ff0fb8 Mon Sep 17 00:00:00 2001 +From: Andre Przywara +Date: Tue, 10 Jan 2017 01:22:31 +0000 +Subject: arm64: allwinner: pine64: add MMC support + +All Pine64 boards connect an micro-SD card slot to the first MMC +controller. +Enable the respective DT node and specify the (always-on) regulator +and card-detect pin. +As a micro-SD slot does not feature a write-protect switch, we disable +this feature. + +Signed-off-by: Andre Przywara +Signed-off-by: Maxime Ripard +Acked-by: Chen-Yu Tsai +--- + arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts | 20 ++++++++++++++++++++ + 1 file changed, 20 insertions(+) + +--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts +@@ -44,6 +44,8 @@ + + #include "sun50i-a64.dtsi" + ++#include ++ + / { + model = "Pine64"; + compatible = "pine64,pine64", "allwinner,sun50i-a64"; +@@ -55,6 +57,13 @@ + chosen { + stdout-path = "serial0:115200n8"; + }; ++ ++ reg_vcc3v3: vcc3v3 { ++ compatible = "regulator-fixed"; ++ regulator-name = "vcc3v3"; ++ regulator-min-microvolt = <3300000>; ++ regulator-max-microvolt = <3300000>; ++ }; + }; + + &ehci1 { +@@ -71,6 +80,17 @@ + bias-pull-up; + }; + ++&mmc0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&mmc0_pins>; ++ vmmc-supply = <®_vcc3v3>; ++ cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; ++ cd-inverted; ++ disable-wp; ++ bus-width = <4>; ++ status = "okay"; ++}; ++ + &ohci1 { + status = "okay"; + }; diff --git a/target/linux/sunxi/patches-4.9/0023-arm64-allwinner-a64-add-UART1-pin-nodes.patch b/target/linux/sunxi/patches-4.9/0023-arm64-allwinner-a64-add-UART1-pin-nodes.patch new file mode 100644 index 000000000..998d51435 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0023-arm64-allwinner-a64-add-UART1-pin-nodes.patch @@ -0,0 +1,35 @@ +From e7ba733d32cc9487b62b07219ad911c77764a681 Mon Sep 17 00:00:00 2001 +From: Andre Przywara +Date: Tue, 10 Jan 2017 01:22:32 +0000 +Subject: arm64: allwinner: a64: add UART1 pin nodes + +On many boards UART1 connects to a Bluetooth chip, so add the pinctrl +nodes for the only pins providing access to that UART. That includes +those pins for hardware flow control (RTS/CTS). 
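+
+No board in this series wires these groups up yet; a board with a
+Bluetooth chip on UART1 would typically combine both groups, roughly as
+follows (hypothetical board snippet, assuming the uart1_pins and
+uart1_rts_cts_pins labels added below):
+
+    &uart1 {
+        pinctrl-names = "default";
+        pinctrl-0 = <&uart1_pins>, <&uart1_rts_cts_pins>;
+        status = "okay";
+    };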
+
+Signed-off-by: Andre Przywara
+Signed-off-by: Maxime Ripard
+Acked-by: Chen-Yu Tsai
+---
+ arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+@@ -272,6 +272,16 @@
+ pins = "PB8", "PB9";
+ function = "uart0";
+ };
++
++ uart1_pins: uart1_pins {
++ pins = "PG6", "PG7";
++ function = "uart1";
++ };
++
++ uart1_rts_cts_pins: uart1_rts_cts_pins {
++ pins = "PG8", "PG9";
++ function = "uart1";
++ };
+ };
+
+ uart0: serial@1c28000 {
diff --git a/target/linux/sunxi/patches-4.9/0024-arm64-allwinner-a64-add-r_ccu-node.patch b/target/linux/sunxi/patches-4.9/0024-arm64-allwinner-a64-add-r_ccu-node.patch
new file mode 100644
index 000000000..19a358970
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0024-arm64-allwinner-a64-add-r_ccu-node.patch
@@ -0,0 +1,52 @@
+From 791a9e001d3ba3b552888b0bf3c592a50b71f57e Mon Sep 17 00:00:00 2001
+From: Icenowy Zheng
+Date: Tue, 4 Apr 2017 17:50:58 +0800
+Subject: arm64: allwinner: a64: add r_ccu node
+
+The A64 SoC has a CCU (r_ccu) in its PRCM block.
+
+Add the device node for it.
+
+Mux 3 of the R_CCU is an internal oscillator, which is 16MHz according
+to the user manual, and has only 30% accuracy based on our experience
+on older SoCs. The real measured value of it on two Pine64 boards is
+around 11MHz, which is around 70% of 16MHz.
+
+Signed-off-by: Icenowy Zheng
+Signed-off-by: Maxime Ripard
+---
+ arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 17 +++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+@@ -98,6 +98,14 @@
+ clock-output-names = "osc32k";
+ };
+
++ iosc: internal-osc-clk {
++ #clock-cells = <0>;
++ compatible = "fixed-clock";
++ clock-frequency = <16000000>;
++ clock-accuracy = <300000000>;
++ clock-output-names = "iosc";
++ };
++
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+@@ -389,5 +397,14 @@
+ interrupts = ,
+ ;
+ };
++
++ r_ccu: clock@1f01400 {
++ compatible = "allwinner,sun50i-a64-r-ccu";
++ reg = <0x01f01400 0x100>;
++ clocks = <&osc24M>, <&osc32k>, <&iosc>;
++ clock-names = "hosc", "losc", "iosc";
++ #clock-cells = <1>;
++ #reset-cells = <1>;
++ };
+ };
+ };
diff --git a/target/linux/sunxi/patches-4.9/0025-arm64-allwinner-a64-add-R_PIO-pinctrl-node.patch b/target/linux/sunxi/patches-4.9/0025-arm64-allwinner-a64-add-R_PIO-pinctrl-node.patch
new file mode 100644
index 000000000..ff541ce63
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0025-arm64-allwinner-a64-add-R_PIO-pinctrl-node.patch
@@ -0,0 +1,35 @@
+From ec4279053a6434f685246e022be95d2a62f8c608 Mon Sep 17 00:00:00 2001
+From: Icenowy Zheng
+Date: Tue, 4 Apr 2017 17:51:00 +0800
+Subject: arm64: allwinner: a64: add R_PIO pinctrl node
+
+The Allwinner A64 has a dedicated pin controller to manage the PL pin
+bank. As the driver and the required clock support are added, add the
+device node for it.
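+
+Once this node is in place, PL-bank pins are consumable like any other
+sunxi GPIO through the three-cell <bank pin flags> form declared by
+"#gpio-cells = <3>" below, with bank 0 of r_pio being the L bank. A
+hypothetical consumer, a fixed regulator gated from PL8 (the pin choice
+is made up for illustration):
+
+    reg_vcc_wifi: vcc-wifi {
+        compatible = "regulator-fixed";
+        regulator-name = "vcc-wifi";
+        regulator-min-microvolt = <3300000>;
+        regulator-max-microvolt = <3300000>;
+        gpio = <&r_pio 0 8 GPIO_ACTIVE_HIGH>; /* PL8 */
+        enable-active-high;
+    };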
+ +Signed-off-by: Icenowy Zheng +Signed-off-by: Maxime Ripard +--- + arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +@@ -406,5 +406,17 @@ + #clock-cells = <1>; + #reset-cells = <1>; + }; ++ ++ r_pio: pinctrl@01f02c00 { ++ compatible = "allwinner,sun50i-a64-r-pinctrl"; ++ reg = <0x01f02c00 0x400>; ++ interrupts = ; ++ clocks = <&r_ccu 3>, <&osc24M>, <&osc32k>; ++ clock-names = "apb", "hosc", "losc"; ++ gpio-controller; ++ #gpio-cells = <3>; ++ interrupt-controller; ++ #interrupt-cells = <3>; ++ }; + }; + }; diff --git a/target/linux/sunxi/patches-4.9/0026-arm64-allwinner-a64-add-pmu0-regs-for-USB-PHY.patch b/target/linux/sunxi/patches-4.9/0026-arm64-allwinner-a64-add-pmu0-regs-for-USB-PHY.patch new file mode 100644 index 000000000..0d7803775 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0026-arm64-allwinner-a64-add-pmu0-regs-for-USB-PHY.patch @@ -0,0 +1,29 @@ +From 0d98479738b950e30bb4f782d60099d44076ad67 Mon Sep 17 00:00:00 2001 +From: Icenowy Zheng +Date: Wed, 5 Apr 2017 22:30:34 +0800 +Subject: arm64: allwinner: a64: add pmu0 regs for USB PHY + +The USB PHY in A64 has a "pmu0" region, which controls the EHCI/OHCI +controller pair that can be connected to the PHY0. + +Add the MMIO region for PHY node. + +Signed-off-by: Icenowy Zheng +Signed-off-by: Maxime Ripard +--- + arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +@@ -184,8 +184,10 @@ + usbphy: phy@01c19400 { + compatible = "allwinner,sun50i-a64-usb-phy"; + reg = <0x01c19400 0x14>, ++ <0x01c1a800 0x4>, + <0x01c1b800 0x4>; + reg-names = "phy_ctrl", ++ "pmu0", + "pmu1"; + clocks = <&ccu CLK_USB_PHY0>, + <&ccu CLK_USB_PHY1>; diff --git a/target/linux/sunxi/patches-4.9/0027-arm64-allwinner-a64-Add-PLL_PERIPH0-clock-to-the-R_C.patch b/target/linux/sunxi/patches-4.9/0027-arm64-allwinner-a64-Add-PLL_PERIPH0-clock-to-the-R_C.patch new file mode 100644 index 000000000..319dba659 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0027-arm64-allwinner-a64-Add-PLL_PERIPH0-clock-to-the-R_C.patch @@ -0,0 +1,32 @@ +From f74994a94063bc85ac1d6ad677ed06b5279c101f Mon Sep 17 00:00:00 2001 +From: Chen-Yu Tsai +Date: Wed, 31 May 2017 15:58:24 +0800 +Subject: arm64: allwinner: a64: Add PLL_PERIPH0 clock to the R_CCU + +The AR100 clock within the R_CCU (PRCM) has the PLL_PERIPH0 as one of +its parents. + +This adds the reference in the device tree describing this relationship. +This patch uses a raw number for the clock index to ease merging by +avoiding cross tree dependencies. 
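+
+For reference, once the binding header defining the new index is merged,
+the raw number can be swapped for its symbolic name; assuming
+CLK_PLL_PERIPH0 from dt-bindings/clock/sun50i-a64-ccu.h expands to the
+same index 11 used below, the property would read:
+
+    clocks = <&osc24M>, <&osc32k>, <&iosc>,
+             <&ccu CLK_PLL_PERIPH0>;
+    clock-names = "hosc", "losc", "iosc", "pll-periph";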
+ +Signed-off-by: Chen-Yu Tsai +Signed-off-by: Maxime Ripard +--- + arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +@@ -403,8 +403,9 @@ + r_ccu: clock@1f01400 { + compatible = "allwinner,sun50i-a64-r-ccu"; + reg = <0x01f01400 0x100>; +- clocks = <&osc24M>, <&osc32k>, <&iosc>; +- clock-names = "hosc", "losc", "iosc"; ++ clocks = <&osc24M>, <&osc32k>, <&iosc>, ++ <&ccu 11>; ++ clock-names = "hosc", "losc", "iosc", "pll-periph"; + #clock-cells = <1>; + #reset-cells = <1>; + }; diff --git a/target/linux/sunxi/patches-4.9/0030-pinctrl-sunxi-Rework-the-pin-config-building-code.patch b/target/linux/sunxi/patches-4.9/0030-pinctrl-sunxi-Rework-the-pin-config-building-code.patch new file mode 100644 index 000000000..498581712 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0030-pinctrl-sunxi-Rework-the-pin-config-building-code.patch @@ -0,0 +1,251 @@ +From f233dbca6227703eaae2f67d6d9c79819773f16b Mon Sep 17 00:00:00 2001 +From: Maxime Ripard +Date: Tue, 11 Oct 2016 17:45:59 +0200 +Subject: pinctrl: sunxi: Rework the pin config building code + +In order to support more easily the generic pinctrl properties, rework the +pinctrl maps configuration and split it into several sub-functions. + +One of the side-effects from that rework is that we only parse the pin +configuration once, since it's going to be common to every pin, instead of +having to parsing once for each pin. + +Signed-off-by: Maxime Ripard +Signed-off-by: Linus Walleij +--- + drivers/pinctrl/sunxi/pinctrl-sunxi.c | 178 +++++++++++++++++++++++++--------- + 1 file changed, 130 insertions(+), 48 deletions(-) + +--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c ++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c +@@ -145,6 +145,110 @@ static int sunxi_pctrl_get_group_pins(st + return 0; + } + ++static bool sunxi_pctrl_has_bias_prop(struct device_node *node) ++{ ++ return of_find_property(node, "allwinner,pull", NULL); ++} ++ ++static bool sunxi_pctrl_has_drive_prop(struct device_node *node) ++{ ++ return of_find_property(node, "allwinner,drive", NULL); ++} ++ ++static int sunxi_pctrl_parse_bias_prop(struct device_node *node) ++{ ++ u32 val; ++ ++ if (of_property_read_u32(node, "allwinner,pull", &val)) ++ return -EINVAL; ++ ++ switch (val) { ++ case 1: ++ return PIN_CONFIG_BIAS_PULL_UP; ++ case 2: ++ return PIN_CONFIG_BIAS_PULL_DOWN; ++ } ++ ++ return -EINVAL; ++} ++ ++static int sunxi_pctrl_parse_drive_prop(struct device_node *node) ++{ ++ u32 val; ++ ++ if (of_property_read_u32(node, "allwinner,drive", &val)) ++ return -EINVAL; ++ ++ return (val + 1) * 10; ++} ++ ++static const char *sunxi_pctrl_parse_function_prop(struct device_node *node) ++{ ++ const char *function; ++ int ret; ++ ++ ret = of_property_read_string(node, "allwinner,function", &function); ++ if (!ret) ++ return function; ++ ++ return NULL; ++} ++ ++static const char *sunxi_pctrl_find_pins_prop(struct device_node *node, ++ int *npins) ++{ ++ int count; ++ ++ count = of_property_count_strings(node, "allwinner,pins"); ++ if (count > 0) { ++ *npins = count; ++ return "allwinner,pins"; ++ } ++ ++ return NULL; ++} ++ ++static unsigned long *sunxi_pctrl_build_pin_config(struct device_node *node, ++ unsigned int *len) ++{ ++ unsigned long *pinconfig; ++ unsigned int configlen = 0, idx = 0; ++ ++ if (sunxi_pctrl_has_drive_prop(node)) ++ configlen++; ++ if (sunxi_pctrl_has_bias_prop(node)) ++ configlen++; ++ ++ pinconfig = 
kzalloc(configlen * sizeof(*pinconfig), GFP_KERNEL); ++ if (!pinconfig) ++ return NULL; ++ ++ if (sunxi_pctrl_has_drive_prop(node)) { ++ int drive = sunxi_pctrl_parse_drive_prop(node); ++ if (drive < 0) ++ goto err_free; ++ ++ pinconfig[idx++] = pinconf_to_config_packed(PIN_CONFIG_DRIVE_STRENGTH, ++ drive); ++ } ++ ++ if (sunxi_pctrl_has_bias_prop(node)) { ++ int pull = sunxi_pctrl_parse_bias_prop(node); ++ if (pull < 0) ++ goto err_free; ++ ++ pinconfig[idx++] = pinconf_to_config_packed(pull, 0); ++ } ++ ++ ++ *len = configlen; ++ return pinconfig; ++ ++err_free: ++ kfree(pinconfig); ++ return NULL; ++} ++ + static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev, + struct device_node *node, + struct pinctrl_map **map, +@@ -153,38 +257,45 @@ static int sunxi_pctrl_dt_node_to_map(st + struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + unsigned long *pinconfig; + struct property *prop; +- const char *function; ++ const char *function, *pin_prop; + const char *group; +- int ret, nmaps, i = 0; +- u32 val; ++ int ret, npins, nmaps, configlen = 0, i = 0; + + *map = NULL; + *num_maps = 0; + +- ret = of_property_read_string(node, "allwinner,function", &function); +- if (ret) { +- dev_err(pctl->dev, +- "missing allwinner,function property in node %s\n", ++ function = sunxi_pctrl_parse_function_prop(node); ++ if (!function) { ++ dev_err(pctl->dev, "missing function property in node %s\n", + node->name); + return -EINVAL; + } + +- nmaps = of_property_count_strings(node, "allwinner,pins") * 2; +- if (nmaps < 0) { +- dev_err(pctl->dev, +- "missing allwinner,pins property in node %s\n", ++ pin_prop = sunxi_pctrl_find_pins_prop(node, &npins); ++ if (!pin_prop) { ++ dev_err(pctl->dev, "missing pins property in node %s\n", + node->name); + return -EINVAL; + } + ++ /* ++ * We have two maps for each pin: one for the function, one ++ * for the configuration (bias, strength, etc) ++ */ ++ nmaps = npins * 2; + *map = kmalloc(nmaps * sizeof(struct pinctrl_map), GFP_KERNEL); + if (!*map) + return -ENOMEM; + +- of_property_for_each_string(node, "allwinner,pins", prop, group) { ++ pinconfig = sunxi_pctrl_build_pin_config(node, &configlen); ++ if (!pinconfig) { ++ ret = -EINVAL; ++ goto err_free_map; ++ } ++ ++ of_property_for_each_string(node, pin_prop, prop, group) { + struct sunxi_pinctrl_group *grp = + sunxi_pinctrl_find_group_by_name(pctl, group); +- int j = 0, configlen = 0; + + if (!grp) { + dev_err(pctl->dev, "unknown pin %s", group); +@@ -207,34 +318,6 @@ static int sunxi_pctrl_dt_node_to_map(st + + (*map)[i].type = PIN_MAP_TYPE_CONFIGS_GROUP; + (*map)[i].data.configs.group_or_pin = group; +- +- if (of_find_property(node, "allwinner,drive", NULL)) +- configlen++; +- if (of_find_property(node, "allwinner,pull", NULL)) +- configlen++; +- +- pinconfig = kzalloc(configlen * sizeof(*pinconfig), GFP_KERNEL); +- if (!pinconfig) { +- kfree(*map); +- return -ENOMEM; +- } +- +- if (!of_property_read_u32(node, "allwinner,drive", &val)) { +- u16 strength = (val + 1) * 10; +- pinconfig[j++] = +- pinconf_to_config_packed(PIN_CONFIG_DRIVE_STRENGTH, +- strength); +- } +- +- if (!of_property_read_u32(node, "allwinner,pull", &val)) { +- enum pin_config_param pull = PIN_CONFIG_END; +- if (val == 1) +- pull = PIN_CONFIG_BIAS_PULL_UP; +- else if (val == 2) +- pull = PIN_CONFIG_BIAS_PULL_DOWN; +- pinconfig[j++] = pinconf_to_config_packed(pull, 0); +- } +- + (*map)[i].data.configs.configs = pinconfig; + (*map)[i].data.configs.num_configs = configlen; + +@@ -244,19 +327,18 @@ static int 
sunxi_pctrl_dt_node_to_map(st + *num_maps = nmaps; + + return 0; ++ ++err_free_map: ++ kfree(map); ++ return ret; + } + + static void sunxi_pctrl_dt_free_map(struct pinctrl_dev *pctldev, + struct pinctrl_map *map, + unsigned num_maps) + { +- int i; +- +- for (i = 0; i < num_maps; i++) { +- if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP) +- kfree(map[i].data.configs.configs); +- } +- ++ /* All the maps have the same pin config, free only the first one */ ++ kfree(map[0].data.configs.configs); + kfree(map); + } + diff --git a/target/linux/sunxi/patches-4.9/0031-pinctrl-sunxi-Use-macros-from-bindings-header-file-f.patch b/target/linux/sunxi/patches-4.9/0031-pinctrl-sunxi-Use-macros-from-bindings-header-file-f.patch new file mode 100644 index 000000000..39be96542 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0031-pinctrl-sunxi-Use-macros-from-bindings-header-file-f.patch @@ -0,0 +1,38 @@ +From 42676fa4aa87eda4fc762df495d4bde2ddc4bfce Mon Sep 17 00:00:00 2001 +From: Maxime Ripard +Date: Tue, 11 Oct 2016 17:46:00 +0200 +Subject: pinctrl: sunxi: Use macros from bindings header file for DT parsing + +Since we have some bindings header for our hardcoded flags, let's use them +when we can. + +Acked-by: Chen-Yu Tsai +Signed-off-by: Maxime Ripard +Signed-off-by: Linus Walleij +--- + drivers/pinctrl/sunxi/pinctrl-sunxi.c | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c ++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c +@@ -28,6 +28,8 @@ + #include + #include + ++#include ++ + #include "../core.h" + #include "pinctrl-sunxi.h" + +@@ -163,9 +165,9 @@ static int sunxi_pctrl_parse_bias_prop(s + return -EINVAL; + + switch (val) { +- case 1: ++ case SUN4I_PINCTRL_PULL_UP: + return PIN_CONFIG_BIAS_PULL_UP; +- case 2: ++ case SUN4I_PINCTRL_PULL_DOWN: + return PIN_CONFIG_BIAS_PULL_DOWN; + } + diff --git a/target/linux/sunxi/patches-4.9/0032-pinctrl-sunxi-Handle-bias-disable.patch b/target/linux/sunxi/patches-4.9/0032-pinctrl-sunxi-Handle-bias-disable.patch new file mode 100644 index 000000000..61d6102c9 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0032-pinctrl-sunxi-Handle-bias-disable.patch @@ -0,0 +1,42 @@ +From 07fe64ba213f36ca8f6ffd8c4d5893f022744fdb Mon Sep 17 00:00:00 2001 +From: Maxime Ripard +Date: Tue, 11 Oct 2016 17:46:01 +0200 +Subject: pinctrl: sunxi: Handle bias disable + +So far, putting NO_PULL in allwinner,pull was ignored, behaving like if +that property was not there at all. + +Obviously, this is not the right thing to do, and in that case, we really +need to just disable the bias. 
+ +Acked-by: Chen-Yu Tsai +Signed-off-by: Maxime Ripard +Signed-off-by: Linus Walleij +--- + drivers/pinctrl/sunxi/pinctrl-sunxi.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c ++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c +@@ -165,6 +165,8 @@ static int sunxi_pctrl_parse_bias_prop(s + return -EINVAL; + + switch (val) { ++ case SUN4I_PINCTRL_NO_PULL: ++ return PIN_CONFIG_BIAS_DISABLE; + case SUN4I_PINCTRL_PULL_UP: + return PIN_CONFIG_BIAS_PULL_UP; + case SUN4I_PINCTRL_PULL_DOWN: +@@ -401,6 +403,12 @@ static int sunxi_pconf_group_set(struct + | dlevel << sunxi_dlevel_offset(pin), + pctl->membase + sunxi_dlevel_reg(pin)); + break; ++ case PIN_CONFIG_BIAS_DISABLE: ++ val = readl(pctl->membase + sunxi_pull_reg(pin)); ++ mask = PULL_PINS_MASK << sunxi_pull_offset(pin); ++ writel((val & ~mask), ++ pctl->membase + sunxi_pull_reg(pin)); ++ break; + case PIN_CONFIG_BIAS_PULL_UP: + val = readl(pctl->membase + sunxi_pull_reg(pin)); + mask = PULL_PINS_MASK << sunxi_pull_offset(pin); diff --git a/target/linux/sunxi/patches-4.9/0033-pinctrl-sunxi-Support-generic-binding.patch b/target/linux/sunxi/patches-4.9/0033-pinctrl-sunxi-Support-generic-binding.patch new file mode 100644 index 000000000..35c687681 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0033-pinctrl-sunxi-Support-generic-binding.patch @@ -0,0 +1,106 @@ +From cefbf1a1b29531a970bc2908a50a75d6474fcc38 Mon Sep 17 00:00:00 2001 +From: Maxime Ripard +Date: Thu, 20 Oct 2016 15:49:03 +0200 +Subject: pinctrl: sunxi: Support generic binding + +Our bindings are mostly irrelevant now that we have generic pinctrl +bindings that cover exactly the same uses cases. + +Add support for the new ones, and obviously keep our old binding support in +order to keep the ABI stable. + +Acked-by: Chen-Yu Tsai +Signed-off-by: Maxime Ripard +Signed-off-by: Linus Walleij +--- + drivers/pinctrl/sunxi/pinctrl-sunxi.c | 48 +++++++++++++++++++++++++++++++++-- + 1 file changed, 46 insertions(+), 2 deletions(-) + +--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c ++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c +@@ -149,18 +149,33 @@ static int sunxi_pctrl_get_group_pins(st + + static bool sunxi_pctrl_has_bias_prop(struct device_node *node) + { +- return of_find_property(node, "allwinner,pull", NULL); ++ return of_find_property(node, "bias-pull-up", NULL) || ++ of_find_property(node, "bias-pull-down", NULL) || ++ of_find_property(node, "bias-disable", NULL) || ++ of_find_property(node, "allwinner,pull", NULL); + } + + static bool sunxi_pctrl_has_drive_prop(struct device_node *node) + { +- return of_find_property(node, "allwinner,drive", NULL); ++ return of_find_property(node, "drive-strength", NULL) || ++ of_find_property(node, "allwinner,drive", NULL); + } + + static int sunxi_pctrl_parse_bias_prop(struct device_node *node) + { + u32 val; + ++ /* Try the new style binding */ ++ if (of_find_property(node, "bias-pull-up", NULL)) ++ return PIN_CONFIG_BIAS_PULL_UP; ++ ++ if (of_find_property(node, "bias-pull-down", NULL)) ++ return PIN_CONFIG_BIAS_PULL_DOWN; ++ ++ if (of_find_property(node, "bias-disable", NULL)) ++ return PIN_CONFIG_BIAS_DISABLE; ++ ++ /* And fall back to the old binding */ + if (of_property_read_u32(node, "allwinner,pull", &val)) + return -EINVAL; + +@@ -180,6 +195,21 @@ static int sunxi_pctrl_parse_drive_prop( + { + u32 val; + ++ /* Try the new style binding */ ++ if (!of_property_read_u32(node, "drive-strength", &val)) { ++ /* We can't go below 10mA ... */ ++ if (val < 10) ++ return -EINVAL; ++ ++ /* ... 
and only up to 40 mA ... */ ++ if (val > 40) ++ val = 40; ++ ++ /* by steps of 10 mA */ ++ return rounddown(val, 10); ++ } ++ ++ /* And then fall back to the old binding */ + if (of_property_read_u32(node, "allwinner,drive", &val)) + return -EINVAL; + +@@ -191,6 +221,12 @@ static const char *sunxi_pctrl_parse_fun + const char *function; + int ret; + ++ /* Try the generic binding */ ++ ret = of_property_read_string(node, "function", &function); ++ if (!ret) ++ return function; ++ ++ /* And fall back to our legacy one */ + ret = of_property_read_string(node, "allwinner,function", &function); + if (!ret) + return function; +@@ -203,6 +239,14 @@ static const char *sunxi_pctrl_find_pins + { + int count; + ++ /* Try the generic binding */ ++ count = of_property_count_strings(node, "pins"); ++ if (count > 0) { ++ *npins = count; ++ return "pins"; ++ } ++ ++ /* And fall back to our legacy one */ + count = of_property_count_strings(node, "allwinner,pins"); + if (count > 0) { + *npins = count; diff --git a/target/linux/sunxi/patches-4.9/0034-pinctrl-sunxi-Deal-with-configless-pins.patch b/target/linux/sunxi/patches-4.9/0034-pinctrl-sunxi-Deal-with-configless-pins.patch new file mode 100644 index 000000000..119ab2b8f --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0034-pinctrl-sunxi-Deal-with-configless-pins.patch @@ -0,0 +1,128 @@ +From e11dee2e98f8abc99ad5336796576a827853ccfa Mon Sep 17 00:00:00 2001 +From: Maxime Ripard +Date: Thu, 20 Oct 2016 15:49:02 +0200 +Subject: pinctrl: sunxi: Deal with configless pins + +Even though the our binding had the assumption that the allwinner,pull and +allwinner,drive properties were optional, the code never took that into +account. + +Fix that. + +Signed-off-by: Maxime Ripard +Acked-by: Chen-Yu Tsai +Signed-off-by: Linus Walleij +--- + drivers/pinctrl/sunxi/pinctrl-sunxi.c | 51 +++++++++++++++++++++++++---------- + 1 file changed, 37 insertions(+), 14 deletions(-) + +--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c ++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c +@@ -261,20 +261,29 @@ static unsigned long *sunxi_pctrl_build_ + { + unsigned long *pinconfig; + unsigned int configlen = 0, idx = 0; ++ int ret; + + if (sunxi_pctrl_has_drive_prop(node)) + configlen++; + if (sunxi_pctrl_has_bias_prop(node)) + configlen++; + ++ /* ++ * If we don't have any configuration, bail out ++ */ ++ if (!configlen) ++ return NULL; ++ + pinconfig = kzalloc(configlen * sizeof(*pinconfig), GFP_KERNEL); + if (!pinconfig) +- return NULL; ++ return ERR_PTR(-ENOMEM); + + if (sunxi_pctrl_has_drive_prop(node)) { + int drive = sunxi_pctrl_parse_drive_prop(node); +- if (drive < 0) ++ if (drive < 0) { ++ ret = drive; + goto err_free; ++ } + + pinconfig[idx++] = pinconf_to_config_packed(PIN_CONFIG_DRIVE_STRENGTH, + drive); +@@ -282,8 +291,10 @@ static unsigned long *sunxi_pctrl_build_ + + if (sunxi_pctrl_has_bias_prop(node)) { + int pull = sunxi_pctrl_parse_bias_prop(node); +- if (pull < 0) ++ if (pull < 0) { ++ ret = pull; + goto err_free; ++ } + + pinconfig[idx++] = pinconf_to_config_packed(pull, 0); + } +@@ -294,7 +305,7 @@ static unsigned long *sunxi_pctrl_build_ + + err_free: + kfree(pinconfig); +- return NULL; ++ return ERR_PTR(ret); + } + + static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev, +@@ -328,7 +339,10 @@ static int sunxi_pctrl_dt_node_to_map(st + + /* + * We have two maps for each pin: one for the function, one +- * for the configuration (bias, strength, etc) ++ * for the configuration (bias, strength, etc). 
++ * ++ * We might be slightly overshooting, since we might not have ++ * any configuration. + */ + nmaps = npins * 2; + *map = kmalloc(nmaps * sizeof(struct pinctrl_map), GFP_KERNEL); +@@ -336,8 +350,8 @@ static int sunxi_pctrl_dt_node_to_map(st + return -ENOMEM; + + pinconfig = sunxi_pctrl_build_pin_config(node, &configlen); +- if (!pinconfig) { +- ret = -EINVAL; ++ if (IS_ERR(pinconfig)) { ++ ret = PTR_ERR(pinconfig); + goto err_free_map; + } + +@@ -364,15 +378,24 @@ static int sunxi_pctrl_dt_node_to_map(st + + i++; + +- (*map)[i].type = PIN_MAP_TYPE_CONFIGS_GROUP; +- (*map)[i].data.configs.group_or_pin = group; +- (*map)[i].data.configs.configs = pinconfig; +- (*map)[i].data.configs.num_configs = configlen; +- +- i++; ++ if (pinconfig) { ++ (*map)[i].type = PIN_MAP_TYPE_CONFIGS_GROUP; ++ (*map)[i].data.configs.group_or_pin = group; ++ (*map)[i].data.configs.configs = pinconfig; ++ (*map)[i].data.configs.num_configs = configlen; ++ i++; ++ } + } + +- *num_maps = nmaps; ++ *num_maps = i; ++ ++ /* ++ * We know have the number of maps we need, we can resize our ++ * map array ++ */ ++ *map = krealloc(*map, i * sizeof(struct pinctrl_map), GFP_KERNEL); ++ if (!map) ++ return -ENOMEM; + + return 0; + diff --git a/target/linux/sunxi/patches-4.9/0035-pinctrl-sunxi-make-bool-drivers-explicitly-non-modul.patch b/target/linux/sunxi/patches-4.9/0035-pinctrl-sunxi-make-bool-drivers-explicitly-non-modul.patch new file mode 100644 index 000000000..8ab535c30 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0035-pinctrl-sunxi-make-bool-drivers-explicitly-non-modul.patch @@ -0,0 +1,437 @@ +From 0c8c6ba00cbf2c0a6164aa41d43d017d65caf321 Mon Sep 17 00:00:00 2001 +From: Paul Gortmaker +Date: Sat, 29 Oct 2016 20:00:30 -0400 +Subject: pinctrl: sunxi: make bool drivers explicitly non-modular + +None of the Kconfigs for any of these drivers are tristate, +meaning that they currently are not being built as a module by anyone. + +Lets remove the modular code that is essentially orphaned, so that +when reading the drivers there is no doubt they are builtin-only. All +drivers get essentially the same change, so they are handled in batch. + +Changes are (1) use builtin_platform_driver, (2) use init.h header +(3) delete module_exit related code, (4) delete MODULE_DEVICE_TABLE, +and (5) delete MODULE_LICENCE/MODULE_AUTHOR and associated tags. + +Since module_platform_driver() uses the same init level priority as +builtin_platform_driver() the init ordering remains unchanged with +this commit. + +Also note that MODULE_DEVICE_TABLE is a no-op for non-modular code. + +We do delete the MODULE_LICENSE etc. tags since all that information +is already contained at the top of each file in the comments. 
+ +Cc: Boris Brezillon +Cc: Chen-Yu Tsai +Cc: Hans de Goede +Cc: Linus Walleij +Cc: Patrice Chotard +Cc: Hongzhou Yang +Cc: Fabian Frederick +Cc: Maxime Coquelin +Cc: Vishnu Patekar +Cc: Mylene Josserand +Cc: linux-gpio@vger.kernel.org +Cc: linux-arm-kernel@lists.infradead.org +Signed-off-by: Paul Gortmaker +Acked-by: Maxime Ripard +Signed-off-by: Linus Walleij +--- + drivers/pinctrl/sunxi/pinctrl-gr8.c | 9 ++------- + drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c | 9 ++------- + drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c | 9 ++------- + drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c | 9 ++------- + drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c | 10 ++-------- + drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c | 9 ++------- + drivers/pinctrl/sunxi/pinctrl-sun6i-a31s.c | 9 ++------- + drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c | 9 ++------- + drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c | 11 ++--------- + drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c | 10 ++-------- + drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c | 9 ++------- + drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c | 9 ++------- + drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c | 9 ++------- + 13 files changed, 26 insertions(+), 95 deletions(-) + +--- a/drivers/pinctrl/sunxi/pinctrl-gr8.c ++++ b/drivers/pinctrl/sunxi/pinctrl-gr8.c +@@ -12,7 +12,7 @@ + * warranty of any kind, whether express or implied. + */ + +-#include ++#include + #include + #include + #include +@@ -525,7 +525,6 @@ static const struct of_device_id sun5i_g + { .compatible = "nextthing,gr8-pinctrl", }, + {} + }; +-MODULE_DEVICE_TABLE(of, sun5i_gr8_pinctrl_match); + + static struct platform_driver sun5i_gr8_pinctrl_driver = { + .probe = sun5i_gr8_pinctrl_probe, +@@ -534,8 +533,4 @@ static struct platform_driver sun5i_gr8_ + .of_match_table = sun5i_gr8_pinctrl_match, + }, + }; +-module_platform_driver(sun5i_gr8_pinctrl_driver); +- +-MODULE_AUTHOR("Mylene Josserand ++#include + #include + #include + #include +@@ -1036,7 +1036,6 @@ static const struct of_device_id sun4i_a + { .compatible = "allwinner,sun4i-a10-pinctrl", }, + {} + }; +-MODULE_DEVICE_TABLE(of, sun4i_a10_pinctrl_match); + + static struct platform_driver sun4i_a10_pinctrl_driver = { + .probe = sun4i_a10_pinctrl_probe, +@@ -1045,8 +1044,4 @@ static struct platform_driver sun4i_a10_ + .of_match_table = sun4i_a10_pinctrl_match, + }, + }; +-module_platform_driver(sun4i_a10_pinctrl_driver); +- +-MODULE_AUTHOR("Maxime Ripard ++#include + #include + #include + #include +@@ -674,7 +674,6 @@ static const struct of_device_id sun5i_a + { .compatible = "allwinner,sun5i-a10s-pinctrl", }, + {} + }; +-MODULE_DEVICE_TABLE(of, sun5i_a10s_pinctrl_match); + + static struct platform_driver sun5i_a10s_pinctrl_driver = { + .probe = sun5i_a10s_pinctrl_probe, +@@ -683,8 +682,4 @@ static struct platform_driver sun5i_a10s + .of_match_table = sun5i_a10s_pinctrl_match, + }, + }; +-module_platform_driver(sun5i_a10s_pinctrl_driver); +- +-MODULE_AUTHOR("Maxime Ripard ++#include + #include + #include + #include +@@ -392,7 +392,6 @@ static const struct of_device_id sun5i_a + { .compatible = "allwinner,sun5i-a13-pinctrl", }, + {} + }; +-MODULE_DEVICE_TABLE(of, sun5i_a13_pinctrl_match); + + static struct platform_driver sun5i_a13_pinctrl_driver = { + .probe = sun5i_a13_pinctrl_probe, +@@ -401,8 +400,4 @@ static struct platform_driver sun5i_a13_ + .of_match_table = sun5i_a13_pinctrl_match, + }, + }; +-module_platform_driver(sun5i_a13_pinctrl_driver); +- +-MODULE_AUTHOR("Maxime Ripard ++#include + #include + #include + #include +@@ -136,7 +136,6 @@ static const struct 
of_device_id sun6i_a + { .compatible = "allwinner,sun6i-a31-r-pinctrl", }, + {} + }; +-MODULE_DEVICE_TABLE(of, sun6i_a31_r_pinctrl_match); + + static struct platform_driver sun6i_a31_r_pinctrl_driver = { + .probe = sun6i_a31_r_pinctrl_probe, +@@ -145,9 +144,4 @@ static struct platform_driver sun6i_a31_ + .of_match_table = sun6i_a31_r_pinctrl_match, + }, + }; +-module_platform_driver(sun6i_a31_r_pinctrl_driver); +- +-MODULE_AUTHOR("Boris Brezillon ++#include + #include + #include + #include +@@ -934,7 +934,6 @@ static const struct of_device_id sun6i_a + { .compatible = "allwinner,sun6i-a31-pinctrl", }, + {} + }; +-MODULE_DEVICE_TABLE(of, sun6i_a31_pinctrl_match); + + static struct platform_driver sun6i_a31_pinctrl_driver = { + .probe = sun6i_a31_pinctrl_probe, +@@ -943,8 +942,4 @@ static struct platform_driver sun6i_a31_ + .of_match_table = sun6i_a31_pinctrl_match, + }, + }; +-module_platform_driver(sun6i_a31_pinctrl_driver); +- +-MODULE_AUTHOR("Maxime Ripard ++#include + #include + #include + #include +@@ -798,7 +798,6 @@ static const struct of_device_id sun6i_a + { .compatible = "allwinner,sun6i-a31s-pinctrl", }, + {} + }; +-MODULE_DEVICE_TABLE(of, sun6i_a31s_pinctrl_match); + + static struct platform_driver sun6i_a31s_pinctrl_driver = { + .probe = sun6i_a31s_pinctrl_probe, +@@ -807,8 +806,4 @@ static struct platform_driver sun6i_a31s + .of_match_table = sun6i_a31s_pinctrl_match, + }, + }; +-module_platform_driver(sun6i_a31s_pinctrl_driver); +- +-MODULE_AUTHOR("Hans de Goede "); +-MODULE_DESCRIPTION("Allwinner A31s pinctrl driver"); +-MODULE_LICENSE("GPL"); ++builtin_platform_driver(sun6i_a31s_pinctrl_driver); +--- a/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c ++++ b/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c +@@ -10,7 +10,7 @@ + * warranty of any kind, whether express or implied. 
+ */ + +-#include ++#include + #include + #include + #include +@@ -1045,7 +1045,6 @@ static const struct of_device_id sun7i_a + { .compatible = "allwinner,sun7i-a20-pinctrl", }, + {} + }; +-MODULE_DEVICE_TABLE(of, sun7i_a20_pinctrl_match); + + static struct platform_driver sun7i_a20_pinctrl_driver = { + .probe = sun7i_a20_pinctrl_probe, +@@ -1054,8 +1053,4 @@ static struct platform_driver sun7i_a20_ + .of_match_table = sun7i_a20_pinctrl_match, + }, + }; +-module_platform_driver(sun7i_a20_pinctrl_driver); +- +-MODULE_AUTHOR("Maxime Ripard ++#include + #include + #include + #include +@@ -123,7 +123,6 @@ static const struct of_device_id sun8i_a + { .compatible = "allwinner,sun8i-a23-r-pinctrl", }, + {} + }; +-MODULE_DEVICE_TABLE(of, sun8i_a23_r_pinctrl_match); + + static struct platform_driver sun8i_a23_r_pinctrl_driver = { + .probe = sun8i_a23_r_pinctrl_probe, +@@ -132,10 +131,4 @@ static struct platform_driver sun8i_a23_ + .of_match_table = sun8i_a23_r_pinctrl_match, + }, + }; +-module_platform_driver(sun8i_a23_r_pinctrl_driver); +- +-MODULE_AUTHOR("Chen-Yu Tsai "); +-MODULE_AUTHOR("Boris Brezillon ++#include + #include + #include + #include +@@ -575,7 +575,6 @@ static const struct of_device_id sun8i_a + { .compatible = "allwinner,sun8i-a23-pinctrl", }, + {} + }; +-MODULE_DEVICE_TABLE(of, sun8i_a23_pinctrl_match); + + static struct platform_driver sun8i_a23_pinctrl_driver = { + .probe = sun8i_a23_pinctrl_probe, +@@ -584,9 +583,4 @@ static struct platform_driver sun8i_a23_ + .of_match_table = sun8i_a23_pinctrl_match, + }, + }; +-module_platform_driver(sun8i_a23_pinctrl_driver); +- +-MODULE_AUTHOR("Chen-Yu Tsai "); +-MODULE_AUTHOR("Maxime Ripard ++#include + #include + #include + #include +@@ -498,7 +498,6 @@ static const struct of_device_id sun8i_a + { .compatible = "allwinner,sun8i-a33-pinctrl", }, + {} + }; +-MODULE_DEVICE_TABLE(of, sun8i_a33_pinctrl_match); + + static struct platform_driver sun8i_a33_pinctrl_driver = { + .probe = sun8i_a33_pinctrl_probe, +@@ -507,8 +506,4 @@ static struct platform_driver sun8i_a33_ + .of_match_table = sun8i_a33_pinctrl_match, + }, + }; +-module_platform_driver(sun8i_a33_pinctrl_driver); +- +-MODULE_AUTHOR("Vishnu Patekar "); +-MODULE_DESCRIPTION("Allwinner a33 pinctrl driver"); +-MODULE_LICENSE("GPL"); ++builtin_platform_driver(sun8i_a33_pinctrl_driver); +--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c ++++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c +@@ -12,7 +12,7 @@ + * warranty of any kind, whether express or implied. + */ + +-#include ++#include + #include + #include + #include +@@ -587,7 +587,6 @@ static const struct of_device_id sun8i_a + { .compatible = "allwinner,sun8i-a83t-pinctrl", }, + {} + }; +-MODULE_DEVICE_TABLE(of, sun8i_a83t_pinctrl_match); + + static struct platform_driver sun8i_a83t_pinctrl_driver = { + .probe = sun8i_a83t_pinctrl_probe, +@@ -596,8 +595,4 @@ static struct platform_driver sun8i_a83t + .of_match_table = sun8i_a83t_pinctrl_match, + }, + }; +-module_platform_driver(sun8i_a83t_pinctrl_driver); +- +-MODULE_AUTHOR("Vishnu Patekar "); +-MODULE_DESCRIPTION("Allwinner a83t pinctrl driver"); +-MODULE_LICENSE("GPL"); ++builtin_platform_driver(sun8i_a83t_pinctrl_driver); +--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c ++++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c +@@ -10,7 +10,7 @@ + * warranty of any kind, whether express or implied. 
+ */ + +-#include ++#include + #include + #include + #include +@@ -733,7 +733,6 @@ static const struct of_device_id sun9i_a + { .compatible = "allwinner,sun9i-a80-pinctrl", }, + {} + }; +-MODULE_DEVICE_TABLE(of, sun9i_a80_pinctrl_match); + + static struct platform_driver sun9i_a80_pinctrl_driver = { + .probe = sun9i_a80_pinctrl_probe, +@@ -742,8 +741,4 @@ static struct platform_driver sun9i_a80_ + .of_match_table = sun9i_a80_pinctrl_match, + }, + }; +-module_platform_driver(sun9i_a80_pinctrl_driver); +- +-MODULE_AUTHOR("Maxime Ripard "); +-MODULE_DESCRIPTION("Allwinner A80 pinctrl driver"); +-MODULE_LICENSE("GPL"); ++builtin_platform_driver(sun9i_a80_pinctrl_driver); diff --git a/target/linux/sunxi/patches-4.9/0036-pinctrl-sunxi-Free-configs-in-pinctrl_map-only-if-it.patch b/target/linux/sunxi/patches-4.9/0036-pinctrl-sunxi-Free-configs-in-pinctrl_map-only-if-it.patch new file mode 100644 index 000000000..02c5f568c --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0036-pinctrl-sunxi-Free-configs-in-pinctrl_map-only-if-it.patch @@ -0,0 +1,51 @@ +From 88f01a1bd0e0dbd01b65907023dbe53cf524ea2a Mon Sep 17 00:00:00 2001 +From: Chen-Yu Tsai +Date: Fri, 11 Nov 2016 10:35:10 +0800 +Subject: pinctrl: sunxi: Free configs in pinctrl_map only if it is a config + map + +In the recently refactored sunxi pinctrl library, we are only allocating +one set of pin configs for each pinmux setting node. When the pinctrl_map +structure is freed, the pin configs should also be freed. However the +code assumed the first map would contain the configs, which actually +never happens, as the mux function map gets added first. + +The proper way to do this is to look through all the maps and free the +first one whose type is actually PIN_MAP_TYPE_CONFIGS_GROUP. + +Also slightly expand the comment explaining this. + +Fixes: f233dbca6227 ("pinctrl: sunxi: Rework the pin config building code") +Signed-off-by: Chen-Yu Tsai +Acked-by: Maxime Ripard +Signed-off-by: Linus Walleij +--- + drivers/pinctrl/sunxi/pinctrl-sunxi.c | 17 +++++++++++++++-- + 1 file changed, 15 insertions(+), 2 deletions(-) + +--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c ++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c +@@ -408,8 +408,21 @@ static void sunxi_pctrl_dt_free_map(stru + struct pinctrl_map *map, + unsigned num_maps) + { +- /* All the maps have the same pin config, free only the first one */ +- kfree(map[0].data.configs.configs); ++ int i; ++ ++ /* pin config is never in the first map */ ++ for (i = 1; i < num_maps; i++) { ++ if (map[i].type != PIN_MAP_TYPE_CONFIGS_GROUP) ++ continue; ++ ++ /* ++ * All the maps share the same pin config, ++ * free only the first one we find. ++ */ ++ kfree(map[i].data.configs.configs); ++ break; ++ } ++ + kfree(map); + } + diff --git a/target/linux/sunxi/patches-4.9/0037-pinctrl-sunxi-Fix-PIN_CONFIG_BIAS_PULL_-DOWN-UP-argu.patch b/target/linux/sunxi/patches-4.9/0037-pinctrl-sunxi-Fix-PIN_CONFIG_BIAS_PULL_-DOWN-UP-argu.patch new file mode 100644 index 000000000..4921240f7 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0037-pinctrl-sunxi-Fix-PIN_CONFIG_BIAS_PULL_-DOWN-UP-argu.patch @@ -0,0 +1,40 @@ +From 223dba00b4072efc590c7d648f230db1b44186b9 Mon Sep 17 00:00:00 2001 +From: Chen-Yu Tsai +Date: Fri, 11 Nov 2016 17:50:34 +0800 +Subject: pinctrl: sunxi: Fix PIN_CONFIG_BIAS_PULL_{DOWN,UP} argument + +According to pinconf-generic.h, the argument for +PIN_CONFIG_BIAS_PULL_{DOWN,UP} is non-zero if the bias is enabled +with a pull up/down resistor, zero if it is directly connected +to VDD or ground. 
+ +Since Allwinner hardware uses a weak pull resistor internally, +the argument should be 1. + +Signed-off-by: Chen-Yu Tsai +Acked-by: Maxime Ripard +Signed-off-by: Linus Walleij +--- + drivers/pinctrl/sunxi/pinctrl-sunxi.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c ++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c +@@ -291,12 +291,16 @@ static unsigned long *sunxi_pctrl_build_ + + if (sunxi_pctrl_has_bias_prop(node)) { + int pull = sunxi_pctrl_parse_bias_prop(node); ++ int arg = 0; + if (pull < 0) { + ret = pull; + goto err_free; + } + +- pinconfig[idx++] = pinconf_to_config_packed(pull, 0); ++ if (pull != PIN_CONFIG_BIAS_DISABLE) ++ arg = 1; /* hardware uses weak pull resistors */ ++ ++ pinconfig[idx++] = pinconf_to_config_packed(pull, arg); + } + + diff --git a/target/linux/sunxi/patches-4.9/0038-pinctrl-sunxi-Add-support-for-fetching-pinconf-setti.patch b/target/linux/sunxi/patches-4.9/0038-pinctrl-sunxi-Add-support-for-fetching-pinconf-setti.patch new file mode 100644 index 000000000..d7972197f --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0038-pinctrl-sunxi-Add-support-for-fetching-pinconf-setti.patch @@ -0,0 +1,158 @@ +From c5fda170e87a4bdaeb278f7e50f7a1f654e94eb5 Mon Sep 17 00:00:00 2001 +From: Chen-Yu Tsai +Date: Fri, 11 Nov 2016 17:50:35 +0800 +Subject: pinctrl: sunxi: Add support for fetching pinconf settings from + hardware + +The sunxi pinctrl driver only caches whatever pinconf setting was last +set on a given pingroup. This is not particularly helpful, nor is it +correct. + +Fix this by actually reading the hardware registers and returning +the correct results or error codes. Also filter out unsupported +pinconf settings. Since this driver has a peculiar setup of 1 pin +per group, we can support both pin and pingroup pinconf setting +read back with the same code. The sunxi_pconf_reg helper and code +structure is inspired by pinctrl-msm. + +With this done we can also claim to support generic pinconf, by +setting .is_generic = true in pinconf_ops. + +Also remove the cached config value. The behavior of this was never +correct, as it only cached 1 setting instead of all of them. Since +we can now read back settings directly from the hardware, it is no +longer required. 
+ +Signed-off-by: Chen-Yu Tsai +Acked-by: Maxime Ripard +Signed-off-by: Linus Walleij +--- + drivers/pinctrl/sunxi/pinctrl-sunxi.c | 86 +++++++++++++++++++++++++++++++++-- + drivers/pinctrl/sunxi/pinctrl-sunxi.h | 1 - + 2 files changed, 81 insertions(+), 6 deletions(-) + +--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c ++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c +@@ -438,15 +438,91 @@ static const struct pinctrl_ops sunxi_pc + .get_group_pins = sunxi_pctrl_get_group_pins, + }; + ++static int sunxi_pconf_reg(unsigned pin, enum pin_config_param param, ++ u32 *offset, u32 *shift, u32 *mask) ++{ ++ switch (param) { ++ case PIN_CONFIG_DRIVE_STRENGTH: ++ *offset = sunxi_dlevel_reg(pin); ++ *shift = sunxi_dlevel_offset(pin); ++ *mask = DLEVEL_PINS_MASK; ++ break; ++ ++ case PIN_CONFIG_BIAS_PULL_UP: ++ case PIN_CONFIG_BIAS_PULL_DOWN: ++ case PIN_CONFIG_BIAS_DISABLE: ++ *offset = sunxi_pull_reg(pin); ++ *shift = sunxi_pull_offset(pin); ++ *mask = PULL_PINS_MASK; ++ break; ++ ++ default: ++ return -ENOTSUPP; ++ } ++ ++ return 0; ++} ++ ++static int sunxi_pconf_get(struct pinctrl_dev *pctldev, unsigned pin, ++ unsigned long *config) ++{ ++ struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); ++ enum pin_config_param param = pinconf_to_config_param(*config); ++ u32 offset, shift, mask, val; ++ u16 arg; ++ int ret; ++ ++ pin -= pctl->desc->pin_base; ++ ++ ret = sunxi_pconf_reg(pin, param, &offset, &shift, &mask); ++ if (ret < 0) ++ return ret; ++ ++ val = (readl(pctl->membase + offset) >> shift) & mask; ++ ++ switch (pinconf_to_config_param(*config)) { ++ case PIN_CONFIG_DRIVE_STRENGTH: ++ arg = (val + 1) * 10; ++ break; ++ ++ case PIN_CONFIG_BIAS_PULL_UP: ++ if (val != SUN4I_PINCTRL_PULL_UP) ++ return -EINVAL; ++ arg = 1; /* hardware is weak pull-up */ ++ break; ++ ++ case PIN_CONFIG_BIAS_PULL_DOWN: ++ if (val != SUN4I_PINCTRL_PULL_DOWN) ++ return -EINVAL; ++ arg = 1; /* hardware is weak pull-down */ ++ break; ++ ++ case PIN_CONFIG_BIAS_DISABLE: ++ if (val != SUN4I_PINCTRL_NO_PULL) ++ return -EINVAL; ++ arg = 0; ++ break; ++ ++ default: ++ /* sunxi_pconf_reg should catch anything unsupported */ ++ WARN_ON(1); ++ return -ENOTSUPP; ++ } ++ ++ *config = pinconf_to_config_packed(param, arg); ++ ++ return 0; ++} ++ + static int sunxi_pconf_group_get(struct pinctrl_dev *pctldev, + unsigned group, + unsigned long *config) + { + struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); ++ struct sunxi_pinctrl_group *g = &pctl->groups[group]; + +- *config = pctl->groups[group].config; +- +- return 0; ++ /* We only support 1 pin per group. 
Chain it to the pin callback */ ++ return sunxi_pconf_get(pctldev, g->pin, config); + } + + static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev, +@@ -508,8 +584,6 @@ static int sunxi_pconf_group_set(struct + default: + break; + } +- /* cache the config value */ +- g->config = configs[i]; + } /* for each config */ + + spin_unlock_irqrestore(&pctl->lock, flags); +@@ -518,6 +592,8 @@ static int sunxi_pconf_group_set(struct + } + + static const struct pinconf_ops sunxi_pconf_ops = { ++ .is_generic = true, ++ .pin_config_get = sunxi_pconf_get, + .pin_config_group_get = sunxi_pconf_group_get, + .pin_config_group_set = sunxi_pconf_group_set, + }; +--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h ++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h +@@ -109,7 +109,6 @@ struct sunxi_pinctrl_function { + + struct sunxi_pinctrl_group { + const char *name; +- unsigned long config; + unsigned pin; + }; + diff --git a/target/linux/sunxi/patches-4.9/0039-pinctrl-sunxi-Make-sunxi_pconf_group_set-use-sunxi_p.patch b/target/linux/sunxi/patches-4.9/0039-pinctrl-sunxi-Make-sunxi_pconf_group_set-use-sunxi_p.patch new file mode 100644 index 000000000..7555933f6 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0039-pinctrl-sunxi-Make-sunxi_pconf_group_set-use-sunxi_p.patch @@ -0,0 +1,122 @@ +From 51814827190214986c452a166718bf12d32211c7 Mon Sep 17 00:00:00 2001 +From: Chen-Yu Tsai +Date: Fri, 11 Nov 2016 17:50:36 +0800 +Subject: pinctrl: sunxi: Make sunxi_pconf_group_set use sunxi_pconf_reg helper + +The sunxi_pconf_reg helper introduced in the last patch gives us the +chance to rework sunxi_pconf_group_set to have it match the structure +of sunxi_pconf_(group_)get and make it easier to understand. + +For each config to set, it: + + 1. checks if the parameter is supported. + 2. checks if the argument is within limits. + 3. converts argument to the register value. + 4. writes to the register with spinlock held. + +As a result the function now blocks unsupported config parameters, +instead of silently ignoring them. 
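+
+Schematically, each loop iteration then reduces to the following (a sketch
+of the structure introduced below; convert() stands in for the per-parameter
+switch and is not a real helper):
+
+  param = pinconf_to_config_param(configs[i]);    /* 1 */
+  arg = pinconf_to_config_argument(configs[i]);
+  ret = sunxi_pconf_reg(pin, param, &offset, &shift, &mask);
+  if (ret < 0)
+          return ret;             /* unsupported parameter */
+  val = convert(param, arg);      /* 2 + 3, e.g. arg / 10 - 1 for mA */
+  /* 4: read-modify-write the register with pctl->lock held */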
+ +Signed-off-by: Chen-Yu Tsai +Acked-by: Maxime Ripard +Signed-off-by: Linus Walleij +--- + drivers/pinctrl/sunxi/pinctrl-sunxi.c | 64 +++++++++++++++++------------------ + 1 file changed, 32 insertions(+), 32 deletions(-) + +--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c ++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c +@@ -532,23 +532,27 @@ static int sunxi_pconf_group_set(struct + { + struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct sunxi_pinctrl_group *g = &pctl->groups[group]; +- unsigned long flags; + unsigned pin = g->pin - pctl->desc->pin_base; +- u32 val, mask; +- u16 strength; +- u8 dlevel; + int i; + +- spin_lock_irqsave(&pctl->lock, flags); +- + for (i = 0; i < num_configs; i++) { +- switch (pinconf_to_config_param(configs[i])) { ++ enum pin_config_param param; ++ unsigned long flags; ++ u32 offset, shift, mask, reg; ++ u16 arg, val; ++ int ret; ++ ++ param = pinconf_to_config_param(configs[i]); ++ arg = pinconf_to_config_argument(configs[i]); ++ ++ ret = sunxi_pconf_reg(pin, param, &offset, &shift, &mask); ++ if (ret < 0) ++ return ret; ++ ++ switch (param) { + case PIN_CONFIG_DRIVE_STRENGTH: +- strength = pinconf_to_config_argument(configs[i]); +- if (strength > 40) { +- spin_unlock_irqrestore(&pctl->lock, flags); ++ if (arg < 10 || arg > 40) + return -EINVAL; +- } + /* + * We convert from mA to what the register expects: + * 0: 10mA +@@ -556,37 +560,33 @@ static int sunxi_pconf_group_set(struct + * 2: 30mA + * 3: 40mA + */ +- dlevel = strength / 10 - 1; +- val = readl(pctl->membase + sunxi_dlevel_reg(pin)); +- mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(pin); +- writel((val & ~mask) +- | dlevel << sunxi_dlevel_offset(pin), +- pctl->membase + sunxi_dlevel_reg(pin)); ++ val = arg / 10 - 1; + break; + case PIN_CONFIG_BIAS_DISABLE: +- val = readl(pctl->membase + sunxi_pull_reg(pin)); +- mask = PULL_PINS_MASK << sunxi_pull_offset(pin); +- writel((val & ~mask), +- pctl->membase + sunxi_pull_reg(pin)); ++ val = 0; + break; + case PIN_CONFIG_BIAS_PULL_UP: +- val = readl(pctl->membase + sunxi_pull_reg(pin)); +- mask = PULL_PINS_MASK << sunxi_pull_offset(pin); +- writel((val & ~mask) | 1 << sunxi_pull_offset(pin), +- pctl->membase + sunxi_pull_reg(pin)); ++ if (arg == 0) ++ return -EINVAL; ++ val = 1; + break; + case PIN_CONFIG_BIAS_PULL_DOWN: +- val = readl(pctl->membase + sunxi_pull_reg(pin)); +- mask = PULL_PINS_MASK << sunxi_pull_offset(pin); +- writel((val & ~mask) | 2 << sunxi_pull_offset(pin), +- pctl->membase + sunxi_pull_reg(pin)); ++ if (arg == 0) ++ return -EINVAL; ++ val = 2; + break; + default: +- break; ++ /* sunxi_pconf_reg should catch anything unsupported */ ++ WARN_ON(1); ++ return -ENOTSUPP; + } +- } /* for each config */ + +- spin_unlock_irqrestore(&pctl->lock, flags); ++ spin_lock_irqsave(&pctl->lock, flags); ++ reg = readl(pctl->membase + offset); ++ reg &= ~(mask << shift); ++ writel(reg | val << shift, pctl->membase + offset); ++ spin_unlock_irqrestore(&pctl->lock, flags); ++ } /* for each config */ + + return 0; + } diff --git a/target/linux/sunxi/patches-4.9/0040-pinctrl-sunxi-Add-support-for-interrupt-debouncing.patch b/target/linux/sunxi/patches-4.9/0040-pinctrl-sunxi-Add-support-for-interrupt-debouncing.patch new file mode 100644 index 000000000..01cbe31be --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0040-pinctrl-sunxi-Add-support-for-interrupt-debouncing.patch @@ -0,0 +1,171 @@ +From 7c926492d38a3feef4b4b29c91b7c03eb1b8b546 Mon Sep 17 00:00:00 2001 +From: Maxime Ripard +Date: Mon, 14 Nov 2016 21:53:03 +0100 +Subject: pinctrl: 
sunxi: Add support for interrupt debouncing + +The pin controller found in the Allwinner SoCs has support for interrupts +debouncing. + +However, this is not done per-pin, preventing us from using the generic +pinconf binding for that, but per irq bank, which, depending on the SoC, +ranges from one to five. + +Introduce a device-wide property to deal with this using a microsecond +resolution. We can re-use the per-pin input-debounce property for that, so +let's do it! + +Signed-off-by: Maxime Ripard +Acked-by: Rob Herring +Signed-off-by: Linus Walleij +--- + .../bindings/pinctrl/allwinner,sunxi-pinctrl.txt | 14 ++++ + drivers/pinctrl/sunxi/pinctrl-sunxi.c | 84 ++++++++++++++++++++++ + drivers/pinctrl/sunxi/pinctrl-sunxi.h | 7 ++ + 3 files changed, 105 insertions(+) + +--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt ++++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt +@@ -28,6 +28,20 @@ Required properties: + - reg: Should contain the register physical address and length for the + pin controller. + ++- clocks: phandle to the clocks feeding the pin controller: ++ - "apb": the gated APB parent clock ++ - "hosc": the high frequency oscillator in the system ++ - "losc": the low frequency oscillator in the system ++ ++Note: For backward compatibility reasons, the hosc and losc clocks are only ++required if you need to use the optional input-debounce property. Any new ++device tree should set them. ++ ++Optional properties: ++ - input-debounce: Array of debouncing periods in microseconds. One period per ++ irq bank found in the controller. 0 if no setup required. ++ ++ + Please refer to pinctrl-bindings.txt in this directory for details of the + common pinctrl bindings used by client devices. + +--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c ++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c +@@ -1122,6 +1122,88 @@ static int sunxi_pinctrl_build_state(str + return 0; + } + ++static int sunxi_pinctrl_get_debounce_div(struct clk *clk, int freq, int *diff) ++{ ++ unsigned long clock = clk_get_rate(clk); ++ unsigned int best_diff = ~0, best_div; ++ int i; ++ ++ for (i = 0; i < 8; i++) { ++ int cur_diff = abs(freq - (clock >> i)); ++ ++ if (cur_diff < best_diff) { ++ best_diff = cur_diff; ++ best_div = i; ++ } ++ } ++ ++ *diff = best_diff; ++ return best_div; ++} ++ ++static int sunxi_pinctrl_setup_debounce(struct sunxi_pinctrl *pctl, ++ struct device_node *node) ++{ ++ unsigned int hosc_diff, losc_diff; ++ unsigned int hosc_div, losc_div; ++ struct clk *hosc, *losc; ++ u8 div, src; ++ int i, ret; ++ ++ /* Deal with old DTs that didn't have the oscillators */ ++ if (of_count_phandle_with_args(node, "clocks", "#clock-cells") != 3) ++ return 0; ++ ++ /* If we don't have any setup, bail out */ ++ if (!of_find_property(node, "input-debounce", NULL)) ++ return 0; ++ ++ losc = devm_clk_get(pctl->dev, "losc"); ++ if (IS_ERR(losc)) ++ return PTR_ERR(losc); ++ ++ hosc = devm_clk_get(pctl->dev, "hosc"); ++ if (IS_ERR(hosc)) ++ return PTR_ERR(hosc); ++ ++ for (i = 0; i < pctl->desc->irq_banks; i++) { ++ unsigned long debounce_freq; ++ u32 debounce; ++ ++ ret = of_property_read_u32_index(node, "input-debounce", ++ i, &debounce); ++ if (ret) ++ return ret; ++ ++ if (!debounce) ++ continue; ++ ++ debounce_freq = DIV_ROUND_CLOSEST(USEC_PER_SEC, debounce); ++ losc_div = sunxi_pinctrl_get_debounce_div(losc, ++ debounce_freq, ++ &losc_diff); ++ ++ hosc_div = sunxi_pinctrl_get_debounce_div(hosc, ++ debounce_freq, ++ &hosc_diff); ++ ++ if (hosc_diff < losc_diff) { ++ div = 
hosc_div; ++ src = 1; ++ } else { ++ div = losc_div; ++ src = 0; ++ } ++ ++ writel(src | div << 4, ++ pctl->membase + ++ sunxi_irq_debounce_reg_from_bank(i, ++ pctl->desc->irq_bank_base)); ++ } ++ ++ return 0; ++} ++ + int sunxi_pinctrl_init(struct platform_device *pdev, + const struct sunxi_pinctrl_desc *desc) + { +@@ -1284,6 +1366,8 @@ int sunxi_pinctrl_init(struct platform_d + pctl); + } + ++ sunxi_pinctrl_setup_debounce(pctl, node); ++ + dev_info(&pdev->dev, "initialized sunXi PIO driver\n"); + + return 0; +--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h ++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h +@@ -69,6 +69,8 @@ + #define IRQ_STATUS_IRQ_BITS 1 + #define IRQ_STATUS_IRQ_MASK ((1 << IRQ_STATUS_IRQ_BITS) - 1) + ++#define IRQ_DEBOUNCE_REG 0x218 ++ + #define IRQ_MEM_SIZE 0x20 + + #define IRQ_EDGE_RISING 0x00 +@@ -265,6 +267,11 @@ static inline u32 sunxi_irq_ctrl_offset( + return irq_num * IRQ_CTRL_IRQ_BITS; + } + ++static inline u32 sunxi_irq_debounce_reg_from_bank(u8 bank, unsigned bank_base) ++{ ++ return IRQ_DEBOUNCE_REG + (bank_base + bank) * IRQ_MEM_SIZE; ++} ++ + static inline u32 sunxi_irq_status_reg_from_bank(u8 bank, unsigned bank_base) + { + return IRQ_STATUS_REG + (bank_base + bank) * IRQ_MEM_SIZE; diff --git a/target/linux/sunxi/patches-4.9/0041-pinctrl-sunxi-fix-theoretical-uninitialized-variable.patch b/target/linux/sunxi/patches-4.9/0041-pinctrl-sunxi-fix-theoretical-uninitialized-variable.patch new file mode 100644 index 000000000..69de015b6 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0041-pinctrl-sunxi-fix-theoretical-uninitialized-variable.patch @@ -0,0 +1,40 @@ +From d8a22212737314cc02692cc90eda7d844fa20257 Mon Sep 17 00:00:00 2001 +From: Arnd Bergmann +Date: Wed, 16 Nov 2016 15:18:18 +0100 +Subject: pinctrl: sunxi: fix theoretical uninitialized variable access + +gcc warns about a way that it could use an uninitialized variable: + +drivers/pinctrl/sunxi/pinctrl-sunxi.c: In function 'sunxi_pinctrl_init': +drivers/pinctrl/sunxi/pinctrl-sunxi.c:1191:8: error: 'best_div' may be used uninitialized in this function [-Werror=maybe-uninitialized] + +This cannot really happen except if 'freq' is UINT_MAX and 'clock' is +zero, and both of these are forbidden. To shut up the warning anyway, +this changes the logic to initialize the return code to the first +divider value before looking at the others. 
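+
+As a worked example of the selection logic: with a 32768 Hz losc and a
+requested debounce frequency of 1000 Hz, the loop compares clock >> i for
+each divider; 32768 >> 5 = 1024 Hz gives the smallest |freq - (clock >> i)|,
+so best_div ends up as 5. Seeding best_diff/best_div with the i = 0 case
+and scanning from i = 1 picks the same divider while guaranteeing both
+variables are always initialized.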
+
+Fixes: 7c926492d38a ("pinctrl: sunxi: Add support for interrupt debouncing")
+Signed-off-by: Arnd Bergmann
+Signed-off-by: Linus Walleij
+---
+ drivers/pinctrl/sunxi/pinctrl-sunxi.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+@@ -1125,10 +1125,13 @@ static int sunxi_pinctrl_build_state(str
+ static int sunxi_pinctrl_get_debounce_div(struct clk *clk, int freq, int *diff)
+ {
+ unsigned long clock = clk_get_rate(clk);
+- unsigned int best_diff = ~0, best_div;
++ unsigned int best_diff, best_div;
+ int i;
+
+- for (i = 0; i < 8; i++) {
++ best_diff = abs(freq - clock);
++ best_div = 0;
++
++ for (i = 1; i < 8; i++) {
+ int cur_diff = abs(freq - (clock >> i));
+
+ if (cur_diff < best_diff) {
diff --git a/target/linux/sunxi/patches-4.9/0042-pinctrl-sunxi-Testing-the-wrong-variable.patch b/target/linux/sunxi/patches-4.9/0042-pinctrl-sunxi-Testing-the-wrong-variable.patch
new file mode 100644
index 000000000..8ed4f27b4
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0042-pinctrl-sunxi-Testing-the-wrong-variable.patch
@@ -0,0 +1,35 @@
+From b3cde198b17f504643cc1eeffc4623f03326f436 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter
+Date: Fri, 18 Nov 2016 14:35:57 +0300
+Subject: pinctrl: sunxi: Testing the wrong variable
+
+Smatch complains that we dereference "map" before testing it for NULL
+which is true. We should be testing "*map" instead. Also on the error
+path, we should free *map and set it to NULL.
+
+Signed-off-by: Dan Carpenter
+Acked-by: Maxime Ripard
+Signed-off-by: Linus Walleij
+---
+ drivers/pinctrl/sunxi/pinctrl-sunxi.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+@@ -398,13 +398,14 @@ static int sunxi_pctrl_dt_node_to_map(st
+ * map array
+ */
+ *map = krealloc(*map, i * sizeof(struct pinctrl_map), GFP_KERNEL);
+- if (!map)
++ if (!*map)
+ return -ENOMEM;
+
+ return 0;
+
+ err_free_map:
+- kfree(map);
++ kfree(*map);
++ *map = NULL;
+ return ret;
+ }
+
diff --git a/target/linux/sunxi/patches-4.9/0043-pinctrl-sunxi-Don-t-enforce-bias-disable-for-now.patch b/target/linux/sunxi/patches-4.9/0043-pinctrl-sunxi-Don-t-enforce-bias-disable-for-now.patch
new file mode 100644
index 000000000..d6e639af5
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0043-pinctrl-sunxi-Don-t-enforce-bias-disable-for-now.patch
@@ -0,0 +1,42 @@
+From 2154d94b40ea2a5de05245521371d0461bb0d669 Mon Sep 17 00:00:00 2001
+From: Maxime Ripard
+Date: Mon, 23 Jan 2017 09:21:30 +0100
+Subject: pinctrl: sunxi: Don't enforce bias disable (for now)
+
+Commit 07fe64ba213f ("pinctrl: sunxi: Handle bias disable") actually
+enforced the disabling of the pull up/down resistors instead of
+ignoring it like it was done before.
+
+This was part of a wider rework to switch to the generic pinconf bindings,
+and was meant to be merged together with DT patches that were switching to
+it, and removing what was considered default values by both the binding and
+the boards. This included no bias on a pin.
+
+However, those DT patches were delayed to 4.11, which would be fine if not
+for a significant number of boards having the bias setup wrong, which in
+turn breaks the MMC on those boards (and possibly other devices too).
+
+In order to avoid conflicts as much as possible, bring back the old
+behaviour for 4.10, and we'll revert that commit once all the DT bits
+have landed.
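+
+Concretely, the interim behaviour restored below accepts
+PIN_CONFIG_BIAS_DISABLE but skips the register write entirely (sketch of
+the one-line change in the hunk that follows):
+
+  case PIN_CONFIG_BIAS_DISABLE:
+          continue;       /* accept the config, leave the hardware untouched */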
+ +Tested-by: Priit Laes +Signed-off-by: Maxime Ripard +Acked-by: Chen-Yu Tsai +Signed-off-by: Linus Walleij +--- + drivers/pinctrl/sunxi/pinctrl-sunxi.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c ++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c +@@ -564,8 +564,7 @@ static int sunxi_pconf_group_set(struct + val = arg / 10 - 1; + break; + case PIN_CONFIG_BIAS_DISABLE: +- val = 0; +- break; ++ continue; + case PIN_CONFIG_BIAS_PULL_UP: + if (arg == 0) + return -EINVAL; diff --git a/target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch b/target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch new file mode 100644 index 000000000..b88c19e25 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch @@ -0,0 +1,3497 @@ +--- a/Documentation/devicetree/bindings/net/stmmac.txt ++++ b/Documentation/devicetree/bindings/net/stmmac.txt +@@ -1,7 +1,7 @@ + * STMicroelectronics 10/100/1000 Ethernet driver (GMAC) + + Required properties: +-- compatible: Should be "snps,dwmac-" "snps,dwmac" ++- compatible: Should be "snps,dwmac-", "snps,dwmac" + For backwards compatibility: "st,spear600-gmac" is also supported. + - reg: Address and length of the register set for the device + - interrupt-parent: Should be the phandle for the interrupt controller +@@ -34,7 +34,13 @@ Optional properties: + platforms. + - tx-fifo-depth: See ethernet.txt file in the same directory + - rx-fifo-depth: See ethernet.txt file in the same directory +-- snps,pbl Programmable Burst Length ++- snps,pbl Programmable Burst Length (tx and rx) ++- snps,txpbl Tx Programmable Burst Length. Only for GMAC and newer. ++ If set, DMA tx will use this value rather than snps,pbl. ++- snps,rxpbl Rx Programmable Burst Length. Only for GMAC and newer. ++ If set, DMA rx will use this value rather than snps,pbl. ++- snps,no-pbl-x8 Don't multiply the pbl/txpbl/rxpbl values by 8. ++ For core rev < 3.50, don't multiply the values by 4. + - snps,aal Address-Aligned Beats + - snps,fixed-burst Program the DMA to use the fixed burst mode + - snps,mixed-burst Program the DMA to use the mixed burst mode +@@ -50,6 +56,8 @@ Optional properties: + - snps,ps-speed: port selection speed that can be passed to the core when + PCS is supported. For example, this is used in case of SGMII + and MAC2MAC connection. ++- snps,tso: this enables the TSO feature otherwise it will be managed by ++ MAC HW capability register. Only for GMAC4 and newer. + - AXI BUS Mode parameters: below the list of all the parameters to program the + AXI register inside the DMA module: + - snps,lpi_en: enable Low Power Interface +@@ -62,8 +70,6 @@ Optional properties: + - snps,fb: fixed-burst + - snps,mb: mixed-burst + - snps,rb: rebuild INCRx Burst +- - snps,tso: this enables the TSO feature otherwise it will be managed by +- MAC HW capability register. + - mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus. + + Examples: +--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig ++++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig +@@ -69,6 +69,17 @@ config DWMAC_MESON + the stmmac device driver. This driver is used for Meson6, + Meson8, Meson8b and GXBB SoCs. + ++config DWMAC_OXNAS ++ tristate "Oxford Semiconductor OXNAS dwmac support" ++ default ARCH_OXNAS ++ depends on OF && COMMON_CLK && (ARCH_OXNAS || COMPILE_TEST) ++ select MFD_SYSCON ++ help ++ Support for Ethernet controller on Oxford Semiconductor OXNAS SoCs. 
++ ++ This selects the Oxford Semiconductor OXNASSoC glue layer support for ++ the stmmac device driver. This driver is used for OX820. ++ + config DWMAC_ROCKCHIP + tristate "Rockchip dwmac support" + default ARCH_ROCKCHIP +--- a/drivers/net/ethernet/stmicro/stmmac/Makefile ++++ b/drivers/net/ethernet/stmicro/stmmac/Makefile +@@ -10,6 +10,7 @@ obj-$(CONFIG_STMMAC_PLATFORM) += stmmac- + obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o + obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o + obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o ++obj-$(CONFIG_DWMAC_OXNAS) += dwmac-oxnas.o + obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o + obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o + obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o +--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c ++++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +@@ -34,7 +34,7 @@ static int stmmac_jumbo_frm(void *p, str + unsigned int entry = priv->cur_tx; + struct dma_desc *desc = priv->dma_tx + entry; + unsigned int nopaged_len = skb_headlen(skb); +- unsigned int bmax; ++ unsigned int bmax, des2; + unsigned int i = 1, len; + + if (priv->plat->enh_desc) +@@ -44,11 +44,12 @@ static int stmmac_jumbo_frm(void *p, str + + len = nopaged_len - bmax; + +- desc->des2 = dma_map_single(priv->device, skb->data, +- bmax, DMA_TO_DEVICE); +- if (dma_mapping_error(priv->device, desc->des2)) ++ des2 = dma_map_single(priv->device, skb->data, ++ bmax, DMA_TO_DEVICE); ++ desc->des2 = cpu_to_le32(des2); ++ if (dma_mapping_error(priv->device, des2)) + return -1; +- priv->tx_skbuff_dma[entry].buf = desc->des2; ++ priv->tx_skbuff_dma[entry].buf = des2; + priv->tx_skbuff_dma[entry].len = bmax; + /* do not close the descriptor and do not set own bit */ + priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE, +@@ -60,12 +61,13 @@ static int stmmac_jumbo_frm(void *p, str + desc = priv->dma_tx + entry; + + if (len > bmax) { +- desc->des2 = dma_map_single(priv->device, +- (skb->data + bmax * i), +- bmax, DMA_TO_DEVICE); +- if (dma_mapping_error(priv->device, desc->des2)) ++ des2 = dma_map_single(priv->device, ++ (skb->data + bmax * i), ++ bmax, DMA_TO_DEVICE); ++ desc->des2 = cpu_to_le32(des2); ++ if (dma_mapping_error(priv->device, des2)) + return -1; +- priv->tx_skbuff_dma[entry].buf = desc->des2; ++ priv->tx_skbuff_dma[entry].buf = des2; + priv->tx_skbuff_dma[entry].len = bmax; + priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, + STMMAC_CHAIN_MODE, 1, +@@ -73,12 +75,13 @@ static int stmmac_jumbo_frm(void *p, str + len -= bmax; + i++; + } else { +- desc->des2 = dma_map_single(priv->device, +- (skb->data + bmax * i), len, +- DMA_TO_DEVICE); +- if (dma_mapping_error(priv->device, desc->des2)) ++ des2 = dma_map_single(priv->device, ++ (skb->data + bmax * i), len, ++ DMA_TO_DEVICE); ++ desc->des2 = cpu_to_le32(des2); ++ if (dma_mapping_error(priv->device, des2)) + return -1; +- priv->tx_skbuff_dma[entry].buf = desc->des2; ++ priv->tx_skbuff_dma[entry].buf = des2; + priv->tx_skbuff_dma[entry].len = len; + /* last descriptor can be set now */ + priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, +@@ -119,19 +122,19 @@ static void stmmac_init_dma_chain(void * + struct dma_extended_desc *p = (struct dma_extended_desc *)des; + for (i = 0; i < (size - 1); i++) { + dma_phy += sizeof(struct dma_extended_desc); +- p->basic.des3 = (unsigned int)dma_phy; ++ p->basic.des3 = cpu_to_le32((unsigned int)dma_phy); + p++; + } +- p->basic.des3 = (unsigned int)phy_addr; ++ p->basic.des3 = cpu_to_le32((unsigned int)phy_addr); + + } else { + struct 
dma_desc *p = (struct dma_desc *)des; + for (i = 0; i < (size - 1); i++) { + dma_phy += sizeof(struct dma_desc); +- p->des3 = (unsigned int)dma_phy; ++ p->des3 = cpu_to_le32((unsigned int)dma_phy); + p++; + } +- p->des3 = (unsigned int)phy_addr; ++ p->des3 = cpu_to_le32((unsigned int)phy_addr); + } + } + +@@ -144,10 +147,10 @@ static void stmmac_refill_desc3(void *pr + * 1588-2002 time stamping is enabled, hence reinitialize it + * to keep explicit chaining in the descriptor. + */ +- p->des3 = (unsigned int)(priv->dma_rx_phy + +- (((priv->dirty_rx) + 1) % +- DMA_RX_SIZE) * +- sizeof(struct dma_desc)); ++ p->des3 = cpu_to_le32((unsigned int)(priv->dma_rx_phy + ++ (((priv->dirty_rx) + 1) % ++ DMA_RX_SIZE) * ++ sizeof(struct dma_desc))); + } + + static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) +@@ -161,9 +164,9 @@ static void stmmac_clean_desc3(void *pri + * 1588-2002 time stamping is enabled, hence reinitialize it + * to keep explicit chaining in the descriptor. + */ +- p->des3 = (unsigned int)((priv->dma_tx_phy + +- ((priv->dirty_tx + 1) % DMA_TX_SIZE)) +- * sizeof(struct dma_desc)); ++ p->des3 = cpu_to_le32((unsigned int)((priv->dma_tx_phy + ++ ((priv->dirty_tx + 1) % DMA_TX_SIZE)) ++ * sizeof(struct dma_desc))); + } + + const struct stmmac_mode_ops chain_mode_ops = { +--- a/drivers/net/ethernet/stmicro/stmmac/common.h ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h +@@ -44,6 +44,7 @@ + #define DWMAC_CORE_4_00 0x40 + #define STMMAC_CHAN0 0 /* Always supported and default for all chips */ + ++/* These need to be power of two, and >= 4 */ + #define DMA_TX_SIZE 512 + #define DMA_RX_SIZE 512 + #define STMMAC_GET_ENTRY(x, size) ((x + 1) & (size - 1)) +@@ -411,8 +412,8 @@ extern const struct stmmac_desc_ops ndes + struct stmmac_dma_ops { + /* DMA core initialization */ + int (*reset)(void __iomem *ioaddr); +- void (*init)(void __iomem *ioaddr, int pbl, int fb, int mb, +- int aal, u32 dma_tx, u32 dma_rx, int atds); ++ void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, ++ u32 dma_tx, u32 dma_rx, int atds); + /* Configure the AXI Bus Mode Register */ + void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi); + /* Dump DMA registers */ +@@ -506,6 +507,12 @@ struct mac_link { + struct mii_regs { + unsigned int addr; /* MII Address */ + unsigned int data; /* MII Data */ ++ unsigned int addr_shift; /* MII address shift */ ++ unsigned int reg_shift; /* MII reg shift */ ++ unsigned int addr_mask; /* MII address mask */ ++ unsigned int reg_mask; /* MII reg mask */ ++ unsigned int clk_csr_shift; ++ unsigned int clk_csr_mask; + }; + + /* Helpers to manage the descriptors for chain and ring modes */ +--- a/drivers/net/ethernet/stmicro/stmmac/descs.h ++++ b/drivers/net/ethernet/stmicro/stmmac/descs.h +@@ -87,7 +87,7 @@ + #define TDES0_ERROR_SUMMARY BIT(15) + #define TDES0_IP_HEADER_ERROR BIT(16) + #define TDES0_TIME_STAMP_STATUS BIT(17) +-#define TDES0_OWN BIT(31) ++#define TDES0_OWN ((u32)BIT(31)) /* silence sparse */ + /* TDES1 */ + #define TDES1_BUFFER1_SIZE_MASK GENMASK(10, 0) + #define TDES1_BUFFER2_SIZE_MASK GENMASK(21, 11) +@@ -130,7 +130,7 @@ + #define ETDES0_FIRST_SEGMENT BIT(28) + #define ETDES0_LAST_SEGMENT BIT(29) + #define ETDES0_INTERRUPT BIT(30) +-#define ETDES0_OWN BIT(31) ++#define ETDES0_OWN ((u32)BIT(31)) /* silence sparse */ + /* TDES1 */ + #define ETDES1_BUFFER1_SIZE_MASK GENMASK(12, 0) + #define ETDES1_BUFFER2_SIZE_MASK GENMASK(28, 16) +@@ -170,19 +170,19 @@ + + /* Basic descriptor structure for normal and alternate descriptors */ + struct dma_desc 
{ +- unsigned int des0; +- unsigned int des1; +- unsigned int des2; +- unsigned int des3; ++ __le32 des0; ++ __le32 des1; ++ __le32 des2; ++ __le32 des3; + }; + + /* Extended descriptor structure (e.g. >= databook 3.50a) */ + struct dma_extended_desc { + struct dma_desc basic; /* Basic descriptors */ +- unsigned int des4; /* Extended Status */ +- unsigned int des5; /* Reserved */ +- unsigned int des6; /* Tx/Rx Timestamp Low */ +- unsigned int des7; /* Tx/Rx Timestamp High */ ++ __le32 des4; /* Extended Status */ ++ __le32 des5; /* Reserved */ ++ __le32 des6; /* Tx/Rx Timestamp Low */ ++ __le32 des7; /* Tx/Rx Timestamp High */ + }; + + /* Transmit checksum insertion control */ +--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h ++++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h +@@ -35,47 +35,50 @@ + /* Enhanced descriptors */ + static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end) + { +- p->des1 |= ((BUF_SIZE_8KiB - 1) << ERDES1_BUFFER2_SIZE_SHIFT) +- & ERDES1_BUFFER2_SIZE_MASK; ++ p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1) ++ << ERDES1_BUFFER2_SIZE_SHIFT) ++ & ERDES1_BUFFER2_SIZE_MASK); + + if (end) +- p->des1 |= ERDES1_END_RING; ++ p->des1 |= cpu_to_le32(ERDES1_END_RING); + } + + static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int end) + { + if (end) +- p->des0 |= ETDES0_END_RING; ++ p->des0 |= cpu_to_le32(ETDES0_END_RING); + else +- p->des0 &= ~ETDES0_END_RING; ++ p->des0 &= cpu_to_le32(~ETDES0_END_RING); + } + + static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len) + { + if (unlikely(len > BUF_SIZE_4KiB)) { +- p->des1 |= (((len - BUF_SIZE_4KiB) << ETDES1_BUFFER2_SIZE_SHIFT) ++ p->des1 |= cpu_to_le32((((len - BUF_SIZE_4KiB) ++ << ETDES1_BUFFER2_SIZE_SHIFT) + & ETDES1_BUFFER2_SIZE_MASK) | (BUF_SIZE_4KiB +- & ETDES1_BUFFER1_SIZE_MASK); ++ & ETDES1_BUFFER1_SIZE_MASK)); + } else +- p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK); ++ p->des1 |= cpu_to_le32((len & ETDES1_BUFFER1_SIZE_MASK)); + } + + /* Normal descriptors */ + static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end) + { +- p->des1 |= ((BUF_SIZE_2KiB - 1) << RDES1_BUFFER2_SIZE_SHIFT) +- & RDES1_BUFFER2_SIZE_MASK; ++ p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1) ++ << RDES1_BUFFER2_SIZE_SHIFT) ++ & RDES1_BUFFER2_SIZE_MASK); + + if (end) +- p->des1 |= RDES1_END_RING; ++ p->des1 |= cpu_to_le32(RDES1_END_RING); + } + + static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int end) + { + if (end) +- p->des1 |= TDES1_END_RING; ++ p->des1 |= cpu_to_le32(TDES1_END_RING); + else +- p->des1 &= ~TDES1_END_RING; ++ p->des1 &= cpu_to_le32(~TDES1_END_RING); + } + + static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len) +@@ -83,10 +86,11 @@ static inline void norm_set_tx_desc_len_ + if (unlikely(len > BUF_SIZE_2KiB)) { + unsigned int buffer1 = (BUF_SIZE_2KiB - 1) + & TDES1_BUFFER1_SIZE_MASK; +- p->des1 |= ((((len - buffer1) << TDES1_BUFFER2_SIZE_SHIFT) +- & TDES1_BUFFER2_SIZE_MASK) | buffer1); ++ p->des1 |= cpu_to_le32((((len - buffer1) ++ << TDES1_BUFFER2_SIZE_SHIFT) ++ & TDES1_BUFFER2_SIZE_MASK) | buffer1); + } else +- p->des1 |= (len & TDES1_BUFFER1_SIZE_MASK); ++ p->des1 |= cpu_to_le32((len & TDES1_BUFFER1_SIZE_MASK)); + } + + /* Specific functions used for Chain mode */ +@@ -94,32 +98,32 @@ static inline void norm_set_tx_desc_len_ + /* Enhanced descriptors */ + static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p) + { +- p->des1 |= ERDES1_SECOND_ADDRESS_CHAINED; ++ p->des1 |= cpu_to_le32(ERDES1_SECOND_ADDRESS_CHAINED); + } 
+ + static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p) + { +- p->des0 |= ETDES0_SECOND_ADDRESS_CHAINED; ++ p->des0 |= cpu_to_le32(ETDES0_SECOND_ADDRESS_CHAINED); + } + + static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len) + { +- p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK); ++ p->des1 |= cpu_to_le32(len & ETDES1_BUFFER1_SIZE_MASK); + } + + /* Normal descriptors */ + static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end) + { +- p->des1 |= RDES1_SECOND_ADDRESS_CHAINED; ++ p->des1 |= cpu_to_le32(RDES1_SECOND_ADDRESS_CHAINED); + } + + static inline void ndesc_tx_set_on_chain(struct dma_desc *p) + { +- p->des1 |= TDES1_SECOND_ADDRESS_CHAINED; ++ p->des1 |= cpu_to_le32(TDES1_SECOND_ADDRESS_CHAINED); + } + + static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len) + { +- p->des1 |= len & TDES1_BUFFER1_SIZE_MASK; ++ p->des1 |= cpu_to_le32(len & TDES1_BUFFER1_SIZE_MASK); + } + #endif /* __DESC_COM_H__ */ +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +@@ -71,9 +71,12 @@ err_remove_config_dt: + + static const struct of_device_id dwmac_generic_match[] = { + { .compatible = "st,spear600-gmac"}, ++ { .compatible = "snps,dwmac-3.50a"}, + { .compatible = "snps,dwmac-3.610"}, + { .compatible = "snps,dwmac-3.70a"}, + { .compatible = "snps,dwmac-3.710"}, ++ { .compatible = "snps,dwmac-4.00"}, ++ { .compatible = "snps,dwmac-4.10a"}, + { .compatible = "snps,dwmac"}, + { } + }; +--- /dev/null ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c +@@ -0,0 +1,194 @@ ++/* ++ * Oxford Semiconductor OXNAS DWMAC glue layer ++ * ++ * Copyright (C) 2016 Neil Armstrong ++ * Copyright (C) 2014 Daniel Golle ++ * Copyright (C) 2013 Ma Haijun ++ * Copyright (C) 2012 John Crispin ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "stmmac_platform.h" ++ ++/* System Control regmap offsets */ ++#define OXNAS_DWMAC_CTRL_REGOFFSET 0x78 ++#define OXNAS_DWMAC_DELAY_REGOFFSET 0x100 ++ ++/* Control Register */ ++#define DWMAC_CKEN_RX_IN 14 ++#define DWMAC_CKEN_RXN_OUT 13 ++#define DWMAC_CKEN_RX_OUT 12 ++#define DWMAC_CKEN_TX_IN 10 ++#define DWMAC_CKEN_TXN_OUT 9 ++#define DWMAC_CKEN_TX_OUT 8 ++#define DWMAC_RX_SOURCE 7 ++#define DWMAC_TX_SOURCE 6 ++#define DWMAC_LOW_TX_SOURCE 4 ++#define DWMAC_AUTO_TX_SOURCE 3 ++#define DWMAC_RGMII 2 ++#define DWMAC_SIMPLE_MUX 1 ++#define DWMAC_CKEN_GTX 0 ++ ++/* Delay register */ ++#define DWMAC_TX_VARDELAY_SHIFT 0 ++#define DWMAC_TXN_VARDELAY_SHIFT 8 ++#define DWMAC_RX_VARDELAY_SHIFT 16 ++#define DWMAC_RXN_VARDELAY_SHIFT 24 ++#define DWMAC_TX_VARDELAY(d) ((d) << DWMAC_TX_VARDELAY_SHIFT) ++#define DWMAC_TXN_VARDELAY(d) ((d) << DWMAC_TXN_VARDELAY_SHIFT) ++#define DWMAC_RX_VARDELAY(d) ((d) << DWMAC_RX_VARDELAY_SHIFT) ++#define DWMAC_RXN_VARDELAY(d) ((d) << DWMAC_RXN_VARDELAY_SHIFT) ++ ++struct oxnas_dwmac { ++ struct device *dev; ++ struct clk *clk; ++ struct regmap *regmap; ++}; ++ ++static int oxnas_dwmac_init(struct platform_device *pdev, void *priv) ++{ ++ struct oxnas_dwmac *dwmac = priv; ++ unsigned int value; ++ int ret; ++ ++ /* Reset HW here before changing the glue configuration */ ++ ret = device_reset(dwmac->dev); ++ if (ret) ++ return ret; ++ ++ ret = clk_prepare_enable(dwmac->clk); ++ if (ret) ++ return ret; ++ ++ ret = regmap_read(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, &value); ++ if (ret < 0) { ++ clk_disable_unprepare(dwmac->clk); ++ return ret; ++ } ++ ++ /* Enable GMII_GTXCLK to follow GMII_REFCLK, required for gigabit PHY */ ++ value |= BIT(DWMAC_CKEN_GTX) | ++ /* Use simple mux for 25/125 Mhz clock switching */ ++ BIT(DWMAC_SIMPLE_MUX) | ++ /* set auto switch tx clock source */ ++ BIT(DWMAC_AUTO_TX_SOURCE) | ++ /* enable tx & rx vardelay */ ++ BIT(DWMAC_CKEN_TX_OUT) | ++ BIT(DWMAC_CKEN_TXN_OUT) | ++ BIT(DWMAC_CKEN_TX_IN) | ++ BIT(DWMAC_CKEN_RX_OUT) | ++ BIT(DWMAC_CKEN_RXN_OUT) | ++ BIT(DWMAC_CKEN_RX_IN); ++ regmap_write(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, value); ++ ++ /* set tx & rx vardelay */ ++ value = DWMAC_TX_VARDELAY(4) | ++ DWMAC_TXN_VARDELAY(2) | ++ DWMAC_RX_VARDELAY(10) | ++ DWMAC_RXN_VARDELAY(8); ++ regmap_write(dwmac->regmap, OXNAS_DWMAC_DELAY_REGOFFSET, value); ++ ++ return 0; ++} ++ ++static void oxnas_dwmac_exit(struct platform_device *pdev, void *priv) ++{ ++ struct oxnas_dwmac *dwmac = priv; ++ ++ clk_disable_unprepare(dwmac->clk); ++} ++ ++static int oxnas_dwmac_probe(struct platform_device *pdev) ++{ ++ struct plat_stmmacenet_data *plat_dat; ++ struct stmmac_resources stmmac_res; ++ struct oxnas_dwmac *dwmac; ++ int ret; ++ ++ ret = stmmac_get_platform_resources(pdev, &stmmac_res); ++ if (ret) ++ return ret; ++ ++ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); ++ if (IS_ERR(plat_dat)) ++ return PTR_ERR(plat_dat); ++ ++ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); ++ if (!dwmac) { ++ ret = -ENOMEM; ++ goto err_remove_config_dt; ++ } ++ ++ dwmac->dev = &pdev->dev; ++ plat_dat->bsp_priv = dwmac; ++ plat_dat->init = oxnas_dwmac_init; ++ plat_dat->exit = oxnas_dwmac_exit; ++ ++ dwmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, ++ "oxsemi,sys-ctrl"); ++ if (IS_ERR(dwmac->regmap)) { ++ dev_err(&pdev->dev, "failed to have sysctrl regmap\n"); ++ ret = PTR_ERR(dwmac->regmap); ++ goto 
err_remove_config_dt; ++ } ++ ++ dwmac->clk = devm_clk_get(&pdev->dev, "gmac"); ++ if (IS_ERR(dwmac->clk)) { ++ ret = PTR_ERR(dwmac->clk); ++ goto err_remove_config_dt; ++ } ++ ++ ret = oxnas_dwmac_init(pdev, plat_dat->bsp_priv); ++ if (ret) ++ goto err_remove_config_dt; ++ ++ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); ++ if (ret) ++ goto err_dwmac_exit; ++ ++ ++ return 0; ++ ++err_dwmac_exit: ++ oxnas_dwmac_exit(pdev, plat_dat->bsp_priv); ++err_remove_config_dt: ++ stmmac_remove_config_dt(pdev, plat_dat); ++ ++ return ret; ++} ++ ++static const struct of_device_id oxnas_dwmac_match[] = { ++ { .compatible = "oxsemi,ox820-dwmac" }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, oxnas_dwmac_match); ++ ++static struct platform_driver oxnas_dwmac_driver = { ++ .probe = oxnas_dwmac_probe, ++ .remove = stmmac_pltfr_remove, ++ .driver = { ++ .name = "oxnas-dwmac", ++ .pm = &stmmac_pltfr_pm_ops, ++ .of_match_table = oxnas_dwmac_match, ++ }, ++}; ++module_platform_driver(oxnas_dwmac_driver); ++ ++MODULE_AUTHOR("Neil Armstrong "); ++MODULE_DESCRIPTION("Oxford Semiconductor OXNAS DWMAC glue layer"); ++MODULE_LICENSE("GPL v2"); +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +@@ -864,6 +864,10 @@ static int rk_gmac_powerup(struct rk_pri + int ret; + struct device *dev = &bsp_priv->pdev->dev; + ++ ret = gmac_clk_enable(bsp_priv, true); ++ if (ret) ++ return ret; ++ + /*rmii or rgmii*/ + if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) { + dev_info(dev, "init for RGMII\n"); +@@ -880,10 +884,6 @@ static int rk_gmac_powerup(struct rk_pri + if (ret) + return ret; + +- ret = gmac_clk_enable(bsp_priv, true); +- if (ret) +- return ret; +- + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); + +@@ -901,44 +901,6 @@ static void rk_gmac_powerdown(struct rk_ + gmac_clk_enable(gmac, false); + } + +-static int rk_gmac_init(struct platform_device *pdev, void *priv) +-{ +- struct rk_priv_data *bsp_priv = priv; +- +- return rk_gmac_powerup(bsp_priv); +-} +- +-static void rk_gmac_exit(struct platform_device *pdev, void *priv) +-{ +- struct rk_priv_data *bsp_priv = priv; +- +- rk_gmac_powerdown(bsp_priv); +-} +- +-static void rk_gmac_suspend(struct platform_device *pdev, void *priv) +-{ +- struct rk_priv_data *bsp_priv = priv; +- +- /* Keep the PHY up if we use Wake-on-Lan. */ +- if (device_may_wakeup(&pdev->dev)) +- return; +- +- rk_gmac_powerdown(bsp_priv); +- bsp_priv->suspended = true; +-} +- +-static void rk_gmac_resume(struct platform_device *pdev, void *priv) +-{ +- struct rk_priv_data *bsp_priv = priv; +- +- /* The PHY was up for Wake-on-Lan. 
*/ +- if (!bsp_priv->suspended) +- return; +- +- rk_gmac_powerup(bsp_priv); +- bsp_priv->suspended = false; +-} +- + static void rk_fix_speed(void *priv, unsigned int speed) + { + struct rk_priv_data *bsp_priv = priv; +@@ -974,11 +936,7 @@ static int rk_gmac_probe(struct platform + return PTR_ERR(plat_dat); + + plat_dat->has_gmac = true; +- plat_dat->init = rk_gmac_init; +- plat_dat->exit = rk_gmac_exit; + plat_dat->fix_mac_speed = rk_fix_speed; +- plat_dat->suspend = rk_gmac_suspend; +- plat_dat->resume = rk_gmac_resume; + + plat_dat->bsp_priv = rk_gmac_setup(pdev, data); + if (IS_ERR(plat_dat->bsp_priv)) { +@@ -986,24 +944,65 @@ static int rk_gmac_probe(struct platform + goto err_remove_config_dt; + } + +- ret = rk_gmac_init(pdev, plat_dat->bsp_priv); ++ ret = rk_gmac_powerup(plat_dat->bsp_priv); + if (ret) + goto err_remove_config_dt; + + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) +- goto err_gmac_exit; ++ goto err_gmac_powerdown; + + return 0; + +-err_gmac_exit: +- rk_gmac_exit(pdev, plat_dat->bsp_priv); ++err_gmac_powerdown: ++ rk_gmac_powerdown(plat_dat->bsp_priv); + err_remove_config_dt: + stmmac_remove_config_dt(pdev, plat_dat); + + return ret; + } + ++static int rk_gmac_remove(struct platform_device *pdev) ++{ ++ struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(&pdev->dev); ++ int ret = stmmac_dvr_remove(&pdev->dev); ++ ++ rk_gmac_powerdown(bsp_priv); ++ ++ return ret; ++} ++ ++#ifdef CONFIG_PM_SLEEP ++static int rk_gmac_suspend(struct device *dev) ++{ ++ struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(dev); ++ int ret = stmmac_suspend(dev); ++ ++ /* Keep the PHY up if we use Wake-on-Lan. */ ++ if (!device_may_wakeup(dev)) { ++ rk_gmac_powerdown(bsp_priv); ++ bsp_priv->suspended = true; ++ } ++ ++ return ret; ++} ++ ++static int rk_gmac_resume(struct device *dev) ++{ ++ struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(dev); ++ ++ /* The PHY was up for Wake-on-Lan. */ ++ if (bsp_priv->suspended) { ++ rk_gmac_powerup(bsp_priv); ++ bsp_priv->suspended = false; ++ } ++ ++ return stmmac_resume(dev); ++} ++#endif /* CONFIG_PM_SLEEP */ ++ ++static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume); ++ + static const struct of_device_id rk_gmac_dwmac_match[] = { + { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops }, + { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops }, +@@ -1016,10 +1015,10 @@ MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_ma + + static struct platform_driver rk_gmac_dwmac_driver = { + .probe = rk_gmac_probe, +- .remove = stmmac_pltfr_remove, ++ .remove = rk_gmac_remove, + .driver = { + .name = "rk_gmac-dwmac", +- .pm = &stmmac_pltfr_pm_ops, ++ .pm = &rk_gmac_pm_ops, + .of_match_table = rk_gmac_dwmac_match, + }, + }; +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +@@ -380,8 +380,8 @@ static int socfpga_dwmac_resume(struct d + * control register 0, and can be modified by the phy driver + * framework. 
+ */ +- if (priv->phydev) +- phy_resume(priv->phydev); ++ if (ndev->phydev) ++ phy_resume(ndev->phydev); + + return stmmac_resume(dev); + } +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +@@ -126,8 +126,8 @@ struct sti_dwmac { + struct clk *clk; /* PHY clock */ + u32 ctrl_reg; /* GMAC glue-logic control register */ + int clk_sel_reg; /* GMAC ext clk selection register */ +- struct device *dev; + struct regmap *regmap; ++ bool gmac_en; + u32 speed; + void (*fix_retime_src)(void *priv, unsigned int speed); + }; +@@ -191,7 +191,7 @@ static void stih4xx_fix_retime_src(void + } + } + +- if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk && freq) ++ if (src == TX_RETIME_SRC_CLKGEN && freq) + clk_set_rate(dwmac->clk, freq); + + regmap_update_bits(dwmac->regmap, reg, STIH4XX_RETIME_SRC_MASK, +@@ -222,26 +222,20 @@ static void stid127_fix_retime_src(void + freq = DWMAC_2_5MHZ; + } + +- if (dwmac->clk && freq) ++ if (freq) + clk_set_rate(dwmac->clk, freq); + + regmap_update_bits(dwmac->regmap, reg, STID127_RETIME_SRC_MASK, val); + } + +-static int sti_dwmac_init(struct platform_device *pdev, void *priv) ++static int sti_dwmac_set_mode(struct sti_dwmac *dwmac) + { +- struct sti_dwmac *dwmac = priv; + struct regmap *regmap = dwmac->regmap; + int iface = dwmac->interface; +- struct device *dev = dwmac->dev; +- struct device_node *np = dev->of_node; + u32 reg = dwmac->ctrl_reg; + u32 val; + +- if (dwmac->clk) +- clk_prepare_enable(dwmac->clk); +- +- if (of_property_read_bool(np, "st,gmac_en")) ++ if (dwmac->gmac_en) + regmap_update_bits(regmap, reg, EN_MASK, EN); + + regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]); +@@ -249,18 +243,11 @@ static int sti_dwmac_init(struct platfor + val = (iface == PHY_INTERFACE_MODE_REVMII) ? 
0 : ENMII; + regmap_update_bits(regmap, reg, ENMII_MASK, val); + +- dwmac->fix_retime_src(priv, dwmac->speed); ++ dwmac->fix_retime_src(dwmac, dwmac->speed); + + return 0; + } + +-static void sti_dwmac_exit(struct platform_device *pdev, void *priv) +-{ +- struct sti_dwmac *dwmac = priv; +- +- if (dwmac->clk) +- clk_disable_unprepare(dwmac->clk); +-} + static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, + struct platform_device *pdev) + { +@@ -270,9 +257,6 @@ static int sti_dwmac_parse_data(struct s + struct regmap *regmap; + int err; + +- if (!np) +- return -EINVAL; +- + /* clk selection from extra syscfg register */ + dwmac->clk_sel_reg = -ENXIO; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-clkconf"); +@@ -289,9 +273,9 @@ static int sti_dwmac_parse_data(struct s + return err; + } + +- dwmac->dev = dev; + dwmac->interface = of_get_phy_mode(np); + dwmac->regmap = regmap; ++ dwmac->gmac_en = of_property_read_bool(np, "st,gmac_en"); + dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk"); + dwmac->tx_retime_src = TX_RETIME_SRC_NA; + dwmac->speed = SPEED_100; +@@ -359,28 +343,65 @@ static int sti_dwmac_probe(struct platfo + dwmac->fix_retime_src = data->fix_retime_src; + + plat_dat->bsp_priv = dwmac; +- plat_dat->init = sti_dwmac_init; +- plat_dat->exit = sti_dwmac_exit; + plat_dat->fix_mac_speed = data->fix_retime_src; + +- ret = sti_dwmac_init(pdev, plat_dat->bsp_priv); ++ ret = clk_prepare_enable(dwmac->clk); + if (ret) + goto err_remove_config_dt; + ++ ret = sti_dwmac_set_mode(dwmac); ++ if (ret) ++ goto disable_clk; ++ + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) +- goto err_dwmac_exit; ++ goto disable_clk; + + return 0; + +-err_dwmac_exit: +- sti_dwmac_exit(pdev, plat_dat->bsp_priv); ++disable_clk: ++ clk_disable_unprepare(dwmac->clk); + err_remove_config_dt: + stmmac_remove_config_dt(pdev, plat_dat); + + return ret; + } + ++static int sti_dwmac_remove(struct platform_device *pdev) ++{ ++ struct sti_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev); ++ int ret = stmmac_dvr_remove(&pdev->dev); ++ ++ clk_disable_unprepare(dwmac->clk); ++ ++ return ret; ++} ++ ++#ifdef CONFIG_PM_SLEEP ++static int sti_dwmac_suspend(struct device *dev) ++{ ++ struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev); ++ int ret = stmmac_suspend(dev); ++ ++ clk_disable_unprepare(dwmac->clk); ++ ++ return ret; ++} ++ ++static int sti_dwmac_resume(struct device *dev) ++{ ++ struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev); ++ ++ clk_prepare_enable(dwmac->clk); ++ sti_dwmac_set_mode(dwmac); ++ ++ return stmmac_resume(dev); ++} ++#endif /* CONFIG_PM_SLEEP */ ++ ++static SIMPLE_DEV_PM_OPS(sti_dwmac_pm_ops, sti_dwmac_suspend, ++ sti_dwmac_resume); ++ + static const struct sti_dwmac_of_data stih4xx_dwmac_data = { + .fix_retime_src = stih4xx_fix_retime_src, + }; +@@ -400,10 +421,10 @@ MODULE_DEVICE_TABLE(of, sti_dwmac_match) + + static struct platform_driver sti_dwmac_driver = { + .probe = sti_dwmac_probe, +- .remove = stmmac_pltfr_remove, ++ .remove = sti_dwmac_remove, + .driver = { + .name = "sti-dwmac", +- .pm = &stmmac_pltfr_pm_ops, ++ .pm = &sti_dwmac_pm_ops, + .of_match_table = sti_dwmac_match, + }, + }; +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +@@ -225,7 +225,7 @@ enum rx_tx_priority_ratio { + + #define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */ + #define DMA_BUS_MODE_MB 0x04000000 /* Mixed burst */ +-#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */ ++#define 
DMA_BUS_MODE_RPBL_MASK 0x007e0000 /* Rx-Programmable Burst Len */ + #define DMA_BUS_MODE_RPBL_SHIFT 17 + #define DMA_BUS_MODE_USP 0x00800000 + #define DMA_BUS_MODE_MAXPBL 0x01000000 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +@@ -538,6 +538,12 @@ struct mac_device_info *dwmac1000_setup( + mac->link.speed = GMAC_CONTROL_FES; + mac->mii.addr = GMAC_MII_ADDR; + mac->mii.data = GMAC_MII_DATA; ++ mac->mii.addr_shift = 11; ++ mac->mii.addr_mask = 0x0000F800; ++ mac->mii.reg_shift = 6; ++ mac->mii.reg_mask = 0x000007C0; ++ mac->mii.clk_csr_shift = 2; ++ mac->mii.clk_csr_mask = GENMASK(5, 2); + + /* Get and dump the chip ID */ + *synopsys_id = stmmac_get_synopsys_id(hwid); +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +@@ -84,37 +84,39 @@ static void dwmac1000_dma_axi(void __iom + writel(value, ioaddr + DMA_AXI_BUS_MODE); + } + +-static void dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, +- int aal, u32 dma_tx, u32 dma_rx, int atds) ++static void dwmac1000_dma_init(void __iomem *ioaddr, ++ struct stmmac_dma_cfg *dma_cfg, ++ u32 dma_tx, u32 dma_rx, int atds) + { + u32 value = readl(ioaddr + DMA_BUS_MODE); ++ int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; ++ int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl; + + /* + * Set the DMA PBL (Programmable Burst Length) mode. + * + * Note: before stmmac core 3.50 this mode bit was 4xPBL, and + * post 3.5 mode bit acts as 8*PBL. +- * +- * This configuration doesn't take care about the Separate PBL +- * so only the bits: 13-8 are programmed with the PBL passed from the +- * platform. + */ +- value |= DMA_BUS_MODE_MAXPBL; +- value &= ~DMA_BUS_MODE_PBL_MASK; +- value |= (pbl << DMA_BUS_MODE_PBL_SHIFT); ++ if (dma_cfg->pblx8) ++ value |= DMA_BUS_MODE_MAXPBL; ++ value |= DMA_BUS_MODE_USP; ++ value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK); ++ value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT); ++ value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT); + + /* Set the Fixed burst mode */ +- if (fb) ++ if (dma_cfg->fixed_burst) + value |= DMA_BUS_MODE_FB; + + /* Mixed Burst has no effect when fb is set */ +- if (mb) ++ if (dma_cfg->mixed_burst) + value |= DMA_BUS_MODE_MB; + + if (atds) + value |= DMA_BUS_MODE_ATDS; + +- if (aal) ++ if (dma_cfg->aal) + value |= DMA_BUS_MODE_AAL; + + writel(value, ioaddr + DMA_BUS_MODE); +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +@@ -192,6 +192,13 @@ struct mac_device_info *dwmac100_setup(v + mac->link.speed = 0; + mac->mii.addr = MAC_MII_ADDR; + mac->mii.data = MAC_MII_DATA; ++ mac->mii.addr_shift = 11; ++ mac->mii.addr_mask = 0x0000F800; ++ mac->mii.reg_shift = 6; ++ mac->mii.reg_mask = 0x000007C0; ++ mac->mii.clk_csr_shift = 2; ++ mac->mii.clk_csr_mask = GENMASK(5, 2); ++ + /* Synopsys Id is not available on old chips */ + *synopsys_id = 0; + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +@@ -32,11 +32,12 @@ + #include "dwmac100.h" + #include "dwmac_dma.h" + +-static void dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, +- int aal, u32 dma_tx, u32 dma_rx, int atds) ++static void dwmac100_dma_init(void __iomem *ioaddr, ++ struct stmmac_dma_cfg *dma_cfg, ++ u32 dma_tx, u32 dma_rx, int atds) + { + /* Enable Application Access by writing to DMA CSR0 */ +- writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT), ++ 
writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT), + ioaddr + DMA_BUS_MODE); + + /* Mask interrupts by writing to CSR7 */ +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +@@ -155,8 +155,11 @@ enum power_event { + #define MTL_CHAN_RX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x38) + + #define MTL_OP_MODE_RSF BIT(5) ++#define MTL_OP_MODE_TXQEN BIT(3) + #define MTL_OP_MODE_TSF BIT(1) + ++#define MTL_OP_MODE_TQS_MASK GENMASK(24, 16) ++ + #define MTL_OP_MODE_TTC_MASK 0x70 + #define MTL_OP_MODE_TTC_SHIFT 4 + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +@@ -430,6 +430,12 @@ struct mac_device_info *dwmac4_setup(voi + mac->link.speed = GMAC_CONFIG_FES; + mac->mii.addr = GMAC_MDIO_ADDR; + mac->mii.data = GMAC_MDIO_DATA; ++ mac->mii.addr_shift = 21; ++ mac->mii.addr_mask = GENMASK(25, 21); ++ mac->mii.reg_shift = 16; ++ mac->mii.reg_mask = GENMASK(20, 16); ++ mac->mii.clk_csr_shift = 8; ++ mac->mii.clk_csr_mask = GENMASK(11, 8); + + /* Get and dump the chip ID */ + *synopsys_id = stmmac_get_synopsys_id(hwid); +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +@@ -23,7 +23,7 @@ static int dwmac4_wrback_get_tx_status(v + unsigned int tdes3; + int ret = tx_done; + +- tdes3 = p->des3; ++ tdes3 = le32_to_cpu(p->des3); + + /* Get tx owner first */ + if (unlikely(tdes3 & TDES3_OWN)) +@@ -77,9 +77,9 @@ static int dwmac4_wrback_get_rx_status(v + struct dma_desc *p) + { + struct net_device_stats *stats = (struct net_device_stats *)data; +- unsigned int rdes1 = p->des1; +- unsigned int rdes2 = p->des2; +- unsigned int rdes3 = p->des3; ++ unsigned int rdes1 = le32_to_cpu(p->des1); ++ unsigned int rdes2 = le32_to_cpu(p->des2); ++ unsigned int rdes3 = le32_to_cpu(p->des3); + int message_type; + int ret = good_frame; + +@@ -176,47 +176,48 @@ static int dwmac4_wrback_get_rx_status(v + + static int dwmac4_rd_get_tx_len(struct dma_desc *p) + { +- return (p->des2 & TDES2_BUFFER1_SIZE_MASK); ++ return (le32_to_cpu(p->des2) & TDES2_BUFFER1_SIZE_MASK); + } + + static int dwmac4_get_tx_owner(struct dma_desc *p) + { +- return (p->des3 & TDES3_OWN) >> TDES3_OWN_SHIFT; ++ return (le32_to_cpu(p->des3) & TDES3_OWN) >> TDES3_OWN_SHIFT; + } + + static void dwmac4_set_tx_owner(struct dma_desc *p) + { +- p->des3 |= TDES3_OWN; ++ p->des3 |= cpu_to_le32(TDES3_OWN); + } + + static void dwmac4_set_rx_owner(struct dma_desc *p) + { +- p->des3 |= RDES3_OWN; ++ p->des3 |= cpu_to_le32(RDES3_OWN); + } + + static int dwmac4_get_tx_ls(struct dma_desc *p) + { +- return (p->des3 & TDES3_LAST_DESCRIPTOR) >> TDES3_LAST_DESCRIPTOR_SHIFT; ++ return (le32_to_cpu(p->des3) & TDES3_LAST_DESCRIPTOR) ++ >> TDES3_LAST_DESCRIPTOR_SHIFT; + } + + static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe) + { +- return (p->des3 & RDES3_PACKET_SIZE_MASK); ++ return (le32_to_cpu(p->des3) & RDES3_PACKET_SIZE_MASK); + } + + static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p) + { +- p->des2 |= TDES2_TIMESTAMP_ENABLE; ++ p->des2 |= cpu_to_le32(TDES2_TIMESTAMP_ENABLE); + } + + static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p) + { + /* Context type from W/B descriptor must be zero */ +- if (p->des3 & TDES3_CONTEXT_TYPE) ++ if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE) + return -EINVAL; + + /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */ +- if (p->des3 & TDES3_TIMESTAMP_STATUS) ++ if 
(le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS) + return 0; + + return 1; +@@ -227,9 +228,9 @@ static inline u64 dwmac4_get_timestamp(v + struct dma_desc *p = (struct dma_desc *)desc; + u64 ns; + +- ns = p->des0; ++ ns = le32_to_cpu(p->des0); + /* convert high/sec time stamp value to nanosecond */ +- ns += p->des1 * 1000000000ULL; ++ ns += le32_to_cpu(p->des1) * 1000000000ULL; + + return ns; + } +@@ -264,7 +265,7 @@ static int dwmac4_wrback_get_rx_timestam + + /* Get the status from normal w/b descriptor */ + if (likely(p->des3 & TDES3_RS1V)) { +- if (likely(p->des1 & RDES1_TIMESTAMP_AVAILABLE)) { ++ if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) { + int i = 0; + + /* Check if timestamp is OK from context descriptor */ +@@ -287,10 +288,10 @@ exit: + static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, + int mode, int end) + { +- p->des3 = RDES3_OWN | RDES3_BUFFER1_VALID_ADDR; ++ p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR); + + if (!disable_rx_ic) +- p->des3 |= RDES3_INT_ON_COMPLETION_EN; ++ p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN); + } + + static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end) +@@ -305,9 +306,9 @@ static void dwmac4_rd_prepare_tx_desc(st + bool csum_flag, int mode, bool tx_own, + bool ls) + { +- unsigned int tdes3 = p->des3; ++ unsigned int tdes3 = le32_to_cpu(p->des3); + +- p->des2 |= (len & TDES2_BUFFER1_SIZE_MASK); ++ p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK); + + if (is_fs) + tdes3 |= TDES3_FIRST_DESCRIPTOR; +@@ -333,9 +334,9 @@ static void dwmac4_rd_prepare_tx_desc(st + * descriptors for the same frame has to be set before, to + * avoid race condition. + */ +- wmb(); ++ dma_wmb(); + +- p->des3 = tdes3; ++ p->des3 = cpu_to_le32(tdes3); + } + + static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs, +@@ -343,14 +344,14 @@ static void dwmac4_rd_prepare_tso_tx_des + bool ls, unsigned int tcphdrlen, + unsigned int tcppayloadlen) + { +- unsigned int tdes3 = p->des3; ++ unsigned int tdes3 = le32_to_cpu(p->des3); + + if (len1) +- p->des2 |= (len1 & TDES2_BUFFER1_SIZE_MASK); ++ p->des2 |= cpu_to_le32((len1 & TDES2_BUFFER1_SIZE_MASK)); + + if (len2) +- p->des2 |= (len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT) +- & TDES2_BUFFER2_SIZE_MASK; ++ p->des2 |= cpu_to_le32((len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT) ++ & TDES2_BUFFER2_SIZE_MASK); + + if (is_fs) { + tdes3 |= TDES3_FIRST_DESCRIPTOR | +@@ -376,9 +377,9 @@ static void dwmac4_rd_prepare_tso_tx_des + * descriptors for the same frame has to be set before, to + * avoid race condition. 
+ */ +- wmb(); ++ dma_wmb(); + +- p->des3 = tdes3; ++ p->des3 = cpu_to_le32(tdes3); + } + + static void dwmac4_release_tx_desc(struct dma_desc *p, int mode) +@@ -389,7 +390,7 @@ static void dwmac4_release_tx_desc(struc + + static void dwmac4_rd_set_tx_ic(struct dma_desc *p) + { +- p->des2 |= TDES2_INTERRUPT_ON_COMPLETION; ++ p->des2 |= cpu_to_le32(TDES2_INTERRUPT_ON_COMPLETION); + } + + static void dwmac4_display_ring(void *head, unsigned int size, bool rx) +@@ -402,7 +403,8 @@ static void dwmac4_display_ring(void *he + for (i = 0; i < size; i++) { + pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", + i, (unsigned int)virt_to_phys(p), +- p->des0, p->des1, p->des2, p->des3); ++ le32_to_cpu(p->des0), le32_to_cpu(p->des1), ++ le32_to_cpu(p->des2), le32_to_cpu(p->des3)); + p++; + } + } +@@ -411,8 +413,8 @@ static void dwmac4_set_mss_ctxt(struct d + { + p->des0 = 0; + p->des1 = 0; +- p->des2 = mss; +- p->des3 = TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV; ++ p->des2 = cpu_to_le32(mss); ++ p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV); + } + + const struct stmmac_desc_ops dwmac4_desc_ops = { +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +@@ -71,25 +71,29 @@ static void dwmac4_dma_axi(void __iomem + writel(value, ioaddr + DMA_SYS_BUS_MODE); + } + +-static void dwmac4_dma_init_channel(void __iomem *ioaddr, int pbl, ++static void dwmac4_dma_init_channel(void __iomem *ioaddr, ++ struct stmmac_dma_cfg *dma_cfg, + u32 dma_tx_phy, u32 dma_rx_phy, + u32 channel) + { + u32 value; ++ int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; ++ int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl; + + /* set PBL for each channels. Currently we affect same configuration + * on each channel + */ + value = readl(ioaddr + DMA_CHAN_CONTROL(channel)); +- value = value | DMA_BUS_MODE_PBL; ++ if (dma_cfg->pblx8) ++ value = value | DMA_BUS_MODE_PBL; + writel(value, ioaddr + DMA_CHAN_CONTROL(channel)); + + value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel)); +- value = value | (pbl << DMA_BUS_MODE_PBL_SHIFT); ++ value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT); + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel)); + + value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel)); +- value = value | (pbl << DMA_BUS_MODE_RPBL_SHIFT); ++ value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT); + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel)); + + /* Mask interrupts by writing to CSR7 */ +@@ -99,27 +103,28 @@ static void dwmac4_dma_init_channel(void + writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel)); + } + +-static void dwmac4_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, +- int aal, u32 dma_tx, u32 dma_rx, int atds) ++static void dwmac4_dma_init(void __iomem *ioaddr, ++ struct stmmac_dma_cfg *dma_cfg, ++ u32 dma_tx, u32 dma_rx, int atds) + { + u32 value = readl(ioaddr + DMA_SYS_BUS_MODE); + int i; + + /* Set the Fixed burst mode */ +- if (fb) ++ if (dma_cfg->fixed_burst) + value |= DMA_SYS_BUS_FB; + + /* Mixed Burst has no effect when fb is set */ +- if (mb) ++ if (dma_cfg->mixed_burst) + value |= DMA_SYS_BUS_MB; + +- if (aal) ++ if (dma_cfg->aal) + value |= DMA_SYS_BUS_AAL; + + writel(value, ioaddr + DMA_SYS_BUS_MODE); + + for (i = 0; i < DMA_CHANNEL_NB_MAX; i++) +- dwmac4_dma_init_channel(ioaddr, pbl, dma_tx, dma_rx, i); ++ dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i); + } + + static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel) +@@ -215,7 +220,17 @@ static void dwmac4_dma_chan_op_mode(void + else + mtl_tx_op |= 
MTL_OP_MODE_TTC_512; + } +- ++ /* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO ++ * with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE. ++ * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W ++ * with reset values: TXQEN off, TQS 256 bytes. ++ * ++ * Write the bits in both cases, since it will have no effect when RO. ++ * For DWC_EQOS_NUM_TXQ > 1, the top bits in MTL_OP_MODE_TQS_MASK might ++ * be RO, however, writing the whole TQS field will result in a value ++ * equal to DWC_EQOS_TXFIFO_SIZE, just like for DWC_EQOS_NUM_TXQ == 1. ++ */ ++ mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK; + writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel)); + + mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel)); +--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c ++++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +@@ -30,7 +30,7 @@ static int enh_desc_get_tx_status(void * + struct dma_desc *p, void __iomem *ioaddr) + { + struct net_device_stats *stats = (struct net_device_stats *)data; +- unsigned int tdes0 = p->des0; ++ unsigned int tdes0 = le32_to_cpu(p->des0); + int ret = tx_done; + + /* Get tx owner first */ +@@ -95,7 +95,7 @@ static int enh_desc_get_tx_status(void * + + static int enh_desc_get_tx_len(struct dma_desc *p) + { +- return (p->des1 & ETDES1_BUFFER1_SIZE_MASK); ++ return (le32_to_cpu(p->des1) & ETDES1_BUFFER1_SIZE_MASK); + } + + static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err) +@@ -134,8 +134,8 @@ static int enh_desc_coe_rdes0(int ipc_er + static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x, + struct dma_extended_desc *p) + { +- unsigned int rdes0 = p->basic.des0; +- unsigned int rdes4 = p->des4; ++ unsigned int rdes0 = le32_to_cpu(p->basic.des0); ++ unsigned int rdes4 = le32_to_cpu(p->des4); + + if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) { + int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8; +@@ -199,7 +199,7 @@ static int enh_desc_get_rx_status(void * + struct dma_desc *p) + { + struct net_device_stats *stats = (struct net_device_stats *)data; +- unsigned int rdes0 = p->des0; ++ unsigned int rdes0 = le32_to_cpu(p->des0); + int ret = good_frame; + + if (unlikely(rdes0 & RDES0_OWN)) +@@ -265,8 +265,8 @@ static int enh_desc_get_rx_status(void * + static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, + int mode, int end) + { +- p->des0 |= RDES0_OWN; +- p->des1 |= ((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK); ++ p->des0 |= cpu_to_le32(RDES0_OWN); ++ p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK); + + if (mode == STMMAC_CHAIN_MODE) + ehn_desc_rx_set_on_chain(p); +@@ -274,12 +274,12 @@ static void enh_desc_init_rx_desc(struct + ehn_desc_rx_set_on_ring(p, end); + + if (disable_rx_ic) +- p->des1 |= ERDES1_DISABLE_IC; ++ p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC); + } + + static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end) + { +- p->des0 &= ~ETDES0_OWN; ++ p->des0 &= cpu_to_le32(~ETDES0_OWN); + if (mode == STMMAC_CHAIN_MODE) + enh_desc_end_tx_desc_on_chain(p); + else +@@ -288,27 +288,27 @@ static void enh_desc_init_tx_desc(struct + + static int enh_desc_get_tx_owner(struct dma_desc *p) + { +- return (p->des0 & ETDES0_OWN) >> 31; ++ return (le32_to_cpu(p->des0) & ETDES0_OWN) >> 31; + } + + static void enh_desc_set_tx_owner(struct dma_desc *p) + { +- p->des0 |= ETDES0_OWN; ++ p->des0 |= cpu_to_le32(ETDES0_OWN); + } + + static void enh_desc_set_rx_owner(struct dma_desc *p) + { +- p->des0 |= RDES0_OWN; ++ p->des0 
|= cpu_to_le32(RDES0_OWN); + } + + static int enh_desc_get_tx_ls(struct dma_desc *p) + { +- return (p->des0 & ETDES0_LAST_SEGMENT) >> 29; ++ return (le32_to_cpu(p->des0) & ETDES0_LAST_SEGMENT) >> 29; + } + + static void enh_desc_release_tx_desc(struct dma_desc *p, int mode) + { +- int ter = (p->des0 & ETDES0_END_RING) >> 21; ++ int ter = (le32_to_cpu(p->des0) & ETDES0_END_RING) >> 21; + + memset(p, 0, offsetof(struct dma_desc, des2)); + if (mode == STMMAC_CHAIN_MODE) +@@ -321,7 +321,7 @@ static void enh_desc_prepare_tx_desc(str + bool csum_flag, int mode, bool tx_own, + bool ls) + { +- unsigned int tdes0 = p->des0; ++ unsigned int tdes0 = le32_to_cpu(p->des0); + + if (mode == STMMAC_CHAIN_MODE) + enh_set_tx_desc_len_on_chain(p, len); +@@ -350,14 +350,14 @@ static void enh_desc_prepare_tx_desc(str + * descriptors for the same frame has to be set before, to + * avoid race condition. + */ +- wmb(); ++ dma_wmb(); + +- p->des0 = tdes0; ++ p->des0 = cpu_to_le32(tdes0); + } + + static void enh_desc_set_tx_ic(struct dma_desc *p) + { +- p->des0 |= ETDES0_INTERRUPT; ++ p->des0 |= cpu_to_le32(ETDES0_INTERRUPT); + } + + static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type) +@@ -372,18 +372,18 @@ static int enh_desc_get_rx_frame_len(str + if (rx_coe_type == STMMAC_RX_COE_TYPE1) + csum = 2; + +- return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) - +- csum); ++ return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK) ++ >> RDES0_FRAME_LEN_SHIFT) - csum); + } + + static void enh_desc_enable_tx_timestamp(struct dma_desc *p) + { +- p->des0 |= ETDES0_TIME_STAMP_ENABLE; ++ p->des0 |= cpu_to_le32(ETDES0_TIME_STAMP_ENABLE); + } + + static int enh_desc_get_tx_timestamp_status(struct dma_desc *p) + { +- return (p->des0 & ETDES0_TIME_STAMP_STATUS) >> 17; ++ return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17; + } + + static u64 enh_desc_get_timestamp(void *desc, u32 ats) +@@ -392,13 +392,13 @@ static u64 enh_desc_get_timestamp(void * + + if (ats) { + struct dma_extended_desc *p = (struct dma_extended_desc *)desc; +- ns = p->des6; ++ ns = le32_to_cpu(p->des6); + /* convert high/sec time stamp value to nanosecond */ +- ns += p->des7 * 1000000000ULL; ++ ns += le32_to_cpu(p->des7) * 1000000000ULL; + } else { + struct dma_desc *p = (struct dma_desc *)desc; +- ns = p->des2; +- ns += p->des3 * 1000000000ULL; ++ ns = le32_to_cpu(p->des2); ++ ns += le32_to_cpu(p->des3) * 1000000000ULL; + } + + return ns; +@@ -408,10 +408,11 @@ static int enh_desc_get_rx_timestamp_sta + { + if (ats) { + struct dma_extended_desc *p = (struct dma_extended_desc *)desc; +- return (p->basic.des0 & RDES0_IPC_CSUM_ERROR) >> 7; ++ return (le32_to_cpu(p->basic.des0) & RDES0_IPC_CSUM_ERROR) >> 7; + } else { + struct dma_desc *p = (struct dma_desc *)desc; +- if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff)) ++ if ((le32_to_cpu(p->des2) == 0xffffffff) && ++ (le32_to_cpu(p->des3) == 0xffffffff)) + /* timestamp is corrupted, hence don't store it */ + return 0; + else +--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c ++++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +@@ -30,8 +30,8 @@ static int ndesc_get_tx_status(void *dat + struct dma_desc *p, void __iomem *ioaddr) + { + struct net_device_stats *stats = (struct net_device_stats *)data; +- unsigned int tdes0 = p->des0; +- unsigned int tdes1 = p->des1; ++ unsigned int tdes0 = le32_to_cpu(p->des0); ++ unsigned int tdes1 = le32_to_cpu(p->des1); + int ret = tx_done; + + /* Get tx owner first */ +@@ -77,7 +77,7 @@ static int 
ndesc_get_tx_status(void *dat + + static int ndesc_get_tx_len(struct dma_desc *p) + { +- return (p->des1 & RDES1_BUFFER1_SIZE_MASK); ++ return (le32_to_cpu(p->des1) & RDES1_BUFFER1_SIZE_MASK); + } + + /* This function verifies if each incoming frame has some errors +@@ -88,7 +88,7 @@ static int ndesc_get_rx_status(void *dat + struct dma_desc *p) + { + int ret = good_frame; +- unsigned int rdes0 = p->des0; ++ unsigned int rdes0 = le32_to_cpu(p->des0); + struct net_device_stats *stats = (struct net_device_stats *)data; + + if (unlikely(rdes0 & RDES0_OWN)) +@@ -141,8 +141,8 @@ static int ndesc_get_rx_status(void *dat + static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, + int end) + { +- p->des0 |= RDES0_OWN; +- p->des1 |= (BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK; ++ p->des0 |= cpu_to_le32(RDES0_OWN); ++ p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK); + + if (mode == STMMAC_CHAIN_MODE) + ndesc_rx_set_on_chain(p, end); +@@ -150,12 +150,12 @@ static void ndesc_init_rx_desc(struct dm + ndesc_rx_set_on_ring(p, end); + + if (disable_rx_ic) +- p->des1 |= RDES1_DISABLE_IC; ++ p->des1 |= cpu_to_le32(RDES1_DISABLE_IC); + } + + static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end) + { +- p->des0 &= ~TDES0_OWN; ++ p->des0 &= cpu_to_le32(~TDES0_OWN); + if (mode == STMMAC_CHAIN_MODE) + ndesc_tx_set_on_chain(p); + else +@@ -164,27 +164,27 @@ static void ndesc_init_tx_desc(struct dm + + static int ndesc_get_tx_owner(struct dma_desc *p) + { +- return (p->des0 & TDES0_OWN) >> 31; ++ return (le32_to_cpu(p->des0) & TDES0_OWN) >> 31; + } + + static void ndesc_set_tx_owner(struct dma_desc *p) + { +- p->des0 |= TDES0_OWN; ++ p->des0 |= cpu_to_le32(TDES0_OWN); + } + + static void ndesc_set_rx_owner(struct dma_desc *p) + { +- p->des0 |= RDES0_OWN; ++ p->des0 |= cpu_to_le32(RDES0_OWN); + } + + static int ndesc_get_tx_ls(struct dma_desc *p) + { +- return (p->des1 & TDES1_LAST_SEGMENT) >> 30; ++ return (le32_to_cpu(p->des1) & TDES1_LAST_SEGMENT) >> 30; + } + + static void ndesc_release_tx_desc(struct dma_desc *p, int mode) + { +- int ter = (p->des1 & TDES1_END_RING) >> 25; ++ int ter = (le32_to_cpu(p->des1) & TDES1_END_RING) >> 25; + + memset(p, 0, offsetof(struct dma_desc, des2)); + if (mode == STMMAC_CHAIN_MODE) +@@ -197,7 +197,7 @@ static void ndesc_prepare_tx_desc(struct + bool csum_flag, int mode, bool tx_own, + bool ls) + { +- unsigned int tdes1 = p->des1; ++ unsigned int tdes1 = le32_to_cpu(p->des1); + + if (is_fs) + tdes1 |= TDES1_FIRST_SEGMENT; +@@ -212,7 +212,7 @@ static void ndesc_prepare_tx_desc(struct + if (ls) + tdes1 |= TDES1_LAST_SEGMENT; + +- p->des1 = tdes1; ++ p->des1 = cpu_to_le32(tdes1); + + if (mode == STMMAC_CHAIN_MODE) + norm_set_tx_desc_len_on_chain(p, len); +@@ -220,12 +220,12 @@ static void ndesc_prepare_tx_desc(struct + norm_set_tx_desc_len_on_ring(p, len); + + if (tx_own) +- p->des0 |= TDES0_OWN; ++ p->des0 |= cpu_to_le32(TDES0_OWN); + } + + static void ndesc_set_tx_ic(struct dma_desc *p) + { +- p->des1 |= TDES1_INTERRUPT; ++ p->des1 |= cpu_to_le32(TDES1_INTERRUPT); + } + + static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type) +@@ -241,19 +241,20 @@ static int ndesc_get_rx_frame_len(struct + if (rx_coe_type == STMMAC_RX_COE_TYPE1) + csum = 2; + +- return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) - ++ return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK) ++ >> RDES0_FRAME_LEN_SHIFT) - + csum); + + } + + static void ndesc_enable_tx_timestamp(struct dma_desc *p) + { +- p->des1 |= 
TDES1_TIME_STAMP_ENABLE; ++ p->des1 |= cpu_to_le32(TDES1_TIME_STAMP_ENABLE); + } + + static int ndesc_get_tx_timestamp_status(struct dma_desc *p) + { +- return (p->des0 & TDES0_TIME_STAMP_STATUS) >> 17; ++ return (le32_to_cpu(p->des0) & TDES0_TIME_STAMP_STATUS) >> 17; + } + + static u64 ndesc_get_timestamp(void *desc, u32 ats) +@@ -261,9 +262,9 @@ static u64 ndesc_get_timestamp(void *des + struct dma_desc *p = (struct dma_desc *)desc; + u64 ns; + +- ns = p->des2; ++ ns = le32_to_cpu(p->des2); + /* convert high/sec time stamp value to nanosecond */ +- ns += p->des3 * 1000000000ULL; ++ ns += le32_to_cpu(p->des3) * 1000000000ULL; + + return ns; + } +@@ -272,7 +273,8 @@ static int ndesc_get_rx_timestamp_status + { + struct dma_desc *p = (struct dma_desc *)desc; + +- if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff)) ++ if ((le32_to_cpu(p->des2) == 0xffffffff) && ++ (le32_to_cpu(p->des3) == 0xffffffff)) + /* timestamp is corrupted, hence don't store it */ + return 0; + else +--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c ++++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +@@ -34,7 +34,7 @@ static int stmmac_jumbo_frm(void *p, str + unsigned int entry = priv->cur_tx; + struct dma_desc *desc; + unsigned int nopaged_len = skb_headlen(skb); +- unsigned int bmax, len; ++ unsigned int bmax, len, des2; + + if (priv->extend_desc) + desc = (struct dma_desc *)(priv->dma_etx + entry); +@@ -50,16 +50,17 @@ static int stmmac_jumbo_frm(void *p, str + + if (nopaged_len > BUF_SIZE_8KiB) { + +- desc->des2 = dma_map_single(priv->device, skb->data, +- bmax, DMA_TO_DEVICE); +- if (dma_mapping_error(priv->device, desc->des2)) ++ des2 = dma_map_single(priv->device, skb->data, bmax, ++ DMA_TO_DEVICE); ++ desc->des2 = cpu_to_le32(des2); ++ if (dma_mapping_error(priv->device, des2)) + return -1; + +- priv->tx_skbuff_dma[entry].buf = desc->des2; ++ priv->tx_skbuff_dma[entry].buf = des2; + priv->tx_skbuff_dma[entry].len = bmax; + priv->tx_skbuff_dma[entry].is_jumbo = true; + +- desc->des3 = desc->des2 + BUF_SIZE_4KiB; ++ desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); + priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, + STMMAC_RING_MODE, 0, false); + priv->tx_skbuff[entry] = NULL; +@@ -70,26 +71,28 @@ static int stmmac_jumbo_frm(void *p, str + else + desc = priv->dma_tx + entry; + +- desc->des2 = dma_map_single(priv->device, skb->data + bmax, +- len, DMA_TO_DEVICE); +- if (dma_mapping_error(priv->device, desc->des2)) ++ des2 = dma_map_single(priv->device, skb->data + bmax, len, ++ DMA_TO_DEVICE); ++ desc->des2 = cpu_to_le32(des2); ++ if (dma_mapping_error(priv->device, des2)) + return -1; +- priv->tx_skbuff_dma[entry].buf = desc->des2; ++ priv->tx_skbuff_dma[entry].buf = des2; + priv->tx_skbuff_dma[entry].len = len; + priv->tx_skbuff_dma[entry].is_jumbo = true; + +- desc->des3 = desc->des2 + BUF_SIZE_4KiB; ++ desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); + priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, + STMMAC_RING_MODE, 1, true); + } else { +- desc->des2 = dma_map_single(priv->device, skb->data, +- nopaged_len, DMA_TO_DEVICE); +- if (dma_mapping_error(priv->device, desc->des2)) ++ des2 = dma_map_single(priv->device, skb->data, ++ nopaged_len, DMA_TO_DEVICE); ++ desc->des2 = cpu_to_le32(des2); ++ if (dma_mapping_error(priv->device, des2)) + return -1; +- priv->tx_skbuff_dma[entry].buf = desc->des2; ++ priv->tx_skbuff_dma[entry].buf = des2; + priv->tx_skbuff_dma[entry].len = nopaged_len; + priv->tx_skbuff_dma[entry].is_jumbo = true; +- desc->des3 = desc->des2 + BUF_SIZE_4KiB; ++ desc->des3 = 
cpu_to_le32(des2 + BUF_SIZE_4KiB); + priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, + STMMAC_RING_MODE, 0, true); + } +@@ -115,13 +118,13 @@ static void stmmac_refill_desc3(void *pr + + /* Fill DES3 in case of RING mode */ + if (priv->dma_buf_sz >= BUF_SIZE_8KiB) +- p->des3 = p->des2 + BUF_SIZE_8KiB; ++ p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB); + } + + /* In ring mode we need to fill the desc3 because it is used as buffer */ + static void stmmac_init_desc3(struct dma_desc *p) + { +- p->des3 = p->des2 + BUF_SIZE_8KiB; ++ p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB); + } + + static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h +@@ -64,7 +64,6 @@ struct stmmac_priv { + dma_addr_t dma_tx_phy; + int tx_coalesce; + int hwts_tx_en; +- spinlock_t tx_lock; + bool tx_path_in_lpi_mode; + struct timer_list txtimer; + bool tso; +@@ -90,7 +89,6 @@ struct stmmac_priv { + struct mac_device_info *hw; + spinlock_t lock; + +- struct phy_device *phydev ____cacheline_aligned_in_smp; + int oldlink; + int speed; + int oldduplex; +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +@@ -263,7 +263,7 @@ static void stmmac_ethtool_getdrvinfo(st + { + struct stmmac_priv *priv = netdev_priv(dev); + +- if (priv->plat->has_gmac) ++ if (priv->plat->has_gmac || priv->plat->has_gmac4) + strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver)); + else + strlcpy(info->driver, MAC100_ETHTOOL_NAME, +@@ -272,25 +272,26 @@ static void stmmac_ethtool_getdrvinfo(st + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); + } + +-static int stmmac_ethtool_getsettings(struct net_device *dev, +- struct ethtool_cmd *cmd) ++static int stmmac_ethtool_get_link_ksettings(struct net_device *dev, ++ struct ethtool_link_ksettings *cmd) + { + struct stmmac_priv *priv = netdev_priv(dev); +- struct phy_device *phy = priv->phydev; ++ struct phy_device *phy = dev->phydev; + int rc; + + if (priv->hw->pcs & STMMAC_PCS_RGMII || + priv->hw->pcs & STMMAC_PCS_SGMII) { + struct rgmii_adv adv; ++ u32 supported, advertising, lp_advertising; + + if (!priv->xstats.pcs_link) { +- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); +- cmd->duplex = DUPLEX_UNKNOWN; ++ cmd->base.speed = SPEED_UNKNOWN; ++ cmd->base.duplex = DUPLEX_UNKNOWN; + return 0; + } +- cmd->duplex = priv->xstats.pcs_duplex; ++ cmd->base.duplex = priv->xstats.pcs_duplex; + +- ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed); ++ cmd->base.speed = priv->xstats.pcs_speed; + + /* Get and convert ADV/LP_ADV from the HW AN registers */ + if (!priv->hw->mac->pcs_get_adv_lp) +@@ -300,45 +301,59 @@ static int stmmac_ethtool_getsettings(st + + /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */ + ++ ethtool_convert_link_mode_to_legacy_u32( ++ &supported, cmd->link_modes.supported); ++ ethtool_convert_link_mode_to_legacy_u32( ++ &advertising, cmd->link_modes.advertising); ++ ethtool_convert_link_mode_to_legacy_u32( ++ &lp_advertising, cmd->link_modes.lp_advertising); ++ + if (adv.pause & STMMAC_PCS_PAUSE) +- cmd->advertising |= ADVERTISED_Pause; ++ advertising |= ADVERTISED_Pause; + if (adv.pause & STMMAC_PCS_ASYM_PAUSE) +- cmd->advertising |= ADVERTISED_Asym_Pause; ++ advertising |= ADVERTISED_Asym_Pause; + if (adv.lp_pause & STMMAC_PCS_PAUSE) +- cmd->lp_advertising |= ADVERTISED_Pause; ++ lp_advertising |= ADVERTISED_Pause; + if (adv.lp_pause & 
STMMAC_PCS_ASYM_PAUSE) +- cmd->lp_advertising |= ADVERTISED_Asym_Pause; ++ lp_advertising |= ADVERTISED_Asym_Pause; + + /* Reg49[3] always set because ANE is always supported */ +- cmd->autoneg = ADVERTISED_Autoneg; +- cmd->supported |= SUPPORTED_Autoneg; +- cmd->advertising |= ADVERTISED_Autoneg; +- cmd->lp_advertising |= ADVERTISED_Autoneg; ++ cmd->base.autoneg = ADVERTISED_Autoneg; ++ supported |= SUPPORTED_Autoneg; ++ advertising |= ADVERTISED_Autoneg; ++ lp_advertising |= ADVERTISED_Autoneg; + + if (adv.duplex) { +- cmd->supported |= (SUPPORTED_1000baseT_Full | +- SUPPORTED_100baseT_Full | +- SUPPORTED_10baseT_Full); +- cmd->advertising |= (ADVERTISED_1000baseT_Full | +- ADVERTISED_100baseT_Full | +- ADVERTISED_10baseT_Full); ++ supported |= (SUPPORTED_1000baseT_Full | ++ SUPPORTED_100baseT_Full | ++ SUPPORTED_10baseT_Full); ++ advertising |= (ADVERTISED_1000baseT_Full | ++ ADVERTISED_100baseT_Full | ++ ADVERTISED_10baseT_Full); + } else { +- cmd->supported |= (SUPPORTED_1000baseT_Half | +- SUPPORTED_100baseT_Half | +- SUPPORTED_10baseT_Half); +- cmd->advertising |= (ADVERTISED_1000baseT_Half | +- ADVERTISED_100baseT_Half | +- ADVERTISED_10baseT_Half); ++ supported |= (SUPPORTED_1000baseT_Half | ++ SUPPORTED_100baseT_Half | ++ SUPPORTED_10baseT_Half); ++ advertising |= (ADVERTISED_1000baseT_Half | ++ ADVERTISED_100baseT_Half | ++ ADVERTISED_10baseT_Half); + } + if (adv.lp_duplex) +- cmd->lp_advertising |= (ADVERTISED_1000baseT_Full | +- ADVERTISED_100baseT_Full | +- ADVERTISED_10baseT_Full); ++ lp_advertising |= (ADVERTISED_1000baseT_Full | ++ ADVERTISED_100baseT_Full | ++ ADVERTISED_10baseT_Full); + else +- cmd->lp_advertising |= (ADVERTISED_1000baseT_Half | +- ADVERTISED_100baseT_Half | +- ADVERTISED_10baseT_Half); +- cmd->port = PORT_OTHER; ++ lp_advertising |= (ADVERTISED_1000baseT_Half | ++ ADVERTISED_100baseT_Half | ++ ADVERTISED_10baseT_Half); ++ cmd->base.port = PORT_OTHER; ++ ++ ethtool_convert_legacy_u32_to_link_mode( ++ cmd->link_modes.supported, supported); ++ ethtool_convert_legacy_u32_to_link_mode( ++ cmd->link_modes.advertising, advertising); ++ ethtool_convert_legacy_u32_to_link_mode( ++ cmd->link_modes.lp_advertising, lp_advertising); + + return 0; + } +@@ -353,16 +368,16 @@ static int stmmac_ethtool_getsettings(st + "link speed / duplex setting\n", dev->name); + return -EBUSY; + } +- cmd->transceiver = XCVR_INTERNAL; +- rc = phy_ethtool_gset(phy, cmd); ++ rc = phy_ethtool_ksettings_get(phy, cmd); + return rc; + } + +-static int stmmac_ethtool_setsettings(struct net_device *dev, +- struct ethtool_cmd *cmd) ++static int ++stmmac_ethtool_set_link_ksettings(struct net_device *dev, ++ const struct ethtool_link_ksettings *cmd) + { + struct stmmac_priv *priv = netdev_priv(dev); +- struct phy_device *phy = priv->phydev; ++ struct phy_device *phy = dev->phydev; + int rc; + + if (priv->hw->pcs & STMMAC_PCS_RGMII || +@@ -370,7 +385,7 @@ static int stmmac_ethtool_setsettings(st + u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause; + + /* Only support ANE */ +- if (cmd->autoneg != AUTONEG_ENABLE) ++ if (cmd->base.autoneg != AUTONEG_ENABLE) + return -EINVAL; + + mask &= (ADVERTISED_1000baseT_Half | +@@ -391,9 +406,7 @@ static int stmmac_ethtool_setsettings(st + return 0; + } + +- spin_lock(&priv->lock); +- rc = phy_ethtool_sset(phy, cmd); +- spin_unlock(&priv->lock); ++ rc = phy_ethtool_ksettings_set(phy, cmd); + + return rc; + } +@@ -433,7 +446,7 @@ static void stmmac_ethtool_gregs(struct + + memset(reg_space, 0x0, REG_SPACE_SIZE); + +- if (!priv->plat->has_gmac) { ++ if 
(!(priv->plat->has_gmac || priv->plat->has_gmac4)) { + /* MAC registers */ + for (i = 0; i < 12; i++) + reg_space[i] = readl(priv->ioaddr + (i * 4)); +@@ -471,12 +484,12 @@ stmmac_get_pauseparam(struct net_device + if (!adv_lp.pause) + return; + } else { +- if (!(priv->phydev->supported & SUPPORTED_Pause) || +- !(priv->phydev->supported & SUPPORTED_Asym_Pause)) ++ if (!(netdev->phydev->supported & SUPPORTED_Pause) || ++ !(netdev->phydev->supported & SUPPORTED_Asym_Pause)) + return; + } + +- pause->autoneg = priv->phydev->autoneg; ++ pause->autoneg = netdev->phydev->autoneg; + + if (priv->flow_ctrl & FLOW_RX) + pause->rx_pause = 1; +@@ -490,7 +503,7 @@ stmmac_set_pauseparam(struct net_device + struct ethtool_pauseparam *pause) + { + struct stmmac_priv *priv = netdev_priv(netdev); +- struct phy_device *phy = priv->phydev; ++ struct phy_device *phy = netdev->phydev; + int new_pause = FLOW_OFF; + + if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) { +@@ -550,7 +563,7 @@ static void stmmac_get_ethtool_stats(str + } + } + if (priv->eee_enabled) { +- int val = phy_get_eee_err(priv->phydev); ++ int val = phy_get_eee_err(dev->phydev); + if (val) + priv->xstats.phy_eee_wakeup_error_n = val; + } +@@ -669,7 +682,7 @@ static int stmmac_ethtool_op_get_eee(str + edata->eee_active = priv->eee_active; + edata->tx_lpi_timer = priv->tx_lpi_timer; + +- return phy_ethtool_get_eee(priv->phydev, edata); ++ return phy_ethtool_get_eee(dev->phydev, edata); + } + + static int stmmac_ethtool_op_set_eee(struct net_device *dev, +@@ -694,7 +707,7 @@ static int stmmac_ethtool_op_set_eee(str + priv->tx_lpi_timer = edata->tx_lpi_timer; + } + +- return phy_ethtool_set_eee(priv->phydev, edata); ++ return phy_ethtool_set_eee(dev->phydev, edata); + } + + static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) +@@ -853,8 +866,6 @@ static int stmmac_set_tunable(struct net + static const struct ethtool_ops stmmac_ethtool_ops = { + .begin = stmmac_check_if_running, + .get_drvinfo = stmmac_ethtool_getdrvinfo, +- .get_settings = stmmac_ethtool_getsettings, +- .set_settings = stmmac_ethtool_setsettings, + .get_msglevel = stmmac_ethtool_getmsglevel, + .set_msglevel = stmmac_ethtool_setmsglevel, + .get_regs = stmmac_ethtool_gregs, +@@ -874,6 +885,8 @@ static const struct ethtool_ops stmmac_e + .set_coalesce = stmmac_set_coalesce, + .get_tunable = stmmac_get_tunable, + .set_tunable = stmmac_set_tunable, ++ .get_link_ksettings = stmmac_ethtool_get_link_ksettings, ++ .set_link_ksettings = stmmac_ethtool_set_link_ksettings, + }; + + void stmmac_set_ethtool_ops(struct net_device *netdev) +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -105,8 +105,8 @@ module_param(eee_timer, int, S_IRUGO | S + MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); + #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x)) + +-/* By default the driver will use the ring mode to manage tx and rx descriptors +- * but passing this value so user can force to use the chain instead of the ring ++/* By default the driver will use the ring mode to manage tx and rx descriptors, ++ * but allow user to force to use the chain instead of the ring + */ + static unsigned int chain_mode; + module_param(chain_mode, int, S_IRUGO); +@@ -221,7 +221,8 @@ static inline u32 stmmac_rx_dirty(struct + */ + static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv) + { +- struct phy_device *phydev = priv->phydev; ++ struct net_device *ndev = priv->dev; ++ struct phy_device *phydev = 
ndev->phydev; + + if (likely(priv->plat->fix_mac_speed)) + priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed); +@@ -279,6 +280,7 @@ static void stmmac_eee_ctrl_timer(unsign + */ + bool stmmac_eee_init(struct stmmac_priv *priv) + { ++ struct net_device *ndev = priv->dev; + unsigned long flags; + bool ret = false; + +@@ -295,7 +297,7 @@ bool stmmac_eee_init(struct stmmac_priv + int tx_lpi_timer = priv->tx_lpi_timer; + + /* Check if the PHY supports EEE */ +- if (phy_init_eee(priv->phydev, 1)) { ++ if (phy_init_eee(ndev->phydev, 1)) { + /* To manage at run-time if the EEE cannot be supported + * anymore (for example because the lp caps have been + * changed). +@@ -303,7 +305,7 @@ bool stmmac_eee_init(struct stmmac_priv + */ + spin_lock_irqsave(&priv->lock, flags); + if (priv->eee_active) { +- pr_debug("stmmac: disable EEE\n"); ++ netdev_dbg(priv->dev, "disable EEE\n"); + del_timer_sync(&priv->eee_ctrl_timer); + priv->hw->mac->set_eee_timer(priv->hw, 0, + tx_lpi_timer); +@@ -327,12 +329,12 @@ bool stmmac_eee_init(struct stmmac_priv + tx_lpi_timer); + } + /* Set HW EEE according to the speed */ +- priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link); ++ priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link); + + ret = true; + spin_unlock_irqrestore(&priv->lock, flags); + +- pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); ++ netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); + } + out: + return ret; +@@ -450,8 +452,8 @@ static int stmmac_hwtstamp_ioctl(struct + sizeof(struct hwtstamp_config))) + return -EFAULT; + +- pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", +- __func__, config.flags, config.tx_type, config.rx_filter); ++ netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", ++ __func__, config.flags, config.tx_type, config.rx_filter); + + /* reserved for future extensions */ + if (config.flags) +@@ -697,7 +699,7 @@ static void stmmac_release_ptp(struct st + static void stmmac_adjust_link(struct net_device *dev) + { + struct stmmac_priv *priv = netdev_priv(dev); +- struct phy_device *phydev = priv->phydev; ++ struct phy_device *phydev = dev->phydev; + unsigned long flags; + int new_state = 0; + unsigned int fc = priv->flow_ctrl, pause_time = priv->pause; +@@ -750,9 +752,9 @@ static void stmmac_adjust_link(struct ne + stmmac_hw_fix_mac_speed(priv); + break; + default: +- if (netif_msg_link(priv)) +- pr_warn("%s: Speed (%d) not 10/100\n", +- dev->name, phydev->speed); ++ netif_warn(priv, link, priv->dev, ++ "Speed (%d) not 10/100\n", ++ phydev->speed); + break; + } + +@@ -805,10 +807,10 @@ static void stmmac_check_pcs_mode(struct + (interface == PHY_INTERFACE_MODE_RGMII_ID) || + (interface == PHY_INTERFACE_MODE_RGMII_RXID) || + (interface == PHY_INTERFACE_MODE_RGMII_TXID)) { +- pr_debug("STMMAC: PCS RGMII support enable\n"); ++ netdev_dbg(priv->dev, "PCS RGMII support enabled\n"); + priv->hw->pcs = STMMAC_PCS_RGMII; + } else if (interface == PHY_INTERFACE_MODE_SGMII) { +- pr_debug("STMMAC: PCS SGMII support enable\n"); ++ netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); + priv->hw->pcs = STMMAC_PCS_SGMII; + } + } +@@ -843,15 +845,15 @@ static int stmmac_init_phy(struct net_de + + snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, + priv->plat->phy_addr); +- pr_debug("stmmac_init_phy: trying to attach to %s\n", +- phy_id_fmt); ++ netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__, ++ phy_id_fmt); + + phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, + interface); + } + + 
if (IS_ERR_OR_NULL(phydev)) { +- pr_err("%s: Could not attach to PHY\n", dev->name); ++ netdev_err(priv->dev, "Could not attach to PHY\n"); + if (!phydev) + return -ENODEV; + +@@ -884,10 +886,8 @@ static int stmmac_init_phy(struct net_de + if (phydev->is_pseudo_fixed_link) + phydev->irq = PHY_POLL; + +- pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" +- " Link = %d\n", dev->name, phydev->phy_id, phydev->link); +- +- priv->phydev = phydev; ++ netdev_dbg(priv->dev, "%s: attached to PHY (UID 0x%x) Link = %d\n", ++ __func__, phydev->phy_id, phydev->link); + + return 0; + } +@@ -973,7 +973,8 @@ static int stmmac_init_rx_buffers(struct + + skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags); + if (!skb) { +- pr_err("%s: Rx init fails; skb is NULL\n", __func__); ++ netdev_err(priv->dev, ++ "%s: Rx init fails; skb is NULL\n", __func__); + return -ENOMEM; + } + priv->rx_skbuff[i] = skb; +@@ -981,15 +982,15 @@ static int stmmac_init_rx_buffers(struct + priv->dma_buf_sz, + DMA_FROM_DEVICE); + if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) { +- pr_err("%s: DMA mapping error\n", __func__); ++ netdev_err(priv->dev, "%s: DMA mapping error\n", __func__); + dev_kfree_skb_any(skb); + return -EINVAL; + } + + if (priv->synopsys_id >= DWMAC_CORE_4_00) +- p->des0 = priv->rx_skbuff_dma[i]; ++ p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]); + else +- p->des2 = priv->rx_skbuff_dma[i]; ++ p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]); + + if ((priv->hw->mode->init_desc3) && + (priv->dma_buf_sz == BUF_SIZE_16KiB)) +@@ -1031,13 +1032,14 @@ static int init_dma_desc_rings(struct ne + + priv->dma_buf_sz = bfsize; + +- if (netif_msg_probe(priv)) { +- pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, +- (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); ++ netif_dbg(priv, probe, priv->dev, ++ "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", ++ __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy); ++ ++ /* RX INITIALIZATION */ ++ netif_dbg(priv, probe, priv->dev, ++ "SKB addresses:\nskb\t\tskb data\tdma data\n"); + +- /* RX INITIALIZATION */ +- pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n"); +- } + for (i = 0; i < DMA_RX_SIZE; i++) { + struct dma_desc *p; + if (priv->extend_desc) +@@ -1049,10 +1051,9 @@ static int init_dma_desc_rings(struct ne + if (ret) + goto err_init_rx_buffers; + +- if (netif_msg_probe(priv)) +- pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], +- priv->rx_skbuff[i]->data, +- (unsigned int)priv->rx_skbuff_dma[i]); ++ netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n", ++ priv->rx_skbuff[i], priv->rx_skbuff[i]->data, ++ (unsigned int)priv->rx_skbuff_dma[i]); + } + priv->cur_rx = 0; + priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE); +@@ -1307,7 +1308,7 @@ static void stmmac_tx_clean(struct stmma + unsigned int bytes_compl = 0, pkts_compl = 0; + unsigned int entry = priv->dirty_tx; + +- spin_lock(&priv->tx_lock); ++ netif_tx_lock(priv->dev); + + priv->xstats.tx_clean++; + +@@ -1378,22 +1379,17 @@ static void stmmac_tx_clean(struct stmma + netdev_completed_queue(priv->dev, pkts_compl, bytes_compl); + + if (unlikely(netif_queue_stopped(priv->dev) && +- stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) { +- netif_tx_lock(priv->dev); +- if (netif_queue_stopped(priv->dev) && +- stmmac_tx_avail(priv) > STMMAC_TX_THRESH) { +- if (netif_msg_tx_done(priv)) +- pr_debug("%s: restart transmit\n", __func__); +- netif_wake_queue(priv->dev); +- } +- netif_tx_unlock(priv->dev); ++ stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) { ++ netif_dbg(priv, tx_done, 
priv->dev, ++ "%s: restart transmit\n", __func__); ++ netif_wake_queue(priv->dev); + } + + if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { + stmmac_enable_eee_mode(priv); + mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); + } +- spin_unlock(&priv->tx_lock); ++ netif_tx_unlock(priv->dev); + } + + static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv) +@@ -1497,7 +1493,7 @@ static void stmmac_mmc_setup(struct stmm + dwmac_mmc_ctrl(priv->mmcaddr, mode); + memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); + } else +- pr_info(" No MAC Management Counters available\n"); ++ netdev_info(priv->dev, "No MAC Management Counters available\n"); + } + + /** +@@ -1510,18 +1506,18 @@ static void stmmac_mmc_setup(struct stmm + static void stmmac_selec_desc_mode(struct stmmac_priv *priv) + { + if (priv->plat->enh_desc) { +- pr_info(" Enhanced/Alternate descriptors\n"); ++ dev_info(priv->device, "Enhanced/Alternate descriptors\n"); + + /* GMAC older than 3.50 has no extended descriptors */ + if (priv->synopsys_id >= DWMAC_CORE_3_50) { +- pr_info("\tEnabled extended descriptors\n"); ++ dev_info(priv->device, "Enabled extended descriptors\n"); + priv->extend_desc = 1; + } else +- pr_warn("Extended descriptors not supported\n"); ++ dev_warn(priv->device, "Extended descriptors not supported\n"); + + priv->hw->desc = &enh_desc_ops; + } else { +- pr_info(" Normal descriptors\n"); ++ dev_info(priv->device, "Normal descriptors\n"); + priv->hw->desc = &ndesc_ops; + } + } +@@ -1562,8 +1558,8 @@ static void stmmac_check_ether_addr(stru + priv->dev->dev_addr, 0); + if (!is_valid_ether_addr(priv->dev->dev_addr)) + eth_hw_addr_random(priv->dev); +- pr_info("%s: device MAC address %pM\n", priv->dev->name, +- priv->dev->dev_addr); ++ netdev_info(priv->dev, "device MAC address %pM\n", ++ priv->dev->dev_addr); + } + } + +@@ -1577,16 +1573,12 @@ static void stmmac_check_ether_addr(stru + */ + static int stmmac_init_dma_engine(struct stmmac_priv *priv) + { +- int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, aal = 0; +- int mixed_burst = 0; + int atds = 0; + int ret = 0; + +- if (priv->plat->dma_cfg) { +- pbl = priv->plat->dma_cfg->pbl; +- fixed_burst = priv->plat->dma_cfg->fixed_burst; +- mixed_burst = priv->plat->dma_cfg->mixed_burst; +- aal = priv->plat->dma_cfg->aal; ++ if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { ++ dev_err(priv->device, "Invalid DMA configuration\n"); ++ return -EINVAL; + } + + if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) +@@ -1598,8 +1590,8 @@ static int stmmac_init_dma_engine(struct + return ret; + } + +- priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst, +- aal, priv->dma_tx_phy, priv->dma_rx_phy, atds); ++ priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg, ++ priv->dma_tx_phy, priv->dma_rx_phy, atds); + + if (priv->synopsys_id >= DWMAC_CORE_4_00) { + priv->rx_tail_addr = priv->dma_rx_phy + +@@ -1671,7 +1663,8 @@ static int stmmac_hw_setup(struct net_de + /* DMA initialization and SW reset */ + ret = stmmac_init_dma_engine(priv); + if (ret < 0) { +- pr_err("%s: DMA engine initialization failed\n", __func__); ++ netdev_err(priv->dev, "%s: DMA engine initialization failed\n", ++ __func__); + return ret; + } + +@@ -1700,7 +1693,7 @@ static int stmmac_hw_setup(struct net_de + + ret = priv->hw->mac->rx_ipc(priv->hw); + if (!ret) { +- pr_warn(" RX IPC Checksum Offload disabled\n"); ++ netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); + priv->plat->rx_coe = STMMAC_RX_COE_NONE; + priv->hw->rx_csum = 0; + } +@@ -1725,10 
+1718,11 @@ static int stmmac_hw_setup(struct net_de + #ifdef CONFIG_DEBUG_FS + ret = stmmac_init_fs(dev); + if (ret < 0) +- pr_warn("%s: failed debugFS registration\n", __func__); ++ netdev_warn(priv->dev, "%s: failed debugFS registration\n", ++ __func__); + #endif + /* Start the ball rolling... */ +- pr_debug("%s: DMA RX/TX processes started...\n", dev->name); ++ netdev_dbg(priv->dev, "DMA RX/TX processes started...\n"); + priv->hw->dma->start_tx(priv->ioaddr); + priv->hw->dma->start_rx(priv->ioaddr); + +@@ -1783,8 +1777,9 @@ static int stmmac_open(struct net_device + priv->hw->pcs != STMMAC_PCS_RTBI) { + ret = stmmac_init_phy(dev); + if (ret) { +- pr_err("%s: Cannot attach to PHY (error: %d)\n", +- __func__, ret); ++ netdev_err(priv->dev, ++ "%s: Cannot attach to PHY (error: %d)\n", ++ __func__, ret); + return ret; + } + } +@@ -1798,33 +1793,36 @@ static int stmmac_open(struct net_device + + ret = alloc_dma_desc_resources(priv); + if (ret < 0) { +- pr_err("%s: DMA descriptors allocation failed\n", __func__); ++ netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", ++ __func__); + goto dma_desc_error; + } + + ret = init_dma_desc_rings(dev, GFP_KERNEL); + if (ret < 0) { +- pr_err("%s: DMA descriptors initialization failed\n", __func__); ++ netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", ++ __func__); + goto init_error; + } + + ret = stmmac_hw_setup(dev, true); + if (ret < 0) { +- pr_err("%s: Hw setup failed\n", __func__); ++ netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); + goto init_error; + } + + stmmac_init_tx_coalesce(priv); + +- if (priv->phydev) +- phy_start(priv->phydev); ++ if (dev->phydev) ++ phy_start(dev->phydev); + + /* Request the IRQ lines */ + ret = request_irq(dev->irq, stmmac_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { +- pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n", +- __func__, dev->irq, ret); ++ netdev_err(priv->dev, ++ "%s: ERROR: allocating the IRQ %d (error: %d)\n", ++ __func__, dev->irq, ret); + goto init_error; + } + +@@ -1833,8 +1831,9 @@ static int stmmac_open(struct net_device + ret = request_irq(priv->wol_irq, stmmac_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { +- pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n", +- __func__, priv->wol_irq, ret); ++ netdev_err(priv->dev, ++ "%s: ERROR: allocating the WoL IRQ %d (%d)\n", ++ __func__, priv->wol_irq, ret); + goto wolirq_error; + } + } +@@ -1844,8 +1843,9 @@ static int stmmac_open(struct net_device + ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED, + dev->name, dev); + if (unlikely(ret < 0)) { +- pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n", +- __func__, priv->lpi_irq, ret); ++ netdev_err(priv->dev, ++ "%s: ERROR: allocating the LPI IRQ %d (%d)\n", ++ __func__, priv->lpi_irq, ret); + goto lpiirq_error; + } + } +@@ -1864,8 +1864,8 @@ wolirq_error: + init_error: + free_dma_desc_resources(priv); + dma_desc_error: +- if (priv->phydev) +- phy_disconnect(priv->phydev); ++ if (dev->phydev) ++ phy_disconnect(dev->phydev); + + return ret; + } +@@ -1884,10 +1884,9 @@ static int stmmac_release(struct net_dev + del_timer_sync(&priv->eee_ctrl_timer); + + /* Stop and disconnect the PHY */ +- if (priv->phydev) { +- phy_stop(priv->phydev); +- phy_disconnect(priv->phydev); +- priv->phydev = NULL; ++ if (dev->phydev) { ++ phy_stop(dev->phydev); ++ phy_disconnect(dev->phydev); + } + + netif_stop_queue(dev); +@@ -1947,13 +1946,13 @@ static void stmmac_tso_allocator(struct + priv->cur_tx = 
STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); + desc = priv->dma_tx + priv->cur_tx; + +- desc->des0 = des + (total_len - tmp_len); ++ desc->des0 = cpu_to_le32(des + (total_len - tmp_len)); + buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? + TSO_MAX_BUFF_SIZE : tmp_len; + + priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, + 0, 1, +- (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), ++ (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE), + 0, 0); + + tmp_len -= TSO_MAX_BUFF_SIZE; +@@ -1998,8 +1997,6 @@ static netdev_tx_t stmmac_tso_xmit(struc + u8 proto_hdr_len; + int i; + +- spin_lock(&priv->tx_lock); +- + /* Compute header lengths */ + proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + +@@ -2009,9 +2006,10 @@ static netdev_tx_t stmmac_tso_xmit(struc + if (!netif_queue_stopped(dev)) { + netif_stop_queue(dev); + /* This is a hard error, log it. */ +- pr_err("%s: Tx Ring full when queue awake\n", __func__); ++ netdev_err(priv->dev, ++ "%s: Tx Ring full when queue awake\n", ++ __func__); + } +- spin_unlock(&priv->tx_lock); + return NETDEV_TX_BUSY; + } + +@@ -2049,11 +2047,11 @@ static netdev_tx_t stmmac_tso_xmit(struc + priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb); + priv->tx_skbuff[first_entry] = skb; + +- first->des0 = des; ++ first->des0 = cpu_to_le32(des); + + /* Fill start of payload in buff2 of first descriptor */ + if (pay_len) +- first->des1 = des + proto_hdr_len; ++ first->des1 = cpu_to_le32(des + proto_hdr_len); + + /* If needed take extra descriptors to fill the remaining payload */ + tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; +@@ -2082,8 +2080,8 @@ static netdev_tx_t stmmac_tso_xmit(struc + priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); + + if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { +- if (netif_msg_hw(priv)) +- pr_debug("%s: stop transmitted packets\n", __func__); ++ netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", ++ __func__); + netif_stop_queue(dev); + } + +@@ -2127,7 +2125,7 @@ static netdev_tx_t stmmac_tso_xmit(struc + * descriptor and then barrier is needed to make sure that + * all is coherent before granting the DMA engine. + */ +- smp_wmb(); ++ dma_wmb(); + + if (netif_msg_pktdata(priv)) { + pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", +@@ -2146,11 +2144,9 @@ static netdev_tx_t stmmac_tso_xmit(struc + priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, + STMMAC_CHAN0); + +- spin_unlock(&priv->tx_lock); + return NETDEV_TX_OK; + + dma_map_err: +- spin_unlock(&priv->tx_lock); + dev_err(priv->device, "Tx dma map failed\n"); + dev_kfree_skb(skb); + priv->dev->stats.tx_dropped++; +@@ -2182,14 +2178,13 @@ static netdev_tx_t stmmac_xmit(struct sk + return stmmac_tso_xmit(skb, dev); + } + +- spin_lock(&priv->tx_lock); +- + if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { +- spin_unlock(&priv->tx_lock); + if (!netif_queue_stopped(dev)) { + netif_stop_queue(dev); + /* This is a hard error, log it. 
*/ +- pr_err("%s: Tx Ring full when queue awake\n", __func__); ++ netdev_err(priv->dev, ++ "%s: Tx Ring full when queue awake\n", ++ __func__); + } + return NETDEV_TX_BUSY; + } +@@ -2242,13 +2237,11 @@ static netdev_tx_t stmmac_xmit(struct sk + + priv->tx_skbuff[entry] = NULL; + +- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) { +- desc->des0 = des; +- priv->tx_skbuff_dma[entry].buf = desc->des0; +- } else { +- desc->des2 = des; +- priv->tx_skbuff_dma[entry].buf = desc->des2; +- } ++ priv->tx_skbuff_dma[entry].buf = des; ++ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) ++ desc->des0 = cpu_to_le32(des); ++ else ++ desc->des2 = cpu_to_le32(des); + + priv->tx_skbuff_dma[entry].map_as_page = true; + priv->tx_skbuff_dma[entry].len = len; +@@ -2266,9 +2259,10 @@ static netdev_tx_t stmmac_xmit(struct sk + if (netif_msg_pktdata(priv)) { + void *tx_head; + +- pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", +- __func__, priv->cur_tx, priv->dirty_tx, first_entry, +- entry, first, nfrags); ++ netdev_dbg(priv->dev, ++ "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", ++ __func__, priv->cur_tx, priv->dirty_tx, first_entry, ++ entry, first, nfrags); + + if (priv->extend_desc) + tx_head = (void *)priv->dma_etx; +@@ -2277,13 +2271,13 @@ static netdev_tx_t stmmac_xmit(struct sk + + priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false); + +- pr_debug(">>> frame to be transmitted: "); ++ netdev_dbg(priv->dev, ">>> frame to be transmitted: "); + print_pkt(skb->data, skb->len); + } + + if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { +- if (netif_msg_hw(priv)) +- pr_debug("%s: stop transmitted packets\n", __func__); ++ netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", ++ __func__); + netif_stop_queue(dev); + } + +@@ -2319,13 +2313,11 @@ static netdev_tx_t stmmac_xmit(struct sk + if (dma_mapping_error(priv->device, des)) + goto dma_map_err; + +- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) { +- first->des0 = des; +- priv->tx_skbuff_dma[first_entry].buf = first->des0; +- } else { +- first->des2 = des; +- priv->tx_skbuff_dma[first_entry].buf = first->des2; +- } ++ priv->tx_skbuff_dma[first_entry].buf = des; ++ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) ++ first->des0 = cpu_to_le32(des); ++ else ++ first->des2 = cpu_to_le32(des); + + priv->tx_skbuff_dma[first_entry].len = nopaged_len; + priv->tx_skbuff_dma[first_entry].last_segment = last_segment; +@@ -2346,7 +2338,7 @@ static netdev_tx_t stmmac_xmit(struct sk + * descriptor and then barrier is needed to make sure that + * all is coherent before granting the DMA engine. 
+ */ +- smp_wmb(); ++ dma_wmb(); + } + + netdev_sent_queue(dev, skb->len); +@@ -2357,12 +2349,10 @@ static netdev_tx_t stmmac_xmit(struct sk + priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, + STMMAC_CHAN0); + +- spin_unlock(&priv->tx_lock); + return NETDEV_TX_OK; + + dma_map_err: +- spin_unlock(&priv->tx_lock); +- dev_err(priv->device, "Tx dma map failed\n"); ++ netdev_err(priv->dev, "Tx DMA map failed\n"); + dev_kfree_skb(skb); + priv->dev->stats.tx_dropped++; + return NETDEV_TX_OK; +@@ -2433,16 +2423,16 @@ static inline void stmmac_rx_refill(stru + DMA_FROM_DEVICE); + if (dma_mapping_error(priv->device, + priv->rx_skbuff_dma[entry])) { +- dev_err(priv->device, "Rx dma map failed\n"); ++ netdev_err(priv->dev, "Rx DMA map failed\n"); + dev_kfree_skb(skb); + break; + } + + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) { +- p->des0 = priv->rx_skbuff_dma[entry]; ++ p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]); + p->des1 = 0; + } else { +- p->des2 = priv->rx_skbuff_dma[entry]; ++ p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]); + } + if (priv->hw->mode->refill_desc3) + priv->hw->mode->refill_desc3(priv, p); +@@ -2450,17 +2440,17 @@ static inline void stmmac_rx_refill(stru + if (priv->rx_zeroc_thresh > 0) + priv->rx_zeroc_thresh--; + +- if (netif_msg_rx_status(priv)) +- pr_debug("\trefill entry #%d\n", entry); ++ netif_dbg(priv, rx_status, priv->dev, ++ "refill entry #%d\n", entry); + } +- wmb(); ++ dma_wmb(); + + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) + priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0); + else + priv->hw->desc->set_rx_owner(p); + +- wmb(); ++ dma_wmb(); + + entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); + } +@@ -2484,7 +2474,7 @@ static int stmmac_rx(struct stmmac_priv + if (netif_msg_rx_status(priv)) { + void *rx_head; + +- pr_info(">>>>>> %s: descriptor ring:\n", __func__); ++ netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); + if (priv->extend_desc) + rx_head = (void *)priv->dma_erx; + else +@@ -2546,9 +2536,9 @@ static int stmmac_rx(struct stmmac_priv + unsigned int des; + + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) +- des = p->des0; ++ des = le32_to_cpu(p->des0); + else +- des = p->des2; ++ des = le32_to_cpu(p->des2); + + frame_len = priv->hw->desc->get_rx_frame_len(p, coe); + +@@ -2557,9 +2547,9 @@ static int stmmac_rx(struct stmmac_priv + * ignored + */ + if (frame_len > priv->dma_buf_sz) { +- pr_err("%s: len %d larger than size (%d)\n", +- priv->dev->name, frame_len, +- priv->dma_buf_sz); ++ netdev_err(priv->dev, ++ "len %d larger than size (%d)\n", ++ frame_len, priv->dma_buf_sz); + priv->dev->stats.rx_length_errors++; + break; + } +@@ -2571,11 +2561,11 @@ static int stmmac_rx(struct stmmac_priv + frame_len -= ETH_FCS_LEN; + + if (netif_msg_rx_status(priv)) { +- pr_info("\tdesc: %p [entry %d] buff=0x%x\n", +- p, entry, des); ++ netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n", ++ p, entry, des); + if (frame_len > ETH_FRAME_LEN) +- pr_debug("\tframe size %d, COE: %d\n", +- frame_len, status); ++ netdev_dbg(priv->dev, "frame size %d, COE: %d\n", ++ frame_len, status); + } + + /* The zero-copy is always used for all the sizes +@@ -2612,8 +2602,9 @@ static int stmmac_rx(struct stmmac_priv + } else { + skb = priv->rx_skbuff[entry]; + if (unlikely(!skb)) { +- pr_err("%s: Inconsistent Rx chain\n", +- priv->dev->name); ++ netdev_err(priv->dev, ++ "%s: Inconsistent Rx chain\n", ++ priv->dev->name); + priv->dev->stats.rx_dropped++; + break; + } +@@ -2629,7 +2620,8 @@ static int stmmac_rx(struct 
stmmac_priv + } + + if (netif_msg_pktdata(priv)) { +- pr_debug("frame received (%dbytes)", frame_len); ++ netdev_dbg(priv->dev, "frame received (%dbytes)", ++ frame_len); + print_pkt(skb->data, frame_len); + } + +@@ -2732,7 +2724,7 @@ static int stmmac_change_mtu(struct net_ + int max_mtu; + + if (netif_running(dev)) { +- pr_err("%s: must be stopped to change its MTU\n", dev->name); ++ netdev_err(priv->dev, "must be stopped to change its MTU\n"); + return -EBUSY; + } + +@@ -2824,7 +2816,7 @@ static irqreturn_t stmmac_interrupt(int + pm_wakeup_event(priv->device, 0); + + if (unlikely(!dev)) { +- pr_err("%s: invalid dev pointer\n", __func__); ++ netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); + return IRQ_NONE; + } + +@@ -2882,7 +2874,6 @@ static void stmmac_poll_controller(struc + */ + static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) + { +- struct stmmac_priv *priv = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + if (!netif_running(dev)) +@@ -2892,9 +2883,9 @@ static int stmmac_ioctl(struct net_devic + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: +- if (!priv->phydev) ++ if (!dev->phydev) + return -EINVAL; +- ret = phy_mii_ioctl(priv->phydev, rq, cmd); ++ ret = phy_mii_ioctl(dev->phydev, rq, cmd); + break; + case SIOCSHWTSTAMP: + ret = stmmac_hwtstamp_ioctl(dev, rq); +@@ -2922,14 +2913,17 @@ static void sysfs_display_ring(void *hea + x = *(u64 *) ep; + seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", + i, (unsigned int)virt_to_phys(ep), +- ep->basic.des0, ep->basic.des1, +- ep->basic.des2, ep->basic.des3); ++ le32_to_cpu(ep->basic.des0), ++ le32_to_cpu(ep->basic.des1), ++ le32_to_cpu(ep->basic.des2), ++ le32_to_cpu(ep->basic.des3)); + ep++; + } else { + x = *(u64 *) p; + seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", + i, (unsigned int)virt_to_phys(ep), +- p->des0, p->des1, p->des2, p->des3); ++ le32_to_cpu(p->des0), le32_to_cpu(p->des1), ++ le32_to_cpu(p->des2), le32_to_cpu(p->des3)); + p++; + } + seq_printf(seq, "\n"); +@@ -2961,6 +2955,8 @@ static int stmmac_sysfs_ring_open(struct + return single_open(file, stmmac_sysfs_ring_read, inode->i_private); + } + ++/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */ ++ + static const struct file_operations stmmac_rings_status_fops = { + .owner = THIS_MODULE, + .open = stmmac_sysfs_ring_open, +@@ -2983,11 +2979,11 @@ static int stmmac_sysfs_dma_cap_read(str + seq_printf(seq, "\tDMA HW features\n"); + seq_printf(seq, "==============================\n"); + +- seq_printf(seq, "\t10/100 Mbps %s\n", ++ seq_printf(seq, "\t10/100 Mbps: %s\n", + (priv->dma_cap.mbps_10_100) ? "Y" : "N"); +- seq_printf(seq, "\t1000 Mbps %s\n", ++ seq_printf(seq, "\t1000 Mbps: %s\n", + (priv->dma_cap.mbps_1000) ? "Y" : "N"); +- seq_printf(seq, "\tHalf duple %s\n", ++ seq_printf(seq, "\tHalf duplex: %s\n", + (priv->dma_cap.half_duplex) ? "Y" : "N"); + seq_printf(seq, "\tHash Filter: %s\n", + (priv->dma_cap.hash_filter) ? "Y" : "N"); +@@ -3005,9 +3001,9 @@ static int stmmac_sysfs_dma_cap_read(str + (priv->dma_cap.rmon) ? "Y" : "N"); + seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", + (priv->dma_cap.time_stamp) ? "Y" : "N"); +- seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp:%s\n", ++ seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", + (priv->dma_cap.atime_stamp) ? "Y" : "N"); +- seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE) %s\n", ++ seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", + (priv->dma_cap.eee) ? 
"Y" : "N"); + seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); + seq_printf(seq, "\tChecksum Offload in TX: %s\n", +@@ -3054,8 +3050,7 @@ static int stmmac_init_fs(struct net_dev + priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); + + if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) { +- pr_err("ERROR %s/%s, debugfs create directory failed\n", +- STMMAC_RESOURCE_NAME, dev->name); ++ netdev_err(priv->dev, "ERROR failed to create debugfs directory\n"); + + return -ENOMEM; + } +@@ -3067,7 +3062,7 @@ static int stmmac_init_fs(struct net_dev + &stmmac_rings_status_fops); + + if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) { +- pr_info("ERROR creating stmmac ring debugfs file\n"); ++ netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n"); + debugfs_remove_recursive(priv->dbgfs_dir); + + return -ENOMEM; +@@ -3079,7 +3074,7 @@ static int stmmac_init_fs(struct net_dev + dev, &stmmac_dma_cap_fops); + + if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) { +- pr_info("ERROR creating stmmac MMC debugfs file\n"); ++ netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n"); + debugfs_remove_recursive(priv->dbgfs_dir); + + return -ENOMEM; +@@ -3151,11 +3146,11 @@ static int stmmac_hw_init(struct stmmac_ + } else { + if (chain_mode) { + priv->hw->mode = &chain_mode_ops; +- pr_info(" Chain mode enabled\n"); ++ dev_info(priv->device, "Chain mode enabled\n"); + priv->mode = STMMAC_CHAIN_MODE; + } else { + priv->hw->mode = &ring_mode_ops; +- pr_info(" Ring mode enabled\n"); ++ dev_info(priv->device, "Ring mode enabled\n"); + priv->mode = STMMAC_RING_MODE; + } + } +@@ -3163,7 +3158,7 @@ static int stmmac_hw_init(struct stmmac_ + /* Get the HW capability (new GMAC newer than 3.50a) */ + priv->hw_cap_support = stmmac_get_hw_features(priv); + if (priv->hw_cap_support) { +- pr_info(" DMA HW capability register supported"); ++ dev_info(priv->device, "DMA HW capability register supported\n"); + + /* We can override some gmac/dma configuration fields: e.g. + * enh_desc, tx_coe (e.g. 
that are passed through the +@@ -3188,8 +3183,9 @@ static int stmmac_hw_init(struct stmmac_ + else if (priv->dma_cap.rx_coe_type1) + priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; + +- } else +- pr_info(" No HW DMA feature register supported"); ++ } else { ++ dev_info(priv->device, "No HW DMA feature register supported\n"); ++ } + + /* To use alternate (extended), normal or GMAC4 descriptor structures */ + if (priv->synopsys_id >= DWMAC_CORE_4_00) +@@ -3199,20 +3195,20 @@ static int stmmac_hw_init(struct stmmac_ + + if (priv->plat->rx_coe) { + priv->hw->rx_csum = priv->plat->rx_coe; +- pr_info(" RX Checksum Offload Engine supported\n"); ++ dev_info(priv->device, "RX Checksum Offload Engine supported\n"); + if (priv->synopsys_id < DWMAC_CORE_4_00) +- pr_info("\tCOE Type %d\n", priv->hw->rx_csum); ++ dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum); + } + if (priv->plat->tx_coe) +- pr_info(" TX Checksum insertion supported\n"); ++ dev_info(priv->device, "TX Checksum insertion supported\n"); + + if (priv->plat->pmt) { +- pr_info(" Wake-Up On Lan supported\n"); ++ dev_info(priv->device, "Wake-Up On Lan supported\n"); + device_set_wakeup_capable(priv->device, 1); + } + + if (priv->dma_cap.tsoen) +- pr_info(" TSO supported\n"); ++ dev_info(priv->device, "TSO supported\n"); + + return 0; + } +@@ -3271,8 +3267,8 @@ int stmmac_dvr_probe(struct device *devi + + priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME); + if (IS_ERR(priv->stmmac_clk)) { +- dev_warn(priv->device, "%s: warning: cannot get CSR clock\n", +- __func__); ++ netdev_warn(priv->dev, "%s: warning: cannot get CSR clock\n", ++ __func__); + /* If failed to obtain stmmac_clk and specific clk_csr value + * is NOT passed from the platform, probe fail. + */ +@@ -3321,7 +3317,7 @@ int stmmac_dvr_probe(struct device *devi + if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { + ndev->hw_features |= NETIF_F_TSO; + priv->tso = true; +- pr_info(" TSO feature enabled\n"); ++ dev_info(priv->device, "TSO feature enabled\n"); + } + ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; + ndev->watchdog_timeo = msecs_to_jiffies(watchdog); +@@ -3341,13 +3337,13 @@ int stmmac_dvr_probe(struct device *devi + */ + if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) { + priv->use_riwt = 1; +- pr_info(" Enable RX Mitigation via HW Watchdog Timer\n"); ++ dev_info(priv->device, ++ "Enable RX Mitigation via HW Watchdog Timer\n"); + } + + netif_napi_add(ndev, &priv->napi, stmmac_poll, 64); + + spin_lock_init(&priv->lock); +- spin_lock_init(&priv->tx_lock); + + /* If a specific clk_csr value is passed from the platform + * this means that the CSR Clock Range selection cannot be +@@ -3368,15 +3364,17 @@ int stmmac_dvr_probe(struct device *devi + /* MDIO bus Registration */ + ret = stmmac_mdio_register(ndev); + if (ret < 0) { +- pr_debug("%s: MDIO bus (id: %d) registration failed", +- __func__, priv->plat->bus_id); +- goto error_napi_register; ++ dev_err(priv->device, ++ "%s: MDIO bus (id: %d) registration failed", ++ __func__, priv->plat->bus_id); ++ goto error_mdio_register; + } + } + + ret = register_netdev(ndev); + if (ret) { +- pr_err("%s: ERROR %i registering the device\n", __func__, ret); ++ dev_err(priv->device, "%s: ERROR %i registering the device\n", ++ __func__, ret); + goto error_netdev_register; + } + +@@ -3387,7 +3385,7 @@ error_netdev_register: + priv->hw->pcs != STMMAC_PCS_TBI && + priv->hw->pcs != STMMAC_PCS_RTBI) + stmmac_mdio_unregister(ndev); +-error_napi_register: ++error_mdio_register: + 
netif_napi_del(&priv->napi); + error_hw_init: + clk_disable_unprepare(priv->pclk); +@@ -3411,7 +3409,7 @@ int stmmac_dvr_remove(struct device *dev + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + +- pr_info("%s:\n\tremoving driver", __func__); ++ netdev_info(priv->dev, "%s: removing driver", __func__); + + priv->hw->dma->stop_rx(priv->ioaddr); + priv->hw->dma->stop_tx(priv->ioaddr); +@@ -3449,8 +3447,8 @@ int stmmac_suspend(struct device *dev) + if (!ndev || !netif_running(ndev)) + return 0; + +- if (priv->phydev) +- phy_stop(priv->phydev); ++ if (ndev->phydev) ++ phy_stop(ndev->phydev); + + spin_lock_irqsave(&priv->lock, flags); + +@@ -3544,8 +3542,8 @@ int stmmac_resume(struct device *dev) + + spin_unlock_irqrestore(&priv->lock, flags); + +- if (priv->phydev) +- phy_start(priv->phydev); ++ if (ndev->phydev) ++ phy_start(ndev->phydev); + + return 0; + } +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +@@ -42,13 +42,6 @@ + #define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT) + #define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT) + +-#define MII_PHY_ADDR_GMAC4_SHIFT 21 +-#define MII_PHY_ADDR_GMAC4_MASK GENMASK(25, 21) +-#define MII_PHY_REG_GMAC4_SHIFT 16 +-#define MII_PHY_REG_GMAC4_MASK GENMASK(20, 16) +-#define MII_CSR_CLK_GMAC4_SHIFT 8 +-#define MII_CSR_CLK_GMAC4_MASK GENMASK(11, 8) +- + static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr) + { + unsigned long curr; +@@ -68,8 +61,8 @@ static int stmmac_mdio_busy_wait(void __ + /** + * stmmac_mdio_read + * @bus: points to the mii_bus structure +- * @phyaddr: MII addr reg bits 15-11 +- * @phyreg: MII addr reg bits 10-6 ++ * @phyaddr: MII addr ++ * @phyreg: MII reg + * Description: it reads data from the MII register from within the phy device. + * For the 7111 GMAC, we must set the bit 0 in the MII address register while + * accessing the PHY registers. +@@ -83,14 +76,20 @@ static int stmmac_mdio_read(struct mii_b + unsigned int mii_data = priv->hw->mii.data; + + int data; +- u16 regValue = (((phyaddr << 11) & (0x0000F800)) | +- ((phyreg << 6) & (0x000007C0))); +- regValue |= MII_BUSY | ((priv->clk_csr & 0xF) << 2); ++ u32 value = MII_BUSY; ++ ++ value |= (phyaddr << priv->hw->mii.addr_shift) ++ & priv->hw->mii.addr_mask; ++ value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask; ++ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) ++ & priv->hw->mii.clk_csr_mask; ++ if (priv->plat->has_gmac4) ++ value |= MII_GMAC4_READ; + + if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) + return -EBUSY; + +- writel(regValue, priv->ioaddr + mii_address); ++ writel(value, priv->ioaddr + mii_address); + + if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) + return -EBUSY; +@@ -104,8 +103,8 @@ static int stmmac_mdio_read(struct mii_b + /** + * stmmac_mdio_write + * @bus: points to the mii_bus structure +- * @phyaddr: MII addr reg bits 15-11 +- * @phyreg: MII addr reg bits 10-6 ++ * @phyaddr: MII addr ++ * @phyreg: MII reg + * @phydata: phy data + * Description: it writes the data into the MII register from within the device. 
+ */ +@@ -117,85 +116,18 @@ static int stmmac_mdio_write(struct mii_ + unsigned int mii_address = priv->hw->mii.addr; + unsigned int mii_data = priv->hw->mii.data; + +- u16 value = +- (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0))) +- | MII_WRITE; +- +- value |= MII_BUSY | ((priv->clk_csr & 0xF) << 2); +- +- /* Wait until any existing MII operation is complete */ +- if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) +- return -EBUSY; +- +- /* Set the MII address register to write */ +- writel(phydata, priv->ioaddr + mii_data); +- writel(value, priv->ioaddr + mii_address); +- +- /* Wait until any existing MII operation is complete */ +- return stmmac_mdio_busy_wait(priv->ioaddr, mii_address); +-} +- +-/** +- * stmmac_mdio_read_gmac4 +- * @bus: points to the mii_bus structure +- * @phyaddr: MII addr reg bits 25-21 +- * @phyreg: MII addr reg bits 20-16 +- * Description: it reads data from the MII register of GMAC4 from within +- * the phy device. +- */ +-static int stmmac_mdio_read_gmac4(struct mii_bus *bus, int phyaddr, int phyreg) +-{ +- struct net_device *ndev = bus->priv; +- struct stmmac_priv *priv = netdev_priv(ndev); +- unsigned int mii_address = priv->hw->mii.addr; +- unsigned int mii_data = priv->hw->mii.data; +- int data; +- u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) & +- (MII_PHY_ADDR_GMAC4_MASK)) | +- ((phyreg << MII_PHY_REG_GMAC4_SHIFT) & +- (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_READ; +- +- value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK) +- << MII_CSR_CLK_GMAC4_SHIFT); +- +- if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) +- return -EBUSY; +- +- writel(value, priv->ioaddr + mii_address); +- +- if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) +- return -EBUSY; +- +- /* Read the data from the MII data register */ +- data = (int)readl(priv->ioaddr + mii_data); +- +- return data; +-} +- +-/** +- * stmmac_mdio_write_gmac4 +- * @bus: points to the mii_bus structure +- * @phyaddr: MII addr reg bits 25-21 +- * @phyreg: MII addr reg bits 20-16 +- * @phydata: phy data +- * Description: it writes the data into the MII register of GMAC4 from within +- * the device. 
+- */ +-static int stmmac_mdio_write_gmac4(struct mii_bus *bus, int phyaddr, int phyreg, +- u16 phydata) +-{ +- struct net_device *ndev = bus->priv; +- struct stmmac_priv *priv = netdev_priv(ndev); +- unsigned int mii_address = priv->hw->mii.addr; +- unsigned int mii_data = priv->hw->mii.data; +- +- u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) & +- (MII_PHY_ADDR_GMAC4_MASK)) | +- ((phyreg << MII_PHY_REG_GMAC4_SHIFT) & +- (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_WRITE; ++ u32 value = MII_BUSY; + +- value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK) +- << MII_CSR_CLK_GMAC4_SHIFT); ++ value |= (phyaddr << priv->hw->mii.addr_shift) ++ & priv->hw->mii.addr_mask; ++ value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask; ++ ++ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) ++ & priv->hw->mii.clk_csr_mask; ++ if (priv->plat->has_gmac4) ++ value |= MII_GMAC4_WRITE; ++ else ++ value |= MII_WRITE; + + /* Wait until any existing MII operation is complete */ + if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) +@@ -260,7 +192,7 @@ int stmmac_mdio_reset(struct mii_bus *bu + #endif + + if (data->phy_reset) { +- pr_debug("stmmac_mdio_reset: calling phy_reset\n"); ++ netdev_dbg(ndev, "stmmac_mdio_reset: calling phy_reset\n"); + data->phy_reset(priv->plat->bsp_priv); + } + +@@ -305,13 +237,8 @@ int stmmac_mdio_register(struct net_devi + #endif + + new_bus->name = "stmmac"; +- if (priv->plat->has_gmac4) { +- new_bus->read = &stmmac_mdio_read_gmac4; +- new_bus->write = &stmmac_mdio_write_gmac4; +- } else { +- new_bus->read = &stmmac_mdio_read; +- new_bus->write = &stmmac_mdio_write; +- } ++ new_bus->read = &stmmac_mdio_read; ++ new_bus->write = &stmmac_mdio_write; + + new_bus->reset = &stmmac_mdio_reset; + snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", +@@ -325,7 +252,7 @@ int stmmac_mdio_register(struct net_devi + else + err = mdiobus_register(new_bus); + if (err != 0) { +- pr_err("%s: Cannot register as MDIO bus\n", new_bus->name); ++ netdev_err(ndev, "Cannot register the MDIO bus\n"); + goto bus_register_fail; + } + +@@ -372,16 +299,16 @@ int stmmac_mdio_register(struct net_devi + irq_str = irq_num; + break; + } +- pr_info("%s: PHY ID %08x at %d IRQ %s (%s)%s\n", +- ndev->name, phydev->phy_id, addr, +- irq_str, phydev_name(phydev), +- act ? " active" : ""); ++ netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n", ++ phydev->phy_id, addr, ++ irq_str, phydev_name(phydev), ++ act ? 
" active" : ""); + found = 1; + } + } + + if (!found && !mdio_node) { +- pr_warn("%s: No PHY found\n", ndev->name); ++ netdev_warn(ndev, "No PHY found\n"); + mdiobus_unregister(new_bus); + mdiobus_free(new_bus); + return -ENODEV; +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -81,6 +81,7 @@ static void stmmac_default_data(struct p + plat->mdio_bus_data->phy_mask = 0; + + plat->dma_cfg->pbl = 32; ++ plat->dma_cfg->pblx8 = true; + /* TODO: AXI */ + + /* Set default value for multicast hash bins */ +@@ -88,6 +89,9 @@ static void stmmac_default_data(struct p + + /* Set default value for unicast filter entries */ + plat->unicast_filter_entries = 1; ++ ++ /* Set the maxmtu to a default of JUMBO_LEN */ ++ plat->maxmtu = JUMBO_LEN; + } + + static int quark_default_data(struct plat_stmmacenet_data *plat, +@@ -115,6 +119,7 @@ static int quark_default_data(struct pla + plat->mdio_bus_data->phy_mask = 0; + + plat->dma_cfg->pbl = 16; ++ plat->dma_cfg->pblx8 = true; + plat->dma_cfg->fixed_burst = 1; + /* AXI (TODO) */ + +@@ -124,6 +129,9 @@ static int quark_default_data(struct pla + /* Set default value for unicast filter entries */ + plat->unicast_filter_entries = 1; + ++ /* Set the maxmtu to a default of JUMBO_LEN */ ++ plat->maxmtu = JUMBO_LEN; ++ + return 0; + } + +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +@@ -292,6 +292,7 @@ stmmac_probe_config_dt(struct platform_d + if (of_device_is_compatible(np, "snps,dwmac-4.00") || + of_device_is_compatible(np, "snps,dwmac-4.10a")) { + plat->has_gmac4 = 1; ++ plat->has_gmac = 0; + plat->pmt = 1; + plat->tso_en = of_property_read_bool(np, "snps,tso"); + } +@@ -303,21 +304,25 @@ stmmac_probe_config_dt(struct platform_d + plat->force_sf_dma_mode = 1; + } + +- if (of_find_property(np, "snps,pbl", NULL)) { +- dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), +- GFP_KERNEL); +- if (!dma_cfg) { +- stmmac_remove_config_dt(pdev, plat); +- return ERR_PTR(-ENOMEM); +- } +- plat->dma_cfg = dma_cfg; +- of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); +- dma_cfg->aal = of_property_read_bool(np, "snps,aal"); +- dma_cfg->fixed_burst = +- of_property_read_bool(np, "snps,fixed-burst"); +- dma_cfg->mixed_burst = +- of_property_read_bool(np, "snps,mixed-burst"); +- } ++ dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), ++ GFP_KERNEL); ++ if (!dma_cfg) { ++ stmmac_remove_config_dt(pdev, plat); ++ return ERR_PTR(-ENOMEM); ++ } ++ plat->dma_cfg = dma_cfg; ++ ++ of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); ++ if (!dma_cfg->pbl) ++ dma_cfg->pbl = DEFAULT_DMA_PBL; ++ of_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl); ++ of_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl); ++ dma_cfg->pblx8 = !of_property_read_bool(np, "snps,no-pbl-x8"); ++ ++ dma_cfg->aal = of_property_read_bool(np, "snps,aal"); ++ dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst"); ++ dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst"); ++ + plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode"); + if (plat->force_thresh_dma_mode) { + plat->force_sf_dma_mode = 0; +@@ -445,9 +450,7 @@ static int stmmac_pltfr_suspend(struct d + struct platform_device *pdev = to_platform_device(dev); + + ret = stmmac_suspend(dev); +- if (priv->plat->suspend) +- priv->plat->suspend(pdev, priv->plat->bsp_priv); +- else if (priv->plat->exit) ++ if (priv->plat->exit) + priv->plat->exit(pdev, 
priv->plat->bsp_priv); + + return ret; +@@ -466,9 +469,7 @@ static int stmmac_pltfr_resume(struct de + struct stmmac_priv *priv = netdev_priv(ndev); + struct platform_device *pdev = to_platform_device(dev); + +- if (priv->plat->resume) +- priv->plat->resume(pdev, priv->plat->bsp_priv); +- else if (priv->plat->init) ++ if (priv->plat->init) + priv->plat->init(pdev, priv->plat->bsp_priv); + + return stmmac_resume(dev); +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -88,6 +88,9 @@ struct stmmac_mdio_bus_data { + + struct stmmac_dma_cfg { + int pbl; ++ int txpbl; ++ int rxpbl; ++ bool pblx8; + int fixed_burst; + int mixed_burst; + bool aal; +@@ -135,8 +138,6 @@ struct plat_stmmacenet_data { + void (*bus_setup)(void __iomem *ioaddr); + int (*init)(struct platform_device *pdev, void *priv); + void (*exit)(struct platform_device *pdev, void *priv); +- void (*suspend)(struct platform_device *pdev, void *priv); +- void (*resume)(struct platform_device *pdev, void *priv); + void *bsp_priv; + struct stmmac_axi *axi; + int has_gmac4; diff --git a/target/linux/sunxi/patches-4.9/0051-stmmac-form-4-11.patch b/target/linux/sunxi/patches-4.9/0051-stmmac-form-4-11.patch new file mode 100644 index 000000000..abd130448 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0051-stmmac-form-4-11.patch @@ -0,0 +1,2296 @@ +--- a/Documentation/devicetree/bindings/net/stmmac.txt ++++ b/Documentation/devicetree/bindings/net/stmmac.txt +@@ -49,6 +49,8 @@ Optional properties: + - snps,force_sf_dma_mode Force DMA to use the Store and Forward + mode for both tx and rx. This flag is + ignored if force_thresh_dma_mode is set. ++- snps,en-tx-lpi-clockgating Enable gating of the MAC TX clock during ++ TX low-power mode + - snps,multicast-filter-bins: Number of multicast filter hash bins + supported by this device instance + - snps,perfect-filter-entries: Number of perfect filter entries supported +@@ -65,7 +67,6 @@ Optional properties: + - snps,wr_osr_lmt: max write outstanding req. limit + - snps,rd_osr_lmt: max read outstanding req. limit + - snps,kbbe: do not cross 1KiB boundary. +- - snps,axi_all: align address + - snps,blen: this is a vector of supported burst length. + - snps,fb: fixed-burst + - snps,mb: mixed-burst +--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig ++++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig +@@ -1,5 +1,5 @@ + config STMMAC_ETH +- tristate "STMicroelectronics 10/100/1000 Ethernet driver" ++ tristate "STMicroelectronics 10/100/1000/EQOS Ethernet driver" + depends on HAS_IOMEM && HAS_DMA + select MII + select PHYLIB +@@ -7,9 +7,8 @@ config STMMAC_ETH + select PTP_1588_CLOCK + select RESET_CONTROLLER + ---help--- +- This is the driver for the Ethernet IPs are built around a +- Synopsys IP Core and only tested on the STMicroelectronics +- platforms. ++ This is the driver for the Ethernet IPs built around a ++ Synopsys IP Core. + + if STMMAC_ETH + +@@ -29,6 +28,15 @@ config STMMAC_PLATFORM + + if STMMAC_PLATFORM + ++config DWMAC_DWC_QOS_ETH ++ tristate "Support for snps,dwc-qos-ethernet.txt DT binding." ++ select PHYLIB ++ select CRC32 ++ select MII ++ depends on OF && HAS_DMA ++ help ++ Support for chips using the snps,dwc-qos-ethernet.txt DT binding. 
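# Illustrative note, not part of the patch itself: DWMAC_DWC_QOS_ETH
# builds the dwmac-dwc-qos-eth glue driver added further down, which
# binds to compatible = "snps,dwc-qos-ethernet-4.10" nodes and forces
# GMAC4 descriptors, AAL, TSO and PMT on. Its "snps,burst-map" property
# is a bitmask expanded into plat->axi->axi_blen[]; for example,
# burst-map = <0x7> yields the burst lengths {4, 8, 16}.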
++ + config DWMAC_GENERIC + tristate "Generic driver for DWMAC" + default STMMAC_PLATFORM +@@ -143,11 +151,11 @@ config STMMAC_PCI + tristate "STMMAC PCI bus support" + depends on STMMAC_ETH && PCI + ---help--- +- This is to select the Synopsys DWMAC available on PCI devices, +- if you have a controller with this interface, say Y or M here. ++ This selects the platform specific bus support for the stmmac driver. ++ This driver was tested on XLINX XC2V3000 FF1152AMT0221 ++ D1215994A VIRTEX FPGA board and SNPS QoS IPK Prototyping Kit. + +- This PCI support is tested on XLINX XC2V3000 FF1152AMT0221 +- D1215994A VIRTEX FPGA board. ++ If you have a controller with this interface, say Y or M here. + + If unsure, say N. + endif +--- a/drivers/net/ethernet/stmicro/stmmac/Makefile ++++ b/drivers/net/ethernet/stmicro/stmmac/Makefile +@@ -16,6 +16,7 @@ obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-alt + obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o + obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o + obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o ++obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o + obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o + stmmac-platform-objs:= stmmac_platform.o + dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o +--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c ++++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +@@ -16,10 +16,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +--- a/drivers/net/ethernet/stmicro/stmmac/common.h ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
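/* Illustrative sketch, not part of the patch itself: the dump_regs
 * callbacks in the hunks below stop printing to the kernel log and
 * instead fill a caller-supplied buffer, which is what lets
 * `ethtool -d` render the dump. The resulting call pattern, as used by
 * stmmac_ethtool_gregs() near the end of this patch:
 *
 *	memset(reg_space, 0x0, REG_SPACE_SIZE);
 *	priv->hw->mac->dump_regs(priv->hw, reg_space);
 *	priv->hw->dma->dump_regs(priv->ioaddr, reg_space);
 */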
+ +@@ -71,7 +67,7 @@ struct stmmac_extra_stats { + unsigned long overflow_error; + unsigned long ipc_csum_error; + unsigned long rx_collision; +- unsigned long rx_crc; ++ unsigned long rx_crc_errors; + unsigned long dribbling_bit; + unsigned long rx_length; + unsigned long rx_mii; +@@ -323,6 +319,9 @@ struct dma_features { + /* TX and RX number of channels */ + unsigned int number_rx_channel; + unsigned int number_tx_channel; ++ /* TX and RX number of queues */ ++ unsigned int number_rx_queues; ++ unsigned int number_tx_queues; + /* Alternate (enhanced) DESC mode */ + unsigned int enh_desc; + }; +@@ -340,7 +339,7 @@ struct dma_features { + /* Common MAC defines */ + #define MAC_CTRL_REG 0x00000000 /* MAC Control */ + #define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */ +-#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */ ++#define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */ + + /* Default LPI timers */ + #define STMMAC_DEFAULT_LIT_LS 0x3E8 +@@ -417,7 +416,7 @@ struct stmmac_dma_ops { + /* Configure the AXI Bus Mode Register */ + void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi); + /* Dump DMA registers */ +- void (*dump_regs) (void __iomem *ioaddr); ++ void (*dump_regs)(void __iomem *ioaddr, u32 *reg_space); + /* Set tx/rx threshold in the csr6 register + * An invalid value enables the store-and-forward mode */ + void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode, +@@ -454,8 +453,10 @@ struct stmmac_ops { + void (*core_init)(struct mac_device_info *hw, int mtu); + /* Enable and verify that the IPC module is supported */ + int (*rx_ipc)(struct mac_device_info *hw); ++ /* Enable RX Queues */ ++ void (*rx_queue_enable)(struct mac_device_info *hw, u32 queue); + /* Dump MAC registers */ +- void (*dump_regs)(struct mac_device_info *hw); ++ void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space); + /* Handle extra events on specific interrupts hw dependent */ + int (*host_irq_status)(struct mac_device_info *hw, + struct stmmac_extra_stats *x); +@@ -471,7 +472,8 @@ struct stmmac_ops { + unsigned int reg_n); + void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr, + unsigned int reg_n); +- void (*set_eee_mode)(struct mac_device_info *hw); ++ void (*set_eee_mode)(struct mac_device_info *hw, ++ bool en_tx_lpi_clockgating); + void (*reset_eee_mode)(struct mac_device_info *hw); + void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); + void (*set_eee_pls)(struct mac_device_info *hw, int link); +--- a/drivers/net/ethernet/stmicro/stmmac/descs.h ++++ b/drivers/net/ethernet/stmicro/stmmac/descs.h +@@ -11,10 +11,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h ++++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h +@@ -17,10 +17,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +--- /dev/null ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +@@ -0,0 +1,202 @@ ++/* ++ * Synopsys DWC Ethernet Quality-of-Service v4.10a linux driver ++ * ++ * Copyright (C) 2016 Joao Pinto ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "stmmac_platform.h" ++ ++static int dwc_eth_dwmac_config_dt(struct platform_device *pdev, ++ struct plat_stmmacenet_data *plat_dat) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ u32 burst_map = 0; ++ u32 bit_index = 0; ++ u32 a_index = 0; ++ ++ if (!plat_dat->axi) { ++ plat_dat->axi = kzalloc(sizeof(struct stmmac_axi), GFP_KERNEL); ++ ++ if (!plat_dat->axi) ++ return -ENOMEM; ++ } ++ ++ plat_dat->axi->axi_lpi_en = of_property_read_bool(np, "snps,en-lpi"); ++ if (of_property_read_u32(np, "snps,write-requests", ++ &plat_dat->axi->axi_wr_osr_lmt)) { ++ /** ++ * Since the register has a reset value of 1, if property ++ * is missing, default to 1. ++ */ ++ plat_dat->axi->axi_wr_osr_lmt = 1; ++ } else { ++ /** ++ * If property exists, to keep the behavior from dwc_eth_qos, ++ * subtract one after parsing. ++ */ ++ plat_dat->axi->axi_wr_osr_lmt--; ++ } ++ ++ if (of_property_read_u32(np, "read,read-requests", ++ &plat_dat->axi->axi_rd_osr_lmt)) { ++ /** ++ * Since the register has a reset value of 1, if property ++ * is missing, default to 1. ++ */ ++ plat_dat->axi->axi_rd_osr_lmt = 1; ++ } else { ++ /** ++ * If property exists, to keep the behavior from dwc_eth_qos, ++ * subtract one after parsing. ++ */ ++ plat_dat->axi->axi_rd_osr_lmt--; ++ } ++ of_property_read_u32(np, "snps,burst-map", &burst_map); ++ ++ /* converts burst-map bitmask to burst array */ ++ for (bit_index = 0; bit_index < 7; bit_index++) { ++ if (burst_map & (1 << bit_index)) { ++ switch (bit_index) { ++ case 0: ++ plat_dat->axi->axi_blen[a_index] = 4; break; ++ case 1: ++ plat_dat->axi->axi_blen[a_index] = 8; break; ++ case 2: ++ plat_dat->axi->axi_blen[a_index] = 16; break; ++ case 3: ++ plat_dat->axi->axi_blen[a_index] = 32; break; ++ case 4: ++ plat_dat->axi->axi_blen[a_index] = 64; break; ++ case 5: ++ plat_dat->axi->axi_blen[a_index] = 128; break; ++ case 6: ++ plat_dat->axi->axi_blen[a_index] = 256; break; ++ default: ++ break; ++ } ++ a_index++; ++ } ++ } ++ ++ /* dwc-qos needs GMAC4, AAL, TSO and PMT */ ++ plat_dat->has_gmac4 = 1; ++ plat_dat->dma_cfg->aal = 1; ++ plat_dat->tso_en = 1; ++ plat_dat->pmt = 1; ++ ++ return 0; ++} ++ ++static int dwc_eth_dwmac_probe(struct platform_device *pdev) ++{ ++ struct plat_stmmacenet_data *plat_dat; ++ struct stmmac_resources stmmac_res; ++ struct resource *res; ++ int ret; ++ ++ memset(&stmmac_res, 0, sizeof(struct stmmac_resources)); ++ ++ /** ++ * Since stmmac_platform supports name IRQ only, basic platform ++ * resource initialization is done in the glue logic. 
++ */ ++ stmmac_res.irq = platform_get_irq(pdev, 0); ++ if (stmmac_res.irq < 0) { ++ if (stmmac_res.irq != -EPROBE_DEFER) ++ dev_err(&pdev->dev, ++ "IRQ configuration information not found\n"); ++ ++ return stmmac_res.irq; ++ } ++ stmmac_res.wol_irq = stmmac_res.irq; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ stmmac_res.addr = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(stmmac_res.addr)) ++ return PTR_ERR(stmmac_res.addr); ++ ++ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); ++ if (IS_ERR(plat_dat)) ++ return PTR_ERR(plat_dat); ++ ++ plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk"); ++ if (IS_ERR(plat_dat->stmmac_clk)) { ++ dev_err(&pdev->dev, "apb_pclk clock not found.\n"); ++ ret = PTR_ERR(plat_dat->stmmac_clk); ++ plat_dat->stmmac_clk = NULL; ++ goto err_remove_config_dt; ++ } ++ clk_prepare_enable(plat_dat->stmmac_clk); ++ ++ plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk"); ++ if (IS_ERR(plat_dat->pclk)) { ++ dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); ++ ret = PTR_ERR(plat_dat->pclk); ++ plat_dat->pclk = NULL; ++ goto err_out_clk_dis_phy; ++ } ++ clk_prepare_enable(plat_dat->pclk); ++ ++ ret = dwc_eth_dwmac_config_dt(pdev, plat_dat); ++ if (ret) ++ goto err_out_clk_dis_aper; ++ ++ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); ++ if (ret) ++ goto err_out_clk_dis_aper; ++ ++ return 0; ++ ++err_out_clk_dis_aper: ++ clk_disable_unprepare(plat_dat->pclk); ++err_out_clk_dis_phy: ++ clk_disable_unprepare(plat_dat->stmmac_clk); ++err_remove_config_dt: ++ stmmac_remove_config_dt(pdev, plat_dat); ++ ++ return ret; ++} ++ ++static int dwc_eth_dwmac_remove(struct platform_device *pdev) ++{ ++ return stmmac_pltfr_remove(pdev); ++} ++ ++static const struct of_device_id dwc_eth_dwmac_match[] = { ++ { .compatible = "snps,dwc-qos-ethernet-4.10", }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match); ++ ++static struct platform_driver dwc_eth_dwmac_driver = { ++ .probe = dwc_eth_dwmac_probe, ++ .remove = dwc_eth_dwmac_remove, ++ .driver = { ++ .name = "dwc-eth-dwmac", ++ .of_match_table = dwc_eth_dwmac_match, ++ }, ++}; ++module_platform_driver(dwc_eth_dwmac_driver); ++ ++MODULE_AUTHOR("Joao Pinto "); ++MODULE_DESCRIPTION("Synopsys DWC Ethernet Quality-of-Service v4.10a driver"); ++MODULE_LICENSE("GPL v2"); +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +@@ -35,10 +35,6 @@ + + #define PRG_ETH0_TXDLY_SHIFT 5 + #define PRG_ETH0_TXDLY_MASK GENMASK(6, 5) +-#define PRG_ETH0_TXDLY_OFF (0x0 << PRG_ETH0_TXDLY_SHIFT) +-#define PRG_ETH0_TXDLY_QUARTER (0x1 << PRG_ETH0_TXDLY_SHIFT) +-#define PRG_ETH0_TXDLY_HALF (0x2 << PRG_ETH0_TXDLY_SHIFT) +-#define PRG_ETH0_TXDLY_THREE_QUARTERS (0x3 << PRG_ETH0_TXDLY_SHIFT) + + /* divider for the result of m250_sel */ + #define PRG_ETH0_CLK_M250_DIV_SHIFT 7 +@@ -69,6 +65,8 @@ struct meson8b_dwmac { + + struct clk_divider m25_div; + struct clk *m25_div_clk; ++ ++ u32 tx_delay_ns; + }; + + static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg, +@@ -179,11 +177,19 @@ static int meson8b_init_prg_eth(struct m + { + int ret; + unsigned long clk_rate; ++ u8 tx_dly_val = 0; + + switch (dwmac->phy_mode) { + case PHY_INTERFACE_MODE_RGMII: +- case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: ++ /* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where ++ * 8ns are exactly one cycle of the 125MHz RGMII TX clock): ++ * 0ns = 0x0, 2ns = 0x1, 4ns = 0x2, 6ns = 0x3 ++ */ ++ tx_dly_val = 
dwmac->tx_delay_ns >> 1; ++ /* fall through */ ++ ++ case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_TXID: + /* Generate a 25MHz clock for the PHY */ + clk_rate = 25 * 1000 * 1000; +@@ -196,9 +202,8 @@ static int meson8b_init_prg_eth(struct m + meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, + PRG_ETH0_INVERTED_RMII_CLK, 0); + +- /* TX clock delay - all known boards use a 1/4 cycle delay */ + meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK, +- PRG_ETH0_TXDLY_QUARTER); ++ tx_dly_val << PRG_ETH0_TXDLY_SHIFT); + break; + + case PHY_INTERFACE_MODE_RMII: +@@ -284,6 +289,11 @@ static int meson8b_dwmac_probe(struct pl + goto err_remove_config_dt; + } + ++ /* use 2ns as fallback since this value was previously hardcoded */ ++ if (of_property_read_u32(pdev->dev.of_node, "amlogic,tx-delay-ns", ++ &dwmac->tx_delay_ns)) ++ dwmac->tx_delay_ns = 2; ++ + ret = meson8b_init_clk(dwmac); + if (ret) + goto err_remove_config_dt; +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +@@ -302,6 +302,122 @@ static const struct rk_gmac_ops rk3288_o + .set_rmii_speed = rk3288_set_rmii_speed, + }; + ++#define RK3328_GRF_MAC_CON0 0x0900 ++#define RK3328_GRF_MAC_CON1 0x0904 ++ ++/* RK3328_GRF_MAC_CON0 */ ++#define RK3328_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7) ++#define RK3328_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0) ++ ++/* RK3328_GRF_MAC_CON1 */ ++#define RK3328_GMAC_PHY_INTF_SEL_RGMII \ ++ (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6)) ++#define RK3328_GMAC_PHY_INTF_SEL_RMII \ ++ (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6)) ++#define RK3328_GMAC_FLOW_CTRL GRF_BIT(3) ++#define RK3328_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3) ++#define RK3328_GMAC_SPEED_10M GRF_CLR_BIT(2) ++#define RK3328_GMAC_SPEED_100M GRF_BIT(2) ++#define RK3328_GMAC_RMII_CLK_25M GRF_BIT(7) ++#define RK3328_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7) ++#define RK3328_GMAC_CLK_125M (GRF_CLR_BIT(11) | GRF_CLR_BIT(12)) ++#define RK3328_GMAC_CLK_25M (GRF_BIT(11) | GRF_BIT(12)) ++#define RK3328_GMAC_CLK_2_5M (GRF_CLR_BIT(11) | GRF_BIT(12)) ++#define RK3328_GMAC_RMII_MODE GRF_BIT(9) ++#define RK3328_GMAC_RMII_MODE_CLR GRF_CLR_BIT(9) ++#define RK3328_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0) ++#define RK3328_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0) ++#define RK3328_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1) ++#define RK3328_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(0) ++ ++static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv, ++ int tx_delay, int rx_delay) ++{ ++ struct device *dev = &bsp_priv->pdev->dev; ++ ++ if (IS_ERR(bsp_priv->grf)) { ++ dev_err(dev, "Missing rockchip,grf property\n"); ++ return; ++ } ++ ++ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, ++ RK3328_GMAC_PHY_INTF_SEL_RGMII | ++ RK3328_GMAC_RMII_MODE_CLR | ++ RK3328_GMAC_RXCLK_DLY_ENABLE | ++ RK3328_GMAC_TXCLK_DLY_ENABLE); ++ ++ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON0, ++ RK3328_GMAC_CLK_RX_DL_CFG(rx_delay) | ++ RK3328_GMAC_CLK_TX_DL_CFG(tx_delay)); ++} ++ ++static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv) ++{ ++ struct device *dev = &bsp_priv->pdev->dev; ++ ++ if (IS_ERR(bsp_priv->grf)) { ++ dev_err(dev, "Missing rockchip,grf property\n"); ++ return; ++ } ++ ++ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, ++ RK3328_GMAC_PHY_INTF_SEL_RMII | ++ RK3328_GMAC_RMII_MODE); ++ ++ /* set MAC to RMII mode */ ++ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, GRF_BIT(11)); ++} ++ ++static void rk3328_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) ++{ ++ struct device *dev = 
&bsp_priv->pdev->dev; ++ ++ if (IS_ERR(bsp_priv->grf)) { ++ dev_err(dev, "Missing rockchip,grf property\n"); ++ return; ++ } ++ ++ if (speed == 10) ++ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, ++ RK3328_GMAC_CLK_2_5M); ++ else if (speed == 100) ++ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, ++ RK3328_GMAC_CLK_25M); ++ else if (speed == 1000) ++ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, ++ RK3328_GMAC_CLK_125M); ++ else ++ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed); ++} ++ ++static void rk3328_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) ++{ ++ struct device *dev = &bsp_priv->pdev->dev; ++ ++ if (IS_ERR(bsp_priv->grf)) { ++ dev_err(dev, "Missing rockchip,grf property\n"); ++ return; ++ } ++ ++ if (speed == 10) ++ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, ++ RK3328_GMAC_RMII_CLK_2_5M | ++ RK3328_GMAC_SPEED_10M); ++ else if (speed == 100) ++ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, ++ RK3328_GMAC_RMII_CLK_25M | ++ RK3328_GMAC_SPEED_100M); ++ else ++ dev_err(dev, "unknown speed value for RMII! speed=%d", speed); ++} ++ ++static const struct rk_gmac_ops rk3328_ops = { ++ .set_to_rgmii = rk3328_set_to_rgmii, ++ .set_to_rmii = rk3328_set_to_rmii, ++ .set_rgmii_speed = rk3328_set_rgmii_speed, ++ .set_rmii_speed = rk3328_set_rmii_speed, ++}; ++ + #define RK3366_GRF_SOC_CON6 0x0418 + #define RK3366_GRF_SOC_CON7 0x041c + +@@ -1006,6 +1122,7 @@ static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, + static const struct of_device_id rk_gmac_dwmac_match[] = { + { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops }, + { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops }, ++ { .compatible = "rockchip,rk3328-gmac", .data = &rk3328_ops }, + { .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops }, + { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops }, + { .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops }, +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +@@ -341,7 +341,7 @@ static int socfpga_dwmac_probe(struct pl + * mode. Create a copy of the core reset handle so it can be used by + * the driver later. + */ +- dwmac->stmmac_rst = stpriv->stmmac_rst; ++ dwmac->stmmac_rst = stpriv->plat->stmmac_rst; + + ret = socfpga_dwmac_set_phy_mode(dwmac); + if (ret) +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +@@ -10,10 +10,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
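/* Illustrative note, not part of the patch itself: the RK3328 helpers
 * above lean on the Rockchip GRF convention that the upper 16 bits of a
 * register act as a write-enable mask for the lower 16, so a single
 * regmap_write() updates selected bits without a read-modify-write. In
 * dwmac-rk.c, HIWORD_UPDATE(val, mask, shift) expands to
 *
 *	((val) << (shift) | (mask) << ((shift) + 16))
 *
 * so e.g. RK3328_GMAC_CLK_TX_DL_CFG(0x10) writes 0x10 into bits [6:0]
 * while asserting mask bits [22:16] and leaving all other fields alone;
 * GRF_BIT(n) and GRF_CLR_BIT(n) follow the same scheme for single bits.
 */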
+ +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +@@ -16,10 +16,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -96,17 +92,13 @@ static int dwmac1000_rx_ipc_enable(struc + return !!(value & GMAC_CONTROL_IPC); + } + +-static void dwmac1000_dump_regs(struct mac_device_info *hw) ++static void dwmac1000_dump_regs(struct mac_device_info *hw, u32 *reg_space) + { + void __iomem *ioaddr = hw->pcsr; + int i; +- pr_info("\tDWMAC1000 regs (base addr = 0x%p)\n", ioaddr); + +- for (i = 0; i < 55; i++) { +- int offset = i * 4; +- pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i, +- offset, readl(ioaddr + offset)); +- } ++ for (i = 0; i < 55; i++) ++ reg_space[i] = readl(ioaddr + i * 4); + } + + static void dwmac1000_set_umac_addr(struct mac_device_info *hw, +@@ -347,11 +339,14 @@ static int dwmac1000_irq_status(struct m + return ret; + } + +-static void dwmac1000_set_eee_mode(struct mac_device_info *hw) ++static void dwmac1000_set_eee_mode(struct mac_device_info *hw, ++ bool en_tx_lpi_clockgating) + { + void __iomem *ioaddr = hw->pcsr; + u32 value; + ++ /*TODO - en_tx_lpi_clockgating treatment */ ++ + /* Enable the link status receive on RGMII, SGMII ore SMII + * receive path and instruct the transmit to enter in LPI + * state. +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +@@ -16,10 +16,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -205,18 +201,14 @@ static void dwmac1000_dma_operation_mode + writel(csr6, ioaddr + DMA_CONTROL); + } + +-static void dwmac1000_dump_dma_regs(void __iomem *ioaddr) ++static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) + { + int i; +- pr_info(" DMA registers\n"); +- for (i = 0; i < 22; i++) { +- if ((i < 9) || (i > 17)) { +- int offset = i * 4; +- pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i, +- (DMA_BUS_MODE + offset), +- readl(ioaddr + DMA_BUS_MODE + offset)); +- } +- } ++ ++ for (i = 0; i < 22; i++) ++ if ((i < 9) || (i > 17)) ++ reg_space[DMA_BUS_MODE / 4 + i] = ++ readl(ioaddr + DMA_BUS_MODE + i * 4); + } + + static void dwmac1000_get_hw_feature(void __iomem *ioaddr, +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +@@ -18,10 +18,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
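/* Illustrative note, not part of the patch itself: set_eee_mode() now
 * takes a bool en_tx_lpi_clockgating, threading the new DT property
 * snps,en-tx-lpi-clockgating (see the stmmac.txt binding hunk earlier)
 * down to the MAC. The dwmac1000 variant above only carries a TODO for
 * it, while the GMAC4 code later in this patch honours it via the
 * GMAC4_LPI_CTRL_STATUS_LPITCSE bit. The companion set_eee_timer() op
 * packs its arguments as TW (us) in bits [15:0] and LS (ms) in bits
 * [25:16], e.g. ls = STMMAC_DEFAULT_LIT_LS (0x3E8 = 1000 ms).
 */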
+ +@@ -44,28 +40,18 @@ static void dwmac100_core_init(struct ma + #endif + } + +-static void dwmac100_dump_mac_regs(struct mac_device_info *hw) ++static void dwmac100_dump_mac_regs(struct mac_device_info *hw, u32 *reg_space) + { + void __iomem *ioaddr = hw->pcsr; +- pr_info("\t----------------------------------------------\n" +- "\t DWMAC 100 CSR (base addr = 0x%p)\n" +- "\t----------------------------------------------\n", ioaddr); +- pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL, +- readl(ioaddr + MAC_CONTROL)); +- pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH, +- readl(ioaddr + MAC_ADDR_HIGH)); +- pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW, +- readl(ioaddr + MAC_ADDR_LOW)); +- pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n", +- MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH)); +- pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n", +- MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW)); +- pr_info("\tflow control (offset 0x%x): 0x%08x\n", +- MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL)); +- pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1, +- readl(ioaddr + MAC_VLAN1)); +- pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2, +- readl(ioaddr + MAC_VLAN2)); ++ ++ reg_space[MAC_CONTROL / 4] = readl(ioaddr + MAC_CONTROL); ++ reg_space[MAC_ADDR_HIGH / 4] = readl(ioaddr + MAC_ADDR_HIGH); ++ reg_space[MAC_ADDR_LOW / 4] = readl(ioaddr + MAC_ADDR_LOW); ++ reg_space[MAC_HASH_HIGH / 4] = readl(ioaddr + MAC_HASH_HIGH); ++ reg_space[MAC_HASH_LOW / 4] = readl(ioaddr + MAC_HASH_LOW); ++ reg_space[MAC_FLOW_CTRL / 4] = readl(ioaddr + MAC_FLOW_CTRL); ++ reg_space[MAC_VLAN1 / 4] = readl(ioaddr + MAC_VLAN1); ++ reg_space[MAC_VLAN2 / 4] = readl(ioaddr + MAC_VLAN2); + } + + static int dwmac100_rx_ipc_enable(struct mac_device_info *hw) +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +@@ -18,10 +18,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -70,19 +66,18 @@ static void dwmac100_dma_operation_mode( + writel(csr6, ioaddr + DMA_CONTROL); + } + +-static void dwmac100_dump_dma_regs(void __iomem *ioaddr) ++static void dwmac100_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) + { + int i; + +- pr_debug("DWMAC 100 DMA CSR\n"); + for (i = 0; i < 9; i++) +- pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i, +- (DMA_BUS_MODE + i * 4), +- readl(ioaddr + DMA_BUS_MODE + i * 4)); +- +- pr_debug("\tCSR20 (0x%x): 0x%08x, CSR21 (0x%x): 0x%08x\n", +- DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR), +- DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR)); ++ reg_space[DMA_BUS_MODE / 4 + i] = ++ readl(ioaddr + DMA_BUS_MODE + i * 4); ++ ++ reg_space[DMA_CUR_TX_BUF_ADDR / 4] = ++ readl(ioaddr + DMA_CUR_TX_BUF_ADDR); ++ reg_space[DMA_CUR_RX_BUF_ADDR / 4] = ++ readl(ioaddr + DMA_CUR_RX_BUF_ADDR); + } + + /* DMA controller has two counters to track the number of the missed frames. 
*/ +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +@@ -22,6 +22,7 @@ + #define GMAC_HASH_TAB_32_63 0x00000014 + #define GMAC_RX_FLOW_CTRL 0x00000090 + #define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4) ++#define GMAC_RXQ_CTRL0 0x000000a0 + #define GMAC_INT_STATUS 0x000000b0 + #define GMAC_INT_EN 0x000000b4 + #define GMAC_PCS_BASE 0x000000e0 +@@ -44,6 +45,11 @@ + + #define GMAC_MAX_PERFECT_ADDRESSES 128 + ++/* MAC RX Queue Enable */ ++#define GMAC_RX_QUEUE_CLEAR(queue) ~(GENMASK(1, 0) << ((queue) * 2)) ++#define GMAC_RX_AV_QUEUE_ENABLE(queue) BIT((queue) * 2) ++#define GMAC_RX_DCB_QUEUE_ENABLE(queue) BIT(((queue) * 2) + 1) ++ + /* MAC Flow Control RX */ + #define GMAC_RX_FLOW_CTRL_RFE BIT(0) + +@@ -84,6 +90,19 @@ enum power_event { + power_down = 0x00000001, + }; + ++/* Energy Efficient Ethernet (EEE) for GMAC4 ++ * ++ * LPI status, timer and control register offset ++ */ ++#define GMAC4_LPI_CTRL_STATUS 0xd0 ++#define GMAC4_LPI_TIMER_CTRL 0xd4 ++ ++/* LPI control and status defines */ ++#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */ ++#define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */ ++#define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */ ++#define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */ ++ + /* MAC Debug bitmap */ + #define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17) + #define GMAC_DEBUG_TFCSTS_SHIFT 17 +@@ -133,6 +152,8 @@ enum power_event { + /* MAC HW features2 bitmap */ + #define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18) + #define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12) ++#define GMAC_HW_FEAT_TXQCNT GENMASK(9, 6) ++#define GMAC_HW_FEAT_RXQCNT GENMASK(3, 0) + + /* MAC HW ADDR regs */ + #define GMAC_HI_DCS GENMASK(18, 16) +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +@@ -59,19 +59,24 @@ static void dwmac4_core_init(struct mac_ + writel(value, ioaddr + GMAC_INT_EN); + } + +-static void dwmac4_dump_regs(struct mac_device_info *hw) ++static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u32 queue) + { + void __iomem *ioaddr = hw->pcsr; +- int i; ++ u32 value = readl(ioaddr + GMAC_RXQ_CTRL0); + +- pr_debug("\tDWMAC4 regs (base addr = 0x%p)\n", ioaddr); ++ value &= GMAC_RX_QUEUE_CLEAR(queue); ++ value |= GMAC_RX_AV_QUEUE_ENABLE(queue); + +- for (i = 0; i < GMAC_REG_NUM; i++) { +- int offset = i * 4; ++ writel(value, ioaddr + GMAC_RXQ_CTRL0); ++} + +- pr_debug("\tReg No. %d (offset 0x%x): 0x%08x\n", i, +- offset, readl(ioaddr + offset)); +- } ++static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ int i; ++ ++ for (i = 0; i < GMAC_REG_NUM; i++) ++ reg_space[i] = readl(ioaddr + i * 4); + } + + static int dwmac4_rx_ipc_enable(struct mac_device_info *hw) +@@ -126,6 +131,65 @@ static void dwmac4_get_umac_addr(struct + GMAC_ADDR_LOW(reg_n)); + } + ++static void dwmac4_set_eee_mode(struct mac_device_info *hw, ++ bool en_tx_lpi_clockgating) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ u32 value; ++ ++ /* Enable the link status receive on RGMII, SGMII ore SMII ++ * receive path and instruct the transmit to enter in LPI ++ * state. 
++ */ ++ value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); ++ value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA; ++ ++ if (en_tx_lpi_clockgating) ++ value |= GMAC4_LPI_CTRL_STATUS_LPITCSE; ++ ++ writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); ++} ++ ++static void dwmac4_reset_eee_mode(struct mac_device_info *hw) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ u32 value; ++ ++ value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); ++ value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA); ++ writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); ++} ++ ++static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ u32 value; ++ ++ value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); ++ ++ if (link) ++ value |= GMAC4_LPI_CTRL_STATUS_PLS; ++ else ++ value &= ~GMAC4_LPI_CTRL_STATUS_PLS; ++ ++ writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); ++} ++ ++static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16); ++ ++ /* Program the timers in the LPI timer control register: ++ * LS: minimum time (ms) for which the link ++ * status from PHY should be ok before transmitting ++ * the LPI pattern. ++ * TW: minimum time (us) for which the core waits ++ * after it has stopped transmitting the LPI pattern. ++ */ ++ writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL); ++} ++ + static void dwmac4_set_filter(struct mac_device_info *hw, + struct net_device *dev) + { +@@ -392,12 +456,17 @@ static void dwmac4_debug(void __iomem *i + static const struct stmmac_ops dwmac4_ops = { + .core_init = dwmac4_core_init, + .rx_ipc = dwmac4_rx_ipc_enable, ++ .rx_queue_enable = dwmac4_rx_queue_enable, + .dump_regs = dwmac4_dump_regs, + .host_irq_status = dwmac4_irq_status, + .flow_ctrl = dwmac4_flow_ctrl, + .pmt = dwmac4_pmt, + .set_umac_addr = dwmac4_set_umac_addr, + .get_umac_addr = dwmac4_get_umac_addr, ++ .set_eee_mode = dwmac4_set_eee_mode, ++ .reset_eee_mode = dwmac4_reset_eee_mode, ++ .set_eee_timer = dwmac4_set_eee_timer, ++ .set_eee_pls = dwmac4_set_eee_pls, + .pcs_ctrl_ane = dwmac4_ctrl_ane, + .pcs_rane = dwmac4_rane, + .pcs_get_adv_lp = dwmac4_get_adv_lp, +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +@@ -103,7 +103,7 @@ static int dwmac4_wrback_get_rx_status(v + x->rx_mii++; + + if (unlikely(rdes3 & RDES3_CRC_ERROR)) { +- x->rx_crc++; ++ x->rx_crc_errors++; + stats->rx_crc_errors++; + } + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +@@ -127,53 +127,51 @@ static void dwmac4_dma_init(void __iomem + dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i); + } + +-static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel) ++static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel, ++ u32 *reg_space) + { +- pr_debug(" Channel %d\n", channel); +- pr_debug("\tDMA_CHAN_CONTROL, offset: 0x%x, val: 0x%x\n", 0, +- readl(ioaddr + DMA_CHAN_CONTROL(channel))); +- pr_debug("\tDMA_CHAN_TX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x4, +- readl(ioaddr + DMA_CHAN_TX_CONTROL(channel))); +- pr_debug("\tDMA_CHAN_RX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x8, +- readl(ioaddr + DMA_CHAN_RX_CONTROL(channel))); +- pr_debug("\tDMA_CHAN_TX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x14, +- readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel))); +- pr_debug("\tDMA_CHAN_RX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x1c, +- readl(ioaddr + 
DMA_CHAN_RX_BASE_ADDR(channel))); +- pr_debug("\tDMA_CHAN_TX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x20, +- readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel))); +- pr_debug("\tDMA_CHAN_RX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x28, +- readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel))); +- pr_debug("\tDMA_CHAN_TX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x2c, +- readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel))); +- pr_debug("\tDMA_CHAN_RX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x30, +- readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel))); +- pr_debug("\tDMA_CHAN_INTR_ENA, offset: 0x%x, val: 0x%x\n", 0x34, +- readl(ioaddr + DMA_CHAN_INTR_ENA(channel))); +- pr_debug("\tDMA_CHAN_RX_WATCHDOG, offset: 0x%x, val: 0x%x\n", 0x38, +- readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel))); +- pr_debug("\tDMA_CHAN_SLOT_CTRL_STATUS, offset: 0x%x, val: 0x%x\n", 0x3c, +- readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel))); +- pr_debug("\tDMA_CHAN_CUR_TX_DESC, offset: 0x%x, val: 0x%x\n", 0x44, +- readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel))); +- pr_debug("\tDMA_CHAN_CUR_RX_DESC, offset: 0x%x, val: 0x%x\n", 0x4c, +- readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel))); +- pr_debug("\tDMA_CHAN_CUR_TX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x54, +- readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel))); +- pr_debug("\tDMA_CHAN_CUR_RX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x5c, +- readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel))); +- pr_debug("\tDMA_CHAN_STATUS, offset: 0x%x, val: 0x%x\n", 0x60, +- readl(ioaddr + DMA_CHAN_STATUS(channel))); ++ reg_space[DMA_CHAN_CONTROL(channel) / 4] = ++ readl(ioaddr + DMA_CHAN_CONTROL(channel)); ++ reg_space[DMA_CHAN_TX_CONTROL(channel) / 4] = ++ readl(ioaddr + DMA_CHAN_TX_CONTROL(channel)); ++ reg_space[DMA_CHAN_RX_CONTROL(channel) / 4] = ++ readl(ioaddr + DMA_CHAN_RX_CONTROL(channel)); ++ reg_space[DMA_CHAN_TX_BASE_ADDR(channel) / 4] = ++ readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel)); ++ reg_space[DMA_CHAN_RX_BASE_ADDR(channel) / 4] = ++ readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel)); ++ reg_space[DMA_CHAN_TX_END_ADDR(channel) / 4] = ++ readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel)); ++ reg_space[DMA_CHAN_RX_END_ADDR(channel) / 4] = ++ readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel)); ++ reg_space[DMA_CHAN_TX_RING_LEN(channel) / 4] = ++ readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel)); ++ reg_space[DMA_CHAN_RX_RING_LEN(channel) / 4] = ++ readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel)); ++ reg_space[DMA_CHAN_INTR_ENA(channel) / 4] = ++ readl(ioaddr + DMA_CHAN_INTR_ENA(channel)); ++ reg_space[DMA_CHAN_RX_WATCHDOG(channel) / 4] = ++ readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel)); ++ reg_space[DMA_CHAN_SLOT_CTRL_STATUS(channel) / 4] = ++ readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel)); ++ reg_space[DMA_CHAN_CUR_TX_DESC(channel) / 4] = ++ readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel)); ++ reg_space[DMA_CHAN_CUR_RX_DESC(channel) / 4] = ++ readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel)); ++ reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(channel) / 4] = ++ readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel)); ++ reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(channel) / 4] = ++ readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel)); ++ reg_space[DMA_CHAN_STATUS(channel) / 4] = ++ readl(ioaddr + DMA_CHAN_STATUS(channel)); + } + +-static void dwmac4_dump_dma_regs(void __iomem *ioaddr) ++static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) + { + int i; + +- pr_debug(" GMAC4 DMA registers\n"); +- + for (i = 0; i < DMA_CHANNEL_NB_MAX; i++) +- _dwmac4_dump_dma_regs(ioaddr, i); ++ _dwmac4_dump_dma_regs(ioaddr, i, reg_space); + } + + static void 
dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt) +@@ -303,6 +301,11 @@ static void dwmac4_get_hw_feature(void _ + ((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1; + dma_cap->number_tx_channel = + ((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1; ++ /* TX and RX number of queues */ ++ dma_cap->number_rx_queues = ++ ((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1; ++ dma_cap->number_tx_queues = ++ ((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1; + + /* IEEE 1588-2002 */ + dma_cap->time_stamp = 0; +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +@@ -10,10 +10,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -21,6 +17,7 @@ + *******************************************************************************/ + + #include <asm/io.h> ++#include <linux/iopoll.h> + #include "common.h" + #include "dwmac_dma.h" + +@@ -29,19 +26,16 @@ + int dwmac_dma_reset(void __iomem *ioaddr) + { + u32 value = readl(ioaddr + DMA_BUS_MODE); +- int limit; ++ int err; + + /* DMA SW reset */ + value |= DMA_BUS_MODE_SFT_RESET; + writel(value, ioaddr + DMA_BUS_MODE); +- limit = 10; +- while (limit--) { +- if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) +- break; +- mdelay(10); +- } + +- if (limit < 0) ++ err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value, ++ !(value & DMA_BUS_MODE_SFT_RESET), ++ 100000, 10000); ++ if (err) + return -EBUSY; + + return 0; +@@ -102,7 +96,7 @@ static void show_tx_process_state(unsign + pr_debug("- TX (Stopped): Reset or Stop command\n"); + break; + case 1: +- pr_debug("- TX (Running):Fetching the Tx desc\n"); ++ pr_debug("- TX (Running): Fetching the Tx desc\n"); + break; + case 2: + pr_debug("- TX (Running): Waiting for end of tx\n"); +@@ -136,7 +130,7 @@ static void show_rx_process_state(unsign + pr_debug("- RX (Running): Fetching the Rx desc\n"); + break; + case 2: +- pr_debug("- RX (Running):Checking for end of pkt\n"); ++ pr_debug("- RX (Running): Checking for end of pkt\n"); + break; + case 3: + pr_debug("- RX (Running): Waiting for Rx pkt\n"); +@@ -246,7 +240,7 @@ void stmmac_set_mac_addr(void __iomem *i + unsigned long data; + + data = (addr[5] << 8) | addr[4]; +- /* For MAC Addr registers se have to set the Address Enable (AE) ++ /* For MAC Addr registers we have to set the Address Enable (AE) + * bit that has no effect on the High Reg 0 where the bit 31 (MO) + * is RO.
+ */ +@@ -261,9 +255,9 @@ void stmmac_set_mac(void __iomem *ioaddr + u32 value = readl(ioaddr + MAC_CTRL_REG); + + if (enable) +- value |= MAC_RNABLE_RX | MAC_ENABLE_TX; ++ value |= MAC_ENABLE_RX | MAC_ENABLE_TX; + else +- value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX); ++ value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX); + + writel(value, ioaddr + MAC_CTRL_REG); + } +--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c ++++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -225,7 +221,7 @@ static int enh_desc_get_rx_status(void * + x->rx_mii++; + + if (unlikely(rdes0 & RDES0_CRC_ERROR)) { +- x->rx_crc++; ++ x->rx_crc_errors++; + stats->rx_crc_errors++; + } + ret = discard_frame; +--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h ++++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c ++++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -115,7 +111,7 @@ static int ndesc_get_rx_status(void *dat + stats->collisions++; + } + if (unlikely(rdes0 & RDES0_CRC_ERROR)) { +- x->rx_crc++; ++ x->rx_crc_errors++; + stats->rx_crc_errors++; + } + ret = discard_frame; +--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c ++++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +@@ -16,10 +16,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
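/* Illustrative sketch, not part of the patch itself: the stmmac.h hunk
 * that follows drops stmmac_clk, pclk and stmmac_rst from struct
 * stmmac_priv; in the 4.11 layout they live in plat_stmmacenet_data, so
 * glue layers can hand pre-acquired clocks to the core, mirroring
 * dwc_eth_dwmac_probe() earlier in this patch:
 *
 *	plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
 *	clk_prepare_enable(plat_dat->stmmac_clk);
 *	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 */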
+ +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h +@@ -10,10 +10,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -106,9 +102,6 @@ struct stmmac_priv { + u32 msg_enable; + int wolopts; + int wol_irq; +- struct clk *stmmac_clk; +- struct clk *pclk; +- struct reset_control *stmmac_rst; + int clk_csr; + struct timer_list eee_ctrl_timer; + int lpi_irq; +@@ -120,8 +113,6 @@ struct stmmac_priv { + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_ops; + unsigned int default_addend; +- struct clk *clk_ptp_ref; +- unsigned int clk_ptp_rate; + u32 adv_ts; + int use_riwt; + int irq_wake; +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -65,7 +61,7 @@ static const struct stmmac_stats stmmac_ + STMMAC_STAT(overflow_error), + STMMAC_STAT(ipc_csum_error), + STMMAC_STAT(rx_collision), +- STMMAC_STAT(rx_crc), ++ STMMAC_STAT(rx_crc_errors), + STMMAC_STAT(dribbling_bit), + STMMAC_STAT(rx_length), + STMMAC_STAT(rx_mii), +@@ -439,32 +435,14 @@ static int stmmac_ethtool_get_regs_len(s + static void stmmac_ethtool_gregs(struct net_device *dev, + struct ethtool_regs *regs, void *space) + { +- int i; + u32 *reg_space = (u32 *) space; + + struct stmmac_priv *priv = netdev_priv(dev); + + memset(reg_space, 0x0, REG_SPACE_SIZE); + +- if (!(priv->plat->has_gmac || priv->plat->has_gmac4)) { +- /* MAC registers */ +- for (i = 0; i < 12; i++) +- reg_space[i] = readl(priv->ioaddr + (i * 4)); +- /* DMA registers */ +- for (i = 0; i < 9; i++) +- reg_space[i + 12] = +- readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4))); +- reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR); +- reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR); +- } else { +- /* MAC registers */ +- for (i = 0; i < 55; i++) +- reg_space[i] = readl(priv->ioaddr + (i * 4)); +- /* DMA registers */ +- for (i = 0; i < 22; i++) +- reg_space[i + 55] = +- readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4))); +- } ++ priv->hw->mac->dump_regs(priv->hw, reg_space); ++ priv->hw->dma->dump_regs(priv->ioaddr, reg_space); + } + + static void +@@ -712,7 +690,7 @@ static int stmmac_ethtool_op_set_eee(str + + static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) + { +- unsigned long clk = clk_get_rate(priv->stmmac_clk); ++ unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); + + if (!clk) + return 0; +@@ -722,7 +700,7 @@ static u32 stmmac_usec2riwt(u32 usec, st + + static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv) + { +- unsigned long clk = clk_get_rate(priv->stmmac_clk); ++ unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); + + if (!clk) + return 0; +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c 
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -13,10 +13,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -158,7 +154,7 @@ static void stmmac_clk_csr_set(struct st + { + u32 clk_rate; + +- clk_rate = clk_get_rate(priv->stmmac_clk); ++ clk_rate = clk_get_rate(priv->plat->stmmac_clk); + + /* Platform provided default clk_csr would be assumed valid + * for all other cases except for the below mentioned ones. +@@ -191,7 +187,7 @@ static void print_pkt(unsigned char *buf + + static inline u32 stmmac_tx_avail(struct stmmac_priv *priv) + { +- unsigned avail; ++ u32 avail; + + if (priv->dirty_tx > priv->cur_tx) + avail = priv->dirty_tx - priv->cur_tx - 1; +@@ -203,7 +199,7 @@ static inline u32 stmmac_tx_avail(struct + + static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv) + { +- unsigned dirty; ++ u32 dirty; + + if (priv->dirty_rx <= priv->cur_rx) + dirty = priv->cur_rx - priv->dirty_rx; +@@ -216,7 +212,7 @@ static inline u32 stmmac_rx_dirty(struct + /** + * stmmac_hw_fix_mac_speed - callback for speed selection + * @priv: driver private structure +- * Description: on some platforms (e.g. ST), some HW system configuraton ++ * Description: on some platforms (e.g. ST), some HW system configuration + * registers have to be set according to the link speed negotiated. + */ + static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv) +@@ -239,7 +235,8 @@ static void stmmac_enable_eee_mode(struc + /* Check and enter in LPI mode */ + if ((priv->dirty_tx == priv->cur_tx) && + (priv->tx_path_in_lpi_mode == false)) +- priv->hw->mac->set_eee_mode(priv->hw); ++ priv->hw->mac->set_eee_mode(priv->hw, ++ priv->plat->en_tx_lpi_clockgating); + } + + /** +@@ -415,7 +412,7 @@ static void stmmac_get_rx_hwtstamp(struc + /** + * stmmac_hwtstamp_ioctl - control hardware timestamping. + * @dev: device pointer. +- * @ifr: An IOCTL specefic structure, that can contain a pointer to ++ * @ifr: An IOCTL specific structure, that can contain a pointer to + * a proprietary structure used to pass information to the driver. 
+ * Description: + * This function configures the MAC to enable/disable both outgoing(TX) +@@ -606,7 +603,7 @@ static int stmmac_hwtstamp_ioctl(struct + + /* program Sub Second Increment reg */ + sec_inc = priv->hw->ptp->config_sub_second_increment( +- priv->ptpaddr, priv->clk_ptp_rate, ++ priv->ptpaddr, priv->plat->clk_ptp_rate, + priv->plat->has_gmac4); + temp = div_u64(1000000000ULL, sec_inc); + +@@ -616,7 +613,7 @@ static int stmmac_hwtstamp_ioctl(struct + * where, freq_div_ratio = 1e9ns/sec_inc + */ + temp = (u64)(temp << 32); +- priv->default_addend = div_u64(temp, priv->clk_ptp_rate); ++ priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); + priv->hw->ptp->config_addend(priv->ptpaddr, + priv->default_addend); + +@@ -644,18 +641,6 @@ static int stmmac_init_ptp(struct stmmac + if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) + return -EOPNOTSUPP; + +- /* Fall-back to main clock in case of no PTP ref is passed */ +- priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref"); +- if (IS_ERR(priv->clk_ptp_ref)) { +- priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk); +- priv->clk_ptp_ref = NULL; +- netdev_dbg(priv->dev, "PTP uses main clock\n"); +- } else { +- clk_prepare_enable(priv->clk_ptp_ref); +- priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref); +- netdev_dbg(priv->dev, "PTP rate %d\n", priv->clk_ptp_rate); +- } +- + priv->adv_ts = 0; + /* Check if adv_ts can be enabled for dwmac 4.x core */ + if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp) +@@ -682,8 +667,8 @@ static int stmmac_init_ptp(struct stmmac + + static void stmmac_release_ptp(struct stmmac_priv *priv) + { +- if (priv->clk_ptp_ref) +- clk_disable_unprepare(priv->clk_ptp_ref); ++ if (priv->plat->clk_ptp_ref) ++ clk_disable_unprepare(priv->plat->clk_ptp_ref); + stmmac_ptp_unregister(priv); + } + +@@ -704,7 +689,7 @@ static void stmmac_adjust_link(struct ne + int new_state = 0; + unsigned int fc = priv->flow_ctrl, pause_time = priv->pause; + +- if (phydev == NULL) ++ if (!phydev) + return; + + spin_lock_irqsave(&priv->lock, flags); +@@ -731,33 +716,36 @@ static void stmmac_adjust_link(struct ne + new_state = 1; + switch (phydev->speed) { + case 1000: +- if (likely((priv->plat->has_gmac) || +- (priv->plat->has_gmac4))) ++ if (priv->plat->has_gmac || ++ priv->plat->has_gmac4) + ctrl &= ~priv->hw->link.port; +- stmmac_hw_fix_mac_speed(priv); + break; + case 100: ++ if (priv->plat->has_gmac || ++ priv->plat->has_gmac4) { ++ ctrl |= priv->hw->link.port; ++ ctrl |= priv->hw->link.speed; ++ } else { ++ ctrl &= ~priv->hw->link.port; ++ } ++ break; + case 10: +- if (likely((priv->plat->has_gmac) || +- (priv->plat->has_gmac4))) { ++ if (priv->plat->has_gmac || ++ priv->plat->has_gmac4) { + ctrl |= priv->hw->link.port; +- if (phydev->speed == SPEED_100) { +- ctrl |= priv->hw->link.speed; +- } else { +- ctrl &= ~(priv->hw->link.speed); +- } ++ ctrl &= ~(priv->hw->link.speed); + } else { + ctrl &= ~priv->hw->link.port; + } +- stmmac_hw_fix_mac_speed(priv); + break; + default: + netif_warn(priv, link, priv->dev, +- "Speed (%d) not 10/100\n", +- phydev->speed); ++ "broken speed: %d\n", phydev->speed); ++ phydev->speed = SPEED_UNKNOWN; + break; + } +- ++ if (phydev->speed != SPEED_UNKNOWN) ++ stmmac_hw_fix_mac_speed(priv); + priv->speed = phydev->speed; + } + +@@ -770,8 +758,8 @@ static void stmmac_adjust_link(struct ne + } else if (priv->oldlink) { + new_state = 1; + priv->oldlink = 0; +- priv->speed = 0; +- priv->oldduplex = -1; ++ priv->speed = SPEED_UNKNOWN; ++ priv->oldduplex = DUPLEX_UNKNOWN; + } + 
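The stmmac_adjust_link() rework above also retires the magic link-state values (speed 0, duplex -1) in favour of the ethtool sentinels, and funnels unexpected PHY speeds through SPEED_UNKNOWN so that stmmac_hw_fix_mac_speed() only runs for a speed the hardware actually supports. A minimal sketch of the sentinel idiom, not part of the patch:

	#include <linux/ethtool.h>	/* SPEED_UNKNOWN, DUPLEX_UNKNOWN */

	struct sketch_link_state {
		int link;
		int speed;
		int duplex;
	};

	/* Reset the cached state so the next phylib callback is always
	 * seen as a change; the sentinels cannot collide with any real
	 * negotiated speed or duplex value. */
	static void sketch_link_down(struct sketch_link_state *st)
	{
		st->link = 0;
		st->speed = SPEED_UNKNOWN;	/* previously 0 */
		st->duplex = DUPLEX_UNKNOWN;	/* previously -1 */
	}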
+ if (new_state && netif_msg_link(priv)) +@@ -833,8 +821,8 @@ static int stmmac_init_phy(struct net_de + int interface = priv->plat->interface; + int max_speed = priv->plat->max_speed; + priv->oldlink = 0; +- priv->speed = 0; +- priv->oldduplex = -1; ++ priv->speed = SPEED_UNKNOWN; ++ priv->oldduplex = DUPLEX_UNKNOWN; + + if (priv->plat->phy_node) { + phydev = of_phy_connect(dev, priv->plat->phy_node, +@@ -886,9 +874,7 @@ static int stmmac_init_phy(struct net_de + if (phydev->is_pseudo_fixed_link) + phydev->irq = PHY_POLL; + +- netdev_dbg(priv->dev, "%s: attached to PHY (UID 0x%x) Link = %d\n", +- __func__, phydev->phy_id, phydev->link); +- ++ phy_attached_info(phydev); + return 0; + } + +@@ -1014,7 +1000,7 @@ static void stmmac_free_rx_buffers(struc + * @dev: net device structure + * @flags: gfp flag. + * Description: this function initializes the DMA RX/TX descriptors +- * and allocates the socket buffers. It suppors the chained and ring ++ * and allocates the socket buffers. It supports the chained and ring + * modes. + */ + static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) +@@ -1127,13 +1113,6 @@ static void dma_free_tx_skbufs(struct st + int i; + + for (i = 0; i < DMA_TX_SIZE; i++) { +- struct dma_desc *p; +- +- if (priv->extend_desc) +- p = &((priv->dma_etx + i)->basic); +- else +- p = priv->dma_tx + i; +- + if (priv->tx_skbuff_dma[i].buf) { + if (priv->tx_skbuff_dma[i].map_as_page) + dma_unmap_page(priv->device, +@@ -1147,7 +1126,7 @@ static void dma_free_tx_skbufs(struct st + DMA_TO_DEVICE); + } + +- if (priv->tx_skbuff[i] != NULL) { ++ if (priv->tx_skbuff[i]) { + dev_kfree_skb_any(priv->tx_skbuff[i]); + priv->tx_skbuff[i] = NULL; + priv->tx_skbuff_dma[i].buf = 0; +@@ -1271,6 +1250,28 @@ static void free_dma_desc_resources(stru + } + + /** ++ * stmmac_mac_enable_rx_queues - Enable MAC rx queues ++ * @priv: driver private structure ++ * Description: It is used for enabling the rx queues in the MAC ++ */ ++static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) ++{ ++ int rx_count = priv->dma_cap.number_rx_queues; ++ int queue = 0; ++ ++ /* If GMAC does not have multiple queues, then this is not necessary*/ ++ if (rx_count == 1) ++ return; ++ ++ /** ++ * If the core is synthesized with multiple rx queues / multiple ++ * dma channels, then rx queues will be disabled by default. ++ * For now only rx queue 0 is enabled. ++ */ ++ priv->hw->mac->rx_queue_enable(priv->hw, queue); ++} ++ ++/** + * stmmac_dma_operation_mode - HW DMA operation mode + * @priv: driver private structure + * Description: it is used for configuring the DMA operation mode register in +@@ -1671,10 +1672,6 @@ static int stmmac_hw_setup(struct net_de + /* Copy the MAC addr into the HW */ + priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0); + +- /* If required, perform hw setup of the bus. 
*/ +- if (priv->plat->bus_setup) +- priv->plat->bus_setup(priv->ioaddr); +- + /* PS and related bits will be programmed according to the speed */ + if (priv->hw->pcs) { + int speed = priv->plat->mac_port_sel_speed; +@@ -1691,6 +1688,10 @@ static int stmmac_hw_setup(struct net_de + /* Initialize the MAC Core */ + priv->hw->mac->core_init(priv->hw, dev->mtu); + ++ /* Initialize MAC RX Queues */ ++ if (priv->hw->mac->rx_queue_enable) ++ stmmac_mac_enable_rx_queues(priv); ++ + ret = priv->hw->mac->rx_ipc(priv->hw); + if (!ret) { + netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); +@@ -1711,8 +1712,10 @@ static int stmmac_hw_setup(struct net_de + + if (init_ptp) { + ret = stmmac_init_ptp(priv); +- if (ret) +- netdev_warn(priv->dev, "fail to init PTP.\n"); ++ if (ret == -EOPNOTSUPP) ++ netdev_warn(priv->dev, "PTP not supported by HW\n"); ++ else if (ret) ++ netdev_warn(priv->dev, "PTP init failed\n"); + } + + #ifdef CONFIG_DEBUG_FS +@@ -1726,11 +1729,6 @@ static int stmmac_hw_setup(struct net_de + priv->hw->dma->start_tx(priv->ioaddr); + priv->hw->dma->start_rx(priv->ioaddr); + +- /* Dump DMA/MAC registers */ +- if (netif_msg_hw(priv)) { +- priv->hw->mac->dump_regs(priv->hw); +- priv->hw->dma->dump_regs(priv->ioaddr); +- } + priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; + + if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { +@@ -2519,7 +2517,7 @@ static int stmmac_rx(struct stmmac_priv + if (unlikely(status == discard_frame)) { + priv->dev->stats.rx_errors++; + if (priv->hwts_rx_en && !priv->extend_desc) { +- /* DESC2 & DESC3 will be overwitten by device ++ /* DESC2 & DESC3 will be overwritten by device + * with timestamp value, hence reinitialize + * them in stmmac_rx_refill() function so that + * device can reuse it. +@@ -2542,7 +2540,7 @@ static int stmmac_rx(struct stmmac_priv + + frame_len = priv->hw->desc->get_rx_frame_len(p, coe); + +- /* If frame length is greather than skb buffer size ++ /* If frame length is greater than skb buffer size + * (preallocated during init) then the packet is + * ignored + */ +@@ -2762,7 +2760,7 @@ static netdev_features_t stmmac_fix_feat + /* Some GMAC devices have a bugged Jumbo frame support that + * needs to have the Tx COE disabled for oversized frames + * (due to limited buffer sizes). In this case we disable +- * the TX csum insertionin the TDES and not use SF. ++ * the TX csum insertion in the TDES and not use SF. + */ + if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) + features &= ~NETIF_F_CSUM_MASK; +@@ -2908,9 +2906,7 @@ static void sysfs_display_ring(void *hea + struct dma_desc *p = (struct dma_desc *)head; + + for (i = 0; i < size; i++) { +- u64 x; + if (extend_desc) { +- x = *(u64 *) ep; + seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", + i, (unsigned int)virt_to_phys(ep), + le32_to_cpu(ep->basic.des0), +@@ -2919,7 +2915,6 @@ static void sysfs_display_ring(void *hea + le32_to_cpu(ep->basic.des3)); + ep++; + } else { +- x = *(u64 *) p; + seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", + i, (unsigned int)virt_to_phys(ep), + le32_to_cpu(p->des0), le32_to_cpu(p->des1), +@@ -2989,7 +2984,7 @@ static int stmmac_sysfs_dma_cap_read(str + (priv->dma_cap.hash_filter) ? "Y" : "N"); + seq_printf(seq, "\tMultiple MAC address registers: %s\n", + (priv->dma_cap.multi_addr) ? "Y" : "N"); +- seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfatces): %s\n", ++ seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n", + (priv->dma_cap.pcs) ? 
"Y" : "N"); + seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", + (priv->dma_cap.sma_mdio) ? "Y" : "N"); +@@ -3265,44 +3260,8 @@ int stmmac_dvr_probe(struct device *devi + if ((phyaddr >= 0) && (phyaddr <= 31)) + priv->plat->phy_addr = phyaddr; + +- priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME); +- if (IS_ERR(priv->stmmac_clk)) { +- netdev_warn(priv->dev, "%s: warning: cannot get CSR clock\n", +- __func__); +- /* If failed to obtain stmmac_clk and specific clk_csr value +- * is NOT passed from the platform, probe fail. +- */ +- if (!priv->plat->clk_csr) { +- ret = PTR_ERR(priv->stmmac_clk); +- goto error_clk_get; +- } else { +- priv->stmmac_clk = NULL; +- } +- } +- clk_prepare_enable(priv->stmmac_clk); +- +- priv->pclk = devm_clk_get(priv->device, "pclk"); +- if (IS_ERR(priv->pclk)) { +- if (PTR_ERR(priv->pclk) == -EPROBE_DEFER) { +- ret = -EPROBE_DEFER; +- goto error_pclk_get; +- } +- priv->pclk = NULL; +- } +- clk_prepare_enable(priv->pclk); +- +- priv->stmmac_rst = devm_reset_control_get(priv->device, +- STMMAC_RESOURCE_NAME); +- if (IS_ERR(priv->stmmac_rst)) { +- if (PTR_ERR(priv->stmmac_rst) == -EPROBE_DEFER) { +- ret = -EPROBE_DEFER; +- goto error_hw_init; +- } +- dev_info(priv->device, "no reset control found\n"); +- priv->stmmac_rst = NULL; +- } +- if (priv->stmmac_rst) +- reset_control_deassert(priv->stmmac_rst); ++ if (priv->plat->stmmac_rst) ++ reset_control_deassert(priv->plat->stmmac_rst); + + /* Init MAC and get the capabilities */ + ret = stmmac_hw_init(priv); +@@ -3388,10 +3347,6 @@ error_netdev_register: + error_mdio_register: + netif_napi_del(&priv->napi); + error_hw_init: +- clk_disable_unprepare(priv->pclk); +-error_pclk_get: +- clk_disable_unprepare(priv->stmmac_clk); +-error_clk_get: + free_netdev(ndev); + + return ret; +@@ -3417,10 +3372,10 @@ int stmmac_dvr_remove(struct device *dev + stmmac_set_mac(priv->ioaddr, false); + netif_carrier_off(ndev); + unregister_netdev(ndev); +- if (priv->stmmac_rst) +- reset_control_assert(priv->stmmac_rst); +- clk_disable_unprepare(priv->pclk); +- clk_disable_unprepare(priv->stmmac_clk); ++ if (priv->plat->stmmac_rst) ++ reset_control_assert(priv->plat->stmmac_rst); ++ clk_disable_unprepare(priv->plat->pclk); ++ clk_disable_unprepare(priv->plat->stmmac_clk); + if (priv->hw->pcs != STMMAC_PCS_RGMII && + priv->hw->pcs != STMMAC_PCS_TBI && + priv->hw->pcs != STMMAC_PCS_RTBI) +@@ -3469,14 +3424,14 @@ int stmmac_suspend(struct device *dev) + stmmac_set_mac(priv->ioaddr, false); + pinctrl_pm_select_sleep_state(priv->device); + /* Disable clock in case of PWM is off */ +- clk_disable(priv->pclk); +- clk_disable(priv->stmmac_clk); ++ clk_disable(priv->plat->pclk); ++ clk_disable(priv->plat->stmmac_clk); + } + spin_unlock_irqrestore(&priv->lock, flags); + + priv->oldlink = 0; +- priv->speed = 0; +- priv->oldduplex = -1; ++ priv->speed = SPEED_UNKNOWN; ++ priv->oldduplex = DUPLEX_UNKNOWN; + return 0; + } + EXPORT_SYMBOL_GPL(stmmac_suspend); +@@ -3509,9 +3464,9 @@ int stmmac_resume(struct device *dev) + priv->irq_wake = 0; + } else { + pinctrl_pm_select_default_state(priv->device); +- /* enable the clk prevously disabled */ +- clk_enable(priv->stmmac_clk); +- clk_enable(priv->pclk); ++ /* enable the clk previously disabled */ ++ clk_enable(priv->plat->stmmac_clk); ++ clk_enable(priv->plat->pclk); + /* reset the phy so that it's ready */ + if (priv->mii) + stmmac_mdio_reset(priv->mii); +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +@@ -13,10 +13,6 @@ + 
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -24,13 +20,14 @@ + Maintainer: Giuseppe Cavallaro + *******************************************************************************/ + ++#include ++#include + #include +-#include +-#include + #include + #include + #include +-#include ++#include ++#include + + #include "stmmac.h" + +@@ -42,22 +39,6 @@ + #define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT) + #define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT) + +-static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr) +-{ +- unsigned long curr; +- unsigned long finish = jiffies + 3 * HZ; +- +- do { +- curr = jiffies; +- if (readl(ioaddr + mii_addr) & MII_BUSY) +- cpu_relax(); +- else +- return 0; +- } while (!time_after_eq(curr, finish)); +- +- return -EBUSY; +-} +- + /** + * stmmac_mdio_read + * @bus: points to the mii_bus structure +@@ -74,7 +55,7 @@ static int stmmac_mdio_read(struct mii_b + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int mii_address = priv->hw->mii.addr; + unsigned int mii_data = priv->hw->mii.data; +- ++ u32 v; + int data; + u32 value = MII_BUSY; + +@@ -86,12 +67,14 @@ static int stmmac_mdio_read(struct mii_b + if (priv->plat->has_gmac4) + value |= MII_GMAC4_READ; + +- if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) ++ if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), ++ 100, 10000)) + return -EBUSY; + + writel(value, priv->ioaddr + mii_address); + +- if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) ++ if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), ++ 100, 10000)) + return -EBUSY; + + /* Read the data from the MII data register */ +@@ -115,7 +98,7 @@ static int stmmac_mdio_write(struct mii_ + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int mii_address = priv->hw->mii.addr; + unsigned int mii_data = priv->hw->mii.data; +- ++ u32 v; + u32 value = MII_BUSY; + + value |= (phyaddr << priv->hw->mii.addr_shift) +@@ -130,7 +113,8 @@ static int stmmac_mdio_write(struct mii_ + value |= MII_WRITE; + + /* Wait until any existing MII operation is complete */ +- if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) ++ if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), ++ 100, 10000)) + return -EBUSY; + + /* Set the MII address register to write */ +@@ -138,7 +122,8 @@ static int stmmac_mdio_write(struct mii_ + writel(value, priv->ioaddr + mii_address); + + /* Wait until any existing MII operation is complete */ +- return stmmac_mdio_busy_wait(priv->ioaddr, mii_address); ++ return readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), ++ 100, 10000); + } + + /** +@@ -156,9 +141,9 @@ int stmmac_mdio_reset(struct mii_bus *bu + + #ifdef CONFIG_OF + if (priv->device->of_node) { +- + if (data->reset_gpio < 0) { + struct device_node *np = priv->device->of_node; ++ + if (!np) + return 0; + +@@ -198,7 +183,7 @@ int stmmac_mdio_reset(struct mii_bus *bu + + /* This is a workaround for problems with the STE101P PHY. + * It doesn't complete its reset until at least one clock cycle +- * on MDC, so perform a dummy mdio read. To be upadted for GMAC4 ++ * on MDC, so perform a dummy mdio read. 
To be updated for GMAC4 + * if needed. + */ + if (!priv->plat->has_gmac4) +@@ -225,7 +210,7 @@ int stmmac_mdio_register(struct net_devi + return 0; + + new_bus = mdiobus_alloc(); +- if (new_bus == NULL) ++ if (!new_bus) + return -ENOMEM; + + if (mdio_bus_data->irqs) +@@ -262,49 +247,48 @@ int stmmac_mdio_register(struct net_devi + found = 0; + for (addr = 0; addr < PHY_MAX_ADDR; addr++) { + struct phy_device *phydev = mdiobus_get_phy(new_bus, addr); +- if (phydev) { +- int act = 0; +- char irq_num[4]; +- char *irq_str; +- +- /* +- * If an IRQ was provided to be assigned after +- * the bus probe, do it here. +- */ +- if ((mdio_bus_data->irqs == NULL) && +- (mdio_bus_data->probed_phy_irq > 0)) { +- new_bus->irq[addr] = +- mdio_bus_data->probed_phy_irq; +- phydev->irq = mdio_bus_data->probed_phy_irq; +- } +- +- /* +- * If we're going to bind the MAC to this PHY bus, +- * and no PHY number was provided to the MAC, +- * use the one probed here. +- */ +- if (priv->plat->phy_addr == -1) +- priv->plat->phy_addr = addr; +- +- act = (priv->plat->phy_addr == addr); +- switch (phydev->irq) { +- case PHY_POLL: +- irq_str = "POLL"; +- break; +- case PHY_IGNORE_INTERRUPT: +- irq_str = "IGNORE"; +- break; +- default: +- sprintf(irq_num, "%d", phydev->irq); +- irq_str = irq_num; +- break; +- } +- netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n", +- phydev->phy_id, addr, +- irq_str, phydev_name(phydev), +- act ? " active" : ""); +- found = 1; ++ int act = 0; ++ char irq_num[4]; ++ char *irq_str; ++ ++ if (!phydev) ++ continue; ++ ++ /* ++ * If an IRQ was provided to be assigned after ++ * the bus probe, do it here. ++ */ ++ if (!mdio_bus_data->irqs && ++ (mdio_bus_data->probed_phy_irq > 0)) { ++ new_bus->irq[addr] = mdio_bus_data->probed_phy_irq; ++ phydev->irq = mdio_bus_data->probed_phy_irq; ++ } ++ ++ /* ++ * If we're going to bind the MAC to this PHY bus, ++ * and no PHY number was provided to the MAC, ++ * use the one probed here. ++ */ ++ if (priv->plat->phy_addr == -1) ++ priv->plat->phy_addr = addr; ++ ++ act = (priv->plat->phy_addr == addr); ++ switch (phydev->irq) { ++ case PHY_POLL: ++ irq_str = "POLL"; ++ break; ++ case PHY_IGNORE_INTERRUPT: ++ irq_str = "IGNORE"; ++ break; ++ default: ++ sprintf(irq_num, "%d", phydev->irq); ++ irq_str = irq_num; ++ break; + } ++ netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n", ++ phydev->phy_id, addr, irq_str, phydev_name(phydev), ++ act ? " active" : ""); ++ found = 1; + } + + if (!found && !mdio_node) { +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -121,7 +117,6 @@ static struct stmmac_axi *stmmac_axi_set + axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en"); + axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm"); + axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe"); +- axi->axi_axi_all = of_property_read_bool(np, "snps,axi_all"); + axi->axi_fb = of_property_read_bool(np, "snps,axi_fb"); + axi->axi_mb = of_property_read_bool(np, "snps,axi_mb"); + axi->axi_rb = of_property_read_bool(np, "snps,axi_rb"); +@@ -181,10 +176,19 @@ static int stmmac_dt_phy(struct plat_stm + mdio = false; + } + +- /* If snps,dwmac-mdio is passed from DT, always register the MDIO */ +- for_each_child_of_node(np, plat->mdio_node) { +- if (of_device_is_compatible(plat->mdio_node, "snps,dwmac-mdio")) +- break; ++ /* exception for dwmac-dwc-qos-eth glue logic */ ++ if (of_device_is_compatible(np, "snps,dwc-qos-ethernet-4.10")) { ++ plat->mdio_node = of_get_child_by_name(np, "mdio"); ++ } else { ++ /** ++ * If snps,dwmac-mdio is passed from DT, always register ++ * the MDIO ++ */ ++ for_each_child_of_node(np, plat->mdio_node) { ++ if (of_device_is_compatible(plat->mdio_node, ++ "snps,dwmac-mdio")) ++ break; ++ } + } + + if (plat->mdio_node) { +@@ -249,6 +253,9 @@ stmmac_probe_config_dt(struct platform_d + plat->force_sf_dma_mode = + of_property_read_bool(np, "snps,force_sf_dma_mode"); + ++ plat->en_tx_lpi_clockgating = ++ of_property_read_bool(np, "snps,en-tx-lpi-clockgating"); ++ + /* Set the maxmtu to a default of JUMBO_LEN in case the + * parameter is not present in the device tree. + */ +@@ -333,7 +340,54 @@ stmmac_probe_config_dt(struct platform_d + + plat->axi = stmmac_axi_setup(pdev); + ++ /* clock setup */ ++ plat->stmmac_clk = devm_clk_get(&pdev->dev, ++ STMMAC_RESOURCE_NAME); ++ if (IS_ERR(plat->stmmac_clk)) { ++ dev_warn(&pdev->dev, "Cannot get CSR clock\n"); ++ plat->stmmac_clk = NULL; ++ } ++ clk_prepare_enable(plat->stmmac_clk); ++ ++ plat->pclk = devm_clk_get(&pdev->dev, "pclk"); ++ if (IS_ERR(plat->pclk)) { ++ if (PTR_ERR(plat->pclk) == -EPROBE_DEFER) ++ goto error_pclk_get; ++ ++ plat->pclk = NULL; ++ } ++ clk_prepare_enable(plat->pclk); ++ ++ /* Fall-back to main clock in case of no PTP ref is passed */ ++ plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "clk_ptp_ref"); ++ if (IS_ERR(plat->clk_ptp_ref)) { ++ plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk); ++ plat->clk_ptp_ref = NULL; ++ dev_warn(&pdev->dev, "PTP uses main clock\n"); ++ } else { ++ clk_prepare_enable(plat->clk_ptp_ref); ++ plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref); ++ dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate); ++ } ++ ++ plat->stmmac_rst = devm_reset_control_get(&pdev->dev, ++ STMMAC_RESOURCE_NAME); ++ if (IS_ERR(plat->stmmac_rst)) { ++ if (PTR_ERR(plat->stmmac_rst) == -EPROBE_DEFER) ++ goto error_hw_init; ++ ++ dev_info(&pdev->dev, "no reset control found\n"); ++ plat->stmmac_rst = NULL; ++ } ++ + return plat; ++ ++error_hw_init: ++ clk_disable_unprepare(plat->pclk); ++error_pclk_get: ++ clk_disable_unprepare(plat->stmmac_clk); ++ ++ return ERR_PTR(-EPROBE_DEFER); + } + + /** +@@ -357,7 +411,7 @@ void stmmac_remove_config_dt(struct plat + struct plat_stmmacenet_data * + stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) + { +- return ERR_PTR(-ENOSYS); ++ return ERR_PTR(-EINVAL); + } + + void stmmac_remove_config_dt(struct platform_device *pdev, +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c 
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -103,7 +103,6 @@ struct stmmac_axi { + u32 axi_wr_osr_lmt; + u32 axi_rd_osr_lmt; + bool axi_kbbe; +- bool axi_axi_all; + u32 axi_blen[AXI_BLEN]; + bool axi_fb; + bool axi_mb; +@@ -135,13 +134,18 @@ struct plat_stmmacenet_data { + int tx_fifo_size; + int rx_fifo_size; + void (*fix_mac_speed)(void *priv, unsigned int speed); +- void (*bus_setup)(void __iomem *ioaddr); + int (*init)(struct platform_device *pdev, void *priv); + void (*exit)(struct platform_device *pdev, void *priv); + void *bsp_priv; ++ struct clk *stmmac_clk; ++ struct clk *pclk; ++ struct clk *clk_ptp_ref; ++ unsigned int clk_ptp_rate; ++ struct reset_control *stmmac_rst; + struct stmmac_axi *axi; + int has_gmac4; + bool tso_en; + int mac_port_sel_speed; ++ bool en_tx_lpi_clockgating; + }; + #endif diff --git a/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch b/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch new file mode 100644 index 000000000..a5cdec552 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch @@ -0,0 +1,5974 @@ +--- a/Documentation/devicetree/bindings/net/stmmac.txt ++++ b/Documentation/devicetree/bindings/net/stmmac.txt +@@ -7,9 +7,12 @@ Required properties: + - interrupt-parent: Should be the phandle for the interrupt controller + that services interrupts for this device + - interrupts: Should contain the STMMAC interrupts +-- interrupt-names: Should contain the interrupt names "macirq" +- "eth_wake_irq" if this interrupt is supported in the "interrupts" +- property ++- interrupt-names: Should contain a list of interrupt names corresponding to ++ the interrupts in the interrupts property, if available. ++ Valid interrupt names are: ++ - "macirq" (combined signal for various interrupt events) ++ - "eth_wake_irq" (the interrupt to manage the remote wake-up packet detection) ++ - "eth_lpi" (the interrupt that occurs when Tx or Rx enters/exits LPI state) + - phy-mode: See ethernet.txt file in the same directory. + - snps,reset-gpio gpio number for phy reset. + - snps,reset-active-low boolean flag to indicate if phy reset is active low. +@@ -28,9 +31,9 @@ Optional properties: + clocks may be specified in derived bindings. + - clock-names: One name for each entry in the clocks property, the + first one should be "stmmaceth" and the second one should be "pclk". 
+-- clk_ptp_ref: this is the PTP reference clock; in case of the PTP is +- available this clock is used for programming the Timestamp Addend Register. +- If not passed then the system clock will be used and this is fine on some ++- ptp_ref: this is the PTP reference clock; in case of the PTP is available ++ this clock is used for programming the Timestamp Addend Register. If not ++ passed then the system clock will be used and this is fine on some + platforms. + - tx-fifo-depth: See ethernet.txt file in the same directory + - rx-fifo-depth: See ethernet.txt file in the same directory +@@ -72,7 +75,45 @@ Optional properties: + - snps,mb: mixed-burst + - snps,rb: rebuild INCRx Burst + - mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus. +- ++- Multiple RX Queues parameters: below the list of all the parameters to ++ configure the multiple RX queues: ++ - snps,rx-queues-to-use: number of RX queues to be used in the driver ++ - Choose one of these RX scheduling algorithms: ++ - snps,rx-sched-sp: Strict priority ++ - snps,rx-sched-wsp: Weighted Strict priority ++ - For each RX queue ++ - Choose one of these modes: ++ - snps,dcb-algorithm: Queue to be enabled as DCB ++ - snps,avb-algorithm: Queue to be enabled as AVB ++ - snps,map-to-dma-channel: Channel to map ++ - Specifiy specific packet routing: ++ - snps,route-avcp: AV Untagged Control packets ++ - snps,route-ptp: PTP Packets ++ - snps,route-dcbcp: DCB Control Packets ++ - snps,route-up: Untagged Packets ++ - snps,route-multi-broad: Multicast & Broadcast Packets ++ - snps,priority: RX queue priority (Range: 0x0 to 0xF) ++- Multiple TX Queues parameters: below the list of all the parameters to ++ configure the multiple TX queues: ++ - snps,tx-queues-to-use: number of TX queues to be used in the driver ++ - Choose one of these TX scheduling algorithms: ++ - snps,tx-sched-wrr: Weighted Round Robin ++ - snps,tx-sched-wfq: Weighted Fair Queuing ++ - snps,tx-sched-dwrr: Deficit Weighted Round Robin ++ - snps,tx-sched-sp: Strict priority ++ - For each TX queue ++ - snps,weight: TX queue weight (if using a DCB weight algorithm) ++ - Choose one of these modes: ++ - snps,dcb-algorithm: TX queue will be working in DCB ++ - snps,avb-algorithm: TX queue will be working in AVB ++ [Attention] Queue 0 is reserved for legacy traffic ++ and so no AVB is available in this queue. ++ - Configure Credit Base Shaper (if AVB Mode selected): ++ - snps,send_slope: enable Low Power Interface ++ - snps,idle_slope: unlock on WoL ++ - snps,high_credit: max write outstanding req. limit ++ - snps,low_credit: max read outstanding req. 
limit ++ - snps,priority: TX queue priority (Range: 0x0 to 0xF) + Examples: + + stmmac_axi_setup: stmmac-axi-config { +@@ -81,12 +122,41 @@ Examples: + snps,blen = <256 128 64 32 0 0 0>; + }; + ++ mtl_rx_setup: rx-queues-config { ++ snps,rx-queues-to-use = <1>; ++ snps,rx-sched-sp; ++ queue0 { ++ snps,dcb-algorithm; ++ snps,map-to-dma-channel = <0x0>; ++ snps,priority = <0x0>; ++ }; ++ }; ++ ++ mtl_tx_setup: tx-queues-config { ++ snps,tx-queues-to-use = <2>; ++ snps,tx-sched-wrr; ++ queue0 { ++ snps,weight = <0x10>; ++ snps,dcb-algorithm; ++ snps,priority = <0x0>; ++ }; ++ ++ queue1 { ++ snps,avb-algorithm; ++ snps,send_slope = <0x1000>; ++ snps,idle_slope = <0x1000>; ++ snps,high_credit = <0x3E800>; ++ snps,low_credit = <0xFFC18000>; ++ snps,priority = <0x1>; ++ }; ++ }; ++ + gmac0: ethernet@e0800000 { + compatible = "st,spear600-gmac"; + reg = <0xe0800000 0x8000>; + interrupt-parent = <&vic1>; +- interrupts = <24 23>; +- interrupt-names = "macirq", "eth_wake_irq"; ++ interrupts = <24 23 22>; ++ interrupt-names = "macirq", "eth_wake_irq", "eth_lpi"; + mac-address = [000000000000]; /* Filled in by U-Boot */ + max-frame-size = <3800>; + phy-mode = "gmii"; +@@ -104,4 +174,6 @@ Examples: + phy1: ethernet-phy@0 { + }; + }; ++ snps,mtl-rx-config = <&mtl_rx_setup>; ++ snps,mtl-tx-config = <&mtl_tx_setup>; + }; +--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c ++++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c +@@ -37,6 +37,7 @@ + #define TSE_PCS_CONTROL_AN_EN_MASK BIT(12) + #define TSE_PCS_CONTROL_REG 0x00 + #define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9) ++#define TSE_PCS_CTRL_AUTONEG_SGMII 0x1140 + #define TSE_PCS_IF_MODE_REG 0x28 + #define TSE_PCS_LINK_TIMER_0_REG 0x24 + #define TSE_PCS_LINK_TIMER_1_REG 0x26 +@@ -65,6 +66,7 @@ + #define TSE_PCS_SW_RESET_TIMEOUT 100 + #define TSE_PCS_USE_SGMII_AN_MASK BIT(1) + #define TSE_PCS_USE_SGMII_ENA BIT(0) ++#define TSE_PCS_IF_USE_SGMII 0x03 + + #define SGMII_ADAPTER_CTRL_REG 0x00 + #define SGMII_ADAPTER_DISABLE 0x0001 +@@ -101,7 +103,9 @@ int tse_pcs_init(void __iomem *base, str + { + int ret = 0; + +- writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG); ++ writew(TSE_PCS_IF_USE_SGMII, base + TSE_PCS_IF_MODE_REG); ++ ++ writew(TSE_PCS_CTRL_AUTONEG_SGMII, base + TSE_PCS_CONTROL_REG); + + writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG); + writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG); +--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c ++++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +@@ -26,12 +26,15 @@ + + static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) + { +- struct stmmac_priv *priv = (struct stmmac_priv *)p; +- unsigned int entry = priv->cur_tx; +- struct dma_desc *desc = priv->dma_tx + entry; ++ struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p; + unsigned int nopaged_len = skb_headlen(skb); ++ struct stmmac_priv *priv = tx_q->priv_data; ++ unsigned int entry = tx_q->cur_tx; + unsigned int bmax, des2; + unsigned int i = 1, len; ++ struct dma_desc *desc; ++ ++ desc = tx_q->dma_tx + entry; + + if (priv->plat->enh_desc) + bmax = BUF_SIZE_8KiB; +@@ -45,16 +48,16 @@ static int stmmac_jumbo_frm(void *p, str + desc->des2 = cpu_to_le32(des2); + if (dma_mapping_error(priv->device, des2)) + return -1; +- priv->tx_skbuff_dma[entry].buf = des2; +- priv->tx_skbuff_dma[entry].len = bmax; ++ tx_q->tx_skbuff_dma[entry].buf = des2; ++ tx_q->tx_skbuff_dma[entry].len = bmax; + /* do not close the descriptor and do not set own bit */ + 
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE, +- 0, false); ++ 0, false, skb->len); + + while (len != 0) { +- priv->tx_skbuff[entry] = NULL; ++ tx_q->tx_skbuff[entry] = NULL; + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); +- desc = priv->dma_tx + entry; ++ desc = tx_q->dma_tx + entry; + + if (len > bmax) { + des2 = dma_map_single(priv->device, +@@ -63,11 +66,11 @@ static int stmmac_jumbo_frm(void *p, str + desc->des2 = cpu_to_le32(des2); + if (dma_mapping_error(priv->device, des2)) + return -1; +- priv->tx_skbuff_dma[entry].buf = des2; +- priv->tx_skbuff_dma[entry].len = bmax; ++ tx_q->tx_skbuff_dma[entry].buf = des2; ++ tx_q->tx_skbuff_dma[entry].len = bmax; + priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, + STMMAC_CHAIN_MODE, 1, +- false); ++ false, skb->len); + len -= bmax; + i++; + } else { +@@ -77,17 +80,17 @@ static int stmmac_jumbo_frm(void *p, str + desc->des2 = cpu_to_le32(des2); + if (dma_mapping_error(priv->device, des2)) + return -1; +- priv->tx_skbuff_dma[entry].buf = des2; +- priv->tx_skbuff_dma[entry].len = len; ++ tx_q->tx_skbuff_dma[entry].buf = des2; ++ tx_q->tx_skbuff_dma[entry].len = len; + /* last descriptor can be set now */ + priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, + STMMAC_CHAIN_MODE, 1, +- true); ++ true, skb->len); + len = 0; + } + } + +- priv->cur_tx = entry; ++ tx_q->cur_tx = entry; + + return entry; + } +@@ -136,32 +139,34 @@ static void stmmac_init_dma_chain(void * + + static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) + { +- struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; ++ struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)priv_ptr; ++ struct stmmac_priv *priv = rx_q->priv_data; + + if (priv->hwts_rx_en && !priv->extend_desc) + /* NOTE: Device will overwrite des3 with timestamp value if + * 1588-2002 time stamping is enabled, hence reinitialize it + * to keep explicit chaining in the descriptor. + */ +- p->des3 = cpu_to_le32((unsigned int)(priv->dma_rx_phy + +- (((priv->dirty_rx) + 1) % ++ p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy + ++ (((rx_q->dirty_rx) + 1) % + DMA_RX_SIZE) * + sizeof(struct dma_desc))); + } + + static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) + { +- struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; +- unsigned int entry = priv->dirty_tx; ++ struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr; ++ struct stmmac_priv *priv = tx_q->priv_data; ++ unsigned int entry = tx_q->dirty_tx; + +- if (priv->tx_skbuff_dma[entry].last_segment && !priv->extend_desc && ++ if (tx_q->tx_skbuff_dma[entry].last_segment && !priv->extend_desc && + priv->hwts_tx_en) + /* NOTE: Device will overwrite des3 with timestamp value if + * 1588-2002 time stamping is enabled, hence reinitialize it + * to keep explicit chaining in the descriptor. 
+ */ +- p->des3 = cpu_to_le32((unsigned int)((priv->dma_tx_phy + +- ((priv->dirty_tx + 1) % DMA_TX_SIZE)) ++ p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy + ++ ((tx_q->dirty_tx + 1) % DMA_TX_SIZE)) + * sizeof(struct dma_desc))); + } + +--- a/drivers/net/ethernet/stmicro/stmmac/common.h ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h +@@ -246,6 +246,15 @@ struct stmmac_extra_stats { + #define STMMAC_TX_MAX_FRAMES 256 + #define STMMAC_TX_FRAMES 64 + ++/* Packets types */ ++enum packets_types { ++ PACKET_AVCPQ = 0x1, /* AV Untagged Control packets */ ++ PACKET_PTPQ = 0x2, /* PTP Packets */ ++ PACKET_DCBCPQ = 0x3, /* DCB Control Packets */ ++ PACKET_UPQ = 0x4, /* Untagged Packets */ ++ PACKET_MCBCQ = 0x5, /* Multicast & Broadcast Packets */ ++}; ++ + /* Rx IPC status */ + enum rx_frame_status { + good_frame = 0x0, +@@ -324,6 +333,9 @@ struct dma_features { + unsigned int number_tx_queues; + /* Alternate (enhanced) DESC mode */ + unsigned int enh_desc; ++ /* TX and RX FIFO sizes */ ++ unsigned int tx_fifo_size; ++ unsigned int rx_fifo_size; + }; + + /* GMAC TX FIFO is 8K, Rx FIFO is 16K */ +@@ -361,7 +373,7 @@ struct stmmac_desc_ops { + /* Invoked by the xmit function to prepare the tx descriptor */ + void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len, + bool csum_flag, int mode, bool tx_own, +- bool ls); ++ bool ls, unsigned int tot_pkt_len); + void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1, + int len2, bool tx_own, bool ls, + unsigned int tcphdrlen, +@@ -413,6 +425,14 @@ struct stmmac_dma_ops { + int (*reset)(void __iomem *ioaddr); + void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, + u32 dma_tx, u32 dma_rx, int atds); ++ void (*init_chan)(void __iomem *ioaddr, ++ struct stmmac_dma_cfg *dma_cfg, u32 chan); ++ void (*init_rx_chan)(void __iomem *ioaddr, ++ struct stmmac_dma_cfg *dma_cfg, ++ u32 dma_rx_phy, u32 chan); ++ void (*init_tx_chan)(void __iomem *ioaddr, ++ struct stmmac_dma_cfg *dma_cfg, ++ u32 dma_tx_phy, u32 chan); + /* Configure the AXI Bus Mode Register */ + void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi); + /* Dump DMA registers */ +@@ -421,25 +441,28 @@ struct stmmac_dma_ops { + * An invalid value enables the store-and-forward mode */ + void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode, + int rxfifosz); ++ void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel, ++ int fifosz); ++ void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel); + /* To track extra statistic (if supported) */ + void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, + void __iomem *ioaddr); + void (*enable_dma_transmission) (void __iomem *ioaddr); +- void (*enable_dma_irq) (void __iomem *ioaddr); +- void (*disable_dma_irq) (void __iomem *ioaddr); +- void (*start_tx) (void __iomem *ioaddr); +- void (*stop_tx) (void __iomem *ioaddr); +- void (*start_rx) (void __iomem *ioaddr); +- void (*stop_rx) (void __iomem *ioaddr); ++ void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan); ++ void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan); ++ void (*start_tx)(void __iomem *ioaddr, u32 chan); ++ void (*stop_tx)(void __iomem *ioaddr, u32 chan); ++ void (*start_rx)(void __iomem *ioaddr, u32 chan); ++ void (*stop_rx)(void __iomem *ioaddr, u32 chan); + int (*dma_interrupt) (void __iomem *ioaddr, +- struct stmmac_extra_stats *x); ++ struct stmmac_extra_stats *x, u32 chan); + /* If supported then get the optional core features */ + void (*get_hw_feature)(void __iomem *ioaddr, + struct dma_features 
*dma_cap); + /* Program the HW RX Watchdog */ +- void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt); +- void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len); +- void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len); ++ void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan); ++ void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); ++ void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); + void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); + void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); + void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); +@@ -451,20 +474,44 @@ struct mac_device_info; + struct stmmac_ops { + /* MAC core initialization */ + void (*core_init)(struct mac_device_info *hw, int mtu); ++ /* Enable the MAC RX/TX */ ++ void (*set_mac)(void __iomem *ioaddr, bool enable); + /* Enable and verify that the IPC module is supported */ + int (*rx_ipc)(struct mac_device_info *hw); + /* Enable RX Queues */ +- void (*rx_queue_enable)(struct mac_device_info *hw, u32 queue); ++ void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue); ++ /* RX Queues Priority */ ++ void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); ++ /* TX Queues Priority */ ++ void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); ++ /* RX Queues Routing */ ++ void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet, ++ u32 queue); ++ /* Program RX Algorithms */ ++ void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg); ++ /* Program TX Algorithms */ ++ void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg); ++ /* Set MTL TX queues weight */ ++ void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw, ++ u32 weight, u32 queue); ++ /* RX MTL queue to RX dma mapping */ ++ void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan); ++ /* Configure AV Algorithm */ ++ void (*config_cbs)(struct mac_device_info *hw, u32 send_slope, ++ u32 idle_slope, u32 high_credit, u32 low_credit, ++ u32 queue); + /* Dump MAC registers */ + void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space); + /* Handle extra events on specific interrupts hw dependent */ + int (*host_irq_status)(struct mac_device_info *hw, + struct stmmac_extra_stats *x); ++ /* Handle MTL interrupts */ ++ int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan); + /* Multicast filter setting */ + void (*set_filter)(struct mac_device_info *hw, struct net_device *dev); + /* Flow control setting */ + void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex, +- unsigned int fc, unsigned int pause_time); ++ unsigned int fc, unsigned int pause_time, u32 tx_cnt); + /* Set power management mode (e.g. 
magic frame) */ + void (*pmt)(struct mac_device_info *hw, unsigned long mode); + /* Set/Get Unicast MAC addresses */ +@@ -477,7 +524,8 @@ struct stmmac_ops { + void (*reset_eee_mode)(struct mac_device_info *hw); + void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); + void (*set_eee_pls)(struct mac_device_info *hw, int link); +- void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x); ++ void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x, ++ u32 rx_queues, u32 tx_queues); + /* PCS calls */ + void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral, + bool loopback); +@@ -547,6 +595,11 @@ struct mac_device_info { + unsigned int ps; + }; + ++struct stmmac_rx_routing { ++ u32 reg_mask; ++ u32 reg_shift; ++}; ++ + struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, + int perfect_uc_entries, + int *synopsys_id); +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +@@ -14,16 +14,34 @@ + #include + #include + #include ++#include + #include + #include ++#include + #include + #include ++#include + #include + #include + #include ++#include + #include + + #include "stmmac_platform.h" ++#include "dwmac4.h" ++ ++struct tegra_eqos { ++ struct device *dev; ++ void __iomem *regs; ++ ++ struct reset_control *rst; ++ struct clk *clk_master; ++ struct clk *clk_slave; ++ struct clk *clk_tx; ++ struct clk *clk_rx; ++ ++ struct gpio_desc *reset; ++}; + + static int dwc_eth_dwmac_config_dt(struct platform_device *pdev, + struct plat_stmmacenet_data *plat_dat) +@@ -106,13 +124,309 @@ static int dwc_eth_dwmac_config_dt(struc + return 0; + } + ++static void *dwc_qos_probe(struct platform_device *pdev, ++ struct plat_stmmacenet_data *plat_dat, ++ struct stmmac_resources *stmmac_res) ++{ ++ int err; ++ ++ plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk"); ++ if (IS_ERR(plat_dat->stmmac_clk)) { ++ dev_err(&pdev->dev, "apb_pclk clock not found.\n"); ++ return ERR_CAST(plat_dat->stmmac_clk); ++ } ++ ++ err = clk_prepare_enable(plat_dat->stmmac_clk); ++ if (err < 0) { ++ dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n", ++ err); ++ return ERR_PTR(err); ++ } ++ ++ plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk"); ++ if (IS_ERR(plat_dat->pclk)) { ++ dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); ++ err = PTR_ERR(plat_dat->pclk); ++ goto disable; ++ } ++ ++ err = clk_prepare_enable(plat_dat->pclk); ++ if (err < 0) { ++ dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n", ++ err); ++ goto disable; ++ } ++ ++ return NULL; ++ ++disable: ++ clk_disable_unprepare(plat_dat->stmmac_clk); ++ return ERR_PTR(err); ++} ++ ++static int dwc_qos_remove(struct platform_device *pdev) ++{ ++ struct net_device *ndev = platform_get_drvdata(pdev); ++ struct stmmac_priv *priv = netdev_priv(ndev); ++ ++ clk_disable_unprepare(priv->plat->pclk); ++ clk_disable_unprepare(priv->plat->stmmac_clk); ++ ++ return 0; ++} ++ ++#define SDMEMCOMPPADCTRL 0x8800 ++#define SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31) ++ ++#define AUTO_CAL_CONFIG 0x8804 ++#define AUTO_CAL_CONFIG_START BIT(31) ++#define AUTO_CAL_CONFIG_ENABLE BIT(29) ++ ++#define AUTO_CAL_STATUS 0x880c ++#define AUTO_CAL_STATUS_ACTIVE BIT(31) ++ ++static void tegra_eqos_fix_speed(void *priv, unsigned int speed) ++{ ++ struct tegra_eqos *eqos = priv; ++ unsigned long rate = 125000000; ++ bool needs_calibration = false; ++ u32 value; ++ int err; ++ ++ switch (speed) { ++ case SPEED_1000: ++ needs_calibration = 
true; ++ rate = 125000000; ++ break; ++ ++ case SPEED_100: ++ needs_calibration = true; ++ rate = 25000000; ++ break; ++ ++ case SPEED_10: ++ rate = 2500000; ++ break; ++ ++ default: ++ dev_err(eqos->dev, "invalid speed %u\n", speed); ++ break; ++ } ++ ++ if (needs_calibration) { ++ /* calibrate */ ++ value = readl(eqos->regs + SDMEMCOMPPADCTRL); ++ value |= SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD; ++ writel(value, eqos->regs + SDMEMCOMPPADCTRL); ++ ++ udelay(1); ++ ++ value = readl(eqos->regs + AUTO_CAL_CONFIG); ++ value |= AUTO_CAL_CONFIG_START | AUTO_CAL_CONFIG_ENABLE; ++ writel(value, eqos->regs + AUTO_CAL_CONFIG); ++ ++ err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS, ++ value, ++ value & AUTO_CAL_STATUS_ACTIVE, ++ 1, 10); ++ if (err < 0) { ++ dev_err(eqos->dev, "calibration did not start\n"); ++ goto failed; ++ } ++ ++ err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS, ++ value, ++ (value & AUTO_CAL_STATUS_ACTIVE) == 0, ++ 20, 200); ++ if (err < 0) { ++ dev_err(eqos->dev, "calibration didn't finish\n"); ++ goto failed; ++ } ++ ++ failed: ++ value = readl(eqos->regs + SDMEMCOMPPADCTRL); ++ value &= ~SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD; ++ writel(value, eqos->regs + SDMEMCOMPPADCTRL); ++ } else { ++ value = readl(eqos->regs + AUTO_CAL_CONFIG); ++ value &= ~AUTO_CAL_CONFIG_ENABLE; ++ writel(value, eqos->regs + AUTO_CAL_CONFIG); ++ } ++ ++ err = clk_set_rate(eqos->clk_tx, rate); ++ if (err < 0) ++ dev_err(eqos->dev, "failed to set TX rate: %d\n", err); ++} ++ ++static int tegra_eqos_init(struct platform_device *pdev, void *priv) ++{ ++ struct tegra_eqos *eqos = priv; ++ unsigned long rate; ++ u32 value; ++ ++ rate = clk_get_rate(eqos->clk_slave); ++ ++ value = (rate / 1000000) - 1; ++ writel(value, eqos->regs + GMAC_1US_TIC_COUNTER); ++ ++ return 0; ++} ++ ++static void *tegra_eqos_probe(struct platform_device *pdev, ++ struct plat_stmmacenet_data *data, ++ struct stmmac_resources *res) ++{ ++ struct tegra_eqos *eqos; ++ int err; ++ ++ eqos = devm_kzalloc(&pdev->dev, sizeof(*eqos), GFP_KERNEL); ++ if (!eqos) { ++ err = -ENOMEM; ++ goto error; ++ } ++ ++ eqos->dev = &pdev->dev; ++ eqos->regs = res->addr; ++ ++ eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus"); ++ if (IS_ERR(eqos->clk_master)) { ++ err = PTR_ERR(eqos->clk_master); ++ goto error; ++ } ++ ++ err = clk_prepare_enable(eqos->clk_master); ++ if (err < 0) ++ goto error; ++ ++ eqos->clk_slave = devm_clk_get(&pdev->dev, "slave_bus"); ++ if (IS_ERR(eqos->clk_slave)) { ++ err = PTR_ERR(eqos->clk_slave); ++ goto disable_master; ++ } ++ ++ data->stmmac_clk = eqos->clk_slave; ++ ++ err = clk_prepare_enable(eqos->clk_slave); ++ if (err < 0) ++ goto disable_master; ++ ++ eqos->clk_rx = devm_clk_get(&pdev->dev, "rx"); ++ if (IS_ERR(eqos->clk_rx)) { ++ err = PTR_ERR(eqos->clk_rx); ++ goto disable_slave; ++ } ++ ++ err = clk_prepare_enable(eqos->clk_rx); ++ if (err < 0) ++ goto disable_slave; ++ ++ eqos->clk_tx = devm_clk_get(&pdev->dev, "tx"); ++ if (IS_ERR(eqos->clk_tx)) { ++ err = PTR_ERR(eqos->clk_tx); ++ goto disable_rx; ++ } ++ ++ err = clk_prepare_enable(eqos->clk_tx); ++ if (err < 0) ++ goto disable_rx; ++ ++ eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH); ++ if (IS_ERR(eqos->reset)) { ++ err = PTR_ERR(eqos->reset); ++ goto disable_tx; ++ } ++ ++ usleep_range(2000, 4000); ++ gpiod_set_value(eqos->reset, 0); ++ ++ eqos->rst = devm_reset_control_get(&pdev->dev, "eqos"); ++ if (IS_ERR(eqos->rst)) { ++ err = PTR_ERR(eqos->rst); ++ goto reset_phy; ++ } ++ ++ err = 
reset_control_assert(eqos->rst); ++ if (err < 0) ++ goto reset_phy; ++ ++ usleep_range(2000, 4000); ++ ++ err = reset_control_deassert(eqos->rst); ++ if (err < 0) ++ goto reset_phy; ++ ++ usleep_range(2000, 4000); ++ ++ data->fix_mac_speed = tegra_eqos_fix_speed; ++ data->init = tegra_eqos_init; ++ data->bsp_priv = eqos; ++ ++ err = tegra_eqos_init(pdev, eqos); ++ if (err < 0) ++ goto reset; ++ ++out: ++ return eqos; ++ ++reset: ++ reset_control_assert(eqos->rst); ++reset_phy: ++ gpiod_set_value(eqos->reset, 1); ++disable_tx: ++ clk_disable_unprepare(eqos->clk_tx); ++disable_rx: ++ clk_disable_unprepare(eqos->clk_rx); ++disable_slave: ++ clk_disable_unprepare(eqos->clk_slave); ++disable_master: ++ clk_disable_unprepare(eqos->clk_master); ++error: ++ eqos = ERR_PTR(err); ++ goto out; ++} ++ ++static int tegra_eqos_remove(struct platform_device *pdev) ++{ ++ struct tegra_eqos *eqos = get_stmmac_bsp_priv(&pdev->dev); ++ ++ reset_control_assert(eqos->rst); ++ gpiod_set_value(eqos->reset, 1); ++ clk_disable_unprepare(eqos->clk_tx); ++ clk_disable_unprepare(eqos->clk_rx); ++ clk_disable_unprepare(eqos->clk_slave); ++ clk_disable_unprepare(eqos->clk_master); ++ ++ return 0; ++} ++ ++struct dwc_eth_dwmac_data { ++ void *(*probe)(struct platform_device *pdev, ++ struct plat_stmmacenet_data *data, ++ struct stmmac_resources *res); ++ int (*remove)(struct platform_device *pdev); ++}; ++ ++static const struct dwc_eth_dwmac_data dwc_qos_data = { ++ .probe = dwc_qos_probe, ++ .remove = dwc_qos_remove, ++}; ++ ++static const struct dwc_eth_dwmac_data tegra_eqos_data = { ++ .probe = tegra_eqos_probe, ++ .remove = tegra_eqos_remove, ++}; ++ + static int dwc_eth_dwmac_probe(struct platform_device *pdev) + { ++ const struct dwc_eth_dwmac_data *data; + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + struct resource *res; ++ void *priv; + int ret; + ++ data = of_device_get_match_data(&pdev->dev); ++ + memset(&stmmac_res, 0, sizeof(struct stmmac_resources)); + + /** +@@ -138,39 +452,26 @@ static int dwc_eth_dwmac_probe(struct pl + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + +- plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk"); +- if (IS_ERR(plat_dat->stmmac_clk)) { +- dev_err(&pdev->dev, "apb_pclk clock not found.\n"); +- ret = PTR_ERR(plat_dat->stmmac_clk); +- plat_dat->stmmac_clk = NULL; +- goto err_remove_config_dt; ++ priv = data->probe(pdev, plat_dat, &stmmac_res); ++ if (IS_ERR(priv)) { ++ ret = PTR_ERR(priv); ++ dev_err(&pdev->dev, "failed to probe subdriver: %d\n", ret); ++ goto remove_config; + } +- clk_prepare_enable(plat_dat->stmmac_clk); +- +- plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk"); +- if (IS_ERR(plat_dat->pclk)) { +- dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); +- ret = PTR_ERR(plat_dat->pclk); +- plat_dat->pclk = NULL; +- goto err_out_clk_dis_phy; +- } +- clk_prepare_enable(plat_dat->pclk); + + ret = dwc_eth_dwmac_config_dt(pdev, plat_dat); + if (ret) +- goto err_out_clk_dis_aper; ++ goto remove; + + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) +- goto err_out_clk_dis_aper; ++ goto remove; + +- return 0; ++ return ret; + +-err_out_clk_dis_aper: +- clk_disable_unprepare(plat_dat->pclk); +-err_out_clk_dis_phy: +- clk_disable_unprepare(plat_dat->stmmac_clk); +-err_remove_config_dt: ++remove: ++ data->remove(pdev); ++remove_config: + stmmac_remove_config_dt(pdev, plat_dat); + + return ret; +@@ -178,11 +479,29 @@ err_remove_config_dt: + + static int dwc_eth_dwmac_remove(struct platform_device *pdev) + 
{ +- return stmmac_pltfr_remove(pdev); ++ struct net_device *ndev = platform_get_drvdata(pdev); ++ struct stmmac_priv *priv = netdev_priv(ndev); ++ const struct dwc_eth_dwmac_data *data; ++ int err; ++ ++ data = of_device_get_match_data(&pdev->dev); ++ ++ err = stmmac_dvr_remove(&pdev->dev); ++ if (err < 0) ++ dev_err(&pdev->dev, "failed to remove platform: %d\n", err); ++ ++ err = data->remove(pdev); ++ if (err < 0) ++ dev_err(&pdev->dev, "failed to remove subdriver: %d\n", err); ++ ++ stmmac_remove_config_dt(pdev, priv->plat); ++ ++ return err; + } + + static const struct of_device_id dwc_eth_dwmac_match[] = { +- { .compatible = "snps,dwc-qos-ethernet-4.10", }, ++ { .compatible = "snps,dwc-qos-ethernet-4.10", .data = &dwc_qos_data }, ++ { .compatible = "nvidia,tegra186-eqos", .data = &tegra_eqos_data }, + { } + }; + MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match); +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +@@ -74,6 +74,10 @@ struct rk_priv_data { + #define GRF_BIT(nr) (BIT(nr) | BIT(nr+16)) + #define GRF_CLR_BIT(nr) (BIT(nr+16)) + ++#define DELAY_ENABLE(soc, tx, rx) \ ++ (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \ ++ ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE)) ++ + #define RK3228_GRF_MAC_CON0 0x0900 + #define RK3228_GRF_MAC_CON1 0x0904 + +@@ -115,8 +119,7 @@ static void rk3228_set_to_rgmii(struct r + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, + RK3228_GMAC_PHY_INTF_SEL_RGMII | + RK3228_GMAC_RMII_MODE_CLR | +- RK3228_GMAC_RXCLK_DLY_ENABLE | +- RK3228_GMAC_TXCLK_DLY_ENABLE); ++ DELAY_ENABLE(RK3228, tx_delay, rx_delay)); + + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON0, + RK3228_GMAC_CLK_RX_DL_CFG(rx_delay) | +@@ -232,8 +235,7 @@ static void rk3288_set_to_rgmii(struct r + RK3288_GMAC_PHY_INTF_SEL_RGMII | + RK3288_GMAC_RMII_MODE_CLR); + regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3, +- RK3288_GMAC_RXCLK_DLY_ENABLE | +- RK3288_GMAC_TXCLK_DLY_ENABLE | ++ DELAY_ENABLE(RK3288, tx_delay, rx_delay) | + RK3288_GMAC_CLK_RX_DL_CFG(rx_delay) | + RK3288_GMAC_CLK_TX_DL_CFG(tx_delay)); + } +@@ -460,8 +462,7 @@ static void rk3366_set_to_rgmii(struct r + RK3366_GMAC_PHY_INTF_SEL_RGMII | + RK3366_GMAC_RMII_MODE_CLR); + regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON7, +- RK3366_GMAC_RXCLK_DLY_ENABLE | +- RK3366_GMAC_TXCLK_DLY_ENABLE | ++ DELAY_ENABLE(RK3366, tx_delay, rx_delay) | + RK3366_GMAC_CLK_RX_DL_CFG(rx_delay) | + RK3366_GMAC_CLK_TX_DL_CFG(tx_delay)); + } +@@ -572,8 +573,7 @@ static void rk3368_set_to_rgmii(struct r + RK3368_GMAC_PHY_INTF_SEL_RGMII | + RK3368_GMAC_RMII_MODE_CLR); + regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON16, +- RK3368_GMAC_RXCLK_DLY_ENABLE | +- RK3368_GMAC_TXCLK_DLY_ENABLE | ++ DELAY_ENABLE(RK3368, tx_delay, rx_delay) | + RK3368_GMAC_CLK_RX_DL_CFG(rx_delay) | + RK3368_GMAC_CLK_TX_DL_CFG(tx_delay)); + } +@@ -684,8 +684,7 @@ static void rk3399_set_to_rgmii(struct r + RK3399_GMAC_PHY_INTF_SEL_RGMII | + RK3399_GMAC_RMII_MODE_CLR); + regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON6, +- RK3399_GMAC_RXCLK_DLY_ENABLE | +- RK3399_GMAC_TXCLK_DLY_ENABLE | ++ DELAY_ENABLE(RK3399, tx_delay, rx_delay) | + RK3399_GMAC_CLK_RX_DL_CFG(rx_delay) | + RK3399_GMAC_CLK_TX_DL_CFG(tx_delay)); + } +@@ -985,14 +984,29 @@ static int rk_gmac_powerup(struct rk_pri + return ret; + + /*rmii or rgmii*/ +- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) { ++ switch (bsp_priv->phy_iface) { ++ case PHY_INTERFACE_MODE_RGMII: + dev_info(dev, "init for RGMII\n"); + 
bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay, + bsp_priv->rx_delay); +- } else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) { ++ break; ++ case PHY_INTERFACE_MODE_RGMII_ID: ++ dev_info(dev, "init for RGMII_ID\n"); ++ bsp_priv->ops->set_to_rgmii(bsp_priv, 0, 0); ++ break; ++ case PHY_INTERFACE_MODE_RGMII_RXID: ++ dev_info(dev, "init for RGMII_RXID\n"); ++ bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay, 0); ++ break; ++ case PHY_INTERFACE_MODE_RGMII_TXID: ++ dev_info(dev, "init for RGMII_TXID\n"); ++ bsp_priv->ops->set_to_rgmii(bsp_priv, 0, bsp_priv->rx_delay); ++ break; ++ case PHY_INTERFACE_MODE_RMII: + dev_info(dev, "init for RMII\n"); + bsp_priv->ops->set_to_rmii(bsp_priv); +- } else { ++ break; ++ default: + dev_err(dev, "NO interface defined!\n"); + } + +@@ -1022,12 +1036,19 @@ static void rk_fix_speed(void *priv, uns + struct rk_priv_data *bsp_priv = priv; + struct device *dev = &bsp_priv->pdev->dev; + +- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) ++ switch (bsp_priv->phy_iface) { ++ case PHY_INTERFACE_MODE_RGMII: ++ case PHY_INTERFACE_MODE_RGMII_ID: ++ case PHY_INTERFACE_MODE_RGMII_RXID: ++ case PHY_INTERFACE_MODE_RGMII_TXID: + bsp_priv->ops->set_rgmii_speed(bsp_priv, speed); +- else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) ++ break; ++ case PHY_INTERFACE_MODE_RMII: + bsp_priv->ops->set_rmii_speed(bsp_priv, speed); +- else ++ break; ++ default: + dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface); ++ } + } + + static int rk_gmac_probe(struct platform_device *pdev) +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +@@ -216,7 +216,8 @@ static void dwmac1000_set_filter(struct + + + static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, +- unsigned int fc, unsigned int pause_time) ++ unsigned int fc, unsigned int pause_time, ++ u32 tx_cnt) + { + void __iomem *ioaddr = hw->pcsr; + /* Set flow such that DZPQ in Mac Register 6 is 0, +@@ -412,7 +413,8 @@ static void dwmac1000_get_adv_lp(void __ + dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv); + } + +-static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x) ++static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x, ++ u32 rx_queues, u32 tx_queues) + { + u32 value = readl(ioaddr + GMAC_DEBUG); + +@@ -488,6 +490,7 @@ static void dwmac1000_debug(void __iomem + + static const struct stmmac_ops dwmac1000_ops = { + .core_init = dwmac1000_core_init, ++ .set_mac = stmmac_set_mac, + .rx_ipc = dwmac1000_rx_ipc_enable, + .dump_regs = dwmac1000_dump_regs, + .host_irq_status = dwmac1000_irq_status, +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +@@ -247,7 +247,8 @@ static void dwmac1000_get_hw_feature(voi + dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24; + } + +-static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt) ++static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt, ++ u32 number_chan) + { + writel(riwt, ioaddr + DMA_RX_WATCHDOG); + } +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +@@ -131,7 +131,8 @@ static void dwmac100_set_filter(struct m + } + + static void dwmac100_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, +- unsigned int fc, unsigned int pause_time) ++ unsigned int fc, unsigned int pause_time, ++ u32 tx_cnt) + { + void __iomem *ioaddr = hw->pcsr; + 
unsigned int flow = MAC_FLOW_CTRL_ENABLE; +@@ -149,6 +150,7 @@ static void dwmac100_pmt(struct mac_devi + + static const struct stmmac_ops dwmac100_ops = { + .core_init = dwmac100_core_init, ++ .set_mac = stmmac_set_mac, + .rx_ipc = dwmac100_rx_ipc_enable, + .dump_regs = dwmac100_dump_mac_regs, + .host_irq_status = dwmac100_irq_status, +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +@@ -22,9 +22,15 @@ + #define GMAC_HASH_TAB_32_63 0x00000014 + #define GMAC_RX_FLOW_CTRL 0x00000090 + #define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4) ++#define GMAC_TXQ_PRTY_MAP0 0x98 ++#define GMAC_TXQ_PRTY_MAP1 0x9C + #define GMAC_RXQ_CTRL0 0x000000a0 ++#define GMAC_RXQ_CTRL1 0x000000a4 ++#define GMAC_RXQ_CTRL2 0x000000a8 ++#define GMAC_RXQ_CTRL3 0x000000ac + #define GMAC_INT_STATUS 0x000000b0 + #define GMAC_INT_EN 0x000000b4 ++#define GMAC_1US_TIC_COUNTER 0x000000dc + #define GMAC_PCS_BASE 0x000000e0 + #define GMAC_PHYIF_CONTROL_STATUS 0x000000f8 + #define GMAC_PMT 0x000000c0 +@@ -38,6 +44,22 @@ + #define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8) + #define GMAC_ADDR_LOW(reg) (0x304 + reg * 8) + ++/* RX Queues Routing */ ++#define GMAC_RXQCTRL_AVCPQ_MASK GENMASK(2, 0) ++#define GMAC_RXQCTRL_AVCPQ_SHIFT 0 ++#define GMAC_RXQCTRL_PTPQ_MASK GENMASK(6, 4) ++#define GMAC_RXQCTRL_PTPQ_SHIFT 4 ++#define GMAC_RXQCTRL_DCBCPQ_MASK GENMASK(10, 8) ++#define GMAC_RXQCTRL_DCBCPQ_SHIFT 8 ++#define GMAC_RXQCTRL_UPQ_MASK GENMASK(14, 12) ++#define GMAC_RXQCTRL_UPQ_SHIFT 12 ++#define GMAC_RXQCTRL_MCBCQ_MASK GENMASK(18, 16) ++#define GMAC_RXQCTRL_MCBCQ_SHIFT 16 ++#define GMAC_RXQCTRL_MCBCQEN BIT(20) ++#define GMAC_RXQCTRL_MCBCQEN_SHIFT 20 ++#define GMAC_RXQCTRL_TACPQE BIT(21) ++#define GMAC_RXQCTRL_TACPQE_SHIFT 21 ++ + /* MAC Packet Filtering */ + #define GMAC_PACKET_FILTER_PR BIT(0) + #define GMAC_PACKET_FILTER_HMC BIT(2) +@@ -53,6 +75,14 @@ + /* MAC Flow Control RX */ + #define GMAC_RX_FLOW_CTRL_RFE BIT(0) + ++/* RX Queues Priorities */ ++#define GMAC_RXQCTRL_PSRQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8)) ++#define GMAC_RXQCTRL_PSRQX_SHIFT(x) ((x) * 8) ++ ++/* TX Queues Priorities */ ++#define GMAC_TXQCTRL_PSTQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8)) ++#define GMAC_TXQCTRL_PSTQX_SHIFT(x) ((x) * 8) ++ + /* MAC Flow Control TX */ + #define GMAC_TX_FLOW_CTRL_TFE BIT(1) + #define GMAC_TX_FLOW_CTRL_PT_SHIFT 16 +@@ -148,6 +178,8 @@ enum power_event { + /* MAC HW features1 bitmap */ + #define GMAC_HW_FEAT_AVSEL BIT(20) + #define GMAC_HW_TSOEN BIT(18) ++#define GMAC_HW_TXFIFOSIZE GENMASK(10, 6) ++#define GMAC_HW_RXFIFOSIZE GENMASK(4, 0) + + /* MAC HW features2 bitmap */ + #define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18) +@@ -161,8 +193,25 @@ enum power_event { + #define GMAC_HI_REG_AE BIT(31) + + /* MTL registers */ ++#define MTL_OPERATION_MODE 0x00000c00 ++#define MTL_OPERATION_SCHALG_MASK GENMASK(6, 5) ++#define MTL_OPERATION_SCHALG_WRR (0x0 << 5) ++#define MTL_OPERATION_SCHALG_WFQ (0x1 << 5) ++#define MTL_OPERATION_SCHALG_DWRR (0x2 << 5) ++#define MTL_OPERATION_SCHALG_SP (0x3 << 5) ++#define MTL_OPERATION_RAA BIT(2) ++#define MTL_OPERATION_RAA_SP (0x0 << 2) ++#define MTL_OPERATION_RAA_WSP (0x1 << 2) ++ + #define MTL_INT_STATUS 0x00000c20 +-#define MTL_INT_Q0 BIT(0) ++#define MTL_INT_QX(x) BIT(x) ++ ++#define MTL_RXQ_DMA_MAP0 0x00000c30 /* queue 0 to 3 */ ++#define MTL_RXQ_DMA_MAP1 0x00000c34 /* queue 4 to 7 */ ++#define MTL_RXQ_DMA_Q04MDMACH_MASK GENMASK(3, 0) ++#define MTL_RXQ_DMA_Q04MDMACH(x) ((x) << 0) ++#define MTL_RXQ_DMA_QXMDMACH_MASK(x) GENMASK(11 + (8 * ((x) - 1)), 8 
* (x)) ++#define MTL_RXQ_DMA_QXMDMACH(chan, q) ((chan) << (8 * (q))) + + #define MTL_CHAN_BASE_ADDR 0x00000d00 + #define MTL_CHAN_BASE_OFFSET 0x40 +@@ -180,6 +229,7 @@ enum power_event { + #define MTL_OP_MODE_TSF BIT(1) + + #define MTL_OP_MODE_TQS_MASK GENMASK(24, 16) ++#define MTL_OP_MODE_TQS_SHIFT 16 + + #define MTL_OP_MODE_TTC_MASK 0x70 + #define MTL_OP_MODE_TTC_SHIFT 4 +@@ -193,6 +243,17 @@ enum power_event { + #define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT) + #define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT) + ++#define MTL_OP_MODE_RQS_MASK GENMASK(29, 20) ++#define MTL_OP_MODE_RQS_SHIFT 20 ++ ++#define MTL_OP_MODE_RFD_MASK GENMASK(19, 14) ++#define MTL_OP_MODE_RFD_SHIFT 14 ++ ++#define MTL_OP_MODE_RFA_MASK GENMASK(13, 8) ++#define MTL_OP_MODE_RFA_SHIFT 8 ++ ++#define MTL_OP_MODE_EHFC BIT(7) ++ + #define MTL_OP_MODE_RTC_MASK 0x18 + #define MTL_OP_MODE_RTC_SHIFT 3 + +@@ -201,6 +262,46 @@ enum power_event { + #define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT) + #define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT) + ++/* MTL ETS Control register */ ++#define MTL_ETS_CTRL_BASE_ADDR 0x00000d10 ++#define MTL_ETS_CTRL_BASE_OFFSET 0x40 ++#define MTL_ETSX_CTRL_BASE_ADDR(x) (MTL_ETS_CTRL_BASE_ADDR + \ ++ ((x) * MTL_ETS_CTRL_BASE_OFFSET)) ++ ++#define MTL_ETS_CTRL_CC BIT(3) ++#define MTL_ETS_CTRL_AVALG BIT(2) ++ ++/* MTL Queue Quantum Weight */ ++#define MTL_TXQ_WEIGHT_BASE_ADDR 0x00000d18 ++#define MTL_TXQ_WEIGHT_BASE_OFFSET 0x40 ++#define MTL_TXQX_WEIGHT_BASE_ADDR(x) (MTL_TXQ_WEIGHT_BASE_ADDR + \ ++ ((x) * MTL_TXQ_WEIGHT_BASE_OFFSET)) ++#define MTL_TXQ_WEIGHT_ISCQW_MASK GENMASK(20, 0) ++ ++/* MTL sendSlopeCredit register */ ++#define MTL_SEND_SLP_CRED_BASE_ADDR 0x00000d1c ++#define MTL_SEND_SLP_CRED_OFFSET 0x40 ++#define MTL_SEND_SLP_CREDX_BASE_ADDR(x) (MTL_SEND_SLP_CRED_BASE_ADDR + \ ++ ((x) * MTL_SEND_SLP_CRED_OFFSET)) ++ ++#define MTL_SEND_SLP_CRED_SSC_MASK GENMASK(13, 0) ++ ++/* MTL hiCredit register */ ++#define MTL_HIGH_CRED_BASE_ADDR 0x00000d20 ++#define MTL_HIGH_CRED_OFFSET 0x40 ++#define MTL_HIGH_CREDX_BASE_ADDR(x) (MTL_HIGH_CRED_BASE_ADDR + \ ++ ((x) * MTL_HIGH_CRED_OFFSET)) ++ ++#define MTL_HIGH_CRED_HC_MASK GENMASK(28, 0) ++ ++/* MTL loCredit register */ ++#define MTL_LOW_CRED_BASE_ADDR 0x00000d24 ++#define MTL_LOW_CRED_OFFSET 0x40 ++#define MTL_LOW_CREDX_BASE_ADDR(x) (MTL_LOW_CRED_BASE_ADDR + \ ++ ((x) * MTL_LOW_CRED_OFFSET)) ++ ++#define MTL_HIGH_CRED_LC_MASK GENMASK(28, 0) ++ + /* MTL debug */ + #define MTL_DEBUG_TXSTSFSTS BIT(5) + #define MTL_DEBUG_TXFSTS BIT(4) +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +@@ -59,17 +59,211 @@ static void dwmac4_core_init(struct mac_ + writel(value, ioaddr + GMAC_INT_EN); + } + +-static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u32 queue) ++static void dwmac4_rx_queue_enable(struct mac_device_info *hw, ++ u8 mode, u32 queue) + { + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + GMAC_RXQ_CTRL0); + + value &= GMAC_RX_QUEUE_CLEAR(queue); +- value |= GMAC_RX_AV_QUEUE_ENABLE(queue); ++ if (mode == MTL_QUEUE_AVB) ++ value |= GMAC_RX_AV_QUEUE_ENABLE(queue); ++ else if (mode == MTL_QUEUE_DCB) ++ value |= GMAC_RX_DCB_QUEUE_ENABLE(queue); + + writel(value, ioaddr + GMAC_RXQ_CTRL0); + } + ++static void dwmac4_rx_queue_priority(struct mac_device_info *hw, ++ u32 prio, u32 queue) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ u32 base_register; ++ u32 value; ++ ++ base_register = (queue < 4) ? 
GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3; ++ ++ value = readl(ioaddr + base_register); ++ ++ value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue); ++ value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) & ++ GMAC_RXQCTRL_PSRQX_MASK(queue); ++ writel(value, ioaddr + base_register); ++} ++ ++static void dwmac4_tx_queue_priority(struct mac_device_info *hw, ++ u32 prio, u32 queue) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ u32 base_register; ++ u32 value; ++ ++ base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1; ++ ++ value = readl(ioaddr + base_register); ++ ++ value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue); ++ value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) & ++ GMAC_TXQCTRL_PSTQX_MASK(queue); ++ ++ writel(value, ioaddr + base_register); ++} ++ ++static void dwmac4_tx_queue_routing(struct mac_device_info *hw, ++ u8 packet, u32 queue) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ u32 value; ++ ++ const struct stmmac_rx_routing route_possibilities[] = { ++ { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT }, ++ { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT }, ++ { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT }, ++ { GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT }, ++ { GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT }, ++ }; ++ ++ value = readl(ioaddr + GMAC_RXQ_CTRL1); ++ ++ /* routing configuration */ ++ value &= ~route_possibilities[packet - 1].reg_mask; ++ value |= (queue << route_possibilities[packet-1].reg_shift) & ++ route_possibilities[packet - 1].reg_mask; ++ ++ /* some packets require extra ops */ ++ if (packet == PACKET_AVCPQ) { ++ value &= ~GMAC_RXQCTRL_TACPQE; ++ value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT; ++ } else if (packet == PACKET_MCBCQ) { ++ value &= ~GMAC_RXQCTRL_MCBCQEN; ++ value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT; ++ } ++ ++ writel(value, ioaddr + GMAC_RXQ_CTRL1); ++} ++ ++static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw, ++ u32 rx_alg) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ u32 value = readl(ioaddr + MTL_OPERATION_MODE); ++ ++ value &= ~MTL_OPERATION_RAA; ++ switch (rx_alg) { ++ case MTL_RX_ALGORITHM_SP: ++ value |= MTL_OPERATION_RAA_SP; ++ break; ++ case MTL_RX_ALGORITHM_WSP: ++ value |= MTL_OPERATION_RAA_WSP; ++ break; ++ default: ++ break; ++ } ++ ++ writel(value, ioaddr + MTL_OPERATION_MODE); ++} ++ ++static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw, ++ u32 tx_alg) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ u32 value = readl(ioaddr + MTL_OPERATION_MODE); ++ ++ value &= ~MTL_OPERATION_SCHALG_MASK; ++ switch (tx_alg) { ++ case MTL_TX_ALGORITHM_WRR: ++ value |= MTL_OPERATION_SCHALG_WRR; ++ break; ++ case MTL_TX_ALGORITHM_WFQ: ++ value |= MTL_OPERATION_SCHALG_WFQ; ++ break; ++ case MTL_TX_ALGORITHM_DWRR: ++ value |= MTL_OPERATION_SCHALG_DWRR; ++ break; ++ case MTL_TX_ALGORITHM_SP: ++ value |= MTL_OPERATION_SCHALG_SP; ++ break; ++ default: ++ break; ++ } ++} ++ ++static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw, ++ u32 weight, u32 queue) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue)); ++ ++ value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK; ++ value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK; ++ writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue)); ++} ++ ++static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ u32 value; ++ ++ if (queue < 4) ++ value = readl(ioaddr + MTL_RXQ_DMA_MAP0); ++ else ++ value = readl(ioaddr + MTL_RXQ_DMA_MAP1); ++ ++ if (queue == 0 || queue == 4) { ++ value &= 
~MTL_RXQ_DMA_Q04MDMACH_MASK; ++ value |= MTL_RXQ_DMA_Q04MDMACH(chan); ++ } else { ++ value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue); ++ value |= MTL_RXQ_DMA_QXMDMACH(chan, queue); ++ } ++ ++ if (queue < 4) ++ writel(value, ioaddr + MTL_RXQ_DMA_MAP0); ++ else ++ writel(value, ioaddr + MTL_RXQ_DMA_MAP1); ++} ++ ++static void dwmac4_config_cbs(struct mac_device_info *hw, ++ u32 send_slope, u32 idle_slope, ++ u32 high_credit, u32 low_credit, u32 queue) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ u32 value; ++ ++ pr_debug("Queue %d configured as AVB. Parameters:\n", queue); ++ pr_debug("\tsend_slope: 0x%08x\n", send_slope); ++ pr_debug("\tidle_slope: 0x%08x\n", idle_slope); ++ pr_debug("\thigh_credit: 0x%08x\n", high_credit); ++ pr_debug("\tlow_credit: 0x%08x\n", low_credit); ++ ++ /* enable AV algorithm */ ++ value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue)); ++ value |= MTL_ETS_CTRL_AVALG; ++ value |= MTL_ETS_CTRL_CC; ++ writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue)); ++ ++ /* configure send slope */ ++ value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue)); ++ value &= ~MTL_SEND_SLP_CRED_SSC_MASK; ++ value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK; ++ writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue)); ++ ++ /* configure idle slope (same register as tx weight) */ ++ dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue); ++ ++ /* configure high credit */ ++ value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue)); ++ value &= ~MTL_HIGH_CRED_HC_MASK; ++ value |= high_credit & MTL_HIGH_CRED_HC_MASK; ++ writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue)); ++ ++ /* configure low credit */ ++ value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue)); ++ value &= ~MTL_HIGH_CRED_LC_MASK; ++ value |= low_credit & MTL_HIGH_CRED_LC_MASK; ++ writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue)); ++} ++ + static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space) + { + void __iomem *ioaddr = hw->pcsr; +@@ -251,11 +445,12 @@ static void dwmac4_set_filter(struct mac + } + + static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, +- unsigned int fc, unsigned int pause_time) ++ unsigned int fc, unsigned int pause_time, ++ u32 tx_cnt) + { + void __iomem *ioaddr = hw->pcsr; +- u32 channel = STMMAC_CHAN0; /* FIXME */ + unsigned int flow = 0; ++ u32 queue = 0; + + pr_debug("GMAC Flow-Control:\n"); + if (fc & FLOW_RX) { +@@ -265,13 +460,18 @@ static void dwmac4_flow_ctrl(struct mac_ + } + if (fc & FLOW_TX) { + pr_debug("\tTransmit Flow-Control ON\n"); +- flow |= GMAC_TX_FLOW_CTRL_TFE; +- writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel)); + +- if (duplex) { ++ if (duplex) + pr_debug("\tduplex mode: PAUSE %d\n", pause_time); +- flow |= (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT); +- writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel)); ++ ++ for (queue = 0; queue < tx_cnt; queue++) { ++ flow |= GMAC_TX_FLOW_CTRL_TFE; ++ ++ if (duplex) ++ flow |= ++ (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT); ++ ++ writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue)); + } + } + } +@@ -325,11 +525,34 @@ static void dwmac4_phystatus(void __iome + } + } + ++static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ u32 mtl_int_qx_status; ++ int ret = 0; ++ ++ mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS); ++ ++ /* Check MTL Interrupt */ ++ if (mtl_int_qx_status & MTL_INT_QX(chan)) { ++ /* read Queue x Interrupt status */ ++ u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan)); ++ ++ if (status & MTL_RX_OVERFLOW_INT)
{ ++ /* clear Interrupt */ ++ writel(status | MTL_RX_OVERFLOW_INT, ++ ioaddr + MTL_CHAN_INT_CTRL(chan)); ++ ret = CORE_IRQ_MTL_RX_OVERFLOW; ++ } ++ } ++ ++ return ret; ++} ++ + static int dwmac4_irq_status(struct mac_device_info *hw, + struct stmmac_extra_stats *x) + { + void __iomem *ioaddr = hw->pcsr; +- u32 mtl_int_qx_status; + u32 intr_status; + int ret = 0; + +@@ -348,20 +571,6 @@ static int dwmac4_irq_status(struct mac_ + x->irq_receive_pmt_irq_n++; + } + +- mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS); +- /* Check MTL Interrupt: Currently only one queue is used: Q0. */ +- if (mtl_int_qx_status & MTL_INT_Q0) { +- /* read Queue 0 Interrupt status */ +- u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0)); +- +- if (status & MTL_RX_OVERFLOW_INT) { +- /* clear Interrupt */ +- writel(status | MTL_RX_OVERFLOW_INT, +- ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0)); +- ret = CORE_IRQ_MTL_RX_OVERFLOW; +- } +- } +- + dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x); + if (intr_status & PCS_RGSMIIIS_IRQ) + dwmac4_phystatus(ioaddr, x); +@@ -369,64 +578,69 @@ static int dwmac4_irq_status(struct mac_ + return ret; + } + +-static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x) ++static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x, ++ u32 rx_queues, u32 tx_queues) + { + u32 value; ++ u32 queue; + +- /* Currently only channel 0 is supported */ +- value = readl(ioaddr + MTL_CHAN_TX_DEBUG(STMMAC_CHAN0)); ++ for (queue = 0; queue < tx_queues; queue++) { ++ value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue)); + +- if (value & MTL_DEBUG_TXSTSFSTS) +- x->mtl_tx_status_fifo_full++; +- if (value & MTL_DEBUG_TXFSTS) +- x->mtl_tx_fifo_not_empty++; +- if (value & MTL_DEBUG_TWCSTS) +- x->mmtl_fifo_ctrl++; +- if (value & MTL_DEBUG_TRCSTS_MASK) { +- u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK) +- >> MTL_DEBUG_TRCSTS_SHIFT; +- if (trcsts == MTL_DEBUG_TRCSTS_WRITE) +- x->mtl_tx_fifo_read_ctrl_write++; +- else if (trcsts == MTL_DEBUG_TRCSTS_TXW) +- x->mtl_tx_fifo_read_ctrl_wait++; +- else if (trcsts == MTL_DEBUG_TRCSTS_READ) +- x->mtl_tx_fifo_read_ctrl_read++; +- else +- x->mtl_tx_fifo_read_ctrl_idle++; ++ if (value & MTL_DEBUG_TXSTSFSTS) ++ x->mtl_tx_status_fifo_full++; ++ if (value & MTL_DEBUG_TXFSTS) ++ x->mtl_tx_fifo_not_empty++; ++ if (value & MTL_DEBUG_TWCSTS) ++ x->mmtl_fifo_ctrl++; ++ if (value & MTL_DEBUG_TRCSTS_MASK) { ++ u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK) ++ >> MTL_DEBUG_TRCSTS_SHIFT; ++ if (trcsts == MTL_DEBUG_TRCSTS_WRITE) ++ x->mtl_tx_fifo_read_ctrl_write++; ++ else if (trcsts == MTL_DEBUG_TRCSTS_TXW) ++ x->mtl_tx_fifo_read_ctrl_wait++; ++ else if (trcsts == MTL_DEBUG_TRCSTS_READ) ++ x->mtl_tx_fifo_read_ctrl_read++; ++ else ++ x->mtl_tx_fifo_read_ctrl_idle++; ++ } ++ if (value & MTL_DEBUG_TXPAUSED) ++ x->mac_tx_in_pause++; + } +- if (value & MTL_DEBUG_TXPAUSED) +- x->mac_tx_in_pause++; + +- value = readl(ioaddr + MTL_CHAN_RX_DEBUG(STMMAC_CHAN0)); ++ for (queue = 0; queue < rx_queues; queue++) { ++ value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue)); + +- if (value & MTL_DEBUG_RXFSTS_MASK) { +- u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK) +- >> MTL_DEBUG_RRCSTS_SHIFT; +- +- if (rxfsts == MTL_DEBUG_RXFSTS_FULL) +- x->mtl_rx_fifo_fill_level_full++; +- else if (rxfsts == MTL_DEBUG_RXFSTS_AT) +- x->mtl_rx_fifo_fill_above_thresh++; +- else if (rxfsts == MTL_DEBUG_RXFSTS_BT) +- x->mtl_rx_fifo_fill_below_thresh++; +- else +- x->mtl_rx_fifo_fill_level_empty++; +- } +- if (value & MTL_DEBUG_RRCSTS_MASK) { +- u32 rrcsts = (value & 
MTL_DEBUG_RRCSTS_MASK) >> +- MTL_DEBUG_RRCSTS_SHIFT; +- +- if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH) +- x->mtl_rx_fifo_read_ctrl_flush++; +- else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT) +- x->mtl_rx_fifo_read_ctrl_read_data++; +- else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA) +- x->mtl_rx_fifo_read_ctrl_status++; +- else +- x->mtl_rx_fifo_read_ctrl_idle++; ++ if (value & MTL_DEBUG_RXFSTS_MASK) { ++ u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK) ++ >> MTL_DEBUG_RRCSTS_SHIFT; ++ ++ if (rxfsts == MTL_DEBUG_RXFSTS_FULL) ++ x->mtl_rx_fifo_fill_level_full++; ++ else if (rxfsts == MTL_DEBUG_RXFSTS_AT) ++ x->mtl_rx_fifo_fill_above_thresh++; ++ else if (rxfsts == MTL_DEBUG_RXFSTS_BT) ++ x->mtl_rx_fifo_fill_below_thresh++; ++ else ++ x->mtl_rx_fifo_fill_level_empty++; ++ } ++ if (value & MTL_DEBUG_RRCSTS_MASK) { ++ u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >> ++ MTL_DEBUG_RRCSTS_SHIFT; ++ ++ if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH) ++ x->mtl_rx_fifo_read_ctrl_flush++; ++ else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT) ++ x->mtl_rx_fifo_read_ctrl_read_data++; ++ else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA) ++ x->mtl_rx_fifo_read_ctrl_status++; ++ else ++ x->mtl_rx_fifo_read_ctrl_idle++; ++ } ++ if (value & MTL_DEBUG_RWCSTS) ++ x->mtl_rx_fifo_ctrl_active++; + } +- if (value & MTL_DEBUG_RWCSTS) +- x->mtl_rx_fifo_ctrl_active++; + + /* GMAC debug */ + value = readl(ioaddr + GMAC_DEBUG); +@@ -455,10 +669,51 @@ static void dwmac4_debug(void __iomem *i + + static const struct stmmac_ops dwmac4_ops = { + .core_init = dwmac4_core_init, ++ .set_mac = stmmac_set_mac, + .rx_ipc = dwmac4_rx_ipc_enable, + .rx_queue_enable = dwmac4_rx_queue_enable, ++ .rx_queue_prio = dwmac4_rx_queue_priority, ++ .tx_queue_prio = dwmac4_tx_queue_priority, ++ .rx_queue_routing = dwmac4_tx_queue_routing, ++ .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms, ++ .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms, ++ .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight, ++ .map_mtl_to_dma = dwmac4_map_mtl_dma, ++ .config_cbs = dwmac4_config_cbs, + .dump_regs = dwmac4_dump_regs, + .host_irq_status = dwmac4_irq_status, ++ .host_mtl_irq_status = dwmac4_irq_mtl_status, ++ .flow_ctrl = dwmac4_flow_ctrl, ++ .pmt = dwmac4_pmt, ++ .set_umac_addr = dwmac4_set_umac_addr, ++ .get_umac_addr = dwmac4_get_umac_addr, ++ .set_eee_mode = dwmac4_set_eee_mode, ++ .reset_eee_mode = dwmac4_reset_eee_mode, ++ .set_eee_timer = dwmac4_set_eee_timer, ++ .set_eee_pls = dwmac4_set_eee_pls, ++ .pcs_ctrl_ane = dwmac4_ctrl_ane, ++ .pcs_rane = dwmac4_rane, ++ .pcs_get_adv_lp = dwmac4_get_adv_lp, ++ .debug = dwmac4_debug, ++ .set_filter = dwmac4_set_filter, ++}; ++ ++static const struct stmmac_ops dwmac410_ops = { ++ .core_init = dwmac4_core_init, ++ .set_mac = stmmac_dwmac4_set_mac, ++ .rx_ipc = dwmac4_rx_ipc_enable, ++ .rx_queue_enable = dwmac4_rx_queue_enable, ++ .rx_queue_prio = dwmac4_rx_queue_priority, ++ .tx_queue_prio = dwmac4_tx_queue_priority, ++ .rx_queue_routing = dwmac4_tx_queue_routing, ++ .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms, ++ .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms, ++ .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight, ++ .map_mtl_to_dma = dwmac4_map_mtl_dma, ++ .config_cbs = dwmac4_config_cbs, ++ .dump_regs = dwmac4_dump_regs, ++ .host_irq_status = dwmac4_irq_status, ++ .host_mtl_irq_status = dwmac4_irq_mtl_status, + .flow_ctrl = dwmac4_flow_ctrl, + .pmt = dwmac4_pmt, + .set_umac_addr = dwmac4_set_umac_addr, +@@ -492,8 +747,6 @@ struct mac_device_info *dwmac4_setup(voi + if (mac->multicast_filter_bins) + 
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); + +- mac->mac = &dwmac4_ops; +- + mac->link.port = GMAC_CONFIG_PS; + mac->link.duplex = GMAC_CONFIG_DM; + mac->link.speed = GMAC_CONFIG_FES; +@@ -514,5 +767,10 @@ struct mac_device_info *dwmac4_setup(voi + else + mac->dma = &dwmac4_dma_ops; + ++ if (*synopsys_id >= DWMAC_CORE_4_00) ++ mac->mac = &dwmac410_ops; ++ else ++ mac->mac = &dwmac4_ops; ++ + return mac; + } +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +@@ -214,13 +214,13 @@ static int dwmac4_wrback_get_tx_timestam + { + /* Context type from W/B descriptor must be zero */ + if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE) +- return -EINVAL; ++ return 0; + + /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */ + if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS) +- return 0; ++ return 1; + +- return 1; ++ return 0; + } + + static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) +@@ -282,7 +282,10 @@ static int dwmac4_wrback_get_rx_timestam + } + } + exit: +- return ret; ++ if (likely(ret == 0)) ++ return 1; ++ ++ return 0; + } + + static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, +@@ -304,12 +307,13 @@ static void dwmac4_rd_init_tx_desc(struc + + static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, + bool csum_flag, int mode, bool tx_own, +- bool ls) ++ bool ls, unsigned int tot_pkt_len) + { + unsigned int tdes3 = le32_to_cpu(p->des3); + + p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK); + ++ tdes3 |= tot_pkt_len & TDES3_PACKET_SIZE_MASK; + if (is_fs) + tdes3 |= TDES3_FIRST_DESCRIPTOR; + else +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +@@ -71,36 +71,48 @@ static void dwmac4_dma_axi(void __iomem + writel(value, ioaddr + DMA_SYS_BUS_MODE); + } + +-static void dwmac4_dma_init_channel(void __iomem *ioaddr, +- struct stmmac_dma_cfg *dma_cfg, +- u32 dma_tx_phy, u32 dma_rx_phy, +- u32 channel) ++void dwmac4_dma_init_rx_chan(void __iomem *ioaddr, ++ struct stmmac_dma_cfg *dma_cfg, ++ u32 dma_rx_phy, u32 chan) + { + u32 value; +- int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; +- int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl; ++ u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl; + +- /* set PBL for each channels. 
Currently we affect same configuration +- * on each channel +- */ +- value = readl(ioaddr + DMA_CHAN_CONTROL(channel)); +- if (dma_cfg->pblx8) +- value = value | DMA_BUS_MODE_PBL; +- writel(value, ioaddr + DMA_CHAN_CONTROL(channel)); ++ value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); ++ value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT); ++ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); ++ ++ writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(chan)); ++} + +- value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel)); ++void dwmac4_dma_init_tx_chan(void __iomem *ioaddr, ++ struct stmmac_dma_cfg *dma_cfg, ++ u32 dma_tx_phy, u32 chan) ++{ ++ u32 value; ++ u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; ++ ++ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); + value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT); +- writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel)); ++ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); + +- value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel)); +- value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT); +- writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel)); ++ writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan)); ++} + +- /* Mask interrupts by writing to CSR7 */ +- writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(channel)); ++void dwmac4_dma_init_channel(void __iomem *ioaddr, ++ struct stmmac_dma_cfg *dma_cfg, u32 chan) ++{ ++ u32 value; ++ ++ /* common channel control register config */ ++ value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); ++ if (dma_cfg->pblx8) ++ value = value | DMA_BUS_MODE_PBL; ++ writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); + +- writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(channel)); +- writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel)); ++ /* Mask interrupts by writing to CSR7 */ ++ writel(DMA_CHAN_INTR_DEFAULT_MASK, ++ ioaddr + DMA_CHAN_INTR_ENA(chan)); + } + + static void dwmac4_dma_init(void __iomem *ioaddr, +@@ -108,7 +120,6 @@ static void dwmac4_dma_init(void __iomem + u32 dma_tx, u32 dma_rx, int atds) + { + u32 value = readl(ioaddr + DMA_SYS_BUS_MODE); +- int i; + + /* Set the Fixed burst mode */ + if (dma_cfg->fixed_burst) +@@ -122,9 +133,6 @@ static void dwmac4_dma_init(void __iomem + value |= DMA_SYS_BUS_AAL; + + writel(value, ioaddr + DMA_SYS_BUS_MODE); +- +- for (i = 0; i < DMA_CHANNEL_NB_MAX; i++) +- dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i); + } + + static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel, +@@ -174,46 +182,121 @@ static void dwmac4_dump_dma_regs(void __ + _dwmac4_dump_dma_regs(ioaddr, i, reg_space); + } + +-static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt) ++static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan) + { +- int i; ++ u32 chan; + +- for (i = 0; i < DMA_CHANNEL_NB_MAX; i++) +- writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i)); ++ for (chan = 0; chan < number_chan; chan++) ++ writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(chan)); + } + +-static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode, +- int rxmode, u32 channel) ++static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode, ++ u32 channel, int fifosz) + { +- u32 mtl_tx_op, mtl_rx_op, mtl_rx_int; ++ unsigned int rqs = fifosz / 256 - 1; ++ u32 mtl_rx_op, mtl_rx_int; + +- /* Following code only done for channel 0, other channels not yet +- * supported. 
+- */ +- mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel)); ++ mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel)); ++ ++ if (mode == SF_DMA_MODE) { ++ pr_debug("GMAC: enable RX store and forward mode\n"); ++ mtl_rx_op |= MTL_OP_MODE_RSF; ++ } else { ++ pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode); ++ mtl_rx_op &= ~MTL_OP_MODE_RSF; ++ mtl_rx_op &= MTL_OP_MODE_RTC_MASK; ++ if (mode <= 32) ++ mtl_rx_op |= MTL_OP_MODE_RTC_32; ++ else if (mode <= 64) ++ mtl_rx_op |= MTL_OP_MODE_RTC_64; ++ else if (mode <= 96) ++ mtl_rx_op |= MTL_OP_MODE_RTC_96; ++ else ++ mtl_rx_op |= MTL_OP_MODE_RTC_128; ++ } ++ ++ mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK; ++ mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT; ++ ++ /* enable flow control only if each channel gets 4 KiB or more FIFO */ ++ if (fifosz >= 4096) { ++ unsigned int rfd, rfa; ++ ++ mtl_rx_op |= MTL_OP_MODE_EHFC; ++ ++ /* Set Threshold for Activating Flow Control to min 2 frames, ++ * i.e. 1500 * 2 = 3000 bytes. ++ * ++ * Set Threshold for Deactivating Flow Control to min 1 frame, ++ * i.e. 1500 bytes. ++ */ ++ switch (fifosz) { ++ case 4096: ++ /* This violates the above formula because of the FIFO size ++ * limit, so an overflow may still occur despite this setting. ++ */ ++ rfd = 0x03; /* Full-2.5K */ ++ rfa = 0x01; /* Full-1.5K */ ++ break; ++ ++ case 8192: ++ rfd = 0x06; /* Full-4K */ ++ rfa = 0x0a; /* Full-6K */ ++ break; ++ ++ case 16384: ++ rfd = 0x06; /* Full-4K */ ++ rfa = 0x12; /* Full-10K */ ++ break; ++ ++ default: ++ rfd = 0x06; /* Full-4K */ ++ rfa = 0x1e; /* Full-16K */ ++ break; ++ } + +- if (txmode == SF_DMA_MODE) { ++ mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK; ++ mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT; ++ ++ mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK; ++ mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT; ++ } ++ ++ writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel)); ++ ++ /* Enable the MTL RX overflow interrupt */ ++ mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel)); ++ writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN, ++ ioaddr + MTL_CHAN_INT_CTRL(channel)); ++} ++ ++static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode, ++ u32 channel) ++{ ++ u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel)); ++ ++ if (mode == SF_DMA_MODE) { + pr_debug("GMAC: enable TX store and forward mode\n"); + /* Transmit COE type 2 cannot be done in cut-through mode.
*/ + mtl_tx_op |= MTL_OP_MODE_TSF; + } else { +- pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode); ++ pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode); + mtl_tx_op &= ~MTL_OP_MODE_TSF; + mtl_tx_op &= MTL_OP_MODE_TTC_MASK; + /* Set the transmit threshold */ +- if (txmode <= 32) ++ if (mode <= 32) + mtl_tx_op |= MTL_OP_MODE_TTC_32; +- else if (txmode <= 64) ++ else if (mode <= 64) + mtl_tx_op |= MTL_OP_MODE_TTC_64; +- else if (txmode <= 96) ++ else if (mode <= 96) + mtl_tx_op |= MTL_OP_MODE_TTC_96; +- else if (txmode <= 128) ++ else if (mode <= 128) + mtl_tx_op |= MTL_OP_MODE_TTC_128; +- else if (txmode <= 192) ++ else if (mode <= 192) + mtl_tx_op |= MTL_OP_MODE_TTC_192; +- else if (txmode <= 256) ++ else if (mode <= 256) + mtl_tx_op |= MTL_OP_MODE_TTC_256; +- else if (txmode <= 384) ++ else if (mode <= 384) + mtl_tx_op |= MTL_OP_MODE_TTC_384; + else + mtl_tx_op |= MTL_OP_MODE_TTC_512; +@@ -230,39 +313,6 @@ static void dwmac4_dma_chan_op_mode(void + */ + mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK; + writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel)); +- +- mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel)); +- +- if (rxmode == SF_DMA_MODE) { +- pr_debug("GMAC: enable RX store and forward mode\n"); +- mtl_rx_op |= MTL_OP_MODE_RSF; +- } else { +- pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode); +- mtl_rx_op &= ~MTL_OP_MODE_RSF; +- mtl_rx_op &= MTL_OP_MODE_RTC_MASK; +- if (rxmode <= 32) +- mtl_rx_op |= MTL_OP_MODE_RTC_32; +- else if (rxmode <= 64) +- mtl_rx_op |= MTL_OP_MODE_RTC_64; +- else if (rxmode <= 96) +- mtl_rx_op |= MTL_OP_MODE_RTC_96; +- else +- mtl_rx_op |= MTL_OP_MODE_RTC_128; +- } +- +- writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel)); +- +- /* Enable MTL RX overflow */ +- mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel)); +- writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN, +- ioaddr + MTL_CHAN_INT_CTRL(channel)); +-} +- +-static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode, +- int rxmode, int rxfifosz) +-{ +- /* Only Channel 0 is actually configured and used */ +- dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0); + } + + static void dwmac4_get_hw_feature(void __iomem *ioaddr, +@@ -294,6 +344,11 @@ static void dwmac4_get_hw_feature(void _ + hw_cap = readl(ioaddr + GMAC_HW_FEATURE1); + dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20; + dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18; ++ /* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by ++ * shifting and store the sizes in bytes. 
++ */ ++ dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6); ++ dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0); + /* MAC HW feature2 */ + hw_cap = readl(ioaddr + GMAC_HW_FEATURE2); + /* TX and RX number of channels */ +@@ -332,9 +387,13 @@ static void dwmac4_enable_tso(void __iom + const struct stmmac_dma_ops dwmac4_dma_ops = { + .reset = dwmac4_dma_reset, + .init = dwmac4_dma_init, ++ .init_chan = dwmac4_dma_init_channel, ++ .init_rx_chan = dwmac4_dma_init_rx_chan, ++ .init_tx_chan = dwmac4_dma_init_tx_chan, + .axi = dwmac4_dma_axi, + .dump_regs = dwmac4_dump_dma_regs, +- .dma_mode = dwmac4_dma_operation_mode, ++ .dma_rx_mode = dwmac4_dma_rx_chan_op_mode, ++ .dma_tx_mode = dwmac4_dma_tx_chan_op_mode, + .enable_dma_irq = dwmac4_enable_dma_irq, + .disable_dma_irq = dwmac4_disable_dma_irq, + .start_tx = dwmac4_dma_start_tx, +@@ -354,9 +413,13 @@ const struct stmmac_dma_ops dwmac4_dma_o + const struct stmmac_dma_ops dwmac410_dma_ops = { + .reset = dwmac4_dma_reset, + .init = dwmac4_dma_init, ++ .init_chan = dwmac4_dma_init_channel, ++ .init_rx_chan = dwmac4_dma_init_rx_chan, ++ .init_tx_chan = dwmac4_dma_init_tx_chan, + .axi = dwmac4_dma_axi, + .dump_regs = dwmac4_dump_dma_regs, +- .dma_mode = dwmac4_dma_operation_mode, ++ .dma_rx_mode = dwmac4_dma_rx_chan_op_mode, ++ .dma_tx_mode = dwmac4_dma_tx_chan_op_mode, + .enable_dma_irq = dwmac410_enable_dma_irq, + .disable_dma_irq = dwmac4_disable_dma_irq, + .start_tx = dwmac4_dma_start_tx, +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h +@@ -185,17 +185,17 @@ + + int dwmac4_dma_reset(void __iomem *ioaddr); + void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr); +-void dwmac4_enable_dma_irq(void __iomem *ioaddr); +-void dwmac410_enable_dma_irq(void __iomem *ioaddr); +-void dwmac4_disable_dma_irq(void __iomem *ioaddr); +-void dwmac4_dma_start_tx(void __iomem *ioaddr); +-void dwmac4_dma_stop_tx(void __iomem *ioaddr); +-void dwmac4_dma_start_rx(void __iomem *ioaddr); +-void dwmac4_dma_stop_rx(void __iomem *ioaddr); ++void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan); ++void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan); ++void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan); ++void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan); ++void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan); ++void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan); ++void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan); + int dwmac4_dma_interrupt(void __iomem *ioaddr, +- struct stmmac_extra_stats *x); +-void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len); +-void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len); ++ struct stmmac_extra_stats *x, u32 chan); ++void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan); ++void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan); + void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan); + void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan); + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +@@ -37,96 +37,96 @@ int dwmac4_dma_reset(void __iomem *ioadd + + void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan) + { +- writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(0)); ++ writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(chan)); + } + + void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan) + { +- 
writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(0)); ++ writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(chan)); + } + +-void dwmac4_dma_start_tx(void __iomem *ioaddr) ++void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan) + { +- u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); ++ u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); + + value |= DMA_CONTROL_ST; +- writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); ++ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); + + value = readl(ioaddr + GMAC_CONFIG); + value |= GMAC_CONFIG_TE; + writel(value, ioaddr + GMAC_CONFIG); + } + +-void dwmac4_dma_stop_tx(void __iomem *ioaddr) ++void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan) + { +- u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); ++ u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); + + value &= ~DMA_CONTROL_ST; +- writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); ++ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); + + value = readl(ioaddr + GMAC_CONFIG); + value &= ~GMAC_CONFIG_TE; + writel(value, ioaddr + GMAC_CONFIG); + } + +-void dwmac4_dma_start_rx(void __iomem *ioaddr) ++void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan) + { +- u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); ++ u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); + + value |= DMA_CONTROL_SR; + +- writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); ++ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); + + value = readl(ioaddr + GMAC_CONFIG); + value |= GMAC_CONFIG_RE; + writel(value, ioaddr + GMAC_CONFIG); + } + +-void dwmac4_dma_stop_rx(void __iomem *ioaddr) ++void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan) + { +- u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); ++ u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); + + value &= ~DMA_CONTROL_SR; +- writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); ++ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); + + value = readl(ioaddr + GMAC_CONFIG); + value &= ~GMAC_CONFIG_RE; + writel(value, ioaddr + GMAC_CONFIG); + } + +-void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len) ++void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan) + { +- writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(STMMAC_CHAN0)); ++ writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(chan)); + } + +-void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len) ++void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan) + { +- writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(STMMAC_CHAN0)); ++ writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan)); + } + +-void dwmac4_enable_dma_irq(void __iomem *ioaddr) ++void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan) + { + writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + +- DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); ++ DMA_CHAN_INTR_ENA(chan)); + } + +-void dwmac410_enable_dma_irq(void __iomem *ioaddr) ++void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan) + { + writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10, +- ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); ++ ioaddr + DMA_CHAN_INTR_ENA(chan)); + } + +-void dwmac4_disable_dma_irq(void __iomem *ioaddr) ++void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan) + { +- writel(0, ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); ++ writel(0, ioaddr + DMA_CHAN_INTR_ENA(chan)); + } + + int dwmac4_dma_interrupt(void __iomem *ioaddr, +- struct stmmac_extra_stats *x) ++ struct stmmac_extra_stats *x, u32 chan) + { + int ret = 0; + +- u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0)); ++ u32 intr_status = 
readl(ioaddr + DMA_CHAN_STATUS(chan)); + + /* ABNORMAL interrupts */ + if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) { +@@ -153,7 +153,7 @@ int dwmac4_dma_interrupt(void __iomem *i + if (likely(intr_status & DMA_CHAN_STATUS_RI)) { + u32 value; + +- value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); ++ value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); + /* to schedule NAPI on real RIE event. */ + if (likely(value & DMA_CHAN_INTR_ENA_RIE)) { + x->rx_normal_irq_n++; +@@ -172,7 +172,7 @@ int dwmac4_dma_interrupt(void __iomem *i + * status [21-0] expect reserved bits [5-3] + */ + writel((intr_status & 0x3fffc7), +- ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0)); ++ ioaddr + DMA_CHAN_STATUS(chan)); + + return ret; + } +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +@@ -137,13 +137,14 @@ + #define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ + + void dwmac_enable_dma_transmission(void __iomem *ioaddr); +-void dwmac_enable_dma_irq(void __iomem *ioaddr); +-void dwmac_disable_dma_irq(void __iomem *ioaddr); +-void dwmac_dma_start_tx(void __iomem *ioaddr); +-void dwmac_dma_stop_tx(void __iomem *ioaddr); +-void dwmac_dma_start_rx(void __iomem *ioaddr); +-void dwmac_dma_stop_rx(void __iomem *ioaddr); +-int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x); ++void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan); ++void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan); ++void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan); ++void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan); ++void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan); ++void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan); ++int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x, ++ u32 chan); + int dwmac_dma_reset(void __iomem *ioaddr); + + #endif /* __DWMAC_DMA_H__ */ +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +@@ -47,38 +47,38 @@ void dwmac_enable_dma_transmission(void + writel(1, ioaddr + DMA_XMT_POLL_DEMAND); + } + +-void dwmac_enable_dma_irq(void __iomem *ioaddr) ++void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan) + { + writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); + } + +-void dwmac_disable_dma_irq(void __iomem *ioaddr) ++void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan) + { + writel(0, ioaddr + DMA_INTR_ENA); + } + +-void dwmac_dma_start_tx(void __iomem *ioaddr) ++void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan) + { + u32 value = readl(ioaddr + DMA_CONTROL); + value |= DMA_CONTROL_ST; + writel(value, ioaddr + DMA_CONTROL); + } + +-void dwmac_dma_stop_tx(void __iomem *ioaddr) ++void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan) + { + u32 value = readl(ioaddr + DMA_CONTROL); + value &= ~DMA_CONTROL_ST; + writel(value, ioaddr + DMA_CONTROL); + } + +-void dwmac_dma_start_rx(void __iomem *ioaddr) ++void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan) + { + u32 value = readl(ioaddr + DMA_CONTROL); + value |= DMA_CONTROL_SR; + writel(value, ioaddr + DMA_CONTROL); + } + +-void dwmac_dma_stop_rx(void __iomem *ioaddr) ++void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan) + { + u32 value = readl(ioaddr + DMA_CONTROL); + value &= ~DMA_CONTROL_SR; +@@ -156,7 +156,7 @@ static void show_rx_process_state(unsign + #endif + + int dwmac_dma_interrupt(void __iomem *ioaddr, +- struct stmmac_extra_stats *x) ++ struct stmmac_extra_stats *x, u32 chan) + { + int ret = 0; + /* read the status register (CSR5) */ +--- 
a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c ++++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +@@ -315,7 +315,7 @@ static void enh_desc_release_tx_desc(str + + static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, + bool csum_flag, int mode, bool tx_own, +- bool ls) ++ bool ls, unsigned int tot_pkt_len) + { + unsigned int tdes0 = le32_to_cpu(p->des0); + +--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c ++++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +@@ -191,7 +191,7 @@ static void ndesc_release_tx_desc(struct + + static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, + bool csum_flag, int mode, bool tx_own, +- bool ls) ++ bool ls, unsigned int tot_pkt_len) + { + unsigned int tdes1 = le32_to_cpu(p->des1); + +--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c ++++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +@@ -26,16 +26,17 @@ + + static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) + { +- struct stmmac_priv *priv = (struct stmmac_priv *)p; +- unsigned int entry = priv->cur_tx; +- struct dma_desc *desc; ++ struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p; + unsigned int nopaged_len = skb_headlen(skb); ++ struct stmmac_priv *priv = tx_q->priv_data; ++ unsigned int entry = tx_q->cur_tx; + unsigned int bmax, len, des2; ++ struct dma_desc *desc; + + if (priv->extend_desc) +- desc = (struct dma_desc *)(priv->dma_etx + entry); ++ desc = (struct dma_desc *)(tx_q->dma_etx + entry); + else +- desc = priv->dma_tx + entry; ++ desc = tx_q->dma_tx + entry; + + if (priv->plat->enh_desc) + bmax = BUF_SIZE_8KiB; +@@ -52,48 +53,51 @@ static int stmmac_jumbo_frm(void *p, str + if (dma_mapping_error(priv->device, des2)) + return -1; + +- priv->tx_skbuff_dma[entry].buf = des2; +- priv->tx_skbuff_dma[entry].len = bmax; +- priv->tx_skbuff_dma[entry].is_jumbo = true; ++ tx_q->tx_skbuff_dma[entry].buf = des2; ++ tx_q->tx_skbuff_dma[entry].len = bmax; ++ tx_q->tx_skbuff_dma[entry].is_jumbo = true; + + desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); + priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, +- STMMAC_RING_MODE, 0, false); +- priv->tx_skbuff[entry] = NULL; ++ STMMAC_RING_MODE, 0, ++ false, skb->len); ++ tx_q->tx_skbuff[entry] = NULL; + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); + + if (priv->extend_desc) +- desc = (struct dma_desc *)(priv->dma_etx + entry); ++ desc = (struct dma_desc *)(tx_q->dma_etx + entry); + else +- desc = priv->dma_tx + entry; ++ desc = tx_q->dma_tx + entry; + + des2 = dma_map_single(priv->device, skb->data + bmax, len, + DMA_TO_DEVICE); + desc->des2 = cpu_to_le32(des2); + if (dma_mapping_error(priv->device, des2)) + return -1; +- priv->tx_skbuff_dma[entry].buf = des2; +- priv->tx_skbuff_dma[entry].len = len; +- priv->tx_skbuff_dma[entry].is_jumbo = true; ++ tx_q->tx_skbuff_dma[entry].buf = des2; ++ tx_q->tx_skbuff_dma[entry].len = len; ++ tx_q->tx_skbuff_dma[entry].is_jumbo = true; + + desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); + priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, +- STMMAC_RING_MODE, 1, true); ++ STMMAC_RING_MODE, 1, ++ true, skb->len); + } else { + des2 = dma_map_single(priv->device, skb->data, + nopaged_len, DMA_TO_DEVICE); + desc->des2 = cpu_to_le32(des2); + if (dma_mapping_error(priv->device, des2)) + return -1; +- priv->tx_skbuff_dma[entry].buf = des2; +- priv->tx_skbuff_dma[entry].len = nopaged_len; +- priv->tx_skbuff_dma[entry].is_jumbo = true; ++ tx_q->tx_skbuff_dma[entry].buf = des2; ++ tx_q->tx_skbuff_dma[entry].len = nopaged_len; ++ 
tx_q->tx_skbuff_dma[entry].is_jumbo = true; + desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); + priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, +- STMMAC_RING_MODE, 0, true); ++ STMMAC_RING_MODE, 0, ++ true, skb->len); + } + +- priv->cur_tx = entry; ++ tx_q->cur_tx = entry; + + return entry; + } +@@ -125,12 +129,13 @@ static void stmmac_init_desc3(struct dma + + static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) + { +- struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; +- unsigned int entry = priv->dirty_tx; ++ struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr; ++ struct stmmac_priv *priv = tx_q->priv_data; ++ unsigned int entry = tx_q->dirty_tx; + + /* des3 is only used for jumbo frames tx or time stamping */ +- if (unlikely(priv->tx_skbuff_dma[entry].is_jumbo || +- (priv->tx_skbuff_dma[entry].last_segment && ++ if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo || ++ (tx_q->tx_skbuff_dma[entry].last_segment && + !priv->extend_desc && priv->hwts_tx_en))) + p->des3 = 0; + } +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h +@@ -46,38 +46,51 @@ struct stmmac_tx_info { + bool is_jumbo; + }; + +-struct stmmac_priv { +- /* Frequently used values are kept adjacent for cache effect */ ++/* Frequently used values are kept adjacent for cache effect */ ++struct stmmac_tx_queue { ++ u32 queue_index; ++ struct stmmac_priv *priv_data; + struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; + struct dma_desc *dma_tx; + struct sk_buff **tx_skbuff; ++ struct stmmac_tx_info *tx_skbuff_dma; + unsigned int cur_tx; + unsigned int dirty_tx; ++ dma_addr_t dma_tx_phy; ++ u32 tx_tail_addr; ++}; ++ ++struct stmmac_rx_queue { ++ u32 queue_index; ++ struct stmmac_priv *priv_data; ++ struct dma_extended_desc *dma_erx; ++ struct dma_desc *dma_rx ____cacheline_aligned_in_smp; ++ struct sk_buff **rx_skbuff; ++ dma_addr_t *rx_skbuff_dma; ++ unsigned int cur_rx; ++ unsigned int dirty_rx; ++ u32 rx_zeroc_thresh; ++ dma_addr_t dma_rx_phy; ++ u32 rx_tail_addr; ++ struct napi_struct napi ____cacheline_aligned_in_smp; ++}; ++ ++struct stmmac_priv { ++ /* Frequently used values are kept adjacent for cache effect */ + u32 tx_count_frames; + u32 tx_coal_frames; + u32 tx_coal_timer; +- struct stmmac_tx_info *tx_skbuff_dma; +- dma_addr_t dma_tx_phy; ++ + int tx_coalesce; + int hwts_tx_en; + bool tx_path_in_lpi_mode; + struct timer_list txtimer; + bool tso; + +- struct dma_desc *dma_rx ____cacheline_aligned_in_smp; +- struct dma_extended_desc *dma_erx; +- struct sk_buff **rx_skbuff; +- unsigned int cur_rx; +- unsigned int dirty_rx; + unsigned int dma_buf_sz; + unsigned int rx_copybreak; +- unsigned int rx_zeroc_thresh; + u32 rx_riwt; + int hwts_rx_en; +- dma_addr_t *rx_skbuff_dma; +- dma_addr_t dma_rx_phy; +- +- struct napi_struct napi ____cacheline_aligned_in_smp; + + void __iomem *ioaddr; + struct net_device *dev; +@@ -85,6 +98,12 @@ struct stmmac_priv { + struct mac_device_info *hw; + spinlock_t lock; + ++ /* RX Queue */ ++ struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES]; ++ ++ /* TX Queue */ ++ struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES]; ++ + int oldlink; + int speed; + int oldduplex; +@@ -119,8 +138,6 @@ struct stmmac_priv { + spinlock_t ptp_lock; + void __iomem *mmcaddr; + void __iomem *ptpaddr; +- u32 rx_tail_addr; +- u32 tx_tail_addr; + u32 mss; + + #ifdef CONFIG_DEBUG_FS +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +@@ -481,6 
+481,7 @@ stmmac_set_pauseparam(struct net_device + struct ethtool_pauseparam *pause) + { + struct stmmac_priv *priv = netdev_priv(netdev); ++ u32 tx_cnt = priv->plat->tx_queues_to_use; + struct phy_device *phy = netdev->phydev; + int new_pause = FLOW_OFF; + +@@ -511,7 +512,7 @@ stmmac_set_pauseparam(struct net_device + } + + priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl, +- priv->pause); ++ priv->pause, tx_cnt); + return 0; + } + +@@ -519,6 +520,8 @@ static void stmmac_get_ethtool_stats(str + struct ethtool_stats *dummy, u64 *data) + { + struct stmmac_priv *priv = netdev_priv(dev); ++ u32 rx_queues_count = priv->plat->rx_queues_to_use; ++ u32 tx_queues_count = priv->plat->tx_queues_to_use; + int i, j = 0; + + /* Update the DMA HW counters for dwmac10/100 */ +@@ -549,7 +552,8 @@ static void stmmac_get_ethtool_stats(str + if ((priv->hw->mac->debug) && + (priv->synopsys_id >= DWMAC_CORE_3_50)) + priv->hw->mac->debug(priv->ioaddr, +- (void *)&priv->xstats); ++ (void *)&priv->xstats, ++ rx_queues_count, tx_queues_count); + } + for (i = 0; i < STMMAC_STATS_LEN; i++) { + char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; +@@ -726,6 +730,7 @@ static int stmmac_set_coalesce(struct ne + struct ethtool_coalesce *ec) + { + struct stmmac_priv *priv = netdev_priv(dev); ++ u32 rx_cnt = priv->plat->rx_queues_to_use; + unsigned int rx_riwt; + + /* Check not supported parameters */ +@@ -764,7 +769,7 @@ static int stmmac_set_coalesce(struct ne + priv->tx_coal_frames = ec->tx_max_coalesced_frames; + priv->tx_coal_timer = ec->tx_coalesce_usecs; + priv->rx_riwt = rx_riwt; +- priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt); ++ priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt, rx_cnt); + + return 0; + } +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -139,6 +139,64 @@ static void stmmac_verify_args(void) + } + + /** ++ * stmmac_disable_all_queues - Disable all queues ++ * @priv: driver private structure ++ */ ++static void stmmac_disable_all_queues(struct stmmac_priv *priv) ++{ ++ u32 rx_queues_cnt = priv->plat->rx_queues_to_use; ++ u32 queue; ++ ++ for (queue = 0; queue < rx_queues_cnt; queue++) { ++ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; ++ ++ napi_disable(&rx_q->napi); ++ } ++} ++ ++/** ++ * stmmac_enable_all_queues - Enable all queues ++ * @priv: driver private structure ++ */ ++static void stmmac_enable_all_queues(struct stmmac_priv *priv) ++{ ++ u32 rx_queues_cnt = priv->plat->rx_queues_to_use; ++ u32 queue; ++ ++ for (queue = 0; queue < rx_queues_cnt; queue++) { ++ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; ++ ++ napi_enable(&rx_q->napi); ++ } ++} ++ ++/** ++ * stmmac_stop_all_queues - Stop all queues ++ * @priv: driver private structure ++ */ ++static void stmmac_stop_all_queues(struct stmmac_priv *priv) ++{ ++ u32 tx_queues_cnt = priv->plat->tx_queues_to_use; ++ u32 queue; ++ ++ for (queue = 0; queue < tx_queues_cnt; queue++) ++ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); ++} ++ ++/** ++ * stmmac_start_all_queues - Start all queues ++ * @priv: driver private structure ++ */ ++static void stmmac_start_all_queues(struct stmmac_priv *priv) ++{ ++ u32 tx_queues_cnt = priv->plat->tx_queues_to_use; ++ u32 queue; ++ ++ for (queue = 0; queue < tx_queues_cnt; queue++) ++ netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue)); ++} ++ ++/** + * stmmac_clk_csr_set - dynamically set the MDC clock + * @priv: driver private structure + * Description: this is 
to dynamically set the MDC clock according to the csr +@@ -185,26 +243,33 @@ static void print_pkt(unsigned char *buf + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); + } + +-static inline u32 stmmac_tx_avail(struct stmmac_priv *priv) ++static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) + { ++ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + u32 avail; + +- if (priv->dirty_tx > priv->cur_tx) +- avail = priv->dirty_tx - priv->cur_tx - 1; ++ if (tx_q->dirty_tx > tx_q->cur_tx) ++ avail = tx_q->dirty_tx - tx_q->cur_tx - 1; + else +- avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1; ++ avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1; + + return avail; + } + +-static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv) ++/** ++ * stmmac_rx_dirty - Get RX queue dirty ++ * @priv: driver private structure ++ * @queue: RX queue index ++ */ ++static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) + { ++ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + u32 dirty; + +- if (priv->dirty_rx <= priv->cur_rx) +- dirty = priv->cur_rx - priv->dirty_rx; ++ if (rx_q->dirty_rx <= rx_q->cur_rx) ++ dirty = rx_q->cur_rx - rx_q->dirty_rx; + else +- dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx; ++ dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx; + + return dirty; + } +@@ -232,9 +297,19 @@ static inline void stmmac_hw_fix_mac_spe + */ + static void stmmac_enable_eee_mode(struct stmmac_priv *priv) + { ++ u32 tx_cnt = priv->plat->tx_queues_to_use; ++ u32 queue; ++ ++ /* check if all TX queues have the work finished */ ++ for (queue = 0; queue < tx_cnt; queue++) { ++ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; ++ ++ if (tx_q->dirty_tx != tx_q->cur_tx) ++ return; /* still unfinished work */ ++ } ++ + /* Check and enter in LPI mode */ +- if ((priv->dirty_tx == priv->cur_tx) && +- (priv->tx_path_in_lpi_mode == false)) ++ if (!priv->tx_path_in_lpi_mode) + priv->hw->mac->set_eee_mode(priv->hw, + priv->plat->en_tx_lpi_clockgating); + } +@@ -359,14 +434,14 @@ static void stmmac_get_tx_hwtstamp(struc + return; + + /* check tx tstamp status */ +- if (!priv->hw->desc->get_tx_timestamp_status(p)) { ++ if (priv->hw->desc->get_tx_timestamp_status(p)) { + /* get the valid tstamp */ + ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); + + memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamp.hwtstamp = ns_to_ktime(ns); + +- netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns); ++ netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); + /* pass tstamp to stack */ + skb_tstamp_tx(skb, &shhwtstamp); + } +@@ -393,19 +468,19 @@ static void stmmac_get_rx_hwtstamp(struc + return; + + /* Check if timestamp is available */ +- if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { ++ if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { + /* For GMAC4, the valid timestamp is from CTX next desc. 
*/ + if (priv->plat->has_gmac4) + ns = priv->hw->desc->get_timestamp(np, priv->adv_ts); + else + ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); + +- netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns); ++ netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); + shhwtstamp = skb_hwtstamps(skb); + memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamp->hwtstamp = ns_to_ktime(ns); + } else { +- netdev_err(priv->dev, "cannot get RX hw timestamp\n"); ++ netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); + } + } + +@@ -471,7 +546,10 @@ static int stmmac_hwtstamp_ioctl(struct + /* PTP v1, UDP, any kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + /* take time stamp for all event messages */ +- snap_type_sel = PTP_TCR_SNAPTYPSEL_1; ++ if (priv->plat->has_gmac4) ++ snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; ++ else ++ snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; +@@ -503,7 +581,10 @@ static int stmmac_hwtstamp_ioctl(struct + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for all event messages */ +- snap_type_sel = PTP_TCR_SNAPTYPSEL_1; ++ if (priv->plat->has_gmac4) ++ snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; ++ else ++ snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; +@@ -537,7 +618,10 @@ static int stmmac_hwtstamp_ioctl(struct + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for all event messages */ +- snap_type_sel = PTP_TCR_SNAPTYPSEL_1; ++ if (priv->plat->has_gmac4) ++ snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; ++ else ++ snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; +@@ -673,6 +757,19 @@ static void stmmac_release_ptp(struct st + } + + /** ++ * stmmac_mac_flow_ctrl - Configure flow control in all queues ++ * @priv: driver private structure ++ * Description: It is used for configuring the flow control in all queues ++ */ ++static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) ++{ ++ u32 tx_cnt = priv->plat->tx_queues_to_use; ++ ++ priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl, ++ priv->pause, tx_cnt); ++} ++ ++/** + * stmmac_adjust_link - adjusts the link parameters + * @dev: net device structure + * Description: this is the helper called by the physical abstraction layer +@@ -687,7 +784,6 @@ static void stmmac_adjust_link(struct ne + struct phy_device *phydev = dev->phydev; + unsigned long flags; + int new_state = 0; +- unsigned int fc = priv->flow_ctrl, pause_time = priv->pause; + + if (!phydev) + return; +@@ -709,8 +805,7 @@ static void stmmac_adjust_link(struct ne + } + /* Flow Control operation */ + if (phydev->pause) +- priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex, +- fc, pause_time); ++ stmmac_mac_flow_ctrl(priv, phydev->duplex); + + if (phydev->speed != priv->speed) { + new_state = 1; +@@ -878,22 +973,56 @@ static int stmmac_init_phy(struct net_de + return 0; + } + +-static void stmmac_display_rings(struct stmmac_priv *priv) ++static void stmmac_display_rx_rings(struct stmmac_priv *priv) + { +- void *head_rx, *head_tx; ++ u32 rx_cnt = priv->plat->rx_queues_to_use; ++ void *head_rx; ++ u32 queue; + +- if (priv->extend_desc) { +- head_rx = (void *)priv->dma_erx; +- head_tx = (void *)priv->dma_etx; +- } else { +- head_rx = (void *)priv->dma_rx; 
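[Editor's aside, not part of the patch] The stmmac_tx_avail()/stmmac_rx_dirty() hunk earlier in this file keeps the driver's original circular-ring arithmetic and only redirects it from priv to the per-queue tx_q/rx_q state. Below is a minimal standalone model of that arithmetic, assuming the DMA_TX_SIZE of 512 that this kernel's stmmac.h uses; one slot stays reserved so a full ring can be told apart from an empty one:

	#include <assert.h>

	#define DMA_TX_SIZE 512	/* ring size used by the stmmac driver */

	/* mirrors stmmac_tx_avail(): free slots between consumer and producer */
	static unsigned int tx_avail(unsigned int dirty_tx, unsigned int cur_tx)
	{
		if (dirty_tx > cur_tx)	/* producer has already wrapped */
			return dirty_tx - cur_tx - 1;
		return DMA_TX_SIZE - cur_tx + dirty_tx - 1;
	}

	int main(void)
	{
		assert(tx_avail(0, 0) == 511);	/* empty ring: all but the reserved slot */
		assert(tx_avail(10, 500) == 21);	/* producer close to the wrap point */
		assert(tx_avail(500, 10) == 489);	/* producer wrapped, consumer catching up */
		return 0;
	}

stmmac_rx_dirty() is the mirror image: it counts occupied slots rather than free ones, so the two wrap cases swap and no slot is subtracted.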
+- head_tx = (void *)priv->dma_tx; ++ /* Display RX rings */ ++ for (queue = 0; queue < rx_cnt; queue++) { ++ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; ++ ++ pr_info("\tRX Queue %u rings\n", queue); ++ ++ if (priv->extend_desc) ++ head_rx = (void *)rx_q->dma_erx; ++ else ++ head_rx = (void *)rx_q->dma_rx; ++ ++ /* Display RX ring */ ++ priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true); ++ } ++} ++ ++static void stmmac_display_tx_rings(struct stmmac_priv *priv) ++{ ++ u32 tx_cnt = priv->plat->tx_queues_to_use; ++ void *head_tx; ++ u32 queue; ++ ++ /* Display TX rings */ ++ for (queue = 0; queue < tx_cnt; queue++) { ++ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; ++ ++ pr_info("\tTX Queue %d rings\n", queue); ++ ++ if (priv->extend_desc) ++ head_tx = (void *)tx_q->dma_etx; ++ else ++ head_tx = (void *)tx_q->dma_tx; ++ ++ priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false); + } ++} + +- /* Display Rx ring */ +- priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true); +- /* Display Tx ring */ +- priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false); ++static void stmmac_display_rings(struct stmmac_priv *priv) ++{ ++ /* Display RX ring */ ++ stmmac_display_rx_rings(priv); ++ ++ /* Display TX ring */ ++ stmmac_display_tx_rings(priv); + } + + static int stmmac_set_bfsize(int mtu, int bufsize) +@@ -913,48 +1042,88 @@ static int stmmac_set_bfsize(int mtu, in + } + + /** +- * stmmac_clear_descriptors - clear descriptors ++ * stmmac_clear_rx_descriptors - clear RX descriptors + * @priv: driver private structure +- * Description: this function is called to clear the tx and rx descriptors ++ * @queue: RX queue index ++ * Description: this function is called to clear the RX descriptors + * in case of both basic and extended descriptors are used. + */ +-static void stmmac_clear_descriptors(struct stmmac_priv *priv) ++static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) + { ++ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + int i; + +- /* Clear the Rx/Tx descriptors */ ++ /* Clear the RX descriptors */ + for (i = 0; i < DMA_RX_SIZE; i++) + if (priv->extend_desc) +- priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic, ++ priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic, + priv->use_riwt, priv->mode, + (i == DMA_RX_SIZE - 1)); + else +- priv->hw->desc->init_rx_desc(&priv->dma_rx[i], ++ priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i], + priv->use_riwt, priv->mode, + (i == DMA_RX_SIZE - 1)); ++} ++ ++/** ++ * stmmac_clear_tx_descriptors - clear tx descriptors ++ * @priv: driver private structure ++ * @queue: TX queue index. ++ * Description: this function is called to clear the TX descriptors ++ * in case of both basic and extended descriptors are used. ++ */ ++static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue) ++{ ++ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; ++ int i; ++ ++ /* Clear the TX descriptors */ + for (i = 0; i < DMA_TX_SIZE; i++) + if (priv->extend_desc) +- priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic, ++ priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic, + priv->mode, + (i == DMA_TX_SIZE - 1)); + else +- priv->hw->desc->init_tx_desc(&priv->dma_tx[i], ++ priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i], + priv->mode, + (i == DMA_TX_SIZE - 1)); + } + + /** ++ * stmmac_clear_descriptors - clear descriptors ++ * @priv: driver private structure ++ * Description: this function is called to clear the TX and RX descriptors ++ * in case of both basic and extended descriptors are used. 
++ */ ++static void stmmac_clear_descriptors(struct stmmac_priv *priv) ++{ ++ u32 rx_queue_cnt = priv->plat->rx_queues_to_use; ++ u32 tx_queue_cnt = priv->plat->tx_queues_to_use; ++ u32 queue; ++ ++ /* Clear the RX descriptors */ ++ for (queue = 0; queue < rx_queue_cnt; queue++) ++ stmmac_clear_rx_descriptors(priv, queue); ++ ++ /* Clear the TX descriptors */ ++ for (queue = 0; queue < tx_queue_cnt; queue++) ++ stmmac_clear_tx_descriptors(priv, queue); ++} ++ ++/** + * stmmac_init_rx_buffers - init the RX descriptor buffer. + * @priv: driver private structure + * @p: descriptor pointer + * @i: descriptor index +- * @flags: gfp flag. ++ * @flags: gfp flag ++ * @queue: RX queue index + * Description: this function is called to allocate a receive buffer, perform + * the DMA mapping and init the descriptor. + */ + static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, +- int i, gfp_t flags) ++ int i, gfp_t flags, u32 queue) + { ++ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct sk_buff *skb; + + skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags); +@@ -963,20 +1132,20 @@ static int stmmac_init_rx_buffers(struct + "%s: Rx init fails; skb is NULL\n", __func__); + return -ENOMEM; + } +- priv->rx_skbuff[i] = skb; +- priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, ++ rx_q->rx_skbuff[i] = skb; ++ rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, + priv->dma_buf_sz, + DMA_FROM_DEVICE); +- if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) { ++ if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) { + netdev_err(priv->dev, "%s: DMA mapping error\n", __func__); + dev_kfree_skb_any(skb); + return -EINVAL; + } + + if (priv->synopsys_id >= DWMAC_CORE_4_00) +- p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]); ++ p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]); + else +- p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]); ++ p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]); + + if ((priv->hw->mode->init_desc3) && + (priv->dma_buf_sz == BUF_SIZE_16KiB)) +@@ -985,30 +1154,71 @@ static int stmmac_init_rx_buffers(struct + return 0; + } + +-static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i) ++/** ++ * stmmac_free_rx_buffer - free RX dma buffers ++ * @priv: private structure ++ * @queue: RX queue index ++ * @i: buffer index. ++ */ ++static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i) + { +- if (priv->rx_skbuff[i]) { +- dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], ++ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; ++ ++ if (rx_q->rx_skbuff[i]) { ++ dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i], + priv->dma_buf_sz, DMA_FROM_DEVICE); +- dev_kfree_skb_any(priv->rx_skbuff[i]); ++ dev_kfree_skb_any(rx_q->rx_skbuff[i]); + } +- priv->rx_skbuff[i] = NULL; ++ rx_q->rx_skbuff[i] = NULL; + } + + /** +- * init_dma_desc_rings - init the RX/TX descriptor rings ++ * stmmac_free_tx_buffer - free TX dma buffers ++ * @priv: private structure ++ * @queue: TX queue index ++ * @i: buffer index.
++ */ ++static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i) ++{ ++ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; ++ ++ if (tx_q->tx_skbuff_dma[i].buf) { ++ if (tx_q->tx_skbuff_dma[i].map_as_page) ++ dma_unmap_page(priv->device, ++ tx_q->tx_skbuff_dma[i].buf, ++ tx_q->tx_skbuff_dma[i].len, ++ DMA_TO_DEVICE); ++ else ++ dma_unmap_single(priv->device, ++ tx_q->tx_skbuff_dma[i].buf, ++ tx_q->tx_skbuff_dma[i].len, ++ DMA_TO_DEVICE); ++ } ++ ++ if (tx_q->tx_skbuff[i]) { ++ dev_kfree_skb_any(tx_q->tx_skbuff[i]); ++ tx_q->tx_skbuff[i] = NULL; ++ tx_q->tx_skbuff_dma[i].buf = 0; ++ tx_q->tx_skbuff_dma[i].map_as_page = false; ++ } ++} ++ ++/** ++ * init_dma_rx_desc_rings - init the RX descriptor rings + * @dev: net device structure + * @flags: gfp flag. +- * Description: this function initializes the DMA RX/TX descriptors ++ * Description: this function initializes the DMA RX descriptors + * and allocates the socket buffers. It supports the chained and ring + * modes. + */ +-static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) ++static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) + { +- int i; + struct stmmac_priv *priv = netdev_priv(dev); ++ u32 rx_count = priv->plat->rx_queues_to_use; + unsigned int bfsize = 0; + int ret = -ENOMEM; ++ int queue; ++ int i; + + if (priv->hw->mode->set_16kib_bfsize) + bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu); +@@ -1018,257 +1228,516 @@ static int init_dma_desc_rings(struct ne + + priv->dma_buf_sz = bfsize; + +- netif_dbg(priv, probe, priv->dev, +- "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", +- __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy); +- + /* RX INITIALIZATION */ + netif_dbg(priv, probe, priv->dev, + "SKB addresses:\nskb\t\tskb data\tdma data\n"); + +- for (i = 0; i < DMA_RX_SIZE; i++) { +- struct dma_desc *p; +- if (priv->extend_desc) +- p = &((priv->dma_erx + i)->basic); +- else +- p = priv->dma_rx + i; ++ for (queue = 0; queue < rx_count; queue++) { ++ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; ++ ++ netif_dbg(priv, probe, priv->dev, ++ "(%s) dma_rx_phy=0x%08x\n", __func__, ++ (u32)rx_q->dma_rx_phy); ++ ++ for (i = 0; i < DMA_RX_SIZE; i++) { ++ struct dma_desc *p; ++ ++ if (priv->extend_desc) ++ p = &((rx_q->dma_erx + i)->basic); ++ else ++ p = rx_q->dma_rx + i; + +- ret = stmmac_init_rx_buffers(priv, p, i, flags); +- if (ret) +- goto err_init_rx_buffers; ++ ret = stmmac_init_rx_buffers(priv, p, i, flags, ++ queue); ++ if (ret) ++ goto err_init_rx_buffers; ++ ++ netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n", ++ rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data, ++ (unsigned int)rx_q->rx_skbuff_dma[i]); ++ } ++ ++ rx_q->cur_rx = 0; ++ rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE); ++ ++ stmmac_clear_rx_descriptors(priv, queue); + +- netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n", +- priv->rx_skbuff[i], priv->rx_skbuff[i]->data, +- (unsigned int)priv->rx_skbuff_dma[i]); ++ /* Setup the chained descriptor addresses */ ++ if (priv->mode == STMMAC_CHAIN_MODE) { ++ if (priv->extend_desc) ++ priv->hw->mode->init(rx_q->dma_erx, ++ rx_q->dma_rx_phy, ++ DMA_RX_SIZE, 1); ++ else ++ priv->hw->mode->init(rx_q->dma_rx, ++ rx_q->dma_rx_phy, ++ DMA_RX_SIZE, 0); ++ } + } +- priv->cur_rx = 0; +- priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE); ++ + buf_sz = bfsize; + +- /* Setup the chained descriptor addresses */ +- if (priv->mode == STMMAC_CHAIN_MODE) { +- if (priv->extend_desc) { +- priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy, +- DMA_RX_SIZE, 1); 
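[Editor's aside, not part of the patch] From stmmac_tx_avail() onward, every hunk replaces the old single-queue fields (priv->dma_rx, priv->cur_tx, priv->tx_skbuff, ...) with per-queue containers indexed via priv->rx_queue[queue] and priv->tx_queue[queue]. As a reading aid, here is the approximate shape of those containers, inferred only from the fields these hunks touch; the authoritative definitions belong to the patch's stmmac.h changes (not shown in this excerpt) and may carry further members:

	struct stmmac_rx_queue {
		u32 queue_index;			/* RX queue number */
		struct stmmac_priv *priv_data;		/* back-pointer to driver state */
		struct dma_extended_desc *dma_erx;	/* extended-descriptor ring */
		struct dma_desc *dma_rx;		/* basic-descriptor ring */
		struct sk_buff **rx_skbuff;		/* one skb per descriptor */
		dma_addr_t *rx_skbuff_dma;		/* one DMA handle per descriptor */
		dma_addr_t dma_rx_phy;			/* physical base of the ring */
		u32 rx_tail_addr;			/* tail pointer (GMAC4 DMA) */
		unsigned int cur_rx;			/* producer/consumer indices */
		unsigned int dirty_rx;
		struct napi_struct napi;		/* one NAPI context per RX queue */
	};

	struct stmmac_tx_queue {
		u32 queue_index;
		struct stmmac_priv *priv_data;
		struct dma_extended_desc *dma_etx;
		struct dma_desc *dma_tx;
		struct sk_buff **tx_skbuff;
		struct stmmac_tx_info *tx_skbuff_dma;	/* .buf, .len, .map_as_page, ... */
		dma_addr_t dma_tx_phy;
		u32 tx_tail_addr;
		unsigned int cur_tx;
		unsigned int dirty_tx;
	};

With the state bundled per queue, helpers such as stmmac_clear_rx_descriptors(priv, queue) are the old single-queue bodies re-pointed at &priv->rx_queue[queue], which is why most hunks read as mechanical priv-to-rx_q/tx_q renames.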
+- priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy, +- DMA_TX_SIZE, 1); +- } else { +- priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy, +- DMA_RX_SIZE, 0); +- priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy, +- DMA_TX_SIZE, 0); +- } ++ return 0; ++ ++err_init_rx_buffers: ++ while (queue >= 0) { ++ while (--i >= 0) ++ stmmac_free_rx_buffer(priv, queue, i); ++ ++ if (queue == 0) ++ break; ++ ++ i = DMA_RX_SIZE; ++ queue--; + } + +- /* TX INITIALIZATION */ +- for (i = 0; i < DMA_TX_SIZE; i++) { +- struct dma_desc *p; +- if (priv->extend_desc) +- p = &((priv->dma_etx + i)->basic); +- else +- p = priv->dma_tx + i; ++ return ret; ++} + +- if (priv->synopsys_id >= DWMAC_CORE_4_00) { +- p->des0 = 0; +- p->des1 = 0; +- p->des2 = 0; +- p->des3 = 0; +- } else { +- p->des2 = 0; ++/** ++ * init_dma_tx_desc_rings - init the TX descriptor rings ++ * @dev: net device structure. ++ * Description: this function initializes the DMA TX descriptors ++ * and allocates the socket buffers. It supports the chained and ring ++ * modes. ++ */ ++static int init_dma_tx_desc_rings(struct net_device *dev) ++{ ++ struct stmmac_priv *priv = netdev_priv(dev); ++ u32 tx_queue_cnt = priv->plat->tx_queues_to_use; ++ u32 queue; ++ int i; ++ ++ for (queue = 0; queue < tx_queue_cnt; queue++) { ++ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; ++ ++ netif_dbg(priv, probe, priv->dev, ++ "(%s) dma_tx_phy=0x%08x\n", __func__, ++ (u32)tx_q->dma_tx_phy); ++ ++ /* Setup the chained descriptor addresses */ ++ if (priv->mode == STMMAC_CHAIN_MODE) { ++ if (priv->extend_desc) ++ priv->hw->mode->init(tx_q->dma_etx, ++ tx_q->dma_tx_phy, ++ DMA_TX_SIZE, 1); ++ else ++ priv->hw->mode->init(tx_q->dma_tx, ++ tx_q->dma_tx_phy, ++ DMA_TX_SIZE, 0); + } + +- priv->tx_skbuff_dma[i].buf = 0; +- priv->tx_skbuff_dma[i].map_as_page = false; +- priv->tx_skbuff_dma[i].len = 0; +- priv->tx_skbuff_dma[i].last_segment = false; +- priv->tx_skbuff[i] = NULL; ++ for (i = 0; i < DMA_TX_SIZE; i++) { ++ struct dma_desc *p; ++ if (priv->extend_desc) ++ p = &((tx_q->dma_etx + i)->basic); ++ else ++ p = tx_q->dma_tx + i; ++ ++ if (priv->synopsys_id >= DWMAC_CORE_4_00) { ++ p->des0 = 0; ++ p->des1 = 0; ++ p->des2 = 0; ++ p->des3 = 0; ++ } else { ++ p->des2 = 0; ++ } ++ ++ tx_q->tx_skbuff_dma[i].buf = 0; ++ tx_q->tx_skbuff_dma[i].map_as_page = false; ++ tx_q->tx_skbuff_dma[i].len = 0; ++ tx_q->tx_skbuff_dma[i].last_segment = false; ++ tx_q->tx_skbuff[i] = NULL; ++ } ++ ++ tx_q->dirty_tx = 0; ++ tx_q->cur_tx = 0; ++ ++ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); + } + +- priv->dirty_tx = 0; +- priv->cur_tx = 0; +- netdev_reset_queue(priv->dev); ++ return 0; ++} ++ ++/** ++ * init_dma_desc_rings - init the RX/TX descriptor rings ++ * @dev: net device structure ++ * @flags: gfp flag. ++ * Description: this function initializes the DMA RX/TX descriptors ++ * and allocates the socket buffers. It supports the chained and ring ++ * modes. 
++ */ ++static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) ++{ ++ struct stmmac_priv *priv = netdev_priv(dev); ++ int ret; ++ ++ ret = init_dma_rx_desc_rings(dev, flags); ++ if (ret) ++ return ret; ++ ++ ret = init_dma_tx_desc_rings(dev); + + stmmac_clear_descriptors(priv); + + if (netif_msg_hw(priv)) + stmmac_display_rings(priv); + +- return 0; +-err_init_rx_buffers: +- while (--i >= 0) +- stmmac_free_rx_buffers(priv, i); + return ret; + } + +-static void dma_free_rx_skbufs(struct stmmac_priv *priv) ++/** ++ * dma_free_rx_skbufs - free RX dma buffers ++ * @priv: private structure ++ * @queue: RX queue index ++ */ ++static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue) + { + int i; + + for (i = 0; i < DMA_RX_SIZE; i++) +- stmmac_free_rx_buffers(priv, i); ++ stmmac_free_rx_buffer(priv, queue, i); + } + +-static void dma_free_tx_skbufs(struct stmmac_priv *priv) ++/** ++ * dma_free_tx_skbufs - free TX dma buffers ++ * @priv: private structure ++ * @queue: TX queue index ++ */ ++static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) + { + int i; + +- for (i = 0; i < DMA_TX_SIZE; i++) { +- if (priv->tx_skbuff_dma[i].buf) { +- if (priv->tx_skbuff_dma[i].map_as_page) +- dma_unmap_page(priv->device, +- priv->tx_skbuff_dma[i].buf, +- priv->tx_skbuff_dma[i].len, +- DMA_TO_DEVICE); +- else +- dma_unmap_single(priv->device, +- priv->tx_skbuff_dma[i].buf, +- priv->tx_skbuff_dma[i].len, +- DMA_TO_DEVICE); ++ for (i = 0; i < DMA_TX_SIZE; i++) ++ stmmac_free_tx_buffer(priv, queue, i); ++} ++ ++/** ++ * free_dma_rx_desc_resources - free RX dma desc resources ++ * @priv: private structure ++ */ ++static void free_dma_rx_desc_resources(struct stmmac_priv *priv) ++{ ++ u32 rx_count = priv->plat->rx_queues_to_use; ++ u32 queue; ++ ++ /* Free RX queue resources */ ++ for (queue = 0; queue < rx_count; queue++) { ++ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; ++ ++ /* Release the DMA RX socket buffers */ ++ dma_free_rx_skbufs(priv, queue); ++ ++ /* Free DMA regions of consistent memory previously allocated */ ++ if (!priv->extend_desc) ++ dma_free_coherent(priv->device, ++ DMA_RX_SIZE * sizeof(struct dma_desc), ++ rx_q->dma_rx, rx_q->dma_rx_phy); ++ else ++ dma_free_coherent(priv->device, DMA_RX_SIZE * ++ sizeof(struct dma_extended_desc), ++ rx_q->dma_erx, rx_q->dma_rx_phy); ++ ++ kfree(rx_q->rx_skbuff_dma); ++ kfree(rx_q->rx_skbuff); ++ } ++} ++ ++/** ++ * free_dma_tx_desc_resources - free TX dma desc resources ++ * @priv: private structure ++ */ ++static void free_dma_tx_desc_resources(struct stmmac_priv *priv) ++{ ++ u32 tx_count = priv->plat->tx_queues_to_use; ++ u32 queue = 0; ++ ++ /* Free TX queue resources */ ++ for (queue = 0; queue < tx_count; queue++) { ++ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; ++ ++ /* Release the DMA TX socket buffers */ ++ dma_free_tx_skbufs(priv, queue); ++ ++ /* Free DMA regions of consistent memory previously allocated */ ++ if (!priv->extend_desc) ++ dma_free_coherent(priv->device, ++ DMA_TX_SIZE * sizeof(struct dma_desc), ++ tx_q->dma_tx, tx_q->dma_tx_phy); ++ else ++ dma_free_coherent(priv->device, DMA_TX_SIZE * ++ sizeof(struct dma_extended_desc), ++ tx_q->dma_etx, tx_q->dma_tx_phy); ++ ++ kfree(tx_q->tx_skbuff_dma); ++ kfree(tx_q->tx_skbuff); ++ } ++} ++ ++/** ++ * alloc_dma_rx_desc_resources - alloc RX resources. ++ * @priv: private structure ++ * Description: according to which descriptor can be used (extend or basic) ++ * this function allocates the RX resources. In case of ++ * reception, for example, it pre-allocates the RX socket buffers in order to ++ * allow the zero-copy mechanism. ++ */ ++static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) ++{ ++ u32 rx_count = priv->plat->rx_queues_to_use; ++ int ret = -ENOMEM; ++ u32 queue; ++ ++ /* RX queues buffers and DMA */ ++ for (queue = 0; queue < rx_count; queue++) { ++ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; ++ ++ rx_q->queue_index = queue; ++ rx_q->priv_data = priv; ++ ++ rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, ++ sizeof(dma_addr_t), ++ GFP_KERNEL); ++ if (!rx_q->rx_skbuff_dma) ++ return -ENOMEM; ++ ++ rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE, ++ sizeof(struct sk_buff *), ++ GFP_KERNEL); ++ if (!rx_q->rx_skbuff) ++ goto err_dma; ++ ++ if (priv->extend_desc) { ++ rx_q->dma_erx = dma_zalloc_coherent(priv->device, ++ DMA_RX_SIZE * ++ sizeof(struct ++ dma_extended_desc), ++ &rx_q->dma_rx_phy, ++ GFP_KERNEL); ++ if (!rx_q->dma_erx) ++ goto err_dma; ++ ++ } else { ++ rx_q->dma_rx = dma_zalloc_coherent(priv->device, ++ DMA_RX_SIZE * ++ sizeof(struct ++ dma_desc), ++ &rx_q->dma_rx_phy, ++ GFP_KERNEL); ++ if (!rx_q->dma_rx) ++ goto err_dma; ++ } ++ } ++ ++ return 0; ++ ++err_dma: ++ free_dma_rx_desc_resources(priv); ++ ++ return ret; ++} ++ ++/** ++ * alloc_dma_tx_desc_resources - alloc TX resources. ++ * @priv: private structure ++ * Description: according to which descriptor can be used (extend or basic) ++ * this function allocates the TX resources. ++ */ ++static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) ++{ ++ u32 tx_count = priv->plat->tx_queues_to_use; ++ int ret = -ENOMEM; ++ u32 queue; ++ ++ /* TX queues buffers and DMA */ ++ for (queue = 0; queue < tx_count; queue++) { ++ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; ++ ++ tx_q->queue_index = queue; ++ tx_q->priv_data = priv; ++ ++ tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, ++ sizeof(*tx_q->tx_skbuff_dma), ++ GFP_KERNEL); ++ if (!tx_q->tx_skbuff_dma) ++ return -ENOMEM; ++ ++ tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE, ++ sizeof(struct sk_buff *), ++ GFP_KERNEL); ++ if (!tx_q->tx_skbuff) ++ goto err_dma_buffers; ++ ++ if (priv->extend_desc) { ++ tx_q->dma_etx = dma_zalloc_coherent(priv->device, ++ DMA_TX_SIZE * ++ sizeof(struct ++ dma_extended_desc), ++ &tx_q->dma_tx_phy, ++ GFP_KERNEL); ++ if (!tx_q->dma_etx) ++ goto err_dma_buffers; ++ } else { ++ tx_q->dma_tx = dma_zalloc_coherent(priv->device, ++ DMA_TX_SIZE * ++ sizeof(struct ++ dma_desc), ++ &tx_q->dma_tx_phy, ++ GFP_KERNEL); ++ if (!tx_q->dma_tx) ++ goto err_dma_buffers; + } ++ } ++ ++ return 0; ++ ++err_dma_buffers: ++ free_dma_tx_desc_resources(priv); ++ ++ return ret; ++} ++ ++/** ++ * alloc_dma_desc_resources - alloc TX/RX resources. ++ * @priv: private structure ++ * Description: according to which descriptor can be used (extend or basic) ++ * this function allocates the resources for TX and RX paths. In case of ++ * reception, for example, it pre-allocates the RX socket buffers in order to ++ * allow the zero-copy mechanism.
++ */ ++static int alloc_dma_desc_resources(struct stmmac_priv *priv) ++{ ++ /* RX Allocation */ ++ int ret = alloc_dma_rx_desc_resources(priv); ++ ++ if (ret) ++ return ret; ++ ++ ret = alloc_dma_tx_desc_resources(priv); ++ ++ return ret; ++} ++ ++/** ++ * free_dma_desc_resources - free dma desc resources ++ * @priv: private structure ++ */ ++static void free_dma_desc_resources(struct stmmac_priv *priv) ++{ ++ /* Release the DMA RX socket buffers */ ++ free_dma_rx_desc_resources(priv); ++ ++ /* Release the DMA TX socket buffers */ ++ free_dma_tx_desc_resources(priv); ++} ++ ++/** ++ * stmmac_mac_enable_rx_queues - Enable MAC rx queues ++ * @priv: driver private structure ++ * Description: It is used for enabling the rx queues in the MAC ++ */ ++static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) ++{ ++ u32 rx_queues_count = priv->plat->rx_queues_to_use; ++ int queue; ++ u8 mode; + +- if (priv->tx_skbuff[i]) { +- dev_kfree_skb_any(priv->tx_skbuff[i]); +- priv->tx_skbuff[i] = NULL; +- priv->tx_skbuff_dma[i].buf = 0; +- priv->tx_skbuff_dma[i].map_as_page = false; +- } ++ for (queue = 0; queue < rx_queues_count; queue++) { ++ mode = priv->plat->rx_queues_cfg[queue].mode_to_use; ++ priv->hw->mac->rx_queue_enable(priv->hw, mode, queue); + } + } + + /** +- * alloc_dma_desc_resources - alloc TX/RX resources. +- * @priv: private structure +- * Description: according to which descriptor can be used (extend or basic) +- * this function allocates the resources for TX and RX paths. In case of +- * reception, for example, it pre-allocated the RX socket buffer in order to +- * allow zero-copy mechanism. ++ * stmmac_start_rx_dma - start RX DMA channel ++ * @priv: driver private structure ++ * @chan: RX channel index ++ * Description: ++ * This starts a RX DMA channel + */ +-static int alloc_dma_desc_resources(struct stmmac_priv *priv) ++static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) + { +- int ret = -ENOMEM; +- +- priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t), +- GFP_KERNEL); +- if (!priv->rx_skbuff_dma) +- return -ENOMEM; +- +- priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *), +- GFP_KERNEL); +- if (!priv->rx_skbuff) +- goto err_rx_skbuff; +- +- priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, +- sizeof(*priv->tx_skbuff_dma), +- GFP_KERNEL); +- if (!priv->tx_skbuff_dma) +- goto err_tx_skbuff_dma; +- +- priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *), +- GFP_KERNEL); +- if (!priv->tx_skbuff) +- goto err_tx_skbuff; +- +- if (priv->extend_desc) { +- priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * +- sizeof(struct +- dma_extended_desc), +- &priv->dma_rx_phy, +- GFP_KERNEL); +- if (!priv->dma_erx) +- goto err_dma; +- +- priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * +- sizeof(struct +- dma_extended_desc), +- &priv->dma_tx_phy, +- GFP_KERNEL); +- if (!priv->dma_etx) { +- dma_free_coherent(priv->device, DMA_RX_SIZE * +- sizeof(struct dma_extended_desc), +- priv->dma_erx, priv->dma_rx_phy); +- goto err_dma; +- } +- } else { +- priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * +- sizeof(struct dma_desc), +- &priv->dma_rx_phy, +- GFP_KERNEL); +- if (!priv->dma_rx) +- goto err_dma; ++ netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); ++ priv->hw->dma->start_rx(priv->ioaddr, chan); ++} + +- priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * +- sizeof(struct dma_desc), +- &priv->dma_tx_phy, +- GFP_KERNEL); +- if (!priv->dma_tx) { +- 
dma_free_coherent(priv->device, DMA_RX_SIZE * +- sizeof(struct dma_desc), +- priv->dma_rx, priv->dma_rx_phy); +- goto err_dma; +- } +- } ++/** ++ * stmmac_start_tx_dma - start TX DMA channel ++ * @priv: driver private structure ++ * @chan: TX channel index ++ * Description: ++ * This starts a TX DMA channel ++ */ ++static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) ++{ ++ netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); ++ priv->hw->dma->start_tx(priv->ioaddr, chan); ++} + +- return 0; ++/** ++ * stmmac_stop_rx_dma - stop RX DMA channel ++ * @priv: driver private structure ++ * @chan: RX channel index ++ * Description: ++ * This stops a RX DMA channel ++ */ ++static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) ++{ ++ netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); ++ priv->hw->dma->stop_rx(priv->ioaddr, chan); ++} + +-err_dma: +- kfree(priv->tx_skbuff); +-err_tx_skbuff: +- kfree(priv->tx_skbuff_dma); +-err_tx_skbuff_dma: +- kfree(priv->rx_skbuff); +-err_rx_skbuff: +- kfree(priv->rx_skbuff_dma); +- return ret; ++/** ++ * stmmac_stop_tx_dma - stop TX DMA channel ++ * @priv: driver private structure ++ * @chan: TX channel index ++ * Description: ++ * This stops a TX DMA channel ++ */ ++static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) ++{ ++ netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); ++ priv->hw->dma->stop_tx(priv->ioaddr, chan); + } + +-static void free_dma_desc_resources(struct stmmac_priv *priv) ++/** ++ * stmmac_start_all_dma - start all RX and TX DMA channels ++ * @priv: driver private structure ++ * Description: ++ * This starts all the RX and TX DMA channels ++ */ ++static void stmmac_start_all_dma(struct stmmac_priv *priv) + { +- /* Release the DMA TX/RX socket buffers */ +- dma_free_rx_skbufs(priv); +- dma_free_tx_skbufs(priv); +- +- /* Free DMA regions of consistent memory previously allocated */ +- if (!priv->extend_desc) { +- dma_free_coherent(priv->device, +- DMA_TX_SIZE * sizeof(struct dma_desc), +- priv->dma_tx, priv->dma_tx_phy); +- dma_free_coherent(priv->device, +- DMA_RX_SIZE * sizeof(struct dma_desc), +- priv->dma_rx, priv->dma_rx_phy); +- } else { +- dma_free_coherent(priv->device, DMA_TX_SIZE * +- sizeof(struct dma_extended_desc), +- priv->dma_etx, priv->dma_tx_phy); +- dma_free_coherent(priv->device, DMA_RX_SIZE * +- sizeof(struct dma_extended_desc), +- priv->dma_erx, priv->dma_rx_phy); +- } +- kfree(priv->rx_skbuff_dma); +- kfree(priv->rx_skbuff); +- kfree(priv->tx_skbuff_dma); +- kfree(priv->tx_skbuff); ++ u32 rx_channels_count = priv->plat->rx_queues_to_use; ++ u32 tx_channels_count = priv->plat->tx_queues_to_use; ++ u32 chan = 0; ++ ++ for (chan = 0; chan < rx_channels_count; chan++) ++ stmmac_start_rx_dma(priv, chan); ++ ++ for (chan = 0; chan < tx_channels_count; chan++) ++ stmmac_start_tx_dma(priv, chan); + } + + /** +- * stmmac_mac_enable_rx_queues - Enable MAC rx queues +- * @priv: driver private structure +- * Description: It is used for enabling the rx queues in the MAC ++ * stmmac_stop_all_dma - stop all RX and TX DMA channels ++ * @priv: driver private structure ++ * Description: ++ * This stops the RX and TX DMA channels + */ +-static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) ++static void stmmac_stop_all_dma(struct stmmac_priv *priv) + { +- int rx_count = priv->dma_cap.number_rx_queues; +- int queue = 0; ++ u32 rx_channels_count = priv->plat->rx_queues_to_use; ++ u32 tx_channels_count = 
priv->plat->tx_queues_to_use; ++ u32 chan = 0; + +- /* If GMAC does not have multiple queues, then this is not necessary*/ +- if (rx_count == 1) +- return; ++ for (chan = 0; chan < rx_channels_count; chan++) ++ stmmac_stop_rx_dma(priv, chan); + +- /** +- * If the core is synthesized with multiple rx queues / multiple +- * dma channels, then rx queues will be disabled by default. +- * For now only rx queue 0 is enabled. +- */ +- priv->hw->mac->rx_queue_enable(priv->hw, queue); ++ for (chan = 0; chan < tx_channels_count; chan++) ++ stmmac_stop_tx_dma(priv, chan); + } + + /** +@@ -1279,11 +1748,20 @@ static void stmmac_mac_enable_rx_queues( + */ + static void stmmac_dma_operation_mode(struct stmmac_priv *priv) + { ++ u32 rx_channels_count = priv->plat->rx_queues_to_use; ++ u32 tx_channels_count = priv->plat->tx_queues_to_use; + int rxfifosz = priv->plat->rx_fifo_size; +- +- if (priv->plat->force_thresh_dma_mode) +- priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz); +- else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { ++ u32 txmode = 0; ++ u32 rxmode = 0; ++ u32 chan = 0; ++ ++ if (rxfifosz == 0) ++ rxfifosz = priv->dma_cap.rx_fifo_size; ++ ++ if (priv->plat->force_thresh_dma_mode) { ++ txmode = tc; ++ rxmode = tc; ++ } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { + /* + * In case of GMAC, SF mode can be enabled + * to perform the TX COE in HW. This depends on: +@@ -1291,37 +1769,53 @@ static void stmmac_dma_operation_mode(st + * 2) There is no bugged Jumbo frame support + * that needs to not insert csum in the TDES. + */ +- priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE, +- rxfifosz); ++ txmode = SF_DMA_MODE; ++ rxmode = SF_DMA_MODE; + priv->xstats.threshold = SF_DMA_MODE; +- } else +- priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE, ++ } else { ++ txmode = tc; ++ rxmode = SF_DMA_MODE; ++ } ++ ++ /* configure all channels */ ++ if (priv->synopsys_id >= DWMAC_CORE_4_00) { ++ for (chan = 0; chan < rx_channels_count; chan++) ++ priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, ++ rxfifosz); ++ ++ for (chan = 0; chan < tx_channels_count; chan++) ++ priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan); ++ } else { ++ priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, + rxfifosz); ++ } + } + + /** + * stmmac_tx_clean - to manage the transmission completion + * @priv: driver private structure ++ * @queue: TX queue index + * Description: it reclaims the transmit resources after transmission completes. 
+ */ +-static void stmmac_tx_clean(struct stmmac_priv *priv) ++static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) + { ++ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + unsigned int bytes_compl = 0, pkts_compl = 0; +- unsigned int entry = priv->dirty_tx; ++ unsigned int entry = tx_q->dirty_tx; + + netif_tx_lock(priv->dev); + + priv->xstats.tx_clean++; + +- while (entry != priv->cur_tx) { +- struct sk_buff *skb = priv->tx_skbuff[entry]; ++ while (entry != tx_q->cur_tx) { ++ struct sk_buff *skb = tx_q->tx_skbuff[entry]; + struct dma_desc *p; + int status; + + if (priv->extend_desc) +- p = (struct dma_desc *)(priv->dma_etx + entry); ++ p = (struct dma_desc *)(tx_q->dma_etx + entry); + else +- p = priv->dma_tx + entry; ++ p = tx_q->dma_tx + entry; + + status = priv->hw->desc->tx_status(&priv->dev->stats, + &priv->xstats, p, +@@ -1342,48 +1836,51 @@ static void stmmac_tx_clean(struct stmma + stmmac_get_tx_hwtstamp(priv, p, skb); + } + +- if (likely(priv->tx_skbuff_dma[entry].buf)) { +- if (priv->tx_skbuff_dma[entry].map_as_page) ++ if (likely(tx_q->tx_skbuff_dma[entry].buf)) { ++ if (tx_q->tx_skbuff_dma[entry].map_as_page) + dma_unmap_page(priv->device, +- priv->tx_skbuff_dma[entry].buf, +- priv->tx_skbuff_dma[entry].len, ++ tx_q->tx_skbuff_dma[entry].buf, ++ tx_q->tx_skbuff_dma[entry].len, + DMA_TO_DEVICE); + else + dma_unmap_single(priv->device, +- priv->tx_skbuff_dma[entry].buf, +- priv->tx_skbuff_dma[entry].len, ++ tx_q->tx_skbuff_dma[entry].buf, ++ tx_q->tx_skbuff_dma[entry].len, + DMA_TO_DEVICE); +- priv->tx_skbuff_dma[entry].buf = 0; +- priv->tx_skbuff_dma[entry].len = 0; +- priv->tx_skbuff_dma[entry].map_as_page = false; ++ tx_q->tx_skbuff_dma[entry].buf = 0; ++ tx_q->tx_skbuff_dma[entry].len = 0; ++ tx_q->tx_skbuff_dma[entry].map_as_page = false; + } + + if (priv->hw->mode->clean_desc3) +- priv->hw->mode->clean_desc3(priv, p); ++ priv->hw->mode->clean_desc3(tx_q, p); + +- priv->tx_skbuff_dma[entry].last_segment = false; +- priv->tx_skbuff_dma[entry].is_jumbo = false; ++ tx_q->tx_skbuff_dma[entry].last_segment = false; ++ tx_q->tx_skbuff_dma[entry].is_jumbo = false; + + if (likely(skb != NULL)) { + pkts_compl++; + bytes_compl += skb->len; + dev_consume_skb_any(skb); +- priv->tx_skbuff[entry] = NULL; ++ tx_q->tx_skbuff[entry] = NULL; + } + + priv->hw->desc->release_tx_desc(p, priv->mode); + + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); + } +- priv->dirty_tx = entry; ++ tx_q->dirty_tx = entry; ++ ++ netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), ++ pkts_compl, bytes_compl); + +- netdev_completed_queue(priv->dev, pkts_compl, bytes_compl); ++ if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, ++ queue))) && ++ stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) { + +- if (unlikely(netif_queue_stopped(priv->dev) && +- stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) { + netif_dbg(priv, tx_done, priv->dev, + "%s: restart transmit\n", __func__); +- netif_wake_queue(priv->dev); ++ netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); + } + + if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { +@@ -1393,45 +1890,76 @@ static void stmmac_tx_clean(struct stmma + netif_tx_unlock(priv->dev); + } + +-static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv) ++static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan) + { +- priv->hw->dma->enable_dma_irq(priv->ioaddr); ++ priv->hw->dma->enable_dma_irq(priv->ioaddr, chan); + } + +-static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv) ++static inline 
void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan) + { +- priv->hw->dma->disable_dma_irq(priv->ioaddr); ++ priv->hw->dma->disable_dma_irq(priv->ioaddr, chan); + } + + /** + * stmmac_tx_err - to manage the tx error + * @priv: driver private structure ++ * @chan: channel index + * Description: it cleans the descriptors and restarts the transmission + * in case of transmission errors. + */ +-static void stmmac_tx_err(struct stmmac_priv *priv) ++static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) + { ++ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; + int i; +- netif_stop_queue(priv->dev); + +- priv->hw->dma->stop_tx(priv->ioaddr); +- dma_free_tx_skbufs(priv); ++ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); ++ ++ stmmac_stop_tx_dma(priv, chan); ++ dma_free_tx_skbufs(priv, chan); + for (i = 0; i < DMA_TX_SIZE; i++) + if (priv->extend_desc) +- priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic, ++ priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic, + priv->mode, + (i == DMA_TX_SIZE - 1)); + else +- priv->hw->desc->init_tx_desc(&priv->dma_tx[i], ++ priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i], + priv->mode, + (i == DMA_TX_SIZE - 1)); +- priv->dirty_tx = 0; +- priv->cur_tx = 0; +- netdev_reset_queue(priv->dev); +- priv->hw->dma->start_tx(priv->ioaddr); ++ tx_q->dirty_tx = 0; ++ tx_q->cur_tx = 0; ++ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan)); ++ stmmac_start_tx_dma(priv, chan); + + priv->dev->stats.tx_errors++; +- netif_wake_queue(priv->dev); ++ netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); ++} ++ ++/** ++ * stmmac_set_dma_operation_mode - Set DMA operation mode by channel ++ * @priv: driver private structure ++ * @txmode: TX operating mode ++ * @rxmode: RX operating mode ++ * @chan: channel index ++ * Description: it is used to configure the DMA operation mode at ++ * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward ++ * mode.
++ */ ++static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, ++ u32 rxmode, u32 chan) ++{ ++ int rxfifosz = priv->plat->rx_fifo_size; ++ ++ if (rxfifosz == 0) ++ rxfifosz = priv->dma_cap.rx_fifo_size; ++ ++ if (priv->synopsys_id >= DWMAC_CORE_4_00) { ++ priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, ++ rxfifosz); ++ priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan); ++ } else { ++ priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, ++ rxfifosz); ++ } + } + + /** +@@ -1443,31 +1971,43 @@ static void stmmac_tx_err(struct stmmac_ + */ + static void stmmac_dma_interrupt(struct stmmac_priv *priv) + { ++ u32 tx_channel_count = priv->plat->tx_queues_to_use; + int status; +- int rxfifosz = priv->plat->rx_fifo_size; ++ u32 chan; ++ ++ for (chan = 0; chan < tx_channel_count; chan++) { ++ struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; + +- status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats); +- if (likely((status & handle_rx)) || (status & handle_tx)) { +- if (likely(napi_schedule_prep(&priv->napi))) { +- stmmac_disable_dma_irq(priv); +- __napi_schedule(&priv->napi); ++ status = priv->hw->dma->dma_interrupt(priv->ioaddr, ++ &priv->xstats, chan); ++ if (likely((status & handle_rx)) || (status & handle_tx)) { ++ if (likely(napi_schedule_prep(&rx_q->napi))) { ++ stmmac_disable_dma_irq(priv, chan); ++ __napi_schedule(&rx_q->napi); ++ } + } +- } +- if (unlikely(status & tx_hard_error_bump_tc)) { +- /* Try to bump up the dma threshold on this failure */ +- if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && +- (tc <= 256)) { +- tc += 64; +- if (priv->plat->force_thresh_dma_mode) +- priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, +- rxfifosz); +- else +- priv->hw->dma->dma_mode(priv->ioaddr, tc, +- SF_DMA_MODE, rxfifosz); +- priv->xstats.threshold = tc; ++ ++ if (unlikely(status & tx_hard_error_bump_tc)) { ++ /* Try to bump up the dma threshold on this failure */ ++ if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && ++ (tc <= 256)) { ++ tc += 64; ++ if (priv->plat->force_thresh_dma_mode) ++ stmmac_set_dma_operation_mode(priv, ++ tc, ++ tc, ++ chan); ++ else ++ stmmac_set_dma_operation_mode(priv, ++ tc, ++ SF_DMA_MODE, ++ chan); ++ priv->xstats.threshold = tc; ++ } ++ } else if (unlikely(status == tx_hard_error)) { ++ stmmac_tx_err(priv, chan); + } +- } else if (unlikely(status == tx_hard_error)) +- stmmac_tx_err(priv); ++ } + } + + /** +@@ -1574,6 +2114,13 @@ static void stmmac_check_ether_addr(stru + */ + static int stmmac_init_dma_engine(struct stmmac_priv *priv) + { ++ u32 rx_channels_count = priv->plat->rx_queues_to_use; ++ u32 tx_channels_count = priv->plat->tx_queues_to_use; ++ struct stmmac_rx_queue *rx_q; ++ struct stmmac_tx_queue *tx_q; ++ u32 dummy_dma_rx_phy = 0; ++ u32 dummy_dma_tx_phy = 0; ++ u32 chan = 0; + int atds = 0; + int ret = 0; + +@@ -1591,19 +2138,49 @@ static int stmmac_init_dma_engine(struct + return ret; + } + +- priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg, +- priv->dma_tx_phy, priv->dma_rx_phy, atds); +- + if (priv->synopsys_id >= DWMAC_CORE_4_00) { +- priv->rx_tail_addr = priv->dma_rx_phy + +- (DMA_RX_SIZE * sizeof(struct dma_desc)); +- priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr, +- STMMAC_CHAN0); +- +- priv->tx_tail_addr = priv->dma_tx_phy + +- (DMA_TX_SIZE * sizeof(struct dma_desc)); +- priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, +- STMMAC_CHAN0); ++ /* DMA Configuration */ ++ priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg, ++ dummy_dma_tx_phy, 
dummy_dma_rx_phy, atds); ++ ++ /* DMA RX Channel Configuration */ ++ for (chan = 0; chan < rx_channels_count; chan++) { ++ rx_q = &priv->rx_queue[chan]; ++ ++ priv->hw->dma->init_rx_chan(priv->ioaddr, ++ priv->plat->dma_cfg, ++ rx_q->dma_rx_phy, chan); ++ ++ rx_q->rx_tail_addr = rx_q->dma_rx_phy + ++ (DMA_RX_SIZE * sizeof(struct dma_desc)); ++ priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, ++ rx_q->rx_tail_addr, ++ chan); ++ } ++ ++ /* DMA TX Channel Configuration */ ++ for (chan = 0; chan < tx_channels_count; chan++) { ++ tx_q = &priv->tx_queue[chan]; ++ ++ priv->hw->dma->init_chan(priv->ioaddr, ++ priv->plat->dma_cfg, ++ chan); ++ ++ priv->hw->dma->init_tx_chan(priv->ioaddr, ++ priv->plat->dma_cfg, ++ tx_q->dma_tx_phy, chan); ++ ++ tx_q->tx_tail_addr = tx_q->dma_tx_phy + ++ (DMA_TX_SIZE * sizeof(struct dma_desc)); ++ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, ++ tx_q->tx_tail_addr, ++ chan); ++ } ++ } else { ++ rx_q = &priv->rx_queue[chan]; ++ tx_q = &priv->tx_queue[chan]; ++ priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg, ++ tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds); + } + + if (priv->plat->axi && priv->hw->dma->axi) +@@ -1621,8 +2198,12 @@ static int stmmac_init_dma_engine(struct + static void stmmac_tx_timer(unsigned long data) + { + struct stmmac_priv *priv = (struct stmmac_priv *)data; ++ u32 tx_queues_count = priv->plat->tx_queues_to_use; ++ u32 queue; + +- stmmac_tx_clean(priv); ++ /* let's scan all the tx queues */ ++ for (queue = 0; queue < tx_queues_count; queue++) ++ stmmac_tx_clean(priv, queue); + } + + /** +@@ -1644,6 +2225,196 @@ static void stmmac_init_tx_coalesce(stru + add_timer(&priv->txtimer); + } + ++static void stmmac_set_rings_length(struct stmmac_priv *priv) ++{ ++ u32 rx_channels_count = priv->plat->rx_queues_to_use; ++ u32 tx_channels_count = priv->plat->tx_queues_to_use; ++ u32 chan; ++ ++ /* set TX ring length */ ++ if (priv->hw->dma->set_tx_ring_len) { ++ for (chan = 0; chan < tx_channels_count; chan++) ++ priv->hw->dma->set_tx_ring_len(priv->ioaddr, ++ (DMA_TX_SIZE - 1), chan); ++ } ++ ++ /* set RX ring length */ ++ if (priv->hw->dma->set_rx_ring_len) { ++ for (chan = 0; chan < rx_channels_count; chan++) ++ priv->hw->dma->set_rx_ring_len(priv->ioaddr, ++ (DMA_RX_SIZE - 1), chan); ++ } ++} ++ ++/** ++ * stmmac_set_tx_queue_weight - Set TX queue weight ++ * @priv: driver private structure ++ * Description: It is used for setting TX queues weight ++ */ ++static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) ++{ ++ u32 tx_queues_count = priv->plat->tx_queues_to_use; ++ u32 weight; ++ u32 queue; ++ ++ for (queue = 0; queue < tx_queues_count; queue++) { ++ weight = priv->plat->tx_queues_cfg[queue].weight; ++ priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue); ++ } ++} ++ ++/** ++ * stmmac_configure_cbs - Configure CBS in TX queue ++ * @priv: driver private structure ++ * Description: It is used for configuring CBS in AVB TX queues ++ */ ++static void stmmac_configure_cbs(struct stmmac_priv *priv) ++{ ++ u32 tx_queues_count = priv->plat->tx_queues_to_use; ++ u32 mode_to_use; ++ u32 queue; ++ ++ /* queue 0 is reserved for legacy traffic */ ++ for (queue = 1; queue < tx_queues_count; queue++) { ++ mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; ++ if (mode_to_use == MTL_QUEUE_DCB) ++ continue; ++ ++ priv->hw->mac->config_cbs(priv->hw, ++ priv->plat->tx_queues_cfg[queue].send_slope, ++ priv->plat->tx_queues_cfg[queue].idle_slope, ++ priv->plat->tx_queues_cfg[queue].high_credit, ++ priv->plat->tx_queues_cfg[queue].low_credit, 
++ queue); ++ } ++} ++ ++/** ++ * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel ++ * @priv: driver private structure ++ * Description: It is used for mapping RX queues to RX dma channels ++ */ ++static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) ++{ ++ u32 rx_queues_count = priv->plat->rx_queues_to_use; ++ u32 queue; ++ u32 chan; ++ ++ for (queue = 0; queue < rx_queues_count; queue++) { ++ chan = priv->plat->rx_queues_cfg[queue].chan; ++ priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan); ++ } ++} ++ ++/** ++ * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority ++ * @priv: driver private structure ++ * Description: It is used for configuring the RX Queue Priority ++ */ ++static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) ++{ ++ u32 rx_queues_count = priv->plat->rx_queues_to_use; ++ u32 queue; ++ u32 prio; ++ ++ for (queue = 0; queue < rx_queues_count; queue++) { ++ if (!priv->plat->rx_queues_cfg[queue].use_prio) ++ continue; ++ ++ prio = priv->plat->rx_queues_cfg[queue].prio; ++ priv->hw->mac->rx_queue_prio(priv->hw, prio, queue); ++ } ++} ++ ++/** ++ * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority ++ * @priv: driver private structure ++ * Description: It is used for configuring the TX Queue Priority ++ */ ++static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) ++{ ++ u32 tx_queues_count = priv->plat->tx_queues_to_use; ++ u32 queue; ++ u32 prio; ++ ++ for (queue = 0; queue < tx_queues_count; queue++) { ++ if (!priv->plat->tx_queues_cfg[queue].use_prio) ++ continue; ++ ++ prio = priv->plat->tx_queues_cfg[queue].prio; ++ priv->hw->mac->tx_queue_prio(priv->hw, prio, queue); ++ } ++} ++ ++/** ++ * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing ++ * @priv: driver private structure ++ * Description: It is used for configuring the RX queue routing ++ */ ++static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) ++{ ++ u32 rx_queues_count = priv->plat->rx_queues_to_use; ++ u32 queue; ++ u8 packet; ++ ++ for (queue = 0; queue < rx_queues_count; queue++) { ++ /* no specific packet type routing specified for the queue */ ++ if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) ++ continue; ++ ++ packet = priv->plat->rx_queues_cfg[queue].pkt_route; ++ priv->hw->mac->rx_queue_routing(priv->hw, packet, queue); ++ } ++} ++ ++/** ++ * stmmac_mtl_configuration - Configure MTL ++ * @priv: driver private structure ++ * Description: It is used for configuring MTL ++ */ ++static void stmmac_mtl_configuration(struct stmmac_priv *priv) ++{ ++ u32 rx_queues_count = priv->plat->rx_queues_to_use; ++ u32 tx_queues_count = priv->plat->tx_queues_to_use; ++ ++ if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight) ++ stmmac_set_tx_queue_weight(priv); ++ ++ /* Configure MTL RX algorithms */ ++ if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms) ++ priv->hw->mac->prog_mtl_rx_algorithms(priv->hw, ++ priv->plat->rx_sched_algorithm); ++ ++ /* Configure MTL TX algorithms */ ++ if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms) ++ priv->hw->mac->prog_mtl_tx_algorithms(priv->hw, ++ priv->plat->tx_sched_algorithm); ++ ++ /* Configure CBS in AVB TX queues */ ++ if (tx_queues_count > 1 && priv->hw->mac->config_cbs) ++ stmmac_configure_cbs(priv); ++ ++ /* Map RX MTL to DMA channels */ ++ if (priv->hw->mac->map_mtl_to_dma) ++ stmmac_rx_queue_dma_chan_map(priv); ++ ++ /* Enable MAC RX Queues */ ++ if (priv->hw->mac->rx_queue_enable) ++
stmmac_mac_enable_rx_queues(priv); ++ ++ /* Set RX priorities */ ++ if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio) ++ stmmac_mac_config_rx_queues_prio(priv); ++ ++ /* Set TX priorities */ ++ if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio) ++ stmmac_mac_config_tx_queues_prio(priv); ++ ++ /* Set RX routing */ ++ if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing) ++ stmmac_mac_config_rx_queues_routing(priv); ++} ++ + /** + * stmmac_hw_setup - setup mac in a usable state. + * @dev : pointer to the device structure. +@@ -1659,6 +2430,9 @@ static void stmmac_init_tx_coalesce(stru + static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) + { + struct stmmac_priv *priv = netdev_priv(dev); ++ u32 rx_cnt = priv->plat->rx_queues_to_use; ++ u32 tx_cnt = priv->plat->tx_queues_to_use; ++ u32 chan; + int ret; + + /* DMA initialization and SW reset */ +@@ -1688,9 +2462,9 @@ static int stmmac_hw_setup(struct net_de + /* Initialize the MAC Core */ + priv->hw->mac->core_init(priv->hw, dev->mtu); + +- /* Initialize MAC RX Queues */ +- if (priv->hw->mac->rx_queue_enable) +- stmmac_mac_enable_rx_queues(priv); ++ /* Initialize MTL*/ ++ if (priv->synopsys_id >= DWMAC_CORE_4_00) ++ stmmac_mtl_configuration(priv); + + ret = priv->hw->mac->rx_ipc(priv->hw); + if (!ret) { +@@ -1700,10 +2474,7 @@ static int stmmac_hw_setup(struct net_de + } + + /* Enable the MAC Rx/Tx */ +- if (priv->synopsys_id >= DWMAC_CORE_4_00) +- stmmac_dwmac4_set_mac(priv->ioaddr, true); +- else +- stmmac_set_mac(priv->ioaddr, true); ++ priv->hw->mac->set_mac(priv->ioaddr, true); + + /* Set the HW DMA mode and the COE */ + stmmac_dma_operation_mode(priv); +@@ -1711,6 +2482,10 @@ static int stmmac_hw_setup(struct net_de + stmmac_mmc_setup(priv); + + if (init_ptp) { ++ ret = clk_prepare_enable(priv->plat->clk_ptp_ref); ++ if (ret < 0) ++ netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret); ++ + ret = stmmac_init_ptp(priv); + if (ret == -EOPNOTSUPP) + netdev_warn(priv->dev, "PTP not supported by HW\n"); +@@ -1725,35 +2500,37 @@ static int stmmac_hw_setup(struct net_de + __func__); + #endif + /* Start the ball rolling... 
*/ +- netdev_dbg(priv->dev, "DMA RX/TX processes started...\n"); +- priv->hw->dma->start_tx(priv->ioaddr); +- priv->hw->dma->start_rx(priv->ioaddr); ++ stmmac_start_all_dma(priv); + + priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; + + if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { + priv->rx_riwt = MAX_DMA_RIWT; +- priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT); ++ priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt); + } + + if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane) + priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0); + +- /* set TX ring length */ +- if (priv->hw->dma->set_tx_ring_len) +- priv->hw->dma->set_tx_ring_len(priv->ioaddr, +- (DMA_TX_SIZE - 1)); +- /* set RX ring length */ +- if (priv->hw->dma->set_rx_ring_len) +- priv->hw->dma->set_rx_ring_len(priv->ioaddr, +- (DMA_RX_SIZE - 1)); ++ /* set TX and RX rings length */ ++ stmmac_set_rings_length(priv); ++ + /* Enable TSO */ +- if (priv->tso) +- priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0); ++ if (priv->tso) { ++ for (chan = 0; chan < tx_cnt; chan++) ++ priv->hw->dma->enable_tso(priv->ioaddr, 1, chan); ++ } + + return 0; + } + ++static void stmmac_hw_teardown(struct net_device *dev) ++{ ++ struct stmmac_priv *priv = netdev_priv(dev); ++ ++ clk_disable_unprepare(priv->plat->clk_ptp_ref); ++} ++ + /** + * stmmac_open - open entry point of the driver + * @dev : pointer to the device structure. +@@ -1821,7 +2598,7 @@ static int stmmac_open(struct net_device + netdev_err(priv->dev, + "%s: ERROR: allocating the IRQ %d (error: %d)\n", + __func__, dev->irq, ret); +- goto init_error; ++ goto irq_error; + } + + /* Request the Wake IRQ in case of another line is used for WoL */ +@@ -1848,8 +2625,8 @@ static int stmmac_open(struct net_device + } + } + +- napi_enable(&priv->napi); +- netif_start_queue(dev); ++ stmmac_enable_all_queues(priv); ++ stmmac_start_all_queues(priv); + + return 0; + +@@ -1858,7 +2635,12 @@ lpiirq_error: + free_irq(priv->wol_irq, dev); + wolirq_error: + free_irq(dev->irq, dev); ++irq_error: ++ if (dev->phydev) ++ phy_stop(dev->phydev); + ++ del_timer_sync(&priv->txtimer); ++ stmmac_hw_teardown(dev); + init_error: + free_dma_desc_resources(priv); + dma_desc_error: +@@ -1887,9 +2669,9 @@ static int stmmac_release(struct net_dev + phy_disconnect(dev->phydev); + } + +- netif_stop_queue(dev); ++ stmmac_stop_all_queues(priv); + +- napi_disable(&priv->napi); ++ stmmac_disable_all_queues(priv); + + del_timer_sync(&priv->txtimer); + +@@ -1901,14 +2683,13 @@ static int stmmac_release(struct net_dev + free_irq(priv->lpi_irq, dev); + + /* Stop TX/RX DMA and clear the descriptors */ +- priv->hw->dma->stop_tx(priv->ioaddr); +- priv->hw->dma->stop_rx(priv->ioaddr); ++ stmmac_stop_all_dma(priv); + + /* Release and free the Rx/Tx resources */ + free_dma_desc_resources(priv); + + /* Disable the MAC Rx/Tx */ +- stmmac_set_mac(priv->ioaddr, false); ++ priv->hw->mac->set_mac(priv->ioaddr, false); + + netif_carrier_off(dev); + +@@ -1927,22 +2708,24 @@ static int stmmac_release(struct net_dev + * @des: buffer start address + * @total_len: total length to fill in descriptors + * @last_segment: condition for the last descriptor ++ * @queue: TX queue index + * Description: + * This function fills descriptors and requests new descriptors according to + * the buffer length to fill + */ + static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des, +- int total_len, bool last_segment) ++ int total_len, bool last_segment, u32 queue) + { ++ struct stmmac_tx_queue *tx_q =
&priv->tx_queue[queue]; + struct dma_desc *desc; +- int tmp_len; + u32 buff_size; ++ int tmp_len; + + tmp_len = total_len; + + while (tmp_len > 0) { +- priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); +- desc = priv->dma_tx + priv->cur_tx; ++ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); ++ desc = tx_q->dma_tx + tx_q->cur_tx; + + desc->des0 = cpu_to_le32(des + (total_len - tmp_len)); + buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? +@@ -1950,7 +2733,7 @@ static void stmmac_tso_allocator(struct + + priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, + 0, 1, +- (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE), ++ (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), + 0, 0); + + tmp_len -= TSO_MAX_BUFF_SIZE; +@@ -1986,23 +2769,28 @@ static void stmmac_tso_allocator(struct + */ + static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) + { +- u32 pay_len, mss; +- int tmp_pay_len = 0; ++ struct dma_desc *desc, *first, *mss_desc = NULL; + struct stmmac_priv *priv = netdev_priv(dev); + int nfrags = skb_shinfo(skb)->nr_frags; ++ u32 queue = skb_get_queue_mapping(skb); + unsigned int first_entry, des; +- struct dma_desc *desc, *first, *mss_desc = NULL; ++ struct stmmac_tx_queue *tx_q; ++ int tmp_pay_len = 0; ++ u32 pay_len, mss; + u8 proto_hdr_len; + int i; + ++ tx_q = &priv->tx_queue[queue]; ++ + /* Compute header lengths */ + proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + + /* Desc availability based on threshold should be enough safe */ +- if (unlikely(stmmac_tx_avail(priv) < ++ if (unlikely(stmmac_tx_avail(priv, queue) < + (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { +- if (!netif_queue_stopped(dev)) { +- netif_stop_queue(dev); ++ if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { ++ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, ++ queue)); + /* This is a hard error, log it. 
*/ + netdev_err(priv->dev, + "%s: Tx Ring full when queue awake\n", +@@ -2017,10 +2805,10 @@ static netdev_tx_t stmmac_tso_xmit(struc + + /* set new MSS value if needed */ + if (mss != priv->mss) { +- mss_desc = priv->dma_tx + priv->cur_tx; ++ mss_desc = tx_q->dma_tx + tx_q->cur_tx; + priv->hw->desc->set_mss(mss_desc, mss); + priv->mss = mss; +- priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); ++ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); + } + + if (netif_msg_tx_queued(priv)) { +@@ -2030,9 +2818,9 @@ static netdev_tx_t stmmac_tso_xmit(struc + skb->data_len); + } + +- first_entry = priv->cur_tx; ++ first_entry = tx_q->cur_tx; + +- desc = priv->dma_tx + first_entry; ++ desc = tx_q->dma_tx + first_entry; + first = desc; + + /* first descriptor: fill Headers on Buf1 */ +@@ -2041,9 +2829,8 @@ static netdev_tx_t stmmac_tso_xmit(struc + if (dma_mapping_error(priv->device, des)) + goto dma_map_err; + +- priv->tx_skbuff_dma[first_entry].buf = des; +- priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb); +- priv->tx_skbuff[first_entry] = skb; ++ tx_q->tx_skbuff_dma[first_entry].buf = des; ++ tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); + + first->des0 = cpu_to_le32(des); + +@@ -2054,7 +2841,7 @@ static netdev_tx_t stmmac_tso_xmit(struc + /* If needed take extra descriptors to fill the remaining payload */ + tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; + +- stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0)); ++ stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); + + /* Prepare fragments */ + for (i = 0; i < nfrags; i++) { +@@ -2063,24 +2850,34 @@ static netdev_tx_t stmmac_tso_xmit(struc + des = skb_frag_dma_map(priv->device, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); ++ if (dma_mapping_error(priv->device, des)) ++ goto dma_map_err; + + stmmac_tso_allocator(priv, des, skb_frag_size(frag), +- (i == nfrags - 1)); ++ (i == nfrags - 1), queue); + +- priv->tx_skbuff_dma[priv->cur_tx].buf = des; +- priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag); +- priv->tx_skbuff[priv->cur_tx] = NULL; +- priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true; ++ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; ++ tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); ++ tx_q->tx_skbuff[tx_q->cur_tx] = NULL; ++ tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; + } + +- priv->tx_skbuff_dma[priv->cur_tx].last_segment = true; ++ tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; ++ ++ /* Only the last descriptor gets to point to the skb. */ ++ tx_q->tx_skbuff[tx_q->cur_tx] = skb; + +- priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); ++ /* We've used all descriptors we need for this skb, however, ++ * advance cur_tx so that it references a fresh descriptor. ++ * ndo_start_xmit will fill this descriptor the next time it's ++ * called and stmmac_tx_clean may clean up to this descriptor. 
++ */ ++ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); + +- if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { ++ if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { + netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", + __func__); +- netif_stop_queue(dev); ++ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); + } + + dev->stats.tx_bytes += skb->len; +@@ -2112,7 +2909,7 @@ static netdev_tx_t stmmac_tso_xmit(struc + priv->hw->desc->prepare_tso_tx_desc(first, 1, + proto_hdr_len, + pay_len, +- 1, priv->tx_skbuff_dma[first_entry].last_segment, ++ 1, tx_q->tx_skbuff_dma[first_entry].last_segment, + tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len)); + + /* If context desc is used to change MSS */ +@@ -2127,20 +2924,20 @@ static netdev_tx_t stmmac_tso_xmit(struc + + if (netif_msg_pktdata(priv)) { + pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", +- __func__, priv->cur_tx, priv->dirty_tx, first_entry, +- priv->cur_tx, first, nfrags); ++ __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, ++ tx_q->cur_tx, first, nfrags); + +- priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE, ++ priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE, + 0); + + pr_info(">>> frame to be transmitted: "); + print_pkt(skb->data, skb_headlen(skb)); + } + +- netdev_sent_queue(dev, skb->len); ++ netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); + +- priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, +- STMMAC_CHAN0); ++ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr, ++ queue); + + return NETDEV_TX_OK; + +@@ -2164,21 +2961,27 @@ static netdev_tx_t stmmac_xmit(struct sk + struct stmmac_priv *priv = netdev_priv(dev); + unsigned int nopaged_len = skb_headlen(skb); + int i, csum_insertion = 0, is_jumbo = 0; ++ u32 queue = skb_get_queue_mapping(skb); + int nfrags = skb_shinfo(skb)->nr_frags; +- unsigned int entry, first_entry; ++ int entry; ++ unsigned int first_entry; + struct dma_desc *desc, *first; ++ struct stmmac_tx_queue *tx_q; + unsigned int enh_desc; + unsigned int des; + ++ tx_q = &priv->tx_queue[queue]; ++ + /* Manage oversized TCP frames for GMAC4 device */ + if (skb_is_gso(skb) && priv->tso) { + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + return stmmac_tso_xmit(skb, dev); + } + +- if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { +- if (!netif_queue_stopped(dev)) { +- netif_stop_queue(dev); ++ if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { ++ if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { ++ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, ++ queue)); + /* This is a hard error, log it. 
*/ + netdev_err(priv->dev, + "%s: Tx Ring full when queue awake\n", +@@ -2190,20 +2993,18 @@ static netdev_tx_t stmmac_xmit(struct sk + if (priv->tx_path_in_lpi_mode) + stmmac_disable_eee_mode(priv); + +- entry = priv->cur_tx; ++ entry = tx_q->cur_tx; + first_entry = entry; + + csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); + + if (likely(priv->extend_desc)) +- desc = (struct dma_desc *)(priv->dma_etx + entry); ++ desc = (struct dma_desc *)(tx_q->dma_etx + entry); + else +- desc = priv->dma_tx + entry; ++ desc = tx_q->dma_tx + entry; + + first = desc; + +- priv->tx_skbuff[first_entry] = skb; +- + enh_desc = priv->plat->enh_desc; + /* To program the descriptors according to the size of the frame */ + if (enh_desc) +@@ -2211,7 +3012,7 @@ static netdev_tx_t stmmac_xmit(struct sk + + if (unlikely(is_jumbo) && likely(priv->synopsys_id < + DWMAC_CORE_4_00)) { +- entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); ++ entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion); + if (unlikely(entry < 0)) + goto dma_map_err; + } +@@ -2224,48 +3025,56 @@ static netdev_tx_t stmmac_xmit(struct sk + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); + + if (likely(priv->extend_desc)) +- desc = (struct dma_desc *)(priv->dma_etx + entry); ++ desc = (struct dma_desc *)(tx_q->dma_etx + entry); + else +- desc = priv->dma_tx + entry; ++ desc = tx_q->dma_tx + entry; + + des = skb_frag_dma_map(priv->device, frag, 0, len, + DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, des)) + goto dma_map_err; /* should reuse desc w/o issues */ + +- priv->tx_skbuff[entry] = NULL; ++ tx_q->tx_skbuff[entry] = NULL; + +- priv->tx_skbuff_dma[entry].buf = des; ++ tx_q->tx_skbuff_dma[entry].buf = des; + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) + desc->des0 = cpu_to_le32(des); + else + desc->des2 = cpu_to_le32(des); + +- priv->tx_skbuff_dma[entry].map_as_page = true; +- priv->tx_skbuff_dma[entry].len = len; +- priv->tx_skbuff_dma[entry].last_segment = last_segment; ++ tx_q->tx_skbuff_dma[entry].map_as_page = true; ++ tx_q->tx_skbuff_dma[entry].len = len; ++ tx_q->tx_skbuff_dma[entry].last_segment = last_segment; + + /* Prepare the descriptor and set the own bit too */ + priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, +- priv->mode, 1, last_segment); ++ priv->mode, 1, last_segment, ++ skb->len); + } + +- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); ++ /* Only the last descriptor gets to point to the skb. */ ++ tx_q->tx_skbuff[entry] = skb; + +- priv->cur_tx = entry; ++ /* We've used all descriptors we need for this skb, however, ++ * advance cur_tx so that it references a fresh descriptor. ++ * ndo_start_xmit will fill this descriptor the next time it's ++ * called and stmmac_tx_clean may clean up to this descriptor. 
++ */ ++ entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); ++ tx_q->cur_tx = entry; + + if (netif_msg_pktdata(priv)) { + void *tx_head; + + netdev_dbg(priv->dev, + "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", +- __func__, priv->cur_tx, priv->dirty_tx, first_entry, ++ __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, + entry, first, nfrags); + + if (priv->extend_desc) +- tx_head = (void *)priv->dma_etx; ++ tx_head = (void *)tx_q->dma_etx; + else +- tx_head = (void *)priv->dma_tx; ++ tx_head = (void *)tx_q->dma_tx; + + priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false); + +@@ -2273,10 +3082,10 @@ static netdev_tx_t stmmac_xmit(struct sk + print_pkt(skb->data, skb->len); + } + +- if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { ++ if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { + netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", + __func__); +- netif_stop_queue(dev); ++ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); + } + + dev->stats.tx_bytes += skb->len; +@@ -2311,14 +3120,14 @@ static netdev_tx_t stmmac_xmit(struct sk + if (dma_mapping_error(priv->device, des)) + goto dma_map_err; + +- priv->tx_skbuff_dma[first_entry].buf = des; ++ tx_q->tx_skbuff_dma[first_entry].buf = des; + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) + first->des0 = cpu_to_le32(des); + else + first->des2 = cpu_to_le32(des); + +- priv->tx_skbuff_dma[first_entry].len = nopaged_len; +- priv->tx_skbuff_dma[first_entry].last_segment = last_segment; ++ tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; ++ tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; + + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en)) { +@@ -2330,7 +3139,7 @@ static netdev_tx_t stmmac_xmit(struct sk + /* Prepare the first descriptor setting the OWN bit too */ + priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len, + csum_insertion, priv->mode, 1, +- last_segment); ++ last_segment, skb->len); + + /* The own bit must be the latest setting done when prepare the + * descriptor and then barrier is needed to make sure that +@@ -2339,13 +3148,13 @@ static netdev_tx_t stmmac_xmit(struct sk + dma_wmb(); + } + +- netdev_sent_queue(dev, skb->len); ++ netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); + + if (priv->synopsys_id < DWMAC_CORE_4_00) + priv->hw->dma->enable_dma_transmission(priv->ioaddr); + else +- priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, +- STMMAC_CHAN0); ++ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr, ++ queue); + + return NETDEV_TX_OK; + +@@ -2373,9 +3182,9 @@ static void stmmac_rx_vlan(struct net_de + } + + +-static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv) ++static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q) + { +- if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH) ++ if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH) + return 0; + + return 1; +@@ -2384,30 +3193,33 @@ static inline int stmmac_rx_threshold_co + /** + * stmmac_rx_refill - refill used skb preallocated buffers + * @priv: driver private structure ++ * @queue: RX queue index + * Description : this is to reallocate the skb for the reception process + * that is based on zero-copy. 
+ */ +-static inline void stmmac_rx_refill(struct stmmac_priv *priv) ++static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) + { ++ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; ++ int dirty = stmmac_rx_dirty(priv, queue); ++ unsigned int entry = rx_q->dirty_rx; ++ + int bfsize = priv->dma_buf_sz; +- unsigned int entry = priv->dirty_rx; +- int dirty = stmmac_rx_dirty(priv); + + while (dirty-- > 0) { + struct dma_desc *p; + + if (priv->extend_desc) +- p = (struct dma_desc *)(priv->dma_erx + entry); ++ p = (struct dma_desc *)(rx_q->dma_erx + entry); + else +- p = priv->dma_rx + entry; ++ p = rx_q->dma_rx + entry; + +- if (likely(priv->rx_skbuff[entry] == NULL)) { ++ if (likely(!rx_q->rx_skbuff[entry])) { + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); + if (unlikely(!skb)) { + /* so for a while no zero-copy! */ +- priv->rx_zeroc_thresh = STMMAC_RX_THRESH; ++ rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH; + if (unlikely(net_ratelimit())) + dev_err(priv->device, + "fail to alloc skb entry %d\n", +@@ -2415,28 +3227,28 @@ static inline void stmmac_rx_refill(stru + break; + } + +- priv->rx_skbuff[entry] = skb; +- priv->rx_skbuff_dma[entry] = ++ rx_q->rx_skbuff[entry] = skb; ++ rx_q->rx_skbuff_dma[entry] = + dma_map_single(priv->device, skb->data, bfsize, + DMA_FROM_DEVICE); + if (dma_mapping_error(priv->device, +- priv->rx_skbuff_dma[entry])) { ++ rx_q->rx_skbuff_dma[entry])) { + netdev_err(priv->dev, "Rx DMA map failed\n"); + dev_kfree_skb(skb); + break; + } + + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) { +- p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]); ++ p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]); + p->des1 = 0; + } else { +- p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]); ++ p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]); + } + if (priv->hw->mode->refill_desc3) +- priv->hw->mode->refill_desc3(priv, p); ++ priv->hw->mode->refill_desc3(rx_q, p); + +- if (priv->rx_zeroc_thresh > 0) +- priv->rx_zeroc_thresh--; ++ if (rx_q->rx_zeroc_thresh > 0) ++ rx_q->rx_zeroc_thresh--; + + netif_dbg(priv, rx_status, priv->dev, + "refill entry #%d\n", entry); +@@ -2452,31 +3264,33 @@ static inline void stmmac_rx_refill(stru + + entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); + } +- priv->dirty_rx = entry; ++ rx_q->dirty_rx = entry; + } + + /** + * stmmac_rx - manage the receive process + * @priv: driver private structure +- * @limit: napi bugget. ++ * @limit: napi bugget ++ * @queue: RX queue index. + * Description : this the function called by the napi poll method. + * It gets all the frames inside the ring. 
+ */ +-static int stmmac_rx(struct stmmac_priv *priv, int limit) ++static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) + { +- unsigned int entry = priv->cur_rx; ++ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; ++ unsigned int entry = rx_q->cur_rx; ++ int coe = priv->hw->rx_csum; + unsigned int next_entry; + unsigned int count = 0; +- int coe = priv->hw->rx_csum; + + if (netif_msg_rx_status(priv)) { + void *rx_head; + + netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); + if (priv->extend_desc) +- rx_head = (void *)priv->dma_erx; ++ rx_head = (void *)rx_q->dma_erx; + else +- rx_head = (void *)priv->dma_rx; ++ rx_head = (void *)rx_q->dma_rx; + + priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true); + } +@@ -2486,9 +3300,9 @@ static int stmmac_rx(struct stmmac_priv + struct dma_desc *np; + + if (priv->extend_desc) +- p = (struct dma_desc *)(priv->dma_erx + entry); ++ p = (struct dma_desc *)(rx_q->dma_erx + entry); + else +- p = priv->dma_rx + entry; ++ p = rx_q->dma_rx + entry; + + /* read the status of the incoming frame */ + status = priv->hw->desc->rx_status(&priv->dev->stats, +@@ -2499,20 +3313,20 @@ static int stmmac_rx(struct stmmac_priv + + count++; + +- priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE); +- next_entry = priv->cur_rx; ++ rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE); ++ next_entry = rx_q->cur_rx; + + if (priv->extend_desc) +- np = (struct dma_desc *)(priv->dma_erx + next_entry); ++ np = (struct dma_desc *)(rx_q->dma_erx + next_entry); + else +- np = priv->dma_rx + next_entry; ++ np = rx_q->dma_rx + next_entry; + + prefetch(np); + + if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status)) + priv->hw->desc->rx_extended_status(&priv->dev->stats, + &priv->xstats, +- priv->dma_erx + ++ rx_q->dma_erx + + entry); + if (unlikely(status == discard_frame)) { + priv->dev->stats.rx_errors++; +@@ -2522,9 +3336,9 @@ static int stmmac_rx(struct stmmac_priv + * them in stmmac_rx_refill() function so that + * device can reuse it. 
+ */ +- priv->rx_skbuff[entry] = NULL; ++ rx_q->rx_skbuff[entry] = NULL; + dma_unmap_single(priv->device, +- priv->rx_skbuff_dma[entry], ++ rx_q->rx_skbuff_dma[entry], + priv->dma_buf_sz, + DMA_FROM_DEVICE); + } +@@ -2572,7 +3386,7 @@ static int stmmac_rx(struct stmmac_priv + */ + if (unlikely(!priv->plat->has_gmac4 && + ((frame_len < priv->rx_copybreak) || +- stmmac_rx_threshold_count(priv)))) { ++ stmmac_rx_threshold_count(rx_q)))) { + skb = netdev_alloc_skb_ip_align(priv->dev, + frame_len); + if (unlikely(!skb)) { +@@ -2584,21 +3398,21 @@ static int stmmac_rx(struct stmmac_priv + } + + dma_sync_single_for_cpu(priv->device, +- priv->rx_skbuff_dma ++ rx_q->rx_skbuff_dma + [entry], frame_len, + DMA_FROM_DEVICE); + skb_copy_to_linear_data(skb, +- priv-> ++ rx_q-> + rx_skbuff[entry]->data, + frame_len); + + skb_put(skb, frame_len); + dma_sync_single_for_device(priv->device, +- priv->rx_skbuff_dma ++ rx_q->rx_skbuff_dma + [entry], frame_len, + DMA_FROM_DEVICE); + } else { +- skb = priv->rx_skbuff[entry]; ++ skb = rx_q->rx_skbuff[entry]; + if (unlikely(!skb)) { + netdev_err(priv->dev, + "%s: Inconsistent Rx chain\n", +@@ -2607,12 +3421,12 @@ static int stmmac_rx(struct stmmac_priv + break; + } + prefetch(skb->data - NET_IP_ALIGN); +- priv->rx_skbuff[entry] = NULL; +- priv->rx_zeroc_thresh++; ++ rx_q->rx_skbuff[entry] = NULL; ++ rx_q->rx_zeroc_thresh++; + + skb_put(skb, frame_len); + dma_unmap_single(priv->device, +- priv->rx_skbuff_dma[entry], ++ rx_q->rx_skbuff_dma[entry], + priv->dma_buf_sz, + DMA_FROM_DEVICE); + } +@@ -2634,7 +3448,7 @@ static int stmmac_rx(struct stmmac_priv + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + +- napi_gro_receive(&priv->napi, skb); ++ napi_gro_receive(&rx_q->napi, skb); + + priv->dev->stats.rx_packets++; + priv->dev->stats.rx_bytes += frame_len; +@@ -2642,7 +3456,7 @@ static int stmmac_rx(struct stmmac_priv + entry = next_entry; + } + +- stmmac_rx_refill(priv); ++ stmmac_rx_refill(priv, queue); + + priv->xstats.rx_pkt_n += count; + +@@ -2659,16 +3473,24 @@ static int stmmac_rx(struct stmmac_priv + */ + static int stmmac_poll(struct napi_struct *napi, int budget) + { +- struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi); ++ struct stmmac_rx_queue *rx_q = ++ container_of(napi, struct stmmac_rx_queue, napi); ++ struct stmmac_priv *priv = rx_q->priv_data; ++ u32 tx_count = priv->plat->tx_queues_to_use; ++ u32 chan = rx_q->queue_index; + int work_done = 0; ++ u32 queue; + + priv->xstats.napi_poll++; +- stmmac_tx_clean(priv); + +- work_done = stmmac_rx(priv, budget); ++ /* check all the queues */ ++ for (queue = 0; queue < tx_count; queue++) ++ stmmac_tx_clean(priv, queue); ++ ++ work_done = stmmac_rx(priv, budget, rx_q->queue_index); + if (work_done < budget) { + napi_complete_done(napi, work_done); +- stmmac_enable_dma_irq(priv); ++ stmmac_enable_dma_irq(priv, chan); + } + return work_done; + } +@@ -2684,9 +3506,12 @@ static int stmmac_poll(struct napi_struc + static void stmmac_tx_timeout(struct net_device *dev) + { + struct stmmac_priv *priv = netdev_priv(dev); ++ u32 tx_count = priv->plat->tx_queues_to_use; ++ u32 chan; + + /* Clear Tx resources and restart transmitting again */ +- stmmac_tx_err(priv); ++ for (chan = 0; chan < tx_count; chan++) ++ stmmac_tx_err(priv, chan); + } + + /** +@@ -2809,6 +3634,12 @@ static irqreturn_t stmmac_interrupt(int + { + struct net_device *dev = (struct net_device *)dev_id; + struct stmmac_priv *priv = netdev_priv(dev); ++ u32 rx_cnt = priv->plat->rx_queues_to_use; ++ u32 tx_cnt = 
priv->plat->tx_queues_to_use; ++ u32 queues_count; ++ u32 queue; ++ ++ queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt; + + if (priv->irq_wake) + pm_wakeup_event(priv->device, 0); +@@ -2822,16 +3653,30 @@ static irqreturn_t stmmac_interrupt(int + if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) { + int status = priv->hw->mac->host_irq_status(priv->hw, + &priv->xstats); ++ + if (unlikely(status)) { + /* For LPI we need to save the tx status */ + if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) + priv->tx_path_in_lpi_mode = true; + if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) + priv->tx_path_in_lpi_mode = false; +- if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr) +- priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, +- priv->rx_tail_addr, +- STMMAC_CHAN0); ++ } ++ ++ if (priv->synopsys_id >= DWMAC_CORE_4_00) { ++ for (queue = 0; queue < queues_count; queue++) { ++ struct stmmac_rx_queue *rx_q = ++ &priv->rx_queue[queue]; ++ ++ status |= ++ priv->hw->mac->host_mtl_irq_status(priv->hw, ++ queue); ++ ++ if (status & CORE_IRQ_MTL_RX_OVERFLOW && ++ priv->hw->dma->set_rx_tail_ptr) ++ priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, ++ rx_q->rx_tail_addr, ++ queue); ++ } + } + + /* PCS link status */ +@@ -2916,7 +3761,7 @@ static void sysfs_display_ring(void *hea + ep++; + } else { + seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", +- i, (unsigned int)virt_to_phys(ep), ++ i, (unsigned int)virt_to_phys(p), + le32_to_cpu(p->des0), le32_to_cpu(p->des1), + le32_to_cpu(p->des2), le32_to_cpu(p->des3)); + p++; +@@ -2929,17 +3774,40 @@ static int stmmac_sysfs_ring_read(struct + { + struct net_device *dev = seq->private; + struct stmmac_priv *priv = netdev_priv(dev); ++ u32 rx_count = priv->plat->rx_queues_to_use; ++ u32 tx_count = priv->plat->tx_queues_to_use; ++ u32 queue; + +- if (priv->extend_desc) { +- seq_printf(seq, "Extended RX descriptor ring:\n"); +- sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq); +- seq_printf(seq, "Extended TX descriptor ring:\n"); +- sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq); +- } else { +- seq_printf(seq, "RX descriptor ring:\n"); +- sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq); +- seq_printf(seq, "TX descriptor ring:\n"); +- sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq); ++ for (queue = 0; queue < rx_count; queue++) { ++ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; ++ ++ seq_printf(seq, "RX Queue %d:\n", queue); ++ ++ if (priv->extend_desc) { ++ seq_printf(seq, "Extended descriptor ring:\n"); ++ sysfs_display_ring((void *)rx_q->dma_erx, ++ DMA_RX_SIZE, 1, seq); ++ } else { ++ seq_printf(seq, "Descriptor ring:\n"); ++ sysfs_display_ring((void *)rx_q->dma_rx, ++ DMA_RX_SIZE, 0, seq); ++ } ++ } ++ ++ for (queue = 0; queue < tx_count; queue++) { ++ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; ++ ++ seq_printf(seq, "TX Queue %d:\n", queue); ++ ++ if (priv->extend_desc) { ++ seq_printf(seq, "Extended descriptor ring:\n"); ++ sysfs_display_ring((void *)tx_q->dma_etx, ++ DMA_TX_SIZE, 1, seq); ++ } else { ++ seq_printf(seq, "Descriptor ring:\n"); ++ sysfs_display_ring((void *)tx_q->dma_tx, ++ DMA_TX_SIZE, 0, seq); ++ } + } + + return 0; +@@ -3222,11 +4090,14 @@ int stmmac_dvr_probe(struct device *devi + struct plat_stmmacenet_data *plat_dat, + struct stmmac_resources *res) + { +- int ret = 0; + struct net_device *ndev = NULL; + struct stmmac_priv *priv; ++ int ret = 0; ++ u32 queue; + +- ndev = alloc_etherdev(sizeof(struct stmmac_priv)); ++ ndev = 
alloc_etherdev_mqs(sizeof(struct stmmac_priv), ++ MTL_MAX_TX_QUEUES, ++ MTL_MAX_RX_QUEUES); + if (!ndev) + return -ENOMEM; + +@@ -3268,6 +4139,10 @@ int stmmac_dvr_probe(struct device *devi + if (ret) + goto error_hw_init; + ++ /* Configure real RX and TX queues */ ++ netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use); ++ netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use); ++ + ndev->netdev_ops = &stmmac_netdev_ops; + + ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | +@@ -3300,7 +4175,12 @@ int stmmac_dvr_probe(struct device *devi + "Enable RX Mitigation via HW Watchdog Timer\n"); + } + +- netif_napi_add(ndev, &priv->napi, stmmac_poll, 64); ++ for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { ++ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; ++ ++ netif_napi_add(ndev, &rx_q->napi, stmmac_poll, ++ (8 * priv->plat->rx_queues_to_use)); ++ } + + spin_lock_init(&priv->lock); + +@@ -3345,7 +4225,11 @@ error_netdev_register: + priv->hw->pcs != STMMAC_PCS_RTBI) + stmmac_mdio_unregister(ndev); + error_mdio_register: +- netif_napi_del(&priv->napi); ++ for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { ++ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; ++ ++ netif_napi_del(&rx_q->napi); ++ } + error_hw_init: + free_netdev(ndev); + +@@ -3366,10 +4250,9 @@ int stmmac_dvr_remove(struct device *dev + + netdev_info(priv->dev, "%s: removing driver", __func__); + +- priv->hw->dma->stop_rx(priv->ioaddr); +- priv->hw->dma->stop_tx(priv->ioaddr); ++ stmmac_stop_all_dma(priv); + +- stmmac_set_mac(priv->ioaddr, false); ++ priv->hw->mac->set_mac(priv->ioaddr, false); + netif_carrier_off(ndev); + unregister_netdev(ndev); + if (priv->plat->stmmac_rst) +@@ -3408,20 +4291,19 @@ int stmmac_suspend(struct device *dev) + spin_lock_irqsave(&priv->lock, flags); + + netif_device_detach(ndev); +- netif_stop_queue(ndev); ++ stmmac_stop_all_queues(priv); + +- napi_disable(&priv->napi); ++ stmmac_disable_all_queues(priv); + + /* Stop TX/RX DMA */ +- priv->hw->dma->stop_tx(priv->ioaddr); +- priv->hw->dma->stop_rx(priv->ioaddr); ++ stmmac_stop_all_dma(priv); + + /* Enable Power down mode by programming the PMT regs */ + if (device_may_wakeup(priv->device)) { + priv->hw->mac->pmt(priv->hw, priv->wolopts); + priv->irq_wake = 1; + } else { +- stmmac_set_mac(priv->ioaddr, false); ++ priv->hw->mac->set_mac(priv->ioaddr, false); + pinctrl_pm_select_sleep_state(priv->device); + /* Disable clock in case of PWM is off */ + clk_disable(priv->plat->pclk); +@@ -3437,6 +4319,31 @@ int stmmac_suspend(struct device *dev) + EXPORT_SYMBOL_GPL(stmmac_suspend); + + /** ++ * stmmac_reset_queues_param - reset queue parameters ++ * @dev: device pointer ++ */ ++static void stmmac_reset_queues_param(struct stmmac_priv *priv) ++{ ++ u32 rx_cnt = priv->plat->rx_queues_to_use; ++ u32 tx_cnt = priv->plat->tx_queues_to_use; ++ u32 queue; ++ ++ for (queue = 0; queue < rx_cnt; queue++) { ++ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; ++ ++ rx_q->cur_rx = 0; ++ rx_q->dirty_rx = 0; ++ } ++ ++ for (queue = 0; queue < tx_cnt; queue++) { ++ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; ++ ++ tx_q->cur_tx = 0; ++ tx_q->dirty_tx = 0; ++ } ++} ++ ++/** + * stmmac_resume - resume callback + * @dev: device pointer + * Description: when resume this function is invoked to setup the DMA and CORE +@@ -3476,10 +4383,8 @@ int stmmac_resume(struct device *dev) + + spin_lock_irqsave(&priv->lock, flags); + +- priv->cur_rx = 0; +- priv->dirty_rx = 0; +- 
priv->dirty_tx = 0; +- priv->cur_tx = 0; ++ stmmac_reset_queues_param(priv); ++ + /* reset private mss value to force mss context settings at + * next tso xmit (only used for gmac4). + */ +@@ -3491,9 +4396,9 @@ int stmmac_resume(struct device *dev) + stmmac_init_tx_coalesce(priv); + stmmac_set_rx_mode(ndev); + +- napi_enable(&priv->napi); ++ stmmac_enable_all_queues(priv); + +- netif_start_queue(ndev); ++ stmmac_start_all_queues(priv); + + spin_unlock_irqrestore(&priv->lock, flags); + +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -32,6 +32,7 @@ + */ + struct stmmac_pci_dmi_data { + const char *name; ++ const char *asset_tag; + unsigned int func; + int phy_addr; + }; +@@ -46,6 +47,7 @@ struct stmmac_pci_info { + static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info) + { + const char *name = dmi_get_system_info(DMI_BOARD_NAME); ++ const char *asset_tag = dmi_get_system_info(DMI_BOARD_ASSET_TAG); + unsigned int func = PCI_FUNC(info->pdev->devfn); + struct stmmac_pci_dmi_data *dmi; + +@@ -57,18 +59,19 @@ static int stmmac_pci_find_phy_addr(stru + return 1; + + for (dmi = info->dmi; dmi->name && *dmi->name; dmi++) { +- if (!strcmp(dmi->name, name) && dmi->func == func) ++ if (!strcmp(dmi->name, name) && dmi->func == func) { ++ /* If asset tag is provided, match on it as well. */ ++ if (dmi->asset_tag && strcmp(dmi->asset_tag, asset_tag)) ++ continue; + return dmi->phy_addr; ++ } + } + + return -ENODEV; + } + +-static void stmmac_default_data(struct plat_stmmacenet_data *plat) ++static void common_default_data(struct plat_stmmacenet_data *plat) + { +- plat->bus_id = 1; +- plat->phy_addr = 0; +- plat->interface = PHY_INTERFACE_MODE_GMII; + plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ + plat->has_gmac = 1; + plat->force_sf_dma_mode = 1; +@@ -76,10 +79,6 @@ static void stmmac_default_data(struct p + plat->mdio_bus_data->phy_reset = NULL; + plat->mdio_bus_data->phy_mask = 0; + +- plat->dma_cfg->pbl = 32; +- plat->dma_cfg->pblx8 = true; +- /* TODO: AXI */ +- + /* Set default value for multicast hash bins */ + plat->multicast_filter_bins = HASH_TABLE_SIZE; + +@@ -88,6 +87,31 @@ static void stmmac_default_data(struct p + + /* Set the maxmtu to a default of JUMBO_LEN */ + plat->maxmtu = JUMBO_LEN; ++ ++ /* Set default number of RX and TX queues to use */ ++ plat->tx_queues_to_use = 1; ++ plat->rx_queues_to_use = 1; ++ ++ /* Disable Priority config by default */ ++ plat->tx_queues_cfg[0].use_prio = false; ++ plat->rx_queues_cfg[0].use_prio = false; ++ ++ /* Disable RX queues routing by default */ ++ plat->rx_queues_cfg[0].pkt_route = 0x0; ++} ++ ++static void stmmac_default_data(struct plat_stmmacenet_data *plat) ++{ ++ /* Set common default data first */ ++ common_default_data(plat); ++ ++ plat->bus_id = 1; ++ plat->phy_addr = 0; ++ plat->interface = PHY_INTERFACE_MODE_GMII; ++ ++ plat->dma_cfg->pbl = 32; ++ plat->dma_cfg->pblx8 = true; ++ /* TODO: AXI */ + } + + static int quark_default_data(struct plat_stmmacenet_data *plat, +@@ -96,6 +120,9 @@ static int quark_default_data(struct pla + struct pci_dev *pdev = info->pdev; + int ret; + ++ /* Set common default data first */ ++ common_default_data(plat); ++ + /* + * Refuse to load the driver and register net device if MAC controller + * does not connect to any PHY interface. 
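The asset-tag handling above means a DMI table entry only constrains the asset tag when it provides one: the existing name-plus-function entries keep matching unchanged, while the SIMATIC IOT2000 entries added further down can share one board name and differ only by asset tag and PCI function. A minimal userspace sketch of that matching rule follows; the table, names and tags are made up for illustration, and the sketch returns -1 where the driver returns -ENODEV:

	#include <stdio.h>
	#include <string.h>

	/* Hypothetical stand-in for struct stmmac_pci_dmi_data */
	struct dmi_entry {
		const char *name;	/* DMI board name */
		const char *asset_tag;	/* NULL means: do not check */
		unsigned int func;	/* PCI function number */
		int phy_addr;
	};

	/* Same rule as the patched stmmac_pci_find_phy_addr(): name and
	 * func must match; the asset tag is compared only when the table
	 * entry carries one.
	 */
	static int find_phy_addr(const struct dmi_entry *dmi, const char *name,
				 const char *asset_tag, unsigned int func)
	{
		for (; dmi->name && *dmi->name; dmi++) {
			if (strcmp(dmi->name, name) || dmi->func != func)
				continue;
			if (dmi->asset_tag && strcmp(dmi->asset_tag, asset_tag))
				continue;
			return dmi->phy_addr;
		}
		return -1;	/* the driver returns -ENODEV here */
	}

	int main(void)
	{
		static const struct dmi_entry table[] = {
			{ "EXAMPLE-BOARD", NULL, 6, 1 },
			{ "EXAMPLE-BOARD", "TAG-A", 7, 1 },
			{ NULL, NULL, 0, 0 },	/* sentinel, like the {} entry */
		};

		printf("func 6: phy_addr %d\n",
		       find_phy_addr(table, "EXAMPLE-BOARD", "TAG-A", 6));
		printf("func 7: phy_addr %d\n",
		       find_phy_addr(table, "EXAMPLE-BOARD", "TAG-A", 7));
		return 0;
	}
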
+@@ -107,27 +134,12 @@ static int quark_default_data(struct pla + plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn); + plat->phy_addr = ret; + plat->interface = PHY_INTERFACE_MODE_RMII; +- plat->clk_csr = 2; +- plat->has_gmac = 1; +- plat->force_sf_dma_mode = 1; +- +- plat->mdio_bus_data->phy_reset = NULL; +- plat->mdio_bus_data->phy_mask = 0; + + plat->dma_cfg->pbl = 16; + plat->dma_cfg->pblx8 = true; + plat->dma_cfg->fixed_burst = 1; + /* AXI (TODO) */ + +- /* Set default value for multicast hash bins */ +- plat->multicast_filter_bins = HASH_TABLE_SIZE; +- +- /* Set default value for unicast filter entries */ +- plat->unicast_filter_entries = 1; +- +- /* Set the maxmtu to a default of JUMBO_LEN */ +- plat->maxmtu = JUMBO_LEN; +- + return 0; + } + +@@ -142,6 +154,24 @@ static struct stmmac_pci_dmi_data quark_ + .func = 6, + .phy_addr = 1, + }, ++ { ++ .name = "SIMATIC IOT2000", ++ .asset_tag = "6ES7647-0AA00-0YA2", ++ .func = 6, ++ .phy_addr = 1, ++ }, ++ { ++ .name = "SIMATIC IOT2000", ++ .asset_tag = "6ES7647-0AA00-1YA2", ++ .func = 6, ++ .phy_addr = 1, ++ }, ++ { ++ .name = "SIMATIC IOT2000", ++ .asset_tag = "6ES7647-0AA00-1YA2", ++ .func = 7, ++ .phy_addr = 1, ++ }, + {} + }; + +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +@@ -108,7 +108,7 @@ static struct stmmac_axi *stmmac_axi_set + if (!np) + return NULL; + +- axi = kzalloc(sizeof(*axi), GFP_KERNEL); ++ axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL); + if (!axi) { + of_node_put(np); + return ERR_PTR(-ENOMEM); +@@ -132,6 +132,155 @@ static struct stmmac_axi *stmmac_axi_set + } + + /** ++ * stmmac_mtl_setup - parse DT parameters for multiple queues configuration ++ * @pdev: platform device ++ */ ++static void stmmac_mtl_setup(struct platform_device *pdev, ++ struct plat_stmmacenet_data *plat) ++{ ++ struct device_node *q_node; ++ struct device_node *rx_node; ++ struct device_node *tx_node; ++ u8 queue = 0; ++ ++ /* For backwards-compatibility with device trees that don't have any ++ * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back ++ * to one RX and TX queues each. 
++ */ ++ plat->rx_queues_to_use = 1; ++ plat->tx_queues_to_use = 1; ++ ++ rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0); ++ if (!rx_node) ++ return; ++ ++ tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0); ++ if (!tx_node) { ++ of_node_put(rx_node); ++ return; ++ } ++ ++ /* Processing RX queues common config */ ++ if (of_property_read_u8(rx_node, "snps,rx-queues-to-use", ++ &plat->rx_queues_to_use)) ++ plat->rx_queues_to_use = 1; ++ ++ if (of_property_read_bool(rx_node, "snps,rx-sched-sp")) ++ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; ++ else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp")) ++ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP; ++ else ++ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; ++ ++ /* Processing individual RX queue config */ ++ for_each_child_of_node(rx_node, q_node) { ++ if (queue >= plat->rx_queues_to_use) ++ break; ++ ++ if (of_property_read_bool(q_node, "snps,dcb-algorithm")) ++ plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB; ++ else if (of_property_read_bool(q_node, "snps,avb-algorithm")) ++ plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB; ++ else ++ plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB; ++ ++ if (of_property_read_u8(q_node, "snps,map-to-dma-channel", ++ &plat->rx_queues_cfg[queue].chan)) ++ plat->rx_queues_cfg[queue].chan = queue; ++ /* TODO: Dynamic mapping to be included in the future */ ++ ++ if (of_property_read_u32(q_node, "snps,priority", ++ &plat->rx_queues_cfg[queue].prio)) { ++ plat->rx_queues_cfg[queue].prio = 0; ++ plat->rx_queues_cfg[queue].use_prio = false; ++ } else { ++ plat->rx_queues_cfg[queue].use_prio = true; ++ } ++ ++ /* RX queue specific packet type routing */ ++ if (of_property_read_bool(q_node, "snps,route-avcp")) ++ plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ; ++ else if (of_property_read_bool(q_node, "snps,route-ptp")) ++ plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ; ++ else if (of_property_read_bool(q_node, "snps,route-dcbcp")) ++ plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ; ++ else if (of_property_read_bool(q_node, "snps,route-up")) ++ plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ; ++ else if (of_property_read_bool(q_node, "snps,route-multi-broad")) ++ plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ; ++ else ++ plat->rx_queues_cfg[queue].pkt_route = 0x0; ++ ++ queue++; ++ } ++ ++ /* Processing TX queues common config */ ++ if (of_property_read_u8(tx_node, "snps,tx-queues-to-use", ++ &plat->tx_queues_to_use)) ++ plat->tx_queues_to_use = 1; ++ ++ if (of_property_read_bool(tx_node, "snps,tx-sched-wrr")) ++ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR; ++ else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq")) ++ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ; ++ else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr")) ++ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR; ++ else if (of_property_read_bool(tx_node, "snps,tx-sched-sp")) ++ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP; ++ else ++ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP; ++ ++ queue = 0; ++ ++ /* Processing individual TX queue config */ ++ for_each_child_of_node(tx_node, q_node) { ++ if (queue >= plat->tx_queues_to_use) ++ break; ++ ++ if (of_property_read_u8(q_node, "snps,weight", ++ &plat->tx_queues_cfg[queue].weight)) ++ plat->tx_queues_cfg[queue].weight = 0x10 + queue; ++ ++ if (of_property_read_bool(q_node, "snps,dcb-algorithm")) { ++ plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB; ++ } else if 
(of_property_read_bool(q_node, ++ "snps,avb-algorithm")) { ++ plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB; ++ ++ /* Credit Base Shaper parameters used by AVB */ ++ if (of_property_read_u32(q_node, "snps,send_slope", ++ &plat->tx_queues_cfg[queue].send_slope)) ++ plat->tx_queues_cfg[queue].send_slope = 0x0; ++ if (of_property_read_u32(q_node, "snps,idle_slope", ++ &plat->tx_queues_cfg[queue].idle_slope)) ++ plat->tx_queues_cfg[queue].idle_slope = 0x0; ++ if (of_property_read_u32(q_node, "snps,high_credit", ++ &plat->tx_queues_cfg[queue].high_credit)) ++ plat->tx_queues_cfg[queue].high_credit = 0x0; ++ if (of_property_read_u32(q_node, "snps,low_credit", ++ &plat->tx_queues_cfg[queue].low_credit)) ++ plat->tx_queues_cfg[queue].low_credit = 0x0; ++ } else { ++ plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB; ++ } ++ ++ if (of_property_read_u32(q_node, "snps,priority", ++ &plat->tx_queues_cfg[queue].prio)) { ++ plat->tx_queues_cfg[queue].prio = 0; ++ plat->tx_queues_cfg[queue].use_prio = false; ++ } else { ++ plat->tx_queues_cfg[queue].use_prio = true; ++ } ++ ++ queue++; ++ } ++ ++ of_node_put(rx_node); ++ of_node_put(tx_node); ++ of_node_put(q_node); ++} ++ ++/** + * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources + * @plat: driver data platform structure + * @np: device tree node +@@ -340,6 +489,8 @@ stmmac_probe_config_dt(struct platform_d + + plat->axi = stmmac_axi_setup(pdev); + ++ stmmac_mtl_setup(pdev, plat); ++ + /* clock setup */ + plat->stmmac_clk = devm_clk_get(&pdev->dev, + STMMAC_RESOURCE_NAME); +@@ -359,13 +510,12 @@ stmmac_probe_config_dt(struct platform_d + clk_prepare_enable(plat->pclk); + + /* Fall-back to main clock in case of no PTP ref is passed */ +- plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "clk_ptp_ref"); ++ plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref"); + if (IS_ERR(plat->clk_ptp_ref)) { + plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk); + plat->clk_ptp_ref = NULL; + dev_warn(&pdev->dev, "PTP uses main clock\n"); + } else { +- clk_prepare_enable(plat->clk_ptp_ref); + plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref); + dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate); + } +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +@@ -59,7 +59,8 @@ + /* Enable Snapshot for Messages Relevant to Master */ + #define PTP_TCR_TSMSTRENA BIT(15) + /* Select PTP packets for Taking Snapshots */ +-#define PTP_TCR_SNAPTYPSEL_1 GENMASK(17, 16) ++#define PTP_TCR_SNAPTYPSEL_1 BIT(16) ++#define PTP_GMAC4_TCR_SNAPTYPSEL_1 GENMASK(17, 16) + /* Enable MAC address for PTP Frame Filtering */ + #define PTP_TCR_TSENMACADDR BIT(18) + +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -28,6 +28,9 @@ + + #include + ++#define MTL_MAX_RX_QUEUES 8 ++#define MTL_MAX_TX_QUEUES 8 ++ + #define STMMAC_RX_COE_NONE 0 + #define STMMAC_RX_COE_TYPE1 1 + #define STMMAC_RX_COE_TYPE2 2 +@@ -44,6 +47,18 @@ + #define STMMAC_CSR_150_250M 0x4 /* MDC = clk_scr_i/102 */ + #define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/122 */ + ++/* MTL algorithms identifiers */ ++#define MTL_TX_ALGORITHM_WRR 0x0 ++#define MTL_TX_ALGORITHM_WFQ 0x1 ++#define MTL_TX_ALGORITHM_DWRR 0x2 ++#define MTL_TX_ALGORITHM_SP 0x3 ++#define MTL_RX_ALGORITHM_SP 0x4 ++#define MTL_RX_ALGORITHM_WSP 0x5 ++ ++/* RX/TX Queue Mode */ ++#define MTL_QUEUE_AVB 0x0 ++#define MTL_QUEUE_DCB 0x1 ++ + /* The MDC clock could be set higher than the IEEE 802.3 + * specified frequency limit 0f 2.5 MHz, by programming a 
clock divider + * of value different than the above defined values. The resultant MDIO +@@ -109,6 +124,26 @@ struct stmmac_axi { + bool axi_rb; + }; + ++struct stmmac_rxq_cfg { ++ u8 mode_to_use; ++ u8 chan; ++ u8 pkt_route; ++ bool use_prio; ++ u32 prio; ++}; ++ ++struct stmmac_txq_cfg { ++ u8 weight; ++ u8 mode_to_use; ++ /* Credit Base Shaper parameters */ ++ u32 send_slope; ++ u32 idle_slope; ++ u32 high_credit; ++ u32 low_credit; ++ bool use_prio; ++ u32 prio; ++}; ++ + struct plat_stmmacenet_data { + int bus_id; + int phy_addr; +@@ -133,6 +168,12 @@ struct plat_stmmacenet_data { + int unicast_filter_entries; + int tx_fifo_size; + int rx_fifo_size; ++ u8 rx_queues_to_use; ++ u8 tx_queues_to_use; ++ u8 rx_sched_algorithm; ++ u8 tx_sched_algorithm; ++ struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES]; ++ struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES]; + void (*fix_mac_speed)(void *priv, unsigned int speed); + int (*init)(struct platform_device *pdev, void *priv); + void (*exit)(struct platform_device *pdev, void *priv); diff --git a/target/linux/sunxi/patches-4.9/0053-stmmac-form-4-13.patch b/target/linux/sunxi/patches-4.9/0053-stmmac-form-4-13.patch new file mode 100644 index 000000000..f829b79dc --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0053-stmmac-form-4-13.patch @@ -0,0 +1,1924 @@ +--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig ++++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig +@@ -145,6 +145,17 @@ config DWMAC_SUNXI + This selects Allwinner SoC glue layer support for the + stmmac device driver. This driver is used for A20/A31 + GMAC ethernet controller. ++ ++config DWMAC_SUN8I ++ tristate "Allwinner sun8i GMAC support" ++ default ARCH_SUNXI ++ depends on OF && (ARCH_SUNXI || COMPILE_TEST) ++ ---help--- ++ Support for Allwinner H3 A83T A64 EMAC ethernet controllers. ++ ++ This selects Allwinner SoC glue layer support for the ++ stmmac device driver. This driver is used for H3/A83T/A64 ++ EMAC ethernet controller. 
+ endif
+ 
+ config STMMAC_PCI
+--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
++++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
+@@ -16,6 +16,7 @@ obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-alt
+ obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
+ obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o
+ obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
++obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o
+ obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o
+ obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
+ stmmac-platform-objs:= stmmac_platform.o
+--- a/drivers/net/ethernet/stmicro/stmmac/common.h
++++ b/drivers/net/ethernet/stmicro/stmmac/common.h
+@@ -549,9 +549,11 @@ extern const struct stmmac_hwtimestamp s
+ extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
+ 
+ struct mac_link {
+-	int port;
+-	int duplex;
+-	int speed;
++	u32 speed_mask;
++	u32 speed10;
++	u32 speed100;
++	u32 speed1000;
++	u32 duplex;
+ };
+ 
+ struct mii_regs {
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+@@ -269,7 +269,10 @@ static int socfpga_dwmac_set_phy_mode(st
+ 	ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
+ 	ctrl |= val << reg_shift;
+ 
+-	if (dwmac->f2h_ptp_ref_clk) {
++	if (dwmac->f2h_ptp_ref_clk ||
++	    phymode == PHY_INTERFACE_MODE_MII ||
++	    phymode == PHY_INTERFACE_MODE_GMII ||
++	    phymode == PHY_INTERFACE_MODE_SGMII) {
+ 		ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
+ 		regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+ 			    &module);
+--- /dev/null
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+@@ -0,0 +1,1007 @@
++/*
++ * dwmac-sun8i.c - Allwinner sun8i DWMAC specific glue layer
++ *
++ * Copyright (C) 2017 Corentin Labbe
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++#include "stmmac.h"
++#include "stmmac_platform.h"
++
++/* General notes on dwmac-sun8i:
++ * Locking: no locking is necessary in this file because all necessary locking
++ * is done in the "stmmac files"
++ */
++
++/* struct emac_variant - Describe a dwmac-sun8i hardware variant
++ * @default_syscon_value: The default value of the EMAC register in syscon.
++ *	This value is used to properly disable the EMAC
++ *	and is a good starting value in case the boot
++ *	process (U-Boot) has left some configuration behind.
++ * @internal_phy: Does the MAC embed an internal PHY
++ * @support_mii: Does the MAC handle MII
++ * @support_rmii: Does the MAC handle RMII
++ * @support_rgmii: Does the MAC handle RGMII
++ */
++struct emac_variant {
++	u32 default_syscon_value;
++	int internal_phy;
++	bool support_mii;
++	bool support_rmii;
++	bool support_rgmii;
++};
++
++/* struct sunxi_priv_data - hold all sunxi private data
++ * @tx_clk: reference to MAC TX clock
++ * @ephy_clk: reference to the optional EPHY clock for the internal PHY
++ * @regulator: reference to the optional regulator
++ * @rst_ephy: reference to the optional EPHY reset for the internal PHY
++ * @variant: reference to the current board variant
++ * @regmap: regmap for using the syscon
++ * @use_internal_phy: Does the current PHY choice imply using the internal PHY
++ */
++struct sunxi_priv_data {
++	struct clk *tx_clk;
++	struct clk *ephy_clk;
++	struct regulator *regulator;
++	struct reset_control *rst_ephy;
++	const struct emac_variant *variant;
++	struct regmap *regmap;
++	bool use_internal_phy;
++};
++
++static const struct emac_variant emac_variant_h3 = {
++	.default_syscon_value = 0x58000,
++	.internal_phy = PHY_INTERFACE_MODE_MII,
++	.support_mii = true,
++	.support_rmii = true,
++	.support_rgmii = true
++};
++
++static const struct emac_variant emac_variant_v3s = {
++	.default_syscon_value = 0x38000,
++	.internal_phy = PHY_INTERFACE_MODE_MII,
++	.support_mii = true
++};
++
++static const struct emac_variant emac_variant_a83t = {
++	.default_syscon_value = 0,
++	.internal_phy = 0,
++	.support_mii = true,
++	.support_rgmii = true
++};
++
++static const struct emac_variant emac_variant_a64 = {
++	.default_syscon_value = 0,
++	.internal_phy = 0,
++	.support_mii = true,
++	.support_rmii = true,
++	.support_rgmii = true
++};
++
++#define EMAC_BASIC_CTL0 0x00
++#define EMAC_BASIC_CTL1 0x04
++#define EMAC_INT_STA 0x08
++#define EMAC_INT_EN 0x0C
++#define EMAC_TX_CTL0 0x10
++#define EMAC_TX_CTL1 0x14
++#define EMAC_TX_FLOW_CTL 0x1C
++#define EMAC_TX_DESC_LIST 0x20
++#define EMAC_RX_CTL0 0x24
++#define EMAC_RX_CTL1 0x28
++#define EMAC_RX_DESC_LIST 0x34
++#define EMAC_RX_FRM_FLT 0x38
++#define EMAC_MDIO_CMD 0x48
++#define EMAC_MDIO_DATA 0x4C
++#define EMAC_MACADDR_HI(reg) (0x50 + (reg) * 8)
++#define EMAC_MACADDR_LO(reg) (0x54 + (reg) * 8)
++#define EMAC_TX_DMA_STA 0xB0
++#define EMAC_TX_CUR_DESC 0xB4
++#define EMAC_TX_CUR_BUF 0xB8
++#define EMAC_RX_DMA_STA 0xC0
++#define EMAC_RX_CUR_DESC 0xC4
++#define EMAC_RX_CUR_BUF 0xC8
++
++/* Used in EMAC_BASIC_CTL0 */
++#define EMAC_DUPLEX_FULL BIT(0)
++#define EMAC_LOOPBACK BIT(1)
++#define EMAC_SPEED_1000 0
++#define EMAC_SPEED_100 (0x03 << 2)
++#define EMAC_SPEED_10 (0x02 << 2)
++
++/* Used in EMAC_BASIC_CTL1 */
++#define EMAC_BURSTLEN_SHIFT 24
++
++/* Used in EMAC_RX_FRM_FLT */
++#define EMAC_FRM_FLT_RXALL BIT(0)
++#define EMAC_FRM_FLT_CTL BIT(13)
++#define EMAC_FRM_FLT_MULTICAST BIT(16)
++
++/* Used in RX_CTL1 */
++#define EMAC_RX_MD BIT(1)
++#define EMAC_RX_TH_MASK GENMASK(5, 4)
++#define EMAC_RX_TH_32 0
++#define EMAC_RX_TH_64 (0x1 << 4)
++#define EMAC_RX_TH_96 (0x2 << 4)
++#define EMAC_RX_TH_128 (0x3 << 4)
++#define EMAC_RX_DMA_EN BIT(30)
++#define EMAC_RX_DMA_START BIT(31)
++
++/* Used in TX_CTL1 */
++#define EMAC_TX_MD BIT(1)
++#define EMAC_TX_NEXT_FRM BIT(2)
++#define EMAC_TX_TH_MASK GENMASK(10, 8)
++#define EMAC_TX_TH_64 0
++#define EMAC_TX_TH_128 (0x1 << 8)
++#define EMAC_TX_TH_192 (0x2 << 8)
++#define EMAC_TX_TH_256 (0x3 << 8)
++#define EMAC_TX_DMA_EN BIT(30)
++#define EMAC_TX_DMA_START BIT(31)
++
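The EMAC_RX_TH_* and EMAC_TX_TH_* values above are encodings of multi-bit fields (bits 5:4 and 10:8 of the RX/TX control registers), not independent flags, so a writer must clear the whole field with the corresponding *_TH_MASK before ORing in a new value; sun8i_dwmac_dma_operation_mode() further down does exactly that. A small stand-alone restatement of the RX case in plain C (illustrative only, not driver code):

	#include <stdio.h>

	/* Userspace restatement of the RX threshold field above:
	 * the field lives in bits 5:4 of EMAC_RX_CTL1, hence GENMASK(5, 4).
	 */
	#define EMAC_RX_TH_MASK (0x3u << 4)
	#define EMAC_RX_TH_64 (0x1u << 4)

	/* The read-modify-write pattern used when programming the field */
	static unsigned int set_rx_threshold(unsigned int ctl1, unsigned int th)
	{
		ctl1 &= ~EMAC_RX_TH_MASK;	/* clear the old encoding */
		ctl1 |= th;			/* install the new one */
		return ctl1;
	}

	int main(void)
	{
		/* 0xFFFFFFFF -> 0xFFFFFFDF: bits 5:4 become the 64-byte code */
		printf("0x%08X\n", set_rx_threshold(0xFFFFFFFFu, EMAC_RX_TH_64));
		return 0;
	}
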
++/* Used in RX_CTL0 */ ++#define EMAC_RX_RECEIVER_EN BIT(31) ++#define EMAC_RX_DO_CRC BIT(27) ++#define EMAC_RX_FLOW_CTL_EN BIT(16) ++ ++/* Used in TX_CTL0 */ ++#define EMAC_TX_TRANSMITTER_EN BIT(31) ++ ++/* Used in EMAC_TX_FLOW_CTL */ ++#define EMAC_TX_FLOW_CTL_EN BIT(0) ++ ++/* Used in EMAC_INT_STA */ ++#define EMAC_TX_INT BIT(0) ++#define EMAC_TX_DMA_STOP_INT BIT(1) ++#define EMAC_TX_BUF_UA_INT BIT(2) ++#define EMAC_TX_TIMEOUT_INT BIT(3) ++#define EMAC_TX_UNDERFLOW_INT BIT(4) ++#define EMAC_TX_EARLY_INT BIT(5) ++#define EMAC_RX_INT BIT(8) ++#define EMAC_RX_BUF_UA_INT BIT(9) ++#define EMAC_RX_DMA_STOP_INT BIT(10) ++#define EMAC_RX_TIMEOUT_INT BIT(11) ++#define EMAC_RX_OVERFLOW_INT BIT(12) ++#define EMAC_RX_EARLY_INT BIT(13) ++#define EMAC_RGMII_STA_INT BIT(16) ++ ++#define MAC_ADDR_TYPE_DST BIT(31) ++ ++/* H3 specific bits for EPHY */ ++#define H3_EPHY_ADDR_SHIFT 20 ++#define H3_EPHY_CLK_SEL BIT(18) /* 1: 24MHz, 0: 25MHz */ ++#define H3_EPHY_LED_POL BIT(17) /* 1: active low, 0: active high */ ++#define H3_EPHY_SHUTDOWN BIT(16) /* 1: shutdown, 0: power up */ ++#define H3_EPHY_SELECT BIT(15) /* 1: internal PHY, 0: external PHY */ ++ ++/* H3/A64 specific bits */ ++#define SYSCON_RMII_EN BIT(13) /* 1: enable RMII (overrides EPIT) */ ++ ++/* Generic system control EMAC_CLK bits */ ++#define SYSCON_ETXDC_MASK GENMASK(2, 0) ++#define SYSCON_ETXDC_SHIFT 10 ++#define SYSCON_ERXDC_MASK GENMASK(4, 0) ++#define SYSCON_ERXDC_SHIFT 5 ++/* EMAC PHY Interface Type */ ++#define SYSCON_EPIT BIT(2) /* 1: RGMII, 0: MII */ ++#define SYSCON_ETCS_MASK GENMASK(1, 0) ++#define SYSCON_ETCS_MII 0x0 ++#define SYSCON_ETCS_EXT_GMII 0x1 ++#define SYSCON_ETCS_INT_GMII 0x2 ++#define SYSCON_EMAC_REG 0x30 ++ ++/* sun8i_dwmac_dma_reset() - reset the EMAC ++ * Called from stmmac via stmmac_dma_ops->reset ++ */ ++static int sun8i_dwmac_dma_reset(void __iomem *ioaddr) ++{ ++ writel(0, ioaddr + EMAC_RX_CTL1); ++ writel(0, ioaddr + EMAC_TX_CTL1); ++ writel(0, ioaddr + EMAC_RX_FRM_FLT); ++ writel(0, ioaddr + EMAC_RX_DESC_LIST); ++ writel(0, ioaddr + EMAC_TX_DESC_LIST); ++ writel(0, ioaddr + EMAC_INT_EN); ++ writel(0x1FFFFFF, ioaddr + EMAC_INT_STA); ++ return 0; ++} ++ ++/* sun8i_dwmac_dma_init() - initialize the EMAC ++ * Called from stmmac via stmmac_dma_ops->init ++ */ ++static void sun8i_dwmac_dma_init(void __iomem *ioaddr, ++ struct stmmac_dma_cfg *dma_cfg, ++ u32 dma_tx, u32 dma_rx, int atds) ++{ ++ /* Write TX and RX descriptors address */ ++ writel(dma_rx, ioaddr + EMAC_RX_DESC_LIST); ++ writel(dma_tx, ioaddr + EMAC_TX_DESC_LIST); ++ ++ writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN); ++ writel(0x1FFFFFF, ioaddr + EMAC_INT_STA); ++} ++ ++/* sun8i_dwmac_dump_regs() - Dump EMAC address space ++ * Called from stmmac_dma_ops->dump_regs ++ * Used for ethtool ++ */ ++static void sun8i_dwmac_dump_regs(void __iomem *ioaddr, u32 *reg_space) ++{ ++ int i; ++ ++ for (i = 0; i < 0xC8; i += 4) { ++ if (i == 0x32 || i == 0x3C) ++ continue; ++ reg_space[i / 4] = readl(ioaddr + i); ++ } ++} ++ ++/* sun8i_dwmac_dump_mac_regs() - Dump EMAC address space ++ * Called from stmmac_ops->dump_regs ++ * Used for ethtool ++ */ ++static void sun8i_dwmac_dump_mac_regs(struct mac_device_info *hw, ++ u32 *reg_space) ++{ ++ int i; ++ void __iomem *ioaddr = hw->pcsr; ++ ++ for (i = 0; i < 0xC8; i += 4) { ++ if (i == 0x32 || i == 0x3C) ++ continue; ++ reg_space[i / 4] = readl(ioaddr + i); ++ } ++} ++ ++static void sun8i_dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan) ++{ ++ writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN); ++} ++ 
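One detail of the two dump helpers above: the loop walks the register file in steps of 4, so the i == 0x32 comparison can never be true and only offset 0x3C is actually skipped. A stand-alone sketch that prints which offsets end up in reg_space (illustrative only):

	#include <stdio.h>

	int main(void)
	{
		int i;

		/* Mirrors the loop in sun8i_dwmac_dump_regs() */
		for (i = 0; i < 0xC8; i += 4) {
			if (i == 0x32 || i == 0x3C)	/* 0x32 is never hit */
				continue;
			printf("reg_space[%d] = EMAC register 0x%02X\n", i / 4, i);
		}
		return 0;
	}
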
++static void sun8i_dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan) ++{ ++ writel(0, ioaddr + EMAC_INT_EN); ++} ++ ++static void sun8i_dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan) ++{ ++ u32 v; ++ ++ v = readl(ioaddr + EMAC_TX_CTL1); ++ v |= EMAC_TX_DMA_START; ++ v |= EMAC_TX_DMA_EN; ++ writel(v, ioaddr + EMAC_TX_CTL1); ++} ++ ++static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr) ++{ ++ u32 v; ++ ++ v = readl(ioaddr + EMAC_TX_CTL1); ++ v |= EMAC_TX_DMA_START; ++ v |= EMAC_TX_DMA_EN; ++ writel(v, ioaddr + EMAC_TX_CTL1); ++} ++ ++static void sun8i_dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan) ++{ ++ u32 v; ++ ++ v = readl(ioaddr + EMAC_TX_CTL1); ++ v &= ~EMAC_TX_DMA_EN; ++ writel(v, ioaddr + EMAC_TX_CTL1); ++} ++ ++static void sun8i_dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan) ++{ ++ u32 v; ++ ++ v = readl(ioaddr + EMAC_RX_CTL1); ++ v |= EMAC_RX_DMA_START; ++ v |= EMAC_RX_DMA_EN; ++ writel(v, ioaddr + EMAC_RX_CTL1); ++} ++ ++static void sun8i_dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan) ++{ ++ u32 v; ++ ++ v = readl(ioaddr + EMAC_RX_CTL1); ++ v &= ~EMAC_RX_DMA_EN; ++ writel(v, ioaddr + EMAC_RX_CTL1); ++} ++ ++static int sun8i_dwmac_dma_interrupt(void __iomem *ioaddr, ++ struct stmmac_extra_stats *x, u32 chan) ++{ ++ u32 v; ++ int ret = 0; ++ ++ v = readl(ioaddr + EMAC_INT_STA); ++ ++ if (v & EMAC_TX_INT) { ++ ret |= handle_tx; ++ x->tx_normal_irq_n++; ++ } ++ ++ if (v & EMAC_TX_DMA_STOP_INT) ++ x->tx_process_stopped_irq++; ++ ++ if (v & EMAC_TX_BUF_UA_INT) ++ x->tx_process_stopped_irq++; ++ ++ if (v & EMAC_TX_TIMEOUT_INT) ++ ret |= tx_hard_error; ++ ++ if (v & EMAC_TX_UNDERFLOW_INT) { ++ ret |= tx_hard_error; ++ x->tx_undeflow_irq++; ++ } ++ ++ if (v & EMAC_TX_EARLY_INT) ++ x->tx_early_irq++; ++ ++ if (v & EMAC_RX_INT) { ++ ret |= handle_rx; ++ x->rx_normal_irq_n++; ++ } ++ ++ if (v & EMAC_RX_BUF_UA_INT) ++ x->rx_buf_unav_irq++; ++ ++ if (v & EMAC_RX_DMA_STOP_INT) ++ x->rx_process_stopped_irq++; ++ ++ if (v & EMAC_RX_TIMEOUT_INT) ++ ret |= tx_hard_error; ++ ++ if (v & EMAC_RX_OVERFLOW_INT) { ++ ret |= tx_hard_error; ++ x->rx_overflow_irq++; ++ } ++ ++ if (v & EMAC_RX_EARLY_INT) ++ x->rx_early_irq++; ++ ++ if (v & EMAC_RGMII_STA_INT) ++ x->irq_rgmii_n++; ++ ++ writel(v, ioaddr + EMAC_INT_STA); ++ ++ return ret; ++} ++ ++static void sun8i_dwmac_dma_operation_mode(void __iomem *ioaddr, int txmode, ++ int rxmode, int rxfifosz) ++{ ++ u32 v; ++ ++ v = readl(ioaddr + EMAC_TX_CTL1); ++ if (txmode == SF_DMA_MODE) { ++ v |= EMAC_TX_MD; ++ /* Undocumented bit (called TX_NEXT_FRM in BSP), the original ++ * comment is ++ * "Operating on second frame increase the performance ++ * especially when transmit store-and-forward is used." 
++ */ ++ v |= EMAC_TX_NEXT_FRM; ++ } else { ++ v &= ~EMAC_TX_MD; ++ v &= ~EMAC_TX_TH_MASK; ++ if (txmode < 64) ++ v |= EMAC_TX_TH_64; ++ else if (txmode < 128) ++ v |= EMAC_TX_TH_128; ++ else if (txmode < 192) ++ v |= EMAC_TX_TH_192; ++ else if (txmode < 256) ++ v |= EMAC_TX_TH_256; ++ } ++ writel(v, ioaddr + EMAC_TX_CTL1); ++ ++ v = readl(ioaddr + EMAC_RX_CTL1); ++ if (rxmode == SF_DMA_MODE) { ++ v |= EMAC_RX_MD; ++ } else { ++ v &= ~EMAC_RX_MD; ++ v &= ~EMAC_RX_TH_MASK; ++ if (rxmode < 32) ++ v |= EMAC_RX_TH_32; ++ else if (rxmode < 64) ++ v |= EMAC_RX_TH_64; ++ else if (rxmode < 96) ++ v |= EMAC_RX_TH_96; ++ else if (rxmode < 128) ++ v |= EMAC_RX_TH_128; ++ } ++ writel(v, ioaddr + EMAC_RX_CTL1); ++} ++ ++static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = { ++ .reset = sun8i_dwmac_dma_reset, ++ .init = sun8i_dwmac_dma_init, ++ .dump_regs = sun8i_dwmac_dump_regs, ++ .dma_mode = sun8i_dwmac_dma_operation_mode, ++ .enable_dma_transmission = sun8i_dwmac_enable_dma_transmission, ++ .enable_dma_irq = sun8i_dwmac_enable_dma_irq, ++ .disable_dma_irq = sun8i_dwmac_disable_dma_irq, ++ .start_tx = sun8i_dwmac_dma_start_tx, ++ .stop_tx = sun8i_dwmac_dma_stop_tx, ++ .start_rx = sun8i_dwmac_dma_start_rx, ++ .stop_rx = sun8i_dwmac_dma_stop_rx, ++ .dma_interrupt = sun8i_dwmac_dma_interrupt, ++}; ++ ++static int sun8i_dwmac_init(struct platform_device *pdev, void *priv) ++{ ++ struct sunxi_priv_data *gmac = priv; ++ int ret; ++ ++ if (gmac->regulator) { ++ ret = regulator_enable(gmac->regulator); ++ if (ret) { ++ dev_err(&pdev->dev, "Fail to enable regulator\n"); ++ return ret; ++ } ++ } ++ ++ ret = clk_prepare_enable(gmac->tx_clk); ++ if (ret) { ++ if (gmac->regulator) ++ regulator_disable(gmac->regulator); ++ dev_err(&pdev->dev, "Could not enable AHB clock\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static void sun8i_dwmac_core_init(struct mac_device_info *hw, int mtu) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ u32 v; ++ ++ v = (8 << EMAC_BURSTLEN_SHIFT); /* burst len */ ++ writel(v, ioaddr + EMAC_BASIC_CTL1); ++} ++ ++static void sun8i_dwmac_set_mac(void __iomem *ioaddr, bool enable) ++{ ++ u32 t, r; ++ ++ t = readl(ioaddr + EMAC_TX_CTL0); ++ r = readl(ioaddr + EMAC_RX_CTL0); ++ if (enable) { ++ t |= EMAC_TX_TRANSMITTER_EN; ++ r |= EMAC_RX_RECEIVER_EN; ++ } else { ++ t &= ~EMAC_TX_TRANSMITTER_EN; ++ r &= ~EMAC_RX_RECEIVER_EN; ++ } ++ writel(t, ioaddr + EMAC_TX_CTL0); ++ writel(r, ioaddr + EMAC_RX_CTL0); ++} ++ ++/* Set MAC address at slot reg_n ++ * All slot > 0 need to be enabled with MAC_ADDR_TYPE_DST ++ * If addr is NULL, clear the slot ++ */ ++static void sun8i_dwmac_set_umac_addr(struct mac_device_info *hw, ++ unsigned char *addr, ++ unsigned int reg_n) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ u32 v; ++ ++ if (!addr) { ++ writel(0, ioaddr + EMAC_MACADDR_HI(reg_n)); ++ return; ++ } ++ ++ stmmac_set_mac_addr(ioaddr, addr, EMAC_MACADDR_HI(reg_n), ++ EMAC_MACADDR_LO(reg_n)); ++ if (reg_n > 0) { ++ v = readl(ioaddr + EMAC_MACADDR_HI(reg_n)); ++ v |= MAC_ADDR_TYPE_DST; ++ writel(v, ioaddr + EMAC_MACADDR_HI(reg_n)); ++ } ++} ++ ++static void sun8i_dwmac_get_umac_addr(struct mac_device_info *hw, ++ unsigned char *addr, ++ unsigned int reg_n) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ ++ stmmac_get_mac_addr(ioaddr, addr, EMAC_MACADDR_HI(reg_n), ++ EMAC_MACADDR_LO(reg_n)); ++} ++ ++/* caution this function must return non 0 to work */ ++static int sun8i_dwmac_rx_ipc_enable(struct mac_device_info *hw) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ u32 v; ++ ++ v = readl(ioaddr + EMAC_RX_CTL0); ++ 
v |= EMAC_RX_DO_CRC;
++	writel(v, ioaddr + EMAC_RX_CTL0);
++
++	return 1;
++}
++
++static void sun8i_dwmac_set_filter(struct mac_device_info *hw,
++				   struct net_device *dev)
++{
++	void __iomem *ioaddr = hw->pcsr;
++	u32 v;
++	int i = 1;
++	struct netdev_hw_addr *ha;
++	int macaddrs = netdev_uc_count(dev) + netdev_mc_count(dev) + 1;
++
++	v = EMAC_FRM_FLT_CTL;
++
++	if (dev->flags & IFF_PROMISC) {
++		v = EMAC_FRM_FLT_RXALL;
++	} else if (dev->flags & IFF_ALLMULTI) {
++		v |= EMAC_FRM_FLT_MULTICAST;
++	} else if (macaddrs <= hw->unicast_filter_entries) {
++		if (!netdev_mc_empty(dev)) {
++			netdev_for_each_mc_addr(ha, dev) {
++				sun8i_dwmac_set_umac_addr(hw, ha->addr, i);
++				i++;
++			}
++		}
++		if (!netdev_uc_empty(dev)) {
++			netdev_for_each_uc_addr(ha, dev) {
++				sun8i_dwmac_set_umac_addr(hw, ha->addr, i);
++				i++;
++			}
++		}
++	} else {
++		netdev_info(dev, "Too many addresses, switching to promiscuous\n");
++		v = EMAC_FRM_FLT_RXALL;
++	}
++
++	/* Disable unused address filter slots */
++	while (i < hw->unicast_filter_entries)
++		sun8i_dwmac_set_umac_addr(hw, NULL, i++);
++
++	writel(v, ioaddr + EMAC_RX_FRM_FLT);
++}
++
++static void sun8i_dwmac_flow_ctrl(struct mac_device_info *hw,
++				  unsigned int duplex, unsigned int fc,
++				  unsigned int pause_time, u32 tx_cnt)
++{
++	void __iomem *ioaddr = hw->pcsr;
++	u32 v;
++
++	v = readl(ioaddr + EMAC_RX_CTL0);
++	if (fc == FLOW_AUTO)
++		v |= EMAC_RX_FLOW_CTL_EN;
++	else
++		v &= ~EMAC_RX_FLOW_CTL_EN;
++	writel(v, ioaddr + EMAC_RX_CTL0);
++
++	v = readl(ioaddr + EMAC_TX_FLOW_CTL);
++	if (fc == FLOW_AUTO)
++		v |= EMAC_TX_FLOW_CTL_EN;
++	else
++		v &= ~EMAC_TX_FLOW_CTL_EN;
++	writel(v, ioaddr + EMAC_TX_FLOW_CTL);
++}
++
++static int sun8i_dwmac_reset(struct stmmac_priv *priv)
++{
++	u32 v;
++	int err;
++
++	v = readl(priv->ioaddr + EMAC_BASIC_CTL1);
++	writel(v | 0x01, priv->ioaddr + EMAC_BASIC_CTL1);
++
++	/* The timeout was previously set to 10ms, but some boards (e.g. the
++	 * Orange Pi Zero) need more if no cable is plugged in. 100ms seems OK
++	 */
++	err = readl_poll_timeout(priv->ioaddr + EMAC_BASIC_CTL1, v,
++				 !(v & 0x01), 100, 100000);
++
++	if (err) {
++		dev_err(priv->device, "EMAC reset timeout\n");
++		return -EFAULT;
++	}
++	return 0;
++}
++
++static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
++{
++	struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
++	struct device_node *node = priv->device->of_node;
++	int ret;
++	u32 reg, val;
++
++	regmap_read(gmac->regmap, SYSCON_EMAC_REG, &val);
++	reg = gmac->variant->default_syscon_value;
++	if (reg != val)
++		dev_warn(priv->device,
++			 "Current syscon value is not the default %x (expected %x)\n",
++			 val, reg);
++
++	if (gmac->variant->internal_phy) {
++		if (!gmac->use_internal_phy) {
++			/* switch to external PHY interface */
++			reg &= ~H3_EPHY_SELECT;
++		} else {
++			reg |= H3_EPHY_SELECT;
++			reg &= ~H3_EPHY_SHUTDOWN;
++			dev_dbg(priv->device, "Select internal_phy %x\n", reg);
++
++			if (of_property_read_bool(priv->plat->phy_node,
++						  "allwinner,leds-active-low"))
++				reg |= H3_EPHY_LED_POL;
++			else
++				reg &= ~H3_EPHY_LED_POL;
++
++			/* Force EPHY xtal frequency to 24MHz. */
++			reg |= H3_EPHY_CLK_SEL;
++
++			ret = of_mdio_parse_addr(priv->device,
++						 priv->plat->phy_node);
++			if (ret < 0) {
++				dev_err(priv->device, "Could not parse MDIO addr\n");
++				return ret;
++			}
++			/* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
++			 * address. No need to mask it again.
++			 */
++			reg |= ret << H3_EPHY_ADDR_SHIFT;
++		}
++	}
++
++	if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
++		if (val % 100) {
++			dev_err(priv->device, "tx-delay must be a multiple of 100\n");
++			return -EINVAL;
++		}
++		val /= 100;
++		dev_dbg(priv->device, "set tx-delay to %x\n", val);
++		if (val <= SYSCON_ETXDC_MASK) {
++			reg &= ~(SYSCON_ETXDC_MASK << SYSCON_ETXDC_SHIFT);
++			reg |= (val << SYSCON_ETXDC_SHIFT);
++		} else {
++			dev_err(priv->device, "Invalid TX clock delay: %d\n",
++				val);
++			return -EINVAL;
++		}
++	}
++
++	if (!of_property_read_u32(node, "allwinner,rx-delay-ps", &val)) {
++		if (val % 100) {
++			dev_err(priv->device, "rx-delay must be a multiple of 100\n");
++			return -EINVAL;
++		}
++		val /= 100;
++		dev_dbg(priv->device, "set rx-delay to %x\n", val);
++		if (val <= SYSCON_ERXDC_MASK) {
++			reg &= ~(SYSCON_ERXDC_MASK << SYSCON_ERXDC_SHIFT);
++			reg |= (val << SYSCON_ERXDC_SHIFT);
++		} else {
++			dev_err(priv->device, "Invalid RX clock delay: %d\n",
++				val);
++			return -EINVAL;
++		}
++	}
++
++	/* Clear interface mode bits */
++	reg &= ~(SYSCON_ETCS_MASK | SYSCON_EPIT);
++	if (gmac->variant->support_rmii)
++		reg &= ~SYSCON_RMII_EN;
++
++	switch (priv->plat->interface) {
++	case PHY_INTERFACE_MODE_MII:
++		/* default */
++		break;
++	case PHY_INTERFACE_MODE_RGMII:
++		reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
++		break;
++	case PHY_INTERFACE_MODE_RMII:
++		reg |= SYSCON_RMII_EN | SYSCON_ETCS_EXT_GMII;
++		break;
++	default:
++		dev_err(priv->device, "Unsupported interface mode: %s",
++			phy_modes(priv->plat->interface));
++		return -EINVAL;
++	}
++
++	regmap_write(gmac->regmap, SYSCON_EMAC_REG, reg);
++
++	return 0;
++}
++
++static void sun8i_dwmac_unset_syscon(struct sunxi_priv_data *gmac)
++{
++	u32 reg = gmac->variant->default_syscon_value;
++
++	regmap_write(gmac->regmap, SYSCON_EMAC_REG, reg);
++}
++
++static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv)
++{
++	struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
++	int ret;
++
++	if (!gmac->use_internal_phy)
++		return 0;
++
++	ret = clk_prepare_enable(gmac->ephy_clk);
++	if (ret) {
++		dev_err(priv->device, "Cannot enable ephy\n");
++		return ret;
++	}
++
++	/* Make sure the EPHY is properly reset, as U-Boot may leave
++	 * it in the deasserted state, and thus it may fail to reset the EMAC.
++	 */
++	reset_control_assert(gmac->rst_ephy);
++
++	ret = reset_control_deassert(gmac->rst_ephy);
++	if (ret) {
++		dev_err(priv->device, "Cannot deassert ephy\n");
++		clk_disable_unprepare(gmac->ephy_clk);
++		return ret;
++	}
++
++	return 0;
++}
++
++static int sun8i_dwmac_unpower_internal_phy(struct sunxi_priv_data *gmac)
++{
++	if (!gmac->use_internal_phy)
++		return 0;
++
++	clk_disable_unprepare(gmac->ephy_clk);
++	reset_control_assert(gmac->rst_ephy);
++	return 0;
++}
++
++/* sun8i_power_phy() - Activate the PHY:
++ * In case of error, no need to call sun8i_unpower_phy(),
++ * it will be called anyway by sun8i_dwmac_exit()
++ */
++static int sun8i_power_phy(struct stmmac_priv *priv)
++{
++	int ret;
++
++	ret = sun8i_dwmac_power_internal_phy(priv);
++	if (ret)
++		return ret;
++
++	ret = sun8i_dwmac_set_syscon(priv);
++	if (ret)
++		return ret;
++
++	/* After changing the syscon value, the MAC needs a reset, or it will
++	 * keep using the last value (and so the last PHY setup).
++ */ ++ ret = sun8i_dwmac_reset(priv); ++ if (ret) ++ return ret; ++ return 0; ++} ++ ++static void sun8i_unpower_phy(struct sunxi_priv_data *gmac) ++{ ++ sun8i_dwmac_unset_syscon(gmac); ++ sun8i_dwmac_unpower_internal_phy(gmac); ++} ++ ++static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv) ++{ ++ struct sunxi_priv_data *gmac = priv; ++ ++ sun8i_unpower_phy(gmac); ++ ++ clk_disable_unprepare(gmac->tx_clk); ++ ++ if (gmac->regulator) ++ regulator_disable(gmac->regulator); ++} ++ ++static const struct stmmac_ops sun8i_dwmac_ops = { ++ .core_init = sun8i_dwmac_core_init, ++ .set_mac = sun8i_dwmac_set_mac, ++ .dump_regs = sun8i_dwmac_dump_mac_regs, ++ .rx_ipc = sun8i_dwmac_rx_ipc_enable, ++ .set_filter = sun8i_dwmac_set_filter, ++ .flow_ctrl = sun8i_dwmac_flow_ctrl, ++ .set_umac_addr = sun8i_dwmac_set_umac_addr, ++ .get_umac_addr = sun8i_dwmac_get_umac_addr, ++}; ++ ++static struct mac_device_info *sun8i_dwmac_setup(void *ppriv) ++{ ++ struct mac_device_info *mac; ++ struct stmmac_priv *priv = ppriv; ++ int ret; ++ ++ mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL); ++ if (!mac) ++ return NULL; ++ ++ ret = sun8i_power_phy(priv); ++ if (ret) ++ return NULL; ++ ++ mac->pcsr = priv->ioaddr; ++ mac->mac = &sun8i_dwmac_ops; ++ mac->dma = &sun8i_dwmac_dma_ops; ++ ++ /* The loopback bit seems to be re-set when link change ++ * Simply mask it each time ++ * Speed 10/100/1000 are set in BIT(2)/BIT(3) ++ */ ++ mac->link.speed_mask = GENMASK(3, 2) | EMAC_LOOPBACK; ++ mac->link.speed10 = EMAC_SPEED_10; ++ mac->link.speed100 = EMAC_SPEED_100; ++ mac->link.speed1000 = EMAC_SPEED_1000; ++ mac->link.duplex = EMAC_DUPLEX_FULL; ++ mac->mii.addr = EMAC_MDIO_CMD; ++ mac->mii.data = EMAC_MDIO_DATA; ++ mac->mii.reg_shift = 4; ++ mac->mii.reg_mask = GENMASK(8, 4); ++ mac->mii.addr_shift = 12; ++ mac->mii.addr_mask = GENMASK(16, 12); ++ mac->mii.clk_csr_shift = 20; ++ mac->mii.clk_csr_mask = GENMASK(22, 20); ++ mac->unicast_filter_entries = 8; ++ ++ /* Synopsys Id is not available */ ++ priv->synopsys_id = 0; ++ ++ return mac; ++} ++ ++static int sun8i_dwmac_probe(struct platform_device *pdev) ++{ ++ struct plat_stmmacenet_data *plat_dat; ++ struct stmmac_resources stmmac_res; ++ struct sunxi_priv_data *gmac; ++ struct device *dev = &pdev->dev; ++ int ret; ++ ++ ret = stmmac_get_platform_resources(pdev, &stmmac_res); ++ if (ret) ++ return ret; ++ ++ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); ++ if (IS_ERR(plat_dat)) ++ return PTR_ERR(plat_dat); ++ ++ gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); ++ if (!gmac) ++ return -ENOMEM; ++ ++ gmac->variant = of_device_get_match_data(&pdev->dev); ++ if (!gmac->variant) { ++ dev_err(&pdev->dev, "Missing dwmac-sun8i variant\n"); ++ return -EINVAL; ++ } ++ ++ gmac->tx_clk = devm_clk_get(dev, "stmmaceth"); ++ if (IS_ERR(gmac->tx_clk)) { ++ dev_err(dev, "Could not get TX clock\n"); ++ return PTR_ERR(gmac->tx_clk); ++ } ++ ++ /* Optional regulator for PHY */ ++ gmac->regulator = devm_regulator_get_optional(dev, "phy"); ++ if (IS_ERR(gmac->regulator)) { ++ if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) ++ return -EPROBE_DEFER; ++ dev_info(dev, "No regulator found\n"); ++ gmac->regulator = NULL; ++ } ++ ++ gmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, ++ "syscon"); ++ if (IS_ERR(gmac->regmap)) { ++ ret = PTR_ERR(gmac->regmap); ++ dev_err(&pdev->dev, "Unable to map syscon: %d\n", ret); ++ return ret; ++ } ++ ++ plat_dat->interface = of_get_phy_mode(dev->of_node); ++ if (plat_dat->interface == 
gmac->variant->internal_phy) { ++ dev_info(&pdev->dev, "Will use internal PHY\n"); ++ gmac->use_internal_phy = true; ++ gmac->ephy_clk = of_clk_get(plat_dat->phy_node, 0); ++ if (IS_ERR(gmac->ephy_clk)) { ++ ret = PTR_ERR(gmac->ephy_clk); ++ dev_err(&pdev->dev, "Cannot get EPHY clock: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ gmac->rst_ephy = of_reset_control_get(plat_dat->phy_node, NULL); ++ if (IS_ERR(gmac->rst_ephy)) { ++ ret = PTR_ERR(gmac->rst_ephy); ++ if (ret == -EPROBE_DEFER) ++ return ret; ++ dev_err(&pdev->dev, "No EPHY reset control found %d\n", ++ ret); ++ return -EINVAL; ++ } ++ } else { ++ dev_info(&pdev->dev, "Will use external PHY\n"); ++ gmac->use_internal_phy = false; ++ } ++ ++ /* platform data specifying hardware features and callbacks. ++ * hardware features were copied from Allwinner drivers. ++ */ ++ plat_dat->rx_coe = STMMAC_RX_COE_TYPE2; ++ plat_dat->tx_coe = 1; ++ plat_dat->has_sun8i = true; ++ plat_dat->bsp_priv = gmac; ++ plat_dat->init = sun8i_dwmac_init; ++ plat_dat->exit = sun8i_dwmac_exit; ++ plat_dat->setup = sun8i_dwmac_setup; ++ ++ ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv); ++ if (ret) ++ return ret; ++ ++ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); ++ if (ret) ++ sun8i_dwmac_exit(pdev, plat_dat->bsp_priv); ++ ++ return ret; ++} ++ ++static const struct of_device_id sun8i_dwmac_match[] = { ++ { .compatible = "allwinner,sun8i-h3-emac", ++ .data = &emac_variant_h3 }, ++ { .compatible = "allwinner,sun8i-v3s-emac", ++ .data = &emac_variant_v3s }, ++ { .compatible = "allwinner,sun8i-a83t-emac", ++ .data = &emac_variant_a83t }, ++ { .compatible = "allwinner,sun50i-a64-emac", ++ .data = &emac_variant_a64 }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, sun8i_dwmac_match); ++ ++static struct platform_driver sun8i_dwmac_driver = { ++ .probe = sun8i_dwmac_probe, ++ .remove = stmmac_pltfr_remove, ++ .driver = { ++ .name = "dwmac-sun8i", ++ .pm = &stmmac_pltfr_pm_ops, ++ .of_match_table = sun8i_dwmac_match, ++ }, ++}; ++module_platform_driver(sun8i_dwmac_driver); ++ ++MODULE_AUTHOR("Corentin Labbe "); ++MODULE_DESCRIPTION("Allwinner sun8i DWMAC specific glue layer"); ++MODULE_LICENSE("GPL"); +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +@@ -45,15 +45,17 @@ static void dwmac1000_core_init(struct m + if (hw->ps) { + value |= GMAC_CONTROL_TE; + +- if (hw->ps == SPEED_1000) { +- value &= ~GMAC_CONTROL_PS; +- } else { +- value |= GMAC_CONTROL_PS; +- +- if (hw->ps == SPEED_10) +- value &= ~GMAC_CONTROL_FES; +- else +- value |= GMAC_CONTROL_FES; ++ value &= ~hw->link.speed_mask; ++ switch (hw->ps) { ++ case SPEED_1000: ++ value |= hw->link.speed1000; ++ break; ++ case SPEED_100: ++ value |= hw->link.speed100; ++ break; ++ case SPEED_10: ++ value |= hw->link.speed10; ++ break; + } + } + +@@ -531,9 +533,11 @@ struct mac_device_info *dwmac1000_setup( + mac->mac = &dwmac1000_ops; + mac->dma = &dwmac1000_dma_ops; + +- mac->link.port = GMAC_CONTROL_PS; + mac->link.duplex = GMAC_CONTROL_DM; +- mac->link.speed = GMAC_CONTROL_FES; ++ mac->link.speed10 = GMAC_CONTROL_PS; ++ mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES; ++ mac->link.speed1000 = 0; ++ mac->link.speed_mask = GMAC_CONTROL_PS | GMAC_CONTROL_FES; + mac->mii.addr = GMAC_MII_ADDR; + mac->mii.data = GMAC_MII_DATA; + mac->mii.addr_shift = 11; +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +@@ -205,8 +205,8 @@ static void dwmac1000_dump_dma_regs(void + { + 
int i; + +- for (i = 0; i < 22; i++) +- if ((i < 9) || (i > 17)) ++ for (i = 0; i < NUM_DWMAC1000_DMA_REGS; i++) ++ if ((i < 12) || (i > 17)) + reg_space[DMA_BUS_MODE / 4 + i] = + readl(ioaddr + DMA_BUS_MODE + i * 4); + } +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +@@ -175,9 +175,11 @@ struct mac_device_info *dwmac100_setup(v + mac->mac = &dwmac100_ops; + mac->dma = &dwmac100_dma_ops; + +- mac->link.port = MAC_CONTROL_PS; + mac->link.duplex = MAC_CONTROL_F; +- mac->link.speed = 0; ++ mac->link.speed10 = 0; ++ mac->link.speed100 = 0; ++ mac->link.speed1000 = 0; ++ mac->link.speed_mask = MAC_CONTROL_PS; + mac->mii.addr = MAC_MII_ADDR; + mac->mii.data = MAC_MII_DATA; + mac->mii.addr_shift = 11; +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +@@ -70,7 +70,7 @@ static void dwmac100_dump_dma_regs(void + { + int i; + +- for (i = 0; i < 9; i++) ++ for (i = 0; i < NUM_DWMAC100_DMA_REGS; i++) + reg_space[DMA_BUS_MODE / 4 + i] = + readl(ioaddr + DMA_BUS_MODE + i * 4); + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +@@ -35,15 +35,17 @@ static void dwmac4_core_init(struct mac_ + if (hw->ps) { + value |= GMAC_CONFIG_TE; + +- if (hw->ps == SPEED_1000) { +- value &= ~GMAC_CONFIG_PS; +- } else { +- value |= GMAC_CONFIG_PS; +- +- if (hw->ps == SPEED_10) +- value &= ~GMAC_CONFIG_FES; +- else +- value |= GMAC_CONFIG_FES; ++ value &= hw->link.speed_mask; ++ switch (hw->ps) { ++ case SPEED_1000: ++ value |= hw->link.speed1000; ++ break; ++ case SPEED_100: ++ value |= hw->link.speed100; ++ break; ++ case SPEED_10: ++ value |= hw->link.speed10; ++ break; + } + } + +@@ -115,7 +117,7 @@ static void dwmac4_tx_queue_routing(stru + void __iomem *ioaddr = hw->pcsr; + u32 value; + +- const struct stmmac_rx_routing route_possibilities[] = { ++ static const struct stmmac_rx_routing route_possibilities[] = { + { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT }, + { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT }, + { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT }, +@@ -747,9 +749,11 @@ struct mac_device_info *dwmac4_setup(voi + if (mac->multicast_filter_bins) + mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); + +- mac->link.port = GMAC_CONFIG_PS; + mac->link.duplex = GMAC_CONFIG_DM; +- mac->link.speed = GMAC_CONFIG_FES; ++ mac->link.speed10 = GMAC_CONFIG_PS; ++ mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS; ++ mac->link.speed1000 = 0; ++ mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS; + mac->mii.addr = GMAC_MDIO_ADDR; + mac->mii.data = GMAC_MDIO_DATA; + mac->mii.addr_shift = 21; +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +@@ -71,9 +71,9 @@ static void dwmac4_dma_axi(void __iomem + writel(value, ioaddr + DMA_SYS_BUS_MODE); + } + +-void dwmac4_dma_init_rx_chan(void __iomem *ioaddr, +- struct stmmac_dma_cfg *dma_cfg, +- u32 dma_rx_phy, u32 chan) ++static void dwmac4_dma_init_rx_chan(void __iomem *ioaddr, ++ struct stmmac_dma_cfg *dma_cfg, ++ u32 dma_rx_phy, u32 chan) + { + u32 value; + u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl; +@@ -85,9 +85,9 @@ void dwmac4_dma_init_rx_chan(void __iome + writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(chan)); + } + +-void dwmac4_dma_init_tx_chan(void __iomem *ioaddr, +- struct stmmac_dma_cfg *dma_cfg, +- u32 dma_tx_phy, u32 chan) ++static void dwmac4_dma_init_tx_chan(void 
__iomem *ioaddr, ++ struct stmmac_dma_cfg *dma_cfg, ++ u32 dma_tx_phy, u32 chan) + { + u32 value; + u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; +@@ -99,8 +99,8 @@ void dwmac4_dma_init_tx_chan(void __iome + writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan)); + } + +-void dwmac4_dma_init_channel(void __iomem *ioaddr, +- struct stmmac_dma_cfg *dma_cfg, u32 chan) ++static void dwmac4_dma_init_channel(void __iomem *ioaddr, ++ struct stmmac_dma_cfg *dma_cfg, u32 chan) + { + u32 value; + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +@@ -136,6 +136,9 @@ + #define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ + #define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ + ++#define NUM_DWMAC100_DMA_REGS 9 ++#define NUM_DWMAC1000_DMA_REGS 23 ++ + void dwmac_enable_dma_transmission(void __iomem *ioaddr); + void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan); + void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan); +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +@@ -248,6 +248,7 @@ void stmmac_set_mac_addr(void __iomem *i + data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; + writel(data, ioaddr + low); + } ++EXPORT_SYMBOL_GPL(stmmac_set_mac_addr); + + /* Enable disable MAC RX/TX */ + void stmmac_set_mac(void __iomem *ioaddr, bool enable) +@@ -279,4 +280,4 @@ void stmmac_get_mac_addr(void __iomem *i + addr[4] = hi_addr & 0xff; + addr[5] = (hi_addr >> 8) & 0xff; + } +- ++EXPORT_SYMBOL_GPL(stmmac_get_mac_addr); +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h +@@ -104,7 +104,7 @@ struct stmmac_priv { + /* TX Queue */ + struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES]; + +- int oldlink; ++ bool oldlink; + int speed; + int oldduplex; + unsigned int flow_ctrl; +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +@@ -29,10 +29,12 @@ + #include "stmmac.h" + #include "dwmac_dma.h" + +-#define REG_SPACE_SIZE 0x1054 ++#define REG_SPACE_SIZE 0x1060 + #define MAC100_ETHTOOL_NAME "st_mac100" + #define GMAC_ETHTOOL_NAME "st_gmac" + ++#define ETHTOOL_DMA_OFFSET 55 ++ + struct stmmac_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; +@@ -273,7 +275,6 @@ static int stmmac_ethtool_get_link_ksett + { + struct stmmac_priv *priv = netdev_priv(dev); + struct phy_device *phy = dev->phydev; +- int rc; + + if (priv->hw->pcs & STMMAC_PCS_RGMII || + priv->hw->pcs & STMMAC_PCS_SGMII) { +@@ -364,8 +365,8 @@ static int stmmac_ethtool_get_link_ksett + "link speed / duplex setting\n", dev->name); + return -EBUSY; + } +- rc = phy_ethtool_ksettings_get(phy, cmd); +- return rc; ++ phy_ethtool_ksettings_get(phy, cmd); ++ return 0; + } + + static int +@@ -443,6 +444,9 @@ static void stmmac_ethtool_gregs(struct + + priv->hw->mac->dump_regs(priv->hw, reg_space); + priv->hw->dma->dump_regs(priv->ioaddr, reg_space); ++ /* Copy DMA registers to where ethtool expects them */ ++ memcpy(®_space[ETHTOOL_DMA_OFFSET], ®_space[DMA_BUS_MODE / 4], ++ NUM_DWMAC1000_DMA_REGS * 4); + } + + static void +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -235,6 +235,17 @@ static void stmmac_clk_csr_set(struct st + else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) + priv->clk_csr = STMMAC_CSR_250_300M; + } ++ ++ if (priv->plat->has_sun8i) { ++ if (clk_rate > 160000000) ++ 
priv->clk_csr = 0x03; ++ else if (clk_rate > 80000000) ++ priv->clk_csr = 0x02; ++ else if (clk_rate > 40000000) ++ priv->clk_csr = 0x01; ++ else ++ priv->clk_csr = 0; ++ } + } + + static void print_pkt(unsigned char *buf, int len) +@@ -783,7 +794,7 @@ static void stmmac_adjust_link(struct ne + struct stmmac_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; + unsigned long flags; +- int new_state = 0; ++ bool new_state = false; + + if (!phydev) + return; +@@ -796,8 +807,8 @@ static void stmmac_adjust_link(struct ne + /* Now we make sure that we can be in full duplex mode. + * If not, we operate in half-duplex mode. */ + if (phydev->duplex != priv->oldduplex) { +- new_state = 1; +- if (!(phydev->duplex)) ++ new_state = true; ++ if (!phydev->duplex) + ctrl &= ~priv->hw->link.duplex; + else + ctrl |= priv->hw->link.duplex; +@@ -808,30 +819,17 @@ static void stmmac_adjust_link(struct ne + stmmac_mac_flow_ctrl(priv, phydev->duplex); + + if (phydev->speed != priv->speed) { +- new_state = 1; ++ new_state = true; ++ ctrl &= ~priv->hw->link.speed_mask; + switch (phydev->speed) { +- case 1000: +- if (priv->plat->has_gmac || +- priv->plat->has_gmac4) +- ctrl &= ~priv->hw->link.port; ++ case SPEED_1000: ++ ctrl |= priv->hw->link.speed1000; + break; +- case 100: +- if (priv->plat->has_gmac || +- priv->plat->has_gmac4) { +- ctrl |= priv->hw->link.port; +- ctrl |= priv->hw->link.speed; +- } else { +- ctrl &= ~priv->hw->link.port; +- } ++ case SPEED_100: ++ ctrl |= priv->hw->link.speed100; + break; +- case 10: +- if (priv->plat->has_gmac || +- priv->plat->has_gmac4) { +- ctrl |= priv->hw->link.port; +- ctrl &= ~(priv->hw->link.speed); +- } else { +- ctrl &= ~priv->hw->link.port; +- } ++ case SPEED_10: ++ ctrl |= priv->hw->link.speed10; + break; + default: + netif_warn(priv, link, priv->dev, +@@ -847,12 +845,12 @@ static void stmmac_adjust_link(struct ne + writel(ctrl, priv->ioaddr + MAC_CTRL_REG); + + if (!priv->oldlink) { +- new_state = 1; +- priv->oldlink = 1; ++ new_state = true; ++ priv->oldlink = true; + } + } else if (priv->oldlink) { +- new_state = 1; +- priv->oldlink = 0; ++ new_state = true; ++ priv->oldlink = false; + priv->speed = SPEED_UNKNOWN; + priv->oldduplex = DUPLEX_UNKNOWN; + } +@@ -915,7 +913,7 @@ static int stmmac_init_phy(struct net_de + char bus_id[MII_BUS_ID_SIZE]; + int interface = priv->plat->interface; + int max_speed = priv->plat->max_speed; +- priv->oldlink = 0; ++ priv->oldlink = false; + priv->speed = SPEED_UNKNOWN; + priv->oldduplex = DUPLEX_UNKNOWN; + +@@ -1450,7 +1448,7 @@ static void free_dma_rx_desc_resources(s + static void free_dma_tx_desc_resources(struct stmmac_priv *priv) + { + u32 tx_count = priv->plat->tx_queues_to_use; +- u32 queue = 0; ++ u32 queue; + + /* Free TX queue resources */ + for (queue = 0; queue < tx_count; queue++) { +@@ -1499,7 +1497,7 @@ static int alloc_dma_rx_desc_resources(s + sizeof(dma_addr_t), + GFP_KERNEL); + if (!rx_q->rx_skbuff_dma) +- return -ENOMEM; ++ goto err_dma; + + rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE, + sizeof(struct sk_buff *), +@@ -1562,13 +1560,13 @@ static int alloc_dma_tx_desc_resources(s + sizeof(*tx_q->tx_skbuff_dma), + GFP_KERNEL); + if (!tx_q->tx_skbuff_dma) +- return -ENOMEM; ++ goto err_dma; + + tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE, + sizeof(struct sk_buff *), + GFP_KERNEL); + if (!tx_q->tx_skbuff) +- goto err_dma_buffers; ++ goto err_dma; + + if (priv->extend_desc) { + tx_q->dma_etx = dma_zalloc_coherent(priv->device, +@@ -1578,7 +1576,7 @@ static int alloc_dma_tx_desc_resources(s + 
&tx_q->dma_tx_phy, + GFP_KERNEL); + if (!tx_q->dma_etx) +- goto err_dma_buffers; ++ goto err_dma; + } else { + tx_q->dma_tx = dma_zalloc_coherent(priv->device, + DMA_TX_SIZE * +@@ -1587,13 +1585,13 @@ static int alloc_dma_tx_desc_resources(s + &tx_q->dma_tx_phy, + GFP_KERNEL); + if (!tx_q->dma_tx) +- goto err_dma_buffers; ++ goto err_dma; + } + } + + return 0; + +-err_dma_buffers: ++err_dma: + free_dma_tx_desc_resources(priv); + + return ret; +@@ -2895,8 +2893,7 @@ static netdev_tx_t stmmac_tso_xmit(struc + priv->xstats.tx_set_ic_bit++; + } + +- if (!priv->hwts_tx_en) +- skb_tx_timestamp(skb); ++ skb_tx_timestamp(skb); + + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en)) { +@@ -2974,7 +2971,7 @@ static netdev_tx_t stmmac_xmit(struct sk + + /* Manage oversized TCP frames for GMAC4 device */ + if (skb_is_gso(skb) && priv->tso) { +- if (ip_hdr(skb)->protocol == IPPROTO_TCP) ++ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) + return stmmac_tso_xmit(skb, dev); + } + +@@ -3105,8 +3102,7 @@ static netdev_tx_t stmmac_xmit(struct sk + priv->xstats.tx_set_ic_bit++; + } + +- if (!priv->hwts_tx_en) +- skb_tx_timestamp(skb); ++ skb_tx_timestamp(skb); + + /* Ready to fill the first descriptor and set the OWN bit w/o any + * problems because all the descriptors are actually ready to be +@@ -3983,7 +3979,9 @@ static int stmmac_hw_init(struct stmmac_ + struct mac_device_info *mac; + + /* Identify the MAC HW device */ +- if (priv->plat->has_gmac) { ++ if (priv->plat->setup) { ++ mac = priv->plat->setup(priv); ++ } else if (priv->plat->has_gmac) { + priv->dev->priv_flags |= IFF_UNICAST_FLT; + mac = dwmac1000_setup(priv->ioaddr, + priv->plat->multicast_filter_bins, +@@ -4003,6 +4001,10 @@ static int stmmac_hw_init(struct stmmac_ + + priv->hw = mac; + ++ /* dwmac-sun8i only work in chain mode */ ++ if (priv->plat->has_sun8i) ++ chain_mode = 1; ++ + /* To use the chained or ring mode */ + if (priv->synopsys_id >= DWMAC_CORE_4_00) { + priv->hw->mode = &dwmac4_ring_mode_ops; +@@ -4131,8 +4133,15 @@ int stmmac_dvr_probe(struct device *devi + if ((phyaddr >= 0) && (phyaddr <= 31)) + priv->plat->phy_addr = phyaddr; + +- if (priv->plat->stmmac_rst) ++ if (priv->plat->stmmac_rst) { ++ ret = reset_control_assert(priv->plat->stmmac_rst); + reset_control_deassert(priv->plat->stmmac_rst); ++ /* Some reset controllers have only reset callback instead of ++ * assert + deassert callbacks pair. 
++ */ ++ if (ret == -ENOTSUPP) ++ reset_control_reset(priv->plat->stmmac_rst); ++ } + + /* Init MAC and get the capabilities */ + ret = stmmac_hw_init(priv); +@@ -4149,7 +4158,7 @@ int stmmac_dvr_probe(struct device *devi + NETIF_F_RXCSUM; + + if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { +- ndev->hw_features |= NETIF_F_TSO; ++ ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; + priv->tso = true; + dev_info(priv->device, "TSO feature enabled\n"); + } +@@ -4311,7 +4320,7 @@ int stmmac_suspend(struct device *dev) + } + spin_unlock_irqrestore(&priv->lock, flags); + +- priv->oldlink = 0; ++ priv->oldlink = false; + priv->speed = SPEED_UNKNOWN; + priv->oldduplex = DUPLEX_UNKNOWN; + return 0; +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +@@ -204,6 +204,7 @@ int stmmac_mdio_register(struct net_devi + struct stmmac_priv *priv = netdev_priv(ndev); + struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; + struct device_node *mdio_node = priv->plat->mdio_node; ++ struct device *dev = ndev->dev.parent; + int addr, found; + + if (!mdio_bus_data) +@@ -237,7 +238,7 @@ int stmmac_mdio_register(struct net_devi + else + err = mdiobus_register(new_bus); + if (err != 0) { +- netdev_err(ndev, "Cannot register the MDIO bus\n"); ++ dev_err(dev, "Cannot register the MDIO bus\n"); + goto bus_register_fail; + } + +@@ -292,7 +293,7 @@ int stmmac_mdio_register(struct net_devi + } + + if (!found && !mdio_node) { +- netdev_warn(ndev, "No PHY found\n"); ++ dev_warn(dev, "No PHY found\n"); + mdiobus_unregister(new_bus); + mdiobus_free(new_bus); + return -ENODEV; +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -30,42 +30,39 @@ + * negative value of the address means that MAC controller is not connected + * with PHY. + */ +-struct stmmac_pci_dmi_data { +- const char *name; +- const char *asset_tag; ++struct stmmac_pci_func_data { + unsigned int func; + int phy_addr; + }; + +-struct stmmac_pci_info { +- struct pci_dev *pdev; +- int (*setup)(struct plat_stmmacenet_data *plat, +- struct stmmac_pci_info *info); +- struct stmmac_pci_dmi_data *dmi; ++struct stmmac_pci_dmi_data { ++ const struct stmmac_pci_func_data *func; ++ size_t nfuncs; + }; + +-static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info) +-{ +- const char *name = dmi_get_system_info(DMI_BOARD_NAME); +- const char *asset_tag = dmi_get_system_info(DMI_BOARD_ASSET_TAG); +- unsigned int func = PCI_FUNC(info->pdev->devfn); +- struct stmmac_pci_dmi_data *dmi; +- +- /* +- * Galileo boards with old firmware don't support DMI. We always return +- * 1 here, so at least first found MAC controller would be probed. +- */ +- if (!name) +- return 1; ++struct stmmac_pci_info { ++ int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat); ++}; + +- for (dmi = info->dmi; dmi->name && *dmi->name; dmi++) { +- if (!strcmp(dmi->name, name) && dmi->func == func) { +- /* If asset tag is provided, match on it as well. 
*/ +- if (dmi->asset_tag && strcmp(dmi->asset_tag, asset_tag)) +- continue; +- return dmi->phy_addr; +- } +- } ++static int stmmac_pci_find_phy_addr(struct pci_dev *pdev, ++ const struct dmi_system_id *dmi_list) ++{ ++ const struct stmmac_pci_func_data *func_data; ++ const struct stmmac_pci_dmi_data *dmi_data; ++ const struct dmi_system_id *dmi_id; ++ int func = PCI_FUNC(pdev->devfn); ++ size_t n; ++ ++ dmi_id = dmi_first_match(dmi_list); ++ if (!dmi_id) ++ return -ENODEV; ++ ++ dmi_data = dmi_id->driver_data; ++ func_data = dmi_data->func; ++ ++ for (n = 0; n < dmi_data->nfuncs; n++, func_data++) ++ if (func_data->func == func) ++ return func_data->phy_addr; + + return -ENODEV; + } +@@ -100,7 +97,8 @@ static void common_default_data(struct p + plat->rx_queues_cfg[0].pkt_route = 0x0; + } + +-static void stmmac_default_data(struct plat_stmmacenet_data *plat) ++static int stmmac_default_data(struct pci_dev *pdev, ++ struct plat_stmmacenet_data *plat) + { + /* Set common default data first */ + common_default_data(plat); +@@ -112,12 +110,77 @@ static void stmmac_default_data(struct p + plat->dma_cfg->pbl = 32; + plat->dma_cfg->pblx8 = true; + /* TODO: AXI */ ++ ++ return 0; + } + +-static int quark_default_data(struct plat_stmmacenet_data *plat, +- struct stmmac_pci_info *info) ++static const struct stmmac_pci_info stmmac_pci_info = { ++ .setup = stmmac_default_data, ++}; ++ ++static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = { ++ { ++ .func = 6, ++ .phy_addr = 1, ++ }, ++}; ++ ++static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = { ++ .func = galileo_stmmac_func_data, ++ .nfuncs = ARRAY_SIZE(galileo_stmmac_func_data), ++}; ++ ++static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = { ++ { ++ .func = 6, ++ .phy_addr = 1, ++ }, ++ { ++ .func = 7, ++ .phy_addr = 1, ++ }, ++}; ++ ++static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = { ++ .func = iot2040_stmmac_func_data, ++ .nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data), ++}; ++ ++static const struct dmi_system_id quark_pci_dmi[] = { ++ { ++ .matches = { ++ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"), ++ }, ++ .driver_data = (void *)&galileo_stmmac_dmi_data, ++ }, ++ { ++ .matches = { ++ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"), ++ }, ++ .driver_data = (void *)&galileo_stmmac_dmi_data, ++ }, ++ { ++ .matches = { ++ DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"), ++ DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG, ++ "6ES7647-0AA00-0YA2"), ++ }, ++ .driver_data = (void *)&galileo_stmmac_dmi_data, ++ }, ++ { ++ .matches = { ++ DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"), ++ DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG, ++ "6ES7647-0AA00-1YA2"), ++ }, ++ .driver_data = (void *)&iot2040_stmmac_dmi_data, ++ }, ++ {} ++}; ++ ++static int quark_default_data(struct pci_dev *pdev, ++ struct plat_stmmacenet_data *plat) + { +- struct pci_dev *pdev = info->pdev; + int ret; + + /* Set common default data first */ +@@ -127,9 +190,19 @@ static int quark_default_data(struct pla + * Refuse to load the driver and register net device if MAC controller + * does not connect to any PHY interface. + */ +- ret = stmmac_pci_find_phy_addr(info); +- if (ret < 0) +- return ret; ++ ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi); ++ if (ret < 0) { ++ /* Return error to the caller on DMI enabled boards. */ ++ if (dmi_get_system_info(DMI_BOARD_NAME)) ++ return ret; ++ ++ /* ++ * Galileo boards with old firmware don't support DMI. 
We always ++ * use 1 here as PHY address, so at least the first found MAC ++ * controller would be probed. ++ */ ++ ret = 1; ++ } + + plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn); + plat->phy_addr = ret; +@@ -143,41 +216,8 @@ static int quark_default_data(struct pla + return 0; + } + +-static struct stmmac_pci_dmi_data quark_pci_dmi_data[] = { +- { +- .name = "Galileo", +- .func = 6, +- .phy_addr = 1, +- }, +- { +- .name = "GalileoGen2", +- .func = 6, +- .phy_addr = 1, +- }, +- { +- .name = "SIMATIC IOT2000", +- .asset_tag = "6ES7647-0AA00-0YA2", +- .func = 6, +- .phy_addr = 1, +- }, +- { +- .name = "SIMATIC IOT2000", +- .asset_tag = "6ES7647-0AA00-1YA2", +- .func = 6, +- .phy_addr = 1, +- }, +- { +- .name = "SIMATIC IOT2000", +- .asset_tag = "6ES7647-0AA00-1YA2", +- .func = 7, +- .phy_addr = 1, +- }, +- {} +-}; +- +-static struct stmmac_pci_info quark_pci_info = { ++static const struct stmmac_pci_info quark_pci_info = { + .setup = quark_default_data, +- .dmi = quark_pci_dmi_data, + }; + + /** +@@ -236,15 +276,9 @@ static int stmmac_pci_probe(struct pci_d + + pci_set_master(pdev); + +- if (info) { +- info->pdev = pdev; +- if (info->setup) { +- ret = info->setup(plat, info); +- if (ret) +- return ret; +- } +- } else +- stmmac_default_data(plat); ++ ret = info->setup(pdev, plat); ++ if (ret) ++ return ret; + + pci_enable_msi(pdev); + +@@ -270,14 +304,21 @@ static void stmmac_pci_remove(struct pci + + static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume); + +-#define STMMAC_VENDOR_ID 0x700 ++/* synthetic ID, no official vendor */ ++#define PCI_VENDOR_ID_STMMAC 0x700 ++ + #define STMMAC_QUARK_ID 0x0937 + #define STMMAC_DEVICE_ID 0x1108 + ++#define STMMAC_DEVICE(vendor_id, dev_id, info) { \ ++ PCI_VDEVICE(vendor_id, dev_id), \ ++ .driver_data = (kernel_ulong_t)&info \ ++ } ++ + static const struct pci_device_id stmmac_id_table[] = { +- {PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)}, +- {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_MAC)}, +- {PCI_VDEVICE(INTEL, STMMAC_QUARK_ID), (kernel_ulong_t)&quark_pci_info}, ++ STMMAC_DEVICE(STMMAC, STMMAC_DEVICE_ID, stmmac_pci_info), ++ STMMAC_DEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_MAC, stmmac_pci_info), ++ STMMAC_DEVICE(INTEL, STMMAC_QUARK_ID, quark_pci_info), + {} + }; + +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +@@ -309,6 +309,13 @@ static int stmmac_dt_phy(struct plat_stm + struct device_node *np, struct device *dev) + { + bool mdio = true; ++ static const struct of_device_id need_mdio_ids[] = { ++ { .compatible = "snps,dwc-qos-ethernet-4.10" }, ++ { .compatible = "allwinner,sun8i-a83t-emac" }, ++ { .compatible = "allwinner,sun8i-h3-emac" }, ++ { .compatible = "allwinner,sun8i-v3s-emac" }, ++ { .compatible = "allwinner,sun50i-a64-emac" }, ++ }; + + /* If phy-handle property is passed from DT, use it as the PHY */ + plat->phy_node = of_parse_phandle(np, "phy-handle", 0); +@@ -325,8 +332,7 @@ static int stmmac_dt_phy(struct plat_stm + mdio = false; + } + +- /* exception for dwmac-dwc-qos-eth glue logic */ +- if (of_device_is_compatible(np, "snps,dwc-qos-ethernet-4.10")) { ++ if (of_match_node(need_mdio_ids, np)) { + plat->mdio_node = of_get_child_by_name(np, "mdio"); + } else { + /** +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -177,6 +177,7 @@ struct plat_stmmacenet_data { + void (*fix_mac_speed)(void *priv, unsigned int speed); + int (*init)(struct platform_device *pdev, void *priv); + void (*exit)(struct 
platform_device *pdev, void *priv); ++ struct mac_device_info *(*setup)(void *priv); + void *bsp_priv; + struct clk *stmmac_clk; + struct clk *pclk; +@@ -185,6 +186,7 @@ struct plat_stmmacenet_data { + struct reset_control *stmmac_rst; + struct stmmac_axi *axi; + int has_gmac4; ++ bool has_sun8i; + bool tso_en; + int mac_port_sel_speed; + bool en_tx_lpi_clockgating; diff --git a/target/linux/sunxi/patches-4.9/0054-crypto-sun4i-ss_support_the_Security_System_PRNG.patch b/target/linux/sunxi/patches-4.9/0054-crypto-sun4i-ss_support_the_Security_System_PRNG.patch new file mode 100644 index 000000000..f2bac1fde --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0054-crypto-sun4i-ss_support_the_Security_System_PRNG.patch @@ -0,0 +1,206 @@ +From b8ae5c7387ad075ee61e8c8774ce2bca46bc9236 Mon Sep 17 00:00:00 2001 +From: Corentin LABBE +Date: Mon, 3 Jul 2017 20:48:48 +0200 +Subject: crypto: sun4i-ss - support the Security System PRNG + +The Security System has a PRNG, this patch adds support for it via +crypto_rng. + +Signed-off-by: Corentin Labbe +Signed-off-by: Herbert Xu +--- + drivers/crypto/Kconfig | 8 +++++ + drivers/crypto/sunxi-ss/Makefile | 1 + + drivers/crypto/sunxi-ss/sun4i-ss-core.c | 30 ++++++++++++++++++ + drivers/crypto/sunxi-ss/sun4i-ss-prng.c | 56 +++++++++++++++++++++++++++++++++ + drivers/crypto/sunxi-ss/sun4i-ss.h | 11 +++++++ + 5 files changed, 106 insertions(+) + create mode 100644 drivers/crypto/sunxi-ss/sun4i-ss-prng.c + +--- a/drivers/crypto/Kconfig ++++ b/drivers/crypto/Kconfig +@@ -538,6 +538,14 @@ config CRYPTO_DEV_SUN4I_SS + To compile this driver as a module, choose M here: the module + will be called sun4i-ss. + ++config CRYPTO_DEV_SUN4I_SS_PRNG ++ bool "Support for Allwinner Security System PRNG" ++ depends on CRYPTO_DEV_SUN4I_SS ++ select CRYPTO_RNG ++ help ++ Select this option if you want to provide kernel-side support for ++ the Pseudo-Random Number Generator found in the Security System. 
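
Background note (editorial, not part of the upstream patch): once this option is enabled, the driver registers its PRNG under the generic "stdrng" name, and any kernel code can draw bytes from it through the standard crypto_rng API. A minimal consumer sketch using only stock kernel calls; the function name example_prng_read is hypothetical:

	#include <crypto/rng.h>

	static int example_prng_read(u8 *buf, unsigned int len)
	{
		/* 24-byte seed, matching seedsize = SS_SEED_LEN / BITS_PER_BYTE */
		u8 seed[24] = { 0 };
		struct crypto_rng *rng;
		int ret;

		rng = crypto_alloc_rng("stdrng", 0, 0); /* highest-priority stdrng */
		if (IS_ERR(rng))
			return PTR_ERR(rng);

		ret = crypto_rng_reset(rng, seed, sizeof(seed)); /* -> .seed */
		if (ret >= 0)
			ret = crypto_rng_get_bytes(rng, buf, len); /* -> .generate */

		crypto_free_rng(rng);
		return ret < 0 ? ret : 0;
	}
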
++ + config CRYPTO_DEV_ROCKCHIP + tristate "Rockchip's Cryptographic Engine driver" + depends on OF && ARCH_ROCKCHIP +--- a/drivers/crypto/sunxi-ss/Makefile ++++ b/drivers/crypto/sunxi-ss/Makefile +@@ -1,2 +1,3 @@ + obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sun4i-ss.o + sun4i-ss-y += sun4i-ss-core.o sun4i-ss-hash.o sun4i-ss-cipher.o ++sun4i-ss-$(CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG) += sun4i-ss-prng.o +--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c ++++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c +@@ -214,6 +214,23 @@ static struct sun4i_ss_alg_template ss_a + } + } + }, ++#ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG ++{ ++ .type = CRYPTO_ALG_TYPE_RNG, ++ .alg.rng = { ++ .base = { ++ .cra_name = "stdrng", ++ .cra_driver_name = "sun4i_ss_rng", ++ .cra_priority = 300, ++ .cra_ctxsize = 0, ++ .cra_module = THIS_MODULE, ++ }, ++ .generate = sun4i_ss_prng_generate, ++ .seed = sun4i_ss_prng_seed, ++ .seedsize = SS_SEED_LEN / BITS_PER_BYTE, ++ } ++}, ++#endif + }; + + static int sun4i_ss_probe(struct platform_device *pdev) +@@ -356,6 +373,13 @@ static int sun4i_ss_probe(struct platfor + goto error_alg; + } + break; ++ case CRYPTO_ALG_TYPE_RNG: ++ err = crypto_register_rng(&ss_algs[i].alg.rng); ++ if (err) { ++ dev_err(ss->dev, "Fail to register %s\n", ++ ss_algs[i].alg.rng.base.cra_name); ++ } ++ break; + } + } + platform_set_drvdata(pdev, ss); +@@ -370,6 +394,9 @@ error_alg: + case CRYPTO_ALG_TYPE_AHASH: + crypto_unregister_ahash(&ss_algs[i].alg.hash); + break; ++ case CRYPTO_ALG_TYPE_RNG: ++ crypto_unregister_rng(&ss_algs[i].alg.rng); ++ break; + } + } + if (ss->reset) +@@ -394,6 +421,9 @@ static int sun4i_ss_remove(struct platfo + case CRYPTO_ALG_TYPE_AHASH: + crypto_unregister_ahash(&ss_algs[i].alg.hash); + break; ++ case CRYPTO_ALG_TYPE_RNG: ++ crypto_unregister_rng(&ss_algs[i].alg.rng); ++ break; + } + } + +--- /dev/null ++++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c +@@ -0,0 +1,56 @@ ++#include "sun4i-ss.h" ++ ++int sun4i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, ++ unsigned int slen) ++{ ++ struct sun4i_ss_alg_template *algt; ++ struct rng_alg *alg = crypto_rng_alg(tfm); ++ ++ algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng); ++ memcpy(algt->ss->seed, seed, slen); ++ ++ return 0; ++} ++ ++int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, ++ unsigned int slen, u8 *dst, unsigned int dlen) ++{ ++ struct sun4i_ss_alg_template *algt; ++ struct rng_alg *alg = crypto_rng_alg(tfm); ++ int i; ++ u32 v; ++ u32 *data = (u32 *)dst; ++ const u32 mode = SS_OP_PRNG | SS_PRNG_CONTINUE | SS_ENABLED; ++ size_t len; ++ struct sun4i_ss_ctx *ss; ++ unsigned int todo = (dlen / 4) * 4; ++ ++ algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng); ++ ss = algt->ss; ++ ++ spin_lock(&ss->slock); ++ ++ writel(mode, ss->base + SS_CTL); ++ ++ while (todo > 0) { ++ /* write the seed */ ++ for (i = 0; i < SS_SEED_LEN / BITS_PER_LONG; i++) ++ writel(ss->seed[i], ss->base + SS_KEY0 + i * 4); ++ ++ /* Read the random data */ ++ len = min_t(size_t, SS_DATA_LEN / BITS_PER_BYTE, todo); ++ readsl(ss->base + SS_TXFIFO, data, len / 4); ++ data += len / 4; ++ todo -= len; ++ ++ /* Update the seed */ ++ for (i = 0; i < SS_SEED_LEN / BITS_PER_LONG; i++) { ++ v = readl(ss->base + SS_KEY0 + i * 4); ++ ss->seed[i] = v; ++ } ++ } ++ ++ writel(0, ss->base + SS_CTL); ++ spin_unlock(&ss->slock); ++ return dlen; ++} +--- a/drivers/crypto/sunxi-ss/sun4i-ss.h ++++ b/drivers/crypto/sunxi-ss/sun4i-ss.h +@@ -30,6 +30,7 @@ + #include + #include + #include ++#include + + #define SS_CTL 0x00 + #define SS_KEY0 0x04 
+@@ -125,6 +126,9 @@
+ #define SS_RXFIFO_EMP_INT_ENABLE	(1 << 2)
+ #define SS_TXFIFO_AVA_INT_ENABLE	(1 << 0)
+ 
++#define SS_SEED_LEN 192
++#define SS_DATA_LEN 160
++
+ struct sun4i_ss_ctx {
+ 	void __iomem *base;
+ 	int irq;
+@@ -134,6 +138,9 @@ struct sun4i_ss_ctx {
+ 	struct device *dev;
+ 	struct resource *res;
+ 	spinlock_t slock; /* control the use of the device */
++#ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG
++	u32 seed[SS_SEED_LEN / BITS_PER_LONG];
++#endif
+ };
+ 
+ struct sun4i_ss_alg_template {
+@@ -142,6 +149,7 @@ struct sun4i_ss_alg_template {
+ 	union {
+ 		struct crypto_alg crypto;
+ 		struct ahash_alg hash;
++		struct rng_alg rng;
+ 	} alg;
+ 	struct sun4i_ss_ctx *ss;
+ };
+@@ -199,3 +207,6 @@ int sun4i_ss_des_setkey(struct crypto_ab
+ 			unsigned int keylen);
+ int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ 			unsigned int keylen);
++int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
++			   unsigned int slen, u8 *dst, unsigned int dlen);
++int sun4i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen);
diff --git a/target/linux/sunxi/patches-4.9/0060-arm64-allwinner-sun50i-a64-Add-dt-node-for-the-sysco.patch b/target/linux/sunxi/patches-4.9/0060-arm64-allwinner-sun50i-a64-Add-dt-node-for-the-sysco.patch
new file mode 100644
index 000000000..911ea4996
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0060-arm64-allwinner-sun50i-a64-Add-dt-node-for-the-sysco.patch
@@ -0,0 +1,33 @@
+From 79b953605ded6a9a995040a1c8cc665127a6411a Mon Sep 17 00:00:00 2001
+From: Corentin Labbe
+Date: Wed, 31 May 2017 09:18:45 +0200
+Subject: arm64: allwinner: sun50i-a64: Add dt node for the syscon control
+ module
+
+This patch adds the dt node for the syscon register present on the
+Allwinner A64.
+
+Only two registers are present in this syscon, and the only useful one
+is the register dedicated to the EMAC clock.
+
+Signed-off-by: Corentin Labbe
+Signed-off-by: Maxime Ripard
+---
+ arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+@@ -129,6 +129,12 @@
+ 		#size-cells = <1>;
+ 		ranges;
+ 
++		syscon: syscon@1c00000 {
++			compatible = "allwinner,sun50i-a64-system-controller",
++				"syscon";
++			reg = <0x01c00000 0x1000>;
++		};
++
+ 		mmc0: mmc@1c0f000 {
+ 			compatible = "allwinner,sun50i-a64-mmc";
+ 			reg = <0x01c0f000 0x1000>;
diff --git a/target/linux/sunxi/patches-4.9/0061-arm64-allwinner-sun50i-a64-add-dwmac-sun8i-Ethernet-.patch b/target/linux/sunxi/patches-4.9/0061-arm64-allwinner-sun50i-a64-add-dwmac-sun8i-Ethernet-.patch
new file mode 100644
index 000000000..e59c3e873
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0061-arm64-allwinner-sun50i-a64-add-dwmac-sun8i-Ethernet-.patch
@@ -0,0 +1,69 @@
+From e53f67e981bcc5547857475241b3a4a066955f8c Mon Sep 17 00:00:00 2001
+From: Corentin Labbe
+Date: Wed, 31 May 2017 09:18:46 +0200
+Subject: arm64: allwinner: sun50i-a64: add dwmac-sun8i Ethernet driver
+
+The dwmac-sun8i is an Ethernet MAC that supports 10/100/1000 Mbit
+connections. It is very similar to the device found in the Allwinner
+H3, but lacks the internal 100 Mbit PHY and its associated control
+bits.
+This adds the necessary bits to the Allwinner A64 SoC .dtsi, but keeps
+it disabled at this level.
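
Editorial aside on how the syscon node above is consumed: the dwmac-sun8i glue added earlier in this series looks the regmap up by the "syscon" phandle and then reads and rewrites the EMAC clock register through it. A rough sketch built from the standard syscon/regmap calls; SYSCON_EMAC_REG is the driver's macro for that register offset, and pdev is the usual probe context:

	#include <linux/mfd/syscon.h>
	#include <linux/regmap.h>

	struct regmap *regmap;
	u32 val;

	/* "syscon" is the phandle property carried by the emac node */
	regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "syscon");
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* read the current EMAC clock configuration before modifying it */
	regmap_read(regmap, SYSCON_EMAC_REG, &val);
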
+ +Signed-off-by: Corentin Labbe +Signed-off-by: Maxime Ripard +--- + arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 35 +++++++++++++++++++++++++++ + 1 file changed, 35 insertions(+) + +--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +@@ -284,6 +284,21 @@ + bias-pull-up; + }; + ++ rmii_pins: rmii_pins { ++ pins = "PD10", "PD11", "PD13", "PD14", "PD17", ++ "PD18", "PD19", "PD20", "PD22", "PD23"; ++ function = "emac"; ++ drive-strength = <40>; ++ }; ++ ++ rgmii_pins: rgmii_pins { ++ pins = "PD8", "PD9", "PD10", "PD11", "PD12", ++ "PD13", "PD15", "PD16", "PD17", "PD18", ++ "PD19", "PD20", "PD21", "PD22", "PD23"; ++ function = "emac"; ++ drive-strength = <40>; ++ }; ++ + uart0_pins_a: uart0@0 { + pins = "PB8", "PB9"; + function = "uart0"; +@@ -388,6 +403,26 @@ + #size-cells = <0>; + }; + ++ emac: ethernet@1c30000 { ++ compatible = "allwinner,sun50i-a64-emac"; ++ syscon = <&syscon>; ++ reg = <0x01c30000 0x100>; ++ interrupts = ; ++ interrupt-names = "macirq"; ++ resets = <&ccu RST_BUS_EMAC>; ++ reset-names = "stmmaceth"; ++ clocks = <&ccu CLK_BUS_EMAC>; ++ clock-names = "stmmaceth"; ++ status = "disabled"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ mdio: mdio { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ }; ++ + gic: interrupt-controller@1c81000 { + compatible = "arm,gic-400"; + reg = <0x01c81000 0x1000>, diff --git a/target/linux/sunxi/patches-4.9/0062-arm64-allwinner-pine64-Enable-dwmac-sun8i.patch b/target/linux/sunxi/patches-4.9/0062-arm64-allwinner-pine64-Enable-dwmac-sun8i.patch new file mode 100644 index 000000000..d9a825490 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0062-arm64-allwinner-pine64-Enable-dwmac-sun8i.patch @@ -0,0 +1,46 @@ +From 97023943749367111b9a88e09d1b9bd157dd3326 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Wed, 31 May 2017 09:18:47 +0200 +Subject: arm64: allwinner: pine64: Enable dwmac-sun8i + +The dwmac-sun8i hardware is present on the pine64 +It uses an external PHY via RMII. + +Signed-off-by: Corentin Labbe +Signed-off-by: Maxime Ripard +--- + arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts | 16 ++++++++++++++++ + 1 file changed, 16 insertions(+) + +--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts +@@ -70,6 +70,15 @@ + status = "okay"; + }; + ++&emac { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&rmii_pins>; ++ phy-mode = "rmii"; ++ phy-handle = <&ext_rmii_phy1>; ++ status = "okay"; ++ ++}; ++ + &i2c1 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_pins>; +@@ -80,6 +89,13 @@ + bias-pull-up; + }; + ++&mdio { ++ ext_rmii_phy1: ethernet-phy@1 { ++ compatible = "ethernet-phy-ieee802.3-c22"; ++ reg = <1>; ++ }; ++}; ++ + &mmc0 { + pinctrl-names = "default"; + pinctrl-0 = <&mmc0_pins>; diff --git a/target/linux/sunxi/patches-4.9/0063-arm64-allwinner-pine64-plus-Enable-dwmac-sun8i.patch b/target/linux/sunxi/patches-4.9/0063-arm64-allwinner-pine64-plus-Enable-dwmac-sun8i.patch new file mode 100644 index 000000000..f89b84279 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0063-arm64-allwinner-pine64-plus-Enable-dwmac-sun8i.patch @@ -0,0 +1,38 @@ +From 94dcfdc77fc55ed1956011ceea341911c6e760a0 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Wed, 31 May 2017 09:18:48 +0200 +Subject: arm64: allwinner: pine64-plus: Enable dwmac-sun8i + +The dwmac-sun8i hardware is present on the pine64 plus. +It uses an external PHY rtl8211e via RGMII. 
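
For reference: the phy-handle and phy-mode set by these board patches end up in stmmac_init_phy(), which binds the MAC to the external PHY roughly as below (condensed sketch of the call already present in the stmmac core; ndev and priv are the usual driver context):

	struct phy_device *phydev;

	/* attach to the PHY node named by phy-handle, using the parsed
	 * phy-mode ("rgmii" here) as the interface type */
	phydev = of_phy_connect(ndev, priv->plat->phy_node,
				&stmmac_adjust_link, 0,
				priv->plat->interface);
	if (!phydev)
		return -ENODEV;
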
+ +Signed-off-by: Corentin Labbe +Signed-off-by: Maxime Ripard +--- + .../arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts | 17 ++++++++++++++++- + 1 file changed, 16 insertions(+), 1 deletion(-) + +--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts +@@ -46,5 +46,20 @@ + model = "Pine64+"; + compatible = "pine64,pine64-plus", "allwinner,sun50i-a64"; + +- /* TODO: Camera, Ethernet PHY, touchscreen, etc. */ ++ /* TODO: Camera, touchscreen, etc. */ ++}; ++ ++&emac { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&rgmii_pins>; ++ phy-mode = "rgmii"; ++ phy-handle = <&ext_rgmii_phy>; ++ status = "okay"; ++}; ++ ++&mdio { ++ ext_rgmii_phy: ethernet-phy@1 { ++ compatible = "ethernet-phy-ieee802.3-c22"; ++ reg = <1>; ++ }; + }; diff --git a/target/linux/sunxi/patches-4.9/0064-arm64-allwinner-sun50i-a64-Correct-emac-register-siz.patch b/target/linux/sunxi/patches-4.9/0064-arm64-allwinner-sun50i-a64-Correct-emac-register-siz.patch new file mode 100644 index 000000000..924c87528 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0064-arm64-allwinner-sun50i-a64-Correct-emac-register-siz.patch @@ -0,0 +1,26 @@ +From 3a4bae5fd44aa1cf49780dd25b3a89e6a39e8560 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Mon, 10 Jul 2017 20:44:40 +0200 +Subject: arm64: allwinner: sun50i-a64: Correct emac register size + +The datasheet said that emac register size is 0x10000 not 0x100 + +Signed-off-by: Corentin Labbe +Signed-off-by: Maxime Ripard +[wens@csie.org: Fixed commit subject prefix] +Signed-off-by: Chen-Yu Tsai +--- + arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +@@ -406,7 +406,7 @@ + emac: ethernet@1c30000 { + compatible = "allwinner,sun50i-a64-emac"; + syscon = <&syscon>; +- reg = <0x01c30000 0x100>; ++ reg = <0x01c30000 0x10000>; + interrupts = ; + interrupt-names = "macirq"; + resets = <&ccu RST_BUS_EMAC>; diff --git a/target/linux/sunxi/patches-4.9/0065-arm64-allwinner-a64-pine64-add-missing-ethernet0-ali.patch b/target/linux/sunxi/patches-4.9/0065-arm64-allwinner-a64-pine64-add-missing-ethernet0-ali.patch new file mode 100644 index 000000000..f892219fa --- /dev/null +++ b/target/linux/sunxi/patches-4.9/0065-arm64-allwinner-a64-pine64-add-missing-ethernet0-ali.patch @@ -0,0 +1,29 @@ +From dff751c68904cf587d918cfb6b2f5b0112f73bc9 Mon Sep 17 00:00:00 2001 +From: Icenowy Zheng +Date: Sat, 22 Jul 2017 10:28:51 +0800 +Subject: arm64: allwinner: a64: pine64: add missing ethernet0 alias + +The EMAC Ethernet controller was enabled, but an accompanying alias +was not added. This results in unstable numbering if other Ethernet +devices, such as a USB dongle, are present. Also, the bootloader uses +the alias to assign a generated stable MAC address to the device node. 
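
A note on the mechanics: ethernetN aliases give both the kernel and the bootloader a stable index for the node (U-Boot keys its MAC-address fixups on them). In-kernel code resolves an alias along these lines (illustrative sketch using the stock of_alias_get_id() helper; np would be the emac node):

	int id;

	/* returns 0 for "ethernet0", 1 for "ethernet1", and so on */
	id = of_alias_get_id(np, "ethernet");
	if (id < 0)
		id = 0; /* no alias: numbering falls back to probe order */
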
+
+Signed-off-by: Icenowy Zheng
+Signed-off-by: Maxime Ripard
+Fixes: 970239437493 ("arm64: allwinner: pine64: Enable dwmac-sun8i")
+[wens@csie.org: Rewrite commit log as fixing a previous patch with Fixes]
+Signed-off-by: Chen-Yu Tsai
+---
+ arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
+@@ -51,6 +51,7 @@
+ 	compatible = "pine64,pine64", "allwinner,sun50i-a64";
+ 
+ 	aliases {
++		ethernet0 = &emac;
+ 		serial0 = &uart0;
+ 	};
+ 
diff --git a/target/linux/sunxi/patches-4.9/0070-arm-sun8i-sunxi-h3-h5-Add-dt-node-for-the-syscon-con.patch b/target/linux/sunxi/patches-4.9/0070-arm-sun8i-sunxi-h3-h5-Add-dt-node-for-the-syscon-con.patch
new file mode 100644
index 000000000..88d431d31
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0070-arm-sun8i-sunxi-h3-h5-Add-dt-node-for-the-syscon-con.patch
@@ -0,0 +1,32 @@
+From d91d3daf5de90e0118227d8ddcb7bb4ff40c1b91 Mon Sep 17 00:00:00 2001
+From: Corentin Labbe
+Date: Wed, 31 May 2017 09:18:37 +0200
+Subject: arm: sun8i: sunxi-h3-h5: Add dt node for the syscon control module
+
+This patch adds the dt node for the syscon register present on the
+Allwinner H3/H5.
+
+Only two registers are present in this syscon, and the only useful one
+is the register dedicated to the EMAC clock.
+
+Signed-off-by: Corentin Labbe
+Signed-off-by: Maxime Ripard
+---
+ arch/arm/boot/dts/sunxi-h3-h5.dtsi | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/arm/boot/dts/sun8i-h3.dtsi
++++ b/arch/arm/boot/dts/sun8i-h3.dtsi
+@@ -140,6 +140,12 @@
+ 		#size-cells = <1>;
+ 		ranges;
+ 
++		syscon: syscon@1c00000 {
++			compatible = "allwinner,sun8i-h3-system-controller",
++				"syscon";
++			reg = <0x01c00000 0x1000>;
++		};
++
+ 		dma: dma-controller@01c02000 {
+ 			compatible = "allwinner,sun8i-h3-dma";
+ 			reg = <0x01c02000 0x1000>;
diff --git a/target/linux/sunxi/patches-4.9/0071-arm-sun8i-sunxi-h3-h5-add-dwmac-sun8i-ethernet-drive.patch b/target/linux/sunxi/patches-4.9/0071-arm-sun8i-sunxi-h3-h5-add-dwmac-sun8i-ethernet-drive.patch
new file mode 100644
index 000000000..7054e3dc0
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0071-arm-sun8i-sunxi-h3-h5-add-dwmac-sun8i-ethernet-drive.patch
@@ -0,0 +1,67 @@
+From 0eba511a3cac29d6338b22b5b727f40cf8d163df Mon Sep 17 00:00:00 2001
+From: Corentin Labbe
+Date: Wed, 31 May 2017 09:18:38 +0200
+Subject: arm: sun8i: sunxi-h3-h5: add dwmac-sun8i ethernet driver
+
+The dwmac-sun8i is an Ethernet MAC that supports 10/100/1000 Mbit
+speeds.
+
+This patch enables the dwmac-sun8i in the Allwinner H3/H5 SoC device tree.
+The H3/H5 SoCs have an internal PHY, so the optional syscon and ephy entries are set.
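
How the driver picks that internal PHY: the glue compares the board's phy-mode against the interface mode the variant declares for its EPHY. Paraphrased from the probe code in the dwmac-sun8i driver patch earlier in this series (all names come from that patch):

	plat_dat->interface = of_get_phy_mode(dev->of_node);

	/* the H3/H5 variant declares PHY_INTERFACE_MODE_MII as internal_phy,
	 * so a board with phy-mode = "mii" selects the EPHY */
	if (plat_dat->interface == gmac->variant->internal_phy)
		gmac->use_internal_phy = true;
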
+
+Signed-off-by: Corentin Labbe
+Signed-off-by: Maxime Ripard
+---
+ arch/arm/boot/dts/sunxi-h3-h5.dtsi | 34 ++++++++++++++++++++++++++++++++++
+ 1 file changed, 34 insertions(+)
+
+--- a/arch/arm/boot/dts/sun8i-h3.dtsi
++++ b/arch/arm/boot/dts/sun8i-h3.dtsi
+@@ -333,6 +333,14 @@
+ 			interrupt-controller;
+ 			#interrupt-cells = <3>;
+ 
++			emac_rgmii_pins: emac0 {
++				pins = "PD0", "PD1", "PD2", "PD3", "PD4",
++				       "PD5", "PD7", "PD8", "PD9", "PD10",
++				       "PD12", "PD13", "PD15", "PD16", "PD17";
++				function = "emac";
++				drive-strength = <40>;
++			};
++
+ 			i2c0_pins: i2c0 {
+ 				allwinner,pins = "PA11", "PA12";
+ 				allwinner,function = "i2c0";
+@@ -431,6 +439,32 @@
+ 			clocks = <&osc24M>;
+ 		};
+ 
++		emac: ethernet@1c30000 {
++			compatible = "allwinner,sun8i-h3-emac";
++			syscon = <&syscon>;
++			reg = <0x01c30000 0x104>;
++			interrupts = ;
++			interrupt-names = "macirq";
++			resets = <&ccu RST_BUS_EMAC>;
++			reset-names = "stmmaceth";
++			clocks = <&ccu CLK_BUS_EMAC>;
++			clock-names = "stmmaceth";
++			#address-cells = <1>;
++			#size-cells = <0>;
++			status = "disabled";
++
++			mdio: mdio {
++				#address-cells = <1>;
++				#size-cells = <0>;
++				int_mii_phy: ethernet-phy@1 {
++					compatible = "ethernet-phy-ieee802.3-c22";
++					reg = <1>;
++					clocks = <&ccu CLK_BUS_EPHY>;
++					resets = <&ccu RST_BUS_EPHY>;
++				};
++			};
++		};
++
+ 		wdt0: watchdog@01c20ca0 {
+ 			compatible = "allwinner,sun6i-a31-wdt";
+ 			reg = <0x01c20ca0 0x20>;
diff --git a/target/linux/sunxi/patches-4.9/0072-arm-sun8i-orangepi-2-Enable-dwmac-sun8i.patch b/target/linux/sunxi/patches-4.9/0072-arm-sun8i-orangepi-2-Enable-dwmac-sun8i.patch
new file mode 100644
index 000000000..450d5bc2b
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0072-arm-sun8i-orangepi-2-Enable-dwmac-sun8i.patch
@@ -0,0 +1,40 @@
+From a9992f2dd1890112643a93d621ff5a4c97c55d53 Mon Sep 17 00:00:00 2001
+From: Corentin Labbe
+Date: Wed, 31 May 2017 09:18:42 +0200
+Subject: arm: sun8i: orangepi-2: Enable dwmac-sun8i
+
+The dwmac-sun8i hardware is present on the Orange Pi 2.
+It uses the internal PHY.
+
+This patch creates the needed emac node.
+
+Signed-off-by: Corentin Labbe
+Signed-off-by: Maxime Ripard
+---
+ arch/arm/boot/dts/sun8i-h3-orangepi-2.dts | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
++++ b/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
+@@ -55,6 +55,7 @@
+ 	aliases {
+ 		serial0 = &uart0;
+ 		/* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
++		ethernet0 = &emac;
+ 		ethernet1 = &rtl8189;
+ 	};
+ 
+@@ -109,6 +110,13 @@
+ 	status = "okay";
+ };
+ 
++&emac {
++	phy-handle = <&int_mii_phy>;
++	phy-mode = "mii";
++	allwinner,leds-active-low;
++	status = "okay";
++};
++
+ &ir {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&ir_pins_a>;
diff --git a/target/linux/sunxi/patches-4.9/0073-ARM-sun8i-orangepi-plus-Enable-dwmac-sun8i.patch b/target/linux/sunxi/patches-4.9/0073-ARM-sun8i-orangepi-plus-Enable-dwmac-sun8i.patch
new file mode 100644
index 000000000..375d119d5
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0073-ARM-sun8i-orangepi-plus-Enable-dwmac-sun8i.patch
@@ -0,0 +1,64 @@
+From 1dcd0095019aca7533eaeed9475d995a4eb30137 Mon Sep 17 00:00:00 2001
+From: Corentin Labbe
+Date: Mon, 5 Jun 2017 21:21:26 +0200
+Subject: ARM: sun8i: orangepi-plus: Enable dwmac-sun8i
+
+The dwmac-sun8i hardware is present on the Orange Pi Plus.
+It uses an external RTL8211E PHY via RGMII.
+
+This patch creates the needed regulator, emac and phy nodes.
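
The gmac-3v3 regulator this patch adds reaches the driver as the optional "phy" supply; sun8i_dwmac_probe(), earlier in this series, tolerates its absence, roughly (sketch, all names from that patch):

	/* optional PHY regulator: a missing supply is not an error */
	gmac->regulator = devm_regulator_get_optional(dev, "phy");
	if (IS_ERR(gmac->regulator)) {
		if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		gmac->regulator = NULL;
	}
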
+
+Signed-off-by: Corentin Labbe
+Signed-off-by: Maxime Ripard
+---
+ arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts | 32 ++++++++++++++++++++++++++++
+ 1 file changed, 32 insertions(+)
+
+--- a/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
++++ b/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
+@@ -47,6 +47,20 @@
+ model = "Xunlong Orange Pi Plus / Plus 2";
+ compatible = "xunlong,orangepi-plus", "allwinner,sun8i-h3";
+
++ aliases {
++ ethernet0 = &emac;
++ };
++
++ reg_gmac_3v3: gmac-3v3 {
++ compatible = "regulator-fixed";
++ regulator-name = "gmac-3v3";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ startup-delay-us = <100000>;
++ enable-active-high;
++ gpio = <&pio 3 6 GPIO_ACTIVE_HIGH>;
++ };
++
+ reg_usb3_vbus: usb3-vbus {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+@@ -64,6 +78,24 @@
+ status = "okay";
+ };
+
++&emac {
++ pinctrl-names = "default";
++ pinctrl-0 = <&emac_rgmii_pins>;
++ phy-supply = <&reg_gmac_3v3>;
++ phy-handle = <&ext_rgmii_phy>;
++ phy-mode = "rgmii";
++
++ allwinner,leds-active-low;
++ status = "okay";
++};
++
++&mdio {
++ ext_rgmii_phy: ethernet-phy@1 {
++ compatible = "ethernet-phy-ieee802.3-c22";
++ reg = <0>;
++ };
++};
++
+ &mmc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_8bit_pins>;
diff --git a/target/linux/sunxi/patches-4.9/0074-ARM-dts-sunxi-h3-h5-Correct-emac-register-size.patch b/target/linux/sunxi/patches-4.9/0074-ARM-dts-sunxi-h3-h5-Correct-emac-register-size.patch
new file mode 100644
index 000000000..50877a530
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0074-ARM-dts-sunxi-h3-h5-Correct-emac-register-size.patch
@@ -0,0 +1,26 @@
+From 072b6e3692532b6281bf781ded1c7a986ac17471 Mon Sep 17 00:00:00 2001
+From: Corentin Labbe
+Date: Thu, 6 Jul 2017 10:53:34 +0200
+Subject: ARM: dts: sunxi: h3/h5: Correct emac register size
+
+The datasheet says that the emac register size is 0x10000, not 0x104.
+
+Signed-off-by: Corentin Labbe
+Signed-off-by: Maxime Ripard
+[wens@csie.org: Fixed commit subject prefix]
+Signed-off-by: Chen-Yu Tsai
+---
+ arch/arm/boot/dts/sunxi-h3-h5.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/sun8i-h3.dtsi
++++ b/arch/arm/boot/dts/sun8i-h3.dtsi
+@@ -442,7 +442,7 @@
+ emac: ethernet@1c30000 {
+ compatible = "allwinner,sun8i-h3-emac";
+ syscon = <&syscon>;
+- reg = <0x01c30000 0x104>;
++ reg = <0x01c30000 0x10000>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ resets = <&ccu RST_BUS_EMAC>;
diff --git a/target/linux/sunxi/patches-4.9/0080-ARM-dts-sunxi-nanopi-neo-Enable-dwmac-sun8i.patch b/target/linux/sunxi/patches-4.9/0080-ARM-dts-sunxi-nanopi-neo-Enable-dwmac-sun8i.patch
new file mode 100644
index 000000000..4de2fd2e8
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0080-ARM-dts-sunxi-nanopi-neo-Enable-dwmac-sun8i.patch
@@ -0,0 +1,24 @@
+--- a/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
++++ b/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
+@@ -53,6 +53,7 @@
+
+ aliases {
+ serial0 = &uart0;
++ ethernet0 = &emac;
+ };
+
+ chosen {
+@@ -81,6 +82,13 @@
+ status = "okay";
+ };
+
++&emac {
++ phy-handle = <&int_mii_phy>;
++ phy-mode = "mii";
++ allwinner,leds-active-low;
++ status = "okay";
++};
++
+ &mmc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
diff --git a/target/linux/sunxi/patches-4.9/0081-ARM-dts-sun8i-nanopi-neo-enable-UART-USB-and-I2C-pin.patch b/target/linux/sunxi/patches-4.9/0081-ARM-dts-sun8i-nanopi-neo-enable-UART-USB-and-I2C-pin.patch
new file mode 100644
index 000000000..b815f7263
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0081-ARM-dts-sun8i-nanopi-neo-enable-UART-USB-and-I2C-pin.patch @@ -0,0 +1,80 @@ +From e036def69972b9db9d2695f45cbf4e84c707b3c5 Mon Sep 17 00:00:00 2001 +From: Daniel Golle +Date: Fri, 23 Dec 2016 07:28:51 +0100 +Subject: [PATCH] ARM: dts: sun8i: nanopi-neo: enable UART, USB and I2C pins + +--- + arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts | 43 ++++++++++++++++++++++++++++--- + 1 file changed, 40 insertions(+), 3 deletions(-) + +--- a/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts ++++ b/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts +@@ -78,10 +78,30 @@ + }; + }; + ++&ehci1 { ++ status = "okay"; ++}; ++ ++&ohci1 { ++ status = "okay"; ++}; ++ ++&ehci2 { ++ status = "okay"; ++}; ++ ++&ohci2 { ++ status = "okay"; ++}; ++ + &ehci3 { + status = "okay"; + }; + ++&ohci3 { ++ status = "okay"; ++}; ++ + &emac { + phy-handle = <&int_mii_phy>; + phy-mode = "mii"; +@@ -99,9 +119,6 @@ + status = "okay"; + }; + +-&ohci3 { +- status = "okay"; +-}; + + &pio { + leds_opc: led-pins { +@@ -127,7 +144,27 @@ + status = "okay"; + }; + ++&uart1 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&uart1_pins>; ++ status = "okay"; ++}; ++ ++&uart2 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&uart2_pins>; ++ status = "okay"; ++}; ++ + &usbphy { + /* USB VBUS is always on */ + status = "okay"; + }; ++ ++&i2c0 { ++ status = "okay"; ++}; ++ ++&i2c1 { ++ status = "okay"; ++}; diff --git a/target/linux/sunxi/patches-4.9/115-musb-ignore-vbus-errors.patch b/target/linux/sunxi/patches-4.9/115-musb-ignore-vbus-errors.patch new file mode 100644 index 000000000..460aec078 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/115-musb-ignore-vbus-errors.patch @@ -0,0 +1,26 @@ +From fce20ac5d8c98f1a8ea5298051d9fa669e455f04 Mon Sep 17 00:00:00 2001 +From: Hans de Goede +Date: Tue, 4 Aug 2015 23:22:45 +0200 +Subject: [PATCH] musb: sunxi: Ignore VBus errors in host-only mode + +For some unclear reason sometimes we get VBus errors in host-only mode, +even though we do not have any vbus-detection then. Ignore these. + +Signed-off-by: Hans de Goede +--- + drivers/usb/musb/sunxi.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/drivers/usb/musb/sunxi.c ++++ b/drivers/usb/musb/sunxi.c +@@ -202,6 +202,10 @@ static irqreturn_t sunxi_musb_interrupt( + musb_writeb(musb->mregs, MUSB_FADDR, 0); + } + ++ /* Ignore Vbus errors when in host only mode */ ++ if (musb->port_mode == MUSB_PORT_MODE_HOST) ++ musb->int_usb &= ~MUSB_INTR_VBUSERROR; ++ + musb->int_tx = readw(musb->mregs + SUNXI_MUSB_INTRTX); + if (musb->int_tx) + writew(musb->int_tx, musb->mregs + SUNXI_MUSB_INTRTX); diff --git a/target/linux/sunxi/patches-4.9/131-reset-add-h3-resets.patch b/target/linux/sunxi/patches-4.9/131-reset-add-h3-resets.patch new file mode 100644 index 000000000..dee01dc02 --- /dev/null +++ b/target/linux/sunxi/patches-4.9/131-reset-add-h3-resets.patch @@ -0,0 +1,92 @@ +From 5f0bb9d0bc545ef53a83f7bd176fdc0736eed8e5 Mon Sep 17 00:00:00 2001 +From: Jens Kuske +Date: Tue, 27 Oct 2015 17:50:24 +0100 +Subject: [PATCH] reset: sunxi: Add Allwinner H3 bus resets + +The H3 bus resets have some holes between the registers, so we add +an of_xlate() function to skip them according to the datasheet. 
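+
+Concretely, with the of_xlate() below: reset indices 0-95 map straight
+through, indices 96-127 map to register bits 128-159 (index + 32),
+indices 128-159 map to bits 192-223 (index + 64), and indices of 160
+or more are rejected with -EINVAL.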
+ +Signed-off-by: Jens Kuske +--- + .../bindings/reset/allwinner,sunxi-clock-reset.txt | 1 + + drivers/reset/reset-sunxi.c | 30 +++++++++++++++++++--- + 2 files changed, 28 insertions(+), 3 deletions(-) + +--- a/Documentation/devicetree/bindings/reset/allwinner,sunxi-clock-reset.txt ++++ b/Documentation/devicetree/bindings/reset/allwinner,sunxi-clock-reset.txt +@@ -8,6 +8,7 @@ Required properties: + - compatible: Should be one of the following: + "allwinner,sun6i-a31-ahb1-reset" + "allwinner,sun6i-a31-clock-reset" ++ "allwinner,sun8i-h3-bus-reset" + - reg: should be register base and length as documented in the + datasheet + - #reset-cells: 1, see below +--- a/drivers/reset/reset-sunxi.c ++++ b/drivers/reset/reset-sunxi.c +@@ -75,7 +75,9 @@ static const struct reset_control_ops su + .deassert = sunxi_reset_deassert, + }; + +-static int sunxi_reset_init(struct device_node *np) ++static int sunxi_reset_init(struct device_node *np, ++ int (*of_xlate)(struct reset_controller_dev *rcdev, ++ const struct of_phandle_args *reset_spec)) + { + struct sunxi_reset_data *data; + struct resource res; +@@ -108,6 +110,7 @@ static int sunxi_reset_init(struct devic + data->rcdev.nr_resets = size * 32; + data->rcdev.ops = &sunxi_reset_ops; + data->rcdev.of_node = np; ++ data->rcdev.of_xlate = of_xlate; + + return reset_controller_register(&data->rcdev); + +@@ -116,6 +119,21 @@ err_alloc: + return ret; + }; + ++static int sun8i_h3_bus_reset_xlate(struct reset_controller_dev *rcdev, ++ const struct of_phandle_args *reset_spec) ++{ ++ unsigned int index = reset_spec->args[0]; ++ ++ if (index < 96) ++ return index; ++ else if (index < 128) ++ return index + 32; ++ else if (index < 160) ++ return index + 64; ++ else ++ return -EINVAL; ++} ++ + /* + * These are the reset controller we need to initialize early on in + * our system, before we can even think of using a regular device +@@ -123,15 +141,21 @@ err_alloc: + */ + static const struct of_device_id sunxi_early_reset_dt_ids[] __initconst = { + { .compatible = "allwinner,sun6i-a31-ahb1-reset", }, ++ { .compatible = "allwinner,sun8i-h3-bus-reset", .data = sun8i_h3_bus_reset_xlate, }, + { /* sentinel */ }, + }; + + void __init sun6i_reset_init(void) + { + struct device_node *np; +- +- for_each_matching_node(np, sunxi_early_reset_dt_ids) +- sunxi_reset_init(np); ++ const struct of_device_id *match; ++ int (*of_xlate)(struct reset_controller_dev *rcdev, ++ const struct of_phandle_args *reset_spec); ++ ++ for_each_matching_node_and_match(np, sunxi_early_reset_dt_ids, &match) { ++ of_xlate = match->data; ++ sunxi_reset_init(np, of_xlate); ++ } + } + + /* diff --git a/target/linux/sunxi/patches-4.9/200-ARM-dts-sunxi-add-support-for-Orange-Pi-R1-board.patch b/target/linux/sunxi/patches-4.9/200-ARM-dts-sunxi-add-support-for-Orange-Pi-R1-board.patch new file mode 100644 index 000000000..7e92a565f --- /dev/null +++ b/target/linux/sunxi/patches-4.9/200-ARM-dts-sunxi-add-support-for-Orange-Pi-R1-board.patch @@ -0,0 +1,196 @@ +From daf75255fb67bf44db178e4c95a803b7972ed670 Mon Sep 17 00:00:00 2001 +From: Hauke Mehrtens +Date: Tue, 26 Sep 2017 22:29:06 +0200 +Subject: ARM: dts: sunxi: add support for Orange Pi R1 board + +Signed-off-by: Hauke Mehrtens +--- + arch/arm/boot/dts/Makefile | 1 + + arch/arm/boot/dts/sun8i-h2-plus-orangepi-r1.dts | 171 ++++++++++++++++++++++++ + 2 files changed, 172 insertions(+) + create mode 100644 arch/arm/boot/dts/sun8i-h2-plus-orangepi-r1.dts + +--- a/arch/arm/boot/dts/Makefile ++++ b/arch/arm/boot/dts/Makefile +@@ -819,6 +819,7 @@ 
dtb-$(CONFIG_MACH_SUN8I) += \ + sun8i-a33-sinlinx-sina33.dtb \ + sun8i-a83t-allwinner-h8homlet-v2.dtb \ + sun8i-a83t-cubietruck-plus.dtb \ ++ sun8i-h2-plus-orangepi-r1.dtb \ + sun8i-h3-bananapi-m2-plus.dtb \ + sun8i-h3-nanopi-neo.dtb \ + sun8i-h3-orangepi-2.dtb \ +--- /dev/null ++++ b/arch/arm/boot/dts/sun8i-h2-plus-orangepi-r1.dts +@@ -0,0 +1,171 @@ ++/* ++ * Copyright (C) 2017 Hauke Mehrtens ++ * ++ * Based on sun8i-h2-plus-orangepi-zero.dts, which is: ++ * Copyright (C) 2016 Icenowy Zheng ++ * ++ * Based on sun8i-h3-orangepi-one.dts, which is: ++ * Copyright (C) 2016 Hans de Goede ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPL or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This file is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */
++
++/dts-v1/;
++#include "sun8i-h3.dtsi"
++#include "sunxi-common-regulators.dtsi"
++
++#include
++#include
++#include
++
++/ {
++ model = "Xunlong Orange Pi R1";
++ compatible = "xunlong,orangepi-r1", "allwinner,sun8i-h2-plus";
++
++ aliases {
++ serial0 = &uart0;
++ /* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
++ ethernet0 = &emac;
++ ethernet1 = &xr819;
++ };
++
++ chosen {
++ stdout-path = "serial0:115200n8";
++ };
++
++ leds {
++ compatible = "gpio-leds";
++
++ pwr_led {
++ label = "orangepi:green:pwr";
++ gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>;
++ default-state = "on";
++ };
++
++ status_led {
++ label = "orangepi:red:status";
++ gpios = <&pio 0 17 GPIO_ACTIVE_HIGH>;
++ };
++ };
++
++ reg_vcc_wifi: reg_vcc_wifi {
++ compatible = "regulator-fixed";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-name = "vcc-wifi";
++ enable-active-high;
++ gpio = <&pio 0 20 GPIO_ACTIVE_HIGH>;
++ };
++
++ wifi_pwrseq: wifi_pwrseq {
++ compatible = "mmc-pwrseq-simple";
++ reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>;
++ post-power-on-delay-ms = <200>;
++ };
++};
++
++&ehci1 {
++ status = "okay";
++};
++
++&emac {
++ phy-handle = <&int_mii_phy>;
++ phy-mode = "mii";
++ allwinner,leds-active-low;
++ status = "okay";
++};
++
++&mmc0 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&mmc0_pins_a>;
++ vmmc-supply = <&reg_vcc3v3>;
++ bus-width = <4>;
++ cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 */
++ cd-inverted;
++ status = "okay";
++};
++
++&mmc1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&mmc1_pins_a>;
++ vmmc-supply = <&reg_vcc_wifi>;
++ mmc-pwrseq = <&wifi_pwrseq>;
++ bus-width = <4>;
++ non-removable;
++ status = "okay";
++
++ /*
++ * Explicitly define the sdio device, so that we can add an ethernet
++ * alias for it (which e.g. makes u-boot set a mac-address).
++ */
++ xr819: sdio_wifi@1 {
++ reg = <1>;
++ };
++};
++
++&mmc1_pins_a {
++ bias-pull-up;
++};
++
++&ohci1 {
++ status = "okay";
++};
++
++&uart0 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&uart0_pins_a>;
++ status = "okay";
++};
++
++&uart1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&uart1_pins>;
++ status = "disabled";
++};
++
++&uart2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&uart2_pins>;
++ status = "disabled";
++};
++
++&usbphy {
++ /* USB VBUS is always on */
++ status = "okay";
++};